code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler
import numpy as np
def step_decay_schedule(initial_lr=1e-3, decay_factor=0.75, step_size=10):
    """Build a Keras ``LearningRateScheduler`` implementing step decay.

    The learning rate at a given epoch is
    ``initial_lr * decay_factor ** floor(epoch / step_size)``, i.e. it is
    multiplied by ``decay_factor`` once every ``step_size`` epochs.
    """
    def schedule(epoch):
        exponent = np.floor(epoch / step_size)
        return initial_lr * decay_factor ** exponent
    return LearningRateScheduler(schedule)
| [
"numpy.floor",
"tensorflow.keras.callbacks.LearningRateScheduler"
] | [((391, 422), 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['schedule'], {}), '(schedule)\n', (412, 422), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), ((348, 375), 'numpy.floor', 'np.floor', (['(epoch / step_size)'], {}), '(epoch / step_size)\n', (356, 375), True, 'import numpy as np\n')] |
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input as preprocess_input_vgg
from keras.preprocessing import image
from numpy import linalg as LA
from common.const import input_shape
class VGGNet:
    """Headless ImageNet-pretrained VGG16 used as an image-feature extractor."""

    def __init__(self):
        # Fixed input geometry expected by the pretrained network.
        self.input_shape = (224, 224, 3)
        self.weight = 'imagenet'
        self.pooling = 'max'
        h, w, c = self.input_shape
        self.model_vgg = VGG16(weights=self.weight,
                               input_shape=(h, w, c),
                               pooling=self.pooling,
                               include_top=False)
        # Warm-up prediction so the first real request does not pay the
        # one-time graph-construction cost.
        self.model_vgg.predict(np.zeros((1, 224, 224, 3)))

    def vgg_extract_feat(self, img_path):
        """Return the L2-normalised VGG16 feature vector (list of floats)
        for the image stored at ``img_path``."""
        img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))
        batch = np.expand_dims(image.img_to_array(img), axis=0)
        batch = preprocess_input_vgg(batch)
        feat = self.model_vgg.predict(batch)
        unit = feat[0] / LA.norm(feat[0])
        return [component.item() for component in unit]
def vgg_extract_feat(img_path, model, graph, sess):
    """Extract an L2-normalised VGG feature using an explicit TF graph/session.

    Runs the prediction inside the supplied ``sess``/``graph`` contexts so it
    is safe to call from threads other than the one that built the model.
    Returns the feature as a list of Python floats.
    """
    with sess.as_default():
        with graph.as_default():
            img = image.load_img(img_path, target_size=(input_shape[0], input_shape[1]))
            batch = np.expand_dims(image.img_to_array(img), axis=0)
            batch = preprocess_input_vgg(batch)
            feat = model.predict(batch)
            unit = feat[0] / LA.norm(feat[0])
            return [component.item() for component in unit]
| [
"numpy.zeros",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"numpy.linalg.norm",
"keras.applications.vgg16.VGG16",
"keras.applications.vgg16.preprocess_input"
] | [((413, 567), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': 'self.weight', 'input_shape': '(self.input_shape[0], self.input_shape[1], self.input_shape[2])', 'pooling': 'self.pooling', 'include_top': '(False)'}), '(weights=self.weight, input_shape=(self.input_shape[0], self.\n input_shape[1], self.input_shape[2]), pooling=self.pooling, include_top\n =False)\n', (418, 567), False, 'from keras.applications.vgg16 import VGG16\n'), ((767, 852), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(self.input_shape[0], self.input_shape[1])'}), '(img_path, target_size=(self.input_shape[0], self.input_shape[1])\n )\n', (781, 852), False, 'from keras.preprocessing import image\n'), ((862, 885), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (880, 885), False, 'from keras.preprocessing import image\n'), ((900, 927), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (914, 927), True, 'import numpy as np\n'), ((942, 967), 'keras.applications.vgg16.preprocess_input', 'preprocess_input_vgg', (['img'], {}), '(img)\n', (962, 967), True, 'from keras.applications.vgg16 import preprocess_input as preprocess_input_vgg\n'), ((682, 708), 'numpy.zeros', 'np.zeros', (['(1, 224, 224, 3)'], {}), '((1, 224, 224, 3))\n', (690, 708), True, 'import numpy as np\n'), ((1041, 1057), 'numpy.linalg.norm', 'LA.norm', (['feat[0]'], {}), '(feat[0])\n', (1048, 1057), True, 'from numpy import linalg as LA\n'), ((1266, 1336), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(input_shape[0], input_shape[1])'}), '(img_path, target_size=(input_shape[0], input_shape[1]))\n', (1280, 1336), False, 'from keras.preprocessing import image\n'), ((1355, 1378), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1373, 1378), False, 'from keras.preprocessing import image\n'), ((1397, 1424), 'numpy.expand_dims', 
'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1411, 1424), True, 'import numpy as np\n'), ((1443, 1468), 'keras.applications.vgg16.preprocess_input', 'preprocess_input_vgg', (['img'], {}), '(img)\n', (1463, 1468), True, 'from keras.applications.vgg16 import preprocess_input as preprocess_input_vgg\n'), ((1541, 1557), 'numpy.linalg.norm', 'LA.norm', (['feat[0]'], {}), '(feat[0])\n', (1548, 1557), True, 'from numpy import linalg as LA\n')] |
import cv2
import numpy as np

img = cv2.imread('images/input.jpg')
rows, cols = img.shape[:2]

# Convolution kernels: identity plus two normalised box (averaging) filters.
kernel_identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
kernel_3x3 = np.ones((3, 3), np.float32) / 9.0
kernel_5x5 = np.ones((5, 5), np.float32) / 25.0

cv2.imshow('Original', img)
# ddepth=-1 keeps the output at the same depth as the source image.
for title, kernel in (('Identity filter', kernel_identity),
                      ('3x3 filter', kernel_3x3),
                      ('5x5 filter', kernel_5x5)):
    output = cv2.filter2D(img, -1, kernel)
    cv2.imshow(title, output)
cv2.waitKey(0)
| [
"cv2.filter2D",
"cv2.waitKey",
"numpy.ones",
"cv2.imread",
"numpy.array",
"cv2.imshow"
] | [((40, 70), 'cv2.imread', 'cv2.imread', (['"""images/input.jpg"""'], {}), "('images/input.jpg')\n", (50, 70), False, 'import cv2\n'), ((120, 163), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n', (128, 163), True, 'import numpy as np\n'), ((256, 283), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'img'], {}), "('Original', img)\n", (266, 283), False, 'import cv2\n'), ((327, 365), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kernel_identity'], {}), '(img, -1, kernel_identity)\n', (339, 365), False, 'import cv2\n'), ((411, 448), 'cv2.imshow', 'cv2.imshow', (['"""Identity filter"""', 'output'], {}), "('Identity filter', output)\n", (421, 448), False, 'import cv2\n'), ((460, 493), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kernel_3x3'], {}), '(img, -1, kernel_3x3)\n', (472, 493), False, 'import cv2\n'), ((495, 527), 'cv2.imshow', 'cv2.imshow', (['"""3x3 filter"""', 'output'], {}), "('3x3 filter', output)\n", (505, 527), False, 'import cv2\n'), ((539, 572), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kernel_5x5'], {}), '(img, -1, kernel_5x5)\n', (551, 572), False, 'import cv2\n'), ((574, 606), 'cv2.imshow', 'cv2.imshow', (['"""5x5 filter"""', 'output'], {}), "('5x5 filter', output)\n", (584, 606), False, 'import cv2\n'), ((608, 622), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (619, 622), False, 'import cv2\n'), ((172, 199), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (179, 199), True, 'import numpy as np\n'), ((219, 246), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.float32'], {}), '((5, 5), np.float32)\n', (226, 246), True, 'import numpy as np\n')] |
import argparse
import os
# workaround to unpickle olf model files
import sys
from pdb import set_trace as bp
import numpy as np
import torch
import gym
import my_pybullet_envs
import pybullet as p
import time
from a2c_ppo_acktr.envs import VecPyTorch, make_vec_envs
from a2c_ppo_acktr.utils import get_render_func, get_vec_normalize
import os
import inspect
# Absolute directory holding this script, resolved from the live stack frame.
_frame_file = inspect.getfile(inspect.currentframe())
currentdir = os.path.dirname(os.path.abspath(_frame_file))
homedir = os.path.expanduser("~")
# One simulation step at pybullet's default 240 Hz.
ts = 1.0 / 240
# may need to refactor this into robot class
def planning(robot):
    """Replay the joint-space trajectory stored in the module-level ``Traj``.

    For each waypoint, position-controls the arm joints toward the waypoint
    while holding the fingers at their current targets, then advances the
    simulation one step.  After the last waypoint, holds the final arm target
    for 50 extra steps so the arm can settle.

    NOTE(review): reads the globals ``Traj`` and ``ts``; assumes each Traj
    row holds at least 7 arm joint angles in columns 0..6 — confirm against
    the OpenRave output format.
    """
    for ind in range(len(Traj) - 1):
        tar_armq = Traj[ind,0:7]
        # for ji, i in enumerate(robot.arm_dofs):
        #     p.resetJointState(robot.arm_id, i, tar_armq[ji])
        # for ind in range(len(robot.fin_actdofs)):
        #     p.resetJointState(robot.arm_id, robot.fin_actdofs[ind], robot.init_fin_q[ind], 0.0)
        # for ind in range(len(robot.fin_zerodofs)):
        #     p.resetJointState(robot.arm_id, robot.fin_zerodofs[ind], 0.0, 0.0)
        #print(tar_armq)
        # Drive the arm joints toward the current waypoint (tripled force cap).
        p.setJointMotorControlArray(
            bodyIndex=robot.arm_id,
            jointIndices=robot.arm_dofs,
            controlMode=p.POSITION_CONTROL,
            targetPositions=list(tar_armq),
            forces=[robot.maxForce * 3] * len(robot.arm_dofs))
        # Hold the actuated finger joints at their existing targets.
        p.setJointMotorControlArray(
            bodyIndex=robot.arm_id,
            jointIndices=robot.fin_actdofs,
            controlMode=p.POSITION_CONTROL,
            targetPositions=list(robot.tar_fin_q),
            forces=[robot.maxForce] * len(robot.tar_fin_q))
        # Pin the passive finger joints at zero with a reduced force cap.
        p.setJointMotorControlArray(
            bodyIndex=robot.arm_id,
            jointIndices=robot.fin_zerodofs,
            controlMode=p.POSITION_CONTROL,
            targetPositions=[0.0] * len(robot.fin_zerodofs),
            forces=[robot.maxForce / 4.0] * len(robot.fin_zerodofs))
        p.stepSimulation()
        # print(robot.tar_fin_q)
        time.sleep(ts)
        # True means the arm is currently collision-free.
        cps = p.getContactPoints(bodyA=robot.arm_id)
        print(len(cps) == 0)
    # Hold the final waypoint so the arm settles before the next phase.
    for _ in range(50):
        robot.tar_arm_q = tar_armq
        p.stepSimulation()
        #time.sleep(1. / 240.)  # TODO: stay still for a while
sys.path.append('a2c_ppo_acktr')
# Command-line interface for this rollout/demo script.
parser = argparse.ArgumentParser(description='RL')
parser.add_argument(
    '--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    help='log interval, one log per n updates (default: 10)')
parser.add_argument(
    '--env-name',
    default='ShadowHandDemoBulletEnv-v1',
    help='environment to train on (default: PongNoFrameskip-v4)')
parser.add_argument(
    '--load-dir',
    # default='./trained_models_0114_box_l_4/ppo/', # TODO
    default='./trained_models_0117_box_l_1/ppo/',
    help='directory to save agent logs (default: ./trained_models/)')
parser.add_argument(
    '--non-det',
    type=int,
    default=0,
    help='whether to use a non-deterministic policy, 1 true 0 false')
parser.add_argument(
    '--iter',
    type=int,
    default=-1,
    help='which iter pi to test')
args = parser.parse_args()
# TODO
# Hard-coded CUDA selection; flip to False to run on CPU.
is_cuda = True
device = 'cuda' if is_cuda else 'cpu'
# Deterministic policy unless --non-det 1 was passed.
args.det = not args.non_det
#np.random.seed(123)
# --- Simulation setup: GUI-connected pybullet world at 240 Hz. ---
p.connect(p.GUI)
p.resetSimulation()
p.setTimeStep(ts)
p.setGravity(0, 0, -10)
# must use vector version of make_env as to use vec_normalize
env = make_vec_envs(
    args.env_name,
    args.seed + 1000,
    1,
    None,
    None,
    device=device,
    allow_early_resets=False)
# dont know why there are so many wrappers in make_vec_envs...
env_core = env.venv.venv.envs[0]
table_id = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/tabletop.urdf'), [0.27, 0.1, 0.0], useFixedBase=1) # TODO
############################# HERE IS THE INPUT FROM VISION AND LANGUAGE MODULE
tx = np.random.uniform(low=0, high=0.25) # target object location
ty = np.random.uniform(low=-0.1, high=0.5)
destin_x = np.random.uniform(low=0, high=0.25) # destination location for target object
destin_y = np.random.uniform(low=-0.1, high=0.5)
destin_z = 0
#tx = 0.1
#ty = 0.0
#est_tx = tx
#est_ty = ty
# Simulate perception noise: the estimated pose is within 1 cm of the truth.
est_tx = tx + np.random.uniform(low=-0.01, high=0.01)
est_ty = ty + np.random.uniform(low=-0.01, high=0.01)
# Object table passed to the planner: one row per object (x, y, 0, 0).
OBJECTS = np.array([[est_tx,est_ty,0,0],[0.8, 0.8, 0, 0]])
# oid1 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/cylinder.urdf'), [a_tx, a_ty, 0.1], useFixedBase=0) # tar obj
oid1 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/box.urdf'), [tx, ty, 0.1], useFixedBase=0) # tar obj
# oid2 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/box.urdf'), [0.1, 0.2, 0.1], useFixedBase=0)
# oid3 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/cylinder.urdf'), [0.2, -0.15, 0.1], useFixedBase=0)
env_core.assign_estimated_obj_pos(est_tx, est_ty)
p.changeDynamics(oid1, -1, lateralFriction=1.0)
p.changeDynamics(table_id, -1, lateralFriction=1.0)
# print(oid1)
# # Get a render function
# render_func = get_render_func(env)
#
# print(render_func)
# Use the kinematics-only "imaginary" arm session to pick comfortable
# arm configurations for the reach and place targets.
from my_pybullet_envs.inmoov_shadow_grasp_env_v2 import ImaginaryArmObjSession
sess = ImaginaryArmObjSession()
Qreach = np.array(sess.get_most_comfortable_q(OBJECTS[0,0],OBJECTS[0,1]))
Qdestin = np.array(sess.get_most_comfortable_q(destin_x,destin_y)) #################################################### NEEDS TO HAVE Z and object orientation!!!
# send command to Openrave
# Handshake file consumed by the OpenRave planner running in a container.
file_path = homedir+'/container_data/PB_REACH.npz'
np.savez(file_path,OBJECTS,Qreach)
# We need to use the same statistics for normalization as used in training
ori_env_name = 'InmoovHandGraspBulletEnv-v1'
if args.iter >= 0:
    path = os.path.join(args.load_dir, ori_env_name + "_" + str(args.iter) + ".pt")
else:
    path = os.path.join(args.load_dir, ori_env_name + ".pt")
# Checkpoint stores (policy, observation running-mean statistics).
if is_cuda:
    actor_critic, ob_rms = torch.load(path)
else:
    actor_critic, ob_rms = torch.load(path, map_location='cpu')
# Reuse the training-time observation normalisation statistics at eval time.
vec_norm = get_vec_normalize(env)
if vec_norm is not None:
    vec_norm.eval()
    vec_norm.ob_rms = ob_rms
recurrent_hidden_states = torch.zeros(1,
                                      actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
# if render_func is not None:
#     render_func('human')
# obs_old = env.reset()
# print(obs_old)
env.reset()
#
# if args.env_name.find('Bullet') > -1:
#     import pybullet as p
#
#     torsoId = -1
#     for i in range(p.getNumBodies()):
#         if (p.getBodyInfo(i)[0].decode() == "r_forearm_link"):
#             torsoId = i
control_steps = 0
# TODO: change this to read it OpenRave file
###
#Get planned trajectory from Openrave:
# Block until the OpenRave container writes the planned reach trajectory.
file_path = homedir+'/container_data/OR_REACH.npy'
while not os.path.exists(file_path):
    time.sleep(1)
if os.path.isfile(file_path):
    Traj = np.load(file_path)
    os.remove(file_path)
else:
    raise ValueError("%s isn't a file!" % file_path)
###
print("Trajectory obtained from OpenRave!")
input("press enter")
# Execute the reach trajectory (uses the global Traj loaded above).
planning(env_core.robot)
# print(robot.get_q_dq(robot.arm_dofs))
# print(robot.tar_arm_q)
# print(robot.tar_fin_q)
# input("press enter")
# env_core.robot.reset_with_certain_arm_q([-7.60999597e-01, 3.05809706e-02, -5.82112526e-01,
#                                          -1.40855264e+00, -6.49374902e-01, -2.42410664e-01,
#                                          0.00000000e+00])
# env_core.robot.tar_arm_q = [-7.60999597e-01, 3.05809706e-02, -5.82112526e-01,
#                             -1.40855264e+00, -6.49374902e-01, -2.42410664e-01,
#                             0.00000000e+00]
# p.stepSimulation()
# print("tar arm q after reset", robot.tar_arm_q)
# time.sleep(3)
obs = torch.Tensor([env_core.getExtendedObservation()])
if is_cuda:
    obs = obs.cuda()
# print("tar arm q after getting obs using env_core", env_core.robot.tar_arm_q)
# print("tar arm q after getting obs", robot.tar_arm_q)
#print("init obs", obs)
# input("press enter")
# # print(obs)
#
# print("diff", obs - obs_old)
# Run the learned grasping policy for a fixed 200 control steps.
for i in range(200):
    with torch.no_grad():
        value, action, _, recurrent_hidden_states = actor_critic.act(
            obs, recurrent_hidden_states, masks, deterministic=args.det)
    # print(action)
    # if i > 100:
    #     action[0, 1] = 0.5
    obs, reward, done, _ = env.step(action)
    control_steps += 1
    # if control_steps >= 100:  # done grasping
    #     for _ in range(1000):
    #         p.stepSimulation()
    #         time.sleep(ts)
    masks.fill_(0.0 if done else 1.0)
# Hand the post-grasp arm state (q, dq) plus the place target to OpenRave.
Qmove_init = np.concatenate((env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0],env_core.robot.get_q_dq(env_core.robot.arm_dofs)[1]))
file_path = homedir+'/container_data/PB_MOVE.npz'
np.savez(file_path,OBJECTS,Qmove_init,Qdestin)
# Block until the OpenRave container writes the planned move trajectory.
file_path = homedir+'/container_data/OR_MOVE.npy'
while not os.path.exists(file_path):
    time.sleep(1)
if os.path.isfile(file_path):
    Traj = np.load(file_path)
    os.remove(file_path)
else:
    raise ValueError("%s isn't a file!" % file_path)
print("Trajectory obtained from OpenRave!")
input("press enter")
# Execute the move/place trajectory.
planning(env_core.robot)
#print(env_core.getExtendedObservation())
# Drop into the debugger so the final scene can be inspected interactively.
bp()
#input("press enter") | [
"numpy.load",
"os.remove",
"pybullet.resetSimulation",
"argparse.ArgumentParser",
"os.path.isfile",
"pybullet.connect",
"torch.no_grad",
"os.path.join",
"sys.path.append",
"pybullet.getContactPoints",
"pybullet.setGravity",
"torch.load",
"os.path.exists",
"pybullet.setTimeStep",
"torch.z... | [((460, 483), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (478, 483), False, 'import os\n'), ((2317, 2349), 'sys.path.append', 'sys.path.append', (['"""a2c_ppo_acktr"""'], {}), "('a2c_ppo_acktr')\n", (2332, 2349), False, 'import sys\n'), ((2359, 2400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""RL"""'}), "(description='RL')\n", (2382, 2400), False, 'import argparse\n'), ((3367, 3383), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (3376, 3383), True, 'import pybullet as p\n'), ((3384, 3403), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (3401, 3403), True, 'import pybullet as p\n'), ((3404, 3421), 'pybullet.setTimeStep', 'p.setTimeStep', (['ts'], {}), '(ts)\n', (3417, 3421), True, 'import pybullet as p\n'), ((3422, 3445), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (3434, 3445), True, 'import pybullet as p\n'), ((3515, 3621), 'a2c_ppo_acktr.envs.make_vec_envs', 'make_vec_envs', (['args.env_name', '(args.seed + 1000)', '(1)', 'None', 'None'], {'device': 'device', 'allow_early_resets': '(False)'}), '(args.env_name, args.seed + 1000, 1, None, None, device=device,\n allow_early_resets=False)\n', (3528, 3621), False, 'from a2c_ppo_acktr.envs import VecPyTorch, make_vec_envs\n'), ((3962, 3997), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(0.25)'}), '(low=0, high=0.25)\n', (3979, 3997), True, 'import numpy as np\n'), ((4028, 4065), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.1)', 'high': '(0.5)'}), '(low=-0.1, high=0.5)\n', (4045, 4065), True, 'import numpy as np\n'), ((4078, 4113), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(0.25)'}), '(low=0, high=0.25)\n', (4095, 4113), True, 'import numpy as np\n'), ((4166, 4203), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.1)', 'high': '(0.5)'}), '(low=-0.1, high=0.5)\n', (4183, 
4203), True, 'import numpy as np\n'), ((4382, 4434), 'numpy.array', 'np.array', (['[[est_tx, est_ty, 0, 0], [0.8, 0.8, 0, 0]]'], {}), '([[est_tx, est_ty, 0, 0], [0.8, 0.8, 0, 0]])\n', (4390, 4434), True, 'import numpy as np\n'), ((4975, 5022), 'pybullet.changeDynamics', 'p.changeDynamics', (['oid1', '(-1)'], {'lateralFriction': '(1.0)'}), '(oid1, -1, lateralFriction=1.0)\n', (4991, 5022), True, 'import pybullet as p\n'), ((5023, 5074), 'pybullet.changeDynamics', 'p.changeDynamics', (['table_id', '(-1)'], {'lateralFriction': '(1.0)'}), '(table_id, -1, lateralFriction=1.0)\n', (5039, 5074), True, 'import pybullet as p\n'), ((5264, 5288), 'my_pybullet_envs.inmoov_shadow_grasp_env_v2.ImaginaryArmObjSession', 'ImaginaryArmObjSession', ([], {}), '()\n', (5286, 5288), False, 'from my_pybullet_envs.inmoov_shadow_grasp_env_v2 import ImaginaryArmObjSession\n'), ((5605, 5641), 'numpy.savez', 'np.savez', (['file_path', 'OBJECTS', 'Qreach'], {}), '(file_path, OBJECTS, Qreach)\n', (5613, 5641), True, 'import numpy as np\n'), ((6070, 6092), 'a2c_ppo_acktr.utils.get_vec_normalize', 'get_vec_normalize', (['env'], {}), '(env)\n', (6087, 6092), False, 'from a2c_ppo_acktr.utils import get_render_func, get_vec_normalize\n'), ((6194, 6250), 'torch.zeros', 'torch.zeros', (['(1)', 'actor_critic.recurrent_hidden_state_size'], {}), '(1, actor_critic.recurrent_hidden_state_size)\n', (6205, 6250), False, 'import torch\n'), ((6297, 6314), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (6308, 6314), False, 'import torch\n'), ((6870, 6895), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (6884, 6895), False, 'import os\n'), ((8681, 8730), 'numpy.savez', 'np.savez', (['file_path', 'OBJECTS', 'Qmove_init', 'Qdestin'], {}), '(file_path, OBJECTS, Qmove_init, Qdestin)\n', (8689, 8730), True, 'import numpy as np\n'), ((8837, 8862), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (8851, 8862), False, 'import os\n'), ((9111, 9115), 
'pdb.set_trace', 'bp', ([], {}), '()\n', (9113, 9115), True, 'from pdb import set_trace as bp\n'), ((2096, 2134), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'robot.arm_id'}), '(bodyA=robot.arm_id)\n', (2114, 2134), True, 'import pybullet as p\n'), ((3767, 3832), 'os.path.join', 'os.path.join', (['currentdir', '"""my_pybullet_envs/assets/tabletop.urdf"""'], {}), "(currentdir, 'my_pybullet_envs/assets/tabletop.urdf')\n", (3779, 3832), False, 'import os\n'), ((4278, 4317), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.01)', 'high': '(0.01)'}), '(low=-0.01, high=0.01)\n', (4295, 4317), True, 'import numpy as np\n'), ((4332, 4371), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.01)', 'high': '(0.01)'}), '(low=-0.01, high=0.01)\n', (4349, 4371), True, 'import numpy as np\n'), ((4583, 4643), 'os.path.join', 'os.path.join', (['currentdir', '"""my_pybullet_envs/assets/box.urdf"""'], {}), "(currentdir, 'my_pybullet_envs/assets/box.urdf')\n", (4595, 4643), False, 'import os\n'), ((5881, 5930), 'os.path.join', 'os.path.join', (['args.load_dir', "(ori_env_name + '.pt')"], {}), "(args.load_dir, ori_env_name + '.pt')\n", (5893, 5930), False, 'import os\n'), ((5971, 5987), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (5981, 5987), False, 'import torch\n'), ((6021, 6057), 'torch.load', 'torch.load', (['path'], {'map_location': '"""cpu"""'}), "(path, map_location='cpu')\n", (6031, 6057), False, 'import torch\n'), ((6822, 6847), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (6836, 6847), False, 'import os\n'), ((6853, 6866), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6863, 6866), False, 'import time\n'), ((6908, 6926), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (6915, 6926), True, 'import numpy as np\n'), ((6931, 6951), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (6940, 6951), False, 'import os\n'), ((8789, 8814), 'os.path.exists', 
'os.path.exists', (['file_path'], {}), '(file_path)\n', (8803, 8814), False, 'import os\n'), ((8820, 8833), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8830, 8833), False, 'import time\n'), ((8875, 8893), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (8882, 8893), True, 'import numpy as np\n'), ((8898, 8918), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (8907, 8918), False, 'import os\n'), ((2002, 2020), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2018, 2020), True, 'import pybullet as p\n'), ((2070, 2084), 'time.sleep', 'time.sleep', (['ts'], {}), '(ts)\n', (2080, 2084), False, 'import time\n'), ((2227, 2245), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2243, 2245), True, 'import pybullet as p\n'), ((8049, 8064), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8062, 8064), False, 'import torch\n'), ((424, 446), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (444, 446), False, 'import inspect\n')] |
import numpy as np
import gym
import time
from gym import error, spaces
from gym import core, spaces
from gym.envs.registration import register
import random
# from attn_toy.env.rendering import *
from copy import copy
class Fourrooms(object):
    # metadata = {'render.modes':['human']}

    # state : number of state, counted from row and col
    # cell : (i,j)
    # observation : resultList[state]
    # small : 104 large 461
    """Classic four-rooms gridworld with a gym-like interface.

    NOTE(review): rendering depends on ``Viewer`` and ``make_polygon`` from
    ``attn_toy.env.rendering``, whose import is commented out at the top of
    the file — instantiating this class will raise NameError unless that
    import is restored.  ``FourroomsNorender`` avoids the dependency.
    """

    def __init__(self, max_epilen=100):
        """Build the grid, then the GL viewer and static wall geometry.

        max_epilen: episode length cap in steps.
        """
        self.init_basic(max_epilen)

        self.viewer = Viewer(self.block_size * len(self.occupancy), self.block_size * len(self.occupancy[0]))
        self.blocks = self.make_blocks()

    def init_basic(self, max_epilen):
        """Initialise layout, spaces, state<->cell mappings and counters."""
        # '1' = wall, ' ' = free cell; 13x13 with four rooms and four doorways.
        self.layout = """\
1111111111111
1     1     1
1     1     1
1           1
1     1     1
1     1     1
11 1111     1
1     111 111
1     1     1
1     1     1
1           1
1     1     1
1111111111111
"""
        self.block_size = 8
        self.occupancy = np.array(
            [list(map(lambda c: 1 if c == '1' else 0, line)) for line in self.layout.splitlines()])
        self.num_pos = int(np.sum(self.occupancy == 0))
        self.obs_height = self.block_size * len(self.occupancy)
        self.obs_width = self.block_size * len(self.occupancy[0])
        # From any state the agent can perform one of four actions, up, down, left or right
        self.action_space = spaces.Discrete(4)
        # self.observation_space = spaces.Discrete(np.sum(self.occupancy == 0))
        self.observation_space = spaces.Discrete(self.num_pos)
        # Row/column deltas for actions 0..3: up, down, left, right.
        self.directions = [np.array((-1, 0)), np.array((1, 0)), np.array((0, -1)), np.array((0, 1))]
        # self.rng = np.random.RandomState(1234)
        self.rand_color = np.random.randint(0, 255, (200, 3))
        self.tostate = {}
        self.semantics = dict()

        # Number free cells in row-major order: (i, j) -> state index.
        statenum = 0
        # print("Here", len(self.occupancy), len(self.occupancy[0]))
        for i in range(len(self.occupancy)):
            for j in range(len(self.occupancy[0])):
                if self.occupancy[i, j] == 0:
                    self.tostate[(i, j)] = statenum
                    statenum += 1
        self.tocell = {v: k for k, v in self.tostate.items()}

        self.goal = 62
        # All non-goal states are valid reset states.
        self.init_states = list(range(self.observation_space.n))
        self.init_states.remove(self.goal)
        # random encode
        # Identity observation encoding: state index maps to itself.
        self.mapping = np.arange(self.num_pos)
        self.dict = np.zeros((self.observation_space.n, 3))
        self.Row = np.shape(self.occupancy)[0]
        self.Col = np.shape(self.occupancy)[1]
        self.current_steps = 0
        self.max_epilen = max_epilen
        self.get_dict()
        self.currentcell = (-1, -1)
        self.reward_range = (0, 1)
        self.metadata = None
        self.done = False
        self.allow_early_resets = True
        self.unwrapped = self
        self.state = -1

    def make_blocks(self):
        """Create a filled black polygon per wall cell and add it to the viewer."""
        blocks = []
        size = self.block_size
        for i, row in enumerate(self.occupancy):
            for j, o in enumerate(row):
                if o == 1:
                    v = [[i * size, j * size], [i * size, (j + 1) * size], [(i + 1) * size, (j + 1) * size],
                         [(i + 1) * size, (j) * size]]
                    geom = make_polygon(v, filled=True)
                    geom.set_color(0, 0, 0)
                    blocks.append(geom)
                    self.viewer.add_geom(geom)
        return blocks

    def check_obs(self, obs, info="None"):
        """Assert every observation in ``obs`` is a valid encoded state."""
        # print([ob for ob in obs if ob not in self.mapping])
        assert all([int(ob) in self.mapping for ob in obs]), "what happened? " + info

    def empty_around(self, cell):
        """Return the list of free neighbouring cells of ``cell``."""
        avail = []
        for action in range(self.action_space.n):
            nextcell = tuple(cell + self.directions[action])
            if not self.occupancy[nextcell]:
                avail.append(nextcell)
        return avail

    def reset(self, state=-1):
        """Start a new episode; choose a random non-goal state if state < 0."""
        # state = self.rng.choice(self.init_states)
        # self.viewer.close()
        if state < 0:
            state = np.random.choice(self.init_states)
        self.currentcell = self.tocell[state]
        self.done = False
        self.current_steps = 0
        self.state = state
        return np.array(self.mapping[state])

    def step(self, action):
        """
        The agent can perform one of four actions,
        up, down, left or right, which have a stochastic effect. With probability 2/3, the actions
        cause the agent to move one cell in the corresponding direction, and with probability 1/3,
        the agent moves instead in one of the other three directions, each with 1/9 probability. In
        either case, if the movement would take the agent into a wall then the agent remains in the
        same cell.
        We consider a case in which rewards are zero on all state transitions.

        Returns (observation, reward, done, info); reward is 100 at the goal
        and -1 otherwise.
        """
        # print(self.currentcell, self.directions, action)
        # ``action`` may arrive as a scalar or as a length-1 sequence.
        try:
            nextcell = tuple(self.currentcell + self.directions[action])
        except TypeError:
            nextcell = tuple(self.currentcell + self.directions[action[0]])

        if not self.occupancy[nextcell]:
            self.currentcell = nextcell
            # Stochastic slip is disabled here: uniform() < 0. is never true.
            if np.random.uniform() < 0.:
                # if self.rng.uniform() < 1/3.:
                empty_cells = self.empty_around(self.currentcell)
                # self.currentcell = empty_cells[self.rng.randint(len(empty_cells))]
                self.currentcell = empty_cells[np.random.randint(len(empty_cells))]

        state = self.tostate[self.currentcell]
        self.current_steps += 1
        self.done = state == self.goal or self.current_steps >= self.max_epilen
        # if self.current_steps >= self.max_epilen:
        #     self.done = True
        info = {}
        if self.done:
            # print(self.current_step, state == self.goal, self.max_epilen)
            info = {'episode': {'r': 100 - self.current_steps if state == self.goal else -self.current_steps,
                                'l': self.current_steps}}
        # print(self.currentcell)
        self.state = state
        if state == self.goal:
            reward = 100
        else:
            reward = -1

        return np.array(self.mapping[state]), reward, self.done, info

    def get_dict(self):
        """Fill ``self.dict`` with (code, row, col) per state and build the
        human-readable ``self.semantics`` mapping (code -> "row_col")."""
        count = 0
        for i in range(self.Row):
            for j in range(self.Col):
                if self.occupancy[i, j] == 0:
                    # code
                    self.dict[count, 0] = self.mapping[count]
                    # i,j
                    self.dict[count, 1] = i
                    self.dict[count, 2] = j
                    self.semantics[self.mapping[count]] = str(i) + '_' + str(j)
                    count += 1
        # print(self.semantics)
        return self.semantics

    def add_block(self, x, y, color):
        """Draw a one-frame colored square at grid cell (x, y)."""
        size = self.block_size
        v = [[x * size, y * size], [x * size, (y + 1) * size], [(x + 1) * size, (y + 1) * size],
             [(x + 1) * size, y * size]]
        geom = make_polygon(v, filled=True)
        r, g, b = color
        geom.set_color(r, g, b)
        self.viewer.add_onetime(geom)

    def render(self, mode=0):
        """Render agent (blue) and goal (red) on the grid; return an RGB array."""
        if self.currentcell[0] > 0:
            x, y = self.currentcell
            # state = self.tostate[self.currentcell]
            # self.add_block(x, y, tuple(self.rand_color[state]/255))
            self.add_block(x, y, (0, 0, 1))
        x, y = self.tocell[self.goal]
        self.add_block(x, y, (1, 0, 0))
        # self.viewer.
        arr = self.viewer.render(return_rgb_array=True)
        return arr

    def seed(self, seed):
        """No-op: this environment does not use a seeded RNG."""
        pass

    def close(self):
        """No-op: viewer teardown is not implemented."""
        pass

    def all_states(self):
        """Return the observation encoding for every state."""
        return self.mapping
class FourroomsNorender(Fourrooms):
    """Four-rooms variant that renders to a numpy array with no GL viewer.

    Walls are rasterised once into a cached background image; per-frame
    rendering only paints the agent and goal squares on a copy of it.

    Bugfix: ``render_with_blocks`` previously applied numpy-style 2-D
    subscripts (``v[0, 0]``) to the plain Python lists produced by
    ``make_blocks``/``make_block``, which raises ``TypeError`` ("list
    indices must be integers"); the vertices are now converted to an
    ndarray before indexing.
    """

    def __init__(self, max_epilen=100):
        self.init_basic(max_epilen)
        self.blocks = self.make_blocks()
        # Rasterise the static walls once onto a black canvas.
        self.background = self.render_with_blocks(
            np.zeros((self.obs_height, self.obs_width, 3), dtype=np.uint8),
            self.blocks)

    def render(self, mode=0):
        """Return an (obs_height, obs_width, 3) image of the current frame."""
        blocks = []
        if self.currentcell[0] > 0:
            x, y = self.currentcell
            # state = self.tostate[self.currentcell]
            # self.add_block(x, y, tuple(self.rand_color[state]/255))
            blocks.append(self.make_block(x, y, (0, 0, 1)))  # agent: blue
        x, y = self.tocell[self.goal]
        blocks.append(self.make_block(x, y, (1, 0, 0)))  # goal: red
        arr = self.render_with_blocks(self.background, blocks)
        return arr

    def render_with_blocks(self, background, blocks):
        """Paint each (vertices, color) block onto a copy of ``background``.

        background: H x W x 3 array-like image.
        blocks: iterable of (vertices, color); vertices[0] is the top-left
            corner and vertices[2] the bottom-right corner of the rectangle.
        """
        background = np.copy(np.array(background))
        assert background.shape[-1] == len(background.shape) == 3
        for block in blocks:
            v, color = block
            # Vertices arrive as plain nested lists; convert so 2-D
            # subscripting below is valid.
            v = np.asarray(v)
            background[v[0, 0]:v[2, 0], v[0, 1]:v[2, 1], :] = np.array(color)
        return background

    def make_blocks(self):
        """Return a (vertices, color) tuple for every wall cell (black)."""
        blocks = []
        size = self.block_size
        for i, row in enumerate(self.occupancy):
            for j, o in enumerate(row):
                if o == 1:
                    v = [[i * size, j * size], [i * size, (j + 1) * size], [(i + 1) * size, (j + 1) * size],
                         [(i + 1) * size, (j) * size]]
                    color = (0, 0, 0)
                    geom = (v, color)
                    blocks.append(geom)
        return blocks

    def make_block(self, x, y, color):
        """Return the (vertices, color) tuple covering grid cell (x, y)."""
        size = self.block_size
        v = [[x * size, y * size], [x * size, (y + 1) * size], [(x + 1) * size, (y + 1) * size],
             [(x + 1) * size, y * size]]
        geom = (v, color)
        return geom
# register(
# id='Fourrooms-v0',
# entry_point='fourrooms:Fourrooms',
# timestep_limit=20000,
# reward_threshold=1,
# )
| [
"numpy.random.uniform",
"numpy.sum",
"numpy.zeros",
"gym.spaces.Discrete",
"numpy.shape",
"numpy.random.randint",
"numpy.arange",
"numpy.array",
"numpy.random.choice"
] | [((1493, 1511), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (1508, 1511), False, 'from gym import core, spaces\n'), ((1625, 1654), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.num_pos'], {}), '(self.num_pos)\n', (1640, 1654), False, 'from gym import core, spaces\n'), ((1833, 1868), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(200, 3)'], {}), '(0, 255, (200, 3))\n', (1850, 1868), True, 'import numpy as np\n'), ((2488, 2511), 'numpy.arange', 'np.arange', (['self.num_pos'], {}), '(self.num_pos)\n', (2497, 2511), True, 'import numpy as np\n'), ((2532, 2571), 'numpy.zeros', 'np.zeros', (['(self.observation_space.n, 3)'], {}), '((self.observation_space.n, 3))\n', (2540, 2571), True, 'import numpy as np\n'), ((4343, 4372), 'numpy.array', 'np.array', (['self.mapping[state]'], {}), '(self.mapping[state])\n', (4351, 4372), True, 'import numpy as np\n'), ((1214, 1241), 'numpy.sum', 'np.sum', (['(self.occupancy == 0)'], {}), '(self.occupancy == 0)\n', (1220, 1241), True, 'import numpy as np\n'), ((1683, 1700), 'numpy.array', 'np.array', (['(-1, 0)'], {}), '((-1, 0))\n', (1691, 1700), True, 'import numpy as np\n'), ((1702, 1718), 'numpy.array', 'np.array', (['(1, 0)'], {}), '((1, 0))\n', (1710, 1718), True, 'import numpy as np\n'), ((1720, 1737), 'numpy.array', 'np.array', (['(0, -1)'], {}), '((0, -1))\n', (1728, 1737), True, 'import numpy as np\n'), ((1739, 1755), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (1747, 1755), True, 'import numpy as np\n'), ((2591, 2615), 'numpy.shape', 'np.shape', (['self.occupancy'], {}), '(self.occupancy)\n', (2599, 2615), True, 'import numpy as np\n'), ((2638, 2662), 'numpy.shape', 'np.shape', (['self.occupancy'], {}), '(self.occupancy)\n', (2646, 2662), True, 'import numpy as np\n'), ((4163, 4197), 'numpy.random.choice', 'np.random.choice', (['self.init_states'], {}), '(self.init_states)\n', (4179, 4197), True, 'import numpy as np\n'), ((6327, 6356), 'numpy.array', 'np.array', 
(['self.mapping[state]'], {}), '(self.mapping[state])\n', (6335, 6356), True, 'import numpy as np\n'), ((8051, 8113), 'numpy.zeros', 'np.zeros', (['(self.obs_height, self.obs_width, 3)'], {'dtype': 'np.uint8'}), '((self.obs_height, self.obs_width, 3), dtype=np.uint8)\n', (8059, 8113), True, 'import numpy as np\n'), ((8811, 8831), 'numpy.array', 'np.array', (['background'], {}), '(background)\n', (8819, 8831), True, 'import numpy as np\n'), ((9019, 9034), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (9027, 9034), True, 'import numpy as np\n'), ((5319, 5338), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5336, 5338), True, 'import numpy as np\n')] |
'''
Usage:
tflite_test.py --model="mymodel.tflite"
Note:
may require tensorflow > 1.11 or
pip install tf-nightly
'''
import os
from docopt import docopt
import tensorflow as tf
import numpy as np
from irmark1.utils import FPSTimer

# NOTE: the module docstring above is parsed by docopt — do not reword it.
args = docopt(__doc__)
in_model = os.path.expanduser(args['--model'])

# Load the TFLite model and allocate its input/output tensors.
interpreter = tf.lite.Interpreter(model_path=in_model)
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Run one inference on random input data and show each output tensor.
input_shape = input_details[0]['shape']
input_data = np.random.random_sample(input_shape).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
for tensor in output_details:
    print(interpreter.get_tensor(tensor['index']))

# Benchmark loop: report frames/sec until interrupted.
print("test performance: hit CTRL+C to break")
timer = FPSTimer()
while True:
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    timer.on_frame()
| [
"numpy.random.random_sample",
"docopt.docopt",
"tensorflow.lite.Interpreter",
"os.path.expanduser",
"irmark1.utils.FPSTimer"
] | [((255, 270), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (261, 270), False, 'from docopt import docopt\n'), ((283, 318), 'os.path.expanduser', 'os.path.expanduser', (["args['--model']"], {}), "(args['--model'])\n", (301, 318), False, 'import os\n'), ((376, 416), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': 'in_model'}), '(model_path=in_model)\n', (395, 416), True, 'import tensorflow as tf\n'), ((1036, 1046), 'irmark1.utils.FPSTimer', 'FPSTimer', ([], {}), '()\n', (1044, 1046), False, 'from irmark1.utils import FPSTimer\n'), ((677, 713), 'numpy.random.random_sample', 'np.random.random_sample', (['input_shape'], {}), '(input_shape)\n', (700, 713), True, 'import numpy as np\n')] |
import torch
import numpy as np
import torch.nn.functional as F
import pdb
from analysis import entropy
from utils.utils import to_sqnp
from utils.constants import TZ_COND_DICT, P_TZ_CONDS
from models import get_reward, compute_returns, compute_a2c_loss
# from task.utils import scramble_array, scramble_array_list
def run_aba(
        agent, optimizer, task, p, n_examples, supervised,
        fix_cond=None, fix_penalty=None,
        slience_recall_time=None, scramble=False,
        learning=True, get_cache=True, get_data=False,
):
    """Run the agent on interleaved ('ABA') trials and optionally train it with A2C.

    Samples `n_examples` interleaved examples from `task`, then iterates over
    `n_examples // 2` trials; each trial picks a condition (RM/DM/NM), samples a
    penalty level, runs the agent over all time steps, and accumulates actor/critic
    losses, returns and entropies.

    NOTE(review): `slience_recall_time` and `scramble` are accepted but never used
    in this body — confirm whether they should gate recall/scrambling here.

    Returns [results, metrics] (plus the raw training data if get_data=True), where
    results = [log_dist_a, log_targ_a, log_cache, log_cond] and
    metrics = [log_loss_sup, log_loss_actor, log_loss_critic, log_return, log_pi_ent].
    """
    # sample data
    X, Y = task.sample(n_examples, interleave=True, to_torch=True)
    # interleaved sampling yields paired events, so only half as many trials are iterated
    n_examples = n_examples // 2
    # logger
    log_return, log_pi_ent = 0, 0
    log_loss_sup, log_loss_actor, log_loss_critic = 0, 0, 0
    log_cond = np.zeros(n_examples,)
    log_dist_a = [[] for _ in range(n_examples)]
    log_targ_a = [[] for _ in range(n_examples)]
    log_cache = [None] * n_examples
    for i in range(n_examples):
        # pick a condition
        cond_i = pick_condition(p, rm_only=supervised, fix_cond=fix_cond)
        # get the example for this trial
        X_i, Y_i = X[i], Y[i]
        T_total = np.shape(X_i)[0]
        # last time step of every event (events are p.env.n_param steps long)
        event_ends = np.array(
            [t for t in range(T_total + 1) if t % p.env.n_param == 0][1:]
        ) - 1
        # prealloc
        loss_sup = 0
        probs, rewards, values, ents = [], [], [], []
        log_cache_i = [None] * T_total
        # init model wm and em
        penalty_val, penalty_rep = sample_penalty(p, fix_penalty)
        hc_t = agent.get_init_states()
        agent.flush_episodic_memory()
        agent.retrieval_on()
        agent.encoding_off()
        for t in range(T_total):
            # encode into episodic memory only at event boundaries, unless condition is NM
            if t in event_ends and cond_i != 'NM':
                agent.encoding_on()
            else:
                agent.encoding_off()
            # forward
            x_it = append_prev_info(X_i[t], [penalty_rep])
            pi_a_t, v_t, hc_t, cache_t = agent.forward(
                x_it.view(1, 1, -1), hc_t)
            # after delay period, compute loss
            a_t, p_a_t = agent.pick_action(pi_a_t)
            # get reward
            r_t = get_reward(a_t, Y_i[t], penalty_val)
            # cache the results for later RL loss computation
            rewards.append(r_t)
            values.append(v_t)
            probs.append(p_a_t)
            ents.append(entropy(pi_a_t))
            # compute supervised loss (drop the last action dim, the "don't know" unit)
            yhat_t = torch.squeeze(pi_a_t)[:-1]
            loss_sup += F.mse_loss(yhat_t, Y_i[t])
            # flush working memory at event boundary, unless condition is RM
            if t in event_ends and cond_i != 'RM':
                hc_t = agent.get_init_states()
            # cache results for later analysis
            if get_cache:
                log_cache_i[t] = cache_t
            # for behavioral stuff, only record prediction time steps
            log_dist_a[i].append(to_sqnp(pi_a_t))
            log_targ_a[i].append(to_sqnp(Y_i[t]))
        # compute RL loss
        returns = compute_returns(rewards, normalize=p.env.normalize_return)
        loss_actor, loss_critic = compute_a2c_loss(probs, values, returns)
        pi_ent = torch.stack(ents).sum()
        if learning:
            loss = loss_actor + loss_critic
            # loss = loss_actor + loss_critic - pi_ent * p.net.eta
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(agent.parameters(), .25)
            optimizer.step()
        # after every event sequence, log stuff
        log_loss_sup += loss_sup / n_examples
        log_pi_ent += pi_ent.item() / n_examples
        log_return += torch.stack(rewards).sum().item() / n_examples
        log_loss_actor += loss_actor.item() / n_examples
        log_loss_critic += loss_critic.item() / n_examples
        log_cond[i] = TZ_COND_DICT.inverse[cond_i]
        if get_cache:
            log_cache[i] = log_cache_i
    # return cache
    log_dist_a = np.array(log_dist_a)
    log_targ_a = np.array(log_targ_a)
    results = [log_dist_a, log_targ_a, log_cache, log_cond]
    metrics = [log_loss_sup, log_loss_actor, log_loss_critic,
               log_return, log_pi_ent]
    out = [results, metrics]
    if get_data:
        X_array_list = [to_sqnp(X[i]) for i in range(n_examples)]
        Y_array_list = [to_sqnp(Y[i]) for i in range(n_examples)]
        training_data = [X_array_list, Y_array_list]
        out.append(training_data)
    return out
def append_prev_info(x_it_, scalar_list):
    """Concatenate each scalar in `scalar_list` (converted to float tensors) onto the input vector."""
    augmented = x_it_
    for scalar in scalar_list:
        flat_scalar = scalar.type(torch.FloatTensor).view(tensor_length(scalar))
        augmented = torch.cat([augmented, flat_scalar])
    return augmented
def tensor_length(tensor):
    """Return the length of a 0-d (-> 1) or 1-d tensor; raise ValueError for higher ranks."""
    n_dims = tensor.dim()
    if n_dims == 0:
        return 1
    if n_dims > 1:
        raise ValueError('length for high dim tensor is undefined')
    return len(tensor)
def pick_condition(p, rm_only=True, fix_cond=None):
    """Pick a trial condition.

    Priority: an explicitly fixed condition (`fix_cond`), then 'RM' when
    `rm_only` (supervised pre-training), else a random draw over all conditions
    with probabilities P_TZ_CONDS. `p` is kept for signature compatibility.

    Fix: the full condition list was previously built unconditionally, even on
    the two short-circuit paths that never use it.
    """
    if fix_cond is not None:
        return fix_cond
    if rm_only:
        return 'RM'
    # only materialize the condition list when we actually sample from it
    all_tz_conditions = list(TZ_COND_DICT.values())
    return np.random.choice(all_tz_conditions, p=P_TZ_CONDS)
def sample_penalty(p, fix_penalty):
    """Draw (or fix) the penalty level for a trial.

    Returns (penalty value tensor, penalty representation tensor); the
    representation is one-hot when p.env.penalty_onehot is set.
    """
    if fix_penalty is not None:
        # fixed level, usually used during test
        penalty_val = fix_penalty
    elif p.env.penalty_random:
        # sample a level, either from the discrete set or uniformly
        if p.env.penalty_discrete:
            penalty_val = np.random.choice(p.env.penalty_range)
        else:
            penalty_val = np.random.uniform(0, p.env.penalty)
    else:
        # train with a fixed penalty level
        penalty_val = p.env.penalty
    # form the input representation of the current penalty signal
    penalty_rep = one_hot_penalty(penalty_val, p) if p.env.penalty_onehot else penalty_val
    return torch.tensor(penalty_val), torch.tensor(penalty_rep)
def one_hot_penalty(penalty_int, p):
    """Return a one-hot numpy vector encoding penalty_int's position in p.env.penalty_range.

    Raises ValueError for a level outside the range. Previously this used
    `assert cond, print(...)` — stripped under `python -O` and with a None
    assertion message — so it is now an explicit raise.
    """
    if penalty_int not in p.env.penalty_range:
        raise ValueError(f'invalid penalty_int = {penalty_int}')
    one_hot_dim = len(p.env.penalty_range)
    penalty_id = p.env.penalty_range.index(penalty_int)
    return np.eye(one_hot_dim)[penalty_id, :]
| [
"numpy.random.uniform",
"utils.constants.TZ_COND_DICT.values",
"utils.utils.to_sqnp",
"torch.stack",
"models.compute_a2c_loss",
"models.get_reward",
"torch.nn.functional.mse_loss",
"numpy.zeros",
"numpy.shape",
"torch.squeeze",
"models.compute_returns",
"numpy.array",
"numpy.random.choice",
... | [((779, 799), 'numpy.zeros', 'np.zeros', (['n_examples'], {}), '(n_examples)\n', (787, 799), True, 'import numpy as np\n'), ((4026, 4046), 'numpy.array', 'np.array', (['log_dist_a'], {}), '(log_dist_a)\n', (4034, 4046), True, 'import numpy as np\n'), ((4064, 4084), 'numpy.array', 'np.array', (['log_targ_a'], {}), '(log_targ_a)\n', (4072, 4084), True, 'import numpy as np\n'), ((3000, 3058), 'models.compute_returns', 'compute_returns', (['rewards'], {'normalize': 'p.env.normalize_return'}), '(rewards, normalize=p.env.normalize_return)\n', (3015, 3058), False, 'from models import get_reward, compute_returns, compute_a2c_loss\n'), ((3093, 3133), 'models.compute_a2c_loss', 'compute_a2c_loss', (['probs', 'values', 'returns'], {}), '(probs, values, returns)\n', (3109, 3133), False, 'from models import get_reward, compute_returns, compute_a2c_loss\n'), ((5029, 5050), 'utils.constants.TZ_COND_DICT.values', 'TZ_COND_DICT.values', ([], {}), '()\n', (5048, 5050), False, 'from utils.constants import TZ_COND_DICT, P_TZ_CONDS\n'), ((6018, 6043), 'torch.tensor', 'torch.tensor', (['penalty_val'], {}), '(penalty_val)\n', (6030, 6043), False, 'import torch\n'), ((6045, 6070), 'torch.tensor', 'torch.tensor', (['penalty_rep'], {}), '(penalty_rep)\n', (6057, 6070), False, 'import torch\n'), ((6323, 6342), 'numpy.eye', 'np.eye', (['one_hot_dim'], {}), '(one_hot_dim)\n', (6329, 6342), True, 'import numpy as np\n'), ((1158, 1171), 'numpy.shape', 'np.shape', (['X_i'], {}), '(X_i)\n', (1166, 1171), True, 'import numpy as np\n'), ((2160, 2196), 'models.get_reward', 'get_reward', (['a_t', 'Y_i[t]', 'penalty_val'], {}), '(a_t, Y_i[t], penalty_val)\n', (2170, 2196), False, 'from models import get_reward, compute_returns, compute_a2c_loss\n'), ((2506, 2532), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['yhat_t', 'Y_i[t]'], {}), '(yhat_t, Y_i[t])\n', (2516, 2532), True, 'import torch.nn.functional as F\n'), ((4316, 4329), 'utils.utils.to_sqnp', 'to_sqnp', (['X[i]'], {}), '(X[i])\n', 
(4323, 4329), False, 'from utils.utils import to_sqnp\n'), ((4382, 4395), 'utils.utils.to_sqnp', 'to_sqnp', (['Y[i]'], {}), '(Y[i])\n', (4389, 4395), False, 'from utils.utils import to_sqnp\n'), ((5198, 5247), 'numpy.random.choice', 'np.random.choice', (['all_tz_conditions'], {'p': 'P_TZ_CONDS'}), '(all_tz_conditions, p=P_TZ_CONDS)\n', (5214, 5247), True, 'import numpy as np\n'), ((2379, 2394), 'analysis.entropy', 'entropy', (['pi_a_t'], {}), '(pi_a_t)\n', (2386, 2394), False, 'from analysis import entropy\n'), ((2455, 2476), 'torch.squeeze', 'torch.squeeze', (['pi_a_t'], {}), '(pi_a_t)\n', (2468, 2476), False, 'import torch\n'), ((2888, 2903), 'utils.utils.to_sqnp', 'to_sqnp', (['pi_a_t'], {}), '(pi_a_t)\n', (2895, 2903), False, 'from utils.utils import to_sqnp\n'), ((2938, 2953), 'utils.utils.to_sqnp', 'to_sqnp', (['Y_i[t]'], {}), '(Y_i[t])\n', (2945, 2953), False, 'from utils.utils import to_sqnp\n'), ((3232, 3249), 'torch.stack', 'torch.stack', (['ents'], {}), '(ents)\n', (3243, 3249), False, 'import torch\n'), ((5588, 5625), 'numpy.random.choice', 'np.random.choice', (['p.env.penalty_range'], {}), '(p.env.penalty_range)\n', (5604, 5625), True, 'import numpy as np\n'), ((5674, 5709), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'p.env.penalty'], {}), '(0, p.env.penalty)\n', (5691, 5709), True, 'import numpy as np\n'), ((3714, 3734), 'torch.stack', 'torch.stack', (['rewards'], {}), '(rewards)\n', (3725, 3734), False, 'import torch\n')] |
# ,---------------------------------------------------------------------------,
# | This module is part of the krangpower electrical distribution simulation |
# | suit by <NAME> <<EMAIL>> et al. |
# | Please refer to the license file published together with this code. |
# | All rights not explicitly granted by the license are reserved. |
# '---------------------------------------------------------------------------'
# OpendssdirectEnhancer by <NAME>
# a wrapper for opendssdirect.py by <NAME> and <NAME>
import logging
import types
from copy import deepcopy as _deepcopy
from functools import reduce as _reduce, partial as _partial
from inspect import getmembers as _inspect_getmembers
from json import load as _json_load
from logging import DEBUG as _DBG_LVL
from math import sqrt as _sqrt
from operator import getitem as _getitem, attrgetter as _attrgetter
from re import compile as _re_compile
from re import sub as _sub
from sys import modules as _sys_modules
import numpy as _np
import opendssdirect as _odr
from pandas import DataFrame as _DataFrame
from ._stdout_hijack import stdout_redirected, NullCm
from .._aux_fcn import lower as _lower
from .._aux_fcn import pairwise as _pairwise
from .._components import LineGeometry
from .._components import _resolve_unit, _type_recovery, _odssrep, matricize_str
from .._components import get_classmap as _get_classmap
from .._config_loader import DEFAULT_ENH_NAME, UNIT_MEASUREMENT_PATH, TREATMENTS_PATH, ERROR_STRINGS, DANGEROUS_STACKS, \
UM as _UM, INTERFACE_METHODS_PATH, DEFAULT_COMP as _DEFAULT_COMP, PINT_QTY_TYPE, INTERF_SELECTORS_PATH, \
C_FORCE_UNSAFE_CALLS as _FORCE_UNSAFE_CALLS
from .._exceptions import UnsolvedCircuitError
from .._logging_init import clog, mlog, bclog, get_log_level
# fire up the OpenDSS engine once at import time; Start returns a truthy value on success
try:
    assert _odr.Basic.Start(0)
except TypeError:
    # retrocompatibility with OpenDSSDirect.py <0.3
    assert _odr.Basic.Start()
# <editor-fold desc="Auxiliary functions">
def _assign_unit(item, unit: type(_UM.m) or None):
    """Attach `unit` to `item`: dicts get the unit on every value, DataFrames pass
    through untouched (pint and pandas do not mix well), everything else is multiplied."""
    if unit is None:
        return item
    if isinstance(item, dict):
        return {key: val * unit for key, val in item.items()}
    if isinstance(item, _DataFrame):
        # pandas' DataFrame is a mess together with pint
        return item
    return item * unit
def _asarray(sequence):
    """Treatment-chain helper: convert a raw interface result to a numpy array."""
    return _np.asarray(sequence)
def _couplearray(item):
    """Interpret a flat [re, im, re, im, ...] sequence as a complex numpy array."""
    reals = _np.array(item[0::2])
    imags = _np.array(item[1::2])
    return reals + 1j * imags
def _terminalize_cpx(item):
    """Fold a flat [re, im, re, im, ...] sequence into an (nterm, ncond) complex matrix.

    Note that, when an item is passed to terminalize, it is taken for granted that nterm and ncond can
    be found in the respective calls to odr: if you called odr.CktElement.Powers(), for example, it is
    assumed you knew what you were doing. Calls coming from PackedElements, instead, should perform the
    cktelement selection just before the call.
    """
    nterm = _odr.CktElement.NumTerminals()
    ncond = _odr.CktElement.NumConductors()
    assert len(item) == nterm * ncond * 2
    # fix: dtype=complex replaces _np.complex, an alias removed in NumPy >= 1.24
    cpxr = _np.zeros([nterm, ncond], dtype=complex)
    for idx, (real, imag) in enumerate(_pairwise(item)):
        cpxr[idx // ncond, idx % ncond] = real + 1j * imag
    return cpxr
def _terminalize_int(item):
    """Fold a flat integer sequence into an (nterm, ncond) integer matrix for the
    currently selected CktElement (see note in _terminalize_cpx)."""
    nterm = _odr.CktElement.NumTerminals()
    ncond = _odr.CktElement.NumConductors()
    assert len(item) == nterm * ncond * 1
    # fix: dtype=int replaces _np.int, an alias removed in NumPy >= 1.24
    int_r = _np.zeros([nterm, ncond], dtype=int)
    for idx, element in enumerate(item):
        int_r[idx // ncond, idx % ncond] = element
    return int_r
return int_r
def _cpx(item):
    """Build a single complex number from a (re, im) pair."""
    real, imag = item[0], item[1]
    return real + 1j * imag
def _dictionize_cktbus(item):
    """Zip `item` with the circuit's bus names (lowercased) into a dict."""
    bus_names = _lower(_odr.Circuit.AllBusNames())
    return dict(zip(bus_names, item))
def _dictionize_cktels(item):
    """Zip `item` with the circuit's element names (lowercased) into a dict."""
    element_names = _lower(_odr.Circuit.AllElementNames())
    return dict(zip(element_names, item))
def _dictionize_cktnodes(item):
    """Zip `item` with the circuit's Y-matrix node order (lowercased) into a dict."""
    node_names = _lower(_odr.Circuit.YNodeOrder())
    return dict(zip(node_names, item))
def _matricize_ybus(item):
    """Return the system Ybus as a complex DataFrame indexed (rows and columns) by node order."""
    node_order = _lower(_odr.Circuit.YNodeOrder())
    y_mtx = _matricize(item)
    return _DataFrame(data=y_mtx, index=node_order, columns=node_order)
def _matricize(item):
    """Fold a flat [re, im, re, im, ...] sequence into a transposed complex square matrix.

    A single-element input is returned unchanged.
    """
    if len(item) == 1:
        return item
    side_f = _sqrt(len(item) / 2)
    assert side_f == int(side_f)
    n = int(side_f)
    real = _np.reshape(item[0::2], (n, n))
    imag = _np.reshape(item[1::2], (n, n))
    # the data arrives in column order, hence the final transpose
    return _np.transpose(real + 1j * imag)
# there's no XYCurves.AllNames() or similar, so we have to mock up one ourselves
def _xycurve_names():
    """Collect all XYCurve names by walking the First/Next iteration protocol
    (there is no XYCurves.AllNames() or similar in the interface)."""
    if _odr.XYCurves.Count() == 0:
        return []
    names = []
    _odr.XYCurves.First()
    while True:
        names.append(_odr.XYCurves.Name())
        if _odr.XYCurves.Next() == 0:
            break
    return names
# </editor-fold>
# <editor-fold desc="Loads and declarations">
# handle on this very module, used for the dynamic attribute population below
_this_module = _sys_modules[__name__]
_classmap = _get_classmap()
_command_manager = stdout_redirected
_DISKFULL_RE = _re_compile('Disk Full')
_ID = 'OpendssdirectEnhancer'
setattr(_this_module, 'utils', _odr.utils)
# loads chains of functions through which to pass the rough outputs of opendss.
with open(TREATMENTS_PATH, 'r') as _tfile:
    _rtrt = _json_load(_tfile)
_trt = dict()
for _subdic_name, _subdic in _rtrt.items():
    # treatment names from the json are resolved to the functions defined above via globals()
    _nsd = {k: tuple([globals()[_t] for _t in _v]) for k, _v in _subdic.items()}
    _trt[_subdic_name] = _nsd
# loads measurement units for the interface of components without self-referencing.
# the components with self referencing, like lines and loadshapes, are taken care of at runtime.
with open(UNIT_MEASUREMENT_PATH, 'r') as _ufile:
    _rumr = _json_load(_ufile)
_umr = dict()
for _subdic_name, _subdic in _rumr.items():
    _nsd = {_k: _UM.parse_units(_v) for _k, _v in _subdic.items()}
    _umr[_subdic_name] = _nsd
# method names exposed per interface, keyed by single-element tuples
with open(INTERFACE_METHODS_PATH, 'r') as _ifile:
    _itf = _json_load(_ifile)
_interface_methods = {(_k,): _v for _k, _v in _itf.items()}
with open(INTERF_SELECTORS_PATH, 'r') as _ifile:
    _itf_sel_names = _json_load(_ifile)
# </editor-fold>
# <editor-fold desc="Text output check and conversion">
class OpenDSSTextError(Exception):
    """Meant to be thrown when the string returned by the opendss text interface represents an error
    (the interface reports failures as plain strings rather than raising)."""
    pass
# this ctxman is necessary for suppressing the rogue warnings from OpenDSSDirect.py
# when the situation is under control and they are unnecessary
class _LogKiller:
    """Context manager that disables every log record up to CRITICAL for the duration of the block."""

    def __enter__(self):
        logging.disable(logging.CRITICAL)

    def __exit__(self, a, b, c):
        # NOTSET re-enables all logging levels on exit
        logging.disable(logging.NOTSET)
def _validate_text_interface_result(result_string: str):
    """Check the raw output of the opendss text interface and raise if it encodes an error.

    Returns nothing on success; raises OpenDSSTextError when the string matches a known
    error pattern, or KeyError for the 'Property Unknown' marker that opendss returns
    (instead of raising) when an invalid property is requested with '?'.
    """
    lowered = result_string.lower()
    if lowered.startswith(tuple(ERROR_STRINGS['beginning'])):
        raise OpenDSSTextError(result_string)
    if any(marker in lowered for marker in ERROR_STRINGS['middle']):
        raise OpenDSSTextError(result_string)
    # this is what odr returns, instead of raising, if you request invalid props with ?
    if result_string == 'Property Unknown':
        raise KeyError('Property Unknown')
def _cast_dumbstring(string: str, data_type):
    """Casts, if possible, a raw string returned by the OpenDSS text interface to the type
    specified in data_type (str, int, float, numpy array or list).

    Fixes: _np.NaN -> _np.nan (alias removed in NumPy 2.0), raw strings for the regex
    patterns (invalid-escape warnings otherwise), and the bare `except:` narrowed to
    Exception so KeyboardInterrupt/SystemExit are not swallowed.
    """
    if data_type == str:
        return string
    if data_type in (int, float):
        if string == '----':  # this happens, f.e., if you define a line by rmatrix, xmatrix and then ask for r0, x0...
            return _np.nan
        else:
            return data_type(string)
    elif data_type == _np.ndarray:
        try:
            # NOTE: eval on interface output — kept as-is, but only safe for trusted DSS files
            return _np.asarray(eval(string))
        except Exception:
            # NOTE(review): the duplicated-looking .replace(' ', ' ') pairs suggest the original
            # literals contained runs of spaces that were collapsed in transit — verify upstream
            return matricize_str(string
                                 .replace(' ', ' ').replace(' ', ' ')
                                 .replace(' | ', ';').replace('| ', ';').replace(' |', ';').replace('|', ';')
                                 .replace('[ ', '').replace('[ ', '').replace('[', '')
                                 .replace(' ]', '').replace(' ]', '').replace(']', '')
                                 .replace(', ', ',')
                                 .replace(' ', ','))
    elif data_type == list:
        # strip brackets/quotes/parens, then try ints, then floats, then keep raw strings
        dp_str = _sub(r'[\,|\ ]*(\]|"|\))', '', string)
        dp_str = _sub(r'(\[|"|\()\ *', '', dp_str)
        items = dp_str.split(',')
        try:
            return [int(x) for x in items]
        except ValueError:
            try:
                return [float(x) for x in items]
            except ValueError:
                return items
    else:
        raise TypeError('Could not cast the DSS property string "{1}": type {0} unknown'.format(str(data_type), string))
def _influences_names(cmd_str):
    """True when the text command creates elements (starts with 'new'), i.e. may invalidate
    any cached list of circuit element names."""
    return cmd_str.lower().startswith('new')
# </editor-fold>
# <editor-fold desc="Dynamic unitmeasure registries">
# maps the OpenDSS line "units" property codes (0-7) to the corresponding pint units;
# 0 is the registry's generic/unspecified length unit
_line_um = {
    0: _UM.unitlength,
    1: _UM.mile,
    2: _UM.kft,
    3: _UM.km,
    4: _UM.m,
    5: _UM.ft,
    6: _UM.inch,
    7: _UM.cm
}
def _loadshape_umd(use_actual: bool):
    """Dynamically generates the measurement units dictionary for a loadshape,
    depending on its 'use_actual' property (actual power values vs multipliers)."""
    qties = {'HrInterval': _UM.hr,
             'MinInterval': _UM.min,
             'SInterval': _UM.s,
             'TimeArray': _UM.hr,
             'PBase': _UM.kW}
    if use_actual:
        qties.update({'PMult': _UM.kW,
                      'QBase': _UM.kVA,
                      'QMult': _UM.kVA})
    else:
        # NOTE(review): QBase is kW here but kVA in the use_actual branch — preserved as found
        qties.update({'PMult': _UM.dimensionless,
                      'QBase': _UM.kW,
                      'QMult': _UM.dimensionless})
    return {'Loadshapes': qties}
def _line_umd(unit_length):
    """Special case of call if the object is a line; needed because a 'units' property
    exists for lines that fixes the dimension of the other per-length quantities."""
    cap_per_len = _UM.nF / unit_length
    ohm_per_len = _UM.ohm / unit_length
    qties = {'C0': cap_per_len,
             'C1': cap_per_len,
             'CMatrix': cap_per_len,
             'EmergAmps': _UM.A,
             'Length': unit_length,
             'NormAmps': _UM.A,
             'Yprim': _UM.siemens / unit_length}
    for ohmic_qty in ('R0', 'R1', 'RMatrix', 'Rg', 'Rho', 'X0', 'X1', 'XMatrix', 'Xg'):
        qties[ohmic_qty] = ohm_per_len
    return {'Lines': qties}
# </editor-fold>
# <editor-fold desc="Module cache variables">
# module-level cache of all circuit element names; presumably flipped/refreshed when a
# command that creates elements runs (see _influences_names) — TODO confirm elsewhere in file
names_up2date = False
_cached_allnames = []
# </editor-fold>
# <editor-fold desc="Dynamical module population">
def _enh_call(*args, stack, odrobj):
    """Invoke the underlying OpenDSSDirect member `odrobj`, then pass the raw result through
    the treatment chain configured for `stack` and attach the configured measurement unit.

    `stack` is the attribute path, e.g. ['Lines', 'RMatrix'].
    """
    # the dictionary of unit measures is found. The cases of lines and loadshapes are treated aside, because their
    # unit dictionary references the object itself ('units' or 'useactual'), so the dependency must be resolved
    # dynamically
    if stack[0] == 'Lines':
        um_d = _line_umd(_line_um[_odr.Lines.Units()])
    elif stack[0] == 'LoadShape':
        # bugfix: pass the boolean directly; previously `_line_um[bool(...)]` handed a pint
        # *unit* (always truthy) to _loadshape_umd, which expects use_actual as a bool
        um_d = _loadshape_umd(bool(_odr.LoadShape.UseActual()))
    else:
        um_d = _umr
    # the dictionary is walked to find the unit of the particular value requested with this call
    try:
        ums = _reduce(_getitem, stack, um_d)  # nested dict search
    except KeyError:
        ums = None
    # the trt dictionary is walked to find which function must the output be passed through
    try:
        ths_trt = _reduce(_getitem, stack, _this_module._trt)
    except KeyError:
        ths_trt = tuple()  # empty tuple in order to shortcut the following iteration when no treatments needed
    # we retrieve the desired member from opendssdirect.py and call it with the arguments
    e_ordobj = odrobj(*args)
    # an explicit control must be carried out over what's returned when we are calling the text interface.
    # this is because many errors from the text interface are returned silently as regular strings, potentially
    # leading to dangerous errors.
    # the result is finally treated and assigned a unit.
    for _t in ths_trt:
        e_ordobj = _t(e_ordobj)
    return _assign_unit(e_ordobj, ums)
class _FnWrp:
    """Wraps a frozen (partial) _enh_call so it carries a dotted name and refuses — or merely
    warns about — calls listed in DANGEROUS_STACKS when the circuit is unsolved.

    NOTE(review): the `force` keyword of __call__ is accepted but never read in this body.
    """

    def __init__(self, partial_fkt):
        self._undfcn = partial_fkt
        # dotted attribute path, e.g. 'Lines.RMatrix', doubles as the display name
        self._stk = '.'.join(partial_fkt.keywords['stack'])
        self.__name__ = self._stk
        self.name = self._stk

    def __call__(self, *args, force=False):
        if self._stk in DANGEROUS_STACKS and not _this_module.Solution.Converged():
            if _FORCE_UNSAFE_CALLS:
                # configured to proceed anyway: loudly warn, then call through
                logging.warning('\nUnsafe call to {} was attempted on an unsolved circuit.\n'
                                '\nRISK OF SEGFAULT OR WRONG/NON-PHYSICAL RESULTS.\n\n'
                                'Set "force_unsafe_calls" to False in the cfg file to avoid this and raise '
                                'an error when unsafe calls are made.'.format(self._stk))
                return self._undfcn(*args)
            else:
                raise UnsolvedCircuitError(self._stk)
        else:
            return self._undfcn(*args)

    def __str__(self):
        return '<function OpendssdirectEnhancer.' + self._stk + '>'

    def __repr__(self):
        return self.__str__()
# dynamical population of the module to mock OpenDSSDirect
# dynamical population of the module to mock OpenDSSDirect: for every interface listed in the
# config (_itf), create a submodule attribute and fill it with wrapped, unit-aware callables
for _i1 in _inspect_getmembers(_odr.dss):
    _i1_name = _i1[0]
    if _i1_name in _itf.keys():
        setattr(_this_module, _i1_name, types.ModuleType(_this_module.__name__ + '.' + _i1_name))
    else:
        continue
    for _i2 in _inspect_getmembers(_i1[1]):
        if _i2[0].startswith('_'):  # this vulgar hack is to avoid special methods
            continue
        try:
            _stack = [_i1_name, _i2[0]]
            # freeze the target member into an _enh_call partial, to be wrapped below
            _frozencall = _partial(_enh_call, stack=_stack, odrobj=_attrgetter('.'.join(_stack))(_odr.dss))
        except (AttributeError,):
            continue
        try:
            setattr(getattr(_this_module, _i1_name), _i2[0], _FnWrp(_frozencall))
        except (TypeError, AttributeError):
            continue
# </editor-fold>
# mocks a standard selector, but for named entities
def _named_selector(name, eltype):
    """Mock of a standard selector for named entities: activate the class, then the name,
    and check that the element actually got selected."""
    cls_token, name_token = eltype.lower(), name.lower()
    _this_module.Circuit.SetActiveClass(cls_token)
    _this_module.ActiveClass.Name(name_token)
    assert _this_module.Element.Name().lower() == cls_token + '.' + name_token
class _PackedOpendssElement:
    """Name-aware wrapper around a single OpenDSS entity.

    Holds the element's type and name, the interfaces that expose data for it, and the
    selector callables needed to make it the active element before any query; it then
    exposes the interface methods directly as attributes, plus dict-like access to the
    text-interface properties via __getitem__/__setitem__.
    """

    def __init__(self, eltype, name):
        # reconstructs what are the available interfaces from file
        self._available_interfaces = tuple(
            getattr(_this_module, itf_name) for itf_name in _itf_sel_names['interfaces'][eltype])
        # reconstructs from file what are the interfaces in which I have to select the element in order to get the
        # results that pertain it from self._available_interfaces
        self._selectors = []
        sel_attrs_chain = tuple(_itf_sel_names['selectors'][eltype])
        for ac in sel_attrs_chain:
            obi = _attrgetter(ac)(_this_module)
            self._selectors.append(obi)
        self._name = name
        # todo perform sanity checks on name
        self.eltype = eltype
        # dynamically add methods to expose
        for i in self._available_interfaces:
            # for m in _interface_methods.get(tuple(i._undobj.split('.')[-1]), []):
            # NOTE(review): tuple(<str>) yields a tuple of characters, while _interface_methods
            # keys are 1-tuples of whole names — verify this lookup ever matches
            for m in _interface_methods.get(tuple(i.__name__.split('.')[-1]), []):
                setattr(self, m, self._craft_member(m))

    @property
    def topological(self):
        """Returns those properties that are marked as 'topological' in the configuration files and identify the wiring
        location of the element. (Typically, bus1 and, if it exists, bus2.)"""
        try:
            top_par_names = _DEFAULT_COMP['default_' + self.eltype]['topological']
        except KeyError:
            return None
        rt = [self[t] for t in top_par_names.keys()]
        if len(rt) == 1 and isinstance(rt[0], list):
            return tuple(rt[0])
        else:
            return tuple(rt)

    @property
    def type(self):
        # NOTE: shadows the builtin name 'type' on instances; returns the element type string
        return self.eltype

    @property
    def fullname(self):
        # '<eltype>.<name>', the identifier opendss uses for selection
        return self.eltype + '.' + self._name

    @property
    def name(self):
        return self._name

    @property
    def help(self):
        """Displays informations about the object's parameters."""
        return self.unpack().paramhelp

    def _craft_member(self, item: str):
        """This second order function returns a function that invokes self._side_getattr on item. Such returned
        function are intended to be assigned to a _PackedOpendssElement's attribute with the same name as item, and
        such operation is carried out in __init__."""
        def pckdss_mthd(*args):
            # self._side_getattr(item) is a _CallFinalizer
            return self._side_getattr(item)(*args)
        return pckdss_mthd

    def _side_getattr(self, item):
        """Returns a _CallFinalizer pointing to item, if item is a valid interface identificator of the object in
        OpenDSSdirect.py.
        For example, If the object is a Isource, AngleDeg will be a valid item."""
        for itf in self._available_interfaces:
            if hasattr(itf, item):
                return _CallFinalizer(getattr(itf, item), self._selectors, self.fullname, str(itf))
                # break unnecessary
            else:
                continue
        else:
            raise AttributeError('"{0}" is neither an attribute of the _PackedOpendssElement nor of the {1}-type object'
                                 ' it wraps.'
                                 .format(item, self.eltype.upper()))

    def dump(self):
        """Returns a dict with all the properties-values pairs of the object as they would be returned
        by calls to __getitem__."""
        try:
            for sel in self._selectors:
                try:
                    sel(self.fullname)
                except TypeError:
                    sel(self.name, self.eltype)  # named selector
            if _this_module.Element.Name().lower() == self.fullname:
                props = _this_module.Element.AllPropertyNames()
            else:  # something went wrong
                if self.eltype == 'bus':
                    # buses have no dumpable property list; an empty dict is returned below
                    props = []
                else:
                    raise Exception('PackedOpendssElement {} could not be selected. Please contact the dev')
        except AttributeError:
            raise ValueError('{0}-type objects are not dumpable.'.format(self.eltype.upper()))
        return {p: self[p] for p in props}

    def unpack(self, verbose=False):
        """Returns a _DssEntity (or a descendant) corresponding to _PackedOpendssElement."""
        # identify the corresponding class in the components file
        # the linegeometry internal structure is peculiar and has to be treated differently
        if self.eltype == 'linegeometry':
            return _unpack_linegeom(self)
        # the bus packedelement has no corresponding regular element
        if self.eltype == 'bus':
            raise Exception('Buses cannot be unpacked.')
        myclass = _classmap[self.eltype]
        # properties are dumped
        all_props = self.dump()
        # the names of those properties that are ok to pass to the _DssEntity are taken from the components'
        # configuration file
        valid_props = _deepcopy(_DEFAULT_COMP['default_' + self.eltype]['properties'])
        valid_props.update(_DEFAULT_COMP['default_' + self.eltype].get('associated', {}))
        ignored_props = _DEFAULT_COMP['default_' + self.eltype].get('ignored', [])
        redundant_props = _DEFAULT_COMP['default_' + self.eltype].get('redundant', [])
        valid_props = {k: v for k, v in valid_props.items() if k not in ignored_props + redundant_props}
        # either those properties dumped that are valid, or those that are valid AND different from the default values,
        # are stored in dep_prop
        if verbose:
            dep_prop = {k.lower(): v for k, v in all_props.items() if k.lower() in valid_props.keys()}
        else:
            dep_prop = {k.lower(): v for k, v in all_props.items() if
                        k.lower() in valid_props.keys() and _np.asarray(v != valid_props[k.lower()]).any()}
        # the _DssEntity is instantiated with the properties in dep_prop.
        if myclass.isnamed():
            obj = myclass(self._name, **dep_prop)
        else:
            obj = myclass(**dep_prop)
        obj.name = self._name
        return obj

    def __getattr__(self, item):
        # fall back on the wrapped interfaces for anything not defined on the wrapper itself
        return self._side_getattr(item)

    def __getitem__(self, item):
        """Gets a property of the item from the text interface (as opposed to a natively exposed attribute of the
        object's interface). For example, <>['xrharm'], where <> is a _PackedOpendssElement representing a Load,
        lets you access the 'xrharm' property, which is not available in any way through the standard interface."""
        # the raw property is returned in string form from Opendssdirect's text interface with the ? operator
        try:
            rslt = txt_command('? {0}.{1}'.format(self.fullname, item))
        except KeyError:
            # the rudimental Keyerror retrieved by _validate... is further explained
            raise KeyError('Invalid property {1} requested for {0}'.format(self.fullname, item))
        # correct type casting
        try:
            data_type = self._get_datatype(item)
        except KeyError:
            # happens, for example, when topological parameters are requested, because while being valid they are not
            # listed in the default-type configuration files
            return rslt
        rslt = _cast_dumbstring(rslt, data_type)
        # an unit is possibly added
        unt = self._get_builtin_units(item)
        if unt is None:
            return rslt
        else:
            return rslt * unt

    def __pos__(self):
        """The + operator enables the _PackedOpendssElement, undoing the effects of a previous __neg__ call on the same
        element. It directly wraps the 'enable' command from opendss."""
        _this_module.txt_command('enable {0}'.format(self.fullname))

    def __neg__(self):
        """The - operator disables the _PackedOpendssElement, so that it's like it does not exists. It directly wraps the
        'disable' command from opendss."""
        _this_module.txt_command('disable {0}'.format(self.fullname))

    def _get_datatype(self, item):
        """Gets what type of data corresponds to the property name passed as argument. The information is retrieved from
        the configuration files."""
        # raises KeyError if the property appears in neither section
        try:
            return type(_DEFAULT_COMP['default_' + self.eltype]['properties'][item.lower()])
        except KeyError:
            return type(_DEFAULT_COMP['default_' + self.eltype]['topological'][item.lower()])

    def _get_builtin_units(self, item):
        """Gets what measurement unit corresponds to the property name passed as argument. The information is retrieved
        from the configuration files."""
        raw_unit = _DEFAULT_COMP['default_' + self.eltype]['units'].get(item.lower(), None)
        if raw_unit is None:
            return None
        else:
            return _resolve_unit(raw_unit, self._get_matching_unit)

    def _get_matching_unit(self, matchobj):
        """Gets the property stored in the argument, a matchobj, group(2). It is a supporting function to
        _get_builtin_units, meant to retrieve, the property 'units' of the object or something similar."""
        raw_unit = self[matchobj.group(2)]
        return raw_unit

    def __setitem__(self, key, value):
        """Edits a property of the _PackedOpendssElement.
        Beware: If you do not pass a value with a pint measurement unit when editing a parameter that has a physical
        dimensionality, it will be assumed that you are using the default units."""
        # it is checked whether you passed a _pint_qty_type as value or not. Throughout the function, errors will be
        # thrown if: _pint_qty_type is passed for a property without unit, _pint_qty_type has the wrong dimensionality,
        # the content of the _pint_qty_type is not the right data type (such as a matrix instead of an int).
        if isinstance(value, PINT_QTY_TYPE):
            unt = self._get_builtin_units(key)
            ref_value = value.to(unt).magnitude
        else:
            ref_value = value
        # the target datatype is taken from the configuration files and checked
        target_type = self._get_datatype(key)
        try:
            assert isinstance(ref_value, target_type)
        except AssertionError:
            ref_value = _type_recovery(ref_value, target_type)
        # the edit is performed through the text interface with the 'edit' command
        txt_command('edit ' + self.fullname + ' ' + key + '=' + _odssrep(ref_value))

    def __str__(self):
        return '<PackedOpendssElement({0})>'.format(self.fullname)

    def __repr__(self):
        return self.__str__()
class _CallFinalizer:
def __init__(self, interface, selector_fns: list, name: str, s_interface_name: str):
self._super_interface_name = s_interface_name
self._interface = interface
self._selectors = selector_fns
self._name_to_select = name.split('.', 1)[1]
self._eltype = name.split('.', 1)[0] # it only is useful for the named_selector
def __getattr__(self, item):
return _CallFinalizer(getattr(self._interface, item), self._selectors, self._name_to_select,
self._super_interface_name)
# no cache on call, otherwise it would not see recent edits
def __call__(self, *args):
# this is the key bit: the PackedOpendssElement that instances this class is capable of retaining its name and
# auto select itself before calling the underlying odr.
for sel in self._selectors:
try:
sel(self._name_to_select)
except TypeError:
sel(self._name_to_select, self._eltype)
mlog.debug('Calling {0} with arguments {1}'.format(str(self._interface), str(args)))
return self._interface(*args)
@property
def super_interface(self):
return self._super_interface_name
def _unpack_linegeom(pckob):
nc = pckob['nconds']
nmc = {cc.split('.', 1)[1]: cc.split('.', 1)[0] for cc in get_all_names()}
cnv = {
'tsdata': 'tscable',
'wiredata': 'wire',
'cndata': 'cncable'
}
x = []
h = []
units = []
wrcn = []
for cd in range(nc):
pckob['cond'] = cd + 1
cabtype = cnv[nmc[pckob['wire'].lower()]]
wrcn.append(pckob['wire'].lower())
units.append(pckob['units'][0].lower()) # everything is converted to meters
x.append(pckob['x'].magnitude)
h.append(pckob['h'].magnitude)
naive_props = pckob.dump()
del naive_props['x']
del naive_props['h']
del naive_props['wire']
del naive_props['wires']
del naive_props['cncable']
del naive_props['cncables']
del naive_props['tscable']
del naive_props['tscables']
del naive_props['units']
del naive_props['normamps']
del naive_props['emergamps']
del naive_props['like']
naive_props['x'] = _np.asarray(x)
naive_props['h'] = _np.asarray(h)
naive_props['units'] = units
naive_props[cabtype] = wrcn
return LineGeometry(pckob.name, **naive_props)
# Caching
ALL_NAMES = []
BARE_NAMES_DICT = {}
# <editor-fold desc="Exposed functions">
def pack(item):
"""Returns a PackedOpendssElement corresponding to item."""
itlo = item.lower()
all_names = globals()['ALL_NAMES']
bare_names_dict = globals()['BARE_NAMES_DICT']
if not (itlo in all_names or itlo in bare_names_dict.keys()):
all_names = map(lambda name: name.lower(), _this_module.get_all_names())
bare_names_dict = {name.lower().split('.', 1)[1]: name.lower() for name in _this_module.get_all_names()}
globals()['ALL_NAMES'] = all_names
globals()['BARE_NAMES_DICT'] = bare_names_dict
assert itlo in all_names or itlo in bare_names_dict.keys(),\
'Element {0} was not found in the circuit'.format(item)
try:
fullitem = bare_names_dict[item.lower()]
except KeyError:
fullitem = item.lower()
return _PackedOpendssElement(*fullitem.split('.', 1))
def get_all_names():
"""Gets the fully qualified names of the elements, plus buses, loadshapes and xycurves."""
_odr.utils.run_command('makebuslist')
if _this_module.names_up2date:
return _this_module._cached_allnames
else:
anl = []
_odr.utils.run_command('makebuslist')
anl.extend(map(lambda bn: 'bus.' + bn, _odr.Circuit.AllBusNames()))
anl.extend(_odr.Circuit.AllElementNames())
anl.extend(map(lambda ln: 'loadshape.' + ln, _odr.LoadShape.AllNames()))
anl.extend(map(lambda ln: 'xycurve.' + ln, _xycurve_names()))
# we access here all the elements that are interfaced through ActiveClass
plus_classes = ('tsdata', 'cndata', 'linecode', 'wiredata', 'linegeometry')
with _LogKiller(): # opendssdirect.py sends rogue warnings when an empty array is selected
for pc in plus_classes:
_this_module.Circuit.SetActiveClass(pc)
anl.extend([pc + '.' + x for x in _this_module.ActiveClass.AllNames()])
anl = [x.lower() for x in anl]
_this_module.names_up2date = True
_this_module._cached_allnames = anl
return anl
def txt_command(cmd_str: str, echo=True):
"""Performs a text interface call with the argument passed and logs command and response. The results are checked for
silent errors. **When instantiating components through this function, the update of the names returned by
get_all_names()is triggered**. The log output can be suppressed by setting the keyword argument echo=False; but even
in this case, if kp.get_log_level() is 0, the log of the command will be forced."""
# this context manager has the only purpose to try to redirect the stdout coming from opendss compiled library,
# which in this context is a nuisance (for example, it prints "Duty Cycle solution" every time a solution is
# launched
global _command_manager
with _command_manager():
rslt = _this_module.utils.run_command(cmd_str) # rslt could be an error string too
if _DISKFULL_RE.search(rslt) is not None:
# it may happen that the command manager's stdout redirection meets a "disk full" error for os-related problems,
# especially under Windows. In this case, we re-run the command, give up the command manager, send a warning
rslt = _this_module.utils.run_command(cmd_str)
_command_manager = NullCm
mlog.warning('The run_command context manager was suppressed after an error: {}'.format(str(rslt)))
if echo or get_log_level() == 0:
log_line_on_debug_log('[' + cmd_str.replace('\n', '\n' + ' ' * (30 + len(DEFAULT_ENH_NAME)))
+ ']-->[' + rslt.replace('\n', '') + ']')
log_bare_command(cmd_str)
if _influences_names(cmd_str):
_this_module.names_up2date = False
try:
_validate_text_interface_result(rslt)
except OpenDSSTextError:
raise
else:
return rslt
def log_line_on_debug_log(line: str, lvl=_DBG_LVL):
"""Logs a line in the command log."""
clog.log(lvl, '(id:{0})-'.format(_ID) + line)
# </editor-fold>
def log_bare_command(line: str):
"""Logs a line in the command log."""
bclog.log(logging.DEBUG, line)
# </editor-fold>
# <editor-fold desc="Rolling over the logs">
for lh in mlog.handlers:
if hasattr(lh, 'doRollover'):
lh.doRollover()
for lh in clog.handlers:
if hasattr(lh, 'doRollover'):
lh.doRollover()
# </editor-fold>
| [
"opendssdirect.Basic.Start",
"opendssdirect.Circuit.AllElementNames",
"opendssdirect.utils.run_command",
"inspect.getmembers",
"pandas.DataFrame",
"numpy.multiply",
"numpy.transpose",
"opendssdirect.CktElement.NumTerminals",
"numpy.reshape",
"opendssdirect.Circuit.AllBusNames",
"re.sub",
"copy... | [((5079, 5103), 're.compile', '_re_compile', (['"""Disk Full"""'], {}), "('Disk Full')\n", (5090, 5103), True, 'from re import compile as _re_compile\n'), ((14372, 14401), 'inspect.getmembers', '_inspect_getmembers', (['_odr.dss'], {}), '(_odr.dss)\n', (14391, 14401), True, 'from inspect import getmembers as _inspect_getmembers\n'), ((1817, 1836), 'opendssdirect.Basic.Start', '_odr.Basic.Start', (['(0)'], {}), '(0)\n', (1833, 1836), True, 'import opendssdirect as _odr\n'), ((2472, 2489), 'numpy.asarray', '_np.asarray', (['item'], {}), '(item)\n', (2483, 2489), True, 'import numpy as _np\n'), ((2982, 3012), 'opendssdirect.CktElement.NumTerminals', '_odr.CktElement.NumTerminals', ([], {}), '()\n', (3010, 3012), True, 'import opendssdirect as _odr\n'), ((3025, 3056), 'opendssdirect.CktElement.NumConductors', '_odr.CktElement.NumConductors', ([], {}), '()\n', (3054, 3056), True, 'import opendssdirect as _odr\n'), ((3111, 3155), 'numpy.zeros', '_np.zeros', (['[nterm, ncond]'], {'dtype': '_np.complex'}), '([nterm, ncond], dtype=_np.complex)\n', (3120, 3155), True, 'import numpy as _np\n'), ((3413, 3443), 'opendssdirect.CktElement.NumTerminals', '_odr.CktElement.NumTerminals', ([], {}), '()\n', (3441, 3443), True, 'import opendssdirect as _odr\n'), ((3456, 3487), 'opendssdirect.CktElement.NumConductors', '_odr.CktElement.NumConductors', ([], {}), '()\n', (3485, 3487), True, 'import opendssdirect as _odr\n'), ((3543, 3583), 'numpy.zeros', '_np.zeros', (['[nterm, ncond]'], {'dtype': '_np.int'}), '([nterm, ncond], dtype=_np.int)\n', (3552, 3583), True, 'import numpy as _np\n'), ((4158, 4214), 'pandas.DataFrame', '_DataFrame', ([], {'data': 'mtx', 'index': 'raw_n_ord', 'columns': 'raw_n_ord'}), '(data=mtx, index=raw_n_ord, columns=raw_n_ord)\n', (4168, 4214), True, 'from pandas import DataFrame as _DataFrame\n'), ((4481, 4499), 'numpy.transpose', '_np.transpose', (['mtx'], {}), '(mtx)\n', (4494, 4499), True, 'import numpy as _np\n'), ((4756, 4777), 
'opendssdirect.XYCurves.First', '_odr.XYCurves.First', ([], {}), '()\n', (4775, 4777), True, 'import opendssdirect as _odr\n'), ((5315, 5333), 'json.load', '_json_load', (['_tfile'], {}), '(_tfile)\n', (5325, 5333), True, 'from json import load as _json_load\n'), ((5746, 5764), 'json.load', '_json_load', (['_ufile'], {}), '(_ufile)\n', (5756, 5764), True, 'from json import load as _json_load\n'), ((5983, 6001), 'json.load', '_json_load', (['_ifile'], {}), '(_ifile)\n', (5993, 6001), True, 'from json import load as _json_load\n'), ((6132, 6150), 'json.load', '_json_load', (['_ifile'], {}), '(_ifile)\n', (6142, 6150), True, 'from json import load as _json_load\n'), ((14597, 14624), 'inspect.getmembers', '_inspect_getmembers', (['_i1[1]'], {}), '(_i1[1])\n', (14616, 14624), True, 'from inspect import getmembers as _inspect_getmembers\n'), ((28568, 28582), 'numpy.asarray', '_np.asarray', (['x'], {}), '(x)\n', (28579, 28582), True, 'import numpy as _np\n'), ((28606, 28620), 'numpy.asarray', '_np.asarray', (['h'], {}), '(h)\n', (28617, 28620), True, 'import numpy as _np\n'), ((29816, 29853), 'opendssdirect.utils.run_command', '_odr.utils.run_command', (['"""makebuslist"""'], {}), "('makebuslist')\n", (29838, 29853), True, 'import opendssdirect as _odr\n'), ((1918, 1936), 'opendssdirect.Basic.Start', '_odr.Basic.Start', ([], {}), '()\n', (1934, 1936), True, 'import opendssdirect as _odr\n'), ((2527, 2548), 'numpy.array', '_np.array', (['item[0::2]'], {}), '(item[0::2])\n', (2536, 2548), True, 'import numpy as _np\n'), ((4093, 4118), 'opendssdirect.Circuit.YNodeOrder', '_odr.Circuit.YNodeOrder', ([], {}), '()\n', (4116, 4118), True, 'import opendssdirect as _odr\n'), ((4374, 4411), 'numpy.reshape', '_np.reshape', (['item[0::2]', '(side, side)'], {}), '(item[0::2], (side, side))\n', (4385, 4411), True, 'import numpy as _np\n'), ((4678, 4699), 'opendssdirect.XYCurves.Count', '_odr.XYCurves.Count', ([], {}), '()\n', (4697, 4699), True, 'import opendssdirect as _odr\n'), 
((4853, 4873), 'opendssdirect.XYCurves.Next', '_odr.XYCurves.Next', ([], {}), '()\n', (4871, 4873), True, 'import opendssdirect as _odr\n'), ((6571, 6604), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (6586, 6604), False, 'import logging\n'), ((6647, 6678), 'logging.disable', 'logging.disable', (['logging.NOTSET'], {}), '(logging.NOTSET)\n', (6662, 6678), False, 'import logging\n'), ((12309, 12339), 'functools.reduce', '_reduce', (['_getitem', 'stack', 'um_d'], {}), '(_getitem, stack, um_d)\n', (12316, 12339), True, 'from functools import reduce as _reduce, partial as _partial\n'), ((12522, 12565), 'functools.reduce', '_reduce', (['_getitem', 'stack', '_this_module._trt'], {}), '(_getitem, stack, _this_module._trt)\n', (12529, 12565), True, 'from functools import reduce as _reduce, partial as _partial\n'), ((20585, 20649), 'copy.deepcopy', '_deepcopy', (["_DEFAULT_COMP['default_' + self.eltype]['properties']"], {}), "(_DEFAULT_COMP['default_' + self.eltype]['properties'])\n", (20594, 20649), True, 'from copy import deepcopy as _deepcopy\n'), ((29969, 30006), 'opendssdirect.utils.run_command', '_odr.utils.run_command', (['"""makebuslist"""'], {}), "('makebuslist')\n", (29991, 30006), True, 'import opendssdirect as _odr\n'), ((2551, 2572), 'numpy.array', '_np.array', (['item[1::2]'], {}), '(item[1::2])\n', (2560, 2572), True, 'import numpy as _np\n'), ((4426, 4463), 'numpy.reshape', '_np.reshape', (['item[1::2]', '(side, side)'], {}), '(item[1::2], (side, side))\n', (4437, 4463), True, 'import numpy as _np\n'), ((4819, 4839), 'opendssdirect.XYCurves.Name', '_odr.XYCurves.Name', ([], {}), '()\n', (4837, 4839), True, 'import opendssdirect as _odr\n'), ((14497, 14553), 'types.ModuleType', 'types.ModuleType', (["(_this_module.__name__ + '.' + _i1_name)"], {}), "(_this_module.__name__ + '.' 
+ _i1_name)\n", (14513, 14553), False, 'import types\n'), ((30102, 30132), 'opendssdirect.Circuit.AllElementNames', '_odr.Circuit.AllElementNames', ([], {}), '()\n', (30130, 30132), True, 'import opendssdirect as _odr\n'), ((3315, 3339), 'numpy.multiply', '_np.multiply', (['(1.0j)', 'imag'], {}), '(1.0j, imag)\n', (3327, 3339), True, 'import numpy as _np\n'), ((3809, 3835), 'opendssdirect.Circuit.AllBusNames', '_odr.Circuit.AllBusNames', ([], {}), '()\n', (3833, 3835), True, 'import opendssdirect as _odr\n'), ((3904, 3934), 'opendssdirect.Circuit.AllElementNames', '_odr.Circuit.AllElementNames', ([], {}), '()\n', (3932, 3934), True, 'import opendssdirect as _odr\n'), ((4005, 4030), 'opendssdirect.Circuit.YNodeOrder', '_odr.Circuit.YNodeOrder', ([], {}), '()\n', (4028, 4030), True, 'import opendssdirect as _odr\n'), ((8634, 8675), 're.sub', '_sub', (['"""[\\\\,|\\\\ ]*(\\\\]|"|\\\\))"""', '""""""', 'string'], {}), '(\'[\\\\,|\\\\ ]*(\\\\]|"|\\\\))\', \'\', string)\n', (8638, 8675), True, 'from re import sub as _sub\n'), ((8689, 8724), 're.sub', '_sub', (['"""(\\\\[|"|\\\\()\\\\ *"""', '""""""', 'dp_str'], {}), '(\'(\\\\[|"|\\\\()\\\\ *\', \'\', dp_str)\n', (8693, 8724), True, 'from re import sub as _sub\n'), ((12029, 12047), 'opendssdirect.Lines.Units', '_odr.Lines.Units', ([], {}), '()\n', (12045, 12047), True, 'import opendssdirect as _odr\n'), ((16017, 16032), 'operator.attrgetter', '_attrgetter', (['ac'], {}), '(ac)\n', (16028, 16032), True, 'from operator import getitem as _getitem, attrgetter as _attrgetter\n'), ((30054, 30080), 'opendssdirect.Circuit.AllBusNames', '_odr.Circuit.AllBusNames', ([], {}), '()\n', (30078, 30080), True, 'import opendssdirect as _odr\n'), ((30187, 30212), 'opendssdirect.LoadShape.AllNames', '_odr.LoadShape.AllNames', ([], {}), '()\n', (30210, 30212), True, 'import opendssdirect as _odr\n'), ((12128, 12154), 'opendssdirect.LoadShape.UseActual', '_odr.LoadShape.UseActual', ([], {}), '()\n', (12152, 12154), True, 'import opendssdirect 
as _odr\n')] |
#!/usr/bin/env python
# coding: utf-8
# ## Required Frameworks
# In[ ]:
import matplotlib.pyplot as plt
from google.colab.patches import cv2_imshow
import cv2
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.utils import to_categorical
from keras.optimizers import SGD, Adam
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# ## Read The Data
# In[ ]:
data = pd.read_csv(r"/content/drive/MyDrive/TEMP/PROJECT/Handwritten Digit & Character Recognition System/A_Z_Handwritten_Data.csv").astype('float32')
# Printing The First 10 Images Using Data.Head(10)
print(data.head(10))
# ## Split Data Into Images And Their Labels
# In[ ]:
# Splitting the data read into the images & their corresponding labels.
# The ‘0’ contains the labels, & so we drop the ‘0’ column from the data dataframe read & use it in the y to form the labels.
X = data.drop('0',axis = 1)
y = data['0']
# ## Reshaping The Data In The CSV File So That It Can Be Displayed As An Image
# In[ ]:
# Also, we are reshaping the train & test image data so that they can be displayed as an image, as initially in the CSV file they were present as 784 columns of pixel data.
# So we convert it to 28×28 pixels.
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size = 0.2)
train_x = np.reshape(train_x.values, (train_x.shape[0], 28,28))
test_x = np.reshape(test_x.values, (test_x.shape[0], 28,28))
print("Train Data Shape: ", train_x.shape)
print("Test Data Shape: ", test_x.shape)
# In[ ]:
# All the labels are present in the form of floating point values, that we convert to integer values, & so we create a dictionary word_dict to map the integer values with the characters.
word_dict = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X', 24:'Y',25:'Z'}
# ## Plotting The Number Of Alphabets In The Dataset
# In[ ]:
# Firstly we convert the labels into integer values and append into the count list according to the label.
# This count list has the number of images present in the dataset belonging to each alphabet.
# Now we create a list – alphabets containing all the characters using the values() function of the dictionary.
# Now using the count & alphabets lists we draw the horizontal bar plot.
y_int = np.int0(y)
count = np.zeros(26, dtype='int')
for i in y_int:
count[i] +=1
alphabets = []
for i in word_dict.values():
alphabets.append(i)
fig, ax = plt.subplots(1,1, figsize=(10,10))
ax.barh(alphabets, count)
plt.xlabel("Number of elements ")
plt.ylabel("Alphabets")
plt.grid()
plt.show()
# ## Shuffling The Data
# In[ ]:
# Now we shuffle some of the images of the train set.
# The shuffling is done using the shuffle() function so that we can display some random images.
# We then create 9 plots in 3×3 shape & display the thresholded images of 9 alphabets.
shuff = shuffle(train_x[:100])
fig, ax = plt.subplots(3,3, figsize = (10,10))
axes = ax.flatten()
for i in range(9):
_, shu = cv2.threshold(shuff[i], 30, 200, cv2.THRESH_BINARY)
axes[i].imshow(np.reshape(shuff[i], (28,28)), cmap="Greys")
plt.show()
# ####The Above Image Depicts The Grayscale Images That We Got From The Dataset
# ## Data Reshaping
# In[ ]:
train_X = train_x.reshape(train_x.shape[0],train_x.shape[1],train_x.shape[2],1)
print("New shape of train data: ", train_X.shape)
test_X = test_x.reshape(test_x.shape[0], test_x.shape[1], test_x.shape[2],1)
print("New shape of train data: ", test_X.shape)
# Now we reshape the train & test image dataset so that they can be put in the model.
# In[ ]:
# Here we convert the single float values to categorical values.
# This is done as the CNN model takes input of labels & generates the output as a vector of probabilities.
train_yOHE = to_categorical(train_y, num_classes = 26, dtype='int')
print("New shape of train labels: ", train_yOHE.shape)
test_yOHE = to_categorical(test_y, num_classes = 26, dtype='int')
print("New shape of test labels: ", test_yOHE.shape)
# In[ ]:
# The convolution layers are generally followed by maxpool layers that are used to reduce the number of features extracted and ultimately the output of the maxpool and layers and convolution layers are flattened into a vector of single dimension and are given as an input to the Dense layer.
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(28,28,1)))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding = 'same'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding = 'valid'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Flatten())
model.add(Dense(64,activation ="relu"))
model.add(Dense(128,activation ="relu"))
model.add(Dense(26,activation ="softmax"))
# In[ ]:
# Here we are compiling the model, where we define the optimizing function & the loss function to be used for fitting.
# The optimizing function used is Adam, that is a combination of RMSprop & Adagram optimizing algorithms.
model.compile(optimizer = Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
# The dataset is very large so we are training for only a single epoch, however, as required we can even train it for multiple epochs.
history = model.fit(train_X, train_yOHE, epochs=1, validation_data = (test_X,test_yOHE))
# In[ ]:
# Now we are getting the model summary that tells us what were the different layers defined in the model & also we save the model using model.save() function.
model.summary()
model.save(r'Character_Model.h5')
# ## Getting the Train & Validation Accuracies & Losses
# In[ ]:
# Accuracy
print("The Validation Accuracy Is :", history.history['val_accuracy'])
print("The Training Accuracy Is :", history.history['accuracy'])
# Loss
print("The Validation Loss Is :", history.history['val_loss'])
print("The Training Loss Is :", history.history['loss'])
# ## Doing Some Predictions on Test Data
# In[ ]:
fig, axes = plt.subplots(3,3, figsize=(8,9))
axes = axes.flatten()
for i,ax in enumerate(axes):
img = np.reshape(test_X[i], (28,28))
ax.imshow(img, cmap="Greys")
pred = word_dict[np.argmax(test_yOHE[i])]
ax.set_title("Prediction: "+pred)
ax.grid()
# ## Doing Prediction on User Input Image
# In[26]:
img = cv2.imread(r'/content/drive/MyDrive/TEMP/PROJECT/Handwritten Digit & Character Recognition System/c1034_02_00028.png')
img_copy = img.copy()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (400,440))
# In[27]:
img_copy = cv2.GaussianBlur(img_copy, (7,7), 0)
img_gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
_, img_thresh = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY_INV)
img_final = cv2.resize(img_thresh, (28,28))
img_final =np.reshape(img_final, (1,28,28,1))
# In[28]:
img_pred = word_dict[np.argmax(model.predict(img_final))]
cv2.putText(img, "Prediction: " + img_pred, (20,410), cv2.FONT_HERSHEY_DUPLEX, 1.3, color = (255,0,30))
cv2_imshow(img)
# In[ ]:
| [
"cv2.GaussianBlur",
"numpy.argmax",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"google.colab.patches.cv2_imshow",
"keras.layers.MaxPool2D",
"cv2.cvtColor",
"keras.layers.Flatten",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"cv2.resize",
"keras.utils.to_categorical",
"... | [((1430, 1467), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1446, 1467), False, 'from sklearn.model_selection import train_test_split\n'), ((1481, 1535), 'numpy.reshape', 'np.reshape', (['train_x.values', '(train_x.shape[0], 28, 28)'], {}), '(train_x.values, (train_x.shape[0], 28, 28))\n', (1491, 1535), True, 'import numpy as np\n'), ((1544, 1596), 'numpy.reshape', 'np.reshape', (['test_x.values', '(test_x.shape[0], 28, 28)'], {}), '(test_x.values, (test_x.shape[0], 28, 28))\n', (1554, 1596), True, 'import numpy as np\n'), ((2530, 2540), 'numpy.int0', 'np.int0', (['y'], {}), '(y)\n', (2537, 2540), True, 'import numpy as np\n'), ((2549, 2574), 'numpy.zeros', 'np.zeros', (['(26)'], {'dtype': '"""int"""'}), "(26, dtype='int')\n", (2557, 2574), True, 'import numpy as np\n'), ((2688, 2724), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (2700, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2783), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of elements """'], {}), "('Number of elements ')\n", (2760, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2784, 2807), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Alphabets"""'], {}), "('Alphabets')\n", (2794, 2807), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2818), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2816, 2818), True, 'import matplotlib.pyplot as plt\n'), ((2819, 2829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2827, 2829), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3135), 'sklearn.utils.shuffle', 'shuffle', (['train_x[:100]'], {}), '(train_x[:100])\n', (3120, 3135), False, 'from sklearn.utils import shuffle\n'), ((3147, 3183), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(10, 10)'}), '(3, 3, figsize=(10, 10))\n', (3159, 3183), True, 'import 
matplotlib.pyplot as plt\n'), ((3353, 3363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3361, 3363), True, 'import matplotlib.pyplot as plt\n'), ((4021, 4073), 'keras.utils.to_categorical', 'to_categorical', (['train_y'], {'num_classes': '(26)', 'dtype': '"""int"""'}), "(train_y, num_classes=26, dtype='int')\n", (4035, 4073), False, 'from keras.utils import to_categorical\n'), ((4144, 4195), 'keras.utils.to_categorical', 'to_categorical', (['test_y'], {'num_classes': '(26)', 'dtype': '"""int"""'}), "(test_y, num_classes=26, dtype='int')\n", (4158, 4195), False, 'from keras.utils import to_categorical\n'), ((4564, 4576), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4574, 4576), False, 'from keras.models import Sequential\n'), ((6353, 6387), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(8, 9)'}), '(3, 3, figsize=(8, 9))\n', (6365, 6387), True, 'import matplotlib.pyplot as plt\n'), ((6678, 6805), 'cv2.imread', 'cv2.imread', (['"""/content/drive/MyDrive/TEMP/PROJECT/Handwritten Digit & Character Recognition System/c1034_02_00028.png"""'], {}), "(\n '/content/drive/MyDrive/TEMP/PROJECT/Handwritten Digit & Character Recognition System/c1034_02_00028.png'\n )\n", (6688, 6805), False, 'import cv2\n'), ((6826, 6862), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (6838, 6862), False, 'import cv2\n'), ((6869, 6896), 'cv2.resize', 'cv2.resize', (['img', '(400, 440)'], {}), '(img, (400, 440))\n', (6879, 6896), False, 'import cv2\n'), ((6921, 6958), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img_copy', '(7, 7)', '(0)'], {}), '(img_copy, (7, 7), 0)\n', (6937, 6958), False, 'import cv2\n'), ((6969, 7011), 'cv2.cvtColor', 'cv2.cvtColor', (['img_copy', 'cv2.COLOR_BGR2GRAY'], {}), '(img_copy, cv2.COLOR_BGR2GRAY)\n', (6981, 7011), False, 'import cv2\n'), ((7028, 7084), 'cv2.threshold', 'cv2.threshold', (['img_gray', '(100)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), 
'(img_gray, 100, 255, cv2.THRESH_BINARY_INV)\n', (7041, 7084), False, 'import cv2\n'), ((7098, 7130), 'cv2.resize', 'cv2.resize', (['img_thresh', '(28, 28)'], {}), '(img_thresh, (28, 28))\n', (7108, 7130), False, 'import cv2\n'), ((7141, 7178), 'numpy.reshape', 'np.reshape', (['img_final', '(1, 28, 28, 1)'], {}), '(img_final, (1, 28, 28, 1))\n', (7151, 7178), True, 'import numpy as np\n'), ((7249, 7358), 'cv2.putText', 'cv2.putText', (['img', "('Prediction: ' + img_pred)", '(20, 410)', 'cv2.FONT_HERSHEY_DUPLEX', '(1.3)'], {'color': '(255, 0, 30)'}), "(img, 'Prediction: ' + img_pred, (20, 410), cv2.\n FONT_HERSHEY_DUPLEX, 1.3, color=(255, 0, 30))\n", (7260, 7358), False, 'import cv2\n'), ((7353, 7368), 'google.colab.patches.cv2_imshow', 'cv2_imshow', (['img'], {}), '(img)\n', (7363, 7368), False, 'from google.colab.patches import cv2_imshow\n'), ((3237, 3288), 'cv2.threshold', 'cv2.threshold', (['shuff[i]', '(30)', '(200)', 'cv2.THRESH_BINARY'], {}), '(shuff[i], 30, 200, cv2.THRESH_BINARY)\n', (3250, 3288), False, 'import cv2\n'), ((4588, 4675), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(28, 28, 1)'}), "(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(28, \n 28, 1))\n", (4594, 4675), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((4680, 4718), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)'}), '(pool_size=(2, 2), strides=2)\n', (4689, 4718), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((4731, 4804), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=64, kernel_size=(3, 3), activation='relu', padding='same')\n", (4737, 4804), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((4818, 4856), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], 
{'pool_size': '(2, 2)', 'strides': '(2)'}), '(pool_size=(2, 2), strides=2)\n', (4827, 4856), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((4869, 4944), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'padding': '"""valid"""'}), "(filters=128, kernel_size=(3, 3), activation='relu', padding='valid')\n", (4875, 4944), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((4958, 4996), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)'}), '(pool_size=(2, 2), strides=2)\n', (4967, 4996), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((5009, 5018), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5016, 5018), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((5031, 5059), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (5036, 5059), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((5071, 5100), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (5076, 5100), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((5113, 5144), 'keras.layers.Dense', 'Dense', (['(26)'], {'activation': '"""softmax"""'}), "(26, activation='softmax')\n", (5118, 5144), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n'), ((6448, 6479), 'numpy.reshape', 'np.reshape', (['test_X[i]', '(28, 28)'], {}), '(test_X[i], (28, 28))\n', (6458, 6479), True, 'import numpy as np\n'), ((576, 710), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/MyDrive/TEMP/PROJECT/Handwritten Digit & Character Recognition System/A_Z_Handwritten_Data.csv"""'], {}), "(\n '/content/drive/MyDrive/TEMP/PROJECT/Handwritten Digit & Character Recognition System/A_Z_Handwritten_Data.csv'\n )\n", (587, 
710), True, 'import pandas as pd\n'), ((3308, 3338), 'numpy.reshape', 'np.reshape', (['shuff[i]', '(28, 28)'], {}), '(shuff[i], (28, 28))\n', (3318, 3338), True, 'import numpy as np\n'), ((5410, 5435), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (5414, 5435), False, 'from keras.optimizers import SGD, Adam\n'), ((6538, 6561), 'numpy.argmax', 'np.argmax', (['test_yOHE[i]'], {}), '(test_yOHE[i])\n', (6547, 6561), True, 'import numpy as np\n')] |
import numpy as np
import cv2
def detect(path, min_confidence=0.5, model_path='frozen_east_text_detection.pb',
           new_size=(320, 320)):
    """Run EAST text detection on the image at ``path``.

    Args:
        path: filesystem path of the image to scan.
        min_confidence: score threshold below which candidate boxes are
            discarded (previously hard-coded to 0.5).
        model_path: path to the frozen EAST TensorFlow graph
            (previously hard-coded).
        new_size: (width, height) the image is resized to before being fed
            to the network; EAST requires multiples of 32.

    Returns:
        ((rects, confidences), rW, rH, orig, (origH, origW)) where ``rects``
        are (startX, startY, endX, endY) boxes in resized-image coordinates,
        ``rW``/``rH`` are the ratios needed to map them back to the original
        resolution, ``orig`` is an unmodified copy of the input image and
        (origH, origW) its dimensions.
    """
    image = cv2.imread(path)
    orig = image.copy()
    (H, W) = image.shape[:2]
    (origH, origW) = (H, W)
    (newW, newH) = new_size
    # Ratios to rescale detections back to the original image size.
    rW = W / float(newW)
    rH = H / float(newH)
    image = cv2.resize(image, (newW, newH))
    (H, W) = image.shape[:2]
    # Output layers: per-pixel text/no-text score map and box geometry.
    layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
    net = cv2.dnn.readNet(model_path)
    blob = cv2.dnn.blobFromImage(image, 1.0, (W, H), (123.68, 116.78, 103.94),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []
    for y in range(0, numRows):
        scoresData = scores[0, 0, y]
        # Distances from the anchor point to the four box edges, plus angle.
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]
        for x in range(0, numCols):
            if scoresData[x] < min_confidence:
                continue
            # The score/geometry maps are 4x smaller than the network input.
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]
            # Rotate the edge distances by the predicted angle to get corners.
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])
    return (rects, confidences), rW, rH, orig, (origH, origW)
| [
"cv2.dnn.blobFromImage",
"cv2.dnn.readNet",
"cv2.imread",
"numpy.sin",
"numpy.cos",
"cv2.resize"
] | [((67, 83), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (77, 83), False, 'import cv2\n'), ((273, 304), 'cv2.resize', 'cv2.resize', (['image', '(newW, newH)'], {}), '(image, (newW, newH))\n', (283, 304), False, 'import cv2\n'), ((436, 484), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""frozen_east_text_detection.pb"""'], {}), "('frozen_east_text_detection.pb')\n", (451, 484), False, 'import cv2\n'), ((503, 600), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1.0)', '(W, H)', '(123.68, 116.78, 103.94)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=\n True, crop=False)\n', (524, 600), False, 'import cv2\n'), ((1302, 1315), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1308, 1315), True, 'import numpy as np\n'), ((1335, 1348), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1341, 1348), True, 'import numpy as np\n')] |
import abc
import itertools
from typing import Any
from torch import nn
from torch.nn import functional as F
from torch import optim
import numpy as np
import torch
from torch import distributions
from hw2.infrastructure import pytorch_util as ptu
from hw2.policies.base_policy import BasePolicy
from hw2.infrastructure.utils import normalize, unnormalize
class MLPPolicy(BasePolicy, nn.Module, metaclass=abc.ABCMeta):
    """MLP policy over a discrete or continuous action space.

    Discrete actions: a network outputs categorical logits.
    Continuous actions: a network outputs the Gaussian mean, and a learned
    state-independent log-std parameterizes the (diagonal) covariance.

    Fix: the original class defined ``update``, ``get_action`` and
    ``forward`` twice each; since later defs shadow earlier ones, the first
    copies were dead code and have been removed.
    """

    def __init__(self,
                 ac_dim,
                 ob_dim,
                 n_layers,
                 size,
                 discrete=False,
                 learning_rate=1e-4,
                 training=True,
                 nn_baseline=False,
                 **kwargs
                 ):
        super().__init__(**kwargs)

        # init vars
        self.ac_dim = ac_dim
        self.ob_dim = ob_dim
        self.n_layers = n_layers
        self.discrete = discrete
        self.size = size
        self.learning_rate = learning_rate
        self.training = training
        self.nn_baseline = nn_baseline

        if self.discrete:
            self.logits_na = ptu.build_mlp(
                input_size=self.ob_dim,
                output_size=self.ac_dim,
                n_layers=self.n_layers,
                size=self.size,
            )
            self.logits_na.to(ptu.device)
            self.mean_net = None
            self.logstd = None
            self.optimizer = optim.Adam(self.logits_na.parameters(),
                                        self.learning_rate)
        else:
            self.logits_na = None
            self.mean_net = ptu.build_mlp(
                input_size=self.ob_dim,
                output_size=self.ac_dim,
                n_layers=self.n_layers, size=self.size,
            )
            self.mean_net.to(ptu.device)
            self.logstd = nn.Parameter(
                torch.zeros(self.ac_dim, dtype=torch.float32, device=ptu.device)
            )
            self.logstd.to(ptu.device)
            # Optimize the mean network and the log-std jointly.
            self.optimizer = optim.Adam(
                itertools.chain([self.logstd], self.mean_net.parameters()),
                self.learning_rate
            )

        if nn_baseline:
            # Optional state-value baseline network for variance reduction.
            self.baseline = ptu.build_mlp(
                input_size=self.ob_dim,
                output_size=1,
                n_layers=self.n_layers,
                size=self.size,
            )
            self.baseline.to(ptu.device)
            self.baseline_optimizer = optim.Adam(
                self.baseline.parameters(),
                self.learning_rate,
            )
        else:
            self.baseline = None

    ##################################

    def save(self, filepath):
        """Serialize the policy's parameters to ``filepath``."""
        torch.save(self.state_dict(), filepath)

    ##################################

    def update(self, observations, actions, **kwargs):
        """Train the policy on a batch; implemented by subclasses."""
        raise NotImplementedError

    def forward(self, observation: torch.FloatTensor):
        """Return a ``torch.distributions.Distribution`` over actions."""
        if self.discrete:
            logits = self.logits_na(observation)
            action_distribution = distributions.Categorical(logits=logits)
            return action_distribution
        else:
            batch_mean = self.mean_net(observation)
            scale_tril = torch.diag(torch.exp(self.logstd))
            batch_dim = batch_mean.shape[0]
            batch_scale_tril = scale_tril.repeat(batch_dim, 1, 1)
            action_distribution = distributions.MultivariateNormal(
                batch_mean,
                scale_tril=batch_scale_tril,
            )
            return action_distribution

    def get_action(self, obs: np.ndarray) -> np.ndarray:
        """Sample action(s) for an observation, adding a batch dim if needed."""
        if len(obs.shape) > 1:
            observation = obs
        else:
            observation = obs[None]
        observation = ptu.from_numpy(observation)
        with torch.no_grad():
            action = self.forward(observation).sample()
        action = ptu.to_numpy(action)
        return action
#####################################################
#####################################################
class MLPPolicySL(MLPPolicy):
    """Behavior-cloning policy trained with a mean-squared-error objective."""

    def __init__(self, ac_dim, ob_dim, n_layers, size, **kwargs):
        super().__init__(ac_dim, ob_dim, n_layers, size, **kwargs)
        self.loss = nn.MSELoss()

    def update(
            self, observations, actions,
            adv_n=None, acs_labels_na=None, qvals=None
    ):
        """Take one supervised gradient step toward the expert actions.

        Returns a dict with the scalar training loss for logging.
        """
        obs_t = torch.Tensor(observations).to(ptu.device)
        act_t = torch.Tensor(actions).to(ptu.device)
        self.optimizer.zero_grad(set_to_none=True)
        prediction = self(obs_t)
        step_loss = self.loss(prediction, act_t)
        step_loss.backward()
        self.optimizer.step()
        return {
            # You can add extra logging information here, but keep this line
            'Training Loss': ptu.to_numpy(step_loss),
        }
class MLPPolicyPG(MLPPolicy):
    """Policy-gradient policy with an optional neural-network baseline."""

    def __init__(self, ac_dim, ob_dim, n_layers, size, **kwargs):
        super().__init__(ac_dim, ob_dim, n_layers, size, **kwargs)
        self.baseline_loss = nn.MSELoss()

    def update(self, observations, actions, advantages, q_values=None):
        """One policy-gradient step; also fits the baseline if enabled.

        Returns a dict with the scalar policy loss for logging.
        """
        observations = ptu.from_numpy(observations)
        actions = ptu.from_numpy(actions)
        advantages = ptu.from_numpy(advantages)

        self.optimizer.zero_grad()
        forward_return = self.forward(observations)
        log_prob = forward_return.log_prob(actions)
        # Policy-gradient objective: maximize E[log pi(a|s) * advantage].
        loss = torch.sum(-log_prob * advantages)
        loss.backward()

        if self.nn_baseline:
            self.baseline_optimizer.zero_grad()
            baseline_pred = self.baseline(observations).squeeze()
            # BUG FIX: normalize targets with (mean, std); the original
            # passed np.mean(q_values) for both arguments.
            baseline_target = ptu.from_numpy(
                normalize(q_values, np.mean(q_values), np.std(q_values)))
            baseline_loss = self.baseline_loss(baseline_pred, baseline_target)
            baseline_loss.backward()
            self.baseline_optimizer.step()

        self.optimizer.step()
        train_log = {'Training Loss': ptu.to_numpy(loss)}
        return train_log

    def run_baseline_prediction(self, observations):
        """Return the baseline's state-value predictions as a numpy array."""
        observations = ptu.from_numpy(observations)
        pred = self.baseline(observations)
        return ptu.to_numpy(pred.squeeze())
| [
"hw2.infrastructure.pytorch_util.from_numpy",
"torch.nn.MSELoss",
"torch.distributions.Categorical",
"torch.exp",
"torch.Tensor",
"numpy.mean",
"torch.distributions.MultivariateNormal",
"hw2.infrastructure.pytorch_util.to_numpy",
"torch.zeros",
"torch.no_grad",
"torch.sum",
"hw2.infrastructure... | [((3409, 3436), 'hw2.infrastructure.pytorch_util.from_numpy', 'ptu.from_numpy', (['observation'], {}), '(observation)\n', (3423, 3436), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((3540, 3560), 'hw2.infrastructure.pytorch_util.to_numpy', 'ptu.to_numpy', (['action'], {}), '(action)\n', (3552, 3560), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((4903, 4930), 'hw2.infrastructure.pytorch_util.from_numpy', 'ptu.from_numpy', (['observation'], {}), '(observation)\n', (4917, 4930), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((5034, 5054), 'hw2.infrastructure.pytorch_util.to_numpy', 'ptu.to_numpy', (['action'], {}), '(action)\n', (5046, 5054), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((5373, 5385), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5383, 5385), False, 'from torch import nn\n'), ((6159, 6171), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6169, 6171), False, 'from torch import nn\n'), ((6268, 6296), 'hw2.infrastructure.pytorch_util.from_numpy', 'ptu.from_numpy', (['observations'], {}), '(observations)\n', (6282, 6296), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((6315, 6338), 'hw2.infrastructure.pytorch_util.from_numpy', 'ptu.from_numpy', (['actions'], {}), '(actions)\n', (6329, 6338), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((6360, 6386), 'hw2.infrastructure.pytorch_util.from_numpy', 'ptu.from_numpy', (['advantages'], {}), '(advantages)\n', (6374, 6386), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((6542, 6575), 'torch.sum', 'torch.sum', (['(-log_prob * advantages)'], {}), '(-log_prob * advantages)\n', (6551, 6575), False, 'import torch\n'), ((7200, 7228), 'hw2.infrastructure.pytorch_util.from_numpy', 'ptu.from_numpy', (['observations'], {}), '(observations)\n', (7214, 7228), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((1107, 1214), 
'hw2.infrastructure.pytorch_util.build_mlp', 'ptu.build_mlp', ([], {'input_size': 'self.ob_dim', 'output_size': 'self.ac_dim', 'n_layers': 'self.n_layers', 'size': 'self.size'}), '(input_size=self.ob_dim, output_size=self.ac_dim, n_layers=\n self.n_layers, size=self.size)\n', (1120, 1214), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((1600, 1707), 'hw2.infrastructure.pytorch_util.build_mlp', 'ptu.build_mlp', ([], {'input_size': 'self.ob_dim', 'output_size': 'self.ac_dim', 'n_layers': 'self.n_layers', 'size': 'self.size'}), '(input_size=self.ob_dim, output_size=self.ac_dim, n_layers=\n self.n_layers, size=self.size)\n', (1613, 1707), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((2200, 2296), 'hw2.infrastructure.pytorch_util.build_mlp', 'ptu.build_mlp', ([], {'input_size': 'self.ob_dim', 'output_size': '(1)', 'n_layers': 'self.n_layers', 'size': 'self.size'}), '(input_size=self.ob_dim, output_size=1, n_layers=self.n_layers,\n size=self.size)\n', (2213, 2296), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((3450, 3465), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3463, 3465), False, 'import torch\n'), ((4201, 4241), 'torch.distributions.Categorical', 'distributions.Categorical', ([], {'logits': 'logits'}), '(logits=logits)\n', (4226, 4241), False, 'from torch import distributions\n'), ((4551, 4624), 'torch.distributions.MultivariateNormal', 'distributions.MultivariateNormal', (['batch_mean'], {'scale_tril': 'batch_scale_tril'}), '(batch_mean, scale_tril=batch_scale_tril)\n', (4583, 4624), False, 'from torch import distributions\n'), ((4944, 4959), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4957, 4959), False, 'import torch\n'), ((5936, 5954), 'hw2.infrastructure.pytorch_util.to_numpy', 'ptu.to_numpy', (['loss'], {}), '(loss)\n', (5948, 5954), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((7077, 7095), 'hw2.infrastructure.pytorch_util.to_numpy', 'ptu.to_numpy', (['loss'], {}), 
'(loss)\n', (7089, 7095), True, 'from hw2.infrastructure import pytorch_util as ptu\n'), ((1863, 1927), 'torch.zeros', 'torch.zeros', (['self.ac_dim'], {'dtype': 'torch.float32', 'device': 'ptu.device'}), '(self.ac_dim, dtype=torch.float32, device=ptu.device)\n', (1874, 1927), False, 'import torch\n'), ((4383, 4405), 'torch.exp', 'torch.exp', (['self.logstd'], {}), '(self.logstd)\n', (4392, 4405), False, 'import torch\n'), ((5634, 5660), 'torch.Tensor', 'torch.Tensor', (['observations'], {}), '(observations)\n', (5646, 5660), False, 'import torch\n'), ((5711, 5732), 'torch.Tensor', 'torch.Tensor', (['actions'], {}), '(actions)\n', (5723, 5732), False, 'import torch\n'), ((6809, 6826), 'numpy.mean', 'np.mean', (['q_values'], {}), '(q_values)\n', (6816, 6826), True, 'import numpy as np\n'), ((6828, 6845), 'numpy.mean', 'np.mean', (['q_values'], {}), '(q_values)\n', (6835, 6845), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import sys
import os
import argparse
import time
import datetime
import shutil
import numpy as np
from PIL import Image
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.datasets
import torchvision.transforms as transforms
import models
from trainer import Trainer
from validator import Validator
from tester import Tester
from utils.logger import Logger
best_prec1 = 0  # best top-1 validation accuracy seen so far (updated in main)
# Fix the RNG seeds for reproducible runs.
np.random.seed(40)
torch.manual_seed(40)
def get_parser():
    """Build the command-line argument parser for CIFAR10 training."""
    p = argparse.ArgumentParser(description='Training multi networks on CIFAR10')
    p.add_argument('--arch', '-a', default='resnet18', type=str,
                   help='network architecture')
    p.add_argument('--optimizer', default='adam', type=str,
                   help='optimizer for updating weights and biases (default: adam)')
    p.add_argument('--num-workers', '-j', default=4, type=int,
                   help='number of data loader workers')
    p.add_argument('--epochs', '-ep', default=200, type=int,
                   help='number of training epochs')
    p.add_argument('--batch-size', '-b', default=64, type=int,
                   help='mini batch size')
    p.add_argument('--learning-rate', '-lr', default=0.1, type=float,
                   help='initial learning rate')
    p.add_argument('--momentum', '-m', default=0., type=float,
                   help='momentum')
    p.add_argument('--weight-decay', '-wd', default=1e-4, type=float,
                   help='weight decay')
    p.add_argument('--print-freq', '-p', default=10, type=int,
                   help='print frequency')
    p.add_argument('--start-epoch', default=0, type=int,
                   help='epoch to resume from')
    p.add_argument('--resume', default='', type=str,
                   help='path to resumed checkpoint')
    p.add_argument('--evaluate', '-e', dest='evaluate', action='store_true',
                   help='evaluate on test set')
    p.add_argument('--train-dir', default='', type=str,
                   help='training set directory')
    # parser.add_argument('--val-dir', default='', type=str, help='validation set directory')
    p.add_argument('--test-dir', default='', type=str,
                   help='test set directory')
    p.add_argument('--log-dir', default='', type=str,
                   help='directory to save log')
    return p
def command_line_runner():
    """Parse the CLI arguments, echo them, and return the namespace."""
    parsed = get_parser().parse_args()
    print(parsed)
    return parsed
def main():
    """Train (or evaluate) a CIFAR10 classifier end to end.

    Builds the model and optimizer, optionally resumes from a checkpoint,
    constructs the data loaders, then either runs a single test pass
    (--evaluate) or the full training loop with per-epoch validation and
    best-model checkpointing.
    """
    global best_prec1
    args = command_line_runner()

    # Run name used for both the log directory and checkpoint filenames.
    saved_name = '{}_{}_mnt{}_lr{}_b{}_ep{}'.format(args.arch, args.optimizer, args.momentum,
                                                    args.learning_rate, args.batch_size, args.epochs)

    if not args.evaluate:
        if not os.path.exists(args.log_dir):
            # BUG FIX: os.makedir() does not exist (AttributeError at
            # runtime); os.makedirs() creates the directory and any parents.
            os.makedirs(args.log_dir)
        log_dir = os.path.join(args.log_dir, saved_name)
        logger = Logger(log_dir)

    # creates model
    model = models.construct(args.arch)
    print(model)

    # defines cost function and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = choose_optimizer(model, args)

    model = torch.nn.DataParallel(model).cuda()
    # model = model.cuda()

    # creates trainer, validator and tester
    trainer = Trainer(model, criterion)
    validator = Validator(model, criterion)
    tester = Tester(model, criterion)

    # resumes from checkpoint: restores epoch, best accuracy, weights and
    # optimizer state
    if args.resume:
        if os.path.exists(args.resume):
            print('\n===> loading checkpoint {} ...\n'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print('\n**** checkpoint {} loaded at epoch {} ...\n'.format(args.resume,
                                                                         checkpoint['epoch']))
        else:
            raise Exception('\n===> No checkpoint found at {} ...\n'.format(args.resume))

    # data transformation: random crop + horizontal flip augmentation for
    # training only; both splits use the standard CIFAR10 channel stats
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
        ]),
        'test': transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
        ])
    }

    # loads datasets
    print('\n===> loading dataset...\n')
    train_set = torchvision.datasets.CIFAR10(root=args.train_dir, train=True, download=False,
                                             transform=data_transforms['train'])
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               pin_memory=True,
                                               num_workers=args.num_workers)
    test_set = torchvision.datasets.CIFAR10(root=args.test_dir, train=False, download=False,
                                            transform=data_transforms['test'])
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=args.num_workers)
    print('\n===> dataset loaded...\n')

    cudnn.benchmark = True

    if args.evaluate:
        tester.test(test_loader)
        return

    start = datetime.datetime.now().replace(microsecond=0)
    print('\n===> Training starts at: {}\n'.format(start))

    t = tqdm(range(args.start_epoch, args.epochs), desc='Training Process', ncols=100, leave=True)
    for epoch in t:
        adjust_lr(optimizer, epoch, args, logger)
        trainer.train(optimizer, epoch, train_loader, logger, args.print_freq)
        prec1 = validator.validate(epoch, test_loader, logger, args.print_freq)  # same as test loader

        # Track the best top-1 accuracy and mirror the checkpoint when improved.
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)

        save_checkpoint({'epoch': epoch + 1,
                         'arch': args.arch,
                         'state_dict': model.state_dict(),
                         'best_prec1': best_prec1,
                         'optimizer': optimizer.state_dict()
                         },
                        is_best, path='./checkpoint', filename=saved_name + '.pth.tar')

    end = datetime.datetime.now().replace(microsecond=0)
    print('\n===> Training Done!!!\n')
    print('\n===> Training Duration: {}\n'.format(end - start))

    tester.test(test_loader, args.print_freq)
def adjust_lr(optimizer, epoch, args, logger):
    """Step-decay schedule: divide the initial LR by 10 every 100 epochs."""
    decayed_lr = args.learning_rate * (0.1 ** (epoch // 100))
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
    logger.scalar_summary('learning_rate', decayed_lr, epoch + 1)
def choose_optimizer(model, args):
    """Return an Adam or Nesterov-momentum SGD optimizer for ``model``."""
    params = model.parameters()
    if args.optimizer == 'adam':
        return torch.optim.Adam(params, lr=args.learning_rate,
                                weight_decay=args.weight_decay)
    return torch.optim.SGD(params, lr=args.learning_rate, momentum=0.9,
                           weight_decay=args.weight_decay, nesterov=True)
def save_checkpoint(state, is_best, path, filename='./checkpoint/checkpoint.pth.tar'):
    """Persist ``state`` under ``path``; mirror it as 'best_model_*' when best."""
    target = os.path.join(path, filename)
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, os.path.join(path, 'best_model_' + filename))
# Script entry point: run full training / evaluation.
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.logger.Logger",
"torchvision.transforms.Normalize",
"os.path.join",
"validator.Validator",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"torchvision.transforms.ToTensor",
"datetime.datetime.now",
"torchvision.transforms.R... | [((546, 564), 'numpy.random.seed', 'np.random.seed', (['(40)'], {}), '(40)\n', (560, 564), True, 'import numpy as np\n'), ((565, 586), 'torch.manual_seed', 'torch.manual_seed', (['(40)'], {}), '(40)\n', (582, 586), False, 'import torch\n'), ((620, 693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training multi networks on CIFAR10"""'}), "(description='Training multi networks on CIFAR10')\n", (643, 693), False, 'import argparse\n'), ((2933, 2960), 'models.construct', 'models.construct', (['args.arch'], {}), '(args.arch)\n', (2949, 2960), False, 'import models\n'), ((3247, 3272), 'trainer.Trainer', 'Trainer', (['model', 'criterion'], {}), '(model, criterion)\n', (3254, 3272), False, 'from trainer import Trainer\n'), ((3289, 3316), 'validator.Validator', 'Validator', (['model', 'criterion'], {}), '(model, criterion)\n', (3298, 3316), False, 'from validator import Validator\n'), ((3330, 3354), 'tester.Tester', 'Tester', (['model', 'criterion'], {}), '(model, criterion)\n', (3336, 3354), False, 'from tester import Tester\n'), ((4774, 4906), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.num_workers'}), '(train_set, batch_size=args.batch_size, shuffle=\n True, pin_memory=True, num_workers=args.num_workers)\n', (4801, 4906), False, 'import torch\n'), ((5281, 5413), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': 'args.num_workers'}), '(test_set, batch_size=args.batch_size, shuffle=\n False, pin_memory=True, num_workers=args.num_workers)\n', (5308, 5413), False, 'import torch\n'), ((2828, 2866), 'os.path.join', 'os.path.join', (['args.log_dir', 'saved_name'], {}), '(args.log_dir, saved_name)\n', (2840, 2866), False, 'import os\n'), ((2884, 2899), 
'utils.logger.Logger', 'Logger', (['log_dir'], {}), '(log_dir)\n', (2890, 2899), False, 'from utils.logger import Logger\n'), ((3419, 3446), 'os.path.exists', 'os.path.exists', (['args.resume'], {}), '(args.resume)\n', (3433, 3446), False, 'import os\n'), ((7321, 7413), 'torch.optim.Adam', 'torch.optim.Adam', (['updated_params'], {'lr': 'args.learning_rate', 'weight_decay': 'args.weight_decay'}), '(updated_params, lr=args.learning_rate, weight_decay=args.\n weight_decay)\n', (7337, 7413), False, 'import torch\n'), ((7476, 7595), 'torch.optim.SGD', 'torch.optim.SGD', (['updated_params'], {'lr': 'args.learning_rate', 'momentum': '(0.9)', 'weight_decay': 'args.weight_decay', 'nesterov': '(True)'}), '(updated_params, lr=args.learning_rate, momentum=0.9,\n weight_decay=args.weight_decay, nesterov=True)\n', (7491, 7595), False, 'import torch\n'), ((7759, 7787), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (7771, 7787), False, 'import os\n'), ((2742, 2770), 'os.path.exists', 'os.path.exists', (['args.log_dir'], {}), '(args.log_dir)\n', (2756, 2770), False, 'import os\n'), ((2784, 2808), 'os.makedir', 'os.makedir', (['args.log_dir'], {}), '(args.log_dir)\n', (2794, 2808), False, 'import os\n'), ((3037, 3058), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3056, 3058), True, 'import torch.nn as nn\n'), ((3125, 3153), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3146, 3153), False, 'import torch\n'), ((3549, 3572), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (3559, 3572), False, 'import torch\n'), ((5746, 5769), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5767, 5769), False, 'import datetime\n'), ((6673, 6696), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6694, 6696), False, 'import datetime\n'), ((7823, 7851), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (7835, 7851), 
False, 'import os\n'), ((7853, 7897), 'os.path.join', 'os.path.join', (['path', "('best_model_' + filename)"], {}), "(path, 'best_model_' + filename)\n", (7865, 7897), False, 'import os\n'), ((4126, 4162), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (4147, 4162), True, 'import torchvision.transforms as transforms\n'), ((4176, 4209), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4207, 4209), True, 'import torchvision.transforms as transforms\n'), ((4223, 4244), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4242, 4244), True, 'import torchvision.transforms as transforms\n'), ((4258, 4329), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.4914, 0.4822, 0.4465]', '[0.2023, 0.1994, 0.201]'], {}), '([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.201])\n', (4278, 4329), True, 'import torchvision.transforms as transforms\n'), ((4392, 4413), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4411, 4413), True, 'import torchvision.transforms as transforms\n'), ((4427, 4498), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.4914, 0.4822, 0.4465]', '[0.2023, 0.1994, 0.201]'], {}), '([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.201])\n', (4447, 4498), True, 'import torchvision.transforms as transforms\n')] |
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from matplotlib.animation import FuncAnimation
from functools import partial
#system
def van_der_pol(coords, t, mu):
    """Van der Pol vector field for a flat array of interleaved (x, y) pairs.

    ``coords`` holds [x0, y0, x1, y1, ...]; the derivative is returned in
    the same interleaved layout. ``t`` is unused but required by odeint.
    """
    positions = coords[0::2]
    velocities = coords[1::2]
    accelerations = mu * (1 - positions ** 2) * velocities - positions
    return np.column_stack([velocities, accelerations]).reshape(-1)
#initialize window for blitting
def init(fig, axes, scatter):
    """Blitting init function: fix axis limits and paint the axes black."""
    axes.patch.set_facecolor("black")
    plt.xlim([-3.0, 3.0])
    plt.ylim([-3.0, 3.0])
    return scatter,
#animates one frame
def animate(t, sols, scatter):
print(t)
scatter.set_offsets(np.column_stack([sols[t][0::2], sols[t][1::2]]))
return scatter,
# constants
mu = 0.5  # Van der Pol damping / nonlinearity parameter

# initial data: an M x M grid of start points over [-2.5, 2.5]^2, flattened
# into the interleaved [x0, y0, x1, y1, ...] layout van_der_pol expects
M = 50
axis = np.linspace(-2.5, 2.5, M)
init_conds = np.array([(x, y) for x in axis for y in axis])
init_conds = init_conds.reshape(2*M**2)

# solving: integrate all M*M trajectories simultaneously over [0, total],
# sampled at N = total/delta output frames
total = 20.0
delta = 0.1
N = int(total/delta)
t = np.linspace(0.0, total, N)
sols = odeint(van_der_pol, init_conds, t, args=tuple([mu]))

# setup: black axes background, white 1px points, blitting enabled
fig, axes = plt.figure(), plt.axes(frameon=True)
scatter = plt.scatter([], [], animated=True, c="white", s=1.0)

# animation: one frame per solution time step, written to out.mp4 at 40 fps
anim = FuncAnimation(fig, func=animate, frames=N, init_func=partial(init, fig, axes, scatter), blit=True, fargs=(sols, scatter), repeat=False)
anim.save("out.mp4", fps=40)
| [
"matplotlib.pyplot.xlim",
"functools.partial",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace",
"numpy.column_stack"
] | [((734, 759), 'numpy.linspace', 'np.linspace', (['(-2.5)', '(2.5)', 'M'], {}), '(-2.5, 2.5, M)\n', (745, 759), True, 'import numpy as np\n'), ((773, 819), 'numpy.array', 'np.array', (['[(x, y) for x in axis for y in axis]'], {}), '([(x, y) for x in axis for y in axis])\n', (781, 819), True, 'import numpy as np\n'), ((920, 946), 'numpy.linspace', 'np.linspace', (['(0.0)', 'total', 'N'], {}), '(0.0, total, N)\n', (931, 946), True, 'import numpy as np\n'), ((1074, 1126), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'animated': '(True)', 'c': '"""white"""', 's': '(1.0)'}), "([], [], animated=True, c='white', s=1.0)\n", (1085, 1126), True, 'import matplotlib.pyplot as plt\n'), ((282, 311), 'numpy.column_stack', 'np.column_stack', (['[y, y_prime]'], {}), '([y, y_prime])\n', (297, 311), True, 'import numpy as np\n'), ((406, 427), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-3.0, 3.0]'], {}), '([-3.0, 3.0])\n', (414, 427), True, 'import matplotlib.pyplot as plt\n'), ((432, 453), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-3.0, 3.0]'], {}), '([-3.0, 3.0])\n', (440, 453), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1039), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1037, 1039), True, 'import matplotlib.pyplot as plt\n'), ((1041, 1063), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'frameon': '(True)'}), '(frameon=True)\n', (1049, 1063), True, 'import matplotlib.pyplot as plt\n'), ((602, 649), 'numpy.column_stack', 'np.column_stack', (['[sols[t][0::2], sols[t][1::2]]'], {}), '([sols[t][0::2], sols[t][1::2]])\n', (617, 649), True, 'import numpy as np\n'), ((1199, 1232), 'functools.partial', 'partial', (['init', 'fig', 'axes', 'scatter'], {}), '(init, fig, axes, scatter)\n', (1206, 1232), False, 'from functools import partial\n')] |
"""
An example of RL training using StableBaselines3.
python -m dedo.run_rl_sb3 --env=HangGarment-v1 --rl_algo PPO --logdir=/tmp/dedo
tensorboard --logdir=/tmp/dedo --bind_all --port 6006
Play the saved policy (e.g. logged to PPO_210825_204955_HangGarment-v1):
python -m dedo.run_rl_sb3 --env=HangGarment-v1 \
--play=/tmp/dedo/PPO_210825_204955_HangGarment-v1
@contactrika
"""
import sys
from copy import deepcopy
import os
import pickle
import argparse
import gym
from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3 # used dynamically
from stable_baselines3.common.env_util import (
make_vec_env, DummyVecEnv, SubprocVecEnv)
from dedo.utils.args import get_args, get_args_parser, args_postprocess
from dedo.utils.rl_sb3_utils import CustomCallback, play
from dedo.utils.train_utils import init_train
import numpy as np
import torch
import wandb
def do_play(args, num_episodes=10):
    """Load a saved SB3 agent from ``args.load_checkpt`` and roll it out.

    Restores the pickled training args (enabling debug/viz for rendering),
    rebuilds the eval env, and plays ``num_episodes`` episodes.
    """
    checkpt = os.path.join(args.load_checkpt, 'agent.zip')
    print('Loading RL agent checkpoint from', checkpt)
    # Replace CLI args with the ones pickled at training time.
    # FIX: close the pickle file via a context manager instead of leaking
    # the open handle returned by the bare open() call.
    with open(os.path.join(args.load_checkpt, 'args.pkl'), 'rb') as f:
        args = pickle.load(f)
    args.debug = True
    args.viz = True
    eval_env = gym.make(args.env, args=args)
    eval_env.seed(args.seed)
    # NOTE(review): eval() resolves the algo class from its name; only safe
    # because rl_algo comes from our own pickled args — never feed it
    # untrusted input.
    rl_agent = eval(args.rl_algo).load(checkpt)
    play(eval_env, num_episodes=num_episodes, rl_agent=rl_agent, debug=False)
def main():
    """Train an SB3 agent on a dedo env, or just play a saved policy.

    Reads and mutates the module-global ``args`` assigned in the
    ``__main__`` block below (logdir, seed, device).
    """
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    args.base_logdir = args.logdir
    if args.play:
        do_play(args)
        return  # no training, just playing
    assert(args.rl_algo is not None), 'Please provide --rl_algo'
    assert(args.rl_algo in ['A2C', 'DDPG', 'PPO', 'SAC', 'TD3']), \
        f'{args.rl_algo:s} not tested with SB3 (try RLlib)'
    # Init RL training envs and agent.
    print('debug:INIT', file=sys.stderr)
    args.logdir, args.device = init_train(args.rl_algo, args, tags=['SB3', 'HPSearch', args.rl_algo, args.env])
    # Hyperparameter search results: the wandb sweep overrides the seed,
    # so re-seed both RNGs with the sweep value.
    args.seed = wandb.config['seed']
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    print('random_seed', args.seed)
    # Stable Baselines3 only supports vectorized envs for on-policy algos.
    on_policy = args.rl_algo in ['A2C', 'PPO']
    n_envs = args.num_envs if on_policy else 1
    print('debug:GYM', file=sys.stderr)
    eval_env = gym.make(args.env, args=args)
    eval_env.seed(args.seed)
    print('debug:DEEP COPY', file=sys.stderr)
    # Training envs get their own args copy with rendering/debug disabled.
    train_args = deepcopy(args)
    train_args.debug = False  # no debug during training
    train_args.viz = False  # no viz during training
    vec_env = make_vec_env(
        args.env, n_envs=n_envs,
        vec_env_cls=SubprocVecEnv if n_envs > 1 else DummyVecEnv,
        env_kwargs={'args': train_args})
    vec_env.seed(args.seed)
    print('Created', args.task, 'with observation_space',
          vec_env.observation_space.shape, 'action_space',
          vec_env.action_space.shape)
    rl_kwargs = {'learning_rate': args.lr, 'device': args.device,
                 'tensorboard_log': args.logdir, 'verbose': 1}
    num_steps_between_save = args.log_save_interval*10
    if on_policy:
        num_steps_between_save *= 10  # less frequent logging
    if not on_policy and args.cam_resolution > 0:
        # Off-policy from pixels: bound the replay buffer explicitly.
        rl_kwargs['buffer_size'] = args.replay_size
    # HP search specific keys: wandb config entries prefixed 'HP_' are
    # forwarded (prefix stripped) as keyword args to the SB3 constructor.
    for key in wandb.config.keys():
        if key.startswith('HP_'):
            key_without_hp = key[3:]
            rl_kwargs[key_without_hp] = wandb.config[key]
    policy_name = 'MlpPolicy'
    if args.cam_resolution > 0 and args.uint8_pixels:
        policy_name = 'CnnPolicy'
    # eval() maps the algo name to one of the SB3 classes imported above;
    # safe only because the assert above restricted it to a whitelist.
    rl_agent = eval(args.rl_algo)(policy_name, vec_env, **rl_kwargs)
    cb = CustomCallback(eval_env, args.logdir, n_envs, args,
                        num_steps_between_save=num_steps_between_save,
                        viz=args.viz, debug=args.debug)
    rl_agent.learn(total_timesteps=args.total_env_steps, callback=cb)
    vec_env.close()
    wandb.finish()
    # reset logdir to its pre-sweep value for any follow-up runs
    args.logdir = args.base_logdir
def args_setup():
args, main_parser = get_args_parser()
parser = argparse.ArgumentParser(
description="AgentData", parents=[main_parser], add_help=False)
parser.add_argument('--agent_id', type=str, required=True,
help='Agent ID For which wandb is used')
args, unknown = parser.parse_known_args()
args_postprocess(args)
return args
if __name__ == "__main__":
# args = args_setup()
# wandb.agent(args.agent_id, main)
# # main()
args = get_args()
main()
| [
"stable_baselines3.common.env_util.make_vec_env",
"dedo.utils.train_utils.init_train",
"copy.deepcopy",
"numpy.random.seed",
"gym.make",
"torch.random.manual_seed",
"wandb.finish",
"argparse.ArgumentParser",
"dedo.utils.args.get_args_parser",
"dedo.utils.args.get_args",
"dedo.utils.rl_sb3_utils.... | [((918, 962), 'os.path.join', 'os.path.join', (['args.load_checkpt', '"""agent.zip"""'], {}), "(args.load_checkpt, 'agent.zip')\n", (930, 962), False, 'import os\n'), ((1155, 1184), 'gym.make', 'gym.make', (['args.env'], {'args': 'args'}), '(args.env, args=args)\n', (1163, 1184), False, 'import gym\n'), ((1266, 1339), 'dedo.utils.rl_sb3_utils.play', 'play', (['eval_env'], {'num_episodes': 'num_episodes', 'rl_agent': 'rl_agent', 'debug': '(False)'}), '(eval_env, num_episodes=num_episodes, rl_agent=rl_agent, debug=False)\n', (1270, 1339), False, 'from dedo.utils.rl_sb3_utils import CustomCallback, play\n'), ((1359, 1384), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1373, 1384), True, 'import numpy as np\n'), ((1389, 1424), 'torch.random.manual_seed', 'torch.random.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1413, 1424), False, 'import torch\n'), ((1849, 1934), 'dedo.utils.train_utils.init_train', 'init_train', (['args.rl_algo', 'args'], {'tags': "['SB3', 'HPSearch', args.rl_algo, args.env]"}), "(args.rl_algo, args, tags=['SB3', 'HPSearch', args.rl_algo, args.env]\n )\n", (1859, 1934), False, 'from dedo.utils.train_utils import init_train\n'), ((2008, 2033), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2022, 2033), True, 'import numpy as np\n'), ((2038, 2073), 'torch.random.manual_seed', 'torch.random.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2062, 2073), False, 'import torch\n'), ((2336, 2365), 'gym.make', 'gym.make', (['args.env'], {'args': 'args'}), '(args.env, args=args)\n', (2344, 2365), False, 'import gym\n'), ((2458, 2472), 'copy.deepcopy', 'deepcopy', (['args'], {}), '(args)\n', (2466, 2472), False, 'from copy import deepcopy\n'), ((2597, 2729), 'stable_baselines3.common.env_util.make_vec_env', 'make_vec_env', (['args.env'], {'n_envs': 'n_envs', 'vec_env_cls': '(SubprocVecEnv if n_envs > 1 else DummyVecEnv)', 'env_kwargs': "{'args': 
train_args}"}), "(args.env, n_envs=n_envs, vec_env_cls=SubprocVecEnv if n_envs >\n 1 else DummyVecEnv, env_kwargs={'args': train_args})\n", (2609, 2729), False, 'from stable_baselines3.common.env_util import make_vec_env, DummyVecEnv, SubprocVecEnv\n'), ((3349, 3368), 'wandb.config.keys', 'wandb.config.keys', ([], {}), '()\n', (3366, 3368), False, 'import wandb\n'), ((3696, 3831), 'dedo.utils.rl_sb3_utils.CustomCallback', 'CustomCallback', (['eval_env', 'args.logdir', 'n_envs', 'args'], {'num_steps_between_save': 'num_steps_between_save', 'viz': 'args.viz', 'debug': 'args.debug'}), '(eval_env, args.logdir, n_envs, args, num_steps_between_save=\n num_steps_between_save, viz=args.viz, debug=args.debug)\n', (3710, 3831), False, 'from dedo.utils.rl_sb3_utils import CustomCallback, play\n'), ((3969, 3983), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (3981, 3983), False, 'import wandb\n'), ((4082, 4099), 'dedo.utils.args.get_args_parser', 'get_args_parser', ([], {}), '()\n', (4097, 4099), False, 'from dedo.utils.args import get_args, get_args_parser, args_postprocess\n'), ((4113, 4204), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""AgentData"""', 'parents': '[main_parser]', 'add_help': '(False)'}), "(description='AgentData', parents=[main_parser],\n add_help=False)\n", (4136, 4204), False, 'import argparse\n'), ((4390, 4412), 'dedo.utils.args.args_postprocess', 'args_postprocess', (['args'], {}), '(args)\n', (4406, 4412), False, 'from dedo.utils.args import get_args, get_args_parser, args_postprocess\n'), ((4548, 4558), 'dedo.utils.args.get_args', 'get_args', ([], {}), '()\n', (4556, 4558), False, 'from dedo.utils.args import get_args, get_args_parser, args_postprocess\n'), ((1046, 1089), 'os.path.join', 'os.path.join', (['args.load_checkpt', '"""args.pkl"""'], {}), "(args.load_checkpt, 'args.pkl')\n", (1058, 1089), False, 'import os\n')] |
import os
import argparse
import numpy as np
import tensorflow as tf
from epi.models import Parameter, Model
from epi.STG_Circuit import NetworkFreq
import time
DTYPE = tf.float32
# Parse script command-line parameters.
parser = argparse.ArgumentParser()
parser.add_argument("--freq", type=float, default=0.55) # frequency for mu
parser.add_argument("--mu_std", type=float, default=0.05) # std in mu constraint
parser.add_argument("--beta", type=float, default=4.0) # aug lag hp
parser.add_argument("--logc0", type=float, default=0.0) # log10 of c_0
parser.add_argument("--random_seed", type=int, default=1)
args = parser.parse_args()
freq = args.freq
mu_std = args.mu_std
beta = args.beta
c0 = 10.0 ** args.logc0
random_seed = args.random_seed
g_el_lb = 4.0
sigma_I = 1.0e-12
# sleep_dur = np.abs(args.logc0) + random_seed/5. + beta/3.
# print('short stagger sleep of', sleep_dur, flush=True)
# time.sleep(sleep_dur)
# 1. Specify the V1 model for EPI.
D = 2
g_el = Parameter("g_el", 1, lb=g_el_lb, ub=8.0)
g_synA = Parameter("g_synA", 1, lb=0.01, ub=4.0)
# Define model
name = "STG_sigmaI=%.2E" % sigma_I
parameters = [g_el, g_synA]
model = Model(name, parameters)
# Emergent property values.
mu = np.array([freq, mu_std ** 2])
init_type = "abc"
abc_std = mu_std
init_params = {"num_keep": 500, "means": np.array([freq]), "stds": np.array([abc_std,])}
dt = 0.025
T = 300
network_freq = NetworkFreq(dt, T, sigma_I, mu)
model.set_eps(network_freq)
# 3. Run EPI.
q_theta, opt_data, epi_path, failed = model.epi(
mu,
arch_type="coupling",
num_stages=2,
num_layers=num_layers,
num_units=25,
post_affine=True,
elemwise_fn="affine",
batch_norm=False,
bn_momentum=0.0,
K=6,
N=400,
num_iters=5000,
lr=1e-3,
c0=c0,
beta=beta,
nu=0.5,
random_seed=random_seed,
init_type=init_type,
init_params=init_params,
verbose=True,
stop_early=True,
log_rate=50,
save_movie_data=True,
)
if not failed:
print("Making movie.", flush=True)
model.epi_opt_movie(epi_path)
print("done.", flush=True)
| [
"argparse.ArgumentParser",
"epi.STG_Circuit.NetworkFreq",
"epi.models.Model",
"numpy.array",
"epi.models.Parameter"
] | [((231, 256), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (254, 256), False, 'import argparse\n'), ((976, 1016), 'epi.models.Parameter', 'Parameter', (['"""g_el"""', '(1)'], {'lb': 'g_el_lb', 'ub': '(8.0)'}), "('g_el', 1, lb=g_el_lb, ub=8.0)\n", (985, 1016), False, 'from epi.models import Parameter, Model\n'), ((1026, 1065), 'epi.models.Parameter', 'Parameter', (['"""g_synA"""', '(1)'], {'lb': '(0.01)', 'ub': '(4.0)'}), "('g_synA', 1, lb=0.01, ub=4.0)\n", (1035, 1065), False, 'from epi.models import Parameter, Model\n'), ((1153, 1176), 'epi.models.Model', 'Model', (['name', 'parameters'], {}), '(name, parameters)\n', (1158, 1176), False, 'from epi.models import Parameter, Model\n'), ((1211, 1240), 'numpy.array', 'np.array', (['[freq, mu_std ** 2]'], {}), '([freq, mu_std ** 2])\n', (1219, 1240), True, 'import numpy as np\n'), ((1401, 1432), 'epi.STG_Circuit.NetworkFreq', 'NetworkFreq', (['dt', 'T', 'sigma_I', 'mu'], {}), '(dt, T, sigma_I, mu)\n', (1412, 1432), False, 'from epi.STG_Circuit import NetworkFreq\n'), ((1318, 1334), 'numpy.array', 'np.array', (['[freq]'], {}), '([freq])\n', (1326, 1334), True, 'import numpy as np\n'), ((1344, 1363), 'numpy.array', 'np.array', (['[abc_std]'], {}), '([abc_std])\n', (1352, 1363), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from scipy.integrate._ivp import rk
import probnum.problems.zoo.diffeq as diffeq_zoo
from probnum import _randomvariablelist, diffeq
@pytest.fixture
def steprule():
return diffeq.stepsize.AdaptiveSteps(0.1, atol=1e-4, rtol=1e-4)
@pytest.fixture
def perturbed_solution(steprule):
y0 = np.array([0.1, 0.1])
ode = diffeq_zoo.lotkavolterra(t0=0.0, tmax=1.0, y0=y0)
rng = np.random.default_rng(seed=1)
testsolver = diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta(
rk.RK45, steprule=steprule
)
sol = diffeq.perturbed.step.PerturbedStepSolver(
rng=rng,
solver=testsolver,
noise_scale=0.1,
perturb_function=diffeq.perturbed.step.perturb_uniform,
)
return sol.solve(ode)
def test_states(perturbed_solution):
assert isinstance(
perturbed_solution.states, _randomvariablelist._RandomVariableList
)
def test_call(perturbed_solution):
"""Test for continuity of the dense output.
Small changes of the locations should come with small changes of the
states.
"""
np.testing.assert_allclose(
perturbed_solution(perturbed_solution.locations[0:]).mean,
perturbed_solution.states[0:].mean,
atol=1e-14,
rtol=1e-14,
)
np.testing.assert_allclose(
perturbed_solution(perturbed_solution.locations[0:-1] + 1e-14).mean,
perturbed_solution(perturbed_solution.locations[0:-1]).mean,
atol=1e-12,
rtol=1e-12,
)
np.testing.assert_allclose(
perturbed_solution(perturbed_solution.locations[1:] - 1e-14).mean,
perturbed_solution(perturbed_solution.locations[1:]).mean,
atol=1e-12,
rtol=1e-12,
)
def test_len(perturbed_solution):
np.testing.assert_allclose(
len(perturbed_solution),
len(perturbed_solution.locations),
atol=1e-14,
rtol=1e-14,
)
def test_getitem(perturbed_solution):
np.testing.assert_allclose(
perturbed_solution.interpolants[1](perturbed_solution.locations[1]),
perturbed_solution[1].mean,
atol=1e-14,
rtol=1e-14,
)
| [
"probnum.diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta",
"probnum.problems.zoo.diffeq.lotkavolterra",
"probnum.diffeq.stepsize.AdaptiveSteps",
"numpy.random.default_rng",
"probnum.diffeq.perturbed.step.PerturbedStepSolver",
"numpy.array"
] | [((212, 272), 'probnum.diffeq.stepsize.AdaptiveSteps', 'diffeq.stepsize.AdaptiveSteps', (['(0.1)'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(0.1, atol=0.0001, rtol=0.0001)\n', (241, 272), False, 'from probnum import _randomvariablelist, diffeq\n'), ((330, 350), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (338, 350), True, 'import numpy as np\n'), ((361, 410), 'probnum.problems.zoo.diffeq.lotkavolterra', 'diffeq_zoo.lotkavolterra', ([], {'t0': '(0.0)', 'tmax': '(1.0)', 'y0': 'y0'}), '(t0=0.0, tmax=1.0, y0=y0)\n', (385, 410), True, 'import probnum.problems.zoo.diffeq as diffeq_zoo\n'), ((421, 450), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': '(1)'}), '(seed=1)\n', (442, 450), True, 'import numpy as np\n'), ((468, 554), 'probnum.diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta', 'diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta', (['rk.RK45'], {'steprule': 'steprule'}), '(rk.RK45, steprule=\n steprule)\n', (521, 554), False, 'from probnum import _randomvariablelist, diffeq\n'), ((574, 720), 'probnum.diffeq.perturbed.step.PerturbedStepSolver', 'diffeq.perturbed.step.PerturbedStepSolver', ([], {'rng': 'rng', 'solver': 'testsolver', 'noise_scale': '(0.1)', 'perturb_function': 'diffeq.perturbed.step.perturb_uniform'}), '(rng=rng, solver=testsolver,\n noise_scale=0.1, perturb_function=diffeq.perturbed.step.perturb_uniform)\n', (615, 720), False, 'from probnum import _randomvariablelist, diffeq\n')] |
from __future__ import print_function, division, unicode_literals
import numpy as np
from psy import McmcHoDina
from psy.utils import r4beta
attrs = np.random.binomial(1, 0.5, (5, 60))
g = r4beta(1, 2, 0, 0.6, (1, 60))
no_s = r4beta(2, 1, 0.4, 1, (1, 60))
theta = np.random.normal(0, 1, (1000, 1))
lam00 = np.random.normal(0, 1, 5)
lam11 = np.random.uniform(0.5, 3, 5)
ho_dina = McmcHoDina(attrs=attrs)
skills_p = McmcHoDina.get_skills_p(lam0=lam00, lam1=lam11, theta=theta)
skills = np.random.binomial(1, skills_p)
yita = ho_dina.get_yita(skills)
p_val = ho_dina.get_p(yita, guess=g, no_slip=no_s)
score = np.random.binomial(1, p_val)
ho_dina_est = McmcHoDina(attrs=attrs, score=score, max_iter=10000, burn=5000)
est_lam0, est_lam1, est_theta, est_skills, est_no_s, est_g = ho_dina_est.mcmc()
| [
"numpy.random.uniform",
"psy.McmcHoDina",
"numpy.random.binomial",
"psy.utils.r4beta",
"numpy.random.normal",
"psy.McmcHoDina.get_skills_p"
] | [((150, 185), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)', '(5, 60)'], {}), '(1, 0.5, (5, 60))\n', (168, 185), True, 'import numpy as np\n'), ((191, 220), 'psy.utils.r4beta', 'r4beta', (['(1)', '(2)', '(0)', '(0.6)', '(1, 60)'], {}), '(1, 2, 0, 0.6, (1, 60))\n', (197, 220), False, 'from psy.utils import r4beta\n'), ((228, 257), 'psy.utils.r4beta', 'r4beta', (['(2)', '(1)', '(0.4)', '(1)', '(1, 60)'], {}), '(2, 1, 0.4, 1, (1, 60))\n', (234, 257), False, 'from psy.utils import r4beta\n'), ((267, 300), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1000, 1)'], {}), '(0, 1, (1000, 1))\n', (283, 300), True, 'import numpy as np\n'), ((309, 334), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (325, 334), True, 'import numpy as np\n'), ((343, 371), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(3)', '(5)'], {}), '(0.5, 3, 5)\n', (360, 371), True, 'import numpy as np\n'), ((383, 406), 'psy.McmcHoDina', 'McmcHoDina', ([], {'attrs': 'attrs'}), '(attrs=attrs)\n', (393, 406), False, 'from psy import McmcHoDina\n'), ((418, 478), 'psy.McmcHoDina.get_skills_p', 'McmcHoDina.get_skills_p', ([], {'lam0': 'lam00', 'lam1': 'lam11', 'theta': 'theta'}), '(lam0=lam00, lam1=lam11, theta=theta)\n', (441, 478), False, 'from psy import McmcHoDina\n'), ((488, 519), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'skills_p'], {}), '(1, skills_p)\n', (506, 519), True, 'import numpy as np\n'), ((612, 640), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p_val'], {}), '(1, p_val)\n', (630, 640), True, 'import numpy as np\n'), ((656, 719), 'psy.McmcHoDina', 'McmcHoDina', ([], {'attrs': 'attrs', 'score': 'score', 'max_iter': '(10000)', 'burn': '(5000)'}), '(attrs=attrs, score=score, max_iter=10000, burn=5000)\n', (666, 719), False, 'from psy import McmcHoDina\n')] |
#!/usr/bin/python3
import itertools
import os
import sys
from pathlib import Path
import numpy as np
from PIL import Image
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(linewidth=1000)
script_dir = os.path.dirname(__file__)
images_dir = os.path.join(script_dir, "images")
images = [os.path.join(images_dir, f) for f in os.listdir(images_dir)]
se_identity_1 = np.array([[True]])
se_cross_3 = np.array([[False, True, False], [True, True, True], [False, True, False]])
se_north_3 = np.array(
[[False, True, False], [False, True, False], [False, False, False]]
)
se_glider_3 = np.array([[False, True, False], [True, False, False], [True, True, True]])
structural_elements = [se_identity_1, se_cross_3, se_north_3, se_glider_3]
def namestr(obj, namespace=globals()):
return [name for name in namespace if namespace[name] is obj]
def neighborhood_coordinates(img, x, y, dx=1, dy=1):
"""Given a distance out, returns the checkable neighborhood around a
pixel for an image."""
x_nbhd = list(range(x - dx, x + dx + 1))
y_nbhd = list(range(y - dy, y + dy + 1))
x_nbhd = filter(lambda x: x >= 0 and x < img.shape[0], x_nbhd)
y_nbhd = filter(lambda x: x >= 0 and x < img.shape[1], y_nbhd)
return list(itertools.product(list(x_nbhd), list(y_nbhd)))
def check_if_se_coordinate_valid(img, se, x, y, i, j):
"""Given a structural element, an image, an x-y coordinate for the image,
and an i-j coordinate for the structural element, checks to see whether
the element needs to be operated on."""
x_adjusted = x + i - se.shape[0] // 2
y_adjusted = y + j - se.shape[1] // 2
x_yes = (x_adjusted >= 0) and (x_adjusted < img.shape[0])
y_yes = (y_adjusted >= 0) and (y_adjusted < img.shape[1])
return x_yes and y_yes
def eroded_pixel(img, se, x, y, verbose=False):
"""Returns the post-erosion value of the pixel.
Note that the structuring element *must* be an odd number-by-odd number
2D numpy matrix of booleans."""
assert se.shape[0] % 2 == 1
assert se.shape[1] % 2 == 1
return_bool = img[x, y]
# where_to_check = neighborhood_coordinates(img, x, y, dx=se.shape[0]//2,
# dy=se.shape[1]//2)
# for (x, y) in where_to_check:
if verbose:
print("Determining whether {} is active...".format((x, y)))
for i in range(0, se.shape[0]):
if not return_bool:
break
for j in range(0, se.shape[1]):
if not return_bool:
break
if check_if_se_coordinate_valid(img, se, x, y, i, j):
comp = (x + i - se.shape[0] // 2, y + j - se.shape[1] // 2)
if verbose:
print(
" Cross-checking absolute image pixel ({}, {}) against SE pixel ({}, {}).".format(
comp[0], comp[1], i, j
),
end=" ",
)
if se[i, j]:
if verbose:
print(
"Pixel active, assigning... {} && {} == {}.".format(
img[comp[0], comp[1]],
se[i, j],
(img[comp[0], comp[1]] and se[i, j]),
)
)
return_bool = return_bool and (img[comp[0], comp[1]] and se[i, j])
else:
if verbose:
print("SE pixel not active. Moving on.")
if verbose:
print("Return value: {} is {}.".format((x, y), return_bool))
return return_bool
def erode(img, se, verbose=False):
out = np.zeros((img.shape[0], img.shape[1])).astype(type(True))
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
out[i, j] = eroded_pixel(img, se, i, j, verbose=verbose)
return out
def dilated_pixel(img, se, x, y, verbose=False):
"""Returns the post-dilation value of the pixel.
Note that the structuring element *must* be an odd number-by-odd number
2D numpy matrix of booleans."""
assert se.shape[0] % 2 == 1
assert se.shape[1] % 2 == 1
return_bool = img[x, y]
# where_to_check = neighborhood_coordinates(img, x, y, dx=se.shape[0]//2,
# dy=se.shape[1]//2)
# for (x, y) in where_to_check:
if verbose:
print("Determining whether {} is active...".format((x, y)))
for i in range(0, se.shape[0]):
if return_bool:
break
for j in range(0, se.shape[1]):
if return_bool:
break
if check_if_se_coordinate_valid(img, se, x, y, i, j):
comp = (x + i - se.shape[0] // 2, y + j - se.shape[1] // 2)
if verbose:
print(
" Cross-checking absolute image pixel ({}, {}) against SE pixel ({}, {}).".format(
comp[0], comp[1], i, j
),
end=" ",
)
if se[i, j]:
if verbose:
print(
"Pixel active, assigning... {} && {} == {}.".format(
img[comp[0], comp[1]],
se[i, j],
(img[comp[0], comp[1]] and se[i, j]),
)
)
return_bool = return_bool or (img[comp[0], comp[1]] and se[i, j])
else:
if verbose:
print("SE pixel not active. Moving on.")
if verbose:
print("Return value: {} is {}.".format((x, y), return_bool))
return return_bool
def dilate(img, se, verbose=False):
out = np.zeros((img.shape[0], img.shape[1])).astype(type(True))
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
out[i, j] = dilated_pixel(img, se, i, j, verbose=verbose)
return out
def opening(img, se, verbose=False):
return dilate(erode(img, se, verbose=verbose), se, verbose=verbose)
def closing(img, se, verbose=False):
return erode(dilate(img, se, verbose=verbose), se, verbose=verbose)
def boundary(img, se, verbose=False):
out = np.zeros((img.shape[0], img.shape[1])).astype(type(True))
diff = erode(img, se, verbose=verbose)
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
if not img[i, j]:
out[i, j] = False
elif diff[i, j]:
out[i, j] = False
else:
out[i, j] = img[i, j]
return out
if __name__ == "__main__":
print("<NAME> - EECS 332 - MP#2\n" + ("-" * 80))
results_dir = os.path.join(script_dir, "results")
se_dir = os.path.join(script_dir, "structure_elems")
if not os.path.exists(results_dir):
os.makedirs(results_dir)
if not os.path.exists(se_dir):
os.makedirs(se_dir)
for se in structural_elements:
se_name = namestr(se)[0]
se_save_location = os.path.join(se_dir, se_name + ".bmp")
im = Image.fromarray(se.astype(np.uint8) * 255, "L")
im.save(se_save_location)
print("You can see the various SEs used by checking structure_elems/.")
for image in images:
print(image)
img_in = np.array(Image.open(image))
for se in structural_elements:
for op in [erode, dilate, opening, closing, boundary]:
rel_filename = (
Path(image).stem
+ "-"
+ namestr(se)[0]
+ "-"
+ namestr(op)[0]
+ ".bmp"
)
abs_filename = os.path.join(results_dir, rel_filename)
im = Image.fromarray(op(img_in, se))
im.save(abs_filename)
| [
"numpy.set_printoptions",
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"numpy.zeros",
"PIL.Image.open",
"pathlib.Path",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((127, 169), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (146, 169), True, 'import numpy as np\n'), ((170, 205), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(1000)'}), '(linewidth=1000)\n', (189, 205), True, 'import numpy as np\n'), ((220, 245), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (235, 245), False, 'import os\n'), ((259, 293), 'os.path.join', 'os.path.join', (['script_dir', '"""images"""'], {}), "(script_dir, 'images')\n", (271, 293), False, 'import os\n'), ((382, 400), 'numpy.array', 'np.array', (['[[True]]'], {}), '([[True]])\n', (390, 400), True, 'import numpy as np\n'), ((414, 488), 'numpy.array', 'np.array', (['[[False, True, False], [True, True, True], [False, True, False]]'], {}), '([[False, True, False], [True, True, True], [False, True, False]])\n', (422, 488), True, 'import numpy as np\n'), ((502, 579), 'numpy.array', 'np.array', (['[[False, True, False], [False, True, False], [False, False, False]]'], {}), '([[False, True, False], [False, True, False], [False, False, False]])\n', (510, 579), True, 'import numpy as np\n'), ((600, 674), 'numpy.array', 'np.array', (['[[False, True, False], [True, False, False], [True, True, True]]'], {}), '([[False, True, False], [True, False, False], [True, True, True]])\n', (608, 674), True, 'import numpy as np\n'), ((304, 331), 'os.path.join', 'os.path.join', (['images_dir', 'f'], {}), '(images_dir, f)\n', (316, 331), False, 'import os\n'), ((6912, 6947), 'os.path.join', 'os.path.join', (['script_dir', '"""results"""'], {}), "(script_dir, 'results')\n", (6924, 6947), False, 'import os\n'), ((6961, 7004), 'os.path.join', 'os.path.join', (['script_dir', '"""structure_elems"""'], {}), "(script_dir, 'structure_elems')\n", (6973, 7004), False, 'import os\n'), ((341, 363), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (351, 363), False, 'import os\n'), ((7017, 7044), 
'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (7031, 7044), False, 'import os\n'), ((7054, 7078), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (7065, 7078), False, 'import os\n'), ((7090, 7112), 'os.path.exists', 'os.path.exists', (['se_dir'], {}), '(se_dir)\n', (7104, 7112), False, 'import os\n'), ((7122, 7141), 'os.makedirs', 'os.makedirs', (['se_dir'], {}), '(se_dir)\n', (7133, 7141), False, 'import os\n'), ((7238, 7276), 'os.path.join', 'os.path.join', (['se_dir', "(se_name + '.bmp')"], {}), "(se_dir, se_name + '.bmp')\n", (7250, 7276), False, 'import os\n'), ((3758, 3796), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {}), '((img.shape[0], img.shape[1]))\n', (3766, 3796), True, 'import numpy as np\n'), ((5938, 5976), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {}), '((img.shape[0], img.shape[1]))\n', (5946, 5976), True, 'import numpy as np\n'), ((6433, 6471), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {}), '((img.shape[0], img.shape[1]))\n', (6441, 6471), True, 'import numpy as np\n'), ((7522, 7539), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (7532, 7539), False, 'from PIL import Image\n'), ((7921, 7960), 'os.path.join', 'os.path.join', (['results_dir', 'rel_filename'], {}), '(results_dir, rel_filename)\n', (7933, 7960), False, 'import os\n'), ((7700, 7711), 'pathlib.Path', 'Path', (['image'], {}), '(image)\n', (7704, 7711), False, 'from pathlib import Path\n')] |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Position encodings and utilities."""
import abc
import functools
import numpy as np
def generate_fourier_features(
pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False
):
"""Generate a Fourier frequency position encoding with linear spacing.
Args:
pos: The position of n points in d dimensional space.
A np array of shape [n, d].
num_bands: The number of bands (K) to use.
max_resolution: The maximum resolution (i.e. the number of pixels per dim).
A tuple representing resolution for each dimension
concat_pos: Concatenate the input position encoding to the Fourier features?
sine_only: Whether to use a single phase (sin) or two (sin/cos) for each
frequency band.
Returns:
embedding: A 1D np array of shape [n, n_channels]. If concat_pos is True
and sine_only is False, output dimensions are ordered as:
[dim_1, dim_2, ..., dim_d,
sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ...,
sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d),
cos(pi*f_1*dim_1), ..., cos(pi*f_K*dim_1), ...,
cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)],
where dim_i is pos[:, i] and f_k is the kth frequency band.
"""
min_freq = 1.0
# Nyquist frequency at the target resolution:
freq_bands = np.stack(
[np.linspace(min_freq, res / 2, num=num_bands, endpoint=True) for res in max_resolution],
axis=0,
)
# Get frequency bands for each spatial dimension.
# Output is size [n, d * num_bands]
per_pos_features = pos[:, :, None] * freq_bands[None, :, :]
per_pos_features = np.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])])
if sine_only:
# Output is size [n, d * num_bands]
per_pos_features = np.sin(np.pi * (per_pos_features))
else:
# Output is size [n, 2 * d * num_bands]
per_pos_features = np.concatenate(
[np.sin(np.pi * per_pos_features), np.cos(np.pi * per_pos_features)], axis=-1
)
# Concatenate the raw input positions.
if concat_pos:
# Adds d bands to the encoding.
per_pos_features = np.concatenate([pos, per_pos_features], axis=-1)
return per_pos_features
def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
"""Generate an array of position indices for an N-D input array.
Args:
index_dims: The shape of the index dimensions of the input array.
output_range: The min and max values taken by each input index dimension.
Returns:
A np array of shape [index_dims[0], index_dims[1], .., index_dims[-1], N].
"""
def _linspace(n_xels_per_dim):
return np.linspace(
output_range[0], output_range[1], num=n_xels_per_dim, endpoint=True, dtype=np.float32
)
dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
array_index_grid = np.meshgrid(*dim_ranges, indexing="ij")
return np.stack(array_index_grid, axis=-1)
class AbstractPositionEncoding(hk.Module, metaclass=abc.ABCMeta):
"""Abstract Perceiver decoder."""
@abc.abstractmethod
def __call__(self, batch_size, pos):
raise NotImplementedError
class TrainablePositionEncoding(AbstractPositionEncoding):
"""Trainable position encoding."""
def __init__(self, index_dim, num_channels=128, init_scale=0.02, name=None):
super(TrainablePositionEncoding, self).__init__(name=name)
self._index_dim = index_dim
self._num_channels = num_channels
self._init_scale = init_scale
def __call__(self, batch_size, pos=None):
del pos # Unused.
pos_embs = hk.get_parameter(
"pos_embs",
[self._index_dim, self._num_channels],
init=hk.initializers.TruncatedNormal(stddev=self._init_scale),
)
if batch_size is not None:
pos_embs = np.broadcast_to(pos_embs[None, :, :], (batch_size,) + pos_embs.shape)
return pos_embs
def _check_or_build_spatial_positions(pos, index_dims, batch_size):
"""Checks or builds spatial position features (x, y, ...).
Args:
pos: None, or an array of position features. If None, position features
are built. Otherwise, their size is checked.
index_dims: An iterable giving the spatial/index size of the data to be
featurized.
batch_size: The batch size of the data to be featurized.
Returns:
An array of position features, of shape [batch_size, prod(index_dims)].
"""
if pos is None:
pos = build_linear_positions(index_dims)
pos = np.broadcast_to(pos[None], (batch_size,) + pos.shape)
pos = np.reshape(pos, [batch_size, np.prod(index_dims), -1])
else:
# Just a warning label: you probably don't want your spatial features to
# have a different spatial layout than your pos coordinate system.
# But feel free to override if you think it'll work!
assert pos.shape[-1] == len(index_dims)
return pos
class FourierPositionEncoding(AbstractPositionEncoding):
"""Fourier (Sinusoidal) position encoding."""
def __init__(
self,
index_dims,
num_bands,
concat_pos=True,
max_resolution=None,
sine_only=False,
name=None,
):
super(FourierPositionEncoding, self).__init__(name=name)
self._num_bands = num_bands
self._concat_pos = concat_pos
self._sine_only = sine_only
self._index_dims = index_dims
# Use the index dims as the maximum resolution if it's not provided.
self._max_resolution = max_resolution or index_dims
def __call__(self, batch_size, pos=None):
pos = _check_or_build_spatial_positions(pos, self._index_dims, batch_size)
build_ff_fn = functools.partial(
generate_fourier_features,
num_bands=self._num_bands,
max_resolution=self._max_resolution,
concat_pos=self._concat_pos,
sine_only=self._sine_only,
)
return jax.vmap(build_ff_fn, 0, 0)(pos)
class PositionEncodingProjector(AbstractPositionEncoding):
"""Projects a position encoding to a target size."""
def __init__(self, output_size, base_position_encoding, name=None):
super(PositionEncodingProjector, self).__init__(name=name)
self._output_size = output_size
self._base_position_encoding = base_position_encoding
def __call__(self, batch_size, pos=None):
base_pos = self._base_position_encoding(batch_size, pos)
projected_pos = hk.Linear(output_size=self._output_size)(base_pos)
return projected_pos
def build_position_encoding(
position_encoding_type,
index_dims,
project_pos_dim=-1,
trainable_position_encoding_kwargs=None,
fourier_position_encoding_kwargs=None,
name=None,
):
"""Builds the position encoding."""
if position_encoding_type == "trainable":
assert trainable_position_encoding_kwargs is not None
output_pos_enc = TrainablePositionEncoding(
# Construct 1D features:
index_dim=np.prod(index_dims),
name=name,
**trainable_position_encoding_kwargs,
)
elif position_encoding_type == "fourier":
assert fourier_position_encoding_kwargs is not None
output_pos_enc = FourierPositionEncoding(
index_dims=index_dims, name=name, **fourier_position_encoding_kwargs
)
else:
raise ValueError(f"Unknown position encoding: {position_encoding_type}.")
if project_pos_dim > 0:
# Project the position encoding to a target dimension:
output_pos_enc = PositionEncodingProjector(
output_size=project_pos_dim, base_position_encoding=output_pos_enc
)
return output_pos_enc
| [
"numpy.stack",
"functools.partial",
"numpy.meshgrid",
"numpy.prod",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.broadcast_to",
"numpy.concatenate"
] | [((3534, 3573), 'numpy.meshgrid', 'np.meshgrid', (['*dim_ranges'], {'indexing': '"""ij"""'}), "(*dim_ranges, indexing='ij')\n", (3545, 3573), True, 'import numpy as np\n'), ((3586, 3621), 'numpy.stack', 'np.stack', (['array_index_grid'], {'axis': '(-1)'}), '(array_index_grid, axis=-1)\n', (3594, 3621), True, 'import numpy as np\n'), ((2416, 2448), 'numpy.sin', 'np.sin', (['(np.pi * per_pos_features)'], {}), '(np.pi * per_pos_features)\n', (2422, 2448), True, 'import numpy as np\n'), ((2781, 2829), 'numpy.concatenate', 'np.concatenate', (['[pos, per_pos_features]'], {'axis': '(-1)'}), '([pos, per_pos_features], axis=-1)\n', (2795, 2829), True, 'import numpy as np\n'), ((3311, 3414), 'numpy.linspace', 'np.linspace', (['output_range[0]', 'output_range[1]'], {'num': 'n_xels_per_dim', 'endpoint': '(True)', 'dtype': 'np.float32'}), '(output_range[0], output_range[1], num=n_xels_per_dim, endpoint=\n True, dtype=np.float32)\n', (3322, 3414), True, 'import numpy as np\n'), ((5235, 5288), 'numpy.broadcast_to', 'np.broadcast_to', (['pos[None]', '((batch_size,) + pos.shape)'], {}), '(pos[None], (batch_size,) + pos.shape)\n', (5250, 5288), True, 'import numpy as np\n'), ((6437, 6609), 'functools.partial', 'functools.partial', (['generate_fourier_features'], {'num_bands': 'self._num_bands', 'max_resolution': 'self._max_resolution', 'concat_pos': 'self._concat_pos', 'sine_only': 'self._sine_only'}), '(generate_fourier_features, num_bands=self._num_bands,\n max_resolution=self._max_resolution, concat_pos=self._concat_pos,\n sine_only=self._sine_only)\n', (6454, 6609), False, 'import functools\n'), ((1961, 2021), 'numpy.linspace', 'np.linspace', (['min_freq', '(res / 2)'], {'num': 'num_bands', 'endpoint': '(True)'}), '(min_freq, res / 2, num=num_bands, endpoint=True)\n', (1972, 2021), True, 'import numpy as np\n'), ((2288, 2323), 'numpy.prod', 'np.prod', (['per_pos_features.shape[1:]'], {}), '(per_pos_features.shape[1:])\n', (2295, 2323), True, 'import numpy as np\n'), ((4523, 
4592), 'numpy.broadcast_to', 'np.broadcast_to', (['pos_embs[None, :, :]', '((batch_size,) + pos_embs.shape)'], {}), '(pos_embs[None, :, :], (batch_size,) + pos_embs.shape)\n', (4538, 4592), True, 'import numpy as np\n'), ((2565, 2597), 'numpy.sin', 'np.sin', (['(np.pi * per_pos_features)'], {}), '(np.pi * per_pos_features)\n', (2571, 2597), True, 'import numpy as np\n'), ((2599, 2631), 'numpy.cos', 'np.cos', (['(np.pi * per_pos_features)'], {}), '(np.pi * per_pos_features)\n', (2605, 2631), True, 'import numpy as np\n'), ((5332, 5351), 'numpy.prod', 'np.prod', (['index_dims'], {}), '(index_dims)\n', (5339, 5351), True, 'import numpy as np\n'), ((7762, 7781), 'numpy.prod', 'np.prod', (['index_dims'], {}), '(index_dims)\n', (7769, 7781), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import numpy as np
import torch
import torch.nn.parallel
import torch.utils.data as data
class _OneHotIterator:
    """
    Yields ``n_batches_per_epoch`` batches of uniformly drawn one-hot rows,
    each paired with a dummy zero label.

    >>> it_1 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1)
    >>> it_2 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1)
    >>> list(it_1)[0][0].allclose(list(it_2)[0][0])
    True
    >>> it = _OneHotIterator(n_features=8, n_batches_per_epoch=1, batch_size=4)
    >>> data = list(it)
    >>> len(data)
    1
    >>> batch = data[0]
    >>> x, y = batch
    >>> x.size()
    torch.Size([4, 8])
    >>> x.sum(dim=1)
    tensor([1., 1., 1., 1.])
    """

    def __init__(self, n_features, n_batches_per_epoch, batch_size, seed=None):
        self.n_batches_per_epoch = n_batches_per_epoch
        self.n_features = n_features
        self.batch_size = batch_size
        # uniform categorical distribution over all feature positions
        self.probs = np.ones(n_features) / n_features
        self.batches_generated = 0
        self.random_state = np.random.RandomState(seed)

    def __iter__(self):
        return self

    def __next__(self):
        # the iterator is exhausted once the per-epoch batch budget is spent
        if self.batches_generated >= self.n_batches_per_epoch:
            raise StopIteration()
        draws = self.random_state.multinomial(1, self.probs, size=self.batch_size)
        self.batches_generated += 1
        return torch.from_numpy(draws).float(), torch.zeros(1)
class OneHotLoader(torch.utils.data.DataLoader):
    """
    >>> data_loader = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2, seed=1)
    >>> epoch_1 = []
    >>> for batch in data_loader:
    ...     epoch_1.append(batch)
    >>> [b[0].size() for b in epoch_1]
    [torch.Size([2, 8]), torch.Size([2, 8]), torch.Size([2, 8])]
    >>> data_loader_other = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2)
    >>> all_equal = True
    >>> for a, b in zip(data_loader, data_loader_other):
    ...     all_equal = all_equal and (a[0] == b[0]).all()
    >>> all_equal.item()
    0
    """

    def __init__(self, n_features, batches_per_epoch, batch_size, seed=None):
        # NOTE(review): DataLoader.__init__ is not called here, so base-class
        # setup is skipped; only __iter__ is reused — confirm intentional.
        self.seed = seed
        self.batches_per_epoch = batches_per_epoch
        self.n_features = n_features
        self.batch_size = batch_size

    def __iter__(self):
        # a fixed seed replays identical epochs; otherwise draw a fresh one
        epoch_seed = self.seed if self.seed is not None else np.random.randint(0, 2 ** 32)
        return _OneHotIterator(
            n_features=self.n_features,
            n_batches_per_epoch=self.batches_per_epoch,
            batch_size=self.batch_size,
            seed=epoch_seed,
        )
| [
"numpy.ones",
"numpy.random.RandomState",
"numpy.random.randint",
"torch.zeros",
"torch.from_numpy"
] | [((1170, 1197), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1191, 1197), True, 'import numpy as np\n'), ((1074, 1093), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (1081, 1093), True, 'import numpy as np\n'), ((1543, 1557), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1554, 1557), False, 'import torch\n'), ((2475, 2504), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (2492, 2504), True, 'import numpy as np\n'), ((1505, 1533), 'torch.from_numpy', 'torch.from_numpy', (['batch_data'], {}), '(batch_data)\n', (1521, 1533), False, 'import torch\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
from six.moves import xrange
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import math
import numpy as np
import struct
NUM_CLASSES = 10
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
FLAGS = None
def inference(images, hidden1_units):
    """Build the forward graph: one ReLU hidden layer plus a linear output layer."""
    with tf.name_scope('nodes_hidden1'):
        stddev_in = 1.0 / math.sqrt(float(IMAGE_PIXELS))
        hidden_weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units], stddev=stddev_in, dtype=tf.float32),
            name='weights1')
        hidden_biases = tf.Variable(tf.zeros([hidden1_units], dtype=tf.float32), name='biases1')
        hidden_activations = tf.nn.relu(tf.matmul(images, hidden_weights) + hidden_biases)
    with tf.name_scope('nodes_output_layer'):
        stddev_out = 1.0 / math.sqrt(float(hidden1_units))
        output_weights = tf.Variable(
            tf.truncated_normal([hidden1_units, NUM_CLASSES], stddev=stddev_out, dtype=tf.float32),
            name='weightso')
        output_biases = tf.Variable(tf.zeros([NUM_CLASSES], dtype=tf.float32), name='biaseso')
        # no softmax here: loss_() consumes raw logits
        logits = tf.matmul(hidden_activations, output_weights) + output_biases
    return logits
def loss_(logits, labels):
    """Return the mean cross-entropy between raw logits and integer labels."""
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    return tf.reduce_mean(per_example, name='xentropy_mean')
def training(loss, learning_rate):
    """Attach an SGD minimizer (with a loss summary) and return the train op."""
    tf.summary.scalar('loss', loss)
    sgd = tf.train.GradientDescentOptimizer(learning_rate)
    # the counter is incremented by minimize() on every training step
    step_counter = tf.Variable(0, name='global_step', trainable=False)
    return sgd.minimize(loss, global_step=step_counter)
def evaluation(logits, labels):
    """Return an op counting predictions whose top-1 class matches the label."""
    hits = tf.nn.in_top_k(tf.cast(logits, tf.float32), labels, 1)
    return tf.reduce_sum(tf.cast(hits, tf.int32))
def placeholder_inputs(batch_size):
    """Create feed placeholders for one batch of flattened images and labels."""
    image_ph = tf.placeholder(tf.float32, shape=(batch_size, IMAGE_PIXELS))
    label_ph = tf.placeholder(tf.int32, shape=(batch_size))
    return image_ph, label_ph
def fill_feed_dict(data_set, images_pl, labels_pl):
    """Fetch the next batch from *data_set* and map it onto the placeholders."""
    batch_images, batch_labels = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data)
    return {images_pl: batch_images, labels_pl: batch_labels}
def do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set):
    """Run one full evaluation epoch and print precision@1 on *data_set*."""
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    # only whole batches are evaluated, so a trailing remainder is dropped
    num_examples = steps_per_epoch * FLAGS.batch_size
    true_count = 0
    for _ in xrange(steps_per_epoch):
        fd = fill_feed_dict(data_set, images_placeholder, labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=fd)
    precision = float(true_count) / num_examples
    print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' % (num_examples, true_count, precision))
def run_training():
    """Train the one-hidden-layer MLP on MNIST, logging summaries and
    checkpoints, then dump all 'nodes_*' weights/biases to parameter.txt
    as hex-encoded 32-bit floats and print a parameter summary."""
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
    with tf.Graph().as_default():
        # build the graph: placeholders -> inference -> loss -> train/eval ops
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        logits = inference(images_placeholder,FLAGS.hidden1)
        loss = loss_(logits, labels_placeholder)
        train_op = training(loss, FLAGS.learning_rate)
        eval_correct = evaluation(logits, labels_placeholder)
        summary = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        sess = tf.Session()
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
        sess.run(init)
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train,images_placeholder,labels_placeholder)
            _, loss_value = sess.run([train_op, loss],feed_dict=feed_dict)
            duration = time.time() - start_time
            # every 100 steps: report the loss and write a summary event
            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
            # every 1000 steps (and at the very end): checkpoint and evaluate
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                print('Training Data Eval:')
                do_eval(sess,eval_correct,images_placeholder,labels_placeholder,data_sets.train)
                print('Validation Data Eval:')
                do_eval(sess,eval_correct,images_placeholder,labels_placeholder,data_sets.validation)
                print('Test Data Eval:')
                do_eval(sess,eval_correct,images_placeholder,labels_placeholder,data_sets.test)
        # dump trained parameters of the 'nodes_*' scopes as little-endian
        # hex words (each float packed with struct then byte-reversed)
        f_bin = open('parameter.txt', 'w')
        w_count = 0
        b_count = 0
        for v in tf.global_variables():
            # only variables created inside the 'nodes_*' name scopes
            if v.name.find('nodes_') != 0:
                continue
            shape = sess.run(tf.shape(v))
            value = sess.run(tf.transpose(v.value()))
            if len(shape) > 1:
                # weight matrix: one text row per row of the transposed matrix
                for v_ in value:
                    for w in v_.tolist():
                        binary_value = struct.pack('f', w)
                        binary_str = ''.join('{:02x}'.format(x) for x in reversed(binary_value))
                        f_bin.write('%s ' % binary_str)
                        w_count += 1
                    f_bin.write('\n')
                f_bin.write('\n')
            elif len(shape) == 1:
                # bias vector: a single text row of hex words
                for v_ in value.tolist():
                    binary_value = struct.pack('f', v_)
                    binary_str = ''.join('{:02x}'.format(x) for x in reversed(binary_value))
                    f_bin.write('%s ' % binary_str)
                    b_count += 1
                f_bin.write('\n')
                f_bin.write('\n')
        f_bin.close()
        print('\n','-------------------------------------------------------')
        print('Neuron number in input layer:',IMAGE_PIXELS)
        print('Neuron number in hidden layer:',FLAGS.hidden1)
        print('Neuron number in output layer:',NUM_CLASSES)
        print('Weight number :',w_count)
        print('Bias number :',b_count)
def main(_):
    """Entry point for tf.app.run: reset the log directory and train."""
    np.set_printoptions(suppress=True)
    log_dir = FLAGS.log_dir
    # start from a clean log/checkpoint directory on every run
    if tf.gfile.Exists(log_dir):
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)
    run_training()
if __name__ == '__main__':
    # command-line configuration for the trainer
    parser = argparse.ArgumentParser()
    parser.add_argument('--learning_rate', type=float, default=0.5,
                        help='Initial learning rate.')
    parser.add_argument('--max_steps', type=int, default=5000,
                        help='Number of steps to run trainer.')
    parser.add_argument('--hidden1', type=int, default=30,
                        help='Number of units in hidden layer 1.')
    parser.add_argument('--batch_size', type=int, default=500,
                        help='Batch size. Must divide evenly into the dataset sizes.')
    parser.add_argument('--input_data_dir', type=str,
                        default='/tmp/tensorflow/mnist/input_data',
                        help='Directory to put the input data.')
    parser.add_argument('--log_dir', type=str,
                        default='/tmp/tensorflow/mnist/logs/fully_connected_feed',
                        help='Directory to put the log data.')
    parser.add_argument('--fake_data', default=False, action='store_true',
                        help='If true, uses fake data for unit testing.')
    FLAGS, unparsed = parser.parse_known_args()
    # forward any unrecognised arguments to tf.app.run
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"tensorflow.gfile.Exists",
"argparse.ArgumentParser",
"tensorflow.matmul",
"tensorflow.global_variables",
"tensorflow.Variable",
"numpy.set_printoptions",
"struct.pack",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.gfile.DeleteRecursively",
"tensorf... | [((1200, 1297), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits', 'name': '"""xentropy"""'}), "(labels=labels, logits=logits,\n name='xentropy')\n", (1246, 1297), True, 'import tensorflow as tf\n'), ((1303, 1354), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""xentropy_mean"""'}), "(cross_entropy, name='xentropy_mean')\n", (1317, 1354), True, 'import tensorflow as tf\n'), ((1393, 1424), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (1410, 1424), True, 'import tensorflow as tf\n'), ((1439, 1487), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (1472, 1487), True, 'import tensorflow as tf\n'), ((1504, 1555), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (1515, 1555), True, 'import tensorflow as tf\n'), ((1681, 1708), 'tensorflow.cast', 'tf.cast', (['logits', 'tf.float32'], {}), '(logits, tf.float32)\n', (1688, 1708), True, 'import tensorflow as tf\n'), ((1721, 1754), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'labels', '(1)'], {}), '(logits, labels, 1)\n', (1735, 1754), True, 'import tensorflow as tf\n'), ((1866, 1926), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, IMAGE_PIXELS)'}), '(tf.float32, shape=(batch_size, IMAGE_PIXELS))\n', (1880, 1926), True, 'import tensorflow as tf\n'), ((1950, 1992), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'batch_size'}), '(tf.int32, shape=batch_size)\n', (1964, 1992), True, 'import tensorflow as tf\n'), ((2487, 2510), 'six.moves.xrange', 'xrange', (['steps_per_epoch'], {}), '(steps_per_epoch)\n', (2493, 2510), False, 'from six.moves import xrange\n'), ((2844, 2908), 
'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['FLAGS.input_data_dir', 'FLAGS.fake_data'], {}), '(FLAGS.input_data_dir, FLAGS.fake_data)\n', (2869, 2908), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((5947, 5981), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (5966, 5981), True, 'import numpy as np\n'), ((5989, 6019), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.log_dir'], {}), '(FLAGS.log_dir)\n', (6004, 6019), True, 'import tensorflow as tf\n'), ((6069, 6101), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.log_dir'], {}), '(FLAGS.log_dir)\n', (6086, 6101), True, 'import tensorflow as tf\n'), ((6185, 6210), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6208, 6210), False, 'import argparse\n'), ((7283, 7335), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main', 'argv': '([sys.argv[0]] + unparsed)'}), '(main=main, argv=[sys.argv[0]] + unparsed)\n', (7293, 7335), True, 'import tensorflow as tf\n'), ((457, 487), 'tensorflow.name_scope', 'tf.name_scope', (['"""nodes_hidden1"""'], {}), "('nodes_hidden1')\n", (470, 487), True, 'import tensorflow as tf\n'), ((808, 843), 'tensorflow.name_scope', 'tf.name_scope', (['"""nodes_output_layer"""'], {}), "('nodes_output_layer')\n", (821, 843), True, 'import tensorflow as tf\n'), ((1778, 1804), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.int32'], {}), '(correct, tf.int32)\n', (1785, 1804), True, 'import tensorflow as tf\n'), ((3251, 3273), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3271, 3273), True, 'import tensorflow as tf\n'), ((3285, 3318), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3316, 3318), True, 'import tensorflow as tf\n'), ((3331, 3347), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3345, 3347), True, 'import tensorflow as tf\n'), 
((3359, 3371), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3369, 3371), True, 'import tensorflow as tf\n'), ((3393, 3441), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.log_dir', 'sess.graph'], {}), '(FLAGS.log_dir, sess.graph)\n', (3414, 3441), True, 'import tensorflow as tf\n'), ((3477, 3500), 'six.moves.xrange', 'xrange', (['FLAGS.max_steps'], {}), '(FLAGS.max_steps)\n', (3483, 3500), False, 'from six.moves import xrange\n'), ((4652, 4673), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4671, 4673), True, 'import tensorflow as tf\n'), ((6025, 6066), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.log_dir'], {}), '(FLAGS.log_dir)\n', (6051, 6066), True, 'import tensorflow as tf\n'), ((672, 715), 'tensorflow.zeros', 'tf.zeros', (['[hidden1_units]'], {'dtype': 'tf.float32'}), '([hidden1_units], dtype=tf.float32)\n', (680, 715), True, 'import tensorflow as tf\n'), ((1028, 1069), 'tensorflow.zeros', 'tf.zeros', (['[NUM_CLASSES]'], {'dtype': 'tf.float32'}), '([NUM_CLASSES], dtype=tf.float32)\n', (1036, 1069), True, 'import tensorflow as tf\n'), ((1099, 1127), 'tensorflow.matmul', 'tf.matmul', (['hidden1', 'weightso'], {}), '(hidden1, weightso)\n', (1108, 1127), True, 'import tensorflow as tf\n'), ((3521, 3532), 'time.time', 'time.time', ([], {}), '()\n', (3530, 3532), False, 'import time\n'), ((757, 784), 'tensorflow.matmul', 'tf.matmul', (['images', 'weights1'], {}), '(images, weights1)\n', (766, 784), True, 'import tensorflow as tf\n'), ((2916, 2926), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2924, 2926), True, 'import tensorflow as tf\n'), ((3707, 3718), 'time.time', 'time.time', ([], {}), '()\n', (3716, 3718), False, 'import time\n'), ((4760, 4771), 'tensorflow.shape', 'tf.shape', (['v'], {}), '(v)\n', (4768, 4771), True, 'import tensorflow as tf\n'), ((4952, 4971), 'struct.pack', 'struct.pack', (['"""f"""', 'w'], {}), "('f', w)\n", (4963, 4971), False, 'import 
struct\n'), ((5313, 5333), 'struct.pack', 'struct.pack', (['"""f"""', 'v_'], {}), "('f', v_)\n", (5324, 5333), False, 'import struct\n')] |
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget
from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtGui import QIntValidator, QDoubleValidator
from PyQt5.QtTest import QTest
import sys
import numpy as np
import copy
import time
from xraydb import XrayDB
import os
class XAnoS_EnergySteps(QWidget):
    """
    Widget that computes energies below the absorption edge of a selected
    element at which the f' (anomalous scattering factor) values are
    equidistant, plots them, and can export the energy list to a text file.
    """

    def __init__(self, parent=None):
        """Load the Qt UI, open the X-ray database and connect the signals."""
        QWidget.__init__(self, parent)
        loadUi('UI_Forms/XAnoS_EnergySteps.ui', self)
        self.xrdb = XrayDB()
        self.initialize_UI()
        self.init_signals()

    def initialize_UI(self):
        """Populate the element/edge combo boxes and install input validators."""
        self.doubleValidator = QDoubleValidator()
        self.elements = self.xrdb.atomic_symbols
        self.elementComboBox.addItems(
            [str(self.xrdb.atomic_number(element)) + ': ' + element for element in self.elements])
        self.element = self.elementComboBox.currentText().split(': ')[1]
        edges = self.xrdb.xray_edges(self.element)
        # edge energies are stored in eV; display them in keV
        self.edgeComboBox.addItems([key + ': %.4f' % (edges[key].energy / 1000) for key in edges.keys()])
        self.EOffsetLineEdit.setValidator(self.doubleValidator)
        self.minEnergyLineEdit.setValidator(self.doubleValidator)
        self.maxEnergyLineEdit.setValidator(self.doubleValidator)
        self.edgeChanged(self.edgeComboBox.currentText())
        self.NStepsChanged(20)
        self.energyOffset = float(self.EOffsetLineEdit.text())

    def init_signals(self):
        """Wire the widget signals to their handler slots."""
        self.elementComboBox.currentTextChanged.connect(self.elementChanged)
        self.edgeComboBox.currentTextChanged.connect(self.edgeChanged)
        self.EOffsetLineEdit.textChanged.connect(self.energyOffsetChanged)
        self.minEnergyLineEdit.textChanged.connect(self.minEnergyChanged)
        self.maxEnergyLineEdit.textChanged.connect(self.maxEnergyChanged)
        self.NStepsSpinBox.valueChanged.connect(self.NStepsChanged)
        self.calculatePushButton.clicked.connect(self.calculate)
        self.savePushButton.clicked.connect(self.saveFile)

    def elementChanged(self, txt):
        """Refresh the edge list when a different element is selected."""
        self.element = txt.split(': ')[1]
        # detach the handler while repopulating to avoid spurious callbacks
        self.edgeComboBox.currentTextChanged.disconnect()
        self.edgeComboBox.clear()
        edges = self.xrdb.xray_edges(self.element)
        self.edgeComboBox.addItems([key + ': %.4f' % (edges[key].energy / 1000) for key in edges.keys()])
        self.edgeComboBox.currentTextChanged.connect(self.edgeChanged)
        self.edgeChanged(self.edgeComboBox.currentText())

    def edgeChanged(self, txt):
        """Set max energy to the selected edge and default min 1 keV below it."""
        self.maxEnergy = float(txt.split(': ')[1])
        self.maxEnergyLineEdit.setText('%.4f' % self.maxEnergy)
        # bug fix: np.max(x, 0) passes 0 as the *axis* argument and does not
        # clamp at zero; builtin max keeps the minimum energy non-negative
        self.minEnergyLineEdit.setText('%.4f' % max(self.maxEnergy - 1, 0))

    def energyOffsetChanged(self, txt):
        self.energyOffset = float(txt)

    def NStepsChanged(self, N):
        self.NEnergy = N

    def minEnergyChanged(self, txt):
        self.minEnergy = float(txt)

    def maxEnergyChanged(self, txt):
        self.maxEnergy = float(txt)

    def calculate(self):
        """Compute energies with equidistant f' values below the edge and plot them."""
        edge_energy = self.maxEnergy * 1000
        min_energy = self.minEnergy * 1000
        element = self.element
        steps = self.NEnergy
        eoff = self.energyOffset
        self.resultTextEdit.clear()
        efine = np.linspace(edge_energy, min_energy, 1001)
        f1 = self.xrdb.f1_chantler(element=element, energy=efine, smoothing=0)
        # pick equidistant f' values and map them back to energies by
        # interpolating the (f1, energy) relation
        f1vals = np.linspace(f1[0], f1[-1], steps)
        e1vals = np.interp(f1vals, f1, efine)
        self.evaltxt = ''
        # a symmetric energy window around the edge for the continuous curve
        etemp = np.linspace(min_energy, edge_energy + (edge_energy - min_energy), 2001)
        f1temp = self.xrdb.f1_chantler(element=element, energy=etemp, smoothing=0)
        self.resultTextEdit.append("%10s\t%10s\t%10s\t%10s\t%10s" % ("Step", "f_value", "Mono_E", "Und_E", "f_1"))
        for i in range(steps):
            self.evaltxt = self.evaltxt + '%.4f,' % (e1vals[i] / 1e3 + eoff)
            # NOTE(review): the 0.17 keV added to the undulator energy looks
            # like a beamline-specific detuning offset — confirm its origin
            self.resultTextEdit.append("%10d\t%10.7f\t%10.4f\t%10.4f\t%10.7f" % (
                i, f1vals[i], e1vals[i] / 1e3 + eoff, e1vals[i] / 1e3 + 0.17 + eoff,
                self.xrdb.f1_chantler(
                    element=element, energy=e1vals[i], smoothing=0)))
        self.plotWidget.add_data(x=etemp / 1e3, y=f1temp, fit=True, name='continuous')
        self.plotWidget.add_data(x=e1vals / 1e3, y=f1vals, name='discrete')
        self.plotWidget.Plot(['continuous', 'discrete'])
        txt = 'Energy [' + self.evaltxt[:-1] + '] absolute coupled'
        self.resultTextEdit.append('\n')
        self.resultTextEdit.append(txt)

    def saveFile(self):
        """Export the last calculated energy list to a user-chosen text file."""
        try:
            txt = 'Energy [' + self.evaltxt[:-1] + '] absolute coupled'
        except AttributeError:
            # calculate() has not been run yet, so there is nothing to save
            QMessageBox.warning(self, 'Save Error', 'Please calculate before saving!', QMessageBox.Ok)
            return
        fname = QFileDialog.getSaveFileName(parent=self, caption='Save file as', filter="Text files (*.txt )")[0]
        if not fname:
            return  # the user cancelled the dialog
        with open(fname, 'w') as fh:
            fh.write(txt)
if __name__ == '__main__':
    os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
    app = QApplication(sys.argv)
    try:
        app.setHighDpiScaleFactorRoundingPolicy(Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)
    except:
        # older Qt builds lack this API; ignore and continue
        pass
    win = XAnoS_EnergySteps()
    win.setWindowTitle('XAnoS Energy Steps')
    screen = QDesktopWidget().screenGeometry()
    win.setGeometry(0, 0, screen.width() - 100, screen.height() - 100)
    # centre the window on the screen
    x_pos = int(screen.width() / 2) - int(win.frameSize().width() / 2)
    y_pos = int(screen.height() / 2) - int(win.frameSize().height() / 2)
    win.move(x_pos, y_pos)
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)
    win.setWindowTitle('Energy Steps')
    win.show()
    sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QDesktopWidget",
"PyQt5.QtWidgets.QApplication.setAttribute",
"PyQt5.QtGui.QDoubleValidator",
"numpy.interp",
"PyQt5.uic.loadUi",
"PyQt5.QtWidgets.QMessageBox.warning",
"PyQt5.QtWidgets.QWidget.__init__",
"PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"numpy.max",
"xraydb.XrayDB",... | [((5303, 5325), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5315, 5325), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget\n'), ((5942, 5995), 'PyQt5.QtWidgets.QApplication.setAttribute', 'QApplication.setAttribute', (['Qt.AA_EnableHighDpiScaling'], {}), '(Qt.AA_EnableHighDpiScaling)\n', (5967, 5995), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget\n'), ((6000, 6050), 'PyQt5.QtWidgets.QApplication.setAttribute', 'QApplication.setAttribute', (['Qt.AA_UseHighDpiPixmaps'], {}), '(Qt.AA_UseHighDpiPixmaps)\n', (6025, 6050), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget\n'), ((603, 633), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (619, 633), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget\n'), ((642, 687), 'PyQt5.uic.loadUi', 'loadUi', (['"""UI_Forms/XAnoS_EnergySteps.ui"""', 'self'], {}), "('UI_Forms/XAnoS_EnergySteps.ui', self)\n", (648, 687), False, 'from PyQt5.uic import loadUi\n'), ((708, 716), 'xraydb.XrayDB', 'XrayDB', ([], {}), '()\n', (714, 716), False, 'from xraydb import XrayDB\n'), ((833, 851), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (849, 851), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((3400, 3443), 'numpy.linspace', 'np.linspace', (['edge_energy', 'min_energy', 'steps'], {}), '(edge_energy, min_energy, steps)\n', (3411, 3443), True, 'import numpy as np\n'), ((3460, 3502), 'numpy.linspace', 'np.linspace', (['edge_energy', 'min_energy', '(1001)'], {}), '(edge_energy, min_energy, 1001)\n', (3471, 3502), True, 'import numpy as np\n'), ((3599, 3632), 'numpy.linspace', 'np.linspace', (['f1[0]', 'f1[-1]', 'steps'], {}), '(f1[0], 
f1[-1], steps)\n', (3610, 3632), True, 'import numpy as np\n'), ((3650, 3678), 'numpy.interp', 'np.interp', (['f1vals', 'f1', 'efine'], {}), '(f1vals, f1, efine)\n', (3659, 3678), True, 'import numpy as np\n'), ((3721, 3792), 'numpy.linspace', 'np.linspace', (['min_energy', '(edge_energy + (edge_energy - min_energy))', '(2001)'], {}), '(min_energy, edge_energy + (edge_energy - min_energy), 2001)\n', (3732, 3792), True, 'import numpy as np\n'), ((5042, 5141), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QFileDialog.getSaveFileName', ([], {'parent': 'self', 'caption': '"""Save file as"""', 'filter': '"""Text files (*.txt )"""'}), "(parent=self, caption='Save file as', filter=\n 'Text files (*.txt )')\n", (5069, 5141), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget\n'), ((5679, 5695), 'PyQt5.QtWidgets.QDesktopWidget', 'QDesktopWidget', ([], {}), '()\n', (5693, 5695), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget\n'), ((2850, 2879), 'numpy.max', 'np.max', (['(self.maxEnergy - 1)', '(0)'], {}), '(self.maxEnergy - 1, 0)\n', (2856, 2879), True, 'import numpy as np\n'), ((4921, 5015), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', '"""Save Error"""', '"""Please calculate before saving!"""', 'QMessageBox.Ok'], {}), "(self, 'Save Error', 'Please calculate before saving!',\n QMessageBox.Ok)\n", (4940, 5015), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QFileDialog, QMessageBox, QDesktopWidget\n')] |
import math
import csv
import numpy as np
from random import shuffle
# Sigmoid function used as the activation function
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); returns 0 when exp(-x) overflows."""
    try:
        denominator = 1 + math.exp(-x)
    except OverflowError:
        # exp(-x) overflows for very negative x, where the sigmoid is ~0
        return 0
    return 1 / denominator
# Artificial neural network
class NeuralNetwork:
    """Feed-forward neural network with a single hidden layer.

    Trained by gradient descent with a sigmoid activation on both the
    hidden and the output layer.

    Attributes:
        input_neurons: number of neurons in the input layer
        hidden_neurons: number of neurons in the hidden layer
        output_neurons: number of neurons in the output layer
        learning_rate: step size used by train()
        categories: class labels detected by prepare(), indexed by output neuron
    """

    def __init__(self, i_neurons, h_neurons, o_neurons, learning_rate=0.1):
        """Store the layer sizes and draw random weights in [-0.5, 0.5)."""
        self.input_neurons = i_neurons
        self.hidden_neurons = h_neurons
        self.output_neurons = o_neurons
        self.learning_rate = learning_rate
        self.categories = []
        # weight matrices: rows = target-layer neurons, cols = source layer
        self.input_to_hidden = np.random.rand(
            self.hidden_neurons, self.input_neurons
        ) - 0.5
        self.hidden_to_output = np.random.rand(
            self.output_neurons, self.hidden_neurons
        ) - 0.5
        # vectorised, numerically stable sigmoid (replaces the slow
        # np.vectorize(sigmoid) wrapper; same values for ordinary inputs)
        self.activation = self._sigmoid

    @staticmethod
    def _sigmoid(z):
        """Element-wise logistic function; clipping prevents overflow in exp."""
        z = np.clip(z, -500.0, 500.0)
        return 1.0 / (1.0 + np.exp(-z))

    # Prepare the data
    # Arguments:
    # - data as a two-dimensional list
    # - fraction to split off as test data (taken from the front; shuffle first)
    # - category in the last column? Otherwise in the first
    def prepare(self, data, test_ratio=0.1, last=True):
        """Split raw rows into scaled test/train features and one-hot targets.

        Returns (x_test, y_test, x_train, y_train) where y_train is one-hot
        encoded over the detected categories.
        """
        if last:
            x = [line[0:-1] for line in data]
            y = [line[-1] for line in data]
        else:
            x = [line[1:] for line in data]
            y = [line[0] for line in data]
        # min-max feature scaling, column by column
        columns = np.array(x).transpose()
        x_scaled = []
        for column in columns:
            if min(column) == max(column):
                # a constant feature carries no information -> map to zeros
                column = np.zeros(len(column))
            else:
                column = (column - min(column)) / (max(column) - min(column))
            x_scaled.append(column)
        x = np.array(x_scaled).transpose()
        # extract the category labels and remember them for predict()
        y_values = list(set(y))
        self.categories = y_values
        # spread the targets over the output neurons (one-hot encoding)
        y_spread = []
        for y_i in y:
            current = np.zeros(len(y_values))
            current[y_values.index(y_i)] = 1
            y_spread.append(current)
        y_out = np.array(y_spread)
        separator = int(test_ratio * len(x))
        return x[:separator], y[:separator], x[separator:], y_out[separator:]

    # A single training pass
    # Arguments:
    # - input data as two-dimensional list/array
    # - targets spread over the output neurons as list/array
    def train(self, inputs, targets):
        """Run one gradient-descent update over the given batch."""
        inputs = np.array(inputs, ndmin=2).transpose()
        targets = np.array(targets, ndmin=2).transpose()
        # forward pass: hidden layer, then output layer
        hidden_in = np.dot(self.input_to_hidden, inputs)
        hidden_out = self.activation(hidden_in)
        output_in = np.dot(self.hidden_to_output, hidden_out)
        output_out = self.activation(output_in)
        # backpropagate the errors
        output_diff = targets - output_out
        hidden_diff = np.dot(self.hidden_to_output.transpose(), output_diff)
        # update the weights by learning rate * error * sigmoid derivative
        self.hidden_to_output += (
            self.learning_rate *
            np.dot(
                (output_diff * output_out * (1.0 - output_out)),
                hidden_out.transpose()
            )
        )
        # bug fix: the sigmoid derivative is out * (1 - out); the original
        # code multiplied by (1.0 * hidden_out) instead of (1.0 - hidden_out)
        self.input_to_hidden += (
            self.learning_rate *
            np.dot(
                (hidden_diff * hidden_out * (1.0 - hidden_out)),
                inputs.transpose()
            )
        )

    # Prediction for a series of test data
    # Arguments:
    # - input data as two-dimensional list/array
    # - comparison targets (named, optional)
    def predict(self, inputs, targets=None):
        """Predict categories; with *targets* given, also score the result.

        Returns the list of predicted categories, or — when ``targets`` is
        supplied — a tuple (number correct, percent correct).
        """
        # same forward pass as in train()
        inputs = np.array(inputs, ndmin=2).transpose()
        hidden_out = self.activation(np.dot(self.input_to_hidden, inputs))
        output_out = self.activation(np.dot(self.hidden_to_output, hidden_out))
        # map each sample to the category of its most active output neuron
        result = []
        for output in output_out.transpose():
            result.append(
                self.categories[list(output).index(max(output))]
            )
        # without targets, simply return the predictions
        if targets is None:
            return result
        # otherwise compare and count correct predictions
        correct = 0
        for res, pred in zip(targets, result):
            if res == pred:
                correct += 1
        percent = correct / len(result) * 100
        return correct, percent
# Main program
if __name__ == '__main__':
    # load the iris data set (numeric columns, label in the last column)
    with open('iris_nn.csv', 'r') as iris_file:
        irises = list(csv.reader(iris_file, quoting=csv.QUOTE_NONNUMERIC))
    shuffle(irises)
    network = NeuralNetwork(4, 12, 3, learning_rate=0.2)
    x_test, y_test, x_train, y_train = network.prepare(irises, test_ratio=0.2)
    # 200 full-batch training passes
    for _ in range(200):
        network.train(x_train, y_train)
    correct, percent = network.predict(x_test, targets=y_test)
    print(f"{correct} korrekte Vorhersagen ({percent}%).")
| [
"math.exp",
"csv.reader",
"numpy.vectorize",
"random.shuffle",
"numpy.array",
"numpy.random.rand",
"numpy.dot"
] | [((5305, 5320), 'random.shuffle', 'shuffle', (['irises'], {}), '(irises)\n', (5312, 5320), False, 'from random import shuffle\n'), ((1121, 1142), 'numpy.vectorize', 'np.vectorize', (['sigmoid'], {}), '(sigmoid)\n', (1133, 1142), True, 'import numpy as np\n'), ((2362, 2380), 'numpy.array', 'np.array', (['y_spread'], {}), '(y_spread)\n', (2370, 2380), True, 'import numpy as np\n'), ((2967, 3003), 'numpy.dot', 'np.dot', (['self.input_to_hidden', 'inputs'], {}), '(self.input_to_hidden, inputs)\n', (2973, 3003), True, 'import numpy as np\n'), ((3189, 3230), 'numpy.dot', 'np.dot', (['self.hidden_to_output', 'hidden_out'], {}), '(self.hidden_to_output, hidden_out)\n', (3195, 3230), True, 'import numpy as np\n'), ((4281, 4317), 'numpy.dot', 'np.dot', (['self.input_to_hidden', 'inputs'], {}), '(self.input_to_hidden, inputs)\n', (4287, 4317), True, 'import numpy as np\n'), ((4386, 4427), 'numpy.dot', 'np.dot', (['self.hidden_to_output', 'hidden_out'], {}), '(self.hidden_to_output, hidden_out)\n', (4392, 4427), True, 'import numpy as np\n'), ((5219, 5270), 'csv.reader', 'csv.reader', (['iris_file'], {'quoting': 'csv.QUOTE_NONNUMERIC'}), '(iris_file, quoting=csv.QUOTE_NONNUMERIC)\n', (5229, 5270), False, 'import csv\n'), ((846, 901), 'numpy.random.rand', 'np.random.rand', (['self.hidden_neurons', 'self.input_neurons'], {}), '(self.hidden_neurons, self.input_neurons)\n', (860, 901), True, 'import numpy as np\n'), ((962, 1018), 'numpy.random.rand', 'np.random.rand', (['self.output_neurons', 'self.hidden_neurons'], {}), '(self.output_neurons, self.hidden_neurons)\n', (976, 1018), True, 'import numpy as np\n'), ((162, 174), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (170, 174), False, 'import math\n'), ((1660, 1671), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1668, 1671), True, 'import numpy as np\n'), ((1971, 1989), 'numpy.array', 'np.array', (['x_scaled'], {}), '(x_scaled)\n', (1979, 1989), True, 'import numpy as np\n'), ((2778, 2803), 'numpy.array', 
'np.array', (['inputs'], {'ndmin': '(2)'}), '(inputs, ndmin=2)\n', (2786, 2803), True, 'import numpy as np\n'), ((2836, 2862), 'numpy.array', 'np.array', (['targets'], {'ndmin': '(2)'}), '(targets, ndmin=2)\n', (2844, 2862), True, 'import numpy as np\n'), ((4221, 4246), 'numpy.array', 'np.array', (['inputs'], {'ndmin': '(2)'}), '(inputs, ndmin=2)\n', (4229, 4246), True, 'import numpy as np\n')] |
import io
import os
import xml.etree.ElementTree
import requests
import hashlib
import logging
import urllib
import numpy as np
import wave
import shutil
from operator import itemgetter
from PIL import Image
from urllib.parse import urlparse
from nltk.tokenize import WhitespaceTokenizer
logger = logging.getLogger(__name__)
def tokenize(text):
    """Split *text* on whitespace and return ``[(token, start_offset), ...]``.

    Offsets are real character indices into the original string.  The previous
    implementation accumulated ``len(tok) + 1`` per token, which drifted as
    soon as the text contained leading whitespace or runs of consecutive
    whitespace; scanning with ``str.index`` keeps the offsets exact.
    """
    out = []
    pos = 0
    for tok in text.split():
        # The region [pos, token start) is pure whitespace and tokens contain
        # no whitespace, so the first occurrence at/after pos is the token.
        start = text.index(tok, pos)
        out.append((tok, start))
        pos = start + len(tok)
    return out
def create_tokens_and_tags(text, spans):
    """Align labelled character spans with whitespace tokens and emit BIO tags.

    :param text: the raw document text
    :param spans: list of dicts with 'start', 'end' and 'labels' keys
        (Label Studio result format), or falsy for an unlabelled document
    :return: (tokens, tags) — parallel lists; tags use the 'B-'/'I-' prefix
        convention with 'O' for tokens outside every span
    """
    #tokens_and_idx = tokenize(text) # This function doesn't work properly if text contains multiple whitespaces...
    token_index_tuples = [token for token in WhitespaceTokenizer().span_tokenize(text)]
    tokens_and_idx = [(text[start:end], start) for start, end in token_index_tuples]
    if spans:
        # Walk the spans left-to-right in lockstep with the tokens.
        spans = list(sorted(spans, key=itemgetter('start')))
        span = spans.pop(0)
        span_start = span['start']
        span_end = span['end']-1
        prefix = 'B-'
        tokens, tags = [], []
        for token, token_start in tokens_and_idx:
            tokens.append(token)
            token_end = token_start + len(token) #"- 1" - This substraction is wrong. token already uses the index E.g. "Hello" is 0-4
            token_start_ind = token_start #It seems like the token start is too early.. for whichever reason
            #if for some reason end of span is missed.. pop the new span (Which is quite probable due to this method)
            #Attention it seems like span['end'] is the index of first char afterwards. In case the whitespace is part of the
            #labell we need to subtract one. Otherwise next token won't trigger the span update.. only the token after next..
            if token_start_ind > span_end:
                # Current span is exhausted: advance to the first span that
                # could still cover this token.
                while spans:
                    span = spans.pop(0)
                    span_start = span['start']
                    span_end = span['end'] - 1
                    prefix = 'B-'
                    if token_start <= span_end:
                        break
            if not span or token_end < span_start:
                tags.append('O')
            elif span_start <= token_end and span_end >= token_start_ind:
                # Token overlaps the span; first hit gets 'B-', the rest 'I-'.
                tags.append(prefix + span['labels'][0])
                prefix = 'I-'
            else:
                tags.append('O')
    else:
        # No annotations: everything is outside.
        tokens = [token for token, _ in tokens_and_idx]
        tags = ['O'] * len(tokens)
    return tokens, tags
def _get_upload_dir(project_dir=None, upload_dir=None):
"""Return either upload_dir, or path by LS_UPLOAD_DIR, or project_dir/upload"""
if upload_dir:
return upload_dir
upload_dir = os.environ.get('LS_UPLOAD_DIR')
if not upload_dir and project_dir:
upload_dir = os.path.join(project_dir, 'upload')
if not os.path.exists(upload_dir):
upload_dir = None
if not upload_dir:
raise FileNotFoundError("Can't find upload dir: either LS_UPLOAD_DIR or project should be passed to converter")
return upload_dir
def download(url, output_dir, filename=None, project_dir=None, return_relative_path=False, upload_dir=None):
    """Materialise *url* as a local file and return its path.

    Three cases are handled:
    1. Label Studio uploaded files ('/data/upload/...') are copied from the
       resolved upload directory into *output_dir*.
    2. Locally-served files ('/data/...?d=<dir>') are resolved in place and
       never copied.
    3. Anything else is treated as a remote URL and fetched over HTTP into
       *output_dir* (skipped if the target file already exists).

    If *return_relative_path* is True, the result is relative to the parent
    of *output_dir* (not implemented for case 2).
    """
    is_local_file = url.startswith('/data/') and '?d=' in url
    is_uploaded_file = url.startswith('/data/upload')
    if is_uploaded_file:
        upload_dir = _get_upload_dir(project_dir, upload_dir)
        filename = url.replace('/data/upload/', '')
        filepath = os.path.join(upload_dir, filename)
        logger.debug('Copy {filepath} to {output_dir}'.format(filepath=filepath, output_dir=output_dir))
        shutil.copy(filepath, output_dir)
        if return_relative_path:
            return os.path.join(os.path.basename(output_dir), filename)
        return filepath
    if is_local_file:
        # URL shape: /data/<filename>?d=<url-encoded directory>
        filename, dir_path = url.split('/data/', 1)[-1].split('?d=')
        dir_path = str(urllib.parse.unquote(dir_path))
        if not os.path.exists(dir_path):
            raise FileNotFoundError(dir_path)
        filepath = os.path.join(dir_path, filename)
        if return_relative_path:
            raise NotImplementedError()
        return filepath
    if filename is None:
        # Derive a stable name: URL basename plus a short hash of the full
        # URL to avoid collisions between equal basenames.
        basename, ext = os.path.splitext(os.path.basename(urlparse(url).path))
        filename = basename + '_' + hashlib.md5(url.encode()).hexdigest()[:4] + ext
    filepath = os.path.join(output_dir, filename)
    # Acts as a cache: skip the network round-trip if already downloaded.
    if not os.path.exists(filepath):
        logger.info('Download {url} to {filepath}'.format(url=url, filepath=filepath))
        r = requests.get(url)
        r.raise_for_status()
        with io.open(filepath, mode='wb') as fout:
            fout.write(r.content)
    if return_relative_path:
        return os.path.join(os.path.basename(output_dir), filename)
    return filepath
def get_image_size(image_path):
    """Return the (width, height) of the image at *image_path*.

    Uses a context manager so the underlying file handle is released
    immediately (the previous version left it open until garbage collection).
    """
    with Image.open(image_path) as img:
        return img.size
def get_image_size_and_channels(image_path):
    """Return (width, height, channels) for the image at *image_path*.

    Uses a context manager so the underlying file handle is released
    immediately (the previous version left it open until garbage collection).
    """
    with Image.open(image_path) as img:
        w, h = img.size
        c = len(img.getbands())
    return w, h, c
def get_audio_duration(audio_path):
    """Return the length, in seconds, of the WAV file at *audio_path*."""
    with wave.open(audio_path, mode='r') as wav_file:
        n_frames = wav_file.getnframes()
        frame_rate = float(wav_file.getframerate())
        return n_frames / frame_rate
def ensure_dir(dir_path):
    """Create *dir_path* (including parents) if it does not exist yet.

    ``exist_ok=True`` replaces the separate ``os.path.exists`` check, closing
    the race window where another process creates the directory between the
    check and the ``makedirs`` call.
    """
    os.makedirs(dir_path, exist_ok=True)
def parse_config(config_string):
    """Parse a Label Studio XML labelling config into its output tags.

    Returns a dict mapping each output tag name to
    ``{'type': ..., 'to_name': [...], 'inputs': [...]}`` where every entry of
    ``inputs`` is the ``{'type', 'value'}`` description of a referenced input
    tag.  Raises KeyError when a ``toName`` points at an unknown input tag.
    """

    def _looks_like_input(node):
        # Input tags have a name and a '$'-prefixed value binding.
        return node.attrib.get('name') and node.attrib.get('value', '').startswith('$')

    def _looks_like_output(node):
        # Output (control) tags have a name and point at inputs via toName.
        return node.attrib.get('name') and node.attrib.get('toName')

    root = xml.etree.ElementTree.fromstring(config_string)
    inputs = {}
    outputs = {}
    for node in root.iter():
        if _looks_like_input(node):
            inputs[node.attrib['name']] = {'type': node.tag, 'value': node.attrib['value'].lstrip('$')}
        elif _looks_like_output(node):
            outputs[node.attrib['name']] = {'type': node.tag, 'to_name': node.attrib['toName'].split(',')}
    for output_name, info in outputs.items():
        info['inputs'] = []
        for source_name in info['to_name']:
            if source_name not in inputs:
                raise KeyError(
                    'to_name={input_tag_name} is specified for output tag name={output_tag}, but we can\'t find it '
                    'among input tags'.format(input_tag_name=source_name, output_tag=output_name))
            info['inputs'].append(inputs[source_name])
    return outputs
def get_polygon_area(x, y):
    """https://en.wikipedia.org/wiki/Shoelace_formula"""
    assert len(x) == len(y)
    # Shoelace: sum of x_i * y_{i-1} minus sum of y_i * x_{i-1}, halved.
    shifted_y = np.roll(y, 1)
    shifted_x = np.roll(x, 1)
    cross_term = np.dot(x, shifted_y) - np.dot(y, shifted_x)
    return float(0.5 * np.abs(cross_term))
def get_polygon_bounding_box(x, y):
    """Return the axis-aligned bounding box [x, y, width, height] of a polygon."""
    assert len(x) == len(y)
    left = min(x)
    top = min(y)
    width = max(x) - left
    height = max(y) - top
    return [left, top, width, height]
| [
"wave.open",
"urllib.parse.unquote",
"os.makedirs",
"os.path.basename",
"numpy.roll",
"urllib.parse.urlparse",
"os.path.exists",
"logging.getLogger",
"PIL.Image.open",
"os.environ.get",
"requests.get",
"io.open",
"nltk.tokenize.WhitespaceTokenizer",
"operator.itemgetter",
"os.path.join",... | [((300, 327), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (317, 327), False, 'import logging\n'), ((2765, 2796), 'os.environ.get', 'os.environ.get', (['"""LS_UPLOAD_DIR"""'], {}), "('LS_UPLOAD_DIR')\n", (2779, 2796), False, 'import os\n'), ((4415, 4449), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (4427, 4449), False, 'import os\n'), ((4963, 4985), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4973, 4985), False, 'from PIL import Image\n'), ((2857, 2892), 'os.path.join', 'os.path.join', (['project_dir', '"""upload"""'], {}), "(project_dir, 'upload')\n", (2869, 2892), False, 'import os\n'), ((3517, 3551), 'os.path.join', 'os.path.join', (['upload_dir', 'filename'], {}), '(upload_dir, filename)\n', (3529, 3551), False, 'import os\n'), ((3665, 3698), 'shutil.copy', 'shutil.copy', (['filepath', 'output_dir'], {}), '(filepath, output_dir)\n', (3676, 3698), False, 'import shutil\n'), ((4081, 4113), 'os.path.join', 'os.path.join', (['dir_path', 'filename'], {}), '(dir_path, filename)\n', (4093, 4113), False, 'import os\n'), ((4461, 4485), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (4475, 4485), False, 'import os\n'), ((4586, 4603), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4598, 4603), False, 'import requests\n'), ((4880, 4902), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4890, 4902), False, 'from PIL import Image\n'), ((5096, 5127), 'wave.open', 'wave.open', (['audio_path'], {'mode': '"""r"""'}), "(audio_path, mode='r')\n", (5105, 5127), False, 'import wave\n'), ((5229, 5253), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (5243, 5253), False, 'import os\n'), ((5263, 5284), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (5274, 5284), False, 'import os\n'), ((2908, 2934), 'os.path.exists', 'os.path.exists', (['upload_dir'], 
{}), '(upload_dir)\n', (2922, 2934), False, 'import os\n'), ((3943, 3973), 'urllib.parse.unquote', 'urllib.parse.unquote', (['dir_path'], {}), '(dir_path)\n', (3963, 3973), False, 'import urllib\n'), ((3990, 4014), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (4004, 4014), False, 'import os\n'), ((4646, 4674), 'io.open', 'io.open', (['filepath'], {'mode': '"""wb"""'}), "(filepath, mode='wb')\n", (4653, 4674), False, 'import io\n'), ((4775, 4803), 'os.path.basename', 'os.path.basename', (['output_dir'], {}), '(output_dir)\n', (4791, 4803), False, 'import os\n'), ((3764, 3792), 'os.path.basename', 'os.path.basename', (['output_dir'], {}), '(output_dir)\n', (3780, 3792), False, 'import os\n'), ((770, 791), 'nltk.tokenize.WhitespaceTokenizer', 'WhitespaceTokenizer', ([], {}), '()\n', (789, 791), False, 'from nltk.tokenize import WhitespaceTokenizer\n'), ((951, 970), 'operator.itemgetter', 'itemgetter', (['"""start"""'], {}), "('start')\n", (961, 970), False, 'from operator import itemgetter\n'), ((4295, 4308), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (4303, 4308), False, 'from urllib.parse import urlparse\n'), ((6595, 6608), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (6602, 6608), True, 'import numpy as np\n'), ((6622, 6635), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (6629, 6635), True, 'import numpy as np\n')] |
import typing
import sys
import numpy as np
import numba as nb
import scipy.ndimage
def solve(grid: np.ndarray, k: int) -> typing.NoReturn:
    """Print how many cells of *grid* sit at taxicab distance >= k from the
    nearest zero cell (the border counts as zero via padding)."""
    # Surround the grid with zeros so the outside acts as background.
    padded = np.pad(grid, pad_width=1, constant_values=0)
    dist = scipy.ndimage.distance_transform_cdt(input=padded, metric='taxicab')
    print(np.count_nonzero(dist >= k))
def main() -> typing.NoReturn:
    """Read the problem from stdin and run the solver.

    Input format: first line "h w k", then h rows of w characters where
    'o' marks an occupied cell.
    """
    readline = sys.stdin.buffer.readline
    read = sys.stdin.buffer.read
    h, w, k = map(int, readline().split())
    # Read the remaining rows as raw bytes: each row is w characters plus a
    # trailing newline, hence the (h, w + 1) reshape and dropping the last
    # column.
    s = np.frombuffer(
        read(),
        dtype='S1',
    ).reshape(h, w + 1)[:, :-1]
    grid = np.zeros((h, w), np.int64)
    grid[s == b'o'] = 1
    solve(grid, k)
main()
main() | [
"numpy.pad",
"numpy.count_nonzero",
"numpy.zeros"
] | [((152, 196), 'numpy.pad', 'np.pad', (['grid'], {'pad_width': '(1)', 'constant_values': '(0)'}), '(grid, pad_width=1, constant_values=0)\n', (158, 196), True, 'import numpy as np\n'), ((558, 584), 'numpy.zeros', 'np.zeros', (['(h, w)', 'np.int64'], {}), '((h, w), np.int64)\n', (566, 584), True, 'import numpy as np\n'), ((291, 317), 'numpy.count_nonzero', 'np.count_nonzero', (['(cdt >= k)'], {}), '(cdt >= k)\n', (307, 317), True, 'import numpy as np\n')] |
import numpy as np
def mask_sum(m):
    """Return the number of truthy (nonzero) elements in mask *m*.

    Replaces the hand-rolled ones-array/zero-out/sum sequence with the
    equivalent single call ``np.count_nonzero`` — same result, no temporary
    arrays.
    """
    return int(np.count_nonzero(m))
def estimate_accuracy(method, test_img, standard_img):
    """Score a segmentation *method* against a ground-truth mask.

    Returns the ratio of correctly marked pixels (overlap with
    *standard_img*) to falsely marked ones; when there are no false
    positives, the denominator is clamped to 0.5 to keep the ratio finite.
    """
    segmented = method.apply(test_img)
    hits = mask_sum(np.bitwise_and(segmented, standard_img))
    false_hits = mask_sum(np.bitwise_and(np.bitwise_not(standard_img), segmented))
    if false_hits == 0:
        false_hits = 0.5
    return float(hits) / false_hits
| [
"numpy.bitwise_and",
"numpy.logical_not",
"numpy.bitwise_not",
"numpy.ones"
] | [((47, 72), 'numpy.ones', 'np.ones', (['m.shape', 'np.int8'], {}), '(m.shape, np.int8)\n', (54, 72), True, 'import numpy as np\n'), ((80, 97), 'numpy.logical_not', 'np.logical_not', (['m'], {}), '(m)\n', (94, 97), True, 'import numpy as np\n'), ((244, 280), 'numpy.bitwise_and', 'np.bitwise_and', (['result', 'standard_img'], {}), '(result, standard_img)\n', (258, 280), True, 'import numpy as np\n'), ((320, 348), 'numpy.bitwise_not', 'np.bitwise_not', (['standard_img'], {}), '(standard_img)\n', (334, 348), True, 'import numpy as np\n')] |
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2018
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
FeudalPolicy.py - Feudal policy based on the work presente in https://arxiv.org/pdf/1803.03232.pdf
============================================
Copyright CUED Dialogue Systems Group 2015 - 2018
**Relevant Config variables** [Default and possible values]::
[policy]
policytype = feudal
[feudalpolicy]
features = dip/learned/rnn
sortbelief = True/False
si_enc_size = 25
dropout_rate = 0
si_policy_type = dqn
sd_policy_type = dqn
master_policy_type = dqn
actfreq_ds = True/False
.. seealso:: CUED Imports/Dependencies:
import :mod:`ontology.Ontology` |.|
import :mod:`utils.Settings` |.|
import :mod:`utils.ContextLogger`
import :mod:`policy.feudalRL.DIP_parametrisation`
import :mod:`policy.feudalRL.FeudalDQNPolicy`
import :mod:`policy.feudalRL.FeudalBBQNPolicy`
import :mod:`policy.feudalRL.FeudalENACPolicy`
import :mod:`policy.feudalRL.FeudalACERPolicy`
import :mod:`policy.feudalRL.feudalUtils`
************************
'''
__author__ = "cued_dialogue_systems_group"
import sys
import numpy as np
import utils
from utils.Settings import config as cfg
from utils import ContextLogger, DiaAct
import ontology.FlatOntologyManager as FlatOnt
import Policy
import SummaryAction
from policy.feudalRL.DIP_parametrisation import DIP_state, padded_state
from policy.feudalRL.FeudalDQNPolicy import FeudalDQNPolicy
from policy.feudalRL.FeudalBBQNPolicy import FeudalBBQNPolicy
from policy.feudalRL.FeudalENACPolicy import FeudalENACPolicy
from policy.feudalRL.FeudalACERPolicy import FeudalACERPolicy
from policy.feudalRL.feudalUtils import get_feudal_masks
logger = utils.ContextLogger.getLogger('')
class FeudalPolicy(Policy.Policy):
    '''Feudal dialogue policy: a master policy chooses between a
    slot-independent ("give_info") and a slot-dependent ("request_info")
    sub-policy each turn; each sub-policy is itself a trainable RL policy
    (DQN / ACER / ENAC / BBQN, selected via config).

    Derived from :class:`Policy`
    '''
    def __init__(self, in_policy_file, out_policy_file, domainString='CamRestaurants', is_training=False):
        # Reads every [feudalpolicy] config option and builds the two-level
        # feudal hierarchy (master + give_info + request_info policies).
        super(FeudalPolicy, self).__init__(domainString, is_training)

        self.domainString = domainString
        self.domainUtil = FlatOnt.FlatDomainOntology(self.domainString)
        self.in_policy_file = in_policy_file
        self.out_policy_file = out_policy_file
        self.is_training = is_training
        self.prev_state_check = None

        #feudalRL variables
        self.prev_sub_policy = None
        self.prev_master_act = None
        self.prev_master_belief = None
        self.prev_child_act = None
        self.prev_child_belief = None
        # per-action usage counters (reset every dialogue in restart())
        self.action_freq = np.zeros(len(self.actions.action_names))
        # lifetime count of master decisions [give_info, request_info]
        self.master_dec_count = np.array([0.,0.])
        # how many consecutive turns the master picked give_info
        self.gi_dec_inrow = 0

        self.features = 'dip'
        if cfg.has_option('feudalpolicy', 'features'):
            self.features = cfg.get('feudalpolicy', 'features')
        self.si_policy_type = 'dqn'
        if cfg.has_option('feudalpolicy', 'si_policy_type'):
            self.si_policy_type = cfg.get('feudalpolicy', 'si_policy_type')
        self.sd_policy_type = 'dqn'
        if cfg.has_option('feudalpolicy', 'sd_policy_type'):
            self.sd_policy_type = cfg.get('feudalpolicy', 'sd_policy_type')
        self.master_policy_type = self.si_policy_type
        if cfg.has_option('feudalpolicy', 'master_policy_type'):
            self.master_policy_type = cfg.get('feudalpolicy', 'master_policy_type')
        self.sample_master = False
        if cfg.has_option('feudalpolicy', 'sample_master'):
            self.sample_master = cfg.getboolean('feudalpolicy', 'sample_master')
        self.correct_master = False
        if cfg.has_option('feudalpolicy', 'correct_master'):
            self.correct_master = cfg.getboolean('feudalpolicy', 'correct_master')
        self.use_bye = False
        if cfg.has_option('feudalpolicy', 'use_bye'):
            self.use_bye = cfg.getboolean('feudalpolicy', 'use_bye')
        self.reqmore_in_si = True
        if cfg.has_option('feudalpolicy', 'reqmore_in_si'):
            self.reqmore_in_si = cfg.getboolean('feudalpolicy', 'reqmore_in_si')
        self.correction_factor = 0
        if cfg.has_option('feudalpolicy', 'correction_factor'):
            self.correction_factor = cfg.getfloat('feudalpolicy', 'correction_factor')
        self.actfreq_ds = False
        if cfg.has_option('feudalpolicy', 'actfreq_ds'):
            self.actfreq_ds = cfg.getboolean('feudalpolicy', 'actfreq_ds')

        # parameter settings
        self.randomseed = 1234
        if cfg.has_option('GENERAL', 'seed'):
            self.randomseed = cfg.getint('GENERAL', 'seed')

        # Create the feudal structure (including feudal masks)
        self.summaryaction = SummaryAction.SummaryAction(domainString)
        self.full_action_list = self.summaryaction.action_names
        self.master_actions = ['give_info', 'request_info', 'pass']
        self.slot_independent_actions = ["inform",
                                         "inform_byname",
                                         "inform_alternatives"
                                         ]
        if self.reqmore_in_si:
            self.slot_independent_actions.append("reqmore")
        if self.use_bye:
            self.slot_independent_actions.append('bye')
        self.slot_independent_actions.append('pass')

        self.slot_specific_actions = ["request",
                                      "confirm",
                                      "select"]
        #if self.reqmore_in_sd is True:
        #    self.slot_specific_actions.append("reqmore")
        self.slot_specific_actions.append('pass')
        self.master_freq = np.zeros(len(self.master_actions))
        self.si_freq = np.zeros(len(self.slot_independent_actions))
        self.sd_freq = np.zeros(len(self.slot_specific_actions))

        # master policy
        if self.master_policy_type == 'acer':
            self.master_policy = FeudalACERPolicy(self._modify_policyfile('master', in_policy_file),
                                                 self._modify_policyfile('master', out_policy_file),
                                                 domainString=self.domainString, is_training=self.is_training,
                                                 action_names=['give_info', 'request_info', 'pass'],
                                                 slot='si') # pass is always masked, but its needed for implementation
        elif self.master_policy_type == 'enac':
            self.master_policy = FeudalENACPolicy(self._modify_policyfile('master', in_policy_file),
                                                 self._modify_policyfile('master', out_policy_file),
                                                 domainString=self.domainString, is_training=self.is_training,
                                                 action_names=['give_info', 'request_info', 'pass'],
                                                 slot='si') # pass is always masked, but its needed for implementation
        elif self.master_policy_type == 'bbqn':
            self.master_policy = FeudalBBQNPolicy(self._modify_policyfile('master', in_policy_file),
                                                 self._modify_policyfile('master', out_policy_file),
                                                 domainString=self.domainString, is_training=self.is_training,
                                                 action_names=['give_info', 'request_info', 'pass'],
                                                 slot='si') # pass is always masked, but its needed for implementation
        else:
            self.master_policy = FeudalDQNPolicy(self._modify_policyfile('master', in_policy_file),
                                                self._modify_policyfile('master', out_policy_file),
                                                domainString=self.domainString, is_training=self.is_training,
                                                action_names=['give_info', 'request_info', 'pass'],
                                                slot='si') # pass is always masked, but its needed for implementation

        # si policy
        if self.si_policy_type == 'acer':
            self.give_info_policy = FeudalACERPolicy(self._modify_policyfile('gi', in_policy_file),
                                                    self._modify_policyfile('gi', out_policy_file),
                                                    domainString=self.domainString, is_training=self.is_training,
                                                    action_names=self.slot_independent_actions, slot='si')
        elif self.si_policy_type == 'enac':
            self.give_info_policy = FeudalENACPolicy(self._modify_policyfile('gi', in_policy_file),
                                                    self._modify_policyfile('gi', out_policy_file),
                                                    domainString=self.domainString, is_training=self.is_training,
                                                    action_names=self.slot_independent_actions, slot='si')
        elif self.si_policy_type == 'bbqn':
            self.give_info_policy = FeudalBBQNPolicy(self._modify_policyfile('gi', in_policy_file),
                                                    self._modify_policyfile('gi', out_policy_file),
                                                    domainString=self.domainString, is_training=self.is_training,
                                                    action_names=self.slot_independent_actions, slot='si')
        else:
            self.give_info_policy = FeudalDQNPolicy(self._modify_policyfile('gi', in_policy_file),
                                                   self._modify_policyfile('gi', out_policy_file),
                                                   domainString=self.domainString, is_training=self.is_training,
                                                   action_names=self.slot_independent_actions, slot='si')

        # sd policies
        if self.sd_policy_type == 'acer':
            self.request_info_policy = FeudalACERPolicy(self._modify_policyfile('ri', in_policy_file),
                                                       self._modify_policyfile('ri', out_policy_file),
                                                       domainString=self.domainString, is_training=self.is_training,
                                                       action_names=self.slot_specific_actions, slot='sd')
        elif self.sd_policy_type == 'bbqn':
            self.request_info_policy = FeudalBBQNPolicy(self._modify_policyfile('ri', in_policy_file),
                                                       self._modify_policyfile('ri', out_policy_file),
                                                       domainString=self.domainString, is_training=self.is_training,
                                                       action_names=self.slot_specific_actions, slot='sd')
        else:
            self.request_info_policy = FeudalDQNPolicy(self._modify_policyfile('ri', in_policy_file),
                                                      self._modify_policyfile('ri', out_policy_file),
                                                      domainString=self.domainString, is_training=self.is_training,
                                                      action_names=self.slot_specific_actions, slot='sd')

    def _modify_policyfile(self, mod, policyfile):
        """Prefix the policy file's basename with *mod* (e.g. 'master_', 'gi_', 'ri_')."""
        pf_split = policyfile.split('/')
        pf_split[-1] = mod + '_' + pf_split[-1]
        return '/'.join(pf_split)

    def act_on(self, state, hyps=None):
        """Select and return the next system act (DiaAct) for *state*.

        The very first turn emits 'hello()' when startwithhello is set;
        otherwise the decision is delegated to nextAction().
        """
        if self.lastSystemAction is None and self.startwithhello:
            systemAct, nextaIdex = 'hello()', -1
        else:
            systemAct, nextaIdex = self.nextAction(state)
        self.lastSystemAction = systemAct
        self.summaryAct = nextaIdex
        self.prevbelief = state

        systemAct = DiaAct.DiaAct(systemAct)
        return systemAct

    def record(self, reward, domainInControl=None, weight=None, state=None, action=None):
        """Record the turn reward with the master and both sub-policies.

        The sub-policy that was NOT selected on the previous turn records a
        'pass' action (last index of its action list) so its experience
        stream stays aligned with the dialogue.
        """
        self.master_policy.record(reward, domainInControl=self.domainString, state=self.prev_master_belief, action=self.prev_master_act)
        if self.prev_sub_policy == 0:
            self.give_info_policy.record(reward, domainInControl=self.domainString, state=self.prev_child_belief, action=self.prev_child_act)
            self.request_info_policy.record(reward, domainInControl=self.domainString, state=self.prev_child_belief,
                                            action=len(self.slot_specific_actions)-1)
        elif self.prev_sub_policy == 1:
            self.request_info_policy.record(reward, domainInControl=self.domainString, state=self.prev_child_belief, action=self.prev_child_act)
            self.give_info_policy.record(reward, domainInControl=self.domainString, state=self.prev_child_belief,
                                         action=len(self.slot_independent_actions)-1)

    def finalizeRecord(self, reward, domainInControl=None):
        """Propagate the final (end-of-dialogue) reward to all sub-policies."""
        if domainInControl is None:
            domainInControl = self.domainString
        self.master_policy.finalizeRecord(reward)
        self.give_info_policy.finalizeRecord(reward)
        self.request_info_policy.finalizeRecord(reward)

    def convertStateAction(self, state, action):
        pass
        #this aparently is not necesary

    def nextAction(self, beliefstate):
        '''
        select next action

        :param beliefstate:
        :returns: (int) next summary action
        '''
        # compute main belief
        af = None
        if self.actfreq_ds:
            #af = 1./(1 + self.action_freq)
            af = 1./(1 + np.concatenate((self.si_freq, self.sd_freq)))
        if self.features == 'learned' or self.features == 'rnn':
            dipstate = padded_state(beliefstate, domainString=self.domainString, action_freq=af)
        else:
            dipstate = DIP_state(beliefstate,domainString=self.domainString, action_freq=af)
        dipstatevec = dipstate.get_beliefStateVec('general')
        # Make decision on main policy
        master_Q_values = self.master_policy.nextAction(dipstatevec)
        non_exec = self.summaryaction.getNonExecutable(beliefstate.domainStates[beliefstate.currentdomain], self.lastSystemAction)
        masks = get_feudal_masks(non_exec, dipstate.slots, self.slot_independent_actions, self.slot_specific_actions)
        # Non-executable actions get -inf-like mask values added to their Qs.
        master_Q_values = np.add(master_Q_values, masks['master'])
        if self.is_training and self.correction_factor != 0:
            # Bias against over-used master decisions during training.
            correction = (1-self.master_freq/sum(self.master_freq))
            master_Q_values *= correction
        if self.sample_master is True and self.is_training is False:
            # Treat (clipped, renormalised) Q-values as a sampling distribution.
            probs = master_Q_values[:-1]
            if np.any([x for x in probs if x<0]):
                probs[[x < 0 for x in probs]] = 0
            probs /= sum(probs)
            master_decision = np.random.choice([0,1],p=probs)
            #print master_decision
        else:
            master_decision = np.argmax(master_Q_values)
        # After 4 consecutive give_info decisions, force a request_info turn
        # (evaluation only, when correct_master is enabled).
        if master_decision == 0 and self.gi_dec_inrow == 4 and self.correct_master and not self.is_training:
            master_decision = 1
        self.master_freq[master_decision] += 1
        if not self.is_training:
            self.master_dec_count[master_decision] += 1
            if np.sum(self.master_dec_count) % 1000 == 0:
                logger.results('master action frequencies = {}'.format(list(self.master_dec_count)/np.sum(self.master_dec_count))) #TODO: change to debug
        #print 'master Q:', master_Q_values, 'master decision:', master_decision
        self.prev_master_act = master_decision
        self.prev_master_belief = dipstatevec
        if master_decision == 0:
            self.gi_dec_inrow += 1.
            # drop to give_info policy
            self.prev_sub_policy = 0
            child_Q_values = self.give_info_policy.nextAction(dipstatevec)
            child_Q_values = np.add(child_Q_values, masks['give_info'])
            child_decision = np.argmax(child_Q_values)
            summaryAct = self.slot_independent_actions[child_decision]
            self.prev_child_act = child_decision
            self.prev_child_belief = dipstatevec
            #print 'give info Q:', child_Q_values, 'give info decision:', summaryAct
            self.si_freq[child_decision] += 1
        elif master_decision == 1:
            self.gi_dec_inrow = 0
            # drop to request_info policy
            self.prev_sub_policy = 1
            slot_Qs = {}
            # Evaluate the slot-dependent policy once per slot and keep the
            # best (slot, action) pair across all slots.
            best_action = ('slot', 'action', -np.inf)
            for slot in dipstate.slots:
                dipstatevec = dipstate.get_beliefStateVec(slot)
                slot_Qs[slot] = self.request_info_policy.nextAction(dipstatevec)
                slot_Qs[slot] = np.add(slot_Qs[slot], masks['req_info'][slot])
                slot_max_Q = np.max(slot_Qs[slot])
                if slot_max_Q > best_action[2]:
                    best_action = (slot, np.argmax(slot_Qs[slot]), slot_max_Q)
            summaryAct = self.slot_specific_actions[best_action[1]] + '_' + best_action[0]
            if 'reqmore' in summaryAct:
                summaryAct = 'reqmore'
            self.prev_child_act = best_action[1]
            self.prev_child_belief = dipstate.get_beliefStateVec(best_action[0])
            self.sd_freq[best_action[1]] += 1
            #print 'req info Q:', [slot_Qs[s] for s in slot_Qs], 'req info decision:', summaryAct
        self.action_freq[self.actions.action_names.index(summaryAct)] += 1
        #print 1./(1+self.action_freq)
        beliefstate = beliefstate.getDomainState(self.domainUtil.domainString)
        masterAct = self.summaryaction.Convert(beliefstate, summaryAct, self.lastSystemAction)
        nextaIdex = self.full_action_list.index(summaryAct)
        return masterAct, nextaIdex

    def get_feudal_masks(self, belief, last_sys_act, slots):
        """Build executability masks (0 = allowed, -sys.maxint = masked) for
        the master, give_info and per-slot request_info action sets.

        NOTE(review): apparently superseded by feudalUtils.get_feudal_masks,
        which nextAction() calls instead of this method.
        """
        belief = belief.domainStates[belief.currentdomain]
        non_exec = self.summaryaction.getNonExecutable(belief, last_sys_act)
        feudal_masks = {'req_info':{}, 'give_info':None, 'master':None}
        give_info_masks = np.zeros(len(self.slot_independent_actions))
        give_info_masks[-1] = -sys.maxint
        for i, action in enumerate(self.slot_independent_actions):
            if action in non_exec:
                give_info_masks[i] = -sys.maxint
        feudal_masks['give_info'] = give_info_masks
        for slot in slots:
            feudal_masks['req_info'][slot] = np.zeros(len(self.slot_specific_actions))
            feudal_masks['req_info'][slot][-1] = -sys.maxint
            for i, action in enumerate(self.slot_specific_actions):
                if action+'_'+slot in non_exec:
                    feudal_masks['req_info'][slot][i] = -sys.maxint
        # A master branch is allowed only if at least one of its child
        # actions is executable.
        master_masks = np.zeros(3)
        master_masks[:] = -sys.maxint
        if 0 in give_info_masks:
            master_masks[0] = 0
        for slot in slots:
            if 0 in feudal_masks['req_info'][slot]:
                master_masks[1] = 0
        feudal_masks['master'] = master_masks
        #print(non_exec)
        #print(feudal_masks)
        return feudal_masks

    def train(self):
        '''
        call this function when the episode ends
        '''
        #just train each sub-policy
        #print 'train master'
        self.master_policy.train()
        #print 'train gi'
        self.give_info_policy.train()
        #print 'train ri'
        self.request_info_policy.train()

    def savePolicy(self, FORCE_SAVE=False):
        """
        Does not use this, cause it will be called from agent after every episode.
        we want to save the policy only periodically.
        """
        pass

    def savePolicyInc(self, FORCE_SAVE=False):
        """
        save model and replay buffer
        """
        # just save each sub-policy
        self.master_policy.savePolicyInc()
        self.give_info_policy.savePolicyInc()
        self.request_info_policy.savePolicyInc()

    def loadPolicy(self, filename):
        """
        load model and replay buffer
        """
        # load policy models one by one
        pass

    def restart(self):
        """Reset per-dialogue state and usage counters in this policy and
        in all sub-policies."""
        self.summaryAct = None
        self.lastSystemAction = None
        self.prevbelief = None
        self.actToBeRecorded = None
        self.master_policy.restart()
        self.give_info_policy.restart()
        self.request_info_policy.restart()
        self.master_freq = np.zeros(len(self.master_actions))
        self.si_freq = np.zeros(len(self.slot_independent_actions))
        self.sd_freq = np.zeros(len(self.slot_specific_actions))
        self.action_freq = np.zeros(len(self.actions.action_names))
# END OF FILE | [
"ontology.FlatOntologyManager.FlatDomainOntology",
"policy.feudalRL.feudalUtils.get_feudal_masks",
"numpy.sum",
"utils.Settings.config.getint",
"numpy.argmax",
"utils.Settings.config.has_option",
"utils.DiaAct.DiaAct",
"utils.Settings.config.get",
"policy.feudalRL.DIP_parametrisation.padded_state",
... | [((2610, 2643), 'utils.ContextLogger.getLogger', 'utils.ContextLogger.getLogger', (['""""""'], {}), "('')\n", (2639, 2643), False, 'import utils\n'), ((2971, 3016), 'ontology.FlatOntologyManager.FlatDomainOntology', 'FlatOnt.FlatDomainOntology', (['self.domainString'], {}), '(self.domainString)\n', (2997, 3016), True, 'import ontology.FlatOntologyManager as FlatOnt\n'), ((3500, 3520), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3508, 3520), True, 'import numpy as np\n'), ((3591, 3633), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""features"""'], {}), "('feudalpolicy', 'features')\n", (3605, 3633), True, 'from utils.Settings import config as cfg\n'), ((3746, 3794), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""si_policy_type"""'], {}), "('feudalpolicy', 'si_policy_type')\n", (3760, 3794), True, 'from utils.Settings import config as cfg\n'), ((3919, 3967), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""sd_policy_type"""'], {}), "('feudalpolicy', 'sd_policy_type')\n", (3933, 3967), True, 'from utils.Settings import config as cfg\n'), ((4110, 4162), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""master_policy_type"""'], {}), "('feudalpolicy', 'master_policy_type')\n", (4124, 4162), True, 'from utils.Settings import config as cfg\n'), ((4294, 4341), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""sample_master"""'], {}), "('feudalpolicy', 'sample_master')\n", (4308, 4341), True, 'from utils.Settings import config as cfg\n'), ((4471, 4519), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""correct_master"""'], {}), "('feudalpolicy', 'correct_master')\n", (4485, 4519), True, 'from utils.Settings import config as cfg\n'), ((4644, 4685), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""use_bye"""'], {}), 
"('feudalpolicy', 'use_bye')\n", (4658, 4685), True, 'from utils.Settings import config as cfg\n'), ((4801, 4848), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""reqmore_in_si"""'], {}), "('feudalpolicy', 'reqmore_in_si')\n", (4815, 4848), True, 'from utils.Settings import config as cfg\n'), ((4977, 5028), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""correction_factor"""'], {}), "('feudalpolicy', 'correction_factor')\n", (4991, 5028), True, 'from utils.Settings import config as cfg\n'), ((5160, 5204), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""feudalpolicy"""', '"""actfreq_ds"""'], {}), "('feudalpolicy', 'actfreq_ds')\n", (5174, 5204), True, 'from utils.Settings import config as cfg\n'), ((5354, 5387), 'utils.Settings.config.has_option', 'cfg.has_option', (['"""GENERAL"""', '"""seed"""'], {}), "('GENERAL', 'seed')\n", (5368, 5387), True, 'from utils.Settings import config as cfg\n'), ((5543, 5584), 'SummaryAction.SummaryAction', 'SummaryAction.SummaryAction', (['domainString'], {}), '(domainString)\n', (5570, 5584), False, 'import SummaryAction\n'), ((12747, 12771), 'utils.DiaAct.DiaAct', 'DiaAct.DiaAct', (['systemAct'], {}), '(systemAct)\n', (12760, 12771), False, 'from utils import ContextLogger, DiaAct\n'), ((15143, 15248), 'policy.feudalRL.feudalUtils.get_feudal_masks', 'get_feudal_masks', (['non_exec', 'dipstate.slots', 'self.slot_independent_actions', 'self.slot_specific_actions'], {}), '(non_exec, dipstate.slots, self.slot_independent_actions,\n self.slot_specific_actions)\n', (15159, 15248), False, 'from policy.feudalRL.feudalUtils import get_feudal_masks\n'), ((15271, 15311), 'numpy.add', 'np.add', (['master_Q_values', "masks['master']"], {}), "(master_Q_values, masks['master'])\n", (15277, 15311), True, 'import numpy as np\n'), ((19671, 19682), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (19679, 19682), True, 'import numpy as np\n'), ((3663, 3698), 
'utils.Settings.config.get', 'cfg.get', (['"""feudalpolicy"""', '"""features"""'], {}), "('feudalpolicy', 'features')\n", (3670, 3698), True, 'from utils.Settings import config as cfg\n'), ((3830, 3871), 'utils.Settings.config.get', 'cfg.get', (['"""feudalpolicy"""', '"""si_policy_type"""'], {}), "('feudalpolicy', 'si_policy_type')\n", (3837, 3871), True, 'from utils.Settings import config as cfg\n'), ((4003, 4044), 'utils.Settings.config.get', 'cfg.get', (['"""feudalpolicy"""', '"""sd_policy_type"""'], {}), "('feudalpolicy', 'sd_policy_type')\n", (4010, 4044), True, 'from utils.Settings import config as cfg\n'), ((4202, 4247), 'utils.Settings.config.get', 'cfg.get', (['"""feudalpolicy"""', '"""master_policy_type"""'], {}), "('feudalpolicy', 'master_policy_type')\n", (4209, 4247), True, 'from utils.Settings import config as cfg\n'), ((4376, 4423), 'utils.Settings.config.getboolean', 'cfg.getboolean', (['"""feudalpolicy"""', '"""sample_master"""'], {}), "('feudalpolicy', 'sample_master')\n", (4390, 4423), True, 'from utils.Settings import config as cfg\n'), ((4555, 4603), 'utils.Settings.config.getboolean', 'cfg.getboolean', (['"""feudalpolicy"""', '"""correct_master"""'], {}), "('feudalpolicy', 'correct_master')\n", (4569, 4603), True, 'from utils.Settings import config as cfg\n'), ((4714, 4755), 'utils.Settings.config.getboolean', 'cfg.getboolean', (['"""feudalpolicy"""', '"""use_bye"""'], {}), "('feudalpolicy', 'use_bye')\n", (4728, 4755), True, 'from utils.Settings import config as cfg\n'), ((4883, 4930), 'utils.Settings.config.getboolean', 'cfg.getboolean', (['"""feudalpolicy"""', '"""reqmore_in_si"""'], {}), "('feudalpolicy', 'reqmore_in_si')\n", (4897, 4930), True, 'from utils.Settings import config as cfg\n'), ((5067, 5116), 'utils.Settings.config.getfloat', 'cfg.getfloat', (['"""feudalpolicy"""', '"""correction_factor"""'], {}), "('feudalpolicy', 'correction_factor')\n", (5079, 5116), True, 'from utils.Settings import config as cfg\n'), ((5236, 5280), 
'utils.Settings.config.getboolean', 'cfg.getboolean', (['"""feudalpolicy"""', '"""actfreq_ds"""'], {}), "('feudalpolicy', 'actfreq_ds')\n", (5250, 5280), True, 'from utils.Settings import config as cfg\n'), ((5419, 5448), 'utils.Settings.config.getint', 'cfg.getint', (['"""GENERAL"""', '"""seed"""'], {}), "('GENERAL', 'seed')\n", (5429, 5448), True, 'from utils.Settings import config as cfg\n'), ((14646, 14719), 'policy.feudalRL.DIP_parametrisation.padded_state', 'padded_state', (['beliefstate'], {'domainString': 'self.domainString', 'action_freq': 'af'}), '(beliefstate, domainString=self.domainString, action_freq=af)\n', (14658, 14719), False, 'from policy.feudalRL.DIP_parametrisation import DIP_state, padded_state\n'), ((14757, 14827), 'policy.feudalRL.DIP_parametrisation.DIP_state', 'DIP_state', (['beliefstate'], {'domainString': 'self.domainString', 'action_freq': 'af'}), '(beliefstate, domainString=self.domainString, action_freq=af)\n', (14766, 14827), False, 'from policy.feudalRL.DIP_parametrisation import DIP_state, padded_state\n'), ((15608, 15643), 'numpy.any', 'np.any', (['[x for x in probs if x < 0]'], {}), '([x for x in probs if x < 0])\n', (15614, 15643), True, 'import numpy as np\n'), ((15755, 15788), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': 'probs'}), '([0, 1], p=probs)\n', (15771, 15788), True, 'import numpy as np\n'), ((15866, 15892), 'numpy.argmax', 'np.argmax', (['master_Q_values'], {}), '(master_Q_values)\n', (15875, 15892), True, 'import numpy as np\n'), ((16805, 16847), 'numpy.add', 'np.add', (['child_Q_values', "masks['give_info']"], {}), "(child_Q_values, masks['give_info'])\n", (16811, 16847), True, 'import numpy as np\n'), ((16877, 16902), 'numpy.argmax', 'np.argmax', (['child_Q_values'], {}), '(child_Q_values)\n', (16886, 16902), True, 'import numpy as np\n'), ((14512, 14556), 'numpy.concatenate', 'np.concatenate', (['(self.si_freq, self.sd_freq)'], {}), '((self.si_freq, self.sd_freq))\n', (14526, 14556), True, 
'import numpy as np\n'), ((16185, 16214), 'numpy.sum', 'np.sum', (['self.master_dec_count'], {}), '(self.master_dec_count)\n', (16191, 16214), True, 'import numpy as np\n'), ((17648, 17694), 'numpy.add', 'np.add', (['slot_Qs[slot]', "masks['req_info'][slot]"], {}), "(slot_Qs[slot], masks['req_info'][slot])\n", (17654, 17694), True, 'import numpy as np\n'), ((17724, 17745), 'numpy.max', 'np.max', (['slot_Qs[slot]'], {}), '(slot_Qs[slot])\n', (17730, 17745), True, 'import numpy as np\n'), ((16327, 16356), 'numpy.sum', 'np.sum', (['self.master_dec_count'], {}), '(self.master_dec_count)\n', (16333, 16356), True, 'import numpy as np\n'), ((17835, 17859), 'numpy.argmax', 'np.argmax', (['slot_Qs[slot]'], {}), '(slot_Qs[slot])\n', (17844, 17859), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from scipy.ndimage.filters import uniform_filter1d
from scipy.ndimage.fourier import fourier_gaussian
from .utils import print_update, validate_tuple
# When loading module, try to use pyFFTW ("Fastest Fourier Transform in the
# West") if it is available.
try:
import pyfftw
except ImportError:
# Use numpy.
USING_FFTW = False
fftn = np.fft.fftn
ifftn = np.fft.ifftn
else:
USING_FFTW = True
pyfftw.interfaces.cache.enable()
planned = False
def fftn(a):
global planned
if not planned:
print_update("Note: FFTW is configuring itself. This will take " +
"several seconds, but subsequent calls will run " +
"*much* faster.")
planned = True
a = pyfftw.n_byte_align(a, a.dtype.alignment)
return pyfftw.interfaces.numpy_fft.fftn(a).astype(np.complex128)
def ifftn(a):
a = pyfftw.n_byte_align(a, a.dtype.alignment)
return pyfftw.interfaces.numpy_fft.ifftn(a)
def bandpass(image, lshort, llong, threshold=None):
    """Convolve with a Gaussian to remove short-wavelength noise,
    and subtract out long-wavelength variations,
    retaining features of intermediate scale.

    Parameters
    ----------
    image : ndarray
    lshort : small-scale cutoff (noise)
    llong : large-scale cutoff
    for both lshort and llong:
        give a tuple value for different sizes per dimension
        give int value for same value for all dimensions
        when 2*lshort >= llong, no noise filtering is applied
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    ndarray, the bandpassed image
    """
    lshort = validate_tuple(lshort, image.ndim)
    llong = validate_tuple(llong, image.ndim)
    if np.any([x*2 >= y for (x, y) in zip(lshort, llong)]):
        # Fixed: the two fragments used to be joined without a space,
        # producing "...must be morethan twice...".
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
    if threshold is None:
        # Integer images carry at least one count of noise; float images are
        # assumed scaled to [0, 1], hence the 1/256 default.
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1/256.
    # Perform a rolling average (boxcar) with kernel size = 2*llong + 1
    boxcar = np.asarray(image)
    for (axis, size) in enumerate(llong):
        boxcar = uniform_filter1d(boxcar, size*2+1, axis, mode='nearest',
                                  cval=0)
    # Perform a gaussian filter (in Fourier space, via fftn/ifftn above)
    gaussian = ifftn(fourier_gaussian(fftn(image), lshort)).real
    result = gaussian - boxcar
    # Suppress everything at or below threshold to kill residual noise.
    return np.where(result > threshold, result, 0)
def scalefactor_to_gamut(image, original_dtype):
    """Return the factor that maps the image maximum onto the full-scale
    value of *original_dtype*."""
    gamut_top = np.iinfo(original_dtype).max
    return gamut_top / image.max()
def scale_to_gamut(image, original_dtype, scale_factor=None):
    """Clip negatives, rescale into the gamut of *original_dtype*, and cast.

    When *scale_factor* is omitted, it is chosen so that the image maximum
    maps onto the dtype's maximum value.
    """
    factor = scale_factor
    if factor is None:
        factor = scalefactor_to_gamut(image, original_dtype)
    nonnegative = image.clip(min=0.)
    return (factor * nonnegative).astype(original_dtype)
| [
"numpy.asarray",
"numpy.iinfo",
"numpy.where",
"scipy.ndimage.filters.uniform_filter1d",
"pyfftw.interfaces.numpy_fft.fftn",
"pyfftw.interfaces.cache.enable",
"pyfftw.n_byte_align",
"pyfftw.interfaces.numpy_fft.ifftn",
"numpy.issubdtype"
] | [((560, 592), 'pyfftw.interfaces.cache.enable', 'pyfftw.interfaces.cache.enable', ([], {}), '()\n', (590, 592), False, 'import pyfftw\n'), ((2389, 2406), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2399, 2406), True, 'import numpy as np\n'), ((2705, 2744), 'numpy.where', 'np.where', (['(result > threshold)', 'result', '(0)'], {}), '(result > threshold, result, 0)\n', (2713, 2744), True, 'import numpy as np\n'), ((916, 957), 'pyfftw.n_byte_align', 'pyfftw.n_byte_align', (['a', 'a.dtype.alignment'], {}), '(a, a.dtype.alignment)\n', (935, 957), False, 'import pyfftw\n'), ((1062, 1103), 'pyfftw.n_byte_align', 'pyfftw.n_byte_align', (['a', 'a.dtype.alignment'], {}), '(a, a.dtype.alignment)\n', (1081, 1103), False, 'import pyfftw\n'), ((1119, 1155), 'pyfftw.interfaces.numpy_fft.ifftn', 'pyfftw.interfaces.numpy_fft.ifftn', (['a'], {}), '(a)\n', (1152, 1155), False, 'import pyfftw\n'), ((2193, 2231), 'numpy.issubdtype', 'np.issubdtype', (['image.dtype', 'np.integer'], {}), '(image.dtype, np.integer)\n', (2206, 2231), True, 'import numpy as np\n'), ((2466, 2534), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['boxcar', '(size * 2 + 1)', 'axis'], {'mode': '"""nearest"""', 'cval': '(0)'}), "(boxcar, size * 2 + 1, axis, mode='nearest', cval=0)\n", (2482, 2534), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((2807, 2831), 'numpy.iinfo', 'np.iinfo', (['original_dtype'], {}), '(original_dtype)\n', (2815, 2831), True, 'import numpy as np\n'), ((973, 1008), 'pyfftw.interfaces.numpy_fft.fftn', 'pyfftw.interfaces.numpy_fft.fftn', (['a'], {}), '(a)\n', (1005, 1008), False, 'import pyfftw\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# import plotly.graph_objs as go
# Keep pandas console output compact while exploring the dataframe.
pd.options.display.max_columns = 7
pd.options.display.max_rows = 7
# %% Data Analysis
# NOTE(review): absolute local path — this script only runs on the author's machine.
data = pd.read_csv('D:/Masaüstüm/Projects/PythonProjects/Regression Types/K Nearest Neighbors/cancer_data.csv')
data.columns
"""
Index(['id', 'diagnosis', 'radius_mean', 'texture_mean', 'perimeter_mean',
       'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean',
       'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean',
       'radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se',
       'compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se',
       'fractal_dimension_se', 'radius_worst', 'texture_worst',
       'perimeter_worst', 'area_worst', 'smoothness_worst',
       'compactness_worst', 'concavity_worst', 'concave points_worst',
       'symmetry_worst', 'fractal_dimension_worst', 'Unnamed: 32'],
      dtype='object')
"""
# Drop the non-predictive id column and the empty trailing 'Unnamed: 32' artifact.
data.drop(['id', 'Unnamed: 32'], axis = 1, inplace = True)
data['diagnosis'].value_counts()
"""
B    357 - benign
M    212 - malignant
"""
# Split rows by class so each can be plotted in its own color.
benign = data[data['diagnosis'] == 'B']
malignant = data[data['diagnosis'] == "M"]
# One scatter plot per feature, each against radius_mean, saved to disk.
for each,column in enumerate(benign.columns):
    if column == 'diagnosis':
        continue
    plt.figure(each)
    plt.scatter(benign.radius_mean, benign[column], color = 'green', label = 'benign')
    plt.scatter(malignant.radius_mean, malignant[column], color = 'red', label = 'malignant')
    plt.xlabel('radius_mean')
    plt.ylabel(column)
    plt.legend()
    plt.savefig("radius_mean and " + column + ".jpg")
    plt.show()
# they are in images folder, don't run if you dont want to.
# it generates more than 20 figures
# Encode the target: benign -> 0, malignant -> 1.
data['diagnosis'] = [0 if each == 'B' else 1 for each in data['diagnosis']]
# since sklearn does not understand string(object), we convert to int.
x_n = data.drop(['diagnosis'], axis = 1)
y = data['diagnosis'].values.reshape(-1,1)
# Min-max scaling so no feature dominates the Euclidean distances in KNN.
x = (x_n - np.min(x_n)) / (np.max(x_n) - np.min(x_n)) # normalized
# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.32, random_state = 13)
# %% KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 4) # neighbor counts
knn.fit(x_train,y_train)
knn.predict(x_test)
# %% evaluate model
print("{} neighbors score {}" .format(4, knn.score(x_test,y_test)))
# 4 neighbors score 0.9836065573770492
# %% Find Best K
# Sweep k = 1..29 and record test accuracy to pick the best neighbor count.
list_k = []
for each in range(1,30):
    knn_n = KNeighborsClassifier(n_neighbors = each)
    knn_n.fit(x_train,y_train)
    list_k.append(knn_n.score(x_test,y_test))
plt.plot(range(1,30), list_k)
plt.xlabel("Neighbor Count")
plt.ylabel("Accuracy")
#plt.legend()
plt.show()
| [
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"... | [((222, 336), 'pandas.read_csv', 'pd.read_csv', (['"""D:/Masaüstüm/Projects/PythonProjects/Regression Types/K Nearest Neighbors/cancer_data.csv"""'], {}), "(\n 'D:/Masaüstüm/Projects/PythonProjects/Regression Types/K Nearest Neighbors/cancer_data.csv'\n )\n", (233, 336), True, 'import pandas as pd\n'), ((2182, 2237), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.32)', 'random_state': '(13)'}), '(x, y, test_size=0.32, random_state=13)\n', (2198, 2237), False, 'from sklearn.model_selection import train_test_split\n'), ((2307, 2342), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(4)'}), '(n_neighbors=4)\n', (2327, 2342), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2758, 2786), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Neighbor Count"""'], {}), "('Neighbor Count')\n", (2768, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2787, 2809), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (2797, 2809), True, 'import matplotlib.pyplot as plt\n'), ((2824, 2834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2832, 2834), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1352), 'matplotlib.pyplot.figure', 'plt.figure', (['each'], {}), '(each)\n', (1346, 1352), True, 'import matplotlib.pyplot as plt\n'), ((1357, 1435), 'matplotlib.pyplot.scatter', 'plt.scatter', (['benign.radius_mean', 'benign[column]'], {'color': '"""green"""', 'label': '"""benign"""'}), "(benign.radius_mean, benign[column], color='green', label='benign')\n", (1368, 1435), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1534), 'matplotlib.pyplot.scatter', 'plt.scatter', (['malignant.radius_mean', 'malignant[column]'], {'color': '"""red"""', 'label': '"""malignant"""'}), "(malignant.radius_mean, malignant[column], color='red', label=\n 'malignant')\n", (1455, 1534), True, 'import matplotlib.pyplot as 
plt\n'), ((1538, 1563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""radius_mean"""'], {}), "('radius_mean')\n", (1548, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1586), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['column'], {}), '(column)\n', (1578, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1603), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1601, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1657), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('radius_mean and ' + column + '.jpg')"], {}), "('radius_mean and ' + column + '.jpg')\n", (1619, 1657), True, 'import matplotlib.pyplot as plt\n'), ((1662, 1672), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1670, 1672), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2643), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'each'}), '(n_neighbors=each)\n', (2625, 2643), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2015, 2026), 'numpy.min', 'np.min', (['x_n'], {}), '(x_n)\n', (2021, 2026), True, 'import numpy as np\n'), ((2031, 2042), 'numpy.max', 'np.max', (['x_n'], {}), '(x_n)\n', (2037, 2042), True, 'import numpy as np\n'), ((2045, 2056), 'numpy.min', 'np.min', (['x_n'], {}), '(x_n)\n', (2051, 2056), True, 'import numpy as np\n')] |
import math
import scipy.optimize
import pandas as pd
import numpy as np
# Goal: Fit data to find three parameters
# Goal: optimize ref resistor value
# Goal: make sure min and max within range
# Goal: make sure min and max +/-1C detectable
class Thermistor:
    """ Thermistor model based on the Steinhart-Hart equation. """

    def __init__(self, Rt=10000, params=None):
        """
        @param Rt: thermistor reference resistance in Ohm (at 25C).
        @param params: initial (A, B, D) Steinhart-Hart coefficients;
                       a fresh default array is used when None.
        """
        self.Rt = Rt
        # Avoid the mutable-default-argument trap: the original default
        # np.array was a single object shared by every instance.
        self.params = np.array([0.003354, 2.5e-4, 0]) if params is None else params

    def sh(self, R, A, B, D):
        """
        Steinhart-Hart function.
        Returns temperature in C.
        """
        log = np.log(R/float(self.Rt))
        return 1.0/(A + B*log + D*log**3)-273.15

    def fit(self, data):
        """ Find A, B and D for the Steinhart-Hart equation.
        1/T= A + B*ln(R/Rt) + D*ln(R/Rt)^3.
        @param data: pandas dataframe, first column is temperature (C), second is resistance (Ohm).
        """
        # Filter on temperatures >= 0C
        view = data[data.iloc[:, 0] >= 0]
        self.params, self.err = scipy.optimize.curve_fit(
            self.sh, view.iloc[:, 1], view.iloc[:, 0], self.params)

    def temperature(self, r):
        """ Predict the temperature for a certain resistance. """
        return self.sh(r, *self.params)
class Sensor:
    """ Model the system with the thermistor, ADC and reference resistor. """

    def __init__(self, thermistor=None, resistor=3000, adc_max=1024):
        """
        @param thermistor: Thermistor model; a fresh default one when None.
        @param resistor: reference (series) resistor value in Ohm.
        @param adc_max: ADC full-scale count (2**bits); 1024 for a 10-bit ADC.
        """
        # The original default `thermistor=Thermistor()` created a single
        # shared instance at definition time; build one per Sensor instead.
        self.thermistor = Thermistor() if thermistor is None else thermistor
        self.resistor = resistor
        self.adc_max = adc_max

    def temp(self, N):
        """
        Return the temperature for a reading of N on the ADC.

        The thermistor resistance follows from the voltage divider:
        R = N * R_ref / (adc_max - N).
        """
        R = float(N*self.resistor)/(self.adc_max-N)
        return self.thermistor.temperature(R)
| [
"numpy.array"
] | [((340, 372), 'numpy.array', 'np.array', (['[0.003354, 0.00025, 0]'], {}), '([0.003354, 0.00025, 0])\n', (348, 372), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from tqdm import tqdm
from collections import Counter
class AutoDatatyper(object):
    """Infer the semantic type of a data column ('numeric', 'character',
    'time' or 'complex') by repeatedly sampling the column, turning each
    sample into a fixed-length vector of character statistics, and asking
    a supplied estimator to classify the vectors.
    """
    def __init__(self, vector_dim=300, num_rows=1000):
        # vector_dim: how many values are sampled from a column per vector.
        # num_rows: how many sample vectors to generate per column (training).
        self.vector_dim = vector_dim
        self.num_rows = num_rows
        # Maps the estimator's integer predictions back to type names.
        self.decode_dict = {0: 'numeric', 1: 'character', 2: 'time', 3: 'complex'}
    def create_dataset_from_data_column(self, iterable, label):
        """Build a training matrix of num_rows feature vectors (and a matching
        (num_rows, 1) label column) from one data column.

        Raises ValueError('All data are NaNs.') when the column is empty
        after NaN removal.
        """
        iterable_str = self.__remove_na_and_stringify_iterable(iterable)
        choice_range = len(iterable_str)
        vector_list = []
        for i in tqdm(list(range(self.num_rows))):
            try:
                vec = self.__get_sample_from_column_data(iterable_str, choice_range)
            except ValueError:
                # np.random.choice raises ValueError when choice_range == 0.
                raise ValueError('All data are NaNs.')
            vector_list.append(vec)
        return np.array(vector_list), np.array([label] * self.num_rows).reshape(-1, 1)
    def __remove_na_and_stringify_iterable(self, iterable):
        """Drop NaNs from *iterable* and return its values as a string array."""
        # Convert iterable to Series
        if not isinstance(iterable, pd.Series):
            iterable = pd.Series(iterable)
        # Drop NAs
        iterable.dropna(inplace=True)
        iterable = iterable.values
        iterable_str = iterable.astype(str)
        return iterable_str
    def __get_data_column_type(self, iterable, estimator, robustness):
        """Classify one column: draw int(100*robustness) sample vectors,
        predict each, and return (type_name, confidence) where confidence is
        the share of predictions agreeing with the most common rounded value.

        Returns ('NaN', 1.0) when the column has no non-NaN data.
        """
        iterable_str = self.__remove_na_and_stringify_iterable(iterable)
        choice_range = len(iterable_str)
        vector_list = []
        for i in (range(int(100 * robustness))):
            try:
                vec = self.__get_sample_from_column_data(iterable_str, choice_range)
            except ValueError:
                return 'NaN', 1.0
            vector_list.append(vec)
        prediction = estimator.predict(np.array(vector_list))
        # Round predictions to one decimal before counting agreement.
        prediction_count = Counter(np.vectorize(lambda x: round(x, 1))(prediction))
        confidence = prediction_count.most_common(1)[0][1] / len(prediction)
        # The final label is decoded from the rounded mean prediction.
        return self.decode_dict[round(prediction.mean())], confidence
    def get_data_column_type_df(self, data, estimator, robustness=0.1):
        """Classify every column of *data* (DataFrame or column-indexable
        array) and return {column_name: (type_name, confidence)}.
        """
        result_dict = {}
        # NOTE(review): both branches run an identical loop; only the source
        # of column_names differs. Could be collapsed — confirm intent.
        if isinstance(data, pd.DataFrame):
            column_names = data.columns.values
            for i, colname in tqdm(list(enumerate(column_names))):
                datatype, confidence = self.__get_data_column_type(data[colname], estimator, robustness=robustness)
                result_dict[colname] = datatype, confidence
        else:
            column_names = list(range(data.shape[1]))
            for i, colname in tqdm(list(enumerate(column_names))):
                datatype, confidence = self.__get_data_column_type(data[colname], estimator, robustness=robustness)
                result_dict[colname] = datatype, confidence
        return result_dict
    def __get_sample_from_column_data(self, iterable_str, choice_range):
        """Draw vector_dim values (with replacement) from the column and
        return one 1-D feature vector of per-value character statistics plus
        their column-wise sum/avg/std/distinct-count aggregates.
        """
        indices = np.random.choice(choice_range, self.vector_dim)
        stringified_data = iterable_str[indices]
        raw_feature_names = ['length', 'max', 'min', 'range', 'sum', 'avg', 'std', 'float', 'time', \
                             'nan', 'json1', 'json2', 'json3', 'array1', 'array2', 'array3', 'array4', \
                             'tag1', 'tag2', 'tag3', 'tag4', 'url']
        # Per-value statistics over the characters' ordinal codes, plus counts
        # of characters typical of floats, JSON, arrays, markup, and URLs.
        raw_feature_dict = {
            'length': np.vectorize(len)(stringified_data),
            'max': np.vectorize(lambda x: max([ord(char) for char in x]))(stringified_data),
            'min': np.vectorize(lambda x: min([ord(char) for char in x]))(stringified_data),
            'range': np.vectorize(lambda x: max([ord(char) for char in x]) - min([ord(char) for char in x]))(stringified_data),
            'sum': np.vectorize(lambda x: sum([ord(char) for char in x]))(stringified_data),
            'avg': np.vectorize(lambda x: sum([ord(char) for char in x]))(stringified_data) / np.vectorize(len)(stringified_data),
            'std': np.vectorize(lambda x: np.array([ord(char) for char in x]).std())(stringified_data),
            'float': np.vectorize(lambda x: x.count('.'))(stringified_data),
            'time': np.vectorize(self.__contains_time_characters)(stringified_data),
            'nan': np.vectorize(self.__is_nan)(stringified_data),
            'json1': np.vectorize(lambda x: x.count('{'))(stringified_data),
            'json2': np.vectorize(lambda x: x.count('}'))(stringified_data),
            'json3': np.vectorize(lambda x: x.count(':'))(stringified_data),
            'array1': np.vectorize(lambda x: x.count('['))(stringified_data),
            'array2': np.vectorize(lambda x: x.count(']'))(stringified_data),
            'array3': np.vectorize(lambda x: x.count(','))(stringified_data),
            'array4': np.vectorize(lambda x: x.count(';'))(stringified_data),
            'tag1': np.vectorize(lambda x: x.count('\\'))(stringified_data),
            'tag2': np.vectorize(lambda x: x.count('/'))(stringified_data),
            'tag3': np.vectorize(lambda x: x.count('|'))(stringified_data),
            'tag4': np.vectorize(lambda x: x.count('-'))(stringified_data),
            'url': np.vectorize(self.__contains_url_characters)(stringified_data)
        }
        range_feature_dict = {feature_name + '_range':
            np.array([raw_feature_dict[feature_name].max() - raw_feature_dict[feature_name].min()]) for feature_name in raw_feature_names
        }
        sum_feature_dict = {feature_name + '_sum':
            np.array([raw_feature_dict[feature_name].sum()]) for feature_name in raw_feature_names
        }
        avg_feature_dict = {feature_name + '_avg':
            np.array([raw_feature_dict[feature_name].mean()]) for feature_name in raw_feature_names
        }
        std_feature_dict = {feature_name + '_std':
            np.array([raw_feature_dict[feature_name].std()]) for feature_name in raw_feature_names
        }
        count_distinct_feature_dict = {feature_name + '_distinct':
            np.array([len(Counter(raw_feature_dict[feature_name]))]) for feature_name in raw_feature_names
        }
        # NOTE(review): range_feature_dict is computed but never concatenated
        # below — dead work or an accidental omission; confirm.
        concat_list = [value for key, value in raw_feature_dict.items()] \
                      + [value for key, value in sum_feature_dict.items()] \
                      + [value for key, value in avg_feature_dict.items()] \
                      + [value for key, value in std_feature_dict.items()] \
                      + [value for key, value in count_distinct_feature_dict.items()]
        vec = np.concatenate(concat_list)
        return vec
    def __contains_time_characters(self, string):
        """Count how many date/time-ish tokens (separators, unit names, and
        CJK date characters) occur in *string*."""
        time_chars = {':', '/', '-', '\\', '.', '+',
                      'hr', 'hour', 'min', 'minute', 'sec', 'second',
                      'day', 'week', 'year',
                      '年', '月', '日', '时', '分', '秒',
                      '年', '月', '日', '時', '分', '秒'}
        count = 0
        for char in time_chars:
            if char in string:
                count += 1
        return count
    def __is_nan(self, string):
        # 1 when the value is the literal string 'nan' (any case), else 0.
        return 1 if string.lower() == 'nan' else 0
    def __contains_url_characters(self, string):
        """Count how many URL-ish tokens occur in *string*."""
        url_chars = {'http', '//', 'www', 'com', 'cn', '_'}
        count = 0
        for char in url_chars:
            if char in string:
                count += 1
        return count
    def reduce_data_dict_to_ndarray(self, data_dict):
        """Stack per-column (features, labels) pairs into one feature matrix
        and one label array."""
        return np.concatenate([value[0] for key, value in data_dict.items()], axis=0), np.concatenate([value[1] for key, value in data_dict.items()])
    def consolidate_data(self, foundation_features, new_features, foundation_label, new_label):
        """Append a new (features, labels) batch to an existing dataset."""
        return np.concatenate((foundation_features, new_features), axis=0), np.concatenate((foundation_label, new_label), axis=0)
"numpy.vectorize",
"numpy.array",
"pandas.Series",
"numpy.random.choice",
"collections.Counter",
"numpy.concatenate"
] | [((2943, 2990), 'numpy.random.choice', 'np.random.choice', (['choice_range', 'self.vector_dim'], {}), '(choice_range, self.vector_dim)\n', (2959, 2990), True, 'import numpy as np\n'), ((6439, 6466), 'numpy.concatenate', 'np.concatenate', (['concat_list'], {}), '(concat_list)\n', (6453, 6466), True, 'import numpy as np\n'), ((843, 864), 'numpy.array', 'np.array', (['vector_list'], {}), '(vector_list)\n', (851, 864), True, 'import numpy as np\n'), ((1084, 1103), 'pandas.Series', 'pd.Series', (['iterable'], {}), '(iterable)\n', (1093, 1103), True, 'import pandas as pd\n'), ((1815, 1836), 'numpy.array', 'np.array', (['vector_list'], {}), '(vector_list)\n', (1823, 1836), True, 'import numpy as np\n'), ((7688, 7747), 'numpy.concatenate', 'np.concatenate', (['(foundation_features, new_features)'], {'axis': '(0)'}), '((foundation_features, new_features), axis=0)\n', (7702, 7747), True, 'import numpy as np\n'), ((7749, 7802), 'numpy.concatenate', 'np.concatenate', (['(foundation_label, new_label)'], {'axis': '(0)'}), '((foundation_label, new_label), axis=0)\n', (7763, 7802), True, 'import numpy as np\n'), ((3367, 3384), 'numpy.vectorize', 'np.vectorize', (['len'], {}), '(len)\n', (3379, 3384), True, 'import numpy as np\n'), ((4143, 4188), 'numpy.vectorize', 'np.vectorize', (['self.__contains_time_characters'], {}), '(self.__contains_time_characters)\n', (4155, 4188), True, 'import numpy as np\n'), ((4227, 4254), 'numpy.vectorize', 'np.vectorize', (['self.__is_nan'], {}), '(self.__is_nan)\n', (4239, 4254), True, 'import numpy as np\n'), ((5141, 5185), 'numpy.vectorize', 'np.vectorize', (['self.__contains_url_characters'], {}), '(self.__contains_url_characters)\n', (5153, 5185), True, 'import numpy as np\n'), ((866, 899), 'numpy.array', 'np.array', (['([label] * self.num_rows)'], {}), '([label] * self.num_rows)\n', (874, 899), True, 'import numpy as np\n'), ((3905, 3922), 'numpy.vectorize', 'np.vectorize', (['len'], {}), '(len)\n', (3917, 3922), True, 'import numpy as 
np\n'), ((5996, 6035), 'collections.Counter', 'Counter', (['raw_feature_dict[feature_name]'], {}), '(raw_feature_dict[feature_name])\n', (6003, 6035), False, 'from collections import Counter\n')] |
try:
import cv2
import numpy as np
except ImportError as e:
from pip._internal import main as install
packages = ["numpy", "opencv-python"]
for package in packages:
install(["install", package])
finally:
pass
# Import the image Stack function
from utils.stackimages import stackImages
def empty(args):
    """Do-nothing trackbar callback; cv2.createTrackbar requires one even
    though the slider values are polled in the main loop instead."""
    return None
# Blank 500x500 BGR canvas used as the backdrop for the text-label pane.
image_black = np.zeros([500, 500, 3], dtype=np.uint8)
def detectColor():
    """Interactive HSV thresholding tool: six trackbars select an HSV range,
    and the masked result for images/car1.jpg is redrawn live until the
    user presses 'q' or Esc."""
    # Track Bars
    cv2.namedWindow("TrackBars")
    cv2.resizeWindow("TrackBars", 600, 200)
    # In OpenCV, hue spans 0-179 while saturation/value span 0-255.
    cv2.createTrackbar("Hue Min", "TrackBars", 0, 179, empty)
    cv2.createTrackbar("Hue Max", "TrackBars", 179, 179, empty)
    cv2.createTrackbar("Sat Min", "TrackBars", 131, 255, empty)
    cv2.createTrackbar("Sat Max", "TrackBars", 255, 255, empty)
    cv2.createTrackbar("Val Min", "TrackBars", 0, 255, empty)
    cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)
    while True:
        # Re-read and convert every iteration so slider changes apply live.
        og_image = cv2.imread("images/car1.jpg")
        hsv_image = cv2.cvtColor(og_image, cv2.COLOR_BGR2HSV)
        # Get the positions of the TrackBars
        h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
        h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
        s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
        s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
        v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
        v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
        # print(h_min, h_max, s_min, s_max, v_min, v_max)
        lower = np.array([h_min, s_min, v_min])
        upper = np.array([h_max, s_max, v_max])
        # mask the image
        mask = cv2.inRange(hsv_image, lower, upper)
        imgResult = cv2.bitwise_and(og_image, og_image, mask=mask)
        # NOTE(review): cv2.LINE_4 here lands in putText's *thickness* slot
        # (lineType would be the following argument) — confirm intent.
        image_label = cv2.putText(image_black, "RED COLOR", (200, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), cv2.LINE_4)
        imageStack = stackImages(.5, ([image_label, og_image , hsv_image], [ mask, imgResult, image_black]))
        cv2.imshow("Color Images", imageStack)
        # cv2.resizeWindow("Color Images", 600, 600)
        key = cv2.waitKey(1)
        # Exit on 'q' or the Esc key (code 27).
        if key & 0xFF == ord('q') or key == 27:
            break
    return
detectColor()
| [
"cv2.createTrackbar",
"cv2.putText",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.zeros",
"utils.stackimages.stackImages",
"cv2.imread",
"pip._internal.main",
"numpy.array",
"cv2.getTrackbarPos",
"cv2.resizeWindow",
"cv2.imshow",
"cv2.inRange",
"cv2.namedWindow"
] | [((359, 398), 'numpy.zeros', 'np.zeros', (['[500, 500, 3]'], {'dtype': 'np.uint8'}), '([500, 500, 3], dtype=np.uint8)\n', (367, 398), True, 'import numpy as np\n'), ((439, 467), 'cv2.namedWindow', 'cv2.namedWindow', (['"""TrackBars"""'], {}), "('TrackBars')\n", (454, 467), False, 'import cv2\n'), ((472, 511), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""TrackBars"""', '(600)', '(200)'], {}), "('TrackBars', 600, 200)\n", (488, 511), False, 'import cv2\n'), ((516, 573), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Hue Min"""', '"""TrackBars"""', '(0)', '(179)', 'empty'], {}), "('Hue Min', 'TrackBars', 0, 179, empty)\n", (534, 573), False, 'import cv2\n'), ((578, 637), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Hue Max"""', '"""TrackBars"""', '(179)', '(179)', 'empty'], {}), "('Hue Max', 'TrackBars', 179, 179, empty)\n", (596, 637), False, 'import cv2\n'), ((642, 701), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Sat Min"""', '"""TrackBars"""', '(131)', '(255)', 'empty'], {}), "('Sat Min', 'TrackBars', 131, 255, empty)\n", (660, 701), False, 'import cv2\n'), ((706, 765), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Sat Max"""', '"""TrackBars"""', '(255)', '(255)', 'empty'], {}), "('Sat Max', 'TrackBars', 255, 255, empty)\n", (724, 765), False, 'import cv2\n'), ((770, 827), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Val Min"""', '"""TrackBars"""', '(0)', '(255)', 'empty'], {}), "('Val Min', 'TrackBars', 0, 255, empty)\n", (788, 827), False, 'import cv2\n'), ((832, 891), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Val Max"""', '"""TrackBars"""', '(255)', '(255)', 'empty'], {}), "('Val Max', 'TrackBars', 255, 255, empty)\n", (850, 891), False, 'import cv2\n'), ((927, 956), 'cv2.imread', 'cv2.imread', (['"""images/car1.jpg"""'], {}), "('images/car1.jpg')\n", (937, 956), False, 'import cv2\n'), ((977, 1018), 'cv2.cvtColor', 'cv2.cvtColor', (['og_image', 'cv2.COLOR_BGR2HSV'], {}), '(og_image, cv2.COLOR_BGR2HSV)\n', (989, 1018), False, 
'import cv2\n'), ((1080, 1122), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Hue Min"""', '"""TrackBars"""'], {}), "('Hue Min', 'TrackBars')\n", (1098, 1122), False, 'import cv2\n'), ((1139, 1181), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Hue Max"""', '"""TrackBars"""'], {}), "('Hue Max', 'TrackBars')\n", (1157, 1181), False, 'import cv2\n'), ((1198, 1240), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Sat Min"""', '"""TrackBars"""'], {}), "('Sat Min', 'TrackBars')\n", (1216, 1240), False, 'import cv2\n'), ((1257, 1299), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Sat Max"""', '"""TrackBars"""'], {}), "('Sat Max', 'TrackBars')\n", (1275, 1299), False, 'import cv2\n'), ((1316, 1358), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Val Min"""', '"""TrackBars"""'], {}), "('Val Min', 'TrackBars')\n", (1334, 1358), False, 'import cv2\n'), ((1375, 1417), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Val Max"""', '"""TrackBars"""'], {}), "('Val Max', 'TrackBars')\n", (1393, 1417), False, 'import cv2\n'), ((1492, 1523), 'numpy.array', 'np.array', (['[h_min, s_min, v_min]'], {}), '([h_min, s_min, v_min])\n', (1500, 1523), True, 'import numpy as np\n'), ((1540, 1571), 'numpy.array', 'np.array', (['[h_max, s_max, v_max]'], {}), '([h_max, s_max, v_max])\n', (1548, 1571), True, 'import numpy as np\n'), ((1612, 1648), 'cv2.inRange', 'cv2.inRange', (['hsv_image', 'lower', 'upper'], {}), '(hsv_image, lower, upper)\n', (1623, 1648), False, 'import cv2\n'), ((1669, 1715), 'cv2.bitwise_and', 'cv2.bitwise_and', (['og_image', 'og_image'], {'mask': 'mask'}), '(og_image, og_image, mask=mask)\n', (1684, 1715), False, 'import cv2\n'), ((1738, 1845), 'cv2.putText', 'cv2.putText', (['image_black', '"""RED COLOR"""', '(200, 250)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 0)', 'cv2.LINE_4'], {}), "(image_black, 'RED COLOR', (200, 250), cv2.FONT_HERSHEY_SIMPLEX,\n 1, (0, 255, 0), cv2.LINE_4)\n", (1749, 1845), False, 'import cv2\n'), ((1863, 1953), 
'utils.stackimages.stackImages', 'stackImages', (['(0.5)', '([image_label, og_image, hsv_image], [mask, imgResult, image_black])'], {}), '(0.5, ([image_label, og_image, hsv_image], [mask, imgResult,\n image_black]))\n', (1874, 1953), False, 'from utils.stackimages import stackImages\n'), ((1959, 1997), 'cv2.imshow', 'cv2.imshow', (['"""Color Images"""', 'imageStack'], {}), "('Color Images', imageStack)\n", (1969, 1997), False, 'import cv2\n'), ((2065, 2079), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2076, 2079), False, 'import cv2\n'), ((193, 222), 'pip._internal.main', 'install', (["['install', package]"], {}), "(['install', package])\n", (200, 222), True, 'from pip._internal import main as install\n')] |
import os
import argparse
import torch
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from models import ArcBinaryClassifier
from batcher import Batcher
# Command-line configuration for the ARC visualization script.
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=128, help='input batch size')
parser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to ARC')
parser.add_argument('--glimpseSize', type=int, default=8, help='the height / width of glimpse seen by ARC')
parser.add_argument('--numStates', type=int, default=128, help='number of hidden states in ARC controller')
parser.add_argument('--numGlimpses', type=int, default=6, help='the number glimpses of each image in pair seen by ARC')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--name', default=None, help='Custom name for this configuration. Needed for loading model'
                                                 'and saving images')
parser.add_argument('--load', required=True, help='the model to load from.')
parser.add_argument('--same', action='store_true', help='whether to generate same character pairs or not')
opt = parser.parse_args()
if opt.name is None:
    # if no name is given, we generate a name from the parameters.
    # only those parameters are taken, which if changed break torch.load compatibility.
    opt.name = "{}_{}_{}_{}".format(opt.numGlimpses, opt.glimpseSize, opt.numStates,
                                    "cuda" if opt.cuda else "cpu")
# make directory for storing images.
images_path = os.path.join("visualization", opt.name)
os.makedirs(images_path, exist_ok=True)
# initialise the batcher that serves image pairs for visualization.
batcher = Batcher(batch_size=opt.batchSize)
def display(image1, mask1, image2, mask2, name="hola.png"):
    """Render an image pair with its attention-mask overlays and save it.

    Args:
        image1, image2: 2-D image tensors (the two elements of the pair).
        mask1, mask2: attention masks produced by the glimpse window.
        name: output file name, written inside the module-level ``images_path``.
    """
    fig, ax = plt.subplots(1, 2)

    # a heuristic for deciding cutoff: keep only cells carrying noticeably
    # more attention mass than a uniform glimpse (2.4 / glimpseSize^2).
    masking_cutoff = 2.4 / (opt.glimpseSize)**2

    mask1 = (mask1 > masking_cutoff).data.numpy()
    mask1 = np.ma.masked_where(mask1 == 0, mask1)  # masked cells stay transparent

    mask2 = (mask2 > masking_cutoff).data.numpy()
    mask2 = np.ma.masked_where(mask2 == 0, mask2)

    ax[0].imshow(image1.data.numpy(), cmap=mpl.cm.bone)
    ax[0].imshow(mask1, interpolation="nearest", cmap=mpl.cm.jet_r, alpha=0.7)

    ax[1].imshow(image2.data.numpy(), cmap=mpl.cm.bone)
    ax[1].imshow(mask2, interpolation="nearest", cmap=mpl.cm.ocean, alpha=0.7)

    plt.savefig(os.path.join(images_path, name))
    # BUGFIX: close the figure. display() is called once per glimpse in a
    # loop; leaving every figure open leaks memory and triggers matplotlib's
    # "too many open figures" warning.
    plt.close(fig)
def get_sample(discriminator):
    """Fetch a training batch and return the pair whose predicted confidence
    is the median of the relevant half (same-pairs or different-pairs,
    depending on ``opt.same``)."""
    # size of the candidate pool to choose the sample from
    sample_size = 30
    X, Y = batcher.fetch_batch("train", batch_size=sample_size)
    pred = discriminator(X)
    half = sample_size // 2
    if opt.same:
        # the second half of the batch holds the same-character pairs
        scores = pred[half:].data.numpy()[:, 0]
        index = scores.argsort()[len(scores) // 2] + half
    else:
        # the first half holds the different-character pairs
        scores = pred[:half].data.numpy()[:, 0]
        index = scores.argsort()[len(scores) // 2]
    return X[index]
def visualize():
    """Load a trained ArcBinaryClassifier, pick one sample pair, and save one
    overlay image per glimpse step via display()."""
    # initialise the model with the same hyper-parameters used at training time
    discriminator = ArcBinaryClassifier(num_glimpses=opt.numGlimpses,
                                        glimpse_h=opt.glimpseSize,
                                        glimpse_w=opt.glimpseSize,
                                        controller_out=opt.numStates)
    # restore the trained weights from saved_models/<name>/<load>
    discriminator.load_state_dict(torch.load(os.path.join("saved_models", opt.name, opt.load)))
    arc = discriminator.arc
    sample = get_sample(discriminator)
    # NOTE(review): per the inline comment, all_hidden appears to be shaped
    # (2*numGlimpses, controller_out) — confirm against ARC._forward.
    all_hidden = arc._forward(sample[None, :, :])[:, 0, :]  # (2*numGlimpses, controller_out)
    glimpse_params = torch.tanh(arc.glimpser(all_hidden))
    masks = arc.glimpse_window.get_attention_mask(glimpse_params, mask_h=opt.imageSize, mask_w=opt.imageSize)
    # separate the masks of each image: glimpses alternate between the two
    # images of the pair.
    masks1 = []
    masks2 = []
    for i, mask in enumerate(masks):
        if i % 2 == 1:  # the first image outputs the hidden state for the next image
            masks1.append(mask)
        else:
            masks2.append(mask)
    for i, (mask1, mask2) in enumerate(zip(masks1, masks2)):
        display(sample[0], mask1, sample[1], mask2, "img_{}".format(i))
# Script entry point: build the model, load weights, and dump glimpse overlays.
if __name__ == "__main__":
    visualize()
| [
"os.makedirs",
"argparse.ArgumentParser",
"numpy.ma.masked_where",
"matplotlib.pyplot.subplots",
"models.ArcBinaryClassifier",
"batcher.Batcher",
"os.path.join"
] | [((194, 219), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (217, 219), False, 'import argparse\n'), ((1689, 1728), 'os.path.join', 'os.path.join', (['"""visualization"""', 'opt.name'], {}), "('visualization', opt.name)\n", (1701, 1728), False, 'import os\n'), ((1729, 1768), 'os.makedirs', 'os.makedirs', (['images_path'], {'exist_ok': '(True)'}), '(images_path, exist_ok=True)\n', (1740, 1768), False, 'import os\n'), ((1806, 1839), 'batcher.Batcher', 'Batcher', ([], {'batch_size': 'opt.batchSize'}), '(batch_size=opt.batchSize)\n', (1813, 1839), False, 'from batcher import Batcher\n'), ((1914, 1932), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1926, 1932), True, 'import matplotlib.pyplot as plt\n'), ((2083, 2120), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(mask1 == 0)', 'mask1'], {}), '(mask1 == 0, mask1)\n', (2101, 2120), True, 'import numpy as np\n'), ((2184, 2221), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(mask2 == 0)', 'mask2'], {}), '(mask2 == 0, mask2)\n', (2202, 2221), True, 'import numpy as np\n'), ((3235, 3372), 'models.ArcBinaryClassifier', 'ArcBinaryClassifier', ([], {'num_glimpses': 'opt.numGlimpses', 'glimpse_h': 'opt.glimpseSize', 'glimpse_w': 'opt.glimpseSize', 'controller_out': 'opt.numStates'}), '(num_glimpses=opt.numGlimpses, glimpse_h=opt.glimpseSize,\n glimpse_w=opt.glimpseSize, controller_out=opt.numStates)\n', (3254, 3372), False, 'from models import ArcBinaryClassifier\n'), ((2511, 2542), 'os.path.join', 'os.path.join', (['images_path', 'name'], {}), '(images_path, name)\n', (2523, 2542), False, 'import os\n'), ((3534, 3582), 'os.path.join', 'os.path.join', (['"""saved_models"""', 'opt.name', 'opt.load'], {}), "('saved_models', opt.name, opt.load)\n", (3546, 3582), False, 'import os\n')] |
# -*- coding:utf-8 -*-
__author__ = 'ljq'
import heapq
import multiprocessing
import logging
import gensim
import json
import os
import torch
import numpy as np
from collections import OrderedDict
from gensim.models import word2vec
# Input corpus and TensorBoard-metadata locations (relative to this script).
TEXT_DIR = '../data/content.txt'
METADATA_DIR = '../data/metadata.tsv'
def logger_fn(name, input_file, level=logging.INFO):
    """Create a logger that writes to *input_file* (parent dirs are created).

    Args:
        name: Logger name passed to logging.getLogger.
        input_file: Path of the log file (opened in 'w' mode, truncating).
        level: Logging level (default: logging.INFO).

    Returns:
        The configured logging.Logger instance.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    log_dir = os.path.dirname(input_file)
    # exist_ok avoids the check-then-create race of the old exists()/makedirs
    # pair; the guard handles a bare file name, for which dirname() returns ''
    # and makedirs('') would raise FileNotFoundError.
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    fh = logging.FileHandler(input_file, mode='w')
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    return logger
def create_prediction_file(output_file, data_id, all_labels, all_predict_labels, all_predict_scores):
    """
    Create the prediction file (one JSON record per line).

    Args:
        output_file: The all classes predicted results provided by network
        data_id: The data record id info provided by class Data
        all_labels: The all origin labels
        all_predict_labels: The all predict labels by threshold
        all_predict_scores: The all predict scores by threshold
    Raises:
        IOError: If the prediction file is not a <.json> file
    """
    if not output_file.endswith('.json'):
        raise IOError("✘ The prediction file is not a json file."
                      "Please make sure the prediction data is a json file.")
    with open(output_file, 'w') as fout:
        data_size = len(all_predict_labels)
        for idx in range(data_size):
            # Use distinct comprehension variables: the original shadowed the
            # record index `i` inside its own comprehensions, which silently
            # relied on comprehension scoping and was fragile to edit.
            predict_labels = [int(label) for label in all_predict_labels[idx]]
            predict_scores = [round(score, 4) for score in all_predict_scores[idx]]
            labels = [int(label) for label in all_labels[idx]]
            data_record = OrderedDict([
                ('id', data_id[idx]),
                ('labels', labels),
                ('predict_labels', predict_labels),
                ('predict_scores', predict_scores)
            ])
            fout.write(json.dumps(data_record, ensure_ascii=False) + '\n')
def get_onehot_label_threshold(scores, threshold=0.5):
    """
    Get the predicted onehot labels based on the threshold.
    If no predicted score reaches the threshold, fall back to marking only
    the label with the maximum score.

    Args:
        scores: The all classes predicted scores provided by network
        threshold: The threshold (default: 0.5)
    Returns:
        predicted_onehot_labels: The predicted labels (onehot)
    """
    predicted_onehot_labels = []
    for row in np.ndarray.tolist(scores):
        onehot = [1 if value >= threshold else 0 for value in row]
        if sum(onehot) == 0:
            # nothing passed the threshold -> keep the single best label
            onehot[row.index(max(row))] = 1
        predicted_onehot_labels.append(onehot)
    return predicted_onehot_labels
def get_onehot_label_topk(scores, top_num=1):
    """
    Get the predicted onehot labels based on the topK number.

    Args:
        scores: The all classes predicted scores provided by network
        top_num: The max topK number (default: 1)
    Returns:
        predicted_onehot_labels: The predicted labels (onehot)
    """
    predicted_onehot_labels = []
    scores = np.ndarray.tolist(scores)
    for score in scores:
        onehot_labels_list = [0] * len(score)
        # BUGFIX: the old `map(score.index, heapq.nlargest(top_num, score))`
        # mapped tied scores to the same (first) index, so fewer than top_num
        # labels were set when duplicates occurred. argsort always yields
        # top_num distinct positions, matching get_label_topk's convention.
        max_num_index_list = np.argsort(score)[-top_num:]
        for i in max_num_index_list:
            onehot_labels_list[i] = 1
        predicted_onehot_labels.append(onehot_labels_list)
    return predicted_onehot_labels
def get_label_threshold(scores, threshold=0.5):
    """
    Get the predicted labels based on the threshold.
    If no predicted score reaches the threshold, fall back to the single
    label with the maximum score.

    Args:
        scores: The all classes predicted scores provided by network
        threshold: The threshold (default: 0.5)
    Returns:
        predicted_labels: The predicted labels
        predicted_scores: The predicted scores
    """
    predicted_labels = []
    predicted_scores = []
    for row in np.ndarray.tolist(scores):
        hits = [(idx, value) for idx, value in enumerate(row) if value >= threshold]
        if not hits:
            # nothing passed the threshold -> keep the single best label
            best = max(row)
            hits = [(row.index(best), best)]
        predicted_labels.append([idx for idx, _ in hits])
        predicted_scores.append([value for _, value in hits])
    return predicted_labels, predicted_scores
def get_label_topk(scores, top_num=1):
    """
    Get the predicted labels (and their scores) based on the topK number.

    Args:
        scores: The all classes predicted scores provided by network
        top_num: The max topK number (default: 1)
    Returns:
        predicted_labels: index lists of the topK classes, best first
        predicted_scores: the matching score lists
    """
    predicted_labels = []
    predicted_scores = []
    for row in np.ndarray.tolist(scores):
        # descending argsort, truncated to the topK positions
        top_idx = np.argsort(row)[::-1][:top_num]
        predicted_labels.append(np.ndarray.tolist(top_idx))
        predicted_scores.append([row[j] for j in top_idx])
    return predicted_labels, predicted_scores
def create_metadata_file(embedding_size, output_file=METADATA_DIR):
    """
    Create the metadata file based on the corpus file(Use for the Embedding Visualization later).

    Writes one vocabulary word per line, ordered by the word2vec index, so
    TensorBoard can align embeddings with their labels.

    Args:
        embedding_size: The embedding size
        output_file: The metadata file (default: 'metadata.tsv')
    Raises:
        IOError: If word2vec model file doesn't exist
    """
    word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'
    if not os.path.isfile(word2vec_file):
        raise IOError("✘ The word2vec file doesn't exist."
                      "Please use function <create_vocab_size(embedding_size)> to create it!")
    model = gensim.models.Word2Vec.load(word2vec_file)
    # NOTE(review): `model.wv.vocab` / `v.index` is the gensim < 4.0 API —
    # confirm the pinned gensim version before upgrading.
    word2idx = dict([(k, v.index) for k, v in model.wv.vocab.items()])
    # sort words by their embedding-matrix row index (ascending)
    word2idx_sorted = [(k, word2idx[k]) for k in sorted(word2idx, key=word2idx.get, reverse=False)]
    with open(output_file, 'w+') as fout:
        for word in word2idx_sorted:
            if word[0] is None:
                # TensorBoard chokes on empty metadata lines; substitute a placeholder
                print("Empty Line, should replaced by any thing else, or will cause a bug of tensorboard")
                fout.write('<Empty Line>' + '\n')
            else:
                fout.write(word[0] + '\n')
def create_word2vec_model(embedding_size, input_file=TEXT_DIR):
    """
    Create the word2vec model based on the given embedding size and the corpus file.

    The trained model is saved to '../data/word2vec_<embedding_size>.model'.

    Args:
        embedding_size: The embedding size
        input_file: The corpus file (one whitespace-tokenized sentence per line)
    """
    word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'
    sentences = word2vec.LineSentence(input_file)
    # sg=0 means use CBOW model(default); sg=1 means use skip-gram model.
    # NOTE(review): `size=` is the gensim < 4.0 keyword (renamed to
    # `vector_size` in 4.x) — confirm the pinned gensim version.
    model = gensim.models.Word2Vec(sentences, size=embedding_size, min_count=0,
                                   sg=0, workers=multiprocessing.cpu_count())
    model.save(word2vec_file)
def load_word2vec_matrix(embedding_size):
    """
    Return the word2vec model matrix.

    Args:
        embedding_size: The embedding size
    Returns:
        vocab_size: Number of words in the vocabulary
        vector: numpy array of shape [vocab_size, embedding_size] whose row
            `v.index` holds the embedding of the corresponding word
    Raises:
        IOError: If word2vec model file doesn't exist
    """
    word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'
    if not os.path.isfile(word2vec_file):
        raise IOError("✘ The word2vec file doesn't exist. "
                      "Please use function <create_vocab_size(embedding_size)> to create it!")
    model = gensim.models.Word2Vec.load(word2vec_file)
    # NOTE(review): `model.wv.vocab` and `model[key]` are gensim < 4.0 APIs
    # (4.x uses `model.wv.key_to_index` / `model.wv[key]`) — confirm version.
    vocab_size = len(model.wv.vocab.items())
    vocab = dict([(k, v.index) for k, v in model.wv.vocab.items()])
    vector = np.zeros([vocab_size, embedding_size])
    for key, value in vocab.items():
        if key is not None:
            vector[value] = model[key]
    return vocab_size, vector
def data_word2vec(input_file, num_classes_list, total_classes, word2vec_model):
    """
    Create the research data tokenindex based on the word2vec model file.
    Return the class Data(includes the data tokenindex and data labels).

    Args:
        input_file: The research data (one JSON record per line)
        num_classes_list: <list> The number of classes per hierarchy level
            (section, subsection, group, subgroup)
        total_classes: The total number of classes
        word2vec_model: The word2vec model file
    Returns:
        The class Data(includes the data tokenindex and data labels)
    Raises:
        IOError: If the input file is not the .json file
    """
    # num_classes_list = list(map(int, num_classes_list.split(',')))
    # word -> embedding-row index mapping (gensim < 4.0 vocab API)
    vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])
    def _token_to_index(content):
        # Map each token to its vocab index; out-of-vocabulary tokens map to 0.
        result = []
        for item in content:
            word2id = vocab.get(item)
            if word2id is None:
                word2id = 0
            result.append(word2id)
        return result
    def _create_onehot_labels(labels_index, num_labels):
        # Multi-hot encode a list of label indices into a 0/1 vector.
        label = [0] * num_labels
        for item in labels_index:
            label[int(item)] = 1
        return label
    if not input_file.endswith('.json'):
        raise IOError("✘ The research data is not a json file. "
                      "Please preprocess the research data into the json file.")
    with open(input_file) as fin:
        id_list = []
        title_index_list = []
        abstract_index_list = []
        labels_list = []
        onehot_labels_list = []
        onehot_labels_tuple_list = []
        total_line = 0
        for eachline in fin:
            data = json.loads(eachline)
            patent_id = data['id']
            title_content = data['title']
            abstract_content = data['abstract']
            # hierarchical label levels of the patent taxonomy
            first_labels = data['section']
            second_labels = data['subsection']
            third_labels = data['group']
            fourth_labels = data['subgroup']
            total_labels = data['labels']
            id_list.append(patent_id)
            title_index_list.append(_token_to_index(title_content))
            abstract_index_list.append(_token_to_index(abstract_content))
            labels_list.append(total_labels)
            # one multi-hot vector per hierarchy level
            labels_tuple = (_create_onehot_labels(first_labels, num_classes_list[0]),
                            _create_onehot_labels(second_labels, num_classes_list[1]),
                            _create_onehot_labels(third_labels, num_classes_list[2]),
                            _create_onehot_labels(fourth_labels, num_classes_list[3]))
            onehot_labels_tuple_list.append(labels_tuple)
            onehot_labels_list.append(_create_onehot_labels(total_labels, total_classes))
            total_line += 1
    # Lightweight record object closing over the parsed lists.
    class _Data:
        def __init__(self):
            pass
        @property
        def number(self):
            # total number of parsed records
            return total_line
        @property
        def patent_id(self):
            return id_list
        @property
        def title_tokenindex(self):
            return title_index_list
        @property
        def abstract_tokenindex(self):
            return abstract_index_list
        @property
        def labels(self):
            return labels_list
        @property
        def onehot_labels_tuple(self):
            return onehot_labels_tuple_list
        @property
        def onehot_labels(self):
            return onehot_labels_list
    return _Data()
def data_augmented(data, drop_rate=1.0):
    """
    Data augmented: append reordered copies of the abstract token sequences.

    Two-token records get one swapped copy; records with >= 3 tokens get
    len(record) // 10 randomly shuffled (and optionally truncated) copies.

    Args:
        data: The Class Data()
        drop_rate: Fraction of each record kept when shuffling (default: 1.0)
    Returns:
        aug_data: an object with the same read interface as Data(), holding
            the original records followed by the augmented copies
    """
    aug_num = data.number
    # NOTE: these alias the lists inside `data`, so appends below grow both.
    aug_patent_id = data.patent_id
    aug_title_tokenindex = data.title_tokenindex
    aug_abstract_tokenindex = data.abstract_tokenindex
    aug_labels = data.labels
    aug_onehot_labels = data.onehot_labels
    aug_onehot_labels_tuple = data.onehot_labels_tuple
    # Iterate over the ORIGINAL record count only (the lists grow as we append).
    for i in range(data.number):
        # BUGFIX: the original read `data.aug_abstract_tokenindex` and
        # `data.tokenindex`, neither of which exists on Data(); it always
        # raised AttributeError. Copy the record so the swap below does not
        # mutate the original sequence in place.
        data_record = list(data.abstract_tokenindex[i])
        if len(data_record) == 1:
            continue
        elif len(data_record) == 2:
            data_record[0], data_record[1] = data_record[1], data_record[0]
            aug_patent_id.append(data.patent_id[i])
            aug_title_tokenindex.append(data.title_tokenindex[i])
            aug_abstract_tokenindex.append(data_record)
            aug_labels.append(data.labels[i])
            aug_onehot_labels.append(data.onehot_labels[i])
            aug_onehot_labels_tuple.append(data.onehot_labels_tuple[i])
            aug_num += 1
        else:
            data_record = np.array(data_record)
            for num in range(len(data_record) // 10):
                # random shuffle & random drop: permute the leading
                # `drop_rate` fraction of the record
                data_shuffled = np.random.permutation(np.arange(int(len(data_record) * drop_rate)))
                new_data_record = data_record[data_shuffled]
                aug_patent_id.append(data.patent_id[i])
                aug_title_tokenindex.append(data.title_tokenindex[i])
                aug_abstract_tokenindex.append(list(new_data_record))
                aug_labels.append(data.labels[i])
                aug_onehot_labels.append(data.onehot_labels[i])
                aug_onehot_labels_tuple.append(data.onehot_labels_tuple[i])
                aug_num += 1
    # Read-only view over the augmented lists, mirroring Data()'s interface.
    class _AugData:
        def __init__(self):
            pass
        @property
        def number(self):
            return aug_num
        @property
        def patent_id(self):
            return aug_patent_id
        @property
        def title_tokenindex(self):
            return aug_title_tokenindex
        @property
        def abstract_tokenindex(self):
            return aug_abstract_tokenindex
        @property
        def labels(self):
            return aug_labels
        @property
        def onehot_labels(self):
            return aug_onehot_labels
        @property
        def onehot_labels_tuple(self):
            return aug_onehot_labels_tuple
    return _AugData()
def load_data_and_labels(data_file, num_classes_list, total_classes, embedding_size, data_aug_flag):
    """
    Load research data from files, splits the data into words and generates labels.
    Return split sentences, labels and the max sentence length of the research data.

    Args:
        data_file: The research data (JSON-lines file)
        num_classes_list: <list> The number of classes per hierarchy level
        total_classes: The total number of classes
        embedding_size: The embedding size
        data_aug_flag: The flag of data augmented
    Returns:
        The class Data
    """
    word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'
    # Load word2vec model file; train one from the corpus if it is missing.
    if not os.path.isfile(word2vec_file):
        create_word2vec_model(embedding_size, TEXT_DIR)
    model = word2vec.Word2Vec.load(word2vec_file)
    # Load data from files and split by words
    data = data_word2vec(data_file, num_classes_list, total_classes, word2vec_model=model)
    if data_aug_flag:
        data = data_augmented(data)
    # plot_seq_len(data_file, data)
    return data
def pad_sequence_with_maxlen(sequences, batch_first=False, padding_value=0, maxlen_arg=None):
    r"""
    Variant of torch.nn.utils.rnn.pad_sequence that supports an assigned
    target length.

    ``pad_sequence`` stacks a list of Tensors along a new dimension and pads
    (or truncates) them to equal length. If the input is a list of sequences
    with size ``L x *``, the output is ``T x B x *`` (or ``B x T x *`` when
    ``batch_first`` is True), where `B` is the number of sequences and `T` is
    ``maxlen_arg`` when given, else the length of the longest sequence.

    Example:
        >>> a = torch.ones(25, 300)
        >>> b = torch.ones(22, 300)
        >>> c = torch.ones(15, 300)
        >>> pad_sequence_with_maxlen([a, b, c]).size()
        torch.Size([25, 3, 300])

    Note:
        Trailing dimensions and dtype of all Tensors in ``sequences`` are
        assumed to match those of ``sequences[0]``. Sequences longer than
        `T` are truncated to `T`.

    Arguments:
        sequences (list[Tensor]): list of variable length sequences.
        batch_first (bool, optional): output will be in ``B x T x *`` if True,
            or in ``T x B x *`` otherwise
        padding_value (float, optional): value for padded elements. Default: 0.
        maxlen_arg (int, optional): the target length to pad/truncate to;
            defaults to the longest sequence length.

    Returns:
        Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``,
        ``B x T x *`` otherwise.
    """
    # assuming trailing dimensions and type of all the Tensors
    # in sequences are same and fetching those from sequences[0]
    max_size = sequences[0].size()
    trailing_dims = max_size[1:]
    # `is None`, not `== None` (identity check; also safe for objects that
    # overload __eq__).
    if maxlen_arg is None:
        max_len = max(s.size(0) for s in sequences)
    else:
        max_len = maxlen_arg
    if batch_first:
        out_dims = (len(sequences), max_len) + trailing_dims
    else:
        out_dims = (max_len, len(sequences)) + trailing_dims
    # new_full allocates on the same device/dtype as sequences[0]; replaces
    # the deprecated `.data.new(*dims).fill_(...)` idiom.
    out_tensor = sequences[0].new_full(out_dims, padding_value)
    for i, tensor in enumerate(sequences):
        length = min(max_len, tensor.size(0))  # truncate if longer than max_len
        # use index notation to prevent duplicate references to the tensor
        if batch_first:
            out_tensor[i, :length, ...] = tensor[:length]
        else:
            out_tensor[:length, i, ...] = tensor[:length]
    return out_tensor
def pad_data(data, pad_seq_len):
    """
    Padding each sentence of research data according to the max sentence length.
    Return the padded data and data labels.

    Args:
        data: The research data (class Data)
        pad_seq_len: The max sentence length of research data
    Returns:
        pad_seq: The padded abstract sequences, shape [N, pad_seq_len]
        labels: the total multi-hot label tensor, followed by one tensor per
            hierarchy level (section, subsection, group, subgroup)
    """
    abstract_pad_seq = pad_sequence_with_maxlen([torch.tensor(item) for item in data.abstract_tokenindex],
                                               batch_first=True, padding_value=0., maxlen_arg=pad_seq_len)
    # abstract_pad_seq = abstract_pad_seq.numpy()
    # abstract_pad_seq = pad_sequences(data.abstract_tokenindex, maxlen=pad_seq_len, value=0.)
    onehot_labels_list = data.onehot_labels
    onehot_labels_list_tuple = data.onehot_labels_tuple
    # np.array(...)[:, k] selects the k-th hierarchy level from every record's
    # label tuple before converting to a tensor.
    return abstract_pad_seq, torch.tensor(onehot_labels_list), \
           torch.tensor(np.array(onehot_labels_list_tuple)[:, 0].tolist()), \
           torch.tensor(np.array(onehot_labels_list_tuple)[:, 1].tolist()), \
           torch.tensor(np.array(onehot_labels_list_tuple)[:, 2].tolist()), \
           torch.tensor(np.array(onehot_labels_list_tuple)[:, 3].tolist())
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generator that yields mini-batches of `data` for `num_epochs` epochs.

    Each epoch, the data is (optionally) reshuffled and then emitted in
    int(len(data)/batch_size)+1 consecutive slices of size `batch_size`
    (the last slice may be shorter).

    Args:
        data: The data
        batch_size: The size of the data batch
        num_epochs: The number of epochs
        shuffle: Shuffle or not (default: True)
    Returns:
        A batch iterator for data set
    """
    data = np.array(data)
    n = len(data)
    batches_per_epoch = int((n - 1) / batch_size) + 1
    for _ in range(num_epochs):
        # Reshuffle at the start of every epoch when requested.
        if shuffle:
            epoch_data = data[np.random.permutation(np.arange(n))]
        else:
            epoch_data = data
        for b in range(batches_per_epoch):
            lo = b * batch_size
            hi = min(lo + batch_size, n)
            yield epoch_data[lo:hi]
| [
"logging.getLogger",
"json.dumps",
"logging.Formatter",
"numpy.argsort",
"os.path.isfile",
"numpy.arange",
"gensim.models.word2vec.Word2Vec.load",
"gensim.models.Word2Vec.load",
"multiprocessing.cpu_count",
"logging.FileHandler",
"json.loads",
"os.path.dirname",
"os.path.exists",
"heapq.nl... | [((374, 397), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (391, 397), False, 'import logging\n'), ((439, 466), 'os.path.dirname', 'os.path.dirname', (['input_file'], {}), '(input_file)\n', (454, 466), False, 'import os\n'), ((541, 582), 'logging.FileHandler', 'logging.FileHandler', (['input_file'], {'mode': '"""w"""'}), "(input_file, mode='w')\n", (560, 582), False, 'import logging\n'), ((599, 661), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (616, 661), False, 'import logging\n'), ((2583, 2608), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['scores'], {}), '(scores)\n', (2600, 2608), True, 'import numpy as np\n'), ((3468, 3493), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['scores'], {}), '(scores)\n', (3485, 3493), True, 'import numpy as np\n'), ((4348, 4373), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['scores'], {}), '(scores)\n', (4365, 4373), True, 'import numpy as np\n'), ((5281, 5306), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['scores'], {}), '(scores)\n', (5298, 5306), True, 'import numpy as np\n'), ((6325, 6367), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_file'], {}), '(word2vec_file)\n', (6352, 6367), False, 'import gensim\n'), ((7215, 7248), 'gensim.models.word2vec.LineSentence', 'word2vec.LineSentence', (['input_file'], {}), '(input_file)\n', (7236, 7248), False, 'from gensim.models import word2vec\n'), ((8058, 8100), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_file'], {}), '(word2vec_file)\n', (8085, 8100), False, 'import gensim\n'), ((8227, 8265), 'numpy.zeros', 'np.zeros', (['[vocab_size, embedding_size]'], {}), '([vocab_size, embedding_size])\n', (8235, 8265), True, 'import numpy as np\n'), ((15184, 15221), 'gensim.models.word2vec.Word2Vec.load', 'word2vec.Word2Vec.load', (['word2vec_file'], {}), '(word2vec_file)\n', (15206, 
15221), False, 'from gensim.models import word2vec\n'), ((19858, 19872), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (19866, 19872), True, 'import numpy as np\n'), ((478, 501), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (492, 501), False, 'import os\n'), ((511, 531), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (522, 531), False, 'import os\n'), ((6127, 6156), 'os.path.isfile', 'os.path.isfile', (['word2vec_file'], {}), '(word2vec_file)\n', (6141, 6156), False, 'import os\n'), ((7860, 7889), 'os.path.isfile', 'os.path.isfile', (['word2vec_file'], {}), '(word2vec_file)\n', (7874, 7889), False, 'import os\n'), ((15084, 15113), 'os.path.isfile', 'os.path.isfile', (['word2vec_file'], {}), '(word2vec_file)\n', (15098, 15113), False, 'import os\n'), ((18995, 19027), 'torch.tensor', 'torch.tensor', (['onehot_labels_list'], {}), '(onehot_labels_list)\n', (19007, 19027), False, 'import torch\n'), ((1810, 1939), 'collections.OrderedDict', 'OrderedDict', (["[('id', data_id[i]), ('labels', labels), ('predict_labels', predict_labels),\n ('predict_scores', predict_scores)]"], {}), "([('id', data_id[i]), ('labels', labels), ('predict_labels',\n predict_labels), ('predict_scores', predict_scores)])\n", (1821, 1939), False, 'from collections import OrderedDict\n'), ((5377, 5394), 'numpy.argsort', 'np.argsort', (['score'], {}), '(score)\n', (5387, 5394), True, 'import numpy as np\n'), ((5553, 5582), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['index_list'], {}), '(index_list)\n', (5570, 5582), True, 'import numpy as np\n'), ((7452, 7479), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (7477, 7479), False, 'import multiprocessing\n'), ((10037, 10057), 'json.loads', 'json.loads', (['eachline'], {}), '(eachline)\n', (10047, 10057), False, 'import json\n'), ((18554, 18572), 'torch.tensor', 'torch.tensor', (['item'], {}), '(item)\n', (18566, 18572), False, 'import torch\n'), ((3616, 3646), 
'heapq.nlargest', 'heapq.nlargest', (['top_num', 'score'], {}), '(top_num, score)\n', (3630, 3646), False, 'import heapq\n'), ((12995, 13016), 'numpy.array', 'np.array', (['data_record'], {}), '(data_record)\n', (13003, 13016), True, 'import numpy as np\n'), ((20114, 20134), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (20123, 20134), True, 'import numpy as np\n'), ((2037, 2080), 'json.dumps', 'json.dumps', (['data_record'], {'ensure_ascii': '(False)'}), '(data_record, ensure_ascii=False)\n', (2047, 2080), False, 'import json\n'), ((19055, 19089), 'numpy.array', 'np.array', (['onehot_labels_list_tuple'], {}), '(onehot_labels_list_tuple)\n', (19063, 19089), True, 'import numpy as np\n'), ((19133, 19167), 'numpy.array', 'np.array', (['onehot_labels_list_tuple'], {}), '(onehot_labels_list_tuple)\n', (19141, 19167), True, 'import numpy as np\n'), ((19211, 19245), 'numpy.array', 'np.array', (['onehot_labels_list_tuple'], {}), '(onehot_labels_list_tuple)\n', (19219, 19245), True, 'import numpy as np\n'), ((19289, 19323), 'numpy.array', 'np.array', (['onehot_labels_list_tuple'], {}), '(onehot_labels_list_tuple)\n', (19297, 19323), True, 'import numpy as np\n')] |
# Raw benchmark results pasted from experiment logs for the DEEP dataset:
# *_R1_txt blobs hold R@1 recall values, *_T_txt blobs the matching query
# times in milliseconds, one value per probe setting, for several ANN index
# configurations (OIMI, IVFOADC variants). Parsed by the plotting code below.
OIMI_R1_txt = '''
0.2225
0.2540
0.2978
0.3302
0.3702
0.3859
0.3975
0.4020
'''
OIMI_T_txt = '''
1.29
1.37
1.47
1.96
2.99
4.33
8.7
12.9
'''
IVF2M_R1_txt = '''
0.2548
0.2992
0.3526
0.3805
0.4138
0.4247
0.4323
0.4355
'''
IVF2M_T_txt = '''
0.33
0.34
0.45
0.55
1.01
1.52
2.67
3.85
'''
IVF4M_R1_txt = '''
0.2855
0.3302
0.3739
0.4020
0.4264
0.4331
0.4412
0.4512
'''
IVF4M_T_txt = '''
0.66
0.93
2.02
3.66
9.55
16.28
32
50
'''
IVFG_R1_txt = '''
0.2284
0.2706
0.3323
0.3739
0.4189
0.4301
0.4383
0.4420
'''
IVFG_T_txt = '''
0.30
0.34
0.43
0.59
1.14
1.71
3.20
5.1
'''
IVFGP_R1_txt = '''
0.2622
0.3094
0.3670
0.3995
0.4294
0.4373
0.4452
0.4497
'''
IVFGP_T_txt = '''
0.31
0.34
0.46
0.65
1.23
1.96
3.60
5.54
'''
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy
import re
import seaborn as sns
# Global plot styling.
sns.set(style='ticks', palette='Set2')
sns.despine()

dataset = "DEEP"

def _parse_floats(txt):
    """Extract the whitespace-separated numbers in *txt* as floats."""
    return [float(v) for v in re.findall(r"[0-9.]+", txt)]

if dataset == "DEEP":
    # BUGFIX: re.findall returns strings; plotting lists of strings makes
    # matplotlib treat both axes as categorical, so the numeric tick/limit
    # settings below would not apply. Convert to float before plotting.
    OIMI_R1 = _parse_floats(OIMI_R1_txt)
    OIMI_T = _parse_floats(OIMI_T_txt)
    IVF2M_R1 = _parse_floats(IVF2M_R1_txt)
    IVF2M_T = _parse_floats(IVF2M_T_txt)
    IVF4M_R1 = _parse_floats(IVF4M_R1_txt)
    IVF4M_T = _parse_floats(IVF4M_T_txt)
    IVFG_R1 = _parse_floats(IVFG_R1_txt)
    IVFG_T = _parse_floats(IVFG_T_txt)
    IVFGP_R1 = _parse_floats(IVFGP_R1_txt)
    IVFGP_T = _parse_floats(IVFGP_T_txt)

plt.figure(figsize=[5, 4])

# One recall-vs-time curve per index configuration.
lineOIMI, = plt.plot(OIMI_T, OIMI_R1, 'r', label='OIMI-D-OADC $2^{14}$')
lineIVF2M, = plt.plot(IVF2M_T, IVF2M_R1, '-g', label='IVFOADC $2^{21}$')
lineIVF4M, = plt.plot(IVF4M_T, IVF4M_R1, '-m', label='IVFOADC $2^{22}$')
lineIVFG, = plt.plot(IVFG_T, IVFG_R1, '-c', label='IVFOADC+G $2^{20}$')
lineIVFGP, = plt.plot(IVFGP_T, IVFGP_R1, '--b', label='IVFOADC+G+P $2^{20}$')

plt.xticks(numpy.arange(0.2, 2.91, 0.2))
plt.yticks(numpy.arange(0.25, 0.46, 0.02))
plt.axis([0.2, 2.81, 0.25, 0.451])
plt.xlabel('Time (ms)', fontsize=12)
plt.ylabel('R@1, 16 bytes', fontsize=12)
#plt.legend(frameon = True, fontsize=9, loc=4)

# Write the figure as a vector PDF.
pp = PdfPages('recallR1_PQ16.pdf')
pp.savefig(bbox_inches='tight')
pp.close()
| [
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"seaborn.despine",
"matplotlib.pyplot.figure",
"re.findall",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"seaborn.set"
] | [((831, 869), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'palette': '"""Set2"""'}), "(style='ticks', palette='Set2')\n", (838, 869), True, 'import seaborn as sns\n'), ((870, 883), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (881, 883), True, 'import seaborn as sns\n'), ((938, 972), 're.findall', 're.findall', (['"""[0-9.]+"""', 'OIMI_R1_txt'], {}), "('[0-9.]+', OIMI_R1_txt)\n", (948, 972), False, 'import re\n'), ((987, 1020), 're.findall', 're.findall', (['"""[0-9.]+"""', 'OIMI_T_txt'], {}), "('[0-9.]+', OIMI_T_txt)\n", (997, 1020), False, 'import re\n'), ((1038, 1073), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVF2M_R1_txt'], {}), "('[0-9.]+', IVF2M_R1_txt)\n", (1048, 1073), False, 'import re\n'), ((1089, 1123), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVF2M_T_txt'], {}), "('[0-9.]+', IVF2M_T_txt)\n", (1099, 1123), False, 'import re\n'), ((1141, 1176), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVF4M_R1_txt'], {}), "('[0-9.]+', IVF4M_R1_txt)\n", (1151, 1176), False, 'import re\n'), ((1192, 1226), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVF4M_T_txt'], {}), "('[0-9.]+', IVF4M_T_txt)\n", (1202, 1226), False, 'import re\n'), ((1243, 1277), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVFG_R1_txt'], {}), "('[0-9.]+', IVFG_R1_txt)\n", (1253, 1277), False, 'import re\n'), ((1292, 1325), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVFG_T_txt'], {}), "('[0-9.]+', IVFG_T_txt)\n", (1302, 1325), False, 'import re\n'), ((1343, 1378), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVFGP_R1_txt'], {}), "('[0-9.]+', IVFGP_R1_txt)\n", (1353, 1378), False, 'import re\n'), ((1394, 1428), 're.findall', 're.findall', (['"""[0-9.]+"""', 'IVFGP_T_txt'], {}), "('[0-9.]+', IVFGP_T_txt)\n", (1404, 1428), False, 'import re\n'), ((1435, 1461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[5, 4]'}), '(figsize=[5, 4])\n', (1445, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1477, 1537), 'matplotlib.pyplot.plot', 
'plt.plot', (['OIMI_T', 'OIMI_R1', '"""r"""'], {'label': '"""OIMI-D-OADC $2^{14}$"""'}), "(OIMI_T, OIMI_R1, 'r', label='OIMI-D-OADC $2^{14}$')\n", (1485, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1616), 'matplotlib.pyplot.plot', 'plt.plot', (['IVF2M_T', 'IVF2M_R1', '"""-g"""'], {'label': '"""IVFOADC $2^{21}$"""'}), "(IVF2M_T, IVF2M_R1, '-g', label='IVFOADC $2^{21}$')\n", (1565, 1616), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1695), 'matplotlib.pyplot.plot', 'plt.plot', (['IVF4M_T', 'IVF4M_R1', '"""-m"""'], {'label': '"""IVFOADC $2^{22}$"""'}), "(IVF4M_T, IVF4M_R1, '-m', label='IVFOADC $2^{22}$')\n", (1644, 1695), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1773), 'matplotlib.pyplot.plot', 'plt.plot', (['IVFG_T', 'IVFG_R1', '"""-c"""'], {'label': '"""IVFOADC+G $2^{20}$"""'}), "(IVFG_T, IVFG_R1, '-c', label='IVFOADC+G $2^{20}$')\n", (1722, 1773), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1857), 'matplotlib.pyplot.plot', 'plt.plot', (['IVFGP_T', 'IVFGP_R1', '"""--b"""'], {'label': '"""IVFOADC+G+P $2^{20}$"""'}), "(IVFGP_T, IVFGP_R1, '--b', label='IVFOADC+G+P $2^{20}$')\n", (1801, 1857), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1992), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.2, 2.81, 0.25, 0.451]'], {}), '([0.2, 2.81, 0.25, 0.451])\n', (1966, 1992), True, 'import matplotlib.pyplot as plt\n'), ((1997, 2033), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (ms)"""'], {'fontsize': '(12)'}), "('Time (ms)', fontsize=12)\n", (2007, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2078), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""R@1, 16 bytes"""'], {'fontsize': '(12)'}), "('R@1, 16 bytes', fontsize=12)\n", (2048, 2078), True, 'import matplotlib.pyplot as plt\n'), ((2140, 2169), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['"""recallR1_PQ16.pdf"""'], {}), "('recallR1_PQ16.pdf')\n", (2148, 2169), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((1876, 1904), 
'numpy.arange', 'numpy.arange', (['(0.2)', '(2.91)', '(0.2)'], {}), '(0.2, 2.91, 0.2)\n', (1888, 1904), False, 'import numpy\n'), ((1921, 1951), 'numpy.arange', 'numpy.arange', (['(0.25)', '(0.46)', '(0.02)'], {}), '(0.25, 0.46, 0.02)\n', (1933, 1951), False, 'import numpy\n')] |
import numpy as np # this module is useful to work with numerical arrays
from tqdm import tqdm # this module is useful to plot progress bars
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import random_split
from torch import nn
import torch.optim as optim
data_dir = 'dataset'
### With these commands the train and test datasets, respectively, are downloaded
### automatically and stored in the local "data_dir" directory.
# NOTE(review): download=False assumes the MNIST files already exist in
# `data_dir`; a first run needs download=True — confirm intended setup.
train_dataset = torchvision.datasets.MNIST(data_dir, train=True, download=False)
test_dataset = torchvision.datasets.MNIST(data_dir, train=False, download=False)
# Only conversion to tensors (values scaled to [0, 1]); no normalization.
train_transform = transforms.Compose([
transforms.ToTensor(),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
])
# Set the train transform
train_dataset.transform = train_transform
# Set the test transform
test_dataset.transform = test_transform
m = len(train_dataset)
#random_split randomly split a dataset into non-overlapping new datasets of given lengths
#train (55,000 images), val split (5,000 images)
# NOTE(review): the comment above says 55,000/5,000, but the expression below
# is an 80/20 split of the 60,000 images (48,000 train / 12,000 validation).
train_data, val_data = random_split(train_dataset, [int(m-m*0.2), int(m*0.2)])
batch_size = 256
# The dataloaders handle shuffling, batching, etc...
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
valid_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,shuffle=True)
class Encoder(nn.Module):
    """Convolutional encoder mapping an image to a latent vector.

    Three strided convolutions reduce the input to a 32x3x3 feature map,
    which two linear layers project to `encoded_space_dim` values. The
    3*3*32 flatten size implies 1x28x28 inputs (MNIST) — assumption, as
    the input shape is fixed by the dataset outside this class.

    Note: `fc2_input_dim` is accepted but never used in this implementation.
    """
    def __init__(self, encoded_space_dim,fc2_input_dim):
        super().__init__()
        ### Convolutional section
        self.encoder_cnn = nn.Sequential(
            # First convolutional layer
            nn.Conv2d(1, 8, 3, stride=2, padding=1),
            #nn.BatchNorm2d(8),
            nn.ReLU(True),
            # Second convolutional layer
            nn.Conv2d(8, 16, 3, stride=2, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            # Third convolutional layer
            nn.Conv2d(16, 32, 3, stride=2, padding=0),
            #nn.BatchNorm2d(32),
            nn.ReLU(True)
        )
        ### Flatten layer
        self.flatten = nn.Flatten(start_dim=1)
        ### Linear section
        self.encoder_lin = nn.Sequential(
            # First linear layer
            nn.Linear(3 * 3 * 32, 128),
            nn.ReLU(True),
            # Second linear layer
            nn.Linear(128, encoded_space_dim),
            # Normalize the latent code (no learnable affine parameters)
            nn.LayerNorm(encoded_space_dim, elementwise_affine=False)
        )
    def forward(self, x):
        """Return the latent code for input batch `x`."""
        # Apply convolutions
        x = self.encoder_cnn(x)
        # Flatten
        x = self.flatten(x)
        # # Apply linear layers
        x = self.encoder_lin(x)
        return x
class Decoder(nn.Module):
    """Decoder mirroring `Encoder`: latent vector back to an image.

    Linear layers expand the code to 3*3*32 values, which are reshaped to a
    32x3x3 feature map and upsampled by three transposed convolutions; a
    final sigmoid constrains outputs to [0, 1] (valid pixel values).

    Note: `fc2_input_dim` is accepted but never used in this implementation.
    """
    def __init__(self, encoded_space_dim,fc2_input_dim):
        super().__init__()
        ### Linear section
        self.decoder_lin = nn.Sequential(
            # First linear layer
            nn.Linear(encoded_space_dim, 128),
            nn.ReLU(True),
            # Second linear layer
            nn.Linear(128, 3 * 3 * 32),
            nn.ReLU(True)
        )
        ### Unflatten
        self.unflatten = nn.Unflatten(dim=1, unflattened_size=(32, 3, 3))
        ### Convolutional section
        self.decoder_conv = nn.Sequential(
            # First transposed convolution
            nn.ConvTranspose2d(32, 16, 3, stride=2, output_padding=0),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            # Second transposed convolution
            nn.ConvTranspose2d(16, 8, 3, stride=2, padding=1, output_padding=1),
            nn.BatchNorm2d(8),
            nn.ReLU(True),
            # Third transposed convolution
            nn.ConvTranspose2d(8, 1, 3, stride=2, padding=1, output_padding=1)
        )
    def forward(self, x):
        """Reconstruct an image batch from latent codes `x`."""
        # Apply linear layers
        x = self.decoder_lin(x)
        # Unflatten
        x = self.unflatten(x)
        # Apply transposed convolutions
        x = self.decoder_conv(x)
        # Apply a sigmoid to force the output to be between 0 and 1 (valid pixel values)
        x = torch.sigmoid(x)
        return x
### Set the random seed for reproducible results
torch.manual_seed(0)
### Initialize the two networks
d = 5  # dimensionality of the latent (encoded) space
#model = Autoencoder(encoded_space_dim=encoded_space_dim)
encoder = Encoder(encoded_space_dim=d,fc2_input_dim=128)
decoder = Decoder(encoded_space_dim=d,fc2_input_dim=128)
print(encoder)
print(decoder)
### Define the loss function
loss_fn = torch.nn.MSELoss()
### Define an optimizer (both for the encoder and the decoder!)
lr= 0.001
#lr = 0.0008 # Learning rate
params_to_optimize = [
    {'params': encoder.parameters()},
    {'params': decoder.parameters()}
]
# NOTE(review): this assignment shadows the `import torch.optim as optim`
# module alias from the top of the file; harmless here since the module
# alias is never used afterwards, but renaming the variable would be safer.
optim = torch.optim.Adam(params_to_optimize, lr=lr, weight_decay=1e-05)
#optim = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=6e-05)
# Check if the GPU is available
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(f'Selected device: {device}')
# Move both the encoder and the decoder to the selected device
encoder.to(device)
decoder.to(device)
#model.to(device)
### Training function
def train_epoch(encoder, decoder, device, dataloader, loss_fn, optimizer):
    """Run one training epoch of the autoencoder and return the mean loss.

    Parameters
    ----------
    encoder, decoder : torch.nn.Module
        The two halves of the autoencoder; both are put in train mode.
    device : torch.device
        Device the input batches are moved to.
    dataloader : torch.utils.data.DataLoader
        Yields (batch, label) pairs; labels are ignored (unsupervised).
    loss_fn : callable
        Reconstruction loss, called as loss_fn(decoded, original).
    optimizer : torch.optim.Optimizer
        Optimizer stepping both encoder and decoder parameters.

    Returns
    -------
    float
        Mean of the per-batch losses over the epoch.
    """
    # Set train mode for both the encoder and the decoder
    encoder.train()
    decoder.train()
    train_loss = []
    # Iterate the dataloader; labels are not needed (unsupervised learning)
    for image_batch, _ in dataloader:
        # Move tensor to the proper device
        image_batch = image_batch.to(device)
        # Encode, then decode
        encoded_data = encoder(image_batch)
        decoded_data = decoder(encoded_data)
        # Evaluate reconstruction loss
        loss = loss_fn(decoded_data, image_batch)
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # FIX: use .item() to extract the Python float; the legacy .data
        # attribute is discouraged and can mask autograd errors.
        print('\t partial train loss (single batch): %f' % loss.item())
        train_loss.append(loss.detach().cpu().numpy())
    return np.mean(train_loss)
### Testing function
def test_epoch(encoder, decoder, device, dataloader, loss_fn):
    """Evaluate the autoencoder on `dataloader` and return the global loss.

    Both networks are switched to eval mode and gradients are disabled.
    All reconstructions and originals are gathered first, so `loss_fn` is
    computed once over the whole dataset rather than averaged per batch.
    """
    encoder.eval()
    decoder.eval()
    with torch.no_grad():  # no gradient tracking during evaluation
        reconstructions = []
        originals = []
        for batch, _ in dataloader:  # labels are ignored (unsupervised)
            batch = batch.to(device)
            decoded = decoder(encoder(batch))
            reconstructions.append(decoded.cpu())
            originals.append(batch.cpu())
        # Single global loss over the concatenated tensors
        val_loss = loss_fn(torch.cat(reconstructions), torch.cat(originals))
    return val_loss.data
num_epochs = 30
history={'train_loss':[],'val_loss':[]}
# Main loop: one optimization pass plus one validation pass per epoch.
for epoch in range(num_epochs):
    train_loss = train_epoch(encoder,decoder,device,train_loader,loss_fn,optim)
    val_loss = test_epoch(encoder,decoder,device,valid_loader,loss_fn)
    print('\n EPOCH {}/{} \t train loss {:.3f} \t val loss {:.3f}'.format(epoch + 1, num_epochs,train_loss,val_loss))
    history['train_loss'].append(train_loss)
    history['val_loss'].append(val_loss)
# Project every test image into the latent space and save one CSV row per
# image: the d latent values followed by the class label.
encoded_samples = []
for sample in tqdm(test_dataset):
    img = sample[0].unsqueeze(0).to(device)  # add a batch dimension
    label = sample[1]
    # Encode image
    encoder.eval()
    with torch.no_grad():
        encoded_img = encoder(img)
    # Append to list
    encoded_img = encoded_img.flatten().cpu().numpy()
    encoded_sample = encoded_img.tolist()
    encoded_sample.append(label)
    encoded_samples.append(encoded_sample)
np.savetxt('latent_mnist_test_d{}.csv'.format(d), encoded_samples, delimiter = ',')
print('finished!')
| [
"torch.cat",
"numpy.mean",
"torch.device",
"torch.no_grad",
"torch.nn.MSELoss",
"torch.nn.Unflatten",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"tqdm.tqdm",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"t... | [((483, 547), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['data_dir'], {'train': '(True)', 'download': '(False)'}), '(data_dir, train=True, download=False)\n', (509, 547), False, 'import torchvision\n'), ((564, 629), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['data_dir'], {'train': '(False)', 'download': '(False)'}), '(data_dir, train=False, download=False)\n', (590, 629), False, 'import torchvision\n'), ((1234, 1296), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size'}), '(train_data, batch_size=batch_size)\n', (1261, 1296), False, 'import torch\n'), ((1312, 1372), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_data'], {'batch_size': 'batch_size'}), '(val_data, batch_size=batch_size)\n', (1339, 1372), False, 'import torch\n'), ((1387, 1465), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(test_dataset, batch_size=batch_size, shuffle=True)\n', (1414, 1465), False, 'import torch\n'), ((4245, 4265), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4262, 4265), False, 'import torch\n'), ((4548, 4566), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4564, 4566), False, 'import torch\n'), ((4782, 4845), 'torch.optim.Adam', 'torch.optim.Adam', (['params_to_optimize'], {'lr': 'lr', 'weight_decay': '(1e-05)'}), '(params_to_optimize, lr=lr, weight_decay=1e-05)\n', (4798, 4845), False, 'import torch\n'), ((7740, 7758), 'tqdm.tqdm', 'tqdm', (['test_dataset'], {}), '(test_dataset)\n', (7744, 7758), False, 'from tqdm import tqdm\n'), ((4985, 5010), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5008, 5010), False, 'import torch\n'), ((4961, 4981), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4973, 4981), False, 'import torch\n'), ((5016, 5035), 'torch.device', 'torch.device', (['"""cpu"""'], 
{}), "('cpu')\n", (5028, 5035), False, 'import torch\n'), ((6186, 6205), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (6193, 6205), True, 'import numpy as np\n'), ((675, 696), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (694, 696), False, 'from torchvision import transforms\n'), ((743, 764), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (762, 764), False, 'from torchvision import transforms\n'), ((2194, 2217), 'torch.nn.Flatten', 'nn.Flatten', ([], {'start_dim': '(1)'}), '(start_dim=1)\n', (2204, 2217), False, 'from torch import nn\n'), ((3224, 3272), 'torch.nn.Unflatten', 'nn.Unflatten', ([], {'dim': '(1)', 'unflattened_size': '(32, 3, 3)'}), '(dim=1, unflattened_size=(32, 3, 3))\n', (3236, 3272), False, 'from torch import nn\n'), ((4160, 4176), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (4173, 4176), False, 'import torch\n'), ((6390, 6405), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6403, 6405), False, 'import torch\n'), ((7094, 7113), 'torch.cat', 'torch.cat', (['conc_out'], {}), '(conc_out)\n', (7103, 7113), False, 'import torch\n'), ((7135, 7156), 'torch.cat', 'torch.cat', (['conc_label'], {}), '(conc_label)\n', (7144, 7156), False, 'import torch\n'), ((7873, 7888), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7886, 7888), False, 'import torch\n'), ((1718, 1757), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(8)', '(3)'], {'stride': '(2)', 'padding': '(1)'}), '(1, 8, 3, stride=2, padding=1)\n', (1727, 1757), False, 'from torch import nn\n'), ((1803, 1816), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1810, 1816), False, 'from torch import nn\n'), ((1871, 1911), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', '(16)', '(3)'], {'stride': '(2)', 'padding': '(1)'}), '(8, 16, 3, stride=2, padding=1)\n', (1880, 1911), False, 'from torch import nn\n'), ((1925, 1943), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (1939, 1943), False, 
'from torch import nn\n'), ((1957, 1970), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1964, 1970), False, 'from torch import nn\n'), ((2024, 2065), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)', '(3)'], {'stride': '(2)', 'padding': '(0)'}), '(16, 32, 3, stride=2, padding=0)\n', (2033, 2065), False, 'from torch import nn\n'), ((2112, 2125), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2119, 2125), False, 'from torch import nn\n'), ((2333, 2359), 'torch.nn.Linear', 'nn.Linear', (['(3 * 3 * 32)', '(128)'], {}), '(3 * 3 * 32, 128)\n', (2342, 2359), False, 'from torch import nn\n'), ((2373, 2386), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2380, 2386), False, 'from torch import nn\n'), ((2434, 2467), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'encoded_space_dim'], {}), '(128, encoded_space_dim)\n', (2443, 2467), False, 'from torch import nn\n'), ((2481, 2538), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['encoded_space_dim'], {'elementwise_affine': '(False)'}), '(encoded_space_dim, elementwise_affine=False)\n', (2493, 2538), False, 'from torch import nn\n'), ((3004, 3037), 'torch.nn.Linear', 'nn.Linear', (['encoded_space_dim', '(128)'], {}), '(encoded_space_dim, 128)\n', (3013, 3037), False, 'from torch import nn\n'), ((3051, 3064), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3058, 3064), False, 'from torch import nn\n'), ((3112, 3138), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(3 * 3 * 32)'], {}), '(128, 3 * 3 * 32)\n', (3121, 3138), False, 'from torch import nn\n'), ((3152, 3165), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3159, 3165), False, 'from torch import nn\n'), ((3406, 3463), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(16)', '(3)'], {'stride': '(2)', 'output_padding': '(0)'}), '(32, 16, 3, stride=2, output_padding=0)\n', (3424, 3463), False, 'from torch import nn\n'), ((3477, 3495), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (3491, 3495), 
False, 'from torch import nn\n'), ((3509, 3522), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3516, 3522), False, 'from torch import nn\n'), ((3580, 3647), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(16)', '(8)', '(3)'], {'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(16, 8, 3, stride=2, padding=1, output_padding=1)\n', (3598, 3647), False, 'from torch import nn\n'), ((3661, 3678), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8)'], {}), '(8)\n', (3675, 3678), False, 'from torch import nn\n'), ((3692, 3705), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3699, 3705), False, 'from torch import nn\n'), ((3762, 3828), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(8)', '(1)', '(3)'], {'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(8, 1, 3, stride=2, padding=1, output_padding=1)\n', (3780, 3828), False, 'from torch import nn\n')] |
import numpy
from .ltl_nodes import build_tree
class HypothesisTester(object):
    """Sequential probability ratio test for a satisfaction probability.

    The null hypothesis H0 states that P_{satisfied} >= prob; the
    alternative hypothesis H1 states that P_{satisfied} < prob. Samples
    are accumulated until the log test ratio crosses one of the two
    decision thresholds derived from `alpha` and `beta`.

    Parameters
    ----------
    prob : float
        Probability threshold of the hypothesis, between 0 and 1.
    alpha : float
        Allowed Type-I error for deciding between the hypotheses,
        between 0 and 1.
    beta : float
        Allowed Type-II error for deciding between the hypotheses,
        between 0 and 1.
    delta : float
        Indifference half-width: inside the interval
        (prob - delta, prob + delta) either conclusion is acceptable.
    """
    def __init__(self, prob, alpha, beta, delta):
        self.prob = prob
        self.alpha = alpha
        self.beta = beta
        self.delta = delta
        # Decision thresholds of the sequential test, in log space.
        self.logA = numpy.log((1 - self.beta) / self.alpha)
        self.logB = numpy.log(self.beta / (1 - self.alpha))
    def get_logq(self, samples):
        """Return the log test ratio for the given boolean samples.

        Mostly for internal use, but handy for tracking or plotting how
        the ratio evolves as samples are collected.

        Parameters
        ----------
        samples : list[bool]
            Satisfaction outcomes of a series of random samples.

        Returns
        -------
        logq : float
            The log test ratio under the configured test parameters.
        """
        nsucc = sum(1 for outcome in samples if outcome)
        nfail = len(samples) - nsucc
        plo = self.prob - self.delta
        phi = self.prob + self.delta
        # Likelihood ratio of the samples under p = prob - delta versus
        # p = prob + delta, expressed in log space.
        logq = (nsucc * numpy.log(plo) + nfail * numpy.log(1 - plo)
                - nsucc * numpy.log(phi) - nfail * numpy.log(1 - phi))
        return logq
    def test(self, samples):
        """Return 0 (H0), 1 (H1), or None if not yet decidable.

        Parameters
        ----------
        samples : list[bool]
            Satisfaction outcomes of a series of random samples.

        Returns
        -------
        result : 0, 1 or None
            0 for the null hypothesis, 1 for the alternative, None when
            the given samples do not yet allow a decision.
        """
        ratio = self.get_logq(samples)
        if ratio <= self.logB:
            return 0
        if ratio >= self.logA:
            return 1
        return None
class PathChecker(object):
    """Check the satisfaction of a given path property on a single path.

    The PathChecker can be used in two modes. In "offline" mode a full path
    has already been sampled: the nodes are passed in the constructor and
    satisfaction is determined immediately. In "online" mode the checker is
    constructed with only the formula; nodes are fed one by one through
    `update`, and the `truth` attribute can be inspected after each call.

    Parameters
    ----------
    formula_str : str
        A path property described using BLTL.
    nodes : Optional[list]
        Node names of a complete path (offline mode), or None (online mode).

    Attributes
    ----------
    truth : 0, 1 or None
        1 if the property is satisfied, 0 if not satisfied, None if not
        determined yet.
    """
    def __init__(self, formula_str, nodes=None):
        self.formula_str = formula_str
        self.roots = []
        self.time = 0
        root = build_tree(self.formula_str)
        self.roots.append(root)
        if nodes is not None:
            # BUGFIX: initialize tf so an empty `nodes` sequence yields
            # truth == None instead of raising NameError.
            tf = None
            for t, s in enumerate(nodes):
                tf = self.update(s, t == (len(nodes) - 1))
                if tf is not None:
                    break
            self.truth = tf
        else:
            self.truth = None
    def update(self, node, is_last=False):
        """Return whether the property is satisfied given the latest node.

        Parameters
        ----------
        node : str
            Identifier of the next node along the path.
        is_last : bool
            True if this is the last node along the path.

        Returns
        -------
        tf : 0, 1 or None
            1 if satisfied, 0 if not satisfied, None if satisfaction
            cannot yet be determined.
        """
        self.roots[self.time].update(node, is_last)
        tf = self.roots[0].eval_node()
        if tf is None:
            # Not decidable yet: extend the chain of formula trees so the
            # next node can be evaluated at the following time step.
            self.roots.append(self.roots[self.time].duplicate())
            self.roots[self.time].link(self.roots[self.time + 1])
            self.time += 1
        self.truth = tf
        return self.truth
| [
"numpy.log"
] | [((1123, 1162), 'numpy.log', 'numpy.log', (['((1 - self.beta) / self.alpha)'], {}), '((1 - self.beta) / self.alpha)\n', (1132, 1162), False, 'import numpy\n'), ((1183, 1222), 'numpy.log', 'numpy.log', (['(self.beta / (1 - self.alpha))'], {}), '(self.beta / (1 - self.alpha))\n', (1192, 1222), False, 'import numpy\n'), ((1953, 1986), 'numpy.log', 'numpy.log', (['(self.prob - self.delta)'], {}), '(self.prob - self.delta)\n', (1962, 1986), False, 'import numpy\n'), ((2008, 2047), 'numpy.log', 'numpy.log', (['(1 - (self.prob - self.delta))'], {}), '(1 - (self.prob - self.delta))\n', (2017, 2047), False, 'import numpy\n'), ((2069, 2102), 'numpy.log', 'numpy.log', (['(self.prob + self.delta)'], {}), '(self.prob + self.delta)\n', (2078, 2102), False, 'import numpy\n'), ((2124, 2163), 'numpy.log', 'numpy.log', (['(1 - (self.prob + self.delta))'], {}), '(1 - (self.prob + self.delta))\n', (2133, 2163), False, 'import numpy\n')] |
from pathlib import Path
from contextlib import contextmanager
import numpy as np
from ._version import get_versions
from .array import Array, MetaData, asarray, \
check_accessmode, delete_array, create_array, \
truncate_array
from .datadir import DataDir, create_datadir
from .metadata import MetaData
from .readcoderaggedarray import readcode, readcodefunc
from .utils import wrap
__all__ = ['RaggedArray', 'asraggedarray', 'create_raggedarray',
'delete_raggedarray', 'truncate_raggedarray']
# TODO needs doc
# TODO an open_array method
class RaggedArray:
    """
    Disk-based sequence of arrays that may have a variable length in maximally
    one dimension.

    On disk the subarrays are stored concatenated along their variable (first)
    axis in a 'values' Array, with a companion 'indices' Array holding
    (start, end) pairs, one per subarray.
    """
    # fixed layout of the data directory
    _valuesdirname = 'values'
    _indicesdirname = 'indices'
    _arraydescrfilename = 'arraydescription.json'
    _metadatafilename = 'metadata.json'
    _readmefilename = 'README.txt'
    _protectedfiles = {_valuesdirname, _indicesdirname,
                       _readmefilename, _metadatafilename,
                       _arraydescrfilename}
    _formatversion = get_versions()['version']
    def __init__(self, path, accessmode='r'):
        # path: directory holding the ragged array; accessmode: 'r' or 'r+'
        self._datadir = DataDir(path=path,
                                protectedpaths=self._protectedfiles)
        self._path = self._datadir._path
        self._accessmode = check_accessmode(accessmode)
        self._valuespath = self._path / self._valuesdirname
        self._indicespath = self._path / self._indicesdirname
        self._arraydescrpath = self._path / self._arraydescrfilename
        self._values = Array(self._valuespath, accessmode=self._accessmode)
        self._indices = Array(self._indicespath, accessmode=self._accessmode)
        self._metadata = MetaData(self._path / self._metadatafilename,
                                  accessmode=accessmode)
        # cached summary of the array layout, persisted via _update_arraydescr
        arrayinfo = {}
        arrayinfo['len'] = len(self._indices)
        arrayinfo['size'] = self._values.size
        arrayinfo['atom'] = self._values.shape[1:]
        arrayinfo['numtype'] = self._values._arrayinfo['numtype']
        arrayinfo['darrversion'] = RaggedArray._formatversion
        arrayinfo['darrobject'] = 'RaggedArray'
        self._arrayinfo = arrayinfo
    @property
    def accessmode(self):
        """Data access mode of metadata, {'r', 'r+'}."""
        return self._accessmode
    @accessmode.setter
    def accessmode(self, value):
        # propagate the new mode to metadata and both component arrays
        self._accessmode = check_accessmode(value)
        self._metadata.accessmode = value
        self._values.accessmode = value
        self._indices.accessmode = value
    @property
    def dtype(self):
        """Numpy data type of the array values.
        """
        return self._values._dtype
    @property
    def atom(self):
        """Dimensions of the non-variable axes of the arrays.
        """
        return tuple(self._values._shape[1:])
    @property
    def datadir(self):
        """Data directory object with many useful methods, such as
        writing information to text or json files, archiving all data,
        calculating checksums etc."""
        return self._datadir
    @property
    def narrays(self):
        """Number of subarrays in the ragged array.
        """
        return self._indices.shape[0]
    @property
    def metadata(self):
        """
        Dictionary of meta data.
        """
        return self._metadata
    @property
    def mb(self):
        """Storage size in megabytes of the ragged array.
        """
        return self._values.mb + self._indices.mb
    @property
    def path(self):
        """File system path to array data"""
        return self._path
    @property
    def size(self):
        """Total number of values in the data array.
        """
        return int(self._values._size)
    def __getitem__(self, item):
        """Return the item-th subarray; only integer indexing is supported."""
        if not np.issubdtype(type(item), np.integer):
            raise TypeError("Only integers can be used for indexing " \
                            "darraylists, which '{}' is not".format(item))
        # the stored (start, end) pair selects the subarray from the
        # concatenated values array
        index = slice(*self._indices[item])
        return self._values[index]
    def __len__(self):
        return self._indices.shape[0]
    def __repr__(self):
        return f'darr ragged array ({self.narrays} arrays with at' \
               f'om shape {self.atom}, {self.accessmode})'
    __str__ = __repr__
    def _update_readmetxt(self):
        # rewrite README.txt with fresh per-language example read code
        txt = readcodetxt(self)
        self._datadir._write_txt(self._readmefilename, txt, overwrite=True)
    def _update_arraydescr(self, **kwargs):
        # merge kwargs into the cached info dict and persist it as JSON
        self._arrayinfo.update(kwargs)
        self._datadir._write_jsondict(filename=self._arraydescrfilename,
                                      d=self._arrayinfo, overwrite=True)
    def _append(self, array):
        # low-level append: writes values and the new index pair, but does
        # NOT update README/description files (see append/iterappend)
        size = len(array)
        endindex = self._values.shape[0]
        self._values.append(np.asarray(array, dtype=self.dtype))
        self._indices.append([[endindex, endindex + size]])
    def append(self, array):
        """Append one subarray and update the on-disk info files."""
        self._append(array)
        self._update_readmetxt()
        self._update_arraydescr(len=len(self._indices),
                                size=self._values.size)
    def copy(self, path, accessmode='r', overwrite=False):
        """Create a copy of this ragged array at `path` and return it."""
        arrayiterable = (self[i] for i in range(len(self)))
        metadata = dict(self.metadata)
        return asraggedarray(path=path, arrayiterable=arrayiterable,
                             dtype=self.dtype, metadata=metadata,
                             accessmode=accessmode, overwrite=overwrite)
    @contextmanager
    def _view(self, accessmode=None):
        # keep both underlying memmaps open for the duration of the block,
        # avoiding repeated open/close cycles during bulk operations
        with self._indices._open_array(accessmode=accessmode) as (iv, _), \
             self._values._open_array(accessmode=accessmode) as (vv, _):
            yield iv, vv
    def iter_arrays(self, startindex=0, endindex=None, stepsize=1,
                    accessmode=None):
        """Yield (copies of) subarrays, from startindex up to endindex."""
        if endindex is None:
            endindex = self.narrays
        with self._view(accessmode=accessmode):
            for i in range(startindex, endindex, stepsize):
                yield np.array(self[i], copy=True)
    def iterappend(self, arrayiterable):
        """Iteratively append data from a data iterable.

        The iterable has to yield array-like objects compliant with darr.
        The length of first dimension of these objects may be different,
        but the length of other dimensions, if any, has to be the same.

        Parameters
        ----------
        arrayiterable: an iterable that yield array-like objects

        Returns
        -------
        None

        """
        # TODO refactor such that info files are not updated at each append?
        with self._view():
            for a in arrayiterable:
                self._append(a)
        self._update_readmetxt()
        self._update_arraydescr(len=len(self._indices),
                                size=self._values.size)
    def readcode(self, language):
        """Generate code to read the array in a different language.

        Note that this does not include reading the metadata, which is just
        based on a text file in JSON format.

        Parameter
        ---------
        language: str
            One of the languages that are supported. Choose from:
            'matlab', 'numpymemmap', 'R'.

        Example
        -------
        >>> import darr
        >>> a = darr.asraggedarray('test.darr', [[1],[2,3],[4,5,6],[7,8,9,10]], overwrite=True)
        >>> print(a.readcode('matlab'))
        fileid = fopen('indices/arrayvalues.bin');
        i = fread(fileid, [2, 4], '*int64', 'ieee-le');
        fclose(fileid);
        fileid = fopen('values/arrayvalues.bin');
        v = fread(fileid, 10, '*int32', 'ieee-le');
        fclose(fileid);
        % example to read third subarray
        startindex = i(1,3) + 1; % matlab starts counting from 1
        endindex = i(2,3); % matlab has inclusive end index
        a = v(startindex:endindex);

        """
        if language not in readcodefunc.keys():
            raise ValueError(f'Language "{language}" not supported, choose '
                             f'from {readcodefunc.keys()}')
        d = self._arrayinfo  # NOTE(review): unused local; candidate for removal
        return readcodefunc[language](self)
# FIXME empty arrayiterable
def asraggedarray(path, arrayiterable, dtype=None, metadata=None,
                  accessmode='r+', overwrite=False):
    """Create a disk-based RaggedArray from an iterable of array-likes.

    Parameters
    ----------
    path: str or pathlib.Path
        Directory in which the ragged array is created.
    arrayiterable: iterable of array-like
        Subarrays to store; they may differ in length along their first
        dimension only.
    dtype: dtype specifier, optional
        Numeric type of the values; inferred from the first subarray if None.
    metadata: dict or None
        Optional metadata, saved as JSON. If None and a metadata file
        already exists at `path`, that file is removed.
    accessmode: {'r', 'r+'}
        Access mode of the returned RaggedArray.
    overwrite: bool
        Overwrite existing data at `path` if True.

    Returns
    -------
    RaggedArray
    """
    path = Path(path)
    # NOTE(review): Python 3 iterators expose '__next__', not 'next', so this
    # check normally wraps the input in a generator; harmless either way.
    if not hasattr(arrayiterable, 'next'):
        arrayiterable = (a for a in arrayiterable)
    bd = create_datadir(path=path, overwrite=overwrite)
    firstarray = np.asarray(next(arrayiterable), dtype=dtype)
    dtype = firstarray.dtype
    valuespath = bd.path.joinpath(RaggedArray._valuesdirname)
    indicespath = bd.path.joinpath(RaggedArray._indicesdirname)
    # 'values' holds all subarrays concatenated along the first axis;
    # 'indices' holds one (start, end) pair per subarray.
    valuesda = asarray(path=valuespath, array=firstarray, dtype=dtype,
                       accessmode='r+', overwrite=overwrite)
    firstindices = [[0, len(firstarray)]]
    indicesda = asarray(path=indicespath, array=firstindices,
                        dtype=np.int64, accessmode='r+',
                        overwrite=overwrite)
    valueslen = firstindices[0][1]
    indiceslen = 1
    # keep both files open while streaming in the remaining subarrays
    with valuesda._open_array(accessmode='r+') as (_, vfd), \
         indicesda._open_array(accessmode='r+') as (_, ifd):
        for array in arrayiterable:
            lenincreasevalues = valuesda._append(array, fd=vfd)
            starti, endi = valueslen, valueslen + lenincreasevalues
            lenincreaseindices = indicesda._append([[starti, endi]], fd=ifd)
            valueslen += lenincreasevalues
            indiceslen += lenincreaseindices
    valuesda._update_len(lenincrease=valueslen-firstindices[0][1])
    valuesda._update_readmetxt()
    indicesda._update_len(lenincrease=indiceslen-1)
    indicesda._update_readmetxt()
    datainfo = {}
    datainfo['len'] = len(indicesda)
    datainfo['size'] = valuesda.size
    datainfo['atom'] = valuesda.shape[1:]
    datainfo['numtype'] = valuesda._arrayinfo['numtype']
    datainfo['darrversion'] = Array._formatversion
    datainfo['darrobject'] = 'RaggedArray'
    bd._write_jsondict(filename=RaggedArray._arraydescrfilename,
                       d=datainfo, overwrite=overwrite)
    metadatapath = path.joinpath(Array._metadatafilename)
    if metadata is not None:
        bd._write_jsondict(filename=Array._metadatafilename,
                           d=metadata, overwrite=overwrite)
    elif metadatapath.exists():  # no metadata but file exists, remove it
        metadatapath.unlink()
    ra = RaggedArray(path=path, accessmode=accessmode)
    ra._update_readmetxt()
    # BUGFIX: return the instance we already built instead of constructing a
    # second, redundant RaggedArray (which re-reads all info files).
    return ra
def create_raggedarray(path, atom=(), dtype='float64', metadata=None,
                       accessmode='r+', overwrite=False):
    """Create an empty disk-based ragged array and return it.

    `atom` gives the fixed dimensions of every subarray; use `()` for a
    ragged array of 1-dimensional subarrays.
    """
    if not hasattr(atom, '__len__'):
        raise TypeError(f'shape "{atom}" is not a sequence of dimensions.\n'
                        f'If you want just a list of 1-dimensional arrays, '
                        f'use "()"')
    # Seed the store with a single zero-length subarray of the right atom
    # shape; this fixes dtype and atom on disk.
    seedarray = np.zeros([0] + list(atom), dtype=dtype)
    ra = asraggedarray(path=path, arrayiterable=[seedarray], metadata=metadata,
                       accessmode=accessmode, overwrite=overwrite)
    # The seed left one index entry behind; recreate the indices array empty
    # so the ragged array has length 0.
    create_array(path=ra._indicespath, shape=(0,2), dtype=np.int64,
                 overwrite=True)
    ra._update_arraydescr(len=0, size=0)
    return RaggedArray(ra.path, accessmode=accessmode)
# TODO, simplify explanation if subarrays are 1-dimensional
# TODO add readcode for more languages
# README boilerplate for a ragged-array data directory; readcodetxt()
# appends array-specific example read code to this text.
readmetxt = wrap('Disk-based storage of a ragged array') + '\n' + \
            wrap('====================================') + '\n\n' + \
            wrap('This directory is a data store for a numeric ragged array. '
                 'A ragged array (also called a jagged array) is a sequence '
                 'of arrays that may vary in length in their first '\
                 'dimension only. On disk, these arrays are concatenated '\
                 'along their variable dimension. The easiest way to '\
                 'access the data is to use the Darr library '\
                 '(https://pypi.org/project/darr/) in Python, as '\
                 'follows:') \
            + '\n\n' \
            + '>>> import darr\n' \
            + ">>> a = darr.RaggedArray('path_to_array_dir')\n\n" + \
            wrap("where 'path_to_array_dir' is the name of the array "
                 "directory, which is the one that also contains this README.")\
            + "\n\n" + \
            wrap('If Darr is not available, the data can also be read in '\
                 'other environments, with a little more effort, using the '\
                 'description or example code below.') + '\n\n\n' \
            + 'Description of data storage\n' \
            + '---------------------------\n' + \
            wrap('There are two subdirectories, each containing an array '
                 'stored in a self-explanatory format. See the READMEs in '
                 'the corresponding directories to find out in detail out '
                 'how to read them. Example code is provided below '
                 'for a number of analysis environments, which in many cases '
                 'is sufficient.') + '\n\n' + \
            wrap('The subdirectory "values" holds the numerical data itself, '
                 'where subarrays are simply appended along their variable '
                 'length dimension (first axis). So the number of dimensions '
                 'of the values array is one less than that of the ragged '
                 'array. A particular subarray can be be retrieved using the '
                 'appropriate start and end index along the first axis of the '
                 'values array. These indices (counting from 0) are stored in '
                 'a different 2-dimensional array in the subdirectory '
                 '"indices". The first axis of the index array represents the '
                 'sequence number of the subarray and the second axis '
                 '(length 2) represents start and (non-inclusive) end '
                 'indices to be used on the values array. To read the n-th '
                 'subarray, read the nt-h start and end indices from the '
                 'indices array and use these to read the array data from '
                 'the values array.') + '\n\n\n'
def readcodetxt(ra):
    """Returns text on how to read a Darr ragged array numeric binary data in
    various programming languages.

    Parameters
    ----------
    ra: Darr raggedarray

    Returns
    -------
    str
        README text including per-language example read code.
    """
    s = readmetxt
    # FIX: dropped extraneous f-prefixes on the constant strings below
    s += wrap(f'This ragged array has {len(ra)} subarrays. ') + '\n\n' + \
         wrap('Example code for reading the data') + '\n' + \
         wrap('=================================') + '\n\n'
    # (section heading, key into darr's readcode dispatch table)
    languages = (
        ("Python with Numpy (memmap):", "numpymemmap"),
        ("R:", "R"),
        ("Matlab:", "matlab")
    )
    for heading, language in languages:
        codetext = readcode(ra, language)
        # readcode returns None when no example is available for a language
        if codetext is not None:
            s += f"{heading}\n{'-' * len(heading)}\n{codetext}\n"
    return s
def delete_raggedarray(ra):
    """
    Delete Darr ragged array data from disk.
    Parameters
    ----------
    ra: RaggedArray, or path to data directory
        The ragged array (or file system path to one) whose on-disk data
        should be removed.
    Raises
    ------
    TypeError
        If `ra` is not (and cannot be opened as) a Darr ragged array.
    OSError
        If the array is read-only, or if its directory could not be fully
        removed because it contains files that are not part of the darr.
    """
    try:
        if not isinstance(ra, RaggedArray):
            ra = RaggedArray(ra, accessmode='r+')
    except Exception:
        # A bare `except:` would also have swallowed KeyboardInterrupt and
        # SystemExit; only report conversion failures as a TypeError.
        raise TypeError(f"'{ra}' not recognized as a Darr ragged array")
    if not ra.accessmode == 'r+':
        raise OSError('Darr ragged array is read-only; set accessmode to '
                      '"r+" to change')
    # Remove the bookkeeping files that belong to the ragged array itself.
    for fn in ra._protectedfiles:
        path = ra.path.joinpath(fn)
        if path.exists() and not path.is_dir():
            path.unlink()
    # Remove the two underlying darr arrays (values and indices).
    delete_array(ra._values)
    delete_array(ra._indices)
    try:
        ra._path.rmdir()
    except OSError as error:
        message = f"Error: could not fully delete Darr ragged array " \
                  f"directory " \
                  f"'{ra.path}'. It may contain additional files that are " \
                  f"not part of the darr. If so, these should be removed " \
                  f"manually."
        raise OSError(message) from error
def truncate_raggedarray(ra, index):
    """Truncate darr ragged array.
    Parameters
    ----------
    ra: array or str or pathlib.Path
        The darr object to be truncated or file system path to it.
    index: int
        The index along the first axis at which the darr ragged array should
        be truncated. Negative indices can be used but the resulting length of
        the truncated darr should be 0 or larger and smaller than the
        current length.
    Raises
    ------
    TypeError
        If `ra` is not a ragged array or `index` is not an integer.
    IndexError
        If truncating at `index` would not yield a valid new length
        (0 <= new length < current length).
    """
    try:
        if not isinstance(ra, RaggedArray):
            ra = RaggedArray(ra, accessmode='r+')
    except Exception:
        raise TypeError(f"'{ra}' not recognized as a darr Ragged Array")
    # Accept numpy integer scalars as well as builtin ints (resolves the
    # old FIXME); floats and other types are still rejected.
    if not isinstance(index, (int, np.integer)):
        raise TypeError(f"'index' should be an int (is {type(index)})")
    with ra._indices._open_array() as (mmap, _):
        # Slicing handles negative indices and clamps out-of-range ones.
        newlen = len(mmap[:index])
        del mmap
    ra._values.check_arraywriteable()
    ra._indices.check_arraywriteable()
    if 0 <= newlen < len(ra):
        # First truncate the index pairs, then cut the values array at the
        # end index of the last remaining subarray.
        truncate_array(ra._indices, index=newlen)
        if newlen == 0:
            vi = 0
        else:
            vi = int(ra._indices[-1][-1])
        truncate_array(ra._values, index=vi)
        ra._update_readmetxt()
        ra._update_arraydescr(len=len(ra._indices), size=ra._values.size)
    else:
        raise IndexError(f"'index' {index} would yield a ragged array of "
                         f"length {newlen}, which is invalid (current length "
                         f"is {len(ra)})")
| [
"numpy.array",
"numpy.asarray",
"pathlib.Path",
"numpy.zeros"
] | [((8311, 8321), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (8315, 8321), False, 'from pathlib import Path\n'), ((10976, 11004), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (10984, 11004), True, 'import numpy as np\n'), ((4804, 4839), 'numpy.asarray', 'np.asarray', (['array'], {'dtype': 'self.dtype'}), '(array, dtype=self.dtype)\n', (4814, 4839), True, 'import numpy as np\n'), ((6003, 6031), 'numpy.array', 'np.array', (['self[i]'], {'copy': '(True)'}), '(self[i], copy=True)\n', (6011, 6031), True, 'import numpy as np\n')] |
import numpy as np
from scipy.ndimage import affine_transform
from PIL import Image
import random
import pickle
from tensorflow import keras
import os
import math
from matplotlib import pyplot as plt
data_dir = "../Dataset/train"
nb_classes = 5004
def rgb2ycbcr(image):
    """Transfer RGB image to YCbCr.
    Args:
        image: Numpy array in range of (0, 255), shape (H, W, 3).
    Returns:
        final_img: Numpy array in float64, same (H, W, 3) shape
            (singleton dimensions are squeezed out).
    Raises:
        ValueError: An error occured when input image is not RGB mode.
    """
    # Raise ValueError as documented instead of relying on `assert`, which
    # is stripped when Python runs with -O.
    if image.shape[-1] != 3:
        raise ValueError("Input should be in RGB mode.")
    # assert np.max(image) > 1., "Input should be in range of (0, 255)"
    R, G, B = [image[:, :, i][:, :, np.newaxis] for i in range(3)]
    # ITU-R BT.601 "studio swing" conversion coefficients.
    Y = 0.257 * R + 0.504 * G + 0.098 * B + 16
    Cb = -0.148 * R - 0.291 * G + 0.439 * B + 128
    Cr = 0.439 * R - 0.368 * G - 0.071 * B + 128
    final_img = np.concatenate((Y, Cb, Cr), axis=2)
    # return np.uint8(final_img)
    return final_img.squeeze()
def build_transform(rotation, shear, height_zoom, width_zoom, height_shift,
                    width_shift):
    """
    Build a 3x3 affine transformation matrix with the specified
    characteristics.
    Args:
        rotation: rotation angle in degrees.
        shear: shear angle in degrees.
        height_zoom, width_zoom: zoom factors along each axis.
        height_shift, width_shift: translation along each axis.
    Returns:
        3x3 numpy array combining rotation, shear, inverse zoom and
        negative shift (the form expected by scipy's affine_transform).
    """
    rotation = np.deg2rad(rotation)
    shear = np.deg2rad(shear)
    rotation_matrix = np.array([[np.cos(rotation), np.sin(rotation), 0],
                                [-np.sin(rotation), np.cos(rotation), 0],
                                [0, 0, 1]])
    shear_matrix = np.array([[1, np.sin(shear), 0],
                             [0, np.cos(shear), 0],
                             [0, 0, 1]])
    zoom_matrix = np.array([[1.0 / height_zoom, 0, 0],
                            [0, 1.0 / width_zoom, 0],
                            [0, 0, 1]])
    # NOTE: the original code first built a shift matrix with positive
    # offsets and immediately overwrote it with this one; only the
    # negative-shift matrix was ever used, so the dead assignment is gone.
    shift_matrix = np.array([[1, 0, -height_shift],
                             [0, 1, -width_shift],
                             [0, 0, 1]])
    return np.dot(np.dot(rotation_matrix, shear_matrix),
                  np.dot(zoom_matrix, shift_matrix))
class WhaleSequence(keras.utils.Sequence):
    """Keras Sequence yielding batches of cropped, normalized whale images
    together with their labels. Each batch is returned as
    ([images, labels], [one-hot labels, zeros]) — i.e. the labels are fed
    both as a model input and as targets."""
    def __init__(self,
                 p2l,
                 p2bb,
                 data_dir,
                 img_shape,
                 batch_size,
                 shuffle=True,
                 augment=True):
        # p2l: dict mapping picture filename -> integer label.
        # p2bb: pandas object indexed by filename with bounding-box
        #       columns x0, y0, x1, y1 (accessed via .loc in preprocess).
        self.p2l = p2l
        self.ps = [p for p in p2l.keys()]
        self.nb_data = len(self.ps)
        if shuffle:
            random.shuffle(self.ps)
        self.img_shape = img_shape
        self.batch_size = batch_size
        self.data_dir = data_dir
        self.augment = augment  # NOTE(review): stored but never used below
        self.p2bb = p2bb
    def preprocess(self, p, crop_margin=0.05, anisotropy=2.55):
        """Load image `p`, convert to YCbCr, crop to its bounding box and
        normalize to zero mean / unit variance.
        NOTE(review): crop_margin and anisotropy are currently unused —
        the margin/anisotropy logic below is commented out."""
        img = np.array(Image.open(os.path.join(self.data_dir, p)))
        size_y, size_x = img.shape[:2]
        row = self.p2bb.loc[p]
        x0, y0, x1, y1 = row['x0'], row['y0'], row['x1'], row['y1']
        # dx = x1 - x0
        # dy = y1 - y0
        # x0 -= dx * 0.05
        # y0 -= dy * 0.05
        # y1 += dy * 0.05 + 1
        # x1 += dx * 0.05 + 1
        # if x0 < 0:
        #     x0 = 0
        # if x1 > size_x:
        #     x1 = size_x
        # if y0 < 0:
        #     y0 = 0
        # if y1 > size_y:
        #     y1 = size_y
        # dx = x1 - x0
        # dy = y1 - y0
        # if dx > dy * anisotropy:
        #     dy = 0.5 * (dx / anisotropy - dy)
        #     y0 -= dy
        #     y1 += dy
        # else:
        #     dx = 0.5 * (dy * anisotropy - dx)
        #     x0 -= dx
        #     x1 += dx
        # Generate the transformation matrix
        # # XXX Why???
        # trans = np.array([[1, 0, -0.5 * self.img_shape[0]],
        #                   [0, 1, -0.5 * self.img_shape[1]], [0, 0, 1]])
        # trans = np.dot(
        #     np.array([[(y1 - y0) / self.img_shape[0], 0, 0],
        #               [0, (x1 - x0) / self.img_shape[1], 0], [0, 0, 1]]),
        #     trans)
        # if self.augment:
        #     trans = np.dot(
        #         build_transform(
        #             random.uniform(-5, 5), random.uniform(-5, 5),
        #             random.uniform(0.8, 1.0), random.uniform(0.8, 1.0),
        #             random.uniform(-0.05 * (y1 - y0), 0.05 * (y1 - y0)),
        #             random.uniform(-0.05 * (x1 - x0), 0.05 * (x1 - x0))),
        #         trans)
        #     trans = np.dot(
        #         np.array([[1, 0, 0.5 * (y1 + y0)], [0, 1, 0.5 * (x1 + x0)],
        #                   [0, 0, 1]]), trans)
        # Read the image, transform to black and white and comvert to numpy array
        if len(img.shape) == 3:
            img = rgb2ycbcr(img)
        else:
            # Grayscale image: replicate the single channel three times.
            img = np.concatenate([img[:, :, np.newaxis] for _ in range(3)],
                                 axis=-1)
        img = np.array(img, dtype="float32")
        # # Apply affine transformation
        # matrix = trans[:2, :2]
        # offset = trans[:2, 2]
        # img = affine_transform(
        #     img,
        #     matrix,
        #     offset,
        #     output_shape=self.img_shape[:-1],
        #     order=1,
        #     mode='constant',
        #     cval=np.average(img))
        # print([x0, x1, y0, y1])
        # Crop to the (integer-rounded) bounding box.
        img = img[int(y0):int(y1), int(x0):int(x1), :]
        # img = img.reshape(self.img_shape)
        # Normalize to zero mean and unit variance
        img -= np.mean(img, keepdims=True)
        img /= np.std(img, keepdims=True) + keras.backend.epsilon()
        # NOTE(review): debug plotting left enabled — this blocks on every
        # sample; presumably should be removed or gated before training.
        plt.imshow(img.squeeze())
        plt.show()
        return img
    def __len__(self):
        # Number of batches per epoch (last batch may be smaller).
        return math.ceil(len(self.ps) / self.batch_size)
    def __getitem__(self, idx):
        """Return batch `idx` as ([images, labels], [one-hot, zeros])."""
        batch_p = self.ps[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_data = []
        batch_label = []
        for p in batch_p:
            # NOTE(review): self.img_shape is passed positionally into the
            # crop_margin parameter of preprocess — looks unintended,
            # though crop_margin is currently unused there. Verify.
            data = self.preprocess(p, self.img_shape)
            label = self.p2l[p]
            batch_data.append(data)
            batch_label.append(label)
        batch_data = np.array(batch_data)
        batch_label = np.array(batch_label)
        curr_batch_size = batch_label.shape[0]
        return ([batch_data, batch_label], [
            keras.utils.to_categorical(batch_label, nb_classes),
            np.zeros(batch_label.shape)
        ])
    def on_epoch_end(self):
        """Method called at the end of every epoch.
        """
        pass
class MyDirectoryIterator(keras.preprocessing.image.DirectoryIterator):
    """DirectoryIterator that yields ([images, labels],
    [one-hot labels, zeros]) batches instead of the default (images,
    labels), so the label can be fed to the model as an input as well."""
    def _get_batches_of_transformed_samples(self, index_array):
        # Pre-allocate the image batch; image_shape comes from the parent
        # DirectoryIterator configuration.
        batch_x = np.zeros(
            (len(index_array), ) + self.image_shape, dtype=self.dtype)
        # build batch of image data
        for i, j in enumerate(index_array):
            fname = self.filenames[j]
            img = keras.preprocessing.image.load_img(
                os.path.join(self.directory, fname),
                color_mode=self.color_mode,
                target_size=self.target_size,
                interpolation=self.interpolation)
            x = keras.preprocessing.image.img_to_array(
                img, data_format=self.data_format)
            # Pillow images should be closed after `load_img`,
            # but not PIL images.
            if hasattr(img, 'close'):
                img.close()
            # Random augmentation + standardization as configured on the
            # attached ImageDataGenerator.
            params = self.image_data_generator.get_random_transform(x.shape)
            x = self.image_data_generator.apply_transform(x, params)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = keras.preprocessing.image.array_to_img(
                    batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e7),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'input':
            batch_y = batch_x.copy()
        elif self.class_mode == 'sparse':
            batch_y = self.classes[index_array]
        elif self.class_mode == 'binary':
            batch_y = self.classes[index_array].astype(self.dtype)
        elif self.class_mode == 'categorical':
            batch_y = np.zeros((len(batch_x), self.num_classes),
                               dtype=self.dtype)
            for i, label in enumerate(self.classes[index_array]):
                batch_y[i, label] = 1.
        else:
            # class_mode None: images only, keras default behavior.
            return batch_x
        # Second output list: one-hot labels over the global nb_classes and
        # a zeros placeholder (for a loss output that needs no target).
        return [batch_x, batch_y], [
            keras.utils.to_categorical(batch_y, nb_classes),
            np.zeros(batch_y.shape)
        ]
class ImageDataLabelGenerator(keras.preprocessing.image.ImageDataGenerator):
    """ImageDataGenerator whose directory flow uses MyDirectoryIterator,
    so batches carry the labels both as model inputs and as
    (one-hot, zero-placeholder) targets."""
    def flow_from_directory(self,
                            directory,
                            target_size=(256, 256),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest'):
        # Same contract as the keras base method, but the iterator class
        # is swapped for our label-aware one.
        iterator_kwargs = dict(
            target_size=target_size,
            color_mode=color_mode,
            classes=classes,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation,
        )
        return MyDirectoryIterator(directory, self, **iterator_kwargs)
if __name__ == "__main__":
    import pickle
    from pandas import read_csv
    # NOTE(review): WhaleSequence and Vgg16 are imported but unused below.
    from data import WhaleSequence
    from backbones import Vgg16
    # Load filename -> label mappings for the train/validation splits.
    with open("../Dataset/metadata/p2l_train.pickle", 'rb') as f:
        p2l_train = pickle.load(f)
    with open("../Dataset/metadata/p2l_valid.pickle", 'rb') as f:
        p2l_valid = pickle.load(f)
    # Bounding boxes indexed by image filename.
    p2bb = read_csv("../Dataset/metadata/bounding_boxes.csv").set_index(
        "Image")
    # Augmenting generator; rgb2ycbcr is applied to every image before
    # rescaling, and 10% of the data is held out for validation.
    train_data_gen = ImageDataLabelGenerator(
        samplewise_center=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        zca_epsilon=1e-6,
        rotation_range=16,
        width_shift_range=0.2,
        height_shift_range=0.1,
        zoom_range=0.2,
        fill_mode='reflect',
        horizontal_flip=True,
        vertical_flip=False,
        preprocessing_function=rgb2ycbcr,
        rescale=1. / 255,
        validation_split=0.1)
    train_gen = train_data_gen.flow_from_directory(
        "../Dataset/Train",
        target_size=(192, 384),
        color_mode="rgb",
        class_mode="sparse",
        batch_size=16,
        shuffle=True,
        interpolation="bicubic",
        subset='training')
    valid_gen = train_data_gen.flow_from_directory(
        "../Dataset/Train",
        target_size=(192, 384),
        color_mode='rgb',
        class_mode='sparse',
        batch_size=10,
        subset="validation",
        shuffle=True,
        interpolation="bicubic")
    # Smoke test: inspect shapes and display the Y channel of the first
    # image of each of the first ~11 batches.
    for i, tr in enumerate(train_gen):
        data, label = tr[0]
        label_onehot, losses_v = tr[1]
        print(data.shape)
        print(type(label[0]), label.shape)
        print(label_onehot.shape)
        print(losses_v.shape)
        plt.imshow(data[0][:,:,0].squeeze())
        plt.title(label[0])
        plt.show()
        if i > 10:
            break
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"random.shuffle",
"numpy.mean",
"pickle.load",
"numpy.sin",
"numpy.random.randint",
"tensorflow.keras.preprocessing.image.array_to_img",
"tensorflow.keras.backend.epsilon",
"os.path.join",
"numpy.std",
"matplotlib.pyplot.show",
"tensorflow.keras.... | [((1004, 1039), 'numpy.concatenate', 'np.concatenate', (['(Y, Cb, Cr)'], {'axis': '(2)'}), '((Y, Cb, Cr), axis=2)\n', (1018, 1039), True, 'import numpy as np\n'), ((1317, 1337), 'numpy.deg2rad', 'np.deg2rad', (['rotation'], {}), '(rotation)\n', (1327, 1337), True, 'import numpy as np\n'), ((1350, 1367), 'numpy.deg2rad', 'np.deg2rad', (['shear'], {}), '(shear)\n', (1360, 1367), True, 'import numpy as np\n'), ((1612, 1676), 'numpy.array', 'np.array', (['[[1, 0, height_shift], [0, 1, width_shift], [0, 0, 1]]'], {}), '([[1, 0, height_shift], [0, 1, width_shift], [0, 0, 1]])\n', (1620, 1676), True, 'import numpy as np\n'), ((1840, 1914), 'numpy.array', 'np.array', (['[[1.0 / height_zoom, 0, 0], [0, 1.0 / width_zoom, 0], [0, 0, 1]]'], {}), '([[1.0 / height_zoom, 0, 0], [0, 1.0 / width_zoom, 0], [0, 0, 1]])\n', (1848, 1914), True, 'import numpy as np\n'), ((1962, 2028), 'numpy.array', 'np.array', (['[[1, 0, -height_shift], [0, 1, -width_shift], [0, 0, 1]]'], {}), '([[1, 0, -height_shift], [0, 1, -width_shift], [0, 0, 1]])\n', (1970, 2028), True, 'import numpy as np\n'), ((2085, 2122), 'numpy.dot', 'np.dot', (['rotation_matrix', 'shear_matrix'], {}), '(rotation_matrix, shear_matrix)\n', (2091, 2122), True, 'import numpy as np\n'), ((2124, 2157), 'numpy.dot', 'np.dot', (['zoom_matrix', 'shift_matrix'], {}), '(zoom_matrix, shift_matrix)\n', (2130, 2157), True, 'import numpy as np\n'), ((5509, 5536), 'numpy.mean', 'np.mean', (['img'], {'keepdims': '(True)'}), '(img, keepdims=True)\n', (5516, 5536), True, 'import numpy as np\n'), ((5647, 5657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5655, 5657), True, 'from matplotlib import pyplot as plt\n'), ((6124, 6144), 'numpy.array', 'np.array', (['batch_data'], {}), '(batch_data)\n', (6132, 6144), True, 'import numpy as np\n'), ((6167, 6188), 'numpy.array', 'np.array', (['batch_label'], {}), '(batch_label)\n', (6175, 6188), True, 'import numpy as np\n'), ((10479, 10493), 'pickle.load', 
'pickle.load', (['f'], {}), '(f)\n', (10490, 10493), False, 'import pickle\n'), ((10580, 10594), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10591, 10594), False, 'import pickle\n'), ((12011, 12030), 'matplotlib.pyplot.title', 'plt.title', (['label[0]'], {}), '(label[0])\n', (12020, 12030), True, 'from matplotlib import pyplot as plt\n'), ((12039, 12049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12047, 12049), True, 'from matplotlib import pyplot as plt\n'), ((2607, 2630), 'random.shuffle', 'random.shuffle', (['self.ps'], {}), '(self.ps)\n', (2621, 2630), False, 'import random\n'), ((4937, 4967), 'numpy.array', 'np.array', (['img'], {'dtype': '"""float32"""'}), "(img, dtype='float32')\n", (4945, 4967), True, 'import numpy as np\n'), ((5552, 5578), 'numpy.std', 'np.std', (['img'], {'keepdims': '(True)'}), '(img, keepdims=True)\n', (5558, 5578), True, 'import numpy as np\n'), ((5581, 5604), 'tensorflow.keras.backend.epsilon', 'keras.backend.epsilon', ([], {}), '()\n', (5602, 5604), False, 'from tensorflow import keras\n'), ((7121, 7194), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['img'], {'data_format': 'self.data_format'}), '(img, data_format=self.data_format)\n', (7159, 7194), False, 'from tensorflow import keras\n'), ((10606, 10656), 'pandas.read_csv', 'read_csv', (['"""../Dataset/metadata/bounding_boxes.csv"""'], {}), "('../Dataset/metadata/bounding_boxes.csv')\n", (10614, 10656), False, 'from pandas import read_csv\n'), ((1401, 1417), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (1407, 1417), True, 'import numpy as np\n'), ((1452, 1468), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (1458, 1468), True, 'import numpy as np\n'), ((1559, 1575), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (1565, 1575), True, 'import numpy as np\n'), ((1739, 1752), 'numpy.sin', 'np.sin', (['shear'], {}), '(shear)\n', (1745, 1752), True, 'import numpy as np\n'), 
((1762, 1775), 'numpy.cos', 'np.cos', (['shear'], {}), '(shear)\n', (1768, 1775), True, 'import numpy as np\n'), ((2892, 2922), 'os.path.join', 'os.path.join', (['self.data_dir', 'p'], {}), '(self.data_dir, p)\n', (2904, 2922), False, 'import os\n'), ((6293, 6344), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['batch_label', 'nb_classes'], {}), '(batch_label, nb_classes)\n', (6319, 6344), False, 'from tensorflow import keras\n'), ((6358, 6385), 'numpy.zeros', 'np.zeros', (['batch_label.shape'], {}), '(batch_label.shape)\n', (6366, 6385), True, 'import numpy as np\n'), ((6928, 6963), 'os.path.join', 'os.path.join', (['self.directory', 'fname'], {}), '(self.directory, fname)\n', (6940, 6963), False, 'import os\n'), ((7778, 7863), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['batch_x[i]', 'self.data_format'], {'scale': '(True)'}), '(batch_x[i], self.data_format, scale=True\n )\n', (7816, 7863), False, 'from tensorflow import keras\n'), ((8842, 8889), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['batch_y', 'nb_classes'], {}), '(batch_y, nb_classes)\n', (8868, 8889), False, 'from tensorflow import keras\n'), ((8903, 8926), 'numpy.zeros', 'np.zeros', (['batch_y.shape'], {}), '(batch_y.shape)\n', (8911, 8926), True, 'import numpy as np\n'), ((1508, 1524), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (1514, 1524), True, 'import numpy as np\n'), ((8140, 8177), 'os.path.join', 'os.path.join', (['self.save_to_dir', 'fname'], {}), '(self.save_to_dir, fname)\n', (8152, 8177), False, 'import os\n'), ((8046, 8075), 'numpy.random.randint', 'np.random.randint', (['(10000000.0)'], {}), '(10000000.0)\n', (8063, 8075), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
def extract_repair_sequence(seq):
    """
    Extract the sequence of node repairs.
    Expects `seq` to expose a `.sort()` that returns an indexable
    (values, indices) pair (e.g. a torch tensor). Returns the sorted
    nonzero repair-order values and the node indices they belong to.
    """
    sorted_pair = seq.sort()
    order_values, node_indices = sorted_pair[0], sorted_pair[1]
    nonzero = order_values != 0
    return order_values[nonzero], node_indices[nonzero]
class Trainer:
    """
    Training the NN for the infrastructure data.
    Builds a 4-hidden-layer dense network mapping X (features) to y
    (per-node sigmoid outputs) and provides train/test helpers.
    """
    def __init__(self, X, y, num_epoch, num_train, learning_rate, batch_size):
        # num_train is stored but not used by train() below.
        self.num_epoch = num_epoch
        self.num_train = num_train
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.X = X
        self.y = y
        # Output width follows y; sigmoid keeps each output in (0, 1).
        self.model = tf.keras.models.Sequential([tf.keras.layers.Dense(100, activation='relu',
                                                          input_shape=(self.X.shape[1],)),
                                    tf.keras.layers.Dense(100, activation='relu'),
                                    tf.keras.layers.Dense(100, activation='relu'),
                                    tf.keras.layers.Dense(100, activation='relu'),
                                    tf.keras.layers.Dense(units=self.y.shape[1], activation="sigmoid")])
    def train(self):
        """Fit the model on a train split, plot train/validation MAE and
        return the fitted model."""
        # Create list to collect loss for plot
        # NOTE(review): only one point is ever appended to each list, so
        # the plot shows a single value, not a curve over epochs.
        train_plot = []
        valid_plot = []
        # Training and validation split
        X_train, X_valid, y_train, y_valid = train_test_split(self.X, self.y, random_state=0)
        # Choose Optimizer
        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        # Choose Loss Function
        loss_func = tf.keras.losses.MeanAbsoluteError()
        # Compile and fit the model
        self.model.compile(optimizer=optimizer, loss=loss_func, metrics=['accuracy'])
        self.model.fit(X_train, y_train, epochs=self.num_epoch, batch_size=self.batch_size)
        # Run the updated NN model for train data
        train_predict = self.model(X_train).numpy()
        train_loss = loss_func(train_predict, y_train)
        # Run the updated NN model for validation data
        valid_predict = self.model(X_valid).numpy()
        valid_loss = loss_func(valid_predict, y_valid).numpy()
        # Append loss values for plot
        train_plot.append(train_loss.item())
        valid_plot.append(valid_loss.item())
        plt.figure()
        plt.plot(train_plot)
        plt.plot(valid_plot)
        plt.ylim(0, 0.003)
        plt.show()
        return self.model
    def test_data(self, X_test, y_test, accuracy):
        """Per-sample percentage of nonzero targets predicted within
        `accuracy` (after scaling differences by 21 — presumably the
        value range; verify)."""
        accuracy_index = []
        n, m = X_test.shape
        # NOTE(review): .detach() is a torch API; tf tensors expose
        # .numpy() instead — this line likely fails at runtime. Verify.
        y_predict = self.model(X_test).detach()
        for i in range(n):
            counter = 0
            diff = tf.math.abs(21 * (y_predict[i].reshape(-1, 1) - y_test[i, :].reshape(-1, 1)))
            for j in range(m):
                if diff[j] <= accuracy and y_test[i, j].item() != 0:
                    counter += 1
            total = np.count_nonzero(y_test[i, :])
            if total == 0:
                # No nonzero targets: count the sample as fully accurate.
                accuracy_index.append(100)
            else:
                accuracy_index.append(counter / total * 100)
        print(f"Test accuracy {accuracy} was successfully done!")
        return accuracy_index
| [
"matplotlib.pyplot.show",
"numpy.count_nonzero",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.losses.MeanAbsoluteError",
"matplotlib.pyplot.figure",
"tensorflow.keras.optimizers.Adam"
] | [((1556, 1604), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'random_state': '(0)'}), '(self.X, self.y, random_state=0)\n', (1572, 1604), False, 'from sklearn.model_selection import train_test_split\n'), ((1653, 1711), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (1677, 1711), True, 'import tensorflow as tf\n'), ((1763, 1798), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {}), '()\n', (1796, 1798), True, 'import tensorflow as tf\n'), ((2478, 2490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2488, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2499, 2519), 'matplotlib.pyplot.plot', 'plt.plot', (['train_plot'], {}), '(train_plot)\n', (2507, 2519), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2548), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_plot'], {}), '(valid_plot)\n', (2536, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2557, 2575), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.003)'], {}), '(0, 0.003)\n', (2565, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2592, 2594), True, 'import matplotlib.pyplot as plt\n'), ((3079, 3109), 'numpy.count_nonzero', 'np.count_nonzero', (['y_test[i, :]'], {}), '(y_test[i, :])\n', (3095, 3109), True, 'import numpy as np\n'), ((797, 874), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""', 'input_shape': '(self.X.shape[1],)'}), "(100, activation='relu', input_shape=(self.X.shape[1],))\n", (818, 874), True, 'import tensorflow as tf\n'), ((996, 1041), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (1017, 1041), True, 'import tensorflow as tf\n'), ((1092, 1137), 'tensorflow.keras.layers.Dense', 
'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (1113, 1137), True, 'import tensorflow as tf\n'), ((1188, 1233), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (1209, 1233), True, 'import tensorflow as tf\n'), ((1284, 1350), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.y.shape[1]', 'activation': '"""sigmoid"""'}), "(units=self.y.shape[1], activation='sigmoid')\n", (1305, 1350), True, 'import tensorflow as tf\n')] |
from pymaster import nmtlib as lib
import numpy as np
class NmtCovarianceWorkspace(object):
    """
    Stores the mode-coupling coefficients needed to estimate the Gaussian
    covariance matrix of power spectra under the Efstathiou approximation
    (astro-ph/0307515). A freshly constructed object is empty; populate it
    with compute_coupling_coefficients() or read_from().
    """
    def __init__(self):
        self.wsp = None

    def _free(self):
        # Release the underlying C workspace, if any.
        if self.wsp is not None:
            lib.covar_workspace_free(self.wsp)
            self.wsp = None

    def __del__(self):
        self._free()

    def read_from(self, fname):
        """
        Load a previously saved NmtCovarianceWorkspace from disk (internal
        binary format).
        :param str fname: input file name
        """
        self._free()
        self.wsp = lib.read_covar_workspace(fname)

    def compute_coupling_coefficients(self, wa, wb):
        """
        Compute the coupling coefficients of the Gaussian covariance
        between the power spectra obtained with wa and wb (two
        NmtWorkspace objects).
        :param NmtWorkspace wa,wb: workspaces used to compute the two
            power spectra whose covariance matrix you want.
        """
        # Any previously stored coefficients are discarded first.
        self._free()
        if wa.wsp.nside != wb.wsp.nside:
            raise ValueError("Everything should have the same resolution!")
        if (wa.wsp.ncls != 1) or (wb.wsp.ncls != 1):
            raise ValueError("Gaussian covariances only supported for spin-0 fields")
        self.wsp = lib.covar_workspace_init_py(wa.wsp, wb.wsp)

    def write_to(self, fname):
        """
        Save this workspace to disk (internal binary format).
        :param str fname: output file name
        """
        if self.wsp is None:
            raise ValueError("Must initialize workspace before writing")
        lib.write_covar_workspace(self.wsp, fname)
class NmtCovarianceWorkspaceFlat(object):
    """
    Stores the mode-coupling coefficients needed to estimate the Gaussian
    covariance matrix of flat-sky power spectra, following a flat-sky
    version of the Efstathiou approximation (astro-ph/0307515). A freshly
    constructed object is empty; populate it with
    compute_coupling_coefficients() or read_from().
    """
    def __init__(self):
        self.wsp = None

    def _free(self):
        # Release the underlying C workspace, if any.
        if self.wsp is not None:
            lib.covar_workspace_flat_free(self.wsp)
            self.wsp = None

    def __del__(self):
        self._free()

    def read_from(self, fname):
        """
        Load a previously saved NmtCovarianceWorkspaceFlat from disk
        (internal binary format).
        :param str fname: input file name
        """
        self._free()
        self.wsp = lib.read_covar_workspace_flat(fname)

    def compute_coupling_coefficients(self, wa, wb):
        """
        Compute the coupling coefficients of the Gaussian covariance
        between the power spectra obtained with wa and wb (two
        NmtWorkspaceFlat objects).
        :param NmtWorkspaceFlat wa,wb: workspaces used to compute the two
            power spectra whose covariance matrix you want.
        """
        if (wa.wsp.fs.nx != wb.wsp.fs.nx) or (wa.wsp.fs.ny != wb.wsp.fs.ny):
            raise ValueError("Everything should have the same resolution!")
        if (wa.wsp.ncls != 1) or (wb.wsp.ncls != 1):
            raise ValueError("Gaussian covariances only supported for spin-0 fields")
        # Validation passed: discard any previous coefficients and rebuild.
        self._free()
        self.wsp = lib.covar_workspace_flat_init_py(wa.wsp, wb.wsp)

    def write_to(self, fname):
        """
        Save this workspace to disk (internal binary format).
        :param str fname: output file name
        """
        if self.wsp is None:
            raise ValueError("Must initialize workspace before writing")
        lib.write_covar_workspace_flat(self.wsp, fname)
def gaussian_covariance(cw, cla1b1, cla1b2, cla2b1, cla2b2):
    """
    Compute the Gaussian covariance matrix for power spectra from the
    coupling coefficients precomputed in cw (an NmtCovarianceWorkspace
    initialized from the two NmtWorkspace objects, wa and wb, used for the
    spectra of fields a1, a2 and b1, b2). Each claXbY is a prediction for
    the cross-power spectrum between fields aX and bY, defined for all
    ells <= 3*nside of the correlated fields.
    :param NmtCovarianceWorkspace cw: precomputed coupling coefficients.
    :param cla1b1: prediction for the cross-power spectrum between a1 and b1.
    :param cla1b2: prediction for the cross-power spectrum between a1 and b2.
    :param cla2b1: prediction for the cross-power spectrum between a2 and b1.
    :param cla2b2: prediction for the cross-power spectrum between a2 and b2.
    """
    n_ell = cw.wsp.lmax_a + 1
    for cl in (cla1b1, cla1b2, cla2b1, cla2b2):
        if len(cl) != n_ell:
            raise ValueError("Input C_ls have a weird length")
    len_a = cw.wsp.ncls_a * cw.wsp.bin_a.n_bands
    len_b = cw.wsp.ncls_b * cw.wsp.bin_b.n_bands
    # The C layer returns the covariance flattened; reshape to a matrix.
    covar1d = lib.comp_gaussian_covariance(cw.wsp, cla1b1, cla1b2,
                                            cla2b1, cla2b2, len_a * len_b)
    return np.reshape(covar1d, [len_a, len_b])
def gaussian_covariance_flat(cw, larr, cla1b1, cla1b2, cla2b1, cla2b2):
    """
    Compute the Gaussian covariance matrix for flat-sky power spectra from
    the coupling coefficients precomputed in cw (an
    NmtCovarianceWorkspaceFlat initialized from the two NmtWorkspaceFlat
    objects, wa and wb, used for the spectra of fields a1, a2 and b1, b2).
    Each claXbY is a prediction for the cross-power spectrum between
    fields aX and bY, sampled at the multipoles given in larr.
    :param NmtCovarianceWorkspaceFlat cw: precomputed coupling coefficients.
    :param larr: values of ell at which the following power spectra are computed.
    :param cla1b1: prediction for the cross-power spectrum between a1 and b1.
    :param cla1b2: prediction for the cross-power spectrum between a1 and b2.
    :param cla2b1: prediction for the cross-power spectrum between a2 and b1.
    :param cla2b2: prediction for the cross-power spectrum between a2 and b2.
    """
    n_ell = len(larr)
    for cl in (cla1b1, cla1b2, cla2b1, cla2b2):
        if len(cl) != n_ell:
            raise ValueError("Input C_ls have a weird length")
    len_a = cw.wsp.ncls_a * cw.wsp.bin.n_bands
    len_b = cw.wsp.ncls_b * cw.wsp.bin.n_bands
    # The C layer returns the covariance flattened; reshape to a matrix.
    covar1d = lib.comp_gaussian_covariance_flat(cw.wsp, larr, cla1b1, cla1b2,
                                                 cla2b1, cla2b2, len_a * len_b)
    return np.reshape(covar1d, [len_a, len_b])
| [
"pymaster.nmtlib.read_covar_workspace_flat",
"pymaster.nmtlib.write_covar_workspace_flat",
"pymaster.nmtlib.write_covar_workspace",
"pymaster.nmtlib.covar_workspace_free",
"pymaster.nmtlib.covar_workspace_init_py",
"pymaster.nmtlib.read_covar_workspace",
"pymaster.nmtlib.comp_gaussian_covariance_flat",
... | [((5853, 5940), 'pymaster.nmtlib.comp_gaussian_covariance', 'lib.comp_gaussian_covariance', (['cw.wsp', 'cla1b1', 'cla1b2', 'cla2b1', 'cla2b2', '(len_a * len_b)'], {}), '(cw.wsp, cla1b1, cla1b2, cla2b1, cla2b2, len_a *\n len_b)\n', (5881, 5940), True, 'from pymaster import nmtlib as lib\n'), ((5940, 5975), 'numpy.reshape', 'np.reshape', (['covar1d', '[len_a, len_b]'], {}), '(covar1d, [len_a, len_b])\n', (5950, 5975), True, 'import numpy as np\n'), ((7596, 7694), 'pymaster.nmtlib.comp_gaussian_covariance_flat', 'lib.comp_gaussian_covariance_flat', (['cw.wsp', 'larr', 'cla1b1', 'cla1b2', 'cla2b1', 'cla2b2', '(len_a * len_b)'], {}), '(cw.wsp, larr, cla1b1, cla1b2, cla2b1,\n cla2b2, len_a * len_b)\n', (7629, 7694), True, 'from pymaster import nmtlib as lib\n'), ((7693, 7728), 'numpy.reshape', 'np.reshape', (['covar1d', '[len_a, len_b]'], {}), '(covar1d, [len_a, len_b])\n', (7703, 7728), True, 'import numpy as np\n'), ((987, 1018), 'pymaster.nmtlib.read_covar_workspace', 'lib.read_covar_workspace', (['fname'], {}), '(fname)\n', (1011, 1018), True, 'from pymaster import nmtlib as lib\n'), ((1775, 1818), 'pymaster.nmtlib.covar_workspace_init_py', 'lib.covar_workspace_init_py', (['wa.wsp', 'wb.wsp'], {}), '(wa.wsp, wb.wsp)\n', (1802, 1818), True, 'from pymaster import nmtlib as lib\n'), ((2146, 2188), 'pymaster.nmtlib.write_covar_workspace', 'lib.write_covar_workspace', (['self.wsp', 'fname'], {}), '(self.wsp, fname)\n', (2171, 2188), True, 'from pymaster import nmtlib as lib\n'), ((3162, 3198), 'pymaster.nmtlib.read_covar_workspace_flat', 'lib.read_covar_workspace_flat', (['fname'], {}), '(fname)\n', (3191, 3198), True, 'from pymaster import nmtlib as lib\n'), ((3977, 4025), 'pymaster.nmtlib.covar_workspace_flat_init_py', 'lib.covar_workspace_flat_init_py', (['wa.wsp', 'wb.wsp'], {}), '(wa.wsp, wb.wsp)\n', (4009, 4025), True, 'from pymaster import nmtlib as lib\n'), ((4357, 4404), 'pymaster.nmtlib.write_covar_workspace_flat', 'lib.write_covar_workspace_flat', 
(['self.wsp', 'fname'], {}), '(self.wsp, fname)\n', (4387, 4404), True, 'from pymaster import nmtlib as lib\n'), ((584, 618), 'pymaster.nmtlib.covar_workspace_free', 'lib.covar_workspace_free', (['self.wsp'], {}), '(self.wsp)\n', (608, 618), True, 'from pymaster import nmtlib as lib\n'), ((909, 943), 'pymaster.nmtlib.covar_workspace_free', 'lib.covar_workspace_free', (['self.wsp'], {}), '(self.wsp)\n', (933, 943), True, 'from pymaster import nmtlib as lib\n'), ((1418, 1452), 'pymaster.nmtlib.covar_workspace_free', 'lib.covar_workspace_free', (['self.wsp'], {}), '(self.wsp)\n', (1442, 1452), True, 'from pymaster import nmtlib as lib\n'), ((2745, 2784), 'pymaster.nmtlib.covar_workspace_flat_free', 'lib.covar_workspace_flat_free', (['self.wsp'], {}), '(self.wsp)\n', (2774, 2784), True, 'from pymaster import nmtlib as lib\n'), ((3079, 3118), 'pymaster.nmtlib.covar_workspace_flat_free', 'lib.covar_workspace_flat_free', (['self.wsp'], {}), '(self.wsp)\n', (3108, 3118), True, 'from pymaster import nmtlib as lib\n'), ((3894, 3933), 'pymaster.nmtlib.covar_workspace_flat_free', 'lib.covar_workspace_flat_free', (['self.wsp'], {}), '(self.wsp)\n', (3923, 3933), True, 'from pymaster import nmtlib as lib\n')] |
import numpy as np
import matplotlib.pyplot as plt
def merge_bboxes(bboxes):
    """Merge a list of (x, y, w, h) boxes into the single box enclosing them.

    Returns the union as [x, y, w, h]. With an empty input the corners
    never move off +/-inf, matching the original accumulator behaviour.
    """
    left = top = np.inf
    right = bottom = -np.inf
    for box in bboxes:
        left = min(left, box[0])
        top = min(top, box[1])
        right = max(right, box[0] + box[2])
        bottom = max(bottom, box[1] + box[3])
    return [left, top, right - left, bottom - top]
def check_clear(ann, vis=False, debug=False):
    """Heuristic check that a COCO-style person annotation is "clear".

    The person passes when: at least one head keypoint (rows 1-4,
    presumably eyes/ears in COCO order) and at least one ankle keypoint
    (rows 15-16) are annotated; the topmost annotated head point is the
    topmost annotated keypoint overall; the lowest annotated ankle is the
    lowest annotated keypoint overall; and each of the two groups has a
    point with visibility flag 1 or 2.
    """
    kps = np.asarray(ann['keypoints']).reshape(-1, 3)
    if debug:
        # Dump every keypoint row prefixed with its index.
        print(np.hstack((np.arange(kps.shape[0]).reshape((-1, 1)), kps)))
    if vis:
        # Overlay keypoints on the module-global image `I`.
        plt.figure(figsize=(20, 20))
        plt.imshow(I)
        plt.axis('off')
        for row, point in enumerate(kps):
            plt.scatter(point[0], point[1], )
            plt.text(point[0], point[1], '%d' % row, weight='bold')
    head_ys = kps[1:5, 1]
    head_ys_valid = head_ys[head_ys != 0]
    foot_ys = kps[15:17, 1]
    foot_ys_valid = foot_ys[foot_ys != 0]
    if head_ys_valid.size == 0 or foot_ys_valid.size == 0:
        return False
    top_idx = np.argmin(head_ys_valid)    # index into the valid head ys
    bot_idx = np.argmax(foot_ys_valid)    # index into the valid ankle ys
    annotated = kps[kps[:, 1] != 0, :]      # every annotated keypoint
    if debug:
        print(head_ys_valid[top_idx], np.min(annotated[:, 1]),
              kps[15:17, 1][bot_idx], np.max(annotated[:, 1]),
              kps[1:5, 2], kps[15:17, 2])
    head_on_top = head_ys_valid[top_idx] == np.min(annotated[:, 1])
    foot_at_bottom = foot_ys_valid[bot_idx] == np.max(annotated[:, 1])
    head_labelled = np.any((kps[1:5, 2] == 1) | (kps[1:5, 2] == 2))
    foot_labelled = np.any((kps[15:17, 2] == 1) | (kps[15:17, 2] == 2))
    return head_on_top and foot_at_bottom and head_labelled and foot_labelled
def check_valid_surface(cats):
    """Decide whether a (super-category, sub-category) pair is a valid surface.

    Rule encoding: '' accepts every sub-category of that super-category,
    ['-', ...] accepts everything except the listed sub-categories, and
    ['+', ...] accepts only the listed sub-categories.
    """
    rules = {'water': '', 'ground': '', 'solid': '',
             'vegetation': ['-', 'flower', 'tree'],
             'floor': '', 'plant': ['+', 'grass']}
    rule = rules.get(cats[0])
    if rule is None:
        # Super-category not in the whitelist at all.
        return False
    if rule == '':
        return True
    mode, names = rule[0], rule[1:]
    if mode == '-':
        return cats[1] not in names
    if mode == '+':
        return cats[1] in names
    return False
| [
"numpy.argmax",
"matplotlib.pyplot.imshow",
"numpy.asarray",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"numpy.argmin",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.max",
"numpy.logical_or",
"numpy.arange"
] | [((1206, 1230), 'numpy.argmin', 'np.argmin', (['eyes_ys_valid'], {}), '(eyes_ys_valid)\n', (1215, 1230), True, 'import numpy as np\n'), ((1265, 1291), 'numpy.argmax', 'np.argmax', (['ankles_ys_valid'], {}), '(ankles_ys_valid)\n', (1274, 1291), True, 'import numpy as np\n'), ((666, 694), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (676, 694), True, 'import matplotlib.pyplot as plt\n'), ((703, 716), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I'], {}), '(I)\n', (713, 716), True, 'import matplotlib.pyplot as plt\n'), ((718, 733), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (726, 733), True, 'import matplotlib.pyplot as plt\n'), ((513, 541), 'numpy.asarray', 'np.asarray', (["ann['keypoints']"], {}), "(ann['keypoints'])\n", (523, 541), True, 'import numpy as np\n'), ((785, 810), 'matplotlib.pyplot.scatter', 'plt.scatter', (['kp[0]', 'kp[1]'], {}), '(kp[0], kp[1])\n', (796, 810), True, 'import matplotlib.pyplot as plt\n'), ((825, 874), 'matplotlib.pyplot.text', 'plt.text', (['kp[0]', 'kp[1]', "('%d' % idx)"], {'weight': '"""bold"""'}), "(kp[0], kp[1], '%d' % idx, weight='bold')\n", (833, 874), True, 'import matplotlib.pyplot as plt\n'), ((1405, 1428), 'numpy.min', 'np.min', (['kps_valid[:, 1]'], {}), '(kps_valid[:, 1])\n', (1411, 1428), True, 'import numpy as np\n'), ((1463, 1486), 'numpy.max', 'np.max', (['kps_valid[:, 1]'], {}), '(kps_valid[:, 1])\n', (1469, 1486), True, 'import numpy as np\n'), ((1561, 1584), 'numpy.min', 'np.min', (['kps_valid[:, 1]'], {}), '(kps_valid[:, 1])\n', (1567, 1584), True, 'import numpy as np\n'), ((1624, 1647), 'numpy.max', 'np.max', (['kps_valid[:, 1]'], {}), '(kps_valid[:, 1])\n', (1630, 1647), True, 'import numpy as np\n'), ((1669, 1718), 'numpy.logical_or', 'np.logical_or', (['(kps[1:5, 2] == 1)', '(kps[1:5, 2] == 2)'], {}), '(kps[1:5, 2] == 1, kps[1:5, 2] == 2)\n', (1682, 1718), True, 'import numpy as np\n'), ((1727, 1780), 'numpy.logical_or', 
'np.logical_or', (['(kps[15:17, 2] == 1)', '(kps[15:17, 2] == 2)'], {}), '(kps[15:17, 2] == 1, kps[15:17, 2] == 2)\n', (1740, 1780), True, 'import numpy as np\n'), ((596, 619), 'numpy.arange', 'np.arange', (['kps.shape[0]'], {}), '(kps.shape[0])\n', (605, 619), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
import string
import csv
import numpy as np
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
cal_gcmvn_stats,
)
from torch.utils.data import Dataset
from tqdm import tqdm
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class ASR_Dataset(Dataset):
    """
    Create a Dataset for MuST-C. Each item is a tuple of the form:
    waveform, sample_rate, source utterance, target utterance, speaker_id,
    utterance_id
    """
    def __init__(self, root: str, lang, split: str, speed_perturb: bool = False, tokenizer: bool = False) -> None:
        # Expected layout: <root>/<lang>/<split>/{wav, txt[.tok]}
        _root = Path(root) / f"{lang}" / split
        wav_root, txt_root = _root / "wav", _root / "txt"
        if tokenizer:
            # Use pre-tokenized transcripts instead of the raw text.
            txt_root = _root / "txt.tok"
        assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir(), (_root, wav_root, txt_root)
        # Load audio segments
        try:
            import yaml
        except ImportError:
            print("Please install PyYAML to load the MuST-C YAML files")
        with open(txt_root / f"{split}.yaml") as f:
            segments = yaml.load(f, Loader=yaml.BaseLoader)
        # 3-way speed perturbation is only applied on training splits.
        self.speed_perturb = [0.9, 1.0, 1.1] if speed_perturb and split.startswith("train") else None
        # Load source and target utterances
        with open(txt_root / f"{split}.{lang}") as f:
            utterances = [r.strip() for r in f]
        assert len(segments) == len(utterances)
        for i, u in enumerate(utterances):
            segments[i][lang] = u
        # Gather info
        self.data = []
        # NOTE(review): groupby only merges *consecutive* entries, so this
        # assumes segments of the same wav file are contiguous in the YAML
        # -- confirm against the data.
        for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
            wav_path = wav_root / wav_filename
            try:
                # Older torchaudio API: info() returns a (signal_info, encoding_info) pair.
                sample_rate = torchaudio.info(wav_path.as_posix())[0].rate
            except TypeError:
                # Newer torchaudio API: info() returns a metadata object.
                sample_rate = torchaudio.info(wav_path.as_posix()).sample_rate
            seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
            for i, segment in enumerate(seg_group):
                # Convert second-based offsets/durations to sample counts.
                offset = int(float(segment["offset"]) * sample_rate)
                n_frames = int(float(segment["duration"]) * sample_rate)
                _id = f"{split}_{wav_path.stem}_{i}"
                self.data.append(
                    (
                        wav_path.as_posix(),
                        offset,
                        n_frames,
                        sample_rate,
                        segment[lang],
                        segment["speaker_id"] if "speaker_id" in segment else "spk1",
                        _id,
                    )
                )
    def __getitem__(self, n: int):
        """Load item *n*, returning one entry per speed-perturbation factor.

        Each entry is [waveform, sample_rate, n_frames, utterance,
        speaker_id, utterance_id].
        """
        wav_path, offset, n_frames, sr, utt, spk_id, utt_id = self.data[n]
        items = []
        if self.speed_perturb is None:
            waveform, _ = torchaudio.load(wav_path, frame_offset=offset, num_frames=n_frames)
            items.append([waveform, sr, n_frames, utt, spk_id, utt_id])
        else:
            for speed in self.speed_perturb:
                sp_utt_id = f"sp{speed}_" + utt_id
                # Speed change rescales the effective number of samples.
                sp_n_frames = n_frames / speed
                if speed == 1.0:
                    waveform, _ = torchaudio.load(wav_path, frame_offset=offset, num_frames=n_frames)
                else:
                    waveform, _ = torchaudio.load(wav_path, frame_offset=offset, num_frames=n_frames)
                    # sox "speed" changes the rate too, so "rate" restores sr.
                    effects = [
                        ["speed", f"{speed}"],
                        ["rate", f"{sr}"]
                    ]
                    waveform, _ = torchaudio.sox_effects.apply_effects_tensor(waveform, sr, effects)
                items.append([waveform, sr, sp_n_frames, utt, spk_id, sp_utt_id])
        return items
    def get_wav(self, n: int, speed_perturb=1.0):
        """Load only the waveform of item *n*, optionally speed-perturbed."""
        wav_path, offset, n_frames, sr, utt, spk_id, utt_id = self.data[n]
        if self.speed_perturb is None or speed_perturb == 1.0:
            waveform, _ = torchaudio.load(wav_path, frame_offset=offset, num_frames=n_frames)
        else:
            waveform, _ = torchaudio.load(wav_path, frame_offset=offset, num_frames=n_frames)
            effects = [
                ["speed", f"{speed_perturb}"],
                ["rate", f"{sr}"]
            ]
            waveform, _ = torchaudio.sox_effects.apply_effects_tensor(waveform, sr, effects)
        return waveform
    def get_fast(self, n: int):
        """Like __getitem__ but without decoding audio (returns the wav path)."""
        wav_path, offset, n_frames, sr, utt, spk_id, utt_id = self.data[n]
        items = []
        if self.speed_perturb is None:
            items.append([wav_path, sr, n_frames, utt, spk_id, utt_id])
        else:
            for speed in self.speed_perturb:
                sp_utt_id = f"sp{speed}_" + utt_id
                sp_n_frames = n_frames / speed
                items.append([wav_path, sr, sp_n_frames, utt, spk_id, sp_utt_id])
        return items
    def get_text(self):
        """Return the transcript/translation string of every segment."""
        src_text = []
        for item in self.data:
            src_text.append(item[4])
        return src_text
    def __len__(self) -> int:
        return len(self.data)
def process(args):
    """End-to-end preprocessing: fbank extraction, TSV manifests, vocab, YAML.

    Reads MuST-C style data under ``<data_root>/<lang>`` and writes a
    feature ZIP, one manifest per split, a SentencePiece vocabulary and a
    config YAML under the output root.

    NOTE(review): this function reads ``args.size``, ``args.add_src`` and
    ``args.SPLITS`` -- make sure the caller's argument parser actually
    defines them.
    """
    root = Path(args.data_root).absolute()
    splits = args.splits.split(",")
    lang = args.lang
    cur_root = root / f"{lang}"
    if not cur_root.is_dir():
        print(f"{cur_root.as_posix()} does not exist. Skipped.")
        return
    if args.output_root is None:
        output_root = cur_root
    else:
        output_root = Path(args.output_root).absolute() / f"{lang}"
    # Extract features
    if args.speed_perturb:
        zip_path = output_root / "fbank80_sp.zip"
    else:
        zip_path = output_root / "fbank80.zip"
    index = 0
    gen_feature_flag = False
    if not Path.exists(zip_path):
        gen_feature_flag = True
    if args.overwrite or gen_feature_flag:
        if args.speed_perturb:
            feature_root = output_root / "fbank80_sp"
        else:
            feature_root = output_root / "fbank80"
        feature_root.mkdir(exist_ok=True)
        for split in splits:
            print(f"Fetching split {split}...")
            dataset = ASR_Dataset(root.as_posix(), lang, split, args.speed_perturb, args.tokenizer)
            is_train_split = split.startswith("train")
            print("Extracting log mel filter bank features...")
            if is_train_split and args.cmvn_type == "global":
                print("And estimating cepstral mean and variance stats...")
                gcmvn_feature_list = []
            for idx in tqdm(range(len(dataset))):
                items = dataset.get_fast(idx)
                for item in items:
                    index += 1
                    wav_path, sr, _, _, _, utt_id = item
                    features_path = (feature_root / f"{utt_id}.npy").as_posix()
                    if not os.path.exists(features_path):
                        # Recover the perturbation factor from the "sp<f>_" id prefix.
                        sp = 1.0
                        if dataset.speed_perturb is not None:
                            sp = float(utt_id.split("_")[0].replace("sp", ""))
                        waveform = dataset.get_wav(idx, sp)
                        if waveform.shape[1] == 0:
                            continue
                        features = extract_fbank_features(waveform, sr, Path(features_path))
                        # GCMVN stats use unperturbed train utterances only.
                        if split == 'train' and args.cmvn_type == "global" and not utt_id.startswith("sp"):
                            if len(gcmvn_feature_list) < args.gcmvn_max_num:
                                gcmvn_feature_list.append(features)
                if is_train_split and args.size != -1 and index > args.size:
                    break
            if is_train_split and args.cmvn_type == "global":
                # Estimate and save cmv
                stats = cal_gcmvn_stats(gcmvn_feature_list)
                with open(output_root / "gcmvn.npz", "wb") as f:
                    np.savez(f, mean=stats["mean"], std=stats["std"])
        # Pack features into ZIP
        print("ZIPing features...")
        create_zip(feature_root, zip_path)
        # Clean up
        shutil.rmtree(feature_root)
    gen_manifest_flag = False
    for split in splits:
        if not Path.exists(output_root / f"{split}_{args.task}.tsv"):
            gen_manifest_flag = True
            break
    train_text = []
    if args.overwrite or gen_manifest_flag:
        print("Fetching ZIP manifest...")
        zip_manifest = get_zip_manifest(zip_path)
        # Generate TSV manifest
        print("Generating manifest...")
        for split in splits:
            is_train_split = split.startswith("train")
            manifest = {c: [] for c in MANIFEST_COLUMNS}
            if args.task == "st" and args.add_src:
                manifest["src_text"] = []
            dataset = ASR_Dataset(args.data_root, lang, split, args.speed_perturb, args.tokenizer)
            for idx in range(len(dataset)):
                items = dataset.get_fast(idx)
                for item in items:
                    _, sr, n_frames, utt, speaker_id, utt_id = item
                    manifest["id"].append(utt_id)
                    manifest["audio"].append(zip_manifest[utt_id])
                    duration_ms = int(n_frames / sr * 1000)
                    # Frame count for 25 ms windows with a 10 ms shift.
                    manifest["n_frames"].append(int(1 + (duration_ms - 25) / 10))
                    if args.lowercase_src:
                        utt = utt.lower()
                    if args.rm_punc_src:
                        for w in string.punctuation:
                            utt = utt.replace(w, "")
                    manifest["tgt_text"].append(utt)
                    manifest["speaker"].append(speaker_id)
                if is_train_split and args.size != -1 and len(manifest["id"]) > args.size:
                    break
            if is_train_split:
                train_text.extend(manifest["tgt_text"])
            df = pd.DataFrame.from_dict(manifest)
            df = filter_manifest_df(df, is_train_split=is_train_split)
            save_df_to_tsv(df, output_root / f"{split}_{args.task}.tsv")
    # Generate vocab
    v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
    spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
    gen_vocab_flag = True
    if args.asr_prefix is not None:
        # Reuse an existing (e.g. ASR) SentencePiece model instead.
        gen_vocab_flag = False
        spm_filename_prefix = args.asr_prefix
    if gen_vocab_flag:
        if len(train_text) == 0:
            print("Loading the training text to build dictionary...")
            # NOTE(review): args.SPLITS is not defined by main(); this
            # presumably should iterate `splits` -- confirm before relying
            # on this fallback path.
            for split in args.SPLITS:
                if split.startswith("train"):
                    csv_path = output_root / f"{split}_{args.task}.tsv"
                    with open(csv_path) as f:
                        reader = csv.DictReader(
                            f,
                            delimiter="\t",
                            quotechar=None,
                            doublequote=False,
                            lineterminator="\n",
                            quoting=csv.QUOTE_NONE,
                        )
                        tgt_text = [dict(e)["tgt_text"] for e in reader]
                    train_text.extend(tgt_text)
        with NamedTemporaryFile(mode="w") as f:
            for t in train_text:
                f.write(t + "\n")
            gen_vocab(
                Path(f.name),
                output_root / spm_filename_prefix,
                args.vocab_type,
                args.vocab_size,
            )
    # Generate config YAML
    yaml_filename = f"config_{args.task}.yaml"
    gen_config_yaml(
        output_root,
        spm_filename_prefix + ".model",
        yaml_filename=yaml_filename,
        specaugment_policy="lb",
        cmvn_type=args.cmvn_type,
        gcmvn_path=(
            output_root / "gcmvn.npz" if args.cmvn_type == "global"
            else None
        ),
        share_src_and_tgt=True if args.task == "asr" else False
    )
def main():
    """Parse command-line arguments and run the preprocessing pipeline.

    Fixes relative to the previous version: a stray trailing comma after
    the ``--vocab-type`` add_argument call (which turned the statement
    into a throwaway 1-tuple) is removed; ``--size`` and ``--add-src``
    are now defined, since process() reads ``args.size`` and
    ``args.add_src`` and would otherwise raise AttributeError; the
    gcmvn help text gets its missing space ("estimateglobal").
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-root", "-d", required=True, type=str)
    parser.add_argument("--output-root", "-o", default=None, type=str)
    parser.add_argument(
        "--vocab-type",
        default="unigram",
        required=True,
        type=str,
        choices=["bpe", "unigram", "char"],
    )
    parser.add_argument("--vocab-size", default=8000, type=int)
    parser.add_argument("--task", type=str, default="asr", choices=["asr", "st"])
    parser.add_argument("--lang", type=str, required=True, help="language")
    parser.add_argument("--splits", type=str, default="train,dev,test", help="dataset splits")
    parser.add_argument("--speed-perturb", action="store_true", default=False,
                        help="apply speed perturbation on wave file")
    parser.add_argument("--share", action="store_true",
                        help="share the tokenizer and dictionary of the transcription and translation")
    parser.add_argument("--asr-prefix", type=str, default=None, help="prefix of the asr dict")
    parser.add_argument("--lowercase-src", action="store_true", help="lowercase the source text")
    parser.add_argument("--rm-punc-src", action="store_true", help="remove the punctuation of the source text")
    parser.add_argument("--add-src", action="store_true",
                        help="keep the source transcript in the ST manifest")
    parser.add_argument("--tokenizer", action="store_true", help="use tokenizer txt")
    parser.add_argument("--size", default=-1, type=int,
                        help="max number of training samples to keep (-1 keeps all)")
    parser.add_argument("--cmvn-type", default="utterance",
                        choices=["global", "utterance"],
                        help="The type of cepstral mean and variance normalization")
    parser.add_argument("--overwrite", action="store_true", help="overwrite the existing files")
    parser.add_argument("--gcmvn-max-num", default=150000, type=int,
                        help=(
                            "Maximum number of sentences to use to estimate "
                            "global mean and variance"
                        ))
    args = parser.parse_args()

    process(args)


if __name__ == "__main__":
    main()
| [
"pathlib.Path.exists",
"yaml.load",
"argparse.ArgumentParser",
"examples.speech_to_text.data_utils.gen_config_yaml",
"pathlib.Path",
"examples.speech_to_text.data_utils.filter_manifest_df",
"shutil.rmtree",
"examples.speech_to_text.data_utils.save_df_to_tsv",
"os.path.exists",
"torchaudio.sox_effe... | [((741, 768), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (758, 768), False, 'import logging\n'), ((12013, 12301), 'examples.speech_to_text.data_utils.gen_config_yaml', 'gen_config_yaml', (['output_root', "(spm_filename_prefix + '.model')"], {'yaml_filename': 'yaml_filename', 'specaugment_policy': '"""lb"""', 'cmvn_type': 'args.cmvn_type', 'gcmvn_path': "(output_root / 'gcmvn.npz' if args.cmvn_type == 'global' else None)", 'share_src_and_tgt': "(True if args.task == 'asr' else False)"}), "(output_root, spm_filename_prefix + '.model', yaml_filename=\n yaml_filename, specaugment_policy='lb', cmvn_type=args.cmvn_type,\n gcmvn_path=output_root / 'gcmvn.npz' if args.cmvn_type == 'global' else\n None, share_src_and_tgt=True if args.task == 'asr' else False)\n", (12028, 12301), False, 'from examples.speech_to_text.data_utils import create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats\n'), ((12414, 12439), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12437, 12439), False, 'import argparse\n'), ((2172, 2209), 'itertools.groupby', 'groupby', (['segments', "(lambda x: x['wav'])"], {}), "(segments, lambda x: x['wav'])\n", (2179, 2209), False, 'from itertools import groupby\n'), ((6226, 6247), 'pathlib.Path.exists', 'Path.exists', (['zip_path'], {}), '(zip_path)\n', (6237, 6247), False, 'from pathlib import Path\n'), ((8500, 8534), 'examples.speech_to_text.data_utils.create_zip', 'create_zip', (['feature_root', 'zip_path'], {}), '(feature_root, zip_path)\n', (8510, 8534), False, 'from examples.speech_to_text.data_utils import create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats\n'), ((8563, 8590), 'shutil.rmtree', 'shutil.rmtree', (['feature_root'], {}), '(feature_root)\n', (8576, 8590), False, 
'import shutil\n'), ((8902, 8928), 'examples.speech_to_text.data_utils.get_zip_manifest', 'get_zip_manifest', (['zip_path'], {}), '(zip_path)\n', (8918, 8928), False, 'from examples.speech_to_text.data_utils import create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats\n'), ((1676, 1712), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.BaseLoader'}), '(f, Loader=yaml.BaseLoader)\n', (1685, 1712), False, 'import yaml\n'), ((3377, 3444), 'torchaudio.load', 'torchaudio.load', (['wav_path'], {'frame_offset': 'offset', 'num_frames': 'n_frames'}), '(wav_path, frame_offset=offset, num_frames=n_frames)\n', (3392, 3444), False, 'import torchaudio\n'), ((4497, 4564), 'torchaudio.load', 'torchaudio.load', (['wav_path'], {'frame_offset': 'offset', 'num_frames': 'n_frames'}), '(wav_path, frame_offset=offset, num_frames=n_frames)\n', (4512, 4564), False, 'import torchaudio\n'), ((4605, 4672), 'torchaudio.load', 'torchaudio.load', (['wav_path'], {'frame_offset': 'offset', 'num_frames': 'n_frames'}), '(wav_path, frame_offset=offset, num_frames=n_frames)\n', (4620, 4672), False, 'import torchaudio\n'), ((4818, 4884), 'torchaudio.sox_effects.apply_effects_tensor', 'torchaudio.sox_effects.apply_effects_tensor', (['waveform', 'sr', 'effects'], {}), '(waveform, sr, effects)\n', (4861, 4884), False, 'import torchaudio\n'), ((5640, 5660), 'pathlib.Path', 'Path', (['args.data_root'], {}), '(args.data_root)\n', (5644, 5660), False, 'from pathlib import Path\n'), ((8662, 8715), 'pathlib.Path.exists', 'Path.exists', (["(output_root / f'{split}_{args.task}.tsv')"], {}), "(output_root / f'{split}_{args.task}.tsv')\n", (8673, 8715), False, 'from pathlib import Path\n'), ((10354, 10386), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['manifest'], {}), '(manifest)\n', (10376, 10386), True, 'import pandas as pd\n'), ((10404, 10457), 'examples.speech_to_text.data_utils.filter_manifest_df', 
'filter_manifest_df', (['df'], {'is_train_split': 'is_train_split'}), '(df, is_train_split=is_train_split)\n', (10422, 10457), False, 'from examples.speech_to_text.data_utils import create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats\n'), ((10470, 10530), 'examples.speech_to_text.data_utils.save_df_to_tsv', 'save_df_to_tsv', (['df', "(output_root / f'{split}_{args.task}.tsv')"], {}), "(df, output_root / f'{split}_{args.task}.tsv')\n", (10484, 10530), False, 'from examples.speech_to_text.data_utils import create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats\n'), ((11647, 11675), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'mode': '"""w"""'}), "(mode='w')\n", (11665, 11675), False, 'from tempfile import NamedTemporaryFile\n'), ((1178, 1188), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (1182, 1188), False, 'from pathlib import Path\n'), ((8251, 8286), 'examples.speech_to_text.data_utils.cal_gcmvn_stats', 'cal_gcmvn_stats', (['gcmvn_feature_list'], {}), '(gcmvn_feature_list)\n', (8266, 8286), False, 'from examples.speech_to_text.data_utils import create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats\n'), ((11788, 11800), 'pathlib.Path', 'Path', (['f.name'], {}), '(f.name)\n', (11792, 11800), False, 'from pathlib import Path\n'), ((3741, 3808), 'torchaudio.load', 'torchaudio.load', (['wav_path'], {'frame_offset': 'offset', 'num_frames': 'n_frames'}), '(wav_path, frame_offset=offset, num_frames=n_frames)\n', (3756, 3808), False, 'import torchaudio\n'), ((3865, 3932), 'torchaudio.load', 'torchaudio.load', (['wav_path'], {'frame_offset': 'offset', 'num_frames': 'n_frames'}), '(wav_path, frame_offset=offset, num_frames=n_frames)\n', (3880, 3932), False, 
'import torchaudio\n'), ((4110, 4176), 'torchaudio.sox_effects.apply_effects_tensor', 'torchaudio.sox_effects.apply_effects_tensor', (['waveform', 'sr', 'effects'], {}), '(waveform, sr, effects)\n', (4153, 4176), False, 'import torchaudio\n'), ((5967, 5989), 'pathlib.Path', 'Path', (['args.output_root'], {}), '(args.output_root)\n', (5971, 5989), False, 'from pathlib import Path\n'), ((8372, 8421), 'numpy.savez', 'np.savez', (['f'], {'mean': "stats['mean']", 'std': "stats['std']"}), "(f, mean=stats['mean'], std=stats['std'])\n", (8380, 8421), True, 'import numpy as np\n'), ((7320, 7349), 'os.path.exists', 'os.path.exists', (['features_path'], {}), '(features_path)\n', (7334, 7349), False, 'import os\n'), ((11206, 11323), 'csv.DictReader', 'csv.DictReader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'None', 'doublequote': '(False)', 'lineterminator': '"""\n"""', 'quoting': 'csv.QUOTE_NONE'}), "(f, delimiter='\\t', quotechar=None, doublequote=False,\n lineterminator='\\n', quoting=csv.QUOTE_NONE)\n", (11220, 11323), False, 'import csv\n'), ((7745, 7764), 'pathlib.Path', 'Path', (['features_path'], {}), '(features_path)\n', (7749, 7764), False, 'from pathlib import Path\n')] |
"""Randomly assign a target class for each victim data.
This file is for distributed attacking.
"""
import os
import pdb
import time
import copy
from tqdm import tqdm
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
import sys
sys.path.append('../')
from config import BEST_WEIGHTS
from config import MAX_ADD_CLUSTER_BATCH as BATCH_SIZE
from dataset import ModelNet40Attack
from model import DGCNN, PointNetCls, PointNet2ClsSsg, PointConvDensityClsSsg
from util.utils import AverageMeter, str2bool, set_seed
from attack import CWAddClusters
from attack import CrossEntropyAdvLoss, LogitsAdvLoss
from attack import FarChamferDist
def attack():
    """Run the targeted add-cluster attack over the whole (sharded) test set.

    Uses the module-level `model`, `attacker` and `test_loader`. Returns
    (adv_pc [num_data, K, 3], real labels [num_data], target labels
    [num_data], number of successful attacks).
    """
    model.eval()
    adv_batches = []
    gt_labels = []
    tgt_labels = []
    success_total = 0
    for pc, label, target in tqdm(test_loader):
        # Only the host->device transfer is gradient-free; the attack
        # itself needs autograd.
        with torch.no_grad():
            pc = pc.float().cuda(non_blocking=True)
            label = label.long().cuda(non_blocking=True)
            target_label = target.long().cuda(non_blocking=True)
        _, best_pc, success_num = attacker.attack(pc, target_label)
        success_total += success_num
        adv_batches.append(best_pc)
        gt_labels.append(label.detach().cpu().numpy())
        tgt_labels.append(target_label.detach().cpu().numpy())
    all_adv_pc = np.concatenate(adv_batches, axis=0)      # [num_data, K, 3]
    all_real_lbl = np.concatenate(gt_labels, axis=0)      # [num_data]
    all_target_lbl = np.concatenate(tgt_labels, axis=0)   # [num_data]
    return all_adv_pc, all_real_lbl, all_target_lbl, success_total
if __name__ == "__main__":
    # Training settings
    parser = argparse.ArgumentParser(description='Point Cloud Recognition')
    parser.add_argument('--data_root', type=str,
                        default='data/attack_data.npz')
    parser.add_argument('--model', type=str, default='dgcnn', metavar='N',
                        choices=['pointnet', 'pointnet2',
                                 'dgcnn', 'pointconv'],
                        help='Model to use, [pointnet, pointnet++, dgcnn, pointconv]')
    parser.add_argument('--feature_transform', type=str2bool, default=False,
                        help='whether to use STN on features in PointNet')
    parser.add_argument('--dataset', type=str, default='mn40', metavar='N',
                        choices=['mn40', 'remesh_mn40',
                                 'opt_mn40', 'conv_opt_mn40'])
    parser.add_argument('--batch_size', type=int, default=-1, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--num_points', type=int, default=1024,
                        help='num of points to use')
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='dropout rate')
    parser.add_argument('--emb_dims', type=int, default=1024, metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--k', type=int, default=20, metavar='N',
                        help='Num of nearest neighbors to use')
    parser.add_argument('--adv_func', type=str, default='logits',
                        choices=['logits', 'cross_entropy'],
                        help='Adversarial loss function to use')
    parser.add_argument('--kappa', type=float, default=0.,
                        help='min margin in logits adv loss')
    parser.add_argument('--attack_lr', type=float, default=1e-2,
                        help='lr in CW optimization')
    parser.add_argument('--binary_step', type=int, default=5, metavar='N',
                        help='Binary search step')
    parser.add_argument('--num_iter', type=int, default=500, metavar='N',
                        help='Number of iterations in each search step')
    parser.add_argument('--num_add', type=int, default=3, metavar='N',
                        help='Number of clusters added in the attack')
    parser.add_argument('--cl_num_p', type=int, default=32, metavar='N',
                        help='Number of points in each cluster')
    parser.add_argument('--local_rank', default=-1, type=int,
                        help='node rank for distributed training')
    args = parser.parse_args()
    # Resolve per-model batch size and checkpoint path from the config tables.
    BATCH_SIZE = BATCH_SIZE[args.num_points]
    BEST_WEIGHTS = BEST_WEIGHTS[args.dataset][args.num_points]
    if args.batch_size == -1:
        args.batch_size = BATCH_SIZE[args.model]
    set_seed(args.seed)
    print(args)
    # One process per GPU; local_rank is injected by the distributed launcher.
    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(args.local_rank)
    cudnn.benchmark = True
    # Build the victim classifier.
    if args.model.lower() == 'dgcnn':
        model = DGCNN(args.emb_dims, args.k, output_channels=40)
    elif args.model.lower() == 'pointnet':
        model = PointNetCls(k=40, feature_transform=args.feature_transform)
    elif args.model.lower() == 'pointnet2':
        model = PointNet2ClsSsg(num_classes=40)
    elif args.model.lower() == 'pointconv':
        model = PointConvDensityClsSsg(num_classes=40)
    else:
        print('Model not recognized')
        exit(-1)
    # load model weight
    state_dict = torch.load(
        BEST_WEIGHTS[args.model], map_location='cpu')
    print('Loading weight {}'.format(BEST_WEIGHTS[args.model]))
    try:
        model.load_state_dict(state_dict)
    except RuntimeError:
        # eliminate 'module.' in keys
        state_dict = {k[7:]: v for k, v in state_dict.items()}
        model.load_state_dict(state_dict)
    # distributed mode on multiple GPUs!
    # much faster than nn.DataParallel
    model = DistributedDataParallel(
        model.cuda(), device_ids=[args.local_rank])
    # setup attack settings
    if args.adv_func == 'logits':
        adv_func = LogitsAdvLoss(kappa=args.kappa)
    else:
        adv_func = CrossEntropyAdvLoss()
    dist_func = FarChamferDist(num_add=args.num_add,
                                 chamfer_method='adv2ori',
                                 chamfer_weight=0.1)
    attacker = CWAddClusters(model, adv_func, dist_func,
                              attack_lr=args.attack_lr,
                              init_weight=5., max_weight=30.,
                              binary_step=args.binary_step,
                              num_iter=args.num_iter,
                              num_add=args.num_add,
                              cl_num_p=args.cl_num_p)
    # attack
    test_set = ModelNet40Attack(args.data_root, num_points=args.num_points,
                                normalize=True)
    # Each rank attacks its own shard of the (unshuffled) test set.
    test_sampler = DistributedSampler(test_set, shuffle=False)
    test_loader = DataLoader(test_set, batch_size=args.batch_size,
                             shuffle=False, num_workers=4,
                             pin_memory=True, drop_last=False,
                             sampler=test_sampler)
    # run attack
    attacked_data, real_label, target_label, success_num = attack()
    # accumulate results
    # NOTE(review): success_rate divides this rank's successes by the FULL
    # dataset size, not the shard size -- per-rank rates are underestimates
    # and are meant to be merged across ranks; confirm downstream usage.
    data_num = len(test_set)
    success_rate = float(success_num) / float(data_num)
    # save results
    save_path = './attack/results/{}_{}/Cluster/{}-{}'.\
        format(args.dataset, args.num_points, args.num_add, args.cl_num_p)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if args.adv_func == 'logits':
        args.adv_func = 'logits_kappa={}'.format(args.kappa)
    # One output file per rank, tagged with the local success rate.
    save_name = 'Cluster-{}-{}-success_{:.4f}-rank_{}.npz'.\
        format(args.model, args.adv_func,
               success_rate, args.local_rank)
    np.savez(os.path.join(save_path, save_name),
             test_pc=attacked_data.astype(np.float32),
             test_label=real_label.astype(np.uint8),
             target_label=target_label.astype(np.uint8))
| [
"argparse.ArgumentParser",
"model.PointNetCls",
"model.PointNet2ClsSsg",
"attack.FarChamferDist",
"torch.no_grad",
"os.path.join",
"sys.path.append",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"dataset.ModelNet40Attack",
"torch.utils.data.distributed.DistributedSampler",
... | [((544, 566), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (559, 566), False, 'import sys\n'), ((1087, 1104), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (1091, 1104), False, 'from tqdm import tqdm\n'), ((1652, 1686), 'numpy.concatenate', 'np.concatenate', (['all_adv_pc'], {'axis': '(0)'}), '(all_adv_pc, axis=0)\n', (1666, 1686), True, 'import numpy as np\n'), ((1726, 1762), 'numpy.concatenate', 'np.concatenate', (['all_real_lbl'], {'axis': '(0)'}), '(all_real_lbl, axis=0)\n', (1740, 1762), True, 'import numpy as np\n'), ((1798, 1836), 'numpy.concatenate', 'np.concatenate', (['all_target_lbl'], {'axis': '(0)'}), '(all_target_lbl, axis=0)\n', (1812, 1836), True, 'import numpy as np\n'), ((1974, 2036), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Point Cloud Recognition"""'}), "(description='Point Cloud Recognition')\n", (1997, 2036), False, 'import argparse\n'), ((4842, 4861), 'util.utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (4850, 4861), False, 'from util.utils import AverageMeter, str2bool, set_seed\n'), ((4883, 4922), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (4906, 4922), True, 'import torch.distributed as dist\n'), ((4927, 4965), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (4948, 4965), False, 'import torch\n'), ((5514, 5570), 'torch.load', 'torch.load', (['BEST_WEIGHTS[args.model]'], {'map_location': '"""cpu"""'}), "(BEST_WEIGHTS[args.model], map_location='cpu')\n", (5524, 5570), False, 'import torch\n'), ((6214, 6300), 'attack.FarChamferDist', 'FarChamferDist', ([], {'num_add': 'args.num_add', 'chamfer_method': '"""adv2ori"""', 'chamfer_weight': '(0.1)'}), "(num_add=args.num_add, chamfer_method='adv2ori',\n chamfer_weight=0.1)\n", (6228, 6300), False, 'from attack import FarChamferDist\n'), ((6374, 6583), 
'attack.CWAddClusters', 'CWAddClusters', (['model', 'adv_func', 'dist_func'], {'attack_lr': 'args.attack_lr', 'init_weight': '(5.0)', 'max_weight': '(30.0)', 'binary_step': 'args.binary_step', 'num_iter': 'args.num_iter', 'num_add': 'args.num_add', 'cl_num_p': 'args.cl_num_p'}), '(model, adv_func, dist_func, attack_lr=args.attack_lr,\n init_weight=5.0, max_weight=30.0, binary_step=args.binary_step,\n num_iter=args.num_iter, num_add=args.num_add, cl_num_p=args.cl_num_p)\n', (6387, 6583), False, 'from attack import CWAddClusters\n'), ((6777, 6853), 'dataset.ModelNet40Attack', 'ModelNet40Attack', (['args.data_root'], {'num_points': 'args.num_points', 'normalize': '(True)'}), '(args.data_root, num_points=args.num_points, normalize=True)\n', (6793, 6853), False, 'from dataset import ModelNet40Attack\n'), ((6905, 6948), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['test_set'], {'shuffle': '(False)'}), '(test_set, shuffle=False)\n', (6923, 6948), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((6967, 7106), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)', 'pin_memory': '(True)', 'drop_last': '(False)', 'sampler': 'test_sampler'}), '(test_set, batch_size=args.batch_size, shuffle=False, num_workers\n =4, pin_memory=True, drop_last=False, sampler=test_sampler)\n', (6977, 7106), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5048, 5096), 'model.DGCNN', 'DGCNN', (['args.emb_dims', 'args.k'], {'output_channels': '(40)'}), '(args.emb_dims, args.k, output_channels=40)\n', (5053, 5096), False, 'from model import DGCNN, PointNetCls, PointNet2ClsSsg, PointConvDensityClsSsg\n'), ((6115, 6146), 'attack.LogitsAdvLoss', 'LogitsAdvLoss', ([], {'kappa': 'args.kappa'}), '(kappa=args.kappa)\n', (6128, 6146), False, 'from attack import CrossEntropyAdvLoss, LogitsAdvLoss\n'), ((6176, 6197), 'attack.CrossEntropyAdvLoss', 
'CrossEntropyAdvLoss', ([], {}), '()\n', (6195, 6197), False, 'from attack import CrossEntropyAdvLoss, LogitsAdvLoss\n'), ((7549, 7574), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (7563, 7574), False, 'import os\n'), ((7584, 7606), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (7595, 7606), False, 'import os\n'), ((7864, 7898), 'os.path.join', 'os.path.join', (['save_path', 'save_name'], {}), '(save_path, save_name)\n', (7876, 7898), False, 'import os\n'), ((1119, 1134), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1132, 1134), False, 'import torch\n'), ((5156, 5215), 'model.PointNetCls', 'PointNetCls', ([], {'k': '(40)', 'feature_transform': 'args.feature_transform'}), '(k=40, feature_transform=args.feature_transform)\n', (5167, 5215), False, 'from model import DGCNN, PointNetCls, PointNet2ClsSsg, PointConvDensityClsSsg\n'), ((5276, 5307), 'model.PointNet2ClsSsg', 'PointNet2ClsSsg', ([], {'num_classes': '(40)'}), '(num_classes=40)\n', (5291, 5307), False, 'from model import DGCNN, PointNetCls, PointNet2ClsSsg, PointConvDensityClsSsg\n'), ((5368, 5406), 'model.PointConvDensityClsSsg', 'PointConvDensityClsSsg', ([], {'num_classes': '(40)'}), '(num_classes=40)\n', (5390, 5406), False, 'from model import DGCNN, PointNetCls, PointNet2ClsSsg, PointConvDensityClsSsg\n')] |
import cv2
import numpy as np
import tensorflow as tf
PB_PATH = '/home/opencv-mds/models/frozen_inference_graph.pb'
VIDEO_PATH = '/home/opencv-mds/OpenCV_in_Ubuntu/Data/Lane_Detection_Videos/challenge.mp4'
def import_graph(PATH_TO_FTOZEN_PB):
    """Load a frozen TensorFlow inference graph from disk.

    Reads the serialized GraphDef at ``PATH_TO_FTOZEN_PB`` (a .pb file)
    and imports it into a fresh, self-contained ``tf.Graph``.
    """
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_FTOZEN_PB, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
            tf.import_graph_def(graph_def, name='')
    return graph
def run_inference_for_single_image(image, sess, image_tensor, tensor_dict):
    """Run the detection graph on one BGR frame and unbatch the outputs.

    Returns ``tensor_dict``'s fetched values with the leading batch axis
    stripped; ``num_detections`` becomes a plain int and the class ids
    become uint8.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # The model expects a batch: [1, height, width, 3].
    batched = np.expand_dims(rgb, axis=0)
    output_dict = sess.run(tensor_dict, feed_dict={image_tensor: batched})
    # Strip the batch dimension and convert to convenient Python/numpy types.
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    for key in ('detection_boxes', 'detection_scores'):
        output_dict[key] = output_dict[key][0]
    return output_dict
def frameProcessing(image, sess, image_tensor, tensor_dict):
    """Detect objects in one frame and draw boxes for scores above 0.3."""
    result = np.copy(image)
    detections = run_inference_for_single_image(result, sess, image_tensor, tensor_dict)
    rows = result.shape[0]
    cols = result.shape[1]
    for idx in range(int(detections['num_detections'])):
        score = float(detections['detection_scores'][idx])
        if score <= 0.3:
            continue
        # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
        top, left, bottom, right = (float(v) for v in detections['detection_boxes'][idx])
        pt1 = (int(left * cols), int(top * rows))
        pt2 = (int(right * cols), int(bottom * rows))
        cv2.rectangle(result, pt1, pt2, (125, 255, 51), thickness=2)
    return result
def Video(openpath, graph, savepath = "output.avi"):
    """Run the detection graph over every frame of a video.

    Reads frames from ``openpath``, annotates them via frameProcessing,
    writes the annotated stream to ``savepath`` and displays both streams
    in OpenCV windows until the video ends or 'q' is pressed.
    """
    cap = cv2.VideoCapture(openpath)
    if cap.isOpened():
        print("Video Opened")
    else:
        print("Video Not Opened")
        print("Program Abort")
        exit()  # NOTE(review): exits without releasing cap; sys.exit() would be more conventional
    # Mirror the input stream's properties (fps, size, codec) onto the writer.
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
    out = cv2.VideoWriter(savepath, fourcc, fps, (width, height), True)
    cv2.namedWindow("Input", cv2.WINDOW_GUI_EXPANDED)
    cv2.namedWindow("Output", cv2.WINDOW_GUI_EXPANDED)
    with graph.as_default():
        with tf.Session() as sess:
            # Only fetch the output tensors that actually exist in this graph
            # (e.g. 'detection_masks' is optional in exported models).
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            while cap.isOpened():
                # Capture frame-by-frame
                ret, frame = cap.read()
                if ret:
                    # Our operations on the frame come here
                    output = frameProcessing(frame, sess, image_tensor, tensor_dict)
                    # Write frame-by-frame
                    out.write(output)
                    # Display the resulting frame
                    cv2.imshow("Input", frame)
                    cv2.imshow("Output", output)
                else:
                    break
                # waitKey(int(1000.0/fps)) for matching fps of video
                if cv2.waitKey(int(1000.0/fps)) & 0xFF == ord('q'):
                    break
    # When everything done, release the capture
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return
Video(VIDEO_PATH, import_graph(PB_PATH))
| [
"numpy.copy",
"cv2.cvtColor",
"tensorflow.Session",
"numpy.expand_dims",
"cv2.imshow",
"tensorflow.get_default_graph",
"cv2.VideoCapture",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"cv2.VideoWriter",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"cv2.destroyAllWindows",
"cv2.na... | [((267, 277), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (275, 277), True, 'import tensorflow as tf\n'), ((688, 726), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (700, 726), False, 'import cv2\n'), ((828, 857), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (842, 857), True, 'import numpy as np\n'), ((1436, 1450), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (1443, 1450), True, 'import numpy as np\n'), ((2225, 2251), 'cv2.VideoCapture', 'cv2.VideoCapture', (['openpath'], {}), '(openpath)\n', (2241, 2251), False, 'import cv2\n'), ((2592, 2653), 'cv2.VideoWriter', 'cv2.VideoWriter', (['savepath', 'fourcc', 'fps', '(width, height)', '(True)'], {}), '(savepath, fourcc, fps, (width, height), True)\n', (2607, 2653), False, 'import cv2\n'), ((2658, 2707), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Input"""', 'cv2.WINDOW_GUI_EXPANDED'], {}), "('Input', cv2.WINDOW_GUI_EXPANDED)\n", (2673, 2707), False, 'import cv2\n'), ((2712, 2762), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Output"""', 'cv2.WINDOW_GUI_EXPANDED'], {}), "('Output', cv2.WINDOW_GUI_EXPANDED)\n", (2727, 2762), False, 'import cv2\n'), ((4254, 4277), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4275, 4277), False, 'import cv2\n'), ((340, 353), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (351, 353), True, 'import tensorflow as tf\n'), ((367, 406), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_FTOZEN_PB', '"""rb"""'], {}), "(PATH_TO_FTOZEN_PB, 'rb')\n", (381, 406), True, 'import tensorflow as tf\n'), ((528, 570), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (547, 570), True, 'import tensorflow as tf\n'), ((2805, 2817), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2815, 2817), True, 'import tensorflow as tf\n'), ((2845, 2867), 
'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2865, 2867), True, 'import tensorflow as tf\n'), ((3384, 3406), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3404, 3406), True, 'import tensorflow as tf\n'), ((3879, 3905), 'cv2.imshow', 'cv2.imshow', (['"""Input"""', 'frame'], {}), "('Input', frame)\n", (3889, 3905), False, 'import cv2\n'), ((3926, 3954), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'output'], {}), "('Output', output)\n", (3936, 3954), False, 'import cv2\n'), ((3302, 3324), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3322, 3324), True, 'import tensorflow as tf\n')] |
"""Methods for processing OPA 800 tuning data."""
import numpy as np
import WrightTools as wt
from ._discrete_tune import DiscreteTune
from ._instrument import Instrument
from ._transition import Transition
from ._plot import plot_tune_test
from ._common import save
from ._map import map_ind_points
__all__ = ["tune_test"]
def _offsets(data, channel_name, tune_points, *, spline=True, **spline_kwargs):
data.moment(axis=1, channel=channel_name, moment=1, resultant=data.axes[0].shape)
offsets = data[f"{channel_name}_1_moment_1"].points
if spline:
return wt.kit.Spline(data.axes[0].points, offsets, **spline_kwargs)
if np.allclose(data.axes[0].points, tune_points):
return offsets.clip(data.axes[1].min(), data.axes[1].max())
if np.allclose(data.axes[0].points, tune_points[::-1]):
return offsets.clip(data.axes[1].min(), data.axes[1].max())[::-1]
else:
raise ValueError("Data points and instrument points do not match, and splining disabled")
def tune_test(
    *,
    data,
    channel,
    arrangement,
    instrument,
    level=False,
    gtol=0.01,
    ltol=0.1,
    restore_setpoints=True,
    autosave=True,
    save_directory=None,
    **spline_kwargs,
):
    """Workup a Tune Test.
    Parameters
    ----------
    data : wt.data.Data
        should be in (setpoint, dependent)
    channel: wt.data.Channel or int or str
        channel to process
    arrangement: str
        name of the arrangement to modify
    instrument: attune.Instrument
        instrument object to modify
    level: bool, optional
        toggle leveling data (Defaults to False)
    gtol: float, optional
        global tolerance for rejecting noise level relative to global maximum
    ltol: float, optional
        local tolerance for rejecting data relative to slice maximum
    restore_setpoints: bool, optional
        toggles remapping onto original setpoints for each tune (default is True)
    autosave: bool, optional
        toggles saving of instrument file and images (Defaults to True)
    save_directory: Path-like
        where to save (Defaults to current working directory)
    **spline_kwargs: optional
        extra arguments to pass to spline creation (e.g. s=0, k=1 for linear interpolation)
    Returns
    -------
    attune.Instrument
        New instrument object.
    """
    # Record the workup parameters so the transition is reproducible.
    metadata = {
        "channel": channel,
        "arrangement": arrangement,
        "level": level,
        "gtol": gtol,
        "ltol": ltol,
        "spline_kwargs": spline_kwargs,
    }
    if not isinstance(channel, (int, str)):
        metadata["channel"] = channel.natural_name
    transition = Transition("tune_test", instrument, metadata=metadata, data=data)
    # Work on a copy so the caller's data is left untouched.
    data = data.copy()
    data.convert("nm")
    setpoints = data.axes[0].points
    setpoints.sort()
    if isinstance(channel, (int, str)):
        channel = data.channels[wt.kit.get_index(data.channel_names, channel)]
    # Keep an untouched copy of the channel for plotting/debugging.
    orig_channel = data.create_channel(
        f"{channel.natural_name}_orig", channel, units=channel.units
    )
    # TODO: check if level does what we want
    if level:
        data.level(channel.natural_name, 0, -3)
    # TODO: gtol/ltol should maybe be moved to wt
    # Reject values below gtol of the global maximum, then below ltol of
    # each slice's maximum, before taking moments.
    cutoff = channel.max() * gtol
    channel.clip(min=cutoff)
    # NOTE(review): max_axis is computed but never used — candidate for removal.
    max_axis = tuple(i for i, v in enumerate(data.axes[0].shape) if v > 1)
    cutoff = np.nanmax(channel[:], axis=1, keepdims=True) * ltol
    channel.clip(min=cutoff)
    offset_spline = _offsets(data, channel.natural_name, setpoints, **spline_kwargs)
    try:
        raw_offsets = _offsets(data, channel.natural_name, setpoints, spline=False)
    except ValueError:
        # Raw offsets are only used for plotting; fall back silently.
        raw_offsets = None
    old_instrument = instrument.as_dict()
    for tune in old_instrument["arrangements"][arrangement]["tunes"].values():
        if "ranges" in tune:
            # Discrete tune in dict form
            continue
        tune["independent"] += offset_spline(tune["independent"])
    new_instrument = Instrument(**old_instrument)
    if restore_setpoints:
        # Re-sample each (non-discrete) tune back onto its original setpoints.
        for tune in new_instrument[arrangement].keys():
            if isinstance(instrument[arrangement][tune], DiscreteTune):
                continue
            new_instrument = map_ind_points(
                new_instrument, arrangement, tune, instrument[arrangement][tune].independent
            )
    new_instrument._transition = transition
    fig, _ = plot_tune_test(
        data,
        channel.natural_name,
        used_offsets=offset_spline(setpoints),
        raw_offsets=raw_offsets,
    )
    if autosave:
        save(new_instrument, fig, "tune_test", save_directory)
    return new_instrument
| [
"WrightTools.kit.get_index",
"numpy.allclose",
"WrightTools.kit.Spline",
"numpy.nanmax"
] | [((652, 697), 'numpy.allclose', 'np.allclose', (['data.axes[0].points', 'tune_points'], {}), '(data.axes[0].points, tune_points)\n', (663, 697), True, 'import numpy as np\n'), ((774, 825), 'numpy.allclose', 'np.allclose', (['data.axes[0].points', 'tune_points[::-1]'], {}), '(data.axes[0].points, tune_points[::-1])\n', (785, 825), True, 'import numpy as np\n'), ((584, 644), 'WrightTools.kit.Spline', 'wt.kit.Spline', (['data.axes[0].points', 'offsets'], {}), '(data.axes[0].points, offsets, **spline_kwargs)\n', (597, 644), True, 'import WrightTools as wt\n'), ((3387, 3431), 'numpy.nanmax', 'np.nanmax', (['channel[:]'], {'axis': '(1)', 'keepdims': '(True)'}), '(channel[:], axis=1, keepdims=True)\n', (3396, 3431), True, 'import numpy as np\n'), ((2903, 2948), 'WrightTools.kit.get_index', 'wt.kit.get_index', (['data.channel_names', 'channel'], {}), '(data.channel_names, channel)\n', (2919, 2948), True, 'import WrightTools as wt\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's stargan_estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def dummy_generator_fn(input_data, input_data_domain_label, mode):
  """Trivial generator: scales the input by one trainable variable."""
  del input_data_domain_label, mode  # unused by this stub
  scale = variable_scope.get_variable('dummy_g', initializer=0.5)
  return scale * input_data
def dummy_discriminator_fn(input_data, num_domains, mode):
  """Trivial discriminator: mean activation as the real/fake source logit,
  one fully-connected layer as the per-domain logits."""
  del mode  # unused by this stub
  flat = layers.flatten(input_data)
  source_logit = math_ops.reduce_mean(flat, axis=1)
  domain_logits = layers.fully_connected(
      inputs=flat, num_outputs=num_domains, scope='debug')
  return source_logit, domain_logits
class StarGetGANModelTest(test.TestCase, parameterized.TestCase):
  """Tests that `StarGetGANModel` produces the correct model."""
  @parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN),
                                 ('eval', model_fn_lib.ModeKeys.EVAL),
                                 ('predict', model_fn_lib.ModeKeys.PREDICT))
  def test_get_gan_model(self, mode):
    # Build the model inside a fresh graph so parameterized runs don't
    # collide on variable names.
    with ops.Graph().as_default():
      input_data = array_ops.ones([6, 4, 4, 3])
      input_data_domain_label = array_ops.one_hot([0] * 6, 5)
      gan_model = estimator._get_gan_model(
          mode,
          dummy_generator_fn,
          dummy_discriminator_fn,
          input_data,
          input_data_domain_label,
          add_summaries=False)
      self.assertEqual(input_data, gan_model.input_data)
      self.assertIsNotNone(gan_model.generated_data)
      self.assertIsNotNone(gan_model.generated_data_domain_target)
      self.assertLen(gan_model.generator_variables, 1)
      self.assertIsNotNone(gan_model.generator_scope)
      self.assertIsNotNone(gan_model.generator_fn)
      if mode == model_fn_lib.ModeKeys.PREDICT:
        # In predict mode the discriminator is never built, so every
        # discriminator-related field must be None.
        self.assertIsNone(gan_model.input_data_domain_label)
        self.assertEqual(input_data_domain_label,
                         gan_model.generated_data_domain_target)
        self.assertIsNone(gan_model.reconstructed_data)
        self.assertIsNone(gan_model.discriminator_input_data_source_predication)
        self.assertIsNone(
            gan_model.discriminator_generated_data_source_predication)
        self.assertIsNone(gan_model.discriminator_input_data_domain_predication)
        self.assertIsNone(
            gan_model.discriminator_generated_data_domain_predication)
        self.assertIsNone(gan_model.discriminator_variables)
        self.assertIsNone(gan_model.discriminator_scope)
        self.assertIsNone(gan_model.discriminator_fn)
      else:
        # Train/eval modes build the full cycle: reconstruction plus
        # discriminator predictions on real and generated data.
        self.assertEqual(input_data_domain_label,
                         gan_model.input_data_domain_label)
        self.assertIsNotNone(gan_model.reconstructed_data.shape)
        self.assertIsNotNone(
            gan_model.discriminator_input_data_source_predication)
        self.assertIsNotNone(
            gan_model.discriminator_generated_data_source_predication)
        self.assertIsNotNone(
            gan_model.discriminator_input_data_domain_predication)
        self.assertIsNotNone(
            gan_model.discriminator_generated_data_domain_predication)
        self.assertLen(gan_model.discriminator_variables, 2)  # 1 FC layer
        self.assertIsNotNone(gan_model.discriminator_scope)
        self.assertIsNotNone(gan_model.discriminator_fn)
def get_dummy_gan_model():
  """Similar to get_gan_model()."""
  # TODO(joelshor): Find a better way of creating a variable scope.
  # The dummy variables are multiplied into the predictions below so the
  # optimizers have something to differentiate with respect to.
  with variable_scope.variable_scope('generator') as gen_scope:
    gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
  with variable_scope.variable_scope('discriminator') as dis_scope:
    dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
  return tfgan_tuples.StarGANModel(
      input_data=array_ops.ones([1, 2, 2, 3]),
      input_data_domain_label=array_ops.ones([1, 2]),
      generated_data=array_ops.ones([1, 2, 2, 3]),
      generated_data_domain_target=array_ops.ones([1, 2]),
      reconstructed_data=array_ops.ones([1, 2, 2, 3]),
      discriminator_input_data_source_predication=array_ops.ones([1]) * dis_var,
      discriminator_generated_data_source_predication=array_ops.ones(
          [1]) * gen_var * dis_var,
      discriminator_input_data_domain_predication=array_ops.ones([1, 2
                                                                     ]) * dis_var,
      discriminator_generated_data_domain_predication=array_ops.ones([1, 2]) *
      gen_var * dis_var,
      generator_variables=[gen_var],
      generator_scope=gen_scope,
      generator_fn=None,
      discriminator_variables=[dis_var],
      discriminator_scope=dis_scope,
      discriminator_fn=None)
def dummy_loss_fn(gan_model):
  """Returns identical generator and discriminator losses for testing.

  The loss combines the domain-prediction gap with the real/generated data
  gap, so both the generator and discriminator variables receive gradients.
  """
  domain_term = math_ops.reduce_sum(
      gan_model.discriminator_input_data_domain_predication -
      gan_model.discriminator_generated_data_domain_predication)
  data_term = math_ops.reduce_sum(gan_model.input_data - gan_model.generated_data)
  total = domain_term + data_term
  return tfgan_tuples.GANLoss(total, total)
def get_metrics(gan_model):
  """Eval metric ops: MSE between real and generated data."""
  mse = metrics_lib.mean_squared_error(gan_model.input_data,
                                 gan_model.generated_data)
  return {'mse_custom_metric': mse}
class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase):
  """Tests that the EstimatorSpec is constructed appropriately."""
  @classmethod
  def setUpClass(cls):
    super(GetEstimatorSpecTest, cls).setUpClass()
    # Shared across all parameterized runs; optimizers are stateless here.
    cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
    cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)
  @parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN),
                                 ('eval', model_fn_lib.ModeKeys.EVAL),
                                 ('predict', model_fn_lib.ModeKeys.PREDICT))
  def test_get_estimator_spec(self, mode):
    with ops.Graph().as_default():
      self._gan_model = get_dummy_gan_model()
      spec = estimator._get_estimator_spec(
          mode,
          self._gan_model,
          loss_fn=dummy_loss_fn,
          get_eval_metric_ops_fn=get_metrics,
          generator_optimizer=self._generator_optimizer,
          discriminator_optimizer=self._discriminator_optimizer)
      self.assertEqual(mode, spec.mode)
      # Each mode populates a different subset of the EstimatorSpec fields.
      if mode == model_fn_lib.ModeKeys.PREDICT:
        self.assertEqual(self._gan_model.generated_data, spec.predictions)
      elif mode == model_fn_lib.ModeKeys.TRAIN:
        self.assertShapeEqual(np.array(0), spec.loss)  # must be a scalar
        self.assertIsNotNone(spec.train_op)
        self.assertIsNotNone(spec.training_hooks)
      elif mode == model_fn_lib.ModeKeys.EVAL:
        self.assertEqual(self._gan_model.generated_data, spec.predictions)
        self.assertShapeEqual(np.array(0), spec.loss)  # must be a scalar
        self.assertIsNotNone(spec.eval_metric_ops)
# TODO(joelshor): Add pandas test.
class StarGANEstimatorIntegrationTest(test.TestCase):
  """End-to-end train/evaluate/predict flow for StarGANEstimator."""
  def setUp(self):
    # Fresh model dir per test so checkpoints don't leak between tests.
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
  def _test_complete_flow(self,
                          train_input_fn,
                          eval_input_fn,
                          predict_input_fn,
                          prediction_size,
                          lr_decay=False):
    def make_opt():
      # Build the optimizer lazily so the decay schedule binds to the
      # global step of the graph in use.
      gstep = training_util.get_or_create_global_step()
      lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
      return training.GradientDescentOptimizer(lr)
    gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
    dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
    est = estimator.StarGANEstimator(
        generator_fn=dummy_generator_fn,
        discriminator_fn=dummy_discriminator_fn,
        loss_fn=dummy_loss_fn,
        generator_optimizer=gopt,
        discriminator_optimizer=dopt,
        get_eval_metric_ops_fn=get_metrics,
        model_dir=self._model_dir)
    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)
    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))
    self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
                     scores['loss'])
    self.assertIn('mse_custom_metric', six.iterkeys(scores))
    # PREDICT
    predictions = np.array([x for x in est.predict(predict_input_fn)])
    self.assertAllEqual(prediction_size, predictions.shape)
  @staticmethod
  def _numpy_input_fn_wrapper(numpy_input_fn, batch_size, label_size):
    """Wrapper to remove the dictionary in numpy_input_fn.
    NOTE:
      We create the domain_label here because the model expects a fully
      defined batch_size from the input.
    Args:
      numpy_input_fn: input_fn created from numpy_io
      batch_size: (int) number of items for each batch
      label_size: (int) number of domains
    Returns:
      a new input_fn
    """
    def new_input_fn():
      features = numpy_input_fn()
      return features['x'], array_ops.one_hot([0] * batch_size, label_size)
    return new_input_fn
  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    batch_size = 5
    img_size = 8
    channel_size = 3
    label_size = 3
    image_data = np.zeros(
        [batch_size, img_size, img_size, channel_size], dtype=np.float32)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': image_data},
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': image_data}, batch_size=batch_size, shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': image_data}, shuffle=False)
    train_input_fn = self._numpy_input_fn_wrapper(train_input_fn, batch_size,
                                                  label_size)
    eval_input_fn = self._numpy_input_fn_wrapper(eval_input_fn, batch_size,
                                                 label_size)
    predict_input_fn = self._numpy_input_fn_wrapper(predict_input_fn,
                                                    batch_size, label_size)
    predict_input_fn = estimator.stargan_prediction_input_fn_wrapper(
        predict_input_fn)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        prediction_size=[batch_size, img_size, img_size, channel_size])
if __name__ == '__main__':
  # Run all test cases in this module under the TF test runner.
  test.main()
| [
"tensorflow.contrib.layers.flatten",
"tensorflow.python.ops.metrics.mean_squared_error",
"tensorflow.python.training.training.GradientDescentOptimizer",
"shutil.rmtree",
"six.iterkeys",
"tempfile.mkdtemp",
"tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn",
"tensorflow.python.summary.writer.... | [((2039, 2065), 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['input_data'], {}), '(input_data)\n', (2053, 2065), False, 'from tensorflow.contrib import layers\n'), ((2081, 2117), 'tensorflow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', (['hidden'], {'axis': '(1)'}), '(hidden, axis=1)\n', (2101, 2117), False, 'from tensorflow.python.ops import math_ops\n'), ((2133, 2210), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', ([], {'inputs': 'hidden', 'num_outputs': 'num_domains', 'scope': '"""debug"""'}), "(inputs=hidden, num_outputs=num_domains, scope='debug')\n", (2155, 2210), False, 'from tensorflow.contrib import layers\n'), ((2388, 2550), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('train', model_fn_lib.ModeKeys.TRAIN)", "('eval', model_fn_lib.ModeKeys.EVAL)", "('predict', model_fn_lib.ModeKeys.PREDICT)"], {}), "(('train', model_fn_lib.ModeKeys.TRAIN), (\n 'eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.\n PREDICT))\n", (2418, 2550), False, 'from absl.testing import parameterized\n'), ((6252, 6390), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['(gan_model.discriminator_input_data_domain_predication - gan_model.\n discriminator_generated_data_domain_predication)'], {}), '(gan_model.discriminator_input_data_domain_predication -\n gan_model.discriminator_generated_data_domain_predication)\n', (6271, 6390), False, 'from tensorflow.python.ops import math_ops\n'), ((6410, 6478), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['(gan_model.input_data - gan_model.generated_data)'], {}), '(gan_model.input_data - gan_model.generated_data)\n', (6429, 6478), False, 'from tensorflow.python.ops import math_ops\n'), ((6488, 6520), 'tensorflow.contrib.gan.python.namedtuples.GANLoss', 'tfgan_tuples.GANLoss', (['loss', 'loss'], {}), '(loss, loss)\n', (6508, 6520), True, 'from 
tensorflow.contrib.gan.python import namedtuples as tfgan_tuples\n'), ((7096, 7258), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('train', model_fn_lib.ModeKeys.TRAIN)", "('eval', model_fn_lib.ModeKeys.EVAL)", "('predict', model_fn_lib.ModeKeys.PREDICT)"], {}), "(('train', model_fn_lib.ModeKeys.TRAIN), (\n 'eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.\n PREDICT))\n", (7126, 7258), False, 'from absl.testing import parameterized\n'), ((12119, 12130), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (12128, 12130), False, 'from tensorflow.python.platform import test\n'), ((1886, 1941), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""dummy_g"""'], {'initializer': '(0.5)'}), "('dummy_g', initializer=0.5)\n", (1913, 1941), False, 'from tensorflow.python.ops import variable_scope\n'), ((4997, 5039), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""generator"""'], {}), "('generator')\n", (5026, 5039), False, 'from tensorflow.python.ops import variable_scope\n'), ((5068, 5125), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""dummy_var"""'], {'initializer': '(0.0)'}), "('dummy_var', initializer=0.0)\n", (5095, 5125), False, 'from tensorflow.python.ops import variable_scope\n'), ((5133, 5179), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""discriminator"""'], {}), "('discriminator')\n", (5162, 5179), False, 'from tensorflow.python.ops import variable_scope\n'), ((5208, 5265), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""dummy_var"""'], {'initializer': '(0.0)'}), "('dummy_var', initializer=0.0)\n", (5235, 5265), False, 'from tensorflow.python.ops import variable_scope\n'), ((6599, 6677), 'tensorflow.python.ops.metrics.mean_squared_error', 'metrics_lib.mean_squared_error', 
(['gan_model.input_data', 'gan_model.generated_data'], {}), '(gan_model.input_data, gan_model.generated_data)\n', (6629, 6677), True, 'from tensorflow.python.ops import metrics as metrics_lib\n'), ((6979, 7017), 'tensorflow.python.training.training.GradientDescentOptimizer', 'training.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (7012, 7017), False, 'from tensorflow.python.training import training\n'), ((7053, 7091), 'tensorflow.python.training.training.GradientDescentOptimizer', 'training.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (7086, 7091), False, 'from tensorflow.python.training import training\n'), ((8467, 8485), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (8483, 8485), False, 'import tempfile\n'), ((9220, 9474), 'tensorflow.contrib.gan.python.estimator.python.stargan_estimator_impl.StarGANEstimator', 'estimator.StarGANEstimator', ([], {'generator_fn': 'dummy_generator_fn', 'discriminator_fn': 'dummy_discriminator_fn', 'loss_fn': 'dummy_loss_fn', 'generator_optimizer': 'gopt', 'discriminator_optimizer': 'dopt', 'get_eval_metric_ops_fn': 'get_metrics', 'model_dir': 'self._model_dir'}), '(generator_fn=dummy_generator_fn,\n discriminator_fn=dummy_discriminator_fn, loss_fn=dummy_loss_fn,\n generator_optimizer=gopt, discriminator_optimizer=dopt,\n get_eval_metric_ops_fn=get_metrics, model_dir=self._model_dir)\n', (9246, 9474), True, 'from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator\n'), ((10904, 10978), 'numpy.zeros', 'np.zeros', (['[batch_size, img_size, img_size, channel_size]'], {'dtype': 'np.float32'}), '([batch_size, img_size, img_size, channel_size], dtype=np.float32)\n', (10912, 10978), True, 'import numpy as np\n'), ((11009, 11111), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': "{'x': image_data}", 'batch_size': 'batch_size', 'num_epochs': 'None', 'shuffle': '(True)'}), "(x={'x': image_data}, batch_size=batch_size,\n 
num_epochs=None, shuffle=True)\n", (11032, 11111), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((11161, 11248), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': "{'x': image_data}", 'batch_size': 'batch_size', 'shuffle': '(False)'}), "(x={'x': image_data}, batch_size=batch_size, shuffle\n =False)\n", (11184, 11248), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((11276, 11335), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': "{'x': image_data}", 'shuffle': '(False)'}), "(x={'x': image_data}, shuffle=False)\n", (11299, 11335), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((11793, 11856), 'tensorflow.contrib.gan.python.estimator.python.stargan_estimator_impl.stargan_prediction_input_fn_wrapper', 'estimator.stargan_prediction_input_fn_wrapper', (['predict_input_fn'], {}), '(predict_input_fn)\n', (11838, 11856), True, 'from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator\n'), ((2701, 2729), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[6, 4, 4, 3]'], {}), '([6, 4, 4, 3])\n', (2715, 2729), False, 'from tensorflow.python.ops import array_ops\n'), ((2762, 2791), 'tensorflow.python.ops.array_ops.one_hot', 'array_ops.one_hot', (['([0] * 6)', '(5)'], {}), '([0] * 6, 5)\n', (2779, 2791), False, 'from tensorflow.python.ops import array_ops\n'), ((2810, 2946), 'tensorflow.contrib.gan.python.estimator.python.stargan_estimator_impl._get_gan_model', 'estimator._get_gan_model', (['mode', 'dummy_generator_fn', 'dummy_discriminator_fn', 'input_data', 'input_data_domain_label'], {'add_summaries': '(False)'}), '(mode, dummy_generator_fn, dummy_discriminator_fn,\n input_data, input_data_domain_label, add_summaries=False)\n', (2834, 2946), True, 'from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator\n'), ((5319, 5347), 
'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 2, 2, 3]'], {}), '([1, 2, 2, 3])\n', (5333, 5347), False, 'from tensorflow.python.ops import array_ops\n'), ((5379, 5401), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 2]'], {}), '([1, 2])\n', (5393, 5401), False, 'from tensorflow.python.ops import array_ops\n'), ((5424, 5452), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 2, 2, 3]'], {}), '([1, 2, 2, 3])\n', (5438, 5452), False, 'from tensorflow.python.ops import array_ops\n'), ((5489, 5511), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 2]'], {}), '([1, 2])\n', (5503, 5511), False, 'from tensorflow.python.ops import array_ops\n'), ((5538, 5566), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 2, 2, 3]'], {}), '([1, 2, 2, 3])\n', (5552, 5566), False, 'from tensorflow.python.ops import array_ops\n'), ((7454, 7681), 'tensorflow.contrib.gan.python.estimator.python.stargan_estimator_impl._get_estimator_spec', 'estimator._get_estimator_spec', (['mode', 'self._gan_model'], {'loss_fn': 'dummy_loss_fn', 'get_eval_metric_ops_fn': 'get_metrics', 'generator_optimizer': 'self._generator_optimizer', 'discriminator_optimizer': 'self._discriminator_optimizer'}), '(mode, self._gan_model, loss_fn=dummy_loss_fn,\n get_eval_metric_ops_fn=get_metrics, generator_optimizer=self.\n _generator_optimizer, discriminator_optimizer=self._discriminator_optimizer\n )\n', (7483, 7681), True, 'from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator\n'), ((8539, 8575), 'tensorflow.python.summary.writer.writer_cache.FileWriterCache.clear', 'writer_cache.FileWriterCache.clear', ([], {}), '()\n', (8573, 8575), False, 'from tensorflow.python.summary.writer import writer_cache\n'), ((8582, 8612), 'shutil.rmtree', 'shutil.rmtree', (['self._model_dir'], {}), '(self._model_dir)\n', (8595, 8612), False, 'import shutil\n'), ((8894, 8935), 
'tensorflow.python.training.training_util.get_or_create_global_step', 'training_util.get_or_create_global_step', ([], {}), '()\n', (8933, 8935), False, 'from tensorflow.python.training import training_util\n'), ((8947, 9005), 'tensorflow.python.training.learning_rate_decay.exponential_decay', 'learning_rate_decay.exponential_decay', (['(1.0)', 'gstep', '(10)', '(0.9)'], {}), '(1.0, gstep, 10, 0.9)\n', (8984, 9005), False, 'from tensorflow.python.training import learning_rate_decay\n'), ((9019, 9056), 'tensorflow.python.training.training.GradientDescentOptimizer', 'training.GradientDescentOptimizer', (['lr'], {}), '(lr)\n', (9052, 9056), False, 'from tensorflow.python.training import training\n'), ((9095, 9133), 'tensorflow.python.training.training.GradientDescentOptimizer', 'training.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (9128, 9133), False, 'from tensorflow.python.training import training\n'), ((9171, 9209), 'tensorflow.python.training.training.GradientDescentOptimizer', 'training.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (9204, 9209), False, 'from tensorflow.python.training import training\n'), ((9748, 9768), 'six.iterkeys', 'six.iterkeys', (['scores'], {}), '(scores)\n', (9760, 9768), False, 'import six\n'), ((9924, 9944), 'six.iterkeys', 'six.iterkeys', (['scores'], {}), '(scores)\n', (9936, 9944), False, 'import six\n'), ((5618, 5637), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1]'], {}), '([1])\n', (5632, 5637), False, 'from tensorflow.python.ops import array_ops\n'), ((5805, 5827), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 2]'], {}), '([1, 2])\n', (5819, 5827), False, 'from tensorflow.python.ops import array_ops\n'), ((10653, 10700), 'tensorflow.python.ops.array_ops.one_hot', 'array_ops.one_hot', (['([0] * batch_size)', 'label_size'], {}), '([0] * batch_size, label_size)\n', (10670, 10700), False, 'from tensorflow.python.ops import array_ops\n'), ((2656, 2667), 
'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (2665, 2667), False, 'from tensorflow.python.framework import ops\n'), ((5703, 5722), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1]'], {}), '([1])\n', (5717, 5722), False, 'from tensorflow.python.ops import array_ops\n'), ((5959, 5981), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 2]'], {}), '([1, 2])\n', (5973, 5981), False, 'from tensorflow.python.ops import array_ops\n'), ((7369, 7380), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (7378, 7380), False, 'from tensorflow.python.framework import ops\n'), ((7961, 7972), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (7969, 7972), True, 'import numpy as np\n'), ((8241, 8252), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (8249, 8252), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda
from datasets import GDADataset, NodeClassification
from models import SharedBilinearDecoder, GAL, NodeClassifier
from tqdm import tqdm
import numpy as np
import torch
import itertools
import gc
import numpy as np
from torch.utils.data import Dataset, DataLoader
def run(args):
    """Train GAL with gradient-based adversarial debiasing (GDA).

    The same pipeline runs three times, once per sensitive attribute
    (gender, age, occupation).  Each pass:
      1. trains the task model jointly with an adversary on that attribute,
      2. freezes the learned embeddings,
      3. trains a fresh attacker (NodeClassifier) on the frozen embeddings
         to measure how much sensitive information still leaks,
      4. checkpoints both the model and the attacker.

    NOTE(review): ``dirname``/``logname`` are created in the gender pass
    and reused by the age/occupation passes, so section order matters.
    """
    # Persist the exact train/test split for reproducibility/auditing.
    args.train_ratings.to_csv('train_ratings_ml.csv')
    args.test_ratings.to_csv('test_ratings_ml.csv')
    # Rating-prediction (task) datasets and node-classification (fairness)
    # datasets built over the same user splits.
    train_gda_set = GDADataset(args.train_ratings, args.users_train, args.prefetch_to_gpu)
    test_gda_set = GDADataset(args.test_ratings, args.users_test, args.prefetch_to_gpu)
    train_fairness_set = NodeClassification(args.users_train, args.prefetch_to_gpu)
    test_fairness_set = NodeClassification(args.users_test, args.prefetch_to_gpu)
    # Bidirectional user<->movie edge list for the GNN (both directions stacked).
    edges = np.hstack((np.stack([args.train_ratings['user_id'].values,
                                args.train_ratings['movie_id'].values]),
                       np.stack([args.train_ratings['movie_id'].values,
                                 args.train_ratings['user_id'].values])))
    edges = torch.LongTensor(edges)
    def get_model():
        # Fresh decoder + model so each attribute pass starts from scratch.
        decoder = SharedBilinearDecoder(args.num_rel, 2, args.embed_dim).to(args.device)
        model = GAL(decoder, args.embed_dim, args.num_ent, edges, args).to(args.device)
        return model, decoder
    # When data is already prefetched to the GPU, worker processes and
    # pinned memory would only add overhead.
    if args.prefetch_to_gpu:
        train_loader = DataLoader(train_gda_set, batch_size=args.batch_size, shuffle=True, drop_last=True,
                                  num_workers=0, collate_fn=collate_fn)
    else:
        train_loader = DataLoader(train_gda_set, batch_size=args.batch_size, shuffle=True, drop_last=True,
                                  num_workers=4, pin_memory=True, collate_fn=collate_fn)
    node_cls_loader = DataLoader(train_fairness_set, batch_size=256, shuffle=True, drop_last=False,
                                 num_workers=4, pin_memory=True, collate_fn=node_cls_collate_fn)
    args.logger.info('Lambda: {}'.format(args.lambda_reg))
    # ---------------- Gender attribute ----------------
    model, decoder = get_model()
    #for i in model.named_parameters(): print(i[0])
    # The task optimizer updates encoder/batchnorm/decoder/GNN; the
    # adversarial optimizer shares everything except the decoder, adds the
    # attribute head, and is scaled by lambda_reg.
    optimizer_task = create_optimizer([
        {'params': model.encoder.parameters()},
        {'params': model.batchnorm.parameters()},
        {'params': model.decoder.parameters()},
        {'params':model.gnn.parameters()}], 'adam', args.lr)
    optimizer_adv_gender = create_optimizer([
        {'params': model.encoder.parameters()},
        {'params': model.batchnorm.parameters()},
        {'params': model.gender.parameters()},
        {'params':model.gnn.parameters()}], 'adam', args.lr * args.lambda_reg)
    args.logger.info('GDA for Gender Attribute')
    model.set_mode('gender')
    for epoch in tqdm(range(args.num_epochs)):
        # Periodic evaluation, skipped during the first warm-up epochs.
        if epoch % (args.valid_freq) == 0 and epoch >= 15:
            with torch.no_grad():
                test_gda(test_gda_set, args, model)
        train_gda(train_loader, node_cls_loader, args, model, optimizer_task, optimizer_adv_gender, False)
        gc.collect()
    # Freeze the embeddings and train an attacker to measure leakage.
    embeddings = model.encode(None).detach().squeeze(0)
    attacker = NodeClassifier(args.embed_dim, embeddings).cuda()
    optimizer_attacker_gender = create_optimizer(attacker.gender.parameters(), 'adam', args.lr)
    args.logger.info('Gender Adversary')
    attacker.set_mode('gender')
    for epoch in tqdm(range(args.finetune_epochs)):
        train_node_cls(node_cls_loader, args, attacker, optimizer_attacker_gender)
        gc.collect()
    with torch.no_grad():
        rmse, test_loss = test_node_cls(test_fairness_set, args, attacker, mode='gender')
    # Checkpoint directory is created once here and reused by later passes.
    dirname = os.path.join('./checkpoints', args.experiment, args.task, args.model)
    Path(dirname).mkdir(parents=True, exist_ok=True)
    logname = args.logname
    path = (os.path.join(dirname,logname+"model_gender.pth"))
    torch.save(model.state_dict(), path)
    path = (os.path.join(dirname,logname+"attacker_gender.pth"))
    torch.save(attacker.state_dict(), path)
    # ---------------- Age attribute ----------------
    model, decoder = get_model()
    optimizer_task = create_optimizer([
        {'params': model.encoder.parameters()},
        {'params': model.batchnorm.parameters()},
        {'params': model.decoder.parameters()},
        {'params':model.gnn.parameters()}], 'adam', args.lr)
    optimizer_adv_age = create_optimizer([
        {'params': model.encoder.parameters()},
        {'params': model.batchnorm.parameters()},
        {'params': model.age.parameters()},
        {'params':model.gnn.parameters()}], 'adam', args.lr * args.lambda_reg)
    args.logger.info('GDA for Age Attribute')
    model.set_mode('age')
    for epoch in tqdm(range(args.num_epochs)):
        if epoch % (args.valid_freq) == 0 and epoch >= 15:
            with torch.no_grad():
                test_gda(test_gda_set, args, model)
        train_gda(train_loader, node_cls_loader, args, model, optimizer_task, optimizer_adv_age, False)
        gc.collect()
    embeddings = model.encode(None).detach().squeeze(0)
    attacker = NodeClassifier(args.embed_dim, embeddings).cuda()
    optimizer_attacker_age = create_optimizer(attacker.age.parameters() ,'adam', args.lr)
    args.logger.info('Age Adversary')
    attacker.set_mode('age')
    for epoch in tqdm(range(args.finetune_epochs)):
        train_node_cls(node_cls_loader, args, attacker, optimizer_attacker_age)
        gc.collect()
    with torch.no_grad():
        rmse, test_loss = test_node_cls(test_fairness_set, args, attacker, mode='age')
    path = (os.path.join(dirname,logname+"model_age.pth"))
    torch.save(model.state_dict(), path)
    path = (os.path.join(dirname,logname+"attacker_age.pth"))
    torch.save(attacker.state_dict(), path)
    # ---------------- Occupation attribute ----------------
    model, decoder = get_model()
    optimizer_task = create_optimizer([
        {'params': model.encoder.parameters()},
        {'params': model.batchnorm.parameters()},
        {'params': model.decoder.parameters()},
        {'params':model.gnn.parameters()}], 'adam', args.lr)
    optimizer_adv_occupation = create_optimizer([
        {'params': model.encoder.parameters()},
        {'params': model.batchnorm.parameters()},
        {'params': model.occupation.parameters()},
        {'params':model.gnn.parameters()}], 'adam', args.lr * args.lambda_reg)
    args.logger.info('GDA for Occupation Attribute')
    model.set_mode('occupation')
    for epoch in tqdm(range(args.num_epochs)):
        if epoch % (args.valid_freq) == 0 and epoch >= 15:
            with torch.no_grad():
                test_gda(test_gda_set, args, model)
        train_gda(train_loader, node_cls_loader, args, model, optimizer_task, optimizer_adv_occupation, False)
        gc.collect()
    embeddings = model.encode(None).detach().squeeze(0)
    attacker = NodeClassifier(args.embed_dim, embeddings).cuda()
    optimizer_attacker_occupation = create_optimizer(attacker.occupation.parameters(), 'adam', args.lr)
    args.logger.info('Occupation Adversary')
    attacker.set_mode('occupation')
    for epoch in tqdm(range(args.finetune_epochs)):
        train_node_cls(node_cls_loader, args, attacker, optimizer_attacker_occupation)
        gc.collect()
    with torch.no_grad():
        rmse, test_loss = test_node_cls(test_fairness_set, args, attacker, mode='occupation')
    path = (os.path.join(dirname,logname+"model_occupation.pth"))
    torch.save(model.state_dict(), path)
    path = (os.path.join(dirname,logname+"attacker_occupation.pth"))
    torch.save(attacker.state_dict(), path)
if __name__ == '__main__':
    # This module is a library entry point; launch the pipeline via exec.py.
    assert False
| [
"helper.train_gda",
"helper.os.path.join",
"numpy.stack",
"models.NodeClassifier",
"torch.utils.data.DataLoader",
"torch.LongTensor",
"models.GAL",
"datasets.NodeClassification",
"helper.test_node_cls",
"helper.train_node_cls",
"gc.collect",
"helper.test_gda",
"models.SharedBilinearDecoder",... | [((614, 684), 'datasets.GDADataset', 'GDADataset', (['args.train_ratings', 'args.users_train', 'args.prefetch_to_gpu'], {}), '(args.train_ratings, args.users_train, args.prefetch_to_gpu)\n', (624, 684), False, 'from datasets import GDADataset, NodeClassification\n'), ((704, 772), 'datasets.GDADataset', 'GDADataset', (['args.test_ratings', 'args.users_test', 'args.prefetch_to_gpu'], {}), '(args.test_ratings, args.users_test, args.prefetch_to_gpu)\n', (714, 772), False, 'from datasets import GDADataset, NodeClassification\n'), ((799, 857), 'datasets.NodeClassification', 'NodeClassification', (['args.users_train', 'args.prefetch_to_gpu'], {}), '(args.users_train, args.prefetch_to_gpu)\n', (817, 857), False, 'from datasets import GDADataset, NodeClassification\n'), ((882, 939), 'datasets.NodeClassification', 'NodeClassification', (['args.users_test', 'args.prefetch_to_gpu'], {}), '(args.users_test, args.prefetch_to_gpu)\n', (900, 939), False, 'from datasets import GDADataset, NodeClassification\n'), ((1244, 1267), 'torch.LongTensor', 'torch.LongTensor', (['edges'], {}), '(edges)\n', (1260, 1267), False, 'import torch\n'), ((1935, 2081), 'torch.utils.data.DataLoader', 'DataLoader', (['train_fairness_set'], {'batch_size': '(256)', 'shuffle': '(True)', 'drop_last': '(False)', 'num_workers': '(4)', 'pin_memory': '(True)', 'collate_fn': 'node_cls_collate_fn'}), '(train_fairness_set, batch_size=256, shuffle=True, drop_last=\n False, num_workers=4, pin_memory=True, collate_fn=node_cls_collate_fn)\n', (1945, 2081), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3961, 4030), 'helper.os.path.join', 'os.path.join', (['"""./checkpoints"""', 'args.experiment', 'args.task', 'args.model'], {}), "('./checkpoints', args.experiment, args.task, args.model)\n", (3973, 4030), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, 
test_node_cls, train_gda, test_gda\n'), ((4124, 4175), 'helper.os.path.join', 'os.path.join', (['dirname', "(logname + 'model_gender.pth')"], {}), "(dirname, logname + 'model_gender.pth')\n", (4136, 4175), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((4227, 4281), 'helper.os.path.join', 'os.path.join', (['dirname', "(logname + 'attacker_gender.pth')"], {}), "(dirname, logname + 'attacker_gender.pth')\n", (4239, 4281), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((6030, 6078), 'helper.os.path.join', 'os.path.join', (['dirname', "(logname + 'model_age.pth')"], {}), "(dirname, logname + 'model_age.pth')\n", (6042, 6078), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((6130, 6181), 'helper.os.path.join', 'os.path.join', (['dirname', "(logname + 'attacker_age.pth')"], {}), "(dirname, logname + 'attacker_age.pth')\n", (6142, 6181), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((8008, 8063), 'helper.os.path.join', 'os.path.join', (['dirname', "(logname + 'model_occupation.pth')"], {}), "(dirname, logname + 'model_occupation.pth')\n", (8020, 8063), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((8115, 8173), 'helper.os.path.join', 'os.path.join', (['dirname', "(logname + 'attacker_occupation.pth')"], {}), "(dirname, logname + 
'attacker_occupation.pth')\n", (8127, 8173), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((1550, 1675), 'torch.utils.data.DataLoader', 'DataLoader', (['train_gda_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': '(0)', 'collate_fn': 'collate_fn'}), '(train_gda_set, batch_size=args.batch_size, shuffle=True,\n drop_last=True, num_workers=0, collate_fn=collate_fn)\n', (1560, 1675), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1739, 1881), 'torch.utils.data.DataLoader', 'DataLoader', (['train_gda_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': '(4)', 'pin_memory': '(True)', 'collate_fn': 'collate_fn'}), '(train_gda_set, batch_size=args.batch_size, shuffle=True,\n drop_last=True, num_workers=4, pin_memory=True, collate_fn=collate_fn)\n', (1749, 1881), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3248, 3350), 'helper.train_gda', 'train_gda', (['train_loader', 'node_cls_loader', 'args', 'model', 'optimizer_task', 'optimizer_adv_gender', '(False)'], {}), '(train_loader, node_cls_loader, args, model, optimizer_task,\n optimizer_adv_gender, False)\n', (3257, 3350), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((3356, 3368), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3366, 3368), False, 'import gc\n'), ((3725, 3799), 'helper.train_node_cls', 'train_node_cls', (['node_cls_loader', 'args', 'attacker', 'optimizer_attacker_gender'], {}), '(node_cls_loader, args, attacker, optimizer_attacker_gender)\n', (3739, 3799), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, 
get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((3809, 3821), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3819, 3821), False, 'import gc\n'), ((5340, 5439), 'helper.train_gda', 'train_gda', (['train_loader', 'node_cls_loader', 'args', 'model', 'optimizer_task', 'optimizer_adv_age', '(False)'], {}), '(train_loader, node_cls_loader, args, model, optimizer_task,\n optimizer_adv_age, False)\n', (5349, 5439), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((5444, 5456), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5454, 5456), False, 'import gc\n'), ((5802, 5873), 'helper.train_node_cls', 'train_node_cls', (['node_cls_loader', 'args', 'attacker', 'optimizer_attacker_age'], {}), '(node_cls_loader, args, attacker, optimizer_attacker_age)\n', (5816, 5873), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((5883, 5895), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5893, 5895), False, 'import gc\n'), ((7268, 7374), 'helper.train_gda', 'train_gda', (['train_loader', 'node_cls_loader', 'args', 'model', 'optimizer_task', 'optimizer_adv_occupation', '(False)'], {}), '(train_loader, node_cls_loader, args, model, optimizer_task,\n optimizer_adv_occupation, False)\n', (7277, 7374), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((7380, 7392), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7390, 7392), False, 'import gc\n'), ((7766, 7844), 'helper.train_node_cls', 'train_node_cls', (['node_cls_loader', 'args', 'attacker', 'optimizer_attacker_occupation'], {}), '(node_cls_loader, args, attacker, 
optimizer_attacker_occupation)\n', (7780, 7844), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((7854, 7866), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7864, 7866), False, 'import gc\n'), ((964, 1056), 'numpy.stack', 'np.stack', (["[args.train_ratings['user_id'].values, args.train_ratings['movie_id'].values]"], {}), "([args.train_ratings['user_id'].values, args.train_ratings[\n 'movie_id'].values])\n", (972, 1056), True, 'import numpy as np\n'), ((1109, 1201), 'numpy.stack', 'np.stack', (["[args.train_ratings['movie_id'].values, args.train_ratings['user_id'].values]"], {}), "([args.train_ratings['movie_id'].values, args.train_ratings[\n 'user_id'].values])\n", (1117, 1201), True, 'import numpy as np\n'), ((3441, 3483), 'models.NodeClassifier', 'NodeClassifier', (['args.embed_dim', 'embeddings'], {}), '(args.embed_dim, embeddings)\n', (3455, 3483), False, 'from models import SharedBilinearDecoder, GAL, NodeClassifier\n'), ((3835, 3850), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3848, 3850), False, 'import torch\n'), ((3882, 3945), 'helper.test_node_cls', 'test_node_cls', (['test_fairness_set', 'args', 'attacker'], {'mode': '"""gender"""'}), "(test_fairness_set, args, attacker, mode='gender')\n", (3895, 3945), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((4035, 4048), 'helper.Path', 'Path', (['dirname'], {}), '(dirname)\n', (4039, 4048), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((5529, 5571), 'models.NodeClassifier', 'NodeClassifier', (['args.embed_dim', 'embeddings'], {}), '(args.embed_dim, embeddings)\n', 
(5543, 5571), False, 'from models import SharedBilinearDecoder, GAL, NodeClassifier\n'), ((5909, 5924), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5922, 5924), False, 'import torch\n'), ((5956, 6016), 'helper.test_node_cls', 'test_node_cls', (['test_fairness_set', 'args', 'attacker'], {'mode': '"""age"""'}), "(test_fairness_set, args, attacker, mode='age')\n", (5969, 6016), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((7465, 7507), 'models.NodeClassifier', 'NodeClassifier', (['args.embed_dim', 'embeddings'], {}), '(args.embed_dim, embeddings)\n', (7479, 7507), False, 'from models import SharedBilinearDecoder, GAL, NodeClassifier\n'), ((7880, 7895), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7893, 7895), False, 'import torch\n'), ((7927, 7994), 'helper.test_node_cls', 'test_node_cls', (['test_fairness_set', 'args', 'attacker'], {'mode': '"""occupation"""'}), "(test_fairness_set, args, attacker, mode='occupation')\n", (7940, 7994), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((1308, 1362), 'models.SharedBilinearDecoder', 'SharedBilinearDecoder', (['args.num_rel', '(2)', 'args.embed_dim'], {}), '(args.num_rel, 2, args.embed_dim)\n', (1329, 1362), False, 'from models import SharedBilinearDecoder, GAL, NodeClassifier\n'), ((1395, 1450), 'models.GAL', 'GAL', (['decoder', 'args.embed_dim', 'args.num_ent', 'edges', 'args'], {}), '(decoder, args.embed_dim, args.num_ent, edges, args)\n', (1398, 1450), False, 'from models import SharedBilinearDecoder, GAL, NodeClassifier\n'), ((3170, 3185), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3183, 3185), False, 'import torch\n'), ((3203, 3238), 'helper.test_gda', 'test_gda', (['test_gda_set', 'args', 'model'], 
{}), '(test_gda_set, args, model)\n', (3211, 3238), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((5262, 5277), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5275, 5277), False, 'import torch\n'), ((5295, 5330), 'helper.test_gda', 'test_gda', (['test_gda_set', 'args', 'model'], {}), '(test_gda_set, args, model)\n', (5303, 5330), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n'), ((7190, 7205), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7203, 7205), False, 'import torch\n'), ((7223, 7258), 'helper.test_gda', 'test_gda', (['test_gda_set', 'args', 'model'], {}), '(test_gda_set, args, model)\n', (7231, 7258), False, 'from helper import Path, os, to_device, make_dataset_1M, create_optimizer, ltensor, collate_fn, node_cls_collate_fn, get_logger, train_node_cls, test_node_cls, train_gda, test_gda\n')] |
# Import routines
import numpy as np
import math
import random
# Defining hyperparameters
m = 5   # number of cities (pickup/drop locations), indexed 0 .. m-1
t = 24  # number of hours in a day, 0 .. t-1
d = 7   # number of days in a week, 0 .. d-1
C = 5   # per-hour fuel and other running costs
R = 9   # per-hour revenue while a passenger is on board


class CabDriver():
    """MDP environment for the cab-driver profit-maximisation problem.

    A state is ``[location, hour, day]`` and an action is a
    ``(pickup, drop)`` pair; ``(0, 0)`` means "go offline" for one hour.
    """

    def __init__(self):
        """Initialise the state and define the action and state spaces."""
        # (0, 0) is the explicit offline/no-ride action; real rides need
        # pickup != drop, giving m*(m-1) ride actions plus the offline one.
        self.action_space = [(0, 0)] + [(x, y) for x in range(m) for y in range(m) if x != y]
        # Every combination of location, hour-of-day and day-of-week.
        self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]
        self.state_init = random.choice(self.state_space)
        # Start the first round
        self.reset()

    ## Encoding state (or state-action) for NN input
    def state_encod_arch1(self, state):
        """One-hot encode ``state = [loc, hour, day]`` into a vector of size m + t + d."""
        state_encod = [0] * (m + t + d)
        state_encod[state[0]] = 1          # location one-hot
        state_encod[m + state[1]] = 1      # hour one-hot
        state_encod[m + t + state[2]] = 1  # day one-hot
        return state_encod

    ## Getting number of requests
    def requests(self, state):
        """Sample customer requests for the current location.

        Returns ``(action_indices, actions)``; the offline action
        (index 0) is always available and is appended, never sampled.
        """
        location = state[0]
        # Poisson rate of requests per hour for locations A..E (per the MDP
        # table).  A dict lookup fixes the original unbound-variable bug for
        # an out-of-range location (now a clear KeyError, not a NameError).
        poisson_rates = {0: 2, 1: 12, 2: 4, 3: 7, 4: 8}
        requests = np.random.poisson(poisson_rates[location])
        if requests > 15:
            requests = 15
        # (0, 0) is not a customer request, hence excluded from sampling.
        possible_actions_index = random.sample(range(1, (m - 1) * m + 1), requests) + [0]
        actions = [self.action_space[i] for i in possible_actions_index]
        return possible_actions_index, actions

    def reward_func(self, wait_time, transit_time, ride_time):
        """Revenue accrues only during the ride; cost C accrues for every
        hour spent waiting, driving to the pickup, or carrying the passenger."""
        return (R * ride_time) - (C * (wait_time + transit_time + ride_time))

    def next_state_func(self, state, action, Time_matrix):
        """Apply ``action`` in ``state``.

        Returns ``(next_state, wait_time, transit_time, ride_time)`` where
        the three durations (hours) decompose the transition.
        """
        curr_loc, curr_time, curr_day = state
        pickup_loc, drop_loc = action
        wait_time = 0     # hours spent offline (driver refused all requests)
        transit_time = 0  # hours to reach the pickup from the current location
        ride_time = 0     # hours from pickup to drop
        if pickup_loc == 0 and drop_loc == 0:
            # Offline action: just advance the clock by one hour in place.
            wait_time = 1
            next_loc = curr_loc
        elif curr_loc == pickup_loc:
            # Already at the pickup point: no transit, ride starts now.
            ride_time = Time_matrix[curr_loc][drop_loc][curr_time][curr_day]
            next_loc = drop_loc
        else:
            # Drive to the pickup first; the ride duration is looked up at
            # the (later) time of actually reaching the pickup point.
            transit_time = Time_matrix[curr_loc][pickup_loc][curr_time][curr_day]
            updated_time, updated_day = self.calculate_new_time(curr_time, curr_day, transit_time)
            ride_time = Time_matrix[pickup_loc][drop_loc][updated_time][updated_day]
            next_loc = drop_loc
        total_time = wait_time + transit_time + ride_time
        next_time, next_day = self.calculate_new_time(curr_time, curr_day, total_time)
        next_state = [next_loc, next_time, next_day]
        return next_state, wait_time, transit_time, ride_time

    def calculate_new_time(self, curr_time, curr_day, ride_time):
        """Advance the clock by ``ride_time`` hours, wrapping hours (mod 24)
        and days (mod 7).

        Bug fix: the original computed the day carry from the already
        wrapped hour (``(curr_time % 24 + ride_time) // 24``), so e.g.
        starting at 23:00 with a 2-hour ride never advanced the day.  The
        carry must come from the un-wrapped total.
        """
        total = curr_time + int(ride_time)
        return total % 24, (curr_day + total // 24) % 7

    # defining step function
    def step(self, state, action, Time_matrix):
        """Standard RL step: returns (reward, next_state, total_elapsed_hours)."""
        next_state, wait_time, transit_time, ride_time = self.next_state_func(
            state, action, Time_matrix)
        reward = self.reward_func(wait_time, transit_time, ride_time)
        total_time = wait_time + transit_time + ride_time
        return reward, next_state, total_time

    def reset(self):
        """Return (action_space, state_space, initial_state)."""
        return self.action_space, self.state_space, self.state_init
| [
"random.choice",
"numpy.random.poisson"
] | [((778, 809), 'random.choice', 'random.choice', (['self.state_space'], {}), '(self.state_space)\n', (791, 809), False, 'import random\n'), ((2128, 2148), 'numpy.random.poisson', 'np.random.poisson', (['(2)'], {}), '(2)\n', (2145, 2148), True, 'import numpy as np\n'), ((2259, 2280), 'numpy.random.poisson', 'np.random.poisson', (['(12)'], {}), '(12)\n', (2276, 2280), True, 'import numpy as np\n'), ((2390, 2410), 'numpy.random.poisson', 'np.random.poisson', (['(4)'], {}), '(4)\n', (2407, 2410), True, 'import numpy as np\n'), ((2520, 2540), 'numpy.random.poisson', 'np.random.poisson', (['(7)'], {}), '(7)\n', (2537, 2540), True, 'import numpy as np\n'), ((2650, 2670), 'numpy.random.poisson', 'np.random.poisson', (['(8)'], {}), '(8)\n', (2667, 2670), True, 'import numpy as np\n')] |
import os
import pyparallelproj as ppp
import numpy as np
import argparse
nsubsets = 1
# Scanner geometry: a regular polygon of 28 modules, each with 16 crystals
# in-plane and 1 axially.
scanner = ppp.RegularPolygonPETScanner(ncrystals_per_module = np.array([16,1]),
                                     nmodules = np.array([28,1]))
# Image grid: 2 mm isotropic voxels; the axial size n2 covers the scanner's
# axial crystal extent (at least one slice).
voxsize = np.array([2.,2.,2.])
n0 = 350
n1 = 350
n2 = max(1,int((scanner.xc2.max() - scanner.xc2.min()) / voxsize[2]))
# A random test image is sufficient for an adjointness check.
img = np.random.rand(n0,n1,n2)
# World coordinate of voxel (0,0,0), chosen so the grid is centred on the origin.
img_origin = (-(np.array(img.shape) / 2) + 0.5) * voxsize
# TOF projector.  sigma_tof = 60./2.35 -- presumably a 60 mm TOF FWHM
# converted to a Gaussian sigma (FWHM = 2.35*sigma); confirm against the
# pyparallelproj documentation.
sino_params = ppp.PETSinogramParameters(scanner, ntofbins = 27, tofbin_width = 28.)
proj = ppp.SinogramProjector(scanner, sino_params, img.shape, nsubsets = nsubsets,
                            voxsize = voxsize, img_origin = img_origin,
                            tof = True, sigma_tof = 60./2.35, n_sigmas = 3)
######## tof projections
# random sinogram for the back-projection side of the inner-product test
tsino = np.random.rand(*proj.sino_params.shape)
img_fwd_tof = proj.fwd_project(img)
back_tof = proj.back_project(tsino)
# Adjointness check: <x, A^T y> must equal <A x, y>, so the two printed
# numbers should agree up to floating-point error.
print((img*back_tof).sum())
print((img_fwd_tof*tsino).sum())
######## nontof projections
proj.set_tof(False)
# random sinogram with the (smaller) non-TOF shape
rsino = np.random.rand(*proj.sino_params.nontof_shape)
img_fwd_nontof = proj.fwd_project(img)
back_nontof = proj.back_project(rsino)
# Same adjointness check for the non-TOF projector.
print((img*back_nontof).sum())
print((img_fwd_nontof*rsino).sum())
| [
"numpy.random.rand",
"pyparallelproj.SinogramProjector",
"numpy.array",
"pyparallelproj.PETSinogramParameters"
] | [((299, 324), 'numpy.array', 'np.array', (['[2.0, 2.0, 2.0]'], {}), '([2.0, 2.0, 2.0])\n', (307, 324), True, 'import numpy as np\n'), ((454, 480), 'numpy.random.rand', 'np.random.rand', (['n0', 'n1', 'n2'], {}), '(n0, n1, n2)\n', (468, 480), True, 'import numpy as np\n'), ((577, 643), 'pyparallelproj.PETSinogramParameters', 'ppp.PETSinogramParameters', (['scanner'], {'ntofbins': '(27)', 'tofbin_width': '(28.0)'}), '(scanner, ntofbins=27, tofbin_width=28.0)\n', (602, 643), True, 'import pyparallelproj as ppp\n'), ((654, 820), 'pyparallelproj.SinogramProjector', 'ppp.SinogramProjector', (['scanner', 'sino_params', 'img.shape'], {'nsubsets': 'nsubsets', 'voxsize': 'voxsize', 'img_origin': 'img_origin', 'tof': '(True)', 'sigma_tof': '(60.0 / 2.35)', 'n_sigmas': '(3)'}), '(scanner, sino_params, img.shape, nsubsets=nsubsets,\n voxsize=voxsize, img_origin=img_origin, tof=True, sigma_tof=60.0 / 2.35,\n n_sigmas=3)\n', (675, 820), True, 'import pyparallelproj as ppp\n'), ((944, 983), 'numpy.random.rand', 'np.random.rand', (['*proj.sino_params.shape'], {}), '(*proj.sino_params.shape)\n', (958, 983), True, 'import numpy as np\n'), ((1258, 1304), 'numpy.random.rand', 'np.random.rand', (['*proj.sino_params.nontof_shape'], {}), '(*proj.sino_params.nontof_shape)\n', (1272, 1304), True, 'import numpy as np\n'), ((169, 186), 'numpy.array', 'np.array', (['[16, 1]'], {}), '([16, 1])\n', (177, 186), True, 'import numpy as np\n'), ((249, 266), 'numpy.array', 'np.array', (['[28, 1]'], {}), '([28, 1])\n', (257, 266), True, 'import numpy as np\n'), ((495, 514), 'numpy.array', 'np.array', (['img.shape'], {}), '(img.shape)\n', (503, 514), True, 'import numpy as np\n')] |
import os
import codecs
import numpy as np
import matplotlib.pyplot as plt
Has_Header = True  # flipped to False once the CSV's first (header) line has been skipped
CSV = 'data/valence_arousal_exp.csv'  # path to the experiment responses parsed by this script
def calculate_mean_variance(data):
    """Circular mean direction and circular standard deviation of 2-D points.

    Each row ``(v, a)`` of ``data`` is mapped to the angle
    ``theta = atan2(v, a)``; the function returns ``(mu, sigma)`` where
    ``mu`` is the circular mean direction and ``sigma`` the circular
    standard deviation ``sqrt(-2 * ln(R))`` with ``R`` the mean resultant
    length.

    Fix: ``np.arctan2`` replaces the quadrant-blind ``np.arctan(v / a)``
    form, which was only valid for a > 0 and divided by zero at a == 0.
    For first-quadrant data (v, a > 0) the results are identical.
    """
    theta = np.arctan2(data[:, 0], data[:, 1])
    m_x = np.mean(np.cos(theta))
    m_y = np.mean(np.sin(theta))
    # Mean direction must also be quadrant-aware.
    mu = np.arctan2(m_y, m_x)
    # R in [0, 1] is the mean resultant length; R -> 1 means tight clustering.
    R = np.sqrt(m_x ** 2 + m_y ** 2)
    sigma = np.sqrt(-2 * np.log(R))
    return mu, sigma
def filled_arc(center, radius, theta1, theta2, color):
    """Shade the region between a circular arc and its chord.

    Ref: https://stackoverflow.com/a/30642704
    """
    cx, cy = center[0], center[1]
    angles = np.linspace(theta1, theta2, 100)
    arc_x = cx + radius * np.cos(angles)
    arc_y = cy + radius * np.sin(angles)
    # Straight line through the arc's two endpoints (the chord).
    slope = (arc_y[-1] - arc_y[0]) / (arc_x[-1] - arc_x[0])
    intercept = arc_y[0] - slope * arc_x[0]
    chord_y = slope * arc_x + intercept
    # Fill between the arc and the chord.
    plt.fill_between(arc_x, arc_y, chord_y,
                     facecolor=color, edgecolor='none', alpha=0.5)
def filled_sector(center, radius, theta1, theta2, color):
    """Shade a full circular sector: the arc segment plus the central triangle."""
    filled_arc(center, radius, theta1, theta2, color)
    # Triangle from the centre to the two arc endpoints.
    apex_x, apex_y = center
    end1_x = center[0] + radius * np.cos(theta1)
    end1_y = center[1] + radius * np.sin(theta1)
    end2_x = center[0] + radius * np.cos(theta2)
    end2_y = center[1] + radius * np.sin(theta2)
    plt.fill([apex_x, end1_x, end2_x, apex_x], [apex_y, end1_y, end2_y, apex_y],
             facecolor=color, edgecolor='none', alpha=0.5)
def plot(name_lst, group_lst, mu_lst, sigma_lst):
cx, cy = 5.0, 5.0
colors = ['red', 'blue']
markers = ['x', '+']
linestyles = ['r-', 'b--']
bg_img = plt.imread('data/28-affect-words.png')
# plt.imshow(bg_img, extent=[-0.5, 10.5, -0.5, 10.5])
plt.imshow(bg_img, extent=[-0.2, 10.2, 0.1, 9.9])
theta = np.linspace(0, 2 * np.pi, 100)
radius = 4.8
x = radius * np.cos(theta) + cx
y = radius * np.sin(theta) + cy
plt.plot(x, y, color='black')
for name, group, mu, sigma, color, marker, linestyle in \
zip(name_lst, group_lst, mu_lst, sigma_lst, colors, markers, linestyles):
plt.plot(group[:, 0], group[:, 1], marker, label=name, color=color)
ex = cx + radius * np.cos(mu)
ey = cy + radius * np.sin(mu)
plt.plot([cx, ex], [cy, ey], linestyle)
for d_mu in [-sigma, sigma]:
ex = cx + radius * np.cos(mu + d_mu)
ey = cy + radius * np.sin(mu + d_mu)
plt.plot([cx, ex], [cy, ey], linestyle='-', color='black')
filled_sector([cx, cy], radius, mu - sigma, mu + sigma, color)
plt.axis('equal')
plt.xlabel('Valence')
plt.ylabel('Arousal')
plt.xlim(0, 10)
plt.ylim(0, 10)
plt.legend(loc='lower left', bbox_to_anchor=(0.65, 0.0))
plt.savefig('valence_arousal_plain.pdf', bbox_inches='tight')
plt.show()
group_1, group_2 = [], []
with codecs.open(CSV, 'r', 'utf-8') as f:
for line in f.readlines():
if Has_Header:
Has_Header = False
continue
eps = np.random.random(2) * 0.1
data = line.strip().split(',')
if int(data[0]) == 1:
group_1.append((int(data[2]) + eps[0], int(data[3]) + eps[1]))
elif int(data[0]) == 2:
group_2.append((int(data[2]) + eps[0], int(data[3]) + eps[1]))
group_1 = np.array(group_1)
group_2 = np.array(group_2)
mu_1, sigma_1 = calculate_mean_variance(group_1)
mu_2, sigma_2 = calculate_mean_variance(group_2)
plot(['Reactive HRI', 'TFVT-HRI'], [group_2, group_1], [mu_2, mu_1], [sigma_2, sigma_1])
| [
"numpy.sin",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.imread",
"codecs.open",
"matplotlib.pyplot.imshow",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"numpy.arctan",
"matplotlib.pyplot.... | [((3163, 3180), 'numpy.array', 'np.array', (['group_1'], {}), '(group_1)\n', (3171, 3180), True, 'import numpy as np\n'), ((3191, 3208), 'numpy.array', 'np.array', (['group_2'], {}), '(group_2)\n', (3199, 3208), True, 'import numpy as np\n'), ((180, 214), 'numpy.arctan', 'np.arctan', (['(data[:, 0] / data[:, 1])'], {}), '(data[:, 0] / data[:, 1])\n', (189, 214), True, 'import numpy as np\n'), ((290, 310), 'numpy.arctan', 'np.arctan', (['(m_y / m_x)'], {}), '(m_y / m_x)\n', (299, 310), True, 'import numpy as np\n'), ((320, 348), 'numpy.sqrt', 'np.sqrt', (['(m_x ** 2 + m_y ** 2)'], {}), '(m_x ** 2 + m_y ** 2)\n', (327, 348), True, 'import numpy as np\n'), ((521, 553), 'numpy.linspace', 'np.linspace', (['theta1', 'theta2', '(100)'], {}), '(theta1, theta2, 100)\n', (532, 553), True, 'import numpy as np\n'), ((779, 851), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'y', 'y2'], {'facecolor': 'color', 'edgecolor': '"""none"""', 'alpha': '(0.5)'}), "(x, y, y2, facecolor=color, edgecolor='none', alpha=0.5)\n", (795, 851), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1301), 'matplotlib.pyplot.fill', 'plt.fill', (['[x_0, x_1, x_2, x_0]', '[y_0, y_1, y_2, y_0]'], {'facecolor': 'color', 'edgecolor': '"""none"""', 'alpha': '(0.5)'}), "([x_0, x_1, x_2, x_0], [y_0, y_1, y_2, y_0], facecolor=color,\n edgecolor='none', alpha=0.5)\n", (1207, 1301), True, 'import matplotlib.pyplot as plt\n'), ((1484, 1522), 'matplotlib.pyplot.imread', 'plt.imread', (['"""data/28-affect-words.png"""'], {}), "('data/28-affect-words.png')\n", (1494, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1634), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bg_img'], {'extent': '[-0.2, 10.2, 0.1, 9.9]'}), '(bg_img, extent=[-0.2, 10.2, 0.1, 9.9])\n', (1595, 1634), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1678), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (1659, 1678), True, 'import numpy as 
np\n'), ((1772, 1801), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""black"""'}), "(x, y, color='black')\n", (1780, 1801), True, 'import matplotlib.pyplot as plt\n'), ((2432, 2449), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2440, 2449), True, 'import matplotlib.pyplot as plt\n'), ((2454, 2475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Valence"""'], {}), "('Valence')\n", (2464, 2475), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Arousal"""'], {}), "('Arousal')\n", (2490, 2501), True, 'import matplotlib.pyplot as plt\n'), ((2506, 2521), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(10)'], {}), '(0, 10)\n', (2514, 2521), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2541), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(10)'], {}), '(0, 10)\n', (2534, 2541), True, 'import matplotlib.pyplot as plt\n'), ((2546, 2602), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""', 'bbox_to_anchor': '(0.65, 0.0)'}), "(loc='lower left', bbox_to_anchor=(0.65, 0.0))\n", (2556, 2602), True, 'import matplotlib.pyplot as plt\n'), ((2607, 2668), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""valence_arousal_plain.pdf"""'], {'bbox_inches': '"""tight"""'}), "('valence_arousal_plain.pdf', bbox_inches='tight')\n", (2618, 2668), True, 'import matplotlib.pyplot as plt\n'), ((2673, 2683), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2681, 2683), True, 'import matplotlib.pyplot as plt\n'), ((2717, 2747), 'codecs.open', 'codecs.open', (['CSV', '"""r"""', '"""utf-8"""'], {}), "(CSV, 'r', 'utf-8')\n", (2728, 2747), False, 'import codecs\n'), ((233, 246), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (239, 246), True, 'import numpy as np\n'), ((266, 279), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (272, 279), True, 'import numpy as np\n'), ((1955, 2022), 'matplotlib.pyplot.plot', 'plt.plot', (['group[:, 0]', 
'group[:, 1]', 'marker'], {'label': 'name', 'color': 'color'}), '(group[:, 0], group[:, 1], marker, label=name, color=color)\n', (1963, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2108, 2147), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx, ex]', '[cy, ey]', 'linestyle'], {}), '([cx, ex], [cy, ey], linestyle)\n', (2116, 2147), True, 'import matplotlib.pyplot as plt\n'), ((374, 383), 'numpy.log', 'np.log', (['R'], {}), '(R)\n', (380, 383), True, 'import numpy as np\n'), ((583, 594), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (589, 594), True, 'import numpy as np\n'), ((624, 635), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (630, 635), True, 'import numpy as np\n'), ((1040, 1054), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (1046, 1054), True, 'import numpy as np\n'), ((1086, 1100), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (1092, 1100), True, 'import numpy as np\n'), ((1133, 1147), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (1139, 1147), True, 'import numpy as np\n'), ((1179, 1193), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (1185, 1193), True, 'import numpy as np\n'), ((1713, 1726), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1719, 1726), True, 'import numpy as np\n'), ((1749, 1762), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1755, 1762), True, 'import numpy as np\n'), ((2296, 2354), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx, ex]', '[cy, ey]'], {'linestyle': '"""-"""', 'color': '"""black"""'}), "([cx, ex], [cy, ey], linestyle='-', color='black')\n", (2304, 2354), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2894), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (2891, 2894), True, 'import numpy as np\n'), ((2051, 2061), 'numpy.cos', 'np.cos', (['mu'], {}), '(mu)\n', (2057, 2061), True, 'import numpy as np\n'), ((2089, 2099), 'numpy.sin', 'np.sin', (['mu'], {}), '(mu)\n', (2095, 2099), True, 'import numpy as np\n'), ((2217, 2234), 'numpy.cos', 
'np.cos', (['(mu + d_mu)'], {}), '(mu + d_mu)\n', (2223, 2234), True, 'import numpy as np\n'), ((2266, 2283), 'numpy.sin', 'np.sin', (['(mu + d_mu)'], {}), '(mu + d_mu)\n', (2272, 2283), True, 'import numpy as np\n')] |
import numpy
import matplotlib.pyplot as plt
FILE_NAME = 'rewards_nonshare.npz'
def smooth(reward_vec, filter_size):
l = len(reward_vec) - filter_size + 1
print(len(reward_vec))
smooth_reward_vec = numpy.zeros(l)
for i in range(l):
reward = numpy.mean(reward_vec[i:i+filter_size])
smooth_reward_vec[i] = reward
return smooth_reward_vec
if __name__ == '__main__':
f = numpy.load(FILE_NAME)
reward = f['arr_0']
qmax = f['arr_1']
reward_smooth = smooth(reward, 300)
l = len(reward_smooth)
fig = plt.figure(figsize=(8,6))
line1, = plt.plot(reward_smooth, color='r', linestyle='-', linewidth=3)
line2, = plt.plot(numpy.arange(l), -150 * numpy.ones(l), color='k', linestyle=':', linewidth=1)
plt.xlabel('Episode', fontsize=26)
plt.ylabel('Reward', fontsize=24)
plt.xticks(fontsize=22)
plt.yticks([-800, -700, -600, -500, -400, -300, -200, -150, -100, 0], fontsize=22)
plt.axis([-20, l+10, -600, -100])
plt.tight_layout()
fig.savefig('reward.pdf', format='pdf', dpi=1200)
plt.show()
| [
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
... | [((212, 226), 'numpy.zeros', 'numpy.zeros', (['l'], {}), '(l)\n', (223, 226), False, 'import numpy\n'), ((415, 436), 'numpy.load', 'numpy.load', (['FILE_NAME'], {}), '(FILE_NAME)\n', (425, 436), False, 'import numpy\n'), ((570, 596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (580, 596), True, 'import matplotlib.pyplot as plt\n'), ((609, 671), 'matplotlib.pyplot.plot', 'plt.plot', (['reward_smooth'], {'color': '"""r"""', 'linestyle': '"""-"""', 'linewidth': '(3)'}), "(reward_smooth, color='r', linestyle='-', linewidth=3)\n", (617, 671), True, 'import matplotlib.pyplot as plt\n'), ((776, 810), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {'fontsize': '(26)'}), "('Episode', fontsize=26)\n", (786, 810), True, 'import matplotlib.pyplot as plt\n'), ((815, 848), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward"""'], {'fontsize': '(24)'}), "('Reward', fontsize=24)\n", (825, 848), True, 'import matplotlib.pyplot as plt\n'), ((853, 876), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(22)'}), '(fontsize=22)\n', (863, 876), True, 'import matplotlib.pyplot as plt\n'), ((882, 968), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-800, -700, -600, -500, -400, -300, -200, -150, -100, 0]'], {'fontsize': '(22)'}), '([-800, -700, -600, -500, -400, -300, -200, -150, -100, 0],\n fontsize=22)\n', (892, 968), True, 'import matplotlib.pyplot as plt\n'), ((970, 1005), 'matplotlib.pyplot.axis', 'plt.axis', (['[-20, l + 10, -600, -100]'], {}), '([-20, l + 10, -600, -100])\n', (978, 1005), True, 'import matplotlib.pyplot as plt\n'), ((1008, 1026), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1024, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1093, 1095), True, 'import matplotlib.pyplot as plt\n'), ((267, 308), 'numpy.mean', 'numpy.mean', (['reward_vec[i:i + filter_size]'], {}), '(reward_vec[i:i + 
filter_size])\n', (277, 308), False, 'import numpy\n'), ((694, 709), 'numpy.arange', 'numpy.arange', (['l'], {}), '(l)\n', (706, 709), False, 'import numpy\n'), ((718, 731), 'numpy.ones', 'numpy.ones', (['l'], {}), '(l)\n', (728, 731), False, 'import numpy\n')] |
"""
Contains class DistanceTo for calculations of distance between segments of
a segmented images and given region(s)
# Author: <NAME> (MPI for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import zip
from builtins import range
__version__ = "$Revision$"
import sys
import logging
import inspect
import numpy
import scipy
import scipy.ndimage as ndimage
from .features import Features
class DistanceTo(Features):
"""
Distance from segments to specified region(s)
Important attributes:
- distance: distance for each segment
- closestRegion: id of the closest region, for each segment
Basic usage:
# caclulate
dist = DistanceTo(segments=segment_object)
dist.getDistance(regionIds, regions)
# show results
dist.distance
dist.closestRegion
Note: unlike other classes that inherit from Features, the data of this
class is internally storred in a compact form. That is, elements of
self.ids and self._distance directly correspond to each other, both arrays
are compact. On the other hand, self.distance is dynamically generated
(from self._distance) and is indexed by ids (self.distance[i] is the
distance for the i-th segment). Important consequence is that changing
self.ids effectively changes data (self.distance). The same is true for
self.closestRegion and self._closestRegion.
"""
#############################################################
#
# Initialization
#
#############################################################
def __init__(self, segments=None, ids=None):
"""
Initializes attributes.
Arguments:
- segments: (Segment) segments
- ids: segment ids (if None segments.ids used)
"""
# set segments and ids
#super(DistanceTo, self).__init__(segments=segments, ids=ids)
self.segments = segments
self._ids = ids
if self._ids is None:
if self.segments is not None:
self._ids = segments.ids
else:
self._ids = numpy.asarray(ids)
# local data attributes
self.dataNames = ['distance', 'closestRegion']
self.compact = True
#############################################################
#
# Attributes
#
#############################################################
def getDistance(self, ids=None):
"""
Get distance to closest region for each segment. Requires self.ids
to be set properly. If arg ids is specified it is used instead of
self.ids.
If ids is None, None is returned. If ids is [], 0-length ndarray is
returned.
Argument:
- ids: ids, if not specified self.ids is used
"""
if ids is None:
ids = self.ids
if ids is None:
dist = None
elif len(ids) == 0:
dist = numpy.array([])
else:
dist = numpy.zeros(self.maxId+1) -1
dist[ids] = self._distance
return dist
def setDistance(self, distance, ids=None):
"""
Sets distance to closest region for each segment. Requires self.ids
to be set properly. If arg ids is specified it is used instead of
self.ids.
Argument:
- distance: (ndarray or list) distances indexed by ids
- ids: ids, if not specified self.ids is used
"""
if ids is None:
ids = self.ids
if (distance is None) and (ids is None):
self._distance = None
else:
dist = numpy.asarray(distance)
self._distance = dist[ids]
distance = property(fget=getDistance, fset=setDistance,
doc='Distances from segment to their closest regions')
def getClosestRegion(self, ids=None):
"""
Gets closest region id for each segment. Requires self.ids
to be set properly. If arg ids is specified it is used instead of
self.ids.
If ids is None, None is returned. If ids is [], 0-length ndarray is
returned.
Argument:
- ids: ids, if not specified self.ids is used
"""
if ids is None:
ids = self.ids
if ids is None:
res = None
elif len(ids) == 0:
res = numpy.array([])
else:
res = numpy.zeros(max(ids)+1) -1
res[ids] = self._closestRegion
return res
def setClosestRegion(self, closestRegion, ids=None):
"""
Sets closest region id for each segment. Requires self.ids
to be set properly. If arg ids is specified it is used instead of
self.ids.
Argument:
- closestRegion: (ndarray or list) closest region ids indexed by
self.ids (segment ids)
- ids: ids, if not specified self.ids is used
"""
if ids is None:
ids = self.ids
if (closestRegion is None) and (ids is None):
self._closestRegion = None
else:
dist = numpy.asarray(closestRegion)
self._closestRegion = dist[ids]
closestRegion = property(fget=getClosestRegion, fset=setClosestRegion,
doc='ClosestRegions from segment to their ' \
+ 'closest regions')
#############################################################
#
# Calculations
#
#############################################################
def calculate(self, regionIds, regions=None, segments=None,
ids=None, mode='mean', surface=None):
"""
Calculates distance of each segment to its closest region.
Regions are specified by (Image or ndarray) region and regionIds. If
(arg) region is not specifed this instance is used. If regionIds is not
given, the region is defined as all positive elements of region array.
Takes into account positioning of segments and regions.
If surfaces > 0, only the surfaces (of thickness given by arg surface)
of the segments are considered. Otherwise whole segments are taken into
account. In any case the full region is used.
If mode is 'center', shortest distances between the region and segment
centers (note that center may lay outside the segment) are calculated.
If mode is 'min'/'max'/'mean'/'median', the shortest distance between
each (surface, if arg surface is not None) element of segments and the
region are calculated first, and then the min/max/mean/median of these
values is found for each segment separately. Finally, the closest
region for each segment is found.
If more than one region id is given (arg regionIds is a list with
>1 element) the distance between a segment and each region is
calculated first (according to the arg mode) and then the closest
region is found. Note that in case of mean / median mode this means
that the mean / median is calculated to one (closest) region.
If ids are not given, distances to all segments are calculated.
If the distance to a segment can not be calculated (if the segments does
not exist, for example) the result for that segment is set to -1.
Uses self.getDistanceToRegions().
Sets:
- self.distance: ndarray of distances from each segment to its
closest region (indexed by self.ids)
- self.closestRegion: ndarray of closest region ids for each segment
(indexed by self.ids)
Arguments:
- segments: (Segment)) segmented image, if None self.segments is used
- ids: segment ids, if None self.ids is used
- region: (Segments) regions
- regionIds: (single int, list, tuple or ndarray) id(s) of the region
- mode: 'center', 'min', 'max', 'mean' or 'median'
- surface: thickness of segment surfaces
"""
# arguments
if segments is None:
segments = self.segments
if ids is None:
ids = self.ids
# save arguments as attributes
self.regionIds = regionIds
self.mode = mode
self.surface = surface
# bring segments and regions to common inset
seg_data = segments.makeInset(ids=ids, additional=regions,
additionalIds=regionIds, update=False)
if regions is not None:
reg_data = regions.makeInset(ids=regionIds, update=False,
additional=segments, additionalIds=ids)
else:
reg_data = seg_data
# calculate distances to all regions
all_dist = self.getDistanceToRegions(
segments=seg_data, segmentIds=ids, regions=reg_data,
regionIds=regionIds, mode=mode, surface=surface)
# find closest region for each segment and set attributes
if all_dist is not None:
if all_dist.ndim == 2:
self._distance = numpy.min(all_dist, axis=0)
id_pos = numpy.argmin(all_dist, axis=0)
self._closestRegion = numpy.asarray(regionIds)[id_pos]
else:
self._distance = all_dist
self._closestRegion = (
# numpy.zeros(segments.maxId+1, dtype=int) + regionIds
numpy.zeros(len(self.ids), dtype=int) + regionIds)
else:
self._distance = None
self._closestRegion = None
@classmethod
def getDistanceToRegions(cls, segments, segmentIds, regions,
regionIds, mode='mean', surface=None):
"""
Calculates distance of each segment to each region.
Segments are specified by args (ndarray) segments and segmentIds. If
segment ids has no elements None is returned. Regions are specified
by args (ndarray) regions and regionIds. If arg regionIds is None,
None is returned. Args segments and regions are ndarrays that are
expected to have the same shape.
If surfaces > 0, only the surfaces (of thickness given by arg surface)
of the segments are considered. Otherwise whole segments are taken into
account.
If mode is 'center', shortest distances between a region and segment
centers (note that center may lay outside the segment) are calculated.
If mode is 'min'/'max'/'mean'/'median', the shortest distance between
each (surface, if arg surface is not None) element of segments and the
region are calculated first, and then the min/max/mean/median of these
values is found for each segment separately.
If the distance to a segment can not be calculated (if the segments does
not exist, for example) the result for that segment is set to -1.
Uses scipy.ndimage.distance_transform_edt.
Arguments:
- segments: (ndarray) segments
- ids: segment ids
- region: (Segments) regions
- regionIds: (single int, list, tuple or ndarray) region ids
- mode: 'center', 'min', 'max', 'mean' or 'median'
- surface: thickness of segment surfaces
Returns: distances (2D ndarray where axis 0 corresponds to regions
and axis 1 to segments, shape=((len(regionIds), len(segmentIds))
between each segment and each region.
"""
# trivial cases
if (regionIds is None):
return None
if (segmentIds is None) or (len(segmentIds) == 0):
return None
# extract surfaces if required
if (surface is not None) and (surface > 0):
from .segment import Segment
tmp_seg = Segment()
segments = tmp_seg.makeSurfaces(data=segments,
size=surface, ids=segmentIds)
# deal with multiple region ids
if isinstance(regionIds, (list, tuple, numpy.ndarray)):
regionIds = numpy.asarray(regionIds)
distances = \
numpy.zeros(shape=(len(regionIds), len(segmentIds)),
dtype='float')
for reg_id, reg_id_index in zip(regionIds, list(range(len(regionIds)))):
distances[reg_id_index,:] = \
cls.getDistanceToRegions(regions=regions, regionIds=reg_id,
segments=segments, segmentIds=segmentIds,
mode=mode, surface=surface)
return distances
# calculate distance (from all elements) to the region
if regionIds is None:
distance_input = numpy.where(regions>0, 0, 1)
else:
distance_input = numpy.where(regions==regionIds, 0, 1)
if (distance_input > 0).all(): # workaround for scipy bug 1089
dist_array = numpy.zeros(shape=distance_input.shape) - 1
else:
dist_array = ndimage.distance_transform_edt(distance_input)
# find distances to segments
if mode == 'center':
# distances to the segment centers
from .morphology import Morphology
mor = Morphology(segments=segments, ids=segmentIds)
centers = mor.getCenter(real=False)
distances = [dist_array[tuple(centers[id_])] for id_ in segmentIds]
elif mode == 'median':
# median distance to segments
distances = [numpy.median(dist_array[segments==id_]) \
for id_ in segmentIds]
else:
# any of ndarray methods (no arguments)
try:
distances = [getattr(dist_array[segments==id_], mode)() \
for id_ in segmentIds]
except AttributeError:
raise ValueError("Mode ", mode, " was not recognized. It can ",
"be 'center', 'median' or any appropriate ndarray ",
"method name, such as 'min', 'max' or 'mean'.")
distances = numpy.asarray(distances)
return distances
| [
"scipy.ndimage.distance_transform_edt",
"numpy.median",
"numpy.asarray",
"numpy.zeros",
"numpy.argmin",
"numpy.min",
"numpy.where",
"numpy.array"
] | [((14342, 14366), 'numpy.asarray', 'numpy.asarray', (['distances'], {}), '(distances)\n', (14355, 14366), False, 'import numpy\n'), ((2188, 2206), 'numpy.asarray', 'numpy.asarray', (['ids'], {}), '(ids)\n', (2201, 2206), False, 'import numpy\n'), ((3721, 3744), 'numpy.asarray', 'numpy.asarray', (['distance'], {}), '(distance)\n', (3734, 3744), False, 'import numpy\n'), ((5217, 5245), 'numpy.asarray', 'numpy.asarray', (['closestRegion'], {}), '(closestRegion)\n', (5230, 5245), False, 'import numpy\n'), ((12275, 12299), 'numpy.asarray', 'numpy.asarray', (['regionIds'], {}), '(regionIds)\n', (12288, 12299), False, 'import numpy\n'), ((12943, 12973), 'numpy.where', 'numpy.where', (['(regions > 0)', '(0)', '(1)'], {}), '(regions > 0, 0, 1)\n', (12954, 12973), False, 'import numpy\n'), ((13015, 13054), 'numpy.where', 'numpy.where', (['(regions == regionIds)', '(0)', '(1)'], {}), '(regions == regionIds, 0, 1)\n', (13026, 13054), False, 'import numpy\n'), ((13233, 13279), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['distance_input'], {}), '(distance_input)\n', (13263, 13279), True, 'import scipy.ndimage as ndimage\n'), ((3034, 3049), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (3045, 3049), False, 'import numpy\n'), ((4476, 4491), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (4487, 4491), False, 'import numpy\n'), ((9259, 9286), 'numpy.min', 'numpy.min', (['all_dist'], {'axis': '(0)'}), '(all_dist, axis=0)\n', (9268, 9286), False, 'import numpy\n'), ((9312, 9342), 'numpy.argmin', 'numpy.argmin', (['all_dist'], {'axis': '(0)'}), '(all_dist, axis=0)\n', (9324, 9342), False, 'import numpy\n'), ((13150, 13189), 'numpy.zeros', 'numpy.zeros', ([], {'shape': 'distance_input.shape'}), '(shape=distance_input.shape)\n', (13161, 13189), False, 'import numpy\n'), ((3083, 3110), 'numpy.zeros', 'numpy.zeros', (['(self.maxId + 1)'], {}), '(self.maxId + 1)\n', (3094, 3110), False, 'import numpy\n'), ((9381, 9405), 'numpy.asarray', 
'numpy.asarray', (['regionIds'], {}), '(regionIds)\n', (9394, 9405), False, 'import numpy\n'), ((13746, 13787), 'numpy.median', 'numpy.median', (['dist_array[segments == id_]'], {}), '(dist_array[segments == id_])\n', (13758, 13787), False, 'import numpy\n')] |
import numpy as np
from advent.utils import project_root
from advent.utils.serialization import json_load
from advent.dataset.base_dataset import BaseDataset
import cv2
DEFAULT_INFO_PATH = project_root / 'advent/dataset/compound_list/info.json'
class BDDataSet(BaseDataset):
def __init__(self, root, list_path, set='val',
max_iters=None,
crop_size=(321, 321), mean=(128, 128, 128),
load_labels=True,
info_path=DEFAULT_INFO_PATH, labels_size=None):
super().__init__(root, list_path, set, max_iters, crop_size, labels_size, mean)
DEFAULT_INFO_PATH = project_root / 'advent/dataset/compound_list/info.json'
self.load_labels = load_labels
self.info = json_load(info_path)
self.class_names = np.array(self.info['label'], dtype=np.str)
self.mapping = np.array(self.info['label2train'], dtype=np.int)
self.map_vector = np.zeros((self.mapping.shape[0],), dtype=np.int64)
for source_label, target_label in self.mapping:
self.map_vector[source_label] = target_label
def get_metadata(self, name, name_next):
if self.set == 'train':
name = name.split('\t')[0]
name_next = name_next.split('\t')[0]
img_file = self.root / name
img_file_rev = self.root / name_next
label_file = None
else:
img_file = self.root / name
img_file_rev = img_file
label_file = name.replace("images", "labels").replace(".jpg","_train_id.png")
label_file = self.root / label_file
return img_file, img_file_rev, label_file
def map_labels(self, input_):
return self.map_vector[input_.astype(np.int64, copy=False)]
def __getitem__(self, index):
img_file, img_file_rev, label_file, name, name_next = self.files[index]
if not label_file == None:
label = self.get_labels(label_file)
image, image_aug = self.get_image(img_file)
img_file_rev, _ = self.get_image(img_file_rev)
image = self.preprocess(image)
# edge_image = cv2.Canny(image, 50,200)
image_aug = self.preprocess(image_aug)
img_file_rev = self.preprocess(img_file_rev)
if label_file == None:
label = image.copy()
return image.copy(), img_file_rev.copy(), label, np.array(image.shape), name, name_next
| [
"advent.utils.serialization.json_load",
"numpy.zeros",
"numpy.array"
] | [((755, 775), 'advent.utils.serialization.json_load', 'json_load', (['info_path'], {}), '(info_path)\n', (764, 775), False, 'from advent.utils.serialization import json_load\n'), ((803, 845), 'numpy.array', 'np.array', (["self.info['label']"], {'dtype': 'np.str'}), "(self.info['label'], dtype=np.str)\n", (811, 845), True, 'import numpy as np\n'), ((869, 917), 'numpy.array', 'np.array', (["self.info['label2train']"], {'dtype': 'np.int'}), "(self.info['label2train'], dtype=np.int)\n", (877, 917), True, 'import numpy as np\n'), ((945, 995), 'numpy.zeros', 'np.zeros', (['(self.mapping.shape[0],)'], {'dtype': 'np.int64'}), '((self.mapping.shape[0],), dtype=np.int64)\n', (953, 995), True, 'import numpy as np\n'), ((2391, 2412), 'numpy.array', 'np.array', (['image.shape'], {}), '(image.shape)\n', (2399, 2412), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import random
XLIM = (-4, 4)
YLIM = (-4, 4)
def draw():
circ.set_radius(r0)
circ.set_center((x0, y0))
line.set_data([x1, x2], [y1, y2])
a = (x2-x1)**2 + (y2-y1)**2
b = 2*((x2-x1)*(x1-x0)+(y2-y1)*(y1-y0))
c = (x1-x0)**2+(y1-y0)**2
tv = -b/(2*a)
if tv < 0:
tv = 0
elif tv > 1:
tv = 1
yv = a*tv**2+b*tv+c
xl = x1 + tv*(x2-x1)
yl = y1 + tv*(y2-y1)
hypotl = np.hypot((xl-x0), (yl-y0))
cosp = (xl-x0) / hypotl
sinp = (yl-y0) / hypotl
xc = r0 * cosp + x0
yc = r0 * sinp + y0
if yv <= r0**2:
ax.set_title('Есть пересечение.')
dotl.set_data([], [])
dotc.set_data([], [])
linep.set_data([], [])
else:
ax.set_title(f'Нет пересечений, dist={round(np.hypot((xl-xc), (yl-yc)), 2)}')
dotl.set_data([xl], [yl])
dotc.set_data([xc], [yc])
linep.set_data([xl, xc], [yl, yc])
fig.canvas.draw_idle()
def onclick(event):
global x0, y0, x1, y1, x2, y2, r0
x0 = random.random()*(XLIM[1]-XLIM[0])-(XLIM[1]-XLIM[0])/2
y0 = random.random()*(YLIM[1]-YLIM[0])-(YLIM[1]-YLIM[0])/2
r0 = random.random() * min(x0-XLIM[0], XLIM[1]-x0, y0-YLIM[0], YLIM[1]-y0) + 0.1
x1 = event.xdata
y1 = event.ydata
x2 = event.xdata
y2 = event.ydata
draw()
def onmove(event):
global x0, y0, x1, y1, x2, y2, r0
if not event.inaxes:
return
if x0 == 0:
return
x2 = event.xdata
y2 = event.ydata
draw()
x0 = 0; y0 = 0
x1 = 0; y1 = 0
x2 = 0; y2 = 0
r0 = 0
fig = plt.figure()
fig.canvas.mpl_connect('motion_notify_event', onmove)
fig.canvas.mpl_connect('button_press_event', onclick)
ax = fig.add_subplot(111, xlim=XLIM, ylim=YLIM)
ax.set_aspect('equal')
ax.grid()
line, = ax.plot([], [], 'r')
linep, = ax.plot([], [], 'g')
circ = ax.add_artist(plt.Circle((0, 0), 0))
dotl, = ax.plot([], [], '.g')
dotc, = ax.plot([], [], '.g')
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.hypot",
"random.random",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.Circle"
] | [((1742, 1754), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1752, 1754), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2134), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2132, 2134), True, 'import matplotlib.pyplot as plt\n'), ((540, 566), 'numpy.hypot', 'np.hypot', (['(xl - x0)', '(yl - y0)'], {}), '(xl - x0, yl - y0)\n', (548, 566), True, 'import numpy as np\n'), ((2036, 2057), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(0)'], {}), '((0, 0), 0)\n', (2046, 2057), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1186), 'random.random', 'random.random', ([], {}), '()\n', (1184, 1186), False, 'import random\n'), ((1235, 1250), 'random.random', 'random.random', ([], {}), '()\n', (1248, 1250), False, 'import random\n'), ((1299, 1314), 'random.random', 'random.random', ([], {}), '()\n', (1312, 1314), False, 'import random\n'), ((923, 949), 'numpy.hypot', 'np.hypot', (['(xl - xc)', '(yl - yc)'], {}), '(xl - xc, yl - yc)\n', (931, 949), True, 'import numpy as np\n')] |
import numpy as np
import torch
from config import VAL_RATIO, BATCH_SIZE, data_path
from torch.utils.data import DataLoader, Dataset
class TIMITDataset(Dataset):
def __init__(self, X, y=None):
self.data = torch.from_numpy(X).float()
if y is not None:
y = y.astype(np.int)
self.label = torch.LongTensor(y)
else:
self.label = None
def __getitem__(self, idx):
if self.label is not None:
return self.data[idx], self.label[idx]
else:
return self.data[idx]
def __len__(self):
return len(self.data)
def load_train_data():
# print('Loading data ...')
train = np.load(data_path + 'train_11.npy')
train_label = np.load(data_path + 'train_label_11.npy')
print('Size of training data: {}'.format(train.shape))
return train, train_label
def load_test_data():
test = np.load(data_path + 'test_11.npy')
print('Size of testing data: {}'.format(test.shape))
return test
def split_val_data(train, train_label):
percent = int(train.shape[0] * (1 - VAL_RATIO))
train_x, train_y, val_x, val_y = train[:percent], train_label[:percent],\
train[percent:], train_label[percent:]
print('Size of training set: {}'.format(train_x.shape))
print('Size of validation set: {}'.format(val_x.shape))
return train_x, train_y, val_x, val_y
def get_dataloader(train_x, train_y, val_x, val_y):
train_set = TIMITDataset(train_x, train_y)
val_set = TIMITDataset(val_x, val_y)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE,
shuffle=True) #only shuffle the training data
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)
return train_loader, val_loader
| [
"torch.from_numpy",
"numpy.load",
"torch.utils.data.DataLoader",
"torch.LongTensor"
] | [((686, 721), 'numpy.load', 'np.load', (["(data_path + 'train_11.npy')"], {}), "(data_path + 'train_11.npy')\n", (693, 721), True, 'import numpy as np\n'), ((740, 781), 'numpy.load', 'np.load', (["(data_path + 'train_label_11.npy')"], {}), "(data_path + 'train_label_11.npy')\n", (747, 781), True, 'import numpy as np\n'), ((907, 941), 'numpy.load', 'np.load', (["(data_path + 'test_11.npy')"], {}), "(data_path + 'test_11.npy')\n", (914, 941), True, 'import numpy as np\n'), ((1586, 1644), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(train_set, batch_size=BATCH_SIZE, shuffle=True)\n', (1596, 1644), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((1725, 1782), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(False)'}), '(val_set, batch_size=BATCH_SIZE, shuffle=False)\n', (1735, 1782), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((331, 350), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (347, 350), False, 'import torch\n'), ((219, 238), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (235, 238), False, 'import torch\n')] |
import argparse
import json
import os
import sys
from tqdm import tqdm
from PIL import Image, ImageDraw
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
torch.backends.cudnn.benchmark = True
from config import GlobalConfig
from architectures import AttentionField
from data import CARLA_points
from utils import iou, flow_to_color
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=str, help='Unique experiment identifier.')
parser.add_argument('--device', type=str, default='cuda', help='Device to use')
parser.add_argument('--vis', action='store_true', help='Visualize each model while evaluating')
parser.add_argument('--vis_freq', type=int, default=100, help='Visualization frequency')
parser.add_argument('--batch_size', type=int, default=16, help='Batch size')
parser.add_argument('--out_res', type=int, default=256, help='output image resolution')
args = parser.parse_args()
# config
conf = GlobalConfig()
# data
val_set = CARLA_points(conf.val_data, conf)
dataloader_val = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=8, pin_memory=True)
# model
model = AttentionField(conf, args.device)
# load saved weights
model.encoder.load_state_dict(torch.load('log/{}/best_encoder.pth'.format(args.id)))
model.decoder.load_state_dict(torch.load('log/{}/best_decoder.pth'.format(args.id)))
# image storage directories
if args.vis:
if not os.path.isdir(f"log/{args.id}/img"):
os.makedirs(f"log/{args.id}/img")
if not os.path.isdir(f"log/{args.id}/sem"):
os.makedirs(f"log/{args.id}/sem")
if not os.path.isdir(f"log/{args.id}/out"):
os.makedirs(f"log/{args.id}/out")
if not os.path.isdir(f"log/{args.id}/flow"):
os.makedirs(f"log/{args.id}/flow")
intersection_epoch = [0.] * conf.num_class
union_epoch = [0.] * conf.num_class
off_epoch = 0.
wp_epoch = 0.
match = 0
miss = 0
fp = 0
converter = np.uint8(conf.converter) # used for semantics
with torch.no_grad():
model.eval()
for batch_num, data in enumerate(tqdm(dataloader_val), 0):
# create batch and move to GPU
fronts_in = data['fronts']
lefts_in = data['lefts']
rights_in = data['rights']
images = []
for i in range(conf.seq_len):
images.append(fronts_in[i].to(args.device, dtype=torch.float32))
if conf.num_camera==3:
images.append(lefts_in[i].to(args.device, dtype=torch.float32))
images.append(rights_in[i].to(args.device, dtype=torch.float32))
# semantic points for network input
query_points = data['semantic_points'].to(args.device, dtype=torch.float32)
gt_occ = data['semantic_labels'].to(args.device)
# target points for network input
target_point = torch.stack(data['target_point']).to(args.device, dtype=torch.float32)
# waypoints for visualization
waypoints = []
# create driving offset label by looping over timesteps
# label = -query + waypoint so that at test time query + label = waypoint
gt_offsets = -query_points.clone()
for i in range(conf.tot_len):
waypoint = torch.stack(data['waypoints'][i]).to(args.device, dtype=torch.float32)
waypoints.append(waypoint)
# create a delta tensor to add to the query points
delta = waypoint.transpose(0,1).unsqueeze(1) # (B, 1, 2)
# divide to account for higher resolution
delta = (-gt_offsets[:,:,2]==i).unsqueeze(-1) * delta / conf.resolution # (B, P, 2)
gt_offsets[:,:,:2] += delta
gt_offsets = gt_offsets[:,:,:2].transpose(1,2) # (B, 2, P)
gt_offsets[:,1,:] += conf.offset # reconstruct only front of vehicle
velocity = data['velocity'].to(args.device, dtype=torch.float32)
# inference
encoding = model.encoder(images, velocity)
pred_occ, pred_off, _ = model.decode(query_points, target_point, encoding)
# waypoint prediction
pred_waypoint_mean, red_light_occ = model.plan(target_point, encoding, conf.plan_scale, conf.plan_points, conf.plan_iters)
wp_pred = pred_waypoint_mean[:,conf.seq_len:]
wp_gt = torch.stack(waypoints[conf.seq_len:], dim=1).transpose(0,2)
# s,t,b = model.control_pid(wp_pred, velocity, target_point, red_light_occ)
# grid used for visualizing occupancy and flow
linspace_x = torch.linspace(-conf.axis/2, conf.axis/2, steps=args.out_res)
linspace_y = torch.linspace(-conf.axis/2, conf.axis/2, steps=args.out_res)
linspace_t = torch.linspace(0, conf.tot_len - 1, steps=conf.tot_len)
# gt semantics
semantics = (data['topdowns'][0][0][0].data.cpu().numpy()).astype(np.uint8)
semantics = converter[semantics][:conf.axis,conf.offset:conf.axis+conf.offset]
red_light_gt = (semantics==3).sum()
if red_light_gt and red_light_occ:
match += 1
if red_light_gt and red_light_occ==0:
miss += 1
if red_light_gt==0 and red_light_occ:
fp += 1
if args.vis and (batch_num % args.vis_freq == 0):
for i in range(conf.seq_len):
# save one sample per batch
if not os.path.isdir(f"log/{args.id}/img/{str(i)}"):
os.makedirs(f"log/{args.id}/img/{str(i)}")
front_numpy = (fronts_in[i][0].data.cpu().numpy().transpose((1, 2, 0))).astype(np.uint8)
left_numpy = (lefts_in[i][0].data.cpu().numpy().transpose((1, 2, 0))).astype(np.uint8)
right_numpy = (rights_in[i][0].data.cpu().numpy().transpose((1, 2, 0))).astype(np.uint8)
image_numpy = np.concatenate([left_numpy,front_numpy,right_numpy], axis=1)
image_display = Image.fromarray(image_numpy)
image_display.save(f"log/{args.id}/img/{str(i)}/{str(batch_num).zfill(4)}.png")
# target point in pixel coordinates
target_point_pixel = target_point.squeeze().cpu().numpy()
target_point_pixel[1] += conf.offset * conf.resolution
# hack for when actual target is outside image (axis/2 * resolution)
target_point_pixel = np.clip(target_point_pixel, -(conf.axis/2 * conf.resolution - 1), (conf.axis/2 * conf.resolution - 1))
target_point_pixel = (target_point_pixel*args.out_res//50 + args.out_res//2).astype(np.uint8)
for i in range(conf.tot_len):
if not os.path.isdir(f"log/{args.id}/sem/{str(i)}"):
os.makedirs(f"log/{args.id}/sem/{str(i)}")
if not os.path.isdir(f"log/{args.id}/out/{str(i)}"):
os.makedirs(f"log/{args.id}/out/{str(i)}")
if not os.path.isdir(f"log/{args.id}/flow/{str(i)}"):
os.makedirs(f"log/{args.id}/flow/{str(i)}")
# gt semantics
semantics = (data['topdowns'][i][0][0].data.cpu().numpy()).astype(np.uint8)
semantics = converter[semantics][:conf.axis,conf.offset:conf.axis+conf.offset]
semantic_display = np.zeros((semantics.shape[0], semantics.shape[1], 3))
for key, value in conf.classes.items():
semantic_display[np.where(semantics == key)] = value
semantic_display = semantic_display.astype(np.uint8)
semantic_display = Image.fromarray(semantic_display)
semantic_display.save(f"log/{args.id}/sem/{str(i)}/{str(batch_num).zfill(4)}.png")
# gt waypoint in pixel coordinates
img_waypoint = waypoints[i].data.cpu().numpy()
img_waypoint[1] += conf.offset * conf.resolution
img_waypoint = np.clip(img_waypoint, -(conf.axis/2 * conf.resolution - 1), (conf.axis/2 * conf.resolution - 1))
img_waypoint = (img_waypoint*args.out_res//(conf.axis * conf.resolution) + args.out_res//2).astype(np.uint8)
# predicted waypoint in pixel coordinates
pred_waypoint = pred_waypoint_mean[0,i].data.cpu().numpy()
pred_waypoint[1] += conf.offset * conf.resolution
pred_waypoint = np.clip(pred_waypoint, -(conf.axis/2 * conf.resolution - 1), (conf.axis/2 * conf.resolution - 1))
pred_waypoint = (pred_waypoint*args.out_res//(conf.axis * conf.resolution) + args.out_res//2).astype(np.uint8)
# visualization of occupancy and flow
img_rows = []
flow_rows = []
for row in range(args.out_res):
grid_x, grid_y, grid_t = torch.meshgrid(linspace_x, linspace_y[row], linspace_t[i].unsqueeze(0))
grid_points = torch.stack((grid_x, grid_y, grid_t), dim=3).unsqueeze(0).repeat(args.batch_size,1,1,1,1)
grid_points = grid_points.reshape(args.batch_size,-1,3).to(args.device, dtype=torch.float32)
pred_img_pts, pred_img_offsets, _ = model.decode(grid_points, target_point, encoding)
pred_img_pts = torch.argmax(pred_img_pts[-1], dim=1)
pred_img = pred_img_pts.reshape(args.batch_size,args.out_res)
pred_flow = pred_img_offsets[-1].reshape(args.batch_size,2,args.out_res)
img_rows.append(pred_img)
flow_rows.append(pred_flow)
pred_img = torch.stack(img_rows, dim=-1)
pred_flow = torch.stack(flow_rows, dim=-1)
semantics = pred_img[0,:,:].transpose(1, 0).data.cpu().numpy().astype(np.uint8)
semantic_display = np.zeros((semantics.shape[0], semantics.shape[1], 3))
for key, value in conf.classes.items():
semantic_display[np.where(semantics == key)] = value
semantic_display = semantic_display.astype(np.uint8)
semantic_display = Image.fromarray(semantic_display)
semantic_display.save(f"log/{args.id}/out/{str(i)}/{str(batch_num).zfill(4)}.png")
# flow image of predicted offsets
flow_uv = pred_flow[0,:,:,:].transpose(2,0).data.cpu().numpy()*args.out_res/conf.axis
flow_rgb = flow_to_color(flow_uv)
flow_display = Image.fromarray(flow_rgb)
draw = ImageDraw.Draw(flow_display)
draw.ellipse([tuple(target_point_pixel-2), tuple(target_point_pixel+2)], fill='Blue', outline='Blue')
draw.ellipse([tuple(img_waypoint-2), tuple(img_waypoint+2)], fill='Green', outline='Green')
draw.ellipse([tuple(pred_waypoint-2), tuple(pred_waypoint+2)], fill='Red', outline='Red')
flow_display.save(f"log/{args.id}/flow/{str(i)}/{str(batch_num).zfill(4)}.png")
pred_occ_class = torch.argmax(pred_occ[-1], dim=1)
# losses
for k in range(conf.num_class):
gt_occ_k = gt_occ==k
pred_occ_k = pred_occ_class==k
for pt1, pt2 in zip(gt_occ_k, pred_occ_k):
intersection, union = iou(pt1, pt2)
intersection_epoch[k] += float(intersection.item())
union_epoch[k] += float(union.item())
off_epoch += float(F.l1_loss(pred_off[-1], gt_offsets).mean())
wp_epoch += float(F.l1_loss(wp_gt,wp_pred).mean())
out_loss = np.array(intersection_epoch) / np.array(union_epoch)
off_loss = off_epoch / float(batch_num)
wp_loss = wp_epoch / float(batch_num)
print (f'Off: {off_loss:3.3f}')
print (f'Wp: {wp_loss:3.3f}')
print (f'Match: {match}')
print (f'Miss: {miss}')
print (f'FP: {fp}')
for k in range(conf.num_class):
print(f'Class {k:02d}: IoU: {out_loss[k]:3.3f}') | [
"argparse.ArgumentParser",
"torch.argmax",
"numpy.clip",
"torch.no_grad",
"torch.utils.data.DataLoader",
"config.GlobalConfig",
"PIL.ImageDraw.Draw",
"tqdm.tqdm",
"numpy.uint8",
"torch.nn.functional.l1_loss",
"numpy.concatenate",
"utils.iou",
"os.makedirs",
"torch.stack",
"os.path.isdir"... | [((427, 452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (450, 452), False, 'import argparse\n'), ((1003, 1017), 'config.GlobalConfig', 'GlobalConfig', ([], {}), '()\n', (1015, 1017), False, 'from config import GlobalConfig\n'), ((1036, 1069), 'data.CARLA_points', 'CARLA_points', (['conf.val_data', 'conf'], {}), '(conf.val_data, conf)\n', (1048, 1069), False, 'from data import CARLA_points\n'), ((1087, 1186), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(8)', 'pin_memory': '(True)'}), '(val_set, batch_size=args.batch_size, shuffle=False, num_workers=\n 8, pin_memory=True)\n', (1097, 1186), False, 'from torch.utils.data import DataLoader\n'), ((1199, 1232), 'architectures.AttentionField', 'AttentionField', (['conf', 'args.device'], {}), '(conf, args.device)\n', (1213, 1232), False, 'from architectures import AttentionField\n'), ((1940, 1964), 'numpy.uint8', 'np.uint8', (['conf.converter'], {}), '(conf.converter)\n', (1948, 1964), True, 'import numpy as np\n'), ((1992, 2007), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2005, 2007), False, 'import torch\n'), ((10088, 10116), 'numpy.array', 'np.array', (['intersection_epoch'], {}), '(intersection_epoch)\n', (10096, 10116), True, 'import numpy as np\n'), ((10119, 10140), 'numpy.array', 'np.array', (['union_epoch'], {}), '(union_epoch)\n', (10127, 10140), True, 'import numpy as np\n'), ((1475, 1510), 'os.path.isdir', 'os.path.isdir', (['f"""log/{args.id}/img"""'], {}), "(f'log/{args.id}/img')\n", (1488, 1510), False, 'import os\n'), ((1514, 1547), 'os.makedirs', 'os.makedirs', (['f"""log/{args.id}/img"""'], {}), "(f'log/{args.id}/img')\n", (1525, 1547), False, 'import os\n'), ((1556, 1591), 'os.path.isdir', 'os.path.isdir', (['f"""log/{args.id}/sem"""'], {}), "(f'log/{args.id}/sem')\n", (1569, 1591), False, 'import os\n'), ((1595, 1628), 'os.makedirs', 'os.makedirs', 
(['f"""log/{args.id}/sem"""'], {}), "(f'log/{args.id}/sem')\n", (1606, 1628), False, 'import os\n'), ((1637, 1672), 'os.path.isdir', 'os.path.isdir', (['f"""log/{args.id}/out"""'], {}), "(f'log/{args.id}/out')\n", (1650, 1672), False, 'import os\n'), ((1676, 1709), 'os.makedirs', 'os.makedirs', (['f"""log/{args.id}/out"""'], {}), "(f'log/{args.id}/out')\n", (1687, 1709), False, 'import os\n'), ((1718, 1754), 'os.path.isdir', 'os.path.isdir', (['f"""log/{args.id}/flow"""'], {}), "(f'log/{args.id}/flow')\n", (1731, 1754), False, 'import os\n'), ((1758, 1792), 'os.makedirs', 'os.makedirs', (['f"""log/{args.id}/flow"""'], {}), "(f'log/{args.id}/flow')\n", (1769, 1792), False, 'import os\n'), ((2058, 2078), 'tqdm.tqdm', 'tqdm', (['dataloader_val'], {}), '(dataloader_val)\n', (2062, 2078), False, 'from tqdm import tqdm\n'), ((4186, 4251), 'torch.linspace', 'torch.linspace', (['(-conf.axis / 2)', '(conf.axis / 2)'], {'steps': 'args.out_res'}), '(-conf.axis / 2, conf.axis / 2, steps=args.out_res)\n', (4200, 4251), False, 'import torch\n'), ((4263, 4328), 'torch.linspace', 'torch.linspace', (['(-conf.axis / 2)', '(conf.axis / 2)'], {'steps': 'args.out_res'}), '(-conf.axis / 2, conf.axis / 2, steps=args.out_res)\n', (4277, 4328), False, 'import torch\n'), ((4340, 4395), 'torch.linspace', 'torch.linspace', (['(0)', '(conf.tot_len - 1)'], {'steps': 'conf.tot_len'}), '(0, conf.tot_len - 1, steps=conf.tot_len)\n', (4354, 4395), False, 'import torch\n'), ((9636, 9669), 'torch.argmax', 'torch.argmax', (['pred_occ[-1]'], {'dim': '(1)'}), '(pred_occ[-1], dim=1)\n', (9648, 9669), False, 'import torch\n'), ((5741, 5850), 'numpy.clip', 'np.clip', (['target_point_pixel', '(-(conf.axis / 2 * conf.resolution - 1))', '(conf.axis / 2 * conf.resolution - 1)'], {}), '(target_point_pixel, -(conf.axis / 2 * conf.resolution - 1), conf.\n axis / 2 * conf.resolution - 1)\n', (5748, 5850), True, 'import numpy as np\n'), ((2707, 2740), 'torch.stack', 'torch.stack', (["data['target_point']"], {}), 
"(data['target_point'])\n", (2718, 2740), False, 'import torch\n'), ((3982, 4026), 'torch.stack', 'torch.stack', (['waypoints[conf.seq_len:]'], {'dim': '(1)'}), '(waypoints[conf.seq_len:], dim=1)\n', (3993, 4026), False, 'import torch\n'), ((5291, 5353), 'numpy.concatenate', 'np.concatenate', (['[left_numpy, front_numpy, right_numpy]'], {'axis': '(1)'}), '([left_numpy, front_numpy, right_numpy], axis=1)\n', (5305, 5353), True, 'import numpy as np\n'), ((5372, 5400), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (5387, 5400), False, 'from PIL import Image, ImageDraw\n'), ((6504, 6557), 'numpy.zeros', 'np.zeros', (['(semantics.shape[0], semantics.shape[1], 3)'], {}), '((semantics.shape[0], semantics.shape[1], 3))\n', (6512, 6557), True, 'import numpy as np\n'), ((6740, 6773), 'PIL.Image.fromarray', 'Image.fromarray', (['semantic_display'], {}), '(semantic_display)\n', (6755, 6773), False, 'from PIL import Image, ImageDraw\n'), ((7024, 7126), 'numpy.clip', 'np.clip', (['img_waypoint', '(-(conf.axis / 2 * conf.resolution - 1))', '(conf.axis / 2 * conf.resolution - 1)'], {}), '(img_waypoint, -(conf.axis / 2 * conf.resolution - 1), conf.axis / 2 *\n conf.resolution - 1)\n', (7031, 7126), True, 'import numpy as np\n'), ((7419, 7523), 'numpy.clip', 'np.clip', (['pred_waypoint', '(-(conf.axis / 2 * conf.resolution - 1))', '(conf.axis / 2 * conf.resolution - 1)'], {}), '(pred_waypoint, -(conf.axis / 2 * conf.resolution - 1), conf.axis / \n 2 * conf.resolution - 1)\n', (7426, 7523), True, 'import numpy as np\n'), ((8440, 8469), 'torch.stack', 'torch.stack', (['img_rows'], {'dim': '(-1)'}), '(img_rows, dim=-1)\n', (8451, 8469), False, 'import torch\n'), ((8486, 8516), 'torch.stack', 'torch.stack', (['flow_rows'], {'dim': '(-1)'}), '(flow_rows, dim=-1)\n', (8497, 8516), False, 'import torch\n'), ((8625, 8678), 'numpy.zeros', 'np.zeros', (['(semantics.shape[0], semantics.shape[1], 3)'], {}), '((semantics.shape[0], semantics.shape[1], 3))\n', 
(8633, 8678), True, 'import numpy as np\n'), ((8861, 8894), 'PIL.Image.fromarray', 'Image.fromarray', (['semantic_display'], {}), '(semantic_display)\n', (8876, 8894), False, 'from PIL import Image, ImageDraw\n'), ((9127, 9149), 'utils.flow_to_color', 'flow_to_color', (['flow_uv'], {}), '(flow_uv)\n', (9140, 9149), False, 'from utils import iou, flow_to_color\n'), ((9170, 9195), 'PIL.Image.fromarray', 'Image.fromarray', (['flow_rgb'], {}), '(flow_rgb)\n', (9185, 9195), False, 'from PIL import Image, ImageDraw\n'), ((9207, 9235), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['flow_display'], {}), '(flow_display)\n', (9221, 9235), False, 'from PIL import Image, ImageDraw\n'), ((9845, 9858), 'utils.iou', 'iou', (['pt1', 'pt2'], {}), '(pt1, pt2)\n', (9848, 9858), False, 'from utils import iou, flow_to_color\n'), ((3050, 3083), 'torch.stack', 'torch.stack', (["data['waypoints'][i]"], {}), "(data['waypoints'][i])\n", (3061, 3083), False, 'import torch\n'), ((8173, 8210), 'torch.argmax', 'torch.argmax', (['pred_img_pts[-1]'], {'dim': '(1)'}), '(pred_img_pts[-1], dim=1)\n', (8185, 8210), False, 'import torch\n'), ((9979, 10014), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['pred_off[-1]', 'gt_offsets'], {}), '(pred_off[-1], gt_offsets)\n', (9988, 10014), True, 'import torch.nn.functional as F\n'), ((10043, 10068), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['wp_gt', 'wp_pred'], {}), '(wp_gt, wp_pred)\n', (10052, 10068), True, 'import torch.nn.functional as F\n'), ((6624, 6650), 'numpy.where', 'np.where', (['(semantics == key)'], {}), '(semantics == key)\n', (6632, 6650), True, 'import numpy as np\n'), ((8745, 8771), 'numpy.where', 'np.where', (['(semantics == key)'], {}), '(semantics == key)\n', (8753, 8771), True, 'import numpy as np\n'), ((7874, 7918), 'torch.stack', 'torch.stack', (['(grid_x, grid_y, grid_t)'], {'dim': '(3)'}), '((grid_x, grid_y, grid_t), dim=3)\n', (7885, 7918), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
y = 0
z = np.arange(-0, 1, .02)
t = -y*np.log(z)-(1-y)*np.log(1-z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, t)
ax.set_ylim([0,3])
ax.set_xlim([0,1])
ax.set_xlabel('y_predict')
ax.set_ylabel('cross entropy')
ax.set_title('y_true = 0')
plt.show() | [
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.log",
"matplotlib.pyplot.show"
] | [((62, 84), 'numpy.arange', 'np.arange', (['(-0)', '(1)', '(0.02)'], {}), '(-0, 1, 0.02)\n', (71, 84), True, 'import numpy as np\n'), ((126, 138), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (136, 138), True, 'import matplotlib.pyplot as plt\n'), ((303, 313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (311, 313), True, 'import matplotlib.pyplot as plt\n'), ((91, 100), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (97, 100), True, 'import numpy as np\n'), ((107, 120), 'numpy.log', 'np.log', (['(1 - z)'], {}), '(1 - z)\n', (113, 120), True, 'import numpy as np\n')] |
import math
import numpy as np
def truncate(number, digits) -> float:
'''
Truncate number to n nearest digits. Set to -ve for decimal places
Args:
number (float) : the number to truncate
digits (int) : nearest digits. 0 truncate to 1. 1 truncate to 10. -1
truncate to 0.1
'''
stepper = pow(10.0, digits)
return math.trunc(stepper * number) / stepper
def merge_l_l(l_l, downsample_length):
'''
Merge list of lists over downsample length [[..], [.], [..] ] -> [[...], [....]]
'''
cut_end_length = (len(l_l) % downsample_length)
reshaped_l_l = np.array(l_l[0:-cut_end_length]).reshape(-1, downsample_length)
downsampled_l_l = []
flatten = lambda l: [item for sublist in l for item in sublist]
for i in range(reshaped_l_l.shape[0]):
downsampled_l_l.append(flatten(reshaped_l_l[i, :]))
return downsampled_l_l
| [
"numpy.array",
"math.trunc"
] | [((392, 420), 'math.trunc', 'math.trunc', (['(stepper * number)'], {}), '(stepper * number)\n', (402, 420), False, 'import math\n'), ((644, 676), 'numpy.array', 'np.array', (['l_l[0:-cut_end_length]'], {}), '(l_l[0:-cut_end_length])\n', (652, 676), True, 'import numpy as np\n')] |
import itertools
import numpy as np
import pandas as pd
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from sklearn import cross_validation
__names2num = {
0: [1, 0],
1: [0, 1]
}
class NHLDATA(DenseDesignMatrix):
def __init__(self, filename, X=None, Y=None, scaler=None,
split_prop=None, start=0, stop=None, batch_size=None):
if X == None:
X, Y = load_train_data_csv(filename)
self._batch_size = batch_size
if batch_size:
stop = int(len(Y)/batch_size)*batch_size
if not stop:
stop = len(Y)
X = X[start:stop]
Y = Y[start:stop]
if scaler:
X = scaler.fit_transform(X)
if split_prop:
X1, X2, Y1, Y2 = self._split(X, Y, split_prop)
X = X1
Y = Y1
super(NHLDATA, self).__init__(X=X, y=Y)
@property
def nr_inputs(self):
return len(self.X[0])
def _split(self, X, Y, prop=.8):
return cross_validation.train_test_split(X, Y,
test_size=1-prop, random_state=42)
def split(self, prop=.8):
X1, X2, y1, y2 = self._split(self.X, self.y, prop)
return NHLDATA(None, X1, y1, batch_size=self._batch_size), \
NHLDATA(None, X2, y2, batch_size=self._batch_size)
def __len__(self):
return self.X.shape[0]
def __iter__(self):
return itertools.izip_longest(self.X, self.y)
def load_train_data_csv(filename):
d = pd.read_csv(filename)
return np.array(d.ix[:,:-1], dtype=np.float32), \
np.array([__names2num[di] for di in d.ix[:,-1]], dtype=np.float32)
def load_train_data_csv_bin(filename):
d = pd.read_csv(filename)
return np.array(d.ix[:,:-1], dtype=np.float32), d.ix[:,-1]
| [
"pandas.read_csv",
"sklearn.cross_validation.train_test_split",
"itertools.izip_longest",
"numpy.array"
] | [((1547, 1568), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1558, 1568), True, 'import pandas as pd\n'), ((1746, 1767), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1757, 1767), True, 'import pandas as pd\n'), ((1022, 1098), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'Y'], {'test_size': '(1 - prop)', 'random_state': '(42)'}), '(X, Y, test_size=1 - prop, random_state=42)\n', (1055, 1098), False, 'from sklearn import cross_validation\n'), ((1464, 1502), 'itertools.izip_longest', 'itertools.izip_longest', (['self.X', 'self.y'], {}), '(self.X, self.y)\n', (1486, 1502), False, 'import itertools\n'), ((1580, 1620), 'numpy.array', 'np.array', (['d.ix[:, :-1]'], {'dtype': 'np.float32'}), '(d.ix[:, :-1], dtype=np.float32)\n', (1588, 1620), True, 'import numpy as np\n'), ((1631, 1698), 'numpy.array', 'np.array', (['[__names2num[di] for di in d.ix[:, -1]]'], {'dtype': 'np.float32'}), '([__names2num[di] for di in d.ix[:, -1]], dtype=np.float32)\n', (1639, 1698), True, 'import numpy as np\n'), ((1779, 1819), 'numpy.array', 'np.array', (['d.ix[:, :-1]'], {'dtype': 'np.float32'}), '(d.ix[:, :-1], dtype=np.float32)\n', (1787, 1819), True, 'import numpy as np\n')] |
import numpy as np
import ipopt
from datetime import datetime
MIN = -2.0e19
MAX = 2.0e19
class Parameter:
def __init__(self, D, P, jj: np.array, ll: np.array,
tt0: np.array, uu: np.array, ww: np.array, mm: np.array,
CC: np.ndarray, KK: np.ndarray, RR: np.ndarray, SS: np.ndarray):
self.D = D
self.P = P
self.ll = ll
self.tt0 = tt0
self.uu = uu
self.ww = ww
self.mm = mm
self.CC = CC
self.KK = KK
self.RR = RR
self.SS = SS
self.s = SS.shape[0]
self.vj = jj
class Variable:
def __init__(self, v, t, e, a):
self.x0 = np.concatenate((v, t, e, a))
self.len_x = self.x0.shape[0]
self.m, self.l, self.n, self.k = v.shape[0], t.shape[0], e.shape[0], a.shape[0]
self.start_idx = [self.m, self.m+self.l, self.m+self.l+self.n]
class MeMoSparse:
def __init__(self, f: np.array, var: Variable, para: Parameter):
"""
:param f: array with the same length as the variable 'v'
"""
self.f = np.concatenate([f, np.zeros(var.l + var.n + var.k)])
self.var = var
self.para = para
self.lj = self.para.vj.shape[0]
self.c_start_idx = [self.lj, self.lj+self.para.s, self.lj+self.para.s+1, self.lj+self.para.s+1+self.var.l]
def objective(self, x):
return np.dot(self.f, x)
def gradient(self, x):
"""
The gradient of the objective function
"""
return self.f
def constraints(self, x):
"""
The 'm' + 's' + 1 + 2*'l' linear and 'l' nonlinear constraints
"""
v = x[:self.var.m]
vj = v[self.para.vj]
t = x[self.var.start_idx[0]: self.var.start_idx[1]]
e = x[self.var.start_idx[1]: self.var.start_idx[2]]
a = x[self.var.start_idx[2]:]
p = np.matmul(self.para.CC, e)
return np.concatenate([
vj - np.matmul(self.para.KK, e),
np.matmul(self.para.SS, v),
[np.dot(self.para.mm, p)],
np.divide(p, self.para.ww) - t,
np.multiply(self.para.tt0, np.power(2, np.matmul(self.para.RR, a))) - t,
])
def jacobianstructure(self):
"""
:[0](m) :[1](l) :[2](n) :[:](k)
:[0](j) * - * -
:[1](s) * - - -
:[2](1) - - * -
:[3](l) - dia * -
:[:](l) - dia - *
:return:
"""
j, m, s, l, n, k = self.lj, self.var.m, self.para.s, self.var.l, self.var.n, self.var.k
r_sidx, c_sidx = self.c_start_idx, self.var.start_idx
n_r, n_c = r_sidx[-1] + l, c_sidx[-1] + k
ss = np.zeros((n_r, n_c))
ss[:r_sidx[1], :c_sidx[0]] = np.ones((j+s, m)) # r=:, c=0
ss[r_sidx[2]:r_sidx[3], c_sidx[0]:c_sidx[1]] = np.eye(l) # r=3, c=1
ss[r_sidx[3]:, c_sidx[0]:c_sidx[1]] = np.eye(l) # r=4, c=1
ss[:r_sidx[0], c_sidx[1]:c_sidx[2]] = np.ones((j, n)) # r=0, c=2
ss[r_sidx[1]:r_sidx[3], c_sidx[1]:c_sidx[2]] = np.ones((1+l, n)) # r=1:,c=2
ss[r_sidx[3]:, c_sidx[2]:] = np.ones((l, k)) # r=:, c=3
return np.nonzero(ss)
def jacobian(self, x):
"""
The Jacobian of the constraints with shape (|constraints|, |x|)
"""
j, m, s, l, n, k = self.lj, self.var.m, self.para.s, self.var.l, self.var.n, self.var.k
j1 = np.concatenate([
np.ones((j, m)),
np.negative(self.para.KK)
], axis=1).flatten()
j2 = self.para.SS.flatten()
j3 = np.dot(self.para.mm, self.para.CC).flatten()
j4 = np.concatenate([
np.negative(self.para.ww.reshape((-1, 1))),
self.para.CC
], axis=1).flatten()
a = x[self.var.start_idx[2]:]
tmp = np.multiply(np.log(2)*np.power(2, np.matmul(self.para.RR, a)), self.para.tt0)
j5 = np.concatenate([
np.negative(np.ones((l, 1))),
np.multiply(tmp.reshape((-1, 1)), self.para.RR) # da
], axis=1).flatten()
return np.concatenate([j1, j2, j3, j4, j5])
def hessianstructure(self):
"""
the structure of the hessian is of a lower triangular matrix.
shape of (|x|, |x|)
"""
k = self.var.k
row, col = np.nonzero(np.tril(np.ones((k, k))))
return row + self.c_start_idx[-1], col + self.var.start_idx[-1]
def hessian(self, x, lagrange, obj_factor):
"""
the hessian of the lagrangian
:param lagrange: 1d-array with length of |j6|.shape[0]
"""
a = x[self.var.start_idx[2]:]
H = np.power(np.log(2), 2) * np.multiply(self.para.tt0, np.matmul(self.para.RR, a))
H = np.multiply(lagrange[-self.var.l:], H)
H = np.multiply(H.reshape((-1, 1)), self.para.RR)
H = np.matmul(H.T, self.para.RR)
return np.tril(H).flatten()
def intermediate(self, alg_mod, iter_count, obj_value, inf_pr, inf_du, mu,
d_norm, regularization_size, alpha_du, alpha_pr, ls_trial):
print("Objective value at iteration #%d is - %g" % (iter_count, obj_value))
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
def optimizer(ff: np.array, var: Variable, para: Parameter):
lb = np.concatenate(
(para.ll, [para.D]*var.l, [0]*var.n, [MIN]*var.k))
ub = np.concatenate([para.uu, [MAX]*(var.l + var.n + var.k)])
j = para.vj.shape[0]
cl = np.concatenate(
[[MIN] * j, np.zeros(para.s + 1 + 2 * var.l)])
cu = np.concatenate(
[np.zeros(j + para.s), [para.P], np.zeros(2 * var.l)])
# define problem
nlp = ipopt.problem(
n=var.len_x,
m=len(cl),
problem_obj=MeMoSparse(ff, var, para),
lb=lb,
ub=ub,
cl=cl,
cu=cu
)
# Set solver options
# nlp.addOption('derivative_test', 'second-order')
nlp.addOption('mu_strategy', 'adaptive')
nlp.addOption('tol', 1e-7)
# scale problem
nlp.setProblemScaling(
obj_scaling=2,
x_scaling=[1] * var.x0.shape[0]
)
nlp.addOption('nlp_scaling_method', 'user-scaling')
# solve problem
x, info = nlp.solve(var.x0)
print("Solution of the primal variables: x=%s\n" % repr(x))
print("Solution of the dual variables: lambda=%s\n" % repr(info['mult_g']))
print("Objective=%s\n" % repr(info['obj_val']))
return x, info
def test():
m, l, n, k = 3, 5, 7, 11
v = np.ones(m)
t = np.ones(l)
e = np.ones(n)
a = np.ones(k)
ll = np.ones(m) * (-10)
jj = np.array([0, 1])
uu = np.ones(m) * 10
KK = np.abs(np.random.randn(jj.shape[0], n))
s = 13
SS = np.abs(np.random.randn(s, m))
P = 100
CC = np.abs(np.random.randn(l, n))
ww = np.abs(np.random.randn(l))
D = 0.01
RR = np.abs(np.random.randn(l, k))
tt0 = np.abs(np.random.randn(l))
ff = np.ones(m)
mm = np.ones(l)
variable = Variable(v, t, e, a)
parameter = Parameter(D, P, jj, ll, tt0, uu, ww, mm, CC, KK, RR, SS)
x, info = optimizer(ff, variable, parameter)
# ######################################
# run with toy data
test()
# ######################################
# run with toy mini data
# import os
#
# basepath = 'regulateme/data_mini'
#
# SS = np.load(os.path.join(basepath, 'SS.npy'))
# RR = np.load(os.path.join(basepath, 'RR.npy'))
# CC = np.load(os.path.join(basepath, 'CC.npy'))
# KK = np.load(os.path.join(basepath, 'KK.npy'))
# ll = np.load(os.path.join(basepath, 'll.npy'))
# uu = np.load(os.path.join(basepath, 'uu.npy'))
# ff = np.load(os.path.join(basepath, 'ff.npy'))
# ww = np.load(os.path.join(basepath, 'ww.npy'))
# tt0 = np.load(os.path.join(basepath, 'tt0.npy'))
# mm = np.load(os.path.join(basepath, 'mm.npy'))
#
# D = float(np.load(os.path.join(basepath, 'D.npy')))
# P = float(np.load(os.path.join(basepath, 'P.npy')))
#
# jj = np.array([0, 1, 3, 4]) # the index of v for the constraint of "vj-ke<=0"
# KK = KK[:jj.shape[0]]
#
# v = np.zeros(len(ll))
# p = D*np.ones(len(tt0))
# e = D*np.ones(CC.shape[1])
# t = D*np.ones(len(tt0))
# a = np.zeros(RR.shape[1])
#
# variable = Variable(v, t, e, a)
# parameter = Parameter(D, P, jj, ll, tt0, uu, ww, mm, CC, KK, RR, SS)
#
# # print("======== only nonlinear and linear constraints ==========")
# # _, info = optimizer(ff, variable, parameter)
# # print(info)
#
# print("======== only with linear constraints ==========")
# _, info = optimizer(ff, variable, parameter)
# print(f"info: ")
| [
"numpy.divide",
"numpy.multiply",
"numpy.eye",
"numpy.log",
"numpy.random.randn",
"numpy.tril",
"numpy.zeros",
"numpy.ones",
"numpy.negative",
"numpy.nonzero",
"numpy.array",
"numpy.matmul",
"numpy.dot",
"datetime.datetime.now",
"numpy.concatenate"
] | [((5517, 5588), 'numpy.concatenate', 'np.concatenate', (['(para.ll, [para.D] * var.l, [0] * var.n, [MIN] * var.k)'], {}), '((para.ll, [para.D] * var.l, [0] * var.n, [MIN] * var.k))\n', (5531, 5588), True, 'import numpy as np\n'), ((5601, 5659), 'numpy.concatenate', 'np.concatenate', (['[para.uu, [MAX] * (var.l + var.n + var.k)]'], {}), '([para.uu, [MAX] * (var.l + var.n + var.k)])\n', (5615, 5659), True, 'import numpy as np\n'), ((6702, 6712), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (6709, 6712), True, 'import numpy as np\n'), ((6721, 6731), 'numpy.ones', 'np.ones', (['l'], {}), '(l)\n', (6728, 6731), True, 'import numpy as np\n'), ((6740, 6750), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (6747, 6750), True, 'import numpy as np\n'), ((6759, 6769), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (6766, 6769), True, 'import numpy as np\n'), ((6808, 6824), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (6816, 6824), True, 'import numpy as np\n'), ((7134, 7144), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (7141, 7144), True, 'import numpy as np\n'), ((7154, 7164), 'numpy.ones', 'np.ones', (['l'], {}), '(l)\n', (7161, 7164), True, 'import numpy as np\n'), ((673, 701), 'numpy.concatenate', 'np.concatenate', (['(v, t, e, a)'], {}), '((v, t, e, a))\n', (687, 701), True, 'import numpy as np\n'), ((1396, 1413), 'numpy.dot', 'np.dot', (['self.f', 'x'], {}), '(self.f, x)\n', (1402, 1413), True, 'import numpy as np\n'), ((1888, 1914), 'numpy.matmul', 'np.matmul', (['self.para.CC', 'e'], {}), '(self.para.CC, e)\n', (1897, 1914), True, 'import numpy as np\n'), ((2767, 2787), 'numpy.zeros', 'np.zeros', (['(n_r, n_c)'], {}), '((n_r, n_c))\n', (2775, 2787), True, 'import numpy as np\n'), ((2825, 2844), 'numpy.ones', 'np.ones', (['(j + s, m)'], {}), '((j + s, m))\n', (2832, 2844), True, 'import numpy as np\n'), ((2930, 2939), 'numpy.eye', 'np.eye', (['l'], {}), '(l)\n', (2936, 2939), True, 'import numpy as np\n'), ((3007, 3016), 'numpy.eye', 'np.eye', 
(['l'], {}), '(l)\n', (3013, 3016), True, 'import numpy as np\n'), ((3094, 3109), 'numpy.ones', 'np.ones', (['(j, n)'], {}), '((j, n))\n', (3101, 3109), True, 'import numpy as np\n'), ((3189, 3208), 'numpy.ones', 'np.ones', (['(1 + l, n)'], {}), '((1 + l, n))\n', (3196, 3208), True, 'import numpy as np\n'), ((3258, 3273), 'numpy.ones', 'np.ones', (['(l, k)'], {}), '((l, k))\n', (3265, 3273), True, 'import numpy as np\n'), ((3323, 3337), 'numpy.nonzero', 'np.nonzero', (['ss'], {}), '(ss)\n', (3333, 3337), True, 'import numpy as np\n'), ((4241, 4277), 'numpy.concatenate', 'np.concatenate', (['[j1, j2, j3, j4, j5]'], {}), '([j1, j2, j3, j4, j5])\n', (4255, 4277), True, 'import numpy as np\n'), ((4901, 4939), 'numpy.multiply', 'np.multiply', (['lagrange[-self.var.l:]', 'H'], {}), '(lagrange[-self.var.l:], H)\n', (4912, 4939), True, 'import numpy as np\n'), ((5010, 5038), 'numpy.matmul', 'np.matmul', (['H.T', 'self.para.RR'], {}), '(H.T, self.para.RR)\n', (5019, 5038), True, 'import numpy as np\n'), ((5336, 5350), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5348, 5350), False, 'from datetime import datetime\n'), ((6780, 6790), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (6787, 6790), True, 'import numpy as np\n'), ((6834, 6844), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (6841, 6844), True, 'import numpy as np\n'), ((6866, 6897), 'numpy.random.randn', 'np.random.randn', (['jj.shape[0]', 'n'], {}), '(jj.shape[0], n)\n', (6881, 6897), True, 'import numpy as np\n'), ((6926, 6947), 'numpy.random.randn', 'np.random.randn', (['s', 'm'], {}), '(s, m)\n', (6941, 6947), True, 'import numpy as np\n'), ((6977, 6998), 'numpy.random.randn', 'np.random.randn', (['l', 'n'], {}), '(l, n)\n', (6992, 6998), True, 'import numpy as np\n'), ((7016, 7034), 'numpy.random.randn', 'np.random.randn', (['l'], {}), '(l)\n', (7031, 7034), True, 'import numpy as np\n'), ((7065, 7086), 'numpy.random.randn', 'np.random.randn', (['l', 'k'], {}), '(l, k)\n', (7080, 7086), True, 
'import numpy as np\n'), ((7105, 7123), 'numpy.random.randn', 'np.random.randn', (['l'], {}), '(l)\n', (7120, 7123), True, 'import numpy as np\n'), ((5729, 5761), 'numpy.zeros', 'np.zeros', (['(para.s + 1 + 2 * var.l)'], {}), '(para.s + 1 + 2 * var.l)\n', (5737, 5761), True, 'import numpy as np\n'), ((5798, 5818), 'numpy.zeros', 'np.zeros', (['(j + para.s)'], {}), '(j + para.s)\n', (5806, 5818), True, 'import numpy as np\n'), ((5830, 5849), 'numpy.zeros', 'np.zeros', (['(2 * var.l)'], {}), '(2 * var.l)\n', (5838, 5849), True, 'import numpy as np\n'), ((1114, 1145), 'numpy.zeros', 'np.zeros', (['(var.l + var.n + var.k)'], {}), '(var.l + var.n + var.k)\n', (1122, 1145), True, 'import numpy as np\n'), ((2005, 2031), 'numpy.matmul', 'np.matmul', (['self.para.SS', 'v'], {}), '(self.para.SS, v)\n', (2014, 2031), True, 'import numpy as np\n'), ((3736, 3770), 'numpy.dot', 'np.dot', (['self.para.mm', 'self.para.CC'], {}), '(self.para.mm, self.para.CC)\n', (3742, 3770), True, 'import numpy as np\n'), ((3987, 3996), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3993, 3996), True, 'import numpy as np\n'), ((4494, 4509), 'numpy.ones', 'np.ones', (['(k, k)'], {}), '((k, k))\n', (4501, 4509), True, 'import numpy as np\n'), ((4818, 4827), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4824, 4827), True, 'import numpy as np\n'), ((4861, 4887), 'numpy.matmul', 'np.matmul', (['self.para.RR', 'a'], {}), '(self.para.RR, a)\n', (4870, 4887), True, 'import numpy as np\n'), ((5055, 5065), 'numpy.tril', 'np.tril', (['H'], {}), '(H)\n', (5062, 5065), True, 'import numpy as np\n'), ((1965, 1991), 'numpy.matmul', 'np.matmul', (['self.para.KK', 'e'], {}), '(self.para.KK, e)\n', (1974, 1991), True, 'import numpy as np\n'), ((2046, 2069), 'numpy.dot', 'np.dot', (['self.para.mm', 'p'], {}), '(self.para.mm, p)\n', (2052, 2069), True, 'import numpy as np\n'), ((2084, 2110), 'numpy.divide', 'np.divide', (['p', 'self.para.ww'], {}), '(p, self.para.ww)\n', (2093, 2110), True, 'import numpy as 
np\n'), ((4009, 4035), 'numpy.matmul', 'np.matmul', (['self.para.RR', 'a'], {}), '(self.para.RR, a)\n', (4018, 4035), True, 'import numpy as np\n'), ((3601, 3616), 'numpy.ones', 'np.ones', (['(j, m)'], {}), '((j, m))\n', (3608, 3616), True, 'import numpy as np\n'), ((3630, 3655), 'numpy.negative', 'np.negative', (['self.para.KK'], {}), '(self.para.KK)\n', (3641, 3655), True, 'import numpy as np\n'), ((2167, 2193), 'numpy.matmul', 'np.matmul', (['self.para.RR', 'a'], {}), '(self.para.RR, a)\n', (2176, 2193), True, 'import numpy as np\n'), ((4107, 4122), 'numpy.ones', 'np.ones', (['(l, 1)'], {}), '((l, 1))\n', (4114, 4122), True, 'import numpy as np\n')] |
"""The ALEExperiment class handles the logic for training a deep
Q-learning agent in the Arcade Learning Environment.
Author: <NAME>
"""
import logging
import numpy as np
import image_preprocessing
# Number of rows to crop off the bottom of the (downsampled) screen.
# This is appropriate for breakout, but it may need to be modified
# for other games.
CROP_OFFSET = 8
class ALEExperiment(object):
    """Drives deep Q-learning in the Arcade Learning Environment (ALE).

    Handles the epoch/episode loop, frame skipping, the circular screen
    buffer, and resizing/merging of raw ALE screens into agent
    observations.
    """

    def __init__(self, ale, agent, resized_width, resized_height,
                 resize_method, num_epochs, epoch_length, test_length,
                 frame_skip, death_ends_episode, max_start_nullops, rng):
        # ale: ALE interface object; agent: learner exposing the
        # start_episode/step/end_episode and epoch bookkeeping hooks
        # used by run()/run_episode() below.
        self.ale = ale
        self.agent = agent
        self.num_epochs = num_epochs
        self.epoch_length = epoch_length    # training steps per epoch
        self.test_length = test_length      # testing steps per epoch (0 disables testing)
        self.frame_skip = frame_skip        # frames each chosen action is repeated
        self.death_ends_episode = death_ends_episode
        self.min_action_set = ale.getMinimalActionSet()
        self.resized_width = resized_width
        self.resized_height = resized_height
        self.resize_method = resize_method  # 'crop' or 'scale' (see resize_image)
        self.width, self.height = ale.getScreenDims()
        # Circular buffer of raw grayscale screens; observations are
        # built by max-merging buffered frames in get_observation().
        self.buffer_length = 10
        self.buffer_count = 0
        self.screen_buffer = np.empty((self.buffer_length,
                                       self.height, self.width),
                                      dtype=np.uint8)
        self.terminal_lol = False # Most recent episode ended on a loss of life
        self.max_start_nullops = max_start_nullops
        self.rng = rng

    def run(self):
        """
        Run the desired number of training epochs, a testing epoch
        is conducted after each training epoch.
        """
        self.agent.time_count_start()
        for epoch in range(1, self.num_epochs + 1):
            self.run_epoch(epoch, self.epoch_length)
            self.agent.finish_epoch(epoch)
            if self.test_length > 0:
                self.agent.start_testing()
                self.run_epoch(epoch, self.test_length, True)
                self.agent.finish_testing(epoch)

    def run_epoch(self, epoch, num_steps, testing=False):
        """ Run one 'epoch' of training or testing, where an epoch is defined
        by the number of steps executed.  Prints a progress report after
        every trial
        Arguments:
        epoch - the current epoch number
        num_steps - steps per epoch
        testing - True if this Epoch is used for testing and not training
        """
        self.terminal_lol = False # Make sure each epoch starts with a reset.
        steps_left = num_steps
        while steps_left > 0:
            prefix = "testing" if testing else "training"
            logging.info(prefix + " epoch: " + str(epoch) + " steps_left: " +
                         str(steps_left))
            # run_episode returns how many steps it consumed.
            _, num_steps = self.run_episode(steps_left, testing)
            steps_left -= num_steps

    def _init_episode(self):
        """ This method resets the game if needed, performs enough null
        actions to ensure that the screen buffer is ready and optionally
        performs a randomly determined number of null action to randomize
        the initial game state."""
        if not self.terminal_lol or self.ale.game_over():
            self.ale.reset_game()
            if self.max_start_nullops > 0:
                # NOTE(review): lower bound is hard-coded to 8, so this
                # raises if max_start_nullops < 8 — confirm intended.
                random_actions = self.rng.randint(8, self.max_start_nullops+1)
                for _ in range(random_actions):
                    self._act(0) # Null action
        # Make sure the screen buffer is filled at the beginning of
        # each episode...
        self._act(0)
        self._act(0)

    def _act(self, action):
        """Perform the indicated action for a single frame, return the
        resulting reward and store the resulting screen image in the
        buffer
        """
        reward = self.ale.act(action)
        # Write the new screen into the circular buffer slot.
        index = self.buffer_count % self.buffer_length
        self.ale.getScreenGrayscale(self.screen_buffer[index, ...])
        self.buffer_count += 1
        return reward

    def _step(self, action):
        """ Repeat one action the appropriate number of times (frame_skip)
        and return the summed reward. """
        reward = 0
        for _ in range(self.frame_skip):
            reward += self._act(action)
        return reward

    def run_episode(self, max_steps, testing):
        """Run a single training episode.
        The boolean terminal value returned indicates whether the
        episode ended because the game ended or the agent died (True)
        or because the maximum number of steps was reached (False).
        Currently this value will be ignored.
        Return: (terminal, num_steps)
        """
        self._init_episode()
        start_lives = self.ale.lives()
        action = self.agent.start_episode(self.get_observation())
        num_steps = 0
        while True:
            reward = self._step(self.min_action_set[action])
            # Optionally treat a lost life as episode-terminating during training.
            self.terminal_lol = (self.death_ends_episode and not testing and
                                 self.ale.lives() < start_lives)
            terminal = self.ale.game_over() or self.terminal_lol
            num_steps += 1
            if terminal or num_steps >= max_steps:
                self.agent.end_episode(reward, terminal)
                break
            action = self.agent.step(reward, self.get_observation())
        return terminal, num_steps

    def get_observation(self):
        """ Max-merge the buffered screen images and resize the result.

        NOTE(review): despite the commented-out two-frame merge below,
        the loop maxes over the ENTIRE buffer (buffer_length frames) —
        confirm this is intentional. """
        assert self.buffer_count >= self.buffer_length
        index = self.buffer_count % self.buffer_length - 1
        # max_image = np.maximum(self.screen_buffer[index, ...],
        #                        self.screen_buffer[index - 1, ...])
        max_image = self.screen_buffer[index]
        for i in range(self.buffer_length):
            # Negative indices wrap around the circular buffer.
            max_image = np.maximum(max_image, self.screen_buffer[index-i, ...])
        return self.resize_image(max_image)

    def resize_image(self, image):
        """ Appropriately resize a single image """
        if self.resize_method == 'crop':
            # resize keeping aspect ratio
            resize_height = int(round(
                float(self.height) * self.resized_width / self.width))
            resized = image_preprocessing.resize(image, (self.resized_width, resize_height))
            # Crop the part we want (CROP_OFFSET rows off the bottom).
            crop_y_cutoff = resize_height - CROP_OFFSET - self.resized_height
            cropped = resized[crop_y_cutoff:
                              crop_y_cutoff + self.resized_height, :]
            return cropped
        elif self.resize_method == 'scale':
            return image_preprocessing.resize(image, (self.resized_width, self.resized_height))
        else:
            raise ValueError('Unrecognized image resize method.')
| [
"image_preprocessing.resize",
"numpy.empty",
"numpy.maximum"
] | [((1204, 1275), 'numpy.empty', 'np.empty', (['(self.buffer_length, self.height, self.width)'], {'dtype': 'np.uint8'}), '((self.buffer_length, self.height, self.width), dtype=np.uint8)\n', (1212, 1275), True, 'import numpy as np\n'), ((5843, 5900), 'numpy.maximum', 'np.maximum', (['max_image', 'self.screen_buffer[index - i, ...]'], {}), '(max_image, self.screen_buffer[index - i, ...])\n', (5853, 5900), True, 'import numpy as np\n'), ((6248, 6318), 'image_preprocessing.resize', 'image_preprocessing.resize', (['image', '(self.resized_width, resize_height)'], {}), '(image, (self.resized_width, resize_height))\n', (6274, 6318), False, 'import image_preprocessing\n'), ((6640, 6716), 'image_preprocessing.resize', 'image_preprocessing.resize', (['image', '(self.resized_width, self.resized_height)'], {}), '(image, (self.resized_width, self.resized_height))\n', (6666, 6716), False, 'import image_preprocessing\n')] |
import numpy as np
import torch
import sched
import argparse
from tqdm import tqdm
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from IPython.display import clear_output
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
EPS = 1e-5  # small epsilon: WCET tie-breaker in the score functions, guard inside sqrt
ZETA = 0.3  # default density (C/D) threshold for the *-DS heuristics
def scores_to_priority(scores):
    """Turn raw scores into priority ranks.

    The task with the largest score gets rank 0, the next largest rank 1,
    and so on (i.e. the returned array is the inverse permutation of the
    descending argsort of ``scores``).
    """
    order = np.argsort(-scores)
    ranks = np.zeros_like(order)
    ranks[order] = np.arange(len(order))
    return ranks
def liu_score(tasks, num_procs=1):
    """Liu & Layland utilisation-bound quantities.

    Args:
        tasks: (n, >=2) array-like or torch tensor of [period T, cost C, ...] rows.
        num_procs: number of processors m.

    Returns:
        (utilisation, bound) where utilisation = sum(C/T) and
        bound = m * n * (2**(1/n) - 1); the sufficient test passes when
        utilisation < bound.
    """
    # The original used a bare ``except:`` around tasks.numpy(), which
    # swallowed every error; check the type explicitly instead
    # (consistent with seperate_taskset).
    if isinstance(tasks, torch.Tensor):
        tasks = tasks.numpy()
    else:
        tasks = np.asarray(tasks)
    n = tasks.shape[0]
    left = (tasks[:, 1] / tasks[:, 0]).sum()
    right = num_procs * (n * (2 ** (1 / n) - 1))
    return left, right
def liu_test(tasks, num_procs=1):
    """Return 1 if the task set passes the Liu & Layland sufficient
    utilisation bound, else 0."""
    utilisation, bound = liu_score(tasks, num_procs)
    return int(utilisation < bound)
# Implement this so that swapping it out later is a non-event.
def seperate_taskset(taskset, use_deadline=True):
    """Split a task-set matrix into its (T, C, D) columns.

    Column 0 is the period T, column 1 the execution time C, and column 2
    the deadline D (an integer drawn uniformly from [C, T]).  With
    ``use_deadline=False`` the implicit-deadline model is used, i.e. D is
    taken to be T.  Torch tensors are converted to numpy first.
    (Slack time S, if needed, is T - C.)
    """
    if isinstance(taskset, torch.Tensor):
        taskset = taskset.numpy()
    periods = taskset[:, 0]
    costs = taskset[:, 1]
    deadlines = taskset[:, 2] if use_deadline else periods
    return periods, costs, deadlines
def new_OPA(tasks, num_procs, test, use_deadline=True):
    """Audsley's Optimal Priority Assignment.

    Tries to hand out priority levels 0..n-1 (0 assigned first); at each
    level it searches for any still-unassigned task that the supplied
    ``test`` callable accepts with that level.  On failure the candidate's
    previous priority value is restored before trying the next one.

    Returns (schedulable, priority_array).
    """
    n = len(tasks)
    priority = np.full(n, n, dtype=np.int64)  # n == "not yet assigned"
    remaining = set(range(n))
    for level in range(n):
        placed = None
        for cand in remaining:
            saved = priority[cand]
            priority[cand] = level
            if test(tasks, num_procs, priority, use_deadline) is True:
                placed = cand
                break
            priority[cand] = saved
        if placed is None:
            # No task can take this level: the set is unschedulable.
            return False, np.array(priority)
        remaining.remove(placed)
    return True, np.array(priority)
def OPA(tasks, num_procs, test, use_deadline=True):
    """Audsley's Optimal Priority Assignment (original variant).

    Identical to new_OPA except that a rejected candidate's priority is
    reset to the sentinel n (which, for never-assigned tasks, equals its
    previous value anyway).  Returns (schedulable, priority_array).
    """
    n = len(tasks)
    priority = np.full(n, n, dtype=np.int64)  # n == "not yet assigned"
    unassigned = set(range(n))
    for level in range(n):
        placed = None
        for cand in unassigned:
            priority[cand] = level
            if test(tasks, num_procs, priority, use_deadline) is True:
                placed = cand
                break
            priority[cand] = n  # reset to the unassigned sentinel
        if placed is None:
            return False, np.array(priority)
        unassigned.remove(placed)
    return True, np.array(priority)
## In the test_*_scores functions,
## a HIGHER k means a HIGHER priority (the papers' equations are all laid out this way...)
## priorities are the integers k, k-1, ..., down to 0.
def test_C_RTA(tasks, num_procs, priority, use_deadline=True):
    """C-RTA schedulability test: response-time analysis with the
    carry-in contribution fixed to the WCET C (compare test_RTA, which
    uses the evolving response-time bound R_UB).

    The incoming priority assignment must be STRICT:
    values 0, 1, ..., l-1, where l-1 is the HIGHEST priority.

    Returns True iff every task's response-time upper bound fits its
    deadline.
    """
    num_tasks, inventory = tasks.shape
    T, C, D = seperate_taskset(tasks, use_deadline)
    l = len(tasks)
    m = num_procs
    # Per-task response-time upper bound, initialised to the WCET.
    R_UB = np.copy(C)
    def W_R(i, L):
        # Workload bound of task i over a window of length L.
        # NOTE(review): "L + C[i] - C[i]" cancels to L — it mirrors
        # test_RTA's "L + R_UB[i] - C[i]" with the carry-in pinned to
        # C[i]; confirm the cancellation is intentional.
        N_R = (L + C[i] - C[i]) // T[i]
        return N_R * C[i] + min(C[i], L + C[i] - C[i] - N_R * T[i])
    def I_R(i, k, R_UB_k):
        # Interference of task i on task k, capped by the window slack.
        return min(W_R(i, R_UB_k), R_UB_k - C[k] + 1)
    def W_NC(i, L):
        # Workload bound without carry-in (unused by update_R below).
        N_NC = (L // T[i])
        return N_NC * C[i] + min(C[i], L - N_NC * T[i])
    def I_NC(i, L, C_k):
        # No-carry-in interference (unused by update_R below).
        return min(W_NC(i, L), L - C_k + 1)
    def I_DIFF_R(i, k, R_UB_k):
        # Carry-in minus no-carry-in interference (unused by update_R below).
        return I_R(i, k, R_UB_k) - I_NC(i, R_UB_k, C[k])
    def update_R(k, prev_R_k):
        # One fixed-point step: higher-priority interference divided by m.
        left = 0
        for i in range(num_tasks):
            if priority[i] > priority[k]:
                left += I_R(i, k, prev_R_k)
        return C[k] + (left) // m
    def R(k):
        # Iterate the response-time recurrence (at most 100 steps).
        prev = R_UB[k] = C[k]
        for _ in range(100):
            R_UB[k] = update_R(k, R_UB[k])
            if np.abs(R_UB[k] - prev) <= 1e-5:
                break
            prev = R_UB[k]
    # Analyse from the highest priority (p = l-1) down to the lowest.
    for p in reversed(range(l)):
        for k in range(l):
            if priority[k] == p:
                R(k)
                if D[k] < R_UB[k]:
                    return False
    return True
def test_RTA(tasks, num_procs, priority, use_deadline=True, ret_score=False):
    """Multiprocessor response-time analysis (RTA) schedulability test.

    The incoming priority assignment must be STRICT: values 0..l-1,
    where l-1 is the HIGHEST priority (the papers' convention, see the
    note above these functions).

    Args:
        tasks: (l, >=3) array of [period T, cost C, deadline D] rows.
        num_procs: number of identical processors m.
        priority: strict priority per task (higher value = higher priority).
        use_deadline: if False, the implicit-deadline model (D = T) is used.
        ret_score: if False, return a bool; if True, return 2 when every
            task passes, otherwise the signed fraction
            (passes - failures) / l in [-1, 1).

    Cleanup over the original: the dead locals ``rett``/``calcd`` and the
    duplicated bookkeeping in the boolean branch were removed; behaviour
    is unchanged.
    """
    num_tasks, _ = tasks.shape
    T, C, D = seperate_taskset(tasks, use_deadline)
    l = num_tasks
    m = num_procs
    # Per-task response-time upper bound, initialised to the WCET.
    R_UB = np.copy(C)

    def W_R(i, L):
        # Workload bound of task i over a window of length L, with the
        # carry-in bounded by the current response-time estimate R_UB[i].
        N_R = (L + R_UB[i] - C[i]) // T[i]
        return N_R * C[i] + min(C[i], L + R_UB[i] - C[i] - N_R * T[i])

    def I_R(i, k, R_UB_k):
        # Interference of task i on task k, capped by the window slack.
        return min(W_R(i, R_UB_k), R_UB_k - C[k] + 1)

    def update_R(k, prev_R_k):
        # One fixed-point step: higher-priority interference divided by m.
        left = 0
        for i in range(num_tasks):
            if priority[i] > priority[k]:
                left += I_R(i, k, prev_R_k)
        return C[k] + left // m

    def R(k):
        # Iterate the recurrence (at most 50 steps) until it stabilises,
        # stopping early once the deadline is already missed.
        prev = R_UB[k]
        for _ in range(50):
            R_UB[k] = update_R(k, R_UB[k])
            if (np.abs(R_UB[k] - prev) <= 1e-5) or (D[k] < R_UB[k]):
                break
            prev = R_UB[k]

    if ret_score is True:
        score = 0
        for p in reversed(range(l)):
            for k in range(l):
                if priority[k] == p:
                    R(k)
                    score += -1 if D[k] < R_UB[k] else 1
        # 2 signals "every task schedulable"; otherwise a fraction < 1.
        return 2 if score == l else score / l
    # Boolean mode: analyse from highest priority downward, fail fast.
    for p in reversed(range(l)):
        for k in range(l):
            if priority[k] == p:
                R(k)
                if D[k] < R_UB[k]:
                    return False
    return True
def test_RTA_LC(tasks, num_procs, priority, use_deadline=False, ret_score=False):
    """RTA-LC schedulability test: response-time analysis with the
    limited-carry-in refinement (only the m-1 largest carry-in
    differences are counted).

    The incoming priority assignment must be STRICT:
    values 0, 1, ..., l-1, where l-1 is the HIGHEST priority.

    With ret_score=False, returns True iff every task passes.  With
    ret_score=True, returns 2 when all pass, otherwise the signed
    fraction (passes - failures) / l.
    """
    num_tasks, inventory = tasks.shape
    T, C, D = seperate_taskset(tasks, use_deadline)
    l = len(tasks)
    m = num_procs
    # Per-task response-time upper bound, initialised to the WCET.
    R_UB = np.copy(C)
    def W_R(i, L):
        # Workload bound of task i with carry-in (bounded by R_UB[i]).
        N_R_ = (L + R_UB[i] - C[i]) // T[i]
        return N_R_ * C[i] + min(C[i], L + R_UB[i] - C[i] - N_R_ * T[i])
    def I_R(i, k, R_UB_k):
        # Carry-in interference of task i on task k, capped by slack.
        return min(W_R(i, R_UB_k), R_UB_k - C[k] + 1)
    def W_NC(i, L):
        # Workload bound without carry-in.
        N_NC_ = (L // T[i])
        return N_NC_ * C[i] + min(C[i], L - N_NC_ * T[i])
    def I_NC(i, k, L):
        # No-carry-in interference of task i on task k, capped by slack.
        ret = min(W_NC(i, L), L - C[k] + 1)
        return ret
    def I_DIFF_R(i, k, R_UB_k):
        # Extra interference a carry-in task adds over the no-carry-in bound.
        return I_R(i, k, R_UB_k) - I_NC(i, k, R_UB_k)
    def update_R(k, prev_R_k):
        # One fixed-point step: all no-carry-in interference, plus the
        # m-1 largest carry-in differences, divided by m.
        left = 0
        for i in range(num_tasks):
            if priority[i] > priority[k]:
                left += I_NC(i, k, prev_R_k)
        right = []
        for i in range(num_tasks):
            if priority[i] > priority[k]:
                right.append(I_DIFF_R(i, k, prev_R_k))
        right = sorted(right, key=lambda x: -x)
        right = right[:m - 1]
        right = np.sum(right)
        return C[k] + (left + right) // m
    def R(k):
        # Iterate the recurrence (at most 100 steps), stopping early on
        # convergence or once the deadline is already missed.
        prev = R_UB[k]
        for _ in range(100):
            R_UB[k] = update_R(k, R_UB[k])
            if (np.abs(R_UB[k] - prev) <= 1e-5) or (D[k] < R_UB[k]):
                break
            prev = R_UB[k]
    if ret_score is False:
        for p in reversed(range(l)):
            for k in range(l):
                if priority[k] == p:
                    R(k)
                    if D[k] < R_UB[k]:
                        return False
        return True
    else:
        ret = 0
        for p in reversed(range(l)):
            for k in range(l):
                if priority[k] == p:
                    R(k)
                    if D[k] < R_UB[k]:
                        ret += -1
                    else:
                        ret += 1
        if ret == l:
            return 2
        else:
            return (ret / l)
    # NOTE(review): unreachable — both branches above return.
    return True
def test_DA(tasks, num_procs, priority, use_deadline, ret_score=False):
    """Deadline-Analysis (DA) schedulability test.

    The incoming priority assignment must be STRICT:
    values 0, 1, ..., l-1, where l-1 is the HIGHEST priority.

    With ret_score=False, returns True iff every task passes.  With
    ret_score=True, returns 2 when all pass, otherwise the signed
    fraction (passes - failures) / l.
    """
    num_tasks, inventory = tasks.shape
    T, C, D = seperate_taskset(tasks, use_deadline)
    l = len(tasks)
    m = num_procs
    def N(i, L):
        # Number of whole jobs of task i that fit in a window of length L.
        return np.floor((L + D[i] - C[i]) / T[i])
    def W(i, L):
        # Workload bound of task i over a window of length L.
        return N(i, L) * C[i] + min(C[i], L + D[i] - C[i] - N(i, L) * T[i])
    def I(i, k):
        # Interference of task i on task k within D[k], capped by slack.
        return min(W(i, D[k]), D[k] - C[k] + 1)
    if ret_score is False:
        for k in range(l):
            s = 0.0
            for i in range(l):
                if priority[i] > priority[k]:
                    s += I(i, k)
            r = C[k] + np.floor(s / m)
            if D[k] < r:
                return False
        return True
    else:
        ret = 0
        rett = True  # NOTE(review): written but never read afterwards
        rs = []
        for k in range(l):
            s = 0.0
            for i in range(l):
                if priority[i] > priority[k]:
                    s += I(i, k)
            r = C[k] + np.floor(s / m)
            rs.append(r - D[k])
            if D[k] < r:
                ret -= 1
                rett = False
            else:
                ret += 1
        if ret == l:
            return 2
        else:
            return (ret / l)
        #return np.min(rs) / l
        #return (np.min(rs) - np.max(rs)) / l
def test_DA_LC(tasks, num_procs, priority, use_deadline, ret_score=False):
    """DA-LC schedulability test: deadline analysis with the
    limited-carry-in refinement (only the m-1 largest carry-in
    differences are counted).

    The incoming priority assignment must be STRICT:
    values 0, 1, ..., l-1, where l-1 is the HIGHEST priority.

    With ret_score=False, returns True iff every task passes.  With
    ret_score=True, returns 2 when all pass, otherwise the signed
    fraction (passes - failures) / l.
    """
    num_tasks, inventory = tasks.shape
    T, C, D = seperate_taskset(tasks, use_deadline)
    l = len(tasks)
    m = num_procs
    def N_D(i, L):
        # Number of whole jobs of task i in a window of length L (carry-in case).
        return (L + D[i] - C[i]) // T[i]
    def W_D(i, L):
        # Workload bound of task i with carry-in.
        ND = N_D(i, L)
        return ND * C[i] + min(C[i], L + D[i] - C[i] - ND * T[i])
    def N_NC(i, L):
        # Number of whole jobs of task i without carry-in.
        ret = (L // T[i])
        return ret
    def W_NC(i, L):
        # Workload bound of task i without carry-in.
        return N_NC(i, L) * C[i] + min(C[i], L - N_NC(i, L) * T[i])
    def I_D(i, k):
        # Carry-in interference of task i on task k within D[k].
        return min(W_D(i, D[k]), D[k] - C[k] + 1)
    def I_NC(i, k, L):
        # No-carry-in interference of task i on task k, capped by slack.
        ret = min(W_NC(i, L), L - C[k] + 1)
        return ret
    def I_DIFF_D(i, k, D_k):
        # Extra interference a carry-in task adds over the no-carry-in bound.
        return I_D(i, k) - I_NC(i, k, D_k)
    def V(k):
        # Deadline-analysis bound: all no-carry-in interference plus the
        # m-1 largest carry-in differences, divided by m.
        left = 0
        for i in range(num_tasks):
            if priority[i] > priority[k]:
                left += I_NC(i, k, D[k])
        right = []
        for i in range(num_tasks):
            if priority[i] > priority[k]:
                right.append(I_DIFF_D(i, k, D[k]))
        right = sorted(right, key=lambda x: -x)
        right = right[:m - 1]
        right = np.sum(right)
        return C[k] + (left + right) // m
    if ret_score is False:
        for p in reversed(range(l)):
            for k in range(l):
                if priority[k] == p:
                    if D[k] < V(k):
                        return False
        return True
    else:
        ret = 0
        for p in reversed(range(l)):
            for k in range(l):
                if priority[k] == p:
                    if D[k] < V(k):
                        ret += -1
                    else:
                        ret += 1
        if ret == l:
            return 2
        else:
            return (ret / l)
    # NOTE(review): unreachable — both branches above return.
    return True
def test_NP_FP(tasks, num_procs, priority, use_deadline, ret_score=False):
    """Non-preemptive fixed-priority test — INCOMPLETE (work in progress).

    Fix over the original: np.zeros was called with the misspelled
    keyword ``dtpye``, which raised TypeError on every invocation; it is
    now ``dtype``.

    NOTE(review): the inner helpers calc_I1/calc_i2 reference names that
    are undefined in this module (A, alpha_2, I1_1, I2_1, I3_1, I1_2,
    I3_2, I4_2) and are never called by the loop below, which only
    zeroes I_1.  As written the function computes nothing useful and
    returns None.
    """
    num_tasks, inventory = tasks.shape
    m = num_procs
    n = num_tasks
    T, C, D = seperate_taskset(tasks, use_deadline)
    S = D - C          # slack per task
    U = C / T          # per-task utilisation
    U_tau = np.sum(U)  # total utilisation
    I_1 = np.zeros(shape=(n, n), dtype=np.int32)
    I_2 = np.zeros(shape=(n, n), dtype=np.int32)
    I_df = np.zeros(shape=(n, n), dtype=np.int32)
    def calc_I1(i, k):
        if i == k:
            return I1_1(k)
        if (priority[i] < priority[k]):
            if (A[k] == 0):
                return 0
            elif (alpha_2 >= A[k]) and (A[k] > 0):
                return I2_1[i]
            else:
                return I3_1[i]
        elif i == k:  # NOTE(review): unreachable — i == k handled above
            return I1_1(i)
        return I3_1[i]
    def calc_i2(i, k):
        if i == k:
            return I1_2[i]
        if (priority[i] < priority[k]) and (S[k] >= C[i]):
            return I3_2[i]
        return I4_2[i]
    for i in range(n):
        for k in range(n):
            I_1[i, k] = 0
    #right = sorted(right, key=lambda x: -x)
    #right = right[:m - 1]
def get_DM_DS_scores(tasks, num_procs, zeta=ZETA, use_deadline=True):
    """Deadline-Monotonic with Density Separation (DM-DS) priority scores.

    Tasks whose density C/D reaches ``zeta`` get the sentinel score -1
    (highest-priority band); all others are ordered by deadline, with the
    WCET used as an epsilon tie-breaker.  Lower score = higher priority.

    ``num_procs`` is kept for interface compatibility; the original only
    used it in two dead ``arr = ...`` statements whose results were never
    read, which have been removed (behaviour unchanged).
    """
    T, C, D = seperate_taskset(tasks, use_deadline=use_deadline)
    delta = np.array(C / D)  # task densities
    order = np.copy(D)
    # Dense tasks jump to the front of the ordering.
    order[delta >= zeta] = -1
    # Break deadline ties in favour of larger WCET.
    return (order - C * EPS)
def get_SM_DS_scores(tasks, num_proces, zeta=ZETA, use_deadline=True):
    """Slack-Monotonic with Density Separation (SM-DS) priority scores.

    Tasks whose density C/D reaches ``zeta`` get the sentinel score -1
    (highest-priority band); all others are ordered by slack T - C, with
    the WCET used as an epsilon tie-breaker.  Lower score = higher
    priority.

    NOTE: the parameter name ``num_proces`` (sic) is kept for interface
    compatibility.  The original body referenced the correctly spelled
    ``num_procs`` — which silently resolved to the module-level global —
    but only inside two dead ``arr = ...`` statements; those have been
    removed (behaviour unchanged).
    """
    T, C, D = seperate_taskset(tasks, use_deadline=use_deadline)
    S = T - C                # slack per task
    delta = np.array(C / D)  # task densities
    order = np.copy(S)
    # Dense tasks jump to the front of the ordering.
    order[delta >= zeta] = -1
    # Break slack ties in favour of larger WCET.
    return (order - C * EPS)
def get_DkC_scores(tasks, num_procs=1, k=-1, use_deadline=True):
    """DkC (a.k.a. DCMPO) priority scores: score = D - k*C.

    Lower score = higher priority.  If ``k`` is negative, the analytic
    value k = (m - 1 + sqrt(5*m^2 - 6*m + 1)) / (2*m) for m processors is
    used (EPS guards the sqrt argument).

    Cleanup over the original: a rank array ``ret`` was computed and then
    discarded before ``return order`` (plus an unreachable second
    return); the dead code has been removed — the returned value is
    unchanged.
    """
    T, C, D = seperate_taskset(tasks, use_deadline=use_deadline)
    m = num_procs
    if k < 0:
        k = m - 1 + np.sqrt(5 * (m ** 2) - 6 * m + 1 + EPS)
        k /= (2 * m)
    return D - C * k
def get_TkC_scores(tasks, num_procs, k=-1, use_deadline=True):
    """TkC (a.k.a. TCMPO) priority scores: score = T - k*C.

    Lower score = higher priority.  If ``k`` is negative, the analytic
    value k = (m - 1 + sqrt(5*m^2 - 6*m + 1)) / (2*m) for m processors is
    used.

    Fix over the original: ``use_deadline`` was accepted but never
    forwarded to seperate_taskset; it is now passed through (the result
    is identical either way because only T and C are used).
    """
    T, C, D = seperate_taskset(tasks, use_deadline)
    m = num_procs
    if k < 0:
        k = m - 1 + np.sqrt(5 * (m ** 2) - 6 * m + 1 + EPS)
        k /= (2 * m)
    return T - C * k
def get_DCMPO_scores(tasks, num_procs, k=EPS, use_deadline=True):
    """DCMPO scores: the DkC heuristic evaluated at a tiny fixed k."""
    return get_DkC_scores(tasks, num_procs, use_deadline=use_deadline, k=k)
def get_DM_scores(tasks, num_procs, use_deadline=True):
    """Deadline-Monotonic scores: DkC with k ~ 0, i.e. pure deadline order."""
    return get_DkC_scores(tasks, use_deadline=use_deadline, num_procs=num_procs, k=EPS)
def get_SM_scores(tasks, num_procs):
    """Slack-Monotonic scores: SM-DS with an unreachable density
    threshold, i.e. pure slack (T - C) ordering.

    Fix over the original: it called ``get_SM_US_scores``, which is not
    defined anywhere in this module (NameError at call time);
    ``get_SM_DS_scores`` is the intended degenerate form.
    """
    return get_SM_DS_scores(tasks, num_procs, zeta=10000.0)
# Experiment-wide defaults (NOTE(review): names suggest they configure the
# evaluation driver — num_procs is also silently read by get_SM_DS_scores'
# dead code via the num_proces parameter typo; confirm intended usage).
num_procs = 16
sample_size = 50
pointset_size = 80
def check(tasks, order, num_procs):
    """Ground-truth check via the concrete scheduler simulation.

    NOTE(review): presumably ScdChecker.run() counts deadline misses, so
    True means "unschedulable in simulation" — confirm against sched.
    """
    if isinstance(tasks, torch.Tensor):
        tasks = tasks.numpy()
    ret = sched.ScdChecker(tasks, order, num_procs=num_procs).run() > 0
    return ret | [
"numpy.zeros_like",
"numpy.sum",
"numpy.abs",
"numpy.copy",
"numpy.floor",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.argsort",
"numpy.array",
"sched.ScdChecker",
"numpy.sqrt"
] | [((391, 410), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (401, 410), True, 'import numpy as np\n'), ((426, 445), 'numpy.zeros_like', 'np.zeros_like', (['rank'], {}), '(rank)\n', (439, 445), True, 'import numpy as np\n'), ((3369, 3379), 'numpy.copy', 'np.copy', (['C'], {}), '(C)\n', (3376, 3379), True, 'import numpy as np\n'), ((4788, 4798), 'numpy.copy', 'np.copy', (['C'], {}), '(C)\n', (4795, 4798), True, 'import numpy as np\n'), ((6532, 6542), 'numpy.copy', 'np.copy', (['C'], {}), '(C)\n', (6539, 6542), True, 'import numpy as np\n'), ((11992, 12001), 'numpy.sum', 'np.sum', (['U'], {}), '(U)\n', (11998, 12001), True, 'import numpy as np\n'), ((12013, 12051), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n)', 'dtpye': 'np.int32'}), '(shape=(n, n), dtpye=np.int32)\n', (12021, 12051), True, 'import numpy as np\n'), ((12062, 12100), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n)', 'dtpye': 'np.int32'}), '(shape=(n, n), dtpye=np.int32)\n', (12070, 12100), True, 'import numpy as np\n'), ((12112, 12150), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n)', 'dtpye': 'np.int32'}), '(shape=(n, n), dtpye=np.int32)\n', (12120, 12150), True, 'import numpy as np\n'), ((12997, 13012), 'numpy.array', 'np.array', (['(C / D)'], {}), '(C / D)\n', (13005, 13012), True, 'import numpy as np\n'), ((13131, 13141), 'numpy.copy', 'np.copy', (['D'], {}), '(D)\n', (13138, 13141), True, 'import numpy as np\n'), ((13364, 13379), 'numpy.array', 'np.array', (['(C / D)'], {}), '(C / D)\n', (13372, 13379), True, 'import numpy as np\n'), ((13498, 13508), 'numpy.copy', 'np.copy', (['S'], {}), '(S)\n', (13505, 13508), True, 'import numpy as np\n'), ((13875, 13895), 'numpy.zeros_like', 'np.zeros_like', (['order'], {}), '(order)\n', (13888, 13895), True, 'import numpy as np\n'), ((1654, 1689), 'numpy.ones', 'np.ones', ([], {'shape': '(l,)', 'dtype': 'np.int64'}), '(shape=(l,), dtype=np.int64)\n', (1661, 1689), True, 'import numpy as np\n'), ((2244, 2262), 'numpy.array', 
'np.array', (['priority'], {}), '(priority)\n', (2252, 2262), True, 'import numpy as np\n'), ((2413, 2448), 'numpy.ones', 'np.ones', ([], {'shape': '(l,)', 'dtype': 'np.int64'}), '(shape=(l,), dtype=np.int64)\n', (2420, 2448), True, 'import numpy as np\n'), ((2949, 2967), 'numpy.array', 'np.array', (['priority'], {}), '(priority)\n', (2957, 2967), True, 'import numpy as np\n'), ((5545, 5572), 'numpy.zeros', 'np.zeros', (['l'], {'dtype': 'np.int32'}), '(l, dtype=np.int32)\n', (5553, 5572), True, 'import numpy as np\n'), ((5981, 6008), 'numpy.zeros', 'np.zeros', (['l'], {'dtype': 'np.int32'}), '(l, dtype=np.int32)\n', (5989, 6008), True, 'import numpy as np\n'), ((7460, 7473), 'numpy.sum', 'np.sum', (['right'], {}), '(right)\n', (7466, 7473), True, 'import numpy as np\n'), ((8709, 8743), 'numpy.floor', 'np.floor', (['((L + D[i] - C[i]) / T[i])'], {}), '((L + D[i] - C[i]) / T[i])\n', (8717, 8743), True, 'import numpy as np\n'), ((11100, 11113), 'numpy.sum', 'np.sum', (['right'], {}), '(right)\n', (11106, 11113), True, 'import numpy as np\n'), ((13023, 13041), 'numpy.argsort', 'np.argsort', (['(-delta)'], {}), '(-delta)\n', (13033, 13041), True, 'import numpy as np\n'), ((13390, 13408), 'numpy.argsort', 'np.argsort', (['(-delta)'], {}), '(-delta)\n', (13400, 13408), True, 'import numpy as np\n'), ((13927, 13944), 'numpy.argsort', 'np.argsort', (['order'], {}), '(order)\n', (13937, 13944), True, 'import numpy as np\n'), ((635, 652), 'numpy.asarray', 'np.asarray', (['tasks'], {}), '(tasks)\n', (645, 652), True, 'import numpy as np\n'), ((13781, 13818), 'numpy.sqrt', 'np.sqrt', (['(5 * m ** 2 - 6 * m + 1 + EPS)'], {}), '(5 * m ** 2 - 6 * m + 1 + EPS)\n', (13788, 13818), True, 'import numpy as np\n'), ((14187, 14224), 'numpy.sqrt', 'np.sqrt', (['(5 * m ** 2 - 6 * m + 1 + EPS)'], {}), '(5 * m ** 2 - 6 * m + 1 + EPS)\n', (14194, 14224), True, 'import numpy as np\n'), ((2207, 2225), 'numpy.array', 'np.array', (['priority'], {}), '(priority)\n', (2215, 2225), True, 'import 
numpy as np\n'), ((2912, 2930), 'numpy.array', 'np.array', (['priority'], {}), '(priority)\n', (2920, 2930), True, 'import numpy as np\n'), ((4190, 4212), 'numpy.abs', 'np.abs', (['(R_UB[k] - prev)'], {}), '(R_UB[k] - prev)\n', (4196, 4212), True, 'import numpy as np\n'), ((9111, 9126), 'numpy.floor', 'np.floor', (['(s / m)'], {}), '(s / m)\n', (9119, 9126), True, 'import numpy as np\n'), ((9483, 9498), 'numpy.floor', 'np.floor', (['(s / m)'], {}), '(s / m)\n', (9491, 9498), True, 'import numpy as np\n'), ((14844, 14895), 'sched.ScdChecker', 'sched.ScdChecker', (['tasks', 'order'], {'num_procs': 'num_procs'}), '(tasks, order, num_procs=num_procs)\n', (14860, 14895), False, 'import sched\n'), ((5364, 5386), 'numpy.abs', 'np.abs', (['(R_UB[k] - prev)'], {}), '(R_UB[k] - prev)\n', (5370, 5386), True, 'import numpy as np\n'), ((7642, 7664), 'numpy.abs', 'np.abs', (['(R_UB[k] - prev)'], {}), '(R_UB[k] - prev)\n', (7648, 7664), True, 'import numpy as np\n')] |
from __future__ import print_function
import sys
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.models as models
import random
import os
import argparse
import numpy as np
import dataloader_clothing1M as dataloader
from sklearn.mixture import GaussianMixture
from torch.utils.tensorboard import SummaryWriter
# TensorBoard writer; all scalars/text for this run land under this directory.
writer = SummaryWriter('cloth_logs/clothing1m-odin-june6')
# Command-line hyper-parameters for DivideMix-style training on Clothing1M.
parser = argparse.ArgumentParser(description='PyTorch Clothing1M Training')
parser.add_argument('--batch_size', default=32, type=int, help='train batchsize')
parser.add_argument('--lr', '--learning_rate', default=0.002, type=float, help='initial learning rate')
parser.add_argument('--alpha', default=0.5, type=float, help='parameter for Beta')
parser.add_argument('--lambda_u', default=0, type=float, help='weight for unsupervised loss')
parser.add_argument('--p_threshold', default=0.5, type=float, help='clean probability threshold')
parser.add_argument('--T', default=0.5, type=float, help='sharpening temperature')
parser.add_argument('--num_epochs', default=100, type=int)
parser.add_argument('--id', default='clothing1m')
parser.add_argument('--data_path', default='../../Clothing1M/data', type=str, help='path to dataset')
parser.add_argument('--seed', default=123)
parser.add_argument('--gpuid', default=0, type=int)
parser.add_argument('--num_class', default=14, type=int)
parser.add_argument('--num_batches', default=1000, type=int)
args = parser.parse_args()
# Record every hyper-parameter as TensorBoard text so runs are self-describing.
arg_dict = vars(args)
for key in arg_dict.keys():
    writer.add_text(key, str(arg_dict[key]))
torch.cuda.set_device(args.gpuid)
# Seed the python and torch RNGs (NOTE(review): numpy's RNG is not seeded).
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# define temperature scaling:
def temperature_scaling(f_xs, T):
    """Temperature-scaled softmax over ALL entries of ``f_xs``.

    Args:
        f_xs: array of logits (normalisation is over every element,
            regardless of shape).
        T: temperature; larger T flattens the distribution.

    Returns:
        Array of the same shape as ``f_xs`` whose entries sum to 1.
    """
    scaled = f_xs / T
    # Subtract the max before exponentiating: exp() overflows for large
    # logits (yielding inf/nan), while the softmax value is invariant to
    # a constant shift.
    S = np.exp(scaled - np.max(scaled))
    return S / np.sum(S)
# Training
# def train(epoch,net,net2,optimizer,labeled_trainloader,unlabeled_trainloader):
def train(epoch, net, net2, optimizer, labeled_trainloader, unlabeled_trainloader, netname):
net.train()
net2.eval() # fix one network and train the other
unlabeled_train_iter = iter(unlabeled_trainloader)
num_iter = (len(labeled_trainloader.dataset) // args.batch_size) + 1
for batch_idx, (inputs_x, inputs_x2, labels_x, w_x) in enumerate(labeled_trainloader):
try:
inputs_u, inputs_u2 = unlabeled_train_iter.next()
except:
unlabeled_train_iter = iter(unlabeled_trainloader)
inputs_u, inputs_u2 = unlabeled_train_iter.next()
batch_size = inputs_x.size(0)
# Transform label to one-hot
labels_x = torch.zeros(batch_size, args.num_class).scatter_(1, labels_x.view(-1, 1), 1)
w_x = w_x.view(-1, 1).type(torch.FloatTensor)
inputs_x, inputs_x2, labels_x, w_x = inputs_x.cuda(), inputs_x2.cuda(), labels_x.cuda(), w_x.cuda()
inputs_u, inputs_u2 = inputs_u.cuda(), inputs_u2.cuda()
with torch.no_grad():
# label co-guessing of unlabeled samples
outputs_u11 = net(inputs_u)
outputs_u12 = net(inputs_u2)
outputs_u21 = net2(inputs_u)
outputs_u22 = net2(inputs_u2)
pu = (torch.softmax(outputs_u11, dim=1) + torch.softmax(outputs_u12, dim=1) + torch.softmax(outputs_u21,
dim=1) + torch.softmax(
outputs_u22, dim=1)) / 4
ptu = pu ** (1 / args.T) # temparature sharpening
targets_u = ptu / ptu.sum(dim=1, keepdim=True) # normalize
targets_u = targets_u.detach()
# label refinement of labeled samples
outputs_x = net(inputs_x)
outputs_x2 = net(inputs_x2)
px = (torch.softmax(outputs_x, dim=1) + torch.softmax(outputs_x2, dim=1)) / 2
px = w_x * labels_x + (1 - w_x) * px
ptx = px ** (1 / args.T) # temparature sharpening
targets_x = ptx / ptx.sum(dim=1, keepdim=True) # normalize
targets_x = targets_x.detach()
# mixmatch
l = np.random.beta(args.alpha, args.alpha)
l = max(l, 1 - l)
all_inputs = torch.cat([inputs_x, inputs_x2, inputs_u, inputs_u2], dim=0)
all_targets = torch.cat([targets_x, targets_x, targets_u, targets_u], dim=0)
idx = torch.randperm(all_inputs.size(0))
input_a, input_b = all_inputs, all_inputs[idx]
target_a, target_b = all_targets, all_targets[idx]
mixed_input = l * input_a[:batch_size * 2] + (1 - l) * input_b[:batch_size * 2]
mixed_target = l * target_a[:batch_size * 2] + (1 - l) * target_b[:batch_size * 2]
logits = net(mixed_input)
Lx = -torch.mean(torch.sum(F.log_softmax(logits, dim=1) * mixed_target, dim=1))
# regularization
prior = torch.ones(args.num_class) / args.num_class
prior = prior.cuda()
pred_mean = torch.softmax(logits, dim=1).mean(0)
penalty = torch.sum(prior * torch.log(prior / pred_mean))
loss = Lx + penalty
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
sys.stdout.write('\r')
sys.stdout.write('Clothing1M | Epoch [%3d/%3d] Iter[%3d/%3d]\t Labeled loss: %.4f '
% (epoch, args.num_epochs, batch_idx + 1, num_iter, Lx.item()))
sys.stdout.flush()
writer.add_scalar('Train_' + netname + '/labeled_loss', Lx.item(), epoch)
def warmup(net, optimizer, dataloader, netname): # changegg
net.train()
for batch_idx, (inputs, labels, path) in enumerate(dataloader):
inputs, labels = inputs.cuda(), labels.cuda()
optimizer.zero_grad()
outputs = net(inputs)
loss = CEloss(outputs, labels)
penalty = conf_penalty(outputs)
L = loss + penalty
L.backward()
optimizer.step()
sys.stdout.write('\r')
sys.stdout.write('|Warm-up: Iter[%3d/%3d]\t CE-loss: %.4f Conf-Penalty: %.4f'
% (batch_idx + 1, args.num_batches, loss.item(), penalty.item()))
sys.stdout.flush()
writer.add_scalar('Warmup_' + netname + '/Loss', loss.item(), epoch)
def val(net, val_loader, k):
net.eval()
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(val_loader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += targets.size(0)
correct += predicted.eq(targets).cpu().sum().item()
acc = 100. * correct / total
print("\n| Validation\t Net%d Acc: %.2f%%" % (k, acc))
writer.add_scalar('Validation/Accuracy', acc, epoch) # add
if acc > best_acc[k - 1]:
best_acc[k - 1] = acc
print('| Saving Best Net%d ...' % k)
save_point = './checkpoint/%s_net%d.pth.tar' % (args.id, k)
torch.save(net.state_dict(), save_point)
return acc
def test(net1, net2, test_loader):
net1.eval()
net2.eval()
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs1 = net1(inputs)
outputs2 = net2(inputs)
outputs = outputs1 + outputs2
_, predicted = torch.max(outputs, 1)
total += targets.size(0)
correct += predicted.eq(targets).cpu().sum().item()
acc = 100. * correct / total
print("\n| Test Acc: %.2f%%\n" % (acc))
writer.add_scalar('Test/Accuracy', acc, epoch)
return acc
def eval_train(epoch, model):
###### define temperature and epsilon values for odin:
temper = 1000
epsilon = 0.0014
######
model.eval()
num_samples = args.num_batches * args.batch_size
losses = torch.zeros(num_samples)
paths = []
n = 0
with torch.no_grad():
for batch_idx, (inputs, targets, path) in enumerate(eval_loader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
# Add ODIN part
new_inputs = []
inputs_np = inputs.data.cpu().numpy()
for i, output, in enumerate(outputs.data.cpu().numpy()):
max_S = np.max(temperature_scaling(output, temper))
grad = np.negative(np.gradient(inputs_np[i], np.log(max_S), axis=0))
new_inputs.append(inputs_np[i]-epsilon*np.sign(grad))
outputs = model(torch.from_numpy(np.array(new_inputs)).float().cuda())
#########
loss = CE(outputs, targets)
for b in range(inputs.size(0)):
losses[n] = loss[b]
paths.append(path[b])
n += 1
sys.stdout.write('\r')
sys.stdout.write('| Evaluating loss Iter %3d\t' % (batch_idx))
sys.stdout.flush()
losses = (losses - losses.min()) / (losses.max() - losses.min())
losses = losses.reshape(-1, 1)
gmm = GaussianMixture(n_components=2, max_iter=10, reg_covar=5e-4, tol=1e-2)
gmm.fit(losses)
prob = gmm.predict_proba(losses)
prob = prob[:, gmm.means_.argmin()]
return prob, paths
class NegEntropy(object):
def __call__(self, outputs):
probs = torch.softmax(outputs, dim=1)
return torch.mean(torch.sum(probs.log() * probs, dim=1))
def create_model():
model = models.resnet50(pretrained=True)
model.fc = nn.Linear(2048, args.num_class)
model = model.cuda()
return model
log = open('./checkpoint/%s.txt' % args.id, 'w')
log.flush()
loader = dataloader.clothing_dataloader(root=args.data_path, batch_size=args.batch_size, num_workers=5,
num_batches=args.num_batches)
print('| Building net')
net1 = create_model()
net2 = create_model()
# Visualization
images, labels, idx = next(iter(loader.run('eval_train')))
images = images.cuda()
grid = torchvision.utils.make_grid(images)
writer.add_graph(net1, images)
writer.add_graph(net2, images)
cudnn.benchmark = True
optimizer1 = optim.SGD(net1.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-3)
optimizer2 = optim.SGD(net2.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-3)
CE = nn.CrossEntropyLoss(reduction='none')
CEloss = nn.CrossEntropyLoss()
conf_penalty = NegEntropy()
best_acc = [0, 0]
for epoch in range(args.num_epochs + 1):
lr = args.lr
if epoch >= 40:
lr /= 10
for param_group in optimizer1.param_groups:
param_group['lr'] = lr
for param_group in optimizer2.param_groups:
param_group['lr'] = lr
if epoch < 1: # warm up
train_loader = loader.run('warmup')
print('Warmup Net1')
warmup(net1, optimizer1, train_loader, 'net1')
train_loader = loader.run('warmup')
print('\nWarmup Net2')
warmup(net2, optimizer2, train_loader, 'net2')
else:
pred1 = (prob1 > args.p_threshold) # divide dataset
pred2 = (prob2 > args.p_threshold)
print('\n\nTrain Net1')
labeled_trainloader, unlabeled_trainloader = loader.run('train', pred2, prob2, paths=paths2) # co-divide
train(epoch, net1, net2, optimizer1, labeled_trainloader, unlabeled_trainloader, "net1") # train net1
print('\nTrain Net2')
labeled_trainloader, unlabeled_trainloader = loader.run('train', pred1, prob1, paths=paths1) # co-divide
train(epoch, net2, net1, optimizer2, labeled_trainloader, unlabeled_trainloader, "net2") # train net2
val_loader = loader.run('val') # validation
acc1 = val(net1, val_loader, 1)
acc2 = val(net2, val_loader, 2)
log.write('Validation Epoch:%d Acc1:%.2f Acc2:%.2f\n' % (epoch, acc1, acc2))
log.flush()
print('\n==== net 1 evaluate next epoch training data loss ====')
eval_loader = loader.run('eval_train') # evaluate training data loss for next epoch
prob1, paths1 = eval_train(epoch, net1)
print('\n==== net 2 evaluate next epoch training data loss ====')
eval_loader = loader.run('eval_train')
prob2, paths2 = eval_train(epoch, net2)
test_loader = loader.run('test')
net1.load_state_dict(torch.load('./checkpoint/%s_net1.pth.tar' % args.id))
net2.load_state_dict(torch.load('./checkpoint/%s_net2.pth.tar' % args.id))
acc = test(net1, net2, test_loader)
log.write('Test Accuracy:%.2f\n' % (acc))
log.flush()
writer.close()
| [
"sys.stdout.write",
"numpy.sum",
"argparse.ArgumentParser",
"sklearn.mixture.GaussianMixture",
"torch.cat",
"sys.stdout.flush",
"numpy.exp",
"torch.no_grad",
"torch.ones",
"torch.load",
"torch.softmax",
"random.seed",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.Linear",
"torch.zer... | [((442, 491), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""cloth_logs/clothing1m-odin-june6"""'], {}), "('cloth_logs/clothing1m-odin-june6')\n", (455, 491), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((502, 568), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Clothing1M Training"""'}), "(description='PyTorch Clothing1M Training')\n", (525, 568), False, 'import argparse\n'), ((1661, 1694), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpuid'], {}), '(args.gpuid)\n', (1682, 1694), False, 'import torch\n'), ((1695, 1717), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1706, 1717), False, 'import random\n'), ((1718, 1746), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1735, 1746), False, 'import torch\n'), ((1747, 1784), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (1773, 1784), False, 'import torch\n'), ((9813, 9942), 'dataloader_clothing1M.clothing_dataloader', 'dataloader.clothing_dataloader', ([], {'root': 'args.data_path', 'batch_size': 'args.batch_size', 'num_workers': '(5)', 'num_batches': 'args.num_batches'}), '(root=args.data_path, batch_size=args.\n batch_size, num_workers=5, num_batches=args.num_batches)\n', (9843, 9942), True, 'import dataloader_clothing1M as dataloader\n'), ((10152, 10187), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images'], {}), '(images)\n', (10179, 10187), False, 'import torchvision\n'), ((10455, 10492), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (10474, 10492), True, 'import torch.nn as nn\n'), ((10502, 10523), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (10521, 10523), True, 'import torch.nn as nn\n'), ((1858, 1874), 'numpy.exp', 'np.exp', (['(f_xs / T)'], {}), '(f_xs / T)\n', (1864, 1874), True, 'import 
numpy as np\n'), ((8031, 8055), 'torch.zeros', 'torch.zeros', (['num_samples'], {}), '(num_samples)\n', (8042, 8055), False, 'import torch\n'), ((9221, 9293), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(2)', 'max_iter': '(10)', 'reg_covar': '(0.0005)', 'tol': '(0.01)'}), '(n_components=2, max_iter=10, reg_covar=0.0005, tol=0.01)\n', (9236, 9293), False, 'from sklearn.mixture import GaussianMixture\n'), ((9618, 9650), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9633, 9650), True, 'import torchvision.models as models\n'), ((9666, 9697), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'args.num_class'], {}), '(2048, args.num_class)\n', (9675, 9697), True, 'import torch.nn as nn\n'), ((12379, 12431), 'torch.load', 'torch.load', (["('./checkpoint/%s_net1.pth.tar' % args.id)"], {}), "('./checkpoint/%s_net1.pth.tar' % args.id)\n", (12389, 12431), False, 'import torch\n'), ((12454, 12506), 'torch.load', 'torch.load', (["('./checkpoint/%s_net2.pth.tar' % args.id)"], {}), "('./checkpoint/%s_net2.pth.tar' % args.id)\n", (12464, 12506), False, 'import torch\n'), ((1883, 1892), 'numpy.sum', 'np.sum', (['S'], {}), '(S)\n', (1889, 1892), True, 'import numpy as np\n'), ((4198, 4236), 'numpy.random.beta', 'np.random.beta', (['args.alpha', 'args.alpha'], {}), '(args.alpha, args.alpha)\n', (4212, 4236), True, 'import numpy as np\n'), ((4285, 4345), 'torch.cat', 'torch.cat', (['[inputs_x, inputs_x2, inputs_u, inputs_u2]'], {'dim': '(0)'}), '([inputs_x, inputs_x2, inputs_u, inputs_u2], dim=0)\n', (4294, 4345), False, 'import torch\n'), ((4368, 4430), 'torch.cat', 'torch.cat', (['[targets_x, targets_x, targets_u, targets_u]'], {'dim': '(0)'}), '([targets_x, targets_x, targets_u, targets_u], dim=0)\n', (4377, 4430), False, 'import torch\n'), ((5299, 5321), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (5315, 5321), False, 'import sys\n'), ((5512, 5530), 'sys.stdout.flush', 
'sys.stdout.flush', ([], {}), '()\n', (5528, 5530), False, 'import sys\n'), ((6033, 6055), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (6049, 6055), False, 'import sys\n'), ((6242, 6260), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6258, 6260), False, 'import sys\n'), ((6420, 6435), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6433, 6435), False, 'import torch\n'), ((7251, 7266), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7264, 7266), False, 'import torch\n'), ((8090, 8105), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8103, 8105), False, 'import torch\n'), ((9489, 9518), 'torch.softmax', 'torch.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (9502, 9518), False, 'import torch\n'), ((3014, 3029), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3027, 3029), False, 'import torch\n'), ((4942, 4968), 'torch.ones', 'torch.ones', (['args.num_class'], {}), '(args.num_class)\n', (4952, 4968), False, 'import torch\n'), ((6625, 6646), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (6634, 6646), False, 'import torch\n'), ((7537, 7558), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (7546, 7558), False, 'import torch\n'), ((8977, 8999), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (8993, 8999), False, 'import sys\n'), ((9012, 9072), 'sys.stdout.write', 'sys.stdout.write', (["('| Evaluating loss Iter %3d\\t' % batch_idx)"], {}), "('| Evaluating loss Iter %3d\\t' % batch_idx)\n", (9028, 9072), False, 'import sys\n'), ((9087, 9105), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9103, 9105), False, 'import sys\n'), ((2696, 2735), 'torch.zeros', 'torch.zeros', (['batch_size', 'args.num_class'], {}), '(batch_size, args.num_class)\n', (2707, 2735), False, 'import torch\n'), ((5035, 5063), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (5048, 5063), False, 'import torch\n'), 
((5108, 5136), 'torch.log', 'torch.log', (['(prior / pred_mean)'], {}), '(prior / pred_mean)\n', (5117, 5136), False, 'import torch\n'), ((3479, 3512), 'torch.softmax', 'torch.softmax', (['outputs_u22'], {'dim': '(1)'}), '(outputs_u22, dim=1)\n', (3492, 3512), False, 'import torch\n'), ((3862, 3893), 'torch.softmax', 'torch.softmax', (['outputs_x'], {'dim': '(1)'}), '(outputs_x, dim=1)\n', (3875, 3893), False, 'import torch\n'), ((3896, 3928), 'torch.softmax', 'torch.softmax', (['outputs_x2'], {'dim': '(1)'}), '(outputs_x2, dim=1)\n', (3909, 3928), False, 'import torch\n'), ((3339, 3372), 'torch.softmax', 'torch.softmax', (['outputs_u21'], {'dim': '(1)'}), '(outputs_u21, dim=1)\n', (3352, 3372), False, 'import torch\n'), ((4847, 4875), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (4860, 4875), True, 'import torch.nn.functional as F\n'), ((8582, 8595), 'numpy.log', 'np.log', (['max_S'], {}), '(max_S)\n', (8588, 8595), True, 'import numpy as np\n'), ((3267, 3300), 'torch.softmax', 'torch.softmax', (['outputs_u11'], {'dim': '(1)'}), '(outputs_u11, dim=1)\n', (3280, 3300), False, 'import torch\n'), ((3303, 3336), 'torch.softmax', 'torch.softmax', (['outputs_u12'], {'dim': '(1)'}), '(outputs_u12, dim=1)\n', (3316, 3336), False, 'import torch\n'), ((8662, 8675), 'numpy.sign', 'np.sign', (['grad'], {}), '(grad)\n', (8669, 8675), True, 'import numpy as np\n'), ((8722, 8742), 'numpy.array', 'np.array', (['new_inputs'], {}), '(new_inputs)\n', (8730, 8742), True, 'import numpy as np\n')] |
import dataset
import utils
from dataset import DataSet
import compute_merw as rw
import metrics as mtr
import kernel_methods as kern
import numpy as np
import scipy.sparse.linalg as sla
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
import warnings
from scipy.sparse.csgraph import connected_components
def get_small_scores():
return np.array([[0.0, 0.4, 0.5, 0.6, 0.1],
[0.4, 0.0, 0.3, 0.7, 0.2],
[0.5, 0.3, 0.0, 0.5, 0.3],
[0.6, 0.7, 0.5, 0.0, 0.6],
[0.1, 0.2, 0.3, 0.6, 0.0]])
def get_small_adjmx():
return np.array([[0, 0, 1, 0, 1],
[0, 0, 1, 1, 1],
[1, 1, 0, 0, 1],
[0, 1, 0, 0, 1],
[1, 1, 1, 1, 0]])
def get_art_adjmx():
return np.array([[0, 1, 0, 0, 0],
[1, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 1],
[0, 0, 0, 1, 0]])
def test_small_basic():
ds = DataSet('../datasets/', 'test', 'small-basic')
print('DS: {}; iterations: {}'.format(ds.name, ds.set_count))
for i in range(1, ds.set_count + 1):
print("ITER #{}".format(i))
trn, tst = ds.get_dataset(i)
trns, tsts = utils.get_edges_set(trn), utils.get_edges_set(tst)
print('\tTRAIN: {}'.format(trns))
print('\tTEST: {}'.format(tsts))
scores = get_small_scores()
auc_res_tot = mtr.auc(ds.vx_count, trns, tsts, scores)
auc_res_010 = mtr.auc(ds.vx_count, trns, tsts, scores, 10)
auc_res_100 = mtr.auc(ds.vx_count, trns, tsts, scores, 100)
auc_res_01k = mtr.auc(ds.vx_count, trns, tsts, scores, 1000)
# auc_res_10k = mtr.auc(ds.vx_count, trns, tsts, scores, 10000)
# auc_res_1ck = mtr.auc(ds.vx_count, trns, tsts, scores, 100000)
# auc_res_01m = mtr.auc(ds.vx_count, trns, tsts, scores, 1000000)
prc_res_002 = mtr.precision(ds.vx_count, trns, tsts, scores, 2)
print('\tMETRICS:')
print('\t\t-> AUC___TOTAL: {:.04}'.format(auc_res_tot)) # exp: 0.67
print('\t\t-> AUC______10: {:.04}'.format(auc_res_010))
print('\t\t-> AUC_____100: {:.04}'.format(auc_res_100))
print('\t\t-> AUC____1000: {:.04}'.format(auc_res_01k))
# print('\t\t-> AUC___10000: {:.04}'.format(auc_res_10k))
# print('\t\t-> AUC__100000: {:.04}'.format(auc_res_1ck))
# print('\t\t-> AUC_1000000: {:.04}'.format(auc_res_01m))
print('\t\t-> PRECISON__2: {:.04}'.format(prc_res_002)) # exp: 0.50
print()
def test_small_cross():
ds = DataSet('../datasets/', 'test', 'small-cross')
print('DS: {}; iterations: {}'.format(ds.name, ds.set_count))
for i in range(1, ds.set_count + 1):
print("ITER #{}".format(i))
trn, tst = ds.get_dataset(i)
print('\tTRAIN: {}'.format(trn))
print('\tTEST: {}'.format(tst))
trns, tsts = utils.get_edges_set(trn), utils.get_edges_set(tst)
scores = get_small_scores()
auc_res_tot = mtr.auc(ds.vx_count, trns, tsts, scores)
auc_res_010 = mtr.auc(ds.vx_count, trns, tsts, scores, 10)
auc_res_100 = mtr.auc(ds.vx_count, trns, tsts, scores, 100)
auc_res_01k = mtr.auc(ds.vx_count, trns, tsts, scores, 1000)
# auc_res_10k = mtr.auc(ds.vx_count, trns, tsts, scores, 10000)
# auc_res_1ck = mtr.auc(ds.vx_count, trns, tsts, scores, 100000)
# auc_res_01m = mtr.auc(ds.vx_count, trns, tsts, scores, 1000000)
prc_res_002 = mtr.precision(ds.vx_count, trns, tsts, scores, 2)
print('\tMETRICS:')
print('\t\t-> AUC___TOT: {:.04}'.format(auc_res_tot)) # expected: 0.67
print('\t\t-> AUC____10: {:.04}'.format(auc_res_010))
print('\t\t-> AUC___100: {:.04}'.format(auc_res_100))
print('\t\t-> AUC____1K: {:.04}'.format(auc_res_01k))
# print('\t\t-> AUC___10K: {:.04}'.format(auc_res_10k))
# print('\t\t-> AUC__100K: {:.04}'.format(auc_res_1ck))
# print('\t\t-> AUC____1M: {:.04}'.format(auc_res_01m))
print('\t\t-> PREC____2: {:.04}'.format(prc_res_002)) # expected: 0.50
print()
def print_sparse_as_dense(S):
for i in range(S.get_shape()[0]):
print('[', end=' ')
for j in range(S.get_shape()[1]):
print('{:7.4f}'.format(S[i, j]), end=' ')
print(' ]')
def walks_survey(A):
print('A (adjacency matrix):')
print(A)
print()
print('-------------------------------------')
print('GRW:')
P_grw, pi_grw = rw.compute_grw(csc_matrix(A))
print()
print('P (GRW transition matrix):')
print_sparse_as_dense(P_grw)
print()
print('pi (GRW stationary distribution):')
print(pi_grw)
L_grw = kern.general_laplacian(P_grw, pi_grw)
print()
print('L (GRW general laplacian):')
print_sparse_as_dense(L_grw)
LL = kern.laplacian(csr_matrix(A, (A.shape[0], A.shape[1]), 'd'))
print()
print('LL (GRW laplacian):')
print_sparse_as_dense(LL)
LL_sym = kern.symmetric_normalized_laplacian(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'))
print()
print('L (GRW symmetric normalized laplacian):')
print_sparse_as_dense(LL_sym)
print()
print('-------------------------------------')
print('MERW:')
P_merw, v_merw, lambda_merw, pi_merw = \
rw.compute_merw(csr_matrix(A, (A.shape[0], A.shape[1]), 'd'))
v_merw *= -1
l, v = sla.eigsh(csr_matrix(A, (A.shape[0], A.shape[1]), 'd'), 1,
which='LA')
lambda_max = l[0]
v_max = v[:, 0]
print()
print('P (MERW transition matrix):')
print_sparse_as_dense(P_merw)
print()
print('pi (MERW stationary distribution):')
print(pi_merw)
print()
print('lambda (max eigenvalue):')
print(lambda_merw)
print(lambda_max)
print()
print('v (max eigenvector):')
print(v_merw)
print(v_max)
W = kern.compute_eigen_weighted_graph(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'), lambda_merw, v_merw)
print()
print('W (eigen-weighted graph):')
print_sparse_as_dense(W)
L_merw = kern.general_laplacian(P_merw, pi_merw)
print()
print('L (MERW general laplacian):')
print_sparse_as_dense(L_merw)
L = kern.mecl(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'), lambda_merw, v_merw)
print()
print('L (maximal entropy combinatorial laplacian):')
print_sparse_as_dense(L)
L_sym = kern.mecl(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'), lambda_merw, v_merw,
type='sym')
print()
print('L_sym (symmetric normalized maximal entropy laplacian):')
print_sparse_as_dense(L_sym)
L_asym = kern.mecl(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'), lambda_merw, v_merw,
type='asym')
print()
print('L_rw (asymmetric normalized maximal entropy laplacian):')
print_sparse_as_dense(L_asym)
CK = kern.commute_time_kernel(LL, 3)
print()
print('CK:')
print_sparse_as_dense(CK)
NCK = kern.commute_time_kernel(LL_sym, 3)
print()
print('NCK:')
print_sparse_as_dense(NCK)
MECK = kern.commute_time_kernel(L, 3)
print()
print('MECK:')
print_sparse_as_dense(MECK)
NMECK = kern.commute_time_kernel(L_sym, 3)
print()
print('NMECK:')
print_sparse_as_dense(NMECK)
DK = kern.heat_diffusion_kernel(LL)
print()
print('DK:')
print_sparse_as_dense(DK)
NDK = kern.heat_diffusion_kernel(LL_sym, 3)
print()
print('NDK:')
print_sparse_as_dense(NDK)
MEDK = kern.heat_diffusion_kernel(L)
print()
print('MEDK:')
print_sparse_as_dense(MEDK)
NMEDK = kern.heat_diffusion_kernel(L_sym, 3)
print()
print('NMEDK:')
print_sparse_as_dense(NMEDK)
RK = kern.regularized_laplacian_kernel(LL)
print()
print('RK:')
print_sparse_as_dense(RK)
NRK = kern.regularized_laplacian_kernel(LL_sym)
print()
print('NRK:')
print_sparse_as_dense(NRK)
MERK = kern.regularized_laplacian_kernel(L)
print()
print('MERK:')
print_sparse_as_dense(MERK)
NMERK = kern.regularized_laplacian_kernel(L_sym)
print()
print('NMERK:')
print_sparse_as_dense(NMERK)
MENK = kern.neumann_kernel(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'), lambda_merw, v_merw)
print()
print('MENK:')
print_sparse_as_dense(MENK)
NNK = kern.traditional_normalized_neumann_kernel(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'))
print()
print('NNK:')
print_sparse_as_dense(NNK)
NMENK = kern.normalized_neumann_kernel(
csr_matrix(A, (A.shape[0], A.shape[1]), 'd'), lambda_merw, v_merw)
print()
print('NMENK:')
print_sparse_as_dense(NMENK)
def dk_tests_1k():
ds = DataSet('../datasets/', 'gr-qc', 'eg1k')
trn, tst = ds.get_dataset()
trns, tsts = utils.get_edges_set(trn), utils.get_edges_set(tst)
rmtrns, rmtsts = set(), set()
toTest = True
for x in tsts:
if x in trns:
if toTest:
rmtrns.add(x)
else:
rmtsts.add(x)
toTest = not toTest
for x in rmtrns:
trns.remove(x)
for x in rmtsts:
tsts.remove(x)
for x in tsts:
if x in trns:
print("NO!")
A = lil_matrix((ds.vx_count, ds.vx_count))
for v1, v2 in trns:
A[v1, v2] = 1
A[v2, v1] = 1
A = csr_matrix(A, (ds.vx_count, ds.vx_count), 'd')
ls, vs = sla.eigsh(A, 1, which='LA')
l_max = ls[0]
v_max = vs[:, 0]
# print("Values of AUC (1000 samples) and precision (K=30) " +
# "for heat diffusion kernel variants:")
print("Values of AUC (10000 samples) for heat diffusion kernel variants:")
auc_sampl = 10000
prc_k = 30
# DK
DK = kern.heat_diffusion_kernel(kern.laplacian(A))
auc = mtr.auc(ds.vx_count, trns, tsts, DK, auc_sampl)
print(" DK - AUC: {:.4f}".format(auc))
prc = mtr.precision(ds.vx_count, trns, tsts, DK, prc_k)
print(" DK - PRC: {:.4f}".format(prc))
# NDK
warnings.filterwarnings("ignore")
NDK = kern.heat_diffusion_kernel(kern.symmetric_normalized_laplacian(A))
auc = mtr.auc(ds.vx_count, trns, tsts, NDK, auc_sampl)
# prc = mtr.precision(ds.vx_count, trns, tsts, NDK, prc_k)
print(" NDK - AUC: {:.4f}".format(auc))
# print(" NDK - PREC: {:.4f}".format(prc))
# MEDK
MEDK = kern.heat_diffusion_kernel(kern.mecl(A, l_max, v_max))
auc = mtr.auc(ds.vx_count, trns, tsts, MEDK, auc_sampl)
# prc = mtr.precision(ds.vx_count, trns, tsts, MEDK, prc_k)
print(" MEDK - AUC: {:.4f}".format(auc))
# print(" MEDK - PREC: {:.4f}".format(prc))
# NMEDK
NMEDK = kern.heat_diffusion_kernel(kern.mecl(A, l_max, v_max, type='sym'))
auc = mtr.auc(ds.vx_count, trns, tsts, NMEDK, auc_sampl)
# prc = mtr.precision(ds.vx_count, trns, tsts, NMEDK, prc_k)
print("NMEDK - AUC: {:.4f}".format(auc))
# print("NMEDK - PREC: {:.4f}".format(prc))
def check_trn_tst_disjoint(ds):
trn, tst = ds.get_dataset()
trns, tsts = utils.get_edges_set(trn), utils.get_edges_set(tst)
overlap_count = 0
for edge in trns:
if edge in tsts:
overlap_count += 1
if overlap_count == 0:
print('Dataset "{}" has disjoint training and test sets'
.format(ds.name))
else:
print('Dataset "{}" has {} common edges in training and test sets'
.format(ds.name), overlap_count)
def check_trn_symmetric_and_connected(ds):
A = ds.get_training_set(mode='adjacency_matrix_csr')
nonsym_count = 0
for i, j in zip(A.nonzero()[0], A.nonzero()[1]):
if not A[j, i] > 0:
nonsym_count += 1
if nonsym_count == 0:
print('Training set "{}" matrix is symmetric'
.format(ds.name))
else:
print('Training set "{}" matrix has {} asymmetric entries'
.format(ds.name, nonsym_count))
diag_count = 0
for i in range(0, A.get_shape()[0]):
if A[i, i] > 0:
diag_count += 1
if diag_count == 0:
print('Training set "{}" matrix nas no diagonal entries'
.format(ds.name))
else:
print('Training set "{}" matrix has {} diagonal entries'
.format(ds.name, diag_count))
cc_count = connected_components(A, directed=False, return_labels=False)
if cc_count == 1:
print('Training set "{}" graph is connected'
.format(ds.name))
else:
print('Training set "{}" graph has {} connected components'
.format(ds.name, cc_count))
tst = ds.get_test_edges()
for i, j in tst:
A[i, j] = 1
A[j, i] = 1
cc_count = connected_components(A, directed=False, return_labels=False)
if cc_count == 1:
print('Dataset "{}" total graph is connected'
.format(ds.name))
else:
print('Dataset "{}" total graph has {} connected components'
.format(ds.name, cc_count))
print()
def basic_eg1k_checkup():
dss = []
dss.append(DataSet('../datasets/', 'gr-qc', 'eg1k_rnd_std'))
dss.append(DataSet('../datasets/', 'gr-qc', 'eg1k_rnd_kcv'))
dss.append(DataSet('../datasets/', 'gr-qc', 'eg1k_chr_frm'))
dss.append(DataSet('../datasets/', 'gr-qc', 'eg1k_chr_prc'))
for ds in dss:
check_trn_tst_disjoint(ds)
check_trn_symmetric_and_connected(ds)
if __name__ == '__main__':
# print('TEST #1 - SMALL BASIC:')
# test_small_basic()
# rint('TEST #2 - SMALL CROSS:')
# test_small_cross()
# print('WALKS SURVEY - small graph:\n')
# walks_survey(get_small_adjmx())
# print('WALKS SURVEY - article graph:\n')
# walks_survey(get_art_adjmx())
# dk_tests_1k()
basic_eg1k_checkup()
| [
"dataset.DataSet",
"kernel_methods.general_laplacian",
"warnings.filterwarnings",
"utils.get_edges_set",
"metrics.auc",
"kernel_methods.symmetric_normalized_laplacian",
"scipy.sparse.linalg.eigsh",
"kernel_methods.regularized_laplacian_kernel",
"kernel_methods.heat_diffusion_kernel",
"scipy.sparse... | [((354, 504), 'numpy.array', 'np.array', (['[[0.0, 0.4, 0.5, 0.6, 0.1], [0.4, 0.0, 0.3, 0.7, 0.2], [0.5, 0.3, 0.0, 0.5,\n 0.3], [0.6, 0.7, 0.5, 0.0, 0.6], [0.1, 0.2, 0.3, 0.6, 0.0]]'], {}), '([[0.0, 0.4, 0.5, 0.6, 0.1], [0.4, 0.0, 0.3, 0.7, 0.2], [0.5, 0.3, \n 0.0, 0.5, 0.3], [0.6, 0.7, 0.5, 0.0, 0.6], [0.1, 0.2, 0.3, 0.6, 0.0]])\n', (362, 504), True, 'import numpy as np\n'), ((620, 720), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 1], [0, 0, 1, 1, 1], [1, 1, 0, 0, 1], [0, 1, 0, 0, 1], [1, 1,\n 1, 1, 0]]'], {}), '([[0, 0, 1, 0, 1], [0, 0, 1, 1, 1], [1, 1, 0, 0, 1], [0, 1, 0, 0, 1\n ], [1, 1, 1, 1, 0]])\n', (628, 720), True, 'import numpy as np\n'), ((834, 934), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 0], [1, 0, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 0, 1], [0, 0,\n 0, 1, 0]]'], {}), '([[0, 1, 0, 0, 0], [1, 0, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 0, 1\n ], [0, 0, 0, 1, 0]])\n', (842, 934), True, 'import numpy as np\n'), ((1049, 1095), 'dataset.DataSet', 'DataSet', (['"""../datasets/"""', '"""test"""', '"""small-basic"""'], {}), "('../datasets/', 'test', 'small-basic')\n", (1056, 1095), False, 'from dataset import DataSet\n'), ((2644, 2690), 'dataset.DataSet', 'DataSet', (['"""../datasets/"""', '"""test"""', '"""small-cross"""'], {}), "('../datasets/', 'test', 'small-cross')\n", (2651, 2690), False, 'from dataset import DataSet\n'), ((4789, 4826), 'kernel_methods.general_laplacian', 'kern.general_laplacian', (['P_grw', 'pi_grw'], {}), '(P_grw, pi_grw)\n', (4811, 4826), True, 'import kernel_methods as kern\n'), ((6183, 6222), 'kernel_methods.general_laplacian', 'kern.general_laplacian', (['P_merw', 'pi_merw'], {}), '(P_merw, pi_merw)\n', (6205, 6222), True, 'import kernel_methods as kern\n'), ((6983, 7014), 'kernel_methods.commute_time_kernel', 'kern.commute_time_kernel', (['LL', '(3)'], {}), '(LL, 3)\n', (7007, 7014), True, 'import kernel_methods as kern\n'), ((7085, 7120), 'kernel_methods.commute_time_kernel', 'kern.commute_time_kernel', 
(['LL_sym', '(3)'], {}), '(LL_sym, 3)\n', (7109, 7120), True, 'import kernel_methods as kern\n'), ((7194, 7224), 'kernel_methods.commute_time_kernel', 'kern.commute_time_kernel', (['L', '(3)'], {}), '(L, 3)\n', (7218, 7224), True, 'import kernel_methods as kern\n'), ((7301, 7335), 'kernel_methods.commute_time_kernel', 'kern.commute_time_kernel', (['L_sym', '(3)'], {}), '(L_sym, 3)\n', (7325, 7335), True, 'import kernel_methods as kern\n'), ((7411, 7441), 'kernel_methods.heat_diffusion_kernel', 'kern.heat_diffusion_kernel', (['LL'], {}), '(LL)\n', (7437, 7441), True, 'import kernel_methods as kern\n'), ((7512, 7549), 'kernel_methods.heat_diffusion_kernel', 'kern.heat_diffusion_kernel', (['LL_sym', '(3)'], {}), '(LL_sym, 3)\n', (7538, 7549), True, 'import kernel_methods as kern\n'), ((7623, 7652), 'kernel_methods.heat_diffusion_kernel', 'kern.heat_diffusion_kernel', (['L'], {}), '(L)\n', (7649, 7652), True, 'import kernel_methods as kern\n'), ((7729, 7765), 'kernel_methods.heat_diffusion_kernel', 'kern.heat_diffusion_kernel', (['L_sym', '(3)'], {}), '(L_sym, 3)\n', (7755, 7765), True, 'import kernel_methods as kern\n'), ((7841, 7878), 'kernel_methods.regularized_laplacian_kernel', 'kern.regularized_laplacian_kernel', (['LL'], {}), '(LL)\n', (7874, 7878), True, 'import kernel_methods as kern\n'), ((7949, 7990), 'kernel_methods.regularized_laplacian_kernel', 'kern.regularized_laplacian_kernel', (['LL_sym'], {}), '(LL_sym)\n', (7982, 7990), True, 'import kernel_methods as kern\n'), ((8064, 8100), 'kernel_methods.regularized_laplacian_kernel', 'kern.regularized_laplacian_kernel', (['L'], {}), '(L)\n', (8097, 8100), True, 'import kernel_methods as kern\n'), ((8177, 8217), 'kernel_methods.regularized_laplacian_kernel', 'kern.regularized_laplacian_kernel', (['L_sym'], {}), '(L_sym)\n', (8210, 8217), True, 'import kernel_methods as kern\n'), ((8839, 8879), 'dataset.DataSet', 'DataSet', (['"""../datasets/"""', '"""gr-qc"""', '"""eg1k"""'], {}), "('../datasets/', 'gr-qc', 
'eg1k')\n", (8846, 8879), False, 'from dataset import DataSet\n'), ((9369, 9407), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(ds.vx_count, ds.vx_count)'], {}), '((ds.vx_count, ds.vx_count))\n', (9379, 9407), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((9485, 9531), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(ds.vx_count, ds.vx_count)', '"""d"""'], {}), "(A, (ds.vx_count, ds.vx_count), 'd')\n", (9495, 9531), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((9546, 9573), 'scipy.sparse.linalg.eigsh', 'sla.eigsh', (['A', '(1)'], {'which': '"""LA"""'}), "(A, 1, which='LA')\n", (9555, 9573), True, 'import scipy.sparse.linalg as sla\n'), ((9927, 9974), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'DK', 'auc_sampl'], {}), '(ds.vx_count, trns, tsts, DK, auc_sampl)\n', (9934, 9974), True, 'import metrics as mtr\n'), ((10030, 10079), 'metrics.precision', 'mtr.precision', (['ds.vx_count', 'trns', 'tsts', 'DK', 'prc_k'], {}), '(ds.vx_count, trns, tsts, DK, prc_k)\n', (10043, 10079), True, 'import metrics as mtr\n'), ((10140, 10173), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (10163, 10173), False, 'import warnings\n'), ((10263, 10311), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'NDK', 'auc_sampl'], {}), '(ds.vx_count, trns, tsts, NDK, auc_sampl)\n', (10270, 10311), True, 'import metrics as mtr\n'), ((10557, 10606), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'MEDK', 'auc_sampl'], {}), '(ds.vx_count, trns, tsts, MEDK, auc_sampl)\n', (10564, 10606), True, 'import metrics as mtr\n'), ((10867, 10917), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'NMEDK', 'auc_sampl'], {}), '(ds.vx_count, trns, tsts, NMEDK, auc_sampl)\n', (10874, 10917), True, 'import metrics as mtr\n'), ((12405, 12465), 'scipy.sparse.csgraph.connected_components', 'connected_components', (['A'], {'directed': '(False)', 'return_labels': 
'(False)'}), '(A, directed=False, return_labels=False)\n', (12425, 12465), False, 'from scipy.sparse.csgraph import connected_components\n'), ((12801, 12861), 'scipy.sparse.csgraph.connected_components', 'connected_components', (['A'], {'directed': '(False)', 'return_labels': '(False)'}), '(A, directed=False, return_labels=False)\n', (12821, 12861), False, 'from scipy.sparse.csgraph import connected_components\n'), ((1493, 1533), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'scores'], {}), '(ds.vx_count, trns, tsts, scores)\n', (1500, 1533), True, 'import metrics as mtr\n'), ((1556, 1600), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'scores', '(10)'], {}), '(ds.vx_count, trns, tsts, scores, 10)\n', (1563, 1600), True, 'import metrics as mtr\n'), ((1623, 1668), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'scores', '(100)'], {}), '(ds.vx_count, trns, tsts, scores, 100)\n', (1630, 1668), True, 'import metrics as mtr\n'), ((1691, 1737), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'scores', '(1000)'], {}), '(ds.vx_count, trns, tsts, scores, 1000)\n', (1698, 1737), True, 'import metrics as mtr\n'), ((1976, 2025), 'metrics.precision', 'mtr.precision', (['ds.vx_count', 'trns', 'tsts', 'scores', '(2)'], {}), '(ds.vx_count, trns, tsts, scores, 2)\n', (1989, 2025), True, 'import metrics as mtr\n'), ((3085, 3125), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'scores'], {}), '(ds.vx_count, trns, tsts, scores)\n', (3092, 3125), True, 'import metrics as mtr\n'), ((3148, 3192), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'scores', '(10)'], {}), '(ds.vx_count, trns, tsts, scores, 10)\n', (3155, 3192), True, 'import metrics as mtr\n'), ((3215, 3260), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 'scores', '(100)'], {}), '(ds.vx_count, trns, tsts, scores, 100)\n', (3222, 3260), True, 'import metrics as mtr\n'), ((3283, 3329), 'metrics.auc', 'mtr.auc', (['ds.vx_count', 'trns', 'tsts', 
'scores', '(1000)'], {}), '(ds.vx_count, trns, tsts, scores, 1000)\n', (3290, 3329), True, 'import metrics as mtr\n'), ((3568, 3617), 'metrics.precision', 'mtr.precision', (['ds.vx_count', 'trns', 'tsts', 'scores', '(2)'], {}), '(ds.vx_count, trns, tsts, scores, 2)\n', (3581, 3617), True, 'import metrics as mtr\n'), ((4597, 4610), 'scipy.sparse.csc_matrix', 'csc_matrix', (['A'], {}), '(A)\n', (4607, 4610), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((4937, 4981), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (4947, 4981), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((5117, 5161), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (5127, 5161), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((5414, 5458), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (5424, 5458), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((5499, 5543), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (5509, 5543), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((6021, 6065), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (6031, 6065), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((6338, 6382), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (6348, 6382), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((6536, 6580), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], 
{}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (6546, 6580), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((6770, 6814), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (6780, 6814), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((8324, 8368), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (8334, 8368), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((8517, 8561), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (8527, 8561), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((8677, 8721), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A', '(A.shape[0], A.shape[1])', '"""d"""'], {}), "(A, (A.shape[0], A.shape[1]), 'd')\n", (8687, 8721), False, 'from scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n'), ((8929, 8953), 'utils.get_edges_set', 'utils.get_edges_set', (['trn'], {}), '(trn)\n', (8948, 8953), False, 'import utils\n'), ((8955, 8979), 'utils.get_edges_set', 'utils.get_edges_set', (['tst'], {}), '(tst)\n', (8974, 8979), False, 'import utils\n'), ((9897, 9914), 'kernel_methods.laplacian', 'kern.laplacian', (['A'], {}), '(A)\n', (9911, 9914), True, 'import kernel_methods as kern\n'), ((10212, 10250), 'kernel_methods.symmetric_normalized_laplacian', 'kern.symmetric_normalized_laplacian', (['A'], {}), '(A)\n', (10247, 10250), True, 'import kernel_methods as kern\n'), ((10518, 10544), 'kernel_methods.mecl', 'kern.mecl', (['A', 'l_max', 'v_max'], {}), '(A, l_max, v_max)\n', (10527, 10544), True, 'import kernel_methods as kern\n'), ((10816, 10854), 'kernel_methods.mecl', 'kern.mecl', (['A', 'l_max', 'v_max'], {'type': '"""sym"""'}), "(A, l_max, v_max, type='sym')\n", (10825, 10854), True, 'import kernel_methods 
as kern\n'), ((11159, 11183), 'utils.get_edges_set', 'utils.get_edges_set', (['trn'], {}), '(trn)\n', (11178, 11183), False, 'import utils\n'), ((11185, 11209), 'utils.get_edges_set', 'utils.get_edges_set', (['tst'], {}), '(tst)\n', (11204, 11209), False, 'import utils\n'), ((13161, 13209), 'dataset.DataSet', 'DataSet', (['"""../datasets/"""', '"""gr-qc"""', '"""eg1k_rnd_std"""'], {}), "('../datasets/', 'gr-qc', 'eg1k_rnd_std')\n", (13168, 13209), False, 'from dataset import DataSet\n'), ((13226, 13274), 'dataset.DataSet', 'DataSet', (['"""../datasets/"""', '"""gr-qc"""', '"""eg1k_rnd_kcv"""'], {}), "('../datasets/', 'gr-qc', 'eg1k_rnd_kcv')\n", (13233, 13274), False, 'from dataset import DataSet\n'), ((13291, 13339), 'dataset.DataSet', 'DataSet', (['"""../datasets/"""', '"""gr-qc"""', '"""eg1k_chr_frm"""'], {}), "('../datasets/', 'gr-qc', 'eg1k_chr_frm')\n", (13298, 13339), False, 'from dataset import DataSet\n'), ((13356, 13404), 'dataset.DataSet', 'DataSet', (['"""../datasets/"""', '"""gr-qc"""', '"""eg1k_chr_prc"""'], {}), "('../datasets/', 'gr-qc', 'eg1k_chr_prc')\n", (13363, 13404), False, 'from dataset import DataSet\n'), ((1298, 1322), 'utils.get_edges_set', 'utils.get_edges_set', (['trn'], {}), '(trn)\n', (1317, 1322), False, 'import utils\n'), ((1324, 1348), 'utils.get_edges_set', 'utils.get_edges_set', (['tst'], {}), '(tst)\n', (1343, 1348), False, 'import utils\n'), ((2975, 2999), 'utils.get_edges_set', 'utils.get_edges_set', (['trn'], {}), '(trn)\n', (2994, 2999), False, 'import utils\n'), ((3001, 3025), 'utils.get_edges_set', 'utils.get_edges_set', (['tst'], {}), '(tst)\n', (3020, 3025), False, 'import utils\n')] |
"""
optimizers.py
~~~~~~~~~~
Collection of activation functions.
Each function provides forward pass and backpropagation.
"""
### --- IMPORTS --- ###
# Standard Import
# Third-Party Import
import numpy as np
class Optimizer_SGD:
    """Stochastic Gradient Descent optimizer with optional decay and momentum.

    Parameters
    ----------
    learning_rate : float
        Base step size applied to the gradients.
    decay : float
        Per-iteration decay rate of the learning rate (0 disables decay).
    momentum : float
        Fraction of the previous update carried over (0 disables momentum).
    """
    def __init__(self, learning_rate=1., decay=0., momentum=0.):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.momentum = momentum
    def pre_update_params(self):
        """Refresh the current learning rate when a decay is configured."""
        if self.decay:
            self.current_learning_rate = self.learning_rate * (1. / (1. + self.decay * self.iterations))
    def update_params(self, layer):
        """Update ``layer``'s weights and biases in place.

        Uses the current learning rate and, if configured, momentum.
        The layer is expected to expose ``weights``, ``biases`` and the
        matching gradients ``dweights``/``dbiases``.
        """
        # Momentum SGD
        if self.momentum:
            # Lazily allocate the momentum buffers on first use.
            if not hasattr(layer, 'weight_momentums'):
                layer.weight_momentums = np.zeros_like(layer.weights)
                layer.bias_momentums = np.zeros_like(layer.biases)
            weight_updates = self.momentum * layer.weight_momentums - self.current_learning_rate * layer.dweights
            layer.weight_momentums = weight_updates
            bias_updates = self.momentum * layer.bias_momentums - self.current_learning_rate * layer.dbiases
            layer.bias_momentums = bias_updates
        # Vanilla SGD
        else:
            weight_updates = -self.current_learning_rate * layer.dweights
            bias_updates = -self.current_learning_rate * layer.dbiases
        # Apply the computed updates.
        layer.weights += weight_updates
        layer.biases += bias_updates
    def post_update_params(self):
        """Advance the iteration counter (call once per optimization step)."""
        self.iterations += 1
    # Backward-compatible alias for the historical (misspelled) method name,
    # kept so existing callers of post_update_paramy() keep working.
    post_update_paramy = post_update_params
# AdaGrad Optimizer
class Optimizer_Adagrad:
    """AdaGrad optimizer: vanilla SGD with per-parameter gradient normalization.

    Parameters
    ----------
    learning_rate : float
        Base step size applied to the gradients.
    decay : float
        Per-iteration decay rate of the learning rate (0 disables decay).
    epsilon : float
        Small constant added to the denominator for numerical stability.
    """
    def __init__(self, learning_rate=1., decay=0., epsilon=1e-7):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon
    def pre_update_params(self):
        """Refresh the current learning rate when a decay is configured."""
        if self.decay:
            self.current_learning_rate = self.learning_rate * (1. / (1. + self.decay * self.iterations))
    def update_params(self, layer):
        """Update ``layer``'s weights and biases in place.

        Accumulates squared gradients in a per-layer cache and divides
        each update by the square root of that cache (AdaGrad rule).
        """
        if not hasattr(layer, 'weight_cache'):  # initial init
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_cache = np.zeros_like(layer.biases)
        # Accumulate the (ever-growing) squared-gradient history.
        layer.weight_cache += layer.dweights**2
        layer.bias_cache += layer.dbiases**2
        # Vanilla SGD + normalization
        layer.weights += -self.current_learning_rate * layer.dweights / (np.sqrt(layer.weight_cache) + self.epsilon)
        layer.biases += -self.current_learning_rate * layer.dbiases / (np.sqrt(layer.bias_cache) + self.epsilon)
    def post_update_params(self):
        """Advance the iteration counter (call once per optimization step)."""
        self.iterations += 1
    # Backward-compatible alias for the historical (misspelled) method name,
    # kept so existing callers of post_update_paramy() keep working.
    post_update_paramy = post_update_params
# RMSprop Optimizer
class Optimizer_RMSprop:
    """RMSprop optimizer: like AdaGrad but with an exponentially decaying cache.

    Parameters
    ----------
    learning_rate : float
        Base step size applied to the gradients.
    decay : float
        Per-iteration decay rate of the learning rate (0 disables decay).
    epsilon : float
        Small constant added to the denominator for numerical stability.
    rho : float
        Decay rate of the squared-gradient moving average.
    """
    def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7, rho=0.9):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon
        self.rho = rho
    def pre_update_params(self):
        """Refresh the current learning rate when a decay is configured."""
        if self.decay:
            self.current_learning_rate = self.learning_rate * (1. / (1. + self.decay * self.iterations))
    def update_params(self, layer):
        """Update ``layer``'s weights and biases in place.

        Maintains an exponential moving average of squared gradients
        (smoother than AdaGrad's unbounded accumulation) and normalizes
        each update by its square root.
        """
        if not hasattr(layer, 'weight_cache'):  # initial init
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_cache = np.zeros_like(layer.biases)
        # Exponential moving average of the squared gradients.
        layer.weight_cache = self.rho * layer.weight_cache + (1 - self.rho) * layer.dweights**2
        layer.bias_cache = self.rho * layer.bias_cache + (1 - self.rho) * layer.dbiases**2
        # Vanilla SGD + normalization
        layer.weights += -self.current_learning_rate * layer.dweights / (np.sqrt(layer.weight_cache) + self.epsilon)
        layer.biases += -self.current_learning_rate * layer.dbiases / (np.sqrt(layer.bias_cache) + self.epsilon)
    def post_update_params(self):
        """Advance the iteration counter (call once per optimization step)."""
        self.iterations += 1
    # Backward-compatible alias for the historical (misspelled) method name,
    # kept so existing callers of post_update_paramy() keep working.
    post_update_paramy = post_update_params
# Adam
class Optimizer_Adam:
    def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7, beta_1=0.9, beta_2=0.999):
        """Adam optimizer: RMSprop-style adaptive cache + SGD-style momentum.

        Parameters
        ----------
        learning_rate : float
            Base step size.
        decay : float
            Per-iteration learning-rate decay (0 disables decay).
        epsilon : float
            Small constant added to the denominator for numerical stability.
        beta_1 : float
            Decay rate for the first-moment (momentum) moving averages.
        beta_2 : float
            Decay rate for the second-moment (squared-gradient) caches.
        """
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon
        self.beta_1 = beta_1
        self.beta_2 = beta_2
    def pre_update_params(self):
        """Refresh the current learning rate when a decay is configured."""
        if self.decay:
            self.current_learning_rate = self.learning_rate * (1. / (1. + self.decay * self.iterations))
    def update_params(self, layer):
        """Update ``layer``'s weights and biases in place using the Adam rule
        (bias-corrected first and second moments of the gradients)."""
        if not hasattr(layer, 'weight_cache'): # initial init
            # Lazily allocate first- and second-moment buffers on first use.
            layer.weight_momentums = np.zeros_like(layer.weights)
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_momentums = np.zeros_like(layer.biases)
            layer.bias_cache = np.zeros_like(layer.biases)
        # First moment: exponential moving average of the gradients.
        layer.weight_momentums = self.beta_1 * layer.weight_momentums + (1 - self.beta_1) * layer.dweights
        layer.bias_momentums = self.beta_1 * layer.bias_momentums + (1 - self.beta_1) * layer.dbiases
        # Bias-corrected first moment (iterations is 0-based, hence the +1).
        weight_momentums_corrected = layer.weight_momentums / (1 - self.beta_1 ** (self.iterations + 1))
        bias_momentums_corrected = layer.bias_momentums / (1 - self.beta_1 ** (self.iterations + 1))
        # Second moment: exponential moving average of squared gradients.
        layer.weight_cache = self.beta_2 * layer.weight_cache + (1 - self.beta_2) * layer.dweights**2
        layer.bias_cache = self.beta_2 * layer.bias_cache + (1 - self.beta_2) * layer.dbiases**2
        # Bias-corrected second moment.
        weight_cache_corrected = layer.weight_cache / (1 - self.beta_2 ** (self.iterations + 1))
        bias_cache_corrected = layer.bias_cache / (1 - self.beta_2 ** (self.iterations + 1))
        # Vanilla SGD + normalization
        layer.weights += -self.current_learning_rate * weight_momentums_corrected / (np.sqrt(weight_cache_corrected) + self.epsilon)
        layer.biases += -self.current_learning_rate * bias_momentums_corrected / (np.sqrt(bias_cache_corrected) + self.epsilon)
    def post_update_params(self):
        """Advance the iteration counter (call once per optimization step)."""
        self.iterations += 1 | [
"numpy.zeros_like",
"numpy.sqrt"
] | [((2933, 2961), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (2946, 2961), True, 'import numpy as np\n'), ((2993, 3020), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (3006, 3020), True, 'import numpy as np\n'), ((4401, 4429), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (4414, 4429), True, 'import numpy as np\n'), ((4461, 4488), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (4474, 4488), True, 'import numpy as np\n'), ((5999, 6027), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (6012, 6027), True, 'import numpy as np\n'), ((6061, 6089), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (6074, 6089), True, 'import numpy as np\n'), ((6126, 6153), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (6139, 6153), True, 'import numpy as np\n'), ((6185, 6212), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (6198, 6212), True, 'import numpy as np\n'), ((1198, 1226), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (1211, 1226), True, 'import numpy as np\n'), ((1266, 1293), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (1279, 1293), True, 'import numpy as np\n'), ((3227, 3254), 'numpy.sqrt', 'np.sqrt', (['layer.weight_cache'], {}), '(layer.weight_cache)\n', (3234, 3254), True, 'import numpy as np\n'), ((3342, 3367), 'numpy.sqrt', 'np.sqrt', (['layer.bias_cache'], {}), '(layer.bias_cache)\n', (3349, 3367), True, 'import numpy as np\n'), ((4789, 4816), 'numpy.sqrt', 'np.sqrt', (['layer.weight_cache'], {}), '(layer.weight_cache)\n', (4796, 4816), True, 'import numpy as np\n'), ((4904, 4929), 'numpy.sqrt', 'np.sqrt', (['layer.bias_cache'], {}), '(layer.bias_cache)\n', (4911, 4929), True, 'import numpy as np\n'), ((7152, 7183), 
'numpy.sqrt', 'np.sqrt', (['weight_cache_corrected'], {}), '(weight_cache_corrected)\n', (7159, 7183), True, 'import numpy as np\n'), ((7282, 7311), 'numpy.sqrt', 'np.sqrt', (['bias_cache_corrected'], {}), '(bias_cache_corrected)\n', (7289, 7311), True, 'import numpy as np\n')] |
import numpy as np
from .electools import Elec, Result
from .engine import Engine
from .utils.darray import DependArray
class Model(object):
    """Electrostatic QM/MM model holding geometry, charge, and cell data.

    Validates the mandatory cutoff, derives default values for ``swdist``
    and ``pbc`` when they are not given, and builds the internal ``Elec``
    helper used for electrostatic bookkeeping.
    """

    def __init__(
        self,
        qm_positions,
        positions,
        qm_charges,
        charges,
        cell_basis,
        qm_total_charge,
        switching_type=None,
        cutoff=None,
        swdist=None,
        pbc=None,
    ):
        """Create a Model object."""
        self.qm_positions = qm_positions
        self.positions = positions
        self.qm_charges = qm_charges
        self.charges = charges
        self.cell_basis = cell_basis
        self.qm_total_charge = qm_total_charge
        self.switching_type = switching_type

        # A cutoff is mandatory: fail early when it is missing.
        if cutoff is None:
            raise ValueError("cutoff is not set")
        self.cutoff = cutoff

        # Default switching distance is three quarters of the cutoff.
        self.swdist = cutoff * .75 if swdist is None else swdist

        # Periodic boundary conditions: honor an explicit flag, otherwise
        # infer them from a non-zero cell basis.
        if pbc is None:
            self.pbc = bool(np.any(self.cell_basis != 0.0))
        else:
            self.pbc = pbc

        self.elec = Elec(
            self.qm_positions,
            self.positions,
            self.qm_charges,
            self.charges,
            self.qm_total_charge,
            self.cell_basis,
            switching_type=self.switching_type,
            cutoff=self.cutoff,
            swdist=self.swdist,
            pbc=self.pbc,
        )

    def get_result(
        self,
        name,
        qm_energy,
        qm_energy_gradient,
        mm_esp,
    ):
        """Build a ``Result`` from QM outputs and attach it as ``self.<name>``."""
        near = self.elec.near_field
        result = Result(
            qm_energy=qm_energy,
            qm_energy_gradient=qm_energy_gradient,
            mm_esp=mm_esp,
            qm_charges=self.qm_charges,
            mm_charges=near.charges,
            near_field_mask=near.near_field_mask,
            scaling_factor=near.scaling_factor,
            scaling_factor_gradient=near.scaling_factor_gradient,
            qmmm_coulomb_tensor=near.qmmm_coulomb_tensor,
            qmmm_coulomb_tensor_gradient=near.qmmm_coulomb_tensor_gradient,
            weighted_qmmm_coulomb_tensor=near.weighted_qmmm_coulomb_tensor,
            weighted_qmmm_coulomb_tensor_inv=near.weighted_qmmm_coulomb_tensor_inv,
            elec=self.elec,
        )
        setattr(self, name, result)
| [
"numpy.any"
] | [((1040, 1070), 'numpy.any', 'np.any', (['(self.cell_basis != 0.0)'], {}), '(self.cell_basis != 0.0)\n', (1046, 1070), True, 'import numpy as np\n')] |
"""
Parallel-1-Serial Tests for the SimpleComm class
The 'P1S' Test Suite specificially tests whether the serial behavior is the
same as the 1-rank parallel behavior. If the 'Par' test suite passes with
various communicator sizes (1, 2, ...), then this suite should be run to make
sure that serial communication behaves consistently.
Copyright 2017, University Corporation for Atmospheric Research
See the LICENSE.txt file for details
"""
from __future__ import print_function
import unittest
import numpy as np
from mpi4py import MPI
from asaptools import simplecomm
from asaptools.partition import Duplicate, EqualStride
MPI_COMM_WORLD = MPI.COMM_WORLD
class SimpleCommP1STests(unittest.TestCase):
    """Check that a serial SimpleComm matches a 1-rank parallel SimpleComm."""

    def setUp(self):
        self.scomm = simplecomm.create_comm(serial=True)
        self.pcomm = simplecomm.create_comm(serial=False)
        self.size = MPI_COMM_WORLD.Get_size()
        self.rank = MPI_COMM_WORLD.Get_rank()

    def _check_allreduce(self, data, op):
        # Serial and parallel reductions must give the same answer.
        serial = self.scomm.allreduce(data, op)
        parallel = self.pcomm.allreduce(data, op)
        self.assertEqual(serial, parallel)

    def _check_partition(self, data, factory=None, involved=False, as_array=False):
        # Partition with both communicators; a fresh partition function is
        # constructed per communicator so no state is shared between them.
        skw = {}
        pkw = {}
        if factory is not None:
            skw['func'] = factory()
            pkw['func'] = factory()
        if involved:
            skw['involved'] = True
            pkw['involved'] = True
        serial = self.scomm.partition(data, **skw)
        parallel = self.pcomm.partition(data, **pkw)
        if as_array:
            np.testing.assert_array_equal(serial, parallel)
        else:
            self.assertEqual(serial, parallel)

    def testIsSerialLike(self):
        self.assertEqual(self.rank, 0, 'Rank not consistent with serial-like operation')
        self.assertEqual(self.size, 1, 'Size not consistent with serial-like operation')

    def testGetSize(self):
        self.assertEqual(self.scomm.get_size(), self.pcomm.get_size())

    def testIsManager(self):
        self.assertEqual(self.scomm.is_manager(), self.pcomm.is_manager())

    def testSumInt(self):
        self._check_allreduce(5, 'sum')

    def testSumList(self):
        self._check_allreduce(range(5), 'sum')

    def testSumDict(self):
        self._check_allreduce({'rank': self.rank, 'range': range(3 + self.rank)}, 'sum')

    def testSumArray(self):
        self._check_allreduce(np.arange(5), 'sum')

    def testMaxInt(self):
        self._check_allreduce(13 + self.rank, 'max')

    def testMaxList(self):
        self._check_allreduce(range(5 + self.rank), 'max')

    def testMaxDict(self):
        self._check_allreduce({'rank': self.rank, 'range': range(3 + self.rank)}, 'max')

    def testMaxArray(self):
        self._check_allreduce(np.arange(5 + self.rank), 'max')

    def testPartitionInt(self):
        self._check_partition(13 + self.rank, factory=Duplicate)

    def testPartitionIntInvolved(self):
        self._check_partition(13 + self.rank, factory=Duplicate, involved=True)

    def testPartitionList(self):
        self._check_partition(range(5 + self.rank), factory=EqualStride)

    def testPartitionListInvolved(self):
        self._check_partition(range(5 + self.rank), factory=EqualStride, involved=True)

    def testPartitionArray(self):
        self._check_partition(np.arange(2 + self.rank))

    def testPartitionArrayInvolved(self):
        self._check_partition(np.arange(2 + self.rank), involved=True, as_array=True)

    def testPartitionStrArray(self):
        self._check_partition(np.array([c for c in 'abcdefghijklmnopqrstuvwxyz']))

    def testPartitionStrArrayInvolved(self):
        self._check_partition(np.array([c for c in 'abcdefghijklmnopqrstuvwxyz']),
                              involved=True, as_array=True)

    def testRationError(self):
        self.assertRaises(RuntimeError, self.scomm.ration, 10)
        self.assertRaises(RuntimeError, self.pcomm.ration, 10)

    def testCollectError(self):
        self.assertRaises(RuntimeError, self.scomm.collect, 10)
        self.assertRaises(RuntimeError, self.pcomm.collect, 10)
if __name__ == '__main__':
    # Python 2 fallback for the in-memory stream used to capture output.
    try:
        from cStringIO import StringIO
    except ImportError:
        from io import StringIO

    # Run the suite with its output captured per rank.
    stream = StringIO()
    suite = unittest.TestLoader().loadTestsFromTestCase(SimpleCommP1STests)
    unittest.TextTestRunner(stream=stream).run(suite)

    # Gather every rank's captured output and print it from rank 0.
    MPI_COMM_WORLD.Barrier()
    all_output = MPI_COMM_WORLD.gather(stream.getvalue())
    if MPI_COMM_WORLD.Get_rank() == 0:
        for rank, output in enumerate(all_output):
            print('RESULTS FOR RANK ' + str(rank) + ':')
            print(str(output))
| [
"io.StringIO",
"asaptools.partition.Duplicate",
"unittest.TextTestRunner",
"numpy.testing.assert_array_equal",
"asaptools.partition.EqualStride",
"numpy.array",
"numpy.arange",
"unittest.TestLoader",
"asaptools.simplecomm.create_comm"
] | [((5696, 5706), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5704, 5706), False, 'from io import StringIO\n'), ((752, 787), 'asaptools.simplecomm.create_comm', 'simplecomm.create_comm', ([], {'serial': '(True)'}), '(serial=True)\n', (774, 787), False, 'from asaptools import simplecomm\n'), ((809, 845), 'asaptools.simplecomm.create_comm', 'simplecomm.create_comm', ([], {'serial': '(False)'}), '(serial=False)\n', (831, 845), False, 'from asaptools import simplecomm\n'), ((2132, 2144), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2141, 2144), True, 'import numpy as np\n'), ((2992, 3016), 'numpy.arange', 'np.arange', (['(5 + self.rank)'], {}), '(5 + self.rank)\n', (3001, 3016), True, 'import numpy as np\n'), ((4240, 4264), 'numpy.arange', 'np.arange', (['(2 + self.rank)'], {}), '(2 + self.rank)\n', (4249, 4264), True, 'import numpy as np\n'), ((4456, 4480), 'numpy.arange', 'np.arange', (['(2 + self.rank)'], {}), '(2 + self.rank)\n', (4465, 4480), True, 'import numpy as np\n'), ((4609, 4656), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sresult', 'presult'], {}), '(sresult, presult)\n', (4638, 4656), True, 'import numpy as np\n'), ((4710, 4761), 'numpy.array', 'np.array', (["[c for c in 'abcdefghijklmnopqrstuvwxyz']"], {}), "([c for c in 'abcdefghijklmnopqrstuvwxyz'])\n", (4718, 4761), True, 'import numpy as np\n'), ((4956, 5007), 'numpy.array', 'np.array', (["[c for c in 'abcdefghijklmnopqrstuvwxyz']"], {}), "([c for c in 'abcdefghijklmnopqrstuvwxyz'])\n", (4964, 5007), True, 'import numpy as np\n'), ((5136, 5183), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sresult', 'presult'], {}), '(sresult, presult)\n', (5165, 5183), True, 'import numpy as np\n'), ((5719, 5740), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (5738, 5740), False, 'import unittest\n'), ((5787, 5827), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'stream': 'mystream'}), '(stream=mystream)\n', (5810, 
5827), False, 'import unittest\n'), ((3277, 3288), 'asaptools.partition.Duplicate', 'Duplicate', ([], {}), '()\n', (3286, 3288), False, 'from asaptools.partition import Duplicate, EqualStride\n'), ((3340, 3351), 'asaptools.partition.Duplicate', 'Duplicate', ([], {}), '()\n', (3349, 3351), False, 'from asaptools.partition import Duplicate, EqualStride\n'), ((3517, 3528), 'asaptools.partition.Duplicate', 'Duplicate', ([], {}), '()\n', (3526, 3528), False, 'from asaptools.partition import Duplicate, EqualStride\n'), ((3595, 3606), 'asaptools.partition.Duplicate', 'Duplicate', ([], {}), '()\n', (3604, 3606), False, 'from asaptools.partition import Duplicate, EqualStride\n'), ((3786, 3799), 'asaptools.partition.EqualStride', 'EqualStride', ([], {}), '()\n', (3797, 3799), False, 'from asaptools.partition import Duplicate, EqualStride\n'), ((3851, 3864), 'asaptools.partition.EqualStride', 'EqualStride', ([], {}), '()\n', (3862, 3864), False, 'from asaptools.partition import Duplicate, EqualStride\n'), ((4037, 4050), 'asaptools.partition.EqualStride', 'EqualStride', ([], {}), '()\n', (4048, 4050), False, 'from asaptools.partition import Duplicate, EqualStride\n'), ((4117, 4130), 'asaptools.partition.EqualStride', 'EqualStride', ([], {}), '()\n', (4128, 4130), False, 'from asaptools.partition import Duplicate, EqualStride\n')] |
import warnings
import numpy as np
import pandas
from . import io
from astropy import time
def get_sample():
    """Convenience shortcut for ``Sample.load()``."""
    return Sample.load()
class Sample():
    def __init__(self, data=None):
        """Build the sample around the target ``data`` table (see ``set_data``)."""
        self.set_data(data)
    @classmethod
    def load(cls, default_salt2=True):
        """Load a Sample instance built from ``io.get_targets_data()``.

        Parameters
        ----------
        default_salt2 : bool
            NOTE(review): currently unused by this method — kept for API
            compatibility; confirm the intended behavior.
        """
        data = io.get_targets_data()
        return cls(data=data)
# ------- #
# LOADER #
# ------- #
def load_hostdata(self):
""" load the host data using io.get_host_data(). This is made automatically upon hostdata call. """
self.set_hostdata( io.get_host_data() )
def load_phasedf(self, min_detection=5, groupby='filter', client=None, rebuild=False, **kwargs):
""" Load the phasedf. This is made automatically upton phasedf call.
If this is the first time ever you call this, the phasedf has to be built, so it takes ~45s.
Once built, the dataframe is stored such that, next time, it is directly loaded.
Use rebuild=True to bypass the automatic loading and rebuild the dataframe.
Parameters
----------
min_detection: [float] -optional-
minimal signal to noise ratio for a point to be considered as 'detected'
groupby: [string or list of] -optional-
data column(s) to group the lightcurve data and measuring the count statistics.
example: filter, [filter, fieldid]
client: [dask.Client] -optional-
if a dask.distributed.Client instance is given, this will use dask.
Otherwise a basic for loop (slower) is used.
rebuild: [bool] -optional-
force the dataframe to be rebuilt.
**kwargs goes to self.build_phase_coverage()
Returns
-------
None
"""
if not rebuild:
phasedf = io.get_phase_coverage(load=True, warn=False)
if phasedf is None:
rebuild = True
if rebuild:
phasedf = self.build_phase_coverage(min_detection=min_detection,
groupby=groupby,
client=client, store=True, **kwargs)
self._phasedf = phasedf
def load_fiedid(self):
""" compute the fields containing the targets using
ztfquery.fields.get_fields_containing_target().
This take quite some time.
the 'fieldid' entry is added to self.data
"""
from ztfquery import fields
fieldid = [fields.get_fields_containing_target(s_["ra"],s_["dec"])
for i_,s_ in self.data.iterrows()]
self.data["fieldid"] = fieldid
# ------- #
# SETTER #
# ------- #
def set_data(self, data):
""" attach to this instance the target data.
= Most likely you should not use this method directly. =
use sample = Sample.load()
"""
data["t0day"] = data["t0"].astype("int")
self._data = data
def set_hostdata(self, hostdata):
""" attach to this instance the host data.
= Most likely you should not use this method directly. =
use sample.load_hostdata() or simply
sample.hostdata that automatically load this corrent hostdata.
"""
self._hostdata = hostdata
def merge_to_data(self, dataframe, how="outer", **kwargs):
""" Merge the given dataframe with self.data.
The merged dataframe will replace self.data
"""
self._data = pandas.merge(self.data, dataframe, how=how,
**{**dict(left_index=True, right_index=True),
**kwargs})
# ------- #
# GETTER #
# ------- #
def get_data(self, clean_t0nan=True,
t0_range=None, x1_range=None, c_range=None,
redshift_range=None, z_quality=None,
t0_err_range=None, c_err_range=None, x1_err_range=None,
in_targetlist=None, goodcoverage=None, coverage_prop={},
query=None, data=None):
"""
*_range: [None or [min, max]] -optional-
cut to be applied to the data for
t0, x1, c, (and there error) and redshift.
boundaries are mandatory. For instance, redshift range lower than 0.06
should be: redshift_range = (0, 0.06).
= see below for t0 format =
t0_range: [None or [tmin,tmax]] -optional-
Should be a format automatically understood by astropy.time.Time
e.g. t0_range=["2018-04-01","2020-10-01"]
in_targetlist: [list of strings] -optional-
The target must be in this list
goodcoverage: [None or bool] -optional-
Select the data given the lc phase coverage
- None: no cut
- True: only good lc kept
- False: only bad lc kept (good lc discarded)
This uses self.get_goodcoverage_targets(**coverage_prop)
coverage_prop: [dict] -optional-
kwargs passed to self.get_goodcoverage_targets
= used only if goodcoverage is not None =
query: [string] -optional-
any additional query to be given to data.query({query}).
This are SQL-like format applied to the colums.
See pandas.DataFrame.query()
Returns
-------
DataFrame (sub part or copy of self.data)
"""
if clean_t0nan:
data = self.data[~self.data["t0"].isna()]
else:
data = self.data.copy()
# - Time Range
if t0_range is not None:
t0_start = time.Time(t0_range[0]).mjd
t0_end = time.Time(t0_range[1]).mjd
data = data[data["t0"].between(t0_start, t0_end)]
# LC Cuts
# - stretch (x1) range
if x1_range is not None:
data = data[data["x1"].between(*x1_range)]
# - color (c) range
if c_range is not None:
data = data[data["c"].between(*c_range)]
# - t0 errors range
if t0_err_range is not None:
data = data[data["t0_err"].between(*t0_err_range)]
# - stretch errors (x1) range
if x1_err_range is not None:
data = data[data["x1_err"].between(*x1_err_range)]
# - color errors (c) range
if c_err_range is not None:
data = data[data["c_err"].between(*c_err_range)]
# Redshift Cuts
# - Redshift range
if redshift_range is not None:
data = data[data["redshift"].between(*redshift_range)]
# - redshift origin
if z_quality is not None and z_quality not in ["any","all","*"]:
data = data[data["z_quality"].isin(np.atleast_1d(z_quality))]
# Target cuts
# - in given list
if in_targetlist is not None:
data = data.loc[np.asarray(in_targetlist)[np.in1d(in_targetlist, data.index.astype("string"))] ]
# - special good lc list.
if goodcoverage is not None:
good_covarege_targets = self.get_goodcoverage_targets(**coverage_prop)
# doing it with np.in1d to make sure all matches and not some are already missing
flag_goodcoverage = np.asarray(good_covarege_targets)[np.in1d(good_covarege_targets, data.index.astype("string"))]
if goodcoverage:
data = data.loc[flag_goodcoverage]
else:
data = data.loc[~flag_goodcoverage]
# Additional Query
if query:
data = data.query(query)
return data
# LightCurve
def get_target_lightcurve(self, name):
""" Get the {name} LightCurve object """
from . import lightcurve
return lightcurve.LightCurve.from_name(name)
# Spectrum
def get_target_spectra(self, name):
""" Get a list with all the Spectra for the given object """
from . import spectroscopy
return spectroscopy.Spectrum.from_name(name)
# Extra
def get_goodcoverage_targets(self,
n_early_bands=">=2",
n_late_bands=">=2",
n_points=">=7",
premax_range=[-15,0],
postmax_range=[0,30],
phase_range=[-15,30],
**kwargs):
""" kwargs should have the same format as the n_early_point='>=2' for instance.
None means no constrain, like n_bands=None means 'n_bands' is not considered.
"""
query = {**dict(n_early_bands=n_early_bands, n_late_bands=n_late_bands,
n_points=n_points),
**kwargs}
df_query = " and ".join([f"{k}{v}" for k,v in query.items() if v is not None])
phase_coverage = self.get_phase_coverage(premax_range=premax_range,
postmax_range=postmax_range,
phase_range=phase_range)
return phase_coverage.query(df_query).index.astype("string")
    def get_phase_coverage(self,premax_range=[-15,0],
                               postmax_range=[0,30],
                               phase_range=[-15,30], min_det_perband=1):
        """ Count the detected lightcurve points per target (and per band)
        within the full, pre-max and post-max phase windows.

        A band contributes to the 'n_*_bands' columns when it has at least
        `min_det_perband` detections inside the window.

        Returns
        -------
        DataFrame indexed by target name with integer count columns.
        NOTE(review): the per-filter columns assume the p48g/p48r/p48i bands
        all appear in self.phasedf -- verify for partial datasets, as .xs()
        raises KeyError on a missing band.
        """
        # All
        phases = self.phasedf[self.phasedf.between(*phase_range)].reset_index().rename({"level_0":"name"},axis=1)
        n_points = phases.groupby(["name"]).size().to_frame("n_points")
        n_bands = (phases.groupby(["name", "filter"]).size()>=min_det_perband
                  ).groupby(level=[0]).sum().to_frame("n_bands")
        # Pre-Max
        # - AnyBand
        premax = self.phasedf[self.phasedf.between(*premax_range)].reset_index().rename({"level_0":"name"},axis=1)
        n_early_points = premax.groupby(["name"]).size().to_frame("n_early_points")
        n_early_bands = (premax.groupby(["name", "filter"]).size()>=min_det_perband
                        ).groupby(level=[0]).sum().to_frame("n_early_bands")
        # - Per filters
        n_early_points_perfilter = premax.groupby(["name", "filter"]).size()
        n_early_points_g = n_early_points_perfilter.xs("p48g", level=1).to_frame("n_early_points_p48g")
        n_early_points_r = n_early_points_perfilter.xs("p48r", level=1).to_frame("n_early_points_p48r")
        n_early_points_i = n_early_points_perfilter.xs("p48i", level=1).to_frame("n_early_points_p48i")
        # Post-Max
        # - AnyBand
        postmax = self.phasedf[self.phasedf.between(*postmax_range)].reset_index().rename({"level_0":"name"},axis=1)
        n_late_points = postmax.groupby(["name"]).size().to_frame("n_late_points")
        n_late_bands = (postmax.groupby(["name", "filter"]).size()>=min_det_perband
                       ).groupby(level=[0]).sum().to_frame("n_late_bands")
        # - Per filters
        n_late_points_perfilter = postmax.groupby(["name", "filter"]).size()
        n_late_points_g = n_late_points_perfilter.xs("p48g", level=1).to_frame("n_late_points_p48g")
        n_late_points_r = n_late_points_perfilter.xs("p48r", level=1).to_frame("n_late_points_p48r")
        n_late_points_i = n_late_points_perfilter.xs("p48i", level=1).to_frame("n_late_points_p48i")
        # targets missing from a window (e.g. no pre-max point) become 0
        return pandas.concat([n_points,n_bands,
                              n_early_points,n_early_bands,n_late_points, n_late_bands,
                              n_early_points_g, n_early_points_r, n_early_points_i,
                              n_late_points_g, n_late_points_r, n_late_points_i], axis=1).fillna(0).astype(int)
def build_phase_coverage(self, min_detection=5, groupby='filter', client=None, store=True, **kwargs):
"""
time:
- Dask: 45s on a normal laptop | 4 cores
- No Dask: 60s on a normal laptop | 4 cores
"""
import pandas
import dask
from . import lightcurve
phases = []
datalc = self.get_data()
datalc = datalc[datalc["redshift"].between(-0.1,0.2)]
warnings.warn("building phase coverage takes ~30s to 1min.")
# - Without Dask
if client is None:
warnings.warn("loading without Dask (client is None) ; it will be slow")
names_ok = []
for name in datalc.index:
try:
dt = lightcurve.LightCurve.from_name(name)
phases.append( dt.get_obsphase(min_detection=min_detection, groupby=groupby, **kwargs))
names_ok.append(name)
except:
warnings.warn(f"get_obsphase did not work for {name}")
continue
phasedf = pandas.concat(phases, keys=names_ok)
# - With Dask
else:
for name in datalc.index:
dt = dask.delayed(lightcurve.LightCurve.from_name)(name)
phases.append( dt.get_obsphase(min_detection=min_detection, groupby=groupby, **kwargs)
)
fphases = client.compute(phases)
data_ = client.gather(fphases, "skip") # wait until all is done
names = datalc.index[[i for i,f_ in enumerate(fphases) if f_.status=="finished"]]
phasedf = pandas.concat(data_, keys=names)
phasedf_exploded = phasedf.explode()
if store:
filepath = io.get_phase_coverage(load=False)
phasedf_exploded.to_csv(filepath)
return phasedf_exploded
# ------- #
# PLOTTER #
# ------- #
def show_discoveryhist(self, ax=None, daymax=15, linecolor="C1", **kwargs):
""" """
from matplotlib.colors import to_rgba
datasalt = self.get_data(clean_t0nan=True)
if ax is None:
import matplotlib.pyplot as mpl
fig = mpl.figure(figsize=[6,3])
ax = fig.add_axes([0.1,0.2,0.8,0.7])
else:
fig = ax.figure
prop = dict(fill=True, histtype="bar",
density=False, facecolor=to_rgba("C0", 0.1), edgecolor="C0",
align="left", zorder=3)
_ = ax.hist(datasalt.groupby("t0day").size(), range=[0,15], bins=15,
**{**prop,**kwargs})
if linecolor != "None":
ax.axvline(3.4, color="C1", zorder=5, lw=1.5, ls="--")
# xx = np.arange(0, daymax)
#ax.scatter(xx[1:], stats.poisson.pmf(xx[1:], mu=3.5)*1050,
# s=50, color="C1", zorder=5)
#ax.set_yscale("log")
_ = ax.set_xticks(np.arange(daymax))
ax.set_xlim(0)
clearwhich = ["left","right","top"] # "bottom"
[ax.spines[which].set_visible(False) for which in clearwhich]
ax.tick_params("y", labelsize="x-small")
ax.set_yticks([0,100,200])
ax.set_xlabel("Number of SN Ia per day")
return fig
    def show_discoveryevol(self, ax=None, t0_range=["2018-04-01","2021-01-01"],
                           typed_color="C0", quality_color="goldenrod",
                           xformat=True, dataprop={},
                           **kwargs):
        """ Plot the cumulative number of discovered SNe Ia as a function of time.

        Four curves are drawn: all typed targets (dashed), those with good
        lightcurve coverage (dashed, filled), those with z_quality=2 (solid),
        and the combination of both cuts (solid, filled).

        Parameters
        ----------
        ax: [matplotlib Axes] -optional-
            axes to draw in; a new figure is created if None.
        t0_range: [list of two date strings] -optional-
            time window passed to get_data().
        typed_color, quality_color: [str] -optional-
            colors of the 'typed' and 'quality-cut' curves.
        xformat: [bool] -optional-
            apply concise matplotlib date formatting to the x-axis.
        dataprop: [dict] -optional-
            extra kwargs passed to every get_data() call.
        **kwargs: NOTE(review): currently unused in this method.

        Returns
        -------
        matplotlib Figure
        """
        from matplotlib.colors import to_rgba
        # - Data
        datasalt_all = self.get_data(clean_t0nan=True,
                                     t0_range=t0_range,
                                     **dataprop)
        datasalt_lccut = self.get_data(clean_t0nan=True,
                                       t0_range=t0_range,
                                       goodcoverage=True,
                                       **dataprop)
        datasalt_zcut = self.get_data(clean_t0nan=True,
                                      t0_range=t0_range,
                                      z_quality=2,
                                      **dataprop)
        datasalt_zcut_lccut = self.get_data(clean_t0nan=True,
                                            t0_range=t0_range,
                                            z_quality=2,
                                            goodcoverage=True,
                                            **dataprop)
        # - Figue
        if ax is None:
            import matplotlib.pyplot as mpl
            fig = mpl.figure(figsize=[8,4])
            ax = fig.add_subplot(111)
        else:
            fig = ax.figure
        # - Internal
        def _show_single_(gb_cumsize, mplfunc="fill_between",
                          add_text=True, text_color=None, **kwargs_):
            """ Draw one cumulative curve; annotate its final count if add_text. """
            time_ = time.Time(gb_cumsize.index, format="mjd").datetime
            values_ = gb_cumsize.values
            _ = getattr(ax,mplfunc)(time_, values_, **kwargs_)
            if add_text:
                ax.text(time_[-1], values_[-1],f" {values_[-1]}",
                        va="center",ha="left", color=text_color)
        # Show all
        # each curve is drawn twice: once for the fill, once for the line/label
        _show_single_(datasalt_all.groupby("t0day").size().cumsum(),
                      lw=2, ls="None", facecolor="w", edgecolor="None",
                      add_text=False)
        _show_single_(datasalt_all.groupby("t0day").size().cumsum(),
                      lw=2, ls="--", color=typed_color, mplfunc="plot",
                      text_color=typed_color)
        _show_single_(datasalt_lccut.groupby("t0day").size().cumsum(),
                      lw=2, ls="None", facecolor=to_rgba(quality_color,0.2),
                      edgecolor="None", add_text=False)
        _show_single_(datasalt_lccut.groupby("t0day").size().cumsum(),
                      lw=2, ls="--", color=quality_color, mplfunc="plot",
                      text_color=quality_color)
        _show_single_(datasalt_zcut.groupby("t0day").size().cumsum(),
                      mplfunc="plot",
                      lw=1, ls="-", color=typed_color,
                      text_color=typed_color)
        _show_single_(datasalt_zcut_lccut.groupby("t0day").size().cumsum(),
                      lw=2, ls="-", facecolor=to_rgba(quality_color,0.2),
                      edgecolor="None", add_text=False)
        _show_single_(datasalt_zcut_lccut.groupby("t0day").size().cumsum(),
                      lw=2, ls="-", color=quality_color, mplfunc="plot",
                      text_color=quality_color)
        # - Out Formating
        if xformat:
            from matplotlib import dates as mdates
            locator = mdates.AutoDateLocator()
            formatter = mdates.ConciseDateFormatter(locator)
            ax.xaxis.set_major_locator(locator)
            ax.xaxis.set_major_formatter(formatter)
        clearwhich = ["left","right","top"] # "bottom"
        [ax.spines[which].set_visible(False) for which in clearwhich]
        # ax.set_title("ZTF-1 Ia Statistics")
        ax.set_ylabel("Number of Type Ia Supernovae")
        ax.set_ylim(bottom=0)
        ax.tick_params("y", labelsize="small")
        return fig
# =============== #
# Properties #
# =============== #
    @property
    def data(self):
        """ DataFrame of the target parameters (see set_data()). """
        return self._data
    @property
    def hostdata(self):
        """ DataFrame of the host properties; loaded on first access. """
        if not hasattr(self, "_hostdata"):
            self.load_hostdata()
        return self._hostdata
    @property
    def phasedf(self):
        """ Phase-coverage dataframe; loaded on first access. """
        if not hasattr(self, "_phasedf"):
            self.load_phasedf()
        return self._phasedf
| [
"matplotlib.colors.to_rgba",
"dask.delayed",
"ztfquery.fields.get_fields_containing_target",
"astropy.time.Time",
"numpy.asarray",
"matplotlib.dates.ConciseDateFormatter",
"matplotlib.pyplot.figure",
"matplotlib.dates.AutoDateLocator",
"numpy.arange",
"warnings.warn",
"numpy.atleast_1d",
"pand... | [((12476, 12536), 'warnings.warn', 'warnings.warn', (['"""building phase coverage takes ~30s to 1min."""'], {}), "('building phase coverage takes ~30s to 1min.')\n", (12489, 12536), False, 'import warnings\n'), ((2692, 2748), 'ztfquery.fields.get_fields_containing_target', 'fields.get_fields_containing_target', (["s_['ra']", "s_['dec']"], {}), "(s_['ra'], s_['dec'])\n", (2727, 2748), False, 'from ztfquery import fields\n'), ((12601, 12673), 'warnings.warn', 'warnings.warn', (['"""loading without Dask (client is None) ; it will be slow"""'], {}), "('loading without Dask (client is None) ; it will be slow')\n", (12614, 12673), False, 'import warnings\n'), ((13139, 13175), 'pandas.concat', 'pandas.concat', (['phases'], {'keys': 'names_ok'}), '(phases, keys=names_ok)\n', (13152, 13175), False, 'import pandas\n'), ((13709, 13741), 'pandas.concat', 'pandas.concat', (['data_'], {'keys': 'names'}), '(data_, keys=names)\n', (13722, 13741), False, 'import pandas\n'), ((14299, 14325), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {'figsize': '[6, 3]'}), '(figsize=[6, 3])\n', (14309, 14325), True, 'import matplotlib.pyplot as mpl\n'), ((15046, 15063), 'numpy.arange', 'np.arange', (['daymax'], {}), '(daymax)\n', (15055, 15063), True, 'import numpy as np\n'), ((16759, 16785), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {'figsize': '[8, 4]'}), '(figsize=[8, 4])\n', (16769, 16785), True, 'import matplotlib.pyplot as mpl\n'), ((18899, 18923), 'matplotlib.dates.AutoDateLocator', 'mdates.AutoDateLocator', ([], {}), '()\n', (18921, 18923), True, 'from matplotlib import dates as mdates\n'), ((18948, 18984), 'matplotlib.dates.ConciseDateFormatter', 'mdates.ConciseDateFormatter', (['locator'], {}), '(locator)\n', (18975, 18984), True, 'from matplotlib import dates as mdates\n'), ((5895, 5917), 'astropy.time.Time', 'time.Time', (['t0_range[0]'], {}), '(t0_range[0])\n', (5904, 5917), False, 'from astropy import time\n'), ((5943, 5965), 'astropy.time.Time', 'time.Time', 
(['t0_range[1]'], {}), '(t0_range[1])\n', (5952, 5965), False, 'from astropy import time\n'), ((7535, 7568), 'numpy.asarray', 'np.asarray', (['good_covarege_targets'], {}), '(good_covarege_targets)\n', (7545, 7568), True, 'import numpy as np\n'), ((14523, 14541), 'matplotlib.colors.to_rgba', 'to_rgba', (['"""C0"""', '(0.1)'], {}), "('C0', 0.1)\n", (14530, 14541), False, 'from matplotlib.colors import to_rgba\n'), ((17062, 17103), 'astropy.time.Time', 'time.Time', (['gb_cumsize.index'], {'format': '"""mjd"""'}), "(gb_cumsize.index, format='mjd')\n", (17071, 17103), False, 'from astropy import time\n'), ((17885, 17912), 'matplotlib.colors.to_rgba', 'to_rgba', (['quality_color', '(0.2)'], {}), '(quality_color, 0.2)\n', (17892, 17912), False, 'from matplotlib.colors import to_rgba\n'), ((18497, 18524), 'matplotlib.colors.to_rgba', 'to_rgba', (['quality_color', '(0.2)'], {}), '(quality_color, 0.2)\n', (18504, 18524), False, 'from matplotlib.colors import to_rgba\n'), ((7019, 7043), 'numpy.atleast_1d', 'np.atleast_1d', (['z_quality'], {}), '(z_quality)\n', (7032, 7043), True, 'import numpy as np\n'), ((7161, 7186), 'numpy.asarray', 'np.asarray', (['in_targetlist'], {}), '(in_targetlist)\n', (7171, 7186), True, 'import numpy as np\n'), ((13272, 13317), 'dask.delayed', 'dask.delayed', (['lightcurve.LightCurve.from_name'], {}), '(lightcurve.LightCurve.from_name)\n', (13284, 13317), False, 'import dask\n'), ((11700, 11919), 'pandas.concat', 'pandas.concat', (['[n_points, n_bands, n_early_points, n_early_bands, n_late_points,\n n_late_bands, n_early_points_g, n_early_points_r, n_early_points_i,\n n_late_points_g, n_late_points_r, n_late_points_i]'], {'axis': '(1)'}), '([n_points, n_bands, n_early_points, n_early_bands,\n n_late_points, n_late_bands, n_early_points_g, n_early_points_r,\n n_early_points_i, n_late_points_g, n_late_points_r, n_late_points_i],\n axis=1)\n', (11713, 11919), False, 'import pandas\n'), ((13016, 13070), 'warnings.warn', 'warnings.warn', 
(['f"""get_obsphase did not work for {name}"""'], {}), "(f'get_obsphase did not work for {name}')\n", (13029, 13070), False, 'import warnings\n')] |
import os
import click
import datasheets
import numpy as np
import pandas as pd
from pulp import LpVariable, LpProblem, LpMaximize, lpSum, PULP_CBC_CMD
import yaml
class Optimizer:
    """Integer program assigning movies to a fixed number of screens.

    Maximizes total revenue subject to a budget, using one integer decision
    variable per movie (how many screens it plays on). Leaving a screen empty
    is modelled upstream as a pseudo-movie named 'empty'.
    """

    def __init__(self, input_data, num_screens, budget):
        """
        Parameters
        ----------
        input_data: DataFrame with 'movie', 'revenue' and 'cost' columns.
        num_screens: total number of screens to allocate.
        budget: maximum total cost of the allocation.
        """
        self.input_data = input_data
        self.num_screens = num_screens
        self.budget = budget
        self.movie_counts = None
        self.problem = None

    def create_vars(self):
        """Define one integer decision variable (0..num_screens) per movie."""
        self.movie_counts = {}
        for _, row in self.input_data.iterrows():
            var = LpVariable(f'{row.movie}_counts', cat='Integer',
                             lowBound=0, upBound=self.num_screens)
            self.movie_counts[row.movie] = var

    def get_objective_function(self, solved=False):
        """Total revenue: an LP expression before solving, a plain number after.

        FIX: the two branches were swapped -- lpSum now builds the (unsolved)
        LP expression and np.sum totals the solved float values.
        """
        objective = []
        for _, row in self.input_data.iterrows():
            val = _get_val(self.movie_counts[row.movie], solved)
            objective.append(val * row.revenue)
        return np.sum(objective) if solved else lpSum(objective)

    def get_constraints(self):
        """Return (constraint, name) pairs: all screens assigned, budget kept."""
        constraints = []
        constraint = (
            lpSum(self.movie_counts.values()) == self.num_screens,
            'every screen must be assigned'
        )
        constraints.append(constraint)
        total_cost = [self.movie_counts[row.movie] * row.cost
                      for _, row in self.input_data.iterrows()]
        constraints.append((lpSum(total_cost) <= self.budget, 'Limited budget'))
        return constraints

    def get_solution(self, solved):
        """Generate a string that contains the solution information."""
        msg = []
        if solved:
            objective_value = self.get_objective_function(solved)
            msg.append(f'Optimization successful! '
                       f'Total Revenue = {objective_value}')
            for _, row in self.input_data.iterrows():
                val = self.movie_counts[row.movie].varValue
                if row.movie == 'empty':
                    msg.append(f'Leave {int(val)} screens empty')
                else:
                    msg.append(f'Movie {row.movie} is on {int(val)} screens')
        else:
            msg.append('Optimization algorithm failed!')
        return '\n'.join(msg)

    def build_allocation(self):
        """Return a per-movie allocation DataFrame with a 'total' summary row."""
        movie = []
        num_screens = []
        cost = []
        revenue = []
        for _, row in self.input_data.iterrows():
            val = self.movie_counts[row.movie].varValue
            movie.append(row.movie)
            num_screens.append(val)
            cost.append(row.cost * val)
            revenue.append(row.revenue * val)
        df = pd.DataFrame({'movie': movie, 'num_screens': num_screens,
                           'revenue': revenue, 'cost': cost})
        last_row = pd.DataFrame(
            {'movie': ['total'], 'num_screens': [df.num_screens.sum()],
             'revenue': [df.revenue.sum()], 'cost': [df.cost.sum()]})
        df = pd.concat([df, last_row], axis=0)
        return df.set_index('movie', drop=True)

    def run(self):
        """Build and solve the LP; return True when it solved to optimality."""
        self.problem = LpProblem('FML', LpMaximize)
        self.create_vars()
        self.problem += self.get_objective_function(solved=False)
        for constraint in self.get_constraints():
            self.problem += constraint
        status = self.problem.solve(PULP_CBC_CMD(msg=3))
        return status == 1
def _get_val(var, solved):
return var.varValue if solved else var
def parse_conf(conf):
    """Read the YAML config file and export the datasheets credential paths.

    Side effect: sets the DATASHEETS_SECRETS_PATH / DATASHEETS_SERVICE_PATH
    environment variables expected by the datasheets library.

    Returns
    -------
    (workbook name, number of screens, cost of an empty screen, budget)
    """
    with open(conf, 'r') as f:
        # FIX: yaml.load without a Loader is unsafe (executes arbitrary YAML
        # tags) and raises TypeError on PyYAML >= 6; safe_load is the
        # correct call for plain config data.
        conf = yaml.safe_load(f)
    os.environ['DATASHEETS_SECRETS_PATH'] = conf['creds_file']
    os.environ['DATASHEETS_SERVICE_PATH'] = conf['service_file']
    workbook = conf['workbook']
    num_screens = conf['num_screens']
    empty_screen_cost = conf['empty_screen_cost']
    budget = conf['budget']
    return workbook, num_screens, empty_screen_cost, budget
def load_data(workbook):
    """Return the contents of the workbook's 'inputs' tab as a DataFrame."""
    return workbook.fetch_tab('inputs').fetch_data()
def run_pipeline(conf='conf.yml'):
    """
    Pull inputs from google sheets, solve the allocation problem, and write the
    solution back to the sheet.

    Returns the human-readable solution message string.
    """
    workbook, num_screens, empty_screen_cost, budget = parse_conf(conf)
    # Pull data
    client = datasheets.Client(service=True)
    workbook = client.fetch_workbook(workbook)
    input_data = load_data(workbook)
    # leaving a screen empty is modelled as a pseudo-movie with zero revenue
    empty_screen = pd.DataFrame({'movie': ['empty'], 'revenue': [0],
                                 'cost': [empty_screen_cost]})
    input_data = pd.concat([input_data, empty_screen], axis=0)
    # Define and solve allocation problem
    optimizer = Optimizer(input_data, num_screens, budget)
    solved = optimizer.run()
    solution_msg = optimizer.get_solution(solved)
    print(solution_msg)
    if solved:
        # Write the results to google sheet.
        allocation = optimizer.build_allocation()
        tab = workbook.fetch_tab('outputs')
        tab.insert_data(allocation)
    return solution_msg
# Command-line entry point; --conf points at the YAML configuration file.
# (No docstring on main: click would surface it as the CLI help text.)
@click.command()
@click.option('--conf', default='conf.yml')
def main(conf):
    run_pipeline(conf)

if __name__ == '__main__':
    main()
| [
"pandas.DataFrame",
"pulp.lpSum",
"yaml.load",
"numpy.sum",
"pulp.LpVariable",
"click.option",
"click.command",
"datasheets.Client",
"pulp.LpProblem",
"pandas.concat",
"pulp.PULP_CBC_CMD"
] | [((5203, 5218), 'click.command', 'click.command', ([], {}), '()\n', (5216, 5218), False, 'import click\n'), ((5220, 5262), 'click.option', 'click.option', (['"""--conf"""'], {'default': '"""conf.yml"""'}), "('--conf', default='conf.yml')\n", (5232, 5262), False, 'import click\n'), ((4469, 4500), 'datasheets.Client', 'datasheets.Client', ([], {'service': '(True)'}), '(service=True)\n', (4486, 4500), False, 'import datasheets\n'), ((4605, 4684), 'pandas.DataFrame', 'pd.DataFrame', (["{'movie': ['empty'], 'revenue': [0], 'cost': [empty_screen_cost]}"], {}), "({'movie': ['empty'], 'revenue': [0], 'cost': [empty_screen_cost]})\n", (4617, 4684), True, 'import pandas as pd\n'), ((4735, 4780), 'pandas.concat', 'pd.concat', (['[input_data, empty_screen]'], {'axis': '(0)'}), '([input_data, empty_screen], axis=0)\n', (4744, 4780), True, 'import pandas as pd\n'), ((2738, 2834), 'pandas.DataFrame', 'pd.DataFrame', (["{'movie': movie, 'num_screens': num_screens, 'revenue': revenue, 'cost': cost}"], {}), "({'movie': movie, 'num_screens': num_screens, 'revenue':\n revenue, 'cost': cost})\n", (2750, 2834), True, 'import pandas as pd\n'), ((2998, 3118), 'pandas.DataFrame', 'pd.DataFrame', (["{'movie': ['total'], 'num_screens': [total_screens], 'revenue': [\n total_revenue], 'cost': [total_cost]}"], {}), "({'movie': ['total'], 'num_screens': [total_screens], 'revenue':\n [total_revenue], 'cost': [total_cost]})\n", (3010, 3118), True, 'import pandas as pd\n'), ((3154, 3187), 'pandas.concat', 'pd.concat', (['[df, last_row]'], {'axis': '(0)'}), '([df, last_row], axis=0)\n', (3163, 3187), True, 'import pandas as pd\n'), ((3295, 3323), 'pulp.LpProblem', 'LpProblem', (['"""FML"""', 'LpMaximize'], {}), "('FML', LpMaximize)\n", (3304, 3323), False, 'from pulp import LpVariable, LpProblem, LpMaximize, lpSum, PULP_CBC_CMD\n'), ((3756, 3768), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (3765, 3768), False, 'import yaml\n'), ((592, 683), 'pulp.LpVariable', 'LpVariable', 
(['f"""{row.movie}_counts"""'], {'cat': '"""Integer"""', 'lowBound': '(0)', 'upBound': 'self.num_screens'}), "(f'{row.movie}_counts', cat='Integer', lowBound=0, upBound=self.\n num_screens)\n", (602, 683), False, 'from pulp import LpVariable, LpProblem, LpMaximize, lpSum, PULP_CBC_CMD\n'), ((1009, 1025), 'pulp.lpSum', 'lpSum', (['objective'], {}), '(objective)\n', (1014, 1025), False, 'from pulp import LpVariable, LpProblem, LpMaximize, lpSum, PULP_CBC_CMD\n'), ((1041, 1058), 'numpy.sum', 'np.sum', (['objective'], {}), '(objective)\n', (1047, 1058), True, 'import numpy as np\n'), ((3542, 3561), 'pulp.PULP_CBC_CMD', 'PULP_CBC_CMD', ([], {'msg': '(3)'}), '(msg=3)\n', (3554, 3561), False, 'from pulp import LpVariable, LpProblem, LpMaximize, lpSum, PULP_CBC_CMD\n'), ((1467, 1484), 'pulp.lpSum', 'lpSum', (['total_cost'], {}), '(total_cost)\n', (1472, 1484), False, 'from pulp import LpVariable, LpProblem, LpMaximize, lpSum, PULP_CBC_CMD\n')] |
from typing import Type
import numpy as np
# Type-checking regression lines: each `# E:` comment is the type that mypy's
# reveal_type() is expected to report for these numpy exception/warning
# classes. Keep the `# E:` annotations unchanged -- the test harness matches them.
reveal_type(np.ModuleDeprecationWarning())  # E: numpy.ModuleDeprecationWarning
reveal_type(np.VisibleDeprecationWarning())  # E: numpy.VisibleDeprecationWarning
reveal_type(np.ComplexWarning())  # E: numpy.ComplexWarning
reveal_type(np.RankWarning())  # E: numpy.RankWarning
reveal_type(np.TooHardError())  # E: numpy.TooHardError
reveal_type(np.AxisError("test"))  # E: numpy.AxisError
reveal_type(np.AxisError(5, 1))  # E: numpy.AxisError
| [
"numpy.VisibleDeprecationWarning",
"numpy.ComplexWarning",
"numpy.RankWarning",
"numpy.ModuleDeprecationWarning",
"numpy.AxisError",
"numpy.TooHardError"
] | [((57, 86), 'numpy.ModuleDeprecationWarning', 'np.ModuleDeprecationWarning', ([], {}), '()\n', (84, 86), True, 'import numpy as np\n'), ((137, 167), 'numpy.VisibleDeprecationWarning', 'np.VisibleDeprecationWarning', ([], {}), '()\n', (165, 167), True, 'import numpy as np\n'), ((219, 238), 'numpy.ComplexWarning', 'np.ComplexWarning', ([], {}), '()\n', (236, 238), True, 'import numpy as np\n'), ((279, 295), 'numpy.RankWarning', 'np.RankWarning', ([], {}), '()\n', (293, 295), True, 'import numpy as np\n'), ((333, 350), 'numpy.TooHardError', 'np.TooHardError', ([], {}), '()\n', (348, 350), True, 'import numpy as np\n'), ((389, 409), 'numpy.AxisError', 'np.AxisError', (['"""test"""'], {}), "('test')\n", (401, 409), True, 'import numpy as np\n'), ((445, 463), 'numpy.AxisError', 'np.AxisError', (['(5)', '(1)'], {}), '(5, 1)\n', (457, 463), True, 'import numpy as np\n')] |
"""
This code was developed by <NAME>, <NAME> and <NAME> of PES University
Refer to the README for the sources of the Deepspeech, Tacotron and WaveRNN implementations/folders
The answer extraction code references the Hugging face run_squad.py example, modified for our use
"""
import sys
import wave
import os
import audioop
import collections
from timeit import default_timer as timer
import numpy as np
import torch
import wikipedia
import spacy
import sounddevice as sd
import soundfile as sf
from gingerit.gingerit import GingerIt
from playsound import playsound
from pytorch_pretrained_bert.tokenization import BasicTokenizer, BertTokenizer
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering, BertConfig
from deepspeech import Model
from Tacotron_TTS.synthesizer import Synthesizer
from Vocoder_WaveRNN.vocoder_models.fatchord_version import WaveRNN
from Vocoder_WaveRNN import vocoder_hparams as hp
from Vocoder_WaveRNN.vocoder_utils.text import symbols
from Vocoder_WaveRNN.vocoder_models.tacotron import Tacotron
from Vocoder_WaveRNN.vocoder_utils.text import text_to_sequence
spell_check = GingerIt()  # shared GingerIt grammar/spell-checker instance (usage not shown in this chunk)
def change_samplerate(audio_in, inrate):
# s_read = wave.open(audio_path,'r')
n_frames = audio_in.getnframes()
channels = audio_in.getnchannels()
data = audio_in.readframes(n_frames)
converted = audioop.ratecv(data, 2, channels, inrate, 16000, None)
converted = audioop.tomono(converted[0], 2, 1, 0)
op = np.frombuffer(converted, np.int16)
return 16000, op
# ---------------------------------------------------------------- #
#  DeepSpeech speech-to-text model setup                            #
# ---------------------------------------------------------------- #
BEAM_WIDTH = 500   # beam width of the DeepSpeech CTC decoder
LM_ALPHA = 0.75    # language-model weight (defined but not passed to Model here)
LM_BETA = 1.85     # word-insertion weight (defined but not passed to Model here)
speech_model_path = 'DeepSpeech/Models/output_graph.pb'
alphabet = 'DeepSpeech/Models/alphabet.txt'
lm = 'DeepSpeech/Models/lm.binary'   # NOTE(review): lm/trie paths unused in this chunk
trie = 'DeepSpeech/Models/trie'
N_FEATURES = 261
N_CONTEXT = 9
current_working_directory=os.getcwd()
#print(current_working_directory)
model_load_start = timer()
ds = Model(speech_model_path, N_FEATURES, N_CONTEXT, alphabet, BEAM_WIDTH)
model_load_end = timer() - model_load_start
print('Loaded S2T model in {:.3}s.'.format(model_load_end))
# ---------------------------------------------------------------- #
#  BERT question-answering model setup (CPU only)                   #
# ---------------------------------------------------------------- #
model_load_start = timer()
model_path = 'BERT/bert_model.bin'
config_file = 'BERT/bert_config.json'
max_answer_length = 30
max_query_length = 64    # max question length in wordpiece tokens
doc_stride = 128         # sliding-window stride over long contexts
max_seq_length = 384     # total BERT input length (question + context + specials)
#############
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
#############
config = BertConfig(config_file)
model = BertForQuestionAnswering(config)
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.to(device)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
model_load_end = timer() - model_load_start
print('Loaded BERT model in {:.3}s.'.format(model_load_end))
print('')
# ---------------------------------------------------------------- #
#  Text-to-speech model selection (0: Tacotron, 1: WaveRNN)         #
# ---------------------------------------------------------------- #
print("Input 0 for tacotron and 1 for WaveRNN >>> ",end=' ')
tts_choice = int(input())
if tts_choice != 1:
    tts_choice = 0   # any value other than 1 falls back to plain Tacotron
if tts_choice == 0:
    print("Loading regular tacotron....")
    model_load_start = timer()
    synthesizer = Synthesizer()
    synthesizer.load( current_working_directory + '/Tacotron_TTS/tacotron_model_data/model.ckpt')
    #synthesizer.load('/QA_VoiceBot_Desktop_Application-Clean/Tacotron_TTS/tacotron_model_data/model.ckpt')
    model_load_end = timer() - model_load_start
    print('Loaded T2S model in {:.3}s.'.format(model_load_end))
else:
    print("Loading fatchord wavernn implementation...")
    model_load_start = timer()
    print('\nInitialising WaveRNN Model...\n')
    # Instantiate WaveRNN Model (vocoder); hyperparameters from vocoder_hparams
    voc_model = WaveRNN(rnn_dims=hp.voc_rnn_dims,
                        fc_dims=hp.voc_fc_dims,
                        bits=hp.bits,
                        pad=hp.voc_pad,
                        upsample_factors=hp.voc_upsample_factors,
                        feat_dims=hp.num_mels,
                        compute_dims=hp.voc_compute_dims,
                        res_out_dims=hp.voc_res_out_dims,
                        res_blocks=hp.voc_res_blocks,
                        hop_length=hp.hop_length,
                        sample_rate=hp.sample_rate,
                        mode='MOL')
    voc_model.restore('Vocoder_WaveRNN//WaveRNN_weights//voc_weights//latest_weights.pyt')
    print('\nInitialising Tacotron_TTS Model...\n')
    # Instantiate Tacotron_TTS Model (mel-spectrogram predictor)
    tts_model = Tacotron(embed_dims=hp.tts_embed_dims,
                         num_chars=len(symbols.symbols),
                         encoder_dims=hp.tts_encoder_dims,
                         decoder_dims=hp.tts_decoder_dims,
                         n_mels=hp.num_mels,
                         fft_bins=hp.num_mels,
                         postnet_dims=hp.tts_postnet_dims,
                         encoder_K=hp.tts_encoder_K,
                         lstm_dims=hp.tts_lstm_dims,
                         postnet_K=hp.tts_postnet_K,
                         num_highways=hp.tts_num_highways,
                         dropout=hp.tts_dropout)
    tts_model.restore('Vocoder_WaveRNN//WaveRNN_weights//tts_weights//latest_weights.pyt')
    model_load_end = timer() - model_load_start
    print('Loaded T2S model in {:.3}s.'.format(model_load_end))
def is_whitespace(char):
    """True for the separator characters BERT tokenization treats as
    whitespace: space, tab, CR, LF and U+202F (narrow no-break space)."""
    if char in (" ", "\t", "\r", "\n"):
        return True
    return ord(char) == 0x202F
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def check_max_context(doc_spans, cur_span_index, position):
    """Return True when `cur_span_index` is the doc-span giving `position`
    the most context.

    The score favours tokens far from both span edges, with a small bonus
    (0.01 per token) for longer spans; ties go to the earliest span.
    """
    best_index = None
    best_score = None
    for index, span in enumerate(doc_spans):
        span_end = span.start + span.length - 1
        left_context = position - span.start
        right_context = span_end - position
        score = min(left_context, right_context) + 0.01 * span.length
        if best_score is None or score > best_score:
            best_score = score
            best_index = index
    return cur_span_index == best_index
class InputFeatures(object):
    """Container for one tokenized BERT input span of a (question, context) pair."""
    def __init__(self, doc_span_index, tokens, token_is_max_context, token_to_orig_map,
                 input_ids, input_mask, segment_ids, doc_tokens):
        # index of this span within the example's list of sliding doc-spans
        self.doc_span_index = doc_span_index
        # wordpiece tokens: [CLS] question [SEP] context-span [SEP]
        self.tokens = tokens
        # token position -> True when this span gives that token its max context
        self.token_is_max_context = token_is_max_context
        # token position -> index in the original (whitespace-split) doc_tokens
        self.token_to_orig_map = token_to_orig_map
        # vocabulary ids, padded with 0 up to max_seq_length
        self.input_ids = input_ids
        # 1 for real tokens, 0 for padding
        self.input_mask = input_mask
        # 0 for the question segment, 1 for the context segment
        self.segment_ids = segment_ids
        # the original whitespace-split context tokens
        self.doc_tokens = doc_tokens
def input_to_features(question, context):
    """Convert a (question, context) pair into a list of InputFeatures,
    one per sliding doc-span over the context (run_squad-style)."""
    inputbatch = []
    query_tokens = tokenizer.tokenize(question)
    if len(query_tokens) > max_query_length:
        query_tokens = query_tokens[0:max_query_length] # reduce question tokens to max input size
    # split the context on whitespace, keeping one entry per word
    doc_tokens = []
    prev_is_whitespace = True
    for c in context:
        if is_whitespace(c):
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)
            else:
                doc_tokens[-1] += c
            prev_is_whitespace = False
    # wordpiece-tokenize each word, keeping a map between sub-token and word index
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)
    # -3 accounts for [CLS], [SEP] and [SEP]
    max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
    # slide a window of max_tokens_for_doc over the context, stepping by doc_stride
    _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
        length = len(all_doc_tokens) - start_offset
        if length > max_tokens_for_doc:
            length = max_tokens_for_doc
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == len(all_doc_tokens):
            break
        start_offset += min(length, doc_stride)
    # build one InputFeatures per doc-span
    for (doc_span_index, doc_span) in enumerate(doc_spans):
        tokens = []
        token_to_orig_map = {}
        token_is_max_context = {}
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in query_tokens:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)
        for i in range(doc_span.length):
            split_token_index = doc_span.start + i
            token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
            is_max_context = check_max_context(doc_spans, doc_span_index,
                                              split_token_index)
            token_is_max_context[len(tokens)] = is_max_context
            tokens.append(all_doc_tokens[split_token_index])
            segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        inputbatch.append(InputFeatures(doc_span_index=doc_span_index,
                                        tokens=tokens,
                                        token_is_max_context=token_is_max_context,
                                        token_to_orig_map=token_to_orig_map,
                                        input_ids=input_ids,
                                        input_mask=input_mask,
                                        segment_ids=segment_ids,
                                        doc_tokens=doc_tokens))
    return inputbatch
def bert_predict(context, question):
    """Answer *question* from *context* with the fine-tuned BERT QA model.

    Each feature batch is pushed through the model separately; the
    candidate answers are ranked by score and the best one is returned.
    Raises IndexError when no features produce a candidate (callers
    such as generate_answer() catch that).
    """
    feature_batches = input_to_features(question, context)
    print("Number of batches:", len(feature_batches))
    scored_answers = []
    for feat in feature_batches:
        # One-example batch tensors, moved to the configured device.
        ids_tensor = torch.tensor([feat.input_ids], dtype=torch.long).to(device)
        mask_tensor = torch.tensor([feat.input_mask], dtype=torch.long).to(device)
        seg_tensor = torch.tensor([feat.segment_ids], dtype=torch.long).to(device)
        with torch.no_grad():
            start_scores, end_scores = model(ids_tensor, seg_tensor, mask_tensor)
        start_scores = start_scores[0].detach().cpu().tolist()
        end_scores = end_scores[0].detach().cpu().tolist()
        scored_answers.append(predict(feat, start_scores, end_scores))
    # Sort (answer, score) pairs best-first; indexing an empty list keeps
    # the original IndexError contract.
    ranked = sorted(scored_answers, key=lambda pair: pair[1], reverse=True)
    return ranked[0][0]
def get_final_text(pred_text, orig_text, do_lower_case):
    """Project the tokenized prediction back to the original text.

    Aligns *pred_text* (a detokenized BERT answer span) with *orig_text*
    by stripping spaces from both and mapping character positions back,
    so the returned answer keeps the original casing and punctuation.
    Falls back to returning *orig_text* whenever the alignment fails.
    """
    def _strip_spaces(text):
        # Return text without spaces plus a map: stripped index -> original index.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (index, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = index
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return ns_text, ns_to_s_map
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        # BUG FIX: the original read `return orig_textclear`, an undefined
        # name that raised NameError whenever the end position failed to map.
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def predict(features, start_logit, end_logit):
    """Select the best answer span for one document span.

    Args:
        features: InputFeatures for a single doc span (provides ``tokens``,
            ``token_to_orig_map``, ``token_is_max_context``, ``doc_tokens``).
        start_logit: per-token start logits (list of floats) from the model.
        end_logit: per-token end logits (list of floats) from the model.

    Returns:
        (final_text, score): the best de-tokenized answer string (or a
        default apology when no valid span exists) and its summed logit
        score (0 when nothing was found).
    """
    n_best_size = 10
    _PrelimPrediction = collections.namedtuple("PrelimPrediction",
                                           ["start_index", "end_index", "start_logit",
                                            "end_logit"])
    _NbestPrediction = collections.namedtuple("NbestPrediction", ["text", "start_logit", "end_logit"])
    prelim_predictions = []
    start_indexes = _get_best_indexes(start_logit, n_best_size)
    end_indexes = _get_best_indexes(end_logit, n_best_size)
    # print(start_indexes)
    # print(end_indexes)
    # Enumerate every (start, end) pair from the top-k indexes and keep
    # only structurally valid spans.
    for start_index in start_indexes:
        for end_index in end_indexes:
            # we remove the indexes which are invalid
            if start_index >= len(features.tokens):
                continue
            if end_index >= len(features.tokens):
                continue
            if start_index not in features.token_to_orig_map:
                continue
            if end_index not in features.token_to_orig_map:
                continue
            if not features.token_is_max_context.get(start_index, False):
                continue
            if end_index < start_index:
                continue
            length = end_index - start_index + 1
            # NOTE(review): max_answer_length is a module-level constant
            # defined elsewhere in this file — confirm its value there.
            if length > max_answer_length:
                continue
            prelim_predictions.append(
                _PrelimPrediction(
                    start_index=start_index,
                    end_index=end_index,
                    start_logit=start_logit[start_index],
                    end_logit=end_logit[end_index]))
    # Rank candidate spans by total logit score, best first.
    prelim_predictions = sorted(
        prelim_predictions,
        key=lambda x: (x.start_logit + x.end_logit),
        reverse=True)
    final_text = "Sorry, I wasn't able to find an answer :("
    score = 0
    seen_predictions = {}
    nbest = []
    for pred in prelim_predictions:
        if len(nbest) >= 1:  # n best size before
            break
        feature = features
        if pred.start_index > 0:  # this is a non-null prediction
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = feature.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            # Map the WordPiece answer back onto the raw document text.
            final_text = get_final_text(tok_text, orig_text, True)
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
        else:
            final_text = ""
            seen_predictions[final_text] = True
        score = pred.start_logit + pred.end_logit
        nbest.append(
            _NbestPrediction(
                text=final_text,
                start_logit=pred.start_logit,
                end_logit=pred.end_logit))
    return final_text, score
# Entity-type ranking used by spacy_ner(): lower number = higher priority
# when choosing which entity becomes the lookup topic.
priorities = {"PERSON": 1, "EVENT": 2, "ORG": 3, "PRODUCT": 4, "LOC": 5, "GPE": 6, "NORP": 7, "LANGUAGE": 8,
              "DATE": 9, "OTHER": 10}
nlp = spacy.load("en_core_web_md")  # Much worse but faster NER with "en_core_web_sm"
# Questions containing these trigger phrases are answered from local text
# files instead of Wikipedia (see get_context / get_context_via_search).
LOCALINFO = {"you": 'Data/About_Self',
             "yourself": 'Data/About_Self',
             "You": 'Data/About_Self',
             "Yourself": 'Data/About_Self',
             "PESU": 'Data/About_PESU',
             "PES University": 'Data/About_PESU'}
DATAKEYS = LOCALINFO.keys()  # live view over the trigger phrases
def spacy_ner(text):
    """Extract named entities from *text*, sorted by the ``priorities`` table.

    Returns:
        (ents_label_list, doc): a list of (entity_text, label) tuples
        sorted best-first by the module-level ``priorities`` ranking, plus
        the parsed spaCy ``Doc`` for further inspection by callers.

    Fix: removed a ``tagged_text`` list that was built token-by-token but
    never read or returned (dead work on every call).
    """
    doc = nlp(text)
    prev = ""
    ents_label_list = []
    for X in doc.ents:
        if X.label_ not in priorities:
            # Labels outside the priority table are bucketed as "OTHER".
            ents_label_list.append((X.text, "OTHER"))
        else:
            if prev == "DATE" and X.label_ == "EVENT":
                # Merge a DATE immediately followed by an EVENT into one
                # combined EVENT entity.
                old_ent = ents_label_list.pop()
                new_ent = (old_ent[0] + " " + X.text, "EVENT")
                ents_label_list.append(new_ent)
            else:
                ents_label_list.append((X.text, X.label_))
            prev = X.label_
    ents_label_list = sorted(ents_label_list, key=lambda x: priorities[x[1]])
    return ents_label_list, doc
def reduced_text(wiki_page, doc, topics):
    """Trim a Wikipedia page to sentences mentioning the question's noun roots.

    Keeps the page summary plus every sentence (from the body, cut off at
    the "See also"/"Notes"/"References" sections) that contains one of the
    noun-chunk roots of *doc*. The primary topic itself (``topics[0]``) is
    dropped from the root list so the filter favours the other nouns.
    """
    kept_sentences = ""
    roots = [chunk.root.text for chunk in doc.noun_chunks]
    if topics != []:
        if topics[0] in roots:
            roots.remove(topics[0])
    lines = wiki_page.content.split('\n')
    # Truncate trailing boilerplate sections, in the same order as before.
    for marker in ("== See also ==", "== Notes ==", "== References =="):
        if marker in lines:
            lines = lines[:lines.index(marker)]
    for line in lines:
        for root in roots:
            if root in line:
                for sentence in line.split("."):
                    if root in sentence:
                        kept_sentences += sentence + "."
    return wiki_page.summary + kept_sentences
def get_context(question):
    """Build a context passage for *question*.

    Local data files (LOCALINFO triggers) win over Wikipedia. Otherwise
    the highest-priority named entity — or, failing that, the first noun
    lemma — is looked up on Wikipedia and the page is reduced to the
    relevant sentences.

    Fixes: the local file handle is now closed (was leaked), and the
    identical try/except page fetch duplicated in both branches of the
    original ``if``/``else`` is deduplicated.
    """
    for corpuskey in DATAKEYS:
        if corpuskey in question:
            with open(LOCALINFO[corpuskey], "r") as text_file:
                print("Local file used :", LOCALINFO[corpuskey])
                search_passage = text_file.read()
            return search_passage
    topic_list, doc = spacy_ner(question)
    for i in range(len(topic_list)):
        topic_list[i] = topic_list[i][0]
    if len(topic_list) == 0:
        # Fall back to noun lemmas when NER found nothing.
        for token in doc:
            if 'NN' in token.tag_:
                topic_list.append(token.lemma_)
    try:
        wiki_page = wikipedia.page(topic_list[0])
    except wikipedia.exceptions.DisambiguationError as err:
        # Ambiguous title: take the first suggested disambiguation.
        wiki_page = wikipedia.page(err.options[0])
    print("Page Used :", wiki_page.title)
    return reduced_text(wiki_page, doc, topic_list)
def get_context_via_search(question):
    """Build a context passage using Wikipedia full-text search.

    Like get_context(), but instead of NER-driven title lookup it feeds
    the whole question to ``wikipedia.search`` and takes the top hit.
    Local data files (LOCALINFO triggers) still win over Wikipedia.

    Fix: the local file handle is now closed via ``with`` (was leaked).
    """
    for corpuskey in DATAKEYS:
        if corpuskey in question:
            with open(LOCALINFO[corpuskey], "r") as text_file:
                print("Local file used :", LOCALINFO[corpuskey])
                search_passage = text_file.read()
            return search_passage
    page_list = wikipedia.search(question)
    print("Page used:", page_list[0])
    wiki_page = wikipedia.page(page_list[0])
    # print(wiki_page.content)
    topic_list, doc = spacy_ner(question)
    return reduced_text(wiki_page, doc, topic_list)
directory_in_str = "test_audio/"  # folder scanned by test_files() for .wav samples
directory = os.fsencode(directory_in_str)  # bytes path for os.listdir()
def generate_answer(question):
    """Fetch a context for *question* and run BERT QA over it.

    Returns an apology string when no Wikipedia page could be found
    (get_context / bert_predict raise IndexError in that case).
    """
    try:
        return bert_predict(get_context(question), question)
    except IndexError:
        return "Sorry, couldn't find any pages to search from!"
def test_aud_in():
    """Record 5 s from the microphone, transcribe, answer, and speak the answer.

    End-to-end demo: sounddevice recording -> DeepSpeech STT -> spell
    correction -> BERT QA -> Tacotron (tts_choice == 0) or WaveRNN TTS
    playback. Side effects: writes py_rec.wav and a synthesized .wav,
    plays audio, prints timing to stdout/stderr.
    """
    tstart = timer()
    audio = "py_rec.wav"
    fs = 44100
    duration = 5 # seconds
    myrecording = sd.rec(duration * fs, samplerate=fs, channels=2, dtype='float32')
    print("Recording Audio")
    sd.wait()
    print("Audio recording complete , Play Audio")
    sd.play(myrecording, fs)
    sd.wait()
    print("Play Audio Complete")
    sf.write(audio, myrecording, fs)
    fin = wave.open(audio, 'rb')
    fs = fin.getframerate()
    # DeepSpeech expects 16 kHz input; resample if the recording differs.
    if fs != 16000:
        warn = 'Resampling from {}Hz to 16kHz'
        print(warn.format(fs), file=sys.stderr)
        fs, audio = change_samplerate(fin, fs)
        audio_length = fin.getnframes() * (1 / 16000)
        fin.close()
    else:
        audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)
        audio_length = fin.getnframes() * (1 / 16000)
        fin.close()
    print('Running inference.', file=sys.stderr)
    inference_start = timer()
    qasked = ds.stt(audio, fs)
    inference_end = timer() - inference_start
    print('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length), file=sys.stderr)
    print("Infered:", qasked)
    # Clean up recognition mistakes before handing the question to QA.
    qasked = spell_check.parse(qasked)['result']
    print("Question:", qasked)
    print("Generating answer!")
    gen_start = timer()
    ans = generate_answer(qasked)
    print("Answer:", ans)
    print("Answer generated in {:.3}s.".format(timer() - gen_start))
    print("Generating audio out")
    if tts_choice == 0:
        # Tacotron path: synthesize and play at 10500 Hz.
        aud_timer = timer()
        aud_out = synthesizer.synthesize(ans)
        print('Took {:.3}s for audio synthesis.'.format(timer() - aud_timer))
        tot_time = timer() - tstart
        aud_out = np.frombuffer(aud_out, dtype='int32')
        sd.play(aud_out, 10500)
        sd.wait()
        print("Time for sample: {:.3}s.".format(tot_time))
        save_path = f'Tacotron_TTS/Tacotron_outputs/__input_{ans[:10]}.wav'
        sf.write(save_path,aud_out, 10500)
    else:
        # WaveRNN path: Tacotron generates a mel spectrogram, WaveRNN vocodes it.
        input_sequence = text_to_sequence(ans.strip(), hp.tts_cleaner_names)
        aud_timer = timer()
        _, m, attention = tts_model.generate(input_sequence)
        save_path = f'Vocoder_WaveRNN/WaveRNN_outputs/__input_{ans[:10]}.wav'
        m = torch.tensor(m).unsqueeze(0)
        # NOTE(review): (m + 4) / 8 looks like mel normalization into [0, 1]
        # — confirm against the WaveRNN hparams.
        m = (m + 4) / 8
        batched = 1
        op = voc_model.generate(m, save_path, batched, hp.voc_target, hp.voc_overlap, hp.mu_law)
        print('Took {:.3}s for audio synthesis.'.format(timer() - aud_timer))
        sample_time = timer() - aud_timer
        sd.play(op, 22050)
        sd.wait()
        print("Time for sample: {:.3}s.".format(sample_time))
def test_files():
    """Run the full STT -> QA -> TTS pipeline over every .wav in test_audio/.

    For each file: transcribe with DeepSpeech, spell-correct, answer with
    BERT QA, synthesize the answer (Tacotron or WaveRNN per tts_choice),
    play it, and report per-file and average timings.

    NOTE(review): raises ZeroDivisionError at the end if the folder
    contains no .wav files (count stays 0).
    """
    count = 0
    time_for_all_files = 0
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if filename.endswith(".wav"):
            start_time = timer()
            fn2 = directory_in_str + filename
            #playsound(fn2)
            fin = wave.open(fn2, 'rb')
            fs = fin.getframerate()
            # DeepSpeech expects 16 kHz input; resample if the file differs.
            if fs != 16000:
                print('Resampling from ({}) to 16kHz.'.format(fs), file=sys.stderr)
                fs, audio = change_samplerate(fin, fs)
                audio_length = fin.getnframes() * (1 / 16000)
                fin.close()
            else:
                audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)
                audio_length = fin.getnframes() * (1 / 16000)
                fin.close()
            print('Running inference.', file=sys.stderr)
            inference_start = timer()
            qasked = ds.stt(audio, fs)
            inference_end = timer() - inference_start
            print('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length), file=sys.stderr)
            print("Inferred:", qasked)
            # Clean up recognition mistakes before handing the question to QA.
            qasked = spell_check.parse(qasked)['result']
            print("Question:", qasked)
            gen_start = timer()
            ans = generate_answer(qasked)
            print("Answer:", ans)
            print("Answer generated in {:.3}s.".format(timer() - gen_start))
            print("Generating audio out")
            if tts_choice == 0:
                # Tacotron path: synthesize and play at 10500 Hz.
                aud_timer = timer()
                aud_out = synthesizer.synthesize(ans)
                print('Took {:.3}s for audio synthesis.'.format(timer() - aud_timer))
                sample_time = timer() - start_time
                aud_out = np.frombuffer(aud_out, dtype='int32')
                sd.play(aud_out, 10500)
                sd.wait()
                save_path = f'Tacotron_TTS/Tacotron_outputs/__input_{ans[:10]}.wav'
                sf.write(save_path, aud_out, 10500)
            else:
                # WaveRNN path: Tacotron mel spectrogram, vocoded by WaveRNN.
                input_sequence = text_to_sequence(ans.strip(), hp.tts_cleaner_names)
                aud_timer = timer()
                _, m, attention = tts_model.generate(input_sequence)
                save_path = f'Vocoder_WaveRNN/WaveRNN_outputs/__input_{ans[:10]}.wav'
                m = torch.tensor(m).unsqueeze(0)
                # NOTE(review): (m + 4) / 8 looks like mel normalization into
                # [0, 1] — confirm against the WaveRNN hparams.
                m = (m + 4) / 8
                batched = 1
                op = voc_model.generate(m, save_path, batched, hp.voc_target, hp.voc_overlap, hp.mu_law)
                print('Took {:.3}s for audio synthesis.'.format(timer() - aud_timer))
                sample_time = timer() - start_time
                sd.play(op, 22050)
                sd.wait()
            print("Time for sample: {:.3}s.\n".format(sample_time))
            time_for_all_files += sample_time
            count += 1
    print("******")
    print("Time for all samples :", time_for_all_files, "s")
    print("Average time: {:.3}s".format(time_for_all_files / count))
# Script entry point: batch-test the whole pipeline on every .wav sample.
print()
print("############################")
print("Testing all files in testing folder")
print("########################")
print()
test_files()
| [
"sounddevice.rec",
"audioop.tomono",
"pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained",
"sounddevice.wait",
"torch.device",
"Tacotron_TTS.synthesizer.Synthesizer",
"torch.no_grad",
"os.fsdecode",
"torch.load",
"spacy.load",
"audioop.ratecv",
"soundfile.write",
"pytorch_pre... | [((1125, 1135), 'gingerit.gingerit.GingerIt', 'GingerIt', ([], {}), '()\n', (1133, 1135), False, 'from gingerit.gingerit import GingerIt\n'), ((1803, 1814), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1812, 1814), False, 'import os\n'), ((1869, 1876), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1874, 1876), True, 'from timeit import default_timer as timer\n'), ((1882, 1951), 'deepspeech.Model', 'Model', (['speech_model_path', 'N_FEATURES', 'N_CONTEXT', 'alphabet', 'BEAM_WIDTH'], {}), '(speech_model_path, N_FEATURES, N_CONTEXT, alphabet, BEAM_WIDTH)\n', (1887, 1951), False, 'from deepspeech import Model\n'), ((2076, 2083), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2081, 2083), True, 'from timeit import default_timer as timer\n'), ((2335, 2354), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2347, 2354), False, 'import torch\n'), ((2379, 2402), 'pytorch_pretrained_bert.modeling.BertConfig', 'BertConfig', (['config_file'], {}), '(config_file)\n', (2389, 2402), False, 'from pytorch_pretrained_bert.modeling import BertForQuestionAnswering, BertConfig\n'), ((2411, 2443), 'pytorch_pretrained_bert.modeling.BertForQuestionAnswering', 'BertForQuestionAnswering', (['config'], {}), '(config)\n', (2435, 2443), False, 'from pytorch_pretrained_bert.modeling import BertForQuestionAnswering, BertConfig\n'), ((2539, 2609), 'pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {'do_lower_case': '(True)'}), "('bert-base-uncased', do_lower_case=True)\n", (2568, 2609), False, 'from pytorch_pretrained_bert.tokenization import BasicTokenizer, BertTokenizer\n'), ((16560, 16588), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (16570, 16588), False, 'import spacy\n'), ((20254, 20283), 'os.fsencode', 'os.fsencode', (['directory_in_str'], {}), '(directory_in_str)\n', (20265, 20283), False, 'import os\n'), ((1353, 1407), 
'audioop.ratecv', 'audioop.ratecv', (['data', '(2)', 'channels', 'inrate', '(16000)', 'None'], {}), '(data, 2, channels, inrate, 16000, None)\n', (1367, 1407), False, 'import audioop\n'), ((1424, 1461), 'audioop.tomono', 'audioop.tomono', (['converted[0]', '(2)', '(1)', '(0)'], {}), '(converted[0], 2, 1, 0)\n', (1438, 1461), False, 'import audioop\n'), ((1471, 1505), 'numpy.frombuffer', 'np.frombuffer', (['converted', 'np.int16'], {}), '(converted, np.int16)\n', (1484, 1505), True, 'import numpy as np\n'), ((1969, 1976), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1974, 1976), True, 'from timeit import default_timer as timer\n'), ((2466, 2508), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (2476, 2508), False, 'import torch\n'), ((2627, 2634), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2632, 2634), True, 'from timeit import default_timer as timer\n'), ((2938, 2945), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2943, 2945), True, 'from timeit import default_timer as timer\n'), ((2964, 2977), 'Tacotron_TTS.synthesizer.Synthesizer', 'Synthesizer', ([], {}), '()\n', (2975, 2977), False, 'from Tacotron_TTS.synthesizer import Synthesizer\n'), ((3386, 3393), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3391, 3393), True, 'from timeit import default_timer as timer\n'), ((3490, 3826), 'Vocoder_WaveRNN.vocoder_models.fatchord_version.WaveRNN', 'WaveRNN', ([], {'rnn_dims': 'hp.voc_rnn_dims', 'fc_dims': 'hp.voc_fc_dims', 'bits': 'hp.bits', 'pad': 'hp.voc_pad', 'upsample_factors': 'hp.voc_upsample_factors', 'feat_dims': 'hp.num_mels', 'compute_dims': 'hp.voc_compute_dims', 'res_out_dims': 'hp.voc_res_out_dims', 'res_blocks': 'hp.voc_res_blocks', 'hop_length': 'hp.hop_length', 'sample_rate': 'hp.sample_rate', 'mode': '"""MOL"""'}), "(rnn_dims=hp.voc_rnn_dims, fc_dims=hp.voc_fc_dims, bits=hp.bits, pad\n =hp.voc_pad, upsample_factors=hp.voc_upsample_factors, feat_dims=hp.\n 
num_mels, compute_dims=hp.voc_compute_dims, res_out_dims=hp.\n voc_res_out_dims, res_blocks=hp.voc_res_blocks, hop_length=hp.\n hop_length, sample_rate=hp.sample_rate, mode='MOL')\n", (3497, 3826), False, 'from Vocoder_WaveRNN.vocoder_models.fatchord_version import WaveRNN\n'), ((7836, 7890), 'collections.namedtuple', 'collections.namedtuple', (['"""DocSpan"""', "['start', 'length']"], {}), "('DocSpan', ['start', 'length'])\n", (7858, 7890), False, 'import collections\n'), ((11722, 11765), 'pytorch_pretrained_bert.tokenization.BasicTokenizer', 'BasicTokenizer', ([], {'do_lower_case': 'do_lower_case'}), '(do_lower_case=do_lower_case)\n', (11736, 11765), False, 'from pytorch_pretrained_bert.tokenization import BasicTokenizer, BertTokenizer\n'), ((13226, 13330), 'collections.namedtuple', 'collections.namedtuple', (['"""PrelimPrediction"""', "['start_index', 'end_index', 'start_logit', 'end_logit']"], {}), "('PrelimPrediction', ['start_index', 'end_index',\n 'start_logit', 'end_logit'])\n", (13248, 13330), False, 'import collections\n'), ((13446, 13525), 'collections.namedtuple', 'collections.namedtuple', (['"""NbestPrediction"""', "['text', 'start_logit', 'end_logit']"], {}), "('NbestPrediction', ['text', 'start_logit', 'end_logit'])\n", (13468, 13525), False, 'import collections\n'), ((19972, 19998), 'wikipedia.search', 'wikipedia.search', (['question'], {}), '(question)\n', (19988, 19998), False, 'import wikipedia\n'), ((20053, 20081), 'wikipedia.page', 'wikipedia.page', (['page_list[0]'], {}), '(page_list[0])\n', (20067, 20081), False, 'import wikipedia\n'), ((20559, 20566), 'timeit.default_timer', 'timer', ([], {}), '()\n', (20564, 20566), True, 'from timeit import default_timer as timer\n'), ((20653, 20718), 'sounddevice.rec', 'sd.rec', (['(duration * fs)'], {'samplerate': 'fs', 'channels': '(2)', 'dtype': '"""float32"""'}), "(duration * fs, samplerate=fs, channels=2, dtype='float32')\n", (20659, 20718), True, 'import sounddevice as sd\n'), ((20752, 20761), 
'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (20759, 20761), True, 'import sounddevice as sd\n'), ((20817, 20841), 'sounddevice.play', 'sd.play', (['myrecording', 'fs'], {}), '(myrecording, fs)\n', (20824, 20841), True, 'import sounddevice as sd\n'), ((20846, 20855), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (20853, 20855), True, 'import sounddevice as sd\n'), ((20893, 20925), 'soundfile.write', 'sf.write', (['audio', 'myrecording', 'fs'], {}), '(audio, myrecording, fs)\n', (20901, 20925), True, 'import soundfile as sf\n'), ((20937, 20959), 'wave.open', 'wave.open', (['audio', '"""rb"""'], {}), "(audio, 'rb')\n", (20946, 20959), False, 'import wave\n'), ((21454, 21461), 'timeit.default_timer', 'timer', ([], {}), '()\n', (21459, 21461), True, 'from timeit import default_timer as timer\n'), ((21804, 21811), 'timeit.default_timer', 'timer', ([], {}), '()\n', (21809, 21811), True, 'from timeit import default_timer as timer\n'), ((23211, 23232), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (23221, 23232), False, 'import os\n'), ((3210, 3217), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3215, 3217), True, 'from timeit import default_timer as timer\n'), ((5015, 5022), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5020, 5022), True, 'from timeit import default_timer as timer\n'), ((10460, 10505), 'torch.tensor', 'torch.tensor', (['[f.input_ids]'], {'dtype': 'torch.long'}), '([f.input_ids], dtype=torch.long)\n', (10472, 10505), False, 'import torch\n'), ((10531, 10577), 'torch.tensor', 'torch.tensor', (['[f.input_mask]'], {'dtype': 'torch.long'}), '([f.input_mask], dtype=torch.long)\n', (10543, 10577), False, 'import torch\n'), ((10604, 10651), 'torch.tensor', 'torch.tensor', (['[f.segment_ids]'], {'dtype': 'torch.long'}), '([f.segment_ids], dtype=torch.long)\n', (10616, 10651), False, 'import torch\n'), ((11436, 11461), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (11459, 11461), False, 'import 
collections\n'), ((21513, 21520), 'timeit.default_timer', 'timer', ([], {}), '()\n', (21518, 21520), True, 'from timeit import default_timer as timer\n'), ((22019, 22026), 'timeit.default_timer', 'timer', ([], {}), '()\n', (22024, 22026), True, 'from timeit import default_timer as timer\n'), ((22205, 22242), 'numpy.frombuffer', 'np.frombuffer', (['aud_out'], {'dtype': '"""int32"""'}), "(aud_out, dtype='int32')\n", (22218, 22242), True, 'import numpy as np\n'), ((22251, 22274), 'sounddevice.play', 'sd.play', (['aud_out', '(10500)'], {}), '(aud_out, 10500)\n', (22258, 22274), True, 'import sounddevice as sd\n'), ((22283, 22292), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (22290, 22292), True, 'import sounddevice as sd\n'), ((22436, 22471), 'soundfile.write', 'sf.write', (['save_path', 'aud_out', '(10500)'], {}), '(save_path, aud_out, 10500)\n', (22444, 22471), True, 'import soundfile as sf\n'), ((22578, 22585), 'timeit.default_timer', 'timer', ([], {}), '()\n', (22583, 22585), True, 'from timeit import default_timer as timer\n'), ((23035, 23053), 'sounddevice.play', 'sd.play', (['op', '(22050)'], {}), '(op, 22050)\n', (23042, 23053), True, 'import sounddevice as sd\n'), ((23062, 23071), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (23069, 23071), True, 'import sounddevice as sd\n'), ((23253, 23270), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (23264, 23270), False, 'import os\n'), ((10807, 10822), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10820, 10822), False, 'import torch\n'), ((19213, 19242), 'wikipedia.page', 'wikipedia.page', (['topic_list[0]'], {}), '(topic_list[0])\n', (19227, 19242), False, 'import wikipedia\n'), ((19410, 19439), 'wikipedia.page', 'wikipedia.page', (['topic_list[0]'], {}), '(topic_list[0])\n', (19424, 19439), False, 'import wikipedia\n'), ((22170, 22177), 'timeit.default_timer', 'timer', ([], {}), '()\n', (22175, 22177), True, 'from timeit import default_timer as timer\n'), ((23007, 23014), 
'timeit.default_timer', 'timer', ([], {}), '()\n', (23012, 23014), True, 'from timeit import default_timer as timer\n'), ((23334, 23341), 'timeit.default_timer', 'timer', ([], {}), '()\n', (23339, 23341), True, 'from timeit import default_timer as timer\n'), ((23434, 23454), 'wave.open', 'wave.open', (['fn2', '"""rb"""'], {}), "(fn2, 'rb')\n", (23443, 23454), False, 'import wave\n'), ((24026, 24033), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24031, 24033), True, 'from timeit import default_timer as timer\n'), ((24401, 24408), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24406, 24408), True, 'from timeit import default_timer as timer\n'), ((19331, 19361), 'wikipedia.page', 'wikipedia.page', (['err.options[0]'], {}), '(err.options[0])\n', (19345, 19361), False, 'import wikipedia\n'), ((19528, 19558), 'wikipedia.page', 'wikipedia.page', (['err.options[0]'], {}), '(err.options[0])\n', (19542, 19558), False, 'import wikipedia\n'), ((21919, 21926), 'timeit.default_timer', 'timer', ([], {}), '()\n', (21924, 21926), True, 'from timeit import default_timer as timer\n'), ((22737, 22752), 'torch.tensor', 'torch.tensor', (['m'], {}), '(m)\n', (22749, 22752), False, 'import torch\n'), ((24101, 24108), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24106, 24108), True, 'from timeit import default_timer as timer\n'), ((24664, 24671), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24669, 24671), True, 'from timeit import default_timer as timer\n'), ((24889, 24926), 'numpy.frombuffer', 'np.frombuffer', (['aud_out'], {'dtype': '"""int32"""'}), "(aud_out, dtype='int32')\n", (24902, 24926), True, 'import numpy as np\n'), ((24943, 24966), 'sounddevice.play', 'sd.play', (['aud_out', '(10500)'], {}), '(aud_out, 10500)\n', (24950, 24966), True, 'import sounddevice as sd\n'), ((24983, 24992), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (24990, 24992), True, 'import sounddevice as sd\n'), ((25093, 25128), 'soundfile.write', 'sf.write', (['save_path', 
'aud_out', '(10500)'], {}), '(save_path, aud_out, 10500)\n', (25101, 25128), True, 'import soundfile as sf\n'), ((25261, 25268), 'timeit.default_timer', 'timer', ([], {}), '()\n', (25266, 25268), True, 'from timeit import default_timer as timer\n'), ((25791, 25809), 'sounddevice.play', 'sd.play', (['op', '(22050)'], {}), '(op, 22050)\n', (25798, 25809), True, 'import sounddevice as sd\n'), ((25826, 25835), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (25833, 25835), True, 'import sounddevice as sd\n'), ((22129, 22136), 'timeit.default_timer', 'timer', ([], {}), '()\n', (22134, 22136), True, 'from timeit import default_timer as timer\n'), ((22963, 22970), 'timeit.default_timer', 'timer', ([], {}), '()\n', (22968, 22970), True, 'from timeit import default_timer as timer\n'), ((24842, 24849), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24847, 24849), True, 'from timeit import default_timer as timer\n'), ((25754, 25761), 'timeit.default_timer', 'timer', ([], {}), '()\n', (25759, 25761), True, 'from timeit import default_timer as timer\n'), ((24540, 24547), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24545, 24547), True, 'from timeit import default_timer as timer\n'), ((25444, 25459), 'torch.tensor', 'torch.tensor', (['m'], {}), '(m)\n', (25456, 25459), False, 'import torch\n'), ((24790, 24797), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24795, 24797), True, 'from timeit import default_timer as timer\n'), ((25702, 25709), 'timeit.default_timer', 'timer', ([], {}), '()\n', (25707, 25709), True, 'from timeit import default_timer as timer\n')] |
import pickle as pkl
import scipy.sparse
import numpy as np
import pandas as pd
from scipy import sparse as sp
import networkx as nx
#' from gcn.utils import *
from sc_data import *
from collections import defaultdict
from scipy.stats import uniform
#' -------- convert graph to specific format -----------
def get_value(diction, specific):
    """Reverse lookup: return the first key in *diction* whose value
    equals *specific*, or None when no entry matches."""
    for candidate_key, candidate_val in diction.items():
        if candidate_val == specific:
            return candidate_key
def graph(matrix):
    """Convert a dense adjacency *matrix* into an adjacency-list dict.

    Maps each row index to the list of column indices holding truthy
    entries. Every row index appears as a key; isolated nodes map to [].

    Bug fixed: the original tested ``adj[i].__len__ == 0`` — comparing the
    bound method itself (never called) to 0, which is always False. Using
    ``len(adj[i])`` expresses the intent; touching ``adj[i]`` on the
    defaultdict also registers isolated nodes, as before.
    """
    adj = defaultdict(list)  # default value of a missing node is []
    for i, row in enumerate(matrix):
        for j, adjacent in enumerate(row):
            if adjacent:
                adj[i].append(j)
        if len(adj[i]) == 0:  # accessing adj[i] materialises the empty entry
            adj[i] = []
    return adj
def sample_mask(idx, l):
    """Create a boolean mask of length *l* that is True at positions *idx*.

    Bug fixed: ``np.bool`` was deprecated in NumPy 1.20 and removed in
    1.24, so the original raises AttributeError on modern NumPy. The
    builtin ``bool`` dtype is the drop-in replacement and behaves
    identically on older versions too.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    return np.array(mask, dtype=bool)
# convert nested lists to a flat list
output = []  # retained for backward compatibility; no longer used internally


def removNestings(l):
    """Return a flat list of all non-list leaves of arbitrarily nested list *l*.

    Bug fixed: the original accumulated results into the module-level
    ``output`` list, so items leaked across calls (a second call returned
    the first call's items as well). A local accumulator makes each call
    independent while keeping the same signature and return value.
    """
    flat = []
    for item in l:
        if type(item) == list:
            flat.extend(removNestings(item))
        else:
            flat.append(item)
    return flat
def load_customize_data(datadir):
    """Load a preprocessed single-cell dataset and build GCN-ready inputs.

    Reads ``{datadir}/datasets.dat`` (pickled splits produced by
    ``input_data``) plus two Seurat-exported edge lists, and assembles:
    the combined adjacency matrix, a sparse LIL feature matrix stacked as
    [train | val | test], integer class labels, and index ranges for the
    train / val / test / prediction partitions.

    Returns:
        (adj, features, num_label, idx_train, idx_val, idx_test, idx_pred)
    """
    input_data(datadir)
    PIK = "{}/datasets.dat".format(datadir)
    with open(PIK, "rb") as f:
        objects = pkl.load(f)
    data_train1, data_test1, data_val1, label_train1, label_test1, label_val1, lab_data2, lab_label2, types = tuple(
        objects)
    # Labeled data from the second modality is appended to the training set.
    datas_train = pd.concat([data_train1, lab_data2])
    labels_train = pd.concat([label_train1, lab_label2])
    datas_train = np.array(datas_train)
    datas_test = np.array(data_test1)
    datas_val = np.array(data_val1)
    labels_train = np.array(labels_train).flatten()
    labels_test = np.array(label_test1).flatten()
    labels_val = np.array(label_val1).flatten()
    #' convert pandas data frame to csr_matrix format
    datas_tr = scipy.sparse.csr_matrix(datas_train.astype('Float64'))
    datas_va = scipy.sparse.csr_matrix(datas_val.astype('Float64'))
    datas_te = scipy.sparse.csr_matrix(datas_test.astype('Float64'))
    #' 3) set the unlabeled data in training set
    #' @param N; the number of labeled samples in training set
    M = len(data_train1)
    #' 4) get the feature object by combining training, test, valiation sets
    features = sp.vstack((sp.vstack((datas_tr, datas_va)), datas_te)).tolil()
    #' features = preprocess_features(features)
    #' 5) Given cell type, generate three sets of labels with the same dimension
    labels_tr = labels_train.flatten()
    labels_va = labels_val.flatten()
    labels_te = labels_test.flatten()
    labels = np.concatenate(
        [np.concatenate([labels_tr, labels_va]), labels_te])
    Labels = pd.DataFrame(labels)
    # NOTE(review): true_label and uniq are computed but never used below.
    true_label = Labels
    #' convert list to binary matrix
    uniq = np.unique(Labels.values)
    # Map each cell-type name to its integer index in `types`.
    rename = {}
    for line in range(0, len(types)):
        key = types[line]
        rename[key] = int(line)
    Label1 = Labels.replace(rename)
    indices = np.array(Label1.values, dtype='int').tolist()
    indice = [item for sublist in indices for item in sublist]
    #' convert list to binary matrix
    # One-hot encode the integer labels via a CSR matrix (one 1 per row).
    indptr = range(len(indice) + 1)
    dat = np.ones(len(indice))
    binary_label = scipy.sparse.csr_matrix((dat, indice, indptr))
    #' new label with binary values
    new_label = np.array(binary_label.todense())
    # Partition indices: [0, M) labeled train, [M, len(tr)) prediction,
    # then validation, then test — matching the vstack order above.
    idx_train = range(M)
    idx_pred = range(M, len(labels_tr))
    idx_val = range(len(labels_tr), len(labels_tr) + len(labels_va))
    idx_test = range(
        len(labels_tr) + len(labels_va),
        len(labels_tr) + len(labels_va) + len(labels_te))
    train_mask = sample_mask(idx_train, new_label.shape[0])
    pred_mask = sample_mask(idx_pred, new_label.shape[0])
    val_mask = sample_mask(idx_val, new_label.shape[0])
    test_mask = sample_mask(idx_test, new_label.shape[0])
    labels_binary_train = np.zeros(new_label.shape)
    labels_binary_val = np.zeros(new_label.shape)
    labels_binary_test = np.zeros(new_label.shape)
    labels_binary_train[train_mask, :] = new_label[train_mask, :]
    labels_binary_val[val_mask, :] = new_label[val_mask, :]
    labels_binary_test[test_mask, :] = new_label[test_mask, :]
    #' ----- use seurat output to construct matrix ---------
    id_graph1 = pd.read_csv('{}/integrate_graph.csv'.format(datadir),
                            index_col=0,
                            sep=',')
    id_graph2 = pd.read_csv('{}/data2_graph.csv'.format(datadir),
                            sep=',',
                            index_col=0)
    #' --- map index ----
    # `index1` positions rows of dataset 1 (train+val); `find1` positions
    # rows of the test split, padded with -1 so both share one coordinate
    # space for np.where lookups below.
    fake1 = np.array([-1] * len(data_test1.index))
    index1 = np.concatenate((data_train1.index, data_val1.index)).flatten()
    fake2 = np.array([-1] * len(data_train1))
    fake3 = np.array([-1] * len(data_val1))
    find1 = np.concatenate((fake2, fake3, np.array(data_test1.index))).flatten()
    #' ---------------------------------------------
    #' graph 2
    #' ---------------------------------------------
    # Symmetric edges within dataset 2 (both directions of each pair).
    id_grp1 = np.array([
        np.concatenate((np.where(find1 == id_graph2.iloc[i, 1])[0],
                        np.where(find1 == id_graph2.iloc[i, 0])[0]))
        for i in range(len(id_graph2))
    ])
    id_grp2 = np.array([
        np.concatenate((np.where(find1 == id_graph2.iloc[i, 0])[0],
                        np.where(find1 == id_graph2.iloc[i, 1])[0]))
        for i in range(len(id_graph2))
    ])
    #' ---------------------------------------------
    #' inter-graph
    #' ---------------------------------------------
    # Edges linking dataset 1 rows to dataset 2 rows (integration anchors).
    id_gp1 = np.array([
        np.concatenate((np.where(find1 == id_graph1.iloc[i, 1])[0],
                        np.where(index1 == id_graph1.iloc[i, 0])[0]))
        for i in range(len(id_graph1))
    ])
    id_gp2 = np.array([
        np.concatenate((np.where(index1 == id_graph1.iloc[i, 0])[0],
                        np.where(find1 == id_graph1.iloc[i, 1])[0]))
        for i in range(len(id_graph1))
    ])
    # NOTE(review): only the id_grp edges are written into the matrix;
    # id_gp1/id_gp2 are computed but unused here — confirm intent.
    matrix = np.identity(len(labels))
    matrix[tuple(id_grp1.T)] = 1
    matrix[tuple(id_grp2.T)] = 1
    adj = graph(matrix)
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(adj))
    num_label = np.argmax(new_label, 1)
    print("assign input coordinatly....")
    return adj, features, num_label, idx_train, idx_val, idx_test, idx_pred
#' in case FLAGS are defined twice
def del_all_flags(FLAGS):
    """Remove every flag currently registered on *FLAGS*.

    Guards against "flag defined twice" errors when this module is
    (re)imported inside the same interpreter session.
    """
    # Snapshot the names first so we never mutate while iterating.
    for flag_name in list(FLAGS._flags()):
        FLAGS.__delattr__(flag_name)
| [
"pandas.DataFrame",
"networkx.from_dict_of_lists",
"numpy.concatenate",
"numpy.argmax",
"scipy.sparse.vstack",
"numpy.zeros",
"collections.defaultdict",
"pickle.load",
"numpy.array",
"numpy.where",
"pandas.concat",
"numpy.unique"
] | [((464, 481), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (475, 481), False, 'from collections import defaultdict\n'), ((781, 792), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (789, 792), True, 'import numpy as np\n'), ((822, 851), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (830, 851), True, 'import numpy as np\n'), ((1383, 1418), 'pandas.concat', 'pd.concat', (['[data_train1, lab_data2]'], {}), '([data_train1, lab_data2])\n', (1392, 1418), True, 'import pandas as pd\n'), ((1438, 1475), 'pandas.concat', 'pd.concat', (['[label_train1, lab_label2]'], {}), '([label_train1, lab_label2])\n', (1447, 1475), True, 'import pandas as pd\n'), ((1495, 1516), 'numpy.array', 'np.array', (['datas_train'], {}), '(datas_train)\n', (1503, 1516), True, 'import numpy as np\n'), ((1534, 1554), 'numpy.array', 'np.array', (['data_test1'], {}), '(data_test1)\n', (1542, 1554), True, 'import numpy as np\n'), ((1571, 1590), 'numpy.array', 'np.array', (['data_val1'], {}), '(data_val1)\n', (1579, 1590), True, 'import numpy as np\n'), ((2648, 2668), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {}), '(labels)\n', (2660, 2668), True, 'import pandas as pd\n'), ((2742, 2766), 'numpy.unique', 'np.unique', (['Labels.values'], {}), '(Labels.values)\n', (2751, 2766), True, 'import numpy as np\n'), ((3814, 3839), 'numpy.zeros', 'np.zeros', (['new_label.shape'], {}), '(new_label.shape)\n', (3822, 3839), True, 'import numpy as np\n'), ((3864, 3889), 'numpy.zeros', 'np.zeros', (['new_label.shape'], {}), '(new_label.shape)\n', (3872, 3889), True, 'import numpy as np\n'), ((3915, 3940), 'numpy.zeros', 'np.zeros', (['new_label.shape'], {}), '(new_label.shape)\n', (3923, 3940), True, 'import numpy as np\n'), ((7381, 7404), 'numpy.argmax', 'np.argmax', (['new_label', '(1)'], {}), '(new_label, 1)\n', (7390, 7404), True, 'import numpy as np\n'), ((1217, 1228), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1225, 1228), True, 
'import pickle as pkl\n'), ((7336, 7362), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['adj'], {}), '(adj)\n', (7357, 7362), True, 'import networkx as nx\n'), ((1611, 1633), 'numpy.array', 'np.array', (['labels_train'], {}), '(labels_train)\n', (1619, 1633), True, 'import numpy as np\n'), ((1662, 1683), 'numpy.array', 'np.array', (['label_test1'], {}), '(label_test1)\n', (1670, 1683), True, 'import numpy as np\n'), ((1711, 1731), 'numpy.array', 'np.array', (['label_val1'], {}), '(label_val1)\n', (1719, 1731), True, 'import numpy as np\n'), ((2583, 2621), 'numpy.concatenate', 'np.concatenate', (['[labels_tr, labels_va]'], {}), '([labels_tr, labels_va])\n', (2597, 2621), True, 'import numpy as np\n'), ((2932, 2968), 'numpy.array', 'np.array', (['Label1.values'], {'dtype': '"""int"""'}), "(Label1.values, dtype='int')\n", (2940, 2968), True, 'import numpy as np\n'), ((4894, 4946), 'numpy.concatenate', 'np.concatenate', (['(data_train1.index, data_val1.index)'], {}), '((data_train1.index, data_val1.index))\n', (4908, 4946), True, 'import numpy as np\n'), ((2248, 2279), 'scipy.sparse.vstack', 'sp.vstack', (['(datas_tr, datas_va)'], {}), '((datas_tr, datas_va))\n', (2257, 2279), True, 'from scipy import sparse as sp\n'), ((5090, 5116), 'numpy.array', 'np.array', (['data_test1.index'], {}), '(data_test1.index)\n', (5098, 5116), True, 'import numpy as np\n'), ((5788, 5827), 'numpy.where', 'np.where', (['(find1 == id_graph2.iloc[i, 1])'], {}), '(find1 == id_graph2.iloc[i, 1])\n', (5796, 5827), True, 'import numpy as np\n'), ((5856, 5895), 'numpy.where', 'np.where', (['(find1 == id_graph2.iloc[i, 0])'], {}), '(find1 == id_graph2.iloc[i, 0])\n', (5864, 5895), True, 'import numpy as np\n'), ((5997, 6036), 'numpy.where', 'np.where', (['(find1 == id_graph2.iloc[i, 0])'], {}), '(find1 == id_graph2.iloc[i, 0])\n', (6005, 6036), True, 'import numpy as np\n'), ((6065, 6104), 'numpy.where', 'np.where', (['(find1 == id_graph2.iloc[i, 1])'], {}), '(find1 == 
id_graph2.iloc[i, 1])\n', (6073, 6104), True, 'import numpy as np\n'), ((6807, 6846), 'numpy.where', 'np.where', (['(find1 == id_graph1.iloc[i, 1])'], {}), '(find1 == id_graph1.iloc[i, 1])\n', (6815, 6846), True, 'import numpy as np\n'), ((6875, 6915), 'numpy.where', 'np.where', (['(index1 == id_graph1.iloc[i, 0])'], {}), '(index1 == id_graph1.iloc[i, 0])\n', (6883, 6915), True, 'import numpy as np\n'), ((7016, 7056), 'numpy.where', 'np.where', (['(index1 == id_graph1.iloc[i, 0])'], {}), '(index1 == id_graph1.iloc[i, 0])\n', (7024, 7056), True, 'import numpy as np\n'), ((7085, 7124), 'numpy.where', 'np.where', (['(find1 == id_graph1.iloc[i, 1])'], {}), '(find1 == id_graph1.iloc[i, 1])\n', (7093, 7124), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from tensorflow.python.ops import gradient_checker
def ceil_div(x, y):
    """Integer division of *x* by *y*, rounding toward positive infinity."""
    quotient, remainder = divmod(x, y)
    return quotient + bool(remainder)
# Test matrix: each entry is [x_shape, list_of_mask_shapes].
# A mask shape of None means the mask covers the full input shape; shapes
# with size-1 axes exercise mask broadcasting.  The off-by-one sizes
# (1023, 127, 63) presumably exercise non-aligned tail handling in the
# bit-packed mask -- TODO confirm against the kernel implementation.
shapes = [
    # [ [32, 32], [ [32, 1] ] ],
    [ [ 64,], [ None, ] ],
    [ [1024,], [ None, ] ],
    [ [1023,], [ None, ] ],
    [ [1024, 128], [ [1024, 1], [1, 128], None ] ],
    [ [1023, 127], [ [1023, 1], [1, 127], None ] ],
    [ [64, 64, 64], [ [64, 64, 1], [64, 1, 64], [1,64,64], [1,64,1], [64,1,1], [1,1,64], [1,1,1], None ] ],
    [ [63, 63, 63], [ [63, 63, 1], [63, 1, 63], [1,63,63], [1,63,1], [63,1,1], [1,1,63], [1,1,1], None ] ],
    [ [16,16,16,16,16], [ [16,16,16,16,1], None ] ],
]
class DropoutTest(tf.test.TestCase):
    """Compares bs.dropout against a NumPy reference over the `shapes` matrix."""

    def testDropout(self):
        # Single-threaded execution keeps the run deterministic/reproducible.
        config = tf.ConfigProto(
            intra_op_parallelism_threads=1,
            inter_op_parallelism_threads=1)
        with self.test_session(config=config) as sess:
            # Seed blocksparse's internal entropy source used by bs.dropout
            # when no explicit mask is supplied.
            bs.set_entropy()
            sess.run(tf.global_variables_initializer())
            # with tf.device("/gpu:0"):
            #     x = tf.ones([10000])*-10.0
            #     g = bs.concrete_gate(x)
            #     g = sess.run(g)
            #     print(g.sum()/g.size)
            #     error = gradient_checker.compute_gradient_error(x, x.shape, g, g.shape) #, extra_feed_dict={ x: cpuX, m: mask }
            #     print(error)
            for dtype in (tf.float16, ):  #tf.float16, tf.bfloat16
                for x_shape, mask_shapes in shapes:
                    for mask_shape in mask_shapes:
                        # None means the mask spans the full input shape.
                        m_shape = x_shape if mask_shape is None else mask_shape
                        cpuO = np.ones(x_shape, dtype=np.float32)
                        # Round-trip through float16 so the CPU reference sees
                        # the same precision the GPU path will.
                        cpuX = np.random.uniform(-1.0, 1.0, x_shape).astype(np.float16).astype(np.float32)
                        # NOTE(review): np.bool was removed in NumPy 1.24;
                        # modern NumPy needs np.bool_ (or plain bool) here.
                        cpuM = np.random.randint(0, 2, size=m_shape, dtype=np.bool)
                        # Pad the boolean mask to a multiple of 32 bits, then
                        # pack it into int32 words (bits reversed within each
                        # byte to match the kernel's expected layout).
                        mask = np.zeros(ceil_div(cpuM.size, 32)*32, dtype=np.bool)
                        mask[:cpuM.size] = cpuM.reshape(-1)
                        mask = np.packbits(mask.reshape(-1,8)[:,::-1]).view(np.int32)
                        # Reference output: this test assumes kept values are
                        # scaled by 1/keep_prob (here 1/0.5 == 2.0).
                        cpuY = cpuX * cpuM.astype(np.float32) * 2.0
                        with tf.device("/gpu:0"):
                            x = tf.placeholder(tf.float32, cpuX.shape)
                            m = tf.placeholder(tf.int32, mask.shape)
                            # Case 1: explicit mask -- output must match cpuY.
                            xf = bs.float_cast(x, dtype=dtype)
                            y, _ = bs.dropout(xf, keep_prob=0.5, mask=m, mask_shape=mask_shape)
                            y = bs.float_cast(y, dtype=tf.float32)
                            devY, = sess.run( [y,], feed_dict={ x: cpuX, m: mask } )
                            # Case 2: internally generated mask on an all-ones
                            # input; only the mean of the output ("norm_sum",
                            # expected near 1.0) is inspected below.
                            xf = bs.float_cast(x, dtype=dtype)
                            y, _ = bs.dropout(xf, keep_prob=0.8, mask_shape=mask_shape)
                            y = bs.float_cast(y, dtype=tf.float32)
                            devO, = sess.run( [y,], feed_dict={ x: cpuO } )
                        # err should be exactly 0 for the explicit-mask case.
                        diff = np.abs(devY - cpuY)
                        print("dype: %8s x_shape: %-20s m_shape: %-20s err: %4.2f norm_sum: %4.2f" % ( dtype.name, str(x_shape), str(mask_shape), diff.sum(), devO.sum()/devO.size ))
                        #np.savetxt( "diff.txt", diff, fmt="%4.2f")
#np.savetxt( "diff.txt", diff, fmt="%4.2f")
# Run the TestCase above via TensorFlow's test runner when executed directly.
if __name__ == "__main__":
    tf.test.main()
| [
"tensorflow.test.main",
"blocksparse.float_cast",
"numpy.random.uniform",
"numpy.abs",
"tensorflow.global_variables_initializer",
"tensorflow.device",
"numpy.ones",
"blocksparse.dropout",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"numpy.random.randint",
"blocksparse.set_entropy"
] | [((3457, 3471), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3469, 3471), True, 'import tensorflow as tf\n'), ((890, 968), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n', (904, 968), True, 'import tensorflow as tf\n'), ((1063, 1079), 'blocksparse.set_entropy', 'bs.set_entropy', ([], {}), '()\n', (1077, 1079), True, 'import blocksparse as bs\n'), ((1101, 1134), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1132, 1134), True, 'import tensorflow as tf\n'), ((1784, 1818), 'numpy.ones', 'np.ones', (['x_shape'], {'dtype': 'np.float32'}), '(x_shape, dtype=np.float32)\n', (1791, 1818), True, 'import numpy as np\n'), ((1957, 2009), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'm_shape', 'dtype': 'np.bool'}), '(0, 2, size=m_shape, dtype=np.bool)\n', (1974, 2009), True, 'import numpy as np\n'), ((3155, 3174), 'numpy.abs', 'np.abs', (['(devY - cpuY)'], {}), '(devY - cpuY)\n', (3161, 3174), True, 'import numpy as np\n'), ((2339, 2358), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (2348, 2358), True, 'import tensorflow as tf\n'), ((2393, 2431), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'cpuX.shape'], {}), '(tf.float32, cpuX.shape)\n', (2407, 2431), True, 'import tensorflow as tf\n'), ((2464, 2500), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'mask.shape'], {}), '(tf.int32, mask.shape)\n', (2478, 2500), True, 'import tensorflow as tf\n'), ((2539, 2568), 'blocksparse.float_cast', 'bs.float_cast', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (2552, 2568), True, 'import blocksparse as bs\n'), ((2604, 2664), 'blocksparse.dropout', 'bs.dropout', (['xf'], {'keep_prob': '(0.5)', 'mask': 'm', 'mask_shape': 'mask_shape'}), '(xf, keep_prob=0.5, mask=m, mask_shape=mask_shape)\n', 
(2614, 2664), True, 'import blocksparse as bs\n'), ((2700, 2734), 'blocksparse.float_cast', 'bs.float_cast', (['y'], {'dtype': 'tf.float32'}), '(y, dtype=tf.float32)\n', (2713, 2734), True, 'import blocksparse as bs\n'), ((2857, 2886), 'blocksparse.float_cast', 'bs.float_cast', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (2870, 2886), True, 'import blocksparse as bs\n'), ((2922, 2974), 'blocksparse.dropout', 'bs.dropout', (['xf'], {'keep_prob': '(0.8)', 'mask_shape': 'mask_shape'}), '(xf, keep_prob=0.8, mask_shape=mask_shape)\n', (2932, 2974), True, 'import blocksparse as bs\n'), ((3010, 3044), 'blocksparse.float_cast', 'bs.float_cast', (['y'], {'dtype': 'tf.float32'}), '(y, dtype=tf.float32)\n', (3023, 3044), True, 'import blocksparse as bs\n'), ((1850, 1887), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', 'x_shape'], {}), '(-1.0, 1.0, x_shape)\n', (1867, 1887), True, 'import numpy as np\n')] |
'''
Copyright (c) 2021. IIP Lab, Wuhan University
'''
import os
import argparse
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras import backend as K
from data import *
from train import *
from layers import ProductOfExpertGaussian as POE
### Modality to their short name
### ("V"/"A"/"T" -- presumably visual / audio / textual, matching the
### extractor names below; confirm against the data pipeline)
mod_rep_dict = {
    "resnet50" : "V",
    "audiovgg" : "A",
    "fudannlp" : "T",
}
### Short name to the modalities (inverse of mod_rep_dict)
rep_mod_dict = \
    {value: key for key, value in mod_rep_dict.items()}
### Modality to their shape (feature-vector dimensionality per modality)
mod_shape_dict = {
    "resnet50" : 128,
    "audiovgg" : 128,
    "fudannlp" : 20,
}
def ord_rep(rep_str):
    """Return the modality letters of *rep_str* in canonical "V", "A", "T" order.

    E.g. "TAV" -> "VAT", "AV" -> "VA".  Letters outside V/A/T are dropped.
    """
    # Iterate the canonical order so the output never depends on input order.
    # (Replaces a manual string-accumulation loop with an unused enumerate index.)
    return "".join(letter for letter in ("V", "A", "T") if letter in rep_str)
def rep2mods(rep_str):
    """Expand a short modality string (e.g. "VA") to the full modality names.

    The result follows the canonical "V", "A", "T" order regardless of the
    letter order inside *rep_str*.
    """
    # Comprehension over the canonical order (replaces a manual append loop
    # with an unused enumerate index).
    return [rep_mod_dict[letter] for letter in ("V", "A", "T") if letter in rep_str]
def mods2index(mods_list, mod_pos_dict):
    """Map each modality name to its position index; return the indices sorted ascending."""
    return sorted(mod_pos_dict[modality] for modality in mods_list)
def get_model_info(model_path):
    """Parse training metadata out of a checkpoint path.

    The path layout is .../<encodertype>/len_<L>/<split>/<lambda>/<...>/<file>,
    read from the right-hand end so any prefix is tolerated.
    """
    parts = model_path.split(os.path.sep)
    return {
        "encodertype": parts[-6],
        "length": int(parts[-5].split("_")[-1]),
        "split": int(parts[-4]),
        "lambda": float(parts[-3]),
    }
def get_testgen(feature_root, target_root, split_root, test_mods, phase):
    """Build the data generator used at evaluation time.

    Shuffling is disabled so batch order stays aligned with the prediction
    buffers; features are kept separate per modality (concat=False).
    """
    generator_kwargs = dict(
        phase=phase,
        feature_root=feature_root,
        target_root=target_root,
        split_root=split_root,
        modalities=test_mods,
        batch_size=128,
        shuffle=False,  # order must be stable during evaluation
        concat=False,
    )
    return VariationalEncoderDecoderGen(**generator_kwargs)
def build_test_model(model_path,
                     train_shapes,
                     test_mods,
                     rnn_type,
                     mod_pos_dict,
                     modalities,
                     summary=False):
    """Rebuild the trained model from *model_path* and wire the test-time graph.

    For the user encoder (modalities == ["user"]) the return value is a
    two-element list: [embedding model (mean/std outputs), prediction model].
    Otherwise a single keras model is returned that fuses the encoders of the
    modalities available at test time through a product-of-experts and decodes
    the prediction sequence.
    """
    model = get_model(train_shapes, rnn_type, modalities, summary=False)
    model.load_weights(model_path)
    if modalities == ["user"]:
        ### Get the input tensors: uid, modality indicator and abstract input
        abst_in = model.inputs[-1]
        uid_in = model.inputs[0]
        mods_in = model.inputs[1]
        # Re-apply the trained embedding layers to the uid input.
        uid_emb = model.get_layer("uid_emb")(uid_in)
        uid_emb = model.get_layer("uid_emb_reshape")(uid_emb)
        concat = layers.Concatenate(axis=-1)([uid_emb, mods_in])
        # Encoder output is a [mean, std] pair; the mean is fed to the decoder.
        mean_stds = model.encoders[0](concat)
        mean = mean_stds[0]
        input_space = [uid_in] + [mods_in] + [abst_in]
        preds_seq = model.decoder([mean, abst_in])
        ### Model 0: exposes the learnt user embeddings (mean and std)
        test_model = [models.Model(inputs=input_space, outputs=mean_stds)]
        ### Model 1: end-to-end prediction for evaluation
        test_model.append(models.Model(inputs=input_space, outputs=preds_seq))
        if summary:
            [test_model[i].summary() for i in range(len(test_model))]
    else:
        ### Get index for each modality available at test time
        mod_idxes = mods2index(test_mods, mod_pos_dict)
        ### Get the input tensors indicated by mod_idxes
        # (inputs[0] is the user embedding, inputs[-1] the abstract input;
        # the per-modality inputs sit in between)
        uemb_in = model.inputs[0]
        mods_in = [model.inputs[1:-1][i] for i in mod_idxes]
        abst_in = model.inputs[-1]
        ### Build the model for prediction: fuse the selected encoders'
        ### Gaussians with a product-of-experts, decode from the fused mean.
        encoders = [model.encoders[i] for i in mod_idxes]
        mean_stds = [encoder(mod_in) for encoder, mod_in in zip(encoders, mods_in)]
        mean, _ = POE()(mean_stds)
        preds_seq = model.decoder([mean, abst_in])
        test_model = models.Model(inputs=[uemb_in]+mods_in+[abst_in], outputs=preds_seq)
        if summary:
            test_model.summary()
    return test_model
def user_predict(model, test_gen, pred_path):
    """Run the user-encoder prediction model over *test_gen*.

    *model* is the [embedding_model, prediction_model] pair produced by
    build_test_model; predictions come from model[1], collected batch by
    batch.  Returns (preds, truth), each of shape (num_videos, timesteps);
    preds is additionally saved to *pred_path* unless it is None.
    """
    n_videos = test_gen.num_videos
    bsz = test_gen.batch_size
    steps = test_gen.timesteps
    preds = np.empty((n_videos, steps), dtype=np.float32)
    truth = np.empty((n_videos, steps), dtype=np.float32)
    for batch_idx, (features, target) in enumerate(test_gen):
        rows = slice(batch_idx * bsz, (batch_idx + 1) * bsz)
        preds[rows] = np.squeeze(model[1].predict(features)).squeeze()
        truth[rows] = target.squeeze()
    if pred_path is not None:
        print("Prediction has been saved to {}".format(pred_path))
        np.save(pred_path, preds)
    return preds, truth
def uemb_output(model, test_gen, emb_path):
    """Collect the user-embedding mean/std produced by model[0] for every video.

    The stacked result has shape (num_videos, 2, emb_dim): index 0 along the
    second axis is the mean, index 1 the standard deviation.  It is written to
    *emb_path* unless that is None.  Nothing is returned.

    Fix: the original called model[0].predict(features) twice per batch (once
    for the mean, once for the std), doubling inference cost; a single call
    now feeds both buffers.
    """
    num_videos = test_gen.num_videos
    batch_size = test_gen.batch_size
    emb_dim = model[0].output_shape[0][-1]
    ### buffers for the two Gaussian moments
    uemb_mean = np.empty((num_videos, emb_dim), dtype=np.float32)
    uemb_std = np.empty((num_videos, emb_dim), dtype=np.float32)
    for i, [features, _target] in enumerate(test_gen):
        rows = slice(i * batch_size, (i + 1) * batch_size)
        mean_std = model[0].predict(features)  # [mean, std] pair
        uemb_mean[rows] = mean_std[0].squeeze()
        uemb_std[rows] = mean_std[1].squeeze()
    # Stack to (num_videos, 2, emb_dim): [:, 0, :] = mean, [:, 1, :] = std.
    uemb = np.concatenate((uemb_mean[:, None, :], uemb_std[:, None, :]), axis=1)
    if emb_path is not None:
        print("User embeddings have been saved to {}".format(emb_path))
        np.save(emb_path, uemb)
def predict(test_model, test_gen, save_path):
    """Run *test_model* over every batch of *test_gen*.

    Returns (preds, truth), each of shape (num_videos, timesteps); preds is
    additionally written to *save_path* unless it is None.
    """
    n_videos = test_gen.num_videos
    bsz = test_gen.batch_size
    steps = test_gen.timesteps
    preds = np.empty([n_videos, steps], dtype=np.float32)
    truth = np.empty([n_videos, steps], dtype=np.float32)
    for batch_idx, (features, targets) in enumerate(test_gen):
        rows = slice(batch_idx * bsz, (batch_idx + 1) * bsz)
        preds[rows] = test_model.predict(features).squeeze()
        truth[rows] = targets.squeeze()
    if save_path is not None:
        print("Prediction saved to {}".format(save_path))
        np.save(save_path, preds)
    return preds, truth
def evaluate(preds, truth, save_path):
    """Score predictions against ground truth and append the result to a TSV.

    Three metrics are computed: normalized MSE, the mean per-sample Pearson
    correlation and the mean per-sample Spearman correlation.  Samples whose
    correlation is NaN (e.g. constant rows) are excluded from the averages.
    The metrics are printed, appended to *save_path* (tab-separated) and
    returned as (nmse, corr, srcc).
    """
    def _mean_over_samples(metric):
        # Average *metric* over sample rows, skipping NaN results.
        total = 0
        counted = len(preds)
        for sample_pred, sample_true in zip(preds, truth):
            value = metric(pd.Series(sample_pred), pd.Series(sample_true))
            if np.isnan(value):
                counted -= 1
                continue
            total += value
        return total / counted

    nmse = np.mean(np.square(preds - truth)) / (truth.std() ** 2)
    corr = _mean_over_samples(lambda p, t: p.corr(t))
    srcc = _mean_over_samples(lambda p, t: spearmanr(p, t)[0])
    table = pd.DataFrame({
        "nmse": [nmse],
        "corr": [corr],
        "srcc": [srcc]})
    print("test nmse: {:.4f}".format(nmse))
    print("test corr: {:.4f}".format(corr))
    print("test srcc: {:.4f}".format(srcc))
    table.to_csv(save_path, mode='a', index=False, sep="\t")
    return nmse, corr, srcc
def test_run(model_path, rnn_type="simple", abbr_test_mods="U", device="0"):
    """Evaluate the checkpoint at *model_path* and return (nmse, corr, srcc).

    *abbr_test_mods* names the modalities available at test time as short
    letters (e.g. "VA"); "U" selects the user-encoder path.  Predictions and
    evaluation scores are written under <model_dir>/test/<modalities>/.
    """
    ### Set tensorflow session (one fresh graph/session per run)
    tf.reset_default_graph()
    os.environ["CUDA_VISIBLE_DEVICES"] = device
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)
    ### Save path to the prediction result
    model_info = get_model_info(model_path)
    model_root = os.path.split(model_path)[0]
    # std_mods is imported elsewhere (presumably from train);
    # it appears to normalize the modality abbreviation -- TODO confirm.
    test_root = os.path.join(model_root, "test", std_mods(abbr_test_mods))
    if not os.path.exists(test_root):
        os.makedirs(test_root)
    pred_path = os.path.join(test_root, "predict.npy")
    ### Get the test data generator roots
    feature_root = os.path.join("data")
    split_root = os.path.join(feature_root, "split", str(model_info["split"]))
    target_root = os.path.join(feature_root, "len_{}".format(model_info["length"]))
    ### Get the model for prediction
    if model_info["encodertype"] == "user":
        train_mods = ["user"]
        mod_pos_dict = {"user": 0}
        uemb_path = os.path.join(feature_root, "user_emb.npy")
        test_mods = train_mods
        train_shapes = [[1], [3]] + [[model_info["length"], 1]]
        test_model = build_test_model(model_path, train_shapes, test_mods, rnn_type, mod_pos_dict, train_mods)
        test_gen = get_testgen(feature_root, target_root, split_root, test_mods, phase="test")
        ### Evaluation on the test split
        preds, truth = user_predict(test_model, test_gen, pred_path)
        ### Dump learnt user embeddings over the full dataset
        uemb_gen = get_testgen(feature_root, target_root, split_root, test_mods, phase="all")
        uemb_output(test_model, uemb_gen, uemb_path)
    else:
        # Content encoders are trained on all three modalities; only the
        # requested subset is fed at test time.
        train_mods = ["resnet50", "audiovgg", "fudannlp"]
        mod_pos_dict = {mod: train_mods.index(mod) for mod in mod_rep_dict.keys()}
        test_mods = rep2mods(ord_rep(abbr_test_mods))
        train_shapes = [[2, 8]] + [[mod_shape_dict[mod]] for mod in train_mods] + [[model_info["length"], 1]]
        test_model = build_test_model(model_path, train_shapes, test_mods, rnn_type, mod_pos_dict, train_mods)
        test_gen = get_testgen(feature_root, target_root, split_root, test_mods, phase="test")
        preds, truth = predict(test_model, test_gen, pred_path)
    ### Evaluate model with numerous indexes
    eval_path = os.path.join(test_root, "eval.txt")
    nmse, corr, srcc = evaluate(preds, truth, eval_path)
    K.clear_session()
    return nmse, corr, srcc
if __name__ == '__main__':
    # Command-line entry point: evaluate a pre-trained checkpoint with the
    # requested decoder type, test-time modalities and GPU device.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str,
                        help="path where the pre-trained model is stored.")
    parser.add_argument("--rnn_type", type=str, default="simple",
                        help="type of decoder")
    parser.add_argument("--test_mods", type=str, default="U",
                        help="modalities available in the test phase")
    parser.add_argument("--device", type=str, default="0",
                        help="specify the GPU device")
    args = parser.parse_args()
    test_run(model_path=args.model_path, rnn_type=args.rnn_type, abbr_test_mods=args.test_mods, device=args.device)
"argparse.ArgumentParser",
"numpy.empty",
"tensorflow.reset_default_graph",
"numpy.isnan",
"tensorflow.ConfigProto",
"os.path.join",
"pandas.DataFrame",
"tensorflow.keras.layers.Concatenate",
"os.path.exists",
"layers.ProductOfExpertGaussian",
"numpy.save",
"tensorflow.keras.backend.clear_sess... | [((4339, 4390), 'numpy.empty', 'np.empty', (['(num_videos, timesteps)'], {'dtype': 'np.float32'}), '((num_videos, timesteps), dtype=np.float32)\n', (4347, 4390), True, 'import numpy as np\n'), ((4404, 4455), 'numpy.empty', 'np.empty', (['(num_videos, timesteps)'], {'dtype': 'np.float32'}), '((num_videos, timesteps), dtype=np.float32)\n', (4412, 4455), True, 'import numpy as np\n'), ((5140, 5189), 'numpy.empty', 'np.empty', (['(num_videos, emb_dim)'], {'dtype': 'np.float32'}), '((num_videos, emb_dim), dtype=np.float32)\n', (5148, 5189), True, 'import numpy as np\n'), ((5206, 5255), 'numpy.empty', 'np.empty', (['(num_videos, emb_dim)'], {'dtype': 'np.float32'}), '((num_videos, emb_dim), dtype=np.float32)\n', (5214, 5255), True, 'import numpy as np\n'), ((5522, 5591), 'numpy.concatenate', 'np.concatenate', (['(uemb_mean[:, None, :], uemb_std[:, None, :])'], {'axis': '(1)'}), '((uemb_mean[:, None, :], uemb_std[:, None, :]), axis=1)\n', (5536, 5591), True, 'import numpy as np\n'), ((5911, 5962), 'numpy.empty', 'np.empty', (['[num_videos, timesteps]'], {'dtype': 'np.float32'}), '([num_videos, timesteps], dtype=np.float32)\n', (5919, 5962), True, 'import numpy as np\n'), ((5976, 6027), 'numpy.empty', 'np.empty', (['[num_videos, timesteps]'], {'dtype': 'np.float32'}), '([num_videos, timesteps], dtype=np.float32)\n', (5984, 6027), True, 'import numpy as np\n'), ((7522, 7584), 'pandas.DataFrame', 'pd.DataFrame', (["{'nmse': [nmse], 'corr': [corr], 'srcc': [srcc]}"], {}), "({'nmse': [nmse], 'corr': [corr], 'srcc': [srcc]})\n", (7534, 7584), True, 'import pandas as pd\n'), ((7973, 7997), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7995, 7997), True, 'import tensorflow as tf\n'), ((8061, 8077), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (8075, 8077), True, 'import tensorflow as tf\n'), ((8134, 8159), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', 
(8144, 8159), True, 'import tensorflow as tf\n'), ((8165, 8184), 'tensorflow.keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (8178, 8184), True, 'from tensorflow.keras import backend as K\n'), ((8487, 8525), 'os.path.join', 'os.path.join', (['test_root', '"""predict.npy"""'], {}), "(test_root, 'predict.npy')\n", (8499, 8525), False, 'import os\n'), ((8585, 8605), 'os.path.join', 'os.path.join', (['"""data"""'], {}), "('data')\n", (8597, 8605), False, 'import os\n'), ((10233, 10268), 'os.path.join', 'os.path.join', (['test_root', '"""eval.txt"""'], {}), "(test_root, 'eval.txt')\n", (10245, 10268), False, 'import os\n'), ((10334, 10351), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (10349, 10351), True, 'from tensorflow.keras import backend as K\n'), ((10427, 10452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10450, 10452), False, 'import argparse\n'), ((3930, 4001), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': '([uemb_in] + mods_in + [abst_in])', 'outputs': 'preds_seq'}), '(inputs=[uemb_in] + mods_in + [abst_in], outputs=preds_seq)\n', (3942, 4001), False, 'from tensorflow.keras import layers, models\n'), ((4832, 4857), 'numpy.save', 'np.save', (['pred_path', 'preds'], {}), '(pred_path, preds)\n', (4839, 4857), True, 'import numpy as np\n'), ((5706, 5729), 'numpy.save', 'np.save', (['emb_path', 'uemb'], {}), '(emb_path, uemb)\n', (5713, 5729), True, 'import numpy as np\n'), ((6340, 6365), 'numpy.save', 'np.save', (['save_path', 'preds'], {}), '(save_path, preds)\n', (6347, 6365), True, 'import numpy as np\n'), ((8294, 8319), 'os.path.split', 'os.path.split', (['model_path'], {}), '(model_path)\n', (8307, 8319), False, 'import os\n'), ((8411, 8436), 'os.path.exists', 'os.path.exists', (['test_root'], {}), '(test_root)\n', (8425, 8436), False, 'import os\n'), ((8447, 8469), 'os.makedirs', 'os.makedirs', (['test_root'], {}), '(test_root)\n', (8458, 8469), False, 
'import os\n'), ((8944, 8986), 'os.path.join', 'os.path.join', (['feature_root', '"""user_emb.npy"""'], {}), "(feature_root, 'user_emb.npy')\n", (8956, 8986), False, 'import os\n'), ((2782, 2809), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2800, 2809), False, 'from tensorflow.keras import layers, models\n'), ((3079, 3130), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'input_space', 'outputs': 'mean_stds'}), '(inputs=input_space, outputs=mean_stds)\n', (3091, 3130), False, 'from tensorflow.keras import layers, models\n'), ((3183, 3234), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'input_space', 'outputs': 'preds_seq'}), '(inputs=input_space, outputs=preds_seq)\n', (3195, 3234), False, 'from tensorflow.keras import layers, models\n'), ((3839, 3844), 'layers.ProductOfExpertGaussian', 'POE', ([], {}), '()\n', (3842, 3844), True, 'from layers import ProductOfExpertGaussian as POE\n'), ((6685, 6704), 'numpy.isnan', 'np.isnan', (['corr_this'], {}), '(corr_this)\n', (6693, 6704), True, 'import numpy as np\n'), ((7134, 7153), 'numpy.isnan', 'np.isnan', (['corr_this'], {}), '(corr_this)\n', (7142, 7153), True, 'import numpy as np\n'), ((6648, 6667), 'pandas.Series', 'pd.Series', (['truth[i]'], {}), '(truth[i])\n', (6657, 6667), True, 'import pandas as pd\n'), ((7076, 7095), 'pandas.Series', 'pd.Series', (['preds[i]'], {}), '(preds[i])\n', (7085, 7095), True, 'import pandas as pd\n'), ((7097, 7116), 'pandas.Series', 'pd.Series', (['truth[i]'], {}), '(truth[i])\n', (7106, 7116), True, 'import pandas as pd\n'), ((7350, 7374), 'numpy.square', 'np.square', (['(preds - truth)'], {}), '(preds - truth)\n', (7359, 7374), True, 'import numpy as np\n'), ((6623, 6642), 'pandas.Series', 'pd.Series', (['preds[i]'], {}), '(preds[i])\n', (6632, 6642), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# This script deals with color conversions and color transformations.
#
# copyright (C) 2014-2017 <NAME> | <EMAIL>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# LICENSE:
#
# colcol is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# colcol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# I have added several formatting and functional modifications to this
# file to make it better suit my requirements. @itsthejoker, 2019/1/10
__version__ = "0.13.0"
import colorsys
import math
import re
from typing import List, Tuple, Union
import numpy
def ColorDistance(rgb1: Union[Tuple, List], rgb2: Union[Tuple, List]) -> float:
    """
    Approximate perceptual distance between two RGB colors ("redmean").

    Implements the low-cost approximation from
    https://www.compuphase.com/cmetric.htm: the red and blue channel weights
    are interpolated according to the mean red level, which tracks human
    color sensitivity far better than plain Euclidean distance.

    Fix: the previous version weighted the channels with ``(2 + rm, 4, 3 - rm)``
    where ``rm`` is the 0-255 mean red value.  For colors with a large blue
    difference and a high mean red the weighted sum went negative and
    ``math.sqrt`` raised ``ValueError`` (e.g. yellow vs. magenta).  The
    weights are now normalized with ``rm / 256`` as the formula requires.

    :param rgb1: Tuple or list; the first RGB color (channels 0-255).
    :param rgb2: Tuple or list; the second RGB color (channels 0-255).
    :return: float; non-negative approximate distance between the colors.
    """
    c1 = numpy.asarray(rgb1, dtype=float)
    c2 = numpy.asarray(rgb2, dtype=float)
    mean_red = 0.5 * (c1[0] + c2[0])
    weights = numpy.array([2.0 + mean_red / 256.0,
                          4.0,
                          2.0 + (255.0 - mean_red) / 256.0])
    return math.sqrt(float(numpy.sum(weights * (c1 - c2) ** 2)))
def is_rgb(in_col):
    """
    Check whether input is a valid RGB color:
    a 3-tuple of ints, each within 0..255.
    Return True if it is, otherwise False.

    Fix: the original condition read ``type(in_col[1]) and type(in_col[2])``
    (missing ``is int``), which is always truthy, so non-integer green/blue
    channels such as (0, 0.5, 0) were accepted.  Every channel is now
    required to be an int.  Non-tuple input simply returns False instead of
    possibly raising.
    """
    if type(in_col) is not tuple or len(in_col) != 3:
        return False
    return all(type(channel) is int and 0 <= channel <= 255 for channel in in_col)
def is_hex(in_col):
    """
    Check whether an input string is a valid hex color value:
    exactly six hex digits, optionally prefixed by a single '#'.
    Return True if it is, otherwise False.

    Fix: the original combined '^...$' anchors with re.MULTILINE, so '$'
    could match before an embedded newline and strings such as
    "ffffff\\njunk" were wrongly accepted.  re.fullmatch anchors the whole
    string and closes that hole.
    """
    if type(in_col) is not str:
        return False
    return re.fullmatch(r"#?[0-9a-fA-F]{6}", in_col) is not None
def is_hsl(in_col):
    """
    Check whether an input is a valid HSL color:
    a 3-tuple whose components all lie within [0, 1].
    Return True if it is, otherwise False.
    """
    if len(in_col) == 3 and type(in_col) is tuple:
        return all(0 <= component <= 1 for component in in_col)
    return False
def rgb_to_hex(rgb):
    """
    Convert RGB colors to hex.
    Input should be a tuple of integers (R, G, B), each between 0 and 255.
    Output is a lowercase hex string such as '#ffffff'.
    """
    # reject anything that is not a valid RGB tuple
    assert is_rgb(rgb) is True, "Error, %s is not a valid RGB color." % rgb
    red, green, blue = rgb
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)
def hex_to_rgb(in_col):
    """
    Convert a hex color to RGB.
    Input should be a string such as '#FFFFFF'.
    Output is a tuple of integers (R, G, B).
    """
    # reject anything that is not a valid hex color string
    assert is_hex(in_col) is True, f"Error, {in_col} is not a valid hex color."
    digits = in_col.lstrip("#")
    # split the six digits into three two-digit channels
    return tuple(int(digits[pos:pos + 2], 16) for pos in range(0, len(digits), 2))
def rgb_to_hsl(in_col):
    """
    Convert RGB colors to HSL.
    Input is a tuple of ints (R, G, B), each between 0 and 255.
    Output is a tuple of floats (H, S, L), each between 0.0 and 1.0.
    """
    assert is_rgb(in_col), "Error, %s is not a valid RGB color." % in_col
    # scale every channel into [0, 1] as colorsys expects
    red, green, blue = (channel / 255.0 for channel in in_col)
    hue, lightness, saturation = colorsys.rgb_to_hls(red, green, blue)
    # colorsys returns HLS; reorder to HSL
    return (hue, saturation, lightness)
def hsl_to_rgb(in_col):
    """
    Convert HSL colors to RGB.
    Input is a tuple of floats (H, S, L), each between 0.0 and 1.0.
    Output is a tuple of ints (R, G, B), each between 0 and 255.
    """
    assert is_hsl(in_col), f"Error, {str(in_col)} is not a valid HSL color."
    hue, saturation, lightness = in_col
    # colorsys expects HLS ordering
    rgb_float = colorsys.hls_to_rgb(hue, lightness, saturation)
    # scale back to 0-255 integers
    return tuple(int(round(255 * channel)) for channel in rgb_float)
def hex_to_hsl(in_col):
    """
    Convert a hex color string (e.g. '#FFFFFF') to an HSL tuple (h, s, l).
    """
    rgb = hex_to_rgb(in_col)
    return rgb_to_hsl(rgb)
def hsl_to_hex(in_col):
    """
    Convert an HSL tuple to a hex color string.
    """
    rgb = hsl_to_rgb(in_col)
    return rgb_to_hex(rgb)
class Color:
"""
A color class
"""
def __init__(self, in_col):
self.colors = {
"aliceblue": "#F0F8FF",
"antiquewhite": "#FAEBD7",
"aqua": "#00FFFF",
"aquamarine": "#7FFFD4",
"azure": "#F0FFFF",
"beige": "#F5F5DC",
"bisque": "#FFE4C4",
"black": "#000000",
"blanchedalmond": "#FFEBCD",
"blue": "#0000FF",
"blueviolet": "#8A2BE2",
"brown": "#A52A2A",
"burlywood": "#DEB887",
"cadetblue": "#5F9EA0",
"chartreuse": "#7FFF00",
"chocolate": "#D2691E",
"coral": "#FF7F50",
"cornflowerblue": "#6495ED",
"cornsilk": "#FFF8DC",
"crimson": "#DC143C",
"cyan": "#00FFFF",
"darkblue": "#00008B",
"darkcyan": "#008B8B",
"darkgoldenrod": "#B8860B",
"darkgrey": "#A9A9A9",
"darkgreen": "#006400",
"darkkhaki": "#BDB76B",
"darkmagenta": "#8B008B",
"darkolivegreen": "#556B2F",
"darkorange": "#FF8C00",
"darkorchid": "#9932CC",
"darkred": "#8B0000",
"darksalmon": "#E9967A",
"darkseagreen": "#8FBC8F",
"darkslateblue": "#483D8B",
"darkslategrey": "#2F4F4F",
"darkturquoise": "#00CED1",
"darkviolet": "#9400D3",
"deeppink": "#FF1493",
"deepskyblue": "#00BFFF",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1E90FF",
"firebrick": "#B22222",
"floralwhite": "#FFFAF0",
"forestgreen": "#228B22",
"fuchsia": "#FF00FF",
"gainsboro": "#DCDCDC",
"ghostwhite": "#F8F8FF",
"gold": "#FFD700",
"goldenrod": "#DAA520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#ADFF2F",
"honeydew": "#F0FFF0",
"hotpink": "#FF69B4",
"indianred": "#CD5C5C",
"indigo": "#4B0082",
"ivory": "#FFFFF0",
"khaki": "#F0E68C",
"lavender": "#E6E6FA",
"lavenderblush": "#FFF0F5",
"lawngreen": "#7CFC00",
"lemonchiffon": "#FFFACD",
"lightblue": "#ADD8E6",
"lightcoral": "#F08080",
"lightcyan": "#E0FFFF",
"lightgoldenrodyellow": "#FAFAD2",
"lightgrey": "#D3D3D3",
"lightgreen": "#90EE90",
"lightpink": "#FFB6C1",
"lightsalmon": "#FFA07A",
"lightseagreen": "#20B2AA",
"lightskyblue": "#87CEFA",
"lightslategrey": "#778899",
"lightsteelblue": "#B0C4DE",
"lightyellow": "#FFFFE0",
"lime": "#00FF00",
"limegreen": "#32CD32",
"linen": "#FAF0E6",
"magenta": "#FF00FF",
"maroon": "#800000",
"mediumaquamarine": "#66CDAA",
"mediumblue": "#0000CD",
"mediumorchid": "#BA55D3",
"mediumpurple": "#9370DB",
"mediumseagreen": "#3CB371",
"mediumslateblue": "#7B68EE",
"mediumspringgreen": "#00FA9A",
"mediumturquoise": "#48D1CC",
"mediumvioletred": "#C71585",
"midnightblue": "#191970",
"mintcream": "#F5FFFA",
"mistyrose": "#FFE4E1",
"moccasin": "#FFE4B5",
"navajowhite": "#FFDEAD",
"navy": "#000080",
"oldlace": "#FDF5E6",
"olive": "#808000",
"olivedrab": "#6B8E23",
"orange": "#FFA500",
"orangered": "#FF4500",
"orchid": "#DA70D6",
"palegoldenrod": "#EEE8AA",
"palegreen": "#98FB98",
"paleturquoise": "#AFEEEE",
"palevioletred": "#DB7093",
"papayawhip": "#FFEFD5",
"peachpuff": "#FFDAB9",
"peru": "#CD853F",
"pink": "#FFC0CB",
"plum": "#DDA0DD",
"powderblue": "#B0E0E6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#FF0000",
"rosybrown": "#BC8F8F",
"royalblue": "#4169E1",
"saddlebrown": "#8B4513",
"salmon": "#FA8072",
"sandybrown": "#F4A460",
"seagreen": "#2E8B57",
"seashell": "#FFF5EE",
"sienna": "#A0522D",
"silver": "#C0C0C0",
"skyblue": "#87CEEB",
"slateblue": "#6A5ACD",
"slategrey": "#708090",
"snow": "#FFFAFA",
"springgreen": "#00FF7F",
"steelblue": "#4682B4",
"tan": "#D2B48C",
"teal": "#008080",
"thistle": "#D8BFD8",
"tomato": "#FF6347",
"turquoise": "#40E0D0",
"violet": "#EE82EE",
"wheat": "#F5DEB3",
"white": "#FFFFFF",
"whitesmoke": "#F5F5F5",
"yellow": "#FFFF00",
"yellowgreen": "#9ACD32",
}
# make sure the input color is valid
assert (
is_rgb(in_col) or is_hex(in_col) or in_col.lower() in self.colors.keys()
), (
f'Error, the input color "{in_col}" is not a valid rgb color, hex '
f"color or named color"
)
if (
is_rgb(in_col) is False
and is_hex(in_col) is False
and in_col.lower() in self.colors.keys()
):
in_col = self.colors[in_col.lower()]
# set variables
self._set_color(in_col)
    def __str__(self):
        """
        Return the color in its input format: a hex string if the color
        was supplied as hex, otherwise the string form of the rgb tuple.
        """
        if self.get_format() == "hex":
            return str(rgb_to_hex(self.col))
        else:
            return str(self.col)
    def __repr__(self):
        """
        Use the same representation as __str__ (the color in its input
        format).
        """
        return self.__str__()
    def __eq__(self, other):
        """
        Compare this color against another Color instance or against a raw
        hex string / rgb tuple, honouring this color's input format.
        """
        # NOTE(review): defining __eq__ without __hash__ makes Color
        # instances unhashable; add __hash__ if set/dict membership is needed.
        if isinstance(other, self.__class__):
            if self.get_format() == "hex":
                return str(rgb_to_hex(self.col)) == other.__str__()
            else:
                return self.col == other.__str__()
        else:
            # comparing against a plain value (hex string or rgb tuple)
            if self.get_format() == "hex":
                return str(rgb_to_hex(self.col)) == other
            else:
                return self.col == other
    def distance_to(self, color) -> "float | None":
        """
        Convenience function for ColorDistance().

        Pass in another instance of a color and this will return the relative
        distance between this color and the second. Or None if it can't be
        computed, which happens sometimes when one of the colors is close to
        black. I don't really know why.
        """
        # None signals "distance could not be computed"
        error_state = None
        if not isinstance(color, Color):
            raise ValueError("Must compare instances of Color!")
        try:
            # ColorDistance is defined elsewhere in this module
            distance = ColorDistance(self.rgb(), color.rgb())
        except (ValueError, RuntimeWarning): # math or numpy pitching a fit
            return error_state
        # NOTE(review): this compares against the *string* "nan"; if
        # ColorDistance returns a float NaN this branch never fires and
        # math.isnan() may be what was intended — confirm its return type.
        if distance == "nan":
            return error_state
        return distance
    def _set_color(self, in_col):
        """
        Private method to set the color variables.

        Stores the color internally as an RGB tuple in ``self.col`` and
        remembers in ``self._in_format`` whether the caller supplied it as
        "hex" or "rgb".
        """
        # check whether input is hex, if it is, make it RGB
        if is_hex(in_col):
            self.col = hex_to_rgb(in_col)
            self._in_format = "hex"
        else:
            self.col = in_col
            self._in_format = "rgb"
    def get_format(self):
        """
        Return the format in which the input color was specified, as a
        string: either "rgb" or "hex".
        """
        return self._in_format
def info(self):
"""
Print information about the color represented by the object.
"""
print('Input (and output) color format: "%s"' % self.get_format())
print("RGB: %s" % str(self.rgb()))
print("HEX: %s" % str(self.hex()))
print("HSL: %s" % str(self.hsl()))
def hex(self):
"""
Convenience function to get the hex value of a color.
"""
return rgb_to_hex(self.col) # type(self)(rgb_to_hex(self.col))
def rgb(self):
"""
Convenience function to get the rgb value of a color.
"""
return self.col # type(self)(self.col)
def hsl(self):
"""
Convenience function to get the hsl value of a color.
"""
return rgb_to_hsl(self.col) # type(self)(rgb_to_hsl(self.col))
def set_h(self, value):
"""
Set hue of color from 0 to 1 and return as new color.
"""
assert 0 <= value <= 1
h, s, l = self.hsl()
if self.get_format == "hex":
new_col = hsl_to_hex((value, s, l))
else:
new_col = hsl_to_rgb((value, s, l))
return Color(new_col)
def set_s(self, value):
"""
Set saturation of color from 0 to 1 and return as new color.
"""
assert 0 <= value <= 1
h, s, l = self.hsl()
if self.get_format == "hex":
new_col = hsl_to_hex((h, value, l))
else:
new_col = hsl_to_rgb((h, value, l))
return Color(new_col)
def set_l(self, value):
"""
Set lightness of color from 0 to 1 and return as new color.
"""
assert 0 <= value <= 1
h, s, l = self.hsl()
if self.get_format == "hex":
new_col = hsl_to_hex((h, s, value))
else:
new_col = hsl_to_rgb((h, s, value))
return Color(new_col)
def complementary(self):
"""
Returns input color and its complementary color as a list of hex or
rgb values, depending on what was submitted.
O
x x
x x
x x
x x
x x
O
"""
# RGB -> HSL
h, s, l = rgb_to_hsl(self.col)
# Rotation by 180 degrees
h = (h + 0.5) % 1
color = hsl_to_rgb((h, s, l)) # HSL -> new RGB
# Prepare colors for returning them
if self.get_format() == "hex":
colors = [rgb_to_hex(self.col), rgb_to_hex(tuple(color))]
else:
colors = [self.col, tuple(color)]
# return as list of color objects
return [Color(s) for s in colors]
def split_complementary(self):
"""
Returns input color and its split complementary colors (those adjecent to the complement)
as a list of of hex or rgb values, depending on what was submitted.
x
O O
x x
x x
x x
x x
O
"""
# RGB -> HSL
h, s, l = rgb_to_hsl(self.col)
# Rotation by 150 degrees
angle = 150 / 360.0
h_list = [(h + ang) % 1 for ang in (-angle, angle)]
analagous = [hsl_to_rgb((h, s, l)) for h in h_list] # HSL -> new RGB
# add all the colors together
colors = [self.col, tuple(analagous[0]), tuple(analagous[1])]
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def triadic(self):
"""
Returns input color as well as the two triadic colors as a list of hex or rgb values, depending on what was submitted.
x
x x
O O
x x
x x
x x
O
#first color is wrong!
"""
# RGB -> HSL
h, s, l = rgb_to_hsl(self.col)
# Rotation by 120 degrees
angle = 120 / 360.0
h_list = [(h + ang) % 1 for ang in (-angle, angle)]
analagous = [hsl_to_rgb((h, s, l)) for h in h_list] # HSL -> new RGB
# add all the colors together
colors = [self.col, tuple(analagous[0]), tuple(analagous[1])]
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def square(self):
"""
O
x x
x x
O O
x x
x x
O
"""
# RGB -> HSL
h, s, l = rgb_to_hsl(self.col)
# Rotation by 90 degrees
angle = 90 / 360.0
h_list = [(h + ang) % 1 for ang in (-angle, angle, angle * 2)]
analagous = [hsl_to_rgb((h, s, l)) for h in h_list] # HSL -> new RGB
# add all the colors together
colors = [
self.col,
tuple(analagous[0]),
tuple(analagous[1]),
tuple(analagous[2]),
]
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def tetradic(self):
"""
O
x x
x O
x x
O x
x x
O
"""
# RGB -> HSL
h, s, l = rgb_to_hsl(self.col)
# Rotation by 30 degrees
angle = 30 / 360.0
h_list = [(h + ang) % 1 for ang in (-angle * 2, angle * 4, angle * 6)]
analagous = [hsl_to_rgb((h, s, l)) for h in h_list] # HSL -> new RGB
# add all the colors together
colors = [
self.col,
tuple(analagous[0]),
tuple(analagous[1]),
tuple(analagous[2]),
]
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def analagous(self):
"""
Returns the input color as well as its analagous colors.
x
x x
x x
x x
x x
O O
O
"""
# RGB -> HSL
h, s, l = rgb_to_hsl(self.col)
# Rotation by 30 degrees
degree = 30 / 360.0
h = [(h + angle) % 1 for angle in (-degree, degree)]
analagous = [hsl_to_rgb((hi, s, l)) for hi in h] # HSL -> new RGB
# add all the colors together
colors = [self.col, tuple(analagous[0]), tuple(analagous[1])]
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def similar(self):
"""
Returns the input color as well as similar colors.
(The ones that are next to the original one on the color wheel)
"""
raise NotImplementedError
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def monochromatic(self):
"""
Returns the input color as well as ....
"""
raise NotImplementedError
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def tones(self, number=10):
"""
Returns input color as well as
tones - created by adding gray to a pure hue and showing less or more saturated options
"""
raise NotImplementedError
pass
def tints(self, number=10):
"""
Returns input color as well as tints of that color (lighter colors).
number specifies how many new ones to return.
"""
assert type(number) is int, "Error, the input number must be an integer."
assert (
2 <= number and number <= 1000
), "Error, the input number must be between 2 and 1000"
# RGB -> HSL
hue, saturation, lightness = rgb_to_hsl(self.col)
# what is the difference of 100% lightness and the current value
diff = 1.0 - lightness
# devide the difference on a step size
step = diff / float(number)
# use that step size to generate the 10 increasing lightness values
lightness_list = [lightness + step * s for s in range(1, number + 1)]
# add the input color to a list, then build the 10 new HSL colors, convert to RGB and save in the same list
colors = [self.col]
colors.extend([hsl_to_rgb((hue, saturation, l)) for l in lightness_list])
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def shades(self, number=10):
"""
Returns input color as well as shades of that color (darker colors).
number specifies how many new ones to return.
"""
assert type(number) is int, "Error, the input number must be an integer."
assert (
2 <= number and number <= 1000
), "Error, the input number must be between 2 and 1000"
# RGB -> HSL
hue, saturation, lightness = rgb_to_hsl(self.col)
# divide the difference on a step size
step = lightness / float(number)
# use that step size to generate the 10 increasing lightness values
lightness_list = [lightness - step * s for s in range(1, number + 1)]
# add the input color to a list, then build the 10 new HSL colors,
# convert to RGB and save in the same list
colors = [self.col]
colors.extend([hsl_to_rgb((hue, saturation, l)) for l in lightness_list])
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def saturate(self, number=10):
"""
Returns the input color as well as more saturated versions of that color.
number specifies how many new ones to return.
"""
assert type(number) is int, "Error, the input number must be an integer."
assert (
2 <= number and number <= 1000
), "Error, the input number must be between 2 and 1000"
# RGB -> HSL
hue, saturation, lightness = rgb_to_hsl(self.col)
# what is the difference of 100% saturation and the current value
diff = 1.0 - saturation
# divide the difference on a step size
step = diff / float(number)
# use that step size to generate the 10 increasing saturation values
saturation_list = [saturation + step * s for s in range(1, number + 1)]
# add the input color to a list, then build the 10 new HSL colors,
# convert to RGB and save in the same list
colors = [self.col]
colors.extend([hsl_to_rgb((hue, s, lightness)) for s in saturation_list])
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def desaturate(self, number=10):
"""
Returns the input color as well as less saturated versions of that color.
number specifies how many new ones to return.
"""
assert type(number) is int, "Error, the input number must be an integer."
assert (
2 <= number and number <= 1000
), "Error, the input number must be between 2 and 1000"
# RGB -> HSL
hue, saturation, lightness = rgb_to_hsl(self.col)
# divide the difference on a step size
step = saturation / float(number)
# use that step size to generate the 10 increasing saturation values
saturation_list = [saturation - step * s for s in range(1, number + 1)]
# add the input color to a list, then build the 10 new HSL colors,
# convert to RGB and save in the same list
colors = [self.col]
colors.extend([hsl_to_rgb((hue, s, lightness)) for s in saturation_list])
# if the input was hex, convert it back
if self.get_format() == "hex":
colors = [rgb_to_hex(s) for s in colors]
# return as list of color objects
return [Color(s) for s in colors]
def next_color(self):
"""
Function for generating a sequence of unique colors.
The input is a tuple of an RGB color, for example (124,1,34), and
the method returns the "next" color.
When R reaches 255 one is added to G and R is reset.
When R and G both reach 255 one is added to B and R and G are reset.
This should generate over 1.6 million colors (255*255*255)
"""
R, G, B = self.col
if R == 255 and G == 255 and B == 255:
raise ValueError(
"R, G and B all have the value 255, no further colors are " "available."
)
elif R == 255 and G == 255:
R = 0
G = 0
B += 1
elif R == 255:
R = 0
G += 1
else:
R += 1
col = (R, G, B)
# if the input was hex, convert it back
if self.get_format() == "hex":
col = rgb_to_hex(col)
# return as color object
return Color(col)
# def visualize(color_list=['#acc123','#ffffff','#000000', '#1ccf9c']):
# """
# Visualizes a list of colors.
# Useful to see what one gets out of the different functions.
# """
#
# #asserts.... here....
# #should work for list of strings and for color objects
#
#
#
# from tkinter import Tk, Canvas, Frame, BOTH
#
# color_list = [s.hex() for s in color_list]
# print(color_list)
#
# class Example(Frame):
#
# def __init__(self, parent, cl):
# Frame.__init__(self, parent)
#
# self.parent = parent
# self.color_list = cl
# self.initUI()
#
# def initUI(self):
#
# self.parent.title("Colors")
# self.pack(fill=BOTH, expand=1)
#
# canvas = Canvas(self)
#
# #modify rectangle size based on how many colors there are
# rect_size = 700/float(len(color_list))
#
# for i in range(len(self.color_list)):
# canvas.create_rectangle(10+rect_size*i, 10, 10+rect_size*(i+1), 110, outline=self.color_list[i], fill=self.color_list[i])
# canvas.pack(fill=BOTH, expand=1)
#
#
# def main():
#
# root = Tk()
# ex = Example(root, color_list)
# root.geometry("720x120+250+300")
# root.mainloop()
#
# main()
def test():
"""
Unit tests to make sure all methods work.
"""
col1 = "#01f490"
col2 = (1, 244, 144)
col3 = (0.43141289437585734, 0.9918367346938776, 0.4803921568627451)
# test helper functions
assert is_rgb(col2) is True
assert is_rgb(col1) is False
assert is_rgb(col3) is False
assert is_hex(col1) is True
assert is_hex(col2) is False
assert is_hex(col3) is False
assert is_hsl(col3) is True
assert is_hsl(col1) is False
assert is_hsl(col2) is False
assert rgb_to_hex(col2) == col1
assert rgb_to_hsl(col2) == col3
assert hex_to_rgb(col1) == col2
assert hex_to_hsl(col1) == col3
assert hsl_to_hex(col3) == col1
assert hsl_to_rgb(col3) == col2
# test the __eq__ method
assert Color(col1) != Color(col2)
assert Color(col1) != col2
assert Color(col1) == Color(col1)
assert Color(col1) == col1
# test Color object with hex color
x = Color(col1)
assert x.rgb() == col2
assert x.hex() == col1
assert x.hsl() == col3
assert x.get_format() == "hex"
assert x.complementary() == ["#01f490", "#f40165"]
assert x.split_complementary() == [
"#01f490",
"#f41601",
"#f401de",
] # these seem to be off by one
assert x.triadic() == ["#01f490", "#f49001", "#9001f4"]
assert x.square() == ["#01f490", "#def401", "#1601f4", "#f40165"]
assert x.tetradic() == ["#01f490", "#65f401", "#9001f4", "#f40165"]
assert x.analagous() == ["#01f490", "#01f417", "#01def4"]
assert x.tints() == [
"#01f490",
"#11fe9d",
"#2cfea8",
"#46feb3",
"#61febd",
"#7bfec8",
"#95ffd3",
"#b0ffde",
"#caffe9",
"#e5fff4",
"#ffffff",
]
assert x.shades() == [
"#01f490",
"#01dc82",
"#01c373",
"#01ab65",
"#019256",
"#017a48",
"#00623a",
"#00492b",
"#00311d",
"#00180e",
"#000000",
]
assert x.saturate() == [
"#01f490",
"#01f490",
"#01f490",
"#01f490",
"#01f490",
"#00f490",
"#00f590",
"#00f590",
"#00f590",
"#00f590",
"#00f590",
]
assert x.desaturate() == [
"#01f490",
"#0de88e",
"#19dc8c",
"#25d08a",
"#32c387",
"#3eb785",
"#4aab83",
"#569f81",
"#62937f",
"#6e877d",
"#7a7a7a",
]
assert x.next_color() == "#02f490"
# x.similar()
# x.monochromatic()
# x.tones()
# x.set_h(0.5)
# x.set_s(0.5)
# x.set_l(0.5)
# test Color object with rgb color
x = Color(col2)
assert x.rgb() == col2
assert x.hex() == col1
assert x.hsl() == col3
assert x.get_format() == "rgb"
assert x.complementary() == [(1, 244, 144), (244, 1, 101)]
assert x.split_complementary() == [
(1, 244, 144),
(244, 22, 1),
(244, 1, 222),
] # these seem to be off by one
assert x.triadic() == [(1, 244, 144), (244, 144, 1), (144, 1, 244)]
assert x.square() == [(1, 244, 144), (222, 244, 1), (22, 1, 244), (244, 1, 101)]
assert x.tetradic() == [(1, 244, 144), (101, 244, 1), (144, 1, 244), (244, 1, 101)]
assert x.analagous() == [(1, 244, 144), (1, 244, 23), (1, 222, 244)]
assert x.tints() == [
(1, 244, 144),
(17, 254, 157),
(44, 254, 168),
(70, 254, 179),
(97, 254, 189),
(123, 254, 200),
(149, 255, 211),
(176, 255, 222),
(202, 255, 233),
(229, 255, 244),
(255, 255, 255),
]
assert x.shades() == [
(1, 244, 144),
(1, 220, 130),
(1, 195, 115),
(1, 171, 101),
(1, 146, 86),
(1, 122, 72),
(0, 98, 58),
(0, 73, 43),
(0, 49, 29),
(0, 24, 14),
(0, 0, 0),
]
assert x.saturate() == [
(1, 244, 144),
(1, 244, 144),
(1, 244, 144),
(1, 244, 144),
(1, 244, 144),
(0, 244, 144),
(0, 245, 144),
(0, 245, 144),
(0, 245, 144),
(0, 245, 144),
(0, 245, 144),
]
assert x.desaturate() == [
(1, 244, 144),
(13, 232, 142),
(25, 220, 140),
(37, 208, 138),
(50, 195, 135),
(62, 183, 133),
(74, 171, 131),
(86, 159, 129),
(98, 147, 127),
(110, 135, 125),
(122, 122, 122),
]
assert x.next_color() == (2, 244, 144)
# x.similar()
# x.monochromatic()
# x.tones()
# x.set_h(0.5)
# x.set_s(0.5)
# x.set_l(0.5)
print("All tests passed.")
| [
"colorsys.rgb_to_hls",
"numpy.array",
"colorsys.hls_to_rgb",
"re.compile"
] | [((2236, 2253), 'numpy.array', 'numpy.array', (['rgb1'], {}), '(rgb1)\n', (2247, 2253), False, 'import numpy\n'), ((2265, 2282), 'numpy.array', 'numpy.array', (['rgb2'], {}), '(rgb2)\n', (2276, 2282), False, 'import numpy\n'), ((3137, 3383), 're.compile', 're.compile', (['"""^ # match beginning of string\n [#]? # exactly one hash, but optional\n [0-9a-fA-F]{6} # exactly six of the hex symbols 0 to 9, a to f\n $ # match end of string\n """', '(re.VERBOSE | re.MULTILINE)'], {}), '(\n """^ # match beginning of string\n [#]? # exactly one hash, but optional\n [0-9a-fA-F]{6} # exactly six of the hex symbols 0 to 9, a to f\n $ # match end of string\n """\n , re.VERBOSE | re.MULTILINE)\n', (3147, 3383), False, 'import re\n'), ((5085, 5113), 'colorsys.rgb_to_hls', 'colorsys.rgb_to_hls', (['r', 'g', 'b'], {}), '(r, g, b)\n', (5104, 5113), False, 'import colorsys\n'), ((5502, 5530), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['h', 'l', 's'], {}), '(h, l, s)\n', (5521, 5530), False, 'import colorsys\n')] |
import numpy as np
import cv2
import random as rng
import math
def areaCal(contour):
    """Return the total area of all contours in the given sequence,
    summing cv2.contourArea over each one."""
    return sum(cv2.contourArea(single) for single in contour)
# Load the image and prepare a per-pixel hit counter for the ellipse test.
frame = cv2.imread('0297.jpg')
sp = frame.shape
data_A = np.zeros([sp[0], sp[1]], np.uint8)
#out = cv2.VideoWriter('output_92.avi', cv2.VideoWriter_fourcc(*'XVID'), 20.0, size,1)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 230, 255, 0)  # binarize
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)  # morphological opening
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)  # morphological closing
contours, hierarchy = cv2.findContours(closing, 3, 1)
# Draw each contour in a random color.
for i in range(len(contours)):
    color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
    a = cv2.drawContours(frame, contours, i, color)
    print(areaCal(contours))
# Ellipse parameters: rotation angle (degrees), center and semi-axes.
th_ = 178
cx = 163
cy = 162
a = 26
b = 40
frame = cv2.drawContours(frame, contours, -1, (0, 0, 255), 1)  # draw all contours on the original image
image = cv2.ellipse(frame, (cx, cy), (a, b), th_, 0, 360, (0, 0, 255), 2, 8, 0)
# Perf fix: th/cos/sin depend only on th_, so they are computed once here
# instead of being recomputed for every single pixel inside the loop below.
th = math.pi * th_ / 180
cos = math.cos(th)
sin = math.sin(th)
for i in range(0, sp[0]):
    for j in range(0, sp[1]):
        print(i, j)  # NOTE(review): debug print for every pixel; consider removing
        # rotated-ellipse membership test (x axis along the major axis)
        if math.pow((int(i - cx) * cos + int(j - cy) * sin), 2) / (a * a) + math.pow((int(j - cy) * cos - int(i - cx) * sin), 2) / (b * b) <= 0.25:
            data_A[i][j] += 1  # accumulate when the pixel lies inside the ellipse
cv2.imwrite('00001-%d.jpg' % th_, image)
| [
"cv2.contourArea",
"random.randint",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.threshold",
"cv2.imwrite",
"numpy.zeros",
"numpy.ones",
"math.sin",
"cv2.imread",
"cv2.ellipse",
"math.cos",
"cv2.drawContours",
"cv2.findContours"
] | [((217, 239), 'cv2.imread', 'cv2.imread', (['"""0297.jpg"""'], {}), "('0297.jpg')\n", (227, 239), False, 'import cv2\n'), ((266, 300), 'numpy.zeros', 'np.zeros', (['[sp[0], sp[1]]', 'np.uint8'], {}), '([sp[0], sp[1]], np.uint8)\n', (274, 300), True, 'import numpy as np\n'), ((397, 436), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (409, 436), False, 'import cv2\n'), ((452, 484), 'cv2.threshold', 'cv2.threshold', (['gray', '(230)', '(255)', '(0)'], {}), '(gray, 230, 255, 0)\n', (465, 484), False, 'import cv2\n'), ((502, 527), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (509, 527), True, 'import numpy as np\n'), ((539, 587), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_OPEN', 'kernel'], {}), '(thresh, cv2.MORPH_OPEN, kernel)\n', (555, 587), False, 'import cv2\n'), ((606, 656), 'cv2.morphologyEx', 'cv2.morphologyEx', (['opening', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(opening, cv2.MORPH_CLOSE, kernel)\n', (622, 656), False, 'import cv2\n'), ((687, 718), 'cv2.findContours', 'cv2.findContours', (['closing', '(3)', '(1)'], {}), '(closing, 3, 1)\n', (703, 718), False, 'import cv2\n'), ((951, 1004), 'cv2.drawContours', 'cv2.drawContours', (['frame', 'contours', '(-1)', '(0, 0, 255)', '(1)'], {}), '(frame, contours, -1, (0, 0, 255), 1)\n', (967, 1004), False, 'import cv2\n'), ((1023, 1094), 'cv2.ellipse', 'cv2.ellipse', (['frame', '(cx, cy)', '(a, b)', 'th_', '(0)', '(360)', '(0, 0, 255)', '(2)', '(8)', '(0)'], {}), '(frame, (cx, cy), (a, b), th_, 0, 360, (0, 0, 255), 2, 8, 0)\n', (1034, 1094), False, 'import cv2\n'), ((1520, 1560), 'cv2.imwrite', 'cv2.imwrite', (["('00001-%d.jpg' % th_)", 'image'], {}), "('00001-%d.jpg' % th_, image)\n", (1531, 1560), False, 'import cv2\n'), ((835, 878), 'cv2.drawContours', 'cv2.drawContours', (['frame', 'contours', 'i', 'color'], {}), '(frame, contours, i, color)\n', (851, 878), False, 'import cv2\n'), ((161, 188), 
'cv2.contourArea', 'cv2.contourArea', (['contour[i]'], {}), '(contour[i])\n', (176, 188), False, 'import cv2\n'), ((765, 784), 'random.randint', 'rng.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (776, 784), True, 'import random as rng\n'), ((786, 805), 'random.randint', 'rng.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (797, 805), True, 'import random as rng\n'), ((807, 826), 'random.randint', 'rng.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (818, 826), True, 'import random as rng\n'), ((1272, 1284), 'math.cos', 'math.cos', (['th'], {}), '(th)\n', (1280, 1284), False, 'import math\n'), ((1300, 1312), 'math.sin', 'math.sin', (['th'], {}), '(th)\n', (1308, 1312), False, 'import math\n')] |
import Aidlab
from Aidlab.Signal import Signal
import numpy as np
from multiprocessing import Process, Queue, Array
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
buffer_size = 500
result = None
x = [i for i in range(buffer_size)]
y = []
figure = pyplot.figure()
axis = figure.add_subplot(1, 1, 1)
def animate(i):
    """Redraw the chart with the current buffer contents
    (FuncAnimation callback; *i* is the frame counter)."""
    global y
    axis.clear()
    axis.plot(x, y)
    # pad the y limits by one standard deviation on each side
    spread = np.std(y)
    pyplot.ylim([np.min(y) - spread, np.max(y) + spread])
def chart(result):
    """
    Run the live plot in this process.

    Binds the shared buffer to the module-level ``y`` consumed by
    ``animate`` and starts a blocking matplotlib animation loop.
    """
    global y
    y = result
    ani = animation.FuncAnimation(figure, animate, interval=2)
    pyplot.show()
class MainManager(Aidlab.Aidlab):
    """
    Aidlab connection handler that streams incoming ECG samples into the
    shared ``result`` ring buffer consumed by the plotting process.
    """

    def __init__(self):
        super().__init__()
        # number of ECG samples received so far; used as the ring-buffer index
        self.sample_index = 0

    def did_connect(self, aidlab):
        """Callback invoked by the Aidlab SDK when a device connects."""
        print("Connected to: ", aidlab.address)

    def did_disconnect(self, aidlab):
        """Callback invoked by the Aidlab SDK when a device disconnects."""
        print("Disconnected from: ", aidlab.address)

    def did_receive_ecg(self, aidlab, timestamp, values):
        """Store the newest ECG sample in the shared circular buffer."""
        global result, buffer_size
        self.sample_index += 1
        # wrap around so the buffer always holds the latest buffer_size samples
        result[self.sample_index % buffer_size] = values[0]
if __name__ == '__main__':
    import time

    # create process for Plot
    result = Array('d', buffer_size)
    Process(target=chart, args=(result,)).start()

    signals = [Signal.ecg]
    main_manager = MainManager()
    main_manager.connect(signals)

    # Keep the main thread alive. The previous ``while True: pass``
    # busy-waited at 100% CPU; sleeping yields the core while the SDK
    # callbacks keep delivering samples.
    while True:
        time.sleep(1)
| [
"matplotlib.pyplot.show",
"multiprocessing.Array",
"numpy.std",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.max",
"multiprocessing.Process"
] | [((278, 293), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (291, 293), True, 'import matplotlib.pyplot as pyplot\n'), ((520, 572), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['figure', 'animate'], {'interval': '(2)'}), '(figure, animate, interval=2)\n', (543, 572), True, 'import matplotlib.animation as animation\n'), ((577, 590), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (588, 590), True, 'import matplotlib.pyplot as pyplot\n'), ((1143, 1166), 'multiprocessing.Array', 'Array', (['"""d"""', 'buffer_size'], {}), "('d', buffer_size)\n", (1148, 1166), False, 'from multiprocessing import Process, Queue, Array\n'), ((1171, 1208), 'multiprocessing.Process', 'Process', ([], {'target': 'chart', 'args': '(result,)'}), '(target=chart, args=(result,))\n', (1178, 1208), False, 'from multiprocessing import Process, Queue, Array\n'), ((414, 423), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (420, 423), True, 'import numpy as np\n'), ((426, 435), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (432, 435), True, 'import numpy as np\n'), ((437, 446), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (443, 446), True, 'import numpy as np\n'), ((449, 458), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (455, 458), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
from numba import jit
@jit(nopython=True)
def fit_bpr(
    data_triplets: np.ndarray,
    initial_user_factors: np.ndarray,
    initial_item_factors: np.ndarray,
    initial_item_biases: np.ndarray,
    lr_bi: float = 0.01,
    lr_pu: float = 0.01,
    lr_qi: float = 0.01,
    reg_bi: float = 0.01,
    reg_pu: float = 0.01,
    reg_qi: float = 0.01,
    verbose=False,
    n_epochs=100,
    batch_size=50,
    eps=1e-5,
    decay=.01
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Fit a BPR (Bayesian Personalized Ranking) model with mini-batch SGD.

    Parameters
    ----------
    data_triplets : ndarray
        Rows of (user, positive item, negative item) index triplets.
    initial_user_factors, initial_item_factors, initial_item_biases : ndarray
        Starting parameters; copied, not modified in place.
    lr_bi, lr_pu, lr_qi : float
        Learning rates for item biases, user factors and item factors.
    reg_bi, reg_pu, reg_qi : float
        L2 regularization strengths for the same three parameter groups.
    n_epochs, batch_size : int
        Number of epochs and samples drawn per epoch.
    eps : float
        Convergence threshold on the mean parameter change per sample.
    decay : float
        Inverse-time learning-rate decay factor.

    Returns
    -------
    (user_factors, item_factors, item_biases, residuals)
        Fitted parameters plus the per-epoch parameter-movement residuals
        (truncated at the epoch where convergence was reached).
    """
    m = data_triplets.shape[0]
    residuals = np.zeros(n_epochs)
    user_factors = initial_user_factors.copy()
    item_factors = initial_item_factors.copy()
    item_biases = initial_item_biases.copy()
    # draw all SGD sample indices up front; one contiguous slice per epoch
    samples = np.random.choice(m, batch_size * n_epochs, replace=True)
    for epoch in range(n_epochs):
        old_user_factors = user_factors
        old_item_factors = item_factors
        old_item_biases = item_biases
        samples_epoch = samples[(batch_size * epoch):(batch_size * (epoch + 1))]
        # inverse-time decay of the learning rates
        epoch_lr_bi = lr_bi / (1 + decay * epoch)
        epoch_lr_pu = lr_pu / (1 + decay * epoch)
        epoch_lr_qi = lr_qi / (1 + decay * epoch)
        (user_factors, item_factors, item_biases) = fit_batch(
            data_triplets=data_triplets,
            initial_user_factors=user_factors,
            initial_item_factors=item_factors,
            initial_item_biases=item_biases,
            lr_bi=epoch_lr_bi,
            lr_pu=epoch_lr_pu,
            lr_qi=epoch_lr_qi,
            reg_bi=reg_bi,
            reg_pu=reg_pu,
            reg_qi=reg_qi,
            verbose=False,
            samples=samples_epoch,
        )
        # mean parameter movement over this batch, used as convergence measure
        batch_norm = (
            np.linalg.norm(user_factors - old_user_factors)
            + np.linalg.norm(item_factors - old_item_factors)
            + np.linalg.norm(item_biases - old_item_biases)
        ) / batch_size
        residuals[epoch] = batch_norm
        if batch_norm < eps:
            # Bug fix: include the residual of the converged epoch itself;
            # the previous slice residuals[:epoch] dropped it.
            return user_factors, item_factors, item_biases, residuals[:epoch + 1]
    return user_factors, item_factors, item_biases, residuals
@jit(nopython=True)
def fit_batch(
    data_triplets: np.ndarray,
    initial_user_factors: np.ndarray,
    initial_item_factors: np.ndarray,
    initial_item_biases: np.ndarray,
    lr_bi: float = 0.01,
    lr_pu: float = 0.01,
    lr_qi: float = 0.01,
    reg_bi: float = 0.01,
    reg_pu: float = 0.01,
    reg_qi: float = 0.01,
    verbose=False,
    samples=None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Run one pass of BPR stochastic gradient descent over the given samples.

    Parameters
    ----------
    data_triplets : ndarray
        Rows of (user, positive item, negative item) index triplets.
    initial_user_factors, initial_item_factors : ndarray
        Latent factor matrices; copied, not modified in place.
    initial_item_biases : ndarray
        Item bias vector; copied, not modified in place.
    lr_bi, lr_pu, lr_qi : float
        Learning rates for item biases, user factors and item factors.
    reg_bi, reg_pu, reg_qi : float
        L2 regularization strengths for the same three parameter groups.
    verbose : bool
        Unused here; kept for interface symmetry with fit_bpr.
    samples : ndarray
        Indices into data_triplets selecting which triplets to visit.

    Returns
    -------
    (user_factors, item_factors, item_biases)
        Updated copies of the model parameters.
    """
    user_factors = initial_user_factors.copy()
    item_factors = initial_item_factors.copy()
    item_biases = initial_item_biases.copy()
    for idx in samples:
        row = data_triplets[idx, :]
        u = row[0]  # user index
        i = row[1]  # positive (preferred) item index
        j = row[2]  # negative item index
        pu = user_factors[u, :]
        qi = item_factors[i, :]
        qj = item_factors[j, :]
        # predicted utilities of the positive and negative items
        x_ui = np.dot(pu, qi) + item_biases[i]
        x_uj = np.dot(pu, qj) + item_biases[j]
        x_uij = x_ui - x_uj
        # gradient coefficient: d/dx of -log(sigmoid(x)) evaluated at x_uij
        coeff = -np.exp(-x_uij) / (1 + np.exp(-x_uij))
        # SGD steps with L2 regularization; note pu/qi/qj hold the values
        # from *before* any of these updates, which is intentional
        user_factors[u, :] = pu - lr_pu * (coeff * (qi - qj) + reg_pu * pu)
        item_factors[i, :] = qi - lr_qi * (coeff * pu + reg_qi * qi)
        item_factors[j, :] = qj - lr_qi * (coeff * -pu + reg_qi * qj)
        item_biases[i] += -lr_bi * (coeff + reg_bi * item_biases[i])
        item_biases[j] += -lr_bi * (-coeff + reg_bi * item_biases[j])
    return user_factors, item_factors, item_biases
@jit(nopython=False)
def score_bpr(
X: np.ndarray,
user_factors: np.ndarray,
item_factors: np.ndarray,
user_biases: np.ndarray,
item_biases: np.ndarray,
global_mean: np.ndarray,
known_users,
known_items,
):
"""
Parameters
----------
X : ndarray
Columns are [ user_id, item_id ]
user_factors
item_factors
user_biases
item_biases
global_mean
known_users : set
known_items : set
Returns
-------
"""
m = X.shape[0]
scores = np.zeros(m)
for i in np.arange(m):
u = X[i, 0]
i = X[i, 1]
if u in known_users and i in known_items:
scores[i] = (
np.dot(user_factors[u, :], item_factors[i, :])
+ user_biases[u]
+ item_biases[i]
+ global_mean
)
elif u in known_users:
scores[i] = user_biases[u] + global_mean
elif i in known_items:
scores[i] = item_biases[i] + global_mean
else:
scores[i] = global_mean
return scores
| [
"numpy.zeros",
"numba.jit",
"numpy.arange",
"numpy.linalg.norm",
"numpy.random.choice",
"numpy.exp",
"numpy.dot"
] | [((70, 88), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (73, 88), False, 'from numba import jit\n'), ((2127, 2145), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2130, 2145), False, 'from numba import jit\n'), ((3741, 3760), 'numba.jit', 'jit', ([], {'nopython': '(False)'}), '(nopython=False)\n', (3744, 3760), False, 'from numba import jit\n'), ((591, 609), 'numpy.zeros', 'np.zeros', (['n_epochs'], {}), '(n_epochs)\n', (599, 609), True, 'import numpy as np\n'), ((763, 819), 'numpy.random.choice', 'np.random.choice', (['m', '(batch_size * n_epochs)'], {'replace': '(True)'}), '(m, batch_size * n_epochs, replace=True)\n', (779, 819), True, 'import numpy as np\n'), ((4267, 4278), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (4275, 4278), True, 'import numpy as np\n'), ((4292, 4304), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (4301, 4304), True, 'import numpy as np\n'), ((3166, 3180), 'numpy.dot', 'np.dot', (['pu', 'qi'], {}), '(pu, qi)\n', (3172, 3180), True, 'import numpy as np\n'), ((3213, 3227), 'numpy.dot', 'np.dot', (['pu', 'qj'], {}), '(pu, qj)\n', (3219, 3227), True, 'import numpy as np\n'), ((1847, 1892), 'numpy.linalg.norm', 'np.linalg.norm', (['(item_biases - old_item_biases)'], {}), '(item_biases - old_item_biases)\n', (1861, 1892), True, 'import numpy as np\n'), ((3291, 3305), 'numpy.exp', 'np.exp', (['(-x_uij)'], {}), '(-x_uij)\n', (3297, 3305), True, 'import numpy as np\n'), ((3313, 3327), 'numpy.exp', 'np.exp', (['(-x_uij)'], {}), '(-x_uij)\n', (3319, 3327), True, 'import numpy as np\n'), ((1723, 1770), 'numpy.linalg.norm', 'np.linalg.norm', (['(user_factors - old_user_factors)'], {}), '(user_factors - old_user_factors)\n', (1737, 1770), True, 'import numpy as np\n'), ((1785, 1832), 'numpy.linalg.norm', 'np.linalg.norm', (['(item_factors - old_item_factors)'], {}), '(item_factors - old_item_factors)\n', (1799, 1832), True, 'import numpy as np\n'), ((4438, 4484), 'numpy.dot', 'np.dot', 
(['user_factors[u, :]', 'item_factors[i, :]'], {}), '(user_factors[u, :], item_factors[i, :])\n', (4444, 4484), True, 'import numpy as np\n')] |
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_hastie_10_2
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
def make_hastie_11_2(n_samples):
X, y_org = make_hastie_10_2(n_samples=n_samples)
z = np.random.randn(n_samples)
y = y_org * z
y[y > 0] = 1
y[y <= 0] = 0
r = np.random.rand(n_samples) < 0.2
y[r] = 1 - y[r]
X = np.hstack((X, z.reshape(n_samples, 1)))
return X, y
def make_friedman1_poly(n_samples, noise=5):
X, y = make_friedman1(n_samples=n_samples, noise=noise)
poly = PolynomialFeatures()
X = poly.fit_transform(X)
return X, y
def apply_tree(x, tree, use_varname=False):
score = 0
for leaf in tree:
match = True
for eq in leaf["eqs"]:
svar = eq["svar"]
sval = eq["sval"]
if "<" == eq["op"]:
if use_varname:
if x[eq["name"]] >= sval:
match = False
else:
if x[svar] >= sval:
match = False
else:
if use_varname:
if x[eq["name"]] < sval:
match = False
else:
if x[svar] < sval:
match = False
if not match:
break
if match:
score = leaf["y"]
break
return score
| [
"sklearn.datasets.make_hastie_10_2",
"numpy.random.randn",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.random.rand",
"sklearn.datasets.make_friedman1"
] | [((211, 248), 'sklearn.datasets.make_hastie_10_2', 'make_hastie_10_2', ([], {'n_samples': 'n_samples'}), '(n_samples=n_samples)\n', (227, 248), False, 'from sklearn.datasets import make_hastie_10_2\n'), ((258, 284), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (273, 284), True, 'import numpy as np\n'), ((519, 567), 'sklearn.datasets.make_friedman1', 'make_friedman1', ([], {'n_samples': 'n_samples', 'noise': 'noise'}), '(n_samples=n_samples, noise=noise)\n', (533, 567), False, 'from sklearn.datasets import make_friedman1\n'), ((580, 600), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {}), '()\n', (598, 600), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((346, 371), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (360, 371), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2020 Carnegie Mellon University
# All rights reserved.
#
# Based on work by <NAME>.
#
# SPDX-License-Identifier: Apache-2.0
#
"""Remove similar frames based on a perceptual hash metric
"""
import argparse
import json
import os
import random
import shutil
import imagehash
import numpy as np
from PIL import Image
from datumaro.components.project import Project
DIFF_THRESHOLD = 10
DEFAULT_RATIO = 0.7
def checkDiff(image_hash, base_image_hash, threshold):
if base_image_hash is None:
return True
if image_hash - base_image_hash >= threshold:
return True
return False
def checkDiffComplete(image_hash, base_image_list, threshold):
if len(base_image_list) <= 0:
return True
for i in base_image_list:
if not checkDiff(image_hash, i, threshold):
return False
return True
def checkDiffRandom(image_hash, base_image_list, check_ratio, threshold):
if len(base_image_list) <= 0:
return True
check_length = int(len(base_image_list) * check_ratio)
new_list = []
new_list.extend(range(len(base_image_list)))
random.shuffle(new_list)
for i in new_list[:check_length]:
if not checkDiff(image_hash, base_image_list[i], threshold):
return False
return True
def contProcess(dic, threshold):
base_image_hash = None
nodup = []
print(len(dic["items"]))
for i in dic["items"]:
imgpath = i["image"]["path"]
im = Image.open(imgpath)
a = np.asarray(im)
im = Image.fromarray(a)
image_hash = imagehash.phash(im)
if checkDiff(image_hash, base_image_hash, threshold):
base_image_hash = image_hash
nodup.append(i)
return nodup
def completeProcess(dic, threshold):
base_image_list = []
nodup2 = []
print(len(dic["items"]))
for i in dic["items"]:
imgpath = i["image"]["path"]
im = Image.open(imgpath)
a = np.asarray(im)
im = Image.fromarray(a)
image_hash = imagehash.phash(im)
if checkDiffComplete(image_hash, base_image_list, threshold):
base_image_list.append(image_hash)
nodup2.append(i)
return nodup2
def randomProcess(dic, ratio, threshold):
if ratio < 0 or ratio > 1:
raise Exception("Random ratio should between 0 and 1")
base_image_list2 = []
nodup3 = []
print(len(dic["items"]))
for i in dic["items"]:
imgpath = i["image"]["path"]
im = Image.open(imgpath)
a = np.asarray(im)
im = Image.fromarray(a)
image_hash = imagehash.phash(im)
if checkDiffRandom(image_hash, base_image_list2, ratio, threshold):
base_image_list2.append(image_hash)
nodup3.append(i)
return nodup3
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--level",
required=True,
type=int,
help="1(continuous checking) 2(random checking) 3(complete checking)",
)
parser.add_argument("-p", "--path", required=True, help="Input dataset path")
parser.add_argument("-o", "--output", default="unique", help="Output dataset path")
parser.add_argument(
"-t",
"--threshold",
type=int,
default=DIFF_THRESHOLD,
help="Threshold of difference",
)
parser.add_argument(
"-r", "--ratio", type=float, default=DEFAULT_RATIO, help="Random ratio"
)
args = parser.parse_args()
if args.level > 3 or args.level < 1:
raise Exception("No suitable level found")
ANNOPATH = os.path.join(args.path, "dataset", "annotations")
FRAMEJSON = os.path.join(ANNOPATH, "default.json")
dic = json.loads(open(FRAMEJSON).read())
if args.level == 1:
result = contProcess(dic, args.threshold)
print(len(result))
data = {}
data["categories"] = dic["categories"]
data["items"] = result
data["info"] = dic["info"]
elif args.level == 2:
result = randomProcess(dic, args.ratio, args.threshold)
print(len(result))
data = {}
data["categories"] = dic["categories"]
data["items"] = result
data["info"] = dic["info"]
elif args.level == 3:
result = completeProcess(dic, args.threshold)
print(len(result))
data = {}
data["categories"] = dic["categories"]
data["items"] = result
data["info"] = dic["info"]
if os.path.exists(args.output):
shutil.rmtree(args.output)
p = Project.generate(
args.output,
{
"project_name": args.output,
},
)
p.make_dataset().save()
RESULTPATH = os.path.join(args.output, "dataset", "annotations")
path = os.path.join(RESULTPATH, "default.json")
fp = open(path, "w")
json.dump(data, fp)
fp.close()
if __name__ == "__main__":
main()
| [
"json.dump",
"argparse.ArgumentParser",
"random.shuffle",
"numpy.asarray",
"datumaro.components.project.Project.generate",
"os.path.exists",
"imagehash.phash",
"PIL.Image.open",
"PIL.Image.fromarray",
"shutil.rmtree",
"os.path.join"
] | [((1152, 1176), 'random.shuffle', 'random.shuffle', (['new_list'], {}), '(new_list)\n', (1166, 1176), False, 'import random\n'), ((2851, 2876), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2874, 2876), False, 'import argparse\n'), ((3640, 3689), 'os.path.join', 'os.path.join', (['args.path', '"""dataset"""', '"""annotations"""'], {}), "(args.path, 'dataset', 'annotations')\n", (3652, 3689), False, 'import os\n'), ((3706, 3744), 'os.path.join', 'os.path.join', (['ANNOPATH', '"""default.json"""'], {}), "(ANNOPATH, 'default.json')\n", (3718, 3744), False, 'import os\n'), ((4518, 4545), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (4532, 4545), False, 'import os\n'), ((4591, 4651), 'datumaro.components.project.Project.generate', 'Project.generate', (['args.output', "{'project_name': args.output}"], {}), "(args.output, {'project_name': args.output})\n", (4607, 4651), False, 'from datumaro.components.project import Project\n'), ((4744, 4795), 'os.path.join', 'os.path.join', (['args.output', '"""dataset"""', '"""annotations"""'], {}), "(args.output, 'dataset', 'annotations')\n", (4756, 4795), False, 'import os\n'), ((4807, 4847), 'os.path.join', 'os.path.join', (['RESULTPATH', '"""default.json"""'], {}), "(RESULTPATH, 'default.json')\n", (4819, 4847), False, 'import os\n'), ((4877, 4896), 'json.dump', 'json.dump', (['data', 'fp'], {}), '(data, fp)\n', (4886, 4896), False, 'import json\n'), ((1508, 1527), 'PIL.Image.open', 'Image.open', (['imgpath'], {}), '(imgpath)\n', (1518, 1527), False, 'from PIL import Image\n'), ((1540, 1554), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (1550, 1554), True, 'import numpy as np\n'), ((1568, 1586), 'PIL.Image.fromarray', 'Image.fromarray', (['a'], {}), '(a)\n', (1583, 1586), False, 'from PIL import Image\n'), ((1608, 1627), 'imagehash.phash', 'imagehash.phash', (['im'], {}), '(im)\n', (1623, 1627), False, 'import imagehash\n'), ((1963, 1982), 'PIL.Image.open', 
'Image.open', (['imgpath'], {}), '(imgpath)\n', (1973, 1982), False, 'from PIL import Image\n'), ((1995, 2009), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (2005, 2009), True, 'import numpy as np\n'), ((2023, 2041), 'PIL.Image.fromarray', 'Image.fromarray', (['a'], {}), '(a)\n', (2038, 2041), False, 'from PIL import Image\n'), ((2063, 2082), 'imagehash.phash', 'imagehash.phash', (['im'], {}), '(im)\n', (2078, 2082), False, 'import imagehash\n'), ((2533, 2552), 'PIL.Image.open', 'Image.open', (['imgpath'], {}), '(imgpath)\n', (2543, 2552), False, 'from PIL import Image\n'), ((2565, 2579), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (2575, 2579), True, 'import numpy as np\n'), ((2593, 2611), 'PIL.Image.fromarray', 'Image.fromarray', (['a'], {}), '(a)\n', (2608, 2611), False, 'from PIL import Image\n'), ((2633, 2652), 'imagehash.phash', 'imagehash.phash', (['im'], {}), '(im)\n', (2648, 2652), False, 'import imagehash\n'), ((4555, 4581), 'shutil.rmtree', 'shutil.rmtree', (['args.output'], {}), '(args.output)\n', (4568, 4581), False, 'import shutil\n')] |
import os
import pytest
import numpy as np
from quantum_systems import ODQD, GeneralOrbitalSystem, SpatialOrbitalSystem
@pytest.fixture(scope="module")
def get_odho():
n = 2
l = 10
grid_length = 5
num_grid_points = 1001
omega = 1
odho = GeneralOrbitalSystem(
n,
ODQD(
l, grid_length, num_grid_points, potential=ODQD.HOPotential(omega)
),
)
return odho
@pytest.fixture(scope="module")
def get_odho_ao():
n = 2
l = 20
grid_length = 5
num_grid_points = 1001
omega = 1
odho = SpatialOrbitalSystem(
n,
ODQD(
l, grid_length, num_grid_points, potential=ODQD.HOPotential(omega)
),
)
return odho
@pytest.fixture(scope="module")
def get_oddw():
n = 2
l = 10
grid_length = 6
num_grid_points = 1001
omega = 1
length_of_dw = 5
oddw = GeneralOrbitalSystem(
n,
ODQD(
l,
grid_length,
num_grid_points,
potential=ODQD.DWPotential(omega, length_of_dw),
),
)
return oddw
@pytest.fixture(scope="module")
def get_odgauss():
n = 2
l = 10
grid_length = 20
num_grid_points = 1001
weight = 1
center = 0
deviation = 2.5
odgauss = GeneralOrbitalSystem(
n,
ODQD(
l,
grid_length,
num_grid_points,
potential=ODQD.GaussianPotential(weight, center, deviation, np=np),
),
)
return odgauss
@pytest.fixture(scope="module")
def get_oddw_smooth():
n = 2
l = 10
grid_length = 5
num_grid_points = 1001
a = 5
oddw_smooth = GeneralOrbitalSystem(
n,
ODQD(
l,
grid_length,
num_grid_points,
potential=ODQD.DWPotentialSmooth(a=a),
),
)
return oddw_smooth
def test_odho(get_odho):
odho = get_odho
dip = np.load(os.path.join("tests", "dat", "odho_dipole_moment.npy"))
np.testing.assert_allclose(dip, odho.position, atol=1e-10)
h = np.load(os.path.join("tests", "dat", "odho_h.npy"))
np.testing.assert_allclose(h, odho.h, atol=1e-10)
u = np.load(os.path.join("tests", "dat", "odho_u.npy"))
np.testing.assert_allclose(u, odho.u, atol=1e-10)
spf = np.load(os.path.join("tests", "dat", "odho_spf.npy"))
np.testing.assert_allclose(spf, odho.spf, atol=1e-10)
def test_oddw(get_oddw):
oddw = get_oddw
dip = np.load(os.path.join("tests", "dat", "oddw_dipole_moment.npy"))
np.testing.assert_allclose(dip, oddw.position, atol=1e-10)
h = np.load(os.path.join("tests", "dat", "oddw_h.npy"))
np.testing.assert_allclose(h, oddw.h, atol=1e-10)
u = np.load(os.path.join("tests", "dat", "oddw_u.npy"))
np.testing.assert_allclose(u, oddw.u, atol=1e-10)
spf = np.load(os.path.join("tests", "dat", "oddw_spf.npy"))
np.testing.assert_allclose(spf, oddw.spf, atol=1e-10)
def test_odgauss(get_odgauss):
odgauss = get_odgauss
dip = np.load(os.path.join("tests", "dat", "odgauss_dipole_moment.npy"))
np.testing.assert_allclose(dip, odgauss.position, atol=1e-10)
h = np.load(os.path.join("tests", "dat", "odgauss_h.npy"))
np.testing.assert_allclose(h, odgauss.h, atol=1e-10)
u = np.load(os.path.join("tests", "dat", "odgauss_u.npy"))
np.testing.assert_allclose(u, odgauss.u, atol=1e-10)
spf = np.load(os.path.join("tests", "dat", "odgauss_spf.npy"))
np.testing.assert_allclose(spf, odgauss.spf, atol=1e-10)
def test_oddw_smooth(get_oddw_smooth):
oddw_smooth = get_oddw_smooth
dip = np.load(os.path.join("tests", "dat", "oddw_smooth_dipole_moment.npy"))
np.testing.assert_allclose(dip, oddw_smooth.position, atol=1e-10)
h = np.load(os.path.join("tests", "dat", "oddw_smooth_h.npy"))
np.testing.assert_allclose(h, oddw_smooth.h, atol=1e-10)
u = np.load(os.path.join("tests", "dat", "oddw_smooth_u.npy"))
np.testing.assert_allclose(u, oddw_smooth.u, atol=1e-10)
spf = np.load(os.path.join("tests", "dat", "oddw_smooth_spf.npy"))
np.testing.assert_allclose(spf, oddw_smooth.spf, atol=1e-10)
def test_anti_symmetric_two_body_symmetry_odho(get_odho):
odho = get_odho
l = odho.l
u = odho.u
for p in range(l):
for q in range(l):
for r in range(l):
for s in range(l):
assert abs(u[p, q, r, s] + u[p, q, s, r]) < 1e-8
assert abs(u[p, q, r, s] + u[q, p, r, s]) < 1e-8
assert abs(u[p, q, r, s] - u[q, p, s, r]) < 1e-8
def test_anti_symmetric_two_body_symmetry_oddw(get_oddw):
oddw = get_oddw
l = oddw.l
u = oddw.u
for p in range(l):
for q in range(l):
for r in range(l):
for s in range(l):
assert abs(u[p, q, r, s] + u[p, q, s, r]) < 1e-8
assert abs(u[p, q, r, s] + u[q, p, r, s]) < 1e-8
assert abs(u[p, q, r, s] - u[q, p, s, r]) < 1e-8
def test_two_body_symmetry_odho(get_odho_ao):
odho = get_odho_ao
l = odho.l // 2
u = odho.u
for p in range(l):
for q in range(l):
for r in range(l):
for s in range(l):
assert abs(u[p, q, r, s] - u[q, p, s, r]) < 1e-8
| [
"quantum_systems.ODQD.DWPotentialSmooth",
"quantum_systems.ODQD.DWPotential",
"quantum_systems.ODQD.GaussianPotential",
"pytest.fixture",
"numpy.testing.assert_allclose",
"os.path.join",
"quantum_systems.ODQD.HOPotential"
] | [((124, 154), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (138, 154), False, 'import pytest\n'), ((429, 459), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (443, 459), False, 'import pytest\n'), ((737, 767), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (751, 767), False, 'import pytest\n'), ((1115, 1145), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1129, 1145), False, 'import pytest\n'), ((1537, 1567), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1551, 1567), False, 'import pytest\n'), ((2023, 2081), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dip', 'odho.position'], {'atol': '(1e-10)'}), '(dip, odho.position, atol=1e-10)\n', (2049, 2081), True, 'import numpy as np\n'), ((2147, 2196), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['h', 'odho.h'], {'atol': '(1e-10)'}), '(h, odho.h, atol=1e-10)\n', (2173, 2196), True, 'import numpy as np\n'), ((2262, 2311), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['u', 'odho.u'], {'atol': '(1e-10)'}), '(u, odho.u, atol=1e-10)\n', (2288, 2311), True, 'import numpy as np\n'), ((2381, 2434), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf', 'odho.spf'], {'atol': '(1e-10)'}), '(spf, odho.spf, atol=1e-10)\n', (2407, 2434), True, 'import numpy as np\n'), ((2561, 2619), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dip', 'oddw.position'], {'atol': '(1e-10)'}), '(dip, oddw.position, atol=1e-10)\n', (2587, 2619), True, 'import numpy as np\n'), ((2685, 2734), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['h', 'oddw.h'], {'atol': '(1e-10)'}), '(h, oddw.h, atol=1e-10)\n', (2711, 2734), True, 'import numpy as np\n'), ((2800, 2849), 'numpy.testing.assert_allclose', 
'np.testing.assert_allclose', (['u', 'oddw.u'], {'atol': '(1e-10)'}), '(u, oddw.u, atol=1e-10)\n', (2826, 2849), True, 'import numpy as np\n'), ((2919, 2972), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf', 'oddw.spf'], {'atol': '(1e-10)'}), '(spf, oddw.spf, atol=1e-10)\n', (2945, 2972), True, 'import numpy as np\n'), ((3114, 3175), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dip', 'odgauss.position'], {'atol': '(1e-10)'}), '(dip, odgauss.position, atol=1e-10)\n', (3140, 3175), True, 'import numpy as np\n'), ((3244, 3296), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['h', 'odgauss.h'], {'atol': '(1e-10)'}), '(h, odgauss.h, atol=1e-10)\n', (3270, 3296), True, 'import numpy as np\n'), ((3365, 3417), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['u', 'odgauss.u'], {'atol': '(1e-10)'}), '(u, odgauss.u, atol=1e-10)\n', (3391, 3417), True, 'import numpy as np\n'), ((3490, 3546), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf', 'odgauss.spf'], {'atol': '(1e-10)'}), '(spf, odgauss.spf, atol=1e-10)\n', (3516, 3546), True, 'import numpy as np\n'), ((3708, 3773), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dip', 'oddw_smooth.position'], {'atol': '(1e-10)'}), '(dip, oddw_smooth.position, atol=1e-10)\n', (3734, 3773), True, 'import numpy as np\n'), ((3846, 3902), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['h', 'oddw_smooth.h'], {'atol': '(1e-10)'}), '(h, oddw_smooth.h, atol=1e-10)\n', (3872, 3902), True, 'import numpy as np\n'), ((3975, 4031), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['u', 'oddw_smooth.u'], {'atol': '(1e-10)'}), '(u, oddw_smooth.u, atol=1e-10)\n', (4001, 4031), True, 'import numpy as np\n'), ((4108, 4168), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf', 'oddw_smooth.spf'], {'atol': '(1e-10)'}), '(spf, oddw_smooth.spf, atol=1e-10)\n', (4134, 4168), True, 'import 
numpy as np\n'), ((1963, 2017), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odho_dipole_moment.npy"""'], {}), "('tests', 'dat', 'odho_dipole_moment.npy')\n", (1975, 2017), False, 'import os\n'), ((2099, 2141), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odho_h.npy"""'], {}), "('tests', 'dat', 'odho_h.npy')\n", (2111, 2141), False, 'import os\n'), ((2214, 2256), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odho_u.npy"""'], {}), "('tests', 'dat', 'odho_u.npy')\n", (2226, 2256), False, 'import os\n'), ((2331, 2375), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odho_spf.npy"""'], {}), "('tests', 'dat', 'odho_spf.npy')\n", (2343, 2375), False, 'import os\n'), ((2501, 2555), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_dipole_moment.npy"""'], {}), "('tests', 'dat', 'oddw_dipole_moment.npy')\n", (2513, 2555), False, 'import os\n'), ((2637, 2679), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_h.npy"""'], {}), "('tests', 'dat', 'oddw_h.npy')\n", (2649, 2679), False, 'import os\n'), ((2752, 2794), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_u.npy"""'], {}), "('tests', 'dat', 'oddw_u.npy')\n", (2764, 2794), False, 'import os\n'), ((2869, 2913), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_spf.npy"""'], {}), "('tests', 'dat', 'oddw_spf.npy')\n", (2881, 2913), False, 'import os\n'), ((3051, 3108), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odgauss_dipole_moment.npy"""'], {}), "('tests', 'dat', 'odgauss_dipole_moment.npy')\n", (3063, 3108), False, 'import os\n'), ((3193, 3238), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odgauss_h.npy"""'], {}), "('tests', 'dat', 'odgauss_h.npy')\n", (3205, 3238), False, 'import os\n'), ((3314, 3359), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odgauss_u.npy"""'], {}), "('tests', 'dat', 
'odgauss_u.npy')\n", (3326, 3359), False, 'import os\n'), ((3437, 3484), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""odgauss_spf.npy"""'], {}), "('tests', 'dat', 'odgauss_spf.npy')\n", (3449, 3484), False, 'import os\n'), ((3641, 3702), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_smooth_dipole_moment.npy"""'], {}), "('tests', 'dat', 'oddw_smooth_dipole_moment.npy')\n", (3653, 3702), False, 'import os\n'), ((3791, 3840), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_smooth_h.npy"""'], {}), "('tests', 'dat', 'oddw_smooth_h.npy')\n", (3803, 3840), False, 'import os\n'), ((3920, 3969), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_smooth_u.npy"""'], {}), "('tests', 'dat', 'oddw_smooth_u.npy')\n", (3932, 3969), False, 'import os\n'), ((4051, 4102), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""oddw_smooth_spf.npy"""'], {}), "('tests', 'dat', 'oddw_smooth_spf.npy')\n", (4063, 4102), False, 'import os\n'), ((368, 391), 'quantum_systems.ODQD.HOPotential', 'ODQD.HOPotential', (['omega'], {}), '(omega)\n', (384, 391), False, 'from quantum_systems import ODQD, GeneralOrbitalSystem, SpatialOrbitalSystem\n'), ((676, 699), 'quantum_systems.ODQD.HOPotential', 'ODQD.HOPotential', (['omega'], {}), '(omega)\n', (692, 699), False, 'from quantum_systems import ODQD, GeneralOrbitalSystem, SpatialOrbitalSystem\n'), ((1039, 1076), 'quantum_systems.ODQD.DWPotential', 'ODQD.DWPotential', (['omega', 'length_of_dw'], {}), '(omega, length_of_dw)\n', (1055, 1076), False, 'from quantum_systems import ODQD, GeneralOrbitalSystem, SpatialOrbitalSystem\n'), ((1439, 1495), 'quantum_systems.ODQD.GaussianPotential', 'ODQD.GaussianPotential', (['weight', 'center', 'deviation'], {'np': 'np'}), '(weight, center, deviation, np=np)\n', (1461, 1495), False, 'from quantum_systems import ODQD, GeneralOrbitalSystem, SpatialOrbitalSystem\n'), ((1827, 1854), 
'quantum_systems.ODQD.DWPotentialSmooth', 'ODQD.DWPotentialSmooth', ([], {'a': 'a'}), '(a=a)\n', (1849, 1854), False, 'from quantum_systems import ODQD, GeneralOrbitalSystem, SpatialOrbitalSystem\n')] |
import numpy as np
from scipy.interpolate import CloughTocher2DInterpolator as CT2DInt
from mpl_toolkits.mplot3d import (Axes3D, art3d)
import matplotlib as mpl
import matplotlib.pyplot as plt
def KinDrape_eff_NR(d, Grid, Org, Ang, OrgNode, PreShear, Plt):
## Mold definition: Hemisphere
The, Phi = np.meshgrid(np.linspace(0,2*np.pi,100),
np.linspace(1e-6,np.pi/2-np.pi/20,50))
X = np.cos(The)*np.sin(Phi); Y = np.sin(The)*np.sin(Phi); Z = np.cos(Phi)
F = CT2DInt((X.ravel(),Y.ravel()),Z.ravel(),fill_value = np.min(Z))
## Mold definition: Generic double-curved mold
#X,Y = np.meshgrid(np.linspace(0,0.5,51),np.linspace(0,0.5,51));
#F = lambda x,y: 1.004*x + 1.089*y - 3.667*x**2 -4.4*x*y - 3.75*y**2 + \
# 3.086*x**3 + 8.889*x**2*y + 4.321*y**3; Z = F(X,Y);
## Aux. variables Dir1 and Dir1. Initialization of Node, P and CellShear
Dir1 = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])
Dir2 = np.array([[0, 1], [-1, 0], [0, -1], [1, 0]])
Node = np.empty((*Grid, 3))*np.nan
P = [list(((np.nan,np.nan,np.nan),)*4)]*(Grid[0]-1)*(Grid[1]-1)
CellShear = np.empty(((Grid[0]-1)*(Grid[1]-1)))*np.nan
## STEP 1: Place org. node (1) and node (2) defined by ini. drape angle
# Get indices for cell, place 1st node and solve for 2nd node
Idx = CellIdx(OrgNode[0],OrgNode[1],Dir1,Dir2,Grid,0)[0]
Node[Idx[0][0],Idx[1][0]] = [Org[0], Org[1], F(Org[0], Org[1])]
Node[Idx] = MoldCircIntersec(Node[Idx],F,d,Ang+90,1)
## STEP 2: Place generator cells (initial cells) while minimizing shear
GenStart = OrgNode + np.array([[0, 0], [1, 1], [0, 1], [0, 0]])
nGen = np.hstack([Grid[0]-OrgNode[0]-1, Grid[1]-OrgNode[1]-2, OrgNode])
for i in range(4): # Arms
CellAng_0 = Ang+i*90 + PreShear*(1+(-1)**i)/2
for j in GenStart[i,:] + np.arange(nGen[i]).reshape(-1,1)*Dir1[i,:]:
# Get cell idx and no. Solve for Vert #2+4 using NR, upd. CellAng_0
Idx, CellNo = CellIdx(j[0],j[1],Dir1,Dir2,Grid,i)
Node[Idx], CellAng_0 = NRSol(Node[Idx],CellAng_0,F,d,PreShear,i)
# Put current cell vertex coord. and shear in P and CellShear
P[CellNo] = list(map(tuple,Node[Idx]))
CellShear[CellNo] = np.abs(np.mean(ShearFun(Node[Idx],i),0))
## STEP 3: Place remaining, constrained cells in 4 quadrants between arms
ConStart = OrgNode + np.array([[1,1],[0,1],[0,0],[1,0]])
nCon = (nGen[[0,1,2,1,2,3,0,3]]-[1,0,0,0,0,0,1,0]).reshape(4,2)
for i in range(4): # Quadrants
for j in ConStart[i,0] + np.arange(nCon[i,0])*(Dir1[i,0]+Dir2[i,0]):
for k in ConStart[i,1] + np.arange(nCon[i,1])*(Dir1[i,1]+Dir2[i,1]):
# Get cell idx and no. Call MoldCircIntersec to get Vert #3
Idx, CellNo = CellIdx(j,k,Dir1,Dir2,Grid,i)
Node[Idx] = MoldCircIntersec(Node[Idx],F,d,[],2)
# Put curr. cell coord. and shear in P and CellShear
P[CellNo] = list(map(tuple,Node[Idx]))
CellShear[CellNo] = np.abs(np.mean(ShearFun(Node[Idx],i),0))
## Plotting
if Plt:
# Create 3D figure and plot surface (offset in z by -1 mm)
fig = plt.figure(); ax = Axes3D(fig,auto_add_to_figure=0); fig.add_axes(ax)
ax.plot_surface(X,Y,Z-1e-3,color=(0.64,0.71,0.80),shade=True)
# Define colormap and map the shear of each cell to a list of colors
cMin = np.nanmin(CellShear); cMax = np.nanmax(CellShear)
CMapName = 'jet'; CMap = mpl.cm.get_cmap(CMapName)
C = list(map(list,CMap(mpl.colors.Normalize(cMin,cMax)(CellShear))))
# Plot cells as colored polygons, and create axis labels and colorbar
pc = art3d.Poly3DCollection(P,cmap=CMapName)
pc.set_facecolor(C); pc.set_edgecolor('k'); ax.add_collection3d(pc)
ax.set_xlabel('x'); ax.set_ylabel('y'); ax.set_zlabel('z')
ax.set_box_aspect((np.ptp(X), np.ptp(Y), np.ptp(Z)))
fig.colorbar(pc,shrink=0.75,boundaries=np.linspace(cMin, cMax, 50),
label='Shear angle [deg]'); plt.show()
return Node, CellShear, ax, fig
## Auxiliary functions
def CellIdx(Row,Col,Dir1,Dir2,Grid,No):
# Return all row and col idx. of the cell + cell no. given vert. 1 idx.
Rows = Row + np.array([0, Dir2[No,0], Dir1[No,0]+Dir2[No,0], Dir1[No,0]])
Cols = Col + np.array([0, Dir2[No,1], Dir1[No,1]+Dir2[No,1], Dir1[No,1]])
CellNo = Rows[No] + Cols[No]*(Grid[0]-1)
return tuple(np.vstack([Rows, Cols])), CellNo
def NRSol(Vert,CellAng,F,d,PreShear,i):
# Newton-Raphson solver to find CellAng that min. shear in cell (Step 2)
for j in range(100): # Max iter
# Calculate the current objective and the vertex coord. Check converg.
Obj_curr, Vert = Step2Obj(CellAng,Vert,F,d,i,PreShear)
if np.abs(Obj_curr) < 1e-3 or j == 100:
break
# Calculate a perturbed objective and a forward finite diff. gradient
Obj_pert = Step2Obj(CellAng+1e-8,Vert,F,d,i,PreShear)[0]
Grad = (Obj_pert - Obj_curr)/1e-8
# Update the design variable using a scaled step
CellAng = CellAng - 0.5*(Obj_curr / Grad)
return Vert, CellAng
def Step2Obj(CellAng,Vert,F,d,i,PreShear):
# Compute all cell vertices given the angle CellAng of cell edge 1-4
# Place: node #4 based on CellAng and d, and node #3 based on d (kinematics)
Vert = MoldCircIntersec(Vert,F,d,CellAng,3)
Vert = MoldCircIntersec(Vert,F,d,[],2)
# Evaluate shear and calculate the objective
Shear = ShearFun(Vert,i)
Obj = np.sum(Shear - PreShear)/4
return Obj, Vert
def MoldCircIntersec(Vert,F,d,Ang,UnknownVertIdx):
# Location of unknown vertex based on intersec. of circle and mold surface
# C, R: center and radius. Vec1, Vec2: perpend. unit vec. spanning circle
if UnknownVertIdx in (1,3): # Step 1 (2nd node) and Step 2 iterative (Vert #4)
# Circle is constructed along angle Ang, centered in Vert #1
C = Vert[0,:]
R = d
Vec1 = np.array([-np.cos(Ang*np.pi/180), -np.sin(Ang*np.pi/180), 0])
Vec2 = np.array([0, 0, 1])
else: # Step 3 and Step 2 iterative (Vert # 3)
# Circle is intersection of two spheres, centered in Vert 2 and Vert 4
C = (Vert[1,:] + Vert[3,:])/2
R = np.sqrt(d**2 - np.linalg.norm(C-Vert[1,:])**2)
Vec1 = (Vert[0,:]-C)/np.linalg.norm(Vert[0,:]-C)
CircAxis = (C-Vert[1,:])/np.linalg.norm(C-Vert[1,:])
Vec2 = np.cross(Vec1,CircAxis)/np.linalg.norm(np.cross(Vec1,CircAxis))
# Find the intersection between the circle and the surface using bisection
IntersecPt = np.array([np.NaN, np.NaN, np.NaN])
Theta = np.array([60*np.pi/180, (360-60)*np.pi/180])
for i in range(100): # Max iter
# Compute middle point
Theta_mid = np.sum(Theta)/2
# Circle pt. in 3D based on center, radius, 2 perp. vectors and angle
CircCoor = C + R*np.cos(Theta_mid)*Vec1 + R*np.sin(Theta_mid)*Vec2
FunVal_mid = CircCoor[2] - F(CircCoor[0],CircCoor[1])
# Stop or adjust interval based on function value at midpoint
if np.abs(FunVal_mid) < 5e-6: # Stopping tolerance
IntersecPt = np.hstack((CircCoor[0:2].T,F(CircCoor[0],CircCoor[1])))
break
elif FunVal_mid > 0:
Theta[0] = Theta_mid
else:
Theta[1] = Theta_mid
# Store the IntersecPt in Vert at the row specified by UnknownVertIdx
Vert[UnknownVertIdx,:] = IntersecPt
return Vert
def ShearFun(Vert, i):
    """Return the four signed shear angles (degrees) of quadrilateral cell i.

    At each vertex the angle between the two adjacent edge vectors is
    measured via atan2; its deviation from 90 degrees is the shear.  The
    alternating sign pattern combined with the (-1)**i factor keeps the sign
    convention consistent across neighbouring cells.
    """
    # Edge vectors leaving each vertex toward its two neighbours:
    # rows shifted by -1 give neighbours (1,2,3,0); +1 gives (3,0,1,2).
    nxt = np.roll(Vert, -1, axis=0) - Vert
    prv = np.roll(Vert, 1, axis=0) - Vert
    # Vertex angle = atan2(|nxt x prv|, nxt . prv); shear = angle - pi/2.
    sin_part = np.linalg.norm(np.cross(nxt, prv), 2, 1)
    cos_part = np.sum(nxt * prv, 1)
    shear_rad = np.arctan2(sin_part, cos_part) - np.pi/2
    signs = np.array([-1, 1, -1, 1]) * (-1)**i
    return np.degrees(shear_rad) * signs
"numpy.sum",
"numpy.abs",
"matplotlib.cm.get_cmap",
"numpy.empty",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linalg.norm",
"numpy.arange",
"matplotlib.colors.Normalize",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"numpy.linspace",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axe... | [((489, 500), 'numpy.cos', 'np.cos', (['Phi'], {}), '(Phi)\n', (495, 500), True, 'import numpy as np\n'), ((926, 970), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [-1, 0], [0, -1]]'], {}), '([[1, 0], [0, 1], [-1, 0], [0, -1]])\n', (934, 970), True, 'import numpy as np\n'), ((985, 1029), 'numpy.array', 'np.array', (['[[0, 1], [-1, 0], [0, -1], [1, 0]]'], {}), '([[0, 1], [-1, 0], [0, -1], [1, 0]])\n', (993, 1029), True, 'import numpy as np\n'), ((1692, 1764), 'numpy.hstack', 'np.hstack', (['[Grid[0] - OrgNode[0] - 1, Grid[1] - OrgNode[1] - 2, OrgNode]'], {}), '([Grid[0] - OrgNode[0] - 1, Grid[1] - OrgNode[1] - 2, OrgNode])\n', (1701, 1764), True, 'import numpy as np\n'), ((6793, 6827), 'numpy.array', 'np.array', (['[np.NaN, np.NaN, np.NaN]'], {}), '([np.NaN, np.NaN, np.NaN])\n', (6801, 6827), True, 'import numpy as np\n'), ((6841, 6895), 'numpy.array', 'np.array', (['[60 * np.pi / 180, (360 - 60) * np.pi / 180]'], {}), '([60 * np.pi / 180, (360 - 60) * np.pi / 180])\n', (6849, 6895), True, 'import numpy as np\n'), ((327, 357), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (338, 357), True, 'import numpy as np\n'), ((383, 429), 'numpy.linspace', 'np.linspace', (['(1e-06)', '(np.pi / 2 - np.pi / 20)', '(50)'], {}), '(1e-06, np.pi / 2 - np.pi / 20, 50)\n', (394, 429), True, 'import numpy as np\n'), ((431, 442), 'numpy.cos', 'np.cos', (['The'], {}), '(The)\n', (437, 442), True, 'import numpy as np\n'), ((443, 454), 'numpy.sin', 'np.sin', (['Phi'], {}), '(Phi)\n', (449, 454), True, 'import numpy as np\n'), ((460, 471), 'numpy.sin', 'np.sin', (['The'], {}), '(The)\n', (466, 471), True, 'import numpy as np\n'), ((472, 483), 'numpy.sin', 'np.sin', (['Phi'], {}), '(Phi)\n', (478, 483), True, 'import numpy as np\n'), ((1043, 1063), 'numpy.empty', 'np.empty', (['(*Grid, 3)'], {}), '((*Grid, 3))\n', (1051, 1063), True, 'import numpy as np\n'), ((1157, 1196), 'numpy.empty', 'np.empty', 
(['((Grid[0] - 1) * (Grid[1] - 1))'], {}), '((Grid[0] - 1) * (Grid[1] - 1))\n', (1165, 1196), True, 'import numpy as np\n'), ((1637, 1679), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [0, 1], [0, 0]]'], {}), '([[0, 0], [1, 1], [0, 1], [0, 0]])\n', (1645, 1679), True, 'import numpy as np\n'), ((2452, 2494), 'numpy.array', 'np.array', (['[[1, 1], [0, 1], [0, 0], [1, 0]]'], {}), '([[1, 1], [0, 1], [0, 0], [1, 0]])\n', (2460, 2494), True, 'import numpy as np\n'), ((3276, 3288), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3286, 3288), True, 'import matplotlib.pyplot as plt\n'), ((3295, 3328), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'auto_add_to_figure': '(0)'}), '(fig, auto_add_to_figure=0)\n', (3301, 3328), False, 'from mpl_toolkits.mplot3d import Axes3D, art3d\n'), ((3512, 3532), 'numpy.nanmin', 'np.nanmin', (['CellShear'], {}), '(CellShear)\n', (3521, 3532), True, 'import numpy as np\n'), ((3541, 3561), 'numpy.nanmax', 'np.nanmax', (['CellShear'], {}), '(CellShear)\n', (3550, 3561), True, 'import numpy as np\n'), ((3596, 3621), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['CMapName'], {}), '(CMapName)\n', (3611, 3621), True, 'import matplotlib as mpl\n'), ((3793, 3833), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'art3d.Poly3DCollection', (['P'], {'cmap': 'CMapName'}), '(P, cmap=CMapName)\n', (3815, 3833), False, 'from mpl_toolkits.mplot3d import Axes3D, art3d\n'), ((4168, 4178), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4176, 4178), True, 'import matplotlib.pyplot as plt\n'), ((4377, 4443), 'numpy.array', 'np.array', (['[0, Dir2[No, 0], Dir1[No, 0] + Dir2[No, 0], Dir1[No, 0]]'], {}), '([0, Dir2[No, 0], Dir1[No, 0] + Dir2[No, 0], Dir1[No, 0]])\n', (4385, 4443), True, 'import numpy as np\n'), ((4456, 4522), 'numpy.array', 'np.array', (['[0, Dir2[No, 1], Dir1[No, 1] + Dir2[No, 1], Dir1[No, 1]]'], {}), '([0, Dir2[No, 1], Dir1[No, 1] + Dir2[No, 1], Dir1[No, 1]])\n', (4464, 4522), True, 'import numpy as np\n'), ((5694, 
5718), 'numpy.sum', 'np.sum', (['(Shear - PreShear)'], {}), '(Shear - PreShear)\n', (5700, 5718), True, 'import numpy as np\n'), ((6243, 6262), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6251, 6262), True, 'import numpy as np\n'), ((563, 572), 'numpy.min', 'np.min', (['Z'], {}), '(Z)\n', (569, 572), True, 'import numpy as np\n'), ((4581, 4604), 'numpy.vstack', 'np.vstack', (['[Rows, Cols]'], {}), '([Rows, Cols])\n', (4590, 4604), True, 'import numpy as np\n'), ((6524, 6554), 'numpy.linalg.norm', 'np.linalg.norm', (['(Vert[0, :] - C)'], {}), '(Vert[0, :] - C)\n', (6538, 6554), True, 'import numpy as np\n'), ((6586, 6616), 'numpy.linalg.norm', 'np.linalg.norm', (['(C - Vert[1, :])'], {}), '(C - Vert[1, :])\n', (6600, 6616), True, 'import numpy as np\n'), ((6630, 6654), 'numpy.cross', 'np.cross', (['Vec1', 'CircAxis'], {}), '(Vec1, CircAxis)\n', (6638, 6654), True, 'import numpy as np\n'), ((6976, 6989), 'numpy.sum', 'np.sum', (['Theta'], {}), '(Theta)\n', (6982, 6989), True, 'import numpy as np\n'), ((7293, 7311), 'numpy.abs', 'np.abs', (['FunVal_mid'], {}), '(FunVal_mid)\n', (7299, 7311), True, 'import numpy as np\n'), ((7936, 7952), 'numpy.sum', 'np.sum', (['(u * v)', '(1)'], {}), '(u * v, 1)\n', (7942, 7952), True, 'import numpy as np\n'), ((7987, 8011), 'numpy.array', 'np.array', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (7995, 8011), True, 'import numpy as np\n'), ((2627, 2648), 'numpy.arange', 'np.arange', (['nCon[i, 0]'], {}), '(nCon[i, 0])\n', (2636, 2648), True, 'import numpy as np\n'), ((4006, 4015), 'numpy.ptp', 'np.ptp', (['X'], {}), '(X)\n', (4012, 4015), True, 'import numpy as np\n'), ((4017, 4026), 'numpy.ptp', 'np.ptp', (['Y'], {}), '(Y)\n', (4023, 4026), True, 'import numpy as np\n'), ((4028, 4037), 'numpy.ptp', 'np.ptp', (['Z'], {}), '(Z)\n', (4034, 4037), True, 'import numpy as np\n'), ((4088, 4115), 'numpy.linspace', 'np.linspace', (['cMin', 'cMax', '(50)'], {}), '(cMin, cMax, 50)\n', (4099, 4115), True, 'import numpy 
as np\n'), ((4930, 4946), 'numpy.abs', 'np.abs', (['Obj_curr'], {}), '(Obj_curr)\n', (4936, 4946), True, 'import numpy as np\n'), ((6669, 6693), 'numpy.cross', 'np.cross', (['Vec1', 'CircAxis'], {}), '(Vec1, CircAxis)\n', (6677, 6693), True, 'import numpy as np\n'), ((7917, 7931), 'numpy.cross', 'np.cross', (['u', 'v'], {}), '(u, v)\n', (7925, 7931), True, 'import numpy as np\n'), ((2709, 2730), 'numpy.arange', 'np.arange', (['nCon[i, 1]'], {}), '(nCon[i, 1])\n', (2718, 2730), True, 'import numpy as np\n'), ((6176, 6201), 'numpy.cos', 'np.cos', (['(Ang * np.pi / 180)'], {}), '(Ang * np.pi / 180)\n', (6182, 6201), True, 'import numpy as np\n'), ((6200, 6225), 'numpy.sin', 'np.sin', (['(Ang * np.pi / 180)'], {}), '(Ang * np.pi / 180)\n', (6206, 6225), True, 'import numpy as np\n'), ((6462, 6492), 'numpy.linalg.norm', 'np.linalg.norm', (['(C - Vert[1, :])'], {}), '(C - Vert[1, :])\n', (6476, 6492), True, 'import numpy as np\n'), ((7124, 7141), 'numpy.sin', 'np.sin', (['Theta_mid'], {}), '(Theta_mid)\n', (7130, 7141), True, 'import numpy as np\n'), ((1879, 1897), 'numpy.arange', 'np.arange', (['nGen[i]'], {}), '(nGen[i])\n', (1888, 1897), True, 'import numpy as np\n'), ((3654, 3686), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', (['cMin', 'cMax'], {}), '(cMin, cMax)\n', (3674, 3686), True, 'import matplotlib as mpl\n'), ((7097, 7114), 'numpy.cos', 'np.cos', (['Theta_mid'], {}), '(Theta_mid)\n', (7103, 7114), True, 'import numpy as np\n')] |
from .ik import IKUtils
from mp.action_sequences import ScriptedActions
from mp.const import INIT_JOINT_CONF
import numpy as np
def get_grasp_approach_actions(env, obs, grasp):
    """get_grasp_approach_actions.

    Build the action sequence that moves the finger tips from their current
    positions onto the grasp points of the object.

    Args:
        env: robot environment
        obs: observations from the robot
        grasp: selected grasp (cartesian tip targets on the object)

    Returns:
        A sequence of robot actions that executes the approach and grasp.

    Raises:
        RuntimeError: if no feasible pregrasp configuration is found.
    """
    seq = ScriptedActions(env, obs['robot_tip_positions'], grasp)
    # A safe pregrasp pose (tip positions + joint configuration) must exist
    # before the tips are allowed to close on the object.
    joint_conf, tip_targets = get_safe_pregrasp(env, obs, grasp)
    if joint_conf is None:
        raise RuntimeError('Feasible heuristic grasp approach is not found.')
    seq.add_raise_tips()
    seq.add_heuristic_pregrasp(tip_targets)
    seq.add_grasp(coef=0.6)
    # Longer action repeats are used when not running in simulation.
    repeat = 4 if env.simulation else 12 * 4
    repeat_end = 40 if env.simulation else 400
    return seq.get_action_sequence(action_repeat=repeat,
                                   action_repeat_end=repeat_end)
def get_safe_pregrasp(env, obs, grasp, candidate_margins=(1.1, 1.3, 1.5)):
    """Find a collision-free pregrasp pose for the selected grasp.

    Candidate tip positions are generated by scaling the grasp tip points
    away from the cube by each margin in ``candidate_margins``; tips that do
    not take part in the grasp stay at the initial pose.  For each candidate
    a collision-free IK solution is searched; the last (widest) safe margin
    wins.  If no candidate is collision free, a plain IK solution at the
    smallest margin is used as a fallback.

    Args:
        env: robot environment
        obs: observations from the robot (kept for signature compatibility)
        grasp: selected grasp
        candidate_margins: scale factors to try, smallest first.  An
            immutable tuple default avoids the mutable-default-argument
            pitfall of the previous list default.

    Returns:
        Tuple ``(joint_conf, tip_pos)`` of the chosen pregrasp, or
        ``(None, None)`` if even the fallback IK fails.
    """
    pregrasp_tip_pos = []
    pregrasp_jconfs = []
    ik_utils = IKUtils(env)
    init_tip_pos = env.platform.forward_kinematics(INIT_JOINT_CONF)
    # Row mask selecting the tips that participate in the grasp.
    mask = np.eye(3)[grasp.valid_tips, :].sum(0).reshape(3, -1)
    for margin in candidate_margins:
        tip_pos = grasp.T_cube_to_base(grasp.cube_tip_pos * margin)
        # Grasping tips use the scaled target, idle tips keep the init pose.
        tip_pos = tip_pos * mask + (1 - mask) * init_tip_pos
        qs = ik_utils.sample_no_collision_ik(tip_pos)
        if len(qs) > 0:
            pregrasp_tip_pos.append(tip_pos)
            pregrasp_jconfs.append(qs[0])
            print('candidate margin coef {}: safe'.format(margin))
        else:
            print('candidate margin coef {}: no ik solution found'.format(margin))
    if len(pregrasp_tip_pos) == 0:
        # No collision-free candidate: fall back to IK that ignores collisions.
        print('warning: no safe pregrasp pose with a margin')
        tip_pos = grasp.T_cube_to_base(grasp.cube_tip_pos * candidate_margins[0])
        tip_pos = tip_pos * mask + (1 - mask) * init_tip_pos
        qs = ik_utils.sample_ik(tip_pos)
        if len(qs) == 0:
            return None, None
        pregrasp_tip_pos.append(tip_pos)
        pregrasp_jconfs.append(qs[0])
    return pregrasp_jconfs[-1], pregrasp_tip_pos[-1]
| [
"numpy.eye",
"mp.action_sequences.ScriptedActions"
] | [((592, 647), 'mp.action_sequences.ScriptedActions', 'ScriptedActions', (['env', "obs['robot_tip_positions']", 'grasp'], {}), "(env, obs['robot_tip_positions'], grasp)\n", (607, 647), False, 'from mp.action_sequences import ScriptedActions\n'), ((1537, 1546), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1543, 1546), True, 'import numpy as np\n')] |
import os
import sys
import pandas as pd
import numpy as np
import csv
import pickle
text_path = '../../../Cross-Modal-BERT/data/text/'
dev_text = '../../../Cross-Modal-BERT/data/text/dev.tsv'
train_text = '../../../Cross-Modal-BERT/data/text/train.tsv'
test_text = '../../../Cross-Modal-BERT/data/text/test.tsv'
full_data_path = '../../../data/cmumosi_full_all.pkl'
noaligne_path = '../../../data/cmumosi_audio_noalign.pkl'
OUTPUT = '../../../Cross-Modal-BERT/data/audio/'
audio_select_cols = [ 1, 3, 6, 25, 60 ]
def generate_rainbow_map():
    """Build a lookup from normalized utterance text to its segment name.

    Loads the full CMU-MOSI pickle and maps each segment's stripped,
    lower-cased transcript to its segment key, so text rows from the tsv
    splits can be matched back to their audio segments.

    Returns:
        dict: transcript text -> segment_name
    """
    rainbow_map = {}
    # Context manager closes the pickle file promptly; the original
    # pickle.load(open(...)) leaked the file handle.
    with open(full_data_path, 'rb') as f:
        full_data = pickle.load(f)
    print('full_data: ', len(full_data.keys()))
    for segment_name in full_data.keys():
        text = full_data[segment_name]['text'].strip().lower()
        rainbow_map[text] = segment_name
    return rainbow_map
def get_audio(env, audio_noalign, rainbow_map, max_len):
    """Load the audio features matched to the text rows of one split.

    For each row of ``<env>.tsv`` the transcript is looked up in
    ``rainbow_map`` to recover its segment name, and the corresponding
    features (columns ``audio_select_cols``) are zero-padded — or truncated —
    to exactly ``max_len`` frames.

    Args:
        env (str): split name, one of 'train' / 'dev' / 'test'
        audio_noalign (dict): segment_name -> {'features': ndarray}
        rainbow_map (dict): transcript text -> segment_name
        max_len (int): target number of frames per segment

    Returns:
        list: one (max_len, 5) ndarray per matched row
    """
    audios = []
    df = pd.read_csv(os.path.join(text_path, env + '.tsv'), sep='\t')
    for index, row in df.iterrows():
        text = row[0].strip().lower()
        segment_name = rainbow_map.get(text)
        if not segment_name:
            # No matching segment for this transcript; report and skip.
            print('text: ', text)
            continue
        audio = audio_noalign[segment_name]['features']
        audio = audio[:, audio_select_cols]
        # Truncate first: np.zeros((negative, 5)) would raise ValueError for
        # segments longer than max_len.
        audio = audio[:max_len]
        padding = np.zeros((max_len - len(audio), 5))
        audios.append(np.concatenate([audio, padding]))
    print(env, len(audios))
    return audios
def main():
    """Build padded audio features for all three splits and pickle them."""
    rainbow_map = generate_rainbow_map()
    audio_noalign = pickle.load(open(noaligne_path, 'rb'))
    max_len = 5000  # fixed frame budget per segment
    print('max_len: ', max_len)
    data = [get_audio(split, audio_noalign, rainbow_map, max_len)
            for split in ('train', 'dev', 'test')]
    with open(OUTPUT + 'audio_noalign.pkl', mode='wb') as f:
        pickle.dump(data, f)
# Entry point: build and pickle the audio feature splits when run as a script.
if __name__ == '__main__':
    main()
| [
"pickle.dump",
"os.path.join",
"numpy.concatenate"
] | [((945, 982), 'os.path.join', 'os.path.join', (['text_path', "(env + '.tsv')"], {}), "(text_path, env + '.tsv')\n", (957, 982), False, 'import os\n'), ((1975, 1995), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (1986, 1995), False, 'import pickle\n'), ((1405, 1437), 'numpy.concatenate', 'np.concatenate', (['[audio, padding]'], {}), '([audio, padding])\n', (1419, 1437), True, 'import numpy as np\n')] |
#!python
# ----------------------------------------------------------------------------
# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Record beacon signals of specified satellites in Digital RF.
Satellite and recording parameters are specified in .ini configuration files.
Example configurations are included along with this script.
"""
from __future__ import absolute_import, division, print_function
import datetime
import math
import optparse
import os
import string
import subprocess
import sys
import time
import traceback
import dateutil.parser
import ephem
import numpy as np
import pytz
from digital_rf import DigitalMetadataWriter
from six.moves import configparser
class ExceptionString(Exception):
    """Exception whose string form is the repr of its first argument."""

    def __str__(self):
        # repr() so the message renders quoted, exactly as it was passed in.
        message = self.args[0]
        return repr(message)
def doppler_shift(frequency, relativeVelocity, propagationSpeed=3e8):
    """
    DESCRIPTION:
        Calculate the Doppler-shifted frequency f' = f - f*(v/c) for a given
        beacon frequency and relative velocity.
    INPUTS:
        frequency (float) = satellite's beacon frequency in Hz
        relativeVelocity (float) = velocity at which the satellite is moving
            towards (negative) or away from (positive) the observer in m/s
        propagationSpeed (float) = wave propagation speed in m/s; defaults to
            3e8, preserving the historical behaviour of this module
    RETURNS:
        Param1 (float) = The frequency experienced due to doppler shift in Hz
    AFFECTS:
        None
    EXCEPTIONS:
        None
    DEPENDENCIES:
        None
    Note: relativeVelocity is positive when moving away from the observer
          and negative when moving towards
    """
    return frequency - frequency * (relativeVelocity / propagationSpeed)
def satellite_rise_and_set(
    opt, obsLat, obsLong, obsElev, objName, tle1, tle2, startDate
):
    """Predict the next pass of a satellite over an observer location.

    Builds a PyEphem observer at (obsLat, obsLong, obsElev), loads the
    satellite from its TLE, and returns the next rise, transit and set times
    after ``startDate``.

    Args:
        obsLat (str): observer latitude in degrees
        obsLong (str): observer longitude in degrees
        obsElev (float): observer elevation in meters
        objName (str): satellite name
        tle1 (str): first TLE line
        tle2 (str): second TLE line
        startDate: date to search from (string or ephem.date)

    Returns:
        Tuple ``(rise, transit, set)`` of ephem dates.
    """
    observer = ephem.Observer()
    observer.lat = obsLat
    observer.long = obsLong
    observer.elev = obsElev
    observer.date = startDate
    if opt.debug:
        print("dbg location: ", observer)
        print("dbg tle1: ", tle1)
        print("dbg tle2: ", tle2)
    satellite = ephem.readtle(objName, tle1, tle2)
    if opt.debug:
        print("dbg object: ", satellite)
    satellite.compute(observer)  # computes closest next rise time to given date
    # next_pass returns (rise, rise_az, transit, transit_alt, set, set_az).
    rise, _, transit, _, setting, _ = observer.next_pass(satellite)
    return (rise, transit, setting)
def satellite_values_at_time(opt, obsLat, obsLong, obsElev, objName, tle1, tle2, date):
    """Compute ephemeris values for a satellite at a single instant.

    Builds a PyEphem observer, loads the satellite from its TLE and computes
    its geometry as seen from the observer at ``date``.

    Args:
        obsLat (str): observer latitude in degrees
        obsLong (str): observer longitude in degrees
        obsElev (float): observer elevation in meters
        objName (str): satellite name
        tle1 (str): first TLE line
        tle2 (str): second TLE line
        date: the evaluation time (string or ephem.date)

    Returns:
        Tuple of (sublat, sublong, range [m], range_velocity [m/s], azimuth,
        altitude, right ascension, declination, elevation [m]).
    """
    site = ephem.Observer()
    site.lat = obsLat
    site.long = obsLong
    site.elev = obsElev
    site.date = date
    sat = ephem.readtle(objName, tle1, tle2)
    sat.compute(site)
    if opt.debug:
        print(
            "\tLatitude: %s, Longitude %s, Range: %gm, Range Velocity: %gm/s"
            % (sat.sublat, sat.sublong, sat.range, sat.range_velocity)
        )
        print(
            "\tAzimuth: %s, Altitude: %s, Elevation: %gm"
            % (sat.az, sat.alt, sat.elevation)
        )
        print("\tRight Ascention: %s, Declination: %s" % (sat.ra, sat.dec))
    return (
        sat.sublat,
        sat.sublong,
        sat.range,
        sat.range_velocity,
        sat.az,
        sat.alt,
        sat.ra,
        sat.dec,
        sat.elevation,
    )
def max_satellite_bandwidth(
    opt,
    obsLat,
    obsLong,
    obsElev,
    objName,
    tle1,
    tle2,
    startDate,
    endDate,
    interval,
    beaconFreq,
):
    """Return the largest Doppler offset over all passes in a time window.

    Iterates over every rise/set cycle of the satellite between ``startDate``
    and ``endDate``, samples the Doppler shift during each pass via
    ``satellite_bandwidth`` and keeps the maximum absolute offset from the
    beacon frequency.

    INPUTS:
        obsLat/obsLong (string) = observer latitude/longitude in degrees
        obsElev (float)         = observer elevation in meters
        objName (string)        = satellite name
        tle1, tle2 (string)     = TLE lines
        startDate, endDate      = window to search (string or ephem.date)
        interval (float)        = sampling interval within a pass, seconds
        beaconFreq (float)      = beacon frequency in Hz
    RETURNS:
        Param1 (float) = maximum absolute Doppler offset in Hz seen in the
                         window, or 0 if no pass occurs
    """
    maxBandwidth = 0.0
    (satRise, satTransit, satSet) = satellite_rise_and_set(
        opt, obsLat, obsLong, obsElev, objName, tle1, tle2, startDate
    )
    if satRise == satTransit == satSet:
        return 0
    while satRise < endDate:
        (objBandwidth, shiftedFrequencies) = satellite_bandwidth(
            opt,
            obsLat,
            obsLong,
            obsElev,
            objName,
            tle1,
            tle2,
            satRise,
            satSet,
            interval,
            beaconFreq,
        )
        # satellite_bandwidth returns an array of offsets; reduce it to a
        # scalar before comparing.  The original ``if objBandwidth >
        # maxBandwidth`` raised ValueError ("truth value of an array is
        # ambiguous") for arrays with more than one element.
        passPeak = float(np.max(np.abs(objBandwidth))) if objBandwidth.size else 0.0
        if passPeak > maxBandwidth:
            maxBandwidth = passPeak
        # Step 5 minutes past the set time to find the next pass.
        (satRise, satTransit, satSet) = satellite_rise_and_set(
            opt,
            obsLat,
            obsLong,
            obsElev,
            objName,
            tle1,
            tle2,
            satSet + ephem.minute * 5.0,
        )
    return maxBandwidth
def satellite_bandwidth(
    opt,
    obsLat,
    obsLong,
    obsElev,
    objName,
    tle1,
    tle2,
    satRise,
    satSet,
    interval,
    beaconFreq,
):
    """Sample the Doppler shift of a beacon over one satellite pass.

    The pass is sampled every ``interval`` seconds between ``satRise`` and
    ``satSet``; at each sample the range-rate is converted to a Doppler
    frequency and an offset from the nominal beacon frequency.

    INPUTS:
        obsLat/obsLong (string) = observer latitude/longitude in degrees
        obsElev (float)         = observer elevation in meters
        objName (string)        = satellite name
        tle1, tle2 (string)     = TLE lines
        satRise, satSet         = pass boundaries (ephem dates)
        interval (float)        = sampling interval in seconds
        beaconFreq (float)      = beacon frequency in Hz
    RETURNS:
        Param1 (np.ndarray) = Doppler offsets from the beacon frequency, Hz
        Param2 (np.ndarray) = absolute Doppler-shifted frequencies, Hz
    DEPENDENCIES:
        ephem.date(...), ephem.second
    """
    currTime = satRise
    dopplerFrequencies = []
    dopplerBandwidth = []
    if opt.debug:
        print("satellite_bandwidth ", currTime, satSet, interval)
    # ephem dates are floats (days since the Dublin JD epoch), so they can be
    # compared directly; the original comparison of triple()[2] day values
    # broke for passes spanning a month boundary.
    while currTime < satSet:
        try:
            (
                sublat,
                sublong,
                range_val,
                range_velocity,
                az,
                alt,
                ra,
                dec,
                elevation,
            ) = satellite_values_at_time(
                opt, obsLat, obsLong, obsElev, objName, tle1, tle2, currTime
            )
            dopplerFreq = doppler_shift(beaconFreq, range_velocity)
            dopplerFrequencies.append(dopplerFreq)
            dopplerBandwidth.append(dopplerFreq - beaconFreq)
        except Exception as eobj:
            exp_str = str(ExceptionString(eobj))
            print("exception: %s." % (exp_str))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(lines)
        # Advance outside the try block so a persistent exception cannot
        # stall the loop forever (the original never advanced on failure).
        currTime = ephem.date(currTime + ephem.second * interval)
    if opt.debug:
        print("# DF:", np.array(dopplerFrequencies) / 1e6, " MHz")
        print("# OB:", np.array(dopplerBandwidth) / 1e3, " kHz")
    return (np.array(dopplerBandwidth), np.array(dopplerFrequencies))
def __read_config__(inifile):
    """Parse an INI configuration file into a dictionary of dictionaries.

    Each ``[Section]`` of the file becomes a key of the returned dictionary;
    its value is a dict mapping the section's option names to their raw
    string values.  Object sections carry keys such as ``ObsLat``,
    ``ObsLong``, ``ObsElev``, ``ObjName``, ``TLE1``, ``TLE2``,
    ``BeaconFreq``, ``StartDate`` and ``Interval``; radio/site files follow
    their own layout.

    Args:
        inifile (str): path of the configuration file to read.

    Returns:
        dict: {section_name: {option: value, ...}, ...}
    """
    print("# loading config ", inifile)
    parser = configparser.ConfigParser()
    parser.read(inifile)
    return {
        section: {option: parser.get(section, option)
                  for option in parser.options(section)}
        for section in parser.sections()
    }
def get_next_object(opt, site, objects, ctime):
    """Return the id of the configured object with the earliest next rise.

    For each object in ``objects`` the next pass after ``ctime`` is
    predicted at the configured site; objects whose predicted milestones are
    not strictly ordered (rise < transit < set) are skipped.

    Args:
        opt: command line options (``debug`` is honoured)
        site (dict): site configuration as returned by __read_config__
        objects (dict): object configurations as returned by __read_config__
        ctime (float): unix timestamp to search from

    Returns:
        The key in ``objects`` whose next rise comes first.  (Raises
        IndexError if no object yields a valid pass, as before.)
    """
    rise_list = {}
    # Site parameters and the search epoch are loop invariants — hoisted.
    obs_lat = site["site"]["latitude"]
    obs_long = site["site"]["longitude"]
    obs_elev = float(site["site"]["elevation"])
    c_ephem_time = ephem.Date(datetime.datetime.utcfromtimestamp(ctime))
    for obj_id, obj_info in objects.items():
        if opt.debug:
            print("# object ", obj_id, " @ ", ctime)
            print("# obj_info", obj_info)
        obj_name = obj_info["name"]
        # TLE values are stored quoted in the ini file; strip the quotes.
        obj_tle1 = obj_info["tle1"][1:-1]
        obj_tle2 = obj_info["tle2"][1:-1]
        # NOTE: the original also parsed obj_info["frequencies"] with the
        # Python-2-only string.split() here; the value was never used and the
        # call raised AttributeError on Python 3, so it has been removed.
        (sat_rise, sat_transit, sat_set) = satellite_rise_and_set(
            opt, obs_lat, obs_long, obs_elev, obj_name, obj_tle1, obj_tle2, c_ephem_time
        )
        # Skip degenerate predictions where the milestones are not ordered.
        if sat_set <= sat_rise or sat_transit <= sat_rise or sat_set <= sat_transit:
            continue
        rise_list[sat_rise] = obj_id
    if opt.debug:
        print(" rise list : ", rise_list)
    keys = list(rise_list.keys())
    if opt.debug:
        print(" rise keys : ", keys)
    keys.sort()
    if opt.debug:
        print(" sorted : ", keys)
        print(" selected : ", rise_list[keys[0]])
    return rise_list[keys[0]]
def ephemeris_passes(opt, st0, et0):
    """
    DESCRIPTION:
        Finds passes from the start time to the end time given the options. Will
        implement a bash script or execute on the command line.
    USAGE:
        ephemeris_passes(opt, st0, et0)
    INPUTS:
        opt             command line arguments
        st0             unix time start time
        et0             unix time end time
    RETURNS:
        None
    AFFECTS:
        Prints all the passes
    EXCEPTIONS:
        None
    DEPENDENCIES:
        ephem
    """
    # NOTE(review): 'passes' is assigned but never used below.
    passes = {}
    objects = __read_config__(opt.config)
    site = __read_config__(opt.site)
    if opt.verbose:
        print("# got objects ", objects)
        print("# got radio site ", site)
        print("\n")
    ctime = st0
    etime = et0
    last_sat_rise = ctime
    while ctime < etime:
        obj = get_next_object(opt, site, objects, ctime)
        obj_id = obj
        obj_info = objects[obj]
        if opt.debug:
            print("# object ", obj_id, " @ ", ctime)
            print("# obj_info", obj_info)
        site_name = site["site"]["name"]
        site_tag = site["site"]["tag"]
        obs_lat = site["site"]["latitude"]
        obs_long = site["site"]["longitude"]
        obs_elev = float(site["site"]["elevation"])
        obj_name = obj_info["name"]
        # TLE values are stored quoted in the ini file; strip the quotes.
        obj_tle1 = obj_info["tle1"][1:-1]
        obj_tle2 = obj_info["tle2"][1:-1]
        # NOTE(review): string.split() exists only in Python 2; on Python 3
        # this raises AttributeError. obj_info["frequencies"].split(",")
        # works on both — confirm the target interpreter before deploying.
        obj_freqs = np.array(string.split(obj_info["frequencies"], ","), np.float32)
        c_dtime = datetime.datetime.utcfromtimestamp(ctime)
        c_ephem_time = ephem.Date(c_dtime)
        try:
            (sat_rise, sat_transit, sat_set) = satellite_rise_and_set(
                opt,
                obs_lat,
                obs_long,
                obs_elev,
                obj_name,
                obj_tle1,
                obj_tle2,
                c_ephem_time,
            )
            # NOTE(review): this 'continue' does not advance ctime, so a
            # persistently degenerate prediction would spin here forever.
            if sat_set <= sat_rise or sat_transit <= sat_rise or sat_set <= sat_transit:
                continue
            if not last_sat_rise == sat_rise:
                (
                    sub_lat,
                    sub_long,
                    sat_range,
                    sat_velocity,
                    az,
                    el,
                    ra,
                    dec,
                    alt,
                ) = satellite_values_at_time(
                    opt,
                    obs_lat,
                    obs_long,
                    obs_elev,
                    obj_name,
                    obj_tle1,
                    obj_tle2,
                    sat_transit,
                )
                # NOTE(review): uses the module-global 'op' (set under
                # __main__), not the 'opt' parameter — intentional coupling?
                (obj_bandwidth, obj_doppler) = satellite_bandwidth(
                    opt,
                    obs_lat,
                    obs_long,
                    obs_elev,
                    obj_name,
                    obj_tle1,
                    obj_tle2,
                    sat_rise,
                    sat_set,
                    op.interval,
                    obj_freqs,
                )
                last_sat_rise = sat_rise
                if opt.debug:
                    print(
                        "time : ",
                        c_ephem_time,
                        sat_set,
                        (sat_set - c_ephem_time) * 60 * 60 * 24,
                    )
                # ephem date differences are in days; convert to seconds.
                ctime = ctime + (sat_set - c_ephem_time) * 60 * 60 * 24
                if opt.el_mask:
                    el_val = np.rad2deg(el)
                    # NOTE(review): the np.float alias was removed in
                    # NumPy 1.24; plain float(opt.el_mask) is equivalent.
                    el_mask = np.float(opt.el_mask)
                    if opt.debug:
                        print("# el_val ", el_val, " el_mask ", el_mask)
                    if el_val < el_mask:  # check mask here!
                        continue
                # This should really go out as digital metadata into the recording location
                print("# Site : %s " % (site_name))
                print("# Site tag : %s " % (site_tag))
                print("# Object Name: %s" % (obj_name))
                print(
                    "# observer @ latitude : %s, longitude : %s, elevation : %s m"
                    % (obs_lat, obs_long, obs_elev)
                )
                print(
                    "# GMT -- Rise Time: %s, Transit Time: %s, Set Time: %s"
                    % (sat_rise, sat_transit, sat_set)
                )
                print(
                    "# Azimuth: %f deg, Elevation: %f deg, Altitude: %g km"
                    % (np.rad2deg(az), np.rad2deg(el), alt / 1000.0)
                )
                print(
                    "# Frequencies: %s MHz, Bandwidth: %s kHz"
                    % (
                        obj_freqs / 1.0e6,
                        obj_bandwidth[np.argmax(obj_bandwidth)] / 1.0e3,
                    )
                )
                pass_md = {
                    "obj_id": obj_id,
                    "rise_time": sat_rise,
                    "transit_time": sat_transit,
                    "set_time": sat_set,
                    "azimuth": np.rad2deg(az),
                    "elevation": np.rad2deg(el),
                    "altitude": alt,
                    "doppler_frequency": obj_doppler,
                    "doppler_bandwidth": obj_bandwidth,
                }
                if opt.schedule:
                    # Build the recording command(s), padding the window by
                    # one minute before rise and after set.
                    d = sat_rise.tuple()
                    rise_time = "%04d%02d%02d_%02d%02d" % (d[0], d[1], d[2], d[3], d[4])
                    offset_rise = ephem.date(sat_rise - ephem.minute)
                    d = offset_rise.tuple()
                    offset_rise_time = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
                        d[0],
                        d[1],
                        d[2],
                        d[3],
                        d[4],
                        int(d[5]),
                    )
                    offset_set = ephem.date(sat_set + ephem.minute)
                    d = offset_set.tuple()
                    offset_set_time = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
                        d[0],
                        d[1],
                        d[2],
                        d[3],
                        d[4],
                        int(d[5]),
                    )
                    cmd_lines = []
                    # NOTE(review): four more Python-2-only string.split()
                    # calls follow — see the note above.
                    radio_channel = string.split(site["radio"]["channel"][1:-1], ",")
                    radio_gain = string.split(site["radio"]["gain"][1:-1], ",")
                    radio_address = string.split(site["radio"]["address"][1:-1], ",")
                    recorder_channels = string.split(
                        site["recorder"]["channels"][1:-1], ","
                    )
                    radio_sample_rate = site["radio"]["sample_rate"]
                    cmd_line0 = "%s " % (site["recorder"]["command"])
                    if site["radio"]["type"] == "b210":
                        # just record a fixed frequency, needs a dual radio Thor3 script. This can be done!
                        idx = 0
                        freq = obj_freqs[1]
                        cmd_line1 = '-r %s -d "%s" -s %s -e %s -c %s -f %4.3f ' % (
                            radio_sample_rate,
                            radio_channel[idx],
                            offset_rise_time,
                            offset_set_time,
                            recorder_channels[idx],
                            freq,
                        )
                        log_file_name = "%s_%s_%s_%dMHz.log" % (
                            site_tag,
                            obj_id,
                            offset_rise_time,
                            int(freq / 1.0e6),
                        )
                        cmd_fname = "%s_%s_%s_%dMHz" % (
                            site_tag,
                            obj_id,
                            rise_time,
                            int(freq / 1.0e6),
                        )
                        cmd_line2 = " -g %s -m %s --devargs num_recv_frames=1024 --devargs master_clock_rate=24.0e6 -o %s/%s" % (
                            radio_gain[idx],
                            radio_address[idx],
                            site["recorder"]["data_path"],
                            cmd_fname,
                        )
                        cmd_line2 += " {0}".format(
                            site["radio"].get("extra_args", "")
                        ).rstrip()
                        # Background execution: detach with nohup and &.
                        if not opt.foreground:
                            cmd_line0 = "nohup " + cmd_line0
                            cmd_line2 = cmd_line2 + " 2>&1 &"
                        else:
                            cmd_line2 = cmd_line2
                        if opt.debug:
                            print(cmd_line0, cmd_line1, cmd_line2, cmd_fname)
                        cmd_lines.append(
                            (
                                cmd_line0 + cmd_line1 + cmd_line2,
                                cmd_fname,
                                pass_md,
                                obj_info,
                            )
                        )
                        print("\n")
                    elif site["radio"]["type"] == "n200_tvrx2":
                        # Dual-channel radio: one command records both beacon
                        # frequencies at once.
                        cmd_line1 = (
                            ' -r %s -d "%s %s" -s %s -e %s -c %s,%s -f %4.3f,%4.3f '
                            % (
                                radio_sample_rate,
                                radio_channel[0],
                                radio_channel[1],
                                offset_rise_time,
                                offset_set_time,
                                recorder_channels[0],
                                recorder_channels[1],
                                obj_freqs[0],
                                obj_freqs[1],
                            )
                        )
                        log_file_name = "%s_%s_%s_combined.log" % (
                            site_tag,
                            obj_id,
                            offset_rise_time,
                        )
                        cmd_fname = "%s_%s_%s_combined" % (site_tag, obj_id, rise_time)
                        cmd_line2 = " -g %s,%s -m %s -o %s/%s" % (
                            radio_gain[0],
                            radio_gain[1],
                            radio_address[0],
                            site["recorder"]["data_path"],
                            cmd_fname,
                        )
                        cmd_line2 += " {0}".format(
                            site["radio"].get("extra_args", "")
                        ).rstrip()
                        if not opt.foreground:
                            cmd_line0 = "nohup " + cmd_line0
                            cmd_line2 = cmd_line2 + " 2>&1 &"
                        else:
                            cmd_line2 = cmd_line2
                        if opt.debug:
                            print(cmd_line0, cmd_line1, cmd_line2, cmd_fname)
                        cmd_lines.append(
                            (
                                cmd_line0 + cmd_line1 + cmd_line2,
                                cmd_fname,
                                pass_md,
                                obj_info,
                            )
                        )
                        print("\n")
                    if opt.foreground:
                        # Foreground mode: wait for the pass, write metadata,
                        # run the recorder and wait for the pass to end.
                        dtstart0 = dateutil.parser.parse(offset_rise_time)
                        dtstop0 = dateutil.parser.parse(offset_set_time)
                        start0 = int(
                            (
                                dtstart0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)
                            ).total_seconds()
                        )
                        stop0 = int(
                            (
                                dtstop0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)
                            ).total_seconds()
                        )
                        if opt.verbose:
                            print("# waiting for %s @ %s " % (obj_id, offset_rise_time))
                        while time.time() < start0 - 30:
                            time.sleep(op.interval)
                            if opt.verbose:
                                print("# %d sec" % (start0 - time.time()))
                        for cmd_tuple in cmd_lines:
                            cmd, cmd_fname, pass_md, info_md = cmd_tuple
                            print("# Executing command %s " % (cmd))
                            # write the digital metadata
                            start_idx = int(start0)
                            mdata_dir = (
                                site["recorder"]["metadata_path"]
                                + "/"
                                + cmd_fname
                                + "/metadata"
                            )
                            # site metadata
                            # note we use directory structure for the dictionary here
                            # eventually we will add this feature to digital metadata
                            for k in site:
                                # NOTE(review): the bare except tolerates a
                                # pre-existing directory but also hides real
                                # OS errors; os.makedirs(..., exist_ok=True)
                                # would be clearer (same for the two below).
                                try:
                                    os.makedirs(mdata_dir + "/config/%s" % (k))
                                except:
                                    pass
                                md_site_obj = DigitalMetadataWriter(
                                    mdata_dir + "/config/%s" % (k), 3600, 60, 1, 1, k
                                )
                                if opt.debug:
                                    print(site[k])
                                if opt.verbose:
                                    print("# writing metadata config / %s " % (k))
                                md_site_obj.write(start_idx, site[k])
                            # info metadata
                            try:
                                os.makedirs(mdata_dir + "/info")
                            except:
                                pass
                            md_info_obj = DigitalMetadataWriter(
                                mdata_dir + "/info", 3600, 60, 1, 1, "info"
                            )
                            if opt.verbose:
                                print("# writing metadata info")
                            if opt.debug:
                                print(info_md)
                            md_info_obj.write(start_idx, info_md)
                            # pass metadata
                            try:
                                os.makedirs(mdata_dir + "/pass")
                            except:
                                pass
                            md_pass_obj = DigitalMetadataWriter(
                                mdata_dir + "/pass", 3600, 60, 1, 1, "pass"
                            )
                            if opt.verbose:
                                print("# writing metadata pass")
                            if opt.debug:
                                print(pass_md)
                            md_pass_obj.write(start_idx, pass_md)
                            # sys.exit(1)
                            # call the command
                            try:
                                subprocess.call(cmd, shell=True)
                            except Exception as eobj:
                                exp_str = str(ExceptionString(eobj))
                                print("exception: %s." % (exp_str))
                                exc_type, exc_value, exc_traceback = sys.exc_info()
                                lines = traceback.format_exception(
                                    exc_type, exc_value, exc_traceback
                                )
                                print(lines)
                        print("# wait...")
                        while time.time() < stop0 + 1:
                            time.sleep(op.interval)
                        if opt.verbose:
                            print("# complete in %d sec" % (stop0 - time.time()))
        except Exception as eobj:
            exp_str = str(ExceptionString(eobj))
            print("exception: %s." % (exp_str))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(lines)
            # sys.exit(1)
            # advance 10 minutes
            ctime = ctime + 60 * op.interval
            continue
def parse_command_line():
    """Build the scheduler's command-line parser and parse ``sys.argv``.

    Returns
    -------
    (options, args) : tuple
        The ``optparse`` Values object holding all flags and the list of
        leftover positional arguments.
    """
    parser = optparse.OptionParser()
    # (flags, keyword arguments) specification for every supported option.
    option_specs = [
        (("-v", "--verbose"),
         dict(action="store_true", dest="verbose", default=False,
              help="prints debug output and additional detail.")),
        (("-d", "--debug"),
         dict(action="store_true", dest="debug", default=False,
              help="run in debug mode and not service context.")),
        (("-b", "--bash"),
         dict(action="store_true", dest="schedule", default=False,
              help="create schedule file for bash shell based command / control.")),
        (("-m", "--mask"),
         dict(dest="el_mask", type=float, default=0.0,
              help="mask all passes below the provided elevation.")),
        (("-c", "--config"),
         dict(dest="config", default="config/beacons.ini",
              help="Use configuration file <config>.")),
        (("-f", "--foreground"),
         dict(action="store_true", dest="foreground",
              help="Execute schedule in foreground.")),
        (("-s", "--starttime"),
         dict(dest="starttime",
              help="Start time in ISO8601 format, e.g. 2016-01-01T15:24:00Z")),
        (("-e", "--endtime"),
         dict(dest="endtime",
              help="End time in ISO8601 format, e.g. 2016-01-01T16:24:00Z")),
        (("-i", "--interval"),
         dict(dest="interval", type=float, default=10.0,
              help="Sampling interval for ephemeris predictions, default is 10 seconds.")),
        (("-r", "--radio"),
         dict(dest="site", default="config/site.ini",
              help="Radio site configuration file.")),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    # parse_args() already returns the (options, args) pair.
    return parser.parse_args()
if __name__ == "__main__":
    # parse command line options
    op, args = parse_command_line()
    # Start time: default is ~10 s from now; otherwise convert the supplied
    # ISO8601 string to Unix epoch seconds (offset from 1970-01-01 UTC).
    if op.starttime is None:
        st0 = int(math.ceil(time.time())) + 10
    else:
        dtst0 = dateutil.parser.parse(op.starttime)
        st0 = int(
            (dtst0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds()
        )
        print("Start time: %s (%d)" % (dtst0.strftime("%a %b %d %H:%M:%S %Y"), st0))
    # End time: default is 24 hours after the start; otherwise parse the same way.
    if op.endtime is None:
        # default to the next 24 hours
        et0 = st0 + 60 * 60 * 24.0
    else:
        dtet0 = dateutil.parser.parse(op.endtime)
        et0 = int(
            (dtet0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds()
        )
        print("End time: %s (%d)" % (dtet0.strftime("%a %b %d %H:%M:%S %Y"), et0))
    # Run the pass scheduler defined above over the [st0, et0] window.
    ephemeris_passes(op, st0, et0)
| [
"string.split",
"ephem.Observer",
"optparse.OptionParser",
"numpy.argmax",
"sys.exc_info",
"ephem.readtle",
"datetime.datetime.utcfromtimestamp",
"digital_rf.DigitalMetadataWriter",
"traceback.format_exception",
"ephem.Date",
"numpy.float",
"time.sleep",
"datetime.datetime",
"subprocess.ca... | [((3452, 3468), 'ephem.Observer', 'ephem.Observer', ([], {}), '()\n', (3466, 3468), False, 'import ephem\n'), ((3715, 3749), 'ephem.readtle', 'ephem.readtle', (['objName', 'tle1', 'tle2'], {}), '(objName, tle1, tle2)\n', (3728, 3749), False, 'import ephem\n'), ((6008, 6024), 'ephem.Observer', 'ephem.Observer', ([], {}), '()\n', (6022, 6024), False, 'import ephem\n'), ((6138, 6172), 'ephem.readtle', 'ephem.readtle', (['objName', 'tle1', 'tle2'], {}), '(objName, tle1, tle2)\n', (6151, 6172), False, 'import ephem\n'), ((14032, 14059), 'six.moves.configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (14057, 14059), False, 'from six.moves import configparser\n'), ((32019, 32042), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (32040, 32042), False, 'import optparse\n'), ((12409, 12435), 'numpy.array', 'np.array', (['dopplerBandwidth'], {}), '(dopplerBandwidth)\n', (12417, 12435), True, 'import numpy as np\n'), ((12437, 12465), 'numpy.array', 'np.array', (['dopplerFrequencies'], {}), '(dopplerFrequencies)\n', (12445, 12465), True, 'import numpy as np\n'), ((15029, 15070), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['ctime'], {}), '(ctime)\n', (15063, 15070), False, 'import datetime\n'), ((15094, 15113), 'ephem.Date', 'ephem.Date', (['c_dtime'], {}), '(c_dtime)\n', (15104, 15113), False, 'import ephem\n'), ((17253, 17294), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['ctime'], {}), '(ctime)\n', (17287, 17294), False, 'import datetime\n'), ((17318, 17337), 'ephem.Date', 'ephem.Date', (['c_dtime'], {}), '(c_dtime)\n', (17328, 17337), False, 'import ephem\n'), ((11921, 11941), 'ephem.date', 'ephem.date', (['currTime'], {}), '(currTime)\n', (11931, 11941), False, 'import ephem\n'), ((14955, 14997), 'string.split', 'string.split', (["obj_info['frequencies']", '""","""'], {}), "(obj_info['frequencies'], ',')\n", (14967, 14997), False, 'import 
string\n'), ((17179, 17221), 'string.split', 'string.split', (["obj_info['frequencies']", '""","""'], {}), "(obj_info['frequencies'], ',')\n", (17191, 17221), False, 'import string\n'), ((12122, 12136), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12134, 12136), False, 'import sys\n'), ((12157, 12219), 'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (12183, 12219), False, 'import traceback\n'), ((12287, 12315), 'numpy.array', 'np.array', (['dopplerFrequencies'], {}), '(dopplerFrequencies)\n', (12295, 12315), True, 'import numpy as np\n'), ((12354, 12380), 'numpy.array', 'np.array', (['dopplerBandwidth'], {}), '(dopplerBandwidth)\n', (12362, 12380), True, 'import numpy as np\n'), ((31729, 31743), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (31741, 31743), False, 'import sys\n'), ((31764, 31826), 'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (31790, 31826), False, 'import traceback\n'), ((19208, 19222), 'numpy.rad2deg', 'np.rad2deg', (['el'], {}), '(el)\n', (19218, 19222), True, 'import numpy as np\n'), ((19253, 19274), 'numpy.float', 'np.float', (['opt.el_mask'], {}), '(opt.el_mask)\n', (19261, 19274), True, 'import numpy as np\n'), ((20768, 20782), 'numpy.rad2deg', 'np.rad2deg', (['az'], {}), '(az)\n', (20778, 20782), True, 'import numpy as np\n'), ((20817, 20831), 'numpy.rad2deg', 'np.rad2deg', (['el'], {}), '(el)\n', (20827, 20831), True, 'import numpy as np\n'), ((21197, 21232), 'ephem.date', 'ephem.date', (['(sat_rise - ephem.minute)'], {}), '(sat_rise - ephem.minute)\n', (21207, 21232), False, 'import ephem\n'), ((21594, 21628), 'ephem.date', 'ephem.date', (['(sat_set + ephem.minute)'], {}), '(sat_set + ephem.minute)\n', (21604, 21628), False, 'import ephem\n'), ((22026, 22075), 'string.split', 'string.split', 
(["site['radio']['channel'][1:-1]", '""","""'], {}), "(site['radio']['channel'][1:-1], ',')\n", (22038, 22075), False, 'import string\n'), ((22109, 22155), 'string.split', 'string.split', (["site['radio']['gain'][1:-1]", '""","""'], {}), "(site['radio']['gain'][1:-1], ',')\n", (22121, 22155), False, 'import string\n'), ((22192, 22241), 'string.split', 'string.split', (["site['radio']['address'][1:-1]", '""","""'], {}), "(site['radio']['address'][1:-1], ',')\n", (22204, 22241), False, 'import string\n'), ((22282, 22335), 'string.split', 'string.split', (["site['recorder']['channels'][1:-1]", '""","""'], {}), "(site['recorder']['channels'][1:-1], ',')\n", (22294, 22335), False, 'import string\n'), ((34114, 34125), 'time.time', 'time.time', ([], {}), '()\n', (34123, 34125), False, 'import time\n'), ((27842, 27853), 'time.time', 'time.time', ([], {}), '()\n', (27851, 27853), False, 'import time\n'), ((27893, 27916), 'time.sleep', 'time.sleep', (['op.interval'], {}), '(op.interval)\n', (27903, 27916), False, 'import time\n'), ((29682, 29748), 'digital_rf.DigitalMetadataWriter', 'DigitalMetadataWriter', (["(mdata_dir + '/info')", '(3600)', '(60)', '(1)', '(1)', '"""info"""'], {}), "(mdata_dir + '/info', 3600, 60, 1, 1, 'info')\n", (29703, 29748), False, 'from digital_rf import DigitalMetadataWriter\n'), ((30285, 30351), 'digital_rf.DigitalMetadataWriter', 'DigitalMetadataWriter', (["(mdata_dir + '/pass')", '(3600)', '(60)', '(1)', '(1)', '"""pass"""'], {}), "(mdata_dir + '/pass', 3600, 60, 1, 1, 'pass')\n", (30306, 30351), False, 'from digital_rf import DigitalMetadataWriter\n'), ((31353, 31364), 'time.time', 'time.time', ([], {}), '()\n', (31362, 31364), False, 'import time\n'), ((31402, 31425), 'time.sleep', 'time.sleep', (['op.interval'], {}), '(op.interval)\n', (31412, 31425), False, 'import time\n'), ((34235, 34281), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {'tzinfo': 'pytz.utc'}), '(1970, 1, 1, tzinfo=pytz.utc)\n', (34252, 34281), False, 
'import datetime\n'), ((34597, 34643), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {'tzinfo': 'pytz.utc'}), '(1970, 1, 1, tzinfo=pytz.utc)\n', (34614, 34643), False, 'import datetime\n'), ((20207, 20221), 'numpy.rad2deg', 'np.rad2deg', (['az'], {}), '(az)\n', (20217, 20221), True, 'import numpy as np\n'), ((20223, 20237), 'numpy.rad2deg', 'np.rad2deg', (['el'], {}), '(el)\n', (20233, 20237), True, 'import numpy as np\n'), ((29031, 29101), 'digital_rf.DigitalMetadataWriter', 'DigitalMetadataWriter', (["(mdata_dir + '/config/%s' % k)", '(3600)', '(60)', '(1)', '(1)', 'k'], {}), "(mdata_dir + '/config/%s' % k, 3600, 60, 1, 1, k)\n", (29052, 29101), False, 'from digital_rf import DigitalMetadataWriter\n'), ((29545, 29577), 'os.makedirs', 'os.makedirs', (["(mdata_dir + '/info')"], {}), "(mdata_dir + '/info')\n", (29556, 29577), False, 'import os\n'), ((30148, 30180), 'os.makedirs', 'os.makedirs', (["(mdata_dir + '/pass')"], {}), "(mdata_dir + '/pass')\n", (30159, 30180), False, 'import os\n'), ((30793, 30825), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (30808, 30825), False, 'import subprocess\n'), ((28871, 28912), 'os.makedirs', 'os.makedirs', (["(mdata_dir + '/config/%s' % k)"], {}), "(mdata_dir + '/config/%s' % k)\n", (28882, 28912), False, 'import os\n'), ((31070, 31084), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (31082, 31084), False, 'import sys\n'), ((31121, 31183), 'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (31147, 31183), False, 'import traceback\n'), ((20462, 20486), 'numpy.argmax', 'np.argmax', (['obj_bandwidth'], {}), '(obj_bandwidth)\n', (20471, 20486), True, 'import numpy as np\n'), ((27374, 27420), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {'tzinfo': 'pytz.utc'}), '(1970, 1, 1, tzinfo=pytz.utc)\n', (27391, 27420), False, 'import 
datetime\n'), ((27582, 27628), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {'tzinfo': 'pytz.utc'}), '(1970, 1, 1, tzinfo=pytz.utc)\n', (27599, 27628), False, 'import datetime\n'), ((28017, 28028), 'time.time', 'time.time', ([], {}), '()\n', (28026, 28028), False, 'import time\n'), ((31534, 31545), 'time.time', 'time.time', ([], {}), '()\n', (31543, 31545), False, 'import time\n')] |
# Import Libraries
import requests;
import numpy as np
import math;
import scipy
import scipy.stats;
from scipy.stats import norm
import statistics
from datetime import datetime, date
import os;
# NOTE(review): path placeholder -- os.chdir('') raises FileNotFoundError;
# fill in the directory that holds the 'client_id' file before running.
os.chdir(r'') # where the client_id is stored
# Read the TD Ameritrade API consumer key; use a context manager so the
# file handle is closed (the original leaked it).
with open('client_id') as f:
    cid = f.read()
def get_prices(ticker):
    """Fetch one month of daily price history for *ticker* from TD Ameritrade.

    Parameters
    ----------
    ticker : str
        Stock symbol, e.g. ``"AAPL"``.

    Returns
    -------
    dict
        Parsed JSON response; the daily bars are under ``data['candles']``,
        each with a ``'close'`` field (consumed by ``get_volatility``).
    """
    global cid
    endpoint = "https://api.tdameritrade.com/v1/marketdata/{}/pricehistory".format(ticker)
    payload = {'apikey': cid,
               'periodType': 'month',
               'period': 1,
               'frequencyType': 'daily',
               # fixed typo: was 'freuency', which the API silently ignored
               'frequency': 1}
    data = requests.get(url=endpoint, params=payload).json()
    return data
def get_volatility(data):
    """Annualized volatility from daily close-to-close returns.

    *data* is a price-history payload whose ``'candles'`` list entries each
    carry a ``'close'`` price.  The sample standard deviation of the simple
    daily returns is scaled by sqrt(252) (trading days per year).
    """
    closes = [candle['close'] for candle in data['candles']]
    daily_returns = [curr / prev - 1 for prev, curr in zip(closes, closes[1:])]
    return statistics.stdev(daily_returns) * math.sqrt(252)
def get_quote(ticker):
    """Return the last traded price for *ticker* from the TD Ameritrade quote API."""
    global cid
    url = "https://api.tdameritrade.com/v1/marketdata/{}/quotes".format(ticker)
    response = requests.get(url=url, params={'apikey': cid}).json()
    return response[ticker]['lastPrice']
'''
Our Options are Going to be Out-Of-The Money, and will not be exercised in time. Thus, it is valid to treat the American Options like European Options.
'''
def price_options(ticker, strike, exp_date, spot_date=None, price=None, r=None, w=None, call=True):
    """Black-Scholes price of a European-style option.

    Parameters
    ----------
    ticker : str
        Symbol, used to fetch a quote / volatility when not supplied.
    strike : float
        Strike price.
    exp_date : datetime.date
        Expiration date.
    spot_date : datetime.date, optional
        Valuation date (default: today).
    price : float, optional
        Spot price (default: live quote via ``get_quote``).
    r : float, optional
        Risk-free rate (default 0.01).
    w : float, optional
        Annualized volatility (default: estimated via ``get_volatility``).
    call : bool
        True for a call price, False for a put.

    Returns
    -------
    float
        The Black-Scholes option value.
    """
    # Spot price: fetch a live quote unless the caller supplied one.
    if price is None:
        price = get_quote(ticker)
    # Valuation date defaults to today.
    if spot_date is None:
        spot_date = date.today()
    # Time to maturity in years (ACT/365).
    T = (exp_date - spot_date).days / 365
    # Risk-free rate default.
    if r is None:
        r = 0.01
    # Volatility: estimate from the last month of closes if not given.
    if w is None:
        w = get_volatility(get_prices(ticker))
    sqrt_T = math.sqrt(T)
    d1 = (np.log(price / strike) + (r + 0.5 * (w ** 2)) * T) / (w * sqrt_T)
    d2 = d1 - w * sqrt_T
    # (removed the original's unconditional call-price computation, which
    # was dead code: the result was recomputed inside the branch below)
    if call:
        return (norm.cdf(d1) * price) - (norm.cdf(d2) * strike * math.exp(-r * T))
    else:
        return (norm.cdf(-d2) * strike * math.exp(-r * T)) - (norm.cdf(-d1) * price)
| [
"math.exp",
"numpy.log",
"math.sqrt",
"statistics.stdev",
"datetime.date.today",
"scipy.stats.norm.cdf",
"requests.get",
"os.chdir"
] | [((209, 221), 'os.chdir', 'os.chdir', (['""""""'], {}), "('')\n", (217, 221), False, 'import os\n'), ((903, 926), 'statistics.stdev', 'statistics.stdev', (['delta'], {}), '(delta)\n', (919, 926), False, 'import statistics\n'), ((940, 954), 'math.sqrt', 'math.sqrt', (['(252)'], {}), '(252)\n', (949, 954), False, 'import math\n'), ((1683, 1695), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1693, 1695), False, 'from datetime import datetime, date\n'), ((628, 670), 'requests.get', 'requests.get', ([], {'url': 'endpoint', 'params': 'payload'}), '(url=endpoint, params=payload)\n', (640, 670), False, 'import requests\n'), ((1170, 1212), 'requests.get', 'requests.get', ([], {'url': 'endpoint', 'params': 'payload'}), '(url=endpoint, params=payload)\n', (1182, 1212), False, 'import requests\n'), ((1953, 1975), 'numpy.log', 'np.log', (['(price / strike)'], {}), '(price / strike)\n', (1959, 1975), True, 'import numpy as np\n'), ((2005, 2017), 'math.sqrt', 'math.sqrt', (['T'], {}), '(T)\n', (2014, 2017), False, 'import math\n'), ((2037, 2049), 'math.sqrt', 'math.sqrt', (['T'], {}), '(T)\n', (2046, 2049), False, 'import math\n'), ((2067, 2079), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (2075, 2079), False, 'from scipy.stats import norm\n'), ((2110, 2126), 'math.exp', 'math.exp', (['(-r * T)'], {}), '(-r * T)\n', (2118, 2126), False, 'import math\n'), ((2090, 2102), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (2098, 2102), False, 'from scipy.stats import norm\n'), ((2157, 2169), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (2165, 2169), False, 'from scipy.stats import norm\n'), ((2200, 2216), 'math.exp', 'math.exp', (['(-r * T)'], {}), '(-r * T)\n', (2208, 2216), False, 'import math\n'), ((2285, 2301), 'math.exp', 'math.exp', (['(-r * T)'], {}), '(-r * T)\n', (2293, 2301), False, 'import math\n'), ((2304, 2317), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (2312, 2317), False, 'from scipy.stats 
import norm\n'), ((2180, 2192), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (2188, 2192), False, 'from scipy.stats import norm\n'), ((2264, 2277), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d2)'], {}), '(-d2)\n', (2272, 2277), False, 'from scipy.stats import norm\n')] |
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
def one_hot(y, num_labels='auto', dtype='float'):
"""One-hot encoding of class labels
Parameters
----------
y : array-like, shape = [n_classlabels]
Python list or numpy array consisting of class labels.
num_labels : int or 'auto'
Number of unique labels in the class label array. Infers the number
of unique labels from the input array if set to 'auto'.
dtype : str
NumPy array type (float, float32, float64) of the output array.
Returns
----------
ary : numpy.ndarray, shape = [n_classlabels]
One-hot encoded array, where each sample is represented as
a row vector in the returned array.
Examples
----------
For usage examples, please see
http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/
"""
if not (num_labels == 'auto' or isinstance(num_labels, int)):
raise AttributeError('num_labels must be an integer or "auto"')
if isinstance(y, list):
yt = np.asarray(y)
else:
yt = y
if not len(yt.shape) == 1:
raise AttributeError('y array must be 1-dimensional')
if num_labels == 'auto':
# uniq = np.unique(yt).shape[0]
uniq = np.max(yt + 1)
else:
uniq = num_labels
if uniq == 1:
ary = np.array([[0.]], dtype=dtype)
else:
ary = np.zeros((len(y), uniq))
for i, val in enumerate(y):
ary[i, val] = 1
return ary.astype(dtype)
| [
"numpy.array",
"numpy.asarray",
"numpy.max"
] | [((1136, 1149), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1146, 1149), True, 'import numpy as np\n'), ((1352, 1366), 'numpy.max', 'np.max', (['(yt + 1)'], {}), '(yt + 1)\n', (1358, 1366), True, 'import numpy as np\n'), ((1435, 1465), 'numpy.array', 'np.array', (['[[0.0]]'], {'dtype': 'dtype'}), '([[0.0]], dtype=dtype)\n', (1443, 1465), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 14:36:42 2019
@author: gparkes
This script contains demonstration code for all sorts of sci-kit learn examples
all functions generate a graph, by default we do not save these to a file
Material drawn from:
ttps://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/06.00-Figure-Code.ipynb
Taken and used for non-commercial purposes, with modification.
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.patches import Ellipse
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
from sklearn.manifold import Isomap
from sklearn.metrics import pairwise_distances_argmin
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs, make_swiss_roll
import misc_fig as mf
def demo_broadcasting_cubes():
    """Draw the classic NumPy broadcasting figure: three stacked panels
    showing (top) vector + scalar, (middle) matrix + vector, and (bottom)
    column vector + row vector with a double broadcast.  Solid cubes are
    real array elements; dotted cubes mark broadcast (virtual) copies.
    Rendering is delegated to ``mf.draw_cube`` (assumed signature:
    axis, (x, y) origin, size, depth, visible-edge list, label -- TODO
    confirm against misc_fig)."""
    #------------------------------------------------------------
    # Draw a figure and axis with no boundary
    fig = plt.figure(figsize=(6, 4.5), facecolor='w')
    ax = plt.axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
    # types of plots
    solid = dict(c="black", ls="-", lw=1, label_kwargs=dict(color="k"))
    dotted = dict(c='black', ls='-', lw=0.5, alpha=0.5, label_kwargs=dict(color='gray'))
    depth = .3
    #------------------------------------------------------------
    # Draw top operation: vector plus scalar
    mf.draw_cube(ax, (1, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
    mf.draw_cube(ax, (2, 10), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
    mf.draw_cube(ax, (3, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
    mf.draw_cube(ax, (6, 10), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '5', **solid)
    mf.draw_cube(ax, (7, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
    mf.draw_cube(ax, (8, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
    mf.draw_cube(ax, (12, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '5', **solid)
    mf.draw_cube(ax, (13, 10), 1, depth, [1, 2, 3, 6, 9], '6', **solid)
    mf.draw_cube(ax, (14, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '7', **solid)
    ax.text(5, 10.5, '+', size=12, ha='center', va='center')
    ax.text(10.5, 10.5, '=', size=12, ha='center', va='center')
    ax.text(1, 11.5, r'${\tt np.arange(3) + 5}$',
            size=12, ha='left', va='bottom')
    #------------------------------------------------------------
    # Draw middle operation: matrix plus vector
    # first block
    mf.draw_cube(ax, (1, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)
    mf.draw_cube(ax, (2, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
    mf.draw_cube(ax, (3, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '1', **solid)
    mf.draw_cube(ax, (1, 6.5), 1, depth, [2, 3, 4], '1', **solid)
    mf.draw_cube(ax, (2, 6.5), 1, depth, [2, 3], '1', **solid)
    mf.draw_cube(ax, (3, 6.5), 1, depth, [2, 3, 7, 10], '1', **solid)
    mf.draw_cube(ax, (1, 5.5), 1, depth, [2, 3, 4], '1', **solid)
    mf.draw_cube(ax, (2, 5.5), 1, depth, [2, 3], '1', **solid)
    mf.draw_cube(ax, (3, 5.5), 1, depth, [2, 3, 7, 10], '1', **solid)
    # second block
    mf.draw_cube(ax, (6, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
    mf.draw_cube(ax, (7, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
    mf.draw_cube(ax, (8, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
    mf.draw_cube(ax, (6, 6.5), 1, depth, range(2, 13), '0', **dotted)
    mf.draw_cube(ax, (7, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)
    mf.draw_cube(ax, (8, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)
    mf.draw_cube(ax, (6, 5.5), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted)
    mf.draw_cube(ax, (7, 5.5), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
    mf.draw_cube(ax, (8, 5.5), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
    # third block
    mf.draw_cube(ax, (12, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)
    mf.draw_cube(ax, (13, 7.5), 1, depth, [1, 2, 3, 6, 9], '2', **solid)
    mf.draw_cube(ax, (14, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '3', **solid)
    mf.draw_cube(ax, (12, 6.5), 1, depth, [2, 3, 4], '1', **solid)
    mf.draw_cube(ax, (13, 6.5), 1, depth, [2, 3], '2', **solid)
    mf.draw_cube(ax, (14, 6.5), 1, depth, [2, 3, 7, 10], '3', **solid)
    mf.draw_cube(ax, (12, 5.5), 1, depth, [2, 3, 4], '1', **solid)
    mf.draw_cube(ax, (13, 5.5), 1, depth, [2, 3], '2', **solid)
    mf.draw_cube(ax, (14, 5.5), 1, depth, [2, 3, 7, 10], '3', **solid)
    ax.text(5, 7.0, '+', size=12, ha='center', va='center')
    ax.text(10.5, 7.0, '=', size=12, ha='center', va='center')
    ax.text(1, 9.0, r'${\tt np.ones((3,\, 3)) + np.arange(3)}$',
            size=12, ha='left', va='bottom')
    #------------------------------------------------------------
    # Draw bottom operation: vector plus vector, double broadcast
    # first block
    mf.draw_cube(ax, (1, 3), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '0', **solid)
    mf.draw_cube(ax, (1, 2), 1, depth, [2, 3, 4, 7, 10], '1', **solid)
    mf.draw_cube(ax, (1, 1), 1, depth, [2, 3, 4, 7, 10], '2', **solid)
    mf.draw_cube(ax, (2, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)
    mf.draw_cube(ax, (2, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
    mf.draw_cube(ax, (2, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
    mf.draw_cube(ax, (3, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)
    mf.draw_cube(ax, (3, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
    mf.draw_cube(ax, (3, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
    # second block
    mf.draw_cube(ax, (6, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
    mf.draw_cube(ax, (7, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
    mf.draw_cube(ax, (8, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
    mf.draw_cube(ax, (6, 2), 1, depth, range(2, 13), '0', **dotted)
    mf.draw_cube(ax, (7, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)
    mf.draw_cube(ax, (8, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)
    mf.draw_cube(ax, (6, 1), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted)
    mf.draw_cube(ax, (7, 1), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
    mf.draw_cube(ax, (8, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
    # third block
    mf.draw_cube(ax, (12, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
    mf.draw_cube(ax, (13, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
    mf.draw_cube(ax, (14, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
    mf.draw_cube(ax, (12, 2), 1, depth, [2, 3, 4], '1', **solid)
    mf.draw_cube(ax, (13, 2), 1, depth, [2, 3], '2', **solid)
    mf.draw_cube(ax, (14, 2), 1, depth, [2, 3, 7, 10], '3', **solid)
    mf.draw_cube(ax, (12, 1), 1, depth, [2, 3, 4], '2', **solid)
    mf.draw_cube(ax, (13, 1), 1, depth, [2, 3], '3', **solid)
    mf.draw_cube(ax, (14, 1), 1, depth, [2, 3, 7, 10], '4', **solid)
    ax.text(5, 2.5, '+', size=12, ha='center', va='center')
    ax.text(10.5, 2.5, '=', size=12, ha='center', va='center')
    ax.text(1, 4.5, r'${\tt np.arange(3).reshape((3,\, 1)) + np.arange(3)}$',
            ha='left', size=12, va='bottom')
    ax.set_xlim(0, 16)
    ax.set_ylim(0.5, 12.5)
    # save
    # fig.savefig('figures/02.05-broadcasting.png')
def demo_aggregate_df():
    """Illustrate pandas split-apply-combine with drawn DataFrames.

    Renders the input frame, its per-key splits, each split's sum, and the
    combined ``groupby`` result, connected by labeled arrows.  Relies on
    ``mf.draw_dataframe`` for the table rendering.
    """
    df = pd.DataFrame({'data': [1, 2, 3, 4, 5, 6]},
                      index=['A', 'B', 'C', 'A', 'B', 'C'])
    df.index.name = 'key'
    fig = plt.figure(figsize=(8, 6), facecolor='white')
    ax = plt.axes([0, 0, 1, 1])
    ax.axis('off')
    mf.draw_dataframe(df, [0, 0])
    for y, ind in zip([3, 1, -1], 'ABC'):
        split = df[df.index == ind]
        mf.draw_dataframe(split, [2, y])
        # renamed from `sum`, which shadowed the builtin
        group_sum = pd.DataFrame(split.sum()).T
        group_sum.index = [ind]
        group_sum.index.name = 'key'
        group_sum.columns = ['data']
        mf.draw_dataframe(group_sum, [4, y + 0.25])
    result = df.groupby(df.index).sum()
    mf.draw_dataframe(result, [6, 0.75])
    style = dict(fontsize=14, ha='center', weight='bold')
    plt.text(0.5, 3.6, "Input", **style)
    plt.text(2.5, 4.6, "Split", **style)
    plt.text(4.5, 4.35, "Apply (sum)", **style)
    plt.text(6.5, 2.85, "Combine", **style)
    arrowprops = dict(facecolor='black', width=1, headwidth=6)
    plt.annotate('', (1.8, 3.6), (1.2, 2.8), arrowprops=arrowprops)
    plt.annotate('', (1.8, 1.75), (1.2, 1.75), arrowprops=arrowprops)
    plt.annotate('', (1.8, -0.1), (1.2, 0.7), arrowprops=arrowprops)
    plt.annotate('', (3.8, 3.8), (3.2, 3.8), arrowprops=arrowprops)
    plt.annotate('', (3.8, 1.75), (3.2, 1.75), arrowprops=arrowprops)
    plt.annotate('', (3.8, -0.3), (3.2, -0.3), arrowprops=arrowprops)
    plt.annotate('', (5.8, 2.8), (5.2, 3.6), arrowprops=arrowprops)
    plt.annotate('', (5.8, 1.75), (5.2, 1.75), arrowprops=arrowprops)
    plt.annotate('', (5.8, 0.7), (5.2, -0.1), arrowprops=arrowprops)
    plt.axis('equal')
    plt.ylim(-1.5, 5)
    # fig.savefig('figures/03.08-split-apply-combine.png')
# fig.savefig('figures/03.08-split-apply-combine.png')
def demo_features_and_labels_grid():
    """Draw the scikit-learn data-layout schematic: an n_samples x
    n_features feature matrix X next to an n_samples target vector y."""
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis('off')
    ax.axis('equal')
    # Draw features matrix
    ax.vlines(range(6), ymin=0, ymax=9, lw=1)
    ax.hlines(range(10), xmin=0, xmax=5, lw=1)
    font_prop = dict(size=12, family='monospace')
    ax.text(-1, -1, "Feature Matrix ($X$)", size=14)
    ax.text(0.1, -0.3, r'n_features $\longrightarrow$', **font_prop)
    ax.text(-0.1, 0.1, r'$\longleftarrow$ n_samples', rotation=90,
            va='top', ha='right', **font_prop)
    # Draw labels vector
    ax.vlines(range(8, 10), ymin=0, ymax=9, lw=1)
    ax.hlines(range(10), xmin=8, xmax=9, lw=1)
    ax.text(7, -1, "Target Vector ($y$)", size=14)
    ax.text(7.9, 0.1, r'$\longleftarrow$ n_samples', rotation=90,
            va='top', ha='right', **font_prop)
    # inverted y-limits so row 0 is at the top, like a printed table
    ax.set_ylim(10, -2)
    # fig.savefig('figures/05.02-samples-features.png')
def demo_5_fold_cross_validation():
    """Draw the 5-fold cross-validation split diagram on a bare axis."""
    figure = plt.figure()
    axis = figure.add_axes([0, 0, 1, 1])
    axis.axis('off')
    mf.draw_rects(5, axis, textprop={'size': 10})
    # figure.savefig('figures/05.03-5-fold-CV.png')
def demo_classification_data(n=50, n_centers=2, std=.6):
    """Toy two-class blob dataset with a fitted linear SVC.

    Returns (X, y, X2, y2, clf): training points and labels, 30 held-out
    points, their predicted labels, and the fitted classifier.
    """
    train_X, train_y = make_blobs(n_samples=n, centers=n_centers, random_state=0, cluster_std=std)
    classifier = SVC(kernel="linear").fit(train_X, train_y)
    # draw a fresh blob sample and keep its second half as "new" points
    extra, _ = make_blobs(n_samples=80, centers=2, random_state=0, cluster_std=.8)
    new_X = extra[50:]
    new_y = classifier.predict(new_X)
    return train_X, train_y, new_X, new_y, classifier
def demo_regression_data(n=200, n2=100):
    """Toy linear-regression data: y = -2*x0 + x1 + Gaussian noise.

    Returns (X, y, X2, y2, model): training features/targets, n2 fresh
    points, their predictions, and the fitted LinearRegression.
    """
    random_state = np.random.RandomState(1)
    features = random_state.randn(n, 2)
    targets = np.dot(features, [-2, 1]) + .1 * random_state.randn(features.shape[0])
    fitted = LinearRegression().fit(features, targets)
    new_features = random_state.randn(n2, 2)
    predictions = fitted.predict(new_features)
    return features, targets, new_features, predictions, fitted
def demo_cluster_data(n=100, centers=4, std=1.5):
    """Blob data for the clustering demos plus a KMeans fit.

    Returns (X, y, y_hat, model): points, true blob labels, KMeans-predicted
    labels, and the fitted estimator.
    """
    points, true_labels = make_blobs(n_samples=n, centers=centers, cluster_std=std, random_state=42)
    estimator = KMeans(centers, random_state=0).fit(points)
    predicted = estimator.predict(points)
    return points, true_labels, predicted, estimator
def demo_swiss_roll_data(n=200, noise=.5):
    """2-D swiss-roll slice: sample the 3-D roll and keep the (x, z) plane."""
    points, color = make_swiss_roll(n_samples=n, noise=noise, random_state=42)
    return points[:, [0, 2]], color
def demo_nonlinear_data(n=30, err=.8, rseed=1):
    """Noisy samples of y = 10 - 1/(x + 0.1), with x drawn as squared uniforms.

    Returns (X, y) where X has shape (n, 1); Gaussian noise of scale *err*
    is added to y when err > 0.
    """
    random_state = np.random.RandomState(rseed)
    features = random_state.rand(n, 1) ** 2
    targets = 10 - 1. / (features.ravel() + .1)
    if err > 0:
        targets = targets + err * random_state.randn(n)
    return features, targets
def demo_classification_example_1():
    """Scatter-plot the raw two-class training blobs (no model overlay)."""
    X, y, X2, y2, clf = demo_classification_data()
    figure, axis = plt.subplots(figsize=(8, 6))
    axis.scatter(X[:, 0], X[:, 1], c=y, cmap='Paired', s=50)
    mf.format_plot(axis, 'Input Data')
    axis.axis([-1, 4, -2, 7])
    # figure.savefig('figures/05.01-classification-1.png')
def demo_classification_example_2():
    """Plot the training blobs with the fitted linear SVC's decision
    boundary (solid contour at 0) and its +/-1 margin lines (dashed)."""
    X, y, X2, y2, clf = demo_classification_data()
    # Get contours describing the model
    xx = np.linspace(-1, 4, 10)
    yy = np.linspace(-2, 7, 10)
    xy1, xy2 = np.meshgrid(xx, yy)
    point_style = dict(cmap='Paired', s=50)
    # decision_function value at every grid node, reshaped back to the grid
    Z = np.array([clf.decision_function([t])
                  for t in zip(xy1.flat, xy2.flat)]).reshape(xy1.shape)
    # plot points and model
    fig, ax = plt.subplots(figsize=(8, 6))
    line_style = dict(levels = [-1.0, 0.0, 1.0],
                      linestyles = ['dashed', 'solid', 'dashed'],
                      colors = 'gray', linewidths=1)
    ax.scatter(X[:, 0], X[:, 1], c=y, **point_style)
    ax.contour(xy1, xy2, Z, **line_style)
    # format plot
    mf.format_plot(ax, 'Model Learned from Input Data')
    ax.axis([-1, 4, -2, 7])
    # fig.savefig('figures/05.01-classification-2.png')
def demo_regression_example_1():
    """Scatter the 2-feature regression inputs, colored by target value."""
    features, targets, _, _, _ = demo_regression_data()
    figure, axis = plt.subplots()
    axis.scatter(features[:, 0], features[:, 1], c=targets, s=50, cmap='viridis')
    mf.format_plot(axis, 'Input Data')
    axis.axis([-4, 4, -3, 3])
    # figure.savefig('figures/05.01-regression-1.png')
def demo_regression_example_2():
    """3-D view of the regression data: points at (x0, x1, y), with gray
    drop-lines and a flattened shadow at z = -8 to show the projection."""
    X, y, _, _, _ = demo_regression_data()
    # build vertical segments from each point down to the z = -8 plane
    points = np.hstack([X, y[:, None]]).reshape(-1, 1, 3)
    segments = np.hstack([points, points])
    segments[:, 0, 2] = -8
    # plot points in 3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], y, c=y, s=35,
               cmap='viridis')
    ax.add_collection3d(Line3DCollection(segments, colors='gray', alpha=0.2))
    ax.scatter(X[:, 0], X[:, 1], -8 + np.zeros(X.shape[0]), c=y, s=10,
               cmap='viridis')
    # format plot
    ax.patch.set_facecolor('white')
    ax.view_init(elev=20, azim=-70)
    ax.set_zlim3d(-8, 8)
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.zaxis.set_major_formatter(plt.NullFormatter())
    ax.set(xlabel='feature 1', ylabel='feature 2', zlabel='label')
    # Hide axes (is there a better way?)
    ax.w_xaxis.line.set_visible(False)
    ax.w_yaxis.line.set_visible(False)
    ax.w_zaxis.line.set_visible(False)
    for tick in ax.w_xaxis.get_ticklines():
        tick.set_visible(False)
    for tick in ax.w_yaxis.get_ticklines():
        tick.set_visible(False)
    for tick in ax.w_zaxis.get_ticklines():
        tick.set_visible(False)
    # fig.savefig('figures/05.01-regression-2.png')
def demo_regression_example_3():
    """Overlay the fitted linear model as a color mesh behind the scatter
    of the training points; the points share the mesh's color normalization."""
    X, y, _, _, model = demo_regression_data()
    # plot data points
    fig, ax = plt.subplots()
    pts = ax.scatter(X[:, 0], X[:, 1], c=y, s=50,
                     cmap='viridis', zorder=2)
    # compute and plot model color mesh
    xx, yy = np.meshgrid(np.linspace(-4, 4),
                         np.linspace(-3, 3))
    Xfit = np.vstack([xx.ravel(), yy.ravel()]).T
    yfit = model.predict(Xfit)
    zz = yfit.reshape(xx.shape)
    ax.pcolorfast([-4, 4], [-3, 3], zz, alpha=0.5,
                  cmap='viridis', norm=pts.norm, zorder=1)
    # format plot
    mf.format_plot(ax, 'Input Data with Linear Fit')
    ax.axis([-4, 4, -3, 3])
    # fig.savefig('figures/05.01-regression-3.png')
def demo_cluster_example_1():
    """Plot the unlabeled clustering input as gray points."""
    features, _, _, _ = demo_cluster_data()
    figure, axis = plt.subplots(figsize=(8, 6))
    axis.scatter(features[:, 0], features[:, 1], s=50, color='gray')
    mf.format_plot(axis, 'Input Data')
    # figure.savefig('figures/05.01-clustering-1.png')
def demo_cluster_example_2():
    """Plot the clustering data colored by its ground-truth blob labels."""
    features, labels, _, _ = demo_cluster_data()
    figure, axis = plt.subplots(figsize=(8, 6))
    axis.scatter(features[:, 0], features[:, 1], s=50, c=labels, cmap='viridis')
    mf.format_plot(axis, 'Learned Cluster Labels')
    # figure.savefig('figures/05.01-clustering-2.png')
def demo_dimensionality_reduction_example_1():
    """Show the raw 2-D swiss-roll slice before manifold learning."""
    features, _ = demo_swiss_roll_data()
    figure, axis = plt.subplots()
    axis.scatter(features[:, 0], features[:, 1], color='gray', s=30)
    mf.format_plot(axis, 'Input Data')
    # figure.savefig('figures/05.01-dimesionality-1.png')
def demo_dimensionality_reduction_example_2():
    """Color the swiss-roll data by a 1-D Isomap embedding (the learned latent variable)."""
    X, y = demo_swiss_roll_data()
    # reduce to a single latent coordinate with Isomap
    embedding = Isomap(n_neighbors=8, n_components=1)
    latent = embedding.fit_transform(X).ravel()
    fig, ax = plt.subplots()
    scatter = ax.scatter(X[:, 0], X[:, 1], c=latent, cmap='viridis', s=30)
    colorbar = fig.colorbar(scatter, ax=ax)
    mf.format_plot(ax, 'Learned Latent Parameter')
    # the latent coordinate is unitless, so hide the tick values
    colorbar.set_ticks([])
    colorbar.set_label('Latent Variable', color='gray')
    # fig.savefig('figures/05.01-dimesionality-2.png')
def demo_bias_variance_tradeoff():
    """Contrast an underfit (degree-1) and an overfit (degree-20) polynomial model."""
    X, y = demo_nonlinear_data()
    xfit = np.linspace(-0.1, 1.0, 1000)[:, None]
    def _poly_model(degree=2, **kwargs):
        # pipeline: polynomial feature expansion followed by a linear fit
        return make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs))
    fig, ax = plt.subplots(1, 2, figsize=(16, 6))
    fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
    panels = [
        (1, 'High-bias model: Underfits the data'),
        (20, 'High-variance model: Overfits the data'),
    ]
    for axis, (degree, title) in zip(ax, panels):
        model = _poly_model(degree).fit(X, y)
        axis.scatter(X.ravel(), y, s=40)
        axis.plot(xfit.ravel(), model.predict(xfit), color='gray')
        axis.axis([-0.1, 1.0, -2, 14])
        axis.set_title(title, size=14)
    # fig.savefig('figures/05.03-bias-variance.png')
def demo_bias_variance_tradeoff_metrics():
    """Annotate the under/overfit polynomial comparison with train/validation R^2 scores.

    Blue points and text mark the training data and score; red marks a
    second, held-out sample drawn from the same generator.
    """
    fig, ax = plt.subplots(1, 2, figsize=(16, 6))
    fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
    X, y = demo_nonlinear_data()
    X2, y2 = demo_nonlinear_data(10, rseed=42)
    xfit = np.linspace(-0.1, 1.0, 1000)[:, None]

    def _poly_model(degree):
        # polynomial feature expansion followed by an ordinary linear fit
        return make_pipeline(PolynomialFeatures(degree), LinearRegression())

    underfit = _poly_model(1).fit(X, y)
    overfit = _poly_model(20).fit(X, y)
    # left panel: high-bias (degree-1) model
    ax[0].scatter(X.ravel(), y, s=40, c='blue')
    ax[0].plot(xfit.ravel(), underfit.predict(xfit), color='gray')
    ax[0].axis([-0.1, 1.0, -2, 14])
    ax[0].set_title('High-bias model: Underfits the data', size=14)
    ax[0].scatter(X2.ravel(), y2, s=40, c='red')
    ax[0].text(0.02, 0.98, "training score: $R^2$ = {0:.2f}".format(underfit.score(X, y)),
               ha='left', va='top', transform=ax[0].transAxes, size=14, color='blue')
    ax[0].text(0.02, 0.91, "validation score: $R^2$ = {0:.2f}".format(underfit.score(X2, y2)),
               ha='left', va='top', transform=ax[0].transAxes, size=14, color='red')
    # right panel: high-variance (degree-20) model
    ax[1].scatter(X.ravel(), y, s=40, c='blue')
    ax[1].plot(xfit.ravel(), overfit.predict(xfit), color='gray')
    ax[1].axis([-0.1, 1.0, -2, 14])
    ax[1].set_title('High-variance model: Overfits the data', size=14)
    ax[1].scatter(X2.ravel(), y2, s=40, c='red')
    ax[1].text(0.02, 0.98, "training score: $R^2$ = {0:.2g}".format(overfit.score(X, y)),
               ha='left', va='top', transform=ax[1].transAxes, size=14, color='blue')
    ax[1].text(0.02, 0.91, "validation score: $R^2$ = {0:.2g}".format(overfit.score(X2, y2)),
               ha='left', va='top', transform=ax[1].transAxes, size=14, color='red')
    # fig.savefig('figures/05.03-bias-variance-2.png')
def demo_validation_curve():
    """Draw a schematic validation curve: training vs validation score vs model complexity."""
    complexity = np.linspace(0, 1, 1000)
    validation_score = -(complexity - 0.5) ** 2
    training_score = validation_score - 0.33 + np.exp(complexity - 1)
    fig, ax = plt.subplots()
    # wide translucent bands rather than thin lines, to read as a schematic
    ax.plot(complexity, training_score, lw=10, alpha=0.5, color='blue')
    ax.plot(complexity, validation_score, lw=10, alpha=0.5, color='red')
    ax.text(0.15, 0.2, "training score", rotation=45, size=16, color='blue')
    ax.text(0.2, -0.05, "validation score", rotation=20, size=16, color='red')
    ax.text(0.02, 0.1, r'$\longleftarrow$ High Bias', size=18, rotation=90, va='center')
    ax.text(0.98, 0.1, r'$\longleftarrow$ High Variance $\longrightarrow$', size=18, rotation=90, ha='right', va='center')
    ax.text(0.48, -0.12, 'Best$\\longrightarrow$\nModel', size=18, rotation=90, va='center')
    ax.set(xlim=(0, 1), ylim=(-0.3, 0.5))
    ax.set_xlabel(r'model complexity $\longrightarrow$', size=14)
    ax.set_ylabel(r'model score $\longrightarrow$', size=14)
    # schematic axes: suppress tick labels on both axes
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_major_formatter(plt.NullFormatter())
    ax.set_title("Validation Curve Schematic", size=16)
    # fig.savefig('figures/05.03-validation-curve.png')
def demo_learning_curve():
    """Draw a schematic learning curve: training vs validation score vs training-set size."""
    # N parameterizes the (normalized) training-set size; x is the plotting grid
    N = np.linspace(0, 1, 1000)
    x = np.linspace(0, 1, 1000)
    training_score = 0.75 + 0.2 * np.exp(-4 * N)
    validation_score = 0.7 - 0.6 * np.exp(-4 * N)
    fig, ax = plt.subplots()
    ax.plot(x, training_score, lw=10, alpha=0.5, color='blue')
    ax.plot(x, validation_score, lw=10, alpha=0.5, color='red')
    ax.text(0.2, 0.88, "training score", rotation=-10, size=16, color='blue')
    ax.text(0.2, 0.5, "validation score", rotation=30, size=16, color='red')
    ax.text(0.98, 0.45, r'Good Fit $\longrightarrow$', size=18, rotation=90, ha='right', va='center')
    ax.text(0.02, 0.57, r'$\longleftarrow$ High Variance $\longrightarrow$', size=18, rotation=90, va='center')
    ax.set(xlim=(0, 1), ylim=(0, 1))
    ax.set_xlabel(r'training set size $\longrightarrow$', size=14)
    ax.set_ylabel(r'model score $\longrightarrow$', size=14)
    # schematic axes: suppress tick labels on both axes
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_major_formatter(plt.NullFormatter())
    ax.set_title("Learning Curve Schematic", size=16)
    # fig.savefig('figures/05.03-learning-curve.png')
def demo_naive_bayes():
    """Visualize Gaussian naive Bayes: axis-aligned Gaussian densities fit per class."""
    X, y = make_blobs(100, 2, centers=2, random_state=2, cluster_std=1.5)
    fig, ax = plt.subplots()
    ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu')
    ax.set_title('Naive Bayes Model', size=14)
    xlim = (-8, 8)
    ylim = (-15, 5)
    # evaluation grid for the class-conditional densities
    xg = np.linspace(xlim[0], xlim[1], 60)
    yg = np.linspace(ylim[0], ylim[1], 40)
    xx, yy = np.meshgrid(xg, yg)
    Xgrid = np.vstack([xx.ravel(), yy.ravel()]).T
    for label, color in enumerate(['red', 'blue']):
        mask = (y == label)
        # per-feature mean and std: the "naive" (axis-aligned) Gaussian fit
        mu, std = X[mask].mean(0), X[mask].std(0)
        # unnormalized density: product of independent per-feature Gaussians
        P = np.exp(-0.5 * (Xgrid - mu) ** 2 / std ** 2).prod(1)
        # mask low-density cells so only the core of each class gets shaded
        Pm = np.ma.masked_array(P, P < 0.03)
        ax.pcolorfast(xg, yg, Pm.reshape(xx.shape), alpha=0.5,
                      cmap=color.title() + 's')
        # faint density contours extend beyond the shaded core
        ax.contour(xx, yy, P.reshape(xx.shape),
                   levels=[0.01, 0.1, 0.5, 0.9],
                   colors=color, alpha=0.2)
    ax.set(xlim=xlim, ylim=ylim)
    # fig.savefig('figures/05.05-gaussian-NB.png')
def demo_gaussian_basis_functions():
    """Fit a linear model on uniformly-spaced Gaussian basis features and shade each basis."""
    class GaussianFeatures(BaseEstimator, TransformerMixin):
        """Uniformly-spaced Gaussian Features for 1D input"""
        def __init__(self, N, width_factor=2.0):
            # N: number of Gaussian centers; width_factor scales their shared width
            self.N = N
            self.width_factor = width_factor
        @staticmethod
        def _gauss_basis(x, y, width, axis=None):
            # unnormalized Gaussian bump centered at y with the given width
            arg = (x - y) / width
            return np.exp(-0.5 * np.sum(arg ** 2, axis))
        def fit(self, X, y=None):
            # create N centers spread along the data range
            self.centers_ = np.linspace(X.min(), X.max(), self.N)
            self.width_ = self.width_factor * (self.centers_[1] - self.centers_[0])
            return self
        def transform(self, X):
            # evaluate every sample against every center -> (n_samples, N) features
            return self._gauss_basis(X[:, :, np.newaxis], self.centers_,
                                     self.width_, axis=1)
    # noisy sine data
    rng = np.random.RandomState(1)
    x = 10 * rng.rand(50)
    y = np.sin(x) + 0.1 * rng.randn(50)
    xfit = np.linspace(0, 10, 1000)
    gauss_model = make_pipeline(GaussianFeatures(10, 1.0),
                              LinearRegression())
    gauss_model.fit(x[:, np.newaxis], y)
    yfit = gauss_model.predict(xfit[:, np.newaxis])
    # pull the fitted pipeline steps back out to plot each basis separately
    gf = gauss_model.named_steps['gaussianfeatures']
    lm = gauss_model.named_steps['linearregression']
    fig, ax = plt.subplots()
    # shade each individual weighted basis function under the fit
    for i in range(10):
        selector = np.zeros(10)
        selector[i] = 1
        Xfit = gf.transform(xfit[:, None]) * selector
        yfit = lm.predict(Xfit)
        ax.fill_between(xfit, yfit.min(), yfit, color='gray', alpha=0.2)
    ax.scatter(x, y)
    ax.plot(xfit, gauss_model.predict(xfit[:, np.newaxis]))
    ax.set_xlim(0, 10)
    # NOTE: yfit here is the last single-basis prediction from the loop above
    ax.set_ylim(yfit.min(), 1.5)
    # fig.savefig('figures/05.06-gaussian-basis.png')
def demo_decision_tree_example():
    """Draw a hand-laid-out schematic decision tree for animal classification.

    All coordinates are axes fractions; nothing here is computed from data.
    """
    fig = plt.figure(figsize=(10, 4))
    ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[])
    ax.set_title('Example Decision Tree: Animal Classification', size=24)
    def text(ax, x, y, t, size=20, **kwargs):
        # draw one node: centered label in a rounded white box
        ax.text(x, y, t,
                ha='center', va='center', size=size,
                bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs)
    # decision nodes, laid out top to bottom
    text(ax, 0.5, 0.9, "How big is\nthe animal?", 20)
    text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18)
    text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18)
    text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14)
    text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14)
    text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14)
    text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14)
    # faded branch labels on the edges
    text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4)
    text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4)
    text(ax, 0.21, 0.45, "yes", 12, alpha=0.4)
    text(ax, 0.34, 0.45, "no", 12, alpha=0.4)
    text(ax, 0.66, 0.45, "yes", 12, alpha=0.4)
    text(ax, 0.79, 0.45, "no", 12, alpha=0.4)
    # solid lines join drawn nodes; dashed lines lead to leaves left undrawn
    ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k')
    ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k')
    ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k')
    ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k')
    ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k')
    ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k')
    ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k')
    ax.axis([0, 1, 0, 1])
    #fig.savefig('figures/05.08-decision-tree.png')
def demo_decision_tree_levels():
    """Show how the decision boundary sharpens as tree depth grows from 1 to 4."""
    fig, ax = plt.subplots(1, 4, figsize=(16, 3))
    fig.subplots_adjust(left=0.02, right=0.98, wspace=0.1)
    X, y = make_blobs(n_samples=300, centers=4,
                      random_state=0, cluster_std=1.0)
    # one panel per max_depth setting, depths 1..4
    for depth, axis in enumerate(ax, start=1):
        classifier = DecisionTreeClassifier(max_depth=depth)
        mf.visualize_tree(classifier, X, y, ax=axis)
        axis.set_title('depth = {0}'.format(depth))
    #fig.savefig('figures/05.08-decision-tree-levels.png')
def demo_decision_tree_overfitting():
    """Fit the same unlimited-depth tree to two halves of the data to expose overfitting.

    The two panels train on disjoint halves (even- vs odd-indexed samples) of
    the same blob data; the differing learned boundaries illustrate the
    model's high variance.
    """
    model = DecisionTreeClassifier()
    X, y = make_blobs(n_samples=300, centers=4,
                      random_state=0, cluster_std=1.0)
    fig, ax = plt.subplots(1, 2, figsize=(16, 6))
    fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
    # even-indexed samples on the left, odd-indexed on the right
    mf.visualize_tree(model, X[::2], y[::2], boundaries=False, ax=ax[0])
    mf.visualize_tree(model, X[1::2], y[1::2], boundaries=False, ax=ax[1])
    # Disabled to match every other demo in this file (an active savefig also
    # fails if the figures/ directory is missing); uncomment to export.
    # fig.savefig('figures/05.08-decision-tree-overfitting.png')
def demo_pca_components_rotation():
    """Draw correlated 2-D data with its principal axes, beside its whitened PCA projection."""
    rng = np.random.RandomState(1)
    # correlated Gaussian cloud: random linear mix of standard-normal samples
    X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    fig, ax = plt.subplots(1, 2, figsize=(16, 6))
    fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
    # left panel: raw data with principal-axis arrows scaled to 3 std devs
    ax[0].scatter(X[:, 0], X[:, 1], alpha=0.2)
    for variance, direction in zip(pca.explained_variance_, pca.components_):
        arrow = direction * 3 * np.sqrt(variance)
        mf.draw_vector(pca.mean_, pca.mean_ + arrow, ax=ax[0])
    ax[0].axis('equal')
    ax[0].set(xlabel='x', ylabel='y', title='input')
    # right panel: same data in the (whitened) principal-component frame
    projected = pca.transform(X)
    ax[1].scatter(projected[:, 0], projected[:, 1], alpha=0.2)
    for tip in ([0, 3], [3, 0]):
        mf.draw_vector([0, 0], tip, ax=ax[1])
    ax[1].axis('equal')
    ax[1].set(xlabel='component 1', ylabel='component 2',
              title='principal components',
              xlim=(-5, 5), ylim=(-3, 3.1))
    # fig.savefig('figures/05.09-PCA-rotation.png')
def demo_expectation_maximization_kmeans():
    """Step through three E/M iterations of k-means on blob data, one panel per step.

    Layout: the big left panel shows the random initialization, each middle
    column shows one E-step (reassignment) above its M-step (center update),
    and the big right panel shows the final clustering.
    """
    X, y_true = make_blobs(n_samples=300, centers=4,
                           cluster_std=0.60, random_state=0)
    rng = np.random.RandomState(42)
    # deliberately rough initial centers: random offsets around (0, 4)
    centers = [0, 4] + rng.randn(4, 2)
    def draw_points(ax, c, factor=1):
        # scatter the data colored by assignment c ('gray' before any E-step)
        ax.scatter(X[:, 0], X[:, 1], c=c, cmap='viridis',
                   s=50 * factor, alpha=0.3)
    def draw_centers(ax, centers, factor=1, alpha=1.0):
        # cluster centers: large colored disk with a small black core
        ax.scatter(centers[:, 0], centers[:, 1],
                   c=np.arange(4), cmap='viridis', s=200 * factor,
                   alpha=alpha)
        ax.scatter(centers[:, 0], centers[:, 1],
                   c='black', s=50 * factor, alpha=alpha)
    def make_ax(fig, gs):
        # subplot with tick labels suppressed on both axes
        ax = fig.add_subplot(gs)
        ax.xaxis.set_major_formatter(plt.NullFormatter())
        ax.yaxis.set_major_formatter(plt.NullFormatter())
        return ax
    fig = plt.figure(figsize=(15, 4))
    gs = plt.GridSpec(4, 15, left=0.02, right=0.98, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2)
    ax0 = make_ax(fig, gs[:4, :4])
    ax0.text(0.98, 0.98, "Random Initialization", transform=ax0.transAxes,
             ha='right', va='top', size=16)
    draw_points(ax0, 'gray', factor=2)
    draw_centers(ax0, centers, factor=2)
    # three E/M iterations; NOTE(review): the inner `for i in range(4)` rebinds
    # `i`, which is harmless only because the outer `for` rebinds it again at
    # the top of each iteration
    for i in range(3):
        ax1 = make_ax(fig, gs[:2, 4 + 2 * i:6 + 2 * i])
        ax2 = make_ax(fig, gs[2:, 5 + 2 * i:7 + 2 * i])
        # E-step
        y_pred = pairwise_distances_argmin(X, centers)
        draw_points(ax1, y_pred)
        draw_centers(ax1, centers)
        # M-step
        new_centers = np.array([X[y_pred == i].mean(0) for i in range(4)])
        draw_points(ax2, y_pred)
        draw_centers(ax2, centers, alpha=0.3)
        draw_centers(ax2, new_centers)
        # arrows from each old center to its updated position
        for i in range(4):
            ax2.annotate('', new_centers[i], centers[i],
                         arrowprops=dict(arrowstyle='->', linewidth=1))
        # Finish iteration
        centers = new_centers
        ax1.text(0.95, 0.95, "E-Step", transform=ax1.transAxes, ha='right', va='top', size=14)
        ax2.text(0.95, 0.95, "M-Step", transform=ax2.transAxes, ha='right', va='top', size=14)
    # Final E-step
    y_pred = pairwise_distances_argmin(X, centers)
    axf = make_ax(fig, gs[:4, -4:])
    draw_points(axf, y_pred, factor=2)
    draw_centers(axf, centers, factor=2)
    axf.text(0.98, 0.98, "Final Clustering", transform=axf.transAxes,
             ha='right', va='top', size=16)
    # fig.savefig('figures/05.11-expectation-maximization.png')
def demo_covariance_GMM_type():
    """Illustrate single-component GMM fits with diag, spherical, and full covariance.

    Bug fix: ``sklearn.mixture.GaussianMixture`` exposes the fitted
    covariances as ``covariances_``; the old ``covars_`` attribute belonged to
    the long-removed ``GMM`` class and raises ``AttributeError`` here.
    """
    fig, ax = plt.subplots(1, 3, figsize=(14, 4), sharex=True, sharey=True)
    fig.subplots_adjust(wspace=0.05)
    rng = np.random.RandomState(5)
    # anisotropic Gaussian cloud: standard normals through a random linear map
    X = np.dot(rng.randn(500, 2), rng.randn(2, 2))
    for i, cov_type in enumerate(['diag', 'spherical', 'full']):
        model = GaussianMixture(1, covariance_type=cov_type).fit(X)
        ax[i].axis('equal')
        ax[i].scatter(X[:, 0], X[:, 1], alpha=0.5)
        ax[i].set_xlim(-3, 3)
        ax[i].set_title('covariance_type="{0}"'.format(cov_type),
                        size=14, family='monospace')
        # NOTE(review): covariances_[0] is a length-2 vector for 'diag', a
        # scalar for 'spherical', and a 2x2 matrix for 'full'; mf.draw_ellipse
        # is assumed to handle all three shapes -- TODO confirm.
        mf.draw_ellipse(model.means_[0], model.covariances_[0], ax[i], alpha=0.2)
        ax[i].xaxis.set_major_formatter(plt.NullFormatter())
        ax[i].yaxis.set_major_formatter(plt.NullFormatter())
# fig.savefig('figures/05.12-covariance-type.png') | [
"numpy.sum",
"misc_fig.draw_ellipse",
"matplotlib.pyplot.axes",
"misc_fig.draw_dataframe",
"sklearn.mixture.GaussianMixture",
"sklearn.tree.DecisionTreeClassifier",
"misc_fig.visualize_tree",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"numpy.ma.masked_array",
"skle... | [((1341, 1384), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4.5)', 'facecolor': '"""w"""'}), "(figsize=(6, 4.5), facecolor='w')\n", (1351, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1453), 'matplotlib.pyplot.axes', 'plt.axes', (['[0, 0, 1, 1]'], {'xticks': '[]', 'yticks': '[]', 'frameon': '(False)'}), '([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)\n', (1402, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1768, 1840), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(1, 10)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 9]', '"""0"""'], {}), "(ax, (1, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)\n", (1780, 1840), True, 'import misc_fig as mf\n'), ((1845, 1911), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(2, 10)', '(1)', 'depth', '[1, 2, 3, 6, 9]', '"""1"""'], {}), "(ax, (2, 10), 1, depth, [1, 2, 3, 6, 9], '1', **solid)\n", (1857, 1911), True, 'import misc_fig as mf\n'), ((1916, 1989), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(3, 10)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10]', '"""2"""'], {}), "(ax, (3, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)\n", (1928, 1989), True, 'import misc_fig as mf\n'), ((1995, 2074), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(6, 10)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 7, 9, 10]', '"""5"""'], {}), "(ax, (6, 10), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '5', **solid)\n", (2007, 2074), True, 'import misc_fig as mf\n'), ((2079, 2157), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(7, 10)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10, 11]', '"""5"""'], {}), "(ax, (7, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)\n", (2091, 2157), True, 'import misc_fig as mf\n'), ((2162, 2240), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(8, 10)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10, 11]', '"""5"""'], {}), "(ax, (8, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)\n", (2174, 2240), True, 'import misc_fig as mf\n'), ((2246, 2319), 'misc_fig.draw_cube', 
'mf.draw_cube', (['ax', '(12, 10)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 9]', '"""5"""'], {}), "(ax, (12, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '5', **solid)\n", (2258, 2319), True, 'import misc_fig as mf\n'), ((2324, 2391), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(13, 10)', '(1)', 'depth', '[1, 2, 3, 6, 9]', '"""6"""'], {}), "(ax, (13, 10), 1, depth, [1, 2, 3, 6, 9], '6', **solid)\n", (2336, 2391), True, 'import misc_fig as mf\n'), ((2396, 2470), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(14, 10)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10]', '"""7"""'], {}), "(ax, (14, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '7', **solid)\n", (2408, 2470), True, 'import misc_fig as mf\n'), ((2830, 2903), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(1, 7.5)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 9]', '"""1"""'], {}), "(ax, (1, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)\n", (2842, 2903), True, 'import misc_fig as mf\n'), ((2908, 2975), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(2, 7.5)', '(1)', 'depth', '[1, 2, 3, 6, 9]', '"""1"""'], {}), "(ax, (2, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)\n", (2920, 2975), True, 'import misc_fig as mf\n'), ((2980, 3054), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(3, 7.5)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10]', '"""1"""'], {}), "(ax, (3, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '1', **solid)\n", (2992, 3054), True, 'import misc_fig as mf\n'), ((3060, 3121), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(1, 6.5)', '(1)', 'depth', '[2, 3, 4]', '"""1"""'], {}), "(ax, (1, 6.5), 1, depth, [2, 3, 4], '1', **solid)\n", (3072, 3121), True, 'import misc_fig as mf\n'), ((3126, 3184), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(2, 6.5)', '(1)', 'depth', '[2, 3]', '"""1"""'], {}), "(ax, (2, 6.5), 1, depth, [2, 3], '1', **solid)\n", (3138, 3184), True, 'import misc_fig as mf\n'), ((3189, 3254), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(3, 6.5)', '(1)', 'depth', '[2, 3, 7, 10]', '"""1"""'], {}), "(ax, (3, 6.5), 1, 
depth, [2, 3, 7, 10], '1', **solid)\n", (3201, 3254), True, 'import misc_fig as mf\n'), ((3260, 3321), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(1, 5.5)', '(1)', 'depth', '[2, 3, 4]', '"""1"""'], {}), "(ax, (1, 5.5), 1, depth, [2, 3, 4], '1', **solid)\n", (3272, 3321), True, 'import misc_fig as mf\n'), ((3326, 3384), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(2, 5.5)', '(1)', 'depth', '[2, 3]', '"""1"""'], {}), "(ax, (2, 5.5), 1, depth, [2, 3], '1', **solid)\n", (3338, 3384), True, 'import misc_fig as mf\n'), ((3389, 3454), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(3, 5.5)', '(1)', 'depth', '[2, 3, 7, 10]', '"""1"""'], {}), "(ax, (3, 5.5), 1, depth, [2, 3, 7, 10], '1', **solid)\n", (3401, 3454), True, 'import misc_fig as mf\n'), ((3479, 3552), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(6, 7.5)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 9]', '"""0"""'], {}), "(ax, (6, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)\n", (3491, 3552), True, 'import misc_fig as mf\n'), ((3557, 3624), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(7, 7.5)', '(1)', 'depth', '[1, 2, 3, 6, 9]', '"""1"""'], {}), "(ax, (7, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)\n", (3569, 3624), True, 'import misc_fig as mf\n'), ((3629, 3703), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(8, 7.5)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10]', '"""2"""'], {}), "(ax, (8, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)\n", (3641, 3703), True, 'import misc_fig as mf\n'), ((3779, 3855), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(7, 6.5)', '(1)', 'depth', '[2, 3, 6, 7, 9, 10, 11]', '"""1"""'], {}), "(ax, (7, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)\n", (3791, 3855), True, 'import misc_fig as mf\n'), ((3860, 3936), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(8, 6.5)', '(1)', 'depth', '[2, 3, 6, 7, 9, 10, 11]', '"""2"""'], {}), "(ax, (8, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)\n", (3872, 3936), True, 'import misc_fig as mf\n'), ((3942, 
4027), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(6, 5.5)', '(1)', 'depth', '[2, 3, 4, 7, 8, 10, 11, 12]', '"""0"""'], {}), "(ax, (6, 5.5), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted\n )\n", (3954, 4027), True, 'import misc_fig as mf\n'), ((4027, 4097), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(7, 5.5)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""1"""'], {}), "(ax, (7, 5.5), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)\n", (4039, 4097), True, 'import misc_fig as mf\n'), ((4102, 4172), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(8, 5.5)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""2"""'], {}), "(ax, (8, 5.5), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)\n", (4114, 4172), True, 'import misc_fig as mf\n'), ((4196, 4270), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(12, 7.5)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 9]', '"""1"""'], {}), "(ax, (12, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)\n", (4208, 4270), True, 'import misc_fig as mf\n'), ((4275, 4343), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(13, 7.5)', '(1)', 'depth', '[1, 2, 3, 6, 9]', '"""2"""'], {}), "(ax, (13, 7.5), 1, depth, [1, 2, 3, 6, 9], '2', **solid)\n", (4287, 4343), True, 'import misc_fig as mf\n'), ((4348, 4423), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(14, 7.5)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10]', '"""3"""'], {}), "(ax, (14, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '3', **solid)\n", (4360, 4423), True, 'import misc_fig as mf\n'), ((4429, 4491), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(12, 6.5)', '(1)', 'depth', '[2, 3, 4]', '"""1"""'], {}), "(ax, (12, 6.5), 1, depth, [2, 3, 4], '1', **solid)\n", (4441, 4491), True, 'import misc_fig as mf\n'), ((4496, 4555), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(13, 6.5)', '(1)', 'depth', '[2, 3]', '"""2"""'], {}), "(ax, (13, 6.5), 1, depth, [2, 3], '2', **solid)\n", (4508, 4555), True, 'import misc_fig as mf\n'), ((4560, 4626), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(14, 6.5)', '(1)', 'depth', 
'[2, 3, 7, 10]', '"""3"""'], {}), "(ax, (14, 6.5), 1, depth, [2, 3, 7, 10], '3', **solid)\n", (4572, 4626), True, 'import misc_fig as mf\n'), ((4632, 4694), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(12, 5.5)', '(1)', 'depth', '[2, 3, 4]', '"""1"""'], {}), "(ax, (12, 5.5), 1, depth, [2, 3, 4], '1', **solid)\n", (4644, 4694), True, 'import misc_fig as mf\n'), ((4699, 4758), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(13, 5.5)', '(1)', 'depth', '[2, 3]', '"""2"""'], {}), "(ax, (13, 5.5), 1, depth, [2, 3], '2', **solid)\n", (4711, 4758), True, 'import misc_fig as mf\n'), ((4763, 4829), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(14, 5.5)', '(1)', 'depth', '[2, 3, 7, 10]', '"""3"""'], {}), "(ax, (14, 5.5), 1, depth, [2, 3, 7, 10], '3', **solid)\n", (4775, 4829), True, 'import misc_fig as mf\n'), ((5220, 5298), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(1, 3)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 7, 9, 10]', '"""0"""'], {}), "(ax, (1, 3), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '0', **solid)\n", (5232, 5298), True, 'import misc_fig as mf\n'), ((5303, 5369), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(1, 2)', '(1)', 'depth', '[2, 3, 4, 7, 10]', '"""1"""'], {}), "(ax, (1, 2), 1, depth, [2, 3, 4, 7, 10], '1', **solid)\n", (5315, 5369), True, 'import misc_fig as mf\n'), ((5374, 5440), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(1, 1)', '(1)', 'depth', '[2, 3, 4, 7, 10]', '"""2"""'], {}), "(ax, (1, 1), 1, depth, [2, 3, 4, 7, 10], '2', **solid)\n", (5386, 5440), True, 'import misc_fig as mf\n'), ((5446, 5523), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(2, 3)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10, 11]', '"""0"""'], {}), "(ax, (2, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)\n", (5458, 5523), True, 'import misc_fig as mf\n'), ((5528, 5596), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(2, 2)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""1"""'], {}), "(ax, (2, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)\n", (5540, 5596), True, 
'import misc_fig as mf\n'), ((5601, 5669), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(2, 1)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""2"""'], {}), "(ax, (2, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)\n", (5613, 5669), True, 'import misc_fig as mf\n'), ((5675, 5752), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(3, 3)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10, 11]', '"""0"""'], {}), "(ax, (3, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)\n", (5687, 5752), True, 'import misc_fig as mf\n'), ((5757, 5825), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(3, 2)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""1"""'], {}), "(ax, (3, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)\n", (5769, 5825), True, 'import misc_fig as mf\n'), ((5830, 5898), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(3, 1)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""2"""'], {}), "(ax, (3, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)\n", (5842, 5898), True, 'import misc_fig as mf\n'), ((5923, 5994), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(6, 3)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 9]', '"""0"""'], {}), "(ax, (6, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)\n", (5935, 5994), True, 'import misc_fig as mf\n'), ((5999, 6064), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(7, 3)', '(1)', 'depth', '[1, 2, 3, 6, 9]', '"""1"""'], {}), "(ax, (7, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)\n", (6011, 6064), True, 'import misc_fig as mf\n'), ((6069, 6141), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(8, 3)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10]', '"""2"""'], {}), "(ax, (8, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)\n", (6081, 6141), True, 'import misc_fig as mf\n'), ((6215, 6289), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(7, 2)', '(1)', 'depth', '[2, 3, 6, 7, 9, 10, 11]', '"""1"""'], {}), "(ax, (7, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)\n", (6227, 6289), True, 'import misc_fig as mf\n'), ((6294, 6368), 'misc_fig.draw_cube', 
'mf.draw_cube', (['ax', '(8, 2)', '(1)', 'depth', '[2, 3, 6, 7, 9, 10, 11]', '"""2"""'], {}), "(ax, (8, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)\n", (6306, 6368), True, 'import misc_fig as mf\n'), ((6374, 6452), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(6, 1)', '(1)', 'depth', '[2, 3, 4, 7, 8, 10, 11, 12]', '"""0"""'], {}), "(ax, (6, 1), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted)\n", (6386, 6452), True, 'import misc_fig as mf\n'), ((6457, 6525), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(7, 1)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""1"""'], {}), "(ax, (7, 1), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)\n", (6469, 6525), True, 'import misc_fig as mf\n'), ((6530, 6598), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(8, 1)', '(1)', 'depth', '[2, 3, 7, 10, 11]', '"""2"""'], {}), "(ax, (8, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)\n", (6542, 6598), True, 'import misc_fig as mf\n'), ((6622, 6694), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(12, 3)', '(1)', 'depth', '[1, 2, 3, 4, 5, 6, 9]', '"""0"""'], {}), "(ax, (12, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)\n", (6634, 6694), True, 'import misc_fig as mf\n'), ((6699, 6765), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(13, 3)', '(1)', 'depth', '[1, 2, 3, 6, 9]', '"""1"""'], {}), "(ax, (13, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)\n", (6711, 6765), True, 'import misc_fig as mf\n'), ((6770, 6843), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(14, 3)', '(1)', 'depth', '[1, 2, 3, 6, 7, 9, 10]', '"""2"""'], {}), "(ax, (14, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)\n", (6782, 6843), True, 'import misc_fig as mf\n'), ((6849, 6909), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(12, 2)', '(1)', 'depth', '[2, 3, 4]', '"""1"""'], {}), "(ax, (12, 2), 1, depth, [2, 3, 4], '1', **solid)\n", (6861, 6909), True, 'import misc_fig as mf\n'), ((6914, 6971), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(13, 2)', '(1)', 'depth', '[2, 3]', '"""2"""'], {}), "(ax, 
(13, 2), 1, depth, [2, 3], '2', **solid)\n", (6926, 6971), True, 'import misc_fig as mf\n'), ((6976, 7040), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(14, 2)', '(1)', 'depth', '[2, 3, 7, 10]', '"""3"""'], {}), "(ax, (14, 2), 1, depth, [2, 3, 7, 10], '3', **solid)\n", (6988, 7040), True, 'import misc_fig as mf\n'), ((7046, 7106), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(12, 1)', '(1)', 'depth', '[2, 3, 4]', '"""2"""'], {}), "(ax, (12, 1), 1, depth, [2, 3, 4], '2', **solid)\n", (7058, 7106), True, 'import misc_fig as mf\n'), ((7111, 7168), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(13, 1)', '(1)', 'depth', '[2, 3]', '"""3"""'], {}), "(ax, (13, 1), 1, depth, [2, 3], '3', **solid)\n", (7123, 7168), True, 'import misc_fig as mf\n'), ((7173, 7237), 'misc_fig.draw_cube', 'mf.draw_cube', (['ax', '(14, 1)', '(1)', 'depth', '[2, 3, 7, 10]', '"""4"""'], {}), "(ax, (14, 1), 1, depth, [2, 3, 7, 10], '4', **solid)\n", (7185, 7237), True, 'import misc_fig as mf\n'), ((7636, 7721), 'pandas.DataFrame', 'pd.DataFrame', (["{'data': [1, 2, 3, 4, 5, 6]}"], {'index': "['A', 'B', 'C', 'A', 'B', 'C']"}), "({'data': [1, 2, 3, 4, 5, 6]}, index=['A', 'B', 'C', 'A', 'B', 'C']\n )\n", (7648, 7721), True, 'import pandas as pd\n'), ((7778, 7823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)', 'facecolor': '"""white"""'}), "(figsize=(8, 6), facecolor='white')\n", (7788, 7823), True, 'import matplotlib.pyplot as plt\n'), ((7833, 7855), 'matplotlib.pyplot.axes', 'plt.axes', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (7841, 7855), True, 'import matplotlib.pyplot as plt\n'), ((7881, 7910), 'misc_fig.draw_dataframe', 'mf.draw_dataframe', (['df', '[0, 0]'], {}), '(df, [0, 0])\n', (7898, 7910), True, 'import misc_fig as mf\n'), ((8253, 8289), 'misc_fig.draw_dataframe', 'mf.draw_dataframe', (['result', '[6, 0.75]'], {}), '(result, [6, 0.75])\n', (8270, 8289), True, 'import misc_fig as mf\n'), ((8353, 8389), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(3.6)', 
'"""Input"""'], {}), "(0.5, 3.6, 'Input', **style)\n", (8361, 8389), True, 'import matplotlib.pyplot as plt\n'), ((8394, 8430), 'matplotlib.pyplot.text', 'plt.text', (['(2.5)', '(4.6)', '"""Split"""'], {}), "(2.5, 4.6, 'Split', **style)\n", (8402, 8430), True, 'import matplotlib.pyplot as plt\n'), ((8435, 8478), 'matplotlib.pyplot.text', 'plt.text', (['(4.5)', '(4.35)', '"""Apply (sum)"""'], {}), "(4.5, 4.35, 'Apply (sum)', **style)\n", (8443, 8478), True, 'import matplotlib.pyplot as plt\n'), ((8483, 8522), 'matplotlib.pyplot.text', 'plt.text', (['(6.5)', '(2.85)', '"""Combine"""'], {}), "(6.5, 2.85, 'Combine', **style)\n", (8491, 8522), True, 'import matplotlib.pyplot as plt\n'), ((8591, 8654), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(1.8, 3.6)', '(1.2, 2.8)'], {'arrowprops': 'arrowprops'}), "('', (1.8, 3.6), (1.2, 2.8), arrowprops=arrowprops)\n", (8603, 8654), True, 'import matplotlib.pyplot as plt\n'), ((8659, 8724), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(1.8, 1.75)', '(1.2, 1.75)'], {'arrowprops': 'arrowprops'}), "('', (1.8, 1.75), (1.2, 1.75), arrowprops=arrowprops)\n", (8671, 8724), True, 'import matplotlib.pyplot as plt\n'), ((8729, 8793), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(1.8, -0.1)', '(1.2, 0.7)'], {'arrowprops': 'arrowprops'}), "('', (1.8, -0.1), (1.2, 0.7), arrowprops=arrowprops)\n", (8741, 8793), True, 'import matplotlib.pyplot as plt\n'), ((8799, 8862), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(3.8, 3.8)', '(3.2, 3.8)'], {'arrowprops': 'arrowprops'}), "('', (3.8, 3.8), (3.2, 3.8), arrowprops=arrowprops)\n", (8811, 8862), True, 'import matplotlib.pyplot as plt\n'), ((8867, 8932), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(3.8, 1.75)', '(3.2, 1.75)'], {'arrowprops': 'arrowprops'}), "('', (3.8, 1.75), (3.2, 1.75), arrowprops=arrowprops)\n", (8879, 8932), True, 'import matplotlib.pyplot as plt\n'), ((8937, 9002), 'matplotlib.pyplot.annotate', 
'plt.annotate', (['""""""', '(3.8, -0.3)', '(3.2, -0.3)'], {'arrowprops': 'arrowprops'}), "('', (3.8, -0.3), (3.2, -0.3), arrowprops=arrowprops)\n", (8949, 9002), True, 'import matplotlib.pyplot as plt\n'), ((9008, 9071), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(5.8, 2.8)', '(5.2, 3.6)'], {'arrowprops': 'arrowprops'}), "('', (5.8, 2.8), (5.2, 3.6), arrowprops=arrowprops)\n", (9020, 9071), True, 'import matplotlib.pyplot as plt\n'), ((9076, 9141), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(5.8, 1.75)', '(5.2, 1.75)'], {'arrowprops': 'arrowprops'}), "('', (5.8, 1.75), (5.2, 1.75), arrowprops=arrowprops)\n", (9088, 9141), True, 'import matplotlib.pyplot as plt\n'), ((9146, 9210), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""', '(5.8, 0.7)', '(5.2, -0.1)'], {'arrowprops': 'arrowprops'}), "('', (5.8, 0.7), (5.2, -0.1), arrowprops=arrowprops)\n", (9158, 9210), True, 'import matplotlib.pyplot as plt\n'), ((9216, 9233), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (9224, 9233), True, 'import matplotlib.pyplot as plt\n'), ((9238, 9255), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(5)'], {}), '(-1.5, 5)\n', (9246, 9255), True, 'import matplotlib.pyplot as plt\n'), ((9366, 9392), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (9376, 9392), True, 'import matplotlib.pyplot as plt\n'), ((10293, 10305), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10303, 10305), True, 'import matplotlib.pyplot as plt\n'), ((10530, 10605), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n', 'centers': 'n_centers', 'random_state': '(0)', 'cluster_std': 'std'}), '(n_samples=n, centers=n_centers, random_state=0, cluster_std=std)\n', (10540, 10605), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((10696, 10764), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(80)', 'centers': '(2)', 'random_state': 
'(0)', 'cluster_std': '(0.8)'}), '(n_samples=80, centers=2, random_state=0, cluster_std=0.8)\n', (10706, 10764), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((10889, 10913), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (10910, 10913), True, 'import numpy as np\n'), ((11230, 11304), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n', 'centers': 'centers', 'cluster_std': 'std', 'random_state': '(42)'}), '(n_samples=n, centers=centers, cluster_std=std, random_state=42)\n', (11240, 11304), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((11482, 11540), 'sklearn.datasets.make_swiss_roll', 'make_swiss_roll', ([], {'n_samples': 'n', 'noise': 'noise', 'random_state': '(42)'}), '(n_samples=n, noise=noise, random_state=42)\n', (11497, 11540), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((11638, 11666), 'numpy.random.RandomState', 'np.random.RandomState', (['rseed'], {}), '(rseed)\n', (11659, 11666), True, 'import numpy as np\n'), ((11916, 11944), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (11928, 11944), True, 'import matplotlib.pyplot as plt\n'), ((12065, 12097), 'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Input Data"""'], {}), "(ax, 'Input Data')\n", (12079, 12097), True, 'import misc_fig as mf\n'), ((12323, 12345), 'numpy.linspace', 'np.linspace', (['(-1)', '(4)', '(10)'], {}), '(-1, 4, 10)\n', (12334, 12345), True, 'import numpy as np\n'), ((12355, 12377), 'numpy.linspace', 'np.linspace', (['(-2)', '(7)', '(10)'], {}), '(-2, 7, 10)\n', (12366, 12377), True, 'import numpy as np\n'), ((12393, 12412), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (12404, 12412), True, 'import numpy as np\n'), ((12617, 12645), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (12629, 12645), True, 'import matplotlib.pyplot as plt\n'), 
((12932, 12983), 'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Model Learned from Input Data"""'], {}), "(ax, 'Model Learned from Input Data')\n", (12946, 12983), True, 'import misc_fig as mf\n'), ((13184, 13198), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13196, 13198), True, 'import matplotlib.pyplot as plt\n'), ((13315, 13347), 'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Input Data"""'], {}), "(ax, 'Input Data')\n", (13329, 13347), True, 'import misc_fig as mf\n'), ((13582, 13609), 'numpy.hstack', 'np.hstack', (['[points, points]'], {}), '([points, points])\n', (13591, 13609), True, 'import numpy as np\n'), ((13672, 13684), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13682, 13684), True, 'import matplotlib.pyplot as plt\n'), ((14894, 14908), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (14906, 14908), True, 'import matplotlib.pyplot as plt\n'), ((15382, 15430), 'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Input Data with Linear Fit"""'], {}), "(ax, 'Input Data with Linear Fit')\n", (15396, 15430), True, 'import misc_fig as mf\n'), ((15618, 15646), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (15630, 15646), True, 'import matplotlib.pyplot as plt\n'), ((15727, 15759), 'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Input Data"""'], {}), "(ax, 'Input Data')\n", (15741, 15759), True, 'import misc_fig as mf\n'), ((15934, 15962), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (15946, 15962), True, 'import matplotlib.pyplot as plt\n'), ((16050, 16094), 'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Learned Cluster Labels"""'], {}), "(ax, 'Learned Cluster Labels')\n", (16064, 16094), True, 'import misc_fig as mf\n'), ((16266, 16280), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (16278, 16280), True, 'import matplotlib.pyplot as plt\n'), ((16361, 16393), 
'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Input Data"""'], {}), "(ax, 'Input Data')\n", (16375, 16393), True, 'import misc_fig as mf\n'), ((16564, 16601), 'sklearn.manifold.Isomap', 'Isomap', ([], {'n_neighbors': '(8)', 'n_components': '(1)'}), '(n_neighbors=8, n_components=1)\n', (16570, 16601), False, 'from sklearn.manifold import Isomap\n'), ((16681, 16695), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (16693, 16695), True, 'import matplotlib.pyplot as plt\n'), ((16827, 16873), 'misc_fig.format_plot', 'mf.format_plot', (['ax', '"""Learned Latent Parameter"""'], {}), "(ax, 'Learned Latent Parameter')\n", (16841, 16873), True, 'import misc_fig as mf\n'), ((17368, 17403), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 6)'}), '(1, 2, figsize=(16, 6))\n', (17380, 17403), True, 'import matplotlib.pyplot as plt\n'), ((17998, 18033), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 6)'}), '(1, 2, figsize=(16, 6))\n', (18010, 18033), True, 'import matplotlib.pyplot as plt\n'), ((19824, 19847), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (19835, 19847), True, 'import numpy as np\n'), ((19923, 19937), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19935, 19937), True, 'import matplotlib.pyplot as plt\n'), ((20940, 20963), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (20951, 20963), True, 'import numpy as np\n'), ((20972, 20995), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (20983, 20995), True, 'import numpy as np\n'), ((21084, 21098), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (21096, 21098), True, 'import matplotlib.pyplot as plt\n'), ((22001, 22063), 'sklearn.datasets.make_blobs', 'make_blobs', (['(100)', '(2)'], {'centers': '(2)', 'random_state': '(2)', 'cluster_std': '(1.5)'}), '(100, 2, centers=2, random_state=2, 
cluster_std=1.5)\n', (22011, 22063), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((22079, 22093), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22091, 22093), True, 'import matplotlib.pyplot as plt\n'), ((22249, 22282), 'numpy.linspace', 'np.linspace', (['xlim[0]', 'xlim[1]', '(60)'], {}), '(xlim[0], xlim[1], 60)\n', (22260, 22282), True, 'import numpy as np\n'), ((22292, 22325), 'numpy.linspace', 'np.linspace', (['ylim[0]', 'ylim[1]', '(40)'], {}), '(ylim[0], ylim[1], 40)\n', (22303, 22325), True, 'import numpy as np\n'), ((22339, 22358), 'numpy.meshgrid', 'np.meshgrid', (['xg', 'yg'], {}), '(xg, yg)\n', (22350, 22358), True, 'import numpy as np\n'), ((23875, 23899), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (23896, 23899), True, 'import numpy as np\n'), ((23977, 24001), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (23988, 24001), True, 'import numpy as np\n'), ((24329, 24343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (24341, 24343), True, 'import matplotlib.pyplot as plt\n'), ((24823, 24850), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (24833, 24850), True, 'import matplotlib.pyplot as plt\n'), ((26414, 26449), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'figsize': '(16, 3)'}), '(1, 4, figsize=(16, 3))\n', (26426, 26449), True, 'import matplotlib.pyplot as plt\n'), ((26521, 26590), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(300)', 'centers': '(4)', 'random_state': '(0)', 'cluster_std': '(1.0)'}), '(n_samples=300, centers=4, random_state=0, cluster_std=1.0)\n', (26531, 26590), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((26924, 26948), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (26946, 26948), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((26961, 
27030), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(300)', 'centers': '(4)', 'random_state': '(0)', 'cluster_std': '(1.0)'}), '(n_samples=300, centers=4, random_state=0, cluster_std=1.0)\n', (26971, 27030), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((27067, 27102), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 6)'}), '(1, 2, figsize=(16, 6))\n', (27079, 27102), True, 'import matplotlib.pyplot as plt\n'), ((27168, 27236), 'misc_fig.visualize_tree', 'mf.visualize_tree', (['model', 'X[::2]', 'y[::2]'], {'boundaries': '(False)', 'ax': 'ax[0]'}), '(model, X[::2], y[::2], boundaries=False, ax=ax[0])\n', (27185, 27236), True, 'import misc_fig as mf\n'), ((27241, 27311), 'misc_fig.visualize_tree', 'mf.visualize_tree', (['model', 'X[1::2]', 'y[1::2]'], {'boundaries': '(False)', 'ax': 'ax[1]'}), '(model, X[1::2], y[1::2], boundaries=False, ax=ax[1])\n', (27258, 27311), True, 'import misc_fig as mf\n'), ((27424, 27448), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (27445, 27448), True, 'import numpy as np\n'), ((27511, 27543), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)', 'whiten': '(True)'}), '(n_components=2, whiten=True)\n', (27514, 27543), False, 'from sklearn.decomposition import PCA\n'), ((27574, 27609), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 6)'}), '(1, 2, figsize=(16, 6))\n', (27586, 27609), True, 'import matplotlib.pyplot as plt\n'), ((28107, 28147), 'misc_fig.draw_vector', 'mf.draw_vector', (['[0, 0]', '[0, 3]'], {'ax': 'ax[1]'}), '([0, 0], [0, 3], ax=ax[1])\n', (28121, 28147), True, 'import misc_fig as mf\n'), ((28152, 28192), 'misc_fig.draw_vector', 'mf.draw_vector', (['[0, 0]', '[3, 0]'], {'ax': 'ax[1]'}), '([0, 0], [3, 0], ax=ax[1])\n', (28166, 28192), True, 'import misc_fig as mf\n'), ((28479, 28548), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(300)', 'centers': 
'(4)', 'cluster_std': '(0.6)', 'random_state': '(0)'}), '(n_samples=300, centers=4, cluster_std=0.6, random_state=0)\n', (28489, 28548), False, 'from sklearn.datasets import make_blobs, make_swiss_roll\n'), ((28588, 28613), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (28609, 28613), True, 'import numpy as np\n'), ((29312, 29339), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (29322, 29339), True, 'import matplotlib.pyplot as plt\n'), ((29349, 29443), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(4)', '(15)'], {'left': '(0.02)', 'right': '(0.98)', 'bottom': '(0.05)', 'top': '(0.95)', 'wspace': '(0.2)', 'hspace': '(0.2)'}), '(4, 15, left=0.02, right=0.98, bottom=0.05, top=0.95, wspace=\n 0.2, hspace=0.2)\n', (29361, 29443), True, 'import matplotlib.pyplot as plt\n'), ((30600, 30637), 'sklearn.metrics.pairwise_distances_argmin', 'pairwise_distances_argmin', (['X', 'centers'], {}), '(X, centers)\n', (30625, 30637), False, 'from sklearn.metrics import pairwise_distances_argmin\n'), ((30981, 31042), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(14, 4)', 'sharex': '(True)', 'sharey': '(True)'}), '(1, 3, figsize=(14, 4), sharex=True, sharey=True)\n', (30993, 31042), True, 'import matplotlib.pyplot as plt\n'), ((31091, 31115), 'numpy.random.RandomState', 'np.random.RandomState', (['(5)'], {}), '(5)\n', (31112, 31115), True, 'import numpy as np\n'), ((7998, 8030), 'misc_fig.draw_dataframe', 'mf.draw_dataframe', (['split', '[2, y]'], {}), '(split, [2, y])\n', (8015, 8030), True, 'import misc_fig as mf\n'), ((8170, 8207), 'misc_fig.draw_dataframe', 'mf.draw_dataframe', (['sum', '[4, y + 0.25]'], {}), '(sum, [4, y + 0.25])\n', (8187, 8207), True, 'import misc_fig as mf\n'), ((10946, 10964), 'numpy.dot', 'np.dot', (['X', '[-2, 1]'], {}), '(X, [-2, 1])\n', (10952, 10964), True, 'import numpy as np\n'), ((13834, 13886), 
'mpl_toolkits.mplot3d.art3d.Line3DCollection', 'Line3DCollection', (['segments'], {'colors': '"""gray"""', 'alpha': '(0.2)'}), "(segments, colors='gray', alpha=0.2)\n", (13850, 13886), False, 'from mpl_toolkits.mplot3d.art3d import Line3DCollection\n'), ((14139, 14158), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (14156, 14158), True, 'import matplotlib.pyplot as plt\n'), ((14193, 14212), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (14210, 14212), True, 'import matplotlib.pyplot as plt\n'), ((14247, 14266), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (14264, 14266), True, 'import matplotlib.pyplot as plt\n'), ((15072, 15090), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)'], {}), '(-4, 4)\n', (15083, 15090), True, 'import numpy as np\n'), ((15117, 15135), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)'], {}), '(-3, 3)\n', (15128, 15135), True, 'import numpy as np\n'), ((17082, 17110), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(1.0)', '(1000)'], {}), '(-0.1, 1.0, 1000)\n', (17093, 17110), True, 'import numpy as np\n'), ((18187, 18215), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(1.0)', '(1000)'], {}), '(-0.1, 1.0, 1000)\n', (18198, 18215), True, 'import numpy as np\n'), ((19894, 19907), 'numpy.exp', 'np.exp', (['(x - 1)'], {}), '(x - 1)\n', (19900, 19907), True, 'import numpy as np\n'), ((20714, 20733), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (20731, 20733), True, 'import matplotlib.pyplot as plt\n'), ((20768, 20787), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (20785, 20787), True, 'import matplotlib.pyplot as plt\n'), ((21779, 21798), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (21796, 21798), True, 'import matplotlib.pyplot as plt\n'), ((21833, 21852), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (21850, 21852), True, 'import matplotlib.pyplot 
as plt\n'), ((22617, 22648), 'numpy.ma.masked_array', 'np.ma.masked_array', (['P', '(P < 0.03)'], {}), '(P, P < 0.03)\n', (22635, 22648), True, 'import numpy as np\n'), ((23934, 23943), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (23940, 23943), True, 'import numpy as np\n'), ((24094, 24112), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (24110, 24112), False, 'from sklearn.linear_model import LinearRegression\n'), ((24388, 24400), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (24396, 24400), True, 'import numpy as np\n'), ((26674, 26713), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': 'depth'}), '(max_depth=depth)\n', (26696, 26713), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((26722, 26760), 'misc_fig.visualize_tree', 'mf.visualize_tree', (['model', 'X', 'y'], {'ax': 'axi'}), '(model, X, y, ax=axi)\n', (26739, 26760), True, 'import misc_fig as mf\n'), ((27857, 27907), 'misc_fig.draw_vector', 'mf.draw_vector', (['pca.mean_', '(pca.mean_ + v)'], {'ax': 'ax[0]'}), '(pca.mean_, pca.mean_ + v, ax=ax[0])\n', (27871, 27907), True, 'import misc_fig as mf\n'), ((29844, 29881), 'sklearn.metrics.pairwise_distances_argmin', 'pairwise_distances_argmin', (['X', 'centers'], {}), '(X, centers)\n', (29869, 29881), False, 'from sklearn.metrics import pairwise_distances_argmin\n'), ((31537, 31605), 'misc_fig.draw_ellipse', 'mf.draw_ellipse', (['model.means_[0]', 'model.covars_[0]', 'ax[i]'], {'alpha': '(0.2)'}), '(model.means_[0], model.covars_[0], ax[i], alpha=0.2)\n', (31552, 31605), True, 'import misc_fig as mf\n'), ((10626, 10646), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (10629, 10646), False, 'from sklearn.svm import SVC\n'), ((11023, 11041), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11039, 11041), False, 'from sklearn.linear_model import LinearRegression\n'), ((11327, 11358), 'sklearn.cluster.KMeans', 
'KMeans', (['centers'], {'random_state': '(0)'}), '(centers, random_state=0)\n', (11333, 11358), False, 'from sklearn.cluster import KMeans\n'), ((13522, 13548), 'numpy.hstack', 'np.hstack', (['[X, y[:, None]]'], {}), '([X, y[:, None]])\n', (13531, 13548), True, 'import numpy as np\n'), ((13926, 13946), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (13934, 13946), True, 'import numpy as np\n'), ((17200, 17226), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['degree'], {}), '(degree)\n', (17218, 17226), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((17228, 17254), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '(**kwargs)\n', (17244, 17254), False, 'from sklearn.linear_model import LinearRegression\n'), ((18281, 18307), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['degree'], {}), '(degree)\n', (18299, 18307), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((18366, 18384), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (18382, 18384), False, 'from sklearn.linear_model import LinearRegression\n'), ((21018, 21032), 'numpy.exp', 'np.exp', (['(-4 * N)'], {}), '(-4 * N)\n', (21024, 21032), True, 'import numpy as np\n'), ((21054, 21068), 'numpy.exp', 'np.exp', (['(-4 * N)'], {}), '(-4 * N)\n', (21060, 21068), True, 'import numpy as np\n'), ((27833, 27848), 'numpy.sqrt', 'np.sqrt', (['length'], {}), '(length)\n', (27840, 27848), True, 'import numpy as np\n'), ((29204, 29223), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (29221, 29223), True, 'import matplotlib.pyplot as plt\n'), ((29262, 29281), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (29279, 29281), True, 'import matplotlib.pyplot as plt\n'), ((31646, 31665), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (31663, 31665), True, 'import matplotlib.pyplot as plt\n'), ((31707, 
31726), 'matplotlib.pyplot.NullFormatter', 'plt.NullFormatter', ([], {}), '()\n', (31724, 31726), True, 'import matplotlib.pyplot as plt\n'), ((22552, 22595), 'numpy.exp', 'np.exp', (['(-0.5 * (Xgrid - mu) ** 2 / std ** 2)'], {}), '(-0.5 * (Xgrid - mu) ** 2 / std ** 2)\n', (22558, 22595), True, 'import numpy as np\n'), ((28922, 28934), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (28931, 28934), True, 'import numpy as np\n'), ((31249, 31293), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', (['(1)'], {'covariance_type': 'cov_type'}), '(1, covariance_type=cov_type)\n', (31264, 31293), False, 'from sklearn.mixture import GaussianMixture\n'), ((23408, 23430), 'numpy.sum', 'np.sum', (['(arg ** 2)', 'axis'], {}), '(arg ** 2, axis)\n', (23414, 23430), True, 'import numpy as np\n')] |
"""
Construct a dataset with (multiple) source and target domains, from https://github.com/criteo-research/pytorch-ada/blob/master/adalib/ada/datasets/multisource.py
"""
import logging
from enum import Enum
from typing import Dict
import numpy as np
import torch.utils.data
from sklearn.utils import check_random_state
from kale.loaddata.dataset_access import DatasetAccess, get_class_subset
from kale.loaddata.sampler import get_labels, MultiDataLoader, SamplingConfig
class WeightingType(Enum):
    """Class-weighting schemes used when sampling batches from a domain dataset."""

    NATURAL = "natural"  # sample with the dataset's natural class proportions
    BALANCED = "balanced"  # re-weight so every class is drawn equally often
    PRESET0 = "preset0"  # fixed imbalanced per-class weights (see MultiDomainDatasets.__init__)
class DatasetSizeType(Enum):
    """Policy for picking the reference dataset size (drives epochs vs batch_size)."""

    Max = "max"  # size of the biggest dataset
    Source = "source"  # size of the source dataset

    @staticmethod
    def get_size(size_type, source_dataset, *other_datasets):
        """Return the reference size for ``size_type`` over the given datasets.

        Args:
            size_type (DatasetSizeType): which sizing policy to apply.
            source_dataset: the source-domain dataset (anything with ``len``).
            *other_datasets: any further datasets considered by the Max policy.

        Raises:
            ValueError: if ``size_type`` is not a member of this enum.
        """
        if size_type is DatasetSizeType.Source:
            return len(source_dataset)
        if size_type is DatasetSizeType.Max:
            lengths = [len(source_dataset)]
            lengths.extend(len(dataset) for dataset in other_datasets)
            return max(lengths)
        raise ValueError(f"Size type size must be 'max' or 'source', had '{size_type}'")
class DomainsDatasetBase:
    """Abstract interface for datasets that iterate over multiple domains."""

    def prepare_data_loaders(self):
        """
        handles train/validation/test split to have 3 datasets each with data from all domains
        """
        raise NotImplementedError()

    def get_domain_loaders(self, split="train", batch_size=32):
        """
        handles the sampling of a dataset containing multiple domains

        Args:
            split (string, optional): ["train"|"valid"|"test"]. Which dataset to iterate on. Defaults to "train".
            batch_size (int, optional): Defaults to 32.

        Returns:
            MultiDataLoader: A dataloader with API similar to the torch.dataloader, but returning
            batches from several domains at each iteration.
        """
        raise NotImplementedError()
class MultiDomainDatasets(DomainsDatasetBase):
    def __init__(
        self,
        source_access: DatasetAccess,
        target_access: DatasetAccess,
        config_weight_type="natural",
        config_size_type=DatasetSizeType.Max,
        val_split_ratio=0.1,
        source_sampling_config=None,
        target_sampling_config=None,
        n_fewshot=None,
        random_state=None,
        class_ids=None,
    ):
        """The class controlling how the source and target domains are
        iterated over.
        Args:
            source_access (DatasetAccess): accessor for the source dataset
            target_access (DatasetAccess): accessor for the target dataset
            config_weight_type (WeightingType, optional): The weight type for sampling. Defaults to 'natural'.
            config_size_type (DatasetSizeType, optional): Which dataset size to use to define the number of epochs vs batch_size. Defaults to DatasetSizeType.Max.
            val_split_ratio (float, optional): ratio for the validation part of the train dataset. Defaults to 0.1.
            source_sampling_config (SamplingConfig, optional): How to sample from the source. Defaults to None (=> RandomSampler).
            target_sampling_config (SamplingConfig, optional): How to sample from the target. Defaults to None (=> RandomSampler).
            n_fewshot (int, optional): Number of target samples for which the label may be used,
                to define the few-shot, semi-supervised setting. Defaults to None.
            random_state ([int|np.random.RandomState], optional): Used for deterministic sampling/few-shot label selection. Defaults to None.
            class_ids (list, optional): List of chosen subset of class ids. Defaults to None (=> All Classes).
        Examples::
            >>> dataset = MultiDomainDatasets(source_access, target_access)
        """
        # WeightingType(...) raises ValueError itself for unknown names.
        weight_type = WeightingType(config_weight_type)
        size_type = DatasetSizeType(config_size_type)
        if weight_type is WeightingType.PRESET0:
            # Fixed, deliberately imbalanced weights: descending on source,
            # random small integers on target.
            self._source_sampling_config = SamplingConfig(class_weights=np.arange(source_access.n_classes(), 0, -1))
            self._target_sampling_config = SamplingConfig(
                # class_weights=random_state.randint(1, 4, size=target_access.n_classes())
                class_weights=np.random.randint(1, 4, size=target_access.n_classes())
            )
        elif weight_type is WeightingType.BALANCED:
            self._source_sampling_config = SamplingConfig(balance=True)
            self._target_sampling_config = SamplingConfig(balance=True)
        elif weight_type not in WeightingType:
            # NOTE(review): WeightingType(config_weight_type) above already raises
            # ValueError for unknown values, so this branch looks unreachable -- confirm.
            raise ValueError(f"Unknown weighting method {weight_type}.")
        else:
            # NATURAL weighting: plain random sampling.
            self._source_sampling_config = SamplingConfig()
            self._target_sampling_config = SamplingConfig()
        self._source_access = source_access
        self._target_access = target_access
        self._val_split_ratio = val_split_ratio
        # NOTE(review): the source_sampling_config/target_sampling_config parameters
        # are currently ignored; the commented-out code below would honor them.
        # self._source_sampling_config = (
        #     source_sampling_config
        #     if source_sampling_config is not None
        #     else SamplingConfig()
        # )
        # self._target_sampling_config = (
        #     target_sampling_config
        #     if target_sampling_config is not None
        #     else SamplingConfig()
        # )
        self._size_type = size_type
        self._n_fewshot = n_fewshot
        self._random_state = check_random_state(random_state)
        # Per-split ("train"/"valid"/"test") dataset subsets, filled lazily by
        # prepare_data_loaders().
        self._source_by_split: Dict[str, torch.utils.data.Subset] = {}
        self._labeled_target_by_split = None
        self._target_by_split: Dict[str, torch.utils.data.Subset] = {}
        self.class_ids = class_ids
    def is_semi_supervised(self):
        """Return True when a labeled few-shot target subset is configured."""
        return self._n_fewshot is not None and self._n_fewshot > 0
    def prepare_data_loaders(self):
        """Build the train/valid/test subsets for both domains (and the
        few-shot labeled target split, when configured)."""
        logging.debug("Load source")
        (self._source_by_split["train"], self._source_by_split["valid"],) = self._source_access.get_train_val(
            self._val_split_ratio
        )
        if self.class_ids is not None:
            # Optionally restrict every split to the chosen class subset.
            self._source_by_split["train"] = get_class_subset(self._source_by_split["train"], self.class_ids)
            self._source_by_split["valid"] = get_class_subset(self._source_by_split["valid"], self.class_ids)
        logging.debug("Load target")
        (self._target_by_split["train"], self._target_by_split["valid"],) = self._target_access.get_train_val(
            self._val_split_ratio
        )
        if self.class_ids is not None:
            self._target_by_split["train"] = get_class_subset(self._target_by_split["train"], self.class_ids)
            self._target_by_split["valid"] = get_class_subset(self._target_by_split["valid"], self.class_ids)
        logging.debug("Load source Test")
        self._source_by_split["test"] = self._source_access.get_test()
        if self.class_ids is not None:
            self._source_by_split["test"] = get_class_subset(self._source_by_split["test"], self.class_ids)
        logging.debug("Load target Test")
        self._target_by_split["test"] = self._target_access.get_test()
        if self.class_ids is not None:
            self._target_by_split["test"] = get_class_subset(self._target_by_split["test"], self.class_ids)
        if self._n_fewshot is not None and self._n_fewshot > 0:
            # semi-supervised target domain: carve a small labeled subset out
            # of every target split, keeping the rest unlabeled.
            self._labeled_target_by_split = {}
            for part in ["train", "valid", "test"]:
                (self._labeled_target_by_split[part], self._target_by_split[part],) = _split_dataset_few_shot(
                    self._target_by_split[part], self._n_fewshot
                )
    def get_domain_loaders(self, split="train", batch_size=32):
        """Return a MultiDataLoader yielding per-domain batches for ``split``."""
        source_ds = self._source_by_split[split]
        source_loader = self._source_sampling_config.create_loader(source_ds, batch_size)
        target_ds = self._target_by_split[split]
        if self._labeled_target_by_split is None:
            # unsupervised target domain
            target_loader = self._target_sampling_config.create_loader(target_ds, batch_size)
            n_dataset = DatasetSizeType.get_size(self._size_type, source_ds, target_ds)
            return MultiDataLoader(
                dataloaders=[source_loader, target_loader], n_batches=max(n_dataset // batch_size, 1),
            )
        else:
            # semi-supervised target domain
            target_labeled_ds = self._labeled_target_by_split[split]
            target_unlabeled_ds = target_ds
            # label domain: always balanced
            target_labeled_loader = SamplingConfig(balance=True, class_weights=None).create_loader(
                target_labeled_ds, batch_size=min(len(target_labeled_ds), batch_size)
            )
            target_unlabeled_loader = self._target_sampling_config.create_loader(target_unlabeled_ds, batch_size)
            n_dataset = DatasetSizeType.get_size(self._size_type, source_ds, target_labeled_ds, target_unlabeled_ds)
            return MultiDataLoader(
                dataloaders=[source_loader, target_labeled_loader, target_unlabeled_loader],
                n_batches=max(n_dataset // batch_size, 1),
            )
    def __len__(self):
        """Length of the "train" split, according to the configured size policy."""
        source_ds = self._source_by_split["train"]
        target_ds = self._target_by_split["train"]
        if self._labeled_target_by_split is None:
            return DatasetSizeType.get_size(self._size_type, source_ds, target_ds)
        else:
            labeled_target_ds = self._labeled_target_by_split["train"]
            return DatasetSizeType.get_size(self._size_type, source_ds, labeled_target_ds, target_ds)
def _split_dataset_few_shot(dataset, n_fewshot, random_state=None):
    """Split ``dataset`` into a small labeled subset and the remaining unlabeled subset.

    Draws ``n_fewshot`` samples per class (uniformly at random) for the labeled
    part; everything else goes to the unlabeled part.

    Args:
        dataset: a torch-style dataset whose labels are readable via ``get_labels``.
        n_fewshot (int|float): samples per class for the labeled subset; a value
            in (0, 1) is interpreted as a fraction of the per-class maximum.
        random_state ([int|np.random.RandomState], optional): seed/state for
            deterministic selection. Defaults to None.

    Returns:
        tuple: ``(labeled_dataset, unlabeled_dataset)`` as ``torch.utils.data.Subset``.

    Raises:
        ValueError: if ``n_fewshot`` is not positive, or a class has fewer than
            ``n_fewshot`` samples.
    """
    if n_fewshot <= 0:
        raise ValueError(f"n_fewshot should be > 0, not '{n_fewshot}'")
    labels = get_labels(dataset)
    classes = sorted(set(labels))
    if n_fewshot < 1:
        # Fractional value: take that fraction of the per-class maximum.
        max_few = len(dataset) // len(classes)
        n_fewshot = round(max_few * n_fewshot)
    n_fewshot = int(round(n_fewshot))
    random_state = check_random_state(random_state)
    # sample n_fewshot items per class for the labeled subset
    tindices = []
    uindices = []
    for class_ in classes:
        indices = np.where(labels == class_)[0]
        random_state.shuffle(indices)
        head, tail = np.split(indices, [n_fewshot])
        # Explicit check (instead of assert, which is stripped under -O):
        # the class must have at least n_fewshot samples.
        if len(head) != n_fewshot:
            raise ValueError(
                f"Class {class_} has only {len(indices)} samples; cannot take {n_fewshot} few-shot samples."
            )
        tindices.append(head)
        uindices.append(tail)
    # Per-class check above guarantees len(tindices) == len(classes) * n_fewshot.
    tindices = np.concatenate(tindices)
    uindices = np.concatenate(uindices)
    labeled_dataset = torch.utils.data.Subset(dataset, tindices)
    unlabeled_dataset = torch.utils.data.Subset(dataset, uindices)
    return labeled_dataset, unlabeled_dataset
| [
"sklearn.utils.check_random_state",
"logging.debug",
"kale.loaddata.sampler.SamplingConfig",
"kale.loaddata.dataset_access.get_class_subset",
"numpy.split",
"numpy.where",
"kale.loaddata.sampler.get_labels",
"numpy.concatenate"
] | [((9735, 9754), 'kale.loaddata.sampler.get_labels', 'get_labels', (['dataset'], {}), '(dataset)\n', (9745, 9754), False, 'from kale.loaddata.sampler import get_labels, MultiDataLoader, SamplingConfig\n'), ((9963, 9995), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (9981, 9995), False, 'from sklearn.utils import check_random_state\n'), ((10367, 10391), 'numpy.concatenate', 'np.concatenate', (['tindices'], {}), '(tindices)\n', (10381, 10391), True, 'import numpy as np\n'), ((10407, 10431), 'numpy.concatenate', 'np.concatenate', (['uindices'], {}), '(uindices)\n', (10421, 10431), True, 'import numpy as np\n'), ((5338, 5370), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (5356, 5370), False, 'from sklearn.utils import check_random_state\n'), ((5740, 5768), 'logging.debug', 'logging.debug', (['"""Load source"""'], {}), "('Load source')\n", (5753, 5768), False, 'import logging\n'), ((6192, 6220), 'logging.debug', 'logging.debug', (['"""Load target"""'], {}), "('Load target')\n", (6205, 6220), False, 'import logging\n'), ((6644, 6677), 'logging.debug', 'logging.debug', (['"""Load source Test"""'], {}), "('Load source Test')\n", (6657, 6677), False, 'import logging\n'), ((6904, 6937), 'logging.debug', 'logging.debug', (['"""Load target Test"""'], {}), "('Load target Test')\n", (6917, 6937), False, 'import logging\n'), ((10223, 10253), 'numpy.split', 'np.split', (['indices', '[n_fewshot]'], {}), '(indices, [n_fewshot])\n', (10231, 10253), True, 'import numpy as np\n'), ((6008, 6072), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._source_by_split['train']", 'self.class_ids'], {}), "(self._source_by_split['train'], self.class_ids)\n", (6024, 6072), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset\n'), ((6118, 6182), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', 
(["self._source_by_split['valid']", 'self.class_ids'], {}), "(self._source_by_split['valid'], self.class_ids)\n", (6134, 6182), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset\n'), ((6460, 6524), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._target_by_split['train']", 'self.class_ids'], {}), "(self._target_by_split['train'], self.class_ids)\n", (6476, 6524), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset\n'), ((6570, 6634), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._target_by_split['valid']", 'self.class_ids'], {}), "(self._target_by_split['valid'], self.class_ids)\n", (6586, 6634), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset\n'), ((6832, 6895), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._source_by_split['test']", 'self.class_ids'], {}), "(self._source_by_split['test'], self.class_ids)\n", (6848, 6895), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset\n'), ((7092, 7155), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._target_by_split['test']", 'self.class_ids'], {}), "(self._target_by_split['test'], self.class_ids)\n", (7108, 7155), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset\n'), ((10134, 10160), 'numpy.where', 'np.where', (['(labels == class_)'], {}), '(labels == class_)\n', (10142, 10160), True, 'import numpy as np\n'), ((4385, 4413), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {'balance': '(True)'}), '(balance=True)\n', (4399, 4413), False, 'from kale.loaddata.sampler import get_labels, MultiDataLoader, SamplingConfig\n'), ((4457, 4485), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {'balance': '(True)'}), '(balance=True)\n', (4471, 4485), False, 'from kale.loaddata.sampler import get_labels, MultiDataLoader, 
SamplingConfig\n'), ((4663, 4679), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {}), '()\n', (4677, 4679), False, 'from kale.loaddata.sampler import get_labels, MultiDataLoader, SamplingConfig\n'), ((4723, 4739), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {}), '()\n', (4737, 4739), False, 'from kale.loaddata.sampler import get_labels, MultiDataLoader, SamplingConfig\n'), ((8489, 8537), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {'balance': '(True)', 'class_weights': 'None'}), '(balance=True, class_weights=None)\n', (8503, 8537), False, 'from kale.loaddata.sampler import get_labels, MultiDataLoader, SamplingConfig\n')] |
import numpy as np
def dummy_median(y_actual):
# dummy median predictor
return np.full(y_actual.shape, np.median(y_actual)) | [
"numpy.median"
] | [((112, 131), 'numpy.median', 'np.median', (['y_actual'], {}), '(y_actual)\n', (121, 131), True, 'import numpy as np\n')] |
"""
Rasterplots
Utilities for raster image manipulation (e.g. OR maps) using
PIL/Pillow and Numpy. Used for visualizing orientation maps (with and
without selectivity), polar FFT spectra and afferent model weight
patterns.
"""
import Image
import ImageOps
import numpy as np
import colorsys
rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)
hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)
def black_selectivity(image, whitelevel=0.2):
"""
Makes zero selectivity black for publication. Swaps saturation and
value and scales saturation by the whitelevel.
"""
whitefactor = 1.0 / whitelevel # 25% multiplies by 4.0
image_rgba = image.convert('RGBA')
arr = np.asarray(image_rgba).astype('float')
r, g, b, a = np.rollaxis(arr, axis=-1)
h, s, v = rgb_to_hsv(r, g, b) # s is [0,1] all v are 255.0
s *= (255.0 * whitefactor)
r, g, b = hsv_to_rgb(h, (v / 255.0), np.clip(s, 0, 255.0))
arr_stack = np.dstack((r, g, b, a))
return Image.fromarray(arr_stack.astype('uint8'), 'RGBA')
def OR_map(preference, selectivity=None):
"""
Supply the raw preference and (optionally) selectivity. Note that
selectivity multiplier affects the raw selectivity data and is
therefore automatically applied.
"""
shape = preference.shape
if selectivity is None:
selectivity = np.ones(shape, dtype=np.float64)
else:
assert preference.shape == selectivity.shape, \
"Preference and selectivity shapes must match."
value = np.ones(shape, dtype=np.int64) * 255
channels = (preference, selectivity, value)
rgb_channels = hsv_to_rgb(*channels)
arr_stack = np.dstack(rgb_channels)
return Image.fromarray(arr_stack.astype('uint8'), 'RGB')
def greyscale(arr):
"""
Converts a numpy 2D array of floats between 0.0 and 1.0 to a PIL
greyscale image.
"""
return Image.fromarray(np.uint8(arr*255))
def cf_image(cfs, coords, width=None, height=None, pos=(0,0),
size=26, border=5, bg=(0,0,0), colmap=None):
"""
Returns a PIL image showing the selected connection fields (CFS)
as supplied by extract_CFs. Does not support non-square CF
shapes.
'cfs' is an ndarray of N dstacked cfs, each of shape (X,X): (X,X,N)
'coords' is an ndarray of N coordinates: (N,2)
'width' and 'height' are either None (full) of integer grid sizes
'pos' is the starting position of the block, (x,y)
'size' and 'border' are the cf image size and the border size in pixels.
'colmap' is an RGB array shape (N,M,3) with values between 0.0 and 1.0.
"""
normalize = lambda arr: (arr - arr.min()) / (arr.max() - arr.min())
cf_im = lambda cf, size: greyscale(normalize(cf)).resize((size,size),
Image.NEAREST)
(posx, posy) = pos
(d1,d2) = zip(*coords)
density = len(set(d1))
assert density == len(set(d2)), "Not implemented for non-square sets"
height = density if height is None else height
width = density if width is None else width
assert height>0 and width>0, "Height and width must be None or greater than zero"
assert posx+width <= density, "X position and width greater than density"
assert posy+height <= density, "Y position and width greater than density"
# Functions mapping original coordinates onto consecutive grid indices
fst_map = dict(((ind,i) for (i,ind) in enumerate(sorted(set(d1)))))
snd_map = dict(((ind,i) for (i,ind) in enumerate(sorted(set(d2)))))
# Generating a dictionary from the grid coordinates to the CF index
mapped_coords = [(fst_map[fst],snd_map[snd]) for [fst,snd] in coords]
indexed_coords = dict((coord,i) for (i, coord) in enumerate(mapped_coords))
# Initialising the image
imwidth = width*size+(width-1)*border
imheight = height*size+(height-1)*border
cf_block = Image.new('RGB', (imwidth, imheight), bg)
# Building image row by row, top to bottom.
for yind in range(height):
for xind in range(width):
# Swapped coordinate system
cf_ind = indexed_coords[(yind+posy, xind+posx)]
# Get color from the color map if available
if colmap is not None:
crd1, crd2 = coords[cf_ind]
r,g,b = colmap[crd1, crd2, :]
color = (r*255,g*255,b*255)
else:
color = (255, 255, 255)
cf = cfs[:,:,cf_ind]
(cf_dim1, cf_dim2) = cf.shape
assert cf_dim1 == cf_dim2, "Only supports square CFs."
cf_image = ImageOps.colorize(cf_im(cf, size), (0, 0, 0, 0), color)
xoffset = xind*border
yoffset = yind*border
paste_coord = (xoffset+xind*size, yoffset+yind*size)
cf_block.paste(cf_image, paste_coord)
return cf_block
def resize(image, size, filter_type=Image.NEAREST):
"""
Resizes the given image to the given size using the specified
filter. Default is box filter (no interpolation) appropriate for
simulated orientation maps.
"""
return image.resize(size, filter_type)
#########################
# DEPRACATION FUNCTIONS #
#########################
def greyscale_image(array2D, normalize=False, scale_factor=255):
raise Exception("Use greyscale instead")
#########################
#########################
#########################
| [
"numpy.dstack",
"numpy.uint8",
"numpy.vectorize",
"numpy.asarray",
"Image.new",
"numpy.ones",
"numpy.clip",
"numpy.rollaxis"
] | [((307, 340), 'numpy.vectorize', 'np.vectorize', (['colorsys.rgb_to_hsv'], {}), '(colorsys.rgb_to_hsv)\n', (319, 340), True, 'import numpy as np\n'), ((354, 387), 'numpy.vectorize', 'np.vectorize', (['colorsys.hsv_to_rgb'], {}), '(colorsys.hsv_to_rgb)\n', (366, 387), True, 'import numpy as np\n'), ((732, 757), 'numpy.rollaxis', 'np.rollaxis', (['arr'], {'axis': '(-1)'}), '(arr, axis=-1)\n', (743, 757), True, 'import numpy as np\n'), ((929, 952), 'numpy.dstack', 'np.dstack', (['(r, g, b, a)'], {}), '((r, g, b, a))\n', (938, 952), True, 'import numpy as np\n'), ((1640, 1663), 'numpy.dstack', 'np.dstack', (['rgb_channels'], {}), '(rgb_channels)\n', (1649, 1663), True, 'import numpy as np\n'), ((3877, 3918), 'Image.new', 'Image.new', (['"""RGB"""', '(imwidth, imheight)', 'bg'], {}), "('RGB', (imwidth, imheight), bg)\n", (3886, 3918), False, 'import Image\n'), ((892, 912), 'numpy.clip', 'np.clip', (['s', '(0)', '(255.0)'], {}), '(s, 0, 255.0)\n', (899, 912), True, 'import numpy as np\n'), ((1326, 1358), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float64'}), '(shape, dtype=np.float64)\n', (1333, 1358), True, 'import numpy as np\n'), ((1497, 1527), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.int64'}), '(shape, dtype=np.int64)\n', (1504, 1527), True, 'import numpy as np\n'), ((1879, 1898), 'numpy.uint8', 'np.uint8', (['(arr * 255)'], {}), '(arr * 255)\n', (1887, 1898), True, 'import numpy as np\n'), ((676, 698), 'numpy.asarray', 'np.asarray', (['image_rgba'], {}), '(image_rgba)\n', (686, 698), True, 'import numpy as np\n')] |
import numpy as np
from .base import e, tau
def lu_decomposition(a):
"""Calculate matrices U and L from a given matrix A (:a).
This function will raise a ValueError if matrix A isn't LU decomposable.
:param a the matrix that should be decomposed into L and U.
:return
L: decomposed lower triangular matrix | L.U = A.
U: decomposed upper triangular matrix | L.U = A.
"""
return _lu_decomposition(a, a.shape[0] - 1)
def _lu_decomposition(a0, k):
"""Recursive call of :lu_decomposition function.
:param a0: the matrix A (it's also the parameter :a in the :lu_decomposition).
:param k: which level of reduction is the algorithm dealing. Resolves recursion.
:return: L, U and the Ak matrix.
"""
_l, ak_minus_1 = (np.identity(a0.shape[0]), a0) \
if k == 1 \
else _lu_decomposition(a0, k - 1)
tau_matrix = np.dot(tau(ak_minus_1[:, k - 1], k), e(k - 1, a0.shape[0]).T)
return _l + tau_matrix, np.dot(np.identity(a0.shape[0]) - tau_matrix,
ak_minus_1)
def qr_decomposition(a):
"""Calculate matrices Q and R from a given matrix A (:a).
This function will raise a ValueError if matrix A isn't QR decomposable.
:param a the matrix that should be decomposed into Q and R.
:return
Q: decomposed orthogonal matrix | Q.Q^T = I.
R: decomposed upper triangular matrix | QR = A.
"""
if a.shape[0] < a.shape[1]:
raise ValueError('A e R^[%i, %i] is not QR decomposable.' % a.shape)
return _qr_decomposition(a, a.shape[1])
def _qr_decomposition(a0, k):
"""Recursive call of :qr_decomposition function.
:param a0: the matrix A (it's also the parameter :a in the :qr_decomposition).
:param k: which level of reduction is the algorithm dealing. Resolves recursion.
:return: Q and R matrix.
"""
_q, ak_minus_1 = (
np.identity(a0.shape[0]), a0) if k == 1 else _qr_decomposition(a0,
k - 1)
x = ak_minus_1[:, k - 1].reshape((a0.shape[0], 1))
y = x + np.sign(x[k - 1]) * np.linalg.norm(x) * e(k - 1, a0.shape[0])
h = np.identity(a0.shape[0]) - 2 * np.dot(y, y.T) / np.dot(y.T, y)
del x, y
return np.dot(_q, h.T), np.dot(h, ak_minus_1)
| [
"numpy.dot",
"numpy.linalg.norm",
"numpy.identity",
"numpy.sign"
] | [((2195, 2219), 'numpy.identity', 'np.identity', (['a0.shape[0]'], {}), '(a0.shape[0])\n', (2206, 2219), True, 'import numpy as np\n'), ((2283, 2298), 'numpy.dot', 'np.dot', (['_q', 'h.T'], {}), '(_q, h.T)\n', (2289, 2298), True, 'import numpy as np\n'), ((2300, 2321), 'numpy.dot', 'np.dot', (['h', 'ak_minus_1'], {}), '(h, ak_minus_1)\n', (2306, 2321), True, 'import numpy as np\n'), ((780, 804), 'numpy.identity', 'np.identity', (['a0.shape[0]'], {}), '(a0.shape[0])\n', (791, 804), True, 'import numpy as np\n'), ((1913, 1937), 'numpy.identity', 'np.identity', (['a0.shape[0]'], {}), '(a0.shape[0])\n', (1924, 1937), True, 'import numpy as np\n'), ((2243, 2257), 'numpy.dot', 'np.dot', (['y.T', 'y'], {}), '(y.T, y)\n', (2249, 2257), True, 'import numpy as np\n'), ((989, 1013), 'numpy.identity', 'np.identity', (['a0.shape[0]'], {}), '(a0.shape[0])\n', (1000, 1013), True, 'import numpy as np\n'), ((2125, 2142), 'numpy.sign', 'np.sign', (['x[k - 1]'], {}), '(x[k - 1])\n', (2132, 2142), True, 'import numpy as np\n'), ((2145, 2162), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (2159, 2162), True, 'import numpy as np\n'), ((2226, 2240), 'numpy.dot', 'np.dot', (['y', 'y.T'], {}), '(y, y.T)\n', (2232, 2240), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import time
import numpy as np
import tensorflow as tf
import os
class NERTagger(object):
"""The NER Tagger Model."""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.seq_length = seq_length = config.seq_length
size = config.hidden_size
vocab_size = config.vocab_size
tag_size = config.tag_size
self._input_data = tf.placeholder(tf.int32, [batch_size, seq_length])
self._targets = tf.placeholder(tf.int32, [batch_size, seq_length])
# Check if Model is Training
self.is_training = is_training
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)
if is_training and config.keep_prob < 1:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)
self._initial_state = cell.zero_state(batch_size, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=tf.float32)
inputs = tf.nn.embedding_lookup(embedding, self._input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
outputs = []
state = self._initial_state
with tf.variable_scope("ner_lstm"):
for time_step in range(seq_length):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(outputs, 1), [-1, size])
softmax_w = tf.get_variable(
"softmax_w", [size, tag_size], dtype=tf.float32)
softmax_b = tf.get_variable("softmax_b", [tag_size], dtype=tf.float32)
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
logits=[logits],
targets=[tf.reshape(self._targets, [-1])],
weights=[tf.ones([batch_size * seq_length], dtype=tf.float32)])
# Fetch Reults in session.run()
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
self._logits = logits
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
self.saver = tf.train.Saver(tf.global_variables())
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def logits(self):
return self._logits
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
def predict_tag(self, sess, tags, text):
x = np.array(text)
feed = {self._input_data: x}
logits = sess.run([self._logits], feed_dict=feed)
results = np.argmax(logits, 1)
id2labels = dict(zip(tags.values(), tags.keys()))
labels = map(id2labels.get, results)
return labels
def run(session, model, dataset, eval_op, ner_train_dir, epoch):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
step = 0
while dataset.has_next():
step = step + 1
(x, y) = dataset.next_batch(model.batch_size)
fetches = [model.cost, eval_op]
feed_dict = {}
feed_dict[model.input_data] = x
feed_dict[model.targets] = y
cost, _ = session.run(fetches, feed_dict)
costs += cost
iters += model.seq_length
# Save Model to CheckPoint when is_training is True
if model.is_training:
checkpoint_path = os.path.join(ner_train_dir, "lstm/model.ckpt")
model.saver.save(session, checkpoint_path)
print("Model Saved... at time step " + str(step))
return np.exp(costs / iters) | [
"tensorflow.reduce_sum",
"tensorflow.trainable_variables",
"numpy.argmax",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.matmul",
"tensorflow.assign",
"tensorflow.Variable",
"tensorflow.global_variables",
"numpy.exp",
"os.path.join"... | [((4164, 4175), 'time.time', 'time.time', ([], {}), '()\n', (4173, 4175), False, 'import time\n'), ((4851, 4872), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (4857, 4872), True, 'import numpy as np\n'), ((462, 512), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, seq_length]'], {}), '(tf.int32, [batch_size, seq_length])\n', (476, 512), True, 'import tensorflow as tf\n'), ((537, 587), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, seq_length]'], {}), '(tf.int32, [batch_size, seq_length])\n', (551, 587), True, 'import tensorflow as tf\n'), ((686, 758), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['size'], {'forget_bias': '(0.0)', 'state_is_tuple': '(True)'}), '(size, forget_bias=0.0, state_is_tuple=True)\n', (714, 758), True, 'import tensorflow as tf\n'), ((940, 1026), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['([lstm_cell] * config.num_layers)'], {'state_is_tuple': '(True)'}), '([lstm_cell] * config.num_layers, state_is_tuple\n =True)\n', (967, 1026), True, 'import tensorflow as tf\n'), ((1850, 1914), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_w"""', '[size, tag_size]'], {'dtype': 'tf.float32'}), "('softmax_w', [size, tag_size], dtype=tf.float32)\n", (1865, 1914), True, 'import tensorflow as tf\n'), ((1948, 2006), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_b"""', '[tag_size]'], {'dtype': 'tf.float32'}), "('softmax_b', [tag_size], dtype=tf.float32)\n", (1963, 2006), True, 'import tensorflow as tf\n'), ((2478, 2511), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (2489, 2511), True, 'import tensorflow as tf\n'), ((2528, 2552), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2550, 2552), True, 'import tensorflow as tf\n'), ((2706, 2749), 'tensorflow.train.GradientDescentOptimizer', 
'tf.train.GradientDescentOptimizer', (['self._lr'], {}), '(self._lr)\n', (2739, 2749), True, 'import tensorflow as tf\n'), ((2844, 2906), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]', 'name': '"""new_learning_rate"""'}), "(tf.float32, shape=[], name='new_learning_rate')\n", (2858, 2906), True, 'import tensorflow as tf\n'), ((2946, 2979), 'tensorflow.assign', 'tf.assign', (['self._lr', 'self._new_lr'], {}), '(self._lr, self._new_lr)\n', (2955, 2979), True, 'import tensorflow as tf\n'), ((3761, 3775), 'numpy.array', 'np.array', (['text'], {}), '(text)\n', (3769, 3775), True, 'import numpy as np\n'), ((3890, 3910), 'numpy.argmax', 'np.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (3899, 3910), True, 'import numpy as np\n'), ((4683, 4729), 'os.path.join', 'os.path.join', (['ner_train_dir', '"""lstm/model.ckpt"""'], {}), "(ner_train_dir, 'lstm/model.ckpt')\n", (4695, 4729), False, 'import os\n'), ((832, 907), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['lstm_cell'], {'output_keep_prob': 'config.keep_prob'}), '(lstm_cell, output_keep_prob=config.keep_prob)\n', (861, 907), True, 'import tensorflow as tf\n'), ((1107, 1126), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (1116, 1126), True, 'import tensorflow as tf\n'), ((1152, 1218), 'tensorflow.get_variable', 'tf.get_variable', (['"""embedding"""', '[vocab_size, size]'], {'dtype': 'tf.float32'}), "('embedding', [vocab_size, size], dtype=tf.float32)\n", (1167, 1218), True, 'import tensorflow as tf\n'), ((1257, 1308), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'self._input_data'], {}), '(embedding, self._input_data)\n', (1279, 1308), True, 'import tensorflow as tf\n'), ((1380, 1419), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', 'config.keep_prob'], {}), '(inputs, config.keep_prob)\n', (1393, 1419), True, 'import tensorflow as tf\n'), ((1491, 1520), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""ner_lstm"""'], {}), "('ner_lstm')\n", (1508, 1520), True, 'import tensorflow as tf\n'), ((1795, 1816), 'tensorflow.concat', 'tf.concat', (['outputs', '(1)'], {}), '(outputs, 1)\n', (1804, 1816), True, 'import tensorflow as tf\n'), ((2024, 2052), 'tensorflow.matmul', 'tf.matmul', (['output', 'softmax_w'], {}), '(output, softmax_w)\n', (2033, 2052), True, 'import tensorflow as tf\n'), ((2361, 2380), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (2374, 2380), True, 'import tensorflow as tf\n'), ((2595, 2620), 'tensorflow.gradients', 'tf.gradients', (['cost', 'tvars'], {}), '(cost, tvars)\n', (2607, 2620), True, 'import tensorflow as tf\n'), ((3016, 3037), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (3035, 3037), True, 'import tensorflow as tf\n'), ((2182, 2213), 'tensorflow.reshape', 'tf.reshape', (['self._targets', '[-1]'], {}), '(self._targets, [-1])\n', (2192, 2213), True, 'import tensorflow as tf\n'), ((2237, 2289), 'tensorflow.ones', 'tf.ones', (['[batch_size * seq_length]'], {'dtype': 'tf.float32'}), '([batch_size * seq_length], dtype=tf.float32)\n', (2244, 2289), True, 'import tensorflow as tf\n'), ((1604, 1627), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (1625, 1627), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
"""Investigates the distribution for link Markov models.
The link Markov model is based on the linking behavior of a given
linkograph. Since the links in a linkograph are determined by
ontology, the transition model is depending on the ontology. This
script creates a sequence of random Markov models and considers how
the link markov models might be used to profile them.
"""
import argparse # For command line parsing.
import numpy as np # For matrices.
import time # For getting the time to use as a random seed.
import math # For modf.
import json # For manipulating json files.
import matplotlib.pyplot as plt # For graphing.
import markov.Model as markel
def genSingleOntologyStats(ontNext, ontLink, size, modelNum, runNum,
precision=2, seeds=None):
"""Generate the stats on link models for a given ontology.
inputs:
ontNext: ontology used to generate Markov model that create the
next state.
ontLink: ontology used for constructing linkographs.
size: the size of linkographs to consider.
modelNum: the number of models.
runNum: the number of linkographs to consider for each linkograph
size.
precision: the number of decimals places to use for the Markov
models.
seeds: a list of seeds to use for the generated next Markov
models. The size of the list should be the same as the number of
runs.
output:
a modelNum x size_of_transition_matrix array. The size of the
transition matrix is the square of the number of abstraction
classes in ontLink, the ontology used to create the
linkographs. The rows index the model and the columns are a linear
ordering of the average of the link matrix entries.
"""
ontSize = len(ontNext)
absClasses = list(ontNext.keys())
absClasses.sort()
results = np.zeros((modelNum, len(absClasses)**2))
if seeds is None:
seeds = [time.time()*i for i in range(modelNum)]
models = []
# Create the generating models
for i in range(modelNum):
m = markel.genModelFromOntology(ontology=ontNext,
precision=2,
seed=seeds[i])
# Storing the model and the current state
models.append(m)
for modelIndex, m in enumerate(models):
linkModels = np.zeros((ontSize, ontSize, runNum))
print('Model: {0}'.format(modelIndex))
for i in range(runNum):
# Randomize the initial state
m.state = m.random.randint(1, len(m.absClasses)) - 1
linko = m.genLinkograph(size, ontology=ontLink)
newModel = markel.genModelFromLinko(linko,
precision=precision,
ontology=None,
seed=None,
method='link_predictor',
linkNum=1)
linkModels[:, :, i] = newModel.tMatrix
# Find the mean.
results[modelIndex][:] = np.mean(linkModels, axis=-1).flatten()
return results
def genLinkMarkov(linkoSize, model, precision=2, timeSize=7):
"""Generates a link Markov from model generated linkograph.
inputs:
linkoSize: the size of linkograph to base the link Markov model
off of.
model: the Markov model to use. Note that the model must have an
ontology in order to generate the linkographs.
precicision: the number of decimal places to use for the
link Markov model.
timeSize = the size of integers to use for seeding the random
number generator of the returned Markov model.
output:
A link Markov model based off a linkoSize linkograph generated by
the provided Markov model.
"""
seed = int(math.modf(time.time())[0]*(10**timeSize))
# generate the linkograph
linko = model.genLinkograph(linkoSize)
# create the link model
model = genModelFromLinko(linko, precision=precision,
ontology=model.ontology, seed=seed,
method='link_predictor', linkNum=1)
return model
if __name__ == '__main__':
info = "Investigates the distribution of link markov models."
parser = argparse.ArgumentParser(description=info)
parser.add_argument('ontNext', metavar='ONTOLOGY_NEXT.json',
nargs=1,
help='the ontology file for producing.')
parser.add_argument('ontLink', metavar='ONTOLOGY_LINK.json',
nargs=1,
help='the ontology file for learning.')
parser.add_argument('-s', '--size', type=int, default = 100,
help='linkograph size.')
parser.add_argument('-n', '--modelNum', type=int, default = 100,
help='number of generating models.')
parser.add_argument('-r', '--runs', type=int, default = 100,
help='the number of runs.')
parser.add_argument('-p', '--precision', type=int, default = 2,
help='the number of runs.')
args = parser.parse_args()
# Extract the ontology
ontNext = None
with open(args.ontNext[0], 'r') as ontNextFile:
ontNext = json.load(ontNextFile)
ontLink = None
with open(args.ontLink[0], 'r') as ontLinkFile:
ontLink = json.load(ontLinkFile)
seed = int(math.modf(time.time())[0]*(10**7))
results = genSingleOntologyStats(ontNext=ontNext,
ontLink=ontLink,
size=args.size,
modelNum=args.modelNum,
runNum=args.runs,
precision=args.precision)
absClasses = list(ontNext.keys())
absClasses.sort()
plt.figure(1)
for transitions in results:
plt.plot(transitions)
plt.title('Average Transition Probability')
plt.xlabel('Transition Number')
plt.ylabel('Average Transition Probability')
plt.show()
| [
"matplotlib.pyplot.title",
"json.load",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"markov.Model.genModelFromLinko",
"numpy.zeros",
"time.time",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"markov.Model... | [((4361, 4402), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'info'}), '(description=info)\n', (4384, 4402), False, 'import argparse\n'), ((5955, 5968), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5965, 5968), True, 'import matplotlib.pyplot as plt\n'), ((6044, 6087), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Transition Probability"""'], {}), "('Average Transition Probability')\n", (6053, 6087), True, 'import matplotlib.pyplot as plt\n'), ((6092, 6123), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Transition Number"""'], {}), "('Transition Number')\n", (6102, 6123), True, 'import matplotlib.pyplot as plt\n'), ((6128, 6172), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Transition Probability"""'], {}), "('Average Transition Probability')\n", (6138, 6172), True, 'import matplotlib.pyplot as plt\n'), ((6178, 6188), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6186, 6188), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2149), 'markov.Model.genModelFromOntology', 'markel.genModelFromOntology', ([], {'ontology': 'ontNext', 'precision': '(2)', 'seed': 'seeds[i]'}), '(ontology=ontNext, precision=2, seed=seeds[i])\n', (2103, 2149), True, 'import markov.Model as markel\n'), ((2373, 2409), 'numpy.zeros', 'np.zeros', (['(ontSize, ontSize, runNum)'], {}), '((ontSize, ontSize, runNum))\n', (2381, 2409), True, 'import numpy as np\n'), ((5361, 5383), 'json.load', 'json.load', (['ontNextFile'], {}), '(ontNextFile)\n', (5370, 5383), False, 'import json\n'), ((5474, 5496), 'json.load', 'json.load', (['ontLinkFile'], {}), '(ontLinkFile)\n', (5483, 5496), False, 'import json\n'), ((6009, 6030), 'matplotlib.pyplot.plot', 'plt.plot', (['transitions'], {}), '(transitions)\n', (6017, 6030), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2803), 'markov.Model.genModelFromLinko', 'markel.genModelFromLinko', (['linko'], {'precision': 'precision', 'ontology': 'None', 'seed': 
'None', 'method': '"""link_predictor"""', 'linkNum': '(1)'}), "(linko, precision=precision, ontology=None, seed=\n None, method='link_predictor', linkNum=1)\n", (2708, 2803), True, 'import markov.Model as markel\n'), ((1942, 1953), 'time.time', 'time.time', ([], {}), '()\n', (1951, 1953), False, 'import time\n'), ((3150, 3178), 'numpy.mean', 'np.mean', (['linkModels'], {'axis': '(-1)'}), '(linkModels, axis=-1)\n', (3157, 3178), True, 'import numpy as np\n'), ((3905, 3916), 'time.time', 'time.time', ([], {}), '()\n', (3914, 3916), False, 'import time\n'), ((5523, 5534), 'time.time', 'time.time', ([], {}), '()\n', (5532, 5534), False, 'import time\n')] |
import numpy as np
def mrisensesim(size, ncoils=8, array_cent=None, coil_width=2, n_rings=None, phi=0):
"""Apply simulated sensitivity maps. Based on a script by <NAME>.
Args:
size (tuple): Size of the image array for the sensitivity coils.
nc_range (int, default: 8): Number of coils to simulate.
array_cent (tuple, default: 0): Location of the center of the coil
array.
coil_width (double, default: 2): Parameter governing the width of the
coil, multiplied by actual image dimension.
n_rings (int, default: ncoils // 4): Number of rings for a
cylindrical hardware set-up.
phi (double, default: 0): Parameter for rotating coil geometry.
Returns:
list: A list of dimensions (ncoils, (N)), specifying spatially-varying
sensitivity maps for each coil.
"""
if array_cent is None:
c_shift = [0, 0, 0]
elif len(array_cent) < 3:
c_shift = array_cent + (0,)
else:
c_shift = array_cent
c_width = coil_width * min(size)
if len(size) > 2:
if n_rings is None:
n_rings = ncoils // 4
c_rad = min(size[0:1]) / 2
smap = []
if len(size) > 2:
zz, yy, xx = np.meshgrid(
range(size[2]), range(size[1]), range(size[0]), indexing="ij"
)
else:
yy, xx = np.meshgrid(range(size[1]), range(size[0]), indexing="ij")
if ncoils > 1:
x0 = np.zeros((ncoils,))
y0 = np.zeros((ncoils,))
z0 = np.zeros((ncoils,))
for i in range(ncoils):
if len(size) > 2:
theta = np.radians((i - 1) * 360 / (ncoils + n_rings) + phi)
else:
theta = np.radians((i - 1) * 360 / ncoils + phi)
x0[i] = c_rad * np.cos(theta) + size[0] / 2
y0[i] = c_rad * np.sin(theta) + size[1] / 2
if len(size) > 2:
z0[i] = (size[2] / (n_rings + 1)) * (i // n_rings)
smap.append(
np.exp(
-1
* ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2 + (zz - z0[i]) ** 2)
/ (2 * c_width)
)
)
else:
smap.append(
np.exp(-1 * ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2) / (2 * c_width))
)
else:
x0 = c_shift[0]
y0 = c_shift[1]
z0 = c_shift[2]
if len(size) > 2:
smap = np.exp(
-1 * ((xx - x0) ** 2 + (yy - y0) ** 2 + (zz - z0) ** 2) / (2 * c_width)
)
else:
smap = np.exp(-1 * ((xx - x0) ** 2 + (yy - y0) ** 2) / (2 * c_width))
side_mat = np.arange(int(size[0] // 2) - 20, 1, -1)
side_mat = np.reshape(side_mat, (1,) + side_mat.shape) * np.ones(shape=(size[1], 1))
cent_zeros = np.zeros(shape=(size[1], size[0] - side_mat.shape[1] * 2))
ph = np.concatenate((side_mat, cent_zeros, side_mat), axis=1) / 10
if len(size) > 2:
ph = np.reshape(ph, (1,) + ph.shape)
for i, s in enumerate(smap):
smap[i] = s * np.exp(i * 1j * ph * np.pi / 180)
return smap
| [
"numpy.radians",
"numpy.zeros",
"numpy.ones",
"numpy.sin",
"numpy.exp",
"numpy.reshape",
"numpy.cos",
"numpy.concatenate"
] | [((2884, 2942), 'numpy.zeros', 'np.zeros', ([], {'shape': '(size[1], size[0] - side_mat.shape[1] * 2)'}), '(shape=(size[1], size[0] - side_mat.shape[1] * 2))\n', (2892, 2942), True, 'import numpy as np\n'), ((1466, 1485), 'numpy.zeros', 'np.zeros', (['(ncoils,)'], {}), '((ncoils,))\n', (1474, 1485), True, 'import numpy as np\n'), ((1499, 1518), 'numpy.zeros', 'np.zeros', (['(ncoils,)'], {}), '((ncoils,))\n', (1507, 1518), True, 'import numpy as np\n'), ((1532, 1551), 'numpy.zeros', 'np.zeros', (['(ncoils,)'], {}), '((ncoils,))\n', (1540, 1551), True, 'import numpy as np\n'), ((2793, 2836), 'numpy.reshape', 'np.reshape', (['side_mat', '((1,) + side_mat.shape)'], {}), '(side_mat, (1,) + side_mat.shape)\n', (2803, 2836), True, 'import numpy as np\n'), ((2839, 2866), 'numpy.ones', 'np.ones', ([], {'shape': '(size[1], 1)'}), '(shape=(size[1], 1))\n', (2846, 2866), True, 'import numpy as np\n'), ((2953, 3009), 'numpy.concatenate', 'np.concatenate', (['(side_mat, cent_zeros, side_mat)'], {'axis': '(1)'}), '((side_mat, cent_zeros, side_mat), axis=1)\n', (2967, 3009), True, 'import numpy as np\n'), ((3050, 3081), 'numpy.reshape', 'np.reshape', (['ph', '((1,) + ph.shape)'], {}), '(ph, (1,) + ph.shape)\n', (3060, 3081), True, 'import numpy as np\n'), ((2515, 2594), 'numpy.exp', 'np.exp', (['(-1 * ((xx - x0) ** 2 + (yy - y0) ** 2 + (zz - z0) ** 2) / (2 * c_width))'], {}), '(-1 * ((xx - x0) ** 2 + (yy - y0) ** 2 + (zz - z0) ** 2) / (2 * c_width))\n', (2521, 2594), True, 'import numpy as np\n'), ((2658, 2720), 'numpy.exp', 'np.exp', (['(-1 * ((xx - x0) ** 2 + (yy - y0) ** 2) / (2 * c_width))'], {}), '(-1 * ((xx - x0) ** 2 + (yy - y0) ** 2) / (2 * c_width))\n', (2664, 2720), True, 'import numpy as np\n'), ((3138, 3173), 'numpy.exp', 'np.exp', (['(i * 1.0j * ph * np.pi / 180)'], {}), '(i * 1.0j * ph * np.pi / 180)\n', (3144, 3173), True, 'import numpy as np\n'), ((1639, 1691), 'numpy.radians', 'np.radians', (['((i - 1) * 360 / (ncoils + n_rings) + phi)'], {}), '((i - 1) * 360 
/ (ncoils + n_rings) + phi)\n', (1649, 1691), True, 'import numpy as np\n'), ((1734, 1774), 'numpy.radians', 'np.radians', (['((i - 1) * 360 / ncoils + phi)'], {}), '((i - 1) * 360 / ncoils + phi)\n', (1744, 1774), True, 'import numpy as np\n'), ((1803, 1816), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1809, 1816), True, 'import numpy as np\n'), ((1859, 1872), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1865, 1872), True, 'import numpy as np\n'), ((2033, 2126), 'numpy.exp', 'np.exp', (['(-1 * ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2 + (zz - z0[i]) ** 2) / (2 *\n c_width))'], {}), '(-1 * ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2 + (zz - z0[i]) ** 2) / (\n 2 * c_width))\n', (2039, 2126), True, 'import numpy as np\n'), ((2301, 2369), 'numpy.exp', 'np.exp', (['(-1 * ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2) / (2 * c_width))'], {}), '(-1 * ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2) / (2 * c_width))\n', (2307, 2369), True, 'import numpy as np\n')] |
# Steven 05/17/2020
# clustering model design
from time import time
import pandas as pd
import numpy as np
# from sklearn.decomposition import PCA
# from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
# from sklearn.cluster import DBSCAN
# from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler # StandardScaler
# from sklearn.model_selection import StratifiedKFold
# from sklearn.model_selection import GridSearchCV
from sklearn.metrics import silhouette_score
# from sklearn.metrics import make_scorer
# from sklearn.metrics import calinski_harabasz_score
from sklearn.metrics import davies_bouldin_score
from sklearn.neighbors._nearest_centroid import NearestCentroid
import matplotlib.pyplot as plt
def createKMeans(k=2):
    """Build a KMeans clustering model with ``k`` clusters and a fixed seed.

    The fixed ``random_state`` makes repeated trainings reproducible.
    """
    return KMeans(n_clusters=k, random_state=0)
def s_score_silhouette(estimator, X):
    """Scorer: fit ``estimator`` on ``X`` and return the silhouette score.

    Returns 0 when the estimator yields a single cluster, because the
    silhouette coefficient is undefined for fewer than two clusters.
    """
    predicted = estimator.fit_predict(X)
    if len(set(predicted)) > 1:
        return silhouette_score(X, predicted, metric='euclidean')
    return 0
def squaredDistances(a, b):
    """Return the sum of squared element-wise differences between a and b."""
    diff = a - b
    return np.sum(diff ** 2)
def calculateSSE2(data, labels, centroids):
    """Compute the sum of squared errors from a pandas DataFrame.

    For each centroid, collect the rows of ``data`` whose label matches
    the cluster index, then add their squared distances to the centroid.
    """
    print(data.shape, type(data), centroids.shape)
    sse = 0
    for cluster_id, centroid in enumerate(centroids):
        members = []
        for row in range(data.shape[0]):
            if labels[row] == cluster_id:
                members.append(data.iloc[row, :].values)
        sse += squaredDistances(members, centroid)
    return sse
def calculateSSE(data, labels, centroids):
    """Compute the sum of squared errors over a numpy feature matrix.

    For each centroid, select the rows of ``data`` assigned to that
    cluster and accumulate their squared distances to the centroid.
    """
    sse = 0
    for cluster_id, centroid in enumerate(centroids):
        members = data[np.where(labels == cluster_id)[0], :]
        sse += squaredDistances(members, centroid)
    return sse
def calculateDaviesBouldin(data, labels):
    """Return the Davies-Bouldin index for the labelling (lower is better)."""
    return davies_bouldin_score(data, labels)
def getModelMeasure(data, labels):
    """Compute quality measures for a clustering of ``data``.

    Returns ``(sse, dbValue, csm, k)`` where ``k`` is the number of
    distinct clusters.  SSE is computed against nearest-centroid
    centroids; the Davies-Bouldin and silhouette computations are
    disabled here, so ``dbValue`` and ``csm`` remain 0.
    """
    sse, dbValue, csm = 0, 0, 0
    k = len(set(labels))
    if k > 1:
        # Derive per-cluster centroids from the labelling, then score SSE.
        clf = NearestCentroid()
        clf.fit(data, labels)
        sse = calculateSSE(data, labels, clf.centroids_)
    sse = round(sse, 4)
    csm = round(csm, 4)
    dbValue = round(dbValue, 4)
    print('SSE=', sse, 'DB=', dbValue, 'CSM=', csm, 'clusters=', k)
    return sse, dbValue, csm, k
def preprocessingData(data, N=5):
    """Scale every feature of ``data`` into [0, 1] with MinMaxScaler.

    ``N`` is accepted for signature compatibility but is unused.
    """
    return MinMaxScaler().fit_transform(data)
def KMeansModelTrain(dataName, data, N=10):
    """Train K-Means for k = 2..N-1, log timing/quality, plot the SSE
    curve, and return the best K chosen by the elbow (largest SSE drop).

    Parameters
    ----------
    dataName : str
        Dataset name used in logs and plot titles.
    data : array-like
        Feature matrix; it is min-max scaled before training.
    N : int
        Exclusive upper bound for the number of clusters tried.

    Returns
    -------
    The best K according to ``getBestkFromSse``.
    """
    data = preprocessingData(data)
    print('datashape=', data.shape)
    columns = ['Dataset', 'Algorithm', 'K', 'tt(s)', 'SSE', 'DB', 'CSM']
    modelName = 'K-Means'
    dbName = dataName + str(data.shape)  # loop-invariant, hoisted
    rows = []
    for i in range(2, N, 1):
        model = createKMeans(i)
        t = time()
        model.fit(data)
        tt = round(time() - t, 4)
        print("\ndataSet:%s model:%s iter i=%d run in %.2fs" % (dataName, modelName, i, tt))
        sse, dbValue, csm, k = getModelMeasure(data, model.labels_)
        rows.append([dbName, modelName, k, tt, sse, dbValue, csm])
    # Build the result frame once: DataFrame.append was deprecated and then
    # removed in pandas 2.0, and appending row-by-row is quadratic anyway.
    df = pd.DataFrame(rows, columns=columns)
    print('Train result:\n', df)
    plotModel(dataName, modelName, df)
    index, bestK = getBestkFromSse(dataName, modelName, df)
    bestLine = df.iloc[index, :]
    print('bestLine=', index, 'bestK=', bestK, 'df=\n', bestLine)
    return bestK
def plotModel(datasetName, modelName, df):
    """Plot the SSE curve and its decrease gradient against K.

    Expects ``df`` to carry 'K' and 'SSE' columns; shows the plot.
    """
    ks = df.loc[:, ['K']].values
    sses = df.loc[:, ['SSE']].values
    # gradients[p] holds the SSE drop from K=ks[p-1] to K=ks[p].
    gradients = np.zeros(len(ks))
    for pos in range(len(ks) - 1):
        gradients[pos + 1] = sses[pos] - sses[pos + 1]
    ax = plt.subplot(1, 1, 1)
    title = datasetName + '_' + modelName + '_SSE'
    plt.title(title)
    ax.plot(ks, sses, label='SSE', c='k', marker='o')
    ax.plot(ks, gradients, label='sse decrease gradient', c='b', marker='.')
    ax.set_ylabel('SSE')
    ax.set_xlabel('K clusters')
    ax.legend()
    ax.grid()
    plt.xticks(np.arange(1, 12))
    plt.show()
def getBestkFromSse(datasetName, modelName, df):
    """Pick the best K via the elbow rule: the largest drop in SSE
    between consecutive K values.

    Returns ``(index, bestK)``: the row index of the elbow and the
    corresponding K value from ``df``.
    """
    print('df=\n', df)
    ks = df.loc[:, ['K']].values
    sses = df.loc[:, ['SSE']].values
    gradients = np.zeros(len(ks))
    for pos in range(1, len(ks)):
        gradients[pos] = sses[pos - 1] - sses[pos]
    index = np.argmax(gradients)
    bestK = ks[index][0]
    return index, bestK
def KMeansModel(k, data):
    """Fit K-Means with ``k`` clusters on min-max scaled ``data``.

    Returns ``(k, centroids, labels)`` where the centroids are recovered
    with a nearest-centroid fit on the K-Means labelling.
    """
    scaled = preprocessingData(data)
    model = createKMeans(k)
    model.fit(scaled)
    centroid_finder = NearestCentroid()
    centroid_finder.fit(scaled, model.labels_)
    return k, centroid_finder.centroids_, model.labels_
| [
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.argmax",
"sklearn.cluster.KMeans",
"sklearn.preprocessing.MinMaxScaler",
"time.time",
"sklearn.metrics.silhouette_score",
"numpy.where",
"numpy.arange",
"sklearn.neighbor... | [((811, 847), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'random_state': '(0)'}), '(n_clusters=k, random_state=0)\n', (817, 847), False, 'from sklearn.cluster import KMeans\n'), ((1380, 1400), 'numpy.sum', 'np.sum', (['((a - b) ** 2)'], {}), '((a - b) ** 2)\n', (1386, 1400), True, 'import numpy as np\n'), ((2439, 2473), 'sklearn.metrics.davies_bouldin_score', 'davies_bouldin_score', (['data', 'labels'], {}), '(data, labels)\n', (2459, 2473), False, 'from sklearn.metrics import davies_bouldin_score\n'), ((3245, 3259), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3257, 3259), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3485, 3499), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3497, 3499), True, 'import pandas as pd\n'), ((4900, 4920), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (4911, 4920), True, 'import matplotlib.pyplot as plt\n'), ((4976, 4992), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4985, 4992), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5287), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5285, 5287), True, 'import matplotlib.pyplot as plt\n'), ((5576, 5588), 'numpy.argmax', 'np.argmax', (['z'], {}), '(z)\n', (5585, 5588), True, 'import numpy as np\n'), ((5791, 5808), 'sklearn.neighbors._nearest_centroid.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (5806, 5808), False, 'from sklearn.neighbors._nearest_centroid import NearestCentroid\n'), ((1134, 1182), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['X', 'labels_'], {'metric': '"""euclidean"""'}), "(X, labels_, metric='euclidean')\n", (1150, 1182), False, 'from sklearn.metrics import silhouette_score\n'), ((2744, 2761), 'sklearn.neighbors._nearest_centroid.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (2759, 2761), False, 'from sklearn.neighbors._nearest_centroid import NearestCentroid\n'), 
((3722, 3728), 'time.time', 'time', ([], {}), '()\n', (3726, 3728), False, 'from time import time\n'), ((4009, 4087), 'pandas.DataFrame', 'pd.DataFrame', (['[[dbName, modelName, k, tt, sse, dbValue, csm]]'], {'columns': 'columns'}), '([[dbName, modelName, k, tt, sse, dbValue, csm]], columns=columns)\n', (4021, 4087), True, 'import pandas as pd\n'), ((5213, 5229), 'numpy.arange', 'np.arange', (['(1)', '(12)'], {}), '(1, 12)\n', (5222, 5229), True, 'import numpy as np\n'), ((3773, 3779), 'time.time', 'time', ([], {}), '()\n', (3777, 3779), False, 'from time import time\n'), ((2295, 2316), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (2303, 2316), True, 'import numpy as np\n')] |
from typing import List
import gym
import gym.spaces
import numpy as np
from pbrl.competitive.agent import Agent
class CompetitiveEnv:
    """Wrapper around a multi-role (tuple-space) gym environment.

    The wrapped env must expose ``gym.spaces.Tuple`` observation/action
    spaces, one entry per role.  When ``index`` is given, the wrapper
    presents the single-agent view for that role; actions for the roles
    listed in ``self.indices`` are produced by the agents registered in
    ``self.agents``.  ``init``, ``before_reset`` and ``after_done`` are
    subclass hooks.
    """

    def __init__(self, env: gym.Env, index=None, **kwargs):
        self.index = index
        assert isinstance(env.observation_space, gym.spaces.Tuple)
        if self.index is not None:
            # Single-role view: expose only this role's sub-spaces.
            self.observation_space = env.observation_space.spaces[self.index]
            self.action_space = env.action_space.spaces[self.index]
        else:
            self.observation_space = env.observation_space
            self.action_space = env.action_space
        # Number of roles = number of entries in the tuple space.
        self.role_num = len(env.observation_space.spaces)
        self.env = env
        # Cached results of the most recent step(), for subclass hooks.
        self.observations = None
        self.rewards = None
        self.dones = None
        self.infos = None
        self.times_reset = 0
        self.random_state = np.random.RandomState()
        # Role indices controlled by the built-in agents below (parallel lists).
        self.indices = []
        self.agents: List[Agent] = []
        # Free-form per-instance state for subclasses.
        self.state = dict()
        self.init(**kwargs)

    def init(self, **kwargs):
        # Subclass hook: extra initialisation; receives the constructor's **kwargs.
        pass

    def before_reset(self):
        # Subclass hook: invoked at the start of reset().
        pass

    def after_done(self):
        # Subclass hook: invoked from step() when any role reports done.
        pass

    def step(self, action=None):
        """Advance the wrapped env one step.

        ``action`` is the full per-role action tuple when ``index`` is
        None, otherwise this role's action alone.  Built-in agents fill
        in actions for their roles; other entries may stay None.

        NOTE(review): when ``index`` is set, the per-role results are
        returned as a generator expression rather than a tuple —
        unpacking works once, but it cannot be re-iterated; confirm
        callers only unpack it.
        """
        if self.index is None and action is not None:
            actions = action
        else:
            # Object array so individual entries may remain None.
            actions = np.repeat(None, self.role_num)
            if self.index is not None:
                actions[self.index] = action
        # action can be None when evaluating
        observations = np.asarray(self.observations)
        for agent, index in zip(self.agents, self.indices):
            observations_ = observations[index]
            # NOTE(review): assumes agent.step returns an array-like with
            # .tolist() — confirm against the Agent interface.
            actions_ = agent.step(observations_).tolist()
            actions[index] = actions_
        results = self.env.step(tuple(actions))
        self.observations, self.rewards, self.dones, self.infos = results
        if True in self.dones:
            self.after_done()
        if self.index is not None:
            return (arr[self.index] for arr in (self.observations, self.rewards, self.dones, self.infos))
        else:
            return self.observations, self.rewards, self.dones, self.infos

    def reset(self):
        """Reset the wrapped env and all registered agents.

        Returns this role's observation when ``index`` is set, otherwise
        the full per-role observation tuple.
        """
        self.before_reset()
        self.times_reset += 1
        self.observations = self.env.reset()
        for agent in self.agents:
            agent.reset()
        if self.index is not None:
            return self.observations[self.index]
        else:
            return self.observations

    def render(self, mode="human"):
        # Delegate rendering to the wrapped environment.
        self.env.render(mode)

    def seed(self, seed=None):
        # Seed both the wrapper's RNG and the wrapped environment.
        self.random_state.seed(seed)
        self.env.seed(seed)

    def close(self):
        # NOTE(review): no-op — the wrapped env is never closed here;
        # confirm callers are expected to close self.env themselves.
        pass
| [
"numpy.asarray",
"numpy.random.RandomState",
"numpy.repeat"
] | [((848, 871), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (869, 871), True, 'import numpy as np\n'), ((1456, 1485), 'numpy.asarray', 'np.asarray', (['self.observations'], {}), '(self.observations)\n', (1466, 1485), True, 'import numpy as np\n'), ((1272, 1302), 'numpy.repeat', 'np.repeat', (['None', 'self.role_num'], {}), '(None, self.role_num)\n', (1281, 1302), True, 'import numpy as np\n')] |
# coding:utf-8
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import learning_curve
from sklearn.svm import SVC
import LoadData
import matplotlib.pyplot as plt
import numpy as np
# assume classifier and training data is prepared...
def PlotLearningCurve(clf, X, Y):
    """Draw the cross-validated test-score learning curve for ``clf``.

    Uses 10-fold CV over ten training-set sizes from 10% to 100% of the
    data and shades one standard deviation around the mean test score.
    """
    train_sizes, train_scores, test_scores = learning_curve(
        clf, X, Y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)
    mean_test = np.mean(test_scores, axis=1)
    std_test = np.std(test_scores, axis=1)
    plt.figure()
    plt.title("RandomForestClassifier")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    plt.ylim((0.7, 1.01))
    plt.grid()
    # Mean test score at each training-set size.
    plt.plot(train_sizes, mean_test, 'o-', color="r", label="Test score")
    plt.legend(loc="best")
    # Shaded band: +/- one standard deviation around the mean test score.
    plt.fill_between(train_sizes, mean_test - std_test, mean_test + std_test,
                     alpha=0.1, color="r")
    plt.draw()
    plt.show()
if __name__ == '__main__':
    # Classifier whose learning curve is plotted below.
    clf_RF = RandomForestClassifier(n_estimators=50, min_samples_split=2, min_samples_leaf=1, oob_score=True)
    # Alternative classifier kept for quick swapping into the call below.
    # (The original chained assignment `clf_SVC = clf = SVC(...)` left a
    # redundant, unused `clf` alias — removed.)
    clf_SVC = SVC(C=10, kernel='rbf', gamma=30)
    df = LoadData.readDataSet()
    df_label = df['label']
    df_features = df.drop(['label', 'id'], axis=1)
    PlotLearningCurve(clf_RF, df_features, df_label)
| [
"matplotlib.pyplot.title",
"sklearn.ensemble.RandomForestClassifier",
"LoadData.readDataSet",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.std",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linspa... | [((597, 625), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (604, 625), True, 'import numpy as np\n'), ((648, 675), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (654, 675), True, 'import numpy as np\n'), ((685, 697), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (695, 697), True, 'import matplotlib.pyplot as plt\n'), ((702, 737), 'matplotlib.pyplot.title', 'plt.title', (['"""RandomForestClassifier"""'], {}), "('RandomForestClassifier')\n", (711, 737), True, 'import matplotlib.pyplot as plt\n'), ((742, 773), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training examples"""'], {}), "('Training examples')\n", (752, 773), True, 'import matplotlib.pyplot as plt\n'), ((778, 797), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (788, 797), True, 'import matplotlib.pyplot as plt\n'), ((802, 823), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.7, 1.01)'], {}), '((0.7, 1.01))\n', (810, 823), True, 'import matplotlib.pyplot as plt\n'), ((828, 838), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (836, 838), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1091), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'test_scores_mean', '"""o-"""'], {'color': '"""r"""', 'label': '"""Test score"""'}), "(train_sizes, test_scores_mean, 'o-', color='r', label='Test score')\n", (1023, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1118), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1106, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1362, 1490), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['train_sizes', '(test_scores_mean - test_scores_std)', '(test_scores_mean + test_scores_std)'], {'alpha': '(0.1)', 'color': '"""r"""'}), "(train_sizes, test_scores_mean - test_scores_std, \n test_scores_mean + test_scores_std, alpha=0.1, color='r')\n", (1378, 1490), 
True, 'import matplotlib.pyplot as plt\n'), ((1562, 1572), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1570, 1572), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1585, 1587), True, 'import matplotlib.pyplot as plt\n'), ((1649, 1749), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(50)', 'min_samples_split': '(2)', 'min_samples_leaf': '(1)', 'oob_score': '(True)'}), '(n_estimators=50, min_samples_split=2,\n min_samples_leaf=1, oob_score=True)\n', (1671, 1749), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1766, 1799), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(10)', 'kernel': '"""rbf"""', 'gamma': '(30)'}), "(C=10, kernel='rbf', gamma=30)\n", (1769, 1799), False, 'from sklearn.svm import SVC\n'), ((1819, 1841), 'LoadData.readDataSet', 'LoadData.readDataSet', ([], {}), '()\n', (1839, 1841), False, 'import LoadData\n'), ((417, 442), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(10)'], {}), '(0.1, 1.0, 10)\n', (428, 442), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
import unittest
import numpy
from cclib.bridge import cclib2biopython
class BiopythonTest(unittest.TestCase):
    """Tests for the cclib2biopython bridge in cclib."""

    def test_makebiopython(self):
        """Superimposing two bridged atom sets yields the expected RMS."""
        from Bio.PDB.Superimposer import Superimposer

        atomnos = numpy.array([1, 8, 1], "i")
        coords_a = numpy.array([[-1, 1, 0], [0, 0, 0], [1, 1, 0]], "f")
        coords_b = numpy.array([[1.1, 2, 0], [1, 1, 0], [2, 1, 0]], "f")
        superimposer = Superimposer()
        superimposer.set_atoms(
            cclib2biopython.makebiopython(coords_a, atomnos),
            cclib2biopython.makebiopython(coords_b, atomnos),
        )
        expected_rms = 0.29337859596
        assert abs(superimposer.rms - expected_rms) < 1.0e-6
if __name__ == "__main__":
    # Allow the test module to be run directly as a script.
    unittest.main()
| [
"unittest.main",
"cclib.bridge.cclib2biopython.makebiopython",
"Bio.PDB.Superimposer.Superimposer",
"numpy.array"
] | [((894, 909), 'unittest.main', 'unittest.main', ([], {}), '()\n', (907, 909), False, 'import unittest\n'), ((476, 503), 'numpy.array', 'numpy.array', (['[1, 8, 1]', '"""i"""'], {}), "([1, 8, 1], 'i')\n", (487, 503), False, 'import numpy\n'), ((516, 568), 'numpy.array', 'numpy.array', (['[[-1, 1, 0], [0, 0, 0], [1, 1, 0]]', '"""f"""'], {}), "([[-1, 1, 0], [0, 0, 0], [1, 1, 0]], 'f')\n", (527, 568), False, 'import numpy\n'), ((581, 634), 'numpy.array', 'numpy.array', (['[[1.1, 2, 0], [1, 1, 0], [2, 1, 0]]', '"""f"""'], {}), "([[1.1, 2, 0], [1, 1, 0], [2, 1, 0]], 'f')\n", (592, 634), False, 'import numpy\n'), ((648, 662), 'Bio.PDB.Superimposer.Superimposer', 'Superimposer', ([], {}), '()\n', (660, 662), False, 'from Bio.PDB.Superimposer import Superimposer\n'), ((684, 725), 'cclib.bridge.cclib2biopython.makebiopython', 'cclib2biopython.makebiopython', (['a', 'atomnos'], {}), '(a, atomnos)\n', (713, 725), False, 'from cclib.bridge import cclib2biopython\n'), ((748, 789), 'cclib.bridge.cclib2biopython.makebiopython', 'cclib2biopython.makebiopython', (['b', 'atomnos'], {}), '(b, atomnos)\n', (777, 789), False, 'from cclib.bridge import cclib2biopython\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.