code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import torch
from torch.utils.data.sampler import Sampler
import prototype.spring.linklink as link
import math
import numpy as np
class DistributedSampler(Sampler):
    """Partition dataset indices across distributed ranks, reshuffled per epoch.

    With ``round_up=True`` every rank receives exactly ``num_samples`` indices
    (the shuffled index list is padded by wrapping around); with
    ``round_up=False`` no padding is done and the last rank receives the
    possibly-shorter tail.
    """

    def __init__(self, dataset, world_size=None, rank=None, round_up=True):
        self.dataset = dataset
        # Fall back to the distributed runtime only when not given explicitly.
        self.world_size = link.get_world_size() if world_size is None else world_size
        self.rank = link.get_rank() if rank is None else rank
        self.round_up = round_up
        self.epoch = 0
        # ceil(len / world_size): per-rank quota for a full (padded) epoch.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.world_size))
        if self.round_up:
            self.total_size = self.num_samples * self.world_size
            self.length = self.num_samples
        else:
            self.total_size = len(self.dataset)
            if self.rank == self.world_size - 1:
                # Last rank absorbs whatever remains after the full quotas.
                self.length = self.total_size - (self.world_size - 1) * self.num_samples
            else:
                self.length = self.num_samples

    def __iter__(self):
        # Seed a private generator with the epoch so all ranks draw the same
        # permutation and set_epoch() changes the order deterministically.
        gen = torch.Generator()
        gen.manual_seed(self.epoch)
        order = list(torch.randperm(len(self.dataset), generator=gen))
        if self.round_up:
            # Pad by wrapping so the list splits evenly across ranks.
            order += order[:(self.total_size - len(order))]
            assert len(order) == self.total_size
        start = self.num_samples * self.rank
        chunk = order[start:start + self.num_samples]
        if self.round_up or self.rank < self.world_size - 1:
            assert len(chunk) == self.num_samples
        return iter(chunk)

    def __len__(self):
        return self.length

    def set_epoch(self, epoch):
        """Set the epoch used to seed the shuffle of the next ``__iter__``."""
        self.epoch = epoch
class DistributedGivenIterationSampler(Sampler):
    """Sampler that yields a fixed budget of ``total_iter * batch_size`` indices.

    All ranks shuffle the same (seed-0) index pool and each rank takes its own
    contiguous slice; ``last_iter`` lets a resumed run skip already-consumed
    indices.  The sampler is single-use: ``__iter__`` may be called only once.
    """

    def __init__(self, dataset, total_iter, batch_size, world_size=None, rank=None, last_iter=0):
        world_size = link.get_world_size() if world_size is None else world_size
        rank = link.get_rank() if rank is None else rank
        assert rank < world_size
        self.dataset = dataset
        self.total_iter = total_iter
        self.batch_size = batch_size
        self.world_size = world_size
        self.rank = rank
        self.last_iter = last_iter
        self.total_size = self.total_iter * self.batch_size
        self.indices = self.gen_new_list()
        self.call = 0

    def __iter__(self):
        if self.call != 0:
            raise RuntimeError("this sampler is not designed to be called more than once!!")
        self.call = 1
        # Resume mid-run by dropping the indices already consumed.
        return iter(self.indices[self.last_iter * self.batch_size:])

    def gen_new_list(self):
        """Return this rank's slice of the globally-shuffled index pool."""
        # Fixed seed: every rank builds the identical pool and shuffle, so the
        # per-rank slices are disjoint and cover the pool exactly.
        np.random.seed(0)
        global_size = self.total_size * self.world_size
        base = np.arange(len(self.dataset))[:global_size]
        repeats = (global_size - 1) // base.shape[0] + 1
        pool = np.tile(base, repeats)[:global_size]
        np.random.shuffle(pool)
        start = self.total_size * self.rank
        mine = pool[start:start + self.total_size]
        assert len(mine) == self.total_size
        return mine

    def __len__(self):
        # note here we do not take last iter into consideration, since __len__
        # should only be used for displaying, the correct remaining size is
        # handled by dataloader
        return self.total_size
class DistributedEpochSampler(Sampler):
    """Fixed-budget distributed sampler built from epoch-shaped shuffles.

    Each simulated epoch pads the dataset (with extra samples drawn without
    replacement) so its length divides ``world_size * batch_size`` evenly,
    shuffles it, and gives every rank an equal contiguous share.  Epochs are
    concatenated until ``total_iter * batch_size`` indices are available.
    Single-use: ``__iter__`` may be called only once.
    """

    def __init__(self, dataset, total_iter, batch_size, world_size=None, rank=None, last_iter=0):
        world_size = link.get_world_size() if world_size is None else world_size
        rank = link.get_rank() if rank is None else rank
        assert rank < world_size
        self.dataset = dataset
        self.total_iter = total_iter
        self.batch_size = batch_size
        self.world_size = world_size
        self.rank = rank
        self.last_iter = last_iter
        self.all_size_single = self.total_iter * self.batch_size
        self.indices = self.gen_new_list()
        self.call = 0

    def __iter__(self):
        if self.call != 0:
            raise RuntimeError("this sampler is not designed to be called more than once!!")
        self.call = 1
        # Skip indices already consumed before a resume.
        return iter(self.indices[self.last_iter * self.batch_size:])

    def get_one_epoch_self_part(self):
        """Return this rank's share of one padded-and-shuffled epoch."""
        count = len(self.dataset)
        # Pad so the epoch length is a multiple of world_size * batch_size.
        padding = np.random.choice(count, self.extra_per_epoch, replace=False)
        epoch_indices = np.concatenate((np.arange(count), padding))
        np.random.shuffle(epoch_indices)
        assert len(epoch_indices) % (self.world_size * self.batch_size) == 0
        per_rank = len(epoch_indices) // self.world_size
        lo = self.rank * per_rank
        return epoch_indices[lo:lo + per_rank]

    def gen_new_list(self):
        """Concatenate per-epoch shares until the iteration budget is covered."""
        # Fixed seed: every rank replays the same epoch shuffles, so the
        # per-rank shares stay consistent across processes.
        np.random.seed(0)
        self.all_num = self.total_iter * self.batch_size * self.world_size
        iter_per_epoch = (len(self.dataset) - 1) // (self.batch_size * self.world_size) + 1
        self.num_per_epoch = iter_per_epoch * self.batch_size * self.world_size
        self.extra_per_epoch = self.num_per_epoch - len(self.dataset)
        n_epochs = (self.all_num - 1) // self.num_per_epoch + 1
        parts = [self.get_one_epoch_self_part() for _ in range(n_epochs)]
        merged = np.concatenate(parts)[:self.all_size_single]
        assert len(merged) == self.all_size_single
        return merged

    def __len__(self):
        return self.all_size_single
class RankedGivenIterationSampler(Sampler):
    """Single-process sampler yielding exactly ``total_iter * batch_size`` indices.

    The dataset indices are shuffled once (in place, using the global NumPy
    RNG) and the same permutation is replayed until the budget is exhausted.
    ``last_iter`` pre-charges the consumed count so a resumed run yields only
    the remainder.  Single-use: ``__iter__`` may be called only once.
    """

    def __init__(self, dataset, total_iter, batch_size, last_iter=0):
        self.dataset = dataset
        self.total_iter = total_iter
        self.batch_size = batch_size
        self.last_iter = last_iter
        self.total_size = self.total_iter * self.batch_size
        # Indices already consumed before this run started.
        self.cur_size = self.last_iter * self.batch_size
        self.indices = np.arange(len(self.dataset))
        self.call = 0

    def indice_generator(self):
        """Yield indices until ``total_size`` have been produced in total."""
        np.random.shuffle(self.indices)
        while self.cur_size < self.total_size:
            budget = self.total_size - self.cur_size
            # At most one full pass over the (fixed) permutation per round.
            chunk = self.indices[:budget]
            self.cur_size += len(chunk)
            for idx in chunk:
                yield idx

    def __iter__(self):
        if self.call != 0:
            raise RuntimeError("this sampler is not designed to be called more than once!!")
        self.call = 1
        return self.indice_generator()

    def __len__(self):
        # note here we do not take last iter into consideration, since __len__
        # should only be used for displaying, the correct remaining size is
        # handled by dataloader
        return self.total_size
class RankedGivenIterationSamplerDaLi(RankedGivenIterationSampler):
    """RankedGivenIterationSampler variant for DALI pipelines.

    Overrides ``gen_new_list`` to draw indices from ``dataset.meta_indice``
    instead of a plain ``arange`` over the dataset length.
    """

    def __init__(self, dataset, total_iter, batch_size, last_iter=0):
        super(RankedGivenIterationSamplerDaLi, self).__init__(dataset, total_iter, batch_size, last_iter)

    def gen_new_list(self):
        """Return ``total_size`` shuffled indices taken from ``dataset.meta_indice``.

        The meta indices are truncated/tiled to the required length, then
        shuffled once in place.  NOTE(review): this uses the global NumPy RNG
        without seeding, so each rank shuffles independently — confirm that is
        intended for DALI sharding.
        """
        all_size = self.total_size
        indices = np.array(self.dataset.meta_indice)
        indices = indices[:all_size]
        num_repeat = (all_size - 1) // indices.shape[0] + 1
        indices = np.tile(indices, num_repeat)
        indices = indices[:all_size]
        np.random.shuffle(indices)
        # Removed leftover debug print of (rank, max, min): it wrote to stdout
        # from every worker on each rebuild of the index list.
        assert len(indices) == self.total_size
        return indices
# Registry mapping the config `type` string to a sampler class; consumed by
# `build_sampler` below to instantiate the sampler requested in the config.
sampler_dict = {
    'distributed': DistributedSampler,
    'distributed_iteration': DistributedGivenIterationSampler,
    'distributed_epoch': DistributedEpochSampler,
    'ranked_iteration': RankedGivenIterationSampler,
    'ranked_iteration_dali': RankedGivenIterationSamplerDaLi
}
def build_sampler(cfg_sampler, cfg_dataset):
    """Build a sampler instance from configuration mappings.

    Args:
        cfg_sampler: mapping with 'type' (a key of ``sampler_dict``) and
            'kwargs' (dict of extra constructor arguments, updated in place).
        cfg_dataset: mapping with 'batch_size', 'dataset', 'last_iter' and
            either 'max_iter' or 'max_epoch'.  Mutated in place: 'max_iter'
            is set to the resolved iteration count and 'dataset' is popped.

    Returns:
        An instance of the sampler class selected by ``cfg_sampler['type']``.
    """
    batch_size = cfg_dataset['batch_size']
    dataset = cfg_dataset['dataset']
    # check step type: iteration or epoch ?
    # BUGFIX: the original used getattr(cfg_dataset, 'max_iter', False), which
    # on a plain dict never sees the key (keys are not attributes), so a
    # configured 'max_iter' was silently ignored.  .get() honours the key for
    # both plain dicts and attribute-style dict subclasses.
    if not cfg_dataset.get('max_iter', False):
        # Epoch-based schedule: convert max_epoch into an iteration count.
        world_size = link.get_world_size()
        iter_per_epoch = (len(dataset) - 1) // (batch_size * world_size) + 1
        total_iter = cfg_dataset['max_epoch'] * iter_per_epoch
    else:
        total_iter = cfg_dataset['max_iter']
    # initialize sampler kwargs: 'distributed' only needs the dataset;
    # iteration-budget samplers also need the schedule parameters.
    if cfg_sampler['type'] == 'distributed':
        sampler_kwargs = {'dataset': dataset}
    else:
        sampler_kwargs = {
            'dataset': dataset,
            'batch_size': batch_size,
            'total_iter': total_iter,
            'last_iter': cfg_dataset['last_iter']
        }
    cfg_sampler['kwargs'].update(sampler_kwargs)
    cfg_dataset['max_iter'] = total_iter
    cfg_dataset.pop('dataset')
    return sampler_dict[cfg_sampler['type']](**cfg_sampler['kwargs'])
| [
"numpy.random.seed",
"numpy.concatenate",
"prototype.spring.linklink.get_rank",
"prototype.spring.linklink.get_world_size",
"numpy.arange",
"numpy.tile",
"torch.Generator",
"numpy.random.choice",
"numpy.array",
"numpy.random.shuffle"
] | [((1030, 1047), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (1045, 1047), False, 'import torch\n'), ((2616, 2633), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2630, 2633), True, 'import numpy as np\n'), ((2848, 2876), 'numpy.tile', 'np.tile', (['indices', 'num_repeat'], {}), '(indices, num_repeat)\n', (2855, 2876), True, 'import numpy as np\n'), ((2923, 2949), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2940, 2949), True, 'import numpy as np\n'), ((4348, 4362), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (4357, 4362), True, 'import numpy as np\n'), ((4387, 4445), 'numpy.random.choice', 'np.random.choice', (['num', 'self.extra_per_epoch'], {'replace': '(False)'}), '(num, self.extra_per_epoch, replace=False)\n', (4403, 4445), True, 'import numpy as np\n'), ((4464, 4504), 'numpy.concatenate', 'np.concatenate', (['(indices, extra_indices)'], {}), '((indices, extra_indices))\n', (4478, 4504), True, 'import numpy as np\n'), ((4513, 4539), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (4530, 4539), True, 'import numpy as np\n'), ((4771, 4788), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4785, 4788), True, 'import numpy as np\n'), ((5328, 5351), 'numpy.concatenate', 'np.concatenate', (['indices'], {}), '(indices)\n', (5342, 5351), True, 'import numpy as np\n'), ((6071, 6102), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (6088, 6102), True, 'import numpy as np\n'), ((7652, 7686), 'numpy.array', 'np.array', (['self.dataset.meta_indice'], {}), '(self.dataset.meta_indice)\n', (7660, 7686), True, 'import numpy as np\n'), ((7800, 7828), 'numpy.tile', 'np.tile', (['indices', 'num_repeat'], {}), '(indices, num_repeat)\n', (7807, 7828), True, 'import numpy as np\n'), ((7875, 7901), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (7892, 7901), True, 'import numpy as np\n'), 
((8565, 8586), 'prototype.spring.linklink.get_world_size', 'link.get_world_size', ([], {}), '()\n', (8584, 8586), True, 'import prototype.spring.linklink as link\n'), ((299, 320), 'prototype.spring.linklink.get_world_size', 'link.get_world_size', ([], {}), '()\n', (318, 320), True, 'import prototype.spring.linklink as link\n'), ((365, 380), 'prototype.spring.linklink.get_rank', 'link.get_rank', ([], {}), '()\n', (378, 380), True, 'import prototype.spring.linklink as link\n'), ((1881, 1902), 'prototype.spring.linklink.get_world_size', 'link.get_world_size', ([], {}), '()\n', (1900, 1902), True, 'import prototype.spring.linklink as link\n'), ((1947, 1962), 'prototype.spring.linklink.get_rank', 'link.get_rank', ([], {}), '()\n', (1960, 1962), True, 'import prototype.spring.linklink as link\n'), ((3553, 3574), 'prototype.spring.linklink.get_world_size', 'link.get_world_size', ([], {}), '()\n', (3572, 3574), True, 'import prototype.spring.linklink as link\n'), ((3619, 3634), 'prototype.spring.linklink.get_rank', 'link.get_rank', ([], {}), '()\n', (3632, 3634), True, 'import prototype.spring.linklink as link\n'), ((7916, 7931), 'prototype.spring.linklink.get_rank', 'link.get_rank', ([], {}), '()\n', (7929, 7931), True, 'import prototype.spring.linklink as link\n')] |
from pathlib import Path
import pinocchio as pin
import numpy as np
from cosypose.config import ASSET_DIR
from cosypose.datasets.datasets_cfg import make_urdf_dataset, make_texture_dataset
from cosypose.simulator import BaseScene, Body, Camera
from cosypose.simulator import BodyCache, TextureCache, apply_random_textures
class SamplerError(Exception):
    """Raised when the scene randomizer cannot produce a valid configuration."""

    def __init__(self, *args, **kwargs):
        super(SamplerError, self).__init__(*args, **kwargs)
class BopRecordingScene(BaseScene):
    """Synthetic BOP-style recording scene for generating training data.

    Loads a random subset of URDF objects into a simulated cage, optionally
    randomizes textures, background pose and camera intrinsics/extrinsics,
    and produces rendered observations (``get_state``).  Sampling retries a
    few times and raises ``SamplerError`` when no valid configuration is
    found.
    """

    def __init__(self,
                 urdf_ds='ycbv',
                 texture_ds='shapenet',
                 domain_randomization=True,
                 textures_on_objects=False,
                 n_objects_interval=(2, 5),
                 objects_xyz_interval=((0.0, -0.5, -0.15), (1.0, 0.5, 0.15)),
                 proba_falling=0.5,
                 resolution=(640, 480),
                 focal_interval=((515, 515), (515, 515)),
                 camera_distance_interval=(0.5, 1.5),
                 border_check=True,
                 gpu_renderer=True,
                 n_textures_cache=50,
                 seed=0):
        # Objects
        self.urdf_ds = make_urdf_dataset(urdf_ds)
        self.n_objects_interval = n_objects_interval
        self.objects_xyz_interval = objects_xyz_interval
        self.n_objects_cache = len(self.urdf_ds)
        assert self.n_objects_cache >= max(n_objects_interval)
        # Pose used to park bodies far away instead of removing them.
        self.away_transform = (0, 0, 1000), (0, 0, 0, 1)
        self.proba_falling = proba_falling
        # Domain randomization
        self.texture_ds = make_texture_dataset(texture_ds)
        self.n_textures_cache = min(n_textures_cache, len(self.texture_ds))
        self.domain_randomization = domain_randomization
        self.textures_on_objects = textures_on_objects
        # Camera
        self.resolution = resolution
        self.focal_interval = np.array(focal_interval)
        self.camera_distance_interval = camera_distance_interval
        self.border_check = border_check
        self.gpu_renderer = gpu_renderer
        # Seeding: seed both the scene RNG and pinocchio's RNG (used for
        # random rotations via pin.SE3.Random()).
        self.np_random = np.random.RandomState(seed)
        pin.seed(seed)
        self.seed = seed

    def load_background(self):
        """Load the cage body used as a randomized background."""
        cage_path = Path(ASSET_DIR / 'cage' / 'cage.urdf').as_posix()
        self.background = Body.load(cage_path, client_id=self.client_id, scale=3.0)

    def load_plane(self):
        """Load the ground plane body (shown only for 'falling' scenes)."""
        plane_path = Path(ASSET_DIR / 'plane' / 'plane.urdf').as_posix()
        self.plane = Body.load(plane_path, client_id=self.client_id, scale=2.0)

    def background_pos_orn_rand(self):
        """Give the background cage a random position and orientation."""
        pos = self.np_random.uniform(np.ones(3) * -1, np.ones(3))
        orn = pin.Quaternion(pin.SE3.Random().rotation).coeffs()
        self.background.pose = pos, orn

    def show_plane(self):
        self.plane.pose = (0, 0, 0), (0, 0, 0, 1)

    def hide_plane(self):
        # Park the plane far away rather than deleting it.
        self.plane.pose = self.away_transform

    def load_body_cache(self):
        assert self._connected
        self.body_cache = BodyCache(self.urdf_ds, self.client_id)

    def load_texture_cache(self):
        """Preload a random subset of textures into the simulator."""
        assert self._connected
        ds_texture_ids = self.np_random.choice(len(self.texture_ds), size=self.n_textures_cache)
        self.texture_cache = TextureCache(self.texture_ds, self.client_id)
        [self.texture_cache.get_texture(idx) for idx in ds_texture_ids]

    def connect(self, load=True):
        """Connect to the simulator and (optionally) preload all assets."""
        super().connect(gpu_renderer=self.gpu_renderer)
        if load:
            self.load_background()
            self.load_plane()
            self.hide_plane()
            self.load_body_cache()
            self.load_texture_cache()

    def disconnect(self):
        super().disconnect()

    def pick_rand_objects(self):
        """Select a random number of distinct objects for the next scene."""
        n_min, n_max = self.n_objects_interval
        n_objects = self.np_random.choice(np.arange(n_min, n_max + 1))
        ids = self.np_random.choice(len(self.urdf_ds), size=n_objects, replace=False)
        self.bodies = self.body_cache.get_bodies_by_ids(ids)

    def visuals_rand(self):
        """Apply random textures to background/plane (and rarely to objects)."""
        bodies = [self.background] + [self.plane]
        if self.textures_on_objects and self.np_random.rand() > 0.9:
            bodies = self.bodies + bodies
        for body in bodies:
            apply_random_textures(body, self.texture_cache.cached_textures,
                                  np_random=self.np_random)

    def objects_pos_orn_rand(self):
        """Place objects at random free-floating poses (no physics)."""
        self.hide_plane()
        for body in self.bodies:
            pos = self.np_random.uniform(*self.objects_xyz_interval)
            orn = pin.Quaternion(pin.SE3.Random().rotation).coeffs()
            body.pose = pos, orn

    def objects_pos_orn_rand_falling(self):
        """Stack objects above the plane and simulate them falling."""
        self.show_plane()
        dheight = 0.05
        for n, body in enumerate(self.bodies):
            pos = self.np_random.uniform(*self.objects_xyz_interval)
            # Stagger initial heights so bodies do not interpenetrate.
            pos[2] = dheight * (n + 1)
            orn = pin.Quaternion(pin.SE3.Random().rotation).coeffs()
            body.pose = pos, orn
        ms = self.np_random.randint(5, 10) * 100
        self.run_simulation(float(ms) * 1e-3)

    def sample_camera(self):
        """Sample a camera with random intrinsics and a random spherical pose.

        Returns a configured ``Camera`` looking at the center of the object
        placement volume.
        """
        assert self.focal_interval.shape == (2, 2)
        # BUGFIX: dtype was np.float, an alias of builtin float (= float64)
        # that was removed in NumPy 1.24; np.float64 is behavior-identical.
        K = np.zeros((3, 3), dtype=np.float64)
        fxfy = self.np_random.uniform(*self.focal_interval)
        W, H = max(self.resolution), min(self.resolution)
        K[0, 0] = fxfy[0]
        K[1, 1] = fxfy[1]
        K[0, 2] = W / 2
        K[1, 2] = H / 2
        K[2, 2] = 1.0
        rho = self.np_random.uniform(*self.camera_distance_interval)
        theta = self.np_random.uniform(0, np.pi/2)
        phi = self.np_random.uniform(0, 2 * np.pi)
        roll = self.np_random.uniform(-10, 10) * np.pi / 180
        box_center = np.mean(self.objects_xyz_interval, axis=0)
        # NOTE(review): uses self._client_id while the load_* methods use
        # self.client_id — presumably both are provided by BaseScene; confirm.
        cam = Camera(resolution=self.resolution, client_id=self._client_id)
        cam.set_intrinsic_K(K)
        cam.set_extrinsic_spherical(target=box_center, rho=rho, phi=phi, theta=theta, roll=roll)
        return cam

    def camera_rand(self):
        """Sample a camera view in which every object is visible.

        A view is valid when every picked object appears in the segmentation
        mask and (if ``border_check``) no object touches the image border.
        Raises ``SamplerError`` after 3 failed attempts.
        """
        N = 0
        valid = False
        self.cam_obs = None
        while not valid:
            cam = self.sample_camera()
            cam_obs_ = cam.get_state()
            mask = cam_obs_['mask']
            # Zero-out background cage and the renderer's 'no hit' value.
            mask[mask == self.background._body_id] = 0
            mask[mask == 255] = 0
            uniqs = np.unique(cam_obs_['mask'])
            # Expect one label per object plus the zeroed background.
            valid = len(uniqs) == len(self.bodies) + 1
            if valid and self.border_check:
                for uniq in uniqs[uniqs > 0]:
                    H, W = cam_obs_['mask'].shape
                    ids = np.where(cam_obs_['mask'] == uniq)
                    # Reject views where an object touches the image border.
                    if ids[0].max() == H-1 or ids[0].min() == 0 or \
                       ids[1].max() == W-1 or ids[1].min() == 0:
                        valid = False
            N += 1
            if N >= 3:
                raise SamplerError('Cannot sample valid camera configuration.')
        self.cam_obs = cam_obs_

    def _full_rand(self,
                   objects=True,
                   objects_pos_orn=True,
                   falling=False,
                   background_pos_orn=True,
                   camera=True,
                   visuals=True):
        """Run one full randomization pass; each stage can be toggled off."""
        if background_pos_orn:
            self.background_pos_orn_rand()
        if objects:
            self.pick_rand_objects()
        if visuals:
            self.visuals_rand()
        if objects_pos_orn:
            if falling:
                self.objects_pos_orn_rand_falling()
            else:
                self.objects_pos_orn_rand()
        if camera:
            self.camera_rand()

    def get_state(self):
        """Return the camera observation and per-object states of the scene."""
        objects = []
        for body in self.bodies:
            state = body.get_state()
            # Body id doubles as the label in the segmentation mask.
            state['id_in_segm'] = body._body_id
            objects.append(state)
        state = dict(
            camera=self.cam_obs,
            objects=objects,
        )
        return state

    def try_rand(self):
        """Retry full randomization until it succeeds (up to 50 attempts)."""
        n_iter = 0
        while n_iter < 50:
            try:
                falling = self.np_random.rand() < self.proba_falling
                visuals = self.domain_randomization
                background_pos_orn = self.domain_randomization
                kwargs = dict(
                    objects=True,
                    objects_pos_orn=True,
                    falling=falling,
                    background_pos_orn=background_pos_orn,
                    camera=True,
                    visuals=visuals,
                )
                self._full_rand(**kwargs)
                return
            except SamplerError as e:
                print("Sampling failed: ", e)
                n_iter += 1
        raise SamplerError('Sampling failed')

    def make_new_scene(self):
        """Randomize the scene and return the resulting observation."""
        self.try_rand()
        obs = self.get_state()
        return obs
| [
"cosypose.simulator.apply_random_textures",
"pinocchio.SE3.Random",
"cosypose.simulator.TextureCache",
"numpy.zeros",
"cosypose.datasets.datasets_cfg.make_urdf_dataset",
"numpy.random.RandomState",
"numpy.ones",
"cosypose.simulator.Body.load",
"pathlib.Path",
"numpy.mean",
"numpy.array",
"cosy... | [((1159, 1185), 'cosypose.datasets.datasets_cfg.make_urdf_dataset', 'make_urdf_dataset', (['urdf_ds'], {}), '(urdf_ds)\n', (1176, 1185), False, 'from cosypose.datasets.datasets_cfg import make_urdf_dataset, make_texture_dataset\n'), ((1566, 1598), 'cosypose.datasets.datasets_cfg.make_texture_dataset', 'make_texture_dataset', (['texture_ds'], {}), '(texture_ds)\n', (1586, 1598), False, 'from cosypose.datasets.datasets_cfg import make_urdf_dataset, make_texture_dataset\n'), ((1872, 1896), 'numpy.array', 'np.array', (['focal_interval'], {}), '(focal_interval)\n', (1880, 1896), True, 'import numpy as np\n'), ((2088, 2115), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2109, 2115), True, 'import numpy as np\n'), ((2124, 2138), 'pinocchio.seed', 'pin.seed', (['seed'], {}), '(seed)\n', (2132, 2138), True, 'import pinocchio as pin\n'), ((2292, 2349), 'cosypose.simulator.Body.load', 'Body.load', (['cage_path'], {'client_id': 'self.client_id', 'scale': '(3.0)'}), '(cage_path, client_id=self.client_id, scale=3.0)\n', (2301, 2349), False, 'from cosypose.simulator import BaseScene, Body, Camera\n'), ((2471, 2529), 'cosypose.simulator.Body.load', 'Body.load', (['plane_path'], {'client_id': 'self.client_id', 'scale': '(2.0)'}), '(plane_path, client_id=self.client_id, scale=2.0)\n', (2480, 2529), False, 'from cosypose.simulator import BaseScene, Body, Camera\n'), ((2980, 3019), 'cosypose.simulator.BodyCache', 'BodyCache', (['self.urdf_ds', 'self.client_id'], {}), '(self.urdf_ds, self.client_id)\n', (2989, 3019), False, 'from cosypose.simulator import BodyCache, TextureCache, apply_random_textures\n'), ((3212, 3257), 'cosypose.simulator.TextureCache', 'TextureCache', (['self.texture_ds', 'self.client_id'], {}), '(self.texture_ds, self.client_id)\n', (3224, 3257), False, 'from cosypose.simulator import BodyCache, TextureCache, apply_random_textures\n'), ((5123, 5155), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float'}), '((3, 
3), dtype=np.float)\n', (5131, 5155), True, 'import numpy as np\n'), ((5649, 5691), 'numpy.mean', 'np.mean', (['self.objects_xyz_interval'], {'axis': '(0)'}), '(self.objects_xyz_interval, axis=0)\n', (5656, 5691), True, 'import numpy as np\n'), ((5707, 5768), 'cosypose.simulator.Camera', 'Camera', ([], {'resolution': 'self.resolution', 'client_id': 'self._client_id'}), '(resolution=self.resolution, client_id=self._client_id)\n', (5713, 5768), False, 'from cosypose.simulator import BaseScene, Body, Camera\n'), ((2624, 2634), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2631, 2634), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.arange', 'np.arange', (['n_min', '(n_max + 1)'], {}), '(n_min, n_max + 1)\n', (3795, 3813), True, 'import numpy as np\n'), ((4192, 4286), 'cosypose.simulator.apply_random_textures', 'apply_random_textures', (['body', 'self.texture_cache.cached_textures'], {'np_random': 'self.np_random'}), '(body, self.texture_cache.cached_textures, np_random=\n self.np_random)\n', (4213, 4286), False, 'from cosypose.simulator import BodyCache, TextureCache, apply_random_textures\n'), ((6256, 6283), 'numpy.unique', 'np.unique', (["cam_obs_['mask']"], {}), "(cam_obs_['mask'])\n", (6265, 6283), True, 'import numpy as np\n'), ((2216, 2254), 'pathlib.Path', 'Path', (["(ASSET_DIR / 'cage' / 'cage.urdf')"], {}), "(ASSET_DIR / 'cage' / 'cage.urdf')\n", (2220, 2254), False, 'from pathlib import Path\n'), ((2398, 2438), 'pathlib.Path', 'Path', (["(ASSET_DIR / 'plane' / 'plane.urdf')"], {}), "(ASSET_DIR / 'plane' / 'plane.urdf')\n", (2402, 2438), False, 'from pathlib import Path\n'), ((2607, 2617), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2614, 2617), True, 'import numpy as np\n'), ((6506, 6540), 'numpy.where', 'np.where', (["(cam_obs_['mask'] == uniq)"], {}), "(cam_obs_['mask'] == uniq)\n", (6514, 6540), True, 'import numpy as np\n'), ((2665, 2681), 'pinocchio.SE3.Random', 'pin.SE3.Random', ([], {}), '()\n', (2679, 2681), True, 'import pinocchio as 
pin\n'), ((4514, 4530), 'pinocchio.SE3.Random', 'pin.SE3.Random', ([], {}), '()\n', (4528, 4530), True, 'import pinocchio as pin\n'), ((4865, 4881), 'pinocchio.SE3.Random', 'pin.SE3.Random', ([], {}), '()\n', (4879, 4881), True, 'import pinocchio as pin\n')] |
'''
Created on Aug 25, 2016
@author: <NAME> <<EMAIL>>
'''
from __future__ import division
import collections
import unittest
import numpy as np
import six
from .. import stats
class TestStats(unittest.TestCase):
    """Test suite for the statistics helpers in the `stats` module."""

    _multiprocess_can_split_ = True  # let nose know that tests can run parallel

    def test_mean_std_online(self):
        """Check mean_std_online on arrays, iterators, ddof, and corner cases."""
        data = np.random.random(10)
        # Works on both sequences and plain iterators.
        for source in (data, iter(data)):
            mean, std = stats.mean_std_online(source)
            self.assertAlmostEqual(mean, data.mean())
            self.assertAlmostEqual(std, data.std())
        mean, std = stats.mean_std_online(data, ddof=2)
        self.assertAlmostEqual(mean, data.mean())
        self.assertAlmostEqual(std, data.std(ddof=2))
        # corner cases: single element, insufficient dof, empty input
        mean, std = stats.mean_std_online([1])
        self.assertEqual(mean, 1)
        self.assertEqual(std, 0)
        mean, std = stats.mean_std_online([1], ddof=2)
        self.assertEqual(mean, 1)
        self.assertTrue(np.isnan(std))
        mean, std = stats.mean_std_online([])
        self.assertTrue(np.isnan(mean))
        self.assertTrue(np.isnan(std))

    def test_mean_std_frequency_table(self):
        """Check mean_std_frequency_table on arrays and Counter inputs."""
        samples = np.random.randint(0, 5, 10)
        freqs = np.bincount(samples)
        counter = collections.Counter()
        for value, freq in enumerate(freqs):
            counter[value] += freq
        # Both an array and a Counter are accepted as frequency tables.
        for table in (freqs, counter):
            for ddof in (0, 2):
                mean, std = stats.mean_std_frequency_table(table, ddof=ddof)
                self.assertAlmostEqual(mean, samples.mean())
                self.assertAlmostEqual(std, samples.std(ddof=ddof))

    def _test_StatisticsAccumulator(self, shape=None, ddof=2):
        """Exercise StatisticsAccumulator for one (shape, ddof) combination."""
        dims = [10] if shape is None else [10] + shape
        data = np.random.random(dims)
        acc = stats.StatisticsAccumulator(shape=shape, ddof=ddof)
        # str() must work at every stage of filling the accumulator.
        self.assertIsInstance(str(acc), six.string_types)
        acc.add(data[0])
        self.assertIsInstance(str(acc), six.string_types)
        acc.add_many(data[1:])
        np.testing.assert_allclose(acc.mean, data.mean(axis=0))
        np.testing.assert_allclose(acc.std, data.std(axis=0, ddof=ddof))
        self.assertIsInstance(str(acc), six.string_types)
        try:
            import uncertainties
        except ImportError:
            pass
        else:
            # Scalar accumulators export a Variable, array ones an ndarray.
            if shape is None:
                self.assertIsInstance(acc.to_uncertainties(),
                                      uncertainties.core.Variable)
            else:
                self.assertIsInstance(acc.to_uncertainties(), np.ndarray)

    def test_StatisticsAccumulator(self):
        """Run the accumulator checks over several shapes and ddof values."""
        for ddof in (0, 2):
            for shape in (None, [1], [3], [2, 3]):
                self._test_StatisticsAccumulator(shape=shape, ddof=ddof)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"numpy.isnan",
"numpy.random.random",
"numpy.random.randint",
"collections.Counter",
"numpy.bincount"
] | [((3414, 3429), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3427, 3429), False, 'import unittest\n'), ((449, 469), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (465, 469), True, 'import numpy as np\n'), ((1471, 1498), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(10)'], {}), '(0, 5, 10)\n', (1488, 1498), True, 'import numpy as np\n'), ((1511, 1525), 'numpy.bincount', 'np.bincount', (['x'], {}), '(x)\n', (1522, 1525), True, 'import numpy as np\n'), ((1757, 1778), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (1776, 1778), False, 'import collections\n'), ((1188, 1201), 'numpy.isnan', 'np.isnan', (['std'], {}), '(std)\n', (1196, 1201), True, 'import numpy as np\n'), ((1282, 1296), 'numpy.isnan', 'np.isnan', (['mean'], {}), '(mean)\n', (1290, 1296), True, 'import numpy as np\n'), ((1322, 1335), 'numpy.isnan', 'np.isnan', (['std'], {}), '(std)\n', (1330, 1335), True, 'import numpy as np\n'), ((2219, 2239), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (2235, 2239), True, 'import numpy as np\n'), ((2270, 2300), 'numpy.random.random', 'np.random.random', (['([10] + shape)'], {}), '([10] + shape)\n', (2286, 2300), True, 'import numpy as np\n')] |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import test_utils as tu
from tensorflow.compiler.tests import xla_test
from tensorflow.python import ipu
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import layers
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
import test_utils as tu
class ScalarElementWiseGraphTest(xla_test.XLATestCase):
  """Checks that purely scalar element-wise graphs are not compiled for IPU.

  Each test asserts on the number of compile reports produced: zero reports
  means the scalar graph was evaluated without an IPU compilation.
  """

  # Overriding abstract method.
  def cached_session(self):
    return 0

  # Overriding abstract method.
  def test_session(self):
    return 0

  def testDoNotCompileScalarElementWiseGraphWithParameter(self):
    """A scalar add fed by placeholders must produce no compile report."""
    cfg = ipu.config.IPUConfig()
    report_helper = tu.ReportHelper()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      def my_graph(a, b):
        with ops.device("/device:IPU:0"):
          x = math_ops.add(a, b)
          return x
      with ops.device('cpu'):
        a = array_ops.placeholder(np.int32, name="a")
        b = array_ops.placeholder(np.int32, name="b")
      out = ipu.ipu_compiler.compile(my_graph, [a, b])
      fd = {a: np.int32(2), b: np.int32(3)}
      result = sess.run(out, fd)
      # No report generated -> the scalar graph was not compiled for IPU.
      self.assert_num_reports(report_helper, 0)
      self.assertAllClose(result, [5])

  def testDoNotCompileScalarConstGraph(self):
    """A scalar add of Python constants must produce no compile report."""
    cfg = ipu.config.IPUConfig()
    report_helper = tu.ReportHelper()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      def my_graph(a, b):
        with ops.device("/device:IPU:0"):
          x = math_ops.add(a, b)
          return x
      with ops.device('cpu'):
        a = 2
        b = 3
      out = ipu.ipu_compiler.compile(my_graph, [a, b])
      result = sess.run(out)
      # If compile was called, a report_json would be generated
      self.assert_num_reports(report_helper, 0)
      self.assertEqual(result, [5])

  def testDoNotCompileScalarElementWiseGraphWithParameterAdd1(self):
    """Chained scalar adds (placeholder + constant) must also skip compilation."""
    cfg = ipu.config.IPUConfig()
    report_helper = tu.ReportHelper()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      def my_graph(a, b):
        with ops.device("/device:IPU:0"):
          x = math_ops.add(a, b)
          x = math_ops.add(x, 1)
          return x
      with ops.device('cpu'):
        a = array_ops.placeholder(np.int32, name="a")
        b = array_ops.placeholder(np.int32, name="b")
      out = ipu.ipu_compiler.compile(my_graph, [a, b])
      fd = {a: np.int32(2.0), b: np.int32(3.0)}
      result = sess.run(out, fd)
      # If compile was called, a report_json would be generated
      self.assert_num_reports(report_helper, 0)
      self.assertEqual(result, [6])

  @test_util.deprecated_graph_mode_only
  def testWhenSomeScalarOnDevice(self):
    """Mixed case: a real conv graph compiles once; a scalar-only variable
    update graph runs without further compilation, exchanging data with the
    host instead (checked via host<->device transfer events)."""
    cfg = ipu.config.IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.configure_ipu_system()
    def conv(x, ksize, stride, filters_out):
      # Small helper: 2D convolution with constant initializers so the
      # numeric expectations below are deterministic.
      return layers.Conv2D(
          filters_out,
          ksize,
          stride,
          'SAME',
          kernel_initializer=init_ops.constant_initializer(0.1),
          bias_initializer=init_ops.constant_initializer(0.0))(x)
    def graph1(x):
      # Non-trivial graph: conv -> mean -> add shared variable, and also
      # increments the shared variable by the result.
      x = conv(x, 3, 1, 2)
      x = math_ops.reduce_mean(x)
      x = array_ops.reshape(x, [])
      with variable_scope.variable_scope("vs",
                                         use_resource=True,
                                         reuse=variable_scope.AUTO_REUSE):
        z = variable_scope.get_variable(
            "var",
            shape=[],
            dtype=np.float32,
            initializer=init_ops.constant_initializer(1.0))
        x = x + z
        z = state_ops.assign_add(z, x)
      return x
    with ops.device('cpu'):
      x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])
    def graph2():
      # Scalar-only graph: increments the same shared variable "vs/var".
      with variable_scope.variable_scope("vs",
                                         use_resource=True,
                                         reuse=variable_scope.AUTO_REUSE):
        z = variable_scope.get_variable(
            "var",
            shape=[],
            dtype=np.float32,
            initializer=init_ops.constant_initializer(1.0))
      return state_ops.assign_add(z, 1.0)
    with ops.device("/device:IPU:0"):
      output1 = ipu_compiler.compile(graph1, inputs=[x])
      output2 = ipu_compiler.compile(graph2, inputs=[])
    tu.move_variable_initialization_to_cpu()
    with tu.ipu_session() as sess:
      report_json = tu.ReportJSON(self, sess)
      report_json.reset()
      sess.run(variables.global_variables_initializer())
      report_helper.clear_reports()
      result1 = sess.run(output1, {x: np.ones(x.shape)})
      report_json.parse_log()
      report_json.assert_contains_host_to_device_transfer_event()
      report_json.reset()
      # First run of the conv graph compiles exactly once.
      self.assert_num_reports(report_helper, 1)
      report_helper.clear_reports()
      result2 = sess.run(output2)
      report_json.parse_log()
      # Check that there was a copy from device to host.  If there had been
      # no copy, a compile event would appear here instead; we see no compile
      # event, as expected.
      report_json.assert_contains_device_to_host_transfer_event()
      report_json.reset()
      self.assert_num_reports(report_helper, 0)
      result3 = sess.run(output1, {x: np.ones(x.shape)})
      report_json.parse_log()
      report_json.assert_contains_host_to_device_transfer_event()
      report_json.reset()
      # Re-running the conv graph reuses the cached executable: no new report.
      self.assert_num_reports(report_helper, 0)
      # Read comment for case result2.
      result4 = sess.run(output2)
      report_json.parse_log()
      report_json.assert_contains_device_to_host_transfer_event()
      report_json.reset()
      self.assert_num_reports(report_helper, 0)
      self.assertAllClose(result1, [2.25])
      self.assertAllClose(result2, [4.25])
      self.assertAllClose(result3, [5.5])
      self.assertAllClose(result4, [10.75])
if __name__ == "__main__":
googletest.main()
| [
"tensorflow.python.ipu.config.IPUConfig",
"tensorflow.python.ops.array_ops.reshape",
"numpy.ones",
"tensorflow.python.framework.ops.device",
"test_utils.ipu_session",
"test_utils.ReportHelper",
"test_utils.enable_ipu_events",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.python.platform.... | [((7204, 7221), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (7219, 7221), False, 'from tensorflow.python.platform import googletest\n'), ((1728, 1750), 'tensorflow.python.ipu.config.IPUConfig', 'ipu.config.IPUConfig', ([], {}), '()\n', (1748, 1750), False, 'from tensorflow.python import ipu\n'), ((1771, 1788), 'test_utils.ReportHelper', 'tu.ReportHelper', ([], {}), '()\n', (1786, 1788), True, 'import test_utils as tu\n'), ((2435, 2457), 'tensorflow.python.ipu.config.IPUConfig', 'ipu.config.IPUConfig', ([], {}), '()\n', (2455, 2457), False, 'from tensorflow.python import ipu\n'), ((2478, 2495), 'test_utils.ReportHelper', 'tu.ReportHelper', ([], {}), '()\n', (2493, 2495), True, 'import test_utils as tu\n'), ((3097, 3119), 'tensorflow.python.ipu.config.IPUConfig', 'ipu.config.IPUConfig', ([], {}), '()\n', (3117, 3119), False, 'from tensorflow.python import ipu\n'), ((3140, 3157), 'test_utils.ReportHelper', 'tu.ReportHelper', ([], {}), '()\n', (3155, 3157), True, 'import test_utils as tu\n'), ((3936, 3958), 'tensorflow.python.ipu.config.IPUConfig', 'ipu.config.IPUConfig', ([], {}), '()\n', (3956, 3958), False, 'from tensorflow.python import ipu\n'), ((3979, 3996), 'test_utils.ReportHelper', 'tu.ReportHelper', ([], {}), '()\n', (3994, 3996), True, 'import test_utils as tu\n'), ((4090, 4115), 'test_utils.enable_ipu_events', 'tu.enable_ipu_events', (['cfg'], {}), '(cfg)\n', (4110, 4115), True, 'import test_utils as tu\n'), ((5633, 5673), 'test_utils.move_variable_initialization_to_cpu', 'tu.move_variable_initialization_to_cpu', ([], {}), '()\n', (5671, 5673), True, 'import test_utils as tu\n'), ((2168, 2210), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['my_graph', '[a, b]'], {}), '(my_graph, [a, b])\n', (2192, 2210), False, 'from tensorflow.python import ipu\n'), ((2794, 2836), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['my_graph', '[a, 
b]'], {}), '(my_graph, [a, b])\n', (2818, 2836), False, 'from tensorflow.python import ipu\n'), ((3570, 3612), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['my_graph', '[a, b]'], {}), '(my_graph, [a, b])\n', (3594, 3612), False, 'from tensorflow.python import ipu\n'), ((4485, 4508), 'tensorflow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', (['x'], {}), '(x)\n', (4505, 4508), False, 'from tensorflow.python.ops import math_ops\n'), ((4519, 4543), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['x', '[]'], {}), '(x, [])\n', (4536, 4543), False, 'from tensorflow.python.ops import array_ops\n'), ((4926, 4952), 'tensorflow.python.ops.state_ops.assign_add', 'state_ops.assign_add', (['z', 'x'], {}), '(z, x)\n', (4946, 4952), False, 'from tensorflow.python.ops import state_ops\n'), ((4978, 4995), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (4988, 4995), False, 'from tensorflow.python.framework import ops\n'), ((5007, 5060), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.float32'], {'shape': '[1, 4, 4, 2]'}), '(np.float32, shape=[1, 4, 4, 2])\n', (5028, 5060), False, 'from tensorflow.python.ops import array_ops\n'), ((5447, 5475), 'tensorflow.python.ops.state_ops.assign_add', 'state_ops.assign_add', (['z', '(1.0)'], {}), '(z, 1.0)\n', (5467, 5475), False, 'from tensorflow.python.ops import state_ops\n'), ((5486, 5513), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (5496, 5513), False, 'from tensorflow.python.framework import ops\n'), ((5531, 5571), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu_compiler.compile', (['graph1'], {'inputs': '[x]'}), '(graph1, inputs=[x])\n', (5551, 5571), False, 'from tensorflow.python.ipu import ipu_compiler\n'), ((5588, 5627), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu_compiler.compile', (['graph2'], {'inputs': '[]'}), '(graph2, 
inputs=[])\n', (5608, 5627), False, 'from tensorflow.python.ipu import ipu_compiler\n'), ((5684, 5700), 'test_utils.ipu_session', 'tu.ipu_session', ([], {}), '()\n', (5698, 5700), True, 'import test_utils as tu\n'), ((5731, 5756), 'test_utils.ReportJSON', 'tu.ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (5744, 5756), True, 'import test_utils as tu\n'), ((2028, 2045), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (2038, 2045), False, 'from tensorflow.python.framework import ops\n'), ((2059, 2100), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'name': '"""a"""'}), "(np.int32, name='a')\n", (2080, 2100), False, 'from tensorflow.python.ops import array_ops\n'), ((2113, 2154), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'name': '"""b"""'}), "(np.int32, name='b')\n", (2134, 2154), False, 'from tensorflow.python.ops import array_ops\n'), ((2227, 2238), 'numpy.int32', 'np.int32', (['(2)'], {}), '(2)\n', (2235, 2238), True, 'import numpy as np\n'), ((2243, 2254), 'numpy.int32', 'np.int32', (['(3)'], {}), '(3)\n', (2251, 2254), True, 'import numpy as np\n'), ((2735, 2752), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (2745, 2752), False, 'from tensorflow.python.framework import ops\n'), ((3430, 3447), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (3440, 3447), False, 'from tensorflow.python.framework import ops\n'), ((3461, 3502), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'name': '"""a"""'}), "(np.int32, name='a')\n", (3482, 3502), False, 'from tensorflow.python.ops import array_ops\n'), ((3515, 3556), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'name': '"""b"""'}), "(np.int32, name='b')\n", (3536, 3556), False, 'from tensorflow.python.ops import array_ops\n'), 
((3629, 3642), 'numpy.int32', 'np.int32', (['(2.0)'], {}), '(2.0)\n', (3637, 3642), True, 'import numpy as np\n'), ((3647, 3660), 'numpy.int32', 'np.int32', (['(3.0)'], {}), '(3.0)\n', (3655, 3660), True, 'import numpy as np\n'), ((4556, 4648), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""vs"""'], {'use_resource': '(True)', 'reuse': 'variable_scope.AUTO_REUSE'}), "('vs', use_resource=True, reuse=variable_scope\n .AUTO_REUSE)\n", (4585, 4648), False, 'from tensorflow.python.ops import variable_scope\n'), ((5091, 5183), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""vs"""'], {'use_resource': '(True)', 'reuse': 'variable_scope.AUTO_REUSE'}), "('vs', use_resource=True, reuse=variable_scope\n .AUTO_REUSE)\n", (5120, 5183), False, 'from tensorflow.python.ops import variable_scope\n'), ((5798, 5838), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (5836, 5838), False, 'from tensorflow.python.ops import variables\n'), ((1937, 1964), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (1947, 1964), False, 'from tensorflow.python.framework import ops\n'), ((1980, 1998), 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['a', 'b'], {}), '(a, b)\n', (1992, 1998), False, 'from tensorflow.python.ops import math_ops\n'), ((2644, 2671), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (2654, 2671), False, 'from tensorflow.python.framework import ops\n'), ((2687, 2705), 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['a', 'b'], {}), '(a, b)\n', (2699, 2705), False, 'from tensorflow.python.ops import math_ops\n'), ((3306, 3333), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (3316, 3333), False, 'from 
tensorflow.python.framework import ops\n'), ((3349, 3367), 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['a', 'b'], {}), '(a, b)\n', (3361, 3367), False, 'from tensorflow.python.ops import math_ops\n'), ((3382, 3400), 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['x', '(1)'], {}), '(x, 1)\n', (3394, 3400), False, 'from tensorflow.python.ops import math_ops\n'), ((5915, 5931), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (5922, 5931), True, 'import numpy as np\n'), ((6567, 6583), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (6574, 6583), True, 'import numpy as np\n'), ((4326, 4360), 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (4355, 4360), False, 'from tensorflow.python.ops import init_ops\n'), ((4389, 4423), 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (4418, 4423), False, 'from tensorflow.python.ops import init_ops\n'), ((4863, 4897), 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (4892, 4897), False, 'from tensorflow.python.ops import init_ops\n'), ((5398, 5432), 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (5427, 5432), False, 'from tensorflow.python.ops import init_ops\n')] |
import numpy
import theano
from blocks_extras.utils import check_valid_permutation
from blocks.initialization import NdarrayInitialization
class PermutationMatrix(NdarrayInitialization):
"""Generates a 2-dimensional permutation matrix.
Parameters
----------
permutation : ndarray, 1-dimensional, optional
A permutation on the integers in a given range. If specified,
always generate the permutation matrix corresponding to this
permutation, ignoring the random number generator.
"""
def __init__(self, permutation=None):
if permutation is not None:
permutation = check_valid_permutation(permutation)
self.permutation = permutation
def generate(self, rng, shape):
def make_matrix(size, perm):
return numpy.eye(size, dtype=theano.config.floatX)[:, perm]
if len(shape) != 2 or shape[0] != shape[1]:
raise ValueError("requested shape is not square")
if self.permutation is not None:
if shape[0] != len(self.permutation):
raise ValueError("provided permutation does not match "
"requested shape")
return make_matrix(shape[0], self.permutation)
else:
return make_matrix(shape[0], rng.permutation(shape[0]))
| [
"numpy.eye",
"blocks_extras.utils.check_valid_permutation"
] | [((636, 672), 'blocks_extras.utils.check_valid_permutation', 'check_valid_permutation', (['permutation'], {}), '(permutation)\n', (659, 672), False, 'from blocks_extras.utils import check_valid_permutation\n'), ((805, 848), 'numpy.eye', 'numpy.eye', (['size'], {'dtype': 'theano.config.floatX'}), '(size, dtype=theano.config.floatX)\n', (814, 848), False, 'import numpy\n')] |
import unittest
import numpy as np
from grizzly.encoders import numpy_to_weld_type
import pandas_weld as pdw
from lazy_result import LazyResult
from pandas_weld.tests.utils import evaluate_if_necessary
class IndexTests(unittest.TestCase):
# noinspection PyMethodMayBeStatic
def test_getitem_slice(self):
result = pdw.Index(np.array([1, 2, 3]), np.dtype(np.int64))[:2]
expected_result = pdw.Index(np.array([1, 2]), np.dtype(np.int64))
np.testing.assert_array_equal(evaluate_if_necessary(expected_result).data,
evaluate_if_necessary(result).data)
# noinspection PyMethodMayBeStatic
def test_getitem_filter(self):
to_filter = LazyResult(np.array([True, False, True], dtype=np.dtype(np.bool)),
numpy_to_weld_type(np.dtype(np.bool)),
1)
result = pdw.Index(np.array([1, 2, 3]), np.dtype(np.int64))[to_filter]
expected_result = pdw.Index(np.array([1, 3]), np.dtype(np.int64))
np.testing.assert_array_equal(evaluate_if_necessary(expected_result).data,
evaluate_if_necessary(result).data)
def main():
unittest.main()
if __name__ == '__main__':
main()
| [
"unittest.main",
"pandas_weld.tests.utils.evaluate_if_necessary",
"numpy.dtype",
"numpy.array"
] | [((1217, 1232), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1230, 1232), False, 'import unittest\n'), ((425, 441), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (433, 441), True, 'import numpy as np\n'), ((443, 461), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (451, 461), True, 'import numpy as np\n'), ((1003, 1019), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1011, 1019), True, 'import numpy as np\n'), ((1021, 1039), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (1029, 1039), True, 'import numpy as np\n'), ((343, 362), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (351, 362), True, 'import numpy as np\n'), ((364, 382), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (372, 382), True, 'import numpy as np\n'), ((502, 540), 'pandas_weld.tests.utils.evaluate_if_necessary', 'evaluate_if_necessary', (['expected_result'], {}), '(expected_result)\n', (523, 540), False, 'from pandas_weld.tests.utils import evaluate_if_necessary\n'), ((585, 614), 'pandas_weld.tests.utils.evaluate_if_necessary', 'evaluate_if_necessary', (['result'], {}), '(result)\n', (606, 614), False, 'from pandas_weld.tests.utils import evaluate_if_necessary\n'), ((833, 850), 'numpy.dtype', 'np.dtype', (['np.bool'], {}), '(np.bool)\n', (841, 850), True, 'import numpy as np\n'), ((914, 933), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (922, 933), True, 'import numpy as np\n'), ((935, 953), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (943, 953), True, 'import numpy as np\n'), ((1080, 1118), 'pandas_weld.tests.utils.evaluate_if_necessary', 'evaluate_if_necessary', (['expected_result'], {}), '(expected_result)\n', (1101, 1118), False, 'from pandas_weld.tests.utils import evaluate_if_necessary\n'), ((1163, 1192), 'pandas_weld.tests.utils.evaluate_if_necessary', 'evaluate_if_necessary', (['result'], {}), '(result)\n', (1184, 1192), False, 'from 
pandas_weld.tests.utils import evaluate_if_necessary\n'), ((763, 780), 'numpy.dtype', 'np.dtype', (['np.bool'], {}), '(np.bool)\n', (771, 780), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by <NAME>
import numpy as np
import logging
import math
import transforms3d.euler as txe
import transforms3d.quaternions as txq
import argparse
import cv2
import matplotlib.pyplot as plt
try:
from thirdparty.mano.webuser.smpl_handpca_wrapper_HAND_only \
import load_model as load_mano_model
MANO_PRESENT = True
except ImportError:
load_mano_model = None
MANO_PRESENT = False
if MANO_PRESENT:
# hacks needed for MANO Python2 code
import os.path as osp
import _pickle as cPickle
import sys
sys.modules['cPickle'] = cPickle
sys.path.append(osp.join('thirdparty', 'mano'))
sys.path.append(osp.join('thirdparty', 'mano', 'webuser'))
def texture_proc(colors, a=0.05, invert=False):
idx = colors > 0
ci = colors[idx]
if len(ci) == 0:
return colors
if invert:
ci = 1 - ci
# fit a sigmoid
x1 = min(ci); y1 = a
x2 = max(ci); y2 = 1-a
lna = np.log((1 - y1) / y1)
lnb = np.log((1 - y2) / y2)
k = (lnb - lna) / (x1 - x2)
mu = (x2*lna - x1*lnb) / (lna - lnb)
# apply the sigmoid
ci = np.exp(k * (ci-mu)) / (1 + np.exp(k * (ci-mu)))
colors[idx] = ci
return colors
class MovingAverage:
def __init__(self):
self.count = 0
self.val = 0
def append(self, v):
self.val = self.val*self.count + v
self.count += 1
self.val /= self.count
def linesegment_from_points(p1, p2):
n = p2 - p1
return np.hstack((p1, n))
def get_hand_line_ids():
line_ids = []
for finger in range(5):
base = 4*finger + 1
line_ids.append([0, base])
for j in range(3):
line_ids.append([base+j, base+j+1])
line_ids = np.asarray(line_ids, dtype=int)
return line_ids
def rotmat_from_vecs(v1, v2=np.asarray([0, 0, 1])):
"""
Returns a rotation matrix R_1_2
:param v1: vector in frame 1
:param v2: vector in frame 2
:return:
"""
v1 = v1 / np.linalg.norm(v1)
v2 = v2 / np.linalg.norm(v2)
v = np.cross(v2, v1)
vx = np.asarray([
[0, -v[2], +v[1], 0],
[+v[2], 0, -v[0], 0],
[-v[1], +v[0], 0, 0],
[0, 0, 0, 0]])
dotp = np.dot(v1, v2)
if np.abs(dotp + 1) < 1e-3:
R = np.eye(4)
x = np.cross(v2, [1, 0, 0])
R[:3, :3] = txe.axangle2mat(x, np.pi)
else:
R = np.eye(4) + vx + np.dot(vx, vx)/(1+dotp)
return R
def p_dist_linesegment(p, ls):
"""
Distance from point p to line segment ls
p: Nx3
ls: Mx6 (2 3-dim endpoints of M line segments)
"""
# NxMx3
ap = p[:, np.newaxis, :] - ls[np.newaxis, :, :3]
# 1xMx3
u = ls[np.newaxis, :, 3:]
# 1xMx3
u_norm = u / np.linalg.norm(u, axis=2, keepdims=True)
# NxM
proj = np.sum(ap * u_norm, axis=2)
# point to line distance
# NxM
d_line = np.linalg.norm(np.cross(ap, u_norm, axis=2), axis=2)
# point to endpoint distance
# NxM
d_a = np.linalg.norm(ap, axis=2)
d_b = np.linalg.norm(ap-u, axis=2)
d_endp = np.minimum(d_a, d_b)
within_ls = (proj > 0) * (proj < np.linalg.norm(u, axis=2)) * (d_endp < 0.03)
d_ls = within_ls*d_line + (1-within_ls)*d_endp
return d_ls
def closest_linesegment_point(l0, l1, p):
"""
For each point in p, finds the closest point on the list of line segments
whose endpoints are l0 and l1
p: N x 3
l0, l1: M x 3
out: N x M x 3
"""
p = np.broadcast_to(p[:, np.newaxis, :], (len(p), len(l0), 3))
l0 = np.broadcast_to(l0[np.newaxis, :, :], (len(p), len(l0), 3))
l1 = np.broadcast_to(l1[np.newaxis, :, :], (len(p), len(l1), 3))
llen = np.linalg.norm(l1 - l0, axis=-1, keepdims=True)
lu = (l1 - l0) / llen
v = p - l0
d = np.sum(v * lu, axis=-1, keepdims=True)
d = np.clip(d, a_min=0, a_max=llen)
out = l0 + d*lu
return out
def pose_matrix(pose):
T = np.eye(4)
T[:3, 3] = pose['translation']
T[:3, :3] = txq.quat2mat(pose['rotation'])
return T
def tform_points(T, X):
"""
X: Nx3
T: 4x4 homogeneous
"""
X = np.vstack((X.T, np.ones(len(X))))
X = T @ X
X = X[:3].T
return X
def project(P, X):
"""
X: Nx3
P: 3x4 projection matrix, ContactPose.P or K @ cTo
returns Nx2 perspective projections
"""
X = np.vstack((X.T, np.ones(len(X))))
x = P @ X
x = x[:2] / x[2]
return x.T
def get_A(camera_name, W=960, H=540):
"""
Get the affine transformation matrix applied after 3D->2D projection
"""
def flipud(H):
return np.asarray([[1, 0, 0], [0, -1, H], [0, 0, 1]])
def fliplr(W):
return np.asarray([[-1, 0, W], [0, 1, 0], [0, 0, 1]])
def transpose():
return np.asarray([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
if camera_name == 'kinect2_left':
return np.dot(fliplr(H), transpose())
elif camera_name == 'kinect2_right':
return np.dot(flipud(W), transpose())
elif camera_name == 'kinect2_middle':
return np.dot(fliplr(W), flipud(H))
else:
raise NotImplementedError
def setup_logging(filename=None):
logging.basicConfig(level=logging.DEBUG)
root = logging.getLogger()
if filename is not None:
root.addHandler(logging.FileHandler(filename, 'w'))
root.info('Logging to {:s}'.format(filename))
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
EPS = np.finfo(float).eps * 4.0
q0 = np.asarray(quat0) / np.linalg.norm(quat0)
q1 = np.asarray(quat1) / np.linalg.norm(quat1)
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = np.dot(q0, q1)
if abs(abs(d) - 1.0) < EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
q1 *= -1.0
angle = math.acos(d) + spin * math.pi
if abs(angle) < EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def average_quaternions(qs, ws=None):
"""
From https://qr.ae/TcwOci
"""
if ws is None:
ws = np.ones(len(qs)) / len(qs)
else:
assert sum(ws) == 1
for idx in range(1, len(qs)):
if np.dot(qs[0], qs[idx]) < 0:
qs[idx] *= -1
for i in range(1, len(qs)):
frac = ws[i] / (ws[i-1] + ws[i]) # weight of qs[i]
qs[i] = quaternion_slerp(qs[i-1], qs[i], fraction=frac)
ws[i] = 1 - sum(ws[i+1:])
return qs[-1]
def default_argparse(require_p_num=True, require_intent=True,
require_object_name=True):
parser = argparse.ArgumentParser()
parser.add_argument('--p_num', type=int, help='Participant number (1-50)',
required=require_p_num)
parser.add_argument('--intent', choices=('use', 'handoff'),
help='Grasp intent', required=require_intent)
parser.add_argument('--object_name', help="Name of object",
required=require_object_name)
return parser
def default_multiargparse():
parser = argparse.ArgumentParser()
parser.add_argument('--p_num',
help='Participant numbers, comma or - separated.'
'Skipping means all participants',
default=None)
parser.add_argument('--intent', choices=('use', 'handoff', 'use,handoff'),
help='Grasp intents, comma separated', default='use,handoff')
parser.add_argument('--object_name',
help="Object names, comma separated, ignore for all objects",
default=None)
return parser
def parse_multiargs(args):
"""
parses the p_num, intent, and object_name arguments from a parser created
with default_multiargparse
"""
from utilities.dataset import get_p_nums
p_nums = args.p_num
if p_nums is None:
p_nums = list(range(1, 51))
elif '-' in p_nums:
first, last = p_nums.split('-')
p_nums = list(range(int(first), int(last)+1))
else:
p_nums = [int(p) for p in p_nums.split(',')]
intents = args.intent.split(',')
object_names = args.object_name
if object_names is not None:
object_names = object_names.split(',')
all_p_nums = []
for intent in intents:
for object_name in object_names:
all_p_nums.extend([pn for pn in p_nums if pn in
get_p_nums(object_name, intent)])
p_nums = list(set(all_p_nums))
delattr(args, 'p_num')
delattr(args, 'intent')
delattr(args, 'object_name')
return p_nums, intents, object_names, args
def colorcode_depth_image(im):
assert(im.ndim == 2)
im = im.astype(float)
im /= im.max()
j, i = np.nonzero(im)
c = im[j, i]
im = np.zeros((im.shape[0], im.shape[1], 3))
im[j, i, :] = plt.cm.viridis(c)[:, :3]
im = (im * 255.0).astype(np.uint8)
return im
def draw_hands(im, joints, colors=((0, 255, 0), (0, 0, 255)), circle_radius=3,
line_thickness=2, offset=np.zeros(2, dtype=np.int)):
if im is None:
print('Invalid image')
return im
if im.ndim == 2: # depth image
im = colorcode_depth_image(im)
for hand_idx, (js, c) in enumerate(zip(joints, colors)):
if js is None:
continue
else:
js = np.round(js-offset[np.newaxis, :]).astype(np.int)
for j in js:
im = cv2.circle(im, tuple(j), circle_radius, c, -1, cv2.LINE_AA)
for finger in range(5):
base = 4*finger + 1
im = cv2.line(im, tuple(js[0]), tuple(js[base]), (0, 0, 0),
line_thickness, cv2.LINE_AA)
for j in range(3):
im = cv2.line(im, tuple(js[base+j]), tuple(js[base+j+1]),
(0, 0, 0), line_thickness, cv2.LINE_AA)
return im
def draw_object_markers(im, ms, color=(0, 255, 255), circle_radius=3,
offset=np.zeros(2, dtype=np.int)):
if im.ndim == 2: # depth image
im = colorcode_depth_image(im)
for m in np.round(ms).astype(np.int):
im = cv2.circle(im, tuple(m-offset), circle_radius, color, -1, cv2.LINE_AA)
return im
def crop_image(im, joints, crop_size, fillvalue=[0]):
"""
joints: list of 21x2 2D joint locations per each hand
crops the im into a crop_size square centered at the mean of all joint
locations
returns cropped image and top-left pixel position of the crop in the full image
"""
if im.ndim < 3:
im = im[:, :, np.newaxis]
if isinstance(fillvalue, list) or isinstance(fillvalue, np.ndarray):
fillvalue = np.asarray(fillvalue).astype(im.dtype)
else:
fillvalue = np.asarray([fillvalue for _ in im.shape[2]]).astype(im.dtype)
joints = np.vstack([j for j in joints if j is not None])
bbcenter = np.round(np.mean(joints, axis=0)).astype(np.int)
im_crop = np.zeros((crop_size, crop_size, im.shape[2]), dtype=im.dtype)
tl = bbcenter - crop_size//2
br = bbcenter + crop_size//2
tl_crop = np.asarray([0, 0], dtype=np.int)
br_crop = np.asarray([crop_size, crop_size], dtype=np.int)
tl_spill = np.minimum(0, tl)
tl -= tl_spill
tl_crop -= tl_spill
br_spill = np.maximum(0, br-np.array([im.shape[1], im.shape[0]]))
br -= br_spill
br_crop -= br_spill
im_crop[tl_crop[1]:br_crop[1], tl_crop[0]:br_crop[0], :] = \
im[tl[1]:br[1], tl[0]:br[0], :]
return im_crop.squeeze(), tl
def openpose2mano(o, n_joints_per_finger=4):
"""
convert joints from openpose format to MANO format
"""
finger_o2m = {0: 4, 1: 0, 2: 1, 3: 3, 4: 2}
m = np.zeros((5*n_joints_per_finger+1, 3))
m[0] = o[0]
for ofidx in range(5):
for jidx in range(n_joints_per_finger):
oidx = 1 + ofidx*4 + jidx
midx = 1 + finger_o2m[ofidx]*n_joints_per_finger + jidx
m[midx] = o[oidx]
return np.array(m)
# m2o
# 0->1, 1->2, 2->4, 3->3, 4->0
def mano2openpose(m, n_joints_per_finger=4):
"""
convert joints from MANO format to openpose format
"""
finger_o2m = {0: 4, 1: 0, 2: 1, 3: 3, 4: 2}
finger_m2o = {v: k for k,v in finger_o2m.items()}
o = np.zeros((5*n_joints_per_finger+1, 3))
o[0] = m[0]
for mfidx in range(5):
for jidx in range(n_joints_per_finger):
midx = 1 + mfidx*4 + jidx
oidx = 1 + finger_m2o[mfidx]*n_joints_per_finger + jidx
o[oidx] = m[midx]
return o
def mano_joints_with_fingertips(m):
"""
get joints from MANO model
MANO model does not come with fingertip joints, so we have selected vertices
that correspond to fingertips
"""
fingertip_idxs = [333, 444, 672, 555, 745]
out = [m.J_transformed[0]]
for fidx in range(5):
for jidx in range(4):
if jidx < 3:
idx = 1 + fidx*3 + jidx
out.append(m.J_transformed[idx])
else:
out.append(m[fingertip_idxs[fidx]])
return out
def load_mano_meshes(params, model_dicts, oTh=(np.eye(4), np.eye(4)),
flat_hand_mean=False):
if not MANO_PRESENT or model_dicts is None:
return (None, None)
out = []
for hand_idx, mp in enumerate(params):
if mp is None:
out.append(None)
continue
ncomps = len(mp['pose']) - 3
m = load_mano_model(model_dicts[hand_idx], ncomps=ncomps,
flat_hand_mean=flat_hand_mean)
m.betas[:] = mp['betas']
m.pose[:] = mp['pose']
oTm = oTh[hand_idx] @ mp['hTm']
vertices = np.array(m)
vertices = tform_points(oTm, vertices)
joints = mano2openpose(mano_joints_with_fingertips(m))
joints = tform_points(oTm, joints)
out.append({
'vertices': vertices,
'joints': joints,
'faces': np.asarray(m.f),
})
return out
def grabcut_mask(src, mask, n_iters=10):
"""
Refines noisy mask edges using Grabcut on image src
"""
assert(src.shape[:2] == mask.shape[:2])
y, x = np.where(mask)
gmask = np.zeros((src.shape[0], src.shape[1]), dtype=np.uint8) # GC_BGD
gmask[y.min():y.max()+1, x.min():x.max()+1] = 2 # GC_PR_BGD
gmask[y, x] = 3 # GC_PR_FGD
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
gmask, bgdModel, fgdModel = \
cv2.grabCut(src, gmask, (0, 0, 0, 0), bgdModel, fgdModel, n_iters,
mode=cv2.GC_INIT_WITH_MASK)
mask = np.logical_or(gmask==1, gmask==3)
return mask | [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.abs",
"numpy.clip",
"numpy.mean",
"numpy.linalg.norm",
"numpy.exp",
"utilities.dataset.get_p_nums",
"transforms3d.quaternions.quat2mat",
"os.path.join",
"numpy.round",
"transforms3d.euler.axangle2mat",
"logging.FileHandler",
"matplotlib.pyplot... | [((946, 967), 'numpy.log', 'np.log', (['((1 - y1) / y1)'], {}), '((1 - y1) / y1)\n', (952, 967), True, 'import numpy as np\n'), ((976, 997), 'numpy.log', 'np.log', (['((1 - y2) / y2)'], {}), '((1 - y2) / y2)\n', (982, 997), True, 'import numpy as np\n'), ((1431, 1449), 'numpy.hstack', 'np.hstack', (['(p1, n)'], {}), '((p1, n))\n', (1440, 1449), True, 'import numpy as np\n'), ((1654, 1685), 'numpy.asarray', 'np.asarray', (['line_ids'], {'dtype': 'int'}), '(line_ids, dtype=int)\n', (1664, 1685), True, 'import numpy as np\n'), ((1734, 1755), 'numpy.asarray', 'np.asarray', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1744, 1755), True, 'import numpy as np\n'), ((1946, 1962), 'numpy.cross', 'np.cross', (['v2', 'v1'], {}), '(v2, v1)\n', (1954, 1962), True, 'import numpy as np\n'), ((1971, 2068), 'numpy.asarray', 'np.asarray', (['[[0, -v[2], +v[1], 0], [+v[2], 0, -v[0], 0], [-v[1], +v[0], 0, 0], [0, 0, 0, 0]\n ]'], {}), '([[0, -v[2], +v[1], 0], [+v[2], 0, -v[0], 0], [-v[1], +v[0], 0, 0\n ], [0, 0, 0, 0]])\n', (1981, 2068), True, 'import numpy as np\n'), ((2113, 2127), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (2119, 2127), True, 'import numpy as np\n'), ((2650, 2677), 'numpy.sum', 'np.sum', (['(ap * u_norm)'], {'axis': '(2)'}), '(ap * u_norm, axis=2)\n', (2656, 2677), True, 'import numpy as np\n'), ((2826, 2852), 'numpy.linalg.norm', 'np.linalg.norm', (['ap'], {'axis': '(2)'}), '(ap, axis=2)\n', (2840, 2852), True, 'import numpy as np\n'), ((2861, 2891), 'numpy.linalg.norm', 'np.linalg.norm', (['(ap - u)'], {'axis': '(2)'}), '(ap - u, axis=2)\n', (2875, 2891), True, 'import numpy as np\n'), ((2901, 2921), 'numpy.minimum', 'np.minimum', (['d_a', 'd_b'], {}), '(d_a, d_b)\n', (2911, 2921), True, 'import numpy as np\n'), ((3487, 3534), 'numpy.linalg.norm', 'np.linalg.norm', (['(l1 - l0)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(l1 - l0, axis=-1, keepdims=True)\n', (3501, 3534), True, 'import numpy as np\n'), ((3579, 3617), 'numpy.sum', 
'np.sum', (['(v * lu)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(v * lu, axis=-1, keepdims=True)\n', (3585, 3617), True, 'import numpy as np\n'), ((3624, 3655), 'numpy.clip', 'np.clip', (['d'], {'a_min': '(0)', 'a_max': 'llen'}), '(d, a_min=0, a_max=llen)\n', (3631, 3655), True, 'import numpy as np\n'), ((3719, 3728), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3725, 3728), True, 'import numpy as np\n'), ((3777, 3807), 'transforms3d.quaternions.quat2mat', 'txq.quat2mat', (["pose['rotation']"], {}), "(pose['rotation'])\n", (3789, 3807), True, 'import transforms3d.quaternions as txq\n'), ((4860, 4900), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (4879, 4900), False, 'import logging\n'), ((4910, 4929), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4927, 4929), False, 'import logging\n'), ((5355, 5369), 'numpy.dot', 'np.dot', (['q0', 'q1'], {}), '(q0, q1)\n', (5361, 5369), True, 'import numpy as np\n'), ((6302, 6327), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6325, 6327), False, 'import argparse\n'), ((6753, 6778), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6776, 6778), False, 'import argparse\n'), ((8363, 8377), 'numpy.nonzero', 'np.nonzero', (['im'], {}), '(im)\n', (8373, 8377), True, 'import numpy as np\n'), ((8400, 8439), 'numpy.zeros', 'np.zeros', (['(im.shape[0], im.shape[1], 3)'], {}), '((im.shape[0], im.shape[1], 3))\n', (8408, 8439), True, 'import numpy as np\n'), ((8651, 8676), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.int'}), '(2, dtype=np.int)\n', (8659, 8676), True, 'import numpy as np\n'), ((9495, 9520), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.int'}), '(2, dtype=np.int)\n', (9503, 9520), True, 'import numpy as np\n'), ((10287, 10334), 'numpy.vstack', 'np.vstack', (['[j for j in joints if j is not None]'], {}), '([j for j in joints if j is not None])\n', (10296, 10334), True, 'import numpy as 
np\n'), ((10409, 10470), 'numpy.zeros', 'np.zeros', (['(crop_size, crop_size, im.shape[2])'], {'dtype': 'im.dtype'}), '((crop_size, crop_size, im.shape[2]), dtype=im.dtype)\n', (10417, 10470), True, 'import numpy as np\n'), ((10545, 10577), 'numpy.asarray', 'np.asarray', (['[0, 0]'], {'dtype': 'np.int'}), '([0, 0], dtype=np.int)\n', (10555, 10577), True, 'import numpy as np\n'), ((10590, 10638), 'numpy.asarray', 'np.asarray', (['[crop_size, crop_size]'], {'dtype': 'np.int'}), '([crop_size, crop_size], dtype=np.int)\n', (10600, 10638), True, 'import numpy as np\n'), ((10652, 10669), 'numpy.minimum', 'np.minimum', (['(0)', 'tl'], {}), '(0, tl)\n', (10662, 10669), True, 'import numpy as np\n'), ((11110, 11152), 'numpy.zeros', 'np.zeros', (['(5 * n_joints_per_finger + 1, 3)'], {}), '((5 * n_joints_per_finger + 1, 3))\n', (11118, 11152), True, 'import numpy as np\n'), ((11359, 11370), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (11367, 11370), True, 'import numpy as np\n'), ((11624, 11666), 'numpy.zeros', 'np.zeros', (['(5 * n_joints_per_finger + 1, 3)'], {}), '((5 * n_joints_per_finger + 1, 3))\n', (11632, 11666), True, 'import numpy as np\n'), ((13345, 13359), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (13353, 13359), True, 'import numpy as np\n'), ((13370, 13424), 'numpy.zeros', 'np.zeros', (['(src.shape[0], src.shape[1])'], {'dtype': 'np.uint8'}), '((src.shape[0], src.shape[1]), dtype=np.uint8)\n', (13378, 13424), True, 'import numpy as np\n'), ((13543, 13572), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (13551, 13572), True, 'import numpy as np\n'), ((13584, 13613), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (13592, 13613), True, 'import numpy as np\n'), ((13651, 13750), 'cv2.grabCut', 'cv2.grabCut', (['src', 'gmask', '(0, 0, 0, 0)', 'bgdModel', 'fgdModel', 'n_iters'], {'mode': 'cv2.GC_INIT_WITH_MASK'}), '(src, gmask, (0, 0, 0, 0), bgdModel, fgdModel, n_iters, 
mode=cv2\n .GC_INIT_WITH_MASK)\n', (13662, 13750), False, 'import cv2\n'), ((13774, 13811), 'numpy.logical_or', 'np.logical_or', (['(gmask == 1)', '(gmask == 3)'], {}), '(gmask == 1, gmask == 3)\n', (13787, 13811), True, 'import numpy as np\n'), ((621, 651), 'os.path.join', 'osp.join', (['"""thirdparty"""', '"""mano"""'], {}), "('thirdparty', 'mano')\n", (629, 651), True, 'import os.path as osp\n'), ((671, 712), 'os.path.join', 'osp.join', (['"""thirdparty"""', '"""mano"""', '"""webuser"""'], {}), "('thirdparty', 'mano', 'webuser')\n", (679, 712), True, 'import os.path as osp\n'), ((1096, 1117), 'numpy.exp', 'np.exp', (['(k * (ci - mu))'], {}), '(k * (ci - mu))\n', (1102, 1117), True, 'import numpy as np\n'), ((1890, 1908), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (1904, 1908), True, 'import numpy as np\n'), ((1921, 1939), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (1935, 1939), True, 'import numpy as np\n'), ((2135, 2151), 'numpy.abs', 'np.abs', (['(dotp + 1)'], {}), '(dotp + 1)\n', (2141, 2151), True, 'import numpy as np\n'), ((2168, 2177), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2174, 2177), True, 'import numpy as np\n'), ((2186, 2209), 'numpy.cross', 'np.cross', (['v2', '[1, 0, 0]'], {}), '(v2, [1, 0, 0])\n', (2194, 2209), True, 'import numpy as np\n'), ((2227, 2252), 'transforms3d.euler.axangle2mat', 'txe.axangle2mat', (['x', 'np.pi'], {}), '(x, np.pi)\n', (2242, 2252), True, 'import transforms3d.euler as txe\n'), ((2591, 2631), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {'axis': '(2)', 'keepdims': '(True)'}), '(u, axis=2, keepdims=True)\n', (2605, 2631), True, 'import numpy as np\n'), ((2740, 2768), 'numpy.cross', 'np.cross', (['ap', 'u_norm'], {'axis': '(2)'}), '(ap, u_norm, axis=2)\n', (2748, 2768), True, 'import numpy as np\n'), ((4334, 4380), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0], [0, -1, H], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, -1, H], [0, 0, 1]])\n', (4344, 4380), True, 'import numpy as 
np\n'), ((4411, 4457), 'numpy.asarray', 'np.asarray', (['[[-1, 0, W], [0, 1, 0], [0, 0, 1]]'], {}), '([[-1, 0, W], [0, 1, 0], [0, 0, 1]])\n', (4421, 4457), True, 'import numpy as np\n'), ((4490, 4535), 'numpy.asarray', 'np.asarray', (['[[0, 1, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 1]])\n', (4500, 4535), True, 'import numpy as np\n'), ((5180, 5197), 'numpy.asarray', 'np.asarray', (['quat0'], {}), '(quat0)\n', (5190, 5197), True, 'import numpy as np\n'), ((5200, 5221), 'numpy.linalg.norm', 'np.linalg.norm', (['quat0'], {}), '(quat0)\n', (5214, 5221), True, 'import numpy as np\n'), ((5229, 5246), 'numpy.asarray', 'np.asarray', (['quat1'], {}), '(quat1)\n', (5239, 5246), True, 'import numpy as np\n'), ((5249, 5270), 'numpy.linalg.norm', 'np.linalg.norm', (['quat1'], {}), '(quat1)\n', (5263, 5270), True, 'import numpy as np\n'), ((5510, 5522), 'math.acos', 'math.acos', (['d'], {}), '(d)\n', (5519, 5522), False, 'import math\n'), ((5595, 5610), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (5603, 5610), False, 'import math\n'), ((5620, 5654), 'math.sin', 'math.sin', (['((1.0 - fraction) * angle)'], {}), '((1.0 - fraction) * angle)\n', (5628, 5654), False, 'import math\n'), ((5671, 5697), 'math.sin', 'math.sin', (['(fraction * angle)'], {}), '(fraction * angle)\n', (5679, 5697), False, 'import math\n'), ((8456, 8473), 'matplotlib.pyplot.cm.viridis', 'plt.cm.viridis', (['c'], {}), '(c)\n', (8470, 8473), True, 'import matplotlib.pyplot as plt\n'), ((12399, 12408), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12405, 12408), True, 'import numpy as np\n'), ((12410, 12419), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12416, 12419), True, 'import numpy as np\n'), ((12690, 12779), 'thirdparty.mano.webuser.smpl_handpca_wrapper_HAND_only.load_model', 'load_mano_model', (['model_dicts[hand_idx]'], {'ncomps': 'ncomps', 'flat_hand_mean': 'flat_hand_mean'}), '(model_dicts[hand_idx], ncomps=ncomps, flat_hand_mean=\n flat_hand_mean)\n', (12705, 
12779), True, 'from thirdparty.mano.webuser.smpl_handpca_wrapper_HAND_only import load_model as load_mano_model\n'), ((12909, 12920), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (12917, 12920), True, 'import numpy as np\n'), ((1123, 1144), 'numpy.exp', 'np.exp', (['(k * (ci - mu))'], {}), '(k * (ci - mu))\n', (1129, 1144), True, 'import numpy as np\n'), ((4977, 5011), 'logging.FileHandler', 'logging.FileHandler', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (4996, 5011), False, 'import logging\n'), ((5147, 5162), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5155, 5162), True, 'import numpy as np\n'), ((5936, 5958), 'numpy.dot', 'np.dot', (['qs[0]', 'qs[idx]'], {}), '(qs[0], qs[idx])\n', (5942, 5958), True, 'import numpy as np\n'), ((9603, 9615), 'numpy.round', 'np.round', (['ms'], {}), '(ms)\n', (9611, 9615), True, 'import numpy as np\n'), ((10739, 10775), 'numpy.array', 'np.array', (['[im.shape[1], im.shape[0]]'], {}), '([im.shape[1], im.shape[0]])\n', (10747, 10775), True, 'import numpy as np\n'), ((2269, 2278), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2275, 2278), True, 'import numpy as np\n'), ((2286, 2300), 'numpy.dot', 'np.dot', (['vx', 'vx'], {}), '(vx, vx)\n', (2292, 2300), True, 'import numpy as np\n'), ((2958, 2983), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {'axis': '(2)'}), '(u, axis=2)\n', (2972, 2983), True, 'import numpy as np\n'), ((10150, 10171), 'numpy.asarray', 'np.asarray', (['fillvalue'], {}), '(fillvalue)\n', (10160, 10171), True, 'import numpy as np\n'), ((10213, 10257), 'numpy.asarray', 'np.asarray', (['[fillvalue for _ in im.shape[2]]'], {}), '([fillvalue for _ in im.shape[2]])\n', (10223, 10257), True, 'import numpy as np\n'), ((10357, 10380), 'numpy.mean', 'np.mean', (['joints'], {'axis': '(0)'}), '(joints, axis=0)\n', (10364, 10380), True, 'import numpy as np\n'), ((13148, 13163), 'numpy.asarray', 'np.asarray', (['m.f'], {}), '(m.f)\n', (13158, 13163), True, 'import numpy as np\n'), ((8920, 8956), 
'numpy.round', 'np.round', (['(js - offset[np.newaxis, :])'], {}), '(js - offset[np.newaxis, :])\n', (8928, 8956), True, 'import numpy as np\n'), ((8059, 8090), 'utilities.dataset.get_p_nums', 'get_p_nums', (['object_name', 'intent'], {}), '(object_name, intent)\n', (8069, 8090), False, 'from utilities.dataset import get_p_nums\n')] |
"""
implements saving and reading of Image types in the framework
"""
import io
import json
import struct
import bson
import numpy
from matplotlib import pyplot
from PIL import Image
from urlpath import URL
from cortex.utils import read_all
class ColorImage:
    """Thin wrapper around a PIL RGB image providing uniform load/save helpers."""

    # URI schemes accepted by from_uri.
    SupportedSchemes = ['file']

    def __init__(self, impl):
        # Backing PIL image; all access goes through this wrapper.
        self._impl = impl

    @classmethod
    def from_bytes(cls, width, height, data):
        """Build a ColorImage from raw RGB bytes with the given dimensions."""
        pil_image = Image.frombytes("RGB", (width, height), data)
        return cls(pil_image)

    @classmethod
    def from_uri(cls, uri):
        """Load a ColorImage from a URI whose scheme is in SupportedSchemes."""
        parsed = URL(uri)
        if parsed.scheme not in cls.SupportedSchemes:
            raise ValueError(f"currently not supporting {parsed.scheme} uris")
        return cls(Image.open(parsed.path))

    def save(self, fp):
        """Write the wrapped image to the given file object."""
        self._impl.save(fp)
class DepthImage:
    """A depth map stored as a row-major list of rows of floats, serialized via BSON."""

    # URI schemes accepted by from_uri.
    SupportedSchemes = ['file']
    # Kept for compatibility with the original interface (unused internally).
    Header = struct.Struct("II")

    def __init__(self, data):
        # data: list of rows, each a sequence of depth values.
        self.data = data

    @classmethod
    def from_bytes(cls, width, height, data):
        """Build a DepthImage from a flat sequence of floats laid out row by row."""
        rows = [data[row * width:(row + 1) * width] for row in range(height)]
        return cls(rows)

    @classmethod
    def from_uri(cls, uri):
        """Read a DepthImage from a URI pointing at a BSON file."""
        parsed = URL(uri)
        if parsed.scheme not in cls.SupportedSchemes:
            raise ValueError(f"currently not supporting {parsed.scheme} uris")
        with open(parsed.path, "rb") as f:
            return cls.from_file(f)

    @classmethod
    def from_file(cls, f):
        """Build a DepthImage from an open binary file containing BSON documents."""
        documents = bson.decode_all(f.read())
        return cls(documents[0]['raw']['data'])

    @classmethod
    def from_bson_data(cls, data):
        """Build a DepthImage from an in-memory BSON byte string."""
        return cls.from_file(io.BytesIO(data))

    @property
    def width(self):
        # Width of the first row; 0 for an empty image.
        return len(self.data[0]) if self.data else 0

    @property
    def height(self):
        return len(self.data)

    @property
    def size(self):
        return self.width * self.height

    def _img_data(self):
        # Render the depth map to image bytes via a matplotlib figure.
        figure = pyplot.Figure()
        axes = figure.add_subplot(1, 1, 1)
        axes.imshow(numpy.array(self.data))
        buffer = io.BytesIO()
        figure.savefig(buffer)
        return bytes(buffer.getbuffer())

    def bson(self):
        """Return the BSON encoding: raw dimensions/values plus a rendered preview."""
        payload = dict(
            raw=dict(width=self.width, height=self.height, data=self.data),
            image=self._img_data())
        return bson.encode(payload)

    def save(self, fp):
        """Write the BSON encoding of this image to the given file object."""
        fp.write(self.bson())
| [
"io.BytesIO",
"struct.Struct",
"matplotlib.pyplot.Figure",
"urlpath.URL",
"PIL.Image.open",
"numpy.array",
"PIL.Image.frombytes"
] | [((1194, 1213), 'struct.Struct', 'struct.Struct', (['"""II"""'], {}), "('II')\n", (1207, 1213), False, 'import struct\n'), ((485, 493), 'urlpath.URL', 'URL', (['uri'], {}), '(uri)\n', (488, 493), False, 'from urlpath import URL\n'), ((1413, 1421), 'urlpath.URL', 'URL', (['uri'], {}), '(uri)\n', (1416, 1421), False, 'from urlpath import URL\n'), ((2192, 2207), 'matplotlib.pyplot.Figure', 'pyplot.Figure', ([], {}), '()\n', (2205, 2207), False, 'from matplotlib import pyplot\n'), ((2309, 2321), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2319, 2321), False, 'import io\n'), ((377, 422), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', '(width, height)', 'data'], {}), "('RGB', (width, height), data)\n", (392, 422), False, 'from PIL import Image\n'), ((640, 660), 'PIL.Image.open', 'Image.open', (['uri.path'], {}), '(uri.path)\n', (650, 660), False, 'from PIL import Image\n'), ((2266, 2288), 'numpy.array', 'numpy.array', (['self.data'], {}), '(self.data)\n', (2277, 2288), False, 'import numpy\n'), ((2922, 2938), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (2932, 2938), False, 'import io\n')] |
import unittest
import os
import numpy as np
import fv3gfs.wrapper
import fv3gfs.util
from mpi4py import MPI
from util import (
get_default_config,
get_state_single_variable,
main,
replace_state_with_random_values,
)
# Directory containing this test module; passed to the test driver below.
test_dir = os.path.dirname(os.path.abspath(__file__))


def select_ocean_values(*fields):
    """Yield each field restricted to ocean points (land_sea_mask close to 0)."""
    ocean_mask = np.isclose(get_state_single_variable("land_sea_mask"), 0.0)
    return (values[ocean_mask] for values in fields)
class PrescribeSSTTests(unittest.TestCase):
    """Tests that prescribing ocean surface temperature actually affects the model.

    These tests drive a running fv3gfs instance (MPI-parallel); each rank runs
    the same assertions and ranks synchronize in tearDown.
    """

    def __init__(self, *args, **kwargs):
        super(PrescribeSSTTests, self).__init__(*args, **kwargs)

    def setUp(self):
        pass

    def tearDown(self):
        # Keep MPI ranks in lockstep between tests.
        MPI.COMM_WORLD.barrier()

    def test_prescribing_sst_changes_model_state(self):
        """Stepping with a randomized SST must change the resulting air temperature."""
        checkpoint_state = fv3gfs.wrapper.get_state(fv3gfs.wrapper.get_restart_names())
        # If we do not set the sea surface temperature and
        # use_climatological_sst is set to .false., the sea surface temperature
        # will remain at what it was set to in the initial conditions for the
        # duration of the run.
        fv3gfs.wrapper.step()
        air_temperature_from_default_ocean_temperature = get_state_single_variable(
            "air_temperature"
        )
        # Rewind to the checkpoint, randomize SST, and step again for comparison.
        fv3gfs.wrapper.set_state(checkpoint_state)
        replace_state_with_random_values(["ocean_surface_temperature"])
        fv3gfs.wrapper.step()
        air_temperature_from_prescribed_ocean_temperature = get_state_single_variable(
            "air_temperature"
        )
        assert not np.allclose(
            air_temperature_from_default_ocean_temperature,
            air_temperature_from_prescribed_ocean_temperature,
        )

    def test_prescribing_sst_changes_surface_temperature_diagnostic(self):
        """The tsfc diagnostic over ocean must equal the prescribed SST after a step."""
        replaced_state = replace_state_with_random_values(["ocean_surface_temperature"])
        prescribed_sst = replaced_state["ocean_surface_temperature"].view[:]
        fv3gfs.wrapper.step()
        surface_temperature_diagnostic = fv3gfs.wrapper.get_diagnostic_by_name(
            "tsfc", module_name="gfs_sfc"
        ).view[:]
        # Compare only at ocean points; land/ice surface temperature is not prescribed.
        result, expected = select_ocean_values(
            surface_temperature_diagnostic, prescribed_sst
        )
        np.testing.assert_allclose(result, expected)
if __name__ == "__main__":
    # Run the suite with climatological SSTs disabled so the prescribed
    # ocean_surface_temperature values are actually used by the model.
    config = get_default_config()
    config["namelist"]["gfs_physics_nml"]["use_climatological_sst"] = False
    main(test_dir, config)
| [
"os.path.abspath",
"util.replace_state_with_random_values",
"numpy.allclose",
"util.get_state_single_variable",
"util.main",
"numpy.testing.assert_allclose",
"mpi4py.MPI.COMM_WORLD.barrier",
"util.get_default_config"
] | [((262, 287), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'import os\n'), ((2314, 2334), 'util.get_default_config', 'get_default_config', ([], {}), '()\n', (2332, 2334), False, 'from util import get_default_config, get_state_single_variable, main, replace_state_with_random_values\n'), ((2415, 2437), 'util.main', 'main', (['test_dir', 'config'], {}), '(test_dir, config)\n', (2419, 2437), False, 'from util import get_default_config, get_state_single_variable, main, replace_state_with_random_values\n'), ((351, 393), 'util.get_state_single_variable', 'get_state_single_variable', (['"""land_sea_mask"""'], {}), "('land_sea_mask')\n", (376, 393), False, 'from util import get_default_config, get_state_single_variable, main, replace_state_with_random_values\n'), ((669, 693), 'mpi4py.MPI.COMM_WORLD.barrier', 'MPI.COMM_WORLD.barrier', ([], {}), '()\n', (691, 693), False, 'from mpi4py import MPI\n'), ((1175, 1219), 'util.get_state_single_variable', 'get_state_single_variable', (['"""air_temperature"""'], {}), "('air_temperature')\n", (1200, 1219), False, 'from util import get_default_config, get_state_single_variable, main, replace_state_with_random_values\n'), ((1302, 1365), 'util.replace_state_with_random_values', 'replace_state_with_random_values', (["['ocean_surface_temperature']"], {}), "(['ocean_surface_temperature'])\n", (1334, 1365), False, 'from util import get_default_config, get_state_single_variable, main, replace_state_with_random_values\n'), ((1456, 1500), 'util.get_state_single_variable', 'get_state_single_variable', (['"""air_temperature"""'], {}), "('air_temperature')\n", (1481, 1500), False, 'from util import get_default_config, get_state_single_variable, main, replace_state_with_random_values\n'), ((1790, 1853), 'util.replace_state_with_random_values', 'replace_state_with_random_values', (["['ocean_surface_temperature']"], {}), "(['ocean_surface_temperature'])\n", (1822, 1853), False, 'from util import 
get_default_config, get_state_single_variable, main, replace_state_with_random_values\n'), ((2227, 2271), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2253, 2271), True, 'import numpy as np\n'), ((1543, 1657), 'numpy.allclose', 'np.allclose', (['air_temperature_from_default_ocean_temperature', 'air_temperature_from_prescribed_ocean_temperature'], {}), '(air_temperature_from_default_ocean_temperature,\n air_temperature_from_prescribed_ocean_temperature)\n', (1554, 1657), True, 'import numpy as np\n')] |
import time
import torch
import numpy as np
def train(model, dataloader, optimizer, criterion_signal,criterion_label,weight_label, clip,n_elements,len_y):
model.train()
total_epoch_loss = 0
epoch_loss_signal = 0
epoch_loss_label = 0
correct = 0
for i, batch in enumerate(dataloader):
x = batch[0].permute(0,2,1)
x_ci = batch[1]
y = batch[2].permute(0,2,1) #(batch,time,features)
y_mask = batch[3].permute(0,2,1).squeeze()
optimizer.zero_grad()
output ,_ = model(x, x_ci, y, y_mask)
output_signal = output[:,:,0] #Signal
output_label = output[:,:,1:5].permute(0,2,1) #segmentation
y_signal = y[:,:,0]
y_label = y[:,:,1:5].permute(0,2,1)
y_label = torch.argmax(y_label, 1)
loss_signal = criterion_signal(output_signal, y_signal)
loss_signal = torch.sum((loss_signal*y_mask)) / y_mask.sum()
loss_label = weight_label * criterion_label(output_label, y_label)
loss = loss_signal + loss_label
loss.backward()
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
#print(total_norm)
optimizer.step()
label_predicted = torch.argmax(output_label, 1)
correct += (label_predicted == y_label).sum().item()
epoch_loss_signal += loss_signal.item()
epoch_loss_label += loss_label.item()
total_epoch_loss += loss_signal.item() + loss_label.item()
total_epoch_loss = total_epoch_loss/n_elements
epoch_loss_signal = epoch_loss_signal/n_elements
epoch_loss_label = epoch_loss_label/n_elements
accuracy = (correct/(n_elements*len_y))*100
return total_epoch_loss, epoch_loss_signal, epoch_loss_label, accuracy
def evaluate(model, dataloader, criterion_signal, criterion_label, weight_label, n_elements, len_y):
    """Score the model on a dataloader without gradient tracking.

    Mirrors ``train`` but disables teacher forcing (last model argument 0)
    and performs no parameter updates. Returns (total_loss, signal_loss,
    label_loss, accuracy) averaged over ``n_elements`` samples.
    """
    model.eval()
    running_total = 0.0
    running_signal = 0.0
    running_label = 0.0
    n_correct = 0
    with torch.no_grad():
        for batch in dataloader:
            inputs = batch[0].permute(0, 2, 1)
            context = batch[1]
            targets = batch[2].permute(0, 2, 1)  # (batch, time, features)
            mask = batch[3].permute(0, 2, 1).squeeze()
            preds, _ = model(inputs, context, targets, mask, 0)  # teacher forcing off
            # Segmentation head: channels 1:5 as (batch, classes, time).
            pred_labels = preds[:, :, 1:5].permute(0, 2, 1)
            true_labels = torch.argmax(targets[:, :, 1:5].permute(0, 2, 1), 1)
            label_loss = weight_label * criterion_label(pred_labels, true_labels)
            n_correct += (torch.max(pred_labels.data, 1)[1] == true_labels).sum().item()
            # Signal head: channel 0, masked mean over valid time steps.
            pred_signal = preds[:, :, 0]
            true_signal = targets[:, :, 0]
            signal_loss = torch.sum(criterion_signal(pred_signal, true_signal) * mask) / mask.sum()
            running_signal += signal_loss.item()
            running_label += label_loss.item()
            running_total += signal_loss.item() + label_loss.item()
    return (running_total / n_elements,
            running_signal / n_elements,
            running_label / n_elements,
            (n_correct / (n_elements * len_y)) * 100)
def load_checkpoint(model, optimizer, scheduler, path, stage='validation'):
    """Restore model/optimizer/scheduler state and training history from disk.

    Parameters
    ----------
    model, optimizer, scheduler :
        Torch objects whose ``load_state_dict`` is called with the stored state.
    path : str
        Checkpoint file path. The history archive name is derived as
        ``path[:-3] + '_history_best[...].npz'``, so ``path`` is expected to end
        with a 3-character extension such as ``.pt``.
    stage : {'validation', 'final'}
        Which checkpoint flavour to load; ``'final'`` reads ``path + '_final'``.

    Returns
    -------
    tuple
        ``(model, optimizer, scheduler, epoch, best_valid_loss,
        loss_train_history, loss_val_history)``.

    Raises
    ------
    ValueError
        If ``stage`` is not supported. (Previously an unknown stage fell
        through and crashed with ``NameError`` on unbound locals.)
    """
    if stage not in ('validation', 'final'):
        raise ValueError(f"unknown stage {stage!r}; expected 'validation' or 'final'")
    if torch.cuda.is_available():
        # Map stored tensors onto the GPU as they are deserialized.
        map_location = lambda storage, loc: storage.cuda()
    else:
        map_location = 'cpu'
    if stage == 'validation':
        history = np.load(path[:-3] + '_history_best.npz', allow_pickle=True)
        checkpoint = torch.load(path, map_location=map_location)
    else:  # stage == 'final'
        history = np.load(path[:-3] + '_history_best_final.npz', allow_pickle=True)
        checkpoint = torch.load(path + '_final', map_location=map_location)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
    epoch = checkpoint['epoch']
    best_valid_loss = checkpoint['loss']
    # History rows: [train, validation]; truncate to the epochs actually run.
    loss_train_history = history['arr_0'][0][:epoch]
    loss_val_history = history['arr_0'][1][:epoch]
    return (model, optimizer, scheduler, epoch, best_valid_loss,
            loss_train_history, loss_val_history)
def fit(n_epochs,model,optimizer,scheduler,criterion_signal,criterion_label,weight_label,clip_val,
        train_dl,q_train,val_dl,q_val,final_len_y,model_save,save=False,final=False, e_i = 0, history=[]):
    """Train with per-epoch validation, best-checkpoint saving and early stopping.

    Returns ``(model, history)`` where ``history`` is a 2x4 array: rows are
    (train, validation) and columns are the per-epoch series (total loss,
    signal loss, label loss, accuracy). ``e_i``/``history`` allow resuming a
    previous run; ``save`` persists the best checkpoint to ``model_save``
    (suffixed ``_final`` when ``final`` is True). Early stopping triggers once
    ``patience`` exceeds 2.5x the scheduler's patience.
    NOTE(review): ``history=[]`` is a mutable default, but it is only read
    (and only when ``e_i != 0``), never mutated, so it is harmless here.
    """
    if e_i != 0:
        # Resuming: unpack the saved per-epoch series (indexing implies a
        # 2x4 numpy array as produced by a previous call — TODO confirm).
        #Train
        loss_train_history = history[0,0]
        loss_signal_train_history = history[0,1]
        loss_label_train_history = history[0,2]
        accuracy_train_history = history[0,3]
        #Valid
        loss_valid_history = history[1,0]
        loss_signal_valid_history = history[1,1]
        loss_label_valid_history = history[1,2]
        accuracy_valid_history = history[1,3]
        n_epochs = n_epochs + e_i
        patience = 0
        best_valid_loss = min(loss_valid_history)
    else:
        best_valid_loss = float('inf')
    for epoch in range(e_i,n_epochs):
        start_time = time.time()
        total_train_loss,signal_train_loss,label_train_loss, train_accuracy = train(model, train_dl, optimizer, criterion_signal,criterion_label,weight_label,clip_val,q_train,final_len_y)
        total_valid_loss,signal_valid_loss,label_valid_loss, valid_accuracy = evaluate(model, val_dl, criterion_signal,criterion_label,weight_label,q_val,final_len_y)
        # Plateau scheduler steps on the validation loss.
        scheduler.step(total_valid_loss)
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        if epoch == 0:
            # First epoch: seed the history series with scalars.
            #Train
            loss_train_history = total_train_loss
            loss_signal_train_history = signal_train_loss
            loss_label_train_history = label_train_loss
            accuracy_train_history = train_accuracy
            #Valid
            loss_valid_history = total_valid_loss
            loss_signal_valid_history = signal_valid_loss
            loss_label_valid_history = label_valid_loss
            accuracy_valid_history = valid_accuracy
            patience = 0
        else:
            #Train
            loss_train_history = np.append(loss_train_history, total_train_loss)
            loss_signal_train_history = np.append(loss_signal_train_history, signal_train_loss)
            loss_label_train_history = np.append(loss_label_train_history, label_train_loss)
            accuracy_train_history = np.append(accuracy_train_history, train_accuracy)
            #Valid
            loss_valid_history = np.append(loss_valid_history, total_valid_loss)
            loss_signal_valid_history = np.append(loss_signal_valid_history, signal_valid_loss)
            loss_label_valid_history = np.append(loss_label_valid_history, label_valid_loss)
            accuracy_valid_history = np.append(accuracy_valid_history, valid_accuracy)
        if total_valid_loss < best_valid_loss:
            # New best validation loss: reset patience and optionally checkpoint.
            best_valid_loss = total_valid_loss
            patience = 0
            if save:
                if final:
                    torch.save({'epoch':epoch,'model_state_dict': model.state_dict(),'optimizer_state_dict': optimizer.state_dict(),'loss': best_valid_loss,'scheduler_state_dict': scheduler.state_dict()}, model_save+'_final')
                    history = np.array([[loss_train_history,loss_signal_train_history,loss_label_train_history,accuracy_train_history],
                        [loss_valid_history,loss_signal_valid_history,loss_label_valid_history,accuracy_valid_history]])
                    np.savez(model_save[:-3]+'_history_best_final',history)
                else:
                    torch.save({'epoch':epoch,'model_state_dict': model.state_dict(),'optimizer_state_dict': optimizer.state_dict(),
                                'loss': best_valid_loss,'scheduler_state_dict': scheduler.state_dict()}, model_save)
                    history = np.array([[loss_train_history,loss_signal_train_history,loss_label_train_history,accuracy_train_history],
                        [loss_valid_history,loss_signal_valid_history,loss_label_valid_history,accuracy_valid_history]])
                    np.savez(model_save[:-3]+'_history_best',history)
        else:
            patience += 1
        print('Epoch:{}|Patience: {}|Time:{}:{}s|TT_Loss: {:.8f}|TV_Loss: {:.8f}|ST_Loss: {:.8f}|SV_Loss: {:.8f}|LT_Loss: {:.8f}|LV_Loss: {:.8f}|Acc_T: {:.1f}%|Acc_V: {:.1f}%|Min_V_Loss: {:.8f} '.format(
            epoch, patience,epoch_mins,epoch_secs,total_train_loss, total_valid_loss,signal_train_loss,signal_valid_loss,label_train_loss,label_valid_loss,train_accuracy,valid_accuracy, best_valid_loss))
        #EarlyStopping
        if patience > scheduler.patience*2 + scheduler.patience/2:
            history = np.asarray([[loss_train_history,loss_signal_train_history,loss_label_train_history,accuracy_train_history],
                [loss_valid_history,loss_signal_valid_history,loss_label_valid_history,accuracy_valid_history]])
            return model, history
    # Normal termination after the full epoch budget.
    history = np.asarray([[loss_train_history,loss_signal_train_history,loss_label_train_history,accuracy_train_history],
                [loss_valid_history,loss_signal_valid_history,loss_label_valid_history,accuracy_valid_history]])
    return model, history
def predict(model, dataloader,criterion_signal,criterion_label, weight_label,final_len_x,final_len_y):
    """Run inference over a dataloader, collecting predictions and attention maps.

    Returns ``(input, predictions, total_loss, signal_loss, label_loss,
    accuracy, attentions)``. Unlike ``evaluate``, the signal loss here is
    averaged over the *predicted* non-background mask rather than the target
    mask, and attention weights are zeroed outside that predicted mask.
    NOTE(review): ``input`` shadows the builtin of the same name; kept as-is
    since it is part of the returned tuple's established naming.
    """
    model.eval()
    n_elements = len(dataloader.dataset)
    num_batches = len(dataloader)
    batch_size = dataloader.batch_size
    # Pre-allocated output buffers, filled batch slice by batch slice.
    predictions = torch.zeros(n_elements,final_len_y,5)
    attentions = torch.zeros(n_elements,final_len_x,final_len_y)
    input = torch.zeros(n_elements,final_len_x,2)
    total_epoch_loss = 0
    epoch_loss_signal = 0
    epoch_loss_label = 0
    correct = 0
    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            x = batch[0].permute(0,2,1)
            x_ci = batch[1]
            y = batch[2].permute(0,2,1) #(batch,time,features)
            y_mask = batch[3].permute(0,2,1).squeeze()
            # TODO(review): the target mask is overwritten with all-ones here;
            # the original author flagged this line with a "correct?" comment.
            y_mask = torch.ones_like(y_mask)
            # Slice of the output buffers covered by this batch (last batch
            # may be smaller than batch_size).
            start = i*batch_size
            end = start + batch_size
            if i == num_batches - 1:
                end = n_elements
            output, att_weight = model(x,x_ci, y,y_mask, 0) #turn off teacher forcing
            # Segmentation head: channels 1:5 as (batch, classes, time).
            output_label = output[:,:,1:5].permute(0,2,1)
            y_label = y[:,:,1:5].permute(0,2,1)
            y_label = torch.argmax(y_label, 1)
            loss_label = weight_label * criterion_label(output_label, y_label)
            _, label_predicted = torch.max(output_label.data, 1)
            correct += (label_predicted == y_label).sum().item()
            # Zero attention outside the predicted non-background region.
            mask_predicted = label_predicted>0
            mask_predicted_att = mask_predicted.unsqueeze(1).repeat(1,att_weight.size(1),1)
            att_weight = att_weight * mask_predicted_att
            attentions[start:end] = att_weight
            predictions[start:end] = output
            input[start:end] = x[:,:,:2]
            output_signal = output[:,:,0]
            y_signal = y[:,:,0]
            # Signal loss: masked mean over the predicted mask.
            loss_signal = criterion_signal(output_signal, y_signal)
            loss_signal = torch.sum((loss_signal*mask_predicted)) / mask_predicted.sum()
            loss = loss_signal + loss_label
            epoch_loss_signal += loss_signal.item()
            epoch_loss_label += loss_label.item()
            total_epoch_loss += loss_signal.item() + loss_label.item()
    # Average the accumulated losses over the number of samples.
    total_epoch_loss = total_epoch_loss/n_elements
    epoch_loss_signal = epoch_loss_signal/n_elements
    epoch_loss_label = epoch_loss_label/n_elements
    accuracy = (correct/(n_elements*final_len_y))*100
    return input, predictions,total_epoch_loss, epoch_loss_signal, epoch_loss_label,accuracy,attentions
def epoch_time(start_time, end_time):
    """Split the wall time between two timestamps into whole (minutes, seconds)."""
    total = end_time - start_time
    minutes = int(total / 60)
    return minutes, int(total - minutes * 60)
| [
"torch.ones_like",
"numpy.load",
"torch.argmax",
"numpy.asarray",
"torch.load",
"time.time",
"numpy.append",
"torch.cuda.is_available",
"torch.max",
"numpy.array",
"torch.zeros",
"numpy.savez",
"torch.no_grad",
"torch.sum"
] | [((3316, 3341), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3339, 3341), False, 'import torch\n'), ((8667, 8889), 'numpy.asarray', 'np.asarray', (['[[loss_train_history, loss_signal_train_history, loss_label_train_history,\n accuracy_train_history], [loss_valid_history, loss_signal_valid_history,\n loss_label_valid_history, accuracy_valid_history]]'], {}), '([[loss_train_history, loss_signal_train_history,\n loss_label_train_history, accuracy_train_history], [loss_valid_history,\n loss_signal_valid_history, loss_label_valid_history,\n accuracy_valid_history]])\n', (8677, 8889), True, 'import numpy as np\n'), ((9161, 9200), 'torch.zeros', 'torch.zeros', (['n_elements', 'final_len_y', '(5)'], {}), '(n_elements, final_len_y, 5)\n', (9172, 9200), False, 'import torch\n'), ((9214, 9263), 'torch.zeros', 'torch.zeros', (['n_elements', 'final_len_x', 'final_len_y'], {}), '(n_elements, final_len_x, final_len_y)\n', (9225, 9263), False, 'import torch\n'), ((9272, 9311), 'torch.zeros', 'torch.zeros', (['n_elements', 'final_len_x', '(2)'], {}), '(n_elements, final_len_x, 2)\n', (9283, 9311), False, 'import torch\n'), ((707, 731), 'torch.argmax', 'torch.argmax', (['y_label', '(1)'], {}), '(y_label, 1)\n', (719, 731), False, 'import torch\n'), ((1128, 1157), 'torch.argmax', 'torch.argmax', (['output_label', '(1)'], {}), '(output_label, 1)\n', (1140, 1157), False, 'import torch\n'), ((1842, 1857), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1855, 1857), False, 'import torch\n'), ((3470, 3529), 'numpy.load', 'np.load', (["(path[:-3] + '_history_best.npz')"], {'allow_pickle': '(True)'}), "(path[:-3] + '_history_best.npz', allow_pickle=True)\n", (3477, 3529), True, 'import numpy as np\n'), ((3544, 3587), 'torch.load', 'torch.load', (['path'], {'map_location': 'map_location'}), '(path, map_location=map_location)\n', (3554, 3587), False, 'import torch\n'), ((3624, 3689), 'numpy.load', 'np.load', (["(path[:-3] + '_history_best_final.npz')"], 
{'allow_pickle': '(True)'}), "(path[:-3] + '_history_best_final.npz', allow_pickle=True)\n", (3631, 3689), True, 'import numpy as np\n'), ((3704, 3758), 'torch.load', 'torch.load', (["(path + '_final')"], {'map_location': 'map_location'}), "(path + '_final', map_location=map_location)\n", (3714, 3758), False, 'import torch\n'), ((4975, 4986), 'time.time', 'time.time', ([], {}), '()\n', (4984, 4986), False, 'import time\n'), ((5386, 5397), 'time.time', 'time.time', ([], {}), '()\n', (5395, 5397), False, 'import time\n'), ((9402, 9417), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9415, 9417), False, 'import torch\n'), ((811, 842), 'torch.sum', 'torch.sum', (['(loss_signal * y_mask)'], {}), '(loss_signal * y_mask)\n', (820, 842), False, 'import torch\n'), ((2268, 2292), 'torch.argmax', 'torch.argmax', (['y_label', '(1)'], {}), '(y_label, 1)\n', (2280, 2292), False, 'import torch\n'), ((2393, 2424), 'torch.max', 'torch.max', (['output_label.data', '(1)'], {}), '(output_label.data, 1)\n', (2402, 2424), False, 'import torch\n'), ((5967, 6014), 'numpy.append', 'np.append', (['loss_train_history', 'total_train_loss'], {}), '(loss_train_history, total_train_loss)\n', (5976, 6014), True, 'import numpy as np\n'), ((6049, 6104), 'numpy.append', 'np.append', (['loss_signal_train_history', 'signal_train_loss'], {}), '(loss_signal_train_history, signal_train_loss)\n', (6058, 6104), True, 'import numpy as np\n'), ((6138, 6191), 'numpy.append', 'np.append', (['loss_label_train_history', 'label_train_loss'], {}), '(loss_label_train_history, label_train_loss)\n', (6147, 6191), True, 'import numpy as np\n'), ((6223, 6272), 'numpy.append', 'np.append', (['accuracy_train_history', 'train_accuracy'], {}), '(accuracy_train_history, train_accuracy)\n', (6232, 6272), True, 'import numpy as np\n'), ((6313, 6360), 'numpy.append', 'np.append', (['loss_valid_history', 'total_valid_loss'], {}), '(loss_valid_history, total_valid_loss)\n', (6322, 6360), True, 'import numpy as np\n'), 
((6395, 6450), 'numpy.append', 'np.append', (['loss_signal_valid_history', 'signal_valid_loss'], {}), '(loss_signal_valid_history, signal_valid_loss)\n', (6404, 6450), True, 'import numpy as np\n'), ((6484, 6537), 'numpy.append', 'np.append', (['loss_label_valid_history', 'label_valid_loss'], {}), '(loss_label_valid_history, label_valid_loss)\n', (6493, 6537), True, 'import numpy as np\n'), ((6569, 6618), 'numpy.append', 'np.append', (['accuracy_valid_history', 'valid_accuracy'], {}), '(accuracy_valid_history, valid_accuracy)\n', (6578, 6618), True, 'import numpy as np\n'), ((8387, 8609), 'numpy.asarray', 'np.asarray', (['[[loss_train_history, loss_signal_train_history, loss_label_train_history,\n accuracy_train_history], [loss_valid_history, loss_signal_valid_history,\n loss_label_valid_history, accuracy_valid_history]]'], {}), '([[loss_train_history, loss_signal_train_history,\n loss_label_train_history, accuracy_train_history], [loss_valid_history,\n loss_signal_valid_history, loss_label_valid_history,\n accuracy_valid_history]])\n', (8397, 8609), True, 'import numpy as np\n'), ((9656, 9679), 'torch.ones_like', 'torch.ones_like', (['y_mask'], {}), '(y_mask)\n', (9671, 9679), False, 'import torch\n'), ((9986, 10010), 'torch.argmax', 'torch.argmax', (['y_label', '(1)'], {}), '(y_label, 1)\n', (9998, 10010), False, 'import torch\n'), ((10111, 10142), 'torch.max', 'torch.max', (['output_label.data', '(1)'], {}), '(output_label.data, 1)\n', (10120, 10142), False, 'import torch\n'), ((2688, 2719), 'torch.sum', 'torch.sum', (['(loss_signal * y_mask)'], {}), '(loss_signal * y_mask)\n', (2697, 2719), False, 'import torch\n'), ((10657, 10696), 'torch.sum', 'torch.sum', (['(loss_signal * mask_predicted)'], {}), '(loss_signal * mask_predicted)\n', (10666, 10696), False, 'import torch\n'), ((6996, 7216), 'numpy.array', 'np.array', (['[[loss_train_history, loss_signal_train_history, loss_label_train_history,\n accuracy_train_history], [loss_valid_history, 
loss_signal_valid_history,\n loss_label_valid_history, accuracy_valid_history]]'], {}), '([[loss_train_history, loss_signal_train_history,\n loss_label_train_history, accuracy_train_history], [loss_valid_history,\n loss_signal_valid_history, loss_label_valid_history,\n accuracy_valid_history]])\n', (7004, 7216), True, 'import numpy as np\n'), ((7237, 7295), 'numpy.savez', 'np.savez', (["(model_save[:-3] + '_history_best_final')", 'history'], {}), "(model_save[:-3] + '_history_best_final', history)\n", (7245, 7295), True, 'import numpy as np\n'), ((7557, 7777), 'numpy.array', 'np.array', (['[[loss_train_history, loss_signal_train_history, loss_label_train_history,\n accuracy_train_history], [loss_valid_history, loss_signal_valid_history,\n loss_label_valid_history, accuracy_valid_history]]'], {}), '([[loss_train_history, loss_signal_train_history,\n loss_label_train_history, accuracy_train_history], [loss_valid_history,\n loss_signal_valid_history, loss_label_valid_history,\n accuracy_valid_history]])\n', (7565, 7777), True, 'import numpy as np\n'), ((7800, 7852), 'numpy.savez', 'np.savez', (["(model_save[:-3] + '_history_best')", 'history'], {}), "(model_save[:-3] + '_history_best', history)\n", (7808, 7852), True, 'import numpy as np\n')] |
from .. import otherm
import numpy as np
import os
here = os.path.dirname(os.path.abspath(__file__))
methane_path = os.path.join(here, 'data', 'methane.out')
def test_methane():
methane = otherm.Molecule(methane_path)
# ORCA 4.0 defaulted to sigma_r = 4 and 1 atmosphere standard state
methane.calculate_thermochemistry(ss='1atm',
symm_n=3)
expected_g = -40.39166850 * otherm.Constants.ha_to_j_mol
# Difference should be less than 1 J mol-1
assert np.abs(methane.g - expected_g) < 1
def test_methane_gaussian():
methane = otherm.Molecule(methane_path)
# Populate parameters from a Gaussian09 calculation
methane.freqs = (6 * [0.0] +
[1308.0624, 1308.0891, 1308.1077, 1530.3570, 1530.3806,
3046.1934, 3198.8181, 3198.8521, 3198.9170])[::-1]
methane.xyzs = [['C', -0.000002, 0.000041, 0.000008],
['H', -0.637549, -0.828986, -0.334635],
['H', -0.444245, 0.953621, -0.314719],
['H', 0.083441, -0.021253, 1.094689],
['H', 0.998355, -0.103324, -0.445342]]
methane.shift_to_com()
expected_i_mat = np.array([[-0.34825, 0.18296, 0.91937],
[0.67773, 0.72672, 0.11209],
[0.64762, -0.66212, 0.37707]])
i_mat = otherm.calc_moments_of_inertia(methane.xyzs)
assert np.sum(i_mat - expected_i_mat) < 1E-6
methane.e = -40.4294662 * otherm.Constants.ha_to_j_mol
expected_g = -40.404434 * otherm.Constants.ha_to_j_mol
# Thermochemistry not calculated with symmetry
methane.calculate_thermochemistry(ss='1atm',
method='igm',
symm_n=1)
# Slightly poorer agreement with Gaussian but still within 0.5 kJ mol-1
assert np.abs(methane.g - expected_g) < 4
def test_calc_ss():
methane_1atm = otherm.Molecule(methane_path)
methane_1atm.calculate_thermochemistry(ss='1atm')
methane_1m = otherm.Molecule(methane_path)
methane_1m.calculate_thermochemistry(ss='1M')
delta = methane_1m.g - methane_1atm.g
# Expected additive amount it 1.9 kcal mol-1
assert 1.8 < otherm.Constants.j_to_kcal * delta < 2
| [
"os.path.abspath",
"numpy.abs",
"numpy.sum",
"numpy.array",
"os.path.join"
] | [((117, 158), 'os.path.join', 'os.path.join', (['here', '"""data"""', '"""methane.out"""'], {}), "(here, 'data', 'methane.out')\n", (129, 158), False, 'import os\n'), ((74, 99), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (89, 99), False, 'import os\n'), ((1214, 1318), 'numpy.array', 'np.array', (['[[-0.34825, 0.18296, 0.91937], [0.67773, 0.72672, 0.11209], [0.64762, -\n 0.66212, 0.37707]]'], {}), '([[-0.34825, 0.18296, 0.91937], [0.67773, 0.72672, 0.11209], [\n 0.64762, -0.66212, 0.37707]])\n', (1222, 1318), True, 'import numpy as np\n'), ((517, 547), 'numpy.abs', 'np.abs', (['(methane.g - expected_g)'], {}), '(methane.g - expected_g)\n', (523, 547), True, 'import numpy as np\n'), ((1445, 1475), 'numpy.sum', 'np.sum', (['(i_mat - expected_i_mat)'], {}), '(i_mat - expected_i_mat)\n', (1451, 1475), True, 'import numpy as np\n'), ((1891, 1921), 'numpy.abs', 'np.abs', (['(methane.g - expected_g)'], {}), '(methane.g - expected_g)\n', (1897, 1921), True, 'import numpy as np\n')] |
# http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/
# http://learningtensorflow.com/index.html
# http://suriyadeepan.github.io/2016-12-31-practical-seq2seq/
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import pprint
pp = pprint.PrettyPrinter(indent=4)
sess = tf.InteractiveSession()
# One hot encoding for each char in 'hello'
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]
with tf.variable_scope('one_cell') as scope:
# One cell RNN input_dim (4) -> output_dim (2)
hidden_size = 2
cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
print(cell.output_size, cell.state_size)
x_data = np.array([[h]], dtype=np.float32) # x_data = [[[1,0,0,0]]]
pp.pprint(x_data)
outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval())
with tf.variable_scope('two_sequances') as scope:
# One cell RNN input_dim (4) -> output_dim (2). sequence: 5
hidden_size = 2
cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
x_data = np.array([[h, e, l, l, o]], dtype=np.float32)
print(x_data.shape)
pp.pprint(x_data)
outputs, states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval())
with tf.variable_scope('3_batches') as scope:
# One cell RNN input_dim (4) -> output_dim (2). sequence: 5, batch 3
# 3 batches 'hello', 'eolll', 'lleel'
x_data = np.array([[h, e, l, l, o],
[e, o, l, l, l],
[l, l, e, e, l]], dtype=np.float32)
pp.pprint(x_data)
hidden_size = 2
cell = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
outputs, _states = tf.nn.dynamic_rnn(
cell, x_data, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval())
with tf.variable_scope('3_batches_dynamic_length') as scope:
# One cell RNN input_dim (4) -> output_dim (5). sequence: 5, batch 3
# 3 batches 'hello', 'eolll', 'lleel'
x_data = np.array([[h, e, l, l, o],
[e, o, l, l, l],
[l, l, e, e, l]], dtype=np.float32)
pp.pprint(x_data)
hidden_size = 2
cell = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
outputs, _states = tf.nn.dynamic_rnn(
cell, x_data, sequence_length=[5, 3, 4], dtype=tf.float32)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval())
with tf.variable_scope('initial_state') as scope:
batch_size = 3
x_data = np.array([[h, e, l, l, o],
[e, o, l, l, l],
[l, l, e, e, l]], dtype=np.float32)
pp.pprint(x_data)
# One cell RNN input_dim (4) -> output_dim (5). sequence: 5, batch: 3
hidden_size = 2
cell = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, _states = tf.nn.dynamic_rnn(cell, x_data,
initial_state=initial_state, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval())
# Create input data
batch_size=3
sequence_length=5
input_dim=3
x_data = np.arange(45, dtype=np.float32).reshape(batch_size, sequence_length, input_dim)
pp.pprint(x_data) # batch, sequence_length, input_dim
with tf.variable_scope('generated_data') as scope:
# One cell RNN input_dim (3) -> output_dim (5). sequence: 5, batch: 3
cell = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, _states = tf.nn.dynamic_rnn(cell, x_data,
initial_state=initial_state, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval())
with tf.variable_scope('MultiRNNCell') as scope:
# Make rnn
cell = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
cell = rnn.MultiRNNCell([cell] * 3, state_is_tuple=True) # 3 layers
# rnn in/out
outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
print("dynamic rnn: ", outputs)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval()) # batch size, unrolling (time), hidden_size
with tf.variable_scope('dynamic_rnn') as scope:
cell = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32,
sequence_length=[1, 3, 2])
# lentgh 1 for batch 1, lentgh 2 for batch 2
print("dynamic rnn: ", outputs)
sess.run(tf.global_variables_initializer())
pp.pprint(outputs.eval()) # batch size, unrolling (time), hidden_size
with tf.variable_scope('bi-directional') as scope:
# bi-directional rnn
cell_fw = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
cell_bw = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, x_data,
sequence_length=[2, 3, 1],
dtype=tf.float32)
sess.run(tf.global_variables_initializer())
pp.pprint(sess.run(outputs))
pp.pprint(sess.run(states))
# flattern based softmax
hidden_size=3
sequence_length=5
batch_size=3
num_classes=5
pp.pprint(x_data) # hidden_size=3, sequence_length=4, batch_size=2
x_data = x_data.reshape(-1, hidden_size)
pp.pprint(x_data)
softmax_w = np.arange(15, dtype=np.float32).reshape(hidden_size, num_classes)
outputs = np.matmul(x_data, softmax_w)
outputs = outputs.reshape(-1, sequence_length, num_classes) # batch, seq, class
pp.pprint(outputs)
# [batch_size, sequence_length]
y_data = tf.constant([[1, 1, 1]])
# [batch_size, sequence_length, emb_dim ]
prediction = tf.constant([[[0.2, 0.7], [0.6, 0.2], [0.2, 0.9]]], dtype=tf.float32)
# [batch_size * sequence_length]
weights = tf.constant([[1, 1, 1]], dtype=tf.float32)
sequence_loss = tf.contrib.seq2seq.sequence_loss(logits=prediction, targets=y_data, weights=weights)
sess.run(tf.global_variables_initializer())
print("Loss: ", sequence_loss.eval())
# [batch_size, sequence_length]
y_data = tf.constant([[1, 1, 1]])
# [batch_size, sequence_length, emb_dim ]
prediction1 = tf.constant([[[0.3, 0.7], [0.3, 0.7], [0.3, 0.7]]], dtype=tf.float32)
prediction2 = tf.constant([[[0.1, 0.9], [0.1, 0.9], [0.1, 0.9]]], dtype=tf.float32)
prediction3 = tf.constant([[[1, 0], [1, 0], [1, 0]]], dtype=tf.float32)
prediction4 = tf.constant([[[0, 1], [1, 0], [0, 1]]], dtype=tf.float32)
# [batch_size * sequence_length]
weights = tf.constant([[1, 1, 1]], dtype=tf.float32)
sequence_loss1 = tf.contrib.seq2seq.sequence_loss(prediction1, y_data, weights)
sequence_loss2 = tf.contrib.seq2seq.sequence_loss(prediction2, y_data, weights)
sequence_loss3 = tf.contrib.seq2seq.sequence_loss(prediction3, y_data, weights)
sequence_loss4 = tf.contrib.seq2seq.sequence_loss(prediction3, y_data, weights)
sess.run(tf.global_variables_initializer())
print("Loss1: ", sequence_loss1.eval(),
"Loss2: ", sequence_loss2.eval(),
"Loss3: ", sequence_loss3.eval(),
"Loss4: ", sequence_loss4.eval()) | [
"tensorflow.contrib.rnn.BasicRNNCell",
"tensorflow.nn.dynamic_rnn",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.constant",
"pprint.PrettyPrinter",
"tensorflow.contrib.rnn.BasicLSTMCell",
"numpy.array",
"tensorflow.contrib.seq2seq.sequence_loss",
"numpy.matmu... | [((299, 329), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (319, 329), False, 'import pprint\n'), ((337, 360), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (358, 360), True, 'import tensorflow as tf\n'), ((5723, 5751), 'numpy.matmul', 'np.matmul', (['x_data', 'softmax_w'], {}), '(x_data, softmax_w)\n', (5732, 5751), True, 'import numpy as np\n'), ((5893, 5917), 'tensorflow.constant', 'tf.constant', (['[[1, 1, 1]]'], {}), '([[1, 1, 1]])\n', (5904, 5917), True, 'import tensorflow as tf\n'), ((5974, 6043), 'tensorflow.constant', 'tf.constant', (['[[[0.2, 0.7], [0.6, 0.2], [0.2, 0.9]]]'], {'dtype': 'tf.float32'}), '([[[0.2, 0.7], [0.6, 0.2], [0.2, 0.9]]], dtype=tf.float32)\n', (5985, 6043), True, 'import tensorflow as tf\n'), ((6088, 6130), 'tensorflow.constant', 'tf.constant', (['[[1, 1, 1]]'], {'dtype': 'tf.float32'}), '([[1, 1, 1]], dtype=tf.float32)\n', (6099, 6130), True, 'import tensorflow as tf\n'), ((6148, 6237), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', ([], {'logits': 'prediction', 'targets': 'y_data', 'weights': 'weights'}), '(logits=prediction, targets=y_data, weights\n =weights)\n', (6180, 6237), True, 'import tensorflow as tf\n'), ((6357, 6381), 'tensorflow.constant', 'tf.constant', (['[[1, 1, 1]]'], {}), '([[1, 1, 1]])\n', (6368, 6381), True, 'import tensorflow as tf\n'), ((6439, 6508), 'tensorflow.constant', 'tf.constant', (['[[[0.3, 0.7], [0.3, 0.7], [0.3, 0.7]]]'], {'dtype': 'tf.float32'}), '([[[0.3, 0.7], [0.3, 0.7], [0.3, 0.7]]], dtype=tf.float32)\n', (6450, 6508), True, 'import tensorflow as tf\n'), ((6523, 6592), 'tensorflow.constant', 'tf.constant', (['[[[0.1, 0.9], [0.1, 0.9], [0.1, 0.9]]]'], {'dtype': 'tf.float32'}), '([[[0.1, 0.9], [0.1, 0.9], [0.1, 0.9]]], dtype=tf.float32)\n', (6534, 6592), True, 'import tensorflow as tf\n'), ((6608, 6665), 'tensorflow.constant', 'tf.constant', (['[[[1, 0], [1, 0], [1, 0]]]'], 
{'dtype': 'tf.float32'}), '([[[1, 0], [1, 0], [1, 0]]], dtype=tf.float32)\n', (6619, 6665), True, 'import tensorflow as tf\n'), ((6680, 6737), 'tensorflow.constant', 'tf.constant', (['[[[0, 1], [1, 0], [0, 1]]]'], {'dtype': 'tf.float32'}), '([[[0, 1], [1, 0], [0, 1]]], dtype=tf.float32)\n', (6691, 6737), True, 'import tensorflow as tf\n'), ((6782, 6824), 'tensorflow.constant', 'tf.constant', (['[[1, 1, 1]]'], {'dtype': 'tf.float32'}), '([[1, 1, 1]], dtype=tf.float32)\n', (6793, 6824), True, 'import tensorflow as tf\n'), ((6843, 6905), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', (['prediction1', 'y_data', 'weights'], {}), '(prediction1, y_data, weights)\n', (6875, 6905), True, 'import tensorflow as tf\n'), ((6923, 6985), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', (['prediction2', 'y_data', 'weights'], {}), '(prediction2, y_data, weights)\n', (6955, 6985), True, 'import tensorflow as tf\n'), ((7003, 7065), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', (['prediction3', 'y_data', 'weights'], {}), '(prediction3, y_data, weights)\n', (7035, 7065), True, 'import tensorflow as tf\n'), ((7083, 7145), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', (['prediction3', 'y_data', 'weights'], {}), '(prediction3, y_data, weights)\n', (7115, 7145), True, 'import tensorflow as tf\n'), ((480, 509), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""one_cell"""'], {}), "('one_cell')\n", (497, 509), True, 'import tensorflow as tf\n'), ((602, 652), 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', ([], {'num_units': 'hidden_size'}), '(num_units=hidden_size)\n', (629, 652), True, 'import tensorflow as tf\n'), ((712, 745), 'numpy.array', 'np.array', (['[[h]]'], {'dtype': 'np.float32'}), '([[h]], dtype=np.float32)\n', (720, 745), True, 'import numpy as np\n'), ((816, 865), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', 
(['cell', 'x_data'], {'dtype': 'tf.float32'}), '(cell, x_data, dtype=tf.float32)\n', (833, 865), True, 'import tensorflow as tf\n'), ((951, 985), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""two_sequances"""'], {}), "('two_sequances')\n", (968, 985), True, 'import tensorflow as tf\n'), ((1091, 1141), 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', ([], {'num_units': 'hidden_size'}), '(num_units=hidden_size)\n', (1118, 1141), True, 'import tensorflow as tf\n'), ((1155, 1200), 'numpy.array', 'np.array', (['[[h, e, l, l, o]]'], {'dtype': 'np.float32'}), '([[h, e, l, l, o]], dtype=np.float32)\n', (1163, 1200), True, 'import numpy as np\n'), ((1269, 1318), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x_data'], {'dtype': 'tf.float32'}), '(cell, x_data, dtype=tf.float32)\n', (1286, 1318), True, 'import tensorflow as tf\n'), ((1403, 1433), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""3_batches"""'], {}), "('3_batches')\n", (1420, 1433), True, 'import tensorflow as tf\n'), ((1572, 1651), 'numpy.array', 'np.array', (['[[h, e, l, l, o], [e, o, l, l, l], [l, l, e, e, l]]'], {'dtype': 'np.float32'}), '([[h, e, l, l, o], [e, o, l, l, l], [l, l, e, e, l]], dtype=np.float32)\n', (1580, 1651), True, 'import numpy as np\n'), ((1752, 1813), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': 'hidden_size', 'state_is_tuple': '(True)'}), '(num_units=hidden_size, state_is_tuple=True)\n', (1769, 1813), False, 'from tensorflow.contrib import rnn\n'), ((1837, 1886), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x_data'], {'dtype': 'tf.float32'}), '(cell, x_data, dtype=tf.float32)\n', (1854, 1886), True, 'import tensorflow as tf\n'), ((1980, 2025), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""3_batches_dynamic_length"""'], {}), "('3_batches_dynamic_length')\n", (1997, 2025), True, 'import tensorflow as tf\n'), ((2164, 2243), 'numpy.array', 'np.array', (['[[h, e, l, l, o], [e, o, 
l, l, l], [l, l, e, e, l]]'], {'dtype': 'np.float32'}), '([[h, e, l, l, o], [e, o, l, l, l], [l, l, e, e, l]], dtype=np.float32)\n', (2172, 2243), True, 'import numpy as np\n'), ((2344, 2405), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': 'hidden_size', 'state_is_tuple': '(True)'}), '(num_units=hidden_size, state_is_tuple=True)\n', (2361, 2405), False, 'from tensorflow.contrib import rnn\n'), ((2429, 2505), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x_data'], {'sequence_length': '[5, 3, 4]', 'dtype': 'tf.float32'}), '(cell, x_data, sequence_length=[5, 3, 4], dtype=tf.float32)\n', (2446, 2505), True, 'import tensorflow as tf\n'), ((2599, 2633), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""initial_state"""'], {}), "('initial_state')\n", (2616, 2633), True, 'import tensorflow as tf\n'), ((2676, 2755), 'numpy.array', 'np.array', (['[[h, e, l, l, o], [e, o, l, l, l], [l, l, e, e, l]]'], {'dtype': 'np.float32'}), '([[h, e, l, l, o], [e, o, l, l, l], [l, l, e, e, l]], dtype=np.float32)\n', (2684, 2755), True, 'import numpy as np\n'), ((2930, 2991), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': 'hidden_size', 'state_is_tuple': '(True)'}), '(num_units=hidden_size, state_is_tuple=True)\n', (2947, 2991), False, 'from tensorflow.contrib import rnn\n'), ((3075, 3153), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x_data'], {'initial_state': 'initial_state', 'dtype': 'tf.float32'}), '(cell, x_data, initial_state=initial_state, dtype=tf.float32)\n', (3092, 3153), True, 'import tensorflow as tf\n'), ((3489, 3524), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generated_data"""'], {}), "('generated_data')\n", (3506, 3524), True, 'import tensorflow as tf\n'), ((3620, 3671), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': '(5)', 'state_is_tuple': '(True)'}), '(num_units=5, state_is_tuple=True)\n', (3637, 3671), False, 'from 
tensorflow.contrib import rnn\n'), ((3755, 3833), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x_data'], {'initial_state': 'initial_state', 'dtype': 'tf.float32'}), '(cell, x_data, initial_state=initial_state, dtype=tf.float32)\n', (3772, 3833), True, 'import tensorflow as tf\n'), ((3959, 3992), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""MultiRNNCell"""'], {}), "('MultiRNNCell')\n", (3976, 3992), True, 'import tensorflow as tf\n'), ((4029, 4080), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': '(5)', 'state_is_tuple': '(True)'}), '(num_units=5, state_is_tuple=True)\n', (4046, 4080), False, 'from tensorflow.contrib import rnn\n'), ((4092, 4141), 'tensorflow.contrib.rnn.MultiRNNCell', 'rnn.MultiRNNCell', (['([cell] * 3)'], {'state_is_tuple': '(True)'}), '([cell] * 3, state_is_tuple=True)\n', (4108, 4141), False, 'from tensorflow.contrib import rnn\n'), ((4194, 4243), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x_data'], {'dtype': 'tf.float32'}), '(cell, x_data, dtype=tf.float32)\n', (4211, 4243), True, 'import tensorflow as tf\n'), ((4409, 4441), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dynamic_rnn"""'], {}), "('dynamic_rnn')\n", (4426, 4441), True, 'import tensorflow as tf\n'), ((4463, 4514), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': '(5)', 'state_is_tuple': '(True)'}), '(num_units=5, state_is_tuple=True)\n', (4480, 4514), False, 'from tensorflow.contrib import rnn\n'), ((4538, 4614), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x_data'], {'dtype': 'tf.float32', 'sequence_length': '[1, 3, 2]'}), '(cell, x_data, dtype=tf.float32, sequence_length=[1, 3, 2])\n', (4555, 4614), True, 'import tensorflow as tf\n'), ((4871, 4906), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bi-directional"""'], {}), "('bi-directional')\n", (4888, 4906), True, 'import tensorflow as tf\n'), ((4956, 5007), 
'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': '(5)', 'state_is_tuple': '(True)'}), '(num_units=5, state_is_tuple=True)\n', (4973, 5007), False, 'from tensorflow.contrib import rnn\n'), ((5022, 5073), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', ([], {'num_units': '(5)', 'state_is_tuple': '(True)'}), '(num_units=5, state_is_tuple=True)\n', (5039, 5073), False, 'from tensorflow.contrib import rnn\n'), ((5097, 5204), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['cell_fw', 'cell_bw', 'x_data'], {'sequence_length': '[2, 3, 1]', 'dtype': 'tf.float32'}), '(cell_fw, cell_bw, x_data, sequence_length=[\n 2, 3, 1], dtype=tf.float32)\n', (5128, 5204), True, 'import tensorflow as tf\n'), ((6242, 6275), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6273, 6275), True, 'import tensorflow as tf\n'), ((7156, 7189), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7187, 7189), True, 'import tensorflow as tf\n'), ((880, 913), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (911, 913), True, 'import tensorflow as tf\n'), ((1332, 1365), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1363, 1365), True, 'import tensorflow as tf\n'), ((1909, 1942), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1940, 1942), True, 'import tensorflow as tf\n'), ((2528, 2561), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2559, 2561), True, 'import tensorflow as tf\n'), ((3208, 3241), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3239, 3241), True, 'import tensorflow as tf\n'), ((3347, 3378), 'numpy.arange', 'np.arange', (['(45)'], {'dtype': 'np.float32'}), '(45, 
dtype=np.float32)\n', (3356, 3378), True, 'import numpy as np\n'), ((3888, 3921), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3919, 3921), True, 'import tensorflow as tf\n'), ((4293, 4326), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4324, 4326), True, 'import tensorflow as tf\n'), ((4755, 4788), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4786, 4788), True, 'import tensorflow as tf\n'), ((5322, 5355), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5353, 5355), True, 'import tensorflow as tf\n'), ((5647, 5678), 'numpy.arange', 'np.arange', (['(15)'], {'dtype': 'np.float32'}), '(15, dtype=np.float32)\n', (5656, 5678), True, 'import numpy as np\n')] |
import os
import pickle
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
file_dir = os.path.dirname(os.path.abspath(__file__)) + '/KeywordsDataset'
class KeywordsDataset(Dataset):
"""
Keyword Spotting dataset (preprocessed MFCC feature vectors)
"""
classes = ['0 - unknown', '1 - one', '2 - two', '3 - three']
onehot_to_int = lambda _,x: np.argmax(x, axis=1)
def __init__(self, root, train=True, transform=None, target_transform=None):
"""
Args:
dataset_pkl (string): Path to the pkl file with data and annotations.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.dataset_file = os.path.join(root, 'IA_DATASET')
self.train = train # training set or test set
self.transform = transform
self.target_transform = target_transform
if not self._check_exists():
raise RuntimeError('Dataset not found.')
with open(self.dataset_file, 'rb') as f:
raw_dict = pickle.load(f)
if self.train:
self.data_train = raw_dict['x_train']
self.targets_train = self.onehot_to_int(raw_dict['y_train'])
self.data_val = raw_dict['x_val']
self.targets_val = self.onehot_to_int(raw_dict['y_val'])
self.data = np.concatenate([self.data_val, self.data_train], axis=0)
self.targets = np.concatenate([self.targets_val, self.targets_train], axis=0)
else:
self.data = raw_dict['x_test']
self.targets = self.onehot_to_int(raw_dict['y_test'])
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
def _check_exists(self):
return os.path.exists(self.dataset_file)
if __name__ == "__main__":
# some small sanity checks
kw_dataset = KeywordsDataset(file_dir, train=True)
assert len(kw_dataset) == 33046
kw_dataset = KeywordsDataset(file_dir, train=False)
assert len(kw_dataset) == 3672
try: # insert wrong path
kw_dataset = KeywordsDataset(os.path.dirname(file_dir), train=False)
except RuntimeError:
pass # Dataset not found.
| [
"os.path.abspath",
"numpy.argmax",
"os.path.dirname",
"os.path.exists",
"pickle.load",
"os.path.join",
"numpy.concatenate"
] | [((130, 155), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (145, 155), False, 'import os\n'), ((389, 409), 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (398, 409), True, 'import numpy as np\n'), ((750, 782), 'os.path.join', 'os.path.join', (['root', '"""IA_DATASET"""'], {}), "(root, 'IA_DATASET')\n", (762, 782), False, 'import os\n'), ((2273, 2306), 'os.path.exists', 'os.path.exists', (['self.dataset_file'], {}), '(self.dataset_file)\n', (2287, 2306), False, 'import os\n'), ((1106, 1120), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1117, 1120), False, 'import pickle\n'), ((1415, 1471), 'numpy.concatenate', 'np.concatenate', (['[self.data_val, self.data_train]'], {'axis': '(0)'}), '([self.data_val, self.data_train], axis=0)\n', (1429, 1471), True, 'import numpy as np\n'), ((1499, 1561), 'numpy.concatenate', 'np.concatenate', (['[self.targets_val, self.targets_train]'], {'axis': '(0)'}), '([self.targets_val, self.targets_train], axis=0)\n', (1513, 1561), True, 'import numpy as np\n'), ((2615, 2640), 'os.path.dirname', 'os.path.dirname', (['file_dir'], {}), '(file_dir)\n', (2630, 2640), False, 'import os\n')] |
# CTOK Converts Celsius to Kelvin
import numpy as np
def CtoK(C):
K = np.array(C) + 273.15
return K | [
"numpy.array"
] | [((74, 85), 'numpy.array', 'np.array', (['C'], {}), '(C)\n', (82, 85), True, 'import numpy as np\n')] |
import pynbody
import pylab
import numpy as np
import matplotlib.pylab as plt
import astropy.units as u
#Now I need a code that will load the simulation (s will stand for simulation)
s = pynbody.load('/media/jillian/cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096')
h=s.halos()
#The following code will change the units to make it more appealing
s.physical_units()
def findBH(s):
BH = s.stars[pynbody.filt.LowPass('tform', 0.0)]
return BH
pynbody.analysis.angmom.faceon(h[5])
BH = findBH(s)
BH_pos = BH['pos']
BHx = BH_pos[:,0]
BHy = BH_pos[:,1]
BHz = BH_pos[:,2]
BH_position = np.array([BHx[0], BHy[0], BHz[0]])
radius= 0.5 #kpc
sphere = pynbody.filt.Sphere(radius, cen =(BH_position))
#sphere=pynbody.analysis.halo.shrink_sphere_center(s, r=None, shrink_factor=0.7, min_particles=100)
stars = s.stars[0:]
in_sphere = stars[sphere]
total_stars = len(in_sphere)
print("Total stars: ",total_stars)
| [
"pynbody.filt.Sphere",
"pynbody.load",
"pynbody.analysis.angmom.faceon",
"numpy.array",
"pynbody.filt.LowPass"
] | [((190, 324), 'pynbody.load', 'pynbody.load', (['"""/media/jillian/cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096"""'], {}), "(\n '/media/jillian/cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096'\n )\n", (202, 324), False, 'import pynbody\n'), ((497, 533), 'pynbody.analysis.angmom.faceon', 'pynbody.analysis.angmom.faceon', (['h[5]'], {}), '(h[5])\n', (527, 533), False, 'import pynbody\n'), ((636, 670), 'numpy.array', 'np.array', (['[BHx[0], BHy[0], BHz[0]]'], {}), '([BHx[0], BHy[0], BHz[0]])\n', (644, 670), True, 'import numpy as np\n'), ((698, 742), 'pynbody.filt.Sphere', 'pynbody.filt.Sphere', (['radius'], {'cen': 'BH_position'}), '(radius, cen=BH_position)\n', (717, 742), False, 'import pynbody\n'), ((446, 480), 'pynbody.filt.LowPass', 'pynbody.filt.LowPass', (['"""tform"""', '(0.0)'], {}), "('tform', 0.0)\n", (466, 480), False, 'import pynbody\n')] |
import pandas as pd
import pathlib
import imageio
import numpy as np
import skimage
from utils.imaging import get_path, get_image_ids
from tqdm import tqdm
from scipy import ndimage
from skimage.color import rgb2gray
from skimage.filters import threshold_otsu
def rle_encoding(x):
'''
Performs run length encoding on an array
Args:
x (ndarray): numpy array of shape (height, width), 1 - mask, 0 - background
Returns:
(list): run length as list
'''
dots = np.where(x.T.flatten()==1)[0]
run_lengths = []
prev = -2
for b in dots:
if (b > prev+1): run_lengths.extend((b+1, 0))
run_lengths[-1] += 1
prev = b
return " ".join([str(i) for i in run_lengths])
def rle_image(labels_image, image_id):
'''
Take a labelled image and image id then perform rle and return a pandas dataframe
Args:
labels_image (ndarray): a sequentially labelled image
Return:
df_image (dataframe): data frame of ImageId, Encoding
'''
num_labels = np.amax(labels_image)
df_image = pd.DataFrame(columns=['ImageId','EncodedPixels'])
for label_num in range(1, num_labels+1):
label_mask = np.where(labels_image == label_num, 1, 0)
if label_mask.flatten().sum() > 10:
rle = rle_encoding(label_mask)
rle_series = pd.Series({'ImageId': image_id[:-4], 'EncodedPixels': rle})
df_image = df_image.append(rle_series, ignore_index=True)
return df_image
def rle_images_in_dir(image_type='test', stage_num = 1):
'''
Performs rle on all labelled images in a directory
Arguments:
image_type (str): training or test data
stage_num (int): stage number of the data
'''
stage_num = str(stage_num)
input_path = get_path('output_' + image_type + '_' + stage_num + '_lab_seg')
image_ids = get_image_ids(input_path)
output_path = get_path('output_' + image_type + '_' + stage_num)
df_all = pd.DataFrame()
for idx, image_id in tqdm(enumerate(image_ids), total=len(image_ids)):
image_dir = input_path + image_id
image = skimage.io.imread(image_dir)
df_image = rle_image(image, image_id)
df_all = df_all.append(df_image, ignore_index=True)
#print('encoded image %d of %d, image: %s \n' % \
# (idx + 1, len(image_ids), image_id[:-4]))
#df_all.to_csv(output_path + 'rle_submission.csv', index=None)
return df_all
if __name__ == '__main__':
df = rle_images_in_dir(image_type = 'test', stage_num = 1)
| [
"pandas.DataFrame",
"skimage.io.imread",
"numpy.amax",
"numpy.where",
"utils.imaging.get_image_ids",
"pandas.Series",
"utils.imaging.get_path"
] | [((1041, 1062), 'numpy.amax', 'np.amax', (['labels_image'], {}), '(labels_image)\n', (1048, 1062), True, 'import numpy as np\n'), ((1078, 1128), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ImageId', 'EncodedPixels']"}), "(columns=['ImageId', 'EncodedPixels'])\n", (1090, 1128), True, 'import pandas as pd\n'), ((1791, 1854), 'utils.imaging.get_path', 'get_path', (["('output_' + image_type + '_' + stage_num + '_lab_seg')"], {}), "('output_' + image_type + '_' + stage_num + '_lab_seg')\n", (1799, 1854), False, 'from utils.imaging import get_path, get_image_ids\n'), ((1871, 1896), 'utils.imaging.get_image_ids', 'get_image_ids', (['input_path'], {}), '(input_path)\n', (1884, 1896), False, 'from utils.imaging import get_path, get_image_ids\n'), ((1915, 1965), 'utils.imaging.get_path', 'get_path', (["('output_' + image_type + '_' + stage_num)"], {}), "('output_' + image_type + '_' + stage_num)\n", (1923, 1965), False, 'from utils.imaging import get_path, get_image_ids\n'), ((1980, 1994), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1992, 1994), True, 'import pandas as pd\n'), ((1194, 1235), 'numpy.where', 'np.where', (['(labels_image == label_num)', '(1)', '(0)'], {}), '(labels_image == label_num, 1, 0)\n', (1202, 1235), True, 'import numpy as np\n'), ((2128, 2156), 'skimage.io.imread', 'skimage.io.imread', (['image_dir'], {}), '(image_dir)\n', (2145, 2156), False, 'import skimage\n'), ((1348, 1407), 'pandas.Series', 'pd.Series', (["{'ImageId': image_id[:-4], 'EncodedPixels': rle}"], {}), "({'ImageId': image_id[:-4], 'EncodedPixels': rle})\n", (1357, 1407), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
"""
Functions
---------
"""
import numpy
import scipy.linalg
from sklearn.preprocessing import normalize
def preprocess(*dataSets, p = None, center = True, normalize = True):
    """Center, normalize and (optionally) rank-reduce each data set.

    Every data set is factored as ``P @ S`` where ``P`` has orthonormal
    columns: a truncated SVD when ``p`` is given, a thin QR otherwise.

    Parameters
    ----------
    *dataSets : one (n_samples, n_features) array per view.
    p : int or None
        Target rank for the truncated SVD; ``None`` keeps full rank (QR).
    center : bool
        Subtract column means first.
    normalize : bool
        L2-normalize every row (same convention as
        ``sklearn.preprocessing.normalize``; zero rows are left as-is).

    Returns
    -------
    (Ps, Ss) : two lists with one entry per input data set.
    """
    Ps = []
    Ss = []
    for data in dataSets:
        # BUG FIX: `*dataSets` is an immutable tuple, so the original
        # `dataSets[i] = ...` raised TypeError; work on a local copy.
        A = numpy.asarray(data, dtype=float)
        if center:  # Center before or after PCA?
            A = A - A.mean(axis=0)
        if normalize:  # Normalize before or after PCA?
            # The `normalize` parameter shadows the sklearn import, so the
            # row normalization is replicated with numpy here.
            norms = numpy.linalg.norm(A, axis=1, keepdims=True)
            norms[norms == 0.0] = 1.0
            A = A / norms
        if p is not None and 0 < p < min(A.shape):
            U, s, Vt = scipy.linalg.svd(A, full_matrices=False)
            # Keep the leading p components (the original referenced the
            # undefined names sA/VAt and index-assigned into empty lists).
            Ps.append(U[:, :p])
            Ss.append(numpy.diag(s[:p]) @ Vt[:p])
        else:  # Check if really faster for p = None
            P, S = scipy.linalg.qr(A, mode="economic")
            Ps.append(P)
            Ss.append(S)
    return Ps, Ss
def postprocess(s, Vp, Ps, q = None):
    """Back-project canonical weights and keep the leading ``q`` pairs.

    Parameters
    ----------
    s : 1-D array of canonical correlations (descending).
    Vp : list of per-data-set weight matrices in the compressed space.
    Ps : list of projection bases returned by ``preprocess``.
    q : int or None
        Number of canonical pairs to keep (``None`` keeps all).

    Returns
    -------
    (s, V) : the (possibly truncated) correlations and a list of the
    back-projected weight matrices, one per data set.
    """
    # BUG FIX: the original overwrote a single `V` on every iteration and
    # multiplied by the whole `Vp` list instead of `Vp[i]`.
    V = [Ps[i] @ Vp[i] for i in range(len(Vp))]
    if q is not None and 0 < q < len(s):
        s = s[:q]
        # Truncate the columns (canonical pairs), not the rows.
        V = [v[:, :q] for v in V]
    return s, V
def kettenringmatrix(Ss):
    """Assemble the Kettenring block matrix of all pairwise cross products.

    Block (j, k) is ``Ss[j] @ Ss[k].T`` for j != k and a square zero
    block on the diagonal.

    Returns
    -------
    (K, r, m) : the block matrix, the number of data sets ``r`` and the
    minimum dimension ``m`` of the first compressed representation.
    """
    r = len(Ss)
    m = min(Ss[0].shape)
    rows = []
    for j in range(r):
        cells = []
        for k in range(r):
            if j == k:
                # Square zero block matching the row/column partition
                # (the original used Ss[j].shape, only valid for square S).
                cells.append(numpy.zeros((Ss[j].shape[0], Ss[k].shape[0])))
            else:
                # BUG FIX: `Ss[j].Ss[k].T` was an attribute lookup; a
                # matrix product is intended here.
                cells.append(Ss[j] @ Ss[k].T)
        # BUG FIX: the original index-assigned into empty lists.
        rows.append(numpy.concatenate(cells, axis=1))
    K = numpy.concatenate(rows, axis=0)
    return K, r, m
def kettenringsvd(K, r, m):
    """Eigen-decompose the Kettenring matrix and split the eigenvectors.

    Returns
    -------
    (s, V) : the leading ``m`` (subsampled, descending) eigenvalues and a
    list of ``r`` row-normalized eigenvector blocks, one per data set.
    """
    l, v = scipy.linalg.eig(K)
    # NOTE(review): keeps every r-th eigenvalue/eigenvector -- confirm
    # this subsampling of the eigendecomposition is intended.
    l = l[::r]
    argsort = numpy.flip(numpy.argsort(l), 0)
    l = l[argsort]
    l = l[:m]
    s = l
    v = v[:, ::r] # or !length?
    v = v[:, argsort]
    V = []
    for l in range(r):
        Vn = normalize(v[(m * l):(m * (l + 1)), :m])
        # BUG FIX: the original discarded Vn, always returning an empty V.
        V.append(Vn)
    return s, V
def gcca(*dataSets, p = None, q = None, center = True, normalize = True):
    """Generalized canonical correlation analysis (Kettenring style).

    Parameters mirror ``preprocess`` plus ``q``, the number of canonical
    pairs to keep in the output.

    Returns
    -------
    (s, V) : canonical correlations and one weight matrix per data set.
    """
    # Add validation
    # BUG FIX: the data sets must be re-expanded with * and the options
    # passed by keyword; the original `preprocess(dataSets, p, center,
    # normalize)` packed all four positionals into *dataSets, so p/center/
    # normalize were silently treated as extra data sets.
    Ps, Ss = preprocess(*dataSets, p=p, center=center, normalize=normalize)
    K, r, m = kettenringmatrix(Ss)
    s, Vp = kettenringsvd(K, r, m)
    s, V = postprocess(s, Vp, Ps, q)
    return s, V
def svd2(A):  # Only for testing, remove later
    """SVD of A through the eigendecomposition of the symmetric
    embedding [[0, A], [A.T, 0]].  Returns (U, s, Vt)."""
    m, n = A.shape
    # Build the symmetric block embedding of A.
    top = numpy.concatenate((numpy.zeros((m, m)), A), axis=1)
    bottom = numpy.concatenate((A.T, numpy.zeros((n, n))), axis=1)
    embedding = numpy.concatenate((top, bottom), axis=0)
    eigvals, eigvecs = scipy.linalg.eig(embedding)
    # Keep every second eigenpair and order by decreasing eigenvalue.
    eigvals = eigvals[::2]
    order = numpy.flip(numpy.argsort(eigvals), 0)
    s = eigvals[order][:n]
    eigvecs = eigvecs[:, ::2]
    eigvecs = eigvecs[:, order]
    # Top m rows carry the left vectors, the rest the right vectors.
    U = normalize(eigvecs[:m, :n])
    V = normalize(eigvecs[m:, :n])
    return U, s, V.T
def svd3(A, B, C):  # Only for testing, remove later
    """Joint decomposition of three matrices via the eigendecomposition
    of the block embedding [[0, A, C.T], [A.T, 0, B], [C, B.T, 0]].

    All three matrices must share their number of columns.

    Returns
    -------
    (s, U, V, W) : subsampled eigenvalues and the three row-normalized
    eigenvector blocks.
    """
    mA, nA = A.shape
    mB, nB = B.shape
    mC, nC = C.shape
    if not (nA == nB == nC):
        # Fail loudly instead of printing and crashing later on the
        # mismatched concatenation.
        raise ValueError("A, B and C must have the same number of columns")
    zeros1 = numpy.zeros((mA, mA))
    zeros2 = numpy.zeros((mB, mB))
    zeros3 = numpy.zeros((mC, mC))
    row1 = numpy.concatenate((zeros1, A, C.T), axis = 1)
    row2 = numpy.concatenate((A.T, zeros2, B), axis = 1)
    # BUG FIX: the original assigned this block to row2 a second time and
    # then concatenated the undefined name row3.
    row3 = numpy.concatenate((C, B.T, zeros3), axis = 1)
    P = numpy.concatenate((row1, row2, row3), axis = 0)
    l, v = scipy.linalg.eig(P)
    l = l[::3]
    argsort = numpy.flip(numpy.argsort(l), 0)
    l = l[argsort]
    l = l[:nA]
    v = v[:,::3]
    v = v[:,argsort]
    # NOTE(review): the slices v[:mA], v[mA:mB], v[mB:] only partition the
    # rows when mB/mC happen to line up -- confirm v[mA:mA+mB] was meant.
    U = normalize(v[:mA,:nA])
    V = normalize(v[mA:mB,:nA])
    W = normalize(v[mB:,:nA])
    s = l
    return s, U, V, W
"numpy.zeros",
"numpy.argsort",
"sklearn.preprocessing.normalize",
"numpy.diag",
"numpy.concatenate"
] | [((1258, 1289), 'numpy.concatenate', 'numpy.concatenate', (['rows'], {'axis': '(0)'}), '(rows, axis=0)\n', (1275, 1289), False, 'import numpy\n'), ((1936, 1955), 'numpy.zeros', 'numpy.zeros', (['(m, m)'], {}), '((m, m))\n', (1947, 1955), False, 'import numpy\n'), ((1966, 1985), 'numpy.zeros', 'numpy.zeros', (['(n, n)'], {}), '((n, n))\n', (1977, 1985), False, 'import numpy\n'), ((1994, 2032), 'numpy.concatenate', 'numpy.concatenate', (['(zeros1, A)'], {'axis': '(1)'}), '((zeros1, A), axis=1)\n', (2011, 2032), False, 'import numpy\n'), ((2043, 2083), 'numpy.concatenate', 'numpy.concatenate', (['(A.T, zeros2)'], {'axis': '(1)'}), '((A.T, zeros2), axis=1)\n', (2060, 2083), False, 'import numpy\n'), ((2091, 2130), 'numpy.concatenate', 'numpy.concatenate', (['(row1, row2)'], {'axis': '(0)'}), '((row1, row2), axis=0)\n', (2108, 2130), False, 'import numpy\n'), ((2282, 2302), 'sklearn.preprocessing.normalize', 'normalize', (['v[:m, :n]'], {}), '(v[:m, :n])\n', (2291, 2302), False, 'from sklearn.preprocessing import normalize\n'), ((2308, 2328), 'sklearn.preprocessing.normalize', 'normalize', (['v[m:, :n]'], {}), '(v[m:, :n])\n', (2317, 2328), False, 'from sklearn.preprocessing import normalize\n'), ((2564, 2585), 'numpy.zeros', 'numpy.zeros', (['(mA, mA)'], {}), '((mA, mA))\n', (2575, 2585), False, 'import numpy\n'), ((2596, 2617), 'numpy.zeros', 'numpy.zeros', (['(mB, mB)'], {}), '((mB, mB))\n', (2607, 2617), False, 'import numpy\n'), ((2628, 2649), 'numpy.zeros', 'numpy.zeros', (['(mC, mC)'], {}), '((mC, mC))\n', (2639, 2649), False, 'import numpy\n'), ((2658, 2701), 'numpy.concatenate', 'numpy.concatenate', (['(zeros1, A, C.T)'], {'axis': '(1)'}), '((zeros1, A, C.T), axis=1)\n', (2675, 2701), False, 'import numpy\n'), ((2712, 2755), 'numpy.concatenate', 'numpy.concatenate', (['(A.T, zeros2, B)'], {'axis': '(1)'}), '((A.T, zeros2, B), axis=1)\n', (2729, 2755), False, 'import numpy\n'), ((2766, 2809), 'numpy.concatenate', 'numpy.concatenate', (['(C, B.T, zeros3)'], 
{'axis': '(1)'}), '((C, B.T, zeros3), axis=1)\n', (2783, 2809), False, 'import numpy\n'), ((2817, 2862), 'numpy.concatenate', 'numpy.concatenate', (['(row1, row2, row3)'], {'axis': '(0)'}), '((row1, row2, row3), axis=0)\n', (2834, 2862), False, 'import numpy\n'), ((3013, 3035), 'sklearn.preprocessing.normalize', 'normalize', (['v[:mA, :nA]'], {}), '(v[:mA, :nA])\n', (3022, 3035), False, 'from sklearn.preprocessing import normalize\n'), ((3040, 3064), 'sklearn.preprocessing.normalize', 'normalize', (['v[mA:mB, :nA]'], {}), '(v[mA:mB, :nA])\n', (3049, 3064), False, 'from sklearn.preprocessing import normalize\n'), ((3069, 3091), 'sklearn.preprocessing.normalize', 'normalize', (['v[mB:, :nA]'], {}), '(v[mB:, :nA])\n', (3078, 3091), False, 'from sklearn.preprocessing import normalize\n'), ((1218, 1250), 'numpy.concatenate', 'numpy.concatenate', (['cells'], {'axis': '(1)'}), '(cells, axis=1)\n', (1235, 1250), False, 'import numpy\n'), ((1399, 1415), 'numpy.argsort', 'numpy.argsort', (['l'], {}), '(l)\n', (1412, 1415), False, 'import numpy\n'), ((1537, 1572), 'sklearn.preprocessing.normalize', 'normalize', (['v[m * l:m * (l + 1), :m]'], {}), '(v[m * l:m * (l + 1), :m])\n', (1546, 1572), False, 'from sklearn.preprocessing import normalize\n'), ((2195, 2211), 'numpy.argsort', 'numpy.argsort', (['l'], {}), '(l)\n', (2208, 2211), False, 'import numpy\n'), ((2927, 2943), 'numpy.argsort', 'numpy.argsort', (['l'], {}), '(l)\n', (2940, 2943), False, 'import numpy\n'), ((420, 442), 'sklearn.preprocessing.normalize', 'normalize', (['dataSets[i]'], {}), '(dataSets[i])\n', (429, 442), False, 'from sklearn.preprocessing import normalize\n'), ((633, 647), 'numpy.diag', 'numpy.diag', (['sA'], {}), '(sA)\n', (643, 647), False, 'import numpy\n'), ((1143, 1167), 'numpy.zeros', 'numpy.zeros', (['Ss[j].shape'], {}), '(Ss[j].shape)\n', (1154, 1167), False, 'import numpy\n')] |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
Given coordinates of grids in real space, evaluate the GTO values and MO
orbital values on these grids.
See also
pyscf/examples/dft/30-ao_value_on_grid.py
pyscf/examples/pbc/30-ao_value_on_grid.py
'''
import numpy
from pyscf import lib, gto, scf
# Build an HF molecule with a minimal basis and run restricted Hartree-Fock.
mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='sto3g')
mf = scf.RHF(mol).run()
# 100 random evaluation points in real space.
coords = numpy.random.random((100,3))
#
# AO values and MO values on given grids
#
ao = mol.eval_gto('GTOval_sph', coords)
mo = ao.dot(mf.mo_coeff)
#
# AO values and MO gradients on given grids
#
ao_grad = mol.eval_gto('GTOval_ip_sph', coords) # (3,Ngrids,n) array
mo_grad = [x.dot(mf.mo_coeff) for x in ao_grad]
#
# AO values and gradients and higher order derivatives can be computed
# simultaneously to reduce cost.
#
# deriv=0: orbital value
ao = mol.eval_gto('GTOval_sph_deriv0', coords)
# deriv=1: orbital value + gradients
ao_p = mol.eval_gto('GTOval_sph_deriv1', coords) # (4,Ngrids,n) array
ao = ao_p[0]
ao_grad = ao_p[1:4] # x, y, z
# deriv=2: value + gradients + second order derivatives
ao_p = mol.eval_gto('GTOval_sph_deriv2', coords) # (10,Ngrids,n) array
ao = ao_p[0]
ao_grad = ao_p[1:4] # x, y, z
ao_hess = ao_p[4:10] # xx, xy, xz, yy, yz, zz
# deriv=3: value + gradients + second order derivatives + third order
ao_p = mol.eval_gto('GTOval_sph_deriv3', coords) # (20,Ngrids,n) array
ao = ao_p[0]
ao_grad = ao_p[1:4] # x, y, z
ao_hess = ao_p[4:10] # xx, xy, xz, yy, yz, zz
# NOTE(review): deriv3 yields 20 = 1+3+6+10 components, so the ten
# third-order terms listed in the comment span ao_p[10:20]; the slice
# ao_p[10:15] keeps only five of them -- confirm which was intended.
ao_3rd = ao_p[10:15] # xxx, xxy, xxz, xyy, xyz, xzz, yyy, yyz, yzz, zzz
# deriv=4: ...
| [
"pyscf.scf.RHF",
"pyscf.gto.M",
"numpy.random.random"
] | [((313, 358), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""H 0 0 0; F 0 0 1"""', 'basis': '"""sto3g"""'}), "(atom='H 0 0 0; F 0 0 1', basis='sto3g')\n", (318, 358), False, 'from pyscf import lib, gto, scf\n'), ((393, 422), 'numpy.random.random', 'numpy.random.random', (['(100, 3)'], {}), '((100, 3))\n', (412, 422), False, 'import numpy\n'), ((364, 376), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (371, 376), False, 'from pyscf import lib, gto, scf\n')] |
import numpy as np
from numpy.linalg import norm
from math import *
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
from random import random
from scipy.spatial import ConvexHull
from matplotlib import path
import time
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
from tools import init_fonts
from path_shortening import shorten_path
def isCollisionFreeVertex(obstacles, point):
    """Return 1 when `point` lies outside every axis-aligned box obstacle,
    0 as soon as it falls inside (or on the boundary of) one."""
    px, py, pz = point
    for box in obstacles:
        half_x, half_y, half_z = (d / 2 for d in box.dimensions)
        cx, cy, cz = box.pose
        # Inside test against the box centred at (cx, cy, cz).
        if (abs(px - cx) <= half_x
                and abs(py - cy) <= half_y
                and abs(pz - cz) <= half_z):
            return 0
    return 1
def isCollisionFreeEdge(obstacles, closest_vert, p):
    """Return a truthy value when every interior sample on the straight
    segment from `closest_vert` to `p` is collision free, False otherwise."""
    start = np.array(closest_vert)
    end = np.array(p)
    collFree = True
    # Sample density scales with the segment length (1 cm resolution),
    # with a floor of 20 samples for very short edges.
    map_resolution = 0.01
    M = int(norm(start - end) / map_resolution)
    if M <= 2:
        M = 20
    # Check every interior interpolation point along the segment.
    for t in np.linspace(0, 1, M)[1:-1]:
        sample = (1 - t) * start + t * end  # interpolated configuration
        collFree = isCollisionFreeVertex(obstacles, sample)
        if collFree == False:
            return False
    return collFree
class Node3D:
    """One RRT vertex: an xyz position plus tree-index bookkeeping."""
    def __init__(self):
        # position, own index in the tree list, and the parent's index
        self.p, self.i, self.iPrev = [0, 0, 0], 0, 0
def closestNode3D(rrt, p):
    """Return the tree node whose position is nearest (Euclidean) to `p`.

    Single-pass `min` with a distance key replaces the original
    build-array-then-rescan (`distance.tolist().index(dmin)`); ties still
    resolve to the earliest node, matching the original behaviour.
    """
    return min(rrt, key=lambda node: sqrt((p[0] - node.p[0])**2
                                          + (p[1] - node.p[1])**2
                                          + (p[2] - node.p[2])**2))
def plot_point3D(p, color='blue'):
    """Scatter a single xyz point on the module-level 3-D axes `ax`."""
    ax.scatter3D(p[0], p[1], p[2], color=color)
# Add Obstacles
class Parallelepiped:
    """Axis-aligned box obstacle described by its extents and centre pose."""
    def __init__(self):
        self.dimensions = [0, 0, 0]   # box extents along x, y, z
        self.pose = [0, 0, 0]         # centre of the box
        self.verts = self.vertixes()  # faces for the initial (zero-size) box

    def vertixes(self):
        """Return the six faces of the box, each a list of four corner points."""
        hx, hy, hz = (d / 2 for d in self.dimensions)
        # Eight corners, centred on the pose.
        corners = np.array([[-hx, -hy, -hz],
                            [hx, -hy, -hz],
                            [hx, hy, -hz],
                            [-hx, hy, -hz],
                            [-hx, -hy, hz],
                            [hx, -hy, hz],
                            [hx, hy, hz],
                            [-hx, hy, hz]]) + np.array(self.pose)
        # Each face is a quad listed by corner indices.
        face_indices = [[0, 1, 2, 3], [4, 5, 6, 7], [0, 1, 5, 4],
                        [2, 3, 7, 6], [1, 2, 6, 5], [4, 7, 3, 0]]
        return [[corners[i] for i in face] for face in face_indices]

    def draw(self, ax):
        """Render the box on the given 3-D axes as translucent black faces."""
        ax.add_collection3d(Poly3DCollection(self.vertixes(), facecolors='k',
                                             linewidths=1, edgecolors='k', alpha=.25))
### Obstacles ###
def add_obstacle(obstacles, pose, dim):
    """Append a new box obstacle with the given centre pose and extents
    to `obstacles` and return the (mutated) list."""
    box = Parallelepiped()
    box.dimensions = dim
    box.pose = pose
    obstacles.append(box)
    return obstacles
# obstacles_poses = [ [-0.8, 0., 1.5], [ 1., 0., 1.5], [ 0., 1., 1.5], [ 0.,-1., 1.5] ]
# obstacles_dims = [ [1.4, 1.0, 0.2], [1.0, 1.0, 0.2], [3.0, 1.0, 0.2], [3.0, 1.0, 0.2] ]
obstacles_poses = [ [-0.8, 0., 1.5], [ 0., 1., 1.5], [ 0.,-1., 1.5] ]
obstacles_dims = [ [1.4, 1.0, 0.3], [3.0, 1.0, 0.3], [3.0, 1.0, 0.3] ]
obstacles = []
for pose, dim in zip(obstacles_poses, obstacles_dims):
    obstacles = add_obstacle(obstacles, pose, dim)
##################
# Set up the 3-D figure and draw the static obstacles.
init_fonts()
fig = plt.figure(figsize=(15,15))
ax = plt.axes(projection='3d')
ax.set_xlabel('X, [m]')
ax.set_ylabel('Y, [m]')
ax.set_zlabel('Z, [m]')
ax.set_xlim([-2.5, 2.5])
ax.set_ylim([-2.5, 2.5])
ax.set_zlim([0.0, 3.0])
for obstacle in obstacles: obstacle.draw(ax)
# parameters
animate = 1
# RRT Initialization
maxiters = 500
nearGoal = False # This will be set to true if goal has been reached
minDistGoal = 0.05 # Convergence criterion: success when the tree reaches within 0.25 in distance from the goal.
d = 0.5 # [m], Extension parameter: this controls how far the RRT extends in each step.
# Start and goal positions
start = np.array([0.0, 0.0, 0.0]); ax.scatter3D(start[0], start[1], start[2], color='green', s=100)
goal = np.array([0.0, 0.5, 2.5]); ax.scatter3D(goal[0], goal[1], goal[2], color='red', s=100)
# Initialize RRT. The RRT will be represented as a 2 x N list of points.
# So each column represents a vertex of the tree.
rrt = []
start_node = Node3D()
start_node.p = start
start_node.i = 0
start_node.iPrev = 0
rrt.append(start_node)
# RRT algorithm
start_time = time.time()
iters = 0
# NOTE(review): if the goal is never reached within maxiters, `P` is
# never assigned and the path-construction loop below raises NameError.
while not nearGoal and iters < maxiters:
    # Sample point
    rnd = random()
    # With probability 0.05, sample the goal. This promotes movement to the goal.
    # NOTE(review): the threshold below is 0.10, not the 0.05 stated above.
    if rnd < 0.10:
        p = goal
    else:
        p = np.array([random()*5-2.5, random()*5-2.5, random()*3]) # Should be a 3 x 1 vector
    # Check if sample is collision free
    collFree = isCollisionFreeVertex(obstacles, p)
    # If it's not collision free, continue with loop
    if not collFree:
        iters += 1
        continue
    # If it is collision free, find closest point in existing tree.
    closest_node = closestNode3D(rrt, p)
    # Extend tree towards xy from closest_vert. Use the extension parameter
    # d defined above as your step size. In other words, the Euclidean
    # distance between new_vert and closest_vert should be d.
    new_node = Node3D()
    new_node.p = closest_node.p + d * (p - closest_node.p)
    new_node.i = len(rrt)
    new_node.iPrev = closest_node.i
    if animate:
        ax.plot([closest_node.p[0], new_node.p[0]], [closest_node.p[1], new_node.p[1]], [closest_node.p[2], new_node.p[2]],color = 'b', zorder=5)
        plt.pause(0.01)
    # Check if new vertice is in collision
    collFree = isCollisionFreeEdge(obstacles, closest_node.p, new_node.p)
    # If it's not collision free, continue with loop
    if not collFree:
        iters += 1
        continue
    # If it is collision free, add it to tree
    rrt.append(new_node)
    # Check if we have reached the goal
    if norm(np.array(goal) - np.array(new_node.p)) < minDistGoal:
        # Add last, goal node
        goal_node = Node3D()
        goal_node.p = goal
        goal_node.i = len(rrt)
        goal_node.iPrev = new_node.i
        if isCollisionFreeEdge(obstacles, new_node.p, goal_node.p):
            rrt.append(goal_node)
            P = [goal_node.p]
        else: P = []
        end_time = time.time()
        nearGoal = True
        print ('Reached the goal after %.2f seconds:' % (end_time - start_time))
    iters += 1
print ('Number of iterations passed: %d / %d' %(iters, maxiters))
print ('RRT length: ', len(rrt))
# Path construction from RRT:
# Walk the parent indices backwards from the last node to the root.
print ('Constructing the path...')
i = len(rrt) - 1
while True:
    i = rrt[i].iPrev
    P.append(rrt[i].p)
    if i == 0:
        print ('Reached RRT start node')
        break
P = np.array(P)
# drawing a path from RRT
for i in range(P.shape[0]-1):
    ax.plot([P[i,0], P[i+1,0]], [P[i,1], P[i+1,1]], [P[i,2], P[i+1,2]], color = 'g', linewidth=5, zorder=10)
# shortened path
print ('Shortening the path...')
P = shorten_path(P, obstacles, smoothiters=100)
for i in range(P.shape[0]-1):
    ax.plot([P[i,0], P[i+1,0]], [P[i,1], P[i+1,1]], [P[i,2], P[i+1,2]], color = 'orange', linewidth=5, zorder=15)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"time.time",
"random.random",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace",
"tools.init_fonts",
"matplotlib.pyplot.pause",
"path_shortening.shorten_path"
] | [((3532, 3544), 'tools.init_fonts', 'init_fonts', ([], {}), '()\n', (3542, 3544), False, 'from tools import init_fonts\n'), ((3551, 3579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (3561, 3579), True, 'from matplotlib import pyplot as plt\n'), ((3584, 3609), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (3592, 3609), True, 'from matplotlib import pyplot as plt\n'), ((4172, 4197), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4180, 4197), True, 'import numpy as np\n'), ((4272, 4297), 'numpy.array', 'np.array', (['[0.0, 0.5, 2.5]'], {}), '([0.0, 0.5, 2.5])\n', (4280, 4297), True, 'import numpy as np\n'), ((4628, 4639), 'time.time', 'time.time', ([], {}), '()\n', (4637, 4639), False, 'import time\n'), ((7014, 7025), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (7022, 7025), True, 'import numpy as np\n'), ((7246, 7289), 'path_shortening.shorten_path', 'shorten_path', (['P', 'obstacles'], {'smoothiters': '(100)'}), '(P, obstacles, smoothiters=100)\n', (7258, 7289), False, 'from path_shortening import shorten_path\n'), ((7435, 7445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7443, 7445), True, 'from matplotlib import pyplot as plt\n'), ((758, 780), 'numpy.array', 'np.array', (['closest_vert'], {}), '(closest_vert)\n', (766, 780), True, 'import numpy as np\n'), ((786, 797), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (794, 797), True, 'import numpy as np\n'), ((826, 848), 'numpy.linalg.norm', 'norm', (['(closest_vert - p)'], {}), '(closest_vert - p)\n', (830, 848), False, 'from numpy.linalg import norm\n'), ((934, 954), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'M'], {}), '(0, 1, M)\n', (945, 954), True, 'import numpy as np\n'), ((1480, 1498), 'numpy.array', 'np.array', (['distance'], {}), '(distance)\n', (1488, 1498), True, 'import numpy as np\n'), ((4720, 4728), 'random.random', 'random', ([], {}), 
'()\n', (4726, 4728), False, 'from random import random\n'), ((2019, 2038), 'numpy.array', 'np.array', (['self.pose'], {}), '(self.pose)\n', (2027, 2038), True, 'import numpy as np\n'), ((2052, 2296), 'numpy.array', 'np.array', (['[[-dx / 2, -dy / 2, -dz / 2], [dx / 2, -dy / 2, -dz / 2], [dx / 2, dy / 2, \n -dz / 2], [-dx / 2, dy / 2, -dz / 2], [-dx / 2, -dy / 2, dz / 2], [dx /\n 2, -dy / 2, dz / 2], [dx / 2, dy / 2, dz / 2], [-dx / 2, dy / 2, dz / 2]]'], {}), '([[-dx / 2, -dy / 2, -dz / 2], [dx / 2, -dy / 2, -dz / 2], [dx / 2,\n dy / 2, -dz / 2], [-dx / 2, dy / 2, -dz / 2], [-dx / 2, -dy / 2, dz / 2\n ], [dx / 2, -dy / 2, dz / 2], [dx / 2, dy / 2, dz / 2], [-dx / 2, dy / \n 2, dz / 2]])\n', (2060, 2296), True, 'import numpy as np\n'), ((5807, 5822), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (5816, 5822), True, 'from matplotlib import pyplot as plt\n'), ((6568, 6579), 'time.time', 'time.time', ([], {}), '()\n', (6577, 6579), False, 'import time\n'), ((6188, 6202), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (6196, 6202), True, 'import numpy as np\n'), ((6205, 6225), 'numpy.array', 'np.array', (['new_node.p'], {}), '(new_node.p)\n', (6213, 6225), True, 'import numpy as np\n'), ((4911, 4919), 'random.random', 'random', ([], {}), '()\n', (4917, 4919), False, 'from random import random\n'), ((4879, 4887), 'random.random', 'random', ([], {}), '()\n', (4885, 4887), False, 'from random import random\n'), ((4895, 4903), 'random.random', 'random', ([], {}), '()\n', (4901, 4903), False, 'from random import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2 as cv
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
#tf.disable_eager_execution()
class IrisLandmark(object):
    """TFLite wrapper around the MediaPipe iris-landmark model.

    Loads the interpreter once at construction; calling the instance on a
    BGR image runs inference and returns the two flattened output arrays
    (eye contour and iris landmarks).
    """
    def __init__(
        self,
        model_path='iris_landmark/iris_landmark.tflite',
        num_threads=1,
    ):
        # NOTE(review): num_threads is currently unused -- the interpreter
        # argument that would consume it is commented out below.
        self._interpreter = tf.lite.Interpreter(model_path=model_path)#,num_threads=num_threads)
        self._interpreter.allocate_tensors()
        self._input_details = self._interpreter.get_input_details()
        self._output_details = self._interpreter.get_output_details()
    def __call__(
        self,
        image,
    ):
        """Run inference on a BGR `image`; returns (eye_contour, iris) as
        squeezed numpy arrays."""
        input_shape = self._input_details[0]['shape']
        # Convert BGR -> RGB, scale to [0, 1] and resize to the model input.
        img = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        img = img / 255.0
        img_resized = tf.image.resize(img, [input_shape[1], input_shape[2]],
                                      method='bicubic',
                                      preserve_aspect_ratio=False)
        img_input = img_resized.numpy()
        # Shift from [0, 1] to the model's [-1, 1] input range.
        img_input = (img_input - 0.5) / 0.5
        reshape_img = img_input.reshape(1, input_shape[1], input_shape[2],
                                        input_shape[3])
        tensor = tf.convert_to_tensor(reshape_img, dtype=tf.float32)
        # Run inference.
        input_details_tensor_index = self._input_details[0]['index']
        self._interpreter.set_tensor(input_details_tensor_index, tensor)
        self._interpreter.invoke()
        # Fetch the two output tensors.
        output_details_tensor_index0 = self._output_details[0]['index']
        output_details_tensor_index1 = self._output_details[1]['index']
        eye_contour = self._interpreter.get_tensor(
            output_details_tensor_index0)
        iris = self._interpreter.get_tensor(output_details_tensor_index1)
        return np.squeeze(eye_contour), np.squeeze(iris)
    def get_input_shape(self):
        """Return the model's expected spatial input size
        ([input_shape[1], input_shape[2]] -- height, width for NHWC,
        assumed)."""
        input_shape = self._input_details[0]['shape']
        return [input_shape[1], input_shape[2]]
"cv2.cvtColor",
"tensorflow.compat.v1.lite.Interpreter",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.image.resize",
"numpy.squeeze"
] | [((347, 389), 'tensorflow.compat.v1.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': 'model_path'}), '(model_path=model_path)\n', (366, 389), True, 'import tensorflow.compat.v1 as tf\n'), ((742, 778), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2RGB'], {}), '(image, cv.COLOR_BGR2RGB)\n', (753, 778), True, 'import cv2 as cv\n'), ((827, 932), 'tensorflow.compat.v1.image.resize', 'tf.image.resize', (['img', '[input_shape[1], input_shape[2]]'], {'method': '"""bicubic"""', 'preserve_aspect_ratio': '(False)'}), "(img, [input_shape[1], input_shape[2]], method='bicubic',\n preserve_aspect_ratio=False)\n", (842, 932), True, 'import tensorflow.compat.v1 as tf\n'), ((1238, 1289), 'tensorflow.compat.v1.convert_to_tensor', 'tf.convert_to_tensor', (['reshape_img'], {'dtype': 'tf.float32'}), '(reshape_img, dtype=tf.float32)\n', (1258, 1289), True, 'import tensorflow.compat.v1 as tf\n'), ((1829, 1852), 'numpy.squeeze', 'np.squeeze', (['eye_contour'], {}), '(eye_contour)\n', (1839, 1852), True, 'import numpy as np\n'), ((1854, 1870), 'numpy.squeeze', 'np.squeeze', (['iris'], {}), '(iris)\n', (1864, 1870), True, 'import numpy as np\n')] |
from abc import ABCMeta, abstractmethod
import numpy as np
from ezclimate.storage_tree import BigStorageTree, SmallStorageTree
np.seterr(all='ignore')
class EZUtility(object):
"""Calculation of Epstein-Zin utility for the EZ-Climate model.
The Epstein-Zin utility allows for different rates of substitution across time and
states. For specification see DLW-paper.
Parameters
----------
tree : `TreeModel` object
tree structure used
damage : `Damage` object
class that provides damage methods
cost : `Cost` object
class that provides cost methods
period_len : float
subinterval length
eis : float, optional
elasticity of intertemporal substitution
ra : float, optional
risk-aversion
time_pref : float, optional
pure rate of time preference
Attributes
----------
tree : `TreeModel` object
tree structure used
damage : `Damage` object
class that provides damage methods
cost : `Cost` object
class that provides cost methods
period_len : float
subinterval length
decision_times : ndarray
years in the future where decisions will be made
cons_growth : float
consumption growth
growth_term : float
1 + cons_growth
r : float
the parameter rho from the DLW-paper
a : float
the parameter alpha from the DLW-paper
b : float
the parameter beta from the DLW-paper
"""
    def __init__(self, tree, damage, cost, period_len, eis=0.9, ra=7.0, time_pref=0.005,
                 add_penalty_cost=False, max_penalty=0.0, penalty_scale=1.0):
        """Store the model components and precompute the EZ preference
        parameters (see the class docstring for parameter meanings)."""
        self.tree = tree
        self.damage = damage
        self.cost = cost
        self.period_len = period_len
        self.decision_times = tree.decision_times
        self.cons_growth = damage.cons_growth
        self.growth_term = 1.0 + self.cons_growth
        # EZ preference parameters in DLW notation: rho, alpha, beta.
        self.r = 1.0 - 1.0/eis
        self.a = 1.0 - ra
        self.b = (1.0-time_pref)**period_len
        # Baseline (no-damage) consumption path: (1 + g)^t per decision time.
        self.potential_cons = np.ones(self.decision_times.shape) + self.cons_growth
        self.potential_cons = self.potential_cons ** self.decision_times
        self.add_penalty_cost = add_penalty_cost
        self.max_penalty = max_penalty
        self.penalty_scale = penalty_scale
    def _end_period_utility(self, m, utility_tree, cons_tree, cost_tree):
        """Calculate the terminal utility.

        Fills the last period of `cost_tree`, `cons_tree` and
        `utility_tree` in place for the mitigation plan `m`.
        """
        period_ave_mitigation = self.damage.average_mitigation(m, self.tree.num_periods)
        period_damage = self.damage.damage_function(m, self.tree.num_periods)
        damage_nodes = self.tree.get_nodes_in_period(self.tree.num_periods)
        period_mitigation = m[damage_nodes[0]:damage_nodes[1]+1]
        period_cost = self.cost.cost(self.tree.num_periods, period_mitigation, period_ave_mitigation)
        # Value of consuming the growth-adjusted endowment forever past the horizon.
        continuation = (1.0 / (1.0 - self.b*(self.growth_term**self.r)))**(1.0/self.r)
        cost_tree.set_value(cost_tree.last_period, period_cost)
        period_consumption = self.potential_cons[-1] * (1.0 - period_damage)
        # Guard against non-positive consumption before taking fractional powers.
        period_consumption[period_consumption<=0.0] = 1e-18
        cons_tree.set_value(cons_tree.last_period, period_consumption)
        utility_tree.set_value(utility_tree.last_period, (1.0 - self.b)**(1.0/self.r) * cons_tree.last * continuation)
    def _end_period_marginal_utility(self, mu_tree_0, mu_tree_1, ce_tree, utility_tree, cons_tree):
        """Calculate the terminal marginal utility.

        Fills the last entries of `ce_tree`, `mu_tree_0` and `mu_tree_1`
        in place from the already-computed utility and consumption trees.
        """
        # Certainty-equivalent term implied by the terminal utility.
        ce_term = utility_tree.last**self.r - (1.0 - self.b)*cons_tree.last**self.r
        ce_tree.set_value(ce_tree.last_period, ce_term)
        mu_0_last = (1.0 - self.b)*(utility_tree[utility_tree.last_period-self.period_len] / cons_tree.last)**(1.0-self.r)
        mu_tree_0.set_value(mu_tree_0.last_period, mu_0_last)
        mu_0 = self._mu_0(cons_tree[cons_tree.last_period-self.period_len], ce_tree[ce_tree.last_period-self.period_len])
        mu_tree_0.set_value(mu_tree_0.last_period-self.period_len, mu_0)
        # Marginal utility of next-period consumption under the constant-growth continuation.
        next_term = self.b * (1.0 - self.b) / (1.0 - self.b * self.growth_term**self.r)
        mu_1 = utility_tree[utility_tree.last_period-self.period_len]**(1-self.r) * next_term * cons_tree.last**(self.r-1.0)
        mu_tree_1.set_value(mu_tree_1.last_period-self.period_len, mu_1)
    def _certain_equivalence(self, period, damage_period, utility_tree):
        """Calculate certainty equivalence utility. If we are between decision nodes, i.e. no branching,
        then certainty equivalent utility at time period depends only on the utility next period
        given information known today. Otherwise the certainty equivalent utility is the probability
        weighted sum of next period utility over the partition reachable from the state.
        """
        if utility_tree.is_information_period(period):
            damage_nodes = self.tree.get_nodes_in_period(damage_period+1)
            probs = self.tree.node_prob[damage_nodes[0]:damage_nodes[1]+1]
            # Even/odd entries are the two children of each branching node
            # (presumably up/down branches -- confirm against TreeModel).
            even_probs = probs[::2]
            odd_probs = probs[1::2]
            even_util = ((utility_tree.get_next_period_array(period)[::2])**self.a) * even_probs
            odd_util = ((utility_tree.get_next_period_array(period)[1::2])**self.a) * odd_probs
            ave_util = (even_util + odd_util) / (even_probs + odd_probs)
            cert_equiv = ave_util**(1.0/self.a)
        else:
            # no branching implies certainty equivalent utility at time period depends only on
            # the utility next period given information known today
            cert_equiv = utility_tree.get_next_period_array(period)
        return cert_equiv
    def _utility_generator(self, m, utility_tree, cons_tree, cost_tree, ce_tree, cons_adj=0.0):
        """Generator for calculating utility for each utility period besides the terminal utility.

        Walks the sub-periods backwards, filling `cons_tree`, `cost_tree`
        and `ce_tree` in place and yielding `(utility, period)` pairs.
        `cons_adj` is added to period-0 consumption (used for sensitivity
        analysis in `adjusted_utility`).
        """
        periods = utility_tree.periods[::-1]
        for period in periods[1:]:
            damage_period = utility_tree.between_decision_times(period)
            cert_equiv = self._certain_equivalence(period, damage_period, utility_tree)
            # Recompute cost/damage only when entering a new decision period.
            # NOTE(review): period_cost/period_damage are reused below from
            # the previous iteration when this branch is not taken -- relies
            # on the backward iteration order hitting a decision period first.
            if utility_tree.is_decision_period(period+self.period_len):
                damage_nodes = self.tree.get_nodes_in_period(damage_period)
                period_mitigation = m[damage_nodes[0]:damage_nodes[1]+1]
                period_ave_mitigation = self.damage.average_mitigation(m, damage_period)
                period_cost = self.cost.cost(damage_period, period_mitigation, period_ave_mitigation)
                period_damage = self.damage.damage_function(m, damage_period)
                cost_tree.set_value(cost_tree.index_below(period+self.period_len), period_cost)
            period_consumption = self.potential_cons[damage_period] * (1.0 - period_damage) * (1.0 - period_cost)
            # Guard against non-positive consumption before fractional powers.
            period_consumption[period_consumption <= 0.0] = 1e-18
            if not utility_tree.is_decision_period(period):
                # Geometric interpolation of consumption between decision periods.
                next_consumption = cons_tree.get_next_period_array(period)
                segment = period - utility_tree.decision_times[damage_period]
                interval = segment + utility_tree.subinterval_len
                if utility_tree.is_decision_period(period+self.period_len):
                    if period < utility_tree.decision_times[-2]:
                        next_cost = cost_tree[period+self.period_len]
                        next_consumption *= (1.0 - np.repeat(period_cost,2)) / (1.0 - next_cost)
                        next_consumption[next_consumption<=0.0] = 1e-18
                if period < utility_tree.decision_times[-2]:
                    # Before the last decision: next period has twice as many
                    # nodes, hence the np.repeat alignment.
                    temp_consumption = next_consumption/np.repeat(period_consumption,2)
                    period_consumption = np.sign(temp_consumption)*(np.abs(temp_consumption)**(segment/float(interval))) \
                                            * np.repeat(period_consumption,2)
                else:
                    temp_consumption = next_consumption/period_consumption
                    period_consumption = np.sign(temp_consumption)*(np.abs(temp_consumption)**(segment/float(interval))) \
                                            * period_consumption
            if period == 0:
                period_consumption += cons_adj
            # Epstein-Zin recursion: u = ((1-b) c^r + b CE^r)^(1/r).
            ce_term = self.b * cert_equiv**self.r
            ce_tree.set_value(period, ce_term)
            cons_tree.set_value(period, period_consumption)
            u = ((1.0-self.b)*period_consumption**self.r + ce_term)**(1.0/self.r)
            yield u, period
def utility(self, m, return_trees=False):
"""Calculating utility for the specific mitigation decisions `m`.
Parameters
----------
m : ndarray or list
array of mitigations
return_trees : bool
True if method should return trees calculated in producing the utility
Returns
-------
ndarray or tuple
tuple of `BaseStorageTree` if return_trees else ndarray with utility at period 0
Examples
---------
Assuming we have declared a EZUtility object as 'ezu' and have a mitigation array 'm'
>>> ezu.utility(m)
array([ 9.83391921])
>>> tree_dict = ezu.utility(m, return_trees=True)
"""
utility_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
cons_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
ce_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
cost_tree = SmallStorageTree(decision_times=self.decision_times)
self._end_period_utility(m, utility_tree, cons_tree, cost_tree)
it = self._utility_generator(m, utility_tree, cons_tree, cost_tree, ce_tree)
for u, period in it:
utility_tree.set_value(period, u)
if return_trees:
return {'Utility':utility_tree, 'Consumption':cons_tree, 'Cost':cost_tree, 'CertainEquivalence':ce_tree}
return utility_tree[0]
    def adjusted_utility(self, m, period_cons_eps=None, node_cons_eps=None, final_cons_eps=0.0,
                         first_period_consadj=0.0, return_trees=False):
        """Calculating adjusted utility for sensitivity analysis. Used e.g. to find zero-coupon bond price.
        Values in parameters are used to adjust the utility in different ways.
        Parameters
        ----------
        m : ndarray
            array of mitigations
        period_cons_eps : ndarray, optional
            array of increases in consumption per period
        node_cons_eps : `SmallStorageTree`, optional
            increases in consumption per node
        final_cons_eps : float, optional
            value to increase the final utilities by
        first_period_consadj : float, optional
            value to increase consumption at period 0 by
        return_trees : bool, optional
            True if method should return trees calculated in producing the utility
        Returns
        -------
        ndarray or tuple
            tuple of `BaseStorageTree` if return_trees else ndarray with utility at period 0
        Examples
        ---------
        Assuming we have declared a EZUtility object as 'ezu' and have a mitigation array 'm'
        >>> ezu.adjusted_utility(m, final_cons_eps=0.1)
        array([ 9.83424045])
        >>> tree_dict = ezu.adjusted_utility(m, final_cons_eps=0.1, return_trees=True)
        >>> arr = np.zeros(int(ezu.decision_times[-1]/ezu.period_len) + 1)
        >>> arr[-1] = 0.1
        >>> ezu.adjusted_utility(m, period_cons_eps=arr)
        array([ 9.83424045])
        >>> bst = BigStorageTree(5.0, [0, 15, 45, 85, 185, 285, 385])
        >>> bst.set_value(bst.last_period, np.repeat(0.01, len(bst.last)))
        >>> ezu.adjusted_utility(m, node_cons_eps=bst)
        array([ 9.83391921])
        The last example differs from the rest in that the last values of the `node_cons_eps` will never be
        used. Hence if you want to update the last period consumption, use one of these two methods.
        >>> ezu.adjusted_utility(m, first_period_consadj=0.01)
        array([ 9.84518772])
        """
        utility_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        cons_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        ce_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        cost_tree = SmallStorageTree(decision_times=self.decision_times)
        periods = utility_tree.periods[::-1]
        if period_cons_eps is None:
            period_cons_eps = np.zeros(len(periods))
        if node_cons_eps is None:
            node_cons_eps = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        self._end_period_utility(m, utility_tree, cons_tree, cost_tree)
        it = self._utility_generator(m, utility_tree, cons_tree, cost_tree, ce_tree, first_period_consadj)
        i = len(utility_tree)-2
        for u, period in it:
            # First-order utility adjustment: du = mu * d(consumption).
            if period == periods[1]:
                mu_0 = (1.0-self.b) * (u/cons_tree[period])**(1.0-self.r)
                next_term = self.b * (1.0-self.b) / (1.0-self.b*self.growth_term**self.r)
                mu_1 = (u**(1.0-self.r)) * next_term * (cons_tree.last**(self.r-1.0))
                u += (final_cons_eps+period_cons_eps[-1]+node_cons_eps.last) * mu_1
                u += (period_cons_eps[i]+node_cons_eps.tree[period]) * mu_0
                utility_tree.set_value(period, u)
            else:
                # NOTE(review): m_1 and m_2 are discarded and mu_1 is never
                # updated after the first iteration -- confirm whether
                # `mu_0, mu_1, mu_2 = ...` was intended here.
                mu_0, m_1, m_2 = self._period_marginal_utility(mu_0, mu_1, m, period, utility_tree, cons_tree, ce_tree)
                u += (period_cons_eps[i] + node_cons_eps.tree[period])*mu_0
                utility_tree.set_value(period, u)
            i -= 1
        if return_trees:
            # NOTE(review): returns a tuple here, while `utility` returns a dict.
            return utility_tree, cons_tree, cost_tree, ce_tree
        return utility_tree.tree[0]
def _mu_0(self, cons, ce_term):
"""Marginal utility with respect to consumption function."""
t1 = (1.0 - self.b)*cons**(self.r-1.0)
t2 = (ce_term - (self.b-1.0)*cons**self.r)**((1.0/self.r)-1.0)
return t1 * t2
def _mu_1(self, cons, prob, cons_1, cons_2, ce_1, ce_2, do_print=False):
""" marginal utility with respect to consumption next period."""
t1 = (1.0-self.b) * self.b * prob * cons_1**(self.r-1.0)
t2 = (ce_1 - (self.b-1.0) * cons_1**self.r )**((self.a/self.r)-1)
t3 = (prob * (ce_1 - (self.b*(cons_1**self.r)) + cons_1**self.r)**(self.a/self.r) \
+ (1.0-prob) * (ce_2 - (self.b-1.0) * cons_2**self.r)**(self.a/self.r))**((self.r/self.a)-1.0)
t4 = prob * (ce_1-self.b * (cons_1**self.r) + cons_1**self.r)**(self.a/self.r) \
+ (1.0-prob) * (ce_2 - self.b * (cons_2**self.r) + cons_2**self.r)**(self.a/self.r)
t5 = (self.b * t4**(self.r/self.a) - (self.b-1.0) * cons**self.r )**((1.0/self.r)-1.0)
return t1 * t2 * t3 * t5
def _mu_2(self, cons, prev_cons, ce_term):
"""Marginal utility with respect to last period consumption."""
t1 = (1.0-self.b) * self.b * prev_cons**(self.r-1.0)
t2 = ((1.0 - self.b) * cons**self.r - (self.b - 1.0) * self.b \
* prev_cons**self.r + self.b * ce_term)**((1.0/self.r)-1.0)
return t1 * t2
def _period_marginal_utility(self, prev_mu_0, prev_mu_1, m, period, utility_tree, cons_tree, ce_tree):
    """Marginal utilities for every node in ``period``.

    Returns ``(mu_0, mu_1, mu_2)`` on information (branching) periods and
    ``(mu_0, mu_1, None)`` otherwise.  ``prev_mu_0``, ``prev_mu_1`` and
    ``m`` are accepted for interface compatibility; only the trees are
    read here.
    """
    damage_period = utility_tree.between_decision_times(period)
    mu_0 = self._mu_0(cons_tree[period], ce_tree[period])
    next_ce = ce_tree.get_next_period_array(period)
    next_cons = cons_tree.get_next_period_array(period)
    if not utility_tree.is_information_period(period):
        # No branching: single marginal term towards the following period.
        mu_1 = self._mu_2(cons_tree[period], next_cons, next_ce)
        return mu_0, mu_1, None
    probs = self.tree.get_probs_in_period(damage_period + 1)
    # Conditional probability of the "up" branch within each sibling pair.
    up_prob = np.array([probs[k] / (probs[k] + probs[k + 1])
                        for k in range(0, len(probs), 2)])
    mu_1 = self._mu_1(cons_tree[period], up_prob,
                      next_cons[::2], next_cons[1::2], next_ce[::2], next_ce[1::2])
    mu_2 = self._mu_1(cons_tree[period], 1.0 - up_prob,
                      next_cons[1::2], next_cons[::2], next_ce[1::2], next_ce[::2])
    return mu_0, mu_1, mu_2
def partial_grad(self, m, i, delta=1e-8):
    """Central-difference estimate of the ith gradient element.

    Parameters
    ----------
    m : ndarray
        array of mitigations
    i : int
        node to calculate partial grad for
    delta : float
        half-width of the central-difference step

    Returns
    -------
    float
        gradient element
    """
    perturbed = m.copy()
    perturbed[i] -= delta
    low = self.utility(perturbed)
    perturbed[i] += 2 * delta
    high = self.utility(perturbed)
    return (high - low) / (2 * delta)
| [
"numpy.abs",
"numpy.seterr",
"numpy.ones",
"ezclimate.storage_tree.BigStorageTree",
"numpy.sign",
"ezclimate.storage_tree.SmallStorageTree",
"numpy.repeat"
] | [((128, 151), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (137, 151), True, 'import numpy as np\n'), ((9348, 9436), 'ezclimate.storage_tree.BigStorageTree', 'BigStorageTree', ([], {'subinterval_len': 'self.period_len', 'decision_times': 'self.decision_times'}), '(subinterval_len=self.period_len, decision_times=self.\n decision_times)\n', (9362, 9436), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((9452, 9540), 'ezclimate.storage_tree.BigStorageTree', 'BigStorageTree', ([], {'subinterval_len': 'self.period_len', 'decision_times': 'self.decision_times'}), '(subinterval_len=self.period_len, decision_times=self.\n decision_times)\n', (9466, 9540), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((9554, 9642), 'ezclimate.storage_tree.BigStorageTree', 'BigStorageTree', ([], {'subinterval_len': 'self.period_len', 'decision_times': 'self.decision_times'}), '(subinterval_len=self.period_len, decision_times=self.\n decision_times)\n', (9568, 9642), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((9658, 9710), 'ezclimate.storage_tree.SmallStorageTree', 'SmallStorageTree', ([], {'decision_times': 'self.decision_times'}), '(decision_times=self.decision_times)\n', (9674, 9710), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((12345, 12433), 'ezclimate.storage_tree.BigStorageTree', 'BigStorageTree', ([], {'subinterval_len': 'self.period_len', 'decision_times': 'self.decision_times'}), '(subinterval_len=self.period_len, decision_times=self.\n decision_times)\n', (12359, 12433), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((12449, 12537), 'ezclimate.storage_tree.BigStorageTree', 'BigStorageTree', ([], {'subinterval_len': 'self.period_len', 'decision_times': 'self.decision_times'}), '(subinterval_len=self.period_len, decision_times=self.\n decision_times)\n', (12463, 
12537), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((12551, 12639), 'ezclimate.storage_tree.BigStorageTree', 'BigStorageTree', ([], {'subinterval_len': 'self.period_len', 'decision_times': 'self.decision_times'}), '(subinterval_len=self.period_len, decision_times=self.\n decision_times)\n', (12565, 12639), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((12655, 12707), 'ezclimate.storage_tree.SmallStorageTree', 'SmallStorageTree', ([], {'decision_times': 'self.decision_times'}), '(decision_times=self.decision_times)\n', (12671, 12707), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((2072, 2106), 'numpy.ones', 'np.ones', (['self.decision_times.shape'], {}), '(self.decision_times.shape)\n', (2079, 2106), True, 'import numpy as np\n'), ((12905, 12993), 'ezclimate.storage_tree.BigStorageTree', 'BigStorageTree', ([], {'subinterval_len': 'self.period_len', 'decision_times': 'self.decision_times'}), '(subinterval_len=self.period_len, decision_times=self.\n decision_times)\n', (12919, 12993), False, 'from ezclimate.storage_tree import BigStorageTree, SmallStorageTree\n'), ((7681, 7713), 'numpy.repeat', 'np.repeat', (['period_consumption', '(2)'], {}), '(period_consumption, 2)\n', (7690, 7713), True, 'import numpy as np\n'), ((7879, 7911), 'numpy.repeat', 'np.repeat', (['period_consumption', '(2)'], {}), '(period_consumption, 2)\n', (7888, 7911), True, 'import numpy as np\n'), ((7754, 7779), 'numpy.sign', 'np.sign', (['temp_consumption'], {}), '(temp_consumption)\n', (7761, 7779), True, 'import numpy as np\n'), ((8049, 8074), 'numpy.sign', 'np.sign', (['temp_consumption'], {}), '(temp_consumption)\n', (8056, 8074), True, 'import numpy as np\n'), ((7421, 7446), 'numpy.repeat', 'np.repeat', (['period_cost', '(2)'], {}), '(period_cost, 2)\n', (7430, 7446), True, 'import numpy as np\n'), ((7781, 7805), 'numpy.abs', 'np.abs', (['temp_consumption'], {}), 
'(temp_consumption)\n', (7787, 7805), True, 'import numpy as np\n'), ((8076, 8100), 'numpy.abs', 'np.abs', (['temp_consumption'], {}), '(temp_consumption)\n', (8082, 8100), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def evaluate(y_test, y_pred):
    """Print the accuracy score and the per-class classification report.

    Parameters
    ----------
    y_test : array-like
        ground-truth labels
    y_pred : array-like
        predicted labels
    """
    from sklearn.metrics import accuracy_score
    print("===== Accuracy Score =====")
    print(accuracy_score(y_test, y_pred))
    from sklearn.metrics import classification_report
    # Fixed: this header previously duplicated "Accuracy Score" although
    # what follows is the classification report.
    print("===== Classification Report =====")
    class_report = classification_report(y_test, y_pred)
    print(class_report)
    return
# Visualising the results
def plot_model(classifier, X_set, y_set, y_test, y_pred, text):
    """Draw the classifier's 2-D decision regions with the data points overlaid.

    Assumes the feature space is exactly two columns (as produced by
    preprocess()); ``text`` becomes the plot title.  ``y_test``/``y_pred``
    are accepted but not referenced in this body -- NOTE(review): confirm
    whether they were meant to be visualised as well.
    """
    from matplotlib.colors import ListedColormap
    # Dense grid over the padded feature range; predicting on the grid
    # colours the decision regions.
    X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                         np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
    plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha = 0.75, cmap = ListedColormap(('pink', 'cyan')))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    # One scatter call per class so each class gets its own colour and
    # legend entry.
    for i, j in enumerate(np.unique(y_set)):
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c = ListedColormap(('red', 'blue'))(i), label = j)
    plt.title(text)
    plt.xlabel('X')
    plt.ylabel('y')
    plt.legend()
    plt.show()
def preprocess(X_train, X_test):
    """Project onto the first two principal components, then standardize.

    Both transformers are fit on the training split only and applied to
    both splits; returns the transformed (X_train, X_test) pair.
    """
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    reducer = PCA(n_components = 2)
    scaler = StandardScaler()
    X_train = scaler.fit_transform(reducer.fit_transform(X_train))
    X_test = scaler.transform(reducer.transform(X_test))
    return X_train, X_test
"""## Get Breast Cancer Dataset"""
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer()
def draw_learning_curves(X, y, classifier):
    """Plot training and cross-validation score versus training-set size.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        feature matrix
    y : array-like of shape (n_samples,)
        labels
    classifier : estimator
        scikit-learn style estimator to evaluate
    """
    from sklearn.model_selection import learning_curve
    train_sizes, train_scores, test_scores = learning_curve(classifier, X, y)
    # Average over the CV folds (axis 1 indexes folds).
    train_scores_mean = np.mean(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    plt.grid()
    plt.title("Learning Curves")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # Fixed: previously the scores were plotted against the array index,
    # which contradicted the "Training examples" x-axis label; plot them
    # against the actual training-set sizes instead.
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training Score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Cross Validation Score")
    plt.legend()
    plt.show()
# NOTE(review): no-op left over from notebook exploration; the returned
# keys view is discarded.
data.keys()
X = data.data
y = data.target
# TRAIN TEST SPLIT
from sklearn.model_selection import train_test_split
# 80/20 split; no random_state is fixed, so the split varies between runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
def logistic_regression(X_train, X_test, y_train, y_test):
    """Fit logistic regression on PCA-reduced data; plot regions and curves."""
    from sklearn.linear_model import LogisticRegression
    X_train, X_test = preprocess(X_train, X_test)
    model = LogisticRegression()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "Logistic Regression")
    draw_learning_curves(X_train, y_train, model)
logistic_regression(X_train, X_test, y_train, y_test)
def ridge_classification(X_train, X_test, y_train, y_test):
    """Fit a cross-validated ridge classifier; plot regions and curves."""
    from sklearn.linear_model import RidgeClassifierCV
    X_train, X_test = preprocess(X_train, X_test)
    model = RidgeClassifierCV()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "RidgeClassifierCV")
    draw_learning_curves(X_train, y_train, model)
ridge_classification(X_train, X_test, y_train, y_test)
def svm_classification(X_train, X_test, y_train, y_test):
    """Fit an RBF-default SVC; plot decision regions and learning curves."""
    from sklearn.svm import SVC
    X_train, X_test = preprocess(X_train, X_test)
    model = SVC()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "SVC")
    draw_learning_curves(X_train, y_train, model)
svm_classification(X_train, X_test, y_train, y_test)
def mlp_classification(X_train, X_test, y_train, y_test):
    """Fit a default multilayer perceptron; plot regions and curves."""
    from sklearn.neural_network import MLPClassifier
    X_train, X_test = preprocess(X_train, X_test)
    model = MLPClassifier()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "MLP")
    draw_learning_curves(X_train, y_train, model)
mlp_classification(X_train, X_test, y_train, y_test)
def linearsvm_classification(X_train, X_test, y_train, y_test):
    """Fit a linear SVM; plot decision regions and learning curves."""
    from sklearn.svm import LinearSVC
    X_train, X_test = preprocess(X_train, X_test)
    model = LinearSVC()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "LinearSVC")
    draw_learning_curves(X_train, y_train, model)
linearsvm_classification(X_train, X_test, y_train, y_test)
def rf_classification(X_train, X_test, y_train, y_test):
    """Fit a random forest; plot decision regions and learning curves."""
    from sklearn.ensemble import RandomForestClassifier
    X_train, X_test = preprocess(X_train, X_test)
    model = RandomForestClassifier()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "RandomForestClassifier")
    draw_learning_curves(X_train, y_train, model)
rf_classification(X_train, X_test, y_train, y_test)
def dt_classification(X_train, X_test, y_train, y_test):
    """Fit a decision tree; plot decision regions and learning curves."""
    from sklearn.tree import DecisionTreeClassifier
    X_train, X_test = preprocess(X_train, X_test)
    model = DecisionTreeClassifier()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "DecisionTreeClassifier")
    draw_learning_curves(X_train, y_train, model)
dt_classification(X_train, X_test, y_train, y_test)
def gb_classification(X_train, X_test, y_train, y_test):
    """Fit gradient-boosted trees; plot decision regions and learning curves."""
    from sklearn.ensemble import GradientBoostingClassifier
    X_train, X_test = preprocess(X_train, X_test)
    model = GradientBoostingClassifier()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "GradientBoostingClassifier")
    draw_learning_curves(X_train, y_train, model)
gb_classification(X_train, X_test, y_train, y_test)
def sgd_classification(X_train, X_test, y_train, y_test):
    """Fit an SGD linear classifier; plot decision regions and learning curves."""
    from sklearn.linear_model import SGDClassifier
    X_train, X_test = preprocess(X_train, X_test)
    model = SGDClassifier()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "SGDClassifier")
    draw_learning_curves(X_train, y_train, model)
sgd_classification(X_train, X_test, y_train, y_test)
def perceptron_classification(X_train, X_test, y_train, y_test):
    """Fit a perceptron; plot decision regions and learning curves."""
    from sklearn.linear_model import Perceptron
    X_train, X_test = preprocess(X_train, X_test)
    model = Perceptron()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "Perceptron")
    draw_learning_curves(X_train, y_train, model)
perceptron_classification(X_train, X_test, y_train, y_test)
def nb_classification(X_train, X_test, y_train, y_test):
    """Fit Gaussian naive Bayes; plot decision regions and learning curves."""
    from sklearn.naive_bayes import GaussianNB
    X_train, X_test = preprocess(X_train, X_test)
    model = GaussianNB()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "GaussianNB")
    draw_learning_curves(X_train, y_train, model)
nb_classification(X_train, X_test, y_train, y_test)
def knn_classification(X_train, X_test, y_train, y_test):
    """Fit k-nearest neighbours; plot decision regions and learning curves."""
    from sklearn.neighbors import KNeighborsClassifier
    X_train, X_test = preprocess(X_train, X_test)
    model = KNeighborsClassifier()
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test)).flatten()
    plot_model(model, X_train, y_train, y_test, predictions, "KNeighborsClassifier")
    draw_learning_curves(X_train, y_train, model)
knn_classification(X_train, X_test, y_train, y_test) | [
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"numpy.mean",
"sklearn.neural_network.MLPClassifier",
"sklearn.svm.SVC",
... | [((1693, 1713), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (1711, 1713), False, 'from sklearn.datasets import load_breast_cancer\n'), ((2517, 2554), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (2533, 2554), False, 'from sklearn.model_selection import train_test_split\n'), ((335, 372), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (356, 372), False, 'from sklearn.metrics import classification_report\n'), ((1155, 1170), 'matplotlib.pyplot.title', 'plt.title', (['text'], {}), '(text)\n', (1164, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1188), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1183, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1206), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1201, 1206), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1221), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1219, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1234), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1232, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1336), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1320, 1336), False, 'from sklearn.decomposition import PCA\n'), ((1489, 1505), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1503, 1505), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1855, 1887), 'sklearn.model_selection.learning_curve', 'learning_curve', (['classifier', 'X', 'y'], {}), '(classifier, X, y)\n', (1869, 1887), False, 'from sklearn.model_selection import learning_curve\n'), ((1910, 1939), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1917, 1939), True, 'import numpy as np\n'), ((1961, 
1989), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1967, 1989), True, 'import numpy as np\n'), ((2011, 2039), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (2018, 2039), True, 'import numpy as np\n'), ((2060, 2087), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (2066, 2087), True, 'import numpy as np\n'), ((2090, 2100), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2098, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2131), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning Curves"""'], {}), "('Learning Curves')\n", (2112, 2131), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training examples"""'], {}), "('Training examples')\n", (2144, 2165), True, 'import matplotlib.pyplot as plt\n'), ((2168, 2187), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (2178, 2187), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2258), 'matplotlib.pyplot.plot', 'plt.plot', (['train_scores_mean', '"""o-"""'], {'color': '"""b"""', 'label': '"""Training Score"""'}), "(train_scores_mean, 'o-', color='b', label='Training Score')\n", (2198, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2336), 'matplotlib.pyplot.plot', 'plt.plot', (['test_scores_mean', '"""o-"""'], {'color': '"""r"""', 'label': '"""Cross Validation Score"""'}), "(test_scores_mean, 'o-', color='r', label='Cross Validation Score')\n", (2269, 2336), True, 'import matplotlib.pyplot as plt\n'), ((2339, 2351), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2349, 2351), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2362, 2364), True, 'import matplotlib.pyplot as plt\n'), ((2734, 2754), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2752, 2754), False, 
'from sklearn.linear_model import LogisticRegression\n'), ((3232, 3251), 'sklearn.linear_model.RidgeClassifierCV', 'RidgeClassifierCV', ([], {}), '()\n', (3249, 3251), False, 'from sklearn.linear_model import RidgeClassifierCV\n'), ((3703, 3708), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (3706, 3708), False, 'from sklearn.svm import SVC\n'), ((4165, 4180), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (4178, 4180), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4628, 4639), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (4637, 4639), False, 'from sklearn.svm import LinearSVC\n'), ((5110, 5134), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (5132, 5134), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5607, 5631), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (5629, 5631), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((6112, 6140), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (6138, 6140), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((6617, 6632), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '()\n', (6630, 6632), False, 'from sklearn.linear_model import SGDClassifier\n'), ((7101, 7113), 'sklearn.linear_model.Perceptron', 'Perceptron', ([], {}), '()\n', (7111, 7113), False, 'from sklearn.linear_model import Perceptron\n'), ((7577, 7589), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (7587, 7589), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((8054, 8076), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (8074, 8076), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((195, 225), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (209, 225), False, 'from sklearn.metrics import 
accuracy_score\n'), ((1007, 1023), 'numpy.unique', 'np.unique', (['y_set'], {}), '(y_set)\n', (1016, 1023), True, 'import numpy as np\n'), ((887, 919), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('pink', 'cyan')"], {}), "(('pink', 'cyan'))\n", (901, 919), False, 'from matplotlib.colors import ListedColormap\n'), ((2838, 2854), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (2846, 2854), True, 'import numpy as np\n'), ((3335, 3351), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (3343, 3351), True, 'import numpy as np\n'), ((3792, 3808), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (3800, 3808), True, 'import numpy as np\n'), ((4264, 4280), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (4272, 4280), True, 'import numpy as np\n'), ((4723, 4739), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (4731, 4739), True, 'import numpy as np\n'), ((5218, 5234), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (5226, 5234), True, 'import numpy as np\n'), ((5715, 5731), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (5723, 5731), True, 'import numpy as np\n'), ((6224, 6240), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (6232, 6240), True, 'import numpy as np\n'), ((6716, 6732), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (6724, 6732), True, 'import numpy as np\n'), ((7197, 7213), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (7205, 7213), True, 'import numpy as np\n'), ((7673, 7689), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (7681, 7689), True, 'import numpy as np\n'), ((8160, 8176), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (8168, 8176), True, 'import numpy as np\n'), ((1106, 1137), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('red', 'blue')"], {}), "(('red', 'blue'))\n", (1120, 1137), False, 'from matplotlib.colors import ListedColormap\n')] |
# \MODULE\-------------------------------------------------------------------------
#
# CONTENTS : STRique
#
# DESCRIPTION : Raw nanopore signal repeat detection pipeline
#
# RESTRICTIONS : none
#
# REQUIRES : none
#
# ---------------------------------------------------------------------------------
# Copyright (c) 2018-2019, <NAME>, Max Planck Institute for Molecular Genetics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Written by <NAME>
# ---------------------------------------------------------------------------------
# public imports
import os, sys, traceback, json, argparse
import re, itertools
import datetime
import threading, queue
import enum
import numpy as np
import numpy.ma as ma
import scipy.signal as sp
import pomegranate as pg
from signal import signal, SIGPIPE, SIG_DFL
from collections import namedtuple, defaultdict
from skimage.morphology import opening, closing, dilation, erosion, rectangle
from multiprocessing import Pool, Process, Event, Value, Queue
# private imports
from STRique_lib import fast5Index, pyseqan
# simple parallel logging
class logger():
    """Process-safe logger: callers enqueue formatted messages and a
    dedicated consumer process drains the queue into every registered
    sink (stderr and, optionally, a log file)."""
    # Sinks: stderr by default; init() may append a file path (str).
    logs = [sys.stderr]
    class log_type(enum.Enum):
        Error = "[ERROR]"
        Warning = "[WARNING]"
        Info = "[INFO]"
        Debug = "[DEBUG]"
    # Message types currently enabled; filled in by init() from log_level.
    log_types = []
    lock = threading.Lock()
    log_queue = Queue()
    def __logger__():
        # Consumer loop executed in a separate Process; a falsy message
        # (None, enqueued by close()) acts as the shutdown sentinel.
        while True:
            print_message = logger.log_queue.get()
            if not print_message:
                break
            for log in logger.logs:
                if isinstance(log, str):
                    # String sinks are file paths opened in append mode.
                    with open(log, 'a') as fp:
                        print(print_message, file = fp)
                else:
                    print(print_message, file = log)
                    sys.stderr.flush()
    def init(file=None, log_level='info'):
        """Configure enabled levels, optional file sink and start the
        consumer process.  Must be called before logging."""
        # Each level cumulatively includes the more severe ones.
        if log_level == 'error':
            logger.log_types = [logger.log_type.Error]
        elif log_level == 'warning':
            logger.log_types = [logger.log_type.Error, logger.log_type.Warning]
        elif log_level == 'info':
            logger.log_types = [logger.log_type.Error, logger.log_type.Warning, logger.log_type.Info]
        else:
            logger.log_types = [logger.log_type.Error, logger.log_type.Warning, logger.log_type.Info, logger.log_type.Debug]
        if file:
            # Accept the file only if it already exists writable, or its
            # directory is writable (so it can be created on first write).
            if os.path.isfile(file) and os.access(file, os.W_OK) or os.access(os.path.abspath(os.path.dirname(file)), os.W_OK):
                logger.logs.append(file)
        logger.log_runner = Process(target=logger.__logger__, )
        logger.log_runner.start()
        logger.log("Logger created.")
        # len(logs) == 1 here means the file sink above was rejected.
        if file and len(logger.logs) == 1:
            logger.log("Log-file {file} is not accessible".format(file=file), logger.log_type.Error)
    def close():
        """Send the shutdown sentinel and join the consumer process."""
        logger.log_queue.put(None)
        logger.log_queue.close()
        logger.log_runner.join()
    def log(message, type=log_type.Info):
        """Enqueue `message` if its `type` is enabled, stamped with
        timestamp and the calling process id."""
        with logger.lock:
            if type in logger.log_types:
                print_message = ' '.join([datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S"), "[PID {}]".format(os.getpid()), str(type.value), message])
                logger.log_queue.put(print_message)
# basic normalization and simulation
class pore_model():
    """Nanopore k-mer pore model: maps each k-mer to a (mean, stdv)
    current level, and provides signal normalization towards the model
    scale plus synthetic signal generation for a given sequence."""
    def __init__(self, model_file):
        """Parse the tab-separated model file (kmer, mean, stdv per line)
        and pre-compute model-wide statistics."""
        def model_iter(iterable):
            # Yield the first three tab-separated columns of each line.
            for line in iterable:
                yield line.strip().split('\t')[:3]
        with open(model_file, 'r') as fp:
            model_dict = {x[0]:(float(x[1]), float(x[2])) for x in model_iter(fp)}
        # k-mer length inferred from an arbitrary key.
        self.kmer = len(next(iter(model_dict.keys())))
        self.model_median = np.median([x[0] for x in model_dict.values()])
        # NOTE: "MAD" here is the MEAN absolute deviation from the median,
        # not the median absolute deviation.
        self.model_MAD = np.mean(np.absolute(np.subtract([x[0] for x in model_dict.values()], self.model_median)))
        min_state = min(model_dict.values(), key=lambda x:x[0])
        max_state = max(model_dict.values(), key=lambda x:x[0])
        # Clip range: six stdvs beyond the extreme model states.
        self.model_min = min_state[0] - 6 * min_state[1]
        self.model_max = max_state[0] + 6 * max_state[1]
        self.model_dict = model_dict
    def __sliding_window__(self, a, n=3, mode='same'):
        """Return an (len(a), n) strided view of sliding windows over `a`.

        The tail is padded with n-1 values chosen by `mode` ('mean',
        'median', 'mirror', or repeat-last) so every input position has a
        full window.  The returned view shares memory with the padded copy.
        """
        if mode == 'mean':
            a = np.append(a, (n-1) * [np.mean(a)])
        elif mode == 'median':
            a = np.append(a, (n-1) * [np.median(a)])
        elif mode == 'mirror':
            a = np.append(a, a[-1:-1-(n-1):-1])
        else:
            a = np.append(a, (n-1) * [a[-1]])
        shape = a.shape[:-1] + (a.shape[-1] - n + 1, n)
        strides = a.strides + (a.strides[-1],)
        return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
    def MAD(self, signal):
        """Mean absolute deviation of `signal` from its median."""
        return np.mean(np.absolute(np.subtract(signal, np.median(signal))))
    def scale2stdv(self, other):
        """Ratio of `other`'s median level-stdv to this model's; usable as
        a stdv scaling factor between the two models."""
        self_median = np.median(np.array([x[1] for x in self.model_dict.values()]))
        other_median = np.median(np.array([x[1] for x in other.model_dict.values()]))
        return other_median / self_median
    def normalize2model(self, signal, clip=True, mode='median'):
        """Normalize a raw `signal` onto this model's current scale.

        mode 'minmax' aligns the 1%/99% tails of signal and model;
        'entropy' estimates median/MAD only over high-variance regions
        (dilated mask of the 50 largest sliding-MAD jumps); the default
        'median' uses the whole signal's median/MAD.  With clip=True the
        result is clamped just inside [model_min, model_max].
        """
        if mode == 'minmax':
            model_values = np.array([x[0] for x in self.model_dict.values()])
            q5_sig, q95_sig = np.percentile(signal, [1, 99])
            q5_mod, q95_mod = np.percentile(model_values, [1, 99])
            m5_sig = np.median(signal[signal < q5_sig])
            m95_sig = np.median(signal[signal > q95_sig])
            m5_mod = np.median(model_values[model_values < q5_mod])
            m95_mod = np.median(model_values[model_values > q95_mod])
            # Map the signal's tail midpoints linearly onto the model's.
            nrm_signal = (signal - (m5_sig + (m95_sig - m5_sig) / 2)) / ((m95_sig - m5_sig) / 2)
            nrm_signal = nrm_signal * ((m95_mod - m5_mod) / 2) + (m5_mod + (m95_mod - m5_mod) / 2)
        elif mode == 'entropy':
            # Sliding MAD over 500-sample windows; its largest 50 jumps
            # mark informative regions, widened by a 750-sample dilation.
            sliding_std = [self.MAD(x) for x in self.__sliding_window__(signal, n=500, mode='mirror')]
            sliding_std += [sliding_std[-1]]
            diff_signal = np.abs(np.diff(sliding_std))
            ind = np.argpartition(diff_signal, -50)[-50:]
            diff_mask = np.zeros(len(diff_signal), dtype=np.dtype('uint8'))
            diff_mask[ind] = 1
            # NOTE(review): np.bool was removed in NumPy >= 1.24; this line
            # requires an older NumPy as written.
            diff_mask = dilation(diff_mask.reshape((1, len(diff_mask))), rectangle(1, 750))[0].astype(np.bool)
            rawMedian = np.median(signal[diff_mask])
            rawMAD = self.MAD(signal[diff_mask])
            nrm_signal = np.divide(np.subtract(signal, rawMedian), rawMAD)
            nrm_signal = np.add(np.multiply(nrm_signal, self.model_MAD), self.model_median)
        else:
            # Default: median/MAD normalization over the entire signal.
            rawMedian = np.median(signal)
            rawMAD = self.MAD(signal)
            nrm_signal = np.divide(np.subtract(signal, rawMedian), rawMAD)
            nrm_signal = np.add(np.multiply(nrm_signal, self.model_MAD), self.model_median)
        if clip == True:
            np.clip(nrm_signal, self.model_min + .5, self.model_max - .5, out=nrm_signal)
        return nrm_signal
    def generate_signal(self, sequence, samples=10, noise=False):
        """Simulate a raw signal for `sequence` from the k-mer model.

        samples : int or falsy -- fixed samples per k-mer; if falsy, a
        random 6-10 samples per k-mer is drawn.  noise=True additionally
        draws each sample from the k-mer's Normal distribution instead of
        repeating the mean.
        """
        # NOTE(review): this local is never used; the result is built in `sig`.
        signal = []
        level_means = np.array([self.model_dict[kmer][0] for kmer in [sequence[i:i+self.kmer] for i in range(len(sequence)-self.kmer + 1)]])
        if samples and not noise:
            sig = np.repeat(level_means, samples)
        elif not noise:
            sig = np.repeat(level_means, np.random.uniform(6, 10, len(level_means)).astype(int))
        else:
            level_stdvs = np.array([self.model_dict[kmer][1] for kmer in [sequence[i:i+self.kmer] for i in range(len(sequence)-self.kmer + 1)]])
            level_samples = np.random.uniform(6, 10, len(level_means)).astype(int)
            level_means = np.repeat(level_means, level_samples)
            level_stdvs = np.repeat(level_stdvs, level_samples)
            sig = np.random.normal(level_means, level_stdvs)
        return sig
# profile HMM
class profileHMM(pg.HiddenMarkovModel):
def __init__(self, sequence,
             pm_base, transition_probs={}, state_prefix='',
             no_silent=False,
             std_scale=1.0, std_offset=0.0
             ):
    """Profile HMM whose match states follow the pore-model levels of `sequence`.

    Parameters
    ----------
    sequence : str
        nucleotide sequence the profile is built over
    pm_base : pore_model
        k-mer model supplying per-state emission mean/stdv
    transition_probs : dict
        per-key overrides of the default transition probabilities below
        (the dict is only iterated, never mutated, so the shared {}
        default is harmless here)
    state_prefix : str
        prefix for generated state names, keeps composed sub-models distinct
    no_silent : bool
        if True, silent deletion states are not created
    std_scale, std_offset : float
        linear adjustment applied to each match state's emission stdv
    """
    super().__init__()
    self.pm_base = pm_base
    self.sequence = sequence
    self.state_prefix = state_prefix
    self.no_silent = no_silent
    self.std_scale = std_scale
    self.std_offset = std_offset
    # Default transition probabilities; groups leaving the same state
    # type sum to 1 as annotated.
    self.transition_probs = {'match_loop': .75, # .75
                             'match_match': .15, # .15 sum to 1
                             'match_insert': .09, # .09
                             'match_delete': .01, # .01
                             'insert_loop' : .15, # .15
                             'insert_match_0': .40, # .40 sum to 1
                             'insert_match_1': .40, # .40
                             'insert_delete': .05, # .05
                             'delete_delete': .005, # .005
                             'delete_insert': .05, # .05 sum to 1
                             'delete_match': .945 # .945
                             }
    # Apply caller overrides on top of the defaults.
    for key, value in transition_probs.items():
        self.transition_probs[key] = value
    self.__init_model__()
def __init_model__(self):
    """Create per-kmer states plus start/end anchors and wire transitions."""
    self.match_states, insertion_states, deletetion_states = self.__extract_states__(self.sequence)
    self.insertion_states = insertion_states
    self.deletion_states = deletetion_states
    # Silent boundary states (s1/s2 entry, e1/e2 exit) used to compose
    # this profile with surrounding models.
    self.s1 = pg.State(None, name=self.state_prefix+'s1')
    self.s2 = pg.State(None, name=self.state_prefix+'s2')
    self.e1 = pg.State(None, name=self.state_prefix+'e1')
    self.e2 = pg.State(None, name=self.state_prefix+'e2')
    self.__connect_states__()
def __extract_states__(self, sequence):
match_states = []
insertion_states = []
deletion_states = []
digits = np.ceil(np.log10(len(sequence) - self.pm_base.kmer + 1)).astype(np.int)
for idx, kmer in enumerate([sequence[i:i+self.pm_base.kmer] for i in range(len(sequence) - self.pm_base.kmer + 1)]):
state_name = self.state_prefix + str(idx).rjust(digits,'0')
state_mean, state_std = self.pm_base.model_dict[kmer]
match_states.append(pg.State(pg.NormalDistribution(state_mean, state_std * self.std_scale + self.std_offset), name=state_name + 'm'))
if not self.no_silent:
deletion_states.append(pg.State(None, name=state_name + 'd'))
insertion_states.append(pg.State(pg.UniformDistribution(self.pm_base.model_min, self.pm_base.model_max),
name=state_name + 'i'))
return match_states, insertion_states, deletion_states
def __connect_states__(self):
self.add_states(self.match_states)
self.add_states(self.insertion_states)
if not self.no_silent:
self.add_states(self.deletion_states)
self.add_states([self.s1, self.s2, self.e1, self.e2])
# matches
for i, state in enumerate(self.match_states):
self.add_transition(state, state, self.transition_probs['match_loop'], group='match_loop')
if i < len(self.match_states) - 1:
self.add_transition(state, self.match_states[i + 1], self.transition_probs['match_match'], group='match_match')
# insertions
for i, state in enumerate(self.insertion_states):
self.add_transition(state, state, self.transition_probs['insert_loop'], group='insert_loop')
self.add_transition(self.match_states[i], state, self.transition_probs['match_insert'], group='match_insert')
self.add_transition(state, self.match_states[i], self.transition_probs['insert_match_1'], group='insert_match_1')
if i < len(self.deletion_states) - 1 and not self.no_silent:
self.add_transition(state, self.deletion_states[i+1], self.transition_probs['insert_delete'], group='insert_delete')
if i < len(self.match_states) - 1:
self.add_transition(state, self.match_states[i+1], self.transition_probs['insert_match_0'], group='insert_match_0')
# deletions
if not self.no_silent:
for i, state in enumerate(self.deletion_states):
self.add_transition(state, self.insertion_states[i], self.transition_probs['delete_insert'], group='delete_insert')
if i > 0:
self.add_transition(self.match_states[i-1], state, self.transition_probs['match_delete'], group='match_delete')
if i < len(self.match_states) - 1:
self.add_transition(state, self.match_states[i+1], self.transition_probs['delete_match'], group='delete_match')
if i < len(self.deletion_states) - 1:
self.add_transition(state, self.deletion_states[i+1], self.transition_probs['delete_delete'], group='delete_delete')
self.add_transition(self.s1, self.deletion_states[0], 1)
self.add_transition(self.s2, self.match_states[0], 1)
self.add_transition(self.deletion_states[-1], self.e1, self.transition_probs['delete_delete'])
self.add_transition(self.deletion_states[-1], self.e2, self.transition_probs['delete_match'])
else:
for i, state in enumerate(self.match_states):
if i < len(self.match_states) - 2:
self.add_transition(state, self.match_states[i+2], self.transition_probs['match_delete'], group='match_delete')
self.add_transition(self.s1, self.insertion_states[0], 1)
self.add_transition(self.s2, self.match_states[0], 1)
self.add_transition(self.insertion_states[-1], self.e1, self.transition_probs['insert_delete'], group='insert_delete')
self.add_transition(self.insertion_states[-1], self.e2, self.transition_probs['insert_match_0'], group='insert_match_0')
self.add_transition(self.match_states[-1], self.e2, self.transition_probs['match_match'])
self.add_transition(self.match_states[-1], self.e1, self.transition_probs['match_delete'])
def bake(self, *args, **kwargs):
self.add_transition(self.start, self.s1, .5)
self.add_transition(self.start, self.s2, .5)
self.add_transition(self.e1, self.end, 1)
self.add_transition(self.e2, self.end, 1)
super().bake(*args, **kwargs)
# repeat count profile HMM
class repeatHMM(pg.HiddenMarkovModel):
    """Repeat-count profile HMM: one repeat unit with a loop-back edge.

    Wraps a profileHMM over (at least) one expanded repeat unit; the
    dummy states d1/d2 count how often the unit was traversed, so the
    Viterbi path yields the repeat count via count_repeats().
    """
    def __init__(self, repeat, pm, transition_probs={}, state_prefix='', std_scale=1.0, std_offset=0.0):
        super().__init__()
        self.repeat = repeat
        self.pore_model = pm
        # defaults; entries in transition_probs override
        self.transition_probs = {'skip': .999,
                                 'leave_repeat': .002
                                 }
        for key, value in transition_probs.items():
            self.transition_probs[key] = value
        self.state_prefix = state_prefix
        self.std_scale = std_scale
        self.std_offset = std_offset
        self.__build_model__()
    def __build_model__(self):
        """Expand the repeat unit to k-mer length and wire the loop model."""
        if len(self.repeat) >= self.pore_model.kmer:
            # repeat unit long enough: append k-1 bases for k-mer context
            repeat = self.repeat + self.repeat[: self.pore_model.kmer - 1]
            self.repeat_offset = 0
        else:
            # short unit: tile it until k-mer context fits; remember how many
            # extra unit copies one traversal of the profile represents
            ext = self.pore_model.kmer - 1 + (len(self.repeat) - 1) - ((self.pore_model.kmer - 1) % len(self.repeat))
            repeat = self.repeat + ''.join([self.repeat] * self.pore_model.kmer)[:ext]
            self.repeat_offset = int(len(repeat) / len(self.repeat)) - 1
        self.repeat_hmm = profileHMM(repeat,self.pore_model, transition_probs=self.transition_probs, state_prefix=self.state_prefix,
                                     no_silent=True, std_scale=self.std_scale, std_offset=self.std_offset)
        self.add_model(self.repeat_hmm)
        self.skip_distribution = pg.NormalDistribution(self.pore_model.model_median, self.pore_model.model_MAD)
        self.dummy_distribution = pg.UniformDistribution(self.pore_model.model_min, self.pore_model.model_max)
        # d1/d2 are traversed once per repeat-unit pass; counted in count_repeats
        self.d1 = pg.State(self.dummy_distribution, name=self.state_prefix+'dummy1')
        self.d2 = pg.State(self.dummy_distribution, name=self.state_prefix+'dummy2')
        self.e1 = pg.State(None, name=self.state_prefix+'e1')
        self.e2 = pg.State(None, name=self.state_prefix+'e2')
        self.add_state(self.d1)
        self.add_state(self.d2)
        self.s1 = self.repeat_hmm.s1
        self.s2 = self.repeat_hmm.s2
        self.add_transition(self.repeat_hmm.e1, self.d1, 1)
        self.add_transition(self.repeat_hmm.e2, self.d2, 1)
        # either leave the repeat region or loop back for another unit
        self.add_transition(self.d1, self.e1, self.transition_probs['leave_repeat'])
        self.add_transition(self.d2, self.e2, self.transition_probs['leave_repeat'])
        self.add_transition(self.d1, self.s1, 1 - self.transition_probs['leave_repeat'])
        self.add_transition(self.d2, self.s2, 1 - self.transition_probs['leave_repeat'])
    def bake(self, *args, **kwargs):
        """Finalize as a stand-alone model (plain start/end wiring)."""
        self.add_transition(self.start, self.s1, .5)
        self.add_transition(self.start, self.s2, .5)
        self.add_transition(self.e1, self.end, 1)
        self.add_transition(self.e2, self.end, 1)
        super().bake(*args, **kwargs)
    def bake2(self, *args, **kwargs):
        """Finalize with an additional trailing skip state absorbing the tail signal."""
        self.skip = pg.State(self.skip_distribution, name=self.state_prefix+'skip')
        self.add_state(self.skip)
        self.add_transition(self.start, self.s1, .5)
        self.add_transition(self.start, self.s2, .5)
        self.add_transition(self.e1, self.skip, 1)
        self.add_transition(self.e2, self.skip, 1)
        self.add_transition(self.skip, self.skip, self.transition_probs['skip'])
        self.add_transition(self.skip, self.end, 1 - self.transition_probs['skip'])
        super().bake(*args, **kwargs)
    def count_repeats(self, states):
        """Count repeat units on a Viterbi path (list of (idx, state) tuples)."""
        states = np.array([x[1] for x in states])
        n1 = np.sum(states == self.d1)
        n2 = np.sum(states == self.d2)
        # subtract the extra unit copies baked into a tiled short repeat
        return n1 + n2 - self.repeat_offset
# repeat detection profile HMM
class flankedRepeatHMM(pg.HiddenMarkovModel):
    """Repeat detection HMM: prefix profile -> repeat loop -> suffix profile.

    Counts repeat expansions in a raw signal segment that spans the
    configured prefix, the repeat region and the suffix.
    """
    def __init__(self, repeat,
                 prefix, suffix,
                 pm, config=None):
        super().__init__()
        # defaults; entries in config override
        self.transition_probs = {'skip': 1-1e-4,
                                 'seq_std_scale': 1.0,
                                 'rep_std_scale': 1.0,
                                 'seq_std_offset': 0.0,
                                 'rep_std_offset': 0.0,
                                 'e1_ratio': 0.1}
        if config and isinstance(config, dict):
            for key, value in config.items():
                self.transition_probs[key] = value
        self.pore_model = pm
        self.repeat = repeat
        self.prefix = prefix
        self.suffix = suffix
        self.__build_model__()
    # def free_bake_buffers(self, *args, **kwargs):
    #     print("Free bake buffers in process {id}".format(id=os.getpid()))
    #     super().free_bake_buffers()
    def __build_model__(self):
        """Build and bake the prefix/repeat/suffix sub-model chain."""
        # expand primer sequences and get profile HMMs; flanks absorb enough
        # repeat copies to give k-mer context at both repeat boundaries
        prefix = self.prefix + ''.join([self.repeat] * int(np.ceil(self.pore_model.kmer / len(self.repeat))))[:-1]
        suffix = ''.join([self.repeat] * int(np.ceil(self.pore_model.kmer / len(self.repeat)))) + self.suffix
        # repeat copies hidden in the flanks, added back by count_repeats()
        self.flanking_count = int(np.ceil(self.pore_model.kmer / len(self.repeat))) * 2 - 1
        self.prefix_model = profileHMM(prefix, self.pore_model, self.transition_probs, state_prefix='prefix', std_scale=self.transition_probs['seq_std_scale'], std_offset=self.transition_probs['seq_std_offset'])
        self.suffix_model = profileHMM(suffix, self.pore_model, self.transition_probs, state_prefix='suffix', std_scale=self.transition_probs['seq_std_scale'], std_offset=self.transition_probs['seq_std_offset'])
        self.repeat_model = repeatHMM(self.repeat, self.pore_model, self.transition_probs, state_prefix='repeat', std_scale=self.transition_probs['rep_std_scale'], std_offset=self.transition_probs['rep_std_offset'])
        # add sub-modules, flanking and skip states
        self.add_model(self.prefix_model)
        self.add_model(self.repeat_model)
        self.add_model(self.suffix_model)
        self.add_transition(self.start, self.prefix_model.s1, self.transition_probs['e1_ratio'])
        self.add_transition(self.start, self.prefix_model.s2, (1-self.transition_probs['e1_ratio']))
        # repeat model
        self.add_transition(self.prefix_model.e1, self.repeat_model.s1, 1)
        self.add_transition(self.prefix_model.e2, self.repeat_model.s2, 1)
        # suffix model
        self.add_transition(self.repeat_model.e1, self.suffix_model.s1, 1)
        self.add_transition(self.repeat_model.e2, self.suffix_model.s2, 1)
        self.add_transition(self.suffix_model.e1, self.end, 1)
        self.add_transition(self.suffix_model.e2, self.end, 1)
        # bake and store state IDs
        self.bake(merge='All')
    def count_repeats(self, sequence, **kwargs):
        """Viterbi-decode *sequence*; return (repeat count, log p, state names).

        Returns (0, 0, []) when no Viterbi path exists.
        """
        p, path = super().viterbi(sequence, **kwargs)
        if path is not None:
            # repeat number equals loops in repeat model + 1 repeat in flanking sequences
            n = self.repeat_model.count_repeats(path) + self.flanking_count
            path = [x[1].name for x in path if x[0] < self.silent_start]
            return n, p, path
        else:
            return 0, 0, []
# repeat base modification detection
class repeatModHMM(pg.HiddenMarkovModel):
    """Repeat base-modification HMM: per repeat unit, choose base vs. modified model.

    Each pass through the loop classifies one repeat unit as unmodified
    ('0') or modified ('1'); mod_repeats() returns the per-unit pattern.
    """
    def __init__(self, repeat, pm_base, pm_mod, config=None):
        super().__init__()
        # defaults; entries in config override
        self.transition_probs = {'rep_std_scale': 1.5,
                                 'rep_std_offset': 0.0,
                                 'leave_repeat': .002}
        if config and isinstance(config, dict):
            for key, value in config.items():
                self.transition_probs[key] = value
        self.pore_model_base = pm_base
        self.pore_model_mod = pm_mod
        self.repeat = repeat
        self.__build_model__()
    def __build_model__(self):
        """Build the looped two-branch (base/mod) repeat unit model."""
        # expand the repeat unit to provide k-mer context (cf. repeatHMM)
        if len(self.repeat) >= self.pore_model_base.kmer:
            repeat = self.repeat + self.repeat[: self.pore_model_base.kmer - 1]
        else:
            ext = self.pore_model_base.kmer - 1 + (len(self.repeat) - 1) - ((self.pore_model_base.kmer - 1) % len(self.repeat))
            repeat = self.repeat + ''.join([self.repeat] * self.pore_model_base.kmer)[:ext]
        # emission range covering both pore models; input is clipped to it
        self.model_min = min(self.pore_model_base.model_min, self.pore_model_mod.model_min)
        self.model_max = max(self.pore_model_base.model_max, self.pore_model_mod.model_max)
        self.s0 = pg.State(pg.UniformDistribution(self.model_min, self.model_max), name='s0')
        self.e0 = pg.State(pg.UniformDistribution(self.model_min, self.model_max), name='e0')
        self.base_model = profileHMM(repeat, self.pore_model_base, self.transition_probs, state_prefix='base', no_silent=True, std_scale=self.transition_probs['rep_std_scale'], std_offset=self.transition_probs['rep_std_offset'])
        self.mod_model = profileHMM(repeat, self.pore_model_mod, self.transition_probs, state_prefix='mod', no_silent=True, std_scale=self.transition_probs['rep_std_scale'] * self.pore_model_mod.scale2stdv(self.pore_model_base), std_offset=self.transition_probs['rep_std_offset'])
        self.add_model(self.base_model)
        self.add_model(self.mod_model)
        self.add_state(self.s0)
        self.add_state(self.e0)
        # transitions: equal prior over base/mod branch and entry state
        self.add_transition(self.start, self.s0, 1)
        self.add_transition(self.s0, self.base_model.s1, 0.25)
        self.add_transition(self.s0, self.base_model.s2, 0.25)
        self.add_transition(self.s0, self.mod_model.s1, 0.25)
        self.add_transition(self.s0, self.mod_model.s2, 0.25)
        self.add_transition(self.base_model.e1, self.e0, 1)
        self.add_transition(self.base_model.e2, self.e0, 1)
        self.add_transition(self.mod_model.e1, self.e0, 1)
        self.add_transition(self.mod_model.e2, self.e0, 1)
        # leave after a unit or loop back for the next one
        self.add_transition(self.e0, self.end, self.transition_probs['leave_repeat'])
        self.add_transition(self.e0, self.s0, 1-self.transition_probs['leave_repeat'])
        # bake
        self.bake(merge='All')
    def mod_repeats(self, signal, **kwargs):
        """Return a '0'/'1' string (one char per repeat unit) or '-' if decoding fails."""
        p, path = super().viterbi(np.clip(signal, self.model_min, self.model_max), **kwargs)
        if path is not None:
            states = [x[1].name for x in path if x[0] < self.silent_start]
            # collapse each run of branch states between s0/e0 into one representative
            states_gr = [next(g) for k,g in itertools.groupby(states, key=lambda x : False if x in ['s0', 'e0'] else True) if k]
            pattern = ''.join(['1' if 'mod' in x else '0' for x in states_gr])
            return pattern
        else:
            return '-'
# main repeat detection methods
class repeatCounter(object):
    """Counts repeat expansions of configured targets in raw nanopore signals.

    Locates prefix/suffix flanks by raw-signal alignment (pyseqan), then
    runs the flankedRepeatHMM on the enclosed segment; optionally runs
    the modification HMM when a separate modified pore model is loaded.
    """
    def __init__(self, model_file, mod_model_file=None, align_config=None, HMM_config=None):
        # alignment scoring defaults; entries in align_config override
        default_config = {'dist_offset': 16.0,
                          'dist_min': 0.0,
                          'gap_open_h': -1.0,
                          'gap_open_v': -16.0,
                          'gap_extension_h': -1.0,
                          'gap_extension_v': -16.0,
                          'samples': 6}
        if align_config and isinstance(align_config, dict):
            for key, value in align_config.items():
                default_config[key] = value
        # raw signal aligner (pyseqan); parameter semantics per pyseqan — TODO confirm
        self.algn = pyseqan.align_raw()
        self.algn.dist_offset = default_config['dist_offset']
        self.algn.dist_min = default_config['dist_min']
        self.algn.gap_open_h = default_config['gap_open_h']
        self.algn.gap_open_v = default_config['gap_open_v']
        self.algn.gap_extension_h = default_config['gap_extension_h']
        self.algn.gap_extension_v = default_config['gap_extension_v']
        self.pm = pore_model(model_file)
        if mod_model_file:
            self.pm_mod = pore_model(mod_model_file)
        else:
            # no modification model: mod detection is disabled (pm == pm_mod)
            self.pm_mod = self.pm
        self.samples = default_config['samples']
        self.HMM_config = HMM_config
        self.targets = {}
        # per-target bundle of simulated flank signals and HMMs
        self.target_classifier = namedtuple('target_classifier', field_names=['prefix', 'suffix', 'prefix_ext', 'suffix_ext', 'repeatHMM', 'modHMM'])
    def __reverse_complement__(self, sequence):
        """Return the reverse complement; non-ACGT characters pass through."""
        complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
        return "".join(complement.get(base, base) for base in reversed(sequence))
    def __detect_range__(self, signal, segment, pre_trim=0, post_trim=0):
        """Locate *segment* in *signal* by overlap alignment.

        Returns (per-tick score, begin index, end index) of the located
        range in *signal*, optionally trimmed by pre/post segment ticks.
        """
        score, idx_signal, idx_segment = self.algn.align_overlap(signal, segment)
        segment_begin = np.abs(np.array(idx_signal) - idx_segment[0]).argmin()
        segment_end = np.abs(np.array(idx_signal) - idx_segment[-1]).argmin()
        if segment_end > segment_begin:
            # normalize score by the aligned length
            score = score / (segment_end - segment_begin)
        else:
            score = 0.0
        segment_begin = np.abs(np.array(idx_signal) - idx_segment[0 + pre_trim]).argmin()
        segment_end = np.abs(np.array(idx_signal) - idx_segment[-1 - post_trim]).argmin()
        return score, segment_begin, segment_end
    def __detect_short__(self, flanked_model, segment):
        """Run repeat counting on a signal segment with the target's HMM."""
        return flanked_model.count_repeats(segment)
    def add_target(self, target_name, repeat, prefix, suffix):
        """Register a repeat target; builds + and - strand classifiers.

        Raises ValueError when *target_name* is already defined.
        """
        if not target_name in self.targets:
            prefix_ext = prefix.upper()
            prefix = prefix[-50:].upper()
            suffix_ext = suffix.upper()
            suffix = suffix[:50].upper()
            repeat = repeat.upper()
            # template model
            tc_plus = self.target_classifier(
                self.pm.generate_signal(prefix, samples=self.samples),
                self.pm.generate_signal(suffix, samples=self.samples),
                self.pm.generate_signal(prefix_ext, samples=self.samples),
                self.pm.generate_signal(suffix_ext, samples=self.samples),
                flankedRepeatHMM(repeat, prefix, suffix, self.pm, self.HMM_config),
                repeatModHMM(repeat, self.pm, self.pm_mod, config=self.HMM_config))
            # complement model
            tc_minus = self.target_classifier(
                self.pm.generate_signal(self.__reverse_complement__(suffix), samples=self.samples),
                self.pm.generate_signal(self.__reverse_complement__(prefix), samples=self.samples),
                self.pm.generate_signal(self.__reverse_complement__(suffix_ext), samples=self.samples),
                self.pm.generate_signal(self.__reverse_complement__(prefix_ext), samples=self.samples),
                flankedRepeatHMM(self.__reverse_complement__(repeat), self.__reverse_complement__(suffix), self.__reverse_complement__(prefix), self.pm, self.HMM_config),
                repeatModHMM(self.__reverse_complement__(repeat), self.pm, self.pm_mod, config=self.HMM_config))
            self.targets[target_name] = (tc_plus, tc_minus)
            logger.log("RepeatCounter: Added target {}".format(target_name), logger.log_type.Info)
        else:
            raise ValueError("RepeatCounter: Target with name " + str(target_name) + " already defined.")
    def detect(self, target_name, raw_signal, strand):
        """Count repeats of *target_name* in *raw_signal* on the given strand.

        Returns (count, prefix score, suffix score, log p, repeat offset,
        repeat tick length, modification pattern).  Raises ValueError for
        unknown targets or strands other than '+'/'-'.
        """
        if target_name in self.targets:
            tc_plus, tc_minus = self.targets[target_name]
            if strand == '+':
                tc = tc_plus
            elif strand == '-':
                tc = tc_minus
            else:
                raise ValueError("RepeatCounter: Strand must be + or -.")
            # denoise, then morphologically smooth a quantized copy for alignment
            flt_signal = sp.medfilt(raw_signal, kernel_size=3)
            morph_signal = (flt_signal - np.median(flt_signal)) / self.pm.MAD(flt_signal)
            morph_signal = np.clip(morph_signal * 24 + 127, 0, 255).astype(np.dtype('uint8')).reshape((1, len(morph_signal)))
            flt = rectangle(1, 8)
            morph_signal = opening(morph_signal, flt)
            morph_signal = closing(morph_signal, flt)[0].astype(np.dtype('float'))
            morph_signal = self.pm.normalize2model(morph_signal.astype(np.dtype('float')), mode='minmax')
            flt_signal = self.pm.normalize2model(flt_signal.astype(np.dtype('float')), mode='minmax')
            # align the extended flanks, then trim to the short flanks
            trim_prefix = len(tc.prefix_ext) - len(tc.prefix)
            trim_suffix = len(tc.suffix_ext) - len(tc.suffix)
            score_prefix, prefix_begin, prefix_end = self.__detect_range__(morph_signal, tc.prefix_ext, pre_trim=trim_prefix)
            score_suffix, suffix_begin, suffix_end = self.__detect_range__(morph_signal, tc.suffix_ext, post_trim=trim_suffix)
            n = 0; p = 0; states = []; mod_pattern = '-'
            if prefix_begin < suffix_end and score_prefix > 0.0 and score_suffix > 0.0:
                n, p, states = self.__detect_short__(tc.repeatHMM, flt_signal[prefix_begin:suffix_end])
                if self.pm != self.pm_mod:
                    #rep_signal = flt_signal[prefix_begin:suffix_end][np.array([True if 'repeat' in x else False for x in states])]
                    nrm_signal = self.pm.normalize2model(raw_signal.astype(np.dtype('float')), mode='minmax')
                    rep_signal = nrm_signal[prefix_begin:suffix_end][np.array([True if 'repeat' in x else False for x in states])]
                    mod_pattern = tc.modHMM.mod_repeats(rep_signal)
                    # import matplotlib.pyplot as plt
                    # f, ax = plt.subplots(2, sharex=True)
                    # ax[0].plot(raw_signal[prefix_begin:suffix_end][np.array([True if 'repeat' in x else False for x in states])], 'k-')
                    # ax[1].plot(rep_signal, 'k-')
                    # ax[0].set_title('Count {count}, strand {strand}'.format(count=n, strand=strand))
                    # plt.show()
            return n, score_prefix, score_suffix, p, prefix_end, max(suffix_begin - prefix_end, 0), mod_pattern
        else:
            raise ValueError("RepeatCounter: Target with name " + str(target_name) + " not defined.")
# multi locus repeat detection
class repeatDetector(object):
    """Multi-locus repeat detection: routes aligned reads to repeat targets.

    Parses SAM alignment lines, intersects each alignment with the
    configured repeat loci and runs the repeatCounter on the read's raw
    fast5 signal for every overlapping target.
    """
    class sam_record(object):
        """Minimal container for the SAM fields used downstream."""
        def __init__(self):
            self.QNAME = ''      # read name
            self.FLAG = 0        # SAM flag (bit 0x10 = reverse strand)
            self.RNAME = ''      # reference name
            self.POS = 0         # 1-based mapping position
            self.TLEN = 0        # reference span derived from the CIGAR
            self.CLIP_BEGIN = 0  # soft/hard clipped bases at read start
            self.CLIP_END = 0    # soft/hard clipped bases at read end
    def __init__(self, repeat_config, model_file, fast5_index_file, mod_model_file=None, align_config=None, HMM_config=None):
        self.repeatCounter = repeatCounter(model_file, mod_model_file=mod_model_file, align_config=align_config, HMM_config=HMM_config)
        self.repeatLoci = defaultdict(list)  # chromosome -> [(target, begin, end)]
        self.repeat_config = repeat_config
        self.is_init = False
        self.f5 = fast5Index.fast5Index(fast5_index_file)
    def __init_hmm__(self):
        """Lazily build the per-target HMMs on first detect() call."""
        for target_name, (chr, begin, end, repeat, prefix, suffix) in self.repeat_config.items():
            self.repeatCounter.add_target(target_name, repeat, prefix, suffix)
            self.repeatLoci[chr].append((target_name, begin, end))
        self.is_init = True
    def __decode_cigar__(self, cigar):
        """Decode a CIGAR string into a list of (length, op) tuples."""
        # raw string so \d / \D stay regex classes instead of string escapes
        ops = [(int(op[:-1]), op[-1]) for op in re.findall(r'(\d*\D)', cigar)]
        return ops
    def __ops_length__(self, ops, recOps='MIS=X'):
        """Sum the lengths of all CIGAR ops whose code is in *recOps*."""
        n = [op[0] for op in ops if op[1] in recOps]
        return sum(n)
    def __decode_sam__(self, sam_line):
        """Parse one SAM alignment line into a sam_record.

        Returns an empty record (QNAME == '') when the line is malformed.
        """
        cols = sam_line.rstrip().split('\t')
        sr = self.sam_record()
        if len(cols) >= 11:
            try:
                sr.QNAME = cols[0]
                sr.FLAG = int(cols[1])
                sr.RNAME = cols[2]
                sr.POS = int(cols[3])
                cigar_ops = self.__decode_cigar__(cols[5])
                # reference span covered: matches, deletions, skips
                sr.TLEN = self.__ops_length__(cigar_ops, recOps='MDN=X')
                # clips may appear as H then S, hence the first/last two ops
                sr.CLIP_BEGIN = sum([op[0] for op in cigar_ops[:2] if op[1] in 'SH'])
                sr.CLIP_END = sum([op[0] for op in cigar_ops[-2:] if op[1] in 'SH'])
            except (ValueError, IndexError):
                # malformed numeric field or truncated CIGAR: report as unparsed
                return self.sam_record()
        return sr
    def __intersect_target__(self, sam_record):
        """Return names of all repeat loci fully covered by the alignment."""
        target_names = []
        if sam_record.RNAME in self.repeatLoci.keys():
            for target_name, begin, end in self.repeatLoci[sam_record.RNAME]:
                # locus must lie strictly inside the clipped-extended alignment
                if begin > sam_record.POS - sam_record.CLIP_BEGIN and end < sam_record.POS + sam_record.TLEN + sam_record.CLIP_END:
                    target_names.append(target_name)
        return target_names
    def detect(self, sam_line=''):
        """Run repeat counting for one SAM line.

        Returns {'target_counts': [...]} or None when the line cannot be
        parsed, no target overlaps, or the raw signal is missing.
        """
        if not self.is_init:
            self.__init_hmm__()
        target_counts = []
        sam_record = self.__decode_sam__(sam_line)
        if not sam_record.QNAME:
            logger.log("Detector: Error parsing alignment \n{}".format(sam_line), logger.log_type.Error)
            return None
        # bit 0x10 set means the read aligned to the reverse strand
        if sam_record.FLAG & 0x10 == 0:
            strand = '+'
        else:
            strand = '-'
        target_names = self.__intersect_target__(sam_record)
        if not target_names:
            logger.log("Detector: No target for {}".format(sam_record.QNAME), logger.log_type.Debug)
            return None
        f5_record = self.f5.get_raw(sam_record.QNAME)
        if f5_record is None:
            logger.log("Detector: No fast5 for ID {id}".format(id=sam_record.QNAME), logger.log_type.Warning)
            return None
        logger.log("Detector: Test {id} for targets: {targets}.".format(id=sam_record.QNAME, targets=','.join(target_names)), logger.log_type.Debug)
        for target_name in target_names:
            repeat_count = self.repeatCounter.detect(target_name, f5_record, strand)
            target_counts.append((sam_record.QNAME, target_name, strand, *repeat_count))
        return {'target_counts': target_counts}
# writes repeat detection output to file or stdout
class outputWriter(object):
    """Writes repeat detection results as TSV to a file or to stdout."""
    # single source of truth for the output columns (was duplicated literals)
    _HEADER = ['ID', 'target', 'strand', 'count', 'score_prefix',
               'score_suffix', 'log_p', 'offset', 'ticks', 'mod']
    def __init__(self, output_file=None):
        """Create/truncate *output_file* (or use stdout) and emit the header."""
        self.output_file = output_file
        if self.output_file:
            with open(self.output_file, 'w') as fp:
                print('\t'.join(self._HEADER), file=fp)
        else:
            print('\t'.join(self._HEADER))
    def write_line(self, target_counts=None):
        """Append one tab-separated line per record in *target_counts*.

        Default is None (not a mutable []) to avoid the shared-default pitfall.
        """
        rows = ['\t'.join(str(x) for x in tc) for tc in (target_counts or [])]
        if self.output_file:
            with open(self.output_file, 'a') as fp:
                for row in rows:
                    print(row, file=fp)
        else:
            for row in rows:
                print(row)
# multiprocess dispatcher
class mt_dispatcher():
    """Multiprocess fan-out/fan-in dispatcher.

    Worker processes pull dicts from input_queue, run them through the
    worker_callables chain and push results to a collector process,
    which applies collector_callables and emits to output_queue.
    Iterate the dispatcher to consume results; call close() to shut down.

    NOTE(review): the n_processed() method at the bottom is shadowed by the
    instance attribute self.n_processed set here, and the counter is only
    incremented inside the collector child process, so it is not visible in
    the parent — confirm before relying on it.
    """
    def __init__(self, input_queue, threads=1, worker_callables=[], collector_callables=[]):
        self.worker_callables = worker_callables
        self.collector_callables = collector_callables
        self.n_processed = 0
        self.n_worker = threads
        self.input_queue = input_queue
        self.output_queue = Queue()
        # bounded to apply back-pressure on fast workers
        self.collector_queue = Queue(threads * 10)
        self.worker = []
        for i in range(threads):
            self.worker.append(Process(target=self.__worker__, ))
            self.worker[-1].start()
        self.collector = Process(target=self.__collector__, )
        self.collector.start()
    def __worker__(self):
        """Worker loop: process inputs until a falsy sentinel arrives."""
        try:
            while True:
                input = self.input_queue.get()
                if not input:
                    break
                try:
                    # chain callables; each output feeds the next as kwargs
                    for worker_callable in self.worker_callables:
                        input = worker_callable(**input)
                        if input:
                            self.collector_queue.put(input)
                    # 'done' lets the collector count processed items
                    self.collector_queue.put('done')
                except KeyboardInterrupt:
                    logger.log("Factory: Worker terminating on user request.", logger.log_type.Info)
                    break
                except Exception as e:
                    type, value, trace = sys.exc_info()
                    logger.log('\n'.join(["Factory: Unexpected error in Worker, proceeding wiht remaining reads."] +
                                          traceback.format_exception(*sys.exc_info())), logger.log_type.Warning)
                    pass
        except KeyboardInterrupt:
            self.collector_queue.put(None)
            self.collector_queue.close()
            logger.log("Factory: Worker terminating on user request.", logger.log_type.Info)
            return
        # None poison pill tells the collector this worker is finished
        self.collector_queue.put(None)
        self.collector_queue.close()
        logger.log("Factory: Worker terminating.", logger.log_type.Debug)
    def __collector__(self):
        """Collector loop: runs until every worker sent its poison pill."""
        poison_count = self.n_worker
        try:
            while True:
                input = self.collector_queue.get()
                if input is None:
                    poison_count -= 1
                    if poison_count <= 0:
                        break
                    continue
                elif input == 'done':
                    self.n_processed += 1
                    continue
                for collector_callable in self.collector_callables:
                    input = collector_callable(**input)
                self.output_queue.put(input)
        except KeyboardInterrupt:
            logger.log("Factory: Collector terminating on user request.", logger.log_type.Info)
            pass
        except:
            logger.log("Factory: Unexpected error in collector, terminating.", logger.log_type.Error)
            pass
        # None signals end-of-stream to __next__
        self.output_queue.put(None)
        self.output_queue.close()
        logger.log("Factory: Collector terminating.", logger.log_type.Debug)
    def __stop__(self):
        """Send one sentinel per worker, then join all child processes."""
        for w in self.worker:
            self.input_queue.put(None)
        self.input_queue.close()
        for w in self.worker:
            w.join()
        self.collector_queue.close()
        self.collector.join()
        logger.log("Factory: Terminated.", logger.log_type.Debug)
    def __iter__(self):
        return self
    def __next__(self):
        """Yield collected results; stop when the collector signals None."""
        while True:
            result = self.output_queue.get()
            if result is None:
                self.output_queue.close()
                raise StopIteration()
            else:
                return result
    def close(self):
        """Shut down workers and collector."""
        self.__stop__()
    def n_processed(self):
        # NOTE(review): unreachable as a method — self.n_processed is an int
        # attribute after __init__, so inst.n_processed() raises TypeError.
        return self.n_processed
# parse config.json
def parse_config(repeat_config_file, param_config_file=None):
    """Parse the repeat region table and optional HMM/alignment JSON config.

    repeat_config_file: whitespace-separated table with a header line and
    7 columns (chr, begin, end, name, repeat, prefix, suffix).
    param_config_file: optional JSON with 'align' and 'HMM' dict sections.

    Returns {'repeat': {name: (chr, begin, end, repeat, prefix, suffix)},
             'align': dict or None, 'HMM': dict or None}.
    Exits the process on a malformed parameter config.
    """
    config = {}
    # parse repeat config
    repeats = {}
    with open(repeat_config_file, 'r') as fp:
        header = next(fp)  # skip the header line
        for line in fp:
            cols = line.rstrip().split()
            if len(cols) == 7:
                repeats[cols[3]] = (cols[0], int(cols[1]), int(cols[2]), cols[4], cols[5], cols[6])
            else:
                logger.log("Config: Repeat config column mismatch while parsing \n{line}".format(line=line), logger.log_type.Error)
    config['repeat'] = repeats
    config['align'] = None
    config['HMM'] = None
    # parse HMM and alignment config
    if param_config_file:
        with open(param_config_file) as fp:
            ld_conf = json.load(fp)
        try:
            # explicit checks instead of assert: still enforced under `python -O`
            if not (isinstance(ld_conf, dict)
                    and isinstance(ld_conf['align'], dict)
                    and isinstance(ld_conf['HMM'], dict)):
                logger.log('Config: file format broken', logger.log_type.Error)
                exit(1)
            # Do not check values, missing ones get defaulted, additional ones ignored
            config['align'] = ld_conf['align']
            config['HMM'] = ld_conf['HMM']
        except KeyError as e:
            logger.log('Config: Error loading HMM config file, missing {}'.format(e.args[0]), logger.log_type.Error)
            exit(1)
    return config
# main class
class main():
    def __init__(self):
        """Dispatch the STRique sub-command named in sys.argv[1]."""
        parser = argparse.ArgumentParser(
            description='STRique: a nanopore raw signal repeat detection pipeline',
            usage='''STRique.py <command> [<args>]
Available commands are:
   index      Index batch(es) of bulk-fast5 or tar archived single fast5
   count      Count single read repeat expansions
   plot       Plot repeat signal after counting
''')
        parser.add_argument('command', help='Subcommand to run')
        # parse only the sub-command; the rest is handled by the sub-parser
        args = parser.parse_args(sys.argv[1:2])
        if not hasattr(self, args.command):
            print('Unrecognized command', file=sys.stderr)
            parser.print_help(file=sys.stderr)
            exit(1)
        # invoke the method named like the sub-command with its own argv
        getattr(self, args.command)(sys.argv[2:])
    def index(self, argv):
        """Sub-command: index fast5 raw data archives and print the records."""
        parser = argparse.ArgumentParser(description="Fast5 raw data archive indexing")
        parser.add_argument("input", help="Input batch or directory of batches")
        parser.add_argument("--recursive", action='store_true', help="Recursively scan input")
        parser.add_argument("--out_prefix", default="", help="Prefix for file paths in output")
        parser.add_argument("--tmp_prefix", default=None, help="Prefix for temporary data")
        args = parser.parse_args(argv)
        for record in fast5Index.fast5Index.index(args.input, recursive=args.recursive, output_prefix=args.out_prefix, tmp_prefix=args.tmp_prefix):
            print(record)
    def count(self, argv):
        """Sub-command: count repeat expansions for aligned reads.

        Reads SAM lines from --algn or stdin, dispatches them to parallel
        repeat detectors and writes TSV results to --out or stdout.
        """
        # command line
        parser = argparse.ArgumentParser(description="STR Detection in raw nanopore data")
        parser.add_argument("f5Index", help="Fast5 index")
        parser.add_argument("model", help="Pore model")
        parser.add_argument("repeat", help="Repeat region config file")
        parser.add_argument("--out", default=None, help="Output file name, if not given print to stdout")
        parser.add_argument("--algn", default=None, help="Alignment in sam format, if not given read from stdin")
        parser.add_argument("--mod_model", default=None, help="Base modification pore model")
        parser.add_argument("--config", help="Config file with HMM transition probabilities")
        parser.add_argument("--t", type=int, default=1, help="Number of processes to use in parallel")
        parser.add_argument("--log_level", default='warning', choices=['error', 'warning', 'info', 'debug'], help="Log level")
        args = parser.parse_args(argv)
        logger.init(log_level=args.log_level)
        # load config
        config = parse_config(args.repeat, args.config)
        logger.log("Main: Parsed config.", logger.log_type.Debug)
        # index/load reads
        if not os.path.isfile(args.f5Index):
            logger.log("Main: Fast5 index file does not exist.", logger.log_type.Error)
            exit(1)
        # model files
        if not os.path.isfile(args.model):
            logger.log("Main: Pore model file does not exist.", logger.log_type.Error)
            exit(1)
        if args.mod_model and not os.path.isfile(args.mod_model):
            logger.log("Main: Modification pore model file does not exist.", logger.log_type.Error)
            exit(1)
        # repeat detector
        rd = repeatDetector(config['repeat'], args.model, args.f5Index, mod_model_file=args.mod_model, align_config=config['align'], HMM_config=config['HMM'])
        ow = outputWriter(args.out)
        # run repeat detection
        sam_queue = Queue(100)
        mt = mt_dispatcher(sam_queue, threads=args.t, worker_callables=[rd.detect], collector_callables=[ow.write_line])
        # feed alignment lines (skipping SAM header lines) to the workers
        if args.algn:
            with open(args.algn, 'r') as fp:
                for line in fp:
                    if not line.startswith('@'):
                        sam_queue.put({'sam_line': line})
        else:
            for line in sys.stdin:
                if not line.startswith('@'):
                    sam_queue.put({'sam_line': line})
        mt.close()
        logger.close()
def plot(self, argv):
parser = argparse.ArgumentParser(description="Signal plots over STR expansions")
parser.add_argument("f5Index", help="Fast5 index")
parser.add_argument("--counts", default=None, help="Repeat count output from STRique, if not given read from stdin")
parser.add_argument("--output", default=None, help="Output directory for plots, use instead of interactive GUI")
parser.add_argument("--format", default='png', choices={"png", "pdf", "svg"}, help="Output format when writing to files")
parser.add_argument("--width", default=16, type=int, help="Plot width")
parser.add_argument("--height", default=9, type=int, help="Plot height")
parser.add_argument("--dpi", default=80, type=int, help="Resolution of plot")
parser.add_argument("--extension", type=float, default=0.1, help="Extension as fraction of repeat signal around STR region to plot")
parser.add_argument("--zoom", type=int, default=500, help="Region around prefix and suffix to plot")
parser.add_argument("--log_level", default='warning', choices=['error', 'warning', 'info', 'debug'], help="Log level")
args = parser.parse_args(argv)
logger.init(log_level=args.log_level)
# index/load reads
import matplotlib.pyplot as plt
if not os.path.isfile(args.f5Index):
logger.log("Main: Fast5 index file does not exist.", logger.log_type.Error)
exit(1)
f5Index = fast5Index.fast5Index(args.f5Index)
# create output directory if needed
if args.output:
os.makedirs(args.output, exist_ok=True)
def tsv_iter(input):
if input:
with open(input, 'r') as fp:
for line in fp:
if not line.startswith('ID'):
yield line.strip().split('\t')
else:
for line in sys.stdin:
if not line.startswith('ID'):
yield line.strip().split('\t')
for record in tsv_iter(args.counts):
ID, target, strand, count, score_prefix, score_suffix, _, offset, ticks = record[:9]
offset = int(offset)
ticks = int(ticks)
score_prefix = float(score_prefix)
score_suffix = float(score_suffix)
raw_signal = f5Index.get_raw(ID)
if raw_signal is not None:
flt_signal = sp.medfilt(raw_signal, kernel_size=3)
flt_signal = (flt_signal - np.median(flt_signal)) / np.std(flt_signal)
prefix_extend = max(0, offset - int(ticks * args.extension))
suffix_extend = min(len(flt_signal), offset + ticks + int(ticks * args.extension))
prefix_begin = max(offset - args.zoom, 0)
prefix_end = prefix_begin + args.zoom * 2
suffix_begin = offset + ticks - args.zoom
suffix_end = min(len(flt_signal), suffix_begin + args.zoom * 2)
plt.figure(num=None, figsize=(args.width, args.height), dpi=args.dpi, facecolor='w', edgecolor='k')
plt.subplot(2,1,1)
plt.plot(flt_signal[prefix_extend:suffix_extend], 'k-', linewidth=0.5, label='genome')
plt.plot(np.arange(ticks) + (offset - prefix_extend), flt_signal[offset:offset+ticks], 'b-', linewidth=1.0, label='STR')
plt.legend()
plt.title("Read {} with {} repeats".format(ID, count))
plt.subplot(2,2,3)
plt.plot(flt_signal[prefix_begin:prefix_end], 'k-', label='prefix')
plt.plot(np.arange(args.zoom, 2 * args.zoom), flt_signal[prefix_begin + args.zoom:prefix_end], 'b-')
plt.axvline(args.zoom, color='red', label='STR begin')
plt.legend()
plt.title("Prefix region with score {:.2f}".format(score_prefix))
plt.subplot(2,2,4)
plt.plot(flt_signal[suffix_begin:suffix_end], 'k-', label='suffix')
plt.plot(flt_signal[suffix_begin:suffix_end - args.zoom], 'b-')
plt.axvline(args.zoom, color='red', label='STR end')
plt.legend()
plt.title("Suffix region with score {:.2f}".format(score_suffix))
plt.tight_layout()
if args.output:
f_name = os.path.join(args.output, '_'.join([target, count, ID]) + '.' + args.format)
plt.savefig(f_name, )
else:
plt.show()
else:
logger.log("Plot: No fast5 for ID {id}".format(id=sam_record.QNAME), logger.log_type.Warning)
logger.close()
exit(0)
# script entry point
if __name__ == '__main__':
    # restore the default SIGPIPE disposition so that piping stdout into
    # e.g. `head` terminates the process instead of raising BrokenPipeError
    signal(SIGPIPE,SIG_DFL)
    main()
| [
"numpy.sum",
"argparse.ArgumentParser",
"pomegranate.UniformDistribution",
"numpy.clip",
"STRique_lib.pyseqan.align_raw",
"collections.defaultdict",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.argpartition",
"numpy.arange",
"multiprocessing.Queue",
"pomegranate.State",
"numpy.random... | [((2317, 2333), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2331, 2333), False, 'import threading, queue\n'), ((2350, 2357), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (2355, 2357), False, 'from multiprocessing import Pool, Process, Event, Value, Queue\n'), ((51538, 51562), 'signal.signal', 'signal', (['SIGPIPE', 'SIG_DFL'], {}), '(SIGPIPE, SIG_DFL)\n', (51544, 51562), False, 'from signal import signal, SIGPIPE, SIG_DFL\n'), ((3541, 3574), 'multiprocessing.Process', 'Process', ([], {'target': 'logger.__logger__'}), '(target=logger.__logger__)\n', (3548, 3574), False, 'from multiprocessing import Pool, Process, Event, Value, Queue\n'), ((5570, 5634), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['a'], {'shape': 'shape', 'strides': 'strides'}), '(a, shape=shape, strides=strides)\n', (5601, 5634), True, 'import numpy as np\n'), ((10449, 10494), 'pomegranate.State', 'pg.State', (['None'], {'name': "(self.state_prefix + 's1')"}), "(None, name=self.state_prefix + 's1')\n", (10457, 10494), True, 'import pomegranate as pg\n'), ((10511, 10556), 'pomegranate.State', 'pg.State', (['None'], {'name': "(self.state_prefix + 's2')"}), "(None, name=self.state_prefix + 's2')\n", (10519, 10556), True, 'import pomegranate as pg\n'), ((10573, 10618), 'pomegranate.State', 'pg.State', (['None'], {'name': "(self.state_prefix + 'e1')"}), "(None, name=self.state_prefix + 'e1')\n", (10581, 10618), True, 'import pomegranate as pg\n'), ((10635, 10680), 'pomegranate.State', 'pg.State', (['None'], {'name': "(self.state_prefix + 'e2')"}), "(None, name=self.state_prefix + 'e2')\n", (10643, 10680), True, 'import pomegranate as pg\n'), ((16818, 16896), 'pomegranate.NormalDistribution', 'pg.NormalDistribution', (['self.pore_model.model_median', 'self.pore_model.model_MAD'], {}), '(self.pore_model.model_median, self.pore_model.model_MAD)\n', (16839, 16896), True, 'import pomegranate as pg\n'), ((16931, 17007), 
'pomegranate.UniformDistribution', 'pg.UniformDistribution', (['self.pore_model.model_min', 'self.pore_model.model_max'], {}), '(self.pore_model.model_min, self.pore_model.model_max)\n', (16953, 17007), True, 'import pomegranate as pg\n'), ((17026, 17094), 'pomegranate.State', 'pg.State', (['self.dummy_distribution'], {'name': "(self.state_prefix + 'dummy1')"}), "(self.dummy_distribution, name=self.state_prefix + 'dummy1')\n", (17034, 17094), True, 'import pomegranate as pg\n'), ((17111, 17179), 'pomegranate.State', 'pg.State', (['self.dummy_distribution'], {'name': "(self.state_prefix + 'dummy2')"}), "(self.dummy_distribution, name=self.state_prefix + 'dummy2')\n", (17119, 17179), True, 'import pomegranate as pg\n'), ((17196, 17241), 'pomegranate.State', 'pg.State', (['None'], {'name': "(self.state_prefix + 'e1')"}), "(None, name=self.state_prefix + 'e1')\n", (17204, 17241), True, 'import pomegranate as pg\n'), ((17258, 17303), 'pomegranate.State', 'pg.State', (['None'], {'name': "(self.state_prefix + 'e2')"}), "(None, name=self.state_prefix + 'e2')\n", (17266, 17303), True, 'import pomegranate as pg\n'), ((18249, 18314), 'pomegranate.State', 'pg.State', (['self.skip_distribution'], {'name': "(self.state_prefix + 'skip')"}), "(self.skip_distribution, name=self.state_prefix + 'skip')\n", (18257, 18314), True, 'import pomegranate as pg\n'), ((18813, 18845), 'numpy.array', 'np.array', (['[x[1] for x in states]'], {}), '([x[1] for x in states])\n', (18821, 18845), True, 'import numpy as np\n'), ((18859, 18884), 'numpy.sum', 'np.sum', (['(states == self.d1)'], {}), '(states == self.d1)\n', (18865, 18884), True, 'import numpy as np\n'), ((18898, 18923), 'numpy.sum', 'np.sum', (['(states == self.d2)'], {}), '(states == self.d2)\n', (18904, 18923), True, 'import numpy as np\n'), ((26376, 26395), 'STRique_lib.pyseqan.align_raw', 'pyseqan.align_raw', ([], {}), '()\n', (26393, 26395), False, 'from STRique_lib import fast5Index, pyseqan\n'), ((27088, 27208), 
'collections.namedtuple', 'namedtuple', (['"""target_classifier"""'], {'field_names': "['prefix', 'suffix', 'prefix_ext', 'suffix_ext', 'repeatHMM', 'modHMM']"}), "('target_classifier', field_names=['prefix', 'suffix',\n 'prefix_ext', 'suffix_ext', 'repeatHMM', 'modHMM'])\n", (27098, 27208), False, 'from collections import namedtuple, defaultdict\n'), ((33485, 33509), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (33496, 33509), False, 'from collections import namedtuple, defaultdict\n'), ((33600, 33639), 'STRique_lib.fast5Index.fast5Index', 'fast5Index.fast5Index', (['fast5_index_file'], {}), '(fast5_index_file)\n', (33621, 33639), False, 'from STRique_lib import fast5Index, pyseqan\n'), ((37899, 37906), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (37904, 37906), False, 'from multiprocessing import Pool, Process, Event, Value, Queue\n'), ((37938, 37957), 'multiprocessing.Queue', 'Queue', (['(threads * 10)'], {}), '(threads * 10)\n', (37943, 37957), False, 'from multiprocessing import Pool, Process, Event, Value, Queue\n'), ((38143, 38177), 'multiprocessing.Process', 'Process', ([], {'target': 'self.__collector__'}), '(target=self.__collector__)\n', (38150, 38177), False, 'from multiprocessing import Pool, Process, Event, Value, Queue\n'), ((42834, 43183), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""STRique: a nanopore raw signal repeat detection pipeline"""', 'usage': '"""STRique.py <command> [<args>]\nAvailable commands are:\n index Index batch(es) of bulk-fast5 or tar archived single fast5\n count Count single read repeat expansions\n plot Plot repeat signal after counting\n"""'}), '(description=\n \'STRique: a nanopore raw signal repeat detection pipeline\', usage=\n """STRique.py <command> [<args>]\nAvailable commands are:\n index Index batch(es) of bulk-fast5 or tar archived single fast5\n count Count single read repeat expansions\n plot Plot repeat signal after counting\n"""\n 
)\n', (42857, 43183), False, 'import os, sys, traceback, json, argparse\n'), ((43564, 43634), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fast5 raw data archive indexing"""'}), "(description='Fast5 raw data archive indexing')\n", (43587, 43634), False, 'import os, sys, traceback, json, argparse\n'), ((44060, 44188), 'STRique_lib.fast5Index.fast5Index.index', 'fast5Index.fast5Index.index', (['args.input'], {'recursive': 'args.recursive', 'output_prefix': 'args.out_prefix', 'tmp_prefix': 'args.tmp_prefix'}), '(args.input, recursive=args.recursive,\n output_prefix=args.out_prefix, tmp_prefix=args.tmp_prefix)\n', (44087, 44188), False, 'from STRique_lib import fast5Index, pyseqan\n'), ((44280, 44353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""STR Detection in raw nanopore data"""'}), "(description='STR Detection in raw nanopore data')\n", (44303, 44353), False, 'import os, sys, traceback, json, argparse\n'), ((46218, 46228), 'multiprocessing.Queue', 'Queue', (['(100)'], {}), '(100)\n', (46223, 46228), False, 'from multiprocessing import Pool, Process, Event, Value, Queue\n'), ((46790, 46861), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Signal plots over STR expansions"""'}), "(description='Signal plots over STR expansions')\n", (46813, 46861), False, 'import os, sys, traceback, json, argparse\n'), ((48244, 48279), 'STRique_lib.fast5Index.fast5Index', 'fast5Index.fast5Index', (['args.f5Index'], {}), '(args.f5Index)\n', (48265, 48279), False, 'from STRique_lib import fast5Index, pyseqan\n'), ((6188, 6218), 'numpy.percentile', 'np.percentile', (['signal', '[1, 99]'], {}), '(signal, [1, 99])\n', (6201, 6218), True, 'import numpy as np\n'), ((6249, 6285), 'numpy.percentile', 'np.percentile', (['model_values', '[1, 99]'], {}), '(model_values, [1, 99])\n', (6262, 6285), True, 'import numpy as np\n'), ((6307, 6341), 'numpy.median', 'np.median', (['signal[signal < 
q5_sig]'], {}), '(signal[signal < q5_sig])\n', (6316, 6341), True, 'import numpy as np\n'), ((6364, 6399), 'numpy.median', 'np.median', (['signal[signal > q95_sig]'], {}), '(signal[signal > q95_sig])\n', (6373, 6399), True, 'import numpy as np\n'), ((6421, 6467), 'numpy.median', 'np.median', (['model_values[model_values < q5_mod]'], {}), '(model_values[model_values < q5_mod])\n', (6430, 6467), True, 'import numpy as np\n'), ((6490, 6537), 'numpy.median', 'np.median', (['model_values[model_values > q95_mod]'], {}), '(model_values[model_values > q95_mod])\n', (6499, 6537), True, 'import numpy as np\n'), ((7812, 7891), 'numpy.clip', 'np.clip', (['nrm_signal', '(self.model_min + 0.5)', '(self.model_max - 0.5)'], {'out': 'nrm_signal'}), '(nrm_signal, self.model_min + 0.5, self.model_max - 0.5, out=nrm_signal)\n', (7819, 7891), True, 'import numpy as np\n'), ((8196, 8227), 'numpy.repeat', 'np.repeat', (['level_means', 'samples'], {}), '(level_means, samples)\n', (8205, 8227), True, 'import numpy as np\n'), ((23588, 23642), 'pomegranate.UniformDistribution', 'pg.UniformDistribution', (['self.model_min', 'self.model_max'], {}), '(self.model_min, self.model_max)\n', (23610, 23642), True, 'import pomegranate as pg\n'), ((23682, 23736), 'pomegranate.UniformDistribution', 'pg.UniformDistribution', (['self.model_min', 'self.model_max'], {}), '(self.model_min, self.model_max)\n', (23704, 23736), True, 'import pomegranate as pg\n'), ((25263, 25310), 'numpy.clip', 'np.clip', (['signal', 'self.model_min', 'self.model_max'], {}), '(signal, self.model_min, self.model_max)\n', (25270, 25310), True, 'import numpy as np\n'), ((30469, 30506), 'scipy.signal.medfilt', 'sp.medfilt', (['raw_signal'], {'kernel_size': '(3)'}), '(raw_signal, kernel_size=3)\n', (30479, 30506), True, 'import scipy.signal as sp\n'), ((30741, 30756), 'skimage.morphology.rectangle', 'rectangle', (['(1)', '(8)'], {}), '(1, 8)\n', (30750, 30756), False, 'from skimage.morphology import opening, closing, dilation, 
erosion, rectangle\n'), ((30784, 30810), 'skimage.morphology.opening', 'opening', (['morph_signal', 'flt'], {}), '(morph_signal, flt)\n', (30791, 30810), False, 'from skimage.morphology import opening, closing, dilation, erosion, rectangle\n'), ((42087, 42100), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (42096, 42100), False, 'import os, sys, traceback, json, argparse\n'), ((45450, 45478), 'os.path.isfile', 'os.path.isfile', (['args.f5Index'], {}), '(args.f5Index)\n', (45464, 45478), False, 'import os, sys, traceback, json, argparse\n'), ((45625, 45651), 'os.path.isfile', 'os.path.isfile', (['args.model'], {}), '(args.model)\n', (45639, 45651), False, 'import os, sys, traceback, json, argparse\n'), ((48088, 48116), 'os.path.isfile', 'os.path.isfile', (['args.f5Index'], {}), '(args.f5Index)\n', (48102, 48116), False, 'import os, sys, traceback, json, argparse\n'), ((48360, 48399), 'os.makedirs', 'os.makedirs', (['args.output'], {'exist_ok': '(True)'}), '(args.output, exist_ok=True)\n', (48371, 48399), False, 'import os, sys, traceback, json, argparse\n'), ((7269, 7297), 'numpy.median', 'np.median', (['signal[diff_mask]'], {}), '(signal[diff_mask])\n', (7278, 7297), True, 'import numpy as np\n'), ((7552, 7569), 'numpy.median', 'np.median', (['signal'], {}), '(signal)\n', (7561, 7569), True, 'import numpy as np\n'), ((8617, 8654), 'numpy.repeat', 'np.repeat', (['level_means', 'level_samples'], {}), '(level_means, level_samples)\n', (8626, 8654), True, 'import numpy as np\n'), ((8681, 8718), 'numpy.repeat', 'np.repeat', (['level_stdvs', 'level_samples'], {}), '(level_stdvs, level_samples)\n', (8690, 8718), True, 'import numpy as np\n'), ((8737, 8779), 'numpy.random.normal', 'np.random.normal', (['level_means', 'level_stdvs'], {}), '(level_means, level_stdvs)\n', (8753, 8779), True, 'import numpy as np\n'), ((30875, 30892), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (30883, 30892), True, 'import numpy as np\n'), ((34029, 34059), 
're.findall', 're.findall', (['"""(\\\\d*\\\\D)"""', 'cigar'], {}), "('(\\\\d*\\\\D)', cigar)\n", (34039, 34059), False, 'import re, itertools\n'), ((38047, 38078), 'multiprocessing.Process', 'Process', ([], {'target': 'self.__worker__'}), '(target=self.__worker__)\n', (38054, 38078), False, 'from multiprocessing import Pool, Process, Event, Value, Queue\n'), ((45794, 45824), 'os.path.isfile', 'os.path.isfile', (['args.mod_model'], {}), '(args.mod_model)\n', (45808, 45824), False, 'import os, sys, traceback, json, argparse\n'), ((49220, 49257), 'scipy.signal.medfilt', 'sp.medfilt', (['raw_signal'], {'kernel_size': '(3)'}), '(raw_signal, kernel_size=3)\n', (49230, 49257), True, 'import scipy.signal as sp\n'), ((49791, 49894), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(args.width, args.height)', 'dpi': 'args.dpi', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(args.width, args.height), dpi=args.dpi,\n facecolor='w', edgecolor='k')\n", (49801, 49894), True, 'import matplotlib.pyplot as plt\n'), ((49907, 49927), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (49918, 49927), True, 'import matplotlib.pyplot as plt\n'), ((49942, 50032), 'matplotlib.pyplot.plot', 'plt.plot', (['flt_signal[prefix_extend:suffix_extend]', '"""k-"""'], {'linewidth': '(0.5)', 'label': '"""genome"""'}), "(flt_signal[prefix_extend:suffix_extend], 'k-', linewidth=0.5,\n label='genome')\n", (49950, 50032), True, 'import matplotlib.pyplot as plt\n'), ((50182, 50194), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (50192, 50194), True, 'import matplotlib.pyplot as plt\n'), ((50282, 50302), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (50293, 50302), True, 'import matplotlib.pyplot as plt\n'), ((50317, 50384), 'matplotlib.pyplot.plot', 'plt.plot', (['flt_signal[prefix_begin:prefix_end]', '"""k-"""'], {'label': '"""prefix"""'}), 
"(flt_signal[prefix_begin:prefix_end], 'k-', label='prefix')\n", (50325, 50384), True, 'import matplotlib.pyplot as plt\n'), ((50518, 50572), 'matplotlib.pyplot.axvline', 'plt.axvline', (['args.zoom'], {'color': '"""red"""', 'label': '"""STR begin"""'}), "(args.zoom, color='red', label='STR begin')\n", (50529, 50572), True, 'import matplotlib.pyplot as plt\n'), ((50589, 50601), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (50599, 50601), True, 'import matplotlib.pyplot as plt\n'), ((50700, 50720), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (50711, 50720), True, 'import matplotlib.pyplot as plt\n'), ((50735, 50802), 'matplotlib.pyplot.plot', 'plt.plot', (['flt_signal[suffix_begin:suffix_end]', '"""k-"""'], {'label': '"""suffix"""'}), "(flt_signal[suffix_begin:suffix_end], 'k-', label='suffix')\n", (50743, 50802), True, 'import matplotlib.pyplot as plt\n'), ((50819, 50882), 'matplotlib.pyplot.plot', 'plt.plot', (['flt_signal[suffix_begin:suffix_end - args.zoom]', '"""b-"""'], {}), "(flt_signal[suffix_begin:suffix_end - args.zoom], 'b-')\n", (50827, 50882), True, 'import matplotlib.pyplot as plt\n'), ((50899, 50951), 'matplotlib.pyplot.axvline', 'plt.axvline', (['args.zoom'], {'color': '"""red"""', 'label': '"""STR end"""'}), "(args.zoom, color='red', label='STR end')\n", (50910, 50951), True, 'import matplotlib.pyplot as plt\n'), ((50968, 50980), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (50978, 50980), True, 'import matplotlib.pyplot as plt\n'), ((51079, 51097), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (51095, 51097), True, 'import matplotlib.pyplot as plt\n'), ((2783, 2801), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (2799, 2801), False, 'import os, sys, traceback, json, argparse\n'), ((3358, 3378), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3372, 3378), False, 'import os, sys, traceback, json, argparse\n'), ((3383, 
3407), 'os.access', 'os.access', (['file', 'os.W_OK'], {}), '(file, os.W_OK)\n', (3392, 3407), False, 'import os, sys, traceback, json, argparse\n'), ((5360, 5395), 'numpy.append', 'np.append', (['a', 'a[-1:-1 - (n - 1):-1]'], {}), '(a, a[-1:-1 - (n - 1):-1])\n', (5369, 5395), True, 'import numpy as np\n'), ((5422, 5453), 'numpy.append', 'np.append', (['a', '((n - 1) * [a[-1]])'], {}), '(a, (n - 1) * [a[-1]])\n', (5431, 5453), True, 'import numpy as np\n'), ((5718, 5735), 'numpy.median', 'np.median', (['signal'], {}), '(signal)\n', (5727, 5735), True, 'import numpy as np\n'), ((6947, 6967), 'numpy.diff', 'np.diff', (['sliding_std'], {}), '(sliding_std)\n', (6954, 6967), True, 'import numpy as np\n'), ((6987, 7020), 'numpy.argpartition', 'np.argpartition', (['diff_signal', '(-50)'], {}), '(diff_signal, -50)\n', (7002, 7020), True, 'import numpy as np\n'), ((7382, 7412), 'numpy.subtract', 'np.subtract', (['signal', 'rawMedian'], {}), '(signal, rawMedian)\n', (7393, 7412), True, 'import numpy as np\n'), ((7454, 7493), 'numpy.multiply', 'np.multiply', (['nrm_signal', 'self.model_MAD'], {}), '(nrm_signal, self.model_MAD)\n', (7465, 7493), True, 'import numpy as np\n'), ((7643, 7673), 'numpy.subtract', 'np.subtract', (['signal', 'rawMedian'], {}), '(signal, rawMedian)\n', (7654, 7673), True, 'import numpy as np\n'), ((7715, 7754), 'numpy.multiply', 'np.multiply', (['nrm_signal', 'self.model_MAD'], {}), '(nrm_signal, self.model_MAD)\n', (7726, 7754), True, 'import numpy as np\n'), ((11236, 11315), 'pomegranate.NormalDistribution', 'pg.NormalDistribution', (['state_mean', '(state_std * self.std_scale + self.std_offset)'], {}), '(state_mean, state_std * self.std_scale + self.std_offset)\n', (11257, 11315), True, 'import pomegranate as pg\n'), ((11415, 11452), 'pomegranate.State', 'pg.State', (['None'], {'name': "(state_name + 'd')"}), "(None, name=state_name + 'd')\n", (11423, 11452), True, 'import pomegranate as pg\n'), ((11499, 11569), 'pomegranate.UniformDistribution', 
'pg.UniformDistribution', (['self.pm_base.model_min', 'self.pm_base.model_max'], {}), '(self.pm_base.model_min, self.pm_base.model_max)\n', (11521, 11569), True, 'import pomegranate as pg\n'), ((25470, 25547), 'itertools.groupby', 'itertools.groupby', (['states'], {'key': "(lambda x: False if x in ['s0', 'e0'] else True)"}), "(states, key=lambda x: False if x in ['s0', 'e0'] else True)\n", (25487, 25547), False, 'import re, itertools\n'), ((30548, 30569), 'numpy.median', 'np.median', (['flt_signal'], {}), '(flt_signal)\n', (30557, 30569), True, 'import numpy as np\n'), ((30965, 30982), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (30973, 30982), True, 'import numpy as np\n'), ((31067, 31084), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (31075, 31084), True, 'import numpy as np\n'), ((49326, 49344), 'numpy.std', 'np.std', (['flt_signal'], {}), '(flt_signal)\n', (49332, 49344), True, 'import numpy as np\n'), ((50410, 50445), 'numpy.arange', 'np.arange', (['args.zoom', '(2 * args.zoom)'], {}), '(args.zoom, 2 * args.zoom)\n', (50419, 50445), True, 'import numpy as np\n'), ((51256, 51275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f_name'], {}), '(f_name)\n', (51267, 51275), True, 'import matplotlib.pyplot as plt\n'), ((51320, 51330), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (51328, 51330), True, 'import matplotlib.pyplot as plt\n'), ((3437, 3458), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (3452, 3458), False, 'import os, sys, traceback, json, argparse\n'), ((5216, 5226), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (5223, 5226), True, 'import numpy as np\n'), ((7084, 7101), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (7092, 7101), True, 'import numpy as np\n'), ((27586, 27606), 'numpy.array', 'np.array', (['idx_signal'], {}), '(idx_signal)\n', (27594, 27606), True, 'import numpy as np\n'), ((27663, 27683), 'numpy.array', 'np.array', (['idx_signal'], {}), 
'(idx_signal)\n', (27671, 27683), True, 'import numpy as np\n'), ((27879, 27899), 'numpy.array', 'np.array', (['idx_signal'], {}), '(idx_signal)\n', (27887, 27899), True, 'import numpy as np\n'), ((27967, 27987), 'numpy.array', 'np.array', (['idx_signal'], {}), '(idx_signal)\n', (27975, 27987), True, 'import numpy as np\n'), ((30672, 30689), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (30680, 30689), True, 'import numpy as np\n'), ((30838, 30864), 'skimage.morphology.closing', 'closing', (['morph_signal', 'flt'], {}), '(morph_signal, flt)\n', (30845, 30864), False, 'from skimage.morphology import opening, closing, dilation, erosion, rectangle\n'), ((32082, 32144), 'numpy.array', 'np.array', (["[(True if 'repeat' in x else False) for x in states]"], {}), "([(True if 'repeat' in x else False) for x in states])\n", (32090, 32144), True, 'import numpy as np\n'), ((38914, 38928), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (38926, 38928), False, 'import os, sys, traceback, json, argparse\n'), ((49301, 49322), 'numpy.median', 'np.median', (['flt_signal'], {}), '(flt_signal)\n', (49310, 49322), True, 'import numpy as np\n'), ((50054, 50070), 'numpy.arange', 'np.arange', (['ticks'], {}), '(ticks)\n', (50063, 50070), True, 'import numpy as np\n'), ((4137, 4148), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4146, 4148), False, 'import os, sys, traceback, json, argparse\n'), ((5298, 5310), 'numpy.median', 'np.median', (['a'], {}), '(a)\n', (5307, 5310), True, 'import numpy as np\n'), ((30624, 30664), 'numpy.clip', 'np.clip', (['(morph_signal * 24 + 127)', '(0)', '(255)'], {}), '(morph_signal * 24 + 127, 0, 255)\n', (30631, 30664), True, 'import numpy as np\n'), ((31978, 31995), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (31986, 31995), True, 'import numpy as np\n'), ((4064, 4087), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4085, 4087), False, 'import datetime\n'), ((7207, 7224), 
'skimage.morphology.rectangle', 'rectangle', (['(1)', '(750)'], {}), '(1, 750)\n', (7216, 7224), False, 'from skimage.morphology import opening, closing, dilation, erosion, rectangle\n'), ((39118, 39132), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (39130, 39132), False, 'import os, sys, traceback, json, argparse\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import corner
def test_invalid_quantiles_1(seed=42):
    """Quantile levels outside [0, 1] must raise ValueError."""
    np.random.seed(seed)
    samples = np.random.rand(100)
    with pytest.raises(ValueError):
        corner.quantile(samples, [-0.1, 5])
def test_invalid_quantiles_2(seed=42):
    """A scalar quantile level above 1 must raise ValueError."""
    np.random.seed(seed)
    samples = np.random.rand(100)
    with pytest.raises(ValueError):
        corner.quantile(samples, 5)
def test_invalid_quantiles_3(seed=42):
    """A quantile list containing any out-of-range entry must raise ValueError."""
    np.random.seed(seed)
    samples = np.random.rand(100)
    with pytest.raises(ValueError):
        corner.quantile(samples, [0.5, 1.0, 8.1])
def test_dimension_mismatch(seed=42):
    """Weights whose length differs from the sample length must be rejected."""
    np.random.seed(seed)
    samples = np.random.rand(100)
    bad_weights = np.random.rand(3)
    with pytest.raises(ValueError):
        corner.quantile(samples, [0.1, 0.5], weights=bad_weights)
def test_valid_quantile(seed=42):
    """Unweighted quantiles agree with numpy.percentile."""
    np.random.seed(seed)
    samples = np.random.rand(25)
    levels = np.arange(0.1, 1.0, 0.111234)
    expected = np.percentile(samples, 100 * levels)
    assert np.allclose(corner.quantile(samples, levels), expected)
def test_weighted_quantile(seed=42):
    """Weighted quantiles: uniform weights match percentile; 0%/100% hit the extremes."""
    np.random.seed(seed)
    samples = np.random.rand(25)
    levels = np.arange(0.1, 1.0, 0.111234)
    # uniform weights must reproduce the unweighted result
    weighted = corner.quantile(samples, levels, weights=np.ones_like(samples))
    reference = np.percentile(samples, 100 * np.array(levels))
    assert np.allclose(weighted, reference)
    # with arbitrary positive weights the 0%/100% quantiles are min/max
    extremes = corner.quantile(samples, [0.0, 1.0], weights=np.random.rand(len(samples)))
    assert np.allclose(extremes, (np.min(samples), np.max(samples)))
| [
"numpy.random.seed",
"numpy.ones_like",
"numpy.allclose",
"numpy.percentile",
"corner.quantile",
"pytest.raises",
"numpy.arange",
"numpy.array",
"numpy.min",
"numpy.max",
"numpy.random.rand"
] | [((118, 138), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (132, 138), True, 'import numpy as np\n'), ((276, 296), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (290, 296), True, 'import numpy as np\n'), ((426, 446), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (440, 446), True, 'import numpy as np\n'), ((589, 609), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (603, 609), True, 'import numpy as np\n'), ((792, 812), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (806, 812), True, 'import numpy as np\n'), ((821, 839), 'numpy.random.rand', 'np.random.rand', (['(25)'], {}), '(25)\n', (835, 839), True, 'import numpy as np\n'), ((848, 877), 'numpy.arange', 'np.arange', (['(0.1)', '(1.0)', '(0.111234)'], {}), '(0.1, 1.0, 0.111234)\n', (857, 877), True, 'import numpy as np\n'), ((887, 908), 'corner.quantile', 'corner.quantile', (['x', 'q'], {}), '(x, q)\n', (902, 908), False, 'import corner\n'), ((917, 942), 'numpy.percentile', 'np.percentile', (['x', '(100 * q)'], {}), '(x, 100 * q)\n', (930, 942), True, 'import numpy as np\n'), ((954, 971), 'numpy.allclose', 'np.allclose', (['a', 'b'], {}), '(a, b)\n', (965, 971), True, 'import numpy as np\n'), ((1015, 1035), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1029, 1035), True, 'import numpy as np\n'), ((1044, 1062), 'numpy.random.rand', 'np.random.rand', (['(25)'], {}), '(25)\n', (1058, 1062), True, 'import numpy as np\n'), ((1071, 1100), 'numpy.arange', 'np.arange', (['(0.1)', '(1.0)', '(0.111234)'], {}), '(0.1, 1.0, 0.111234)\n', (1080, 1100), True, 'import numpy as np\n'), ((1211, 1228), 'numpy.allclose', 'np.allclose', (['a', 'b'], {}), '(a, b)\n', (1222, 1228), True, 'import numpy as np\n'), ((148, 173), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (161, 173), False, 'import pytest\n'), ((306, 331), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (319, 331), False, 'import pytest\n'), ((456, 481), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (469, 481), False, 'import pytest\n'), ((619, 644), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (632, 644), False, 'import pytest\n'), ((199, 218), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (213, 218), True, 'import numpy as np\n'), ((357, 376), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (371, 376), True, 'import numpy as np\n'), ((507, 526), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (521, 526), True, 'import numpy as np\n'), ((683, 702), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (697, 702), True, 'import numpy as np\n'), ((1139, 1154), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1151, 1154), True, 'import numpy as np\n'), ((1187, 1198), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (1195, 1198), True, 'import numpy as np\n'), ((1338, 1347), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1344, 1347), True, 'import numpy as np\n'), ((1349, 1358), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1355, 1358), True, 'import numpy as np\n'), ((724, 741), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (738, 741), True, 'import numpy as np\n')] |
"""
Python version of algorithm from Astronomical Algorithms, chapter 26. Only uses
the tables that are valid from 1000 AD to 3000 AD (Table 26.B)
"""
import numpy as np
def JDE0(year, season):
    """Mean JDE of a season start for *year*.

    *season* is one of the coefficient lists (Mar/Jun/Sep/Dec): a quartic
    polynomial evaluated in millennia from AD 2000 (Meeus Table 26.B).
    """
    millennia = (year - 2000) / 1000.0
    return sum(coeff * millennia ** power for power, coeff in enumerate(season))
# Quartic polynomial coefficients for the mean JDE of each season start:
# JDE0 = c0 + c1*y + c2*y^2 + c3*y^3 + c4*y^4, y in millennia from AD 2000
# (Table 26.B, valid 1000 AD to 3000 AD).
Mar=[2451623.80984,365242.37404,+0.05169,-0.00411,-0.00057]
Jun=[2451716.56767,365241.62603,+0.00325,+0.00888,-0.00030]
Sep=[2451810.21715,365242.01767,-0.11575,+0.00337,+0.00078]
Dec=[2451900.05952,365242.74049,+0.06223,-0.00823,-0.00032]
# Years covered by the observed Delta-T table below (1974..2018 inclusive).
dtYear=list(range(1974,2019,1))
# NOTE(review): dtYear/dtTable appear unused by the functions in this file
# (DeltaT uses a polynomial fit instead) -- presumably kept for reference.
#xxx0 xxx1 xxx2 xxx3 xxx4 xxx5 xxx6 xxx7 xxx8 xxx9
dtTable=[ 44.4841,45.4761,46.4567,47.5214,48.5344,49.5861, #1974-1979
50.5387,51.3808,52.1668,52.9565,53.7882,54.3427,54.8712,55.3222,55.8197,56.3000, #1980-1989
56.8553,57.5653,58.3092,59.1218,59.9845,60.7853,61.6287,62.2950,62.9659,63.4673, #1990-1999
63.8285,64.0908,64.2998,64.4734,64.5736,64.6876,64.8452,65.1464,65.4573,65.7768, #2000-2009
66.0699,66.3246,66.6030,66.9069,67.2810,67.6439,68.1024,68.5927,68.9676 #2010-2018
]
def T(year, season):
    """Julian centuries between J2000.0 and the mean season instant JDE0."""
    J2000 = 2451545.0
    DAYS_PER_CENTURY = 36525.0
    return (JDE0(year, season) - J2000) / DAYS_PER_CENTURY
def W(T):
    """Angle W in degrees as a linear function of Julian centuries T (Meeus ch. 26)."""
    return T * 35999.373 - 2.47
def Deltalam(W):
    """Correction factor 1 + 0.0334*cos(W) + 0.0007*cos(2W), with W in degrees."""
    first_harmonic = 0.0334 * np.cos(np.radians(W))
    second_harmonic = 0.0007 * np.cos(np.radians(2 * W))
    return 1 + first_harmonic + second_harmonic
def DeltaT(year):
    """
    Calculate difference between ephemeris time and universal time in days.

    Returned with a leading minus sign so it can be *added* to an
    ephemeris-time JDE to obtain universal time; divided by 86400 to
    convert seconds to days.
    """
    # Quadratic fit after https://eclipse.gsfc.nasa.gov/SEhelp/deltatpoly2004.html
    # NOTE(review): the constant term (26.92) differs from the published
    # 2005-2050 fit (62.92) -- presumably adjusted deliberately; verify.
    t = year - 2000
    return -(26.92 + 0.32217 * t + 0.005589 * t ** 2) / 86400.0
# Periodic correction terms consumed by S(): amplitudes A (units of 1e-5 day
# after scaling in JDE), phases B in degrees, and rates C in degrees per
# Julian century -- cf. Meeus, Astronomical Algorithms (verify table number).
As=[485,203,199,182,156,136, 77, 74, 70, 58, 52, 50, 45, 44, 29, 18, 17, 16, 14, 12, 12, 12, 9, 8]
Bs=[324.96,337.23,342.08, 27.85, 73.14,171.52,222.54,296.72,243.58,119.81,297.17, 21.02,247.54,325.15, 60.93,155.12,288.79,198.04,199.76, 95.39,287.11,320.81,227.73, 15.45]
Cs=[ 1934.136, 32964.467, 20.186,445267.112, 45036.886, 22518.443, 65928.934, 3034.906, 9037.513, 33718.147, 150.678, 2281.226, 29929.562, 31555.956, 4443.417, 67555.328, 4562.452, 62894.029, 31436.921, 14577.848, 31931.756, 34777.259, 1222.114, 16859.074]
def S(T):
    """Sum of the 24 periodic terms A*cos(B + C*T), with B and C*T in degrees.

    Uses the module-level tables As/Bs/Cs; T is in Julian centuries.
    """
    return sum(A * np.cos(np.radians(B + C * T)) for A, B, C in zip(As, Bs, Cs))
def JDE(year, season):
    """Apparent JDE (converted toward universal time) of the season start."""
    mean_instant = JDE0(year, season)
    centuries = T(year, season)
    periodic = S(centuries)
    scale = Deltalam(W(centuries))
    # periodic terms are in units of 0.00001 day; DeltaT shifts ET -> UT
    return mean_instant + periodic * 0.00001 / scale + DeltaT(year)
def caldat(JD):
    """Convert a Julian Date into a calendar tuple.

    Implements the standard JD -> calendar algorithm (as in Meeus,
    "Astronomical Algorithms", ch. 7), including the Julian-calendar branch
    for dates before the Gregorian reform (JD < 2299161).

    :param JD: Julian Date (float)
    :return: (year, month, day, hour, minute, second) with ``second`` a float
    """
    whole = int(JD + 0.5)
    frac = (JD + 0.5) - whole
    if whole < 2299161:
        # Julian calendar: no century correction.
        adjusted = whole
    else:
        alpha = int((whole - 1867216.25) / 36524.25)
        adjusted = whole + 1 + alpha - int(alpha / 4)
    b = adjusted + 1524
    c = int((b - 122.1) / 365.25)
    d = int(365.25 * c)
    e = int((b - d) / 30.6001)
    day = b - d - int(30.6001 * e)
    month = e - 1 if e < 14 else e - 13
    year = c - 4716 if month > 2 else c - 4715
    seconds_of_day = frac * 86400
    minutes = int(seconds_of_day / 60)
    sec = seconds_of_day - minutes * 60
    hour = int(minutes / 60)
    minutes = minutes - hour * 60
    return (year, month, day, hour, minutes, sec)
if __name__=="__main__":
    # Write a fixed-width table of equinox/solstice times for 1992-2020,
    # mirroring the layout of published "Earth's Seasons" tables.
    with open("/home/jeppesen/Python seasons table.txt","w") as ouf:
        print(" d h d h m d h m",file=ouf)
        for year in range(1992,2021):
            mar, jun, sep, dec = (
                caldat(JDE(year, coeffs)) for coeffs in (Mar, Jun, Sep, Dec)
            )
            # Each caldat tuple is (year, month, day, hour, minute, second);
            # the table prints fields 2..5 (day, hour, minute, second).
            print("%04d %04d"%(year,year),file=ouf)
            print("Perihelion Jan X XX Equinoxes Mar %02d %02d %02d %02d Sept %02d %02d %02d %02d"
                  % (mar[2:6] + sep[2:6]),file=ouf)
            print("Aphelion July X XX Solstices June %02d %02d %02d %02d Dec %02d %02d %02d %02d"
                  % (jun[2:6] + dec[2:6]),file=ouf)
            print(file=ouf)
"numpy.radians"
] | [((1496, 1513), 'numpy.radians', 'np.radians', (['(2 * W)'], {}), '(2 * W)\n', (1506, 1513), True, 'import numpy as np\n'), ((2509, 2530), 'numpy.radians', 'np.radians', (['(B + C * T)'], {}), '(B + C * T)\n', (2519, 2530), True, 'import numpy as np\n'), ((1467, 1480), 'numpy.radians', 'np.radians', (['W'], {}), '(W)\n', (1477, 1480), True, 'import numpy as np\n')] |
import os
import time
import numpy as np
import scipy as sp
import scipy.linalg
import matplotlib as mpl
import matplotlib.pyplot as plt
import pybie2d
from ipde.embedded_boundary_standalone import EmbeddedBoundary
from ipde.heavisides import SlepianMollifier
from ipde.derivatives import fd_x_4, fd_y_4, fourier
from ipde.solvers.single_boundary.interior.poisson import PoissonSolver
from qfs.two_d_qfs import QFS_Evaluator
from personal_utilities.arc_length_reparametrization import arc_length_parameterize
# Convenience aliases into pybie2d: curve generators, the regular grid, and
# the high-level Laplace layer-potential kernels.
star = pybie2d.misc.curve_descriptions.star
squish = pybie2d.misc.curve_descriptions.squished_circle
GSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary
Grid = pybie2d.grid.Grid
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
# Singular double-layer matrix including the -1/2 identity jump term; the
# second argument is ignored (on-surface evaluation).
Singular_DLP = lambda src, _: Laplace_Layer_Singular_Form(src, ifdipole=True) - 0.5*np.eye(src.N)
# Plain (non-singular) single-layer charge evaluation from src to trg.
Naive_SLP = lambda src, trg: Laplace_Layer_Form(src, trg, ifcharge=True)
# Discretization parameters: nb boundary points, M radial annulus points,
# both scaled up by the refinement factor adj.
nb = 200
M = 6
adj = 10
nb *= adj
M *= adj
M = int(np.floor(M))
M = max(4, min(20, M))  # clamp the radial point count to [4, 20]
verbose = True
reparametrize = False  # optionally arc-length reparametrize the boundary
slepian_r = 1.5*M  # Slepian mollifier bandwidth, tied to radial resolution
solver_type = 'spectral' # fourth or spectral
# get heaviside function
MOL = SlepianMollifier(slepian_r)
# construct boundary (five-armed star of amplitude 0.2)
bdy = GSB(c=star(nb, a=0.2, f=5))
if reparametrize:
    bdy = GSB(*arc_length_parameterize(bdy.x, bdy.y))
# minimum boundary node spacing
bh = bdy.dt*bdy.speed.min()
# get number of gridpoints to roughly match boundary spacing
ng = 2*int(0.5*2.4//bh)
# construct a grid covering [-1.2, 1.2]^2 (periodic-style endpoints)
grid = Grid([-1.2, 1.2], ng, [-1.2, 1.2], ng, x_endpoints=[True, False], y_endpoints=[True, False])
################################################################################
# Get solution, forces, BCs
# Manufactured solution u and (presumably) the matching Poisson forcing term —
# TODO verify the sign convention force = -Laplacian(u) vs +Laplacian(u)
# against the PoissonSolver implementation.
solution_func = lambda x, y: -np.cos(x)*np.exp(np.sin(x))*np.sin(y)
force_func = lambda x, y: (2.0*np.cos(x)+3.0*np.cos(x)*np.sin(x)-np.cos(x)**3)*np.exp(np.sin(x))*np.sin(y)
################################################################################
# Setup Poisson Solver (embedded boundary + radial annulus of width ~ M*bh)
st = time.time()
ebdy = EmbeddedBoundary(bdy, True, M, bh*1, pad_zone=0, heaviside=MOL.step)
ebdy.register_grid(grid, verbose=verbose)
solver = PoissonSolver(ebdy, MOL.bump, bump_loc=(1.2-ebdy.radial_width, 1.2-ebdy.radial_width), solver_type=solver_type)
time_setup = time.time() - st
# evaluate forcing/exact solution on the grid (masked to the physical domain)
# and on the radial annulus; bc is the Dirichlet data on the boundary
f = force_func(ebdy.grid.xg, ebdy.grid.yg)*ebdy.phys
fr = force_func(ebdy.radial_x, ebdy.radial_y)
ua = solution_func(ebdy.grid.xg, ebdy.grid.yg)*ebdy.phys
uar = solution_func(ebdy.radial_x, ebdy.radial_y)
bc = solution_func(ebdy.bdy.x, ebdy.bdy.y)
# inhomogeneous (particular) solve
st = time.time()
ue, uer = solver(f, fr, tol=1e-12, verbose=verbose)
time_inhomogeneous_solve = time.time() - st
mue = np.ma.array(ue, mask=ebdy.ext)
# form and factor the second-kind boundary operator (DLP with jump term)
st = time.time()
A = Laplace_Layer_Singular_Form(bdy, ifdipole=True) - 0.5*np.eye(bdy.N)
A_LU = sp.linalg.lu_factor(A)
time_homogeneous_form = time.time() - st
# homogeneous correction: solve for the density matching the BC mismatch,
# then evaluate it back onto the radial and grid points via QFS
st = time.time()
bv = solver.get_bv(uer)
tau = sp.linalg.lu_solve(A_LU, bc-bv)
qfs = QFS_Evaluator(ebdy.bdy_qfs, True, [Singular_DLP,], Naive_SLP, on_surface=True, form_b2c=False)
sigma = qfs([tau,])
rslp = Laplace_Layer_Apply(ebdy.bdy_qfs.interior_source_bdy, solver.radp, charge=sigma)
gslp = Laplace_Layer_Apply(ebdy.bdy_qfs.interior_source_bdy, solver.gridpa, charge=sigma)
uer += rslp.reshape(uer.shape)
ue[ebdy.phys] += gslp
time_homogeneous_correction = time.time() - st
# compute the error against the manufactured solution
rerr = np.abs(uer - uar)
gerr = np.abs(ue - ua)
gerrp = gerr[ebdy.phys]
mgerr = np.ma.array(gerr, mask=ebdy.ext)
print('Error, maximum: {:0.4e}'.format(max(gerrp.max(), rerr.max())))
print('Time, setup: {:0.4f}'.format(time_setup*1000))
print('Time, inhomogeneous solve: {:0.4f}'.format(time_inhomogeneous_solve*1000))
print('Time, homogeneous form: {:0.4f}'.format(time_homogeneous_form*1000))
print('Time, homogeneous correction: {:0.4f}'.format(time_homogeneous_correction*1000))
print('Degrees of freedom: {:0.0f}'.format(solver.radp.N + solver.gridpa.N))
# Archived convergence data and paper-figure generation; deliberately disabled
# with `if False:`.  The hard-coded arrays are previously measured errors,
# GMRES iteration counts, timings and DOF counts (per the inline comments,
# for several annulus-width factors zeta) — kept for reproducing the plots.
if False:
    ns = 200*np.arange(1, 21)
    uscale = 1.238
    # this is for reparm...
    errs_M1p5 = np.array([5.5635e-04, 7.2616e-05, 1.9321e-05, 2.5564e-07, 1.9425e-08, 1.0209e-09, 1.2751e-10, 2.3578e-11, 2.4486e-12, 2.2293e-13, 1.3101e-13, 2.5702e-14, 3.7081e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.3620e-14, 4.2411e-14, 4.7296e-14, 9.9587e-14 ])/uscale
    gmres_M1p5 = np.array([31, 18, 16, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 16, 16, 15, 15, 15, 14 ])
    errs_M2 = np.array([5.5635e-04, 7.2616e-05, 9.6542e-07, 2.3782e-08, 8.2043e-10, 2.5122e-11, 1.3433e-12, 1.3078e-13, 7.1609e-14, 1.0364e-13, 7.4385e-14, 8.3267e-14, 4.8975e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.3620e-14, 4.2411e-14, 4.7296e-14, 9.9587e-14 ])/uscale
    gmres_M2 = np.array([31, 18, 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 17, 17, 16, 16, 15, 15, 15, 14 ])
    errs_M3 = np.array([5.5635e-04, 7.3761e-06, 1.0056e-07, 1.1997e-09, 1.8841e-11, 2.5424e-13, 9.3370e-14, 6.6558e-14, 4.8045e-14, 1.0364e-13, 7.4385e-14, 8.3267e-14, 4.8975e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.3620e-14, 4.2411e-14, 4.7296e-14, 9.9587e-14 ])/uscale
    gmres_M3 = np.array([31, 26, 30, 31, 31, 31, 29, 25, 23, 20, 18, 18, 17, 17, 16, 16, 15, 15, 15, 14 ])
    # for not reparmed
    _errs_M1p5 = np.array([1.7102e-04, 1.9008e-05, 4.6032e-06, 3.7857e-08, 5.7047e-09, 5.1529e-10, 1.0177e-10, 7.6548e-12, 3.5099e-12, 4.1889e-13, 3.6504e-13, 3.0553e-13, 2.8244e-13, 2.4847e-13, 2.6668e-13, 2.5580e-13, 2.4225e-13, 2.3270e-13, 1.9718e-13, 1.9051e-13 ])/uscale
    _gmres_M1p5 = np.array([16, 14, 14, 14, 14, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10 ])
    _errs_M2 = np.array([1.7102e-04, 1.9008e-05, 1.4241e-07, 3.1137e-09, 1.7697e-10, 1.0479e-11, 8.8130e-13, 4.2877e-13, 4.3010e-13, 3.8125e-13, 3.6504e-13, 3.1308e-13, 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.5580e-13, 2.4225e-13, 2.3270e-13, 1.9718e-13, 1.9051e-13 ])/uscale
    _gmres_M2 = np.array([16, 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10 ])
    _errs_M3 = np.array([1.7102e-04, 8.7080e-07, 3.8560e-09, 2.1663e-11, 8.4466e-13, 5.8087e-13, 6.0374e-13, 4.3054e-13, 4.3299e-13, 3.8125e-13, 3.6504e-13, 3.1308e-13, 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.5580e-13, 2.4225e-13, 2.3270e-13, 1.9718e-13, 1.9051e-13 ])/uscale
    _gmres_M3 = np.array([16, 14, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10 ])
    _errs_M4 = np.array([1.7102e-04, 1.3314e-07, 2.3136e-10, 9.3747e-13, 8.5176e-13, 5.8442e-13, 6.0374e-13, 4.3054e-13, 4.3299e-13, 3.8125e-13, 3.6504e-13, 3.1308e-13, 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.5580e-13, 2.4225e-13, 2.3270e-13, 1.9718e-13, 1.9051e-13 ])/uscale
    _gmres_M4 = np.array([16, 16, 17, 17, 17, 14, 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10 ])
    # timings are for M2 (milliseconds)
    time_setup = [154.9306, 161.8423, 231.6511, 279.5694, 365.5703, 441.3369, 567.2545, 675.1618, 776.0653, 988.2708, 1086.0043, 1109.0827, 1292.7971, 1514.4458, 1566.3459, 1731.6649, 2112.1962, 1997.4332, 2350.7431, 2493.5691 ]
    time_solve = [54.4164, 61.3074, 130.9922, 165.1330, 278.5511, 328.9678, 463.7773, 569.0830, 677.9559, 847.0654, 891.0797, 1108.5746, 1211.3173, 1395.2336, 1724.3528, 1821.3904, 2059.0310, 2340.1973, 2725.8365, 3026.1867 ]
    time_hform = [3.7887, 4.4360, 8.2417, 17.3187, 27.6716, 59.0572, 101.1767, 111.8031, 116.6139, 160.0151, 158.3679, 191.6876, 212.4312, 273.3963, 310.6759, 316.4508, 333.5750, 423.6121, 439.6663, 491.7672 ]
    time_happ = [9.2492, 16.8111, 18.9607, 45.0113, 118.3691, 132.4646, 216.0077, 270.1962, 388.2639, 487.4279, 533.3376, 623.7583, 746.6176, 875.3183, 1029.5680, 1295.3801, 1378.9132, 1613.9953, 1811.7838, 2182.0726 ]
    dof = [2937, 10153, 23278, 41176, 64142, 93065, 126337, 164660, 209371, 257995, 308865, 362634, 420616, 484843, 551565, 622557, 700186, 779826, 866465, 954829 ]
    os.chdir('/Users/dstein/Documents/Writing/Spectrally Accurate Poisson/images/')
    mpl.rc('text', usetex=True)
    mpl.rcParams.update({'text.latex.preamble' : [r'\usepackage{amsmath}']})
    mpl.rcParams.update({'font.size': 18})
    # closed boundary/interface curves (repeat the first point to close loops)
    bx = np.pad(bdy.x, (0,1), mode='wrap')
    by = np.pad(bdy.y, (0,1), mode='wrap')
    ix = np.pad(ebdy.interface.x, (0,1), mode='wrap')
    iy = np.pad(ebdy.interface.y, (0,1), mode='wrap')
    xbds = [bdy.x.min(), bdy.x.max()]
    ybds = [bdy.y.min(), bdy.y.max()]
    # domain-decomposition sketch
    fig, ax = plt.subplots()
    ax.plot(bx, by, color='black')
    ax.plot(ix, iy, color='black', linestyle='--')
    ax.fill(bx, by, color='pink', zorder=-15, alpha=0.9, edgecolor='none')
    ax.fill(ix, iy, color='white', zorder=-10, edgecolor='none')
    ax.fill(ix, iy, color='blue', zorder=-5, alpha=0.4, edgecolor='none')
    ax.set(xlim=xbds, ylim=ybds)
    ax.set(xticks=[], yticks=[], xticklabels=[], yticklabels=[])
    ax.text(0.9,0.9,r'$\mathcal{C}$')
    ax.text(0.51,0.51,r'$\mathcal{A}$')
    ax.text(0.0,0.0,r'$\Omega_0$')
    ax.text(-0.4,0.8,r'$\Gamma$')
    ax.text(-0.4,0.48,r'$\overline{\Gamma}$')
    ax.set_aspect('equal')
    fig.tight_layout()
    fig.savefig('domain_decomposition.pdf', format='pdf', bbox_inches='tight')
    # refinement study (reparametrized)
    fig, ax = plt.subplots()
    ax.plot(ns, errs_M1p5, color='blue', linewidth=2, marker='^', label=r'$\zeta=1.5$')
    ax.plot(ns, errs_M2, color='black', linewidth=2, marker='o', label=r'$\zeta=2$')
    ax.plot(ns, errs_M3, color='red', linewidth=2, marker='d', label=r'$\zeta=3$')
    ax.set_yscale('log')
    ax.set_xlabel(r'$n_\text{boundary}$')
    ax.set_ylabel(r'$\|u\|_{L^\infty(\Omega)}$')
    ax.axhline(1e-12, color='gray', linestyle='--')
    fig.tight_layout()
    plt.legend()
    fig.savefig('poisson_refinement.pdf', format='pdf', bbox_inches='tight')
    # refinement study (not reparametrized)
    fig, ax = plt.subplots()
    ax.plot(ns, _errs_M1p5, color='blue', linewidth=2, marker='^', label=r'$\zeta=1.5$')
    ax.plot(ns, _errs_M2, color='black', linewidth=2, marker='o', label=r'$\zeta=2$')
    ax.plot(ns, _errs_M3, color='red', linewidth=2, marker='d', label=r'$\zeta=3$')
    ax.plot(ns, _errs_M4, color='orange', linewidth=2, marker='s', label=r'$\zeta=4$')
    ax.set_yscale('log')
    ax.set_xlabel(r'$n_\text{boundary}$')
    ax.set_ylabel(r'$\|u\|_{L^\infty(\Omega)}$')
    ax.axhline(1e-12, color='gray', linestyle='--')
    fig.tight_layout()
    plt.legend()
    fig.savefig('_poisson_refinement.pdf', format='pdf', bbox_inches='tight')
    # timing vs degrees of freedom
    fig, ax = plt.subplots()
    ax.plot((dof + ns)/1000000, time_solve, color='black', linewidth=2, marker='^', label='Inhomogeneous solve')
    ax.plot((dof + ns)/1000000, time_setup, color='blue', linewidth=2, marker='o', label='Inhomogeneous setup')
    ax.plot((dof + ns)/1000000, time_hform, color='purple', linewidth=2, marker='d', label='Homogeneous setup')
    ax.plot((dof + ns)/1000000, time_happ, color='red', linewidth=2, marker='s', label='Homogeneous solve')
    ax.set_xlabel(r'$N_\text{dof}$ (millions)')
    ax.set_ylabel(r'Time (ms)')
    plt.legend()
    fig.tight_layout()
    fig.savefig('poisson_time.pdf', format='pdf', bbox_inches='tight')
    # GMRES iteration counts
    fig, ax = plt.subplots()
    ax.plot(ns, gmres_M1p5, color='blue', linewidth=2, marker='^', label=r'$\zeta=1.5$')
    ax.plot(ns, gmres_M2, color='black', linewidth=2, marker='^', label=r'$\zeta=2$')
    ax.plot(ns, gmres_M3, color='red', linewidth=2, marker='^', label=r'$\zeta=3$')
    ax.set_xlabel(r'$n_\text{boundary}$')
    ax.set_ylabel(r'GMRES Iteration Count')
    plt.legend()
    fig.tight_layout()
    fig.savefig('poisson_gmres.pdf', format='pdf', bbox_inches='tight')
    # solution and error fields
    fig, ax = plt.subplots()
    ax.pcolormesh(grid.xg, grid.yg, mue)
    ax.plot(ebdy.bdy.x, ebdy.bdy.y, color='black', linewidth=3)
    ax.plot(ebdy.interface.x, ebdy.interface.y, color='white', linewidth=3)
    fig, ax = plt.subplots()
    clf = ax.imshow(mgerr.T[::-1]+1e-16, extent=grid.x_bounds+grid.y_bounds, vmin=1e-15, norm=mpl.colors.LogNorm())
    # clf = ax.pcolormesh(grid.xg, grid.yg, mgerr+1e-16, vmin=1e-11, norm=mpl.colors.LogNorm())
    ax.plot(ebdy.bdy.x, ebdy.bdy.y, color='black', linewidth=3)
    ax.plot(ebdy.interface.x, ebdy.interface.y, color='white', linewidth=3)
    plt.colorbar(clf)
    ax.set_aspect('equal')
    ax.set(xticks=[], yticks=[], xticklabels=[], yticklabels=[])
    ax.set(xlim=[-1.1,1.3], ylim=[-1.2,1.2])
    fig.tight_layout()
    fig.savefig('poisson_error.pdf', format='pdf', bbox_inches='tight')
| [
"matplotlib.rc",
"numpy.abs",
"numpy.floor",
"personal_utilities.arc_length_reparametrization.arc_length_parameterize",
"numpy.sin",
"numpy.arange",
"matplotlib.colors.LogNorm",
"os.chdir",
"ipde.solvers.single_boundary.interior.poisson.PoissonSolver",
"ipde.heavisides.SlepianMollifier",
"numpy.... | [((1368, 1395), 'ipde.heavisides.SlepianMollifier', 'SlepianMollifier', (['slepian_r'], {}), '(slepian_r)\n', (1384, 1395), False, 'from ipde.heavisides import SlepianMollifier\n'), ((2149, 2160), 'time.time', 'time.time', ([], {}), '()\n', (2158, 2160), False, 'import time\n'), ((2168, 2238), 'ipde.embedded_boundary_standalone.EmbeddedBoundary', 'EmbeddedBoundary', (['bdy', '(True)', 'M', '(bh * 1)'], {'pad_zone': '(0)', 'heaviside': 'MOL.step'}), '(bdy, True, M, bh * 1, pad_zone=0, heaviside=MOL.step)\n', (2184, 2238), False, 'from ipde.embedded_boundary_standalone import EmbeddedBoundary\n'), ((2288, 2408), 'ipde.solvers.single_boundary.interior.poisson.PoissonSolver', 'PoissonSolver', (['ebdy', 'MOL.bump'], {'bump_loc': '(1.2 - ebdy.radial_width, 1.2 - ebdy.radial_width)', 'solver_type': 'solver_type'}), '(ebdy, MOL.bump, bump_loc=(1.2 - ebdy.radial_width, 1.2 - ebdy\n .radial_width), solver_type=solver_type)\n', (2301, 2408), False, 'from ipde.solvers.single_boundary.interior.poisson import PoissonSolver\n'), ((2686, 2697), 'time.time', 'time.time', ([], {}), '()\n', (2695, 2697), False, 'import time\n'), ((2801, 2831), 'numpy.ma.array', 'np.ma.array', (['ue'], {'mask': 'ebdy.ext'}), '(ue, mask=ebdy.ext)\n', (2812, 2831), True, 'import numpy as np\n'), ((2838, 2849), 'time.time', 'time.time', ([], {}), '()\n', (2847, 2849), False, 'import time\n'), ((2929, 2951), 'scipy.linalg.lu_factor', 'sp.linalg.lu_factor', (['A'], {}), '(A)\n', (2948, 2951), True, 'import scipy as sp\n'), ((2999, 3010), 'time.time', 'time.time', ([], {}), '()\n', (3008, 3010), False, 'import time\n'), ((3041, 3074), 'scipy.linalg.lu_solve', 'sp.linalg.lu_solve', (['A_LU', '(bc - bv)'], {}), '(A_LU, bc - bv)\n', (3059, 3074), True, 'import scipy as sp\n'), ((3079, 3177), 'qfs.two_d_qfs.QFS_Evaluator', 'QFS_Evaluator', (['ebdy.bdy_qfs', '(True)', '[Singular_DLP]', 'Naive_SLP'], {'on_surface': '(True)', 'form_b2c': '(False)'}), '(ebdy.bdy_qfs, True, [Singular_DLP], Naive_SLP, 
on_surface=\n True, form_b2c=False)\n', (3092, 3177), False, 'from qfs.two_d_qfs import QFS_Evaluator\n'), ((3500, 3517), 'numpy.abs', 'np.abs', (['(uer - uar)'], {}), '(uer - uar)\n', (3506, 3517), True, 'import numpy as np\n'), ((3525, 3540), 'numpy.abs', 'np.abs', (['(ue - ua)'], {}), '(ue - ua)\n', (3531, 3540), True, 'import numpy as np\n'), ((3573, 3605), 'numpy.ma.array', 'np.ma.array', (['gerr'], {'mask': 'ebdy.ext'}), '(gerr, mask=ebdy.ext)\n', (3584, 3605), True, 'import numpy as np\n'), ((1198, 1209), 'numpy.floor', 'np.floor', (['M'], {}), '(M)\n', (1206, 1209), True, 'import numpy as np\n'), ((2413, 2424), 'time.time', 'time.time', ([], {}), '()\n', (2422, 2424), False, 'import time\n'), ((2777, 2788), 'time.time', 'time.time', ([], {}), '()\n', (2786, 2788), False, 'import time\n'), ((2976, 2987), 'time.time', 'time.time', ([], {}), '()\n', (2985, 2987), False, 'import time\n'), ((3455, 3466), 'time.time', 'time.time', ([], {}), '()\n', (3464, 3466), False, 'import time\n'), ((4472, 4567), 'numpy.array', 'np.array', (['[31, 18, 16, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 16, 16, 15, 15, 15, 14\n ]'], {}), '([31, 18, 16, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 16, 16, \n 15, 15, 15, 14])\n', (4480, 4567), True, 'import numpy as np\n'), ((5011, 5106), 'numpy.array', 'np.array', (['[31, 18, 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 17, 17, 16, 16, 15, 15, 15, 14\n ]'], {}), '([31, 18, 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 17, 17, 16, 16, \n 15, 15, 15, 14])\n', (5019, 5106), True, 'import numpy as np\n'), ((5550, 5645), 'numpy.array', 'np.array', (['[31, 26, 30, 31, 31, 31, 29, 25, 23, 20, 18, 18, 17, 17, 16, 16, 15, 15, 15, 14\n ]'], {}), '([31, 26, 30, 31, 31, 31, 29, 25, 23, 20, 18, 18, 17, 17, 16, 16, \n 15, 15, 15, 14])\n', (5558, 5645), True, 'import numpy as np\n'), ((6111, 6206), 'numpy.array', 'np.array', (['[16, 14, 14, 14, 14, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10\n ]'], {}), '([16, 14, 14, 14, 14, 12, 12, 11, 11, 
11, 11, 11, 11, 11, 11, 11, \n 10, 10, 10, 10])\n', (6119, 6206), True, 'import numpy as np\n'), ((6652, 6747), 'numpy.array', 'np.array', (['[16, 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10\n ]'], {}), '([16, 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, \n 10, 10, 10, 10])\n', (6660, 6747), True, 'import numpy as np\n'), ((7193, 7288), 'numpy.array', 'np.array', (['[16, 14, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10\n ]'], {}), '([16, 14, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, \n 10, 10, 10, 10])\n', (7201, 7288), True, 'import numpy as np\n'), ((7734, 7829), 'numpy.array', 'np.array', (['[16, 16, 17, 17, 17, 14, 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10\n ]'], {}), '([16, 16, 17, 17, 17, 14, 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, \n 10, 10, 10, 10])\n', (7742, 7829), True, 'import numpy as np\n'), ((9290, 9369), 'os.chdir', 'os.chdir', (['"""/Users/dstein/Documents/Writing/Spectrally Accurate Poisson/images/"""'], {}), "('/Users/dstein/Documents/Writing/Spectrally Accurate Poisson/images/')\n", (9298, 9369), False, 'import os\n'), ((9372, 9399), 'matplotlib.rc', 'mpl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (9378, 9399), True, 'import matplotlib as mpl\n'), ((9401, 9472), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'text.latex.preamble': ['\\\\usepackage{amsmath}']}"], {}), "({'text.latex.preamble': ['\\\\usepackage{amsmath}']})\n", (9420, 9472), True, 'import matplotlib as mpl\n'), ((9475, 9513), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (9494, 9513), True, 'import matplotlib as mpl\n'), ((9521, 9555), 'numpy.pad', 'np.pad', (['bdy.x', '(0, 1)'], {'mode': '"""wrap"""'}), "(bdy.x, (0, 1), mode='wrap')\n", (9527, 9555), True, 'import numpy as np\n'), ((9561, 9595), 'numpy.pad', 'np.pad', (['bdy.y', '(0, 1)'], {'mode': '"""wrap"""'}), "(bdy.y, (0, 1), 
mode='wrap')\n", (9567, 9595), True, 'import numpy as np\n'), ((9601, 9646), 'numpy.pad', 'np.pad', (['ebdy.interface.x', '(0, 1)'], {'mode': '"""wrap"""'}), "(ebdy.interface.x, (0, 1), mode='wrap')\n", (9607, 9646), True, 'import numpy as np\n'), ((9652, 9697), 'numpy.pad', 'np.pad', (['ebdy.interface.y', '(0, 1)'], {'mode': '"""wrap"""'}), "(ebdy.interface.y, (0, 1), mode='wrap')\n", (9658, 9697), True, 'import numpy as np\n'), ((9778, 9792), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9790, 9792), True, 'import matplotlib.pyplot as plt\n'), ((10480, 10494), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10492, 10494), True, 'import matplotlib.pyplot as plt\n'), ((10919, 10931), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10929, 10931), True, 'import matplotlib.pyplot as plt\n'), ((11018, 11032), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11030, 11032), True, 'import matplotlib.pyplot as plt\n'), ((11544, 11556), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11554, 11556), True, 'import matplotlib.pyplot as plt\n'), ((11644, 11658), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11656, 11658), True, 'import matplotlib.pyplot as plt\n'), ((12174, 12186), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12184, 12186), True, 'import matplotlib.pyplot as plt\n'), ((12287, 12301), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12299, 12301), True, 'import matplotlib.pyplot as plt\n'), ((12640, 12652), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12650, 12652), True, 'import matplotlib.pyplot as plt\n'), ((12754, 12768), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12766, 12768), True, 'import matplotlib.pyplot as plt\n'), ((12953, 12967), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12965, 12967), True, 'import matplotlib.pyplot as plt\n'), ((13309, 13326), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', (['clf'], {}), '(clf)\n', (13321, 13326), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1930), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (1927, 1930), True, 'import numpy as np\n'), ((2028, 2037), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (2034, 2037), True, 'import numpy as np\n'), ((2908, 2921), 'numpy.eye', 'np.eye', (['bdy.N'], {}), '(bdy.N)\n', (2914, 2921), True, 'import numpy as np\n'), ((4127, 4143), 'numpy.arange', 'np.arange', (['(1)', '(21)'], {}), '(1, 21)\n', (4136, 4143), True, 'import numpy as np\n'), ((4199, 4462), 'numpy.array', 'np.array', (['[0.00055635, 7.2616e-05, 1.9321e-05, 2.5564e-07, 1.9425e-08, 1.0209e-09, \n 1.2751e-10, 2.3578e-11, 2.4486e-12, 2.2293e-13, 1.3101e-13, 2.5702e-14,\n 3.7081e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.362e-14, 4.2411e-14, \n 4.7296e-14, 9.9587e-14]'], {}), '([0.00055635, 7.2616e-05, 1.9321e-05, 2.5564e-07, 1.9425e-08, \n 1.0209e-09, 1.2751e-10, 2.3578e-11, 2.4486e-12, 2.2293e-13, 1.3101e-13,\n 2.5702e-14, 3.7081e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.362e-14, \n 4.2411e-14, 4.7296e-14, 9.9587e-14])\n', (4207, 4462), True, 'import numpy as np\n'), ((4738, 5001), 'numpy.array', 'np.array', (['[0.00055635, 7.2616e-05, 9.6542e-07, 2.3782e-08, 8.2043e-10, 2.5122e-11, \n 1.3433e-12, 1.3078e-13, 7.1609e-14, 1.0364e-13, 7.4385e-14, 8.3267e-14,\n 4.8975e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.362e-14, 4.2411e-14, \n 4.7296e-14, 9.9587e-14]'], {}), '([0.00055635, 7.2616e-05, 9.6542e-07, 2.3782e-08, 8.2043e-10, \n 2.5122e-11, 1.3433e-12, 1.3078e-13, 7.1609e-14, 1.0364e-13, 7.4385e-14,\n 8.3267e-14, 4.8975e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.362e-14, \n 4.2411e-14, 4.7296e-14, 9.9587e-14])\n', (4746, 5001), True, 'import numpy as np\n'), ((5277, 5540), 'numpy.array', 'np.array', (['[0.00055635, 7.3761e-06, 1.0056e-07, 1.1997e-09, 1.8841e-11, 2.5424e-13, \n 9.337e-14, 6.6558e-14, 4.8045e-14, 1.0364e-13, 7.4385e-14, 8.3267e-14, \n 4.8975e-14, 
3.5971e-14, 1.0042e-13, 1.1147e-13, 9.362e-14, 4.2411e-14, \n 4.7296e-14, 9.9587e-14]'], {}), '([0.00055635, 7.3761e-06, 1.0056e-07, 1.1997e-09, 1.8841e-11, \n 2.5424e-13, 9.337e-14, 6.6558e-14, 4.8045e-14, 1.0364e-13, 7.4385e-14, \n 8.3267e-14, 4.8975e-14, 3.5971e-14, 1.0042e-13, 1.1147e-13, 9.362e-14, \n 4.2411e-14, 4.7296e-14, 9.9587e-14])\n', (5285, 5540), True, 'import numpy as np\n'), ((5837, 6099), 'numpy.array', 'np.array', (['[0.00017102, 1.9008e-05, 4.6032e-06, 3.7857e-08, 5.7047e-09, 5.1529e-10, \n 1.0177e-10, 7.6548e-12, 3.5099e-12, 4.1889e-13, 3.6504e-13, 3.0553e-13,\n 2.8244e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, 2.327e-13, \n 1.9718e-13, 1.9051e-13]'], {}), '([0.00017102, 1.9008e-05, 4.6032e-06, 3.7857e-08, 5.7047e-09, \n 5.1529e-10, 1.0177e-10, 7.6548e-12, 3.5099e-12, 4.1889e-13, 3.6504e-13,\n 3.0553e-13, 2.8244e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, \n 2.327e-13, 1.9718e-13, 1.9051e-13])\n', (5845, 6099), True, 'import numpy as np\n'), ((6378, 6639), 'numpy.array', 'np.array', (['[0.00017102, 1.9008e-05, 1.4241e-07, 3.1137e-09, 1.7697e-10, 1.0479e-11, \n 8.813e-13, 4.2877e-13, 4.301e-13, 3.8125e-13, 3.6504e-13, 3.1308e-13, \n 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, 2.327e-13, \n 1.9718e-13, 1.9051e-13]'], {}), '([0.00017102, 1.9008e-05, 1.4241e-07, 3.1137e-09, 1.7697e-10, \n 1.0479e-11, 8.813e-13, 4.2877e-13, 4.301e-13, 3.8125e-13, 3.6504e-13, \n 3.1308e-13, 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, \n 2.327e-13, 1.9718e-13, 1.9051e-13])\n', (6386, 6639), True, 'import numpy as np\n'), ((6919, 7179), 'numpy.array', 'np.array', (['[0.00017102, 8.708e-07, 3.856e-09, 2.1663e-11, 8.4466e-13, 5.8087e-13, \n 6.0374e-13, 4.3054e-13, 4.3299e-13, 3.8125e-13, 3.6504e-13, 3.1308e-13,\n 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, 2.327e-13, \n 1.9718e-13, 1.9051e-13]'], {}), '([0.00017102, 8.708e-07, 3.856e-09, 2.1663e-11, 8.4466e-13, \n 5.8087e-13, 6.0374e-13, 4.3054e-13, 4.3299e-13, 
3.8125e-13, 3.6504e-13,\n 3.1308e-13, 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, \n 2.327e-13, 1.9718e-13, 1.9051e-13])\n', (6927, 7179), True, 'import numpy as np\n'), ((7460, 7722), 'numpy.array', 'np.array', (['[0.00017102, 1.3314e-07, 2.3136e-10, 9.3747e-13, 8.5176e-13, 5.8442e-13, \n 6.0374e-13, 4.3054e-13, 4.3299e-13, 3.8125e-13, 3.6504e-13, 3.1308e-13,\n 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, 2.327e-13, \n 1.9718e-13, 1.9051e-13]'], {}), '([0.00017102, 1.3314e-07, 2.3136e-10, 9.3747e-13, 8.5176e-13, \n 5.8442e-13, 6.0374e-13, 4.3054e-13, 4.3299e-13, 3.8125e-13, 3.6504e-13,\n 3.1308e-13, 2.8622e-13, 2.4847e-13, 2.6668e-13, 2.558e-13, 2.4225e-13, \n 2.327e-13, 1.9718e-13, 1.9051e-13])\n', (7468, 7722), True, 'import numpy as np\n'), ((1058, 1071), 'numpy.eye', 'np.eye', (['src.N'], {}), '(src.N)\n', (1064, 1071), True, 'import numpy as np\n'), ((1481, 1518), 'personal_utilities.arc_length_reparametrization.arc_length_parameterize', 'arc_length_parameterize', (['bdy.x', 'bdy.y'], {}), '(bdy.x, bdy.y)\n', (1504, 1518), False, 'from personal_utilities.arc_length_reparametrization import arc_length_parameterize\n'), ((13059, 13079), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {}), '()\n', (13077, 13079), True, 'import matplotlib as mpl\n'), ((1893, 1902), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1899, 1902), True, 'import numpy as np\n'), ((1910, 1919), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1916, 1919), True, 'import numpy as np\n'), ((2017, 2026), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2023, 2026), True, 'import numpy as np\n'), ((1996, 2005), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (2002, 2005), True, 'import numpy as np\n'), ((1962, 1971), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1968, 1971), True, 'import numpy as np\n'), ((1986, 1995), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1992, 1995), True, 'import numpy as np\n'), ((1976, 1985), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', 
(1982, 1985), True, 'import numpy as np\n')] |
import argparse
import json
import multiprocessing
import os
import pickle
import random
from multiprocessing import Pool, cpu_count
import nltk
import numpy as np
import torch
from tqdm import tqdm
from src.data_preprocessors.transformations import (
NoTransformation, SemanticPreservingTransformation,
BlockSwap, ConfusionRemover, DeadCodeInserter,
ForWhileTransformer, OperandSwap, VarRenamer
)
def set_seeds(seed):
    """Seed all random number generators used in this pipeline.

    Covers Python's ``random``, NumPy, the torch CPU generator and every
    visible CUDA device, so that data transformation choices are reproducible.

    Fixes vs. the original: ``torch.random.manual_seed`` was called in
    addition to ``torch.manual_seed`` (redundant — the latter delegates to the
    former), and ``torch.cuda.manual_seed`` only seeded the *current* CUDA
    device; ``manual_seed_all`` seeds all of them (and is backward-compatible
    for the single-GPU case).

    :param seed: integer seed applied to every generator
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-op when CUDA is unavailable, so safe on CPU-only machines.
    torch.cuda.manual_seed_all(seed)
def create_transformers_from_conf_file(processing_conf):
    """Map each transformation class to its section of the processing config.

    The config file is keyed by class name (e.g. ``"BlockSwap"``); a missing
    key raises ``KeyError``.
    """
    transformer_classes = [
        BlockSwap, ConfusionRemover, DeadCodeInserter,
        ForWhileTransformer, OperandSwap, VarRenamer,
    ]
    mapping = {}
    for transformer_cls in transformer_classes:
        mapping[transformer_cls] = processing_conf[transformer_cls.__name__]
    return mapping
class ExampleProcessor:
    """Picklable per-example worker for building denoising training pairs.

    The (unpicklable) tokenizer and transformer are stored in module-level
    globals populated by ``initialize`` so that each multiprocessing worker
    constructs them exactly once (use ``initialize`` as the pool initializer).

    Fix vs. the original: the final bare ``except:`` in ``process_example``
    (which also swallowed ``SystemExit``/``GeneratorExit``) is narrowed to
    ``except Exception``; ``KeyboardInterrupt`` is still handled explicitly.
    """

    def __init__(
            self,
            language,
            parser_path,
            transformation_config,
            bidirection_transformation=False,
            max_function_length=400
    ):
        """Store configuration only; heavy objects are built in ``initialize``.

        :param language: source language, or "nl" for natural language
        :param parser_path: tree-sitter parser library path
        :param transformation_config: dict keyed by transformer class name
        :param bidirection_transformation: bool, or the string 'adaptive'
        :param max_function_length: max token count for a kept example
        """
        self.language = language
        self.parser_path = parser_path
        self.transformation_config = transformation_config
        self.max_function_length = max_function_length
        self.bidirection_transformation = bidirection_transformation

    def initialize(self):
        """Pool initializer: build the per-process tokenizer and transformer."""
        global example_tokenizer
        global example_transformer
        transformers = create_transformers_from_conf_file(self.transformation_config)
        if self.language == "nl":
            example_tokenizer = nltk.word_tokenize
        else:
            example_tokenizer = NoTransformation(self.parser_path, self.language)
        example_transformer = SemanticPreservingTransformation(
            parser_path=self.parser_path, language=self.language, transform_functions=transformers
        )

    def process_example(self, code):
        """Tokenize and transform one example.

        Returns a ``{'source', 'target', 'transformer'}`` dict, or ``-1`` when
        the example is too long, no transformation applied, or any error
        occurred (errors are deliberately swallowed so one bad example cannot
        kill a worker).
        """
        global example_tokenizer
        global example_transformer
        try:
            if self.language == "nl":
                original_code = " ".join(example_tokenizer(code))
            else:
                original_code, _ = example_tokenizer.transform_code(code)
            if len(original_code.split()) > self.max_function_length:
                return -1
            transformed_code, used_transformer = example_transformer.transform_code(code)
            if not used_transformer:
                return -1
            if used_transformer == "ConfusionRemover":
                # ConfusionRemover *cleans* the code, so flip the pair: train
                # from the confusing original toward the cleaned version.
                original_code, transformed_code = transformed_code, original_code
            if isinstance(self.bidirection_transformation, str) and self.bidirection_transformation == 'adaptive':
                # In adaptive mode, only symmetric transformations may be
                # emitted in either direction.
                bidirection = (used_transformer in ["BlockSwap", "ForWhileTransformer", "OperandSwap"])
            else:
                assert isinstance(self.bidirection_transformation, bool)
                bidirection = self.bidirection_transformation
            if bidirection and np.random.uniform() < 0.5 \
                    and used_transformer != "SyntacticNoisingTransformation":
                # Reverse direction half of the time when allowed.
                return {
                    'source': original_code,
                    'target': transformed_code,
                    'transformer': used_transformer
                }
            return {
                'source': transformed_code,
                'target': original_code,
                'transformer': used_transformer
            }
        except KeyboardInterrupt:
            print("Stopping parsing for ", code)
            return -1
        except Exception:
            # Was a bare ``except:``; best-effort skip of unparseable examples.
            return -1
def process_functions(
    pool, example_processor, functions,
    train_file_path=None, valid_file_path=None, valid_percentage=0.002
):
    """Transform *functions* in parallel and write train/valid jsonl splits.

    Parameters
    ----------
    pool : multiprocessing.Pool
        Worker pool (workers initialised with ``example_processor.initialize``).
    example_processor :
        Object providing ``process_example`` which returns a dict or -1.
    functions : list of str
        Raw source-code functions to transform.
    train_file_path, valid_file_path : str, optional
        Output jsonl paths; a split is skipped when its path is None.
    valid_percentage : float
        Probability of routing a successful example to the validation file.
    """
    used_transformers = {}
    success = 0
    tf = open(train_file_path, "wt") if train_file_path is not None else None
    # BUG FIX: the validation handle must depend on valid_file_path — the
    # original tested train_file_path here (copy/paste error), so passing
    # only a valid_file_path produced no validation file at all.
    vf = open(valid_file_path, "wt") if valid_file_path is not None else None
    with tqdm(total=len(functions)) as pbar:
        processed_example_iterator = pool.imap(
            func=example_processor.process_example,
            iterable=functions,
            chunksize=1000,
        )
        count = 0
        while True:
            pbar.update()
            count += 1
            try:
                out = next(processed_example_iterator)
                if isinstance(out, int) and out == -1:
                    continue  # skipped / failed example
                name = out["transformer"]
                used_transformers[name] = used_transformers.get(name, 0) + 1
                # Route a small random fraction of examples to validation.
                if np.random.uniform() < valid_percentage:
                    if vf is not None:
                        vf.write(json.dumps(out) + "\n")
                        vf.flush()
                else:
                    if tf is not None:
                        tf.write(json.dumps(out) + "\n")
                        tf.flush()
                success += 1
            except multiprocessing.TimeoutError:
                print(f"{count} encountered timeout")
            except StopIteration:
                print(f"{count} stop iteration")
                break
    if tf is not None:
        tf.close()
    if vf is not None:
        vf.close()
    print(
        f"""
        Total : {len(functions)},
        Success : {success},
        Failure : {len(functions) - success}
        Stats : {json.dumps(used_transformers, indent=4)}
        """
    )
if __name__ == '__main__':
    # Command-line entry point: for every requested language, load the pickled
    # functions, transform them in parallel, and write jsonl train/valid splits.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--langs', default=["java"], nargs='+',
        help="Languages to be processed"
    )
    parser.add_argument(
        '--input_dir', help="Directory of the language pickle files"
    )
    parser.add_argument(
        '--output_dir', help="Directory for saving processed code"
    )
    parser.add_argument(
        '--processing_config_file', default="configs/data_processing_config.json",
        help="Configuration file for data processing."
    )
    parser.add_argument(
        '--parser_path', help="Tree-Sitter Parser Path",
    )
    parser.add_argument(
        "--workers", help="Number of worker CPU", type=int, default=20
    )
    parser.add_argument(
        "--timeout", type=int, help="Maximum number of seconds for a function to process.", default=10
    )
    parser.add_argument(
        "--valid_percentage", type=float, help="Percentage of validation examples", default=0.001
    )
    parser.add_argument("--seed", type=int, default=5000)
    args = parser.parse_args()
    # Seed all RNGs first so the random train/valid routing is reproducible.
    set_seeds(args.seed)
    out_dir = args.output_dir
    os.makedirs(out_dir, exist_ok=True)
    configuration = json.load(open(args.processing_config_file))
    print(configuration)
    for lang in args.langs:
        print(f"Now processing : {lang}")
        # One pickle per language; each entry carries the raw function text.
        pkl_file = os.path.join(args.input_dir, lang + ".pkl")
        data = pickle.load(open(pkl_file, "rb"))
        functions = [ex['function'] for ex in data]
        # for f in functions[:5]:
        #     print(f)
        #     print("=" * 100)
        # PHP snippets must be wrapped in tags to parse as full programs.
        if lang == "php":
            functions = ["<?php\n" + f + "\n?>" for f in functions]
        example_processor = ExampleProcessor(
            language=lang,
            parser_path=args.parser_path,
            transformation_config=configuration["transformers"],
            # Fall back to defaults when the config omits these keys.
            max_function_length=(
                configuration["max_function_length"] if "max_function_length" in configuration else 400
            ),
            bidirection_transformation=(
                configuration["bidirection_transformation"] if "bidirection_transformation" in configuration else False
            )
        )
        # Each worker builds its own tokenizer/transformer via initialize().
        pool = Pool(
            processes=min(cpu_count(), args.workers),
            initializer=example_processor.initialize
        )
        process_functions(
            pool=pool,
            example_processor=example_processor,
            functions=functions,
            train_file_path=os.path.join(out_dir, f"{lang}_train.jsonl"),
            valid_file_path=os.path.join(out_dir, f"{lang}_valid.jsonl"),
            valid_percentage=args.valid_percentage
        )
        # Release the pool and processor before the next language iteration.
        del pool
        del example_processor
| [
"numpy.random.uniform",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.random.manual_seed",
"os.makedirs",
"torch.manual_seed",
"src.data_preprocessors.transformations.SemanticPreservingTransformation",
"torch.cuda.manual_seed",
"src.data_preprocessors.transformations.NoTransformation",
"j... | [((439, 462), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (456, 462), False, 'import torch\n'), ((467, 497), 'torch.random.manual_seed', 'torch.random.manual_seed', (['seed'], {}), '(seed)\n', (491, 497), False, 'import torch\n'), ((502, 522), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (516, 522), True, 'import numpy as np\n'), ((527, 544), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (538, 544), False, 'import random\n'), ((549, 577), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (571, 577), False, 'import torch\n'), ((5759, 5784), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5782, 5784), False, 'import argparse\n'), ((6875, 6910), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (6886, 6910), False, 'import os\n'), ((1732, 1857), 'src.data_preprocessors.transformations.SemanticPreservingTransformation', 'SemanticPreservingTransformation', ([], {'parser_path': 'self.parser_path', 'language': 'self.language', 'transform_functions': 'transformers'}), '(parser_path=self.parser_path, language=\n self.language, transform_functions=transformers)\n', (1764, 1857), False, 'from src.data_preprocessors.transformations import NoTransformation, SemanticPreservingTransformation, BlockSwap, ConfusionRemover, DeadCodeInserter, ForWhileTransformer, OperandSwap, VarRenamer\n'), ((7090, 7133), 'os.path.join', 'os.path.join', (['args.input_dir', "(lang + '.pkl')"], {}), "(args.input_dir, lang + '.pkl')\n", (7102, 7133), False, 'import os\n'), ((1652, 1701), 'src.data_preprocessors.transformations.NoTransformation', 'NoTransformation', (['self.parser_path', 'self.language'], {}), '(self.parser_path, self.language)\n', (1668, 1701), False, 'from src.data_preprocessors.transformations import NoTransformation, SemanticPreservingTransformation, BlockSwap, ConfusionRemover, DeadCodeInserter, 
ForWhileTransformer, OperandSwap, VarRenamer\n'), ((5663, 5702), 'json.dumps', 'json.dumps', (['used_transformers'], {'indent': '(4)'}), '(used_transformers, indent=4)\n', (5673, 5702), False, 'import json\n'), ((8233, 8277), 'os.path.join', 'os.path.join', (['out_dir', 'f"""{lang}_train.jsonl"""'], {}), "(out_dir, f'{lang}_train.jsonl')\n", (8245, 8277), False, 'import os\n'), ((8307, 8351), 'os.path.join', 'os.path.join', (['out_dir', 'f"""{lang}_valid.jsonl"""'], {}), "(out_dir, f'{lang}_valid.jsonl')\n", (8319, 8351), False, 'import os\n'), ((4839, 4858), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4856, 4858), True, 'import numpy as np\n'), ((7982, 7993), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (7991, 7993), False, 'from multiprocessing import Pool, cpu_count\n'), ((3089, 3108), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3106, 3108), True, 'import numpy as np\n'), ((4951, 4966), 'json.dumps', 'json.dumps', (['out'], {}), '(out)\n', (4961, 4966), False, 'import json\n'), ((5104, 5119), 'json.dumps', 'json.dumps', (['out'], {}), '(out)\n', (5114, 5119), False, 'import json\n')] |
from numpy import unique, where
from sklearn.datasets import make_classification
from sklearn.cluster import Birch
from matplotlib import pyplot

# Build a synthetic two-class dataset with two informative features.
features, _ = make_classification(
    n_samples=1800, n_features=2, n_informative=2, n_redundant=0,
    n_clusters_per_class=1, random_state=4,
)

# Fit BIRCH and assign every sample to one of two clusters.
birch = Birch(threshold=0.01, n_clusters=2)
birch.fit(features)
assignments = birch.predict(features)

# Scatter-plot the samples of each cluster in turn.
for label in unique(assignments):
    members = where(assignments == label)
    pyplot.scatter(features[members, 0], features[members, 1])
pyplot.show()
| [
"matplotlib.pyplot.show",
"sklearn.cluster.Birch",
"matplotlib.pyplot.scatter",
"sklearn.datasets.make_classification",
"numpy.where",
"numpy.unique"
] | [((199, 324), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1800)', 'n_features': '(2)', 'n_informative': '(2)', 'n_redundant': '(0)', 'n_clusters_per_class': '(1)', 'random_state': '(4)'}), '(n_samples=1800, n_features=2, n_informative=2,\n n_redundant=0, n_clusters_per_class=1, random_state=4)\n', (218, 324), False, 'from sklearn.datasets import make_classification\n'), ((353, 388), 'sklearn.cluster.Birch', 'Birch', ([], {'threshold': '(0.01)', 'n_clusters': '(2)'}), '(threshold=0.01, n_clusters=2)\n', (358, 388), False, 'from sklearn.cluster import Birch\n'), ((447, 459), 'numpy.unique', 'unique', (['yhat'], {}), '(yhat)\n', (453, 459), False, 'from numpy import unique\n'), ((571, 584), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (582, 584), False, 'from matplotlib import pyplot\n'), ((504, 526), 'numpy.where', 'where', (['(yhat == cluster)'], {}), '(yhat == cluster)\n', (509, 526), False, 'from numpy import where\n'), ((528, 570), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['X[row_ix, 0]', 'X[row_ix, 1]'], {}), '(X[row_ix, 0], X[row_ix, 1])\n', (542, 570), False, 'from matplotlib import pyplot\n')] |
from scipy.io import loadmat
import tensorflow as tf
import numpy as np
from PIL import Image
from sklearn.preprocessing import LabelEncoder
import os
def load_mnist():
    """Load MNIST, scale pixels to [0, 1] and reshape images to (N, 28, 28, 1)."""
    (train_images, train_labels), (test_images, test_labels) = \
        tf.keras.datasets.mnist.load_data()
    train_images = (train_images / 255.0).reshape(-1, 28, 28, 1)
    test_images = (test_images / 255.0).reshape(-1, 28, 28, 1)
    return train_images, train_labels, test_images, test_labels
def load_svhn():
    """Load the SVHN .mat splits; images become (N, 32, 32, 3) floats in [0, 1]."""
    def _split(mat_path):
        mat = loadmat(mat_path)
        # Samples are stored on the last axis of 'X'; move them to the front.
        return np.moveaxis(mat['X'], -1, 0) / 255.0, np.squeeze(mat['y'])
    train_x, train_y = _split('data/1_raw/SVHN/train_32x32.mat')
    test_x, test_y = _split('data/1_raw/SVHN/test_32x32.mat')
    return train_x, train_y, test_x, test_y
def load_cifar10():
    """Load CIFAR-10 with pixels scaled to [0, 1] and labels flattened to 1-D."""
    (train_x, train_y), (test_x, test_y) = tf.keras.datasets.cifar10.load_data()
    return train_x / 255.0, np.squeeze(train_y), test_x / 255.0, np.squeeze(test_y)
def load_cifar100():
    """Load CIFAR-100 with pixels scaled to [0, 1] and labels flattened to 1-D."""
    (train_x, train_y), (test_x, test_y) = tf.keras.datasets.cifar100.load_data()
    return train_x / 255.0, np.squeeze(train_y), test_x / 255.0, np.squeeze(test_y)
def _load_image_32x32(path):
    """Load an image file as a 32x32 RGB uint8 numpy array."""
    image = Image.open(path)
    image = image.convert(mode='RGB')
    image = image.resize((32, 32))
    return np.asarray(image)

def load_tinyImgNet():
    """Load Tiny ImageNet: val split as test set, train split as train set.

    Images are resized to 32x32 RGB. Labels are integer-encoded with a
    LabelEncoder fitted on the validation annotations, then reused for the
    training labels.
    """
    img_path = './data/1_raw/tinyImgNet/tiny-imagenet-200/val/images/'
    lab_path = './data/1_raw/tinyImgNet/tiny-imagenet-200/val/val_annotations.txt'
    # Validation annotations: "<file>\t<label>\t..." per line.
    file_list, label_list = [], []
    with open(lab_path) as f:
        for line in f:
            file_name, file_lab = line.split('\t')[:2]
            file_list.append(file_name)
            label_list.append(file_lab)
    # FIX: the load/convert/resize sequence was duplicated for the val and
    # train loops; it is factored into _load_image_32x32 above.
    teX = np.array([_load_image_32x32(img_path + img_name) for img_name in file_list])
    le = LabelEncoder()
    teY = le.fit_transform(label_list)
    # Training images live under per-class folders; the class id is the part
    # of the file name before the first underscore.
    img_tr_path = './data/1_raw/tinyImgNet/tiny-imagenet-200/train/'
    img_list, lab_list = [], []
    for path, subdirs, files in os.walk(img_tr_path):
        for name in files:
            if '.JPEG' in name:
                img_list.append(_load_image_32x32(os.path.join(path, name)))
                label, _ = name.split('_')
                lab_list.append(label)
    trX = np.array(img_list)
    trY = le.transform(lab_list)
    return trX, trY, teX, teY
"numpy.moveaxis",
"scipy.io.loadmat",
"numpy.asarray",
"tensorflow.keras.datasets.cifar100.load_data",
"os.walk",
"sklearn.preprocessing.LabelEncoder",
"PIL.Image.open",
"tensorflow.keras.datasets.cifar10.load_data",
"numpy.array",
"numpy.squeeze",
"os.path.join"
] | [((520, 562), 'scipy.io.loadmat', 'loadmat', (['"""data/1_raw/SVHN/train_32x32.mat"""'], {}), "('data/1_raw/SVHN/train_32x32.mat')\n", (527, 562), False, 'from scipy.io import loadmat\n'), ((579, 620), 'scipy.io.loadmat', 'loadmat', (['"""data/1_raw/SVHN/test_32x32.mat"""'], {}), "('data/1_raw/SVHN/test_32x32.mat')\n", (586, 620), False, 'from scipy.io import loadmat\n'), ((682, 708), 'numpy.squeeze', 'np.squeeze', (["images_tr['y']"], {}), "(images_tr['y'])\n", (692, 708), True, 'import numpy as np\n'), ((770, 796), 'numpy.squeeze', 'np.squeeze', (["images_te['y']"], {}), "(images_te['y'])\n", (780, 796), True, 'import numpy as np\n'), ((898, 935), 'tensorflow.keras.datasets.cifar10.load_data', 'tf.keras.datasets.cifar10.load_data', ([], {}), '()\n', (933, 935), True, 'import tensorflow as tf\n'), ((971, 991), 'numpy.squeeze', 'np.squeeze', (['ci10Tr_Y'], {}), '(ci10Tr_Y)\n', (981, 991), True, 'import numpy as np\n'), ((1027, 1047), 'numpy.squeeze', 'np.squeeze', (['ci10Te_Y'], {}), '(ci10Te_Y)\n', (1037, 1047), True, 'import numpy as np\n'), ((1154, 1192), 'tensorflow.keras.datasets.cifar100.load_data', 'tf.keras.datasets.cifar100.load_data', ([], {}), '()\n', (1190, 1192), True, 'import tensorflow as tf\n'), ((1229, 1250), 'numpy.squeeze', 'np.squeeze', (['ci100Tr_Y'], {}), '(ci100Tr_Y)\n', (1239, 1250), True, 'import numpy as np\n'), ((1287, 1308), 'numpy.squeeze', 'np.squeeze', (['ci100Te_Y'], {}), '(ci100Te_Y)\n', (1297, 1308), True, 'import numpy as np\n'), ((2035, 2053), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (2043, 2053), True, 'import numpy as np\n'), ((2063, 2077), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2075, 2077), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2307, 2327), 'os.walk', 'os.walk', (['img_tr_path'], {}), '(img_tr_path)\n', (2314, 2327), False, 'import os\n'), ((2853, 2871), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (2861, 2871), True, 'import 
numpy as np\n'), ((631, 665), 'numpy.moveaxis', 'np.moveaxis', (["images_tr['X']", '(-1)', '(0)'], {}), "(images_tr['X'], -1, 0)\n", (642, 665), True, 'import numpy as np\n'), ((719, 753), 'numpy.moveaxis', 'np.moveaxis', (["images_te['X']", '(-1)', '(0)'], {}), "(images_te['X'], -1, 0)\n", (730, 753), True, 'import numpy as np\n'), ((1860, 1875), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (1870, 1875), False, 'from PIL import Image\n'), ((1975, 1992), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1985, 1992), True, 'import numpy as np\n'), ((2380, 2404), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (2392, 2404), False, 'import os\n'), ((2558, 2579), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (2568, 2579), False, 'from PIL import Image\n'), ((2703, 2720), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2713, 2720), True, 'import numpy as np\n')] |
import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random as rn
import warnings
from kneed import KneeLocator
class BuildDriftKnowledge():
    """
    Description :
        Class to build the pareto knowledge from hyper-parameter configurations evaluated on different datasets for the drift detector tuning.
        The knowledge consists in the best configuration of hyper-parameters for each dataset.
        The datasets are characterised by meta-features and a knowledge base can then be built to link these features to the best configurations.

    Parameters :
        results_directory: str
            Path to the directory containing the knowledge files (results of the evaluation of the configurations on example streams)
        names_detectors: list of list of str
            One list of configuration names per detector family
        names_streams: list of str
            List of the names of the streams
        output: str
            Directory path where to save the output file
        verbose: bool, default = False
            Print and plot pareto figures if True

    Output:
        Csv file containing the configurations selected for each example stream (each row = 1 stream)

    Example:
        >>> names_stm = ['BernouW1ME0010','BernouW1ME005095','BernouW1ME00509','BernouW1ME0109','BernouW1ME0108','BernouW1ME0208','BernouW1ME0207','BernouW1ME0307','BernouW1ME0306','BernouW1ME0406','BernouW1ME0506','BernouW1ME05506',
        >>>              'BernouW100ME0010','BernouW100ME005095','BernouW100ME00509','BernouW100ME0109','BernouW100ME0108','BernouW100ME0208','BernouW100ME0207','BernouW100ME0307','BernouW100ME0306','BernouW100ME0406','BernouW100ME0506','BernouW100ME05506',
        >>>              'BernouW500ME0010','BernouW500ME005095','BernouW500ME00509','BernouW500ME0109','BernouW500ME0108','BernouW500ME0208','BernouW500ME0207','BernouW500ME0307','BernouW500ME0306','BernouW500ME0406','BernouW500ME0506','BernouW500ME05506']
        >>>
        >>> names_detect = [['PH1','PH2','PH3','PH4','PH5','PH6','PH7','PH8','PH9','PH10','PH11','PH12','PH13','PH14','PH15','PH16'],
        >>>                 ['ADWIN1','ADWIN2','ADWIN3','ADWIN4','ADWIN5','ADWIN6','ADWIN7','ADWIN8','ADWIN9'],
        >>>                 ['DDM1','DDM2','DDM3','DDM4','DDM5','DDM6','DDM7','DDM8','DDM9','DDM10'],
        >>>                 ['SeqDrift21','SeqDrift22','SeqDrift23','SeqDrift24','SeqDrift25','SeqDrift26','SeqDrift27','SeqDrift28','SeqDrift29','SeqDrift210',
        >>>                  'SeqDrift211','SeqDrift212','SeqDrift213','SeqDrift214','SeqDrift215','SeqDrift216','SeqDrift217','SeqDrift218']]
        >>>
        >>> output_dir = os.getcwd()
        >>> directory_path_files = 'examples/pareto_knowledge/ExampleDriftKnowledge'
        >>>
        >>> pareto_build = BuildDriftKnowledge(results_directory=directory_path_files, names_detectors=names_detect, names_streams=names_stm, output=output_dir, verbose=True)
        >>> pareto_build.load_drift_data()
        >>> pareto_build.calculate_pareto()
        >>> pareto_build.best_config
    """

    def __init__(self,
                 results_directory,
                 names_detectors,
                 names_streams,
                 output,
                 verbose=False):
        if results_directory is not None and names_detectors is not None and names_streams is not None and output is not None:
            self.results_directory = results_directory
            self.names_detectors = names_detectors
            self.names_streams = names_streams
            self.output = output
        else:
            raise ValueError('Directory paths or list of detectors names or list of streams missing.')
        self.verbose = verbose
        # Number of detector families (PH, ADWIN, DDM, SeqDrift2 — see class example).
        self.n_detectors = 4
        self.n_streams = len(self.names_streams)
        # One list of selected configurations per stream (filled by calculate_pareto).
        self.best_config = [[] for ind_s in range(self.n_streams)]
        warnings.filterwarnings("ignore")

    @property
    def best_config(self):
        """Best detector configurations selected for each stream.

        Returns
        -------
        list of list
            One list of selected configuration names per stream.
        """
        # FIX: the original property/setter docstrings were a copy-paste
        # error describing "the length of the stream".
        return self._best_config

    @best_config.setter
    def best_config(self, best_config):
        """Set the best configurations per stream.

        Parameters
        ----------
        best_config : list of list
        """
        self._best_config = best_config

    def load_drift_data(self):
        """
        Load the (TP, FP) performance scores of every detector configuration
        from the per-stream csv files in ``results_directory``.
        """
        # Performance scores and matching configuration names,
        # indexed as [stream][detector family].
        self.scores_perf = []
        self.list_name_detec_ok = []
        for ind_s in range(self.n_streams):
            self.scores_perf.append([[] for ind_d in range(self.n_detectors)])
            self.list_name_detec_ok.append([[] for ind_d in range(self.n_detectors)])
        # Placeholders for per-stream meta-features; extraction is not
        # implemented yet (see process_meta_features).
        self.mean_meta_features = []
        self.std_meta_features = []
        for ind_s in range(self.n_streams):
            self.mean_meta_features.append([[] for ind_d in range(self.n_detectors)])
            self.std_meta_features.append([[] for ind_d in range(self.n_detectors)])
        ind_s = 0
        for stream_name in self.names_streams:
            # FIX: was os.sep.join([dir, os.sep, name]), which produced three
            # consecutive path separators; os.path.join is the portable form.
            stream_perf_file = os.path.join(self.results_directory, stream_name + 'PerfDetectors.csv')
            with open(stream_perf_file) as csv_data_file:
                data_p = [row for row in csv.reader(csv_data_file)]
            ind_d = 0
            for name_detector in self.names_detectors:
                for name_ind_detector in name_detector:
                    # Row of performances for the given configuration.
                    rowind_detector = next((x for x in data_p if x[0] == name_ind_detector), None)
                    # FIX: guard against a missing row (next() returned None),
                    # which previously raised a TypeError. A 'nan' value means
                    # the detector detected no drifts at some point.
                    if rowind_detector is not None and 'nan' not in rowind_detector:
                        # Store the TP and FP counts for this configuration.
                        self.scores_perf[ind_s][ind_d].append([float(rowind_detector[2]), float(rowind_detector[3])])
                        self.list_name_detec_ok[ind_s][ind_d].append(name_ind_detector)
                ind_d += 1
            ind_s += 1

    def process_meta_features(self):
        """Extract meta-features for each stream (not implemented yet)."""
        print('Start process meta-features')
        # TODO : to come

    def calculate_pareto(self):
        """
        For each stream and detector family, compute the Pareto front of
        (n_TP, n_FP) scores, pick the knee point as the best configuration,
        and write all selections to <output>/bestConfigsDrift.csv.
        """
        if self.verbose:
            print('Start Pareto calculation')
        for ind_s in range(self.n_streams):
            for ind_d in range(self.n_detectors):
                names = self.list_name_detec_ok[ind_s][ind_d]
                score = np.array(self.scores_perf[ind_s][ind_d])
                # Pareto front sorted by increasing TP.
                pareto = self.identify_pareto(score)
                pareto_front = score[pareto]
                pareto_front_df = pd.DataFrame(pareto_front)
                pareto_front_df.sort_values(0, inplace=True)
                pareto_front = pareto_front_df.values
                scorepd = pd.DataFrame(score, columns=['X', 'Y'])
                x_all = score[:, 0]
                y_all = score[:, 1]
                x_pareto = pareto_front[:, 0]
                y_pareto = pareto_front[:, 1]
                # Knee detection: try a convex front first, then a concave
                # one, and finally fall back to the extreme point of the front.
                try:
                    kn = KneeLocator(x_pareto, y_pareto, curve='convex', direction='increasing', S=0)
                    knee_x = kn.knee
                    knee_y = y_pareto[np.where(x_pareto == knee_x)[0][0]]
                    # Index of the selected configuration.
                    id_name = scorepd.loc[(scorepd['X'] == knee_x) & (scorepd['Y'] == knee_y)].index[0]
                except (IndexError, ValueError):
                    try:
                        kn = KneeLocator(x_pareto, y_pareto, curve='concave', direction='increasing', S=0)
                        knee_x = kn.knee
                        knee_y = y_pareto[np.where(x_pareto == knee_x)[0][0]]
                        id_name = scorepd.loc[(scorepd['X'] == knee_x) & (scorepd['Y'] == knee_y)].index[0]
                    except (IndexError, ValueError):
                        knee_x = pareto_front[len(pareto_front) - 1][0]
                        if all(x == x_pareto[0] for x in x_pareto):
                            knee_y = pareto_front[np.argmin(pareto_front.T[1][:])][1]
                        else:
                            knee_y = scorepd.loc[(scorepd['X'] == knee_x)].iloc[0]['Y']
                        id_name = scorepd.loc[(scorepd['X'] == knee_x) & (scorepd['Y'] == knee_y)].index[0]
                if self.verbose:
                    print('Knee point : ' + str(names[id_name]))
                    # FIX: plotting is now gated on verbose, as documented in
                    # the class docstring (it previously ran unconditionally).
                    plt.scatter(x_all, y_all)
                    for i, txt in enumerate(names):
                        plt.annotate(txt, (x_all[i], y_all[i]))
                    plt.title('Pareto front ' + str(self.names_streams[ind_s]) + '. Knee : ' + str(names[id_name]) + ' ' + str(knee_x) + ' ' + str(knee_y))
                    plt.plot(x_pareto, y_pareto, color='r')
                    plt.xlabel('n_TP')
                    plt.ylabel('n_FP')
                    xmin, xmax, ymin, ymax = plt.axis()
                    plt.vlines(knee_x, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
                    plt.show()
                self.best_config[ind_s].append(names[id_name])
        # FIX: the output file name had a doubled '.csv.csv' extension and a
        # hard-coded '/' separator.
        with open(os.path.join(self.output, 'bestConfigsDrift.csv'), 'w', newline='') as file:
            writer = csv.writer(file)
            writer.writerows(self.best_config)
        if self.verbose:
            print('End Pareto calculation')

    def identify_pareto(self, scores):
        """
        Adapted from https://github.com/MichaelAllen1966
        Return the ids of the points on the Pareto front of *scores*
        (maximise column 0 = TP, minimise column 1 = FP).
        """
        population_size = scores.shape[0]
        population_ids = np.arange(population_size)
        # All points start on the front and are removed when dominated.
        pareto_front = np.ones(population_size, dtype=bool)
        for i in range(population_size):
            for j in range(population_size):
                # 'j' strictly dominates 'i': strictly more TP and strictly
                # fewer FP. (The original repeated each comparison with >=/<=,
                # which is redundant combined with the strict inequalities.)
                if scores[j][0] > scores[i][0] and scores[j][1] < scores[i][1]:
                    pareto_front[i] = 0
                    break  # no more comparisons needed for 'i'
        return population_ids[pareto_front]

    def calculate_crowding(self, scores):
        """
        From https://github.com/MichaelAllen1966
        Crowding distance per individual: for each objective (normalised to
        [0, 1]), the distance between the next-higher and next-lower
        neighbour; end points get the maximum crowding of 1. The total
        crowding is the sum over all objectives.
        """
        population_size = len(scores[:, 0])
        number_of_scores = len(scores[0, :])
        crowding_matrix = np.zeros((population_size, number_of_scores))
        # Normalise each objective to [0, 1] (ptp is max - min).
        # FIX: np.ptp(...) instead of ndarray.ptp, removed in NumPy 2.0.
        normed_scores = (scores - scores.min(0)) / np.ptp(scores, axis=0)
        for col in range(number_of_scores):
            crowding = np.zeros(population_size)
            # End points have maximum crowding.
            crowding[0] = 1
            crowding[population_size - 1] = 1
            sorted_scores = np.sort(normed_scores[:, col])
            sorted_scores_index = np.argsort(normed_scores[:, col])
            # Crowding distance between each individual's two neighbours.
            crowding[1:population_size - 1] = (
                sorted_scores[2:population_size] -
                sorted_scores[0:population_size - 2]
            )
            # Map the crowding values back to the original ordering.
            re_sort_order = np.argsort(sorted_scores_index)
            crowding_matrix[:, col] = crowding[re_sort_order]
        return np.sum(crowding_matrix, axis=1)

    def reduce_by_crowding(self, scores, number_to_select):
        """
        From https://github.com/MichaelAllen1966
        Tournament selection on crowding distance: repeatedly pick two random
        individuals and keep the one with the larger crowding distance.
        """
        population_ids = np.arange(scores.shape[0])
        crowding_distances = self.calculate_crowding(scores)
        picked_population_ids = np.zeros(number_to_select)
        picked_scores = np.zeros((number_to_select, len(scores[0, :])))
        for i in range(number_to_select):
            population_size = population_ids.shape[0]
            fighter1ID = rn.randint(0, population_size - 1)
            fighter2ID = rn.randint(0, population_size - 1)
            # Ties go to fighter 1, as in the original.
            if crowding_distances[fighter1ID] >= crowding_distances[fighter2ID]:
                winner = fighter1ID
            else:
                winner = fighter2ID
            picked_population_ids[i] = population_ids[winner]
            picked_scores[i, :] = scores[winner, :]
            # Remove the selected individual from the candidate pool.
            population_ids = np.delete(population_ids, winner, axis=0)
            scores = np.delete(scores, winner, axis=0)
            crowding_distances = np.delete(crowding_distances, winner, axis=0)
        # Convert to integer ids.
        picked_population_ids = np.asarray(picked_population_ids, dtype=int)
        return picked_population_ids
| [
"kneed.KneeLocator",
"numpy.sum",
"csv.reader",
"numpy.ones",
"numpy.argmin",
"numpy.argsort",
"numpy.arange",
"pandas.DataFrame",
"random.randint",
"os.sep.join",
"matplotlib.pyplot.show",
"csv.writer",
"matplotlib.pyplot.ylim",
"numpy.asarray",
"numpy.sort",
"matplotlib.pyplot.ylabel... | [((4659, 4692), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (4682, 4692), False, 'import warnings\n'), ((12864, 12890), 'numpy.arange', 'np.arange', (['population_size'], {}), '(population_size)\n', (12873, 12890), True, 'import numpy as np\n'), ((13047, 13083), 'numpy.ones', 'np.ones', (['population_size'], {'dtype': 'bool'}), '(population_size, dtype=bool)\n', (13054, 13083), True, 'import numpy as np\n'), ((14516, 14561), 'numpy.zeros', 'np.zeros', (['(population_size, number_of_scores)'], {}), '((population_size, number_of_scores))\n', (14524, 14561), True, 'import numpy as np\n'), ((15719, 15750), 'numpy.sum', 'np.sum', (['crowding_matrix'], {'axis': '(1)'}), '(crowding_matrix, axis=1)\n', (15725, 15750), True, 'import numpy as np\n'), ((16168, 16194), 'numpy.arange', 'np.arange', (['scores.shape[0]'], {}), '(scores.shape[0])\n', (16177, 16194), True, 'import numpy as np\n'), ((16288, 16314), 'numpy.zeros', 'np.zeros', (['number_to_select'], {}), '(number_to_select)\n', (16296, 16314), True, 'import numpy as np\n'), ((17899, 17943), 'numpy.asarray', 'np.asarray', (['picked_population_ids'], {'dtype': 'int'}), '(picked_population_ids, dtype=int)\n', (17909, 17943), True, 'import numpy as np\n'), ((6123, 6208), 'os.sep.join', 'os.sep.join', (["[self.results_directory, os.sep, stream_name + 'PerfDetectors.csv']"], {}), "([self.results_directory, os.sep, stream_name + 'PerfDetectors.csv']\n )\n", (6134, 6208), False, 'import os\n'), ((12397, 12413), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (12407, 12413), False, 'import csv\n'), ((14801, 14826), 'numpy.zeros', 'np.zeros', (['population_size'], {}), '(population_size)\n', (14809, 14826), True, 'import numpy as np\n'), ((15056, 15086), 'numpy.sort', 'np.sort', (['normed_scores[:, col]'], {}), '(normed_scores[:, col])\n', (15063, 15086), True, 'import numpy as np\n'), ((15122, 15155), 'numpy.argsort', 'np.argsort', 
(['normed_scores[:, col]'], {}), '(normed_scores[:, col])\n', (15132, 15155), True, 'import numpy as np\n'), ((15461, 15492), 'numpy.argsort', 'np.argsort', (['sorted_scores_index'], {}), '(sorted_scores_index)\n', (15471, 15492), True, 'import numpy as np\n'), ((16512, 16546), 'random.randint', 'rn.randint', (['(0)', '(population_size - 1)'], {}), '(0, population_size - 1)\n', (16522, 16546), True, 'import random as rn\n'), ((16572, 16606), 'random.randint', 'rn.randint', (['(0)', '(population_size - 1)'], {}), '(0, population_size - 1)\n', (16582, 16606), True, 'import random as rn\n'), ((9079, 9119), 'numpy.array', 'np.array', (['self.scores_perf[ind_s][ind_d]'], {}), '(self.scores_perf[ind_s][ind_d])\n', (9087, 9119), True, 'import numpy as np\n'), ((9501, 9527), 'pandas.DataFrame', 'pd.DataFrame', (['pareto_front'], {}), '(pareto_front)\n', (9513, 9527), True, 'import pandas as pd\n'), ((9670, 9709), 'pandas.DataFrame', 'pd.DataFrame', (['score'], {'columns': "['X', 'Y']"}), "(score, columns=['X', 'Y'])\n", (9682, 9709), True, 'import pandas as pd\n'), ((17070, 17115), 'numpy.delete', 'np.delete', (['population_ids', 'fighter1ID'], {'axis': '(0)'}), '(population_ids, fighter1ID, axis=0)\n', (17079, 17115), True, 'import numpy as np\n'), ((17230, 17267), 'numpy.delete', 'np.delete', (['scores', 'fighter1ID'], {'axis': '(0)'}), '(scores, fighter1ID, axis=0)\n', (17239, 17267), True, 'import numpy as np\n'), ((17308, 17357), 'numpy.delete', 'np.delete', (['crowding_distances', 'fighter1ID'], {'axis': '(0)'}), '(crowding_distances, fighter1ID, axis=0)\n', (17317, 17357), True, 'import numpy as np\n'), ((17635, 17680), 'numpy.delete', 'np.delete', (['population_ids', 'fighter2ID'], {'axis': '(0)'}), '(population_ids, fighter2ID, axis=0)\n', (17644, 17680), True, 'import numpy as np\n'), ((17708, 17745), 'numpy.delete', 'np.delete', (['scores', 'fighter2ID'], {'axis': '(0)'}), '(scores, fighter2ID, axis=0)\n', (17717, 17745), True, 'import numpy as np\n'), ((17785, 
17834), 'numpy.delete', 'np.delete', (['crowding_distances', 'fighter2ID'], {'axis': '(0)'}), '(crowding_distances, fighter2ID, axis=0)\n', (17794, 17834), True, 'import numpy as np\n'), ((9975, 10051), 'kneed.KneeLocator', 'KneeLocator', (['x_pareto', 'y_pareto'], {'curve': '"""convex"""', 'direction': '"""increasing"""', 'S': '(0)'}), "(x_pareto, y_pareto, curve='convex', direction='increasing', S=0)\n", (9986, 10051), False, 'from kneed import KneeLocator\n'), ((11613, 11638), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_all', 'y_all'], {}), '(x_all, y_all)\n', (11624, 11638), True, 'import matplotlib.pyplot as plt\n'), ((11930, 11969), 'matplotlib.pyplot.plot', 'plt.plot', (['x_pareto', 'y_pareto'], {'color': '"""r"""'}), "(x_pareto, y_pareto, color='r')\n", (11938, 11969), True, 'import matplotlib.pyplot as plt\n'), ((11990, 12008), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n_TP"""'], {}), "('n_TP')\n", (12000, 12008), True, 'import matplotlib.pyplot as plt\n'), ((12029, 12047), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""n_FP"""'], {}), "('n_FP')\n", (12039, 12047), True, 'import matplotlib.pyplot as plt\n'), ((12093, 12103), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (12101, 12103), True, 'import matplotlib.pyplot as plt\n'), ((12214, 12224), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12222, 12224), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6328), 'csv.reader', 'csv.reader', (['csv_data_file'], {}), '(csv_data_file)\n', (6313, 6328), False, 'import csv\n'), ((11716, 11755), 'matplotlib.pyplot.annotate', 'plt.annotate', (['txt', '(x_all[i], y_all[i])'], {}), '(txt, (x_all[i], y_all[i]))\n', (11728, 11755), True, 'import matplotlib.pyplot as plt\n'), ((10485, 10562), 'kneed.KneeLocator', 'KneeLocator', (['x_pareto', 'y_pareto'], {'curve': '"""concave"""', 'direction': '"""increasing"""', 'S': '(0)'}), "(x_pareto, y_pareto, curve='concave', direction='increasing', S=0)\n", (10496, 10562), False, 'from kneed 
import KneeLocator\n'), ((12143, 12153), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (12151, 12153), True, 'import matplotlib.pyplot as plt\n'), ((12158, 12168), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (12166, 12168), True, 'import matplotlib.pyplot as plt\n'), ((10170, 10198), 'numpy.where', 'np.where', (['(x_pareto == knee_x)'], {}), '(x_pareto == knee_x)\n', (10178, 10198), True, 'import numpy as np\n'), ((10645, 10673), 'numpy.where', 'np.where', (['(x_pareto == knee_x)'], {}), '(x_pareto == knee_x)\n', (10653, 10673), True, 'import numpy as np\n'), ((11104, 11135), 'numpy.argmin', 'np.argmin', (['pareto_front.T[1][:]'], {}), '(pareto_front.T[1][:])\n', (11113, 11135), True, 'import numpy as np\n')] |
import numpy as np
class StringEncoder:
    """Bidirectional mapping between characters and integer codes.

    A character's code is its position inside ``available_chars``, so
    encoding then decoding round-trips for any string drawn from that
    alphabet.
    """
    def __init__(self, available_chars):
        # Alphabet; `index()` keeps first-occurrence semantics for duplicates.
        self._available_chars = available_chars
    def encode_char(self, char: str) -> int:
        """Return the integer code of *char*.

        Raises ValueError if *char* is not in the alphabet.
        """
        return self._available_chars.index(char)
    def encode(self, string) -> "np.ndarray":
        """Encode a whole string into a 1-D numpy array of integer codes."""
        # Comprehension instead of manual append loop (idiomatic, PERF401).
        return np.array([self.encode_char(char) for char in string])
    def decode_char(self, char_idx: int) -> str:
        """Return the character whose code is *char_idx*."""
        return self._available_chars[char_idx]
    def decode(self, li) -> str:
        """Decode a sequence of integer codes back into a string."""
        # str.join over generator instead of building a list and joining it.
        return "".join(self.decode_char(char_idx) for char_idx in li)
# def encode(self, input):
# result = []
# for x in input:
# result.append(self.encode_str(x))
# return np.array(result) | [
"numpy.array"
] | [((412, 428), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (420, 428), True, 'import numpy as np\n')] |
import os
import re
import sys
import time
import json
import pkgutil
import argparse
import collections
from tqdm import tqdm
import pickle
import copy
from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['figure.facecolor'] = 'white'
import numpy as np
import pandas as pd
import xgboost as xgb
import sklearn.linear_model
import sklearn.pipeline
import sklearn.preprocessing
import sklearn.neural_network
# Stopgap to enable use as module & standalone script
if __name__ == "__main__":
from config import SUPPORTED_MODELS, SUPPORTED_MODES, SUPPORTED_DATA_FORMATS, MODEL2LANGS, LANG2INDEX
PRECOMPUTED_FEATURE_FILE = "data/precomputed_features.json"
else:
from litmus.config import SUPPORTED_MODELS, SUPPORTED_MODES, SUPPORTED_DATA_FORMATS, MODEL2LANGS, LANG2INDEX
PRECOMPUTED_FEATURE_FILE = ""
class Featurizer:
    """Builds regression feature matrices for (pivot-config, target-language) pairs.

    Features per target language: well-representedness regression features,
    optionally type overlap and syntactic distance to every pivot, and the
    per-pivot task data sizes.
    """

    def __init__(self, model, precomputed_feature_file, pivot_features):
        self.langs_list = MODEL2LANGS[model]
        self.pivot_features = pivot_features
        if precomputed_feature_file != "":
            # Caller supplied an explicit feature file.
            with open(precomputed_feature_file) as fin:
                precomputed = json.load(fin)
        else:
            # Fall back to the minified feature file bundled with the package.
            raw = pkgutil.get_data(__name__, "data/precomputed_features.min.json")
            precomputed = json.loads(raw.decode('utf-8'))
        self.precomputed_type_overlaps = precomputed["type_overlap"][model]
        self.precomputed_syntactic_distance = precomputed["syntactic_distance"][model]
        self.regression_feats = pd.DataFrame(
            precomputed["regression_feats"][model], index=self.langs_list)

    def featurize(self, langs_target, langs_pivot, task_sizes):
        """Return a (len(langs_target), n_features) feature matrix."""
        num_targets = len(langs_target)
        target_idx = [self.langs_list.index(lang) for lang in langs_target]
        # One row per pivot: its distance/overlap to every target language.
        syn_dist, overlaps = [], []
        for pivot in langs_pivot:
            dist_row = self.precomputed_syntactic_distance[self.langs_list.index(pivot)]
            syn_dist.append([dist_row[t] for t in target_idx])
            ovl_row = self.precomputed_type_overlaps[pivot]
            overlaps.append([ovl_row[lang] for lang in langs_target])
        columns = [self.regression_feats.loc[langs_target].values]
        if self.pivot_features == "data_only":
            # Zero out similarity features for pivots that carry no task data.
            for i in range(len(langs_pivot)):
                if task_sizes[i] == 0:
                    syn_dist[i] = [0] * num_targets
                    overlaps[i] = [0] * num_targets
        if self.pivot_features != "none":
            # Transpose to (targets, pivots) orientation before stacking.
            columns.append(np.array(overlaps).transpose())
            columns.append(np.array(syn_dist).transpose())
        # Every target row carries the same per-pivot size vector.
        columns.append(np.array([task_sizes for _ in range(num_targets)]))
        return np.concatenate(columns, axis=1)
def pretty_print(list_to_print, sep=' '):
    """Print rows of cells aligned into columns.

    Floats (including numpy floats) are rendered with four decimal places,
    everything else via ``str``. Every column except the last is left-
    justified to the width of its widest cell; cells are joined by *sep*.
    """
    def render(cell):
        if isinstance(cell, (np.floating, float)):
            return "{0:0.4f}".format(cell)
        return str(cell)

    rows = [[render(cell) for cell in row] for row in list_to_print]
    widths = [max(len(row[col]) for row in rows) for col in range(len(rows[0]))]
    # Last column is unpadded so lines carry no trailing spaces.
    template = sep.join(['{:<' + str(w) + '}' for w in widths[:-1]] + ['{}'])
    for row in rows:
        print(template.format(*row))
class CustomMinMaxScaler(sklearn.preprocessing.MinMaxScaler):
    """Min-max scaler that can force groups of columns to share one scale.

    ``common_feats`` is a list of column-index groups; after fitting, every
    column inside a group is rescaled with the group-wide min/max (e.g. so
    that all per-pivot "task data size" columns are scaled identically).
    Also refuses to re-fit once fitted, so the scale stays stable across
    repeated training calls.
    """
    def _handle_zeros_in_scale(scale, copy=True):
        # Mirrors sklearn's private helper of the same name: a zero data
        # range would divide by zero, so it is replaced by 1 (constant
        # columns become no-ops).
        # NOTE(review): defined without @staticmethod; it is only ever
        # called as CustomMinMaxScaler._handle_zeros_in_scale(value), which
        # works in Python 3 because functions accessed via the class are
        # unbound.
        # if we are fitting on 1D arrays, scale might be a scalar
        if np.isscalar(scale):
            if scale == .0:
                scale = 1.
            return scale
        elif isinstance(scale, np.ndarray):
            if copy:
                # New array to avoid side-effects
                scale = scale.copy()
            scale[scale == 0.0] = 1.0
            return scale
    def __init__(self, common_feats=None, **kwargs):
        super().__init__(**kwargs)
        # Groups of column indices that must share min/max; None disables.
        self.common_feats = common_feats
    def fit(self, X, y=None):
        # Only the first fit() has any effect; subsequent calls are no-ops
        # (presence of `n_samples_seen_` marks an already-fitted scaler).
        first_pass = not hasattr(self, 'n_samples_seen_')
        if not first_pass:
            return self
        else:
            return super().fit(X, y)
    def partial_fit(self, X, y=None):
        super().partial_fit(X, y)
        if self.common_feats:
            for group in self.common_feats:
                group = np.array(group)
                # Collapse each group's per-column min/max to the group-wide
                # extremes, then recompute the derived fitted attributes
                # (data_range_, scale_, min_) exactly as MinMaxScaler does.
                self.data_min_[group] = np.min(self.data_min_[group])
                self.data_max_[group] = np.max(self.data_max_[group])
                self.data_range_[group] = self.data_max_[group] - self.data_min_[group]
                self.scale_[group] = ((self.feature_range[1] - self.feature_range[0]) / CustomMinMaxScaler._handle_zeros_in_scale(self.data_range_[group]))
                self.min_[group] = self.feature_range[0] - self.data_min_[group] * self.scale_[group]
        return self
def regression(X, Y, common_feats, training_algorithm, load_model, model):
    """Fit a [scaler -> regressor] pipeline on (X, Y) and return it.

    If an existing model is supplied — directly via ``model`` or as a pickle
    path via ``load_model`` — training continues from it; otherwise a fresh
    pipeline is built for ``training_algorithm`` ("mlp" or "xgboost").
    Fitting is skipped entirely when X has no rows.
    """
    fit_kwargs = {}
    if not (model or load_model):
        # Fresh pipeline: shared custom scaler + algorithm-specific regressor.
        if training_algorithm == "mlp":
            scaler = CustomMinMaxScaler(common_feats=common_feats, clip=True)
            regressor = sklearn.neural_network.MLPRegressor((50, 50))
            model = sklearn.pipeline.Pipeline([('scaler', scaler), ('regressor', regressor)])
        elif training_algorithm == "xgboost":
            scaler = CustomMinMaxScaler(common_feats=common_feats, clip=True)
            regressor = xgb.XGBRegressor(objective='reg:squarederror', learning_rate=0.1, n_estimators=100, max_depth=10)
            model = sklearn.pipeline.Pipeline([('scaler', scaler), ('regressor', regressor)])
    else:
        if load_model:
            with open(load_model, 'rb') as f:
                model = pickle.load(f)
        else:
            # Work on a copy so the caller's model object is never mutated.
            model = copy.deepcopy(model)
        if training_algorithm == "mlp":
            # Keep learned weights and continue training from them.
            model.named_steps["regressor"].warm_start = True
        elif training_algorithm == "xgboost":
            # Hand the previous booster to fit() so xgboost resumes training.
            fit_kwargs["regressor__xgb_model"] = model.named_steps["regressor"].get_booster()
    if X.shape[0] > 0:
        model.fit(X, Y, **fit_kwargs)
    return model
def prepare_data(args):
    """Load training examples from ``args.scores_file`` and featurize them.

    Supports two formats: "json" (a list of entries with a ``train_config``
    size map and per-language ``eval_results``) and "csv" (one row per
    target language with comma-separated train languages/sizes and a score).
    Returns (model_name, langs_list, feat_names, featurizer, common_feats,
    X, Y, langs_pivot, langs_target, examples).
    """
    model = args.model_name
    langs_list = MODEL2LANGS[model]
    featurizer = Featurizer(model, args.precomputed_features, args.pivot_features)
    if args.use_all_langs:
        # Pad configs with every language either model family supports
        # (needed when transferring between mbert/xlmr feature layouts).
        all_langs = set(MODEL2LANGS["mbert"]) | set(MODEL2LANGS["xlmr"])
    else:
        all_langs = set(langs_list)
    X_array, Y_array = [], []
    examples = []
    # Initialized so empty input files yield empty results, not NameError.
    langs_pivot, langs_target = [], []
    if args.train_format == "json":
        with open(args.scores_file) as f:
            scores = json.load(f)
        for entry in scores:
            train_langs = entry["train_config"].keys()
            # Pad so every known language has an (possibly zero) data size.
            entry["train_config"].update({k: 0 for k in all_langs - train_langs})
            train_config = collections.OrderedDict(sorted(entry["train_config"].items()))
            eval_results = collections.OrderedDict(sorted(entry["eval_results"].items()))
            # Drop languages the current model does not support
            # (plain loops, not side-effecting comprehensions).
            for key in [x for x in train_config if x not in langs_list]:
                train_config.pop(key)
            for key in [x for x in eval_results if x not in langs_list]:
                eval_results.pop(key)
            X_array.append(featurizer.featurize(list(eval_results.keys()), list(train_config.keys()), list(train_config.values())))
            Y_array.append(list(eval_results.values()))
            examples.extend([[train_config, t] for t in eval_results.values()])
            langs_pivot = list(train_config.keys())
            langs_target = list(eval_results.keys())
    elif args.train_format == "csv":
        df = pd.read_csv(args.scores_file)
        targets_seen = set()
        for i, row in df.iterrows():
            if row['target_lang'] not in langs_list:
                continue
            train_langs = row['train_langs'].split(',')
            data_sizes = [float(x) for x in row['train_data_sizes'].split(',')]
            values = {k: v for k, v in zip(train_langs, data_sizes)}
            values.update({k: 0 for k in all_langs - set(train_langs)})
            train_config = collections.OrderedDict(sorted(values.items()))
            for key in [x for x in train_config if x not in langs_list]:
                train_config.pop(key)
            targets_seen.add(row['target_lang'])
            X_array.append(featurizer.featurize([row['target_lang']], list(train_config.keys()), list(train_config.values())))
            Y_array.append(row['score'])
            langs_pivot = list(train_config.keys())
        langs_target = list(targets_seen)
    # Reshape datasets
    X = np.concatenate(X_array)
    Y = np.array(Y_array).reshape(-1)
    # Establish feature-set (column order must mirror Featurizer.featurize:
    # regression feats, [overlap, distance] per pivot, then task sizes).
    feat_names = ["Data Size", "Well rep features"]
    common_feats = []
    if args.pivot_features != "none":
        feat_names += ["Type Overlap {}".format(lang) for lang in langs_pivot] + ["Syntactic Distance {}".format(lang) for lang in langs_pivot]
        if args.common_scaling:
            # Scale each feature family (overlap, distance) with shared min/max.
            start = 2
            mid = start + len(langs_pivot)
            end = mid + len(langs_pivot)
            common_feats.append(list(range(start, mid)))
            common_feats.append(list(range(mid, end)))
    feat_names += ["Task data size {}".format(lang) for lang in langs_pivot]
    if args.common_scaling:
        start = 2 + (2 * len(langs_pivot) if args.pivot_features != "none" else 0)
        end = start + len(langs_pivot)
        common_feats.append(list(range(start, end)))
    return model, langs_list, feat_names, featurizer, common_feats, X, Y, langs_pivot, langs_target, examples
"""
Parse user-specified pivot-size configurations
"""
def prepare_inf_data(args, langs_list):
    """Parse user-specified pivot-size configurations and target languages.

    ``args.data_sizes`` is a string like "en:100,hi:50;en:200": configs are
    separated by ';', each config being comma-separated "lang:size" pairs.
    Returns (data_sizes, tgt_langs) where data_sizes has one row per config
    ordered like ``langs_list`` (non-string inputs pass through untouched),
    and tgt_langs comes from heatmap_targets, falling back to
    suggestions_targets.
    """
    data_sizes = args.data_sizes
    if isinstance(data_sizes, str) and data_sizes != "":
        compact = re.sub(r"\s+", "", data_sizes)
        rows = []
        for config in compact.split(";"):
            size_of = {}
            for pair in config.split(","):
                lang, size = pair.split(":")
                size_of[lang] = int(size)
            # Languages absent from the config default to zero data.
            rows.append([size_of.get(lang, 0) for lang in langs_list])
        data_sizes = np.array(rows)
    # Heatmap targets override suggestion targets when provided.
    raw_targets = args.heatmap_targets if args.heatmap_targets else args.suggestions_targets
    tgt_langs = re.sub(r"\s+", "", raw_targets).split(",")
    return data_sizes, tgt_langs
def train_model(X, Y, feat_names, common_feats, langs_pivot, langs_target, args):
    """Train the performance-prediction regressor and estimate its error.

    ``args.error_method`` selects how prediction error is estimated:
    a single train/test split, 5-fold CV, caller-supplied index splits,
    leave-one-target-out, or leave-one-out. The baseline error in every
    mode is that of always predicting the training-set mean.
    Returns (model, (avg_error, std_error), baseline_error, errors,
    examples_indices, predictions).
    """
    error_method = args.error_method
    bprint = args.print
    train_indices = args.train_indices
    test_indices = args.test_indices
    if args.model and args.load_model:
        raise ValueError("Cannot specify model load_model. Only specify one")
    # Final model is always trained on ALL examples; the splits below are
    # only used to estimate its error.
    model = regression(X, Y, common_feats, args.training_algorithm, args.load_model, args.model)
    avg_error, std_error = None, None
    num_targets = len(langs_target)
    num_pivots = len(langs_pivot)
    train_function = regression
    examples_indices = None
    predictions = None
    if error_method == "split":
        # Split examples into train and test split to compute error on test split alone
        X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, random_state=42)
        res1 = train_function(X_train, Y_train, common_feats, args.training_algorithm, args.load_model, args.model)
        Y_pred = res1.predict(X_test)
        errors = abs(Y_test - Y_pred)
        baseline_errors = abs(np.mean(Y_train) - Y_test)
    elif error_method == "kfold":
        # Use 5 fold CV to compute error over all examples
        kf = sklearn.model_selection.KFold(n_splits=5, shuffle=True, random_state=42)
        errors = []
        predictions = []
        examples_indices = []
        baseline_errors = []
        for train_index, test_index in kf.split(X):
            X_train, X_test = X[train_index], X[test_index]
            Y_train, Y_test = Y[train_index], Y[test_index]
            res1 = train_function(X_train, Y_train, common_feats, args.training_algorithm, args.load_model, args.model)
            Y_pred = res1.predict(X_test)
            error = abs((Y_pred - Y_test))
            errors.extend(error)
            examples_indices.extend(test_index)
            predictions.extend(Y_pred)
            baseline_errors.extend(abs(np.mean(Y_train) - Y_test))
    elif error_method == "manual_split":
        # Use train and test splits supplied in args
        X_train, Y_train = X[train_indices], Y[train_indices]
        X_test, Y_test = X[test_indices], Y[test_indices]
        res1 = train_function(X_train, Y_train, common_feats, args.training_algorithm, args.load_model, args.model)
        Y_pred = res1.predict(X_test)
        errors = abs(Y_test - Y_pred)
        baseline_errors = abs(np.mean(Y_train) - Y_test)
    elif error_method == "LOTO":
        # Leave one target out scenario, predict for the left out column of elements and compute errors
        # NOTE(review): assumes examples are laid out pivot-major, i.e. the
        # rows for target t live at indices {p*num_targets + t} — confirm
        # this matches prepare_data's ordering for the csv format too.
        errors = []
        baseline_errors = []
        for t in tqdm(range(num_targets)):
            indices_to_delete = [_ * num_targets + t for _ in range(num_pivots)]
            X_reg = np.delete(X, indices_to_delete, axis=0)
            Y_reg = np.delete(Y, indices_to_delete, axis=0)
            Y_gold = Y[indices_to_delete]
            res1 = regression(X_reg, Y_reg, common_feats, args.training_algorithm, args.load_model, args.model)
            Y_pred = res1.predict(X[indices_to_delete])
            error = abs((Y_gold - Y_pred))
            errors.extend(error)
            baseline_errors.extend(abs(np.mean(Y_reg) - Y_gold))
    elif error_method == "LOO":
        # Leave one out scenario, predict for the left out element and compute errors
        errors = []
        baseline_errors = []
        for _ in tqdm(range(X.shape[0])):
            X_reg = np.delete(X, _, axis=0)
            Y_reg = np.delete(Y, _, axis=0)
            Y_gold = Y[_]
            res1 = regression(X_reg, Y_reg, common_feats, args.training_algorithm, args.load_model, args.model)
            Y_pred = res1.predict(X[_].reshape(1, -1))[0]
            error = abs((Y_gold - Y_pred))
            errors.append(error)
            baseline_errors.extend(abs(np.mean(Y_reg) - Y_gold))
    avg_error, std_error = np.mean(errors), np.std(errors)
    baseline_error = np.mean(baseline_errors)
    if bprint:
        print("Avg Pred Error: {0:0.6f}".format(avg_error))
        print("Std Pred Error: {0:0.6f}".format(std_error))
    if args.save_model:
        with open(args.save_model, 'wb') as f:
            pickle.dump(model, f)
    return model, (avg_error, std_error), baseline_error, errors, examples_indices, predictions
def build_acc_matrix(langs_pivot, langs_target, featurizer, model, data_sizes):
    """Predict a (num_configs, num_targets) score matrix.

    Row i holds the model's predicted performance on every target language
    when training with the pivot data sizes in ``data_sizes[i]``.
    """
    feature_rows = [featurizer.featurize(langs_target, langs_pivot, sizes)
                    for sizes in data_sizes]
    predictions = model.predict(np.concatenate(feature_rows))
    return predictions.reshape(len(data_sizes), len(langs_target))
"""
Enforcing language-sparsity constraint on search space
"""
def find_suggestions(
        args,
        model, featurizer,
        langs_list,
        budget, lang_budgets,
        targets, pivots, augmentable, weights, data_sizes,
        is_exp_grid, objective,
        min_perf, min_lang_perf):
    """Beam-search for the best data-augmentation plan under a budget.

    Starts from the maximum allowed augmentation per language and repeatedly
    shrinks per-language amounts (halving on the exponential grid, fixed 10%
    steps on the linear grid) until the total fits within ``budget``, keeping
    the top-scoring candidates at every step. ``objective`` is either "avg"
    (weighted mean over targets) or min over targets; ``min_perf`` /
    ``min_lang_perf`` prune candidates that fall below the requested floors.
    Returns a dict with search stats, baseline/augmented/max performances,
    per-language performances, the chosen augments and an ablation map.
    """
    # Helpers
    rmSpaces = lambda s: re.sub(r"\s+", "", s)
    parseLangData = lambda s,t: {kv.split(":")[0] : t(kv.split(":")[1]) for kv in rmSpaces(s).split(",") if kv != ""}
    printInfo = lambda s: print(s) if args.suggestions_verbose else None
    # Parse configuration
    targets, augmentable = rmSpaces(targets), rmSpaces(augmentable)
    targets = set(targets.split(","))
    # Unspecified augmentable set defaults to the targets themselves.
    augmentable = set(augmentable.split(",")) if augmentable != "" else targets
    weights = parseLangData(weights, int)
    lang_budgets = parseLangData(lang_budgets, int)
    min_perf = float(min_perf) if min_perf.strip() != "" else 0
    min_lang_perf = parseLangData(min_lang_perf, float)
    assert (len(targets) > 0 and len(pivots) > 0 and len(augmentable) > 0)
    langs_list = sorted(langs_list)
    langs_pvts = [lang for lang in langs_list]
    langs_tgts = [lang for lang in langs_list if lang in targets]
    langs_wts = [weights.get(lang, 1) for lang in langs_tgts]
    # Current pivot sizes; candidate augmentations are added on top of these.
    orig_sizes = tuple(pivots)
    # Helpers
    TgtPerfs= lambda sizes: model.predict( featurizer.featurize(langs_tgts, langs_pvts, sizes) )
    if objective == "avg":
        AvgPerf = lambda sizes: np.average( TgtPerfs(sizes), weights=langs_wts )
    else:
        AvgPerf = lambda sizes: np.amin( TgtPerfs(sizes) )
    # Compact (lang, extra_size) view of a size vector, dropping zeros.
    SimpleAugSet = lambda sizes: [(lang, sizes[idx]) for idx, lang in enumerate(langs_pvts) if sizes[idx] > 0]
    baseline_perf = AvgPerf( np.array(orig_sizes) )
    grid_linear_step = [0.1 * lang_budgets.get(lang, budget) for idx, lang in enumerate(langs_pvts)]
    class SearchNode:
        # A candidate augmentation: `sizes` holds per-pivot EXTRA data
        # amounts; a node is terminal once its total fits the budget.
        ctr_eval = 0  # global count of scored nodes, for search stats
        def __init__(self, sizes):
            self.sizes = sizes
            self.is_terminal = sum(sizes) <= budget
            self.score = None
            self.perf = None
            self.tgt_perfs = None
        def Eval(self):
            # Lazily computed score (counts toward ctr_eval once).
            if self.score == None:
                SearchNode.ctr_eval += 1
                self.score = self.Perf()
            return self.score
        def Perf(self):
            # Predicted objective for orig_sizes + this node's augments.
            if self.perf == None:
                self.tgt_perfs = TgtPerfs( np.array(orig_sizes) + np.array(self.sizes) )
                self.perf = AvgPerf( np.array(orig_sizes) + np.array(self.sizes) )
            return self.perf
        def Augment(self, lidx):
            # Shrink language lidx one grid step; None if already zero.
            if self.sizes[lidx] == 0:
                return None
            if is_exp_grid:
                new_sizes = tuple([el//2 if idx==lidx else el for idx, el in enumerate(self.sizes)])
            else:
                new_sizes = tuple([el-grid_linear_step[idx] if idx==lidx else el for idx, el in enumerate(self.sizes)])
            return SearchNode(new_sizes)
        def Expand(self):
            # Successors: shrink each augmentable language one step.
            if self.is_terminal:
                return [ self ]
            frontier = [augNode for idx, lang in enumerate(langs_pvts) if lang in augmentable for augNode in [self.Augment(idx)] if augNode != None]
            if not len(frontier):
                self.is_terminal = True
                return [ self ]
            else:
                return frontier
        def __hash__(self):
            return hash((self.sizes, self.is_terminal))
        def __eq__(self, other):
            return other != None and self.sizes == other.sizes and self.is_terminal == other.is_terminal
        def Print(self):
            print(
                tuple(SimpleAugSet(self.sizes)),
                self.score,
                self.is_terminal
            )
    # Search params
    BEAM_WIDTH = 5
    PRUNE_LANG_QLTY_THRESHOLD = 0.02
    t0 = time.time()
    # Actual search
    # Using list representation for beam, sorting beam is cheap as beam-width is small :P
    start_size = budget//2 if is_exp_grid else budget
    theoretic_max_case = tuple([lang_budgets.get(lang, start_size) if lang in augmentable else 0 for idx,lang in enumerate(langs_pvts)])
    beam = [ SearchNode(theoretic_max_case) ]
    while any(not node.is_terminal for node in beam):
        beam = [f for node in beam for f in node.Expand()]
        beam = list(set(beam))
        # batch mode scoring
        inps = np.concatenate([featurizer.featurize(langs_tgts, langs_pvts, np.array(orig_sizes) + np.array(node.sizes)) for node in beam])
        outs = model.predict(inps).reshape(len(beam), len(langs_tgts))
        if objective == "avg": scores = np.average( outs, axis=1, weights=langs_wts )
        else: scores = np.amin( outs, axis=1 )
        for idx, score in enumerate(list(scores)):
            beam[idx].score = score
            beam[idx].tgt_perfs = outs[idx,:]
        SearchNode.ctr_eval += len(beam)
        # Apply constraints on beam candidates
        beam = [
            node
            for node in beam
            if node.Eval() >= min_perf and
                all(lang_perf >= min_lang_perf[lang] for lang, lang_perf in zip(langs_tgts, node.tgt_perfs) if lang in min_lang_perf)
        ]
        # Retain top-K candidates
        beam = sorted(beam, key=lambda x: x.Eval(), reverse=True)
        beam = beam[:BEAM_WIDTH]
        if args.suggestions_verbose:
            print ("Beam:")
            [node.Print() for node in beam]
    # Empty beam means no candidate satisfied the performance floors.
    if len(beam) == 0:
        best = None
    else:
        # Cleanup best solution:
        # Remove lowest aug langs iteratively as long as drop in gains is less than 5%
        # (threshold constant above is 0.02, i.e. 2% relative gain loss).
        best = beam[0]
        best_gains = best.Perf() - baseline_perf
        sorted_aug_langs = sorted([(langs_pvts[idx], size, idx) for idx, size in enumerate(best.sizes)], key=lambda x: x[1])
        for aug_lang, aug_size, aug_idx in sorted_aug_langs:
            while best.sizes[aug_idx] > 0:
                curr = best.Augment(aug_idx)
                curr_gains = curr.Perf() - baseline_perf
                printInfo ("Attempting reduction of lang (%s, %d, %d) with gains delta (%.4f - %.4f)..." % (aug_lang, curr.sizes[aug_idx], aug_idx, curr_gains, best_gains))
                if curr_gains > best_gains or 1.0 - curr_gains / best_gains < PRUNE_LANG_QLTY_THRESHOLD:
                    printInfo ("Reduced lang %s..." % aug_lang)
                    best = curr
                else:
                    break
    t1 = time.time()
    # Reference plan: the budget spread evenly over all augmentable langs.
    equal_aug_sizes = np.array(orig_sizes) + np.array([(budget // len(augmentable)) if lang in augmentable else 0 for lang in langs_pvts])
    return {
        "search-stats": {
            "num_nodes_searched": SearchNode.ctr_eval,
            "time-taken": t1-t0,
            "budget": budget,
            "used_budget": sum(best.sizes) if best != None else 0,
        },
        "search-perfs": {
            "baseline-perf": AvgPerf(orig_sizes),
            "augmented-perf": best.Perf() if best != None else 0,
            "max-perf": AvgPerf( np.array(orig_sizes) + np.array(theoretic_max_case) ),
            "equal-aug-perf": AvgPerf(equal_aug_sizes)
        },
        "lang-perfs": {
            "before-aug": { tgt : score for tgt, score in zip(langs_tgts, TgtPerfs(orig_sizes)) },
            "after-aug": { tgt : score for tgt, score in zip(langs_tgts, TgtPerfs(np.array(orig_sizes) + np.array(best.sizes))) } if best != None else {},
            "equal-aug": { tgt : score for tgt, score in zip(langs_tgts, TgtPerfs(equal_aug_sizes)) }
        },
        "augments": SimpleAugSet(best.sizes) if best != None else {},
        # Per-language contribution: perf drop from one shrink step of each
        # augmented language.
        "ablation": {
            langs_pvts[idx] : best.Augment(idx).Perf() - best.Perf()
            for idx, size in enumerate(best.sizes) if size > 0
        } if best != None else {}
    }
"""
Returns predicted performance in targets for diff pivot data_sizes
"""
def get_user_config_perfs(model, featurizer, langs_list, targets, data_sizes):
    """Score each user-supplied pivot-size config and report the best one.

    Returns the index of the best config (highest mean predicted target
    score; first wins on ties), its non-zero (lang, size) pairs, and the
    per-target predicted scores for that config.
    """
    wanted = set(targets)
    langs_tgts = [lang for lang in langs_list if lang in wanted]
    langs_pvts = langs_list

    def target_perfs(sizes):
        return model.predict(featurizer.featurize(langs_tgts, langs_pvts, sizes))

    # Score every user config; max with key keeps the first maximum.
    scored = [(row_idx, config, np.average(target_perfs(config)))
              for row_idx, config in enumerate(data_sizes.tolist())]
    best_idx, best_config, _ = max(scored, key=lambda item: item[2])
    nonzero = [(lang, best_config[idx])
               for idx, lang in enumerate(langs_pvts) if best_config[idx] > 0]
    return {
        "best-config-idx": best_idx,
        "best-config": nonzero,
        "best-tgt-perfs": list(zip(langs_tgts, target_perfs(best_config)))
    }
"""
Plots heatmap of predicted performance in langs_tgts for given data_sizes
"""
def plot_perf_heatmap(langs_list, langs_pivot, langs_tgts, data_sizes, model, featurizer, output_dir):
    """Render predicted performances as a configs-by-targets heatmap PNG.

    The image is written to ``<timestamp>.png`` (inside ``output_dir`` when
    given). Returns (pltfile, Y) where Y is the predicted score matrix.
    """
    # Silently drop target languages the model does not support.
    langs_tgts = [t for t in langs_tgts if t in langs_list]
    Y = build_acc_matrix(langs_pivot, langs_tgts, featurizer, model, data_sizes)
    print ("vsize" , data_sizes.shape[0])
    fig = plt.figure( figsize=(6, 3), dpi=300 )
    plt.rc('xtick', labelsize=10)
    plt.rc('ytick', labelsize=10)
    # One heatmap row per user config, one column per target language.
    ax = sns.heatmap(Y, cmap='RdYlGn', cbar=True, cbar_kws={'orientation': 'horizontal'}, yticklabels=["Config-%d" % (ConfigIdx+1) for ConfigIdx in range(len(data_sizes))], xticklabels=langs_tgts)
    plt.setp(ax.get_yticklabels(), rotation=0, ha="right", rotation_mode="anchor")
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # Timestamped filename avoids clobbering earlier plots.
    pltfile = '{}.png'.format(int(time.time()))
    if output_dir:
        pltfile = os.path.join(output_dir, pltfile)
    plt.savefig(pltfile, bbox_inches='tight')
    return pltfile, Y
def litmus_main(args):
    """Top-level driver: obtain a trained predictor, then run the
    requested inference modes ("heatmap" and/or "suggestions") and return
    all results in a single dict.
    """
    # Need either a saved state to load or raw scores to train from.
    assert(args.load_state or args.scores_file)
    if args.load_state:
        # Load trained model + metadata
        with open(args.load_state, "rb") as pkl_f:
            model, featurizer, langs_list, langs_pivot, langs_target, avg_error, baseline_error = pickle.load(pkl_f)
    else:
        # Prepare data
        model, langs_list, feat_names, featurizer, common_feats, X, Y, langs_pivot, langs_target, examples = prepare_data(args)
        # Train prediction model
        model, (avg_error, std_error), baseline_error, errors, examples_indices, predictions = train_model(X, Y, feat_names, common_feats, langs_pivot, langs_target, args)
        if args.save_state:
            # Persist everything needed to skip retraining next run.
            with open(args.save_state, "wb") as pkl_f:
                pickle.dump([model, featurizer, langs_list, langs_pivot, langs_target, avg_error, baseline_error], pkl_f)
    ret_val = {
        "error": avg_error, # Average Error Computed by specified Method
        "langs_pivot": langs_pivot, # Pivot Languages
        "langs_target": langs_target, # Target Languages
        "baseline_error": baseline_error, # Error when using Mean Baseline Method instead of training model
    }
    """
    Inference using trained model
    """
    if args.mode:
        data_sizes, langs_tgts = prepare_inf_data(args, langs_list)
        # Set basline perfs for all target modes
        ret_val["user-config-perfs"] = get_user_config_perfs(model, featurizer, langs_list, langs_tgts, data_sizes)
        pprint (ret_val["user-config-perfs"])
        if "heatmap" in args.mode:
            pltfile, Y = plot_perf_heatmap(langs_list, langs_pivot, langs_tgts, data_sizes, model, featurizer, args.output_dir)
            ret_val["heatmapFile"] = pltfile
            ret_val["acc_matrix"] = {
                "index": langs_list,
                "matrix": Y.tolist()
            }
        if "suggestions" in args.mode:
            # Get baseline row for pivot sizes
            # (explicit --suggestions_pivots index, else best user config).
            pivot_row_idx = int(args.suggestions_pivots) if args.suggestions_pivots != "" else ret_val["user-config-perfs"]["best-config-idx"]
            pivot_sizes = data_sizes[pivot_row_idx]
            ret_val["suggestions"] = \
                find_suggestions(args,
                    model, featurizer, langs_list,
                    args.suggestions_budget, args.suggestions_langbudget,
                    args.suggestions_targets, pivot_sizes, args.suggestions_augmentable, args.suggestions_weights,
                    data_sizes,
                    args.suggestions_grid == "exponential",
                    args.suggestions_objective,
                    args.suggestions_minperf, args.suggestions_minlangperf)
            ret_val["suggestions"]["suggestions_row"] = pivot_row_idx
            pprint (ret_val["suggestions"])
    return ret_val
def parse_args(args):
    """Build the LITMUS CLI argument parser and parse *args*.

    *args* is the argv list WITHOUT the program name (callers pass
    ``sys.argv[1:]``). Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser("LITMUS Tool")
    # Options for loading training data / model
    parser.add_argument("model_name", type=str, default="xlmr", help="name of model to use", choices=SUPPORTED_MODELS)
    parser.add_argument("--scores_file", default=None, type=str, help="path of json file containing scores to train on")
    parser.add_argument("--train_format", default="json", help="Format of the training data", choices=["json", "csv"])
    parser.add_argument("--save_state", default=None, type=str, help="Save state of training of model to pickle file")
    parser.add_argument("--load_state", default=None, type=str, help="Load trained model from pickle file")
    # Feature options
    parser.add_argument("--precomputed_features", type=str, default=PRECOMPUTED_FEATURE_FILE, help="Path to precomputed-features file.")
    parser.add_argument("--pivot_features", type=str, default="none", choices=["none", "all", "data_only"], help="What features based on pivot langs to use")
    parser.add_argument("--use_all_langs", action="store_true", help="Add features based on all langs the tool supports (Needed for transfer)")
    parser.add_argument("--common_scaling", action="store_true", help="Common min max scaling params that are pvt dependent(data size, type overlap, distance)")
    # Model training options
    parser.add_argument("--training_algorithm", type=str, default="xgboost", help="which regressor to use", choices=["xgboost", "mlp"])
    parser.add_argument("--error_method", type=str, default="split", choices=["LOO", "LOTO", "split", "kfold", "manual_split"])
    parser.add_argument("--dont_print", action="store_false", dest="print", help="disable any form of printing")
    # Experimental Options
    parser.add_argument("--train_indices", default=None, help="train indices if error_method is manual_split")
    parser.add_argument("--test_indices", default=None, help="test indices if error_method is manual_split")
    parser.add_argument("--model", default=None, help="Model to load") # hack - for calling from external script using actual model
    parser.add_argument("--load_model", type=str, default=None, help="Load neural network model from saved file")
    parser.add_argument("--save_model", type=str, default=None, help="Save neural network model to file")
    # Options for inferencing
    parser.add_argument("--data_sizes", default="", help="Pivot data-size configs (semi-colon separated configs, each config itself being comma-separated key-value pairs)")
    parser.add_argument("--mode", type=str, default=None, nargs='+', help="Output modes (comma-separated). Choose from following: {heatmap, suggestions}.")
    # Options for heatmap comparison of multiple user-configs
    parser.add_argument("--output_dir", type=str, default=None, help="Overrride output directory")
    parser.add_argument("--heatmap_targets", type=str, default=None, help="Targets for heatmap. Overrides suggestions_targets (which is used by deafult)")
    # Options for suggestions finding
    parser.add_argument("--suggestions_budget", type=int, default=0, help="Budget for finding suggestions of which languages to add data for (0 to disable)")
    parser.add_argument("--suggestions_langbudget", type=str, default="", help="Language-specific budget for finding suggestions (overrrides suggestions_budget for these langs, comma-separated list of key:value pairs)")
    parser.add_argument("--suggestions_targets", type=str, default="", help="Targets being considered (comma-separated)")
    parser.add_argument("--suggestions_weights", type=str, default="", help="Target weights for avg perf objective (comma-separated list of key:value pairs, default wt=1)")
    parser.add_argument("--suggestions_pivots", type=str, default="", help="Index of desired row in data_sizes")
    parser.add_argument("--suggestions_augmentable", type=str, default="", help="Set of augmentable languages (comma-separated)")
    parser.add_argument("--suggestions_grid", type=str, default="exponential", choices=["exponential", "linear"], help="Search space grid to use for suggestions")
    parser.add_argument("--suggestions_objective", type=str, default="avg", help="Objective function to be used for finding suggestions", choices=["avg", "min"])
    parser.add_argument("--suggestions_minperf", type=str, default="", help="Minimum acceptable average performance across tgts")
    parser.add_argument("--suggestions_minlangperf", type=str, default="", help="Minimum acceptable performance for given tgts (comma-separated list of key:value pairs)")
    parser.add_argument("--suggestions_verbose", action="store_true", help="Verbose logging of search")
    return parser.parse_args(args)
if __name__ == "__main__":
    # CLI entry point: parse flags from argv and run the tool.
    args = parse_args(sys.argv[1:])
litmus_main(args) | [
"pickle.dump",
"argparse.ArgumentParser",
"numpy.amin",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.mean",
"pickle.load",
"pprint.pprint",
"xgboost.XGBRegressor",
"os.path.join",
"pandas.DataFrame",
"numpy.std",
"numpy.max",
"matplotlib.pyplot.rc",
"re.sub",
"pkgutil.get_data... | [((8445, 8468), 'numpy.concatenate', 'np.concatenate', (['X_array'], {}), '(X_array)\n', (8459, 8468), True, 'import numpy as np\n'), ((14076, 14100), 'numpy.mean', 'np.mean', (['baseline_errors'], {}), '(baseline_errors)\n', (14083, 14100), True, 'import numpy as np\n'), ((18584, 18595), 'time.time', 'time.time', ([], {}), '()\n', (18593, 18595), False, 'import time\n'), ((21238, 21249), 'time.time', 'time.time', ([], {}), '()\n', (21247, 21249), False, 'import time\n'), ((24064, 24099), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)', 'dpi': '(300)'}), '(figsize=(6, 3), dpi=300)\n', (24074, 24099), True, 'import matplotlib.pyplot as plt\n'), ((24106, 24135), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(10)'}), "('xtick', labelsize=10)\n", (24112, 24135), True, 'import matplotlib.pyplot as plt\n'), ((24140, 24169), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(10)'}), "('ytick', labelsize=10)\n", (24146, 24169), True, 'import matplotlib.pyplot as plt\n'), ((24659, 24700), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pltfile'], {'bbox_inches': '"""tight"""'}), "(pltfile, bbox_inches='tight')\n", (24670, 24700), True, 'import matplotlib.pyplot as plt\n'), ((27699, 27737), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""LITMUS Tool"""'], {}), "('LITMUS Tool')\n", (27722, 27737), False, 'import argparse\n'), ((1544, 1619), 'pandas.DataFrame', 'pd.DataFrame', (["precomputed['regression_feats'][model]"], {'index': 'self.langs_list'}), "(precomputed['regression_feats'][model], index=self.langs_list)\n", (1556, 1619), True, 'import pandas as pd\n'), ((2693, 2726), 'numpy.concatenate', 'np.concatenate', (['pivot_fts'], {'axis': '(1)'}), '(pivot_fts, axis=1)\n', (2707, 2726), True, 'import numpy as np\n'), ((3439, 3457), 'numpy.isscalar', 'np.isscalar', (['scale'], {}), '(scale)\n', (3450, 3457), True, 'import numpy as np\n'), ((9661, 9691), 're.sub', 're.sub', 
(['"""\\\\s+"""', '""""""', 'data_sizes'], {}), "('\\\\s+', '', data_sizes)\n", (9667, 9691), False, 'import re\n'), ((9953, 9973), 'numpy.array', 'np.array', (['data_sizes'], {}), '(data_sizes)\n', (9961, 9973), True, 'import numpy as np\n'), ((14023, 14038), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (14030, 14038), True, 'import numpy as np\n'), ((14040, 14054), 'numpy.std', 'np.std', (['errors'], {}), '(errors)\n', (14046, 14054), True, 'import numpy as np\n'), ((15062, 15083), 're.sub', 're.sub', (['"""\\\\s+"""', '""""""', 's'], {}), "('\\\\s+', '', s)\n", (15068, 15083), False, 'import re\n'), ((16454, 16474), 'numpy.array', 'np.array', (['orig_sizes'], {}), '(orig_sizes)\n', (16462, 16474), True, 'import numpy as np\n'), ((21273, 21293), 'numpy.array', 'np.array', (['orig_sizes'], {}), '(orig_sizes)\n', (21281, 21293), True, 'import numpy as np\n'), ((24621, 24654), 'os.path.join', 'os.path.join', (['output_dir', 'pltfile'], {}), '(output_dir, pltfile)\n', (24633, 24654), False, 'import os\n'), ((26294, 26330), 'pprint.pprint', 'pprint', (["ret_val['user-config-perfs']"], {}), "(ret_val['user-config-perfs'])\n", (26300, 26330), False, 'from pprint import pprint\n'), ((1221, 1285), 'pkgutil.get_data', 'pkgutil.get_data', (['__name__', '"""data/precomputed_features.min.json"""'], {}), "(__name__, 'data/precomputed_features.min.json')\n", (1237, 1285), False, 'import pkgutil\n'), ((6364, 6376), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6373, 6376), False, 'import json\n'), ((7433, 7462), 'pandas.read_csv', 'pd.read_csv', (['args.scores_file'], {}), '(args.scores_file)\n', (7444, 7462), True, 'import pandas as pd\n'), ((8477, 8494), 'numpy.array', 'np.array', (['Y_array'], {}), '(Y_array)\n', (8485, 8494), True, 'import numpy as np\n'), ((10082, 10111), 're.sub', 're.sub', (['"""\\\\s+"""', '""""""', 'tgt_langs'], {}), "('\\\\s+', '', tgt_langs)\n", (10088, 10111), False, 'import re\n'), ((14320, 14341), 'pickle.dump', 'pickle.dump', 
(['model', 'f'], {}), '(model, f)\n', (14331, 14341), False, 'import pickle\n'), ((19375, 19418), 'numpy.average', 'np.average', (['outs'], {'axis': '(1)', 'weights': 'langs_wts'}), '(outs, axis=1, weights=langs_wts)\n', (19385, 19418), True, 'import numpy as np\n'), ((19462, 19483), 'numpy.amin', 'np.amin', (['outs'], {'axis': '(1)'}), '(outs, axis=1)\n', (19469, 19483), True, 'import numpy as np\n'), ((24570, 24581), 'time.time', 'time.time', ([], {}), '()\n', (24579, 24581), False, 'import time\n'), ((25060, 25078), 'pickle.load', 'pickle.load', (['pkl_f'], {}), '(pkl_f)\n', (25071, 25078), False, 'import pickle\n'), ((25535, 25644), 'pickle.dump', 'pickle.dump', (['[model, featurizer, langs_list, langs_pivot, langs_target, avg_error,\n baseline_error]', 'pkl_f'], {}), '([model, featurizer, langs_list, langs_pivot, langs_target,\n avg_error, baseline_error], pkl_f)\n', (25546, 25644), False, 'import pickle\n'), ((27610, 27640), 'pprint.pprint', 'pprint', (["ret_val['suggestions']"], {}), "(ret_val['suggestions'])\n", (27616, 27640), False, 'from pprint import pprint\n'), ((1170, 1184), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1179, 1184), False, 'import json\n'), ((4247, 4262), 'numpy.array', 'np.array', (['group'], {}), '(group)\n', (4255, 4262), True, 'import numpy as np\n'), ((4303, 4332), 'numpy.min', 'np.min', (['self.data_min_[group]'], {}), '(self.data_min_[group])\n', (4309, 4332), True, 'import numpy as np\n'), ((4373, 4402), 'numpy.max', 'np.max', (['self.data_max_[group]'], {}), '(self.data_max_[group])\n', (4379, 4402), True, 'import numpy as np\n'), ((4989, 5003), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5000, 5003), False, 'import pickle\n'), ((5044, 5064), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (5057, 5064), False, 'import copy\n'), ((11237, 11253), 'numpy.mean', 'np.mean', (['Y_train'], {}), '(Y_train)\n', (11244, 11253), True, 'import numpy as np\n'), ((2640, 2659), 'numpy.array', 'np.array', 
(['task_size'], {}), '(task_size)\n', (2648, 2659), True, 'import numpy as np\n'), ((21800, 21820), 'numpy.array', 'np.array', (['orig_sizes'], {}), '(orig_sizes)\n', (21808, 21820), True, 'import numpy as np\n'), ((21823, 21851), 'numpy.array', 'np.array', (['theoretic_max_case'], {}), '(theoretic_max_case)\n', (21831, 21851), True, 'import numpy as np\n'), ((2440, 2463), 'numpy.array', 'np.array', (['type_overlaps'], {}), '(type_overlaps)\n', (2448, 2463), True, 'import numpy as np\n'), ((2506, 2534), 'numpy.array', 'np.array', (['syntactic_distance'], {}), '(syntactic_distance)\n', (2514, 2534), True, 'import numpy as np\n'), ((12549, 12565), 'numpy.mean', 'np.mean', (['Y_train'], {}), '(Y_train)\n', (12556, 12565), True, 'import numpy as np\n'), ((12907, 12946), 'numpy.delete', 'np.delete', (['X', 'indices_to_delete'], {'axis': '(0)'}), '(X, indices_to_delete, axis=0)\n', (12916, 12946), True, 'import numpy as np\n'), ((12967, 13006), 'numpy.delete', 'np.delete', (['Y', 'indices_to_delete'], {'axis': '(0)'}), '(Y, indices_to_delete, axis=0)\n', (12976, 13006), True, 'import numpy as np\n'), ((17125, 17145), 'numpy.array', 'np.array', (['orig_sizes'], {}), '(orig_sizes)\n', (17133, 17145), True, 'import numpy as np\n'), ((17148, 17168), 'numpy.array', 'np.array', (['self.sizes'], {}), '(self.sizes)\n', (17156, 17168), True, 'import numpy as np\n'), ((17208, 17228), 'numpy.array', 'np.array', (['orig_sizes'], {}), '(orig_sizes)\n', (17216, 17228), True, 'import numpy as np\n'), ((17231, 17251), 'numpy.array', 'np.array', (['self.sizes'], {}), '(self.sizes)\n', (17239, 17251), True, 'import numpy as np\n'), ((19199, 19219), 'numpy.array', 'np.array', (['orig_sizes'], {}), '(orig_sizes)\n', (19207, 19219), True, 'import numpy as np\n'), ((19222, 19242), 'numpy.array', 'np.array', (['node.sizes'], {}), '(node.sizes)\n', (19230, 19242), True, 'import numpy as np\n'), ((5713, 5814), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {'objective': 
'"""reg:squarederror"""', 'learning_rate': '(0.1)', 'n_estimators': '(100)', 'max_depth': '(10)'}), "(objective='reg:squarederror', learning_rate=0.1,\n n_estimators=100, max_depth=10)\n", (5729, 5814), True, 'import xgboost as xgb\n'), ((12084, 12100), 'numpy.mean', 'np.mean', (['Y_train'], {}), '(Y_train)\n', (12091, 12100), True, 'import numpy as np\n'), ((13589, 13612), 'numpy.delete', 'np.delete', (['X', '_'], {'axis': '(0)'}), '(X, _, axis=0)\n', (13598, 13612), True, 'import numpy as np\n'), ((13633, 13656), 'numpy.delete', 'np.delete', (['Y', '_'], {'axis': '(0)'}), '(Y, _, axis=0)\n', (13642, 13656), True, 'import numpy as np\n'), ((13333, 13347), 'numpy.mean', 'np.mean', (['Y_reg'], {}), '(Y_reg)\n', (13340, 13347), True, 'import numpy as np\n'), ((22126, 22146), 'numpy.array', 'np.array', (['orig_sizes'], {}), '(orig_sizes)\n', (22134, 22146), True, 'import numpy as np\n'), ((22149, 22169), 'numpy.array', 'np.array', (['best.sizes'], {}), '(best.sizes)\n', (22157, 22169), True, 'import numpy as np\n'), ((13969, 13983), 'numpy.mean', 'np.mean', (['Y_reg'], {}), '(Y_reg)\n', (13976, 13983), True, 'import numpy as np\n')] |
from numpy.random import randint

# Base configuration shared by every experiment variant below.  Each variant
# copies this dict and overrides only the keys it needs via dict.update().
default_network = {
    "type": "DFA",
    "dataset_name": "mnist",
    "sequence": "fcReLu",
    "cost_function": "softmax_cross_entropy",
    "learning_rate": 0.01,
    "minimize_manually": True,
    "gather_stats": False,
    "save_graph": False,
    "memory_only": False,
    "restore_model": False,
    "save_model": False,
    "restore_model_path": None,
    "save_model_path": None,
    "minimum_accuracy": [(1, 1)],
    "batch_size": 10,
    "epochs": 1,
    "eval_period": 1000,
    "stat_period": 100,
    # Fresh random seed per import so repeated runs differ unless pinned.
    "seed": randint(1, 100000000),
}

fcSigmoid = dict(default_network)
fcSigmoid.update({
    "sequence": "fcSigmoid",
    "cost_function": "mean_squared_error",
    # BUG FIX: key was misspelled "learning_reate", so the intended rate
    # was never applied and the default 0.01 was silently used.
    "learning_rate": 0.05,
    "batch_size": 10,
    "epochs": 30,
    "minimize_manually": True,
})

fcReLu = dict(default_network)
fcReLu.update({
    "sequence": "fcReLu",
    "cost_function": "softmax_cross_entropy",
    # BUG FIX: same "learning_reate" misspelling as above.
    "learning_rate": 0.01,
    "batch_size": 10,
    "epochs": 30,
    "minimize_manually": True,
})

liao_network = dict(default_network)
# BUG FIX: this was `liao_network = {...}`, which discarded the copy of
# default_network made on the previous line and so dropped every default
# key (seed, gather_stats, eval_period, ...).  Use update() like all other
# variants.
liao_network.update({
    "type": "DFA",
    "dataset_name": "cifar10",
    "sequence": "liao_mnist",
    "cost_function": "softmax_cross_entropy",
    "learning_rate": 0.00001,
    "minimize_manually": True,
    "batch_size": 100,
    "epochs": 150,
})

vgg_16 = dict(default_network)
vgg_16.update({
    "minimum_accuracy": [(10, 12), (50, 20)],
    "type": "BP",
    "sequence": "vgg_16",
    "epochs": 100,
    "cost_function": "softmax_cross_entropy",
    "dataset_name": "cifar10"
})

vgg_16_DFA = dict(vgg_16)
vgg_16_DFA.update({
    "type": "DFA",
    "minimum_accuracy": [(20, 20), (50, 40)],
})

exp_mnist_fc = dict(default_network)
exp_mnist_fc.update({
    "sequence": "experiment_mnist_fc",
    "batch_size": 10,
    "epochs": 20
})

exp_mnist_conv = dict(default_network)
exp_mnist_conv.update({
    "sequence": "experiment_mnist_conv",
    "learning_rate": 0.005,
    "batch_size": 100,
    "epochs": 150
})

exp_cifar_vgg_bp = dict(default_network)
exp_cifar_vgg_bp.update({
    "type": "BP",
    "dataset_name": "cifar10",
    "sequence": "vgg_16",
    "cost_function": "softmax_cross_entropy",
    "learning_rate": 0.001,
    "memory_only": False,
    "minimize_manually": False,
    "minimum_accuracy": [(5, 20)],
    "batch_size": 100,
    "epochs": 80,
})

exp_cifar_vgg_fa = dict(exp_cifar_vgg_bp)
exp_cifar_vgg_fa.update({
    "type": "FA",
    "learning_rate": 0.000005
})

exp_cifar_vgg_dfa = dict(exp_cifar_vgg_fa)
exp_cifar_vgg_dfa.update({
    "type": "DFA"
})

exp_cifar_vgg_memdfa = dict(exp_cifar_vgg_fa)
exp_cifar_vgg_memdfa.update({
    "type": "DFAMEM"
})

exp_cifar_liao_bp = dict(default_network)
exp_cifar_liao_bp.update({
    "type": "BP",
    "dataset_name": "cifar10",
    "sequence": "liao_cifar",
    "minimum_accuracy": [(3, 11), (10, 20)],
    "learning_rate": 0.001,
    "batch_size": 100,
    "epochs": 100
})

exp_cifar_liao_fa = dict(exp_cifar_liao_bp)
exp_cifar_liao_fa.update({
    "type": "FA",
    "learning_rate": 0.00001,
})

exp_cifar_liao_dfa = dict(exp_cifar_liao_bp)
exp_cifar_liao_dfa.update({
    "type": "DFA",
    "learning_rate": 0.00001,
})

exp_cifar_liao_memdfa = dict(exp_cifar_liao_bp)
exp_cifar_liao_memdfa.update({
    "type": "DFAMEM",
    "learning_rate": 0.00001,
})
"numpy.random.randint"
] | [((563, 584), 'numpy.random.randint', 'randint', (['(1)', '(100000000)'], {}), '(1, 100000000)\n', (570, 584), False, 'from numpy.random import randint\n')] |
import numpy as np
from matplotlib_scalebar.scalebar import ScaleBar
from matplotlib import pyplot as plt
def addScaleBar(ax, scale, location='upper right'):
    """Attach a scale bar to *ax* and render the current figure.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Axis that receives the scale bar.
    scale : float
        Scale in meters per pixel; a falsy value (``None``, ``0``) skips
        drawing entirely.
    location : str, optional
        Placement of the bar, e.g. ``'upper right'`` or ``'lower left'``.
    """
    if not scale:
        return
    bar = ScaleBar(scale, location=location)
    ax.add_artist(bar)
    plt.show()
def addArrows(ax, c='r', lenx=0.04, leny=0.06, flip=False):
    """Draw coordinate-definition arrows with labels in the axes corner.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Matplotlib axis to annotate.
    c : str, optional
        Color used for both arrows and labels.
    lenx, leny : float, optional
        Arrow lengths along x and y, in axes-fraction units.
    flip : bool, optional
        If True, swap which arrow is labelled 'R' vs 'C'.
    """
    x0, y0 = 0.02, 0.02
    # (arrow tip, arrow tail): first the horizontal arrow, then the vertical.
    arrow_endpoints = [
        ((x0 - 0.002, y0), (x0 + lenx, y0)),
        ((x0, y0 - 0.002), (x0, y0 + leny)),
    ]
    for tip, tail in arrow_endpoints:
        ax.annotate(
            "",
            xy=tip,
            xytext=tail,
            xycoords='axes fraction',
            c=c,
            arrowprops=dict(arrowstyle="<-", color=c, lw=2),
        )
    label_positions = [(0.011, y0 + leny), (x0 + lenx, 0.01)]
    label_text = 'CR' if flip else 'RC'
    for text, position in zip(label_text, label_positions):
        ax.annotate(
            text,
            xy=position,
            xycoords='axes fraction',
            fontsize=14,
            fontweight='bold',
            c=c,
        )
def plot(img, title, scale=None, location=None, ax=None):
    """Render a single grayscale image with optional scale bar and arrows.

    Parameters
    ----------
    img : array
        Image data to display.
    title : str
        Title of the plot.
    scale : float, optional
        Scale in meters per pixel; when given, a scale bar is drawn.
    location : str, optional
        Scale-bar placement, e.g. ``'lower right'``.
    ax : matplotlib.axes.Axes, optional
        Axis to draw into; a new figure is created when omitted.
    """
    if ax is None:
        ax = plt.subplots(figsize=(10, 6))[1]
    ax.imshow(img, cmap='gray')
    ax.set_title(title, fontsize=14)
    ax.set_axis_off()
    if scale is not None:
        addScaleBar(ax, scale=scale, location=location)
    addArrows(ax)
def plot_comparison(img1, title1, img2, title2, scale=None, location=None):
    """Show two images side by side with a shared view.

    The optional scale bar is drawn on the right-hand image only.

    Parameters
    ----------
    img1, img2 : array
        Image data for the left and right panels.
    title1, title2 : str
        Titles for the left and right panels.
    scale : float, optional
        Scale in meters per pixel.
    location : str, optional
        Scale-bar placement, e.g. ``'lower right'``.
    """
    fig, axes = plt.subplots(
        ncols=2, figsize=(14, 7), sharex=True, sharey=True
    )
    left, right = axes
    plot(img1, title1, ax=left)
    plot(img2, title2, scale=scale, location=location, ax=right)
    fig.tight_layout()
def plot_hist(arr):
    """Plot a 60-bin histogram of gray values in [0, 2], ignoring NaNs.

    Parameters
    ----------
    arr : numpy.ndarray
        Image data; NaN entries are dropped before binning.

    Returns
    -------
    tuple
        The ``(counts, bin_edges, patches)`` triple from ``Axes.hist``.
    """
    axis = plt.subplots(figsize=(8, 5))[1]
    finite_values = arr[~np.isnan(arr)].flatten()
    result = axis.hist(finite_values, bins=60, range=(0, 2))
    axis.set_title('Image Histogram', fontsize=14)
    axis.set_xlabel('Gray value', fontsize=12)
    axis.set_ylabel('Frequency', fontsize=12)
    return result
| [
"matplotlib_scalebar.scalebar.ScaleBar",
"matplotlib.pyplot.subplots",
"numpy.isnan",
"matplotlib.pyplot.show"
] | [((2820, 2884), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(14, 7)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=2, figsize=(14, 7), sharex=True, sharey=True)\n', (2832, 2884), True, 'from matplotlib import pyplot as plt\n'), ((3064, 3092), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (3076, 3092), True, 'from matplotlib import pyplot as plt\n'), ((340, 374), 'matplotlib_scalebar.scalebar.ScaleBar', 'ScaleBar', (['scale'], {'location': 'location'}), '(scale, location=location)\n', (348, 374), False, 'from matplotlib_scalebar.scalebar import ScaleBar\n'), ((415, 425), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (423, 425), True, 'from matplotlib import pyplot as plt\n'), ((2055, 2084), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2067, 2084), True, 'from matplotlib import pyplot as plt\n'), ((3122, 3135), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (3130, 3135), True, 'import numpy as np\n')] |
# plot_mesa_hr.py
# Plots the output of MESA onto an HR diagram.
import sys
import os
import string
import numpy as np
import pandas as pd
from astropy.io import ascii
from astropy.io import fits
import matplotlib.pyplot as plt

from read_mesa import read_history

# Location of the MESA run's history output.
file_loc = '/Users/galaxies-air/Courses/Stars/ps1_mesa/LOGS/'
data_df = read_history(file_loc)

# Shared font/tick sizing for the figure.
axisfont = 14
ticksize = 12
ticks = 8
titlefont = 24
legendfont = 14
textfont = 16

fig, ax = plt.subplots(figsize=(8, 7))

# MESA stores log10(Teff) and log10(L); undo the logs for plotting, and
# color each point by log10 of the stellar age.
plot = ax.scatter(10**data_df['log_Teff'], 10**data_df['log_L'],
                  c=np.log10(data_df['star_age']), marker='o')
plt.colorbar(plot, label='log(Age)')

plt.yscale('log')
plt.xscale('log')
ax.set_xlim(10000, 2000)
ax.set_ylim(0.1, 1000)
# NOTE(review): set_xlim(10000, 2000) already reverses the x-axis, so this
# invert flips it back to ascending temperature — confirm which orientation
# (HR diagrams conventionally have Teff decreasing to the right) is intended.
ax.invert_xaxis()

# Raw strings keep LaTeX backslash sequences (e.g. \odot) from being parsed
# as (invalid) Python escape sequences.
ax.set_xlabel(r'T$_{eff}$ (K)', fontsize=axisfont)
ax.set_ylabel(r'L (L$_\odot$)', fontsize=axisfont)
ax.tick_params(labelsize=ticksize, size=ticks)

fig.savefig('/Users/galaxies-air/Courses/Stars/ps1_mesa/ps1_mesa_fig.pdf')
plt.close('all')
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"read_mesa.read_history",
"numpy.log10",
"matplotlib.pyplot.subplots"
] | [((337, 359), 'read_mesa.read_history', 'read_history', (['file_loc'], {}), '(file_loc)\n', (349, 359), False, 'from read_mesa import read_history\n'), ((455, 483), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (467, 483), True, 'import matplotlib.pyplot as plt\n'), ((613, 649), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['plot'], {'label': '"""log(Age)"""'}), "(plot, label='log(Age)')\n", (625, 649), True, 'import matplotlib.pyplot as plt\n'), ((650, 667), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (660, 667), True, 'import matplotlib.pyplot as plt\n'), ((668, 685), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (678, 685), True, 'import matplotlib.pyplot as plt\n'), ((974, 990), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (983, 990), True, 'import matplotlib.pyplot as plt\n'), ((570, 599), 'numpy.log10', 'np.log10', (["data_df['star_age']"], {}), "(data_df['star_age'])\n", (578, 599), True, 'import numpy as np\n')] |
from tensorflow.keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
# # 이미지 제네레이터를 선언합니다.
# train_datagen = ImageDataGenerator(horizontal_flip=True,
# vertical_flip=True,
# shear_range=0.5,
# brightness_range=[0.5, 1.5],
# zoom_range=0.2,
# width_shift_range=0.1,
# height_shift_range=0.1,
# rotation_range=30,
# fill_mode='nearest'
# )
#
# # 햄버거 사진을 불러옵니다.
# hamburger = img_to_array(load_img('d:/data/햄버거.png')).astype(np.uint8)
# plt.figure();
# plt.title('original image')
# plt.imshow(hamburger)
#
# # 제네레이터를 사용해서 이미지를 변환합니다.
# hamburger = hamburger.reshape((1,) + hamburger.shape)
# train_generator = train_datagen.flow(hamburger, batch_size=1)
#
# fig = plt.figure(figsize=(5, 5))
# fig.suptitle('augmented image')
#
# for i in range(9):
# data = next(train_generator) # 제네레이터에게서 이미지를 받아옵니다.
# image = data[0]
# plt.subplot(3, 3, i + 1)
# plt.xticks([])
# plt.yticks([])
# plt.imshow(np.array(image, dtype=np.uint8), cmap='gray')
#
# plt.show()
# 2. Train on CIFAR-10 using data augmentation.
from tensorflow.keras.datasets import cifar10
import numpy as np

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Compute the mean and standard deviation per channel (over all images,
# rows and columns).
x_mean = np.mean(x_train, axis=(0, 1, 2))
x_std = np.std(x_train, axis=(0, 1, 2))

# Standardize both splits using the training-set statistics only.
x_train = (x_train - x_mean) / x_std
x_test = (x_test - x_mean) / x_std

# 3. Train the model using an image generator.
from sklearn.model_selection import train_test_split

# Hold out 30% of the training data for validation.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
                                                 test_size=0.3, random_state=777)
print('data ready~')

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Augmentation applied to training batches only: flips, zoom, shifts and
# rotations, with 'nearest' filling for pixels exposed by the transforms.
train_datagen = ImageDataGenerator(horizontal_flip=True,
                                   zoom_range=0.2,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   rotation_range=30,
                                   fill_mode='nearest'
                                   )
# No augmentation is applied to the validation dataset.
val_datagen = ImageDataGenerator()

batch_size = 32
train_generator = train_datagen.flow(x_train, y_train,
                                     batch_size=batch_size)
val_generator = val_datagen.flow(x_val, y_val,
                                batch_size=batch_size)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization
from tensorflow.keras.optimizers import Adam

# VGG-style stack: three Conv-BN-ReLU pairs with max-pooling in between
# (32 -> 64 -> 128 filters), then a dense classifier head.
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=3, padding='same', input_shape=(32, 32, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=32, kernel_size=3, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding='same'))
model.add(Conv2D(filters=64, kernel_size=3, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=64, kernel_size=3, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding='same'))
model.add(Conv2D(filters=128, kernel_size=3, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=128, kernel_size=3, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding='same'))
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax'))

# Labels are integer class ids, hence the sparse cross-entropy loss.
model.compile(optimizer=Adam(1e-4),
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
def get_step(train_len, batch_size):
    """Return the number of batches needed to cover ``train_len`` samples.

    This is ceiling division: a final partial batch still counts as one
    step, so every sample is seen once per epoch.

    Parameters
    ----------
    train_len : int
        Total number of samples (non-negative).
    batch_size : int
        Samples per batch (positive).

    Returns
    -------
    int
        Steps per epoch.
    """
    # Integer ceiling division; equivalent to the original modulo branch
    # but without the conditional.
    return (train_len + batch_size - 1) // batch_size
# Train with the augmenting generator; step counts are derived from the
# split sizes so each epoch covers every sample once, including the final
# partial batch.
history = model.fit(train_generator,
                    epochs=10,
                    steps_per_epoch=get_step(len(x_train), batch_size),
                    validation_data=val_generator,
                    validation_steps=get_step(len(x_val), batch_size))
# # 4. 학습 과정 시각화 하기
#
# import matplotlib.pyplot as plt
#
# his_dict = history.history
# loss = his_dict['loss']
# val_loss = his_dict['val_loss']
#
# epochs = range(1, len(loss) + 1)
# fig = plt.figure(figsize=(10, 5))
#
# # 훈련 및 검증 손실 그리기
# ax1 = fig.add_subplot(1, 2, 1)
# ax1.plot(epochs, loss, color='blue', label='train_loss')
# ax1.plot(epochs, val_loss, color='orange', label='val_loss')
# ax1.set_title('train and val loss')
# ax1.set_xlabel('epochs')
# ax1.set_ylabel('loss')
# ax1.legend()
#
# acc = his_dict['acc']
# val_acc = his_dict['val_acc']
#
# # 훈련 및 검증 정확도 그리기
# ax2 = fig.add_subplot(1, 2, 2)
# ax2.plot(epochs, acc, color='blue', label='train_acc')
# ax2.plot(epochs, val_acc, color='orange', label='val_acc')
# ax2.set_title('train and val acc')
# ax2.set_xlabel('epochs')
# ax2.set_ylabel('acc')
# ax2.legend()
#
# plt.show()
#
#
| [
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"numpy.std",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.datasets.cifar10.load_data",
"numpy.mean",
"tensorflo... | [((1507, 1526), 'tensorflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (1524, 1526), False, 'from tensorflow.keras.datasets import cifar10\n'), ((1561, 1593), 'numpy.mean', 'np.mean', (['x_train'], {'axis': '(0, 1, 2)'}), '(x_train, axis=(0, 1, 2))\n', (1568, 1593), True, 'import numpy as np\n'), ((1602, 1633), 'numpy.std', 'np.std', (['x_train'], {'axis': '(0, 1, 2)'}), '(x_train, axis=(0, 1, 2))\n', (1608, 1633), True, 'import numpy as np\n'), ((1825, 1892), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train', 'y_train'], {'test_size': '(0.3)', 'random_state': '(777)'}), '(x_train, y_train, test_size=0.3, random_state=777)\n', (1841, 1892), False, 'from sklearn.model_selection import train_test_split\n'), ((2050, 2198), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'horizontal_flip': '(True)', 'zoom_range': '(0.2)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'rotation_range': '(30)', 'fill_mode': '"""nearest"""'}), "(horizontal_flip=True, zoom_range=0.2, width_shift_range=\n 0.1, height_shift_range=0.1, rotation_range=30, fill_mode='nearest')\n", (2068, 2198), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2447, 2467), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '()\n', (2465, 2467), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2908, 2920), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2918, 2920), False, 'from tensorflow.keras.models import Sequential\n'), ((2931, 3005), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""', 'input_shape': '(32, 32, 3)'}), "(filters=32, kernel_size=3, padding='same', input_shape=(32, 32, 3))\n", (2937, 3005), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, 
BatchNormalization\n'), ((3017, 3037), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3035, 3037), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3049, 3067), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3059, 3067), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3079, 3128), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=32, kernel_size=3, padding='same')\n", (3085, 3128), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3140, 3160), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3158, 3160), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3172, 3190), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3182, 3190), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3202, 3256), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=2, padding='same')\n", (3211, 3256), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3268, 3317), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=64, kernel_size=3, padding='same')\n", (3274, 3317), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3329, 3349), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3347, 
3349), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3361, 3379), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3371, 3379), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3391, 3440), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=64, kernel_size=3, padding='same')\n", (3397, 3440), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3452, 3472), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3470, 3472), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3484, 3502), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3494, 3502), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3514, 3568), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=2, padding='same')\n", (3523, 3568), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3580, 3630), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, padding='same')\n", (3586, 3630), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3642, 3662), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3660, 3662), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3674, 
3692), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3684, 3692), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3704, 3754), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, padding='same')\n", (3710, 3754), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3766, 3786), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3784, 3786), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3798, 3816), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3808, 3816), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3828, 3882), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=2, padding='same')\n", (3837, 3882), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3894, 3903), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3901, 3903), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3915, 3925), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (3920, 3925), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3937, 3957), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3955, 3957), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3969, 3987), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3979, 3987), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((3999, 4030), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (4004, 4030), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Activation, BatchNormalization\n'), ((4057, 4069), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.0001)'], {}), '(0.0001)\n', (4061, 4069), False, 'from tensorflow.keras.optimizers import Adam\n')] |
import time
from argparse import Namespace
from contextlib import nullcontext
from threading import Lock, Thread
import ipywidgets as w
import numpy as np
from IPython.display import clear_output, display
class UIView:
    """Wrap an ipywidgets container plus a flat name->widget mapping.

    Attribute access resolves widget names (``view.slider`` returns the
    widget itself), and ``view.values`` exposes a proxy for reading and
    writing the widgets' current ``.value`` attributes.
    """

    def __init__(self, container, named_widgets) -> None:
        # container: the outermost widget displayed for this view.
        # named_widgets: flat id -> widget mapping collected from children.
        self._named_widgets = named_widgets
        self._container = container

    def __dir__(self):
        # Expose the widget names to dir()/tab-completion.
        return self._named_widgets.keys()

    def _ipython_display_(self):
        # Hook used by IPython's display machinery to render the view.
        display(self._container)

    def __getattr__(self, attr):
        if attr in self._named_widgets:
            return self._named_widgets[attr]
        # BUG FIX: the original raised a bare AttributeError() with no
        # message; name the missing attribute so failures are debuggable.
        raise AttributeError(
            f"{type(self).__name__} has no widget named {attr!r}"
        )

    def __call__(self, decorated):
        """Decorate *decorated* so it receives the current widget values
        (the ``values`` proxy) as its first argument."""
        from functools import wraps

        @wraps(decorated)
        def decorator(*args, **kwargs):
            return decorated(self.values, *args, **kwargs)

        return decorator

    @property
    def values(self):
        """Proxy whose attributes read/write the wrapped widgets' values.

        Reading returns ``widget.value`` when the widget has one, otherwise
        the widget itself; assignment always sets ``widget.value``.
        """
        class Values:
            def __getattr__(_, attr):
                if hasattr(self._named_widgets[attr], "value"):
                    return self._named_widgets[attr].value
                else:
                    return self._named_widgets[attr]

            def __setattr__(_, name: str, value) -> None:
                self._named_widgets[name].value = value

        return Values()
class make:
    """Factory helpers that build small :class:`UIView` wrappers.

    Each helper returns a ``UIView`` whose name map lets the widget be
    addressed by the given ``id``; :meth:`h` and :meth:`v` compose child
    views into horizontal/vertical boxes while merging their name maps.
    """

    @staticmethod
    def singleton(name, widget):
        # Wrap one widget as a UIView addressable under `name`.
        return UIView(widget, {name: widget})

    @staticmethod
    def slider(id="unset", *args, **kwargs):
        # Float slider; extra arguments pass through to ipywidgets.
        return make.singleton(name=id, widget=w.FloatSlider(*args, **kwargs))

    @staticmethod
    def progress(id="unset", *args, **kwargs):
        # Progress bar normalized to the [0, 1] range.
        return make.singleton(name=id, widget=w.FloatProgress(min=0, max=1, **kwargs))

    @staticmethod
    def checkbox(id="unset", *args, **kwargs):
        return make.singleton(name=id, widget=w.Checkbox(*args, **kwargs))

    @staticmethod
    def int(id="unset", *args, **kwargs):
        # Bounded integer text input.
        return make.singleton(name=id, widget=w.BoundedIntText(*args, **kwargs))

    @staticmethod
    def float(id="unset", *args, **kwargs):
        return make.singleton(name=id, widget=w.FloatText(*args, **kwargs))

    @staticmethod
    def text(id="unset", *args, **kwargs):
        return make.singleton(name=id, widget=w.Text(*args, **kwargs))

    @staticmethod
    def button(id="unset", *args, **kwargs):
        return make.singleton(name=id, widget=w.Button(*args, **kwargs))

    @staticmethod
    def output(id="unset"):
        # Output area with convenience display()/clear() methods.
        class Output(w.Output):
            def display(self, *things):
                # Render each object inside this output area.
                with self:
                    # clear_output(wait=True)
                    for t in things:
                        display(t)

            def clear(self):
                clear_output(wait=True)

            # def __enter__(self):
            #     enter_return = super().__enter__()
            #     # clear_output(wait=True)
            #     return enter_return

        return make.singleton(name=id, widget=Output())

    @staticmethod
    def label(id="unset", *args, **kwargs):
        return make.singleton(name=id, widget=w.Label(*args, **kwargs))

    @staticmethod
    def line(id="unset", *args, **kwargs):
        # Live-updating line chart backed by bqplot; `plot(y)` replaces the
        # data and rescales both axes to fit it.
        import bqplot as bq

        x_sc = bq.LinearScale(min=0, max=10)
        y_sc = bq.LinearScale(min=-1, max=1)
        ax_x = bq.Axis(label="X", scale=x_sc, tick_format="0.0f")
        ax_y = bq.Axis(
            label="Y", scale=y_sc, orientation="vertical", tick_format="0.2f"
        )
        line = bq.Lines(
            x=[0],
            y=[0],
            scales={"x": x_sc, "y": y_sc},
            colors=["blue"],
        )
        fig = bq.Figure(axes=[ax_x, ax_y], marks=[line], **kwargs)

        class LineWidget(w.Output):
            def __init__(self, **kwargs):
                super().__init__(**kwargs)
                with self:
                    display(fig)

            def plot(_, y):
                # y: 1-D numeric array-like with .min()/.max() (e.g. numpy).
                x = np.arange(len(y))
                x_sc.max = len(y)
                y_sc.min = y.min()
                y_sc.max = y.max()
                line.x = x
                line.y = y

        return make.singleton(name=id, widget=LineWidget())

    @staticmethod
    def h(*children, **kwargs):
        # Merge the children's widget name maps (later duplicates win) and
        # lay their containers out horizontally.
        new_named_widgets = {
            n: w for u in children for n, w in u._named_widgets.items()
        }
        container_content = [u._container for u in children]
        return UIView(
            container=w.HBox(container_content, **kwargs),
            named_widgets=new_named_widgets,
        )

    @staticmethod
    def v(*children, **kwargs):
        # Same as h(), but stacks the containers vertically.
        new_named_widgets = {
            n: w for u in children for n, w in u._named_widgets.items()
        }
        container_content = [u._container for u in children]
        return UIView(
            container=w.VBox(container_content, **kwargs),
            named_widgets=new_named_widgets,
        )
def for_training(model, dataloader, in_background=True, **params):
params = Namespace(**params)
running_state = "stopped"
it = 0
batch_iterator = iter(dataloader)
loss_history = []
lock = nullcontext()
view = make.v(
make.h(
make.v(
make.label(value="LR"),
make.slider(
"lr",
min=0.0001,
max=1,
value=0.001,
step=0.0001,
readout_format=".4f",
layout={"width": "auto"},
),
make.label(value="ITS"),
make.int(
"its",
min=1,
max=999999999,
value=params.its,
layout={"width": "auto"},
),
layout=w.Layout(**{"width": "25%", "border": "2px solid #ccc"}),
),
make.v(
make.h(
make.progress("progress", description="IT [000:000]"),
make.button("play", description="Start"),
make.button("stop", description="Stop"),
),
make.line("loss", title="Loss", layout={"height": "350px"}),
layout={"width": "100%", "border": "2px solid #ccc"},
),
layout={"border": "2px solid #ccc"},
),
make.output("out"),
layout={"border": "4px solid #e6e6e6"},
)
def step():
batch = next(batch_iterator)
loss = model.training_step(batch)
loss_history.append(loss)
def train():
nonlocal it, batch_iterator
try:
while True:
with lock:
step()
its = view.its.value
if running_state != "running":
break
if it > its:
stop()
break
view.progress.value = (it + 1) / (its + 1)
view.progress.description = f"IT [{it:03}:{its:03}]"
loss_history_np = np.array(loss_history)
view.loss.plot(loss_history_np)
it += 1
except StopIteration:
stop()
def play():
with lock:
nonlocal running_state
view.play.description = "Pause"
view.stop.disabled = False
running_state = "running"
if in_background:
thread = Thread(target=train)
thread.start()
else:
train()
def pause():
with lock:
nonlocal running_state
view.play.description = "Start"
running_state = "paused"
def stop():
with lock:
nonlocal it, batch_iterator, running_state, loss_history
pause()
it = 0
loss_history = []
batch_iterator = iter(dataloader)
view.stop.disabled = True
running_state = "stopped"
def toggle():
if running_state != "running":
play()
else:
pause()
view.play.on_click(lambda _: toggle())
view.stop.on_click(lambda _: stop())
return view
def for_generator(generator):
# TODO: This kind of does not work because ot the multi threadedness. Look at the notebook
# from the commit this comes from. It is not good. Think of a way to sync with the front-end
# or discard this function.
view = make.v(
make.h(
make.progress("progress"),
make.button("toggle", description="Start"),
make.button("restart", description="Restart"),
),
make.output("out"),
)
it = generator(view.out)
running = False
lock = nullcontext()
def restart():
with lock:
nonlocal it, running
running = False
it = generator(view.out)
with view.out:
view.out.clear()
def run():
nonlocal it
while running:
# time.sleep(0.05)
try:
with lock:
value = next(it)
try:
view.progress.value = value
except Exception:
pass
except StopIteration:
restart()
def play():
with lock:
nonlocal running
view.toggle.description = "Pause"
running = True
thread = Thread(target=run)
thread.start()
def pause():
with lock:
nonlocal running
view.toggle.description = "Start"
running = False
def toggle():
if running:
pause()
else:
play()
view.toggle.on_click(lambda _: toggle())
view.restart.on_click(lambda _: restart())
return view
| [
"argparse.Namespace",
"bqplot.Figure",
"ipywidgets.Text",
"bqplot.LinearScale",
"ipywidgets.BoundedIntText",
"ipywidgets.FloatProgress",
"ipywidgets.Button",
"bqplot.Axis",
"IPython.display.display",
"ipywidgets.Label",
"ipywidgets.Layout",
"contextlib.nullcontext",
"threading.Thread",
"ip... | [((4944, 4963), 'argparse.Namespace', 'Namespace', ([], {}), '(**params)\n', (4953, 4963), False, 'from argparse import Namespace\n'), ((5076, 5089), 'contextlib.nullcontext', 'nullcontext', ([], {}), '()\n', (5087, 5089), False, 'from contextlib import nullcontext\n'), ((8769, 8782), 'contextlib.nullcontext', 'nullcontext', ([], {}), '()\n', (8780, 8782), False, 'from contextlib import nullcontext\n'), ((468, 492), 'IPython.display.display', 'display', (['self._container'], {}), '(self._container)\n', (475, 492), False, 'from IPython.display import clear_output, display\n'), ((726, 742), 'functools.wraps', 'wraps', (['decorated'], {}), '(decorated)\n', (731, 742), False, 'from functools import wraps\n'), ((3204, 3233), 'bqplot.LinearScale', 'bq.LinearScale', ([], {'min': '(0)', 'max': '(10)'}), '(min=0, max=10)\n', (3218, 3233), True, 'import bqplot as bq\n'), ((3249, 3278), 'bqplot.LinearScale', 'bq.LinearScale', ([], {'min': '(-1)', 'max': '(1)'}), '(min=-1, max=1)\n', (3263, 3278), True, 'import bqplot as bq\n'), ((3295, 3345), 'bqplot.Axis', 'bq.Axis', ([], {'label': '"""X"""', 'scale': 'x_sc', 'tick_format': '"""0.0f"""'}), "(label='X', scale=x_sc, tick_format='0.0f')\n", (3302, 3345), True, 'import bqplot as bq\n'), ((3361, 3435), 'bqplot.Axis', 'bq.Axis', ([], {'label': '"""Y"""', 'scale': 'y_sc', 'orientation': '"""vertical"""', 'tick_format': '"""0.2f"""'}), "(label='Y', scale=y_sc, orientation='vertical', tick_format='0.2f')\n", (3368, 3435), True, 'import bqplot as bq\n'), ((3474, 3544), 'bqplot.Lines', 'bq.Lines', ([], {'x': '[0]', 'y': '[0]', 'scales': "{'x': x_sc, 'y': y_sc}", 'colors': "['blue']"}), "(x=[0], y=[0], scales={'x': x_sc, 'y': y_sc}, colors=['blue'])\n", (3482, 3544), True, 'import bqplot as bq\n'), ((3619, 3671), 'bqplot.Figure', 'bq.Figure', ([], {'axes': '[ax_x, ax_y]', 'marks': '[line]'}), '(axes=[ax_x, ax_y], marks=[line], **kwargs)\n', (3628, 3671), True, 'import bqplot as bq\n'), ((9514, 9532), 'threading.Thread', 
'Thread', ([], {'target': 'run'}), '(target=run)\n', (9520, 9532), False, 'from threading import Lock, Thread\n'), ((1524, 1554), 'ipywidgets.FloatSlider', 'w.FloatSlider', (['*args'], {}), '(*args, **kwargs)\n', (1537, 1554), True, 'import ipywidgets as w\n'), ((1668, 1707), 'ipywidgets.FloatProgress', 'w.FloatProgress', ([], {'min': '(0)', 'max': '(1)'}), '(min=0, max=1, **kwargs)\n', (1683, 1707), True, 'import ipywidgets as w\n'), ((1821, 1848), 'ipywidgets.Checkbox', 'w.Checkbox', (['*args'], {}), '(*args, **kwargs)\n', (1831, 1848), True, 'import ipywidgets as w\n'), ((1957, 1990), 'ipywidgets.BoundedIntText', 'w.BoundedIntText', (['*args'], {}), '(*args, **kwargs)\n', (1973, 1990), True, 'import ipywidgets as w\n'), ((2101, 2129), 'ipywidgets.FloatText', 'w.FloatText', (['*args'], {}), '(*args, **kwargs)\n', (2112, 2129), True, 'import ipywidgets as w\n'), ((2239, 2262), 'ipywidgets.Text', 'w.Text', (['*args'], {}), '(*args, **kwargs)\n', (2245, 2262), True, 'import ipywidgets as w\n'), ((2374, 2399), 'ipywidgets.Button', 'w.Button', (['*args'], {}), '(*args, **kwargs)\n', (2382, 2399), True, 'import ipywidgets as w\n'), ((2711, 2734), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (2723, 2734), False, 'from IPython.display import clear_output, display\n'), ((3072, 3096), 'ipywidgets.Label', 'w.Label', (['*args'], {}), '(*args, **kwargs)\n', (3079, 3096), True, 'import ipywidgets as w\n'), ((4409, 4444), 'ipywidgets.HBox', 'w.HBox', (['container_content'], {}), '(container_content, **kwargs)\n', (4415, 4444), True, 'import ipywidgets as w\n'), ((4770, 4805), 'ipywidgets.VBox', 'w.VBox', (['container_content'], {}), '(container_content, **kwargs)\n', (4776, 4805), True, 'import ipywidgets as w\n'), ((7462, 7482), 'threading.Thread', 'Thread', ([], {'target': 'train'}), '(target=train)\n', (7468, 7482), False, 'from threading import Lock, Thread\n'), ((3841, 3853), 'IPython.display.display', 'display', (['fig'], {}), 
'(fig)\n', (3848, 3853), False, 'from IPython.display import clear_output, display\n'), ((5755, 5811), 'ipywidgets.Layout', 'w.Layout', ([], {}), "(**{'width': '25%', 'border': '2px solid #ccc'})\n", (5763, 5811), True, 'import ipywidgets as w\n'), ((7062, 7084), 'numpy.array', 'np.array', (['loss_history'], {}), '(loss_history)\n', (7070, 7084), True, 'import numpy as np\n'), ((2654, 2664), 'IPython.display.display', 'display', (['t'], {}), '(t)\n', (2661, 2664), False, 'from IPython.display import clear_output, display\n')] |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import numpy as np
from nrrd.tests.util import *
import nrrd
class TestReadingFunctions(unittest.TestCase):
def setUp(self):
self.expected_header = {u'dimension': 3,
u'encoding': 'raw',
u'endian': 'little',
u'kinds': ['domain', 'domain', 'domain'],
u'sizes': np.array([30, 30, 30]),
u'space': 'left-posterior-superior',
u'space directions': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
u'space origin': np.array([0, 0, 0]),
u'type': 'short'}
self.expected_data = np.fromfile(RAW_DATA_FILE_PATH, np.int16).reshape((30, 30, 30), order='F')
def test_read_header_only(self):
with open(RAW_NRRD_FILE_PATH, 'rb') as f:
header = nrrd.read_header(f)
# np.testing.assert_equal is used to compare the headers because it will appropriately handle each
# value in the structure. Since some of the values can be Numpy arrays inside the headers, this must be
# used to compare the two values.
np.testing.assert_equal(self.expected_header, header)
def test_read_header_only_with_filename(self):
header = nrrd.read_header(RAW_NRRD_FILE_PATH)
# np.testing.assert_equal is used to compare the headers because it will appropriately handle each
# value in the structure. Since some of the values can be Numpy arrays inside the headers, this must be
# used to compare the two values.
np.testing.assert_equal(self.expected_header, header)
def test_read_detached_header_only(self):
expected_header = self.expected_header
expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
with open(RAW_NHDR_FILE_PATH, 'rb') as f:
header = nrrd.read_header(f)
np.testing.assert_equal(self.expected_header, header)
def test_read_header_and_data_filename(self):
data, header = nrrd.read(RAW_NRRD_FILE_PATH)
np.testing.assert_equal(self.expected_header, header)
np.testing.assert_equal(data, self.expected_data)
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_read_detached_header_and_data(self):
expected_header = self.expected_header
expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
data, header = nrrd.read(RAW_NHDR_FILE_PATH)
np.testing.assert_equal(self.expected_header, header)
np.testing.assert_equal(data, self.expected_data)
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_read_header_and_gz_compressed_data(self):
expected_header = self.expected_header
expected_header[u'encoding'] = 'gzip'
data, header = nrrd.read(GZ_NRRD_FILE_PATH)
np.testing.assert_equal(self.expected_header, header)
np.testing.assert_equal(data, self.expected_data)
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_read_header_and_bz2_compressed_data(self):
expected_header = self.expected_header
expected_header[u'encoding'] = 'bzip2'
data, header = nrrd.read(BZ2_NRRD_FILE_PATH)
np.testing.assert_equal(self.expected_header, header)
np.testing.assert_equal(data, self.expected_data)
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_read_header_and_gz_compressed_data_with_lineskip3(self):
expected_header = self.expected_header
expected_header[u'encoding'] = 'gzip'
expected_header[u'line skip'] = 3
data, header = nrrd.read(GZ_LINESKIP_NRRD_FILE_PATH)
np.testing.assert_equal(self.expected_header, header)
np.testing.assert_equal(data, self.expected_data)
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_read_raw_header(self):
expected_header = {u'type': 'float', u'dimension': 3}
header = nrrd.read_header(('NRRD0005', 'type: float', 'dimension: 3'))
self.assertEqual(expected_header, header)
expected_header = {u'my extra info': u'my : colon-separated : values'}
header = nrrd.read_header(('NRRD0005', 'my extra info:=my : colon-separated : values'))
np.testing.assert_equal(expected_header, header)
def test_read_dup_field_error_and_warn(self):
expected_header = {u'type': 'float', u'dimension': 3}
header_txt_tuple = ('NRRD0005', 'type: float', 'dimension: 3', 'type: float')
with self.assertRaisesRegex(nrrd.NRRDError, "Duplicate header field: 'type'"):
header = nrrd.read_header(header_txt_tuple)
import warnings
with warnings.catch_warnings(record=True) as w:
nrrd.reader.ALLOW_DUPLICATE_FIELD = True
header = nrrd.read_header(header_txt_tuple)
self.assertTrue("Duplicate header field: 'type'" in str(w[0].message))
self.assertEqual(expected_header, header)
nrrd.reader.ALLOW_DUPLICATE_FIELD = False
def test_read_header_and_ascii_1d_data(self):
expected_header = {u'dimension': 1,
u'encoding': 'ASCII',
u'kinds': ['domain'],
u'sizes': [27],
u'spacings': [1.0458000000000001],
u'type': 'unsigned char'}
data, header = nrrd.read(ASCII_1D_NRRD_FILE_PATH)
self.assertEqual(header, expected_header)
np.testing.assert_equal(data.dtype, np.uint8)
np.testing.assert_equal(data, np.arange(1, 28))
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_read_header_and_ascii_2d_data(self):
expected_header = {u'dimension': 2,
u'encoding': 'ASCII',
u'kinds': ['domain', 'domain'],
u'sizes': [3, 9],
u'spacings': [1.0458000000000001, 2],
u'type': 'unsigned short'}
data, header = nrrd.read(ASCII_2D_NRRD_FILE_PATH)
np.testing.assert_equal(header, expected_header)
np.testing.assert_equal(data.dtype, np.uint16)
np.testing.assert_equal(data, np.arange(1, 28).reshape(3, 9, order='F'))
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_read_simple_4d_nrrd(self):
expected_header = {'type': 'double',
'dimension': 4,
'space': 'right-anterior-superior',
'sizes': np.array([1, 1, 1, 1]),
'space directions': np.array([[1.5, 0., 0.],
[0., 1.5, 0.],
[0., 0., 1.],
[np.NaN, np.NaN, np.NaN]]),
'endian': 'little',
'encoding': 'raw',
'measurement frame': np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])}
data, header = nrrd.read(RAW_4D_NRRD_FILE_PATH)
np.testing.assert_equal(header, expected_header)
np.testing.assert_equal(data.dtype, np.float64)
np.testing.assert_equal(data, np.array([[[[0.76903426]]]]))
# Test that the data read is able to be edited
self.assertTrue(data.flags['WRITEABLE'])
def test_custom_fields_without_field_map(self):
expected_header = {u'dimension': 1,
u'encoding': 'ASCII',
u'kinds': ['domain'],
u'sizes': [27],
u'spacings': [1.0458000000000001],
u'int': '24',
u'double': '25.5566',
u'string': 'This is a long string of information that is important.',
u'int list': '1 2 3 4 5 100',
u'double list': '0.2 0.502 0.8',
u'string list': 'words are split by space in list',
u'int vector': '(100, 200, -300)',
u'double vector': '(100.5,200.3,-300.99)',
u'int matrix': '(1,0,0) (0,1,0) (0,0,1)',
u'double matrix': '(1.2,0.3,0) (0,1.5,0) (0,-0.55,1.6)',
u'type': 'unsigned char'}
header = nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH)
self.assertEqual(header, expected_header)
def test_custom_fields_with_field_map(self):
expected_header = {u'dimension': 1,
u'encoding': 'ASCII',
u'kinds': ['domain'],
u'sizes': [27],
u'spacings': [1.0458000000000001],
u'int': 24,
u'double': 25.5566,
u'string': 'This is a long string of information that is important.',
u'int list': np.array([1, 2, 3, 4, 5, 100]),
u'double list': np.array([0.2, 0.502, 0.8]),
u'string list': ['words', 'are', 'split', 'by', 'space', 'in', 'list'],
u'int vector': np.array([100, 200, -300]),
u'double vector': np.array([100.5, 200.3, -300.99]),
u'int matrix': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
u'double matrix': np.array([[1.2, 0.3, 0.0], [0.0, 1.5, 0.0], [0.0, -0.55, 1.6]]),
u'type': 'unsigned char'}
custom_field_map = {'int': 'int',
'double': 'double',
'string': 'string',
'int list': 'int list',
'double list': 'double list',
'string list': 'string list',
'int vector': 'int vector',
'double vector': 'double vector',
'int matrix': 'int matrix',
'double matrix': 'double matrix'}
header = nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, custom_field_map)
np.testing.assert_equal(header, expected_header)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"os.path.basename",
"numpy.fromfile",
"os.path.dirname",
"nrrd.read_header",
"numpy.array",
"warnings.catch_warnings",
"numpy.testing.assert_equal",
"numpy.arange",
"nrrd.read"
] | [((1316, 1369), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (1339, 1369), True, 'import numpy as np\n'), ((1439, 1475), 'nrrd.read_header', 'nrrd.read_header', (['RAW_NRRD_FILE_PATH'], {}), '(RAW_NRRD_FILE_PATH)\n', (1455, 1475), False, 'import nrrd\n'), ((1746, 1799), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (1769, 1799), True, 'import numpy as np\n'), ((1934, 1970), 'os.path.basename', 'os.path.basename', (['RAW_DATA_FILE_PATH'], {}), '(RAW_DATA_FILE_PATH)\n', (1950, 1970), False, 'import os\n'), ((2072, 2125), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (2095, 2125), True, 'import numpy as np\n'), ((2200, 2229), 'nrrd.read', 'nrrd.read', (['RAW_NRRD_FILE_PATH'], {}), '(RAW_NRRD_FILE_PATH)\n', (2209, 2229), False, 'import nrrd\n'), ((2239, 2292), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (2262, 2292), True, 'import numpy as np\n'), ((2301, 2350), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'self.expected_data'], {}), '(data, self.expected_data)\n', (2324, 2350), True, 'import numpy as np\n'), ((2594, 2630), 'os.path.basename', 'os.path.basename', (['RAW_DATA_FILE_PATH'], {}), '(RAW_DATA_FILE_PATH)\n', (2610, 2630), False, 'import os\n'), ((2655, 2684), 'nrrd.read', 'nrrd.read', (['RAW_NHDR_FILE_PATH'], {}), '(RAW_NHDR_FILE_PATH)\n', (2664, 2684), False, 'import nrrd\n'), ((2694, 2747), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (2717, 2747), True, 'import numpy as np\n'), ((2756, 2805), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'self.expected_data'], {}), 
'(data, self.expected_data)\n', (2779, 2805), True, 'import numpy as np\n'), ((3084, 3112), 'nrrd.read', 'nrrd.read', (['GZ_NRRD_FILE_PATH'], {}), '(GZ_NRRD_FILE_PATH)\n', (3093, 3112), False, 'import nrrd\n'), ((3122, 3175), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (3145, 3175), True, 'import numpy as np\n'), ((3184, 3233), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'self.expected_data'], {}), '(data, self.expected_data)\n', (3207, 3233), True, 'import numpy as np\n'), ((3514, 3543), 'nrrd.read', 'nrrd.read', (['BZ2_NRRD_FILE_PATH'], {}), '(BZ2_NRRD_FILE_PATH)\n', (3523, 3543), False, 'import nrrd\n'), ((3553, 3606), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (3576, 3606), True, 'import numpy as np\n'), ((3615, 3664), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'self.expected_data'], {}), '(data, self.expected_data)\n', (3638, 3664), True, 'import numpy as np\n'), ((4000, 4037), 'nrrd.read', 'nrrd.read', (['GZ_LINESKIP_NRRD_FILE_PATH'], {}), '(GZ_LINESKIP_NRRD_FILE_PATH)\n', (4009, 4037), False, 'import nrrd\n'), ((4047, 4100), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.expected_header', 'header'], {}), '(self.expected_header, header)\n', (4070, 4100), True, 'import numpy as np\n'), ((4109, 4158), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'self.expected_data'], {}), '(data, self.expected_data)\n', (4132, 4158), True, 'import numpy as np\n'), ((4380, 4441), 'nrrd.read_header', 'nrrd.read_header', (["('NRRD0005', 'type: float', 'dimension: 3')"], {}), "(('NRRD0005', 'type: float', 'dimension: 3'))\n", (4396, 4441), False, 'import nrrd\n'), ((4589, 4667), 'nrrd.read_header', 'nrrd.read_header', (["('NRRD0005', 'my extra info:=my : colon-separated : values')"], {}), "(('NRRD0005', 'my extra 
info:=my : colon-separated : values'))\n", (4605, 4667), False, 'import nrrd\n'), ((4676, 4724), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_header', 'header'], {}), '(expected_header, header)\n', (4699, 4724), True, 'import numpy as np\n'), ((5826, 5860), 'nrrd.read', 'nrrd.read', (['ASCII_1D_NRRD_FILE_PATH'], {}), '(ASCII_1D_NRRD_FILE_PATH)\n', (5835, 5860), False, 'import nrrd\n'), ((5920, 5965), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data.dtype', 'np.uint8'], {}), '(data.dtype, np.uint8)\n', (5943, 5965), True, 'import numpy as np\n'), ((6518, 6552), 'nrrd.read', 'nrrd.read', (['ASCII_2D_NRRD_FILE_PATH'], {}), '(ASCII_2D_NRRD_FILE_PATH)\n', (6527, 6552), False, 'import nrrd\n'), ((6562, 6610), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['header', 'expected_header'], {}), '(header, expected_header)\n', (6585, 6610), True, 'import numpy as np\n'), ((6619, 6665), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data.dtype', 'np.uint16'], {}), '(data.dtype, np.uint16)\n', (6642, 6665), True, 'import numpy as np\n'), ((7739, 7771), 'nrrd.read', 'nrrd.read', (['RAW_4D_NRRD_FILE_PATH'], {}), '(RAW_4D_NRRD_FILE_PATH)\n', (7748, 7771), False, 'import nrrd\n'), ((7781, 7829), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['header', 'expected_header'], {}), '(header, expected_header)\n', (7804, 7829), True, 'import numpy as np\n'), ((7838, 7885), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data.dtype', 'np.float64'], {}), '(data.dtype, np.float64)\n', (7861, 7885), True, 'import numpy as np\n'), ((9098, 9148), 'nrrd.read_header', 'nrrd.read_header', (['ASCII_1D_CUSTOM_FIELDS_FILE_PATH'], {}), '(ASCII_1D_CUSTOM_FIELDS_FILE_PATH)\n', (9114, 9148), False, 'import nrrd\n'), ((10883, 10951), 'nrrd.read_header', 'nrrd.read_header', (['ASCII_1D_CUSTOM_FIELDS_FILE_PATH', 'custom_field_map'], {}), '(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, custom_field_map)\n', (10899, 10951), False, 
'import nrrd\n'), ((10961, 11009), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['header', 'expected_header'], {}), '(header, expected_header)\n', (10984, 11009), True, 'import numpy as np\n'), ((70, 95), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n'), ((501, 523), 'numpy.array', 'np.array', (['[30, 30, 30]'], {}), '([30, 30, 30])\n', (509, 523), True, 'import numpy as np\n'), ((647, 690), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (655, 690), True, 'import numpy as np\n'), ((741, 760), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (749, 760), True, 'import numpy as np\n'), ((1026, 1045), 'nrrd.read_header', 'nrrd.read_header', (['f'], {}), '(f)\n', (1042, 1045), False, 'import nrrd\n'), ((2043, 2062), 'nrrd.read_header', 'nrrd.read_header', (['f'], {}), '(f)\n', (2059, 2062), False, 'import nrrd\n'), ((5033, 5067), 'nrrd.read_header', 'nrrd.read_header', (['header_txt_tuple'], {}), '(header_txt_tuple)\n', (5049, 5067), False, 'import nrrd\n'), ((5106, 5142), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (5129, 5142), False, 'import warnings\n'), ((5223, 5257), 'nrrd.read_header', 'nrrd.read_header', (['header_txt_tuple'], {}), '(header_txt_tuple)\n', (5239, 5257), False, 'import nrrd\n'), ((6004, 6020), 'numpy.arange', 'np.arange', (['(1)', '(28)'], {}), '(1, 28)\n', (6013, 6020), True, 'import numpy as np\n'), ((7080, 7102), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (7088, 7102), True, 'import numpy as np\n'), ((7151, 7243), 'numpy.array', 'np.array', (['[[1.5, 0.0, 0.0], [0.0, 1.5, 0.0], [0.0, 0.0, 1.0], [np.NaN, np.NaN, np.NaN]]'], {}), '([[1.5, 0.0, 0.0], [0.0, 1.5, 0.0], [0.0, 0.0, 1.0], [np.NaN, np.\n NaN, np.NaN]])\n', (7159, 7243), True, 'import numpy as np\n'), ((7545, 7606), 'numpy.array', 'np.array', (['[[1.0, 
0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (7553, 7606), True, 'import numpy as np\n'), ((7924, 7952), 'numpy.array', 'np.array', (['[[[[0.76903426]]]]'], {}), '([[[[0.76903426]]]])\n', (7932, 7952), True, 'import numpy as np\n'), ((9720, 9750), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 100]'], {}), '([1, 2, 3, 4, 5, 100])\n', (9728, 9750), True, 'import numpy as np\n'), ((9795, 9822), 'numpy.array', 'np.array', (['[0.2, 0.502, 0.8]'], {}), '([0.2, 0.502, 0.8])\n', (9803, 9822), True, 'import numpy as np\n'), ((9965, 9991), 'numpy.array', 'np.array', (['[100, 200, -300]'], {}), '([100, 200, -300])\n', (9973, 9991), True, 'import numpy as np\n'), ((10038, 10071), 'numpy.array', 'np.array', (['[100.5, 200.3, -300.99]'], {}), '([100.5, 200.3, -300.99])\n', (10046, 10071), True, 'import numpy as np\n'), ((10115, 10158), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (10123, 10158), True, 'import numpy as np\n'), ((10205, 10268), 'numpy.array', 'np.array', (['[[1.2, 0.3, 0.0], [0.0, 1.5, 0.0], [0.0, -0.55, 1.6]]'], {}), '([[1.2, 0.3, 0.0], [0.0, 1.5, 0.0], [0.0, -0.55, 1.6]])\n', (10213, 10268), True, 'import numpy as np\n'), ((842, 883), 'numpy.fromfile', 'np.fromfile', (['RAW_DATA_FILE_PATH', 'np.int16'], {}), '(RAW_DATA_FILE_PATH, np.int16)\n', (853, 883), True, 'import numpy as np\n'), ((6704, 6720), 'numpy.arange', 'np.arange', (['(1)', '(28)'], {}), '(1, 28)\n', (6713, 6720), True, 'import numpy as np\n')] |
import numpy
ar = list(map(int,input().split()))
np_ar = numpy.array(ar)
print(numpy.reshape(np_ar,(3,3)))
| [
"numpy.array",
"numpy.reshape"
] | [((57, 72), 'numpy.array', 'numpy.array', (['ar'], {}), '(ar)\n', (68, 72), False, 'import numpy\n'), ((79, 107), 'numpy.reshape', 'numpy.reshape', (['np_ar', '(3, 3)'], {}), '(np_ar, (3, 3))\n', (92, 107), False, 'import numpy\n')] |
import numpy as np
from scipy.stats import entropy
def OHE(val, len_output):
val = np.array(val)
res = np.eye(len_output)[np.array(val).reshape(-1)]
return res.reshape(list(val.shape)+[len_output])
def cross_entropy(predictions, targets):
predictions = np.array(predictions)[:, :, 0]
return entropy(predictions) + entropy(predictions, targets)
def relu(x):
x = list(map(lambda i: i if i > 0 else 0, x))
return np.array(x)
def categorical_cross_entropy(predictions, targets, epsilon=1e-12):
predictions = np.clip(predictions, epsilon, 1. - epsilon)
N = predictions.shape[0]
ce = -np.sum(targets*np.log(predictions+1e-9))/N
return ce
def dsoftmax(s):
jacobian_m = np.diag(s)
for i in range(len(jacobian_m)):
for j in range(len(jacobian_m)):
if i == j:
jacobian_m[i][j] = s[i] * (1-s[i])
else:
jacobian_m[i][j] = -s[i]*s[j]
return jacobian_m
def drelu(x):
x[x <= 0] = 0
x[x > 0] = 1
return x
| [
"numpy.eye",
"numpy.log",
"scipy.stats.entropy",
"numpy.clip",
"numpy.array",
"numpy.diag"
] | [((89, 102), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (97, 102), True, 'import numpy as np\n'), ((444, 455), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (452, 455), True, 'import numpy as np\n'), ((544, 588), 'numpy.clip', 'np.clip', (['predictions', 'epsilon', '(1.0 - epsilon)'], {}), '(predictions, epsilon, 1.0 - epsilon)\n', (551, 588), True, 'import numpy as np\n'), ((720, 730), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (727, 730), True, 'import numpy as np\n'), ((113, 131), 'numpy.eye', 'np.eye', (['len_output'], {}), '(len_output)\n', (119, 131), True, 'import numpy as np\n'), ((273, 294), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (281, 294), True, 'import numpy as np\n'), ((315, 335), 'scipy.stats.entropy', 'entropy', (['predictions'], {}), '(predictions)\n', (322, 335), False, 'from scipy.stats import entropy\n'), ((338, 367), 'scipy.stats.entropy', 'entropy', (['predictions', 'targets'], {}), '(predictions, targets)\n', (345, 367), False, 'from scipy.stats import entropy\n'), ((132, 145), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (140, 145), True, 'import numpy as np\n'), ((642, 669), 'numpy.log', 'np.log', (['(predictions + 1e-09)'], {}), '(predictions + 1e-09)\n', (648, 669), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from itertools import product
from tensornetwork import Node, ncon
from tnpy.tsdrg import TreeNode, TensorTree, TSDRG
from tnpy.exact_diagonalization import ExactDiagonalization as ED
from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg
class TestTensorTree(unittest.TestCase):
model = RandomHeisenberg(N=4, h=0, penalty=0.0, s_target=0)
node0 = TreeNode(0, model.mpo[0])
node1 = TreeNode(1, model.mpo[1])
node2 = TreeNode(2, model.mpo[2])
node3 = TreeNode(3, model.mpo[3])
node4 = Node(ED(RandomHeisenberg(N=2, h=0).mpo).evecs.reshape((2, 2, 4)))
node5 = Node(ED(RandomHeisenberg(N=3, h=0).mpo).evecs.reshape((2, 4, 8)))
node6 = Node(ED(RandomHeisenberg(N=4, h=0).mpo).evecs.reshape((8, 2, 16)))
def __init__(self, *args, **kwargs):
super(TestTensorTree, self).__init__(*args, **kwargs)
self.tree = TensorTree([self.node0, self.node1, self.node2, self.node3])
# test TensorTree.append() and TensorTree.horizon
self.tree.append(TreeNode(4, self.node4, left=self.node1, right=self.node2))
self.assertCountEqual([0, 4, 3], self.tree.horizon)
self.tree.append(TreeNode(5, self.node5, left=self.node0, right=self.tree.node(4)))
self.assertCountEqual([5, 3], self.tree.horizon)
self.tree.append(TreeNode(6, self.node6, left=self.tree.node(5), right=self.node3))
self.assertCountEqual([6], self.tree.horizon)
def test_is_leaf(self):
self.assertTrue(self.node0.is_leaf)
self.assertTrue(self.node1.is_leaf)
self.assertTrue(self.node2.is_leaf)
self.assertTrue(self.node3.is_leaf)
self.assertFalse(self.tree.node(5).is_leaf)
def test_equal(self):
self.assertTrue(self.node0 == TreeNode(0, None))
self.assertFalse(self.tree.node(6) == TreeNode(6, None))
def test_n_nodes(self):
self.assertEqual(7, self.tree.n_nodes)
def test_n_layers(self):
self.assertEqual(4, self.tree.n_layers)
def test_has_root(self):
self.assertTrue(self.tree.has_root)
empty_tree = TensorTree([])
self.assertFalse(empty_tree.has_root)
def test_node(self):
self.assertTrue(self.tree.node(5) == self.tree.root.left)
self.assertTrue(self.tree.node(4) == self.tree.node(5).right)
def test_find_path(self):
self.assertCountEqual([6, 5, 0], self.tree.find_path(0))
self.assertCountEqual([6, 5, 4], self.tree.find_path(4))
self.assertCountEqual([6, 5, 4, 2], self.tree.find_path(2))
self.assertRaises(KeyError, self.tree.find_path, 7)
self.assertRaises(KeyError, self.tree.find_path, -1)
def test_ancestor(self):
ancestor = self.tree.ancestor(2)
self.assertEqual(3, len(ancestor))
self.assertTrue(ancestor[0] == self.tree.root)
self.assertTrue(ancestor[1] == self.tree.node(5))
self.assertTrue(ancestor[2] == self.tree.node(4))
def test_common_ancestor(self):
self.assertCountEqual([6, 5, 4], self.tree.common_ancestor(1, 2))
self.assertCountEqual([6, 5], self.tree.common_ancestor(0, 2))
self.assertCountEqual([6, 5], self.tree.common_ancestor(0, 4))
self.assertCountEqual([6], self.tree.common_ancestor(2, 3))
def test_contract_nodes(self):
np.testing.assert_allclose(
np.identity(16),
self.tree.contract_nodes([4, 5, 6]),
atol=1e-12
)
np.testing.assert_allclose(
np.identity(16),
self.tree.contract_nodes([5, 6]),
atol=1e-12
)
np.testing.assert_allclose(
np.identity(16),
self.tree.contract_nodes([6]),
atol=1e-12
)
out_tensor, out_order = self.tree.contract_nodes(
[6, 5, 4],
open_bonds=[(5, 'left')],
return_out_order=True
)
self.assertCountEqual(
(16, 2, 16, 2),
out_tensor.shape
)
self.assertListEqual(
["-Node6Sub2", "-Node5Sub0", "-ConjNode6Sub2", "-ConjNode5Sub0"],
out_order
)
out_tensor, out_order = self.tree.contract_nodes(
[6, 5],
open_bonds=[(5, 'right')],
return_out_order=True
)
self.assertCountEqual(
(16, 4, 16, 4),
out_tensor.shape
)
self.assertListEqual(
["-Node6Sub2", "-Node5Sub1", "-ConjNode6Sub2", "-ConjNode5Sub1"],
out_order
)
def test_plot(self):
g = self.tree.plot()
g.render(format='png', view=False)
class TestTSDRG(unittest.TestCase):
ordered_model = RandomHeisenberg(N=6, h=0.0, penalty=0.0, s_target=0)
ordered_tsdrg = TSDRG(ordered_model.mpo, chi=2**6)
model = RandomHeisenberg(N=6, h=1e-5, penalty=0.0, s_target=0)
model.seed = 2021
tsdrg = TSDRG(model.mpo, chi=2**6)
tsdrg.run()
ed = ED(model.mpo)
def test_N(self):
self.assertEqual(6, self.tsdrg.N)
def test_block_hamiltonian(self):
for site in range(self.ordered_tsdrg.N - 1):
np.testing.assert_array_equal(
np.array(
[[0.25, 0, 0, 0],
[0, -0.25, 0.5, 0],
[0, 0.5, -0.25, 0],
[0, 0, 0, 0.25]]
),
self.ordered_tsdrg.block_hamiltonian(site)
)
def test_spectrum_projector(self):
evecs = np.array(
[[0, 1, 0, 0],
[1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0],
[-1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0],
[0, 0, 0, 1]]
)
V, W = self.ordered_tsdrg.spectrum_projector(site=3, evecs=evecs)
np.testing.assert_array_equal(
evecs.reshape((2, 2, 4)),
V.tensor
)
coarse_grained_mpo = ncon(
[self.ordered_model.mpo[3].tensor, self.ordered_model.mpo[4].tensor],
[(-1, 1, '-a1', '-a2'), (1, -2, '-b1', '-b2')],
out_order=[-1, -2, '-a1', '-b1', '-a2', '-b2']
).reshape((6, 6, 4, 4))
np.testing.assert_allclose(
ncon(
[coarse_grained_mpo, evecs, evecs],
[('-m1', '-m2', 1, 2), (1, '-a1'), (2, '-b1')],
out_order=['-m1', '-m2', '-a1', '-b1']
),
W.tensor,
atol=1e-12
)
def test_run(self):
np.testing.assert_allclose(
np.diagflat(self.ed.evals[:self.tsdrg.chi]),
self.tsdrg.mpo[0].tensor,
atol=1e-12
)
def test_reduced_density_matrix(self):
for site, energy_level in product(range(self.tsdrg.N), range(self.tsdrg.chi)):
ss = self.ed.reduced_density_matrix(site, energy_level)
rho = self.tsdrg.reduced_density_matrix(site, energy_level)
s = np.linalg.svd(rho, compute_uv=False)
np.testing.assert_allclose(
ss,
s,
atol=1e-12
)
# TODO: confirm the order of open_bonds, i.e. the basis of reduced rho
# TODO: [fix] error for energy_level > 0 when there's no disorder
def test_entanglement_entropy(self):
for site, energy_level in product(range(self.tsdrg.N), range(self.tsdrg.chi)[:4]):
self.assertAlmostEqual(
self.ed.entanglement_entropy(site, energy_level),
self.tsdrg.entanglement_entropy(site, energy_level),
places=12
)
def test_energies(self):
np.testing.assert_allclose(
np.diagflat(self.ed.evals[:self.tsdrg.chi]),
self.tsdrg.energies(),
atol=1e-12
)
np.testing.assert_allclose(
np.diagflat(self.ed.evals[:self.tsdrg.chi]),
self.tsdrg.energies(self.model.mpo),
atol=1e-12
)
model2 = SpectralFoldedRandomHeisenberg(
N=self.model.N, h=self.model.h,
penalty=self.model.penalty, s_target=self.model.s_target
)
model2.seed = self.model.seed
tsdrg2 = TSDRG(model2.mpo, chi=2**self.model.N)
tsdrg2.run()
np.testing.assert_allclose(
self.ed.evals,
np.sort(np.diag(tsdrg2.energies(self.model.mpo))),
atol=1e-12
)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"tnpy.exact_diagonalization.ExactDiagonalization",
"numpy.diagflat",
"tnpy.tsdrg.TensorTree",
"tnpy.tsdrg.TSDRG",
"numpy.identity",
"tnpy.model.SpectralFoldedRandomHeisenberg",
"numpy.linalg.svd",
"numpy.array",
"tnpy.tsdrg.TreeNode",
"tnpy.model.RandomHeisenberg",
"numpy.test... | [((347, 398), 'tnpy.model.RandomHeisenberg', 'RandomHeisenberg', ([], {'N': '(4)', 'h': '(0)', 'penalty': '(0.0)', 's_target': '(0)'}), '(N=4, h=0, penalty=0.0, s_target=0)\n', (363, 398), False, 'from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg\n'), ((411, 436), 'tnpy.tsdrg.TreeNode', 'TreeNode', (['(0)', 'model.mpo[0]'], {}), '(0, model.mpo[0])\n', (419, 436), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((449, 474), 'tnpy.tsdrg.TreeNode', 'TreeNode', (['(1)', 'model.mpo[1]'], {}), '(1, model.mpo[1])\n', (457, 474), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((487, 512), 'tnpy.tsdrg.TreeNode', 'TreeNode', (['(2)', 'model.mpo[2]'], {}), '(2, model.mpo[2])\n', (495, 512), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((525, 550), 'tnpy.tsdrg.TreeNode', 'TreeNode', (['(3)', 'model.mpo[3]'], {}), '(3, model.mpo[3])\n', (533, 550), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((4728, 4781), 'tnpy.model.RandomHeisenberg', 'RandomHeisenberg', ([], {'N': '(6)', 'h': '(0.0)', 'penalty': '(0.0)', 's_target': '(0)'}), '(N=6, h=0.0, penalty=0.0, s_target=0)\n', (4744, 4781), False, 'from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg\n'), ((4802, 4838), 'tnpy.tsdrg.TSDRG', 'TSDRG', (['ordered_model.mpo'], {'chi': '(2 ** 6)'}), '(ordered_model.mpo, chi=2 ** 6)\n', (4807, 4838), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((4849, 4904), 'tnpy.model.RandomHeisenberg', 'RandomHeisenberg', ([], {'N': '(6)', 'h': '(1e-05)', 'penalty': '(0.0)', 's_target': '(0)'}), '(N=6, h=1e-05, penalty=0.0, s_target=0)\n', (4865, 4904), False, 'from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg\n'), ((4938, 4966), 'tnpy.tsdrg.TSDRG', 'TSDRG', (['model.mpo'], {'chi': '(2 ** 6)'}), '(model.mpo, chi=2 ** 6)\n', (4943, 4966), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((4990, 5003), 
'tnpy.exact_diagonalization.ExactDiagonalization', 'ED', (['model.mpo'], {}), '(model.mpo)\n', (4992, 5003), True, 'from tnpy.exact_diagonalization import ExactDiagonalization as ED\n'), ((8437, 8452), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8450, 8452), False, 'import unittest\n'), ((910, 970), 'tnpy.tsdrg.TensorTree', 'TensorTree', (['[self.node0, self.node1, self.node2, self.node3]'], {}), '([self.node0, self.node1, self.node2, self.node3])\n', (920, 970), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((2124, 2138), 'tnpy.tsdrg.TensorTree', 'TensorTree', (['[]'], {}), '([])\n', (2134, 2138), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((7975, 8100), 'tnpy.model.SpectralFoldedRandomHeisenberg', 'SpectralFoldedRandomHeisenberg', ([], {'N': 'self.model.N', 'h': 'self.model.h', 'penalty': 'self.model.penalty', 's_target': 'self.model.s_target'}), '(N=self.model.N, h=self.model.h, penalty=self\n .model.penalty, s_target=self.model.s_target)\n', (8005, 8100), False, 'from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg\n'), ((8185, 8225), 'tnpy.tsdrg.TSDRG', 'TSDRG', (['model2.mpo'], {'chi': '(2 ** self.model.N)'}), '(model2.mpo, chi=2 ** self.model.N)\n', (8190, 8225), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((1054, 1112), 'tnpy.tsdrg.TreeNode', 'TreeNode', (['(4)', 'self.node4'], {'left': 'self.node1', 'right': 'self.node2'}), '(4, self.node4, left=self.node1, right=self.node2)\n', (1062, 1112), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((3387, 3402), 'numpy.identity', 'np.identity', (['(16)'], {}), '(16)\n', (3398, 3402), True, 'import numpy as np\n'), ((3534, 3549), 'numpy.identity', 'np.identity', (['(16)'], {}), '(16)\n', (3545, 3549), True, 'import numpy as np\n'), ((3678, 3693), 'numpy.identity', 'np.identity', (['(16)'], {}), '(16)\n', (3689, 3693), True, 'import numpy as np\n'), ((6215, 6347), 'tensornetwork.ncon', 'ncon', 
(['[coarse_grained_mpo, evecs, evecs]', "[('-m1', '-m2', 1, 2), (1, '-a1'), (2, '-b1')]"], {'out_order': "['-m1', '-m2', '-a1', '-b1']"}), "([coarse_grained_mpo, evecs, evecs], [('-m1', '-m2', 1, 2), (1, '-a1'),\n (2, '-b1')], out_order=['-m1', '-m2', '-a1', '-b1'])\n", (6219, 6347), False, 'from tensornetwork import Node, ncon\n'), ((6535, 6578), 'numpy.diagflat', 'np.diagflat', (['self.ed.evals[:self.tsdrg.chi]'], {}), '(self.ed.evals[:self.tsdrg.chi])\n', (6546, 6578), True, 'import numpy as np\n'), ((6938, 6974), 'numpy.linalg.svd', 'np.linalg.svd', (['rho'], {'compute_uv': '(False)'}), '(rho, compute_uv=False)\n', (6951, 6974), True, 'import numpy as np\n'), ((6987, 7032), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ss', 's'], {'atol': '(1e-12)'}), '(ss, s, atol=1e-12)\n', (7013, 7032), True, 'import numpy as np\n'), ((7670, 7713), 'numpy.diagflat', 'np.diagflat', (['self.ed.evals[:self.tsdrg.chi]'], {}), '(self.ed.evals[:self.tsdrg.chi])\n', (7681, 7713), True, 'import numpy as np\n'), ((7831, 7874), 'numpy.diagflat', 'np.diagflat', (['self.ed.evals[:self.tsdrg.chi]'], {}), '(self.ed.evals[:self.tsdrg.chi])\n', (7842, 7874), True, 'import numpy as np\n'), ((1791, 1808), 'tnpy.tsdrg.TreeNode', 'TreeNode', (['(0)', 'None'], {}), '(0, None)\n', (1799, 1808), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((1856, 1873), 'tnpy.tsdrg.TreeNode', 'TreeNode', (['(6)', 'None'], {}), '(6, None)\n', (1864, 1873), False, 'from tnpy.tsdrg import TreeNode, TensorTree, TSDRG\n'), ((5220, 5308), 'numpy.array', 'np.array', (['[[0.25, 0, 0, 0], [0, -0.25, 0.5, 0], [0, 0.5, -0.25, 0], [0, 0, 0, 0.25]]'], {}), '([[0.25, 0, 0, 0], [0, -0.25, 0.5, 0], [0, 0.5, -0.25, 0], [0, 0, 0,\n 0.25]])\n', (5228, 5308), True, 'import numpy as np\n'), ((5928, 6106), 'tensornetwork.ncon', 'ncon', (['[self.ordered_model.mpo[3].tensor, self.ordered_model.mpo[4].tensor]', "[(-1, 1, '-a1', '-a2'), (1, -2, '-b1', '-b2')]"], {'out_order': "[-1, -2, '-a1', '-b1', 
'-a2', '-b2']"}), "([self.ordered_model.mpo[3].tensor, self.ordered_model.mpo[4].tensor],\n [(-1, 1, '-a1', '-a2'), (1, -2, '-b1', '-b2')], out_order=[-1, -2,\n '-a1', '-b1', '-a2', '-b2'])\n", (5932, 6106), False, 'from tensornetwork import Node, ncon\n'), ((5591, 5601), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5598, 5601), True, 'import numpy as np\n'), ((5610, 5620), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5617, 5620), True, 'import numpy as np\n'), ((5645, 5655), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5652, 5655), True, 'import numpy as np\n'), ((5664, 5674), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5671, 5674), True, 'import numpy as np\n'), ((571, 597), 'tnpy.model.RandomHeisenberg', 'RandomHeisenberg', ([], {'N': '(2)', 'h': '(0)'}), '(N=2, h=0)\n', (587, 597), False, 'from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg\n'), ((649, 675), 'tnpy.model.RandomHeisenberg', 'RandomHeisenberg', ([], {'N': '(3)', 'h': '(0)'}), '(N=3, h=0)\n', (665, 675), False, 'from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg\n'), ((727, 753), 'tnpy.model.RandomHeisenberg', 'RandomHeisenberg', ([], {'N': '(4)', 'h': '(0)'}), '(N=4, h=0)\n', (743, 753), False, 'from tnpy.model import RandomHeisenberg, SpectralFoldedRandomHeisenberg\n')] |
import cv2
import numpy as np
def skeleton_of_shape(img):
thn = cv2.ximgproc.thinning(img, None, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)
w = thn.shape[1]
h = thn.shape[0]
for y in range(0, h):
thn[y, w-1] = 0
thn[y, 0] = 0
for x in range(0, w):
thn[h - 1, x] = 0
thn[0, x] = 0
return thn
def endpoints(img):
skel = img.copy()
skel[skel != 0] = 1
skel = np.uint8(skel)
kernel = np.uint8([[1, 1, 1],
[1, 10, 1],
[1, 1, 1]])
src_depth = -1
filtered = cv2.filter2D(skel, src_depth, kernel)
out = np.zeros_like(skel)
out[filtered == 11] = 255
return out
def inner_nodes(img):
skel = img.copy()
skel[skel != 0] = 1
skel = np.uint8(skel)
kernel = np.uint8([[1, 5, 0],
[0, 10, 0],
[1, 5, 0]])
src_depth = -1
filtered = cv2.filter2D(skel, src_depth, kernel)
out = np.zeros_like(skel)
out[filtered == 12] = 255
out[filtered == 17] = 255
out[filtered == 22] = 255
return out
def skeleton_nodes(skel):
skel = skel.copy()
skel[skel != 0] = 1
skel = np.uint8(skel)
end = endpoints(skel)
inner = inner_nodes(skel)
res = cv2.bitwise_or(end, inner)
return res
# return np.where(filtered==11) - végpontok koordinátái
| [
"numpy.uint8",
"numpy.zeros_like",
"cv2.filter2D",
"cv2.ximgproc.thinning",
"cv2.bitwise_or"
] | [((70, 148), 'cv2.ximgproc.thinning', 'cv2.ximgproc.thinning', (['img', 'None'], {'thinningType': 'cv2.ximgproc.THINNING_ZHANGSUEN'}), '(img, None, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)\n', (91, 148), False, 'import cv2\n'), ((433, 447), 'numpy.uint8', 'np.uint8', (['skel'], {}), '(skel)\n', (441, 447), True, 'import numpy as np\n'), ((462, 506), 'numpy.uint8', 'np.uint8', (['[[1, 1, 1], [1, 10, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 10, 1], [1, 1, 1]])\n', (470, 506), True, 'import numpy as np\n'), ((587, 624), 'cv2.filter2D', 'cv2.filter2D', (['skel', 'src_depth', 'kernel'], {}), '(skel, src_depth, kernel)\n', (599, 624), False, 'import cv2\n'), ((636, 655), 'numpy.zeros_like', 'np.zeros_like', (['skel'], {}), '(skel)\n', (649, 655), True, 'import numpy as np\n'), ((783, 797), 'numpy.uint8', 'np.uint8', (['skel'], {}), '(skel)\n', (791, 797), True, 'import numpy as np\n'), ((812, 856), 'numpy.uint8', 'np.uint8', (['[[1, 5, 0], [0, 10, 0], [1, 5, 0]]'], {}), '([[1, 5, 0], [0, 10, 0], [1, 5, 0]])\n', (820, 856), True, 'import numpy as np\n'), ((937, 974), 'cv2.filter2D', 'cv2.filter2D', (['skel', 'src_depth', 'kernel'], {}), '(skel, src_depth, kernel)\n', (949, 974), False, 'import cv2\n'), ((986, 1005), 'numpy.zeros_like', 'np.zeros_like', (['skel'], {}), '(skel)\n', (999, 1005), True, 'import numpy as np\n'), ((1198, 1212), 'numpy.uint8', 'np.uint8', (['skel'], {}), '(skel)\n', (1206, 1212), True, 'import numpy as np\n'), ((1280, 1306), 'cv2.bitwise_or', 'cv2.bitwise_or', (['end', 'inner'], {}), '(end, inner)\n', (1294, 1306), False, 'import cv2\n')] |
"flabel (list of lists) unit tests."
import numpy as np
from numpy.testing import assert_equal
from la.flabel import listmap, listmap_fill
# ---------------------------------------------------------------------------
# listmap
#
# test to make sure listmap returns the same output as
#
# idx = map(list1.index, list2)
def listmap_test():
"listmap test"
list1 = range(6)
list2 = range(5)
msg = "listmap failed on list1=%s and list2=%s and ignore_unmappable=%s"
for i in range(100):
np.random.shuffle(list2)
idx1 = map(list1.index, list2)
idx2 = listmap(list1, list2)
ignore_unmappable = False
yield assert_equal, idx1, idx2, msg % (list1, list2, ignore_unmappable)
ignore_unmappable = True
yield assert_equal, idx1, idx2, msg % (list1, list2, ignore_unmappable)
def listmap_unmappable_test():
"listmap unmappable test"
msg = "listmap failed on list1=%s and list2=%s and ignore_unmappable=%s"
for i in range(100):
list1 = range(6)
list2 = range(5)
np.random.shuffle(list2)
idx1 = map(list1.index, list2)
list2 = ['unmappable #1'] + list2 + ['unmappable #2']
ignore_unmappable = True
idx2 = listmap(list1, list2, ignore_unmappable=ignore_unmappable)
yield assert_equal, idx1, idx2, msg % (list1, list2, ignore_unmappable)
# ---------------------------------------------------------------------------
# listmap_fill unit tests
def listmap_fill_test():
"listmap_fill test"
# test to make sure listmap_nofill returns the same output as
#
# idx = map(list1.index, list2)
#
# when there are no items in list2 that are not in list1
list1 = range(6)
list2 = range(5)
msg = "listmap_fill failed on list1=%s and list2=%s"
for i in range(100):
np.random.shuffle(list2)
idx1 = map(list1.index, list2)
idx2, ignore = listmap_fill(list1, list2)
yield assert_equal, idx1, idx2, msg % (list1, list2)
def listmap_fill_unmappable_test():
"listmap_fill unmappable test"
list1 = ['a', 2, 3]
list2 = ['a', 2, 3, 4]
idx, idx_unmappable = listmap_fill(list1, list2)
idx2 = [0, 1, 2, 0]
idx2_unmappable = [3]
msg = "listmap_fill failed on list1=%s and list2=%s"
yield assert_equal, idx, idx2, msg % (list1, list2)
yield assert_equal, idx_unmappable, idx2_unmappable, msg % (list1, list2)
| [
"la.flabel.listmap_fill",
"la.flabel.listmap",
"numpy.random.shuffle"
] | [((2262, 2288), 'la.flabel.listmap_fill', 'listmap_fill', (['list1', 'list2'], {}), '(list1, list2)\n', (2274, 2288), False, 'from la.flabel import listmap, listmap_fill\n'), ((552, 576), 'numpy.random.shuffle', 'np.random.shuffle', (['list2'], {}), '(list2)\n', (569, 576), True, 'import numpy as np\n'), ((631, 652), 'la.flabel.listmap', 'listmap', (['list1', 'list2'], {}), '(list1, list2)\n', (638, 652), False, 'from la.flabel import listmap, listmap_fill\n'), ((1116, 1140), 'numpy.random.shuffle', 'np.random.shuffle', (['list2'], {}), '(list2)\n', (1133, 1140), True, 'import numpy as np\n'), ((1290, 1348), 'la.flabel.listmap', 'listmap', (['list1', 'list2'], {'ignore_unmappable': 'ignore_unmappable'}), '(list1, list2, ignore_unmappable=ignore_unmappable)\n', (1297, 1348), False, 'from la.flabel import listmap, listmap_fill\n'), ((1918, 1942), 'numpy.random.shuffle', 'np.random.shuffle', (['list2'], {}), '(list2)\n', (1935, 1942), True, 'import numpy as np\n'), ((2005, 2031), 'la.flabel.listmap_fill', 'listmap_fill', (['list1', 'list2'], {}), '(list1, list2)\n', (2017, 2031), False, 'from la.flabel import listmap, listmap_fill\n')] |
import numpy as np
from ipywidgets import widgets as wdg
import matplotlib.pyplot as plt
import threading
from ipyfilechooser import FileChooser
from matplotlib.animation import FuncAnimation
import os
import plotly.graph_objects as go
import pandas as pd
import sys
import re
from scipy import constants as cts
from IPython.display import display, HTML
import time
from IPython.display import clear_output
# print('*'*60)
# print()
work_dir = os.path.join(os.path.dirname(__file__), '../')
work_dir = os.path.abspath(work_dir)
path = os.path.abspath(work_dir + '/../')
# print(work_dir)
if not work_dir in sys.path:
sys.path.insert(0, work_dir)
# print(work_dir)
from pyOSA import Yokogawa
print(sys.argv)
xlim = [ float(re.findall("\d+",sys.argv[1])[0]),
float(re.findall("\d+",sys.argv[2])[0])]
print(xlim)
if len(sys.argv)>3:
print(sys.argv[3])
DEBUG = True
else:
DEBUG = False
# ----------------------------------
# -- Setup the plot
# ----------------------------------
height = 1000
x = np.linspace(xlim[0], xlim[1], 100000)
y = np.log10((1/np.cosh((x-(700+1850)/2)/10))**2)
tr = go.Scatter(x =x, y =y)
figOSA = go.FigureWidget(data=tr)
figOSA.update_xaxes(title = 'Wavelength (nm)', range = [xlim[0], xlim[1]],
showspikes = True, spikethickness= 1)
figOSA.update_yaxes(title = 'Power (dBm)', range = [-90, 20],
showspikes = True, spikethickness= 1)
figOSA.update_layout(height = height)
# ----------------------------------
# -- Setup the UI
# ----------------------------------
class _dic2struct():
def __init__(self, d, which='sim', do_tr=True):
self._dic = d
for a, b in d.items():
setattr(self, a, _dic2struct(b) if isinstance(b, dict) else b)
def __repr__(self):
return str(list(self._dic.keys()))
dd = {}
dd['cnct'] = wdg.Checkbox(value = False, description = "Connected")
dd['freq_scale'] = wdg.Checkbox(value = False, description = "Frequency ?")
dd['ip'] = wdg.Text(value = '10.0.0.11', description = 'IP:')
dd['λ'] = wdg.IntRangeSlider(value = (xlim[0], xlim[1]),
min =xlim[0], max = xlim[1], step = 5,
description = 'λ',
continuous_update=False)
dd['pts'] = wdg.IntSlider(value = 50000,
min =10, max = 100000, step = 100,
description = 'Points:',
continuous_update=False)
dd['pts'].add_class("osa_wavelength")
dd['scan'] = wdg.ToggleButtons(options=['Single', 'Repeat', 'Stop'],
value = 'Stop',
description='Scan:',disabled=False,
button_style = 'info')
dd['scan'].add_class("osa_scan_button")
dd['trace'] = wdg.Dropdown(options=['Trace A', 'Trace B', 'Trace C', 'Trace D'],
value = 'Trace A',
description='Trace:')
dd['res'] = wdg.Dropdown(options=['Norm/Hold', 'Norm/Auto', 'Mid', 'High 1', 'High 2', 'High 3'],
description='Resolution:')
Bdwt_val = {0.02: '0.02 nm',
0.05: '0.05 nm',
0.1: '0.1 nm',
0.2: '0.2 nm',
0.5: '0.5 nm',
1: '1 nm',
2: '2 nm'}
dd['bandwidth'] = wdg.SelectionSlider(description='Bandwidth:',
options=Bdwt_val.values(),
continuous_update=False)
dd['bandwidth'].add_class("osa_bandwidth")
dd['clr'] = wdg.Button(description = 'Clear Trace',button_style = 'info',tooltip='Click me',)
dd['clr'].add_class("osa_clear")
dd['save'] = wdg.Button(description = 'Save Spectra',button_style = 'info')
dd['save'].add_class("osa_save")
dd['picker'] = FileChooser('./../')
dd['picker'].use_dir_icons = True
dd['picker'].rows = 5
dd['picker'].width = 200
ui = _dic2struct(dd)
# ----------------------------------
# -- Worker for scanning
# ----------------------------------
run_thread = True
def worker(f, instr):
while run_thread:
try:
#with Yokogawa(ip=ip) as instr:
trace = instr.trace
# x = np.linspace(600, 1700, 50001)
# y = np.log10(np.random.rand(50001)*(1/np.cosh((x-(700+1850)/2)/10))**2)
f.data[0].x = trace.lbd.values*1e9
f.data[0].y = trace.S.values
except:
print('Comunication error')
time.sleep(0.1)
#with Yokogawa(ip=ip) as instr:
trace = instr.trace
# x = np.linspace(600, 1700, 50001)
# y = np.log10(np.random.rand(50001)*(1/np.cosh((x-(700+1850)/2)/10))**2)
f.data[0].x = trace.lbd.values*1e9
f.data[0].y = trace.S.values
time.sleep(0.1)
# ----------------------------------
# -- Setup the Connectors
# ----------------------------------
connected = False
def connect(change):
global connected
global osa
ip = ui.ip.value
if change.new:
connected = True
with Yokogawa(ip=ip) as osa:
try:
para = osa.settings
except Exception as err:
if DEBUG:
print(f'Param fetching: {err}')
try:
trace = osa.trace
except Exception as err:
if DEBUG:
print(f'Trace fetching: {err}')
lbd_start = para['centwlgth'] - para['span']/2
lbd_end = para['centwlgth'] + para['span']/2
# print((1e9*lbd_start, 1e9*lbd_end))
#ax.set_xlim([1e9*lbd_start, 1e9*lbd_end])
figOSA.update_xaxes(range = [1e9*lbd_start, 1e9*lbd_end])
ui.λ.value = (1e9*lbd_start, 1e9*lbd_end)
ui.bandwidth.value = Bdwt_val[1e9*para['bdwdth']]
try:
ui.res.index = int(para['resol'])
except:
pass
try:
ui.pts.value = int(para['pts'])
except:
pass
# time.sleep(0.5)
print(traces)
figOSA.data[0].x = trace.lbd.values*1e9
figOSA.data[0].y = trace.S.values
printt('Finished Connecting')
else:
connected = False
def scan_osa(change):
global thread_osa
global run_thread
run_thread = False
ip = ui.ip.value
if connected:
# osa.scan = change.new.lower()
run_thread = False
if change.new.lower() == 'single' or change.new.lower() == 'repeat':
with Yokogawa(ip=ip) as osa:
osa.scan = change.new.lower()
run_thread = True
thread_osa = threading.Thread(target=worker, args=(figOSA, osa))
thread_osa.start()
if change.new.lower() == 'stop':
with Yokogawa(ip=ip) as osa:
osa.scan = change.new.lower()
print('Trying to kill the stuff')
run_thread = False
def select_trace(change):
ip = ui.ip.value
if connected:
with Yokogawa(ip=ip) as osa:
osa.trace = change.new.replace('Trace ', '')
def update_λ(change):
ip = ui.ip.value
if connected:
# print(change.new)
centwlgth = (change.new[1] + change.new[0])/2
span = (change.new[1] - change.new[0])
with Yokogawa(ip=ip) as osa:
para = osa.settings
para['centwlgth'] = centwlgth*1e-9
para['span'] = span*1e-9
print(para)
osa.settings = para
figOSA.update_xaxes(range = change.new)
def update_res(change):
ip = ui.ip.value
if connected:
para = osa.settings
para['resol'] = change.new
with Yokogawa(ip=ip) as osa:
osa.settings = para
def update_bdwt(change):
ip = ui.ip.value
if connected:
para = osa.settings
para['bdwdth'] = float(change.new.replace(' nm', ''))*1e-9
with Yokogawa(ip=ip) as osa:
osa.settings = para
para = osa.settings
ui.bandwidth.value = Bdwt_val[1e9*para['bdwdth']]
def update_points(change):
ip = ui.ip.value
if connected:
para = osa.settings
para['pts'] = change.new
with Yokogawa(ip=ip) as osa:
osa.settings = para
para = osa.settings
ui.pts.value = int(para['pts'])
def clear_trace(change):
figOSA.data[0].x = []
figOSA.data[0].y = []
def freq_scale(change):
xdata = figOSA.data[0].x
# ydata = figOSA.data[0].y
print(change.new)
if change.new:
newx = 1e-12 * cts.c/(xdata*1e-9)
xlabel = 'Frequency (THz)'
else:
newx = 1e9 * cts.c/(xdata*1e12)
xlabel = 'Wavelength (nm)'
figOSA.data[0].x = newx
# figOSA.data[0].y = ydata
figOSA.update_xaxes(title = xlabel, range = [newx.min(), newx.max()])
def save_data(change):
ip = ui.ip.value
fname = ui.picker.selected
if fname:
if not os.path.exists(ui.picker.selected):
with Yokogawa(ip=ip) as osa:
trace = osa.trace
trace.to_parquet(fname)
# ----------------------------------
# -- connect callbacks and traits
# ----------------------------------
ui.cnct.observe(connect, 'value')
ui.scan.observe(scan_osa,'value')
ui.trace.observe(select_trace, 'value')
ui.λ.observe(update_λ, 'value')
ui.bandwidth.observe(update_bdwt, 'value')
ui.pts.observe(update_points, 'value')
ui.res.observe(update_res, 'index')
ui.clr.on_click(clear_trace)
ui.save.on_click(save_data)
ui.freq_scale.observe(freq_scale, 'value')
# ----------------------------------
# -- Display
# ----------------------------------
box_layout = wdg.Layout(display='flex',
flex_flow='column',
flex_wrap = 'wrap',
align_content = 'stretch',
justify_content = 'center',
align_items='stretch',
width='28%')
outp_layout = wdg.Layout(display='flex',
flex_flow='column',
flex_wrap = 'wrap',
align_content = 'stretch',
justify_content = 'center',
align_items='stretch',
width='72%')
ui.picker.layout = wdg.Layout(display='flex',
flex_flow='column',
flex_wrap = 'wrap',
align_content = 'stretch',
justify_content = 'center',
align_items='stretch',
width='100%')
cc = [ui.cnct,ui.freq_scale, ui.ip,ui.scan, ui.trace, ui.res,ui.bandwidth, ui.pts,ui.λ, ui.clr,ui.save,ui.picker]
ctrl = wdg.Box(children = cc,layout = box_layout)
otp = wdg.Box(children = [figOSA], layout = outp_layout)
display(wdg.HBox([ctrl, otp]))
| [
"ipywidgets.widgets.ToggleButtons",
"plotly.graph_objects.FigureWidget",
"ipywidgets.widgets.Dropdown",
"os.path.abspath",
"pyOSA.Yokogawa",
"ipywidgets.widgets.Checkbox",
"os.path.dirname",
"os.path.exists",
"re.findall",
"numpy.linspace",
"ipywidgets.widgets.IntSlider",
"plotly.graph_objects... | [((503, 528), 'os.path.abspath', 'os.path.abspath', (['work_dir'], {}), '(work_dir)\n', (518, 528), False, 'import os\n'), ((536, 570), 'os.path.abspath', 'os.path.abspath', (["(work_dir + '/../')"], {}), "(work_dir + '/../')\n", (551, 570), False, 'import os\n'), ((1036, 1073), 'numpy.linspace', 'np.linspace', (['xlim[0]', 'xlim[1]', '(100000)'], {}), '(xlim[0], xlim[1], 100000)\n', (1047, 1073), True, 'import numpy as np\n'), ((1129, 1149), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (1139, 1149), True, 'import plotly.graph_objects as go\n'), ((1161, 1185), 'plotly.graph_objects.FigureWidget', 'go.FigureWidget', ([], {'data': 'tr'}), '(data=tr)\n', (1176, 1185), True, 'import plotly.graph_objects as go\n'), ((1851, 1901), 'ipywidgets.widgets.Checkbox', 'wdg.Checkbox', ([], {'value': '(False)', 'description': '"""Connected"""'}), "(value=False, description='Connected')\n", (1863, 1901), True, 'from ipywidgets import widgets as wdg\n'), ((1926, 1978), 'ipywidgets.widgets.Checkbox', 'wdg.Checkbox', ([], {'value': '(False)', 'description': '"""Frequency ?"""'}), "(value=False, description='Frequency ?')\n", (1938, 1978), True, 'from ipywidgets import widgets as wdg\n'), ((1995, 2041), 'ipywidgets.widgets.Text', 'wdg.Text', ([], {'value': '"""10.0.0.11"""', 'description': '"""IP:"""'}), "(value='10.0.0.11', description='IP:')\n", (2003, 2041), True, 'from ipywidgets import widgets as wdg\n'), ((2057, 2182), 'ipywidgets.widgets.IntRangeSlider', 'wdg.IntRangeSlider', ([], {'value': '(xlim[0], xlim[1])', 'min': 'xlim[0]', 'max': 'xlim[1]', 'step': '(5)', 'description': '"""λ"""', 'continuous_update': '(False)'}), "(value=(xlim[0], xlim[1]), min=xlim[0], max=xlim[1], step\n =5, description='λ', continuous_update=False)\n", (2075, 2182), True, 'from ipywidgets import widgets as wdg\n'), ((2290, 2399), 'ipywidgets.widgets.IntSlider', 'wdg.IntSlider', ([], {'value': '(50000)', 'min': '(10)', 'max': 
'(100000)', 'step': '(100)', 'description': '"""Points:"""', 'continuous_update': '(False)'}), "(value=50000, min=10, max=100000, step=100, description=\n 'Points:', continuous_update=False)\n", (2303, 2399), True, 'from ipywidgets import widgets as wdg\n'), ((2547, 2678), 'ipywidgets.widgets.ToggleButtons', 'wdg.ToggleButtons', ([], {'options': "['Single', 'Repeat', 'Stop']", 'value': '"""Stop"""', 'description': '"""Scan:"""', 'disabled': '(False)', 'button_style': '"""info"""'}), "(options=['Single', 'Repeat', 'Stop'], value='Stop',\n description='Scan:', disabled=False, button_style='info')\n", (2564, 2678), True, 'from ipywidgets import widgets as wdg\n'), ((2826, 2936), 'ipywidgets.widgets.Dropdown', 'wdg.Dropdown', ([], {'options': "['Trace A', 'Trace B', 'Trace C', 'Trace D']", 'value': '"""Trace A"""', 'description': '"""Trace:"""'}), "(options=['Trace A', 'Trace B', 'Trace C', 'Trace D'], value=\n 'Trace A', description='Trace:')\n", (2838, 2936), True, 'from ipywidgets import widgets as wdg\n'), ((3005, 3121), 'ipywidgets.widgets.Dropdown', 'wdg.Dropdown', ([], {'options': "['Norm/Hold', 'Norm/Auto', 'Mid', 'High 1', 'High 2', 'High 3']", 'description': '"""Resolution:"""'}), "(options=['Norm/Hold', 'Norm/Auto', 'Mid', 'High 1', 'High 2',\n 'High 3'], description='Resolution:')\n", (3017, 3121), True, 'from ipywidgets import widgets as wdg\n'), ((3588, 3666), 'ipywidgets.widgets.Button', 'wdg.Button', ([], {'description': '"""Clear Trace"""', 'button_style': '"""info"""', 'tooltip': '"""Click me"""'}), "(description='Clear Trace', button_style='info', tooltip='Click me')\n", (3598, 3666), True, 'from ipywidgets import widgets as wdg\n'), ((3716, 3775), 'ipywidgets.widgets.Button', 'wdg.Button', ([], {'description': '"""Save Spectra"""', 'button_style': '"""info"""'}), "(description='Save Spectra', button_style='info')\n", (3726, 3775), True, 'from ipywidgets import widgets as wdg\n'), ((3827, 3847), 'ipyfilechooser.FileChooser', 'FileChooser', 
(['"""./../"""'], {}), "('./../')\n", (3838, 3847), False, 'from ipyfilechooser import FileChooser\n'), ((9715, 9875), 'ipywidgets.widgets.Layout', 'wdg.Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""column"""', 'flex_wrap': '"""wrap"""', 'align_content': '"""stretch"""', 'justify_content': '"""center"""', 'align_items': '"""stretch"""', 'width': '"""28%"""'}), "(display='flex', flex_flow='column', flex_wrap='wrap',\n align_content='stretch', justify_content='center', align_items=\n 'stretch', width='28%')\n", (9725, 9875), True, 'from ipywidgets import widgets as wdg\n'), ((10010, 10170), 'ipywidgets.widgets.Layout', 'wdg.Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""column"""', 'flex_wrap': '"""wrap"""', 'align_content': '"""stretch"""', 'justify_content': '"""center"""', 'align_items': '"""stretch"""', 'width': '"""72%"""'}), "(display='flex', flex_flow='column', flex_wrap='wrap',\n align_content='stretch', justify_content='center', align_items=\n 'stretch', width='72%')\n", (10020, 10170), True, 'from ipywidgets import widgets as wdg\n'), ((10309, 10470), 'ipywidgets.widgets.Layout', 'wdg.Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""column"""', 'flex_wrap': '"""wrap"""', 'align_content': '"""stretch"""', 'justify_content': '"""center"""', 'align_items': '"""stretch"""', 'width': '"""100%"""'}), "(display='flex', flex_flow='column', flex_wrap='wrap',\n align_content='stretch', justify_content='center', align_items=\n 'stretch', width='100%')\n", (10319, 10470), True, 'from ipywidgets import widgets as wdg\n'), ((10711, 10750), 'ipywidgets.widgets.Box', 'wdg.Box', ([], {'children': 'cc', 'layout': 'box_layout'}), '(children=cc, layout=box_layout)\n', (10718, 10750), True, 'from ipywidgets import widgets as wdg\n'), ((10760, 10806), 'ipywidgets.widgets.Box', 'wdg.Box', ([], {'children': '[figOSA]', 'layout': 'outp_layout'}), '(children=[figOSA], layout=outp_layout)\n', (10767, 10806), True, 'from ipywidgets import widgets as 
wdg\n'), ((458, 483), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (473, 483), False, 'import os\n'), ((624, 652), 'sys.path.insert', 'sys.path.insert', (['(0)', 'work_dir'], {}), '(0, work_dir)\n', (639, 652), False, 'import sys\n'), ((10819, 10840), 'ipywidgets.widgets.HBox', 'wdg.HBox', (['[ctrl, otp]'], {}), '([ctrl, otp])\n', (10827, 10840), True, 'from ipywidgets import widgets as wdg\n'), ((4809, 4824), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4819, 4824), False, 'import time\n'), ((737, 768), 're.findall', 're.findall', (['"""\\\\d+"""', 'sys.argv[1]'], {}), "('\\\\d+', sys.argv[1])\n", (747, 768), False, 'import re\n'), ((786, 817), 're.findall', 're.findall', (['"""\\\\d+"""', 'sys.argv[2]'], {}), "('\\\\d+', sys.argv[2])\n", (796, 817), False, 'import re\n'), ((1090, 1126), 'numpy.cosh', 'np.cosh', (['((x - (700 + 1850) / 2) / 10)'], {}), '((x - (700 + 1850) / 2) / 10)\n', (1097, 1126), True, 'import numpy as np\n'), ((5079, 5094), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (5087, 5094), False, 'from pyOSA import Yokogawa\n'), ((7073, 7088), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (7081, 7088), False, 'from pyOSA import Yokogawa\n'), ((7359, 7374), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (7367, 7374), False, 'from pyOSA import Yokogawa\n'), ((7744, 7759), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (7752, 7759), False, 'from pyOSA import Yokogawa\n'), ((7973, 7988), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (7981, 7988), False, 'from pyOSA import Yokogawa\n'), ((8260, 8275), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (8268, 8275), False, 'from pyOSA import Yokogawa\n'), ((8996, 9030), 'os.path.exists', 'os.path.exists', (['ui.picker.selected'], {}), '(ui.picker.selected)\n', (9010, 9030), False, 'import os\n'), ((4491, 4506), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', 
(4501, 4506), False, 'import time\n'), ((6568, 6583), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (6576, 6583), False, 'from pyOSA import Yokogawa\n'), ((6701, 6752), 'threading.Thread', 'threading.Thread', ([], {'target': 'worker', 'args': '(figOSA, osa)'}), '(target=worker, args=(figOSA, osa))\n', (6717, 6752), False, 'import threading\n'), ((6846, 6861), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (6854, 6861), False, 'from pyOSA import Yokogawa\n'), ((9049, 9064), 'pyOSA.Yokogawa', 'Yokogawa', ([], {'ip': 'ip'}), '(ip=ip)\n', (9057, 9064), False, 'from pyOSA import Yokogawa\n')] |
"""
===========
SVM as CRF
===========
A CRF with one node is the same as a multiclass SVM.
Evaluation on iris dataset (really easy).
"""
from time import time
import numpy as np
from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split
from pystruct.models import GraphCRF
from pystruct.learners import NSlackSSVM
# Load the iris data set (150 samples, 4 features, 3 classes).
iris = load_iris()
X, y = iris.data, iris.target
# make each example into a tuple of a single feature vector and an empty edge
# list: a one-node graph with no edges, i.e. a plain multiclass problem
# expressed as a CRF.
# NOTE: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin ``int`` is the documented drop-in replacement.
X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=int)) for x in X]
Y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X_, Y)
# A one-node GraphCRF with unary-only inference trained by the n-slack
# cutting-plane solver is equivalent to a multiclass SVM.
pbl = GraphCRF(inference_method='unary')
svm = NSlackSSVM(pbl, C=100)
start = time()
svm.fit(X_train, y_train)
time_svm = time() - start
y_pred = np.vstack(svm.predict(X_test))
print("Score with pystruct crf svm: %f (took %f seconds)"
      % (np.mean(y_pred == y_test), time_svm))
| [
"sklearn.datasets.load_iris",
"sklearn.cross_validation.train_test_split",
"numpy.empty",
"time.time",
"pystruct.learners.NSlackSSVM",
"numpy.mean",
"pystruct.models.GraphCRF",
"numpy.atleast_2d"
] | [((362, 373), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (371, 373), False, 'from sklearn.datasets import load_iris\n'), ((616, 639), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X_', 'Y'], {}), '(X_, Y)\n', (632, 639), False, 'from sklearn.cross_validation import train_test_split\n'), ((647, 681), 'pystruct.models.GraphCRF', 'GraphCRF', ([], {'inference_method': '"""unary"""'}), "(inference_method='unary')\n", (655, 681), False, 'from pystruct.models import GraphCRF\n'), ((688, 710), 'pystruct.learners.NSlackSSVM', 'NSlackSSVM', (['pbl'], {'C': '(100)'}), '(pbl, C=100)\n', (698, 710), False, 'from pystruct.learners import NSlackSSVM\n'), ((721, 727), 'time.time', 'time', ([], {}), '()\n', (725, 727), False, 'from time import time\n'), ((765, 771), 'time.time', 'time', ([], {}), '()\n', (769, 771), False, 'from time import time\n'), ((497, 513), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (510, 513), True, 'import numpy as np\n'), ((515, 545), 'numpy.empty', 'np.empty', (['(0, 2)'], {'dtype': 'np.int'}), '((0, 2), dtype=np.int)\n', (523, 545), True, 'import numpy as np\n'), ((887, 912), 'numpy.mean', 'np.mean', (['(y_pred == y_test)'], {}), '(y_pred == y_test)\n', (894, 912), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 18:40:47 2020
@author: benjamin
"""
from lxml import html
import requests
import numpy as np
import pandas as pd
import datetime as dt
from time import time as t
#Uncomment each block block below one at a time to extract data from each cemetery.
#Cemetery 1:
#cemetery_name = 'sorsele-skogskyrkogard'
#blocks = np.array(['1', '2', '3', '4', '6', '7', '8', 'AL', '10', '11', '12', '13', '13ML', '14', '15'])
#pages_in_block = np.array([4, 2, 6, 4, 7, 6, 3, 1, 3, 1, 1, 3, 1, 2, 1])
#Cemetery 2:
#cemetery_name = 'sorsele-gamla-kyrkogard'
#blocks = np.array(['0'])
#pages_in_block = np.array([21])
#Cemetery 3:
#cemetery_name = 'skogskyrkogarden'
#blocks = np.array(['1', '2', '3', '4', '5', '7', '8', 'a', 'allm', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
#pages_in_block = np.array([2, 3, 3, 5, 5, 3, 2, 8, 3, 4, 4, 10, 5, 5, 6, 6])
#Cemetery 4:
#cemetery_name = 'viktoriakyrkans-kyrkogard'
#blocks = np.array(['0'])
#pages_in_block = np.array([1])
#Cemetery 5:
#cemetery_name = 'mala-gamla'
#blocks = np.array(['askf', 'aslu', 'maga', 'milu', 'urnf'])
#pages_in_block = np.array([1, 0, 11, 2, 1])
#Cemetery 6:
#cemetery_name = 'gargnas-kyrkogard'
#blocks = np.array(['1', '2', '3', '3ml'])
#pages_in_block = np.array([18, 6, 2, 1])
# ---------------------------------------------------------------------------
# Scrape every configured block of the selected cemetery and accumulate one
# row per burial (block label, name, birth, death, grave site, detail URL).
# ``cemetery_name``, ``blocks`` and ``pages_in_block`` must be defined by
# uncommenting exactly one of the cemetery sections above.
# ---------------------------------------------------------------------------
numBlocks = len(blocks)
mainURL = 'http://gravar.se/en/forsamling/mala-sorsele-pastorat/' + cemetery_name + '/0/'
domain = 'http://gravar.se'  # loop-invariant: hoisted out of the block loop
entries = []
columns = ['Block', 'Names', 'Births', 'Deaths', 'Site', 'Link']
AllData = pd.DataFrame(columns=columns)
for b in range(numBlocks):
    blockURL = mainURL + blocks[b] + '/page/'
    # Per-block accumulators.
    block_labels = np.array([])   # the cemetery block designator
    block_links = np.array([])    # URLs to individual pages
    block_names = np.array([])    # full names
    block_births = np.array([])   # birth dates ('' when missing)
    block_deaths = np.array([])   # death dates ('' when missing)
    block_sites = np.array([])    # grave sites within the cemetery block
    for i in range(pages_in_block[b]):  # extract each page of the block
        page = requests.get(blockURL + str(i + 1))
        tree = html.fromstring(page.content)
        links = tree.xpath('//ul[@class="buried"]/*/div/h2/a/@href')
        links = np.array([domain + link for link in links])  # make absolute
        names = np.array(tree.xpath('//ul[@class="buried"]/*/div/h2/a/text()'))
        # Missing dates come back as None; normalise them to empty strings
        # up front instead of patching with an elementwise ``== None`` mask.
        birth_elem = tree.xpath('//ul[@class="buried"]/*/div/p[1]/span[1]')
        births = np.array([be.text if be.text is not None else '' for be in birth_elem])
        death_elem = tree.xpath('//ul[@class="buried"]/*/div/p[1]/span[2]')
        deaths = np.array([de.text if de.text is not None else '' for de in death_elem])
        sites = np.array(tree.xpath('//ul[@class="buried"]/*/div/p[2]/span/text()'))
        labels = np.array([blocks[b]] * len(names))
        block_links = np.append(block_links, links)
        block_names = np.append(block_names, names)
        block_labels = np.append(block_labels, labels)
        block_births = np.append(block_births, births)
        block_deaths = np.append(block_deaths, deaths)
        block_sites = np.append(block_sites, sites)
    print(blocks[b], ': ', len(block_names))
    entries.append(len(block_names))
    data = np.stack([block_labels, block_names, block_births, block_deaths,
                     block_sites, block_links]).T
    df = pd.DataFrame(data, columns=columns)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported replacement (same default index behaviour).
    AllData = pd.concat([AllData, df])
#%%
begtime = t()
print('Saving...')
AllData.to_csv(cemetery_name + '.csv', encoding='utf-8-sig')
print('Completed CSV')
endtime = t()
print('Total Time: ', (endtime - begtime), ' s')
| [
"pandas.DataFrame",
"numpy.stack",
"lxml.html.fromstring",
"time.time",
"numpy.append",
"numpy.array",
"requests.get"
] | [((1479, 1555), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Block', 'Names', 'Births', 'Deaths', 'Site', 'Link']"}), "(columns=['Block', 'Names', 'Births', 'Deaths', 'Site', 'Link'])\n", (1491, 1555), True, 'import pandas as pd\n'), ((4049, 4052), 'time.time', 't', ([], {}), '()\n', (4050, 4052), True, 'from time import time as t\n'), ((4176, 4179), 'time.time', 't', ([], {}), '()\n', (4177, 4179), True, 'from time import time as t\n'), ((1731, 1743), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1739, 1743), True, 'import numpy as np\n'), ((1795, 1807), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1803, 1807), True, 'import numpy as np\n'), ((1858, 1870), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1866, 1870), True, 'import numpy as np\n'), ((1908, 1920), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1916, 1920), True, 'import numpy as np\n'), ((1959, 1971), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1967, 1971), True, 'import numpy as np\n'), ((2009, 2021), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2017, 2021), True, 'import numpy as np\n'), ((3671, 3757), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Block', 'Names', 'Births', 'Deaths', 'Site', 'Link']"}), "(data, columns=['Block', 'Names', 'Births', 'Deaths', 'Site',\n 'Link'])\n", (3683, 3757), True, 'import pandas as pd\n'), ((2207, 2236), 'requests.get', 'requests.get', (['(blockURL + pnum)'], {}), '(blockURL + pnum)\n', (2219, 2236), False, 'import requests\n'), ((2253, 2282), 'lxml.html.fromstring', 'html.fromstring', (['page.content'], {}), '(page.content)\n', (2268, 2282), False, 'from lxml import html\n'), ((2380, 2425), 'numpy.array', 'np.array', (['[(domain + link) for link in links]'], {}), '([(domain + link) for link in links])\n', (2388, 2425), True, 'import numpy as np\n'), ((2617, 2657), 'numpy.array', 'np.array', (['[be.text for be in birth_elem]'], {}), '([be.text for be in birth_elem])\n', (2625, 2657), True, 
'import numpy as np\n'), ((2823, 2863), 'numpy.array', 'np.array', (['[de.text for de in death_elem]'], {}), '([de.text for de in death_elem])\n', (2831, 2863), True, 'import numpy as np\n'), ((3104, 3133), 'numpy.append', 'np.append', (['block_links', 'links'], {}), '(block_links, links)\n', (3113, 3133), True, 'import numpy as np\n'), ((3157, 3186), 'numpy.append', 'np.append', (['block_names', 'names'], {}), '(block_names, names)\n', (3166, 3186), True, 'import numpy as np\n'), ((3211, 3242), 'numpy.append', 'np.append', (['block_labels', 'labels'], {}), '(block_labels, labels)\n', (3220, 3242), True, 'import numpy as np\n'), ((3267, 3298), 'numpy.append', 'np.append', (['block_births', 'births'], {}), '(block_births, births)\n', (3276, 3298), True, 'import numpy as np\n'), ((3323, 3354), 'numpy.append', 'np.append', (['block_deaths', 'deaths'], {}), '(block_deaths, deaths)\n', (3332, 3354), True, 'import numpy as np\n'), ((3378, 3407), 'numpy.append', 'np.append', (['block_sites', 'sites'], {}), '(block_sites, sites)\n', (3387, 3407), True, 'import numpy as np\n'), ((3567, 3662), 'numpy.stack', 'np.stack', (['[block_labels, block_names, block_births, block_deaths, block_sites,\n block_links]'], {}), '([block_labels, block_names, block_births, block_deaths,\n block_sites, block_links])\n', (3575, 3662), True, 'import numpy as np\n')] |
from sklearn.metrics.pairwise import pairwise_distances
from tensorflow.python.platform import gfile
import tensorflow as tf
import numpy as np
import detect_and_align
import argparse
import time
import cv2
import os
import datetime
import tkinter as tk
from tkinter import *
from tkinter import Message ,Text
import cv2
import os
import shutil
import csv
import numpy as np
from PIL import Image, ImageTk
import pandas as pd
import datetime
import time
import tkinter.ttk as ttk
import tkinter.font as font
#from RegistrationPage import RegistrationPage
import webbrowser
import random
class IdData:
    """Keeps track of known identities and calculates id matches"""

    def __init__(
        self, id_folder, mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, distance_treshold
    ):
        """Load all id images under ``id_folder`` and embed the detected faces.

        :param id_folder: directory with one sub-directory of images per identity
        :param mtcnn: face detection network used by detect_and_align
        :param sess: TensorFlow session running the embedding model
        :param embeddings: embedding output tensor
        :param images_placeholder: input tensor for aligned face patches
        :param phase_train_placeholder: train/inference flag tensor
        :param distance_treshold: max embedding distance counted as a match
        """
        print("Loading known identities: ", end="")
        self.distance_treshold = distance_treshold
        self.id_folder = id_folder
        self.mtcnn = mtcnn
        self.id_names = []

        # Collect all image paths, grouped by identity sub-directory.
        image_paths = []
        ids = os.listdir(os.path.expanduser(id_folder))
        for id_name in ids:
            id_dir = os.path.join(id_folder, id_name)
            # extend() instead of repeated list concatenation (O(n^2) growth).
            image_paths.extend(os.path.join(id_dir, img) for img in os.listdir(id_dir))

        print("Found %d images in id folder" % len(image_paths))
        aligned_images, id_image_paths = self.detect_id_faces(image_paths)
        feed_dict = {images_placeholder: aligned_images, phase_train_placeholder: False}
        self.embeddings = sess.run(embeddings, feed_dict=feed_dict)

        if len(id_image_paths) < 5:
            self.print_distance_table(id_image_paths)

    def detect_id_faces(self, image_paths):
        """Detect and align one face patch per id image.

        Returns ``(aligned_images, id_image_paths)`` where ``aligned_images``
        is a stacked numpy array of face patches and ``id_image_paths`` lists
        the source path of each patch.  Also fills ``self.id_names`` with the
        identity (parent directory name) of each patch.
        """
        aligned_images = []
        id_image_paths = []
        for image_path in image_paths:
            image = cv2.imread(os.path.expanduser(image_path), cv2.IMREAD_COLOR)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            face_patches, _, _ = detect_and_align.detect_faces(image, self.mtcnn)
            if len(face_patches) > 1:
                print(
                    "Warning: Found multiple faces in id image: %s" % image_path
                    + "\nMake sure to only have one face in the id images. "
                    + "If that's the case then it's a false positive detection and"
                    + " you can solve it by increasing the thresolds of the cascade network"
                )
            # extend() avoids rebuilding the list on every iteration.
            aligned_images.extend(face_patches)
            id_image_paths += [image_path] * len(face_patches)
            path = os.path.dirname(image_path)
            self.id_names += [os.path.basename(path)] * len(face_patches)
        return np.stack(aligned_images), id_image_paths

    def print_distance_table(self, id_image_paths):
        """Prints distances between id embeddings"""
        distance_matrix = pairwise_distances(self.embeddings, self.embeddings)
        image_names = [path.split("/")[-1] for path in id_image_paths]
        # The actual table printing is currently disabled (it was fenced off
        # with bare string literals); the code is kept below for reference.
        # print("Distance matrix:\n{:20}".format(""), end="")
        # [print("{:20}".format(name), end="") for name in image_names]
        # for path, distance_row in zip(image_names, distance_matrix):
        #     print("\n{:20}".format(path), end="")
        #     for distance in distance_row:
        #         print("{:20}".format("%0.3f" % distance), end="")
        #     print()

    def find_matching_ids(self, embs):
        """Match each embedding in ``embs`` against the known identities.

        Returns two parallel lists: the matched id name (or None when the
        closest known embedding is farther than ``distance_treshold``) and the
        corresponding distance (or None).
        """
        matching_ids = []
        matching_distances = []
        distance_matrix = pairwise_distances(embs, self.embeddings)
        for distance_row in distance_matrix:
            min_index = np.argmin(distance_row)
            if distance_row[min_index] < self.distance_treshold:
                matching_ids.append(self.id_names[min_index])
                matching_distances.append(distance_row[min_index])
            else:
                matching_ids.append(None)
                matching_distances.append(None)
        return matching_ids, matching_distances
def load_model(model):
    """Load a frozen TensorFlow graph from a single model file into the
    default graph.

    :param model: path to the frozen model file (``~`` is expanded)
    :raises ValueError: when the path is not a regular file
    """
    model_exp = os.path.expanduser(model)
    if not os.path.isfile(model_exp):
        raise ValueError("Specify model file, not directory!")
    print("Loading model filename: %s" % model_exp)
    with gfile.FastGFile(model_exp, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name="")
def main(model, id_folder, threshold):
    """Run the live face-recognition attendance loop.

    Loads the frozen embedding model and the MTCNN detector, embeds the known
    identities found under ``id_folder``, then reads frames from the webcam,
    matches detected faces against the known embeddings, draws the results and
    appends matched names with a timestamp to ``Attendance\\Attendance.csv``.
    Runs until the 'q' key is pressed in the OpenCV window.

    :param model: path to the frozen TensorFlow model file
    :param id_folder: directory with one sub-directory of images per identity
    :param threshold: embedding distance threshold for an id match
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)
            load_model(model)
            #load_model(args.model)
            # Tensor handles of the frozen embedding network.
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Load anchor IDs
            id_data = IdData(
                #args.id_folder[0],
                id_folder,
                mtcnn,
                sess,
                embeddings,
                images_placeholder,
                phase_train_placeholder,
                #args.threshold,
                threshold,
            )
            # NOTE(review): camera index 1 is hard-coded — confirm this is the
            # intended capture device (index 0 is the usual default).
            cap = cv2.VideoCapture(1)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            # Overlay toggles controlled by keyboard shortcuts below.
            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            col_names = ['Name','Date','Time']
            attendance = pd.DataFrame(columns = col_names)
            cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
            while True:
                start = time.time()
                _, frame = cap.read()
                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)
                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                    embs = sess.run(embeddings, feed_dict=feed_dict)
                    matching_ids, matching_distances = id_data.find_matching_ids(embs)
                    # One pass per detected face: label, record and draw it.
                    for bb, landmark, matching_id, dist in zip(
                        padded_bounding_boxes, landmarks, matching_ids, matching_distances
                    ):
                        if matching_id is None:
                            matching_id = "Unknown"
                        elif matching_distances[0] is not None:
                            #print(matching_distances)
                            # NOTE(review): this checks the FIRST face's
                            # distance for every face in the frame — probably
                            # meant to use ``dist``; confirm intent.
                            if matching_distances[0] > .70:
                                matching_id = "Unknown"
                        #else:
                        #    print("Hi %s! Distance: %1.4f" % (matching_id, dist))
                        #show_id:
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(frame, matching_id, (bb[0], bb[3]), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
                        ts = time.time()
                        date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                        timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                        aa = matching_id
                        #print("Name : ", aa)
                        #tt=str(Id)+"-"+aa
                        # Record the match in the in-memory attendance table.
                        attendance.loc[len(attendance)] = [aa,date,timeStamp]
                        #print("attendance, ", attendance)
                        ts = time.time()
                        date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                        timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                        Hour,Minute,Second=timeStamp.split(":")
                        #myCsvRow =
                        #with open('Attendance\Attendance.csv','a') as fd:
                            #fd.write(attendance)
                        # NOTE(review): the whole (growing) table is appended
                        # to the CSV for every detected face on every frame,
                        # producing many duplicate rows — confirm intended.
                        fileName="Attendance\Attendance.csv"
                        attendance.to_csv(fileName, mode='a', index=True)
                        res=attendance
                        cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                #else:
                #    print("Couldn't find a face")
                end = time.time()
                seconds = end - start
                fps = round(1 / seconds, 2)
                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
                cv2.imshow("frame", frame)
                key = cv2.waitKey(1)
                # Keyboard controls: q quits, l/b/i/f toggle overlays.
                if key == ord("q"):
                    break
                elif key == ord("l"):
                    show_landmarks = not show_landmarks
                elif key == ord("b"):
                    show_bb = not show_bb
                elif key == ord("i"):
                    show_id = not show_id
                elif key == ord("f"):
                    show_fps = not show_fps
            cap.release()
            cv2.destroyAllWindows()
"tensorflow.python.platform.gfile.FastGFile",
"numpy.argmin",
"os.path.isfile",
"cv2.rectangle",
"tensorflow.get_default_graph",
"cv2.imshow",
"os.path.join",
"pandas.DataFrame",
"sklearn.metrics.pairwise.pairwise_distances",
"cv2.cvtColor",
"detect_and_align.create_mtcnn",
"os.path.dirname",
... | [((4037, 4062), 'os.path.expanduser', 'os.path.expanduser', (['model'], {}), '(model)\n', (4055, 4062), False, 'import os\n'), ((4070, 4095), 'os.path.isfile', 'os.path.isfile', (['model_exp'], {}), '(model_exp)\n', (4084, 4095), False, 'import os\n'), ((2862, 2914), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['self.embeddings', 'self.embeddings'], {}), '(self.embeddings, self.embeddings)\n', (2880, 2914), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((3511, 3552), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['embs', 'self.embeddings'], {}), '(embs, self.embeddings)\n', (3529, 3552), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((1054, 1083), 'os.path.expanduser', 'os.path.expanduser', (['id_folder'], {}), '(id_folder)\n', (1072, 1083), False, 'import os\n'), ((1134, 1166), 'os.path.join', 'os.path.join', (['id_folder', 'id_name'], {}), '(id_folder, id_name)\n', (1146, 1166), False, 'import os\n'), ((1895, 1933), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1907, 1933), False, 'import cv2\n'), ((1967, 2015), 'detect_and_align.detect_faces', 'detect_and_align.detect_faces', (['image', 'self.mtcnn'], {}), '(image, self.mtcnn)\n', (1996, 2015), False, 'import detect_and_align\n'), ((2571, 2598), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (2586, 2598), False, 'import os\n'), ((2689, 2713), 'numpy.stack', 'np.stack', (['aligned_images'], {}), '(aligned_images)\n', (2697, 2713), True, 'import numpy as np\n'), ((3622, 3645), 'numpy.argmin', 'np.argmin', (['distance_row'], {}), '(distance_row)\n', (3631, 3645), True, 'import numpy as np\n'), ((4166, 4198), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['model_exp', '"""rb"""'], {}), "(model_exp, 'rb')\n", (4181, 4198), False, 'from tensorflow.python.platform import gfile\n'), ((4229, 4242), 
'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (4240, 4242), True, 'import tensorflow as tf\n'), ((4303, 4342), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (4322, 4342), True, 'import tensorflow as tf\n'), ((4504, 4516), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4514, 4516), True, 'import tensorflow as tf\n'), ((4574, 4615), 'detect_and_align.create_mtcnn', 'detect_and_align.create_mtcnn', (['sess', 'None'], {}), '(sess, None)\n', (4603, 4615), False, 'import detect_and_align\n'), ((5314, 5333), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (5330, 5333), False, 'import cv2\n'), ((5589, 5620), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'col_names'}), '(columns=col_names)\n', (5601, 5620), True, 'import pandas as pd\n'), ((5635, 5678), 'cv2.namedWindow', 'cv2.namedWindow', (['"""frame"""', 'cv2.WINDOW_NORMAL'], {}), "('frame', cv2.WINDOW_NORMAL)\n", (5650, 5678), False, 'import cv2\n'), ((9670, 9693), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9691, 9693), False, 'import cv2\n'), ((1825, 1855), 'os.path.expanduser', 'os.path.expanduser', (['image_path'], {}), '(image_path)\n', (1843, 1855), False, 'import os\n'), ((4466, 4476), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4474, 4476), True, 'import tensorflow as tf\n'), ((5727, 5738), 'time.time', 'time.time', ([], {}), '()\n', (5736, 5738), False, 'import time\n'), ((5897, 5940), 'detect_and_align.detect_faces', 'detect_and_align.detect_faces', (['frame', 'mtcnn'], {}), '(frame, mtcnn)\n', (5926, 5940), False, 'import detect_and_align\n'), ((8855, 8866), 'time.time', 'time.time', ([], {}), '()\n', (8864, 8866), False, 'import time\n'), ((9168, 9194), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (9178, 9194), False, 'import cv2\n'), ((9218, 9232), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9229, 9232), False, 'import 
cv2\n'), ((1208, 1233), 'os.path.join', 'os.path.join', (['id_dir', 'img'], {}), '(id_dir, img)\n', (1220, 1233), False, 'import os\n'), ((2629, 2651), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2645, 2651), False, 'import os\n'), ((4715, 4737), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4735, 4737), True, 'import tensorflow as tf\n'), ((4793, 4815), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4813, 4815), True, 'import tensorflow as tf\n'), ((4889, 4911), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4909, 4911), True, 'import tensorflow as tf\n'), ((6019, 6041), 'numpy.stack', 'np.stack', (['face_patches'], {}), '(face_patches)\n', (6027, 6041), True, 'import numpy as np\n'), ((1245, 1263), 'os.listdir', 'os.listdir', (['id_dir'], {}), '(id_dir)\n', (1255, 1263), False, 'import os\n'), ((7039, 7132), 'cv2.putText', 'cv2.putText', (['frame', 'matching_id', '(bb[0], bb[3])', 'font', '(1)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), '(frame, matching_id, (bb[0], bb[3]), font, 1, (255, 255, 255), 1,\n cv2.LINE_AA)\n', (7050, 7132), False, 'import cv2\n'), ((7158, 7169), 'time.time', 'time.time', ([], {}), '()\n', (7167, 7169), False, 'import time\n'), ((7647, 7658), 'time.time', 'time.time', ([], {}), '()\n', (7656, 7658), False, 'import time\n'), ((8263, 8331), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(bb[0], bb[1])', '(bb[2], bb[3])', '(255, 0, 0)', '(2)'], {}), '(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)\n', (8276, 8331), False, 'import cv2\n'), ((7201, 7236), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (7232, 7236), False, 'import datetime\n'), ((7294, 7329), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (7325, 7329), False, 'import datetime\n'), ((7690, 7725), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', 
(['ts'], {}), '(ts)\n', (7721, 7725), False, 'import datetime\n'), ((7783, 7818), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (7814, 7818), False, 'import datetime\n'), ((8695, 8757), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'top_left', 'bottom_right', '(255, 0, 255)', '(2)'], {}), '(frame, top_left, bottom_right, (255, 0, 255), 2)\n', (8708, 8757), False, 'import cv2\n')] |
"""PyBoxLib layout class."""
import numpy as np
import fboxlib.fcboxlib as fcboxlib
class layout():
  """BoxLib layout wrapping an fcboxlib layout pointer."""

  def __init__(self, boxarray=None, cptr=None):
    """Wrap an existing layout pointer, or build one from *boxarray*."""
    if cptr:
      self.cptr = cptr
      return
    periodic_mask = np.ones(boxarray.dim, np.int32)
    self.cptr = fcboxlib.layout_create_from_boxarray(boxarray.cptr, periodic_mask)

  @property
  def nboxes(self):
    """Total number of boxes in the layout."""
    return fcboxlib.layout_nboxes(self.cptr)

  @property
  def local_boxes(self):
    """Box numbers (1-based) that are local to the calling process."""
    candidates = range(1, self.nboxes + 1)
    return [box for box in candidates if self.is_local(box)]

  def is_local(self, n):
    """Return whether box *n* is local to this process."""
    return fcboxlib.layout_local(self.cptr, n)

  def get_box(self, n):
    """Return box *n* (1-based), or None when *n* is out of range."""
    if not 1 <= n <= self.nboxes:
      return None
    return fcboxlib.layout_get_box(self.cptr, n)

  def echo(self):
    """Print the layout via the Fortran side."""
    fcboxlib.layout_print(self.cptr)
| [
"fboxlib.fcboxlib.layout_local",
"numpy.ones",
"fboxlib.fcboxlib.layout_print",
"fboxlib.fcboxlib.layout_get_box",
"fboxlib.fcboxlib.layout_nboxes",
"fboxlib.fcboxlib.layout_create_from_boxarray"
] | [((439, 472), 'fboxlib.fcboxlib.layout_nboxes', 'fcboxlib.layout_nboxes', (['self.cptr'], {}), '(self.cptr)\n', (461, 472), True, 'import fboxlib.fcboxlib as fcboxlib\n'), ((657, 692), 'fboxlib.fcboxlib.layout_local', 'fcboxlib.layout_local', (['self.cptr', 'n'], {}), '(self.cptr, n)\n', (678, 692), True, 'import fboxlib.fcboxlib as fcboxlib\n'), ((846, 878), 'fboxlib.fcboxlib.layout_print', 'fcboxlib.layout_print', (['self.cptr'], {}), '(self.cptr)\n', (867, 878), True, 'import fboxlib.fcboxlib as fcboxlib\n'), ((246, 277), 'numpy.ones', 'np.ones', (['boxarray.dim', 'np.int32'], {}), '(boxarray.dim, np.int32)\n', (253, 277), True, 'import numpy as np\n'), ((297, 355), 'fboxlib.fcboxlib.layout_create_from_boxarray', 'fcboxlib.layout_create_from_boxarray', (['boxarray.cptr', 'pmask'], {}), '(boxarray.cptr, pmask)\n', (333, 355), True, 'import fboxlib.fcboxlib as fcboxlib\n'), ((765, 802), 'fboxlib.fcboxlib.layout_get_box', 'fcboxlib.layout_get_box', (['self.cptr', 'n'], {}), '(self.cptr, n)\n', (788, 802), True, 'import fboxlib.fcboxlib as fcboxlib\n')] |
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from boosting_decision_making import defect_type_model, custom_defect_type_model
from boosting_decision_making import custom_boosting_decision_maker, boosting_decision_maker
from commons.object_saving.object_saver import ObjectSaver
import logging
import numpy as np
import os
logger = logging.getLogger("analyzerApp.modelChooser")
class ModelChooser:
    """Selects between globally trained models and per-project custom models.

    A project-specific custom model (stored via the object saver) is tried
    with probability ``custom_model_prob``; otherwise — or when loading the
    custom model fails — the global model is used.
    """

    def __init__(self, app_config=None, search_cfg=None):
        """
        :param app_config: application configuration dict
        :param search_cfg: search configuration dict with model folder settings
        """
        # Avoid mutable default arguments: a shared ``{}`` default would be
        # the same dict object across every instance of the class.
        self.app_config = app_config if app_config is not None else {}
        self.search_cfg = search_cfg if search_cfg is not None else {}
        self.object_saver = ObjectSaver(self.app_config)
        # Maps a model folder prefix to the custom model class used to load it.
        self.model_folder_mapping = {
            "defect_type_model/": custom_defect_type_model.CustomDefectTypeModel,
            "suggestion_model/": custom_boosting_decision_maker.CustomBoostingDecisionMaker,
            "auto_analysis_model/": custom_boosting_decision_maker.CustomBoostingDecisionMaker
        }
        self.initialize_global_models()

    def initialize_global_models(self):
        """Load the globally trained models configured in ``search_cfg``.

        A model whose configured folder is blank is stored as None."""
        self.global_models = {}
        for model_name, folder, class_to_use in [
                ("defect_type_model/",
                 self.search_cfg["GlobalDefectTypeModelFolder"], defect_type_model.DefectTypeModel),
                ("suggestion_model/",
                 self.search_cfg["SuggestBoostModelFolder"], boosting_decision_maker.BoostingDecisionMaker),
                ("auto_analysis_model/",
                 self.search_cfg["BoostModelFolder"], boosting_decision_maker.BoostingDecisionMaker)]:
            if folder.strip():
                self.global_models[model_name] = class_to_use(folder=folder)
            else:
                self.global_models[model_name] = None

    def choose_model(self, project_id, model_name_folder, custom_model_prob=1.0):
        """Return the model to use for ``project_id``.

        With probability ``custom_model_prob`` the project's stored custom
        model is tried; the global model is returned when no custom model
        exists or loading it raises."""
        model = self.global_models[model_name_folder]
        prob_for_model = np.random.uniform()
        if prob_for_model > custom_model_prob:
            return model
        folders = self.object_saver.get_folder_objects(project_id, model_name_folder)
        if len(folders):
            try:
                model = self.model_folder_mapping[model_name_folder](
                    self.app_config, project_id, folder=folders[0])
            except Exception as err:
                # Fall back to the global model when the custom one cannot be
                # loaded; keep the error for diagnostics.
                logger.error(err)
        return model

    def delete_old_model(self, model_name, project_id):
        """Delete stored custom model folders named after ``model_name``.

        Returns the number of deleted model folders."""
        all_folders = self.object_saver.get_folder_objects(
            project_id, "%s/" % model_name)
        deleted_models = 0
        for folder in all_folders:
            if os.path.basename(
                    folder.strip("/").strip("\\")).startswith(model_name):
                deleted_models += self.object_saver.remove_folder_objects(project_id, folder)
        return deleted_models

    def delete_all_custom_models(self, project_id):
        """Delete every type of stored custom model for ``project_id``."""
        for model_name_folder in self.model_folder_mapping:
            self.delete_old_model(model_name_folder.strip("/").strip("\\"), project_id)

    def get_model_info(self, model_name, project_id):
        """Return the folder of the stored custom model, or "" when absent."""
        all_folders = self.object_saver.get_folder_objects(
            project_id, "%s/" % model_name)
        return all_folders[0] if len(all_folders) else ""
| [
"commons.object_saving.object_saver.ObjectSaver",
"numpy.random.uniform",
"logging.getLogger"
] | [((868, 913), 'logging.getLogger', 'logging.getLogger', (['"""analyzerApp.modelChooser"""'], {}), "('analyzerApp.modelChooser')\n", (885, 913), False, 'import logging\n'), ((1093, 1121), 'commons.object_saving.object_saver.ObjectSaver', 'ObjectSaver', (['self.app_config'], {}), '(self.app_config)\n', (1104, 1121), False, 'from commons.object_saving.object_saver import ObjectSaver\n'), ((2352, 2371), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2369, 2371), True, 'import numpy as np\n')] |
from napari_pssr import (
train_pssr_widget,
predict_pssr_widget,
)
import numpy as np
def test_train_pssr_widget(make_napari_viewer, capsys):
    """Smoke test: the training widget accepts a freshly added image layer."""
    viewer = make_napari_viewer()
    viewer.add_image(np.random.random((100, 100)))
    widget = train_pssr_widget()
    widget(viewer.layers[0])
def test_predict_pssr_widget(make_napari_viewer, capsys):
    """Smoke test: the prediction widget accepts a freshly added image layer."""
    viewer = make_napari_viewer()
    viewer.add_image(np.random.random((100, 100)))
    widget = predict_pssr_widget()
    widget(viewer.layers[0])
| [
"napari_pssr.predict_pssr_widget",
"numpy.random.random",
"napari_pssr.train_pssr_widget"
] | [((263, 282), 'napari_pssr.train_pssr_widget', 'train_pssr_widget', ([], {}), '()\n', (280, 282), False, 'from napari_pssr import train_pssr_widget, predict_pssr_widget\n'), ((486, 507), 'napari_pssr.predict_pssr_widget', 'predict_pssr_widget', ([], {}), '()\n', (505, 507), False, 'from napari_pssr import train_pssr_widget, predict_pssr_widget\n'), ((216, 244), 'numpy.random.random', 'np.random.random', (['(100, 100)'], {}), '((100, 100))\n', (232, 244), True, 'import numpy as np\n'), ((439, 467), 'numpy.random.random', 'np.random.random', (['(100, 100)'], {}), '((100, 100))\n', (455, 467), True, 'import numpy as np\n')] |
import gym
import numpy as np
from collections import deque
import sys
class Agent:
    """Tabular Sarsamax (Q-learning) agent for FrozenLake-style grid worlds."""

    def __init__(self, Q, env, mode, eps=1.0, alpha=0.01, gamma=1.0):
        """Store hyper-parameters; episode count and learning rate are chosen
        from the registered FrozenLake-v3 slipperiness and the grid size."""
        self.env = env
        self.Q = Q
        self.eps_min = 0.00001
        self.eps = eps
        self.gamma = gamma
        self.shape = int(np.sqrt(self.env.nS))
        # Look the registry flag up once instead of once per branch.
        slippery = gym.envs.registration.registry.env_specs['FrozenLake-v3']._kwargs['is_slippery']
        if slippery and self.shape == 4:
            self.num_episodes, self.alpha = (100000, 0.01)
        elif slippery and self.shape == 8:
            self.num_episodes, self.alpha = (100000, 0.05)
        else:
            self.num_episodes, self.alpha = 20000, alpha

    def update_Q(self, Qsa, Qsa_next, reward, alpha, gamma):
        """One temporal-difference step of Q toward reward + gamma * Qsa_next."""
        td_target = reward + gamma * Qsa_next
        return Qsa + alpha * (td_target - Qsa)

    def exp_decay(self, i_episode):
        """Exponentially decayed epsilon, clipped from below at eps_min."""
        decayed = self.eps ** i_episode
        return max(decayed, self.eps_min)

    def epsilon_greedy_probs(self, Q_s, i_episode, eps=None):
        """Action probabilities of an epsilon-greedy policy over Q_s.

        When *eps* is given it overrides the decayed schedule."""
        epsilon = eps if eps is not None else self.exp_decay(i_episode)
        nA = self.env.nA
        policy_s = np.full(nA, epsilon / nA)
        greedy_action = np.argmax(Q_s)
        policy_s[greedy_action] = 1 - epsilon + epsilon / nA
        return policy_s

    def select_action(self, Q_s):
        """Greedy action for state *Q_s*."""
        return np.argmax(self.Q[Q_s])

    def learn(self, plot_every=100):
        """Train with Sarsamax for ``self.num_episodes`` episodes, printing a
        running average reward and, finally, the estimated policy grid."""
        tmp_scores = deque(maxlen=plot_every)
        scores = deque(maxlen=self.num_episodes)
        for i_episode in range(1, self.num_episodes + 1):
            if i_episode % 100 == 0:
                progress = "\rEpisode {}/{} || average reward {}".format(
                    i_episode, self.num_episodes, np.mean(tmp_scores))
                print(progress, end="")
                sys.stdout.flush()
            score = 0
            state = self.env.reset()
            while True:
                policy_s = self.epsilon_greedy_probs(self.Q[state], i_episode, None)
                action = np.random.choice(np.arange(self.env.nA), p=policy_s)
                next_state, reward, done, info = self.env.step(action)
                score += reward
                # Sarsamax: bootstrap from the max over next-state actions,
                # no sampling of the next action.
                best_next = np.max(self.Q[next_state])
                self.Q[state][action] = self.update_Q(
                    self.Q[state][action], best_next, reward, self.alpha, self.gamma)
                state = next_state
                if done:
                    tmp_scores.append(score)
                    break
            if i_episode % plot_every == 0:
                scores.append(np.mean(tmp_scores))
        self.print_policy()
        return

    def print_policy(self):
        """Print the greedy policy as a grid (unvisited states shown as -1)."""
        flat_policy = [np.argmax(self.Q[key]) if key in self.Q else -1
                       for key in np.arange(self.env.nS)]
        policy_sarsamax = np.array(flat_policy).reshape((self.shape, self.shape))
        print("\nEstimated Optimal Policy (UP = 3, RIGHT = 2, DOWN = 1, LEFT = 0):")
        print(policy_sarsamax)
| [
"numpy.argmax",
"numpy.ones",
"numpy.max",
"numpy.mean",
"sys.stdout.flush",
"numpy.arange",
"collections.deque",
"numpy.sqrt"
] | [((1342, 1364), 'numpy.argmax', 'np.argmax', (['self.Q[Q_s]'], {}), '(self.Q[Q_s])\n', (1351, 1364), True, 'import numpy as np\n'), ((1446, 1470), 'collections.deque', 'deque', ([], {'maxlen': 'plot_every'}), '(maxlen=plot_every)\n', (1451, 1470), False, 'from collections import deque\n'), ((1488, 1519), 'collections.deque', 'deque', ([], {'maxlen': 'self.num_episodes'}), '(maxlen=self.num_episodes)\n', (1493, 1519), False, 'from collections import deque\n'), ((305, 325), 'numpy.sqrt', 'np.sqrt', (['self.env.nS'], {}), '(self.env.nS)\n', (312, 325), True, 'import numpy as np\n'), ((1210, 1224), 'numpy.argmax', 'np.argmax', (['Q_s'], {}), '(Q_s)\n', (1219, 1224), True, 'import numpy as np\n'), ((1148, 1168), 'numpy.ones', 'np.ones', (['self.env.nA'], {}), '(self.env.nA)\n', (1155, 1168), True, 'import numpy as np\n'), ((1783, 1801), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1799, 1801), False, 'import sys\n'), ((2014, 2036), 'numpy.arange', 'np.arange', (['self.env.nA'], {}), '(self.env.nA)\n', (2023, 2036), True, 'import numpy as np\n'), ((2306, 2332), 'numpy.max', 'np.max', (['self.Q[next_state]'], {}), '(self.Q[next_state])\n', (2312, 2332), True, 'import numpy as np\n'), ((2629, 2648), 'numpy.mean', 'np.mean', (['tmp_scores'], {}), '(tmp_scores)\n', (2636, 2648), True, 'import numpy as np\n'), ((1737, 1756), 'numpy.mean', 'np.mean', (['tmp_scores'], {}), '(tmp_scores)\n', (1744, 1756), True, 'import numpy as np\n'), ((2760, 2782), 'numpy.argmax', 'np.argmax', (['self.Q[key]'], {}), '(self.Q[key])\n', (2769, 2782), True, 'import numpy as np\n'), ((2857, 2879), 'numpy.arange', 'np.arange', (['self.env.nS'], {}), '(self.env.nS)\n', (2866, 2879), True, 'import numpy as np\n')] |
import os
import rnnSMAP
from rnnSMAP import runTrainLSTM
from rnnSMAP import runTestLSTM
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.stats as stats
import matplotlib
import matplotlib.gridspec as gridspec
import imp
# Flat analysis script: train/test LSTM soil-moisture models over a sweep of
# dropout rates (drLst), then plot confidence/error diagnostics.  Which stages
# run is controlled by the strings appended to `doOpt` below.
imp.reload(rnnSMAP)
rnnSMAP.reload()
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 10})
#################################################
# intervals temporal test
doOpt = []
# doOpt.append('train')
# doOpt.append('test')
# doOpt.append('loadData')
# doOpt.append('plotComb')
# doOpt.append('plotConf')
# doOpt.append('plotMap')
# doOpt.append('plotBox')
doOpt.append('plotVal')
# doOpt.append('plotVS')
# Dropout sweep: 0.1 .. 0.9 in steps of 0.1, with zero-padded string labels.
rootDB = rnnSMAP.kPath['DB_L3_NA']
rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
drLst = np.arange(0.1, 1, 0.1)
drStrLst = ["%02d" % (x*100) for x in drLst]
testName = 'CONUSv2f1'
yrLst = [2016]
saveFolder = os.path.join(rnnSMAP.kPath['dirResult'], 'paperSigma')
legLst = list()
for dr in drLst:
    legLst.append(str(dr))
#################################################
# Stage: train one model per dropout rate (round-robin over 3 GPUs).
if 'train' in doOpt:
    opt = rnnSMAP.classLSTM.optLSTM(
        rootDB=rootDB,
        rootOut=rootOut,
        syr=2015, eyr=2015,
        var='varLst_Forcing', varC='varConstLst_Noah',
        train='CONUSv2f1', dr=0.5, modelOpt='relu',
        model='cudnn', loss='sigma',
    )
    for k in range(0, len(drLst)):
        opt['dr'] = drLst[k]
        opt['out'] = 'CONUSv2f1_y15_Forcing_dr'+drStrLst[k]
        cudaID = k % 3
        runTrainLSTM.runCmdLine(
            opt=opt, cudaID=cudaID, screenName=opt['out'])
#################################################
# Stage: run temporal test for each trained model.
if 'test' in doOpt:
    rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
    rootDB = rnnSMAP.kPath['DB_L3_NA']
    for k in range(0, len(drLst)):
        out = 'CONUSv2f1_y15_Forcing_dr'+drStrLst[k]
        cudaID = k % 3
        runTestLSTM.runCmdLine(
            rootDB=rootDB, rootOut=rootOut, out=out, testName=testName,
            yrLst=yrLst, cudaID=cudaID, screenName=out)
#################################################
# Stage: load predictions and compute error / sigma / confidence statistics.
if 'loadData' in doOpt:
    rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
    rootDB = rnnSMAP.kPath['DB_L3_NA']
    predField = 'LSTM'
    targetField = 'SMAP'
    dsLst = list()
    statErrLst = list()
    statSigmaLst = list()
    statConfLst = list()
    for k in range(0, len(drLst)):
        # dr == 0.5 was trained under the unsuffixed output name.
        if drLst[k] == 0.5:
            out = 'CONUSv2f1_y15_Forcing'
        else:
            out = 'CONUSv2f1_y15_Forcing_dr'+drStrLst[k]
        ds = rnnSMAP.classDB.DatasetPost(
            rootDB=rootDB, subsetName=testName, yrLst=yrLst)
        ds.readData(var='SMAP_AM', field='SMAP')
        ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
        statErr = ds.statCalError(predField='LSTM', targetField='SMAP')
        statSigma = ds.statCalSigma(field='LSTM')
        statConf = ds.statCalConf(predField='LSTM', targetField='SMAP')
        dsLst.append(ds)
        statErrLst.append(statErr)
        statSigmaLst.append(statSigma)
        statConfLst.append(statConf)
#################################################
# Stage: CDF of confidence for sigmaX / sigmaMC / combined sigma.
if 'plotConf' in doOpt:
    fig, axes = plt.subplots(ncols=3, figsize=(9, 4))
    confXLst = list()
    confMCLst = list()
    confLst = list()
    for k in range(0, len(drLst)):
        statConf = statConfLst[k]
        confXLst.append(statConf.conf_sigmaX)
        confMCLst.append(statConf.conf_sigmaMC)
        confLst.append(statConf.conf_sigma)
    rnnSMAP.funPost.plotCDF(confXLst, ax=axes[0], legendLst=legLst)
    axes[0].set_title('sigmaX')
    rnnSMAP.funPost.plotCDF(confMCLst, ax=axes[1], legendLst=legLst)
    axes[1].set_title('sigmaMC')
    rnnSMAP.funPost.plotCDF(confLst, ax=axes[2], legendLst=legLst)
    axes[2].set_title('sigmaComb')
    plt.tight_layout()
    fig.show()
    saveFile = os.path.join(saveFolder, 'CONUS_temp_dr_conf.png')
    fig.savefig(saveFile, dpi=100)
#################################################
# Stage: box plots of selected error / sigma statistics across dropout rates.
if 'plotBox' in doOpt:
    data = list()
    # strSigmaLst = ['sigmaMC', 'sigmaX', 'sigma']
    strSigmaLst = []
    # strErrLst = ['ubRMSE', 'Bias']
    strErrLst = ['ubRMSE']
    # labelC = [r'$\sigma_{mc}$', r'$\sigma_{x}$',
    #           r'$\sigma_{comb}$', 'ubRMSE', 'Bias']
    labelC = ['ubRMSE']
    for strSigma in strSigmaLst:
        temp = list()
        for k in range(0, len(drLst)):
            statSigma = statSigmaLst[k]
            temp.append(getattr(statSigma, strSigma))
        data.append(temp)
    for strErr in strErrLst:
        temp = list()
        for k in range(0, len(drLst)):
            statErr = statErrLst[k]
            temp.append(getattr(statErr, strErr))
        data.append(temp)
    fig = rnnSMAP.funPost.plotBox(
        data, labelS=legLst, labelC=labelC,
        colorLst=plt.cm.jet(drLst), figsize=(4, 4), sharey=False)
    # fig.subplots_adjust(wspace=0.5)
    plt.tight_layout()
    saveFile = os.path.join(saveFolder, 'CONUS_temp_dr_box')
    fig.savefig(saveFile, dpi=100)
#################################################
# Stage: three-panel figure — error boxes, confidence CDFs, and a distance
# score measuring how far each confidence CDF is from the 1-to-1 line.
if 'plotVal' in doOpt:
    confLst = list()
    distLst = list()
    errLst = list()
    for k in range(0, len(drLst)):
        statConf = statConfLst[k]
        confLst.append(statConf.conf_sigma)
        errLst.append(getattr(statErrLst[k], 'ubRMSE'))
        xSort = rnnSMAP.funPost.flatData(statConf.conf_sigma)
        yRank = np.arange(len(xSort))/float(len(xSort)-1)
        # rmse = np.sqrt(((xSort - yRank) ** 2).mean())
        # Integrate the max deviation from the 1-to-1 line over 0.01 bins.
        dist = 0
        dbin = 0.01
        for xbin in np.arange(0, 1, dbin):
            ind = (xSort > xbin) & (xSort <= xbin+dbin)
            temp = np.max(np.abs(xSort[ind] - yRank[ind]))
            if not np.isnan(temp):
                dist = dist+temp*dbin
        distLst.append(dist)
    fig,axes = plt.subplots(1, 3, figsize=(12, 4))
    ax = axes[0]
    cLst = plt.cm.jet(drLst)
    bp = ax.boxplot(errLst, patch_artist=True, notch=True, showfliers=False)
    for patch, color in zip(bp['boxes'], cLst):
        patch.set_facecolor(color)
    ax.set_xticks([])
    ax.set_ylabel('ubRMSE')
    ax.set_xlabel('dr')
    ax.set_title('Model Error')
    ax.legend(bp['boxes'], legLst, loc='center left', bbox_to_anchor=(1, 0.5))
    ax = axes[1]
    rnnSMAP.funPost.plotCDF(
        confLst, ax=ax, legendLst=None, showDiff=None,
        xlabel=r'$P_{ee}$', ylabel=None)
    ax.set_title(r'CDF of $p_{comb}$')
    ax = axes[2]
    ax.plot(drLst, distLst, marker='*')
    ax.set_ylabel(r'd($p_{comb}$, 1-to-1)')
    ax.set_xlabel('dr')
    ax.set_title(r'Uncertainty Quality')
    plt.tight_layout()
    fig.subplots_adjust(wspace=0.5)
    saveFile = os.path.join(saveFolder, 'drVal')
    fig.savefig(saveFile, dpi=100)
    fig.savefig(saveFile+'.eps')
    fig.show()
| [
"imp.reload",
"matplotlib.pyplot.tight_layout",
"numpy.abs",
"matplotlib.pyplot.cm.jet",
"matplotlib.rcParams.update",
"rnnSMAP.runTestLSTM.runCmdLine",
"rnnSMAP.funPost.flatData",
"rnnSMAP.reload",
"matplotlib.pyplot.subplots",
"rnnSMAP.runTrainLSTM.runCmdLine",
"numpy.isnan",
"rnnSMAP.classL... | [((252, 271), 'imp.reload', 'imp.reload', (['rnnSMAP'], {}), '(rnnSMAP)\n', (262, 271), False, 'import imp\n'), ((272, 288), 'rnnSMAP.reload', 'rnnSMAP.reload', ([], {}), '()\n', (286, 288), False, 'import rnnSMAP\n'), ((290, 335), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (316, 335), False, 'import matplotlib\n'), ((336, 386), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'lines.linewidth': 2}"], {}), "({'lines.linewidth': 2})\n", (362, 386), False, 'import matplotlib\n'), ((387, 439), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'lines.markersize': 10}"], {}), "({'lines.markersize': 10})\n", (413, 439), False, 'import matplotlib\n'), ((843, 865), 'numpy.arange', 'np.arange', (['(0.1)', '(1)', '(0.1)'], {}), '(0.1, 1, 0.1)\n', (852, 865), True, 'import numpy as np\n'), ((962, 1016), 'os.path.join', 'os.path.join', (["rnnSMAP.kPath['dirResult']", '"""paperSigma"""'], {}), "(rnnSMAP.kPath['dirResult'], 'paperSigma')\n", (974, 1016), False, 'import os\n'), ((1160, 1366), 'rnnSMAP.classLSTM.optLSTM', 'rnnSMAP.classLSTM.optLSTM', ([], {'rootDB': 'rootDB', 'rootOut': 'rootOut', 'syr': '(2015)', 'eyr': '(2015)', 'var': '"""varLst_Forcing"""', 'varC': '"""varConstLst_Noah"""', 'train': '"""CONUSv2f1"""', 'dr': '(0.5)', 'modelOpt': '"""relu"""', 'model': '"""cudnn"""', 'loss': '"""sigma"""'}), "(rootDB=rootDB, rootOut=rootOut, syr=2015, eyr=\n 2015, var='varLst_Forcing', varC='varConstLst_Noah', train='CONUSv2f1',\n dr=0.5, modelOpt='relu', model='cudnn', loss='sigma')\n", (1185, 1366), False, 'import rnnSMAP\n'), ((3203, 3240), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(3)', 'figsize': '(9, 4)'}), '(ncols=3, figsize=(9, 4))\n', (3215, 3240), True, 'import matplotlib.pyplot as plt\n'), ((3519, 3582), 'rnnSMAP.funPost.plotCDF', 'rnnSMAP.funPost.plotCDF', (['confXLst'], {'ax': 'axes[0]', 'legendLst': 'legLst'}), '(confXLst, 
ax=axes[0], legendLst=legLst)\n', (3542, 3582), False, 'import rnnSMAP\n'), ((3619, 3683), 'rnnSMAP.funPost.plotCDF', 'rnnSMAP.funPost.plotCDF', (['confMCLst'], {'ax': 'axes[1]', 'legendLst': 'legLst'}), '(confMCLst, ax=axes[1], legendLst=legLst)\n', (3642, 3683), False, 'import rnnSMAP\n'), ((3721, 3783), 'rnnSMAP.funPost.plotCDF', 'rnnSMAP.funPost.plotCDF', (['confLst'], {'ax': 'axes[2]', 'legendLst': 'legLst'}), '(confLst, ax=axes[2], legendLst=legLst)\n', (3744, 3783), False, 'import rnnSMAP\n'), ((3823, 3841), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3839, 3841), True, 'import matplotlib.pyplot as plt\n'), ((3872, 3922), 'os.path.join', 'os.path.join', (['saveFolder', '"""CONUS_temp_dr_conf.png"""'], {}), "(saveFolder, 'CONUS_temp_dr_conf.png')\n", (3884, 3922), False, 'import os\n'), ((4918, 4936), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4934, 4936), True, 'import matplotlib.pyplot as plt\n'), ((4952, 4997), 'os.path.join', 'os.path.join', (['saveFolder', '"""CONUS_temp_dr_box"""'], {}), "(saveFolder, 'CONUS_temp_dr_box')\n", (4964, 4997), False, 'import os\n'), ((5828, 5863), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(12, 4)'}), '(1, 3, figsize=(12, 4))\n', (5840, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5893, 5910), 'matplotlib.pyplot.cm.jet', 'plt.cm.jet', (['drLst'], {}), '(drLst)\n', (5903, 5910), True, 'import matplotlib.pyplot as plt\n'), ((6278, 6384), 'rnnSMAP.funPost.plotCDF', 'rnnSMAP.funPost.plotCDF', (['confLst'], {'ax': 'ax', 'legendLst': 'None', 'showDiff': 'None', 'xlabel': '"""$P_{ee}$"""', 'ylabel': 'None'}), "(confLst, ax=ax, legendLst=None, showDiff=None,\n xlabel='$P_{ee}$', ylabel=None)\n", (6301, 6384), False, 'import rnnSMAP\n'), ((6610, 6628), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6626, 6628), True, 'import matplotlib.pyplot as plt\n'), ((6680, 6713), 'os.path.join', 'os.path.join', 
(['saveFolder', '"""drVal"""'], {}), "(saveFolder, 'drVal')\n", (6692, 6713), False, 'import os\n'), ((1568, 1638), 'rnnSMAP.runTrainLSTM.runCmdLine', 'runTrainLSTM.runCmdLine', ([], {'opt': 'opt', 'cudaID': 'cudaID', 'screenName': "opt['out']"}), "(opt=opt, cudaID=cudaID, screenName=opt['out'])\n", (1591, 1638), False, 'from rnnSMAP import runTrainLSTM\n'), ((1928, 2059), 'rnnSMAP.runTestLSTM.runCmdLine', 'runTestLSTM.runCmdLine', ([], {'rootDB': 'rootDB', 'rootOut': 'rootOut', 'out': 'out', 'testName': 'testName', 'yrLst': 'yrLst', 'cudaID': 'cudaID', 'screenName': 'out'}), '(rootDB=rootDB, rootOut=rootOut, out=out, testName=\n testName, yrLst=yrLst, cudaID=cudaID, screenName=out)\n', (1950, 2059), False, 'from rnnSMAP import runTestLSTM\n'), ((2572, 2648), 'rnnSMAP.classDB.DatasetPost', 'rnnSMAP.classDB.DatasetPost', ([], {'rootDB': 'rootDB', 'subsetName': 'testName', 'yrLst': 'yrLst'}), '(rootDB=rootDB, subsetName=testName, yrLst=yrLst)\n', (2599, 2648), False, 'import rnnSMAP\n'), ((5355, 5400), 'rnnSMAP.funPost.flatData', 'rnnSMAP.funPost.flatData', (['statConf.conf_sigma'], {}), '(statConf.conf_sigma)\n', (5379, 5400), False, 'import rnnSMAP\n'), ((5572, 5593), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'dbin'], {}), '(0, 1, dbin)\n', (5581, 5593), True, 'import numpy as np\n'), ((4827, 4844), 'matplotlib.pyplot.cm.jet', 'plt.cm.jet', (['drLst'], {}), '(drLst)\n', (4837, 4844), True, 'import matplotlib.pyplot as plt\n'), ((5677, 5708), 'numpy.abs', 'np.abs', (['(xSort[ind] - yRank[ind])'], {}), '(xSort[ind] - yRank[ind])\n', (5683, 5708), True, 'import numpy as np\n'), ((5729, 5743), 'numpy.isnan', 'np.isnan', (['temp'], {}), '(temp)\n', (5737, 5743), True, 'import numpy as np\n')] |
import pyqtgraph as pg
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph import LayoutWidget
from .algorithms import de_casteljau
from .algorithms import degree_elevation
from .utils import construct_arrow
from .utils import delete_content
from .utils import compute_bbox_of_points
from .color import JChooseColor
from .color import setup_color
from .arrow import JArrowDock
from .remove_item import JRemoveItem
class BezierCurve(pg.ROI, JChooseColor, JArrowDock, JRemoveItem):
    """Interactive Bezier curve ROI: free handles are the control points and
    the curve is evaluated with de Casteljau's algorithm for drawing.
    Mixins supply color selection, an optional arrow head, and removal.
    """
    def __init__(self, positions, resolution=100, viewbox=None,
                 arrow=False, arrow_start=0.9, arrow_width=0.5):
        # positions: initial control points; resolution: number of curve
        # samples; arrow_start: parameter value where the arrow head begins.
        pos = [0, 0]
        pg.ROI.__init__(self, pos, size=[1, 1])
        self.handlePen.setColor(QtGui.QColor(0, 0, 0))
        for p in positions:
            self.addFreeHandle(p)
        self.setPen(200, 200, 220)
        self.resolution = resolution
        # NOTE(review): dereferences viewbox unconditionally here, while
        # other methods guard against viewbox being None — confirm callers.
        self.info_dock = viewbox.info_dock
        self._resolution_edit = None
        self.menu = self.build_menu()
        JChooseColor.__init__(self)
        self.set_black_color()
        JArrowDock.__init__(self, arrow, start=arrow_start, width=arrow_width)
        JRemoveItem.__init__(self, viewbox)
        self._display_info_dock()
    @classmethod
    def load(cls, s, viewbox=None):
        """Reconstruct a curve from a '*JBezierCurve{...}' string (see save)."""
        # NOTE(review): format errors are only printed; parsing proceeds anyway.
        if "*JBezierCurve" not in s:
            print("Error reading a Bezier curve from string %s" % s)
        s = s.replace("*JBezierCurve", "")
        if s[0] != "{" or s[-1] != "}":
            print("Error the string is in the wrong format")
        # SECURITY: eval on file content — only load trusted files.
        data = eval(s)
        curve = cls(data["control points"], data["resolution"],
                    viewbox=viewbox, arrow=data["arrow"],
                    arrow_start=data["arrow start"],
                    arrow_width=data["arrow width"])
        setup_color(curve, data["color"])
        if viewbox is not None:
            viewbox.label.setText("Bezier Curve loaded.")
        return curve
    def get_save_control_points(self):
        """Control points translated by the ROI's current offset."""
        points = self.get_control_points()
        dx = self.pos().x()
        dy = self.pos().y()
        return [[p[0] + dx, p[1] + dy] for p in points]
    def save(self, file, points=None):
        """Write the curve to an open file in the format read by load()."""
        data = {}
        if points is None:
            points = self.get_save_control_points()
        data["control points"] = points
        data["resolution"] = self.resolution
        data["color"] = self.color
        data["arrow"] = self._arrow
        data["arrow start"] = self._arrow_start
        data["arrow width"] = self._arrow_width
        file.write("*JBezierCurve\n")
        file.write(str(data) + "\n")
    def get_control_points(self):
        """Current handle positions as a list of 2-element numpy arrays."""
        control_points = []
        for p in self.handles:
            vector = np.array([p["pos"].x(), p["pos"].y()])
            control_points.append(vector)
        return control_points
    def compute_bbox(self):
        """Bounding box of the control polygon (not of the curve itself)."""
        points = self.get_control_points()
        points = [[x[0], x[1]] for x in points]
        return compute_bbox_of_points(points)
    def shape(self):
        """Qt hit-test shape: the closed control polygon."""
        p = QtGui.QPainterPath()
        control_points = self.get_control_points()
        if len(control_points) == 0:
            return p
        start = control_points[0]
        p.moveTo(start[0], start[1])
        for point in control_points[1:]:
            p.lineTo(point[0], point[1])
        p.lineTo(start[0], start[1])
        return p
    def boundingRect(self):
        return self.shape().boundingRect()
    def get_drawing_points(self):
        """Sampled curve points translated by the ROI offset."""
        points = self._get_drawing_points()
        dx = self.pos().x()
        dy = self.pos().y()
        return [[x + dx, y + dy] for x, y in points]
    def _get_drawing_points(self):
        """Sample the Bezier curve; with an arrow, stop the curve at
        _arrow_start and append the arrow-head polygon."""
        if not self._arrow:
            cps = self.get_control_points()
            parameters = np.linspace(0.0, 1.0, self.resolution)
            return [de_casteljau(cps, t) for t in parameters]
        else:
            cps = self.get_control_points()
            parameters = np.linspace(0.0, self._arrow_start, self.resolution)
            curve = [de_casteljau(cps, t) for t in parameters]
            # Arrow points from the last sampled point toward the final
            # control point.
            last_2 = curve[-1]
            last = cps[-1]
            arrow_points = construct_arrow(last_2, last, self._arrow_width)
            curve.extend(arrow_points[1:])
            return curve
    def paint(self, p, *args):
        """Draw the sampled curve as a polyline with the current pen."""
        pts = self._get_drawing_points()
        points = [QtCore.QPointF(pt[0], pt[1]) for pt in pts]
        p.setRenderHint(QtGui.QPainter.Antialiasing)
        p.setPen(self.currentPen)
        for i in range(len(points) - 1):
            p.drawLine(points[i], points[i + 1])
    def mouseClickEvent(self, ev):
        # Left click focuses the info dock; right click opens the menu.
        self._display_info_dock()
        if ev.button() == QtCore.Qt.RightButton:
            self.raise_menu(ev)
    def build_menu(self):
        """Context menu with degree elevation and removal actions."""
        menu = QtGui.QMenu()
        menu.setTitle("Bezier Curve")
        menu.addAction("Elevate Degree", self.elevate_degree)
        menu.addAction("Remove", self.remove_item)
        return menu
    def elevate_degree(self):
        """Replace the handles with the degree-elevated control polygon
        (same curve, one more control point)."""
        control_points = self.get_control_points()
        new_control_points = degree_elevation(control_points)
        # Iterate over a copy since removeHandle mutates self.handles.
        for handle in self.handles[:]:
            self.removeHandle(handle["item"])
        for point in new_control_points:
            p = [point[0], point[1]]
            self.addFreeHandle(p)
    def _toggle_arrow(self):
        self._arrow = not self._arrow
        self.update()
    def raise_menu(self, event):
        pos = event.screenPos()
        self.menu.popup(QtCore.QPoint(pos.x(), pos.y()))
    def clear_points(self):
        """Remove all control-point handles."""
        while 0 < len(self.handles):
            self.removeHandle(self.handles[0]["item"])
    def _changed_resolution(self):
        # Invalid text in the resolution box is silently ignored.
        try:
            self.resolution = float(self._resolution_edit.text())
        except ValueError:
            pass
        self.update()
    def get_resolution_dock(self):
        """Build the dock row with the resolution line edit (20-1000)."""
        layout = LayoutWidget()
        label = QtGui.QLabel("Resolution")
        layout.addWidget(label, row=0, col=0)
        line_edit = QtGui.QLineEdit(str(self.resolution))
        validator = QtGui.QIntValidator(20, 1000)
        line_edit.setValidator(validator)
        line_edit.textChanged.connect(self._changed_resolution)
        layout.addWidget(line_edit, row=0, col=1)
        self._resolution_edit = line_edit
        layout.layout.setContentsMargins(0, 0, 0, 5)
        return layout
    def get_degree_elevate_dock(self):
        """Build the dock row with the 'Elevate Degree' button."""
        layout = LayoutWidget()
        button = QtGui.QPushButton("Elevate Degree")
        button.clicked.connect(self.elevate_degree)
        layout.addWidget(button, row=0, col=0)
        layout.layout.setContentsMargins(0, 0, 0, 0)
        return layout
    def _display_info_dock(self):
        """Populate the shared info dock with this curve's controls."""
        if self.info_dock is None:
            return
        delete_content(self.info_dock)
        container = LayoutWidget()
        label = QtGui.QLabel("Curve")
        container.addWidget(label, row=0, col=0)
        degree_dock_widget = self.get_degree_elevate_dock()
        container.addWidget(degree_dock_widget, row=1, col=0)
        resolution_dock_widget = self.get_resolution_dock()
        container.addWidget(resolution_dock_widget, row=2, col=0)
        arrow_dock_widget = self.get_arrow_dock_widget()
        container.addWidget(arrow_dock_widget, row=3, col=0)
        color_dock_widget = self.get_color_dock_widget()
        container.addWidget(color_dock_widget, row=4, col=0)
        remove_item_widget = self.get_remove_item_dock_widget()
        container.addWidget(remove_item_widget, row=5, col=0)
        # Spacer pushes the control rows to the top of the dock.
        vertical_spacer = QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.Minimum,
                                          QtGui.QSizePolicy.Expanding)
        container.layout.addItem(vertical_spacer, 6, 0)
        self.info_dock.addWidget(container)
| [
"pyqtgraph.Qt.QtGui.QIntValidator",
"pyqtgraph.Qt.QtGui.QSpacerItem",
"pyqtgraph.Qt.QtGui.QLabel",
"pyqtgraph.LayoutWidget",
"pyqtgraph.Qt.QtGui.QColor",
"pyqtgraph.Qt.QtGui.QPushButton",
"pyqtgraph.Qt.QtGui.QMenu",
"numpy.linspace",
"pyqtgraph.Qt.QtGui.QPainterPath",
"pyqtgraph.Qt.QtCore.QPointF"... | [((668, 707), 'pyqtgraph.ROI.__init__', 'pg.ROI.__init__', (['self', 'pos'], {'size': '[1, 1]'}), '(self, pos, size=[1, 1])\n', (683, 707), True, 'import pyqtgraph as pg\n'), ((3044, 3064), 'pyqtgraph.Qt.QtGui.QPainterPath', 'QtGui.QPainterPath', ([], {}), '()\n', (3062, 3064), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((4787, 4800), 'pyqtgraph.Qt.QtGui.QMenu', 'QtGui.QMenu', ([], {}), '()\n', (4798, 4800), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((5885, 5899), 'pyqtgraph.LayoutWidget', 'LayoutWidget', ([], {}), '()\n', (5897, 5899), False, 'from pyqtgraph import LayoutWidget\n'), ((5917, 5943), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Resolution"""'], {}), "('Resolution')\n", (5929, 5943), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((6069, 6098), 'pyqtgraph.Qt.QtGui.QIntValidator', 'QtGui.QIntValidator', (['(20)', '(1000)'], {}), '(20, 1000)\n', (6088, 6098), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((6432, 6446), 'pyqtgraph.LayoutWidget', 'LayoutWidget', ([], {}), '()\n', (6444, 6446), False, 'from pyqtgraph import LayoutWidget\n'), ((6464, 6499), 'pyqtgraph.Qt.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Elevate Degree"""'], {}), "('Elevate Degree')\n", (6481, 6499), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((6826, 6840), 'pyqtgraph.LayoutWidget', 'LayoutWidget', ([], {}), '()\n', (6838, 6840), False, 'from pyqtgraph import LayoutWidget\n'), ((6857, 6878), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Curve"""'], {}), "('Curve')\n", (6869, 6878), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((7570, 7649), 'pyqtgraph.Qt.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(1)', '(1)', 'QtGui.QSizePolicy.Minimum', 'QtGui.QSizePolicy.Expanding'], {}), '(1, 1, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n', (7587, 7649), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((740, 761), 'pyqtgraph.Qt.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)'], 
{}), '(0, 0, 0)\n', (752, 761), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((3778, 3816), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'self.resolution'], {}), '(0.0, 1.0, self.resolution)\n', (3789, 3816), True, 'import numpy as np\n'), ((3962, 4014), 'numpy.linspace', 'np.linspace', (['(0.0)', 'self._arrow_start', 'self.resolution'], {}), '(0.0, self._arrow_start, self.resolution)\n', (3973, 4014), True, 'import numpy as np\n'), ((4372, 4400), 'pyqtgraph.Qt.QtCore.QPointF', 'QtCore.QPointF', (['pt[0]', 'pt[1]'], {}), '(pt[0], pt[1])\n', (4386, 4400), False, 'from pyqtgraph.Qt import QtGui, QtCore\n')] |
import pdb
import numpy as np
import matplotlib.pyplot as plt
import dist
import util
import pickle
from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate
class No_Exit(MDP):
    # Like breakout or pong, but one player, no walls to break out, no
    # way to win You can move paddle vertically up or down or stay
    # Reward is +1 per step until the ball gets past the paddle ('over').
    actions = (+1, 0, -1)
    def __init__(self, field_size, ball_speed = 1, random_start = True):
        # image space is n by n
        self.q = None
        self.n = field_size
        # Horizon h sets the discount so the effective horizon scales with
        # the field size and ball speed.
        h = self.n * ball_speed
        self.discount_factor = (h - 1.0) / h
        self.ball_speed = ball_speed
        # state space is: ball position and velocity, paddle position
        # and velocity
        # - ball position is n by n
        # - ball velocity is one of (-1, -1), (-1, 1), (0, -1), (0, 1),
        #       (1, -1), (1, 1)
        # - paddle position is n; this is location of bottom of paddle,
        #       can stick "up" out of the screen
        # - paddle velocity is one of 1, 0, -1
        self.states = [((br, bc), (brv, bcv), pp, pv) for \
                       br in range(self.n) for
                       bc in range(self.n) for
                       brv in (-1, 0, 1) for
                       bcv in (-1, 1) for
                       pp in range(self.n) for
                       pv in (-1, 0, 1)]
        self.states.append('over')
        # Start: ball at the left wall moving right; row either uniform
        # random or fixed at the middle of the field.
        self.start = dist.uniform_dist([((br, 0), (0, 1), 0, 0) \
                                           for br in range(self.n)]) \
                                           if random_start else \
                                           dist.delta_dist(((int(self.n/2), 0), (0, 1), 0, 0))
    # Lazily-created matplotlib axes shared by draw_state calls.
    ax = None
    def draw_state(self, state = None, pause = False):
        """Render a state (default: self.state) as an image; ball = -1,
        paddle = 1 in an extra right-hand column."""
        if self.ax is None:
            plt.ion()
            plt.figure(facecolor="white")
            self.ax = plt.subplot()
        if state is None: state = self.state
        ((br, bc), (brv, bcv), pp, pv) = state
        im = np.zeros((self.n, self.n+1))
        im[br, bc] = -1
        im[pp, self.n] = 1
        ims = self.ax.imshow(im, interpolation = 'none',
                             cmap = 'viridis',
                             extent = [-0.5, self.n+0.5,
                                      -0.5, self.n-0.5])
        ims.set_clim(-1, 1)
        plt.pause(0.0001)
        if pause: input('go?')
        else: plt.pause(0.1)
    def state2vec(self, s):
        """Encode a state as a 1x7 array; the last entry flags 'over'."""
        if s == 'over':
            return np.array([[0, 0, 0, 0, 0, 0, 1]])
        ((br, bc), (brv, bcv), pp, pv) = s
        return np.array([[br, bc, brv, bcv, pp, pv, 0]])
    def terminal(self, state):
        return state == 'over'
    def reward_fn(self, s, a):
        # +1 for every step the game is still going.
        return 0 if s == 'over' else 1
    def transition_model(self, s, a, p = 0.4):
        # Only randomness is in brv and brc after a bounce
        # 1- prob of negating nominal velocity
        # (p is the probability of the *nominal* post-bounce velocity.)
        if s == 'over':
            return dist.delta_dist('over')
        # Current state
        ((br, bc), (brv, bcv), pp, pv) = s
        # Nominal next ball state
        new_br = br + self.ball_speed*brv; new_brv = brv
        new_bc = bc + self.ball_speed*bcv; new_bcv = bcv
        # nominal paddle state, a is action (-1, 0, 1)
        new_pp = max(0, min(self.n-1, pp + a))
        new_pv = a
        new_s = None
        hit_r = hit_c = False
        # bottom, top contacts
        if new_br < 0:
            new_br = 0; new_brv = 1; hit_r = True
        elif new_br >= self.n:
            new_br = self.n - 1; new_brv = -1; hit_r = True
        # back, front contacts
        if new_bc < 0: # back bounce
            new_bc = 0; new_bcv = 1; hit_c = True
        elif new_bc >= self.n:
            if self.paddle_hit(pp, new_pp, br, bc, new_br, new_bc):
                new_bc = self.n-1; new_bcv = -1; hit_c = True
            else:
                # Ball got past the paddle: game over.
                return dist.delta_dist('over')
        new_s = ((new_br, new_bc), (new_brv, new_bcv), new_pp, new_pv)
        if ((not hit_c) and (not hit_r)):
            return dist.delta_dist(new_s)
        elif hit_c: # also hit_c and hit_r
            if abs(new_brv) > 0:
                # Row velocity may flip sign with prob 1-p.
                return dist.DDist({new_s: p,
                            ((new_br, new_bc), (-new_brv, new_bcv), new_pp, new_pv) : 1-p})
            else:
                # Zero row velocity splits the 1-p mass between up and down.
                return dist.DDist({new_s: p,
                            ((new_br, new_bc), (-1, new_bcv), new_pp, new_pv) : 0.5*(1-p),
                            ((new_br, new_bc), (1, new_bcv), new_pp, new_pv) : 0.5*(1-p)})
        elif hit_r:
            # Top/bottom bounce only: column velocity may flip with prob 1-p.
            return dist.DDist({new_s: p,
                        ((new_br, new_bc), (new_brv, -new_bcv), new_pp, new_pv) : 1-p})
    def paddle_hit(self, pp, new_pp, br, bc, new_br, new_bc):
        # Being generous to paddle, any overlap in row
        prset = set(range(pp, pp+2)).union(set(range(new_pp, new_pp+2)))
        brset = set([br, br+1, new_br, new_br+1])
        return len(prset.intersection(brset)) >= 2
##############################
# Display
##############################
def tidy_plot(xmin, xmax, ymin, ymax, center = False, title = None,
                    xlabel = None, ylabel = None):
    """Create and return a cleaned-up matplotlib axes.

    With center=True the left/bottom spines are moved to the origin
    (axes cross at zero); otherwise the top/right spines are hidden.
    Limits are padded by 0.05 on each side.
    """
    plt.ion()
    plt.figure(facecolor="white")
    ax = plt.subplot()
    if center:
        # Axes cross at the origin; hide the opposite spines.
        for side in ('left', 'bottom'):
            ax.spines[side].set_position('zero')
            ax.spines[side].set_smart_bounds(True)
        for side in ('right', 'top'):
            ax.spines[side].set_color('none')
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
    else:
        # Conventional frame with only the left and bottom spines visible.
        for side in ('top', 'right'):
            ax.spines[side].set_visible(False)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()
    pad = .05
    plt.xlim(xmin - pad, xmax + pad)
    plt.ylim(ymin - pad, ymax + pad)
    if title:
        ax.set_title(title)
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)
    return ax
def plot_points(x, y, ax = None, clear = False,
                xmin = None, xmax = None, ymin = None, ymax = None,
                style = 'or-'):
    """Plot x vs. y on `ax` (created via tidy_plot if None) and return the axes.

    When creating the axes, missing limits are derived from the data with a
    half-unit pad, and plt.axis('equal') is applied when the aspect ratio is
    within [0.1, 10].  With clear=True an existing axes is wiped first; its
    limits are captured beforehand and restored after plotting.
    """
    if ax is None:
        # Fix: use identity comparison with None (`== None` is non-idiomatic
        # and fragile once numpy scalars are involved).
        if xmin is None: xmin = np.min(x) - 0.5
        if xmax is None: xmax = np.max(x) + 0.5
        if ymin is None: ymin = np.min(y) - 0.5
        if ymax is None: ymax = np.max(y) + 0.5
        ax = tidy_plot(xmin, xmax, ymin, ymax)
        x_range = xmax - xmin; y_range = ymax - ymin
        if .1 < x_range / y_range < 10:
            plt.axis('equal')
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
    elif clear:
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
        ax.clear()
    else:
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
    ax.plot(x, y, style, markeredgewidth=0.0)
    # Re-apply the limits: plotting can occasionally change them.
    ax.set_xlim(xlim); ax.set_ylim(ylim)
    ax.grid(True, which='both')
    return ax
import functools
def toHex(s):
    """Return the lowercase hex encoding of string `s`, two digits per
    character (code points < 16 are zero-padded).

    Fixes two defects in the original: functools.reduce without an
    initializer raised TypeError on the empty string (now returns ''), and
    repeated string concatenation was quadratic (now a single join).
    """
    return ''.join(format(ord(ch), '02x') for ch in s)
##############################
def test_learn_play(d = 6, num_layers = 2, num_units = 100,
                    eps = 0.5, iters = 10000, draw=False,
                    tabular = True, batch=False, batch_epochs=10,
                    num_episodes = 10, episode_length = 100):
    """Train a Q-function on No_Exit(d) and then play greedily.

    tabular/batch select among TabularQ vs NNQ and online vs batch
    (fitted) Q-learning.  Periodic evaluation scores are collected, a
    hex-encoded pickle of them is printed for upload, the learning curve
    is plotted, and finally num_episodes greedy episodes are simulated.
    """
    # Evaluate roughly 10 times over the run (every iteration if iters <= 10).
    iters_per_value = 1 if iters <= 10 else int(iters / 10.0)
    scores = []
    def interact(q, iter=0):
        # Callback passed into the learners; records (iteration, avg reward).
        if iter % iters_per_value == 0:
            scores.append((iter, evaluate(game, num_episodes, episode_length,
                                          lambda s: greedy(q, s))[0]))
            print('score', scores[-1])
    game = No_Exit(d)
    if tabular:
        q = TabularQ(game.states, game.actions)
    else:
        q = NNQ(game.states, game.actions, game.state2vec, num_layers, num_units,
                epochs=batch_epochs if batch else 1)
    if batch:
        qf = Q_learn_batch(game, q, iters=iters, episode_length = 100, n_episodes=10,
                           interactive_fn=interact)
    else:
        qf = Q_learn(game, q, iters=iters, interactive_fn=interact)
    if scores:
        print('String to upload (incude quotes): "%s"'%toHex(pickle.dumps([tabular, batch, scores], 0).decode()))
        # Plot learning curve
        plot_points(np.array([s[0] for s in scores]),
                    np.array([s[1] for s in scores]))
    # Play greedily with the learned Q; smaller fields get shorter episodes.
    for i in range(num_episodes):
        reward, _ = sim_episode(game, (episode_length if d > 5 else episode_length/2),
                                 lambda s: greedy(qf, s), draw=draw)
        print('Reward', reward)
def test_solve_play(d = 6, draw=False,
                    num_episodes = 10, episode_length = 100):
    """Solve No_Exit(d) exactly with value iteration, then simulate
    num_episodes episodes of greedy play, printing each episode's reward.
    """
    game = No_Exit(d)
    q_solved = value_iteration(game, TabularQ(game.states, game.actions))
    policy = lambda s: greedy(q_solved, s)
    # Smaller fields get shorter episodes.
    steps = episode_length if d > 5 else episode_length/2
    for _ in range(num_episodes):
        reward, _ = sim_episode(game, steps, policy, draw=draw)
        print('Reward', reward)
########## Test cases
# Value Iteration
# test_solve_play()
# Tabular Q-learn
# test_learn_play(iters=100000, tabular=True, batch=False)
# Tabular Batch Q-learn
# test_learn_play(iters=10, tabular=True, batch=True) # Check: why do we want fewer iterations here?
# NN Q-learn
# test_learn_play(iters=100000, tabular=False, batch=False)
# NN Batch Q-learn (Fitted Q-learn)
# test_learn_play(iters=10, tabular=False, batch=True)
| [
"matplotlib.pyplot.figure",
"mdp10.greedy",
"dist.DDist",
"numpy.max",
"matplotlib.pyplot.pause",
"dist.delta_dist",
"pickle.dumps",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.ion",
"numpy.min",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"mdp10.Q_learn",
"numpy.zeros",
"mat... | [((5230, 5239), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (5237, 5239), True, 'import matplotlib.pyplot as plt\n'), ((5244, 5273), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white')\n", (5254, 5273), True, 'import matplotlib.pyplot as plt\n'), ((5283, 5296), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (5294, 5296), True, 'import matplotlib.pyplot as plt\n'), ((5886, 5918), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin - eps)', '(xmax + eps)'], {}), '(xmin - eps, xmax + eps)\n', (5894, 5918), True, 'import matplotlib.pyplot as plt\n'), ((5919, 5951), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin - eps)', '(ymax + eps)'], {}), '(ymin - eps, ymax + eps)\n', (5927, 5951), True, 'import matplotlib.pyplot as plt\n'), ((7176, 7217), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x + y)', 'lst'], {}), '(lambda x, y: x + y, lst)\n', (7192, 7217), False, 'import functools\n'), ((2011, 2041), 'numpy.zeros', 'np.zeros', (['(self.n, self.n + 1)'], {}), '((self.n, self.n + 1))\n', (2019, 2041), True, 'import numpy as np\n'), ((2341, 2358), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (2350, 2358), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2626), 'numpy.array', 'np.array', (['[[br, bc, brv, bcv, pp, pv, 0]]'], {}), '([[br, bc, brv, bcv, pp, pv, 0]])\n', (2593, 2626), True, 'import numpy as np\n'), ((7890, 7925), 'mdp10.TabularQ', 'TabularQ', (['game.states', 'game.actions'], {}), '(game.states, game.actions)\n', (7898, 7925), False, 'from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n'), ((7948, 8058), 'mdp10.NNQ', 'NNQ', (['game.states', 'game.actions', 'game.state2vec', 'num_layers', 'num_units'], {'epochs': '(batch_epochs if batch else 1)'}), '(game.states, game.actions, game.state2vec, num_layers, num_units,\n epochs=batch_epochs if batch else 1)\n', (7951, 8058), False, 'from 
mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n'), ((8098, 8197), 'mdp10.Q_learn_batch', 'Q_learn_batch', (['game', 'q'], {'iters': 'iters', 'episode_length': '(100)', 'n_episodes': '(10)', 'interactive_fn': 'interact'}), '(game, q, iters=iters, episode_length=100, n_episodes=10,\n interactive_fn=interact)\n', (8111, 8197), False, 'from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n'), ((8246, 8300), 'mdp10.Q_learn', 'Q_learn', (['game', 'q'], {'iters': 'iters', 'interactive_fn': 'interact'}), '(game, q, iters=iters, interactive_fn=interact)\n', (8253, 8300), False, 'from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n'), ((8945, 8980), 'mdp10.TabularQ', 'TabularQ', (['game.states', 'game.actions'], {}), '(game.states, game.actions)\n', (8953, 8980), False, 'from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n'), ((1817, 1826), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1824, 1826), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1868), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white')\n", (1849, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1891, 1904), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (1902, 1904), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2419), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (2414, 2419), True, 'import matplotlib.pyplot as plt\n'), ((2493, 2526), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 1]])\n', (2501, 2526), True, 'import numpy as np\n'), ((2966, 2989), 'dist.delta_dist', 'dist.delta_dist', (['"""over"""'], {}), "('over')\n", (2981, 2989), False, 'import dist\n'), ((4066, 4088), 'dist.delta_dist', 'dist.delta_dist', (['new_s'], {}), 
'(new_s)\n', (4081, 4088), False, 'import dist\n'), ((6589, 6606), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (6597, 6606), True, 'import matplotlib.pyplot as plt\n'), ((8480, 8512), 'numpy.array', 'np.array', (['[s[0] for s in scores]'], {}), '([s[0] for s in scores])\n', (8488, 8512), True, 'import numpy as np\n'), ((8534, 8566), 'numpy.array', 'np.array', (['[s[1] for s in scores]'], {}), '([s[1] for s in scores])\n', (8542, 8566), True, 'import numpy as np\n'), ((6276, 6285), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (6282, 6285), True, 'import numpy as np\n'), ((6324, 6333), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6330, 6333), True, 'import numpy as np\n'), ((6372, 6381), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (6378, 6381), True, 'import numpy as np\n'), ((6420, 6429), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (6426, 6429), True, 'import numpy as np\n'), ((8731, 8744), 'mdp10.greedy', 'greedy', (['qf', 's'], {}), '(qf, s)\n', (8737, 8744), False, 'from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n'), ((9145, 9158), 'mdp10.greedy', 'greedy', (['qf', 's'], {}), '(qf, s)\n', (9151, 9158), False, 'from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n'), ((3909, 3932), 'dist.delta_dist', 'dist.delta_dist', (['"""over"""'], {}), "('over')\n", (3924, 3932), False, 'import dist\n'), ((4208, 4298), 'dist.DDist', 'dist.DDist', (['{new_s: p, ((new_br, new_bc), (-new_brv, new_bcv), new_pp, new_pv): 1 - p}'], {}), '({new_s: p, ((new_br, new_bc), (-new_brv, new_bcv), new_pp,\n new_pv): 1 - p})\n', (4218, 4298), False, 'import dist\n'), ((4370, 4532), 'dist.DDist', 'dist.DDist', (['{new_s: p, ((new_br, new_bc), (-1, new_bcv), new_pp, new_pv): 0.5 * (1 - p),\n ((new_br, new_bc), (1, new_bcv), new_pp, new_pv): 0.5 * (1 - p)}'], {}), '({new_s: p, ((new_br, new_bc), (-1, new_bcv), new_pp, new_pv): \n 0.5 * (1 - 
p), ((new_br, new_bc), (1, new_bcv), new_pp, new_pv): 0.5 *\n (1 - p)})\n', (4380, 4532), False, 'import dist\n'), ((4627, 4717), 'dist.DDist', 'dist.DDist', (['{new_s: p, ((new_br, new_bc), (new_brv, -new_bcv), new_pp, new_pv): 1 - p}'], {}), '({new_s: p, ((new_br, new_bc), (new_brv, -new_bcv), new_pp,\n new_pv): 1 - p})\n', (4637, 4717), False, 'import dist\n'), ((8377, 8418), 'pickle.dumps', 'pickle.dumps', (['[tabular, batch, scores]', '(0)'], {}), '([tabular, batch, scores], 0)\n', (8389, 8418), False, 'import pickle\n'), ((7782, 7794), 'mdp10.greedy', 'greedy', (['q', 's'], {}), '(q, s)\n', (7788, 7794), False, 'from mdp10 import MDP, TabularQ, NNQ, value_iteration, Q_learn, Q_learn_batch, greedy, sim_episode, evaluate\n')] |
"""
Utilities and helper functions.
These functions can be used to construct 2-sided 1 - alpha confidence
bounds for the average treatment effect in a randomized experiment with binary
outcomes and two treatments.
"""
import numpy as np
from scipy.stats import hypergeom
from itertools import combinations
from math import comb, floor
def nchoosem(n, m):
    """
    Build the exact re-randomization matrix for small n choose m.

    Each row is one possible treatment assignment: a 0/1 vector with
    exactly m ones.  Rows appear in lexicographic order of the chosen
    treatment indices.

    Parameters
    ----------
    n: int
        total number of subjects
    m: int
        number of subjects under treatment

    Returns
    -------
    Z: numpy array
        re-randomization matrix of shape (comb(n, m), n)
    """
    total = comb(n, m)
    Z = np.zeros((total, n), dtype=int)
    for row, chosen in enumerate(combinations(np.arange(n), m)):
        # Set the treated positions for this assignment in one shot.
        Z[row, list(chosen)] = 1
    return Z
def combs(n, m, nperm):
    """
    Draw a random sample of treatment assignments.

    Parameters
    ----------
    n: int
        total number of subjects
    m: int
        number of subjects under treatment
    nperm: int
        number of permutations to sample

    Returns
    -------
    Z: numpy array
        nperm x n matrix; each row is a 0/1 assignment with exactly m ones
    """
    Z = np.zeros((nperm, n))
    for row in np.arange(nperm):
        # One uniform draw of m treated units (without replacement) per row.
        chosen = np.random.choice(n, m, replace=False)
        Z[row, chosen] = 1
    return Z
def pval_one_lower(n, m, N, Z_all, tau_obs):
    """
    Calculate the one-sided p-value for method I.

    Parameters
    ----------
    n: int
        total number of subjects
    m: int
        number of subjects under treatment
    N: numpy array
        potential table (N11, N10, N01, N00)
    Z_all: list
        re-randomization or sample of re-randomization matrix
    tau_obs: float
        observed value of tau

    Returns
    -------
    pl: float
        p-value (share of assignments with tau_hat >= tau_obs)
    """
    num_assignments = len(Z_all)
    # Potential-outcome table: column 0 is the outcome under treatment,
    # column 1 the outcome under control, laid out in blocks
    # N11 (1,1) | N10 (1,0) | N01 (0,1) | N00 (0,0).
    dat = np.zeros((n, 2), dtype=int)
    dat[:N[0], :] = 1
    dat[N[0]:N[0] + N[1], 0] = 1
    dat[N[0] + N[1]:N[0] + N[1] + N[2], 1] = 1
    # Remaining N00 rows stay (0, 0) from the zeros initialization.
    treated_mean = np.matmul(Z_all, dat[:, 0] / m)
    control_mean = np.matmul(1 - Z_all, dat[:, 1] / (n - m))
    tau_hat = treated_mean - control_mean
    # Round to kill float noise before the >= comparison.
    pl = sum(np.round(tau_hat, 15) >= round(tau_obs, 15)) / num_assignments
    return pl
def pval_two(n, m, N, Z_all, tau_obs):
    """
    Calculate the two-sided p-value for method 3.

    Parameters
    ----------
    n: int
        total number of subjects
    m: int
        number of subjects under treatment
    N: numpy array
        potential table (N11, N10, N01, N00)
    Z_all: list
        re-randomization or sample of re-randomization matrix
    tau_obs: float
        observed value of tau

    Returns
    -------
    pd: float
        p-value (share of assignments at least as extreme as observed,
        measured as distance from the table's true effect tau_N)
    """
    num_assignments = len(Z_all)
    # Potential-outcome table: column 0 under treatment, column 1 under
    # control, in blocks N11 (1,1) | N10 (1,0) | N01 (0,1) | N00 (0,0).
    dat = np.zeros((n, 2), dtype=int)
    dat[:N[0], :] = 1
    dat[N[0]:N[0] + N[1], 0] = 1
    dat[N[0] + N[1]:N[0] + N[1] + N[2], 1] = 1
    # Remaining N00 rows stay (0, 0) from the zeros initialization.
    treated_mean = np.matmul(Z_all, dat[:, 0] / m)
    control_mean = np.matmul(1 - Z_all, dat[:, 1] / (n - m))
    tau_hat = treated_mean - control_mean
    # True effect implied by the potential table.
    tau_N = (N[1] - N[2]) / n
    pd = sum(np.round(abs(tau_hat - tau_N), 15) >= round(abs(tau_obs - tau_N),
                                                        15)) / num_assignments
    return pd
def check_compatible(n11, n10, n01, n00, N11, N10, N01):
    """
    Check which candidate potential tables are compatible with the
    observed table.

    A table (N11, N10, N01, N00) is compatible when the feasibility
    interval [left, right] for the number of treated always-responders
    is non-empty.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    N11: numpy array of integers
        candidate counts of subjects with potential outcome 1 under both arms
    N10: numpy array of integers
        candidate counts with potential outcome 1 under treatment only
    N01: numpy array of integers
        candidate counts with potential outcome 1 under control only

    Returns
    -------
    compact: numpy boolean array
        True where the candidate table is compatible with the data
    """
    n = n11 + n10 + n01 + n00
    num_tables = len(N10)
    # Four lower constraints and four upper constraints per candidate.
    left = np.max(np.column_stack([np.zeros(num_tables, dtype=int),
                                     n11 - N10,
                                     N11 - n01,
                                     N11 + N01 - n10 - n01]), axis=1)
    right = np.min(np.column_stack([N11,
                                      np.full(num_tables, n11),
                                      N11 + N01 - n01,
                                      n - N10 - n01 - n10]), axis=1)
    compact = left <= right
    return compact
def tau_lower_N11_oneside(n11, n10, n01, n00, N11, Z_all, alpha):
    """
    Calculate tau_min and N_accept for method I.

    For a fixed N11, searches over potential tables (N11, N10, N01, N00)
    for the smallest tau = (N10 - N01)/n among tables that are both
    accepted by the randomization test and compatible with the data.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    N11: int
        number of subjects under control and treatment with potential outcome 1
    Z_all: numpy array
        re-randomization or sample of re-randomization matrix
    alpha: float
        1 - confidence level

    Returns
    -------
    tau_min: float
        minimum tau value of accepted potential tables
    N_accept: numpy array
        accepted potential table attaining tau_min (NaN if none compatible)
    """
    n = n11+n10+n01+n00
    m = n11+n10
    N01 = 0
    N10 = 0
    tau_obs = n11/m - n01/(n-m)
    # M[N01] will hold the smallest accepted N10 for each N01 value.
    M = np.zeros(n-N11+1, dtype=int)
    # Staircase search: the acceptance boundary is monotone, so N10 only
    # ever increases; each (N10, N01) pair is tested at most once.
    while (N10 <= (n-N11-N01)) and (N01 <= (n-N11)):
        pl = pval_one_lower(n, m, [N11, N10, N01, n-(N11+N10+N01)],
                            Z_all, tau_obs)
        if pl >= alpha:
            # Accepted: record boundary for this N01 and move to the next.
            M[N01] = N10
            N01 += 1
        else:
            N10 += 1
    if N01 <= (n - N11):
        # N01 values never reached: mark with an impossible N10 (n+1) so
        # the expansion loop below skips them.
        for i in np.arange(N01, (n-N11+1)):
            M[i] = n+1
    N11_vec0 = np.full((n-N11+1), N11)
    N10_vec0 = M
    N01_vec0 = np.arange(n-N11+1)
    N11_vec = np.empty(0, dtype=int)
    N10_vec = np.empty(0, dtype=int)
    N01_vec = np.empty(0, dtype=int)
    # Expand each (N11, N01) into the full run of accepted N10 values,
    # from the boundary up to the largest feasible N10.
    for i in np.arange(len(N11_vec0)):
        if N10_vec0[i] <= (n-N11_vec0[i]-N01_vec0[i]):
            N10_vec = np.append(N10_vec, np.arange(N10_vec0[i], n-N11_vec0[i] -
                                                     N01_vec0[i]+1))
            N11_vec = np.append(N11_vec, np.full((n-N11_vec0[i]-N01_vec0[i] -
                                                  N10_vec0[i]+1),
                                                 N11_vec0[i]))
            N01_vec = np.append(N01_vec, np.full((n-N11_vec0[i]-N01_vec0[i] -
                                                  N10_vec0[i]+1), N01_vec0[i]))
    # Keep only tables compatible with the observed data.
    compat = check_compatible(n11, n10, n01, n00, N11_vec, N10_vec, N01_vec)
    if sum(compat) > 0:
        tau_min = min(N10_vec[compat] - N01_vec[compat])/n
        accept_pos = np.flatnonzero(N10_vec[compat]-N01_vec[compat] ==
                                    n*tau_min)
        accept_pos = accept_pos[0]
        N_accept = np.array([N11, N10_vec[compat][accept_pos],
                             N01_vec[compat][accept_pos],
                             n-(N11+N10_vec[compat][accept_pos] +
                                N01_vec[compat][accept_pos])])
    else:
        # No compatible table for this N11: return the trivial upper value
        # so this N11 never improves the minimum.
        tau_min = (n11 + n00)/n
        N_accept = float('NaN')
    return (tau_min, N_accept)
def tau_lower_oneside(n11, n10, n01, n00, alpha, nperm):
    """
    Calculate tau_lower, tau_upper, and N_accept for method I.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    alpha: float
        1 - confidence level
    nperm: int
        maximum desired number of permutations

    Returns
    -------
    tau_lower: float
        left-side tau for one-sided confidence interval
    tau_upper: float
        right-side tau for one-sided confidence interval
    N_accept: numpy array
        accepted potential table for one-sided confidence interval
    """
    n = n11 + n10 + n01 + n00
    m = n11 + n10
    # Enumerate all assignments when feasible, otherwise sample nperm.
    Z_all = nchoosem(n, m) if comb(n, m) <= nperm else combs(n, m, nperm)
    # Start from the trivial bound and tighten over all N11 values.
    best_tau = (n11 + n00) / n
    best_table = float('NaN')
    for N11 in np.arange(n11 + n01 + 1):
        candidate = tau_lower_N11_oneside(n11, n10, n01, n00, N11, Z_all,
                                          alpha)
        if candidate[0] < best_tau:
            best_table = candidate[1]
            best_tau = candidate[0]
    # The one-sided upper end is always the trivial bound.
    return (best_tau, (n11 + n00) / n, best_table)
def tau_lower_N11_twoside(n11, n10, n01, n00, N11, Z_all, alpha):
    """
    Calculate tau_min and N_accept for method 3.

    For a fixed N11, finds the smallest and largest tau = (N10 - N01)/n
    among potential tables accepted by the two-sided randomization test
    and compatible with the observed data.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    N11: int
        number of subjects under control and treatment with potential outcome 1
    Z_all: numpy array
        re-randomization or sample of re-randomization matrix
    alpha: float
        1 - confidence level

    Returns
    -------
    tau_min: float
        minimum tau value of accepted potential tables (inf if none)
    tau_max: float
        maximum tau value of accepted potential tables (-inf if none)
    N_accept_min: numpy array
        minimum accepted potential table (NaN if none)
    N_accept_max: numpy array
        maximum accepted potential table (NaN if none)
    rand_test_num: int
        number of randomization tests run
    """
    n = n11 + n10 + n01 + n00
    m = n11 + n10
    tau_obs = n11/m - n01/(n-m)
    # Observed effect on the count scale (n * tau_obs).
    ntau_obs = n*n11/m - n*n01/(n-m)
    N10 = 0
    # Only N01 values with N10 - N01 able to fall below the observed
    # effect are searched on this side.
    N01_vec0 = np.arange(n-N11+1)[np.arange(n-N11+1) >= -ntau_obs]
    N01 = min(N01_vec0)
    # M records, for each N01, the smallest accepted N10.
    M = np.empty(len(N01_vec0), dtype=int)
    rand_test_num = 0
    # Staircase search along the monotone acceptance boundary; N10 only
    # ever increases, so each table is tested at most once.
    while (N10 <= (n-N11-N01)) and (N01 <= (n-N11)):
        if N10 <= (N01+ntau_obs):
            pl = pval_two(n, m, [N11, N10, N01, n-(N11 + N10 + N01)], Z_all,
                          tau_obs)
            rand_test_num += 1
            if pl >= alpha:
                M[N01_vec0 == N01] = N10
                N01 += 1
            else:
                N10 += 1
        else:
            # Table's effect already exceeds the observed effect: it is
            # accepted on this side without testing.
            M[N01_vec0 == N01] = N10
            N01 += 1
    if N01 <= (n-N11):
        # N01 values not reached: boundary is just past the observed
        # effect (floor(N01 + ntau_obs) + 1).
        M[N01_vec0 >= N01] = np.floor(N01_vec0[N01_vec0 >= N01]+ntau_obs)+1
    # NOTE(review): the list assignment below is immediately overwritten
    # by the np.full call — dead code kept for byte-compatibility.
    N11_vec0 = [N11]*len(N01_vec0)
    N11_vec0 = np.full(len(N01_vec0), N11)
    N10_vec0 = M
    N11_vec = np.empty(0, dtype=int)
    N10_vec = np.empty(0, dtype=int)
    N01_vec = np.empty(0, dtype=int)
    # Expand each (N11, N01) into the full run of accepted N10 values,
    # capped at the observed effect and at feasibility.
    for i in np.arange(len(N11_vec0)):
        N10_upper = int(min((n-N11_vec0[i]-N01_vec0[i]), np.floor(N01_vec0[i] +
                                                                  ntau_obs)))
        if N10_vec0[i] <= N10_upper:
            N10_vec = np.append(N10_vec, np.arange(N10_vec0[i], N10_upper+1))
            N11_vec = np.append(N11_vec, np.full((N10_upper-N10_vec0[i]+1),
                                                 N11_vec0[i]))
            N01_vec = np.append(N01_vec, np.full((N10_upper-N10_vec0[i]+1),
                                                 N01_vec0[i]))
    # Keep only tables compatible with the observed data.
    compat = check_compatible(n11, n10, n01, n00, N11_vec, N10_vec, N01_vec)
    if sum(compat) > 0:
        tau_min = min(N10_vec[compat] - N01_vec[compat])/n
        accept_pos = np.flatnonzero(N10_vec[compat]-N01_vec[compat] ==
                                    n*tau_min)
        accept_pos = accept_pos[0]
        N_accept_min = np.array([N11, N10_vec[compat][accept_pos],
                                 N01_vec[compat][accept_pos],
                                 n-(N11+N10_vec[compat][accept_pos] +
                                    N01_vec[compat][accept_pos])])
        tau_max = max(N10_vec[compat] - N01_vec[compat])/n
        accept_pos = np.flatnonzero(N10_vec[compat]-N01_vec[compat] ==
                                    n*tau_max)
        accept_pos = accept_pos[0]
        N_accept_max = np.array([N11, N10_vec[compat][accept_pos],
                                 N01_vec[compat][accept_pos],
                                 n-(N11+N10_vec[compat][accept_pos] +
                                    N01_vec[compat][accept_pos])])
    else:
        # Neutral values so this N11 never changes the running min/max.
        tau_min = np.inf
        N_accept_min = np.nan
        tau_max = np.NINF
        N_accept_max = np.nan
    return (tau_min, tau_max, N_accept_min, N_accept_max, rand_test_num)
def tau_twoside_lower(n11, n10, n01, n00, alpha, Z_all, exact, reps):
    """
    Calculate taus and N_accepts for method 3 by scanning N11 values.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    alpha: float
        1 - confidence level
    Z_all: numpy array
        re-randomization or sample of re-randomization matrix
    exact: boolean
        whether or not to calculate exact confidence interval
    reps:
        if exact = False, number of simulations per table

    Returns
    -------
    tau_lower: float
        left-side tau for two-sided confidence interval
    N_accept_lower: numpy array
        left-side accepted potential table for two-sided confidence interval
    tau_upper: float
        right-side tau for two-sided confidence interval
    N_accept_upper: numpy array
        right-side accepted potential table for two-sided confidence interval
    rand_test_total: int
        number of tests run
    """
    n = n11 + n10 + n01 + n00
    m = n11 + n10
    # Observed effect on the count scale; bounds the N11 search range.
    ntau_obs = n * n11 / m - n * n01 / (n - m)
    best_min = np.inf
    best_max = np.NINF
    table_min = np.nan
    table_max = np.nan
    tests_run = 0
    for N11 in np.arange(int(min(n11 + n01, n + ntau_obs)) + 1):
        result = tau_lower_N11_twoside(n11, n10, n01, n00, N11, Z_all,
                                       alpha)
        tests_run += result[4]
        if result[0] < best_min:
            table_min = result[2]
        if result[1] > best_max:
            table_max = result[3]
        best_min = min(best_min, result[0])
        best_max = max(best_max, result[1])
        # In simulation mode stop once the test budget is spent.
        if not exact and tests_run >= reps:
            break
    return (best_min, table_min, best_max, table_max, tests_run)
def tau_twoside_less_treated(n11, n10, n01, n00, alpha, exact,
                             max_combinations, reps):
    """
    Calculate taus and N_accepts for method 3.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    alpha: float
        1 - confidence level
    exact: boolean
        whether or not to calculate exact confidence interval
    max_combinations: int
        if exact = True, maximum desired number of combinations
    reps: int
        if exact = False, number of simulations per table

    Returns
    -------
    tau_lower: float
        left-side tau for two-sided confidence interval
    tau_upper: float
        right-side tau for two-sided confidence interval
    N_accept_lower: numpy array
        left-side accepted potential table for two-sided confidence interval
    N_accept_upper: numpy array
        right-side accepted potential table for two-sided confidence interval
    rand_test_total: int
        number of tests run

    Raises
    ------
    Exception
        if exact is True and comb(n, m) exceeds max_combinations
    """
    n = n11+n10+n01+n00
    m = n11+n10
    # Enumerate every assignment when feasible; otherwise refuse in exact
    # mode or fall back to Monte-Carlo sampling.
    if comb(n, m) <= max_combinations:
        Z_all = nchoosem(n, m)
    elif exact:
        # Fixed: the message previously embedded a long run of spaces from
        # a line continuation inside the string literal.
        raise Exception('Not enough combinations. Increase '
                        'max_combinations to ' + str(comb(n, m)) +
                        ' for exact interval.')
    else:
        Z_all = combs(n, m, reps)
    # Lower side from the table as given; upper side from the
    # outcome-relabeled table (effect negated), then combine.
    ci_lower = tau_twoside_lower(n11, n10, n01, n00, alpha, Z_all, exact, reps)
    ci_upper = tau_twoside_lower(n10, n11, n00, n01, alpha, Z_all, exact, reps)
    rand_test_total = ci_lower[4] + ci_upper[4]
    tau_lower = min(ci_lower[0], -1*ci_upper[2])
    tau_upper = max(ci_lower[2], -1*ci_upper[0])
    # Report the accepted table from whichever side produced each bound;
    # tables from the relabeled side must be flipped back.
    if tau_lower == ci_lower[0]:
        N_accept_lower = ci_lower[1]
    else:
        N_accept_lower = np.flip(ci_upper[3])
    if tau_upper == -1*ci_upper[0]:
        N_accept_upper = np.flip(ci_upper[1])
    else:
        N_accept_upper = ci_lower[3]
    return (tau_lower, tau_upper, N_accept_lower, N_accept_upper,
            rand_test_total)
def tau_twosided_ci(n11, n10, n01, n00, alpha, exact, max_combinations, reps):
    """
    Calculate taus and N_accepts for method 3.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    alpha: float
        1 - confidence level
    exact: boolean
        whether or not to calculate exact confidence interval
    max_combinations: int
        if exact = True, maximum desired number of combinations
    reps: int
        if exact = False, number of simulations per table

    Returns
    -------
    tau_lower: float
        left-side tau for two-sided confidence interval
    tau_upper: float
        right-side tau for two-sided confidence interval
    N_accept_lower: list
        left-side accepted potential table for two-sided confidence interval
    N_accept_upper: list
        right-side accepted potential table for two-sided confidence interval
    rand_test_total: int
        number of tests run
    """
    n = n11+n10+n01+n00
    m = n11+n10
    if m > (n/2):
        # NOTE(review): this branch calls tau_twoside_less_treated with the
        # SAME arguments as the else branch and then negates the interval.
        # The helper's name suggests it expects the smaller arm to be the
        # treated one, so this branch was likely meant to swap the roles of
        # treatment and control before calling; as written, the m > n/2
        # result is just the negation of the m <= n/2 result.  Verify
        # against the reference implementation.
        ci = tau_twoside_less_treated(n11, n10, n01, n00, alpha, exact,
                                      max_combinations, reps)
        tau_lower = -ci[1]
        tau_upper = -ci[0]
        N_accept_lower = ci[2]
        N_accept_upper = ci[3]
        rand_test_total = ci[4]
    else:
        ci = tau_twoside_less_treated(n11, n10, n01, n00, alpha, exact,
                                      max_combinations, reps)
        tau_lower = ci[0]
        tau_upper = ci[1]
        N_accept_lower = ci[2]
        N_accept_upper = ci[3]
        rand_test_total = ci[4]
    if exact:
        num_tables = len(nchoosem(n, m))
    else:
        num_tables = len(combs(n, m, reps))
    # NOTE(review): int() truncates; tau*n is mathematically an integer but
    # float error could push it just below, where round() would be safer.
    # Also .tolist() assumes the accepted tables are numpy arrays, i.e. not
    # the NaN fallback returned when nothing is compatible — confirm.
    return ([int(tau_lower*n), int(tau_upper*n)],
            [N_accept_lower.tolist(), N_accept_upper.tolist()],
            [num_tables, rand_test_total])
def ind(x, a, b):
    """
    Indicator function for the closed interval [a, b].

    Parameters
    ----------
    x: int
        value to test
    a: int
        lower bound of interval
    b: int
        upper bound of interval

    Returns
    -------
    1 if a <= x <= b and 0 otherwise.
    """
    return (a <= x) * (x <= b)
def lci(x, n, N, alpha):
    """
    Calculate lower confidence bound(s) for the number of good items in a
    population of size N, given x good items in a sample of size n.

    Parameters
    ----------
    x: int/numpy array
        number(s) of good items in the sample
    n: int
        sample size
    N: int
        population size
    alpha: float
        1 - confidence level

    Returns
    -------
    kk: numpy array
        lower bound(s); an int input yields a length-1 array (``uci``
        relies on this behavior)
    """
    if isinstance(x, int):
        x = np.array([x])
    # Preallocated int result array; every entry is overwritten below.
    kk = np.arange(0, len(x))
    for i in kk:
        if x[i] < 0.5:
            # No successes observed: the lower bound is 0.
            kk[i] = 0
        else:
            aa = np.arange(0, N+1)
            bb = (aa + 1).astype(np.float64)
            # bb[j] = P(X <= x[i]-1 | population count aa[j]-1); bb[0]
            # stays above 1 so the candidate aa = 0 is always retained.
            bb[1:(N+1)] = hypergeom.cdf(x[i]-1, N, aa[1:(N+1)]-1, n)
            dd = np.vstack((aa, bb))
            # Keep candidates whose tail probability still rejects.
            dd = dd[:, dd[1] >= (1-alpha)]
            if dd.shape[0]*dd.shape[1] == 2:
                kk[i] = dd[0, 0]
            else:
                kk[i] = max(dd[0])
    # Fixed: the original ended with ``if isinstance(x, int): return kk[0]``
    # which was unreachable because x was reassigned to an ndarray above;
    # the dead branch is removed and the array return kept.
    return kk
def uci(x, n, N, alpha):
    """
    Calculate upper confidence bound(s) via the complement identity
    ucb(x) = N - lcb(n - x).

    Parameters
    ----------
    x: int/numpy array
        number(s) of good items in the sample
    n: int
        sample size
    N: int
        population size
    alpha: float
        1 - confidence level

    Returns
    -------
    kk: int/numpy array
        upper bound(s); a scalar for int input, an array otherwise
    """
    # Fixed: removed a dead ``xs`` variable that was computed from x and
    # never used.
    lcis = lci(n - x, n, N, alpha)
    upper = N - lcis
    if isinstance(x, int):
        # lci returns a length-1 array for int input, so index out the
        # scalar here.
        return upper[0]
    return upper
def exact_CI_odd(N, n, x, alpha):
    """
    Calculate exact CI for odd sample size.

    Parameters
    ----------
    N: int
        population size
    n: int (odd)
        sample size
    x: int
        number of good items in the sample
    alpha:
        1 - confidence level

    Returns
    -------
    lower: int
        lower bound of confidence interval
    upper: int
        upper bound of confidence interval
    """
    xx = np.arange(n+1)
    # Working interval starts at the alpha/2 one-sided bounds; the alpha
    # one-sided bounds limit how far it may later be shrunk.
    lcin1 = lci(xx, n, N, alpha/2)
    ucin1 = uci(xx, n, N, alpha/2)
    lcin2 = lci(xx, n, N, alpha)
    ucin2 = uci(xx, n, N, alpha)
    # NOTE(review): lciw/uciw alias lcin1/ucin1 (no copy) and are mutated
    # in place below; lcin1/ucin1 are not read again, so this is harmless,
    # but confirm before refactoring.
    lciw = lcin1
    uciw = ucin1
    xvalue = int(floor(n/2)+1)
    # Walk x-values from the middle outward; mirror pairs (x, n-x) are
    # updated together to keep the bounds symmetric.
    while xvalue > -0.5:
        # Candidate grid: al lower endpoints times au upper endpoints
        # between the alpha bounds and the current working bounds.
        al = lcin2[xvalue]-lciw[xvalue]+1
        au = int(uciw[xvalue] - ucin2[xvalue]+1)
        if al*au > 1:
            # ff columns: candidate lower, candidate upper, interval
            # length, worst-case coverage.
            ff = np.zeros((al*au, 4))
            for i in np.arange(al):
                ff[np.arange(i*au, i*au+au), 0] = lciw[xvalue]+i
                ff[np.arange(i*au, i*au+au), 1] = np.arange(ucin2[xvalue],
                                                          uciw[xvalue]+1)
                ff[np.arange(i*au, i*au+au), 2] = (
                    ff[np.arange(i*au, i*au+au), 1] -
                    ff[np.arange(i*au, i*au+au), 0])
            for ii in np.arange(len(ff)):
                # Trial-install the candidate bounds and their mirror.
                lciw[xvalue] = ff[ii, 0]
                uciw[xvalue] = ff[ii, 1]
                lciw[n-xvalue] = N-uciw[xvalue]
                uciw[n-xvalue] = N-lciw[xvalue]
                def cpci(M):
                    # Coverage probability of the current working bounds
                    # at each candidate true count in M; closes over
                    # lciw/uciw, so it must be re-evaluated per trial.
                    kk = np.arange(len(M)).astype(np.float64)
                    for i in np.arange(len(M)):
                        xx = np.arange(n+1)
                        indp = xx.astype(np.float64)
                        uu = 0
                        while (uu < n + 0.5):
                            indp[uu] = (ind(M[i], lciw[uu], uciw[uu]) *
                                        hypergeom.pmf(uu, N, M[i], n))
                            uu += 1
                        kk[i] = sum(indp)
                    return kk
                M = np.arange(N+1)
                # Record the candidate's minimum coverage over all M.
                ff[ii, 3] = min(cpci(M))
            # Keep only candidates whose worst-case coverage is adequate.
            ff = ff[ff[:, 3] >= (1-alpha), :]
            if ff.shape[0]*ff.shape[1] > 4:
                # Several admissible candidates: take the shortest.
                # NOTE(review): the lambda parameter shadows the outer x.
                ff = sorted(ff, key=lambda x: x[2])
                lciw[xvalue] = ff[0][0]
                uciw[xvalue] = ff[0][1]
            else:
                lciw[xvalue] = ff[0][0]
                uciw[xvalue] = ff[0][1]
                lciw[n-xvalue] = N - uciw[xvalue]
                uciw[n-xvalue] = N - lciw[xvalue]
        xvalue -= 1
    lower = lciw[xx == x]
    upper = uciw[xx == x]
    return (lower, upper)
def exact_CI_even(N, n, x, alpha):
    """
    Calculate exact CI for even sample size.

    (Fixed: removed a leftover debug ``print(ff)`` that dumped every
    candidate table to stdout during the refinement loop.)

    Parameters
    ----------
    N: int
        population size
    n: int (even)
        sample size
    x: int
        number of good items in the sample
    alpha:
        1 - confidence level

    Returns
    -------
    lower: int
        lower bound of confidence interval
    upper: int
        upper bound of confidence interval
    """
    xx = np.arange(n+1)
    # Working interval starts at the alpha/2 one-sided bounds; the alpha
    # one-sided bounds limit how far it may later be shrunk.
    lcin1 = lci(xx, n, N, alpha/2)
    ucin1 = uci(xx, n, N, alpha/2)
    lcin2 = lci(xx, n, N, alpha)
    ucin2 = uci(xx, n, N, alpha)
    # lciw/uciw alias lcin1/ucin1 and are mutated in place below.
    lciw = lcin1
    uciw = ucin1
    # The middle point x = n/2 is its own mirror: search for the smallest
    # symmetric interval [aa, N-aa] with adequate minimum coverage.
    xvalue = int((n/2))
    aa = np.arange(lciw[xvalue], floor(N/2)+1)
    ii = 1
    while ii < (len(aa) + 0.5):
        lciw[xvalue] = aa[ii - 1]
        uciw[xvalue] = N - aa[ii - 1]
        def cpci(M):
            # Coverage probability of the current working bounds at each
            # candidate true count in M; closes over lciw/uciw.
            kk = np.arange(len(M)).astype(np.float64)
            for i in np.arange(len(M)):
                xx = np.arange(n+1)
                indp = xx.astype(np.float64)
                uu = 0
                while (uu < n + 0.5):
                    indp[uu] = (ind(M[i], lciw[uu], uciw[uu]) *
                                hypergeom.pmf(uu, N, M[i], n))
                    uu += 1
                kk[i] = sum(indp)
            return kk
        M = np.arange(N+1)
        bb = min(cpci(M))
        if (bb >= 1-alpha):
            # Still covering: remember this width and try to shrink more.
            ii1 = ii
            ii += 1
        else:
            ii = len(aa) + 1
    # NOTE(review): if the very first candidate fails coverage, ii1 is
    # never bound and the next line raises NameError; the first candidate
    # is the alpha/2 bound, which should always cover — confirm.
    lciw[xvalue] = aa[ii1-1]
    uciw[xvalue] = N - lciw[xvalue]
    xvalue = int((n/2)-1)
    # Walk the remaining x-values from the middle outward; mirror pairs
    # (x, n-x) are updated together.
    while xvalue > -0.5:
        al = lcin2[xvalue]-lciw[xvalue]+1
        au = int(uciw[xvalue]-ucin2[xvalue]+1)
        if al*au > 1:
            # ff columns: candidate lower, candidate upper, interval
            # length, worst-case coverage.
            ff = np.zeros((al*au, 4))
            for i in np.arange(al):
                ff[np.arange(i*au, i*au+au), 0] = lciw[xvalue]+i
                ff[np.arange(i*au, i*au+au), 1] = np.arange(ucin2[xvalue],
                                                          uciw[xvalue]+1)
                ff[np.arange(i*au, i*au+au), 2] = (
                    ff[np.arange(i*au, i*au+au), 1] -
                    ff[np.arange(i*au, i*au+au), 0])
            for ii in np.arange(len(ff)):
                # Trial-install the candidate bounds and their mirror.
                lciw[xvalue] = ff[ii, 0]
                uciw[xvalue] = ff[ii, 1]
                lciw[n-xvalue] = N-uciw[xvalue]
                uciw[n-xvalue] = N-lciw[xvalue]
                def cpci(M):
                    kk = np.arange(len(M)).astype(np.float64)
                    for i in np.arange(len(M)):
                        xx = np.arange(n+1)
                        indp = xx.astype(np.float64)
                        uu = 0
                        while (uu < n + 0.5):
                            indp[uu] = (ind(M[i], lciw[uu], uciw[uu]) *
                                        hypergeom.pmf(uu, N, M[i], n))
                            uu += 1
                        kk[i] = sum(indp)
                    return kk
                M = np.arange(N+1)
                ff[ii, 3] = min(cpci(M))
            # Keep only candidates whose worst-case coverage is adequate.
            ff = ff[ff[:, 3] >= (1-alpha), :]
            if ff.shape[0]*ff.shape[1] > 4:
                # Several admissible candidates: take the shortest.
                ff = sorted(ff, key=lambda x: x[2])
                lciw[xvalue] = ff[0][0]
                uciw[xvalue] = ff[0][1]
            else:
                lciw[xvalue] = ff[0][0]
                uciw[xvalue] = ff[0][1]
                lciw[n-xvalue] = N - uciw[xvalue]
                uciw[n-xvalue] = N - lciw[xvalue]
        xvalue -= 1
    lower = lciw[xx == x]
    upper = uciw[xx == x]
    return (lower, upper)
def exact_CI(N, n, x, alpha):
    """
    Calculate exact CI for even or odd sample size.

    Dispatches on the parity of n and casts the bounds to plain ints.

    Parameters
    ----------
    N: int
        population size
    n: int
        sample size
    x: int
        number of good items in the sample
    alpha:
        1 - confidence level

    Returns
    -------
    lower: int
        lower bound of confidence interval
    upper: int
        upper bound of confidence interval
    """
    if n % 2 == 1:
        bounds = exact_CI_odd(N, n, x, alpha)
    else:
        bounds = exact_CI_even(N, n, x, alpha)
    return (int(bounds[0]), int(bounds[1]))
def combin_exact_CI(n11, n10, n01, n00, alpha):
    """
    Calculate taus for method 2.1.

    Combines separate exact alpha/2 intervals for the treated-arm and
    control-arm success counts via the Bonferroni difference.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    alpha: float
        1 - confidence level

    Returns
    -------
    tau_lower: float
        left-side tau for one-sided confidence interval
    tau_upper: float
        right-side tau for one-sided confidence interval
    """
    n = n11 + n10 + n01 + n00
    m = n11 + n10
    treated_ci = exact_CI(N=n, n=m, x=n11, alpha=alpha / 2)
    control_ci = exact_CI(N=n, n=(n - m), x=n01, alpha=alpha / 2)
    lower = (treated_ci[0] - control_ci[1]) / n
    upper = (treated_ci[1] - control_ci[0]) / n
    return (lower, upper)
def N_plus1_exact_CI(n11, n10, n01, n00, alpha):
    """
    Calculate taus for method 2.2.

    Enumerates potential tables consistent with an exact interval for the
    control-arm success total and keeps the extreme compatible effects.

    Parameters
    ----------
    n11: int
        number of subjects under treatment that experienced outcome 1
    n10: int
        number of subjects under treatment that experienced outcome 0
    n01: int
        number of subjects under control that experienced outcome 1
    n00: int
        number of subjects under control that experienced outcome 0
    alpha: float
        1 - confidence level

    Returns
    -------
    tau_lower: float
        left-side tau for one-sided confidence interval
    tau_upper: float
        right-side tau for one-sided confidence interval
    """
    n = n11 + n10 + n01 + n00
    m = n11 + n10
    lower_bound = float('inf')
    upper_bound = float('-inf')
    ci_plus1 = exact_CI(N=n, n=(n - m), x=n01, alpha=alpha)
    for total_pos in np.arange(ci_plus1[0], ci_plus1[1] + 1):
        for N11 in np.arange(total_pos + 1):
            N01 = total_pos - N11
            for N10 in np.arange(n - total_pos + 1):
                ok = check_compatible(n11, n10, n01, n00, np.array([N11]),
                                      np.array([N10]), np.array([N01]))[0]
                if ok:
                    tau = (N10 - N01) / n
                    lower_bound = min(tau, lower_bound)
                    upper_bound = max(tau, upper_bound)
    return (lower_bound, upper_bound)
| [
"numpy.full",
"numpy.flip",
"scipy.stats.hypergeom.pmf",
"scipy.stats.hypergeom.cdf",
"numpy.empty",
"numpy.flatnonzero",
"math.comb",
"numpy.zeros",
"numpy.floor",
"math.floor",
"numpy.max",
"numpy.min",
"numpy.arange",
"numpy.array",
"numpy.matmul",
"numpy.random.choice",
"numpy.ro... | [((644, 654), 'math.comb', 'comb', (['n', 'm'], {}), '(n, m)\n', (648, 654), False, 'from math import comb, floor\n'), ((703, 730), 'numpy.zeros', 'np.zeros', (['(c, n)'], {'dtype': 'int'}), '((c, n), dtype=int)\n', (711, 730), True, 'import numpy as np\n'), ((744, 756), 'numpy.arange', 'np.arange', (['c'], {}), '(c)\n', (753, 756), True, 'import numpy as np\n'), ((1231, 1251), 'numpy.zeros', 'np.zeros', (['(nperm, n)'], {}), '((nperm, n))\n', (1239, 1251), True, 'import numpy as np\n'), ((1265, 1281), 'numpy.arange', 'np.arange', (['nperm'], {}), '(nperm)\n', (1274, 1281), True, 'import numpy as np\n'), ((1920, 1947), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {'dtype': 'int'}), '((n, 2), dtype=int)\n', (1928, 1947), True, 'import numpy as np\n'), ((3030, 3057), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {'dtype': 'int'}), '((n, 2), dtype=int)\n', (3038, 3057), True, 'import numpy as np\n'), ((4784, 4813), 'numpy.empty', 'np.empty', (['(n_t, 4)'], {'dtype': 'int'}), '((n_t, 4), dtype=int)\n', (4792, 4813), True, 'import numpy as np\n'), ((4933, 4962), 'numpy.empty', 'np.empty', (['(n_t, 4)'], {'dtype': 'int'}), '((n_t, 4), dtype=int)\n', (4941, 4962), True, 'import numpy as np\n'), ((5084, 5105), 'numpy.max', 'np.max', (['lefts'], {'axis': '(1)'}), '(lefts, axis=1)\n', (5090, 5105), True, 'import numpy as np\n'), ((5118, 5140), 'numpy.min', 'np.min', (['rights'], {'axis': '(1)'}), '(rights, axis=1)\n', (5124, 5140), True, 'import numpy as np\n'), ((6166, 6198), 'numpy.zeros', 'np.zeros', (['(n - N11 + 1)'], {'dtype': 'int'}), '(n - N11 + 1, dtype=int)\n', (6174, 6198), True, 'import numpy as np\n'), ((6572, 6597), 'numpy.full', 'np.full', (['(n - N11 + 1)', 'N11'], {}), '(n - N11 + 1, N11)\n', (6579, 6597), True, 'import numpy as np\n'), ((6628, 6650), 'numpy.arange', 'np.arange', (['(n - N11 + 1)'], {}), '(n - N11 + 1)\n', (6637, 6650), True, 'import numpy as np\n'), ((6661, 6683), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'int'}), '(0, 
dtype=int)\n', (6669, 6683), True, 'import numpy as np\n'), ((6698, 6720), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (6706, 6720), True, 'import numpy as np\n'), ((6735, 6757), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (6743, 6757), True, 'import numpy as np\n'), ((9123, 9147), 'numpy.arange', 'np.arange', (['(n11 + n01 + 1)'], {}), '(n11 + n01 + 1)\n', (9132, 9147), True, 'import numpy as np\n'), ((11508, 11530), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (11516, 11530), True, 'import numpy as np\n'), ((11545, 11567), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (11553, 11567), True, 'import numpy as np\n'), ((11582, 11604), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (11590, 11604), True, 'import numpy as np\n'), ((22506, 22522), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (22515, 22522), True, 'import numpy as np\n'), ((25126, 25142), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (25135, 25142), True, 'import numpy as np\n'), ((30648, 30687), 'numpy.arange', 'np.arange', (['ci_plus1[0]', '(ci_plus1[1] + 1)'], {}), '(ci_plus1[0], ci_plus1[1] + 1)\n', (30657, 30687), True, 'import numpy as np\n'), ((678, 690), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (687, 690), True, 'import numpy as np\n'), ((798, 810), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (807, 810), True, 'import numpy as np\n'), ((1297, 1334), 'numpy.random.choice', 'np.random.choice', (['n', 'm'], {'replace': '(False)'}), '(n, m, replace=False)\n', (1313, 1334), True, 'import numpy as np\n'), ((1352, 1364), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1361, 1364), True, 'import numpy as np\n'), ((2026, 2054), 'numpy.arange', 'np.arange', (['N[0]', '(N[0] + N[1])'], {}), '(N[0], N[0] + N[1])\n', (2035, 2054), True, 'import numpy as np\n'), ((2140, 2182), 'numpy.arange', 'np.arange', 
(['(N[0] + N[1])', '(N[0] + N[1] + N[2])'], {}), '(N[0] + N[1], N[0] + N[1] + N[2])\n', (2149, 2182), True, 'import numpy as np\n'), ((2340, 2371), 'numpy.matmul', 'np.matmul', (['Z_all', '(dat[:, 0] / m)'], {}), '(Z_all, dat[:, 0] / m)\n', (2349, 2371), True, 'import numpy as np\n'), ((2372, 2413), 'numpy.matmul', 'np.matmul', (['(1 - Z_all)', '(dat[:, 1] / (n - m))'], {}), '(1 - Z_all, dat[:, 1] / (n - m))\n', (2381, 2413), True, 'import numpy as np\n'), ((3136, 3164), 'numpy.arange', 'np.arange', (['N[0]', '(N[0] + N[1])'], {}), '(N[0], N[0] + N[1])\n', (3145, 3164), True, 'import numpy as np\n'), ((3250, 3292), 'numpy.arange', 'np.arange', (['(N[0] + N[1])', '(N[0] + N[1] + N[2])'], {}), '(N[0] + N[1], N[0] + N[1] + N[2])\n', (3259, 3292), True, 'import numpy as np\n'), ((3450, 3481), 'numpy.matmul', 'np.matmul', (['Z_all', '(dat[:, 0] / m)'], {}), '(Z_all, dat[:, 0] / m)\n', (3459, 3481), True, 'import numpy as np\n'), ((3482, 3523), 'numpy.matmul', 'np.matmul', (['(1 - Z_all)', '(dat[:, 1] / (n - m))'], {}), '(1 - Z_all, dat[:, 1] / (n - m))\n', (3491, 3523), True, 'import numpy as np\n'), ((6507, 6534), 'numpy.arange', 'np.arange', (['N01', '(n - N11 + 1)'], {}), '(N01, n - N11 + 1)\n', (6516, 6534), True, 'import numpy as np\n'), ((7545, 7609), 'numpy.flatnonzero', 'np.flatnonzero', (['(N10_vec[compat] - N01_vec[compat] == n * tau_min)'], {}), '(N10_vec[compat] - N01_vec[compat] == n * tau_min)\n', (7559, 7609), True, 'import numpy as np\n'), ((7696, 7844), 'numpy.array', 'np.array', (['[N11, N10_vec[compat][accept_pos], N01_vec[compat][accept_pos], n - (N11 +\n N10_vec[compat][accept_pos] + N01_vec[compat][accept_pos])]'], {}), '([N11, N10_vec[compat][accept_pos], N01_vec[compat][accept_pos], n -\n (N11 + N10_vec[compat][accept_pos] + N01_vec[compat][accept_pos])])\n', (7704, 7844), True, 'import numpy as np\n'), ((8957, 8967), 'math.comb', 'comb', (['n', 'm'], {}), '(n, m)\n', (8961, 8967), False, 'from math import comb, floor\n'), ((10716, 10738), 
'numpy.arange', 'np.arange', (['(n - N11 + 1)'], {}), '(n - N11 + 1)\n', (10725, 10738), True, 'import numpy as np\n'), ((12376, 12440), 'numpy.flatnonzero', 'np.flatnonzero', (['(N10_vec[compat] - N01_vec[compat] == n * tau_min)'], {}), '(N10_vec[compat] - N01_vec[compat] == n * tau_min)\n', (12390, 12440), True, 'import numpy as np\n'), ((12531, 12679), 'numpy.array', 'np.array', (['[N11, N10_vec[compat][accept_pos], N01_vec[compat][accept_pos], n - (N11 +\n N10_vec[compat][accept_pos] + N01_vec[compat][accept_pos])]'], {}), '([N11, N10_vec[compat][accept_pos], N01_vec[compat][accept_pos], n -\n (N11 + N10_vec[compat][accept_pos] + N01_vec[compat][accept_pos])])\n', (12539, 12679), True, 'import numpy as np\n'), ((12855, 12919), 'numpy.flatnonzero', 'np.flatnonzero', (['(N10_vec[compat] - N01_vec[compat] == n * tau_max)'], {}), '(N10_vec[compat] - N01_vec[compat] == n * tau_max)\n', (12869, 12919), True, 'import numpy as np\n'), ((13010, 13158), 'numpy.array', 'np.array', (['[N11, N10_vec[compat][accept_pos], N01_vec[compat][accept_pos], n - (N11 +\n N10_vec[compat][accept_pos] + N01_vec[compat][accept_pos])]'], {}), '([N11, N10_vec[compat][accept_pos], N01_vec[compat][accept_pos], n -\n (N11 + N10_vec[compat][accept_pos] + N01_vec[compat][accept_pos])])\n', (13018, 13158), True, 'import numpy as np\n'), ((17848, 17868), 'numpy.flip', 'np.flip', (['ci_upper[3]'], {}), '(ci_upper[3])\n', (17855, 17868), True, 'import numpy as np\n'), ((17930, 17950), 'numpy.flip', 'np.flip', (['ci_upper[1]'], {}), '(ci_upper[1])\n', (17937, 17950), True, 'import numpy as np\n'), ((20937, 20950), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (20945, 20950), True, 'import numpy as np\n'), ((25978, 25994), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (25987, 25994), True, 'import numpy as np\n'), ((30706, 30728), 'numpy.arange', 'np.arange', (['(N_plus1 + 1)'], {}), '(N_plus1 + 1)\n', (30715, 30728), True, 'import numpy as np\n'), ((10735, 10757), 
'numpy.arange', 'np.arange', (['(n - N11 + 1)'], {}), '(n - N11 + 1)\n', (10744, 10757), True, 'import numpy as np\n'), ((11348, 11394), 'numpy.floor', 'np.floor', (['(N01_vec0[N01_vec0 >= N01] + ntau_obs)'], {}), '(N01_vec0[N01_vec0 >= N01] + ntau_obs)\n', (11356, 11394), True, 'import numpy as np\n'), ((17028, 17038), 'math.comb', 'comb', (['n', 'm'], {}), '(n, m)\n', (17032, 17038), False, 'from math import comb, floor\n'), ((17318, 17328), 'math.comb', 'comb', (['n', 'm'], {}), '(n, m)\n', (17322, 17328), False, 'from math import comb, floor\n'), ((21074, 21093), 'numpy.arange', 'np.arange', (['(0)', '(N + 1)'], {}), '(0, N + 1)\n', (21083, 21093), True, 'import numpy as np\n'), ((21163, 21209), 'scipy.stats.hypergeom.cdf', 'hypergeom.cdf', (['(x[i] - 1)', 'N', '(aa[1:N + 1] - 1)', 'n'], {}), '(x[i] - 1, N, aa[1:N + 1] - 1, n)\n', (21176, 21209), False, 'from scipy.stats import hypergeom\n'), ((21223, 21242), 'numpy.vstack', 'np.vstack', (['(aa, bb)'], {}), '((aa, bb))\n', (21232, 21242), True, 'import numpy as np\n'), ((22708, 22720), 'math.floor', 'floor', (['(n / 2)'], {}), '(n / 2)\n', (22713, 22720), False, 'from math import comb, floor\n'), ((22877, 22899), 'numpy.zeros', 'np.zeros', (['(al * au, 4)'], {}), '((al * au, 4))\n', (22885, 22899), True, 'import numpy as np\n'), ((22919, 22932), 'numpy.arange', 'np.arange', (['al'], {}), '(al)\n', (22928, 22932), True, 'import numpy as np\n'), ((25368, 25380), 'math.floor', 'floor', (['(N / 2)'], {}), '(N / 2)\n', (25373, 25380), False, 'from math import comb, floor\n'), ((26375, 26397), 'numpy.zeros', 'np.zeros', (['(al * au, 4)'], {}), '((al * au, 4))\n', (26383, 26397), True, 'import numpy as np\n'), ((26417, 26430), 'numpy.arange', 'np.arange', (['al'], {}), '(al)\n', (26426, 26430), True, 'import numpy as np\n'), ((30781, 30807), 'numpy.arange', 'np.arange', (['(n - N_plus1 + 1)'], {}), '(n - N_plus1 + 1)\n', (30790, 30807), True, 'import numpy as np\n'), ((2481, 2502), 'numpy.round', 'np.round', 
(['tau_hat', '(15)'], {}), '(tau_hat, 15)\n', (2489, 2502), True, 'import numpy as np\n'), ((6893, 6950), 'numpy.arange', 'np.arange', (['N10_vec0[i]', '(n - N11_vec0[i] - N01_vec0[i] + 1)'], {}), '(N10_vec0[i], n - N11_vec0[i] - N01_vec0[i] + 1)\n', (6902, 6950), True, 'import numpy as np\n'), ((7040, 7109), 'numpy.full', 'np.full', (['(n - N11_vec0[i] - N01_vec0[i] - N10_vec0[i] + 1)', 'N11_vec0[i]'], {}), '(n - N11_vec0[i] - N01_vec0[i] - N10_vec0[i] + 1, N11_vec0[i])\n', (7047, 7109), True, 'import numpy as np\n'), ((7247, 7316), 'numpy.full', 'np.full', (['(n - N11_vec0[i] - N01_vec0[i] - N10_vec0[i] + 1)', 'N01_vec0[i]'], {}), '(n - N11_vec0[i] - N01_vec0[i] - N10_vec0[i] + 1, N01_vec0[i])\n', (7254, 7316), True, 'import numpy as np\n'), ((11701, 11733), 'numpy.floor', 'np.floor', (['(N01_vec0[i] + ntau_obs)'], {}), '(N01_vec0[i] + ntau_obs)\n', (11709, 11733), True, 'import numpy as np\n'), ((11880, 11917), 'numpy.arange', 'np.arange', (['N10_vec0[i]', '(N10_upper + 1)'], {}), '(N10_vec0[i], N10_upper + 1)\n', (11889, 11917), True, 'import numpy as np\n'), ((11958, 12007), 'numpy.full', 'np.full', (['(N10_upper - N10_vec0[i] + 1)', 'N11_vec0[i]'], {}), '(N10_upper - N10_vec0[i] + 1, N11_vec0[i])\n', (11965, 12007), True, 'import numpy as np\n'), ((12097, 12146), 'numpy.full', 'np.full', (['(N10_upper - N10_vec0[i] + 1)', 'N01_vec0[i]'], {}), '(N10_upper - N10_vec0[i] + 1, N01_vec0[i])\n', (12104, 12146), True, 'import numpy as np\n'), ((23049, 23091), 'numpy.arange', 'np.arange', (['ucin2[xvalue]', '(uciw[xvalue] + 1)'], {}), '(ucin2[xvalue], uciw[xvalue] + 1)\n', (23058, 23091), True, 'import numpy as np\n'), ((24114, 24130), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (24123, 24130), True, 'import numpy as np\n'), ((25634, 25650), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (25643, 25650), True, 'import numpy as np\n'), ((26547, 26589), 'numpy.arange', 'np.arange', (['ucin2[xvalue]', '(uciw[xvalue] + 1)'], {}), 
'(ucin2[xvalue], uciw[xvalue] + 1)\n', (26556, 26589), True, 'import numpy as np\n'), ((27612, 27628), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (27621, 27628), True, 'import numpy as np\n'), ((22953, 22983), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (22962, 22983), True, 'import numpy as np\n'), ((23018, 23048), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (23027, 23048), True, 'import numpy as np\n'), ((23169, 23199), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (23178, 23199), True, 'import numpy as np\n'), ((23698, 23714), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (23707, 23714), True, 'import numpy as np\n'), ((25851, 25880), 'scipy.stats.hypergeom.pmf', 'hypergeom.pmf', (['uu', 'N', 'M[i]', 'n'], {}), '(uu, N, M[i], n)\n', (25864, 25880), False, 'from scipy.stats import hypergeom\n'), ((26451, 26481), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (26460, 26481), True, 'import numpy as np\n'), ((26516, 26546), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (26525, 26546), True, 'import numpy as np\n'), ((26667, 26697), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (26676, 26697), True, 'import numpy as np\n'), ((27196, 27212), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (27205, 27212), True, 'import numpy as np\n'), ((30897, 30912), 'numpy.array', 'np.array', (['[N11]'], {}), '([N11])\n', (30905, 30912), True, 'import numpy as np\n'), ((30950, 30965), 'numpy.array', 'np.array', (['[N10]'], {}), '([N10])\n', (30958, 30965), True, 'import numpy as np\n'), ((30967, 30982), 'numpy.array', 'np.array', (['[N01]'], {}), '([N01])\n', (30975, 30982), True, 'import numpy as np\n'), ((17231, 17241), 'math.comb', 'comb', (['n', 'm'], 
{}), '(n, m)\n', (17235, 17241), False, 'from math import comb, floor\n'), ((23225, 23255), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (23234, 23255), True, 'import numpy as np\n'), ((23279, 23309), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (23288, 23309), True, 'import numpy as np\n'), ((26723, 26753), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (26732, 26753), True, 'import numpy as np\n'), ((26777, 26807), 'numpy.arange', 'np.arange', (['(i * au)', '(i * au + au)'], {}), '(i * au, i * au + au)\n', (26786, 26807), True, 'import numpy as np\n'), ((23955, 23984), 'scipy.stats.hypergeom.pmf', 'hypergeom.pmf', (['uu', 'N', 'M[i]', 'n'], {}), '(uu, N, M[i], n)\n', (23968, 23984), False, 'from scipy.stats import hypergeom\n'), ((27453, 27482), 'scipy.stats.hypergeom.pmf', 'hypergeom.pmf', (['uu', 'N', 'M[i]', 'n'], {}), '(uu, N, M[i], n)\n', (27466, 27482), False, 'from scipy.stats import hypergeom\n')] |
import numpy as np
from mpi4py import MPI
import scipy.stats as ss
import matplotlib.pyplot as plt

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# Generate normally distributed numbers on the root rank, then broadcast.
n = 1000
if rank == 0:
    sendbuf_0 = np.random.normal(0, 1, n)
else:
    # Non-root ranks need a buffer of matching size/dtype to receive into.
    sendbuf_0 = np.empty(n)
# Bcast is collective: every rank in the communicator must call it. The
# original only called it on ranks 0 and 1, which deadlocks for size > 2 and
# leaves sendbuf_0 undefined on higher ranks.
comm.Bcast(sendbuf_0, root=0)

cs = ['b*', 'r^']
# Standard-normal density evaluated at the broadcast sample points.
rv_0 = ss.norm.pdf(sendbuf_0)
plt.plot(sendbuf_0, rv_0, cs[0], label="broadcast sample")

# Resample using the empirical mean/std of the broadcast data.
m = np.mean(sendbuf_0)
s = np.std(sendbuf_0)
sendbuf_1 = np.random.normal(m, s, n)
# NOTE(review): the pdf is evaluated under the *standard* normal here; if the
# fitted N(m, s) density was intended, use ss.norm.pdf(sendbuf_1, m, s).
rv_1 = ss.norm.pdf(sendbuf_1)
plt.plot(sendbuf_1, rv_1, cs[1], label="resampled")
# The original referenced plt.legend without calling it (no-op); call it.
plt.legend()
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.std",
"numpy.empty",
"scipy.stats.norm.pdf",
"numpy.mean",
"numpy.random.normal"
] | [((237, 262), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (253, 262), True, 'import numpy as np\n'), ((329, 340), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (337, 340), True, 'import numpy as np\n'), ((412, 434), 'scipy.stats.norm.pdf', 'ss.norm.pdf', (['sendbuf_0'], {}), '(sendbuf_0)\n', (423, 434), True, 'import scipy.stats as ss\n'), ((440, 472), 'matplotlib.pyplot.plot', 'plt.plot', (['sendbuf_0', 'rv_0', 'cs[0]'], {}), '(sendbuf_0, rv_0, cs[0])\n', (448, 472), True, 'import matplotlib.pyplot as plt\n'), ((484, 502), 'numpy.mean', 'np.mean', (['sendbuf_0'], {}), '(sendbuf_0)\n', (491, 502), True, 'import numpy as np\n'), ((510, 527), 'numpy.std', 'np.std', (['sendbuf_0'], {}), '(sendbuf_0)\n', (516, 527), True, 'import numpy as np\n'), ((557, 582), 'numpy.random.normal', 'np.random.normal', (['m', 's', 'n'], {}), '(m, s, n)\n', (573, 582), True, 'import numpy as np\n'), ((597, 619), 'scipy.stats.norm.pdf', 'ss.norm.pdf', (['sendbuf_1'], {}), '(sendbuf_1)\n', (608, 619), True, 'import scipy.stats as ss\n'), ((625, 657), 'matplotlib.pyplot.plot', 'plt.plot', (['sendbuf_1', 'rv_1', 'cs[1]'], {}), '(sendbuf_1, rv_1, cs[1])\n', (633, 657), True, 'import matplotlib.pyplot as plt\n'), ((677, 687), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (685, 687), True, 'import matplotlib.pyplot as plt\n')] |
# %% [markdown]
# ##
import os
import warnings
from itertools import chain
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from scipy.stats import poisson
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS, TSNE, Isomap
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import ignore_warnings
from tqdm.autonotebook import tqdm
from umap import UMAP
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspy.models import DCSBMEstimator, SBMEstimator
from graspy.plot import pairplot
from graspy.utils import (
augment_diagonal,
binarize,
pass_to_ranks,
remove_loops,
symmetrize,
to_laplace,
)
import matplotlib.patches as patches
from src.align import Procrustes
from src.cluster import BinaryCluster, MaggotCluster, get_paired_inds
from src.data import load_metagraph
from src.graph import MetaGraph, preprocess
from src.hierarchy import signal_flow
from src.io import readcsv, savecsv, savefig
from src.pymaid import start_instance
from src.traverse import Cascade, RandomWalk, to_markov_matrix, to_transmission_matrix
from src.visualization import (
CLASS_COLOR_DICT,
add_connections,
adjplot,
barplot_text,
draw_networkx_nice,
gridmap,
matrixplot,
palplot,
remove_spines,
screeplot,
set_axes_equal,
stacked_barplot,
)
# Silence sklearn convergence warnings for the whole run.
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)

# Output folder name: this script's filename without the ".py" suffix.
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)

# Shared matplotlib defaults for every figure produced below.
rc_dict = {
    "axes.spines.right": False,
    "axes.spines.top": False,
    "axes.formatter.limits": (-3, 3),
    "figure.figsize": (6, 3),
    "figure.dpi": 100,
}
mpl.rcParams.update(rc_dict)
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kwargs):
    """Save a named figure under this module's output folder (src.io.savefig)."""
    savefig(name, foldername=FNAME, save_on=True, **kwargs)
def stashcsv(df, name, **kwargs):
    """Save a dataframe under this module's output folder (src.io.savecsv)."""
    savecsv(df, name, foldername=FNAME, **kwargs)
# %% [markdown]
# ##
# Parameters identifying which saved clustering run (experiment
# "137.1-BDP-omni-clust") to load.
metric = "bic"
bic_ratio = 1
d = 8  # embedding dimension
method = "iso"
basename = f"-method={method}-d={d}-bic_ratio={bic_ratio}-G"
title = f"Method={method}, d={d}, BIC ratio={bic_ratio}"
exp = "137.1-BDP-omni-clust"
# load data
pair_meta = readcsv("meta" + basename, foldername=exp, index_col=0)
pair_meta["lvl0_labels"] = pair_meta["lvl0_labels"].astype(str)
pair_adj = readcsv("adj" + basename, foldername=exp, index_col=0)
pair_mg = MetaGraph(pair_adj.values, pair_meta)
pair_meta = pair_mg.meta
# full_mg = load_metagraph("G")
# full_mg.meta[]
# full_meta = pair_meta
# full_adj = pair_adjs
full_meta = pair_meta
full_mg = pair_mg
# parameters
lowest_level = 8  # deepest hierarchy level used below
width = 0.5  # default bar width for the dendrogram plots
gap = 10  # spacing (in rows) inserted between adjacent leaf clusters
# this determines the sorting for everybody
level_names = [f"lvl{i}_labels" for i in range(lowest_level + 1)]
sort_class = level_names + ["merge_class"]
class_order = ["sf"]
total_sort_by = []
# For each grouping column, order groups by their mean "sf" value
# (presumably signal flow — TODO confirm), then break ties by the label.
for sc in sort_class:
    for co in class_order:
        class_value = full_meta.groupby(sc)[co].mean()
        full_meta[f"{sc}_{co}_order"] = full_meta[sc].map(class_value)
        total_sort_by.append(f"{sc}_{co}_order")
    total_sort_by.append(sc)
full_mg = full_mg.sort_values(total_sort_by, ascending=False)
full_meta = full_mg.meta
full_adj = full_mg.adj
n_leaf = full_meta[f"lvl{lowest_level}_labels"].nunique()
n_pairs = len(full_meta) // 2
# %% [markdown]
# ##
from graspy.models import SBMEstimator

# Fit one SBM per hierarchy level and plot its block-probability matrix.
n_row = 3
n_col = 8  # was 7: the loop runs 8 levels, so axs[1, 7] used to IndexError
scale = 10
# figsize is (width, height): width tracks columns, height tracks rows
# (the original had the two swapped).
fig, axs = plt.subplots(n_row, n_col, figsize=(n_col * scale, n_row * scale))
for level in range(n_col):
    label_name = f"lvl{level}_labels_side"
    sbm = SBMEstimator(directed=True, loops=True)
    sbm.fit(binarize(full_adj), full_meta[label_name].values)
    # Only the middle row of the axes grid is used for these heatmaps.
    ax = axs[1, level]
    _, _, top, _ = adjplot(
        sbm.p_mat_,
        ax=ax,
        plot_type="heatmap",
        sort_class=["hemisphere"] + level_names[: level + 1],
        item_order=["merge_class_sf_order", "merge_class", "sf"],
        class_order="sf",
        meta=full_mg.meta,
        palette=CLASS_COLOR_DICT,
        colors="merge_class",
        ticks=False,
        gridline_kws=dict(linewidth=0.05, color="grey", linestyle="--"),
        cbar_kws=dict(shrink=0.6),
    )
stashfig("big-bhat-fig")
# %% [markdown]
# ##
# Get positions for left and right simultaneously, so they'll line up ###
def get_mid_map(full_meta, leaf_key=None, bilat=False):
    """Map each leaf-cluster label (with a trailing "-") to a bar midpoint.

    Groups neurons by (leaf cluster, merge_class), stacks the group sizes with
    the module-level ``gap`` between clusters, and records the midpoint of each
    cluster's stacked bar. With ``bilat=True`` all neurons are positioned
    together; otherwise left and right hemispheres are laid out separately and
    each cluster takes the max of its two midpoints so the sides line up.
    """
    if leaf_key is None:
        leaf_key = f"lvl{lowest_level}_labels"
    # left
    if not bilat:
        meta = full_meta[full_meta["hemisphere"] == "L"].copy()
    else:
        meta = full_meta.copy()
    sizes = meta.groupby([leaf_key, "merge_class"], sort=False).size()
    uni_labels = sizes.index.unique(0)
    mids = []
    offset = 0
    for ul in uni_labels:
        heights = sizes.loc[ul]
        # bottom coordinate of each class segment within this cluster's bar
        starts = heights.cumsum() - heights + offset
        offset += heights.sum() + gap
        minimum = starts[0]
        maximum = starts[-1] + heights[-1]
        mid = (minimum + maximum) / 2
        mids.append(mid)
    left_mid_map = dict(zip(uni_labels, mids))
    if bilat:
        first_mid_map = {}
        for k in left_mid_map.keys():
            left_mid = left_mid_map[k]
            # keys get a "-" suffix to match the dendrogram child naming
            first_mid_map[k + "-"] = left_mid
        return first_mid_map
    # right
    meta = full_meta[full_meta["hemisphere"] == "R"].copy()
    sizes = meta.groupby([leaf_key, "merge_class"], sort=False).size()
    # uni_labels = np.unique(labels)
    uni_labels = sizes.index.unique(0)
    mids = []
    offset = 0
    for ul in uni_labels:
        heights = sizes.loc[ul]
        starts = heights.cumsum() - heights + offset
        offset += heights.sum() + gap
        minimum = starts[0]
        maximum = starts[-1] + heights[-1]
        mid = (minimum + maximum) / 2
        mids.append(mid)
    right_mid_map = dict(zip(uni_labels, mids))
    keys = list(set(list(left_mid_map.keys()) + list(right_mid_map.keys())))
    # NOTE(review): assumes every label occurs in both hemispheres — a label
    # present on only one side would raise KeyError here; confirm upstream.
    first_mid_map = {}
    for k in keys:
        left_mid = left_mid_map[k]
        right_mid = right_mid_map[k]
        # take the larger midpoint so both hemispheres fit below it
        first_mid_map[k + "-"] = max(left_mid, right_mid)
    return first_mid_map
first_mid_map = get_mid_map(full_meta, bilat=True)
def calc_bar_params(sizes, label, mid):
    """Stacked-bar geometry for one cluster, centered at *mid*.

    Returns (heights, starts, colors): the per-class segment heights, the
    bottom coordinate of each segment, and one color per segment taken from
    CLASS_COLOR_DICT.
    """
    heights = sizes.loc[label]
    total = heights.sum()
    bottom = mid - total / 2
    starts = heights.cumsum() - heights + bottom
    colors = np.vectorize(CLASS_COLOR_DICT.get)(heights.index)
    return heights, starts, colors
def get_last_mids(label, last_mid_map):
    """Collect the midpoint(s) recorded for *label* in *last_mid_map*.

    Candidate keys are checked in a fixed order: the label itself
    ("<label>-") and its two possible children ("<label>-0", "<label>-1").
    Prints a warning and returns [] when no key matches.
    """
    candidates = (label + "-", label + "-0", label + "-1")
    found = [last_mid_map[key] for key in candidates if key in last_mid_map]
    if not found:
        print(label + " has no anchor in mid-map")
    return found
def draw_bar_dendrogram(meta, ax, orientation="vertical", width=0.5):
    """Draw the hierarchical cluster dendrogram as stacked class-colored bars.

    Walks levels from ``lowest_level`` down to 0 (module global), drawing one
    stacked bar per cluster at each level and dashes/connectors that link each
    cluster to its children. Positions are anchored by the module-level
    ``first_mid_map`` computed from the leaf level.

    Parameters
    ----------
    meta : metadata with lvl{i}_labels and merge_class columns
    ax : matplotlib axes to draw on
    orientation : "vertical" (levels on x) or anything else (levels on y)
    width : bar thickness along the level axis
    """
    last_mid_map = first_mid_map
    line_kws = dict(linewidth=1, color="k")
    for level in np.arange(lowest_level + 1)[::-1]:
        sizes = meta.groupby([f"lvl{level}_labels", "merge_class"], sort=False).size()
        uni_labels = sizes.index.unique(0)  # these need to be in the right order
        mids = []
        for ul in uni_labels:
            # anchor this cluster at the mean of its children's midpoints
            last_mids = get_last_mids(ul, last_mid_map)
            grand_mid = np.mean(last_mids)
            heights, starts, colors = calc_bar_params(sizes, ul, grand_mid)
            minimum = starts[0]
            maximum = starts[-1] + heights[-1]
            mid = (minimum + maximum) / 2
            mids.append(mid)
            # draw the bars (one segment per merge_class)
            for i in range(len(heights)):
                if orientation == "vertical":
                    ax.bar(
                        x=level,
                        height=heights[i],
                        width=width,
                        bottom=starts[i],
                        color=colors[i],
                    )
                else:
                    ax.barh(
                        y=level,
                        height=width,
                        width=heights[i],
                        left=starts[i],
                        color=colors[i],
                    )
            # draw a horizontal line from the middle of this bar
            if level != 0:  # dont plot dash on the last
                if orientation == "vertical":
                    xs = [level - 0.5 * width, level - width]
                    ys = [mid, mid]
                else:
                    ys = [level - 0.5 * width, level - width]
                    xs = [mid, mid]
                ax.plot(xs, ys, **line_kws)
            # line connecting to children clusters
            if level != lowest_level:  # don't plot first dash
                if orientation == "vertical":
                    xs = [level + 0.5 * width, level + width]
                    ys = [grand_mid, grand_mid]
                else:
                    ys = [level + 0.5 * width, level + width]
                    xs = [grand_mid, grand_mid]
                ax.plot(xs, ys, **line_kws)
            # draw a vertical line connecting the two child clusters
            if len(last_mids) == 2:
                if orientation == "vertical":
                    xs = [level + width, level + width]
                    ys = last_mids
                else:
                    xs = last_mids
                    ys = [level + width, level + width]
                ax.plot(xs, ys, **line_kws)
        # midpoints at this level become the anchors for the next (coarser) one
        last_mid_map = dict(zip(uni_labels, mids))
# %% [markdown]
# ##
from mpl_toolkits.axes_grid1 import make_axes_locatable
from src.utils import get_blockmodel_df
# Leaf-cluster label for every neuron (in the globally sorted order).
labels = full_meta[f"lvl{lowest_level}_labels"].values
# first_mid_map keys carry a trailing "-"; strip it to index by the raw label.
mid_map = {}
for key, val in first_mid_map.items():
    new_key = key[:-1]
    mid_map[new_key] = val
blockmodel_df = get_blockmodel_df(
    full_adj, labels, return_counts=True, use_weights=True
)
group_sizes = full_meta.groupby([f"lvl{lowest_level}_labels"]).size()
blockmodel_df.index.name = "source"
blockmodel_df.columns.name = "target"
blockmodel_df.reset_index(inplace=True)
# bare expression: notebook-style cell echo, no effect when run as a script
blockmodel_df
# Long-format cluster-to-cluster edge list with plot coordinates and several
# normalized weight variants.
blockmodel_edges = blockmodel_df.melt(id_vars="source", value_name="weight")
blockmodel_edges["x"] = blockmodel_edges["target"].map(mid_map)
blockmodel_edges["y"] = blockmodel_edges["source"].map(mid_map)
blockmodel_edges["source_size"] = blockmodel_edges["source"].map(group_sizes)
blockmodel_edges["target_size"] = blockmodel_edges["target"].map(group_sizes)
# out_weight: fraction of the source cluster's total outgoing weight
blockmodel_edges["source_n_out"] = blockmodel_edges["source"].map(
    blockmodel_edges.groupby("source")["weight"].sum()
)
blockmodel_edges["out_weight"] = (
    blockmodel_edges["weight"] / blockmodel_edges["source_n_out"]
)
# in_weight: fraction of the target cluster's total incoming weight
blockmodel_edges["target_n_in"] = blockmodel_edges["target"].map(
    blockmodel_edges.groupby("target")["weight"].sum()
)
blockmodel_edges["in_weight"] = (
    blockmodel_edges["weight"] / blockmodel_edges["target_n_in"]
)
# norm_weight: weight scaled by the geometric mean of the two cluster sizes
blockmodel_edges["norm_weight"] = blockmodel_edges["weight"] / np.sqrt(
    (blockmodel_edges["source_size"] * blockmodel_edges["target_size"])
)
sns.set_context("talk")
fig, main_ax = plt.subplots(1, 1, figsize=(30, 30))
# Limits span all neurons plus the inter-cluster gaps; the x limits are given
# high-to-low, which reverses the x-axis (adjacency-matrix orientation).
main_ax.set_ylim((-gap, (2 * n_pairs + gap * n_leaf)))
main_ax.set_xlim(((2 * n_pairs + gap * n_leaf), -gap))
# sns.scatterplot(
#     data=blockmodel_edges,
#     x="x",
#     y="y",
#     size="in_weight",
#     legend=False,
#     sizes=(0, 600),
#     # hue="out_weight",
#     # palette="Blues",
#     # marker="s",
# )
# Compute the [min, max] extent of each leaf cluster's stacked bar along the
# shared axis; these bounds drive the rectangle grid drawn below.
meta = full_meta.copy()
last_mid_map = first_mid_map
sizes = meta.groupby([f"lvl{lowest_level}_labels", "merge_class"], sort=False).size()
uni_labels = sizes.index.unique(0)  # these need to be in the right order
mins = []
maxs = []
for ul in uni_labels:
    last_mids = get_last_mids(ul, last_mid_map)
    grand_mid = np.mean(last_mids)
    # colors are not needed here, only the bar geometry
    heights, starts, _ = calc_bar_params(sizes, ul, grand_mid)
    minimum = starts[0]
    maximum = starts[-1] + heights[-1]
    mins.append(minimum)
    maxs.append(maximum)
# One row per leaf cluster; columns 0/1 hold the min/max coordinate.
bound_df = pd.DataFrame(data=[mins, maxs], columns=uni_labels).T
# Draw one rectangle per (target, source) pair of leaf clusters, spanning each
# cluster's bar extent on the adjacency canvas.
# NOTE: the original also filtered `blockmodel_edges` for the matching
# (source, target) row in every cell but never used the result — an O(rows)
# scan per cell. That dead lookup (and its label temps) is removed.
for x in range(len(bound_df)):
    for y in range(len(bound_df)):
        min_x = bound_df.iloc[x, 0]
        min_y = bound_df.iloc[y, 0]
        width = bound_df.iloc[x, 1] - min_x
        height = bound_df.iloc[y, 1] - min_y
        rect = patches.Rectangle((min_x, min_y), width, height)
        main_ax.add_patch(rect)
main_ax.set_xlabel("")
main_ax.set_ylabel("")
remove_spines(main_ax)
# Attach left/top dendrogram panels that share the main heatmap's axes so the
# cluster bars stay aligned with the rectangle grid.
divider = make_axes_locatable(main_ax)
meta = full_meta.copy()
# left panel: vertical dendrogram sharing the main y-axis
left_ax = divider.append_axes("left", size="20%", pad=0, sharey=main_ax)
ax = left_ax
# ax.set_ylim((-gap, (2 * n_pairs + gap * n_leaf)))
ax.set_xlim((-0.5, lowest_level + 0.5))
draw_bar_dendrogram(meta, ax)
ax.set_yticks([])
ax.spines["left"].set_visible(False)
ax.set_xlabel("Level")
ax.set_xticks(np.arange(lowest_level + 1))
ax.spines["bottom"].set_visible(False)
ax.tick_params(axis="both", which="both", length=0)
# add a scale bar in the bottom left
width = 0.5
ax.bar(x=0, height=100, bottom=0, width=width, color="k")
ax.text(x=0.35, y=0, s="100 neurons")
# top panel: horizontal dendrogram sharing the main x-axis
top_ax = divider.append_axes("top", size="20%", pad=0, sharex=main_ax)
ax = top_ax
# ax.set_xlim(((2 * n_pairs + gap * n_leaf), -gap))
ax.set_ylim((lowest_level + 0.5, -0.5))
draw_bar_dendrogram(meta, ax, orientation="horizontal")
ax.set_xticks([])
ax.spines["left"].set_visible(False)
ax.set_ylabel("Level")
ax.set_yticks(np.arange(lowest_level + 1))
ax.spines["bottom"].set_visible(False)
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
ax.tick_params(axis="both", which="both", length=0)
stashfig(f"sbm-test-dendrogram-lowest={lowest_level}")
| [
"src.visualization.remove_spines",
"numpy.random.seed",
"src.io.savefig",
"src.graph.MetaGraph",
"numpy.mean",
"numpy.arange",
"graspy.models.SBMEstimator",
"graspy.utils.binarize",
"src.io.readcsv",
"pandas.DataFrame",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"seaborn.s... | [((1704, 1773), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'ConvergenceWarning'}), "(action='ignore', category=ConvergenceWarning)\n", (1727, 1773), False, 'import warnings\n'), ((2067, 2129), 'seaborn.plotting_context', 'sns.plotting_context', ([], {'context': '"""talk"""', 'font_scale': '(1)', 'rc': 'rc_dict'}), "(context='talk', font_scale=1, rc=rc_dict)\n", (2087, 2129), True, 'import seaborn as sns\n'), ((2130, 2154), 'seaborn.set_context', 'sns.set_context', (['context'], {}), '(context)\n', (2145, 2154), True, 'import seaborn as sns\n'), ((2156, 2176), 'numpy.random.seed', 'np.random.seed', (['(8888)'], {}), '(8888)\n', (2170, 2176), True, 'import numpy as np\n'), ((2615, 2670), 'src.io.readcsv', 'readcsv', (["('meta' + basename)"], {'foldername': 'exp', 'index_col': '(0)'}), "('meta' + basename, foldername=exp, index_col=0)\n", (2622, 2670), False, 'from src.io import readcsv, savecsv, savefig\n'), ((2746, 2800), 'src.io.readcsv', 'readcsv', (["('adj' + basename)"], {'foldername': 'exp', 'index_col': '(0)'}), "('adj' + basename, foldername=exp, index_col=0)\n", (2753, 2800), False, 'from src.io import readcsv, savecsv, savefig\n'), ((2811, 2848), 'src.graph.MetaGraph', 'MetaGraph', (['pair_adj.values', 'pair_meta'], {}), '(pair_adj.values, pair_meta)\n', (2820, 2848), False, 'from src.graph import MetaGraph, preprocess\n'), ((3828, 3894), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_row', 'n_col'], {'figsize': '(n_row * scale, n_col * scale)'}), '(n_row, n_col, figsize=(n_row * scale, n_col * scale))\n', (3840, 3894), True, 'import matplotlib.pyplot as plt\n'), ((10245, 10318), 'src.utils.get_blockmodel_df', 'get_blockmodel_df', (['full_adj', 'labels'], {'return_counts': '(True)', 'use_weights': '(True)'}), '(full_adj, labels, return_counts=True, use_weights=True)\n', (10262, 10318), False, 'from src.utils import get_blockmodel_df\n'), ((11483, 11506), 'seaborn.set_context', 
'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (11498, 11506), True, 'import seaborn as sns\n'), ((11524, 11560), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(30, 30)'}), '(1, 1, figsize=(30, 30))\n', (11536, 11560), True, 'import matplotlib.pyplot as plt\n'), ((13248, 13270), 'src.visualization.remove_spines', 'remove_spines', (['main_ax'], {}), '(main_ax)\n', (13261, 13270), False, 'from src.visualization import CLASS_COLOR_DICT, add_connections, adjplot, barplot_text, draw_networkx_nice, gridmap, matrixplot, palplot, remove_spines, screeplot, set_axes_equal, stacked_barplot\n'), ((13281, 13309), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['main_ax'], {}), '(main_ax)\n', (13300, 13309), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((1783, 1809), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1799, 1809), False, 'import os\n'), ((2210, 2262), 'src.io.savefig', 'savefig', (['name'], {'foldername': 'FNAME', 'save_on': '(True)'}), '(name, foldername=FNAME, save_on=True, **kws)\n', (2217, 2262), False, 'from src.io import readcsv, savecsv, savefig\n'), ((2300, 2342), 'src.io.savecsv', 'savecsv', (['df', 'name'], {'foldername': 'FNAME'}), '(df, name, foldername=FNAME, **kws)\n', (2307, 2342), False, 'from src.io import readcsv, savecsv, savefig\n'), ((3972, 4011), 'graspy.models.SBMEstimator', 'SBMEstimator', ([], {'directed': '(True)', 'loops': '(True)'}), '(directed=True, loops=True)\n', (3984, 4011), False, 'from graspy.models import SBMEstimator\n'), ((11400, 11474), 'numpy.sqrt', 'np.sqrt', (["(blockmodel_edges['source_size'] * blockmodel_edges['target_size'])"], {}), "(blockmodel_edges['source_size'] * blockmodel_edges['target_size'])\n", (11407, 11474), True, 'import numpy as np\n'), ((12206, 12224), 'numpy.mean', 'np.mean', (['last_mids'], {}), '(last_mids)\n', (12213, 12224), True, 'import numpy as np\n'), ((12556, 12607), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[mins, maxs]', 'columns': 'uni_labels'}), '(data=[mins, maxs], columns=uni_labels)\n', (12568, 12607), True, 'import pandas as pd\n'), ((13637, 13664), 'numpy.arange', 'np.arange', (['(lowest_level + 1)'], {}), '(lowest_level + 1)\n', (13646, 13664), True, 'import numpy as np\n'), ((14227, 14254), 'numpy.arange', 'np.arange', (['(lowest_level + 1)'], {}), '(lowest_level + 1)\n', (14236, 14254), True, 'import numpy as np\n'), ((4024, 4042), 'graspy.utils.binarize', 'binarize', (['full_adj'], {}), '(full_adj)\n', (4032, 4042), False, 'from graspy.utils import augment_diagonal, binarize, pass_to_ranks, remove_loops, symmetrize, to_laplace\n'), ((6726, 6760), 'numpy.vectorize', 'np.vectorize', (['CLASS_COLOR_DICT.get'], {}), '(CLASS_COLOR_DICT.get)\n', (6738, 6760), True, 'import numpy as np\n'), ((7406, 7433), 'numpy.arange', 'np.arange', (['(lowest_level + 1)'], {}), '(lowest_level + 1)\n', (7415, 7433), True, 'import numpy as np\n'), ((13118, 13166), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(min_x, min_y)', 'width', 'height'], {}), '((min_x, min_y), width, height)\n', (13135, 13166), True, 'import matplotlib.patches as patches\n'), ((7740, 7758), 'numpy.mean', 'np.mean', (['last_mids'], {}), '(last_mids)\n', (7747, 7758), True, 'import numpy as np\n')] |
#import pyglet
from pynput import keyboard
from datetime import datetime, timedelta
from ffpyplayer.player import MediaPlayer
import time
import cv2
import numpy as np

fileName = "darude"
player = MediaPlayer("music/" + fileName + ".mp3")

val = ''
end = False
startTime = datetime.now()
array = np.zeros(1000)
fps = 5
length = 1000

# Beat-track file layout: line 0 = fps, line 1 = length, remaining lines hold
# one beat-strength integer per frame (0 / blank = no beat).
# Use a context manager so the file handle is closed (the original leaked it).
with open("musicTracks/" + fileName + ".txt", "r") as f:
    lines = f.read().split('\n')
fps = int(lines[0])
length = int(lines[1])
for i in range(len(lines) - 3):
    num = int(lines[i + 2])
    if num > 0:
        array[i] = num

# Poll the player; whenever elapsed wall-clock time crosses a stored beat,
# print it once and clear it. `end` is never set here, so the loop runs until
# the player reports end-of-file.
while val != 'eof' and not end:
    frame, val = player.get_frame()
    # Map elapsed seconds to an index in the beat array (plain int(), not the
    # C-style "(int)(...)" cast of the original).
    index = int((datetime.now() - startTime).total_seconds() * fps)
    if index < len(array) and array[index] > 0:
        print(array[index])
        array[index] = 0  # consume the beat so it only fires once
    if val != 'eof' and frame is not None:
        img, t = frame
        # display img
print("end")
""""
while 1:
    frame, val = player.get_frame()
    if val == 'eof':
        break
    elif frame is None:
        time.sleep(0.01)
        print 'not ready'
    else:
        img, t = frame
        print val, t, img
        time.sleep(val) """
"""
song = pyglet.media.load('music/darude.mp3')
song.play()
pyglet.app.run()
"""
"numpy.zeros",
"ffpyplayer.player.MediaPlayer",
"datetime.datetime.now"
] | [((198, 239), 'ffpyplayer.player.MediaPlayer', 'MediaPlayer', (["('music/' + fileName + '.mp3')"], {}), "('music/' + fileName + '.mp3')\n", (209, 239), False, 'from ffpyplayer.player import MediaPlayer\n'), ((273, 287), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (285, 287), False, 'from datetime import datetime, timedelta\n'), ((296, 310), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (304, 310), True, 'import numpy as np\n'), ((646, 660), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (658, 660), False, 'from datetime import datetime, timedelta\n')] |
"""Smoke tests for the Key/Message substitution-cipher classes.

Checks key composition and inversion on a tiny 3-symbol alphabet, then
round-trips a sample text file through a random encipher/decipher key pair.
"""
# Fix: 'from key import Key' was imported twice; keep a single import.
from key import Key
from message import Message
import numpy as np
# Key composition: substituting key1's map through key0 composes the permutations.
map0 = np.array([1,2,0])
map1 = np.array([0,2,1])
key0 = Key(map0)
key1 = Key(map1)
key3 = key0.substitute(key1.map)
inverted_key0 = key0.invert()
assert np.all(key3.map == np.array([2,1,0]))
assert np.all(inverted_key0.map == np.array([2,0,1]))
alpha = 'abcdefghijklmnopqrstuvwxyz'
beta = 'qwertyuiopasdfghjklzxcvbnm'
# Build a key that swaps 'a' and 'z'; every other letter maps to itself.
key = np.array(range(26))
key[0] = 25
key[25] = 0
key = Key(key)
# Expected frequencies: uniform 1/26 per letter plus a trailing 0 bucket.
predicted_frequencies = [1.0/26 for x in range(26)]
predicted_frequencies.append(0)
message = Message(alpha)
assert message.map(key).text == 'zbcdefghijklmnopqrstuvwxya'
assert np.all(message.frequencies() == predicted_frequencies)
# Round-trip: encipher real text with a random key, then invert it back.
with open('sample.txt','r') as source:
    text = source.read()
my_message = Message(text)
my_message = my_message.filter()
encipher_key = Key()
encipher_key.random_key()
enciphered_message = my_message.map(encipher_key)
enciphered_message.text
decipher_key = encipher_key.invert()
deciphered_message = enciphered_message.map(decipher_key)
assert deciphered_message.text == my_message.text
"key.Key",
"message.Message",
"numpy.array"
] | [((95, 114), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (103, 114), True, 'import numpy as np\n'), ((120, 139), 'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (128, 139), True, 'import numpy as np\n'), ((146, 155), 'key.Key', 'Key', (['map0'], {}), '(map0)\n', (149, 155), False, 'from key import Key\n'), ((163, 172), 'key.Key', 'Key', (['map1'], {}), '(map1)\n', (166, 172), False, 'from key import Key\n'), ((467, 475), 'key.Key', 'Key', (['key'], {}), '(key)\n', (470, 475), False, 'from key import Key\n'), ((571, 585), 'message.Message', 'Message', (['alpha'], {}), '(alpha)\n', (578, 585), False, 'from message import Message\n'), ((788, 801), 'message.Message', 'Message', (['text'], {}), '(text)\n', (795, 801), False, 'from message import Message\n'), ((851, 856), 'key.Key', 'Key', ([], {}), '()\n', (854, 856), False, 'from key import Key\n'), ((264, 283), 'numpy.array', 'np.array', (['[2, 1, 0]'], {}), '([2, 1, 0])\n', (272, 283), True, 'import numpy as np\n'), ((318, 337), 'numpy.array', 'np.array', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (326, 337), True, 'import numpy as np\n')] |
import numpy as np
import random
import os
from colorama import init, Fore
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import copy
import argparse
##### MAZE GENERATOR #####
# https://en.wikipedia.org/wiki/Maze_generation_algorithm
# generate a maze using numpy
#
# Implemented algorithms:
# - Prim's
# - depth-first
WALL = 0      # grid value for an impassable cell
CORRIDOR = 1  # grid value for an open (passage) cell
#################################################
#for debugging
init()  # initialise colorama so Fore colours render on all terminals
def print_maze(maze):
    """Debug helper: echo the grid to the terminal, corridors white, walls red."""
    rows, cols = maze.shape
    for r in range(rows):
        for c in range(cols):
            colour = Fore.WHITE if maze[r][c] == 1 else Fore.RED
            print(colour, f'{maze[r][c]}', end="")
        print('\n')
#################################################
def new_maze(width, height):
    """Allocate a height-by-width integer grid consisting entirely of walls."""
    grid = np.full((height, width), WALL, dtype=int)
    return grid
def get_frontier_cells(cell, maze):
    """Return wall cells two steps from *cell* that lie inside the usable grid.

    Order of candidates (down, up, right, left) matches the original.
    """
    r, c = cell[0], cell[1]
    rows, cols = maze.shape
    frontier = []
    if r < rows - 3 and maze[r + 2][c] == WALL:
        frontier.append([r + 2, c])
    if r > 2 and maze[r - 2][c] == WALL:
        frontier.append([r - 2, c])
    if c < cols - 3 and maze[r][c + 2] == WALL:
        frontier.append([r, c + 2])
    if c > 2 and maze[r][c - 2] == WALL:
        frontier.append([r, c - 2])
    return frontier
def get_connection(cell, maze):
    """List corridors two steps from *cell* as [r, c, wall_r, wall_c] entries.

    Each entry pairs the cell itself with the wall cell separating it from an
    already-carved corridor. Candidate order (down, up, right, left) matches
    the original.
    """
    r, c = cell[0], cell[1]
    rows, cols = maze.shape
    conn = []
    if r < rows - 2 and maze[r + 2][c] == CORRIDOR:
        conn.append([r, c, r + 1, c])
    if r > 1 and maze[r - 2][c] == CORRIDOR:
        conn.append([r, c, r - 1, c])
    if c < cols - 2 and maze[r][c + 2] == CORRIDOR:
        conn.append([r, c, r, c + 1])
    if c > 1 and maze[r][c - 2] == CORRIDOR:
        conn.append([r, c, r, c - 1])
    return conn
#####
# Generate maze using Prim's algorithm
#####
def prim_maze(width, height):
    """Generate a maze with randomized Prim's algorithm.

    Returns (maze, mazelist) where mazelist holds deep-copied snapshots of
    every intermediate state, used later to build the animation.
    """
    mazelist=[] # for animating
    maze = new_maze(width, height)
    # A few identical lead-in frames so the animation starts on the blank grid.
    for temp in range(10):
        mazelist.append(copy.deepcopy(maze))
    # select random starting point..
    start_h = int(random.random()*height)
    start_w = int(random.random()*width)
    # ..NOT on the edge of the maze!
    if start_h <= 2:
        start_h += 3
    if start_h >= height-3:
        start_h -= 3
    if start_w <= 2:
        start_w += 3
    if start_w >= width-3:
        start_w -= 3
    # the starting point become a path, and we add the frontiers
    maze[start_h][start_w] = CORRIDOR
    mazelist.append(copy.deepcopy(maze))
    frontiers = []
    frontiers.append([start_h-2, start_w])
    frontiers.append([start_h, start_w+2])
    frontiers.append([start_h+2, start_w])
    frontiers.append([start_h, start_w-2])
    # Frontiers of a cell are unvisited cells at distance + 1
    # while there are Frontiers in the list, pick a random one.
    # Then, pick a random connection to a visited cell in the frontier range
    # 1) Make the wall a passage and mark the unvisited cell as part of the maze.
    # 2) Add the frontiers of the cell to the frontiers list.
    # Remove the frontier from the list.
    while frontiers:
        rand_front =frontiers[random.randint(0, len(frontiers)-1)]
        front_connections =get_connection(rand_front,maze)
        if front_connections:
            # Carve both the frontier cell and the wall between it and the maze.
            rand_connection = front_connections[random.randint(0,len(front_connections)-1)]
            maze[rand_connection[0]][rand_connection[1]]=CORRIDOR
            maze[rand_connection[2]][rand_connection[3]]=CORRIDOR
            temp_front = get_frontier_cells(rand_front, maze)
            # Only add frontiers not already queued (avoids duplicates).
            for a in temp_front:
                if (a not in frontiers):
                    frontiers.append(a)
            temp_front.clear()
            front_connections.clear()
        frontiers.remove(rand_front)
        mazelist.append(copy.deepcopy(maze))
    return maze, mazelist
#####
# Generate maze using randomized depth first search algorithm
#####
def get_neighbours_with_connection(cell, maze):
    """Unvisited cells two steps from *cell*, each paired with the wall between.

    Entries are [cell_r, cell_c, wall_r, wall_c]; candidate order
    (down, up, right, left) matches the original.
    """
    r, c = cell[0], cell[1]
    rows, cols = maze.shape
    result = []
    if r < rows - 3 and maze[r + 2][c] == WALL:
        result.append([r + 2, c, r + 1, c])
    if r > 2 and maze[r - 2][c] == WALL:
        result.append([r - 2, c, r - 1, c])
    if c < cols - 3 and maze[r][c + 2] == WALL:
        result.append([r, c + 2, r, c + 1])
    if c > 2 and maze[r][c - 2] == WALL:
        result.append([r, c - 2, r, c - 1])
    return result
def depth_first_maze(width, height):
    """Generate a maze with randomized depth-first search (recursive backtracker).

    Returns (maze, mazelist) where mazelist holds deep-copied snapshots of
    every intermediate state, used later to build the animation.
    """
    mazelist=[] # for animating
    back_stack=[]  # stack of carved cells; top is the current head of the walk
    maze = new_maze(width, height)
    # A few identical lead-in frames so the animation starts on the blank grid.
    for temp in range(10):
        mazelist.append(copy.deepcopy(maze))
    # select random starting point.. 1-N, 2-S, 3-WE, 4_E
    face = int(random.randint(1,4))
    start_h = 1
    start_w = 1
    if face == 1:
        start_w = int(random.random()*width)
    if face == 2:
        start_h = height-1
        start_w = int(random.random()*width)
    if face == 3:
        start_h = int(random.random()*height)
    if face == 4:
        start_w = width-1
        start_h= int(random.random()*height)
    maze[start_h][start_w] = CORRIDOR
    back_stack.append([start_h, start_w])
    mazelist.append(copy.deepcopy(maze))
    neighbours=[]
    while back_stack:
        # Try to extend from the current head; backtrack if it is a dead end.
        temp_neigh = get_neighbours_with_connection(back_stack[-1], maze)
        if len(temp_neigh) == 0:
            back_stack.pop()
        else:
            # Carve a random unvisited neighbour plus the wall between.
            rand_neigh = temp_neigh[random.randint(0,len(temp_neigh)-1)]
            back_stack.append([rand_neigh[0], rand_neigh[1]])
            maze[rand_neigh[0]][rand_neigh[1]]=CORRIDOR
            maze[rand_neigh[2]][rand_neigh[3]]=CORRIDOR
        mazelist.append(copy.deepcopy(maze))
    return maze, mazelist
# TO-DO
# def create_entrance_exit(maze):
def main(algo):
    """Generate a 50x40 maze with the chosen algorithm, then save a PNG image
    and an animated GIF of the construction under mazes/image and mazes/video.

    algo: 'prim' or 'depth'; anything else falls back to Prim's.
    """
    mazelist =[]
    imagelist=[]
    if algo == "prim":
        newmaze, mazelist = prim_maze(50,40)
    elif algo == "depth":
        newmaze, mazelist = depth_first_maze(50,40)
    else:
        newmaze, mazelist = prim_maze(50,40)
    # save image of the maze
    plt.imshow(newmaze,cmap='gray')
    os.makedirs('mazes/image', exist_ok=True)
    plt.imsave('mazes/image/sample'+".png",newmaze, dpi=1200, cmap='gray')
    # create animation of the maze
    fig = plt.figure(dpi=150, constrained_layout = True)
    fig.patch.set_facecolor('black')
    plt.axis("off")
    counter=0
    # Subsample snapshots so the animation has roughly 200 frames at most.
    movie_image_step = len(mazelist) // 200
    if movie_image_step == 0:
        movie_image_step = 1
    for i in mazelist:
        if counter%movie_image_step==0:
            imagelist.append((plt.imshow(i, cmap='gray'),))
        counter+=1
    # Hold the finished maze on screen for 40 extra frames.
    for aa in range(40):
        imagelist.append((plt.imshow(newmaze, cmap='gray'),))
    im_ani = animation.ArtistAnimation(
        fig, imagelist, interval=45, repeat_delay=3000, blit=False
    )
    os.makedirs('mazes/video', exist_ok=True)
    im_ani.save(('mazes/video/sample.gif'))
if __name__ == "__main__":
    # CLI entry point: -a selects the algorithm ('prim' or 'depth').
    parser = argparse.ArgumentParser(
        description="Maze generator. By default, produce a maze using Prim's algorithm."
    )
    parser.add_argument(
        "-a", type=str, default="prim", help="algorithm to use."
    )
    args = parser.parse_args()
    main(args.a)
"colorama.init",
"numpy.full",
"copy.deepcopy",
"os.makedirs",
"argparse.ArgumentParser",
"random.randint",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"matplotlib.animation.ArtistAnimation",
"random.random",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.imsave"
] | [((438, 444), 'colorama.init', 'init', ([], {}), '()\n', (442, 444), False, 'from colorama import init, Fore\n'), ((819, 854), 'numpy.full', 'np.full', (['(height, width)', 'WALL', 'int'], {}), '((height, width), WALL, int)\n', (826, 854), True, 'import numpy as np\n'), ((6401, 6433), 'matplotlib.pyplot.imshow', 'plt.imshow', (['newmaze'], {'cmap': '"""gray"""'}), "(newmaze, cmap='gray')\n", (6411, 6433), True, 'import matplotlib.pyplot as plt\n'), ((6437, 6478), 'os.makedirs', 'os.makedirs', (['"""mazes/image"""'], {'exist_ok': '(True)'}), "('mazes/image', exist_ok=True)\n", (6448, 6478), False, 'import os\n'), ((6483, 6556), 'matplotlib.pyplot.imsave', 'plt.imsave', (["('mazes/image/sample' + '.png')", 'newmaze'], {'dpi': '(1200)', 'cmap': '"""gray"""'}), "('mazes/image/sample' + '.png', newmaze, dpi=1200, cmap='gray')\n", (6493, 6556), True, 'import matplotlib.pyplot as plt\n'), ((6600, 6644), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(150)', 'constrained_layout': '(True)'}), '(dpi=150, constrained_layout=True)\n', (6610, 6644), True, 'import matplotlib.pyplot as plt\n'), ((6689, 6704), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6697, 6704), True, 'import matplotlib.pyplot as plt\n'), ((7065, 7154), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'imagelist'], {'interval': '(45)', 'repeat_delay': '(3000)', 'blit': '(False)'}), '(fig, imagelist, interval=45, repeat_delay=3000,\n blit=False)\n', (7090, 7154), True, 'import matplotlib.animation as animation\n'), ((7169, 7210), 'os.makedirs', 'os.makedirs', (['"""mazes/video"""'], {'exist_ok': '(True)'}), "('mazes/video', exist_ok=True)\n", (7180, 7210), False, 'import os\n'), ((7297, 7407), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Maze generator. By default, produce a maze using Prim\'s algorithm."""'}), '(description=\n "Maze generator. 
By default, produce a maze using Prim\'s algorithm.")\n', (7320, 7407), False, 'import argparse\n'), ((2718, 2737), 'copy.deepcopy', 'copy.deepcopy', (['maze'], {}), '(maze)\n', (2731, 2737), False, 'import copy\n'), ((5073, 5093), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (5087, 5093), False, 'import random\n'), ((5533, 5552), 'copy.deepcopy', 'copy.deepcopy', (['maze'], {}), '(maze)\n', (5546, 5552), False, 'import copy\n'), ((2229, 2248), 'copy.deepcopy', 'copy.deepcopy', (['maze'], {}), '(maze)\n', (2242, 2248), False, 'import copy\n'), ((2306, 2321), 'random.random', 'random.random', ([], {}), '()\n', (2319, 2321), False, 'import random\n'), ((2348, 2363), 'random.random', 'random.random', ([], {}), '()\n', (2361, 2363), False, 'import random\n'), ((4040, 4059), 'copy.deepcopy', 'copy.deepcopy', (['maze'], {}), '(maze)\n', (4053, 4059), False, 'import copy\n'), ((4979, 4998), 'copy.deepcopy', 'copy.deepcopy', (['maze'], {}), '(maze)\n', (4992, 4998), False, 'import copy\n'), ((5166, 5181), 'random.random', 'random.random', ([], {}), '()\n', (5179, 5181), False, 'import random\n'), ((5256, 5271), 'random.random', 'random.random', ([], {}), '()\n', (5269, 5271), False, 'import random\n'), ((5319, 5334), 'random.random', 'random.random', ([], {}), '()\n', (5332, 5334), False, 'import random\n'), ((5408, 5423), 'random.random', 'random.random', ([], {}), '()\n', (5421, 5423), False, 'import random\n'), ((6025, 6044), 'copy.deepcopy', 'copy.deepcopy', (['maze'], {}), '(maze)\n', (6038, 6044), False, 'import copy\n'), ((7016, 7048), 'matplotlib.pyplot.imshow', 'plt.imshow', (['newmaze'], {'cmap': '"""gray"""'}), "(newmaze, cmap='gray')\n", (7026, 7048), True, 'import matplotlib.pyplot as plt\n'), ((6915, 6941), 'matplotlib.pyplot.imshow', 'plt.imshow', (['i'], {'cmap': '"""gray"""'}), "(i, cmap='gray')\n", (6925, 6941), True, 'import matplotlib.pyplot as plt\n')] |
"""
Dijkstra 2D
@author: <NAME>
"""
import os
import sys
import math
import heapq
import time
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../../Search_based_Planning/")
from Search_2D import plotting, env
from Search_2D.Astar import AStar
class Dijkstra(AStar):
    """Dijkstra's algorithm: plain accumulated path cost as the priority."""

    def searching(self):
        """Run uniform-cost search from s_start to s_goal.

        :return: (path, visited order)
        """
        self.PARENT[self.s_start] = self.s_start
        self.g[self.s_start] = 0
        self.g[self.s_goal] = math.inf
        heapq.heappush(self.OPEN, (0, self.s_start))
        while self.OPEN:
            _, current = heapq.heappop(self.OPEN)
            self.CLOSED.append(current)
            if current == self.s_goal:
                # Goal popped from the queue — its cost is final.
                break
            for neighbor in self.get_neighbor(current):
                candidate = self.g[current] + self.cost(current, neighbor)
                # setdefault seeds unseen nodes with +inf, mirroring the
                # original membership check before comparison.
                if candidate < self.g.setdefault(neighbor, math.inf):
                    self.g[neighbor] = candidate
                    self.PARENT[neighbor] = current
                    heapq.heappush(self.OPEN, (candidate, neighbor))
        return self.extract_path(self.PARENT), self.CLOSED
def main():
    """Animated demo: solve a small fixed instance and plot the search."""
    s_start = (5, 5)
    s_goal = (45, 25)
    dijkstra = Dijkstra(s_start, s_goal, 'None')
    plot = plotting.Plotting(s_start, s_goal)
    path, visited = dijkstra.searching()
    plot.animation(path, visited, "Dijkstra's")  # animation generate
def record_time():
    """Benchmark Dijkstra on a large instance.

    Prints and returns [method name, wall-clock seconds, path length].
    """
    method_name = 'Dijkstra'
    time_start=time.time()
    # s_start = (5, 5)
    # s_goal = (45, 25)
    s_start = (10, 10)
    s_goal = (490, 290)
    dijkstra = Dijkstra(s_start, s_goal, 'None')
    path, visited = dijkstra.searching()
    time_end=time.time()
    time_delta = time_end-time_start
    path_len = path_length(path)
    print(method_name, time_delta, path_len)
    # plot = plotting.Plotting_my(s_start, s_goal)
    # plot.animation(path, visited, "Dijkstra's")
    return [method_name, time_delta, path_len]
def path_length(path):
    """Total Euclidean length of a polyline given as a sequence of points."""
    import numpy as np
    pts = np.array(path)
    total = 0
    # Sum the straight-line distance of each consecutive segment.
    for start, end in zip(pts[:-1], pts[1:]):
        step = end - start
        total += math.sqrt(np.sum(step ** 2))
    return total
if __name__ == '__main__':
    # main()  # animated demo (disabled)
    record_time()  # timing benchmark
"Search_2D.plotting.Plotting",
"os.path.abspath",
"numpy.sum",
"heapq.heappush",
"time.time",
"heapq.heappop",
"numpy.array"
] | [((1490, 1524), 'Search_2D.plotting.Plotting', 'plotting.Plotting', (['s_start', 's_goal'], {}), '(s_start, s_goal)\n', (1507, 1524), False, 'from Search_2D import plotting, env\n'), ((1701, 1712), 'time.time', 'time.time', ([], {}), '()\n', (1710, 1712), False, 'import time\n'), ((1912, 1923), 'time.time', 'time.time', ([], {}), '()\n', (1921, 1923), False, 'import time\n'), ((2278, 2293), 'numpy.array', 'np.array', (['path_'], {}), '(path_)\n', (2286, 2293), True, 'import numpy as np\n'), ((606, 650), 'heapq.heappush', 'heapq.heappush', (['self.OPEN', '(0, self.s_start)'], {}), '(self.OPEN, (0, self.s_start))\n', (620, 650), False, 'import heapq\n'), ((128, 153), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (143, 153), False, 'import os\n'), ((719, 743), 'heapq.heappop', 'heapq.heappop', (['self.OPEN'], {}), '(self.OPEN)\n', (732, 743), False, 'import heapq\n'), ((2396, 2410), 'numpy.sum', 'np.sum', (['(d ** 2)'], {}), '(d ** 2)\n', (2402, 2410), True, 'import numpy as np\n'), ((1269, 1311), 'heapq.heappush', 'heapq.heappush', (['self.OPEN', '(new_cost, s_n)'], {}), '(self.OPEN, (new_cost, s_n))\n', (1283, 1311), False, 'import heapq\n')] |
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Tutorial-style exercise: build a box plot of synthetic scores per category
# with Bokeh; the EXERCISE comments mark steps left for the reader.
# Generate some synthetic time series for six different categories
cats = list("abcdef")
y = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
    y[g == l] += i // 2
df = pd.DataFrame(dict(score=y, group=g))
# Find the quartiles, IQR, and outliers for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
# NOTE(review): whiskers here are median +/- 1.5*IQR; the conventional box
# plot uses q1/q3 +/- 1.5*IQR — confirm this is intended by the tutorial.
upper = q2 + 1.5*iqr
lower = q2 - 1.5*iqr
def outliers(group):
    # Rows of this category whose score falls outside the whisker bounds.
    cat = group.name
    return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting, we need and x (categorical) and y (numeric)
# coordinate for every outlier.
outx = []
outy = []
for cat in cats:
    # only add outliers if they exist
    if not out.loc[cat].empty:
        for value in out[cat]:
            outx.append(cat)
            outy.append(value)
# EXERCISE: output static HTML file
# EXERCISE: turn on plot hold
# Draw the upper segment extending from the box plot using `segment` which
# takes x0, x1 and y0, y1 as data
# If no outliers, shrink lengths of stems to be no longer than the maximums
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
segment(cats, upper.score, cats, q3.score, x_range=cats, line_width=2,
        tools="", background_fill="#EFE8E2", line_color="black", title="")
# EXERCISE: draw the lower segment
# Draw the upper box of the box plot using `rect`
rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
    fill_color="#E08E79", line_width=2, line_color="black")
# EXERCISE: use `rect` to draw the bottom box with a different color
# OK here we use `rect` to draw the whiskers. It's slightly cheating, but it's
# easier than using segments or lines, since we can specify widths simply with
# categorical percentage units
rect(cats, lower.score, 0.2, 0, line_color="black")
rect(cats, upper.score, 0.2, 0, line_color="black")
# EXERCISE: use `circle` to draw the outliers
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the X grid lines, change the Y grid line color
# - make the tick labels bigger
xgrid().grid_line_color = None
ygrid().grid_line_color = "white"
ygrid().grid_line_width = 2
xaxis().major_label_text_font_size="12pt"
show()
"numpy.random.choice",
"numpy.random.randn"
] | [((162, 183), 'numpy.random.randn', 'np.random.randn', (['(2000)'], {}), '(2000)\n', (177, 183), True, 'import numpy as np\n'), ((188, 216), 'numpy.random.choice', 'np.random.choice', (['cats', '(2000)'], {}), '(cats, 2000)\n', (204, 216), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from MCES import MCES
from blackjack_setup import *
# Run Monte Carlo Exploring Starts on blackjack: 200 states and 2 actions
# (0 = hit, 1 = stand); the first 100 states are no-usable-ace, the rest
# usable-ace (inferred from the reshape/split below — confirm in MCES).
initial_Q = np.zeros([200,2])
initial_policy = np.ones([200, 2]) / 2  # start from a uniform random policy
gamma = 1  # undiscounted episodic task
alpha = 0
num_episodes = 100000
Q,_ = MCES(get_episode_blackjack,initial_Q,initial_policy,gamma,alpha,num_episodes)
optimal_actions = Q.argmax(axis=1)  # greedy action for each state
# Reshape each 100-state half into a 10x10 grid and pad a zero bottom row so
# rows align with the plot's player-sum labels (11..21).
action_noUsableAce = optimal_actions[:100].reshape(10,10,order='F')
action_noUsableAce = np.append(np.zeros([1,10]), action_noUsableAce,axis=0)
action_usableAce = optimal_actions[100:].reshape(10,10,order='F')
action_usableAce = np.append(np.zeros([1,10]),action_usableAce,axis=0)
def plot_actions(actions, title_str):
    """Render a hit/stand policy grid as a grayscale image with labelled axes."""
    fig, ax = plt.subplots()
    image = ax.imshow(actions, interpolation='nearest',
                      origin='lower', cmap='gray', alpha=0.8)
    # Colourbar doubles as the legend: black = hit, white = stand.
    cbar = fig.colorbar(image, ticks=[0, 1])
    cbar.ax.set_yticklabels(['hit', 'stand'])
    ax.set_title(title_str)
    ax.set_xlabel('Dealer showing')
    ax.set_ylabel('Player sum')
    ax.xaxis.set_ticks(np.arange(10))
    ax.xaxis.set_ticklabels(['A'] + list(range(2, 11)))
    ax.yaxis.set_ticks(np.arange(11))
    ax.yaxis.set_ticklabels(np.arange(11, 22))
    # fig.savefig(title_str+'.jpg',dpi=200)  # optional export
plot_actions(action_usableAce, 'Usable ace')
plot_actions(action_noUsableAce,'No usable ace')
plt.show()
# Hand-coded reference ("true") optimal policy grids used for comparison.
true_action_usableAce = np.zeros([11,10])
true_action_usableAce[-4:] = 1
true_action_usableAce[-4][[0,8,9]] = 0
true_action_noUsableAce = np.ones([11,10])
true_action_noUsableAce[:6,0] = 0
true_action_noUsableAce[:2,1:3] = 0
true_action_noUsableAce[0,3:6] = 0
true_action_noUsableAce[:6,-4:]=0
# Count cells where the learned policy disagrees with the reference. The
# padded bottom rows are zero in both grids, so 200 real states remain and
# dividing by 200 gives the per-state error rate.
diff_usableAce = np.sum(action_usableAce != true_action_usableAce)
diff_noUsableAce = np.sum(action_noUsableAce != true_action_noUsableAce)
error_rate = (diff_usableAce + diff_noUsableAce)/200
print('Number of states that have non-optimal action: ', diff_usableAce + diff_noUsableAce)
print('Error rate: ', error_rate)
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"MCES.MCES",
"matplotlib.pyplot.subplots"
] | [((117, 135), 'numpy.zeros', 'np.zeros', (['[200, 2]'], {}), '([200, 2])\n', (125, 135), True, 'import numpy as np\n'), ((223, 309), 'MCES.MCES', 'MCES', (['get_episode_blackjack', 'initial_Q', 'initial_policy', 'gamma', 'alpha', 'num_episodes'], {}), '(get_episode_blackjack, initial_Q, initial_policy, gamma, alpha,\n num_episodes)\n', (227, 309), False, 'from MCES import MCES\n'), ((1300, 1310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1308, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1353), 'numpy.zeros', 'np.zeros', (['[11, 10]'], {}), '([11, 10])\n', (1343, 1353), True, 'import numpy as np\n'), ((1450, 1467), 'numpy.ones', 'np.ones', (['[11, 10]'], {}), '([11, 10])\n', (1457, 1467), True, 'import numpy as np\n'), ((1624, 1673), 'numpy.sum', 'np.sum', (['(action_usableAce != true_action_usableAce)'], {}), '(action_usableAce != true_action_usableAce)\n', (1630, 1673), True, 'import numpy as np\n'), ((1693, 1746), 'numpy.sum', 'np.sum', (['(action_noUsableAce != true_action_noUsableAce)'], {}), '(action_noUsableAce != true_action_noUsableAce)\n', (1699, 1746), True, 'import numpy as np\n'), ((152, 169), 'numpy.ones', 'np.ones', (['[200, 2]'], {}), '([200, 2])\n', (159, 169), True, 'import numpy as np\n'), ((436, 453), 'numpy.zeros', 'np.zeros', (['[1, 10]'], {}), '([1, 10])\n', (444, 453), True, 'import numpy as np\n'), ((577, 594), 'numpy.zeros', 'np.zeros', (['[1, 10]'], {}), '([1, 10])\n', (585, 594), True, 'import numpy as np\n'), ((671, 685), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (683, 685), True, 'import matplotlib.pyplot as plt\n'), ((1010, 1023), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1019, 1023), True, 'import numpy as np\n'), ((1102, 1115), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (1111, 1115), True, 'import numpy as np\n'), ((1145, 1162), 'numpy.arange', 'np.arange', (['(11)', '(22)'], {}), '(11, 22)\n', (1154, 1162), True, 'import numpy as np\n')] |
import torch
import numpy as np
import library.inputs as inputs
from Utils.flags import FLAGS


def test_classifier(netC):
    """Evaluate *netC* on the test set.

    Returns (n_samples, n_correct, mean cross-entropy loss).
    """
    device = FLAGS.device
    criterion = torch.nn.CrossEntropyLoss()
    loader = inputs.get_data_iter_test()
    total = 0
    correct = 0
    losses = []
    with torch.no_grad():  # inference only — no gradients needed
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            logits = netC(images)
            # argmax over class dimension gives the predicted label.
            predictions = logits.data.max(1)[1]
            losses.append(criterion(logits, labels).item())
            total += labels.size(0)
            correct += (predictions == labels).sum().item()
    return total, correct, np.mean(losses)
| [
"torch.nn.CrossEntropyLoss",
"numpy.mean",
"torch.max",
"torch.no_grad",
"library.inputs.get_data_iter_test"
] | [((166, 193), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (191, 193), False, 'import torch\n'), ((211, 238), 'library.inputs.get_data_iter_test', 'inputs.get_data_iter_test', ([], {}), '()\n', (236, 238), True, 'import library.inputs as inputs\n'), ((297, 312), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (310, 312), False, 'import torch\n'), ((720, 738), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (727, 738), True, 'import numpy as np\n'), ((508, 534), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (517, 534), False, 'import torch\n')] |
'''
Base class for bipartite networks in long or collapsed long form
'''
import numpy as np
import pandas as pd
import bipartitepandas as bpd
class BipartiteLongBase(bpd.BipartiteBase):
'''
Base class for BipartiteLong and BipartiteLongCollapsed, where BipartiteLong and BipartiteLongCollapsed give a bipartite network of firms and workers in long and collapsed long form, respectively. Contains generalized methods. Inherits from BipartiteBase.
Arguments:
*args: arguments for Pandas DataFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'fid' instead of 'f1i', 'f2i'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'wid': 'wid', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Pandas dataframes linking original id values to contiguous id values
**kwargs: keyword arguments for Pandas DataFrame
'''
    def __init__(self, *args, columns_req=[], columns_opt=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, **kwargs):
        # NOTE(review): mutable default arguments ([] and {}) are shared across
        # calls if ever mutated — confirm downstream code never mutates them.
        # Long-form data always carries a time column.
        if 't' not in columns_req:
            columns_req = ['t'] + columns_req
        # Long form uses single (non-paired) column names for j, y, and g.
        reference_dict = bpd.update_dict({'j': 'j', 'y': 'y', 'g': 'g'}, reference_dict)
        # Initialize DataFrame
        super().__init__(*args, columns_req=columns_req, columns_opt=columns_opt, reference_dict=reference_dict, col_dtype_dict=col_dtype_dict, col_dict=col_dict, include_id_reference_dict=include_id_reference_dict, **kwargs)
        # self.log('BipartiteLongBase object initialized', level='info')
    @property
    def _constructor(self):
        '''
        For inheritance from Pandas: operations that build new frames return
        this class rather than a plain DataFrame.

        Returns:
            (class): BipartiteLongBase
        '''
        return BipartiteLongBase
    def gen_m(self, force=False, copy=True):
        '''
        Generate m column for data (m == 0 if stayer, m == 1 or 2 if mover).

        Arguments:
            force (bool): if True, reset 'm' column even if it exists
            copy (bool): if False, avoid copy

        Returns:
            frame (BipartiteBase): BipartiteBase with m column
        '''
        if copy:
            frame = self.copy()
        else:
            frame = self
        if not frame._col_included('m') or force:
            i_col = frame.loc[:, 'i'].to_numpy()
            j_col = frame.loc[:, 'j'].to_numpy()
            # Shift worker/firm ids by one row in each direction; a row is a
            # move boundary when the same worker appears with a different firm.
            # NOTE(review): np.roll wraps around, so the first row is compared
            # against the last — presumably harmless because adjacent rows then
            # belong to different workers; confirm for single-worker frames.
            i_prev = np.roll(i_col, 1)
            i_next = np.roll(i_col, -1)
            j_prev = np.roll(j_col, 1)
            j_next = np.roll(j_col, -1)
            ##### Disable Pandas warning #####
            pd.options.mode.chained_assignment = None
            # m counts move boundaries touching the row: 0 = stayer,
            # 1 = first/last observation of a move, 2 = between two moves.
            frame.loc[:, 'm'] = ((i_col == i_prev) & (j_col != j_prev)).astype(int, copy=False) + ((i_col == i_next) & (j_col != j_next)).astype(int, copy=False)
            ##### Re-enable Pandas warning #####
            pd.options.mode.chained_assignment = 'warn'
            frame.col_dict['m'] = 'm'
            # Sort columns
            frame = frame.sort_cols(copy=False)
        else:
            frame.log("'m' column already included. Returning unaltered frame.", level='info')
        return frame
    def get_es(self, move_to_worker=False, is_sorted=False, copy=True):
        '''
        Return (collapsed) long form data reformatted into (collapsed) event study data.

        Arguments:
            move_to_worker (bool): if True, each move is treated as a new worker
            is_sorted (bool): if False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
            copy (bool): if False, avoid copy

        Returns:
            es_frame (BipartiteEventStudy(Collapsed)): BipartiteEventStudy(Collapsed) object generated from (collapsed) long data
        '''
        if copy:
            frame = self.copy()
        else:
            frame = self
        # Split workers by movers and stayers
        stayers = pd.DataFrame(frame.loc[frame.loc[:, 'm'].to_numpy() == 0, :])
        movers = pd.DataFrame(frame.loc[frame.groupby('i')['m'].transform('max').to_numpy() > 0, :])
        frame.log('workers split by movers and stayers', level='info')
        # Add lagged values
        all_cols = frame._included_cols()
        if not is_sorted:
            # Sort data by i (and t, if included)
            sort_order = ['i']
            if frame._col_included('t'):
                # If t column
                sort_order.append(bpd.to_list(frame.reference_dict['t'])[0])
            movers.sort_values(sort_order, inplace=True)
        # Columns to keep
        keep_cols = ['i']
        # For each column, build the event-study pair: '<col>1' holds the
        # previous observation (lagged by one row), '<col>2' the current one.
        for col in all_cols:
            for subcol in bpd.to_list(frame.reference_dict[col]):
                # Get column number, e.g. j1 will give 1
                subcol_number = subcol.strip(col)
                ## Movers
                # Useful for t1 and t2: t1 should go to t11 and t21; t2 should go to t12 and t22
                plus_1 = col + '1' + subcol_number
                plus_2 = col + '2' + subcol_number
                # Lagged value
                movers.loc[:, plus_1] = np.roll(movers.loc[:, subcol].to_numpy(), 1)
                movers.rename({subcol: plus_2}, axis=1, inplace=True)
                if subcol not in ['i', 'm']:
                    ## Stayers (no lags)
                    stayers.loc[:, plus_1] = stayers.loc[:, subcol]
                    stayers.rename({subcol: plus_2}, axis=1, inplace=True)
                    # Columns to keep
                    keep_cols += [plus_1, plus_2]
                elif subcol == 'm':
                    # Columns to keep
                    keep_cols += ['m']
        # Ensure lagged values are for the same worker, and that neither observation is a stay (this ensures that if there is a mover who stays at a firm for multiple periods, e.g. A -> B -> B -> B -> C, then the event study will be A -> B, B -> C, with the middle B listed as a stayer)
        movers = movers.loc[(movers.loc[:, 'i1'].to_numpy() == movers.loc[:, 'i2'].to_numpy()) & (movers.loc[:, 'j1'].to_numpy() != movers.loc[:, 'j2'].to_numpy()), :]
        # Set 'm' = 1 for movers
        movers.drop(['m1', 'm2'], axis=1, inplace=True)
        movers.loc[:, 'm'] = 1
        # Correct datatypes (shifting adds nans which converts all columns into float, correct columns that should be int)
        for col in all_cols:
            if (frame.col_dtype_dict[col] == 'int') and (col != 'm'):
                for subcol in bpd.to_list(frame.reference_dict[col]):
                    # Get column number, e.g. j1 will give 1
                    subcol_number = subcol.strip(col)
                    movers.loc[:, col + '1' + subcol_number] = movers.loc[:, col + '1' + subcol_number].astype(int, copy=False)
        # Correct i
        movers.drop('i2', axis=1, inplace=True)
        movers.rename({'i1': 'i'}, axis=1, inplace=True)
        # Keep only relevant columns
        stayers = stayers.reindex(keep_cols, axis=1, copy=False)
        movers = movers.reindex(keep_cols, axis=1, copy=False)
        frame.log('columns updated', level='info')
        # Merge stayers and movers
        data_es = pd.concat([stayers, movers], ignore_index=True) # .reset_index(drop=True)
        # Sort columns
        sorted_cols = sorted(data_es.columns, key=bpd.col_order)
        data_es = data_es.reindex(sorted_cols, axis=1, copy=False)
        frame.log('data reformatted as event study', level='info')
        es_frame = frame._constructor_es(data_es)
        es_frame._set_attributes(frame, no_dict=True)
        # Sort data by i and t
        es_frame = es_frame.sort_rows(is_sorted=False, copy=False)
        if move_to_worker:
            # Give every event-study row its own worker id (the row index).
            es_frame.loc[:, 'i'] = es_frame.index
        return es_frame
def _prep_cluster(self, stayers_movers=None, t=None, weighted=True, is_sorted=False, copy=False):
    '''
    Prepare data for clustering.

    Arguments:
        stayers_movers (str or None, default=None): if None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers
        t (int or None, default=None): if None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data)
        weighted (bool, default=True): if True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation has weight 1)
        is_sorted (bool): used for event study format, does nothing for long
        copy (bool): if False, avoid copy

    Returns:
        data (Pandas DataFrame): data prepared for clustering
        weights (NumPy Array or None): if weighted=True, gives NumPy array of firm weights for clustering; otherwise, is None
        jids (NumPy Array): firm ids of firms in subset of data used to cluster
    '''
    if copy:
        frame = self.copy()
    else:
        frame = self
    if stayers_movers is not None:
        if stayers_movers == 'stayers':
            # Stayers are observations with 'm' == 0
            frame = frame.loc[frame.loc[:, 'm'].to_numpy() == 0, :]
        elif stayers_movers == 'movers':
            # Movers are observations with 'm' > 0
            frame = frame.loc[frame.loc[:, 'm'].to_numpy() > 0, :]
        else:
            raise NotImplementedError("Invalid 'stayers_movers' option, {}. Valid options are 'stayers', 'movers', or None.".format(stayers_movers))
    # If period-level, then only use data for that particular period
    if t is not None:
        if isinstance(frame, bpd.BipartiteLong):
            frame = frame.loc[frame.loc[:, 't'].to_numpy() == t, :]
        else:
            raise NotImplementedError("Cannot use data from a particular period with collapsed data. Data can be converted to long format using the '.uncollapse()' method.")
    # Create weights
    ##### Disable Pandas warning #####
    # FIX: remember the caller's current setting rather than assuming it was
    # 'warn', and restore it in a finally block so an exception inside the
    # weighted branch cannot leave the global option disabled
    prev_chained_assignment = pd.options.mode.chained_assignment
    pd.options.mode.chained_assignment = None
    try:
        if weighted:
            if frame._col_included('w'):
                # Use the provided weight column for both rows and firms
                frame.loc[:, 'row_weights'] = frame.loc[:, 'w']
                weights = frame.groupby('j')['w'].sum().to_numpy()
            else:
                # No weight column: each observation counts once
                frame.loc[:, 'row_weights'] = 1
                weights = frame.groupby('j').size().to_numpy()
        else:
            frame.loc[:, 'row_weights'] = 1
            weights = None
    finally:
        ##### Restore previous Pandas warning setting #####
        pd.options.mode.chained_assignment = prev_chained_assignment
    # Get unique firm ids (must sort)
    jids = np.sort(frame.loc[:, 'j'].unique())
    return frame, weights, jids
def _leave_one_observation_out(self, cc_list, component_size_variable='firms', drop_multiples=False):
    '''
    Extract largest leave-one-observation-out connected component.

    Arguments:
        cc_list (list of lists): each entry is a connected component
        component_size_variable (str): how to determine largest leave-one-observation-out connected component. Options are 'len'/'length' (length of frame), 'firms' (number of unique firms), 'workers' (number of unique workers), 'stayers' (number of unique stayers), and 'movers' (number of unique movers)
        drop_multiples (bool): if True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency when re-collapsing data)

    Returns:
        frame_largest_cc (BipartiteLongBase): dataframe of largest leave-one-observation-out connected component
    '''
    # This will become the largest leave-one-observation-out component
    frame_largest_cc = None
    # Consider candidate components from largest (most firms) to smallest,
    # so cheap early-skip checks below can prune dominated candidates
    for cc in sorted(cc_list, reverse=True, key=len):
        if (frame_largest_cc is not None) and (component_size_variable == 'firms'):
            # If looking at number of firms, can check if frame_cc is already smaller than frame_largest_cc before any computations
            skip = frame_largest_cc.n_firms() >= len(cc)
            if skip:
                continue
        # Keep observations in connected components
        frame_cc = self.keep_ids('j', cc, drop_multiples, is_sorted=True, copy=True)
        if frame_largest_cc is not None:
            # If frame_cc is already smaller than frame_largest_cc
            skip = bpd.compare_frames(frame_largest_cc, frame_cc, size_variable=component_size_variable, operator='geq')
            if skip:
                continue
        # Remove firms with only 1 mover observation (can have 1 mover with multiple observations)
        frame_cc = frame_cc.min_moves_frame(2, drop_multiples, is_sorted=True, copy=False)
        if frame_largest_cc is not None:
            # If frame_cc is already smaller than frame_largest_cc (re-check, since min_moves_frame may have shrunk it)
            skip = bpd.compare_frames(frame_largest_cc, frame_cc, size_variable=component_size_variable, operator='geq')
            if skip:
                continue
        # Construct graph
        G2 = frame_cc._construct_graph('leave_one_observation_out')
        # Extract articulation firms
        articulation_firms = G2.articulation_points()
        if len(articulation_firms) > 0:
            # If there are articulation firms
            # Extract articulation rows: mover observations ('m' > 0) at articulation firms
            articulation_rows = frame_cc._get_articulation_obs(G2, frame_cc.loc[(frame_cc.loc[:, 'j'].isin(articulation_firms)) & (frame_cc.loc[:, 'm'].to_numpy() > 0), :].index.to_numpy())
            if len(articulation_rows) > 0:
                # If new frame is not leave-one-out connected, recompute connected components after dropping articulation rows (but note that articulation rows should be kept in the final dataframe)
                G2 = frame_cc.drop_rows(articulation_rows, drop_multiples, is_sorted=True, copy=False)._construct_graph('leave_one_observation_out')
                cc_list_2 = G2.components()
                # Recursion step: find the largest valid component of the reduced graph
                frame_cc = frame_cc._leave_one_observation_out(cc_list_2, component_size_variable, drop_multiples)
        # Decide whether this candidate replaces the current best
        if frame_largest_cc is None:
            # If in the first round
            replace = True
        elif frame_cc is None:
            # If the components have recursively been eliminated
            replace = False
        else:
            replace = bpd.compare_frames(frame_cc, frame_largest_cc, size_variable=component_size_variable, operator='gt')
        if replace:
            frame_largest_cc = frame_cc
    # Return largest leave-one-observation-out component
    return frame_largest_cc
def _leave_one_firm_out(self, bcc_list, component_size_variable='firms', drop_multiples=False):
    '''
    Extract largest leave-one-firm-out connected component.

    Arguments:
        bcc_list (list of lists): each entry is a biconnected component
        component_size_variable (str): how to determine largest leave-one-firm-out connected component. Options are 'len'/'length' (length of frame), 'firms' (number of unique firms), 'workers' (number of unique workers), 'stayers' (number of unique stayers), and 'movers' (number of unique movers)
        drop_multiples (bool): if True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency when re-collapsing data)

    Returns:
        frame_largest_bcc (BipartiteLongBase): dataframe of largest leave-one-out connected component
    '''
    # This will become the largest leave-one-firm-out component
    frame_largest_bcc = None
    # Consider candidate components from largest (most firms) to smallest,
    # so cheap early-skip checks below can prune dominated candidates
    for bcc in sorted(bcc_list, reverse=True, key=len):
        if (frame_largest_bcc is not None) and (component_size_variable == 'firms'):
            # If looking at number of firms, can check if frame_cc is already smaller than frame_largest_cc before any computations
            skip = frame_largest_bcc.n_firms() >= len(bcc)
            if skip:
                continue
        # Keep observations in biconnected components
        frame_bcc = self.keep_ids('j', bcc, drop_multiples, is_sorted=True, copy=True)
        if frame_largest_bcc is not None:
            # If frame_bcc is already smaller than frame_largest_bcc
            skip = bpd.compare_frames(frame_largest_bcc, frame_bcc, size_variable=component_size_variable, operator='geq')
            if skip:
                continue
        # Remove firms with only 1 mover observation (can have 1 mover with multiple observations)
        # This fixes a discrepency between igraph's biconnected components and the definition of leave-one-out connected set, where biconnected components is True if a firm has only 1 mover, since then it disappears from the graph - but leave-one-out requires the set of firms to remain unchanged
        frame_bcc = frame_bcc.min_moves_frame(2, drop_multiples, is_sorted=True, copy=False)
        if frame_largest_bcc is not None:
            # If frame_bcc is already smaller than frame_largest_bcc (re-check, since min_moves_frame may have shrunk it)
            skip = bpd.compare_frames(frame_largest_bcc, frame_bcc, size_variable=component_size_variable, operator='geq')
            if skip:
                continue
        # # Recompute biconnected components
        # G2 = frame_bcc._construct_biconnected_graph()
        # bcc_list_2 = G2.biconnected_components()
        # # If new frame is not biconnected after dropping firms with 1 mover observation, recompute biconnected components
        # if not ((len(bcc_list_2) == 1) and (len(bcc_list_2[0]) == frame_bcc.n_firms())):
        #     frame_bcc = frame_bcc._leave_one_out(bcc_list_2, component_size_variable, drop_multiples)
        # Decide whether this candidate replaces the current best
        if frame_largest_bcc is None:
            # If in the first round
            replace = True
        elif frame_bcc is None:
            # If the biconnected components have recursively been eliminated
            replace = False
        else:
            replace = bpd.compare_frames(frame_bcc, frame_largest_bcc, size_variable=component_size_variable, operator='gt')
        if replace:
            frame_largest_bcc = frame_bcc
    # Return largest biconnected component
    return frame_largest_bcc
def _construct_connected_linkages(self):
    '''
    Construct numpy array linking firms by movers, for use with connected components.

    Returns:
        (NumPy Array): firm linkages
    '''
    # Restrict attention to mover observations ('m' > 0)
    is_move = (self.loc[:, 'm'].to_numpy() > 0)
    worker_ids = self.loc[is_move, 'i'].to_numpy()
    firm_ids = self.loc[is_move, 'j'].to_numpy()
    # Pair each observation's firm with the firm in the following observation
    next_firm_ids = np.roll(firm_ids, -1)
    # A link is valid only when consecutive rows belong to the same worker
    same_worker = (worker_ids == np.roll(worker_ids, -1))
    # One (source firm, destination firm) pair per valid consecutive move
    return np.stack([firm_ids[same_worker], next_firm_ids[same_worker]], axis=1)
def _construct_biconnected_linkages(self):
    '''
    Construct numpy array linking firms by movers, for use with biconnected components.

    Returns:
        (NumPy Array): firm linkages
    '''
    # Restrict attention to mover observations ('m' > 0)
    mover_mask = (self.loc[:, 'm'].to_numpy() > 0)
    worker_ids = self.loc[mover_mask, 'i'].to_numpy()
    firm_ids = self.loc[mover_mask, 'j'].to_numpy()
    # Link each firm to the firms 1 and 2 observations ahead, keeping a
    # link only when both observations belong to the same worker
    pieces = []
    for shift in (1, 2):
        shifted_firms = np.roll(firm_ids, -shift)
        same_worker = (worker_ids == np.roll(worker_ids, -shift))
        pieces.append(np.stack([firm_ids[same_worker], shifted_firms[same_worker]], axis=1))
    # Shift-1 linkages first, then shift-2 linkages (order matters to
    # callers that map linkages back to original row indices)
    return np.concatenate(pieces, axis=0)
def _biconnected_linkages_indices(self):
    '''
    Construct numpy array of original indices for biconnected linkages. The first column tells you, for each link in the graph, what index the first observation in the link is coming from; and the second column tells you, for each link in the graph, what index the second observation in the link is coming from.

    Returns:
        (NumPy Array): original indices
    '''
    # Restrict attention to mover observations ('m' > 0)
    mover_mask = (self.loc[:, 'm'].to_numpy() > 0)
    worker_ids = self.loc[mover_mask, 'i'].to_numpy()
    row_indices = self.loc[mover_mask, :].index.to_numpy()
    # Mirror _construct_biconnected_linkages: record, for the shift-1 and
    # shift-2 links, the original row index of each endpoint
    pieces = []
    for shift in (1, 2):
        shifted_indices = np.roll(row_indices, -shift)
        same_worker = (worker_ids == np.roll(worker_ids, -shift))
        pieces.append(np.stack([row_indices[same_worker], shifted_indices[same_worker]], axis=1))
    # Shift-1 index pairs first, then shift-2 pairs, matching the edge
    # order produced by _construct_biconnected_linkages
    return np.concatenate(pieces, axis=0)
def _get_articulation_obs(self, G, obs_list):
    '''
    Compute articulation observations for self, by checking whether self is leave-one-observation-out connected when dropping selected observations one at a time.

    Arguments:
        G (igraph Graph): graph linking firms by movers
        obs_list (list): list of observations to drop

    Returns:
        (list): articulation observations for self
    '''
    # Get original indices for biconnected linkages
    original_indices = self._biconnected_linkages_indices()
    index_first = original_indices[:, 0]
    index_second = original_indices[:, 1]
    # Save articulation observations (observations that disconnect the graph when they are removed)
    articulation_obs = []
    # Check if each observation is an articulation observation
    for obs in obs_list:
        # Work on a copy so the original graph stays intact for the next observation
        G_obs = G.copy()
        # An observation gives an index in the frame, but we need edge indices
        # for the graph; it may appear as the first and/or second member of a
        # linkage. np.where simply returns an empty array when there is no
        # match, so no exception handling is needed (the previous
        # try/except IndexError blocks were dead code -- np.where never
        # raises IndexError).
        obs_indices = list(np.where(index_first == obs)[0]) + list(np.where(index_second == obs)[0])
        # Delete the edge(s) generated by this observation
        G_obs.delete_edges(obs_indices)
        # Check whether removing the edge(s) disconnects the graph
        if not G_obs.is_connected():
            articulation_obs += [obs]
    return articulation_obs
# def _get_articulation_obs(self, G, obs_list):
# ''' # FIXME this is around twice as slow as other implementation
# Compute articulation observations for self, by checking whether self is leave-one-observation-out connected when dropping selected observations one at a time.
# Arguments:
# G (igraph Graph): graph linking firms by movers
# obs_list (list): list of observations to drop
# Returns:
# (list): articulation observations for self
# '''
# # Get original indices for biconnected linkages
# original_indices = self._biconnected_linkages_indices()
# index_first = original_indices[:, 0]
# index_second = original_indices[:, 1]
# # Save articulation observations (observations that disconnect the graph when they are removed)
# articulation_obs = []
# # Check if each observation is an articulation observation
# for obs in obs_list:
# # Observation gives an index in the frame, but we need an index for the graph
# try:
# # If observation is first in pair
# obs_indices = list(np.where(index_first == obs)[0])
# except IndexError:
# # If observation isn't first in pair
# obs_indices = []
# try:
# # If observation is second in pair
# obs_indices += list(np.where(index_second == obs)[0])
# except IndexError:
# # If observation isn't second in pair
# pass
# # Shift indices to account for dropping and re-adding rows to graph
# original_indices = np.concatenate([original_indices[np.delete(np.arange(len(original_indices)), obs_indices), :], original_indices[obs_indices, :]])
# index_first = original_indices[:, 0]
# index_second = original_indices[:, 1]
# # Save graph tuples of observations to be removed, so we can add them back later
# obs_tuples = [G.es()[obs_index].tuple for obs_index in obs_indices]
# # Delete row(s)
# G.delete_edges(obs_indices)
# # Check whether removing row(s) disconnects graph
# if not G.is_connected():
# articulation_obs += [obs]
# # Add rows back
# G.add_edges(obs_tuples)
# return articulation_obs
def keep_ids(self, id_col, keep_ids_list, drop_multiples=False, is_sorted=False, reset_index=True, copy=True):
    '''
    Only keep ids belonging to a given set of ids.

    Arguments:
        id_col (str): column of ids to consider ('i', 'j', or 'g')
        keep_ids_list (list): ids to keep
        drop_multiples (bool): used only if id_col == 'j' and using BipartiteLongCollapsed format. If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency)
        is_sorted (bool): if False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
        reset_index (bool): if True, reset index at end
        copy (bool): if False, avoid copy

    Returns:
        frame (BipartiteLongBase): dataframe with ids in the given set
    '''
    id_set = set(keep_ids_list)
    # Fast path: keeping every id leaves the dataframe unchanged
    if len(id_set) == self.n_unique_ids(id_col):
        return self.copy() if copy else self
    subset = self.loc[self.loc[:, id_col].isin(id_set), :]
    if id_col in ['j', 'g']:
        if isinstance(subset, bpd.BipartiteLongCollapsed):
            # Spells may change when firms/groups are dropped, so re-collapse
            subset = subset.recollapse(drop_multiples=drop_multiples, is_sorted=is_sorted, copy=copy)
            # recollapse already produced a copy if one was requested
            copy = False
        # Recompute 'm' since mover status can change after dropping observations or re-collapsing
        subset = subset.gen_m(force=True, copy=copy)
        # gen_m already produced a copy if one was requested
        copy = False
    if copy:
        # Copy on the subset only
        subset = subset.copy()
    if reset_index:
        subset.reset_index(drop=True, inplace=True)
    return subset
def drop_ids(self, id_col, drop_ids_list, drop_multiples=False, is_sorted=False, reset_index=True, copy=True):
    '''
    Drop ids belonging to a given set of ids.

    Arguments:
        id_col (str): column of ids to consider ('i', 'j', or 'g')
        drop_ids_list (list): ids to drop
        drop_multiples (bool): used only if id_col == 'j' and using BipartiteLongCollapsed format. If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency)
        is_sorted (bool): if False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
        reset_index (bool): if True, reset index at end
        copy (bool): if False, avoid copy

    Returns:
        frame (BipartiteLongBase): dataframe with ids outside the given set
    '''
    id_set = set(drop_ids_list)
    # Fast path: dropping nothing leaves the dataframe unchanged
    if not id_set:
        return self.copy() if copy else self
    subset = self.loc[~(self.loc[:, id_col].isin(id_set)), :]
    if id_col in ['j', 'g']:
        if isinstance(subset, bpd.BipartiteLongCollapsed):
            # Spells may change when firms/groups are dropped, so re-collapse
            subset = subset.recollapse(drop_multiples=drop_multiples, is_sorted=is_sorted, copy=copy)
            # recollapse already produced a copy if one was requested
            copy = False
        # Recompute 'm' since mover status can change after dropping observations or re-collapsing
        subset = subset.gen_m(force=True, copy=copy)
        # gen_m already produced a copy if one was requested
        copy = False
    if copy:
        # Copy on the subset only
        subset = subset.copy()
    if reset_index:
        subset.reset_index(drop=True, inplace=True)
    return subset
def keep_rows(self, rows_list, drop_multiples=False, is_sorted=False, reset_index=True, copy=True):
    '''
    Only keep particular rows.

    Arguments:
        rows_list (list): rows to keep
        drop_multiples (bool): used only if using BipartiteLongCollapsed format. If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency)
        is_sorted (bool): if False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
        reset_index (bool): if True, reset index at end
        copy (bool): if False, avoid copy

    Returns:
        frame (BipartiteLongBase): dataframe with given rows
    '''
    row_set = set(rows_list)
    # Fast path: keeping every row leaves the dataframe unchanged
    if len(row_set) == len(self):
        return self.copy() if copy else self
    # iloc requires positional order, so sort the requested rows
    subset = self.iloc[sorted(row_set)]
    if isinstance(subset, bpd.BipartiteLongCollapsed):
        # Spells may change when rows are dropped, so re-collapse
        subset = subset.recollapse(drop_multiples=drop_multiples, is_sorted=is_sorted, copy=copy)
        # recollapse already produced a copy if one was requested
        copy = False
    # Recompute 'm' since mover status can change after dropping observations or re-collapsing
    subset = subset.gen_m(force=True, copy=copy)
    if reset_index:
        subset.reset_index(drop=True, inplace=True)
    return subset
def min_obs_firms(self, threshold=2):
    '''
    List firms with at least `threshold` many observations.

    Arguments:
        threshold (int): minimum number of observations required to keep a firm

    Returns:
        valid_firms (NumPy Array): firms with sufficiently many observations
    '''
    if threshold == 0:
        # No threshold: every firm qualifies
        return self.unique_ids('j')
    # Count observations per firm, then keep firms meeting the threshold
    obs_per_firm = self.loc[:, 'j'].value_counts()
    return obs_per_firm[obs_per_firm.to_numpy() >= threshold].index.to_numpy()
@bpd.recollapse_loop(False)
def min_obs_frame(self, threshold=2, drop_multiples=False, is_sorted=False, copy=True):
    '''
    Keep firms with at least `threshold` many observations.

    Arguments:
        threshold (int): minimum number of observations required to keep a firm
        drop_multiples (bool): used only for BipartiteLongCollapsed format. If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency)
        is_sorted (bool): if False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
        copy (bool): if False, avoid copy

    Returns:
        frame (BipartiteLongBase): dataframe of firms with sufficiently many observations
    '''
    if threshold == 0:
        # If no threshold, nothing to filter
        if copy:
            return self.copy()
        return self
    # Keep rows whose firm has at least `threshold` observations
    frame = self.loc[self.groupby('j')['i'].transform('size').to_numpy() >= threshold, :]
    if isinstance(frame, bpd.BipartiteLongCollapsed):
        # If BipartiteLongCollapsed, spells may change when firms are dropped, so re-collapse
        frame = frame.recollapse(drop_multiples=drop_multiples, is_sorted=is_sorted, copy=copy)
        # We don't need to copy again
        copy = False
    # Recompute 'm' since it might change from dropping observations or from re-collapsing
    frame = frame.gen_m(force=True, copy=copy)
    frame.reset_index(drop=True, inplace=True)
    return frame
def min_workers_firms(self, threshold=15):
    '''
    List firms with at least `threshold` many workers.

    Arguments:
        threshold (int): minimum number of workers required to keep a firm

    Returns:
        valid_firms (NumPy Array): list of firms with sufficiently many workers
    '''
    if threshold == 0:
        # No threshold: every firm qualifies
        return self.unique_ids('j')
    # Count distinct workers per firm, then keep firms meeting the threshold
    workers_per_firm = self.groupby('j')['i'].nunique()
    return workers_per_firm[workers_per_firm.to_numpy() >= threshold].index.to_numpy()
@bpd.recollapse_loop(False)
def min_workers_frame(self, threshold=15, drop_multiples=False, is_sorted=False, copy=True):
    '''
    Return dataframe of firms with at least `threshold` many workers.

    Arguments:
        threshold (int): minimum number of workers required to keep a firm
        drop_multiples (bool): used only for BipartiteLongCollapsed format. If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency)
        is_sorted (bool): if False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
        copy (bool): if False, avoid copy

    Returns:
        frame (BipartiteLongBase): dataframe of firms with sufficiently many workers
    '''
    if threshold == 0:
        # If no threshold, nothing to filter
        if copy:
            return self.copy()
        return self
    # Keep rows whose firm has at least `threshold` distinct workers
    frame = self.loc[self.groupby('j')['i'].transform('nunique').to_numpy() >= threshold, :]
    if isinstance(frame, bpd.BipartiteLongCollapsed):
        # If BipartiteLongCollapsed, spells may change when firms are dropped, so re-collapse
        frame = frame.recollapse(drop_multiples=drop_multiples, is_sorted=is_sorted, copy=copy)
        # We don't need to copy again
        copy = False
    # Recompute 'm' since it might change from dropping observations or from re-collapsing
    frame = frame.gen_m(force=True, copy=copy)
    frame.reset_index(drop=True, inplace=True)
    return frame
def min_moves_firms(self, threshold=2):
    '''
    List firms with at least `threshold` many moves. Note that a single mover can have multiple moves at the same firm.

    Arguments:
        threshold (int): minimum number of moves required to keep a firm

    Returns:
        valid_firms (NumPy Array): firms with sufficiently many moves
    '''
    if threshold == 0:
        # No threshold: every firm qualifies
        return self.unique_ids('j')
    # Restrict to mover observations ('m' > 0), then a move count is simply
    # an observation count within that subset
    mover_obs = self.loc[self.loc[:, 'm'].to_numpy() > 0]
    return mover_obs.min_obs_firms(threshold=threshold)
@bpd.recollapse_loop(True)
def min_moves_frame(self, threshold=2, drop_multiples=False, is_sorted=False, reset_index=True, copy=True):
    '''
    Return dataframe of firms with at least `threshold` many moves. Note that a single mover can have multiple moves at the same firm.

    Arguments:
        threshold (int): minimum number of moves required to keep a firm
        drop_multiples (bool): used only for collapsed format. If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency)
        is_sorted (bool): if False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
        reset_index (bool): if True, reset index at end
        copy (bool): if False, avoid copy

    Returns:
        (BipartiteBase): dataframe of firms with sufficiently many moves
    '''
    if threshold == 0:
        # If no threshold, nothing to filter
        if copy:
            return self.copy()
        return self
    # Identify qualifying firms, then delegate the row filtering (and any
    # re-collapsing / 'm' recomputation) to keep_ids
    valid_firms = self.min_moves_firms(threshold)
    return self.keep_ids('j', keep_ids_list=valid_firms, drop_multiples=drop_multiples, is_sorted=is_sorted, reset_index=reset_index, copy=copy)
| [
"numpy.stack",
"bipartitepandas.recollapse_loop",
"numpy.roll",
"bipartitepandas.update_dict",
"numpy.where",
"bipartitepandas.to_list",
"bipartitepandas.compare_frames",
"pandas.concat",
"numpy.concatenate"
] | [((31607, 31633), 'bipartitepandas.recollapse_loop', 'bpd.recollapse_loop', (['(False)'], {}), '(False)\n', (31626, 31633), True, 'import bipartitepandas as bpd\n'), ((33726, 33752), 'bipartitepandas.recollapse_loop', 'bpd.recollapse_loop', (['(False)'], {}), '(False)\n', (33745, 33752), True, 'import bipartitepandas as bpd\n'), ((35830, 35855), 'bipartitepandas.recollapse_loop', 'bpd.recollapse_loop', (['(True)'], {}), '(True)\n', (35849, 35855), True, 'import bipartitepandas as bpd\n'), ((1640, 1703), 'bipartitepandas.update_dict', 'bpd.update_dict', (["{'j': 'j', 'y': 'y', 'g': 'g'}", 'reference_dict'], {}), "({'j': 'j', 'y': 'y', 'g': 'g'}, reference_dict)\n", (1655, 1703), True, 'import bipartitepandas as bpd\n'), ((7529, 7576), 'pandas.concat', 'pd.concat', (['[stayers, movers]'], {'ignore_index': '(True)'}), '([stayers, movers], ignore_index=True)\n', (7538, 7576), True, 'import pandas as pd\n'), ((19173, 19191), 'numpy.roll', 'np.roll', (['j_col', '(-1)'], {}), '(j_col, -1)\n', (19180, 19191), True, 'import numpy as np\n'), ((19323, 19356), 'numpy.stack', 'np.stack', (['[j_col, j_next]'], {'axis': '(1)'}), '([j_col, j_next], axis=1)\n', (19331, 19356), True, 'import numpy as np\n'), ((19780, 19798), 'numpy.roll', 'np.roll', (['i_col', '(-1)'], {}), '(i_col, -1)\n', (19787, 19798), True, 'import numpy as np\n'), ((19816, 19834), 'numpy.roll', 'np.roll', (['j_col', '(-1)'], {}), '(j_col, -1)\n', (19823, 19834), True, 'import numpy as np\n'), ((19898, 19955), 'numpy.stack', 'np.stack', (['[j_col[valid_next], j_next[valid_next]]'], {'axis': '(1)'}), '([j_col[valid_next], j_next[valid_next]], axis=1)\n', (19906, 19955), True, 'import numpy as np\n'), ((19975, 19993), 'numpy.roll', 'np.roll', (['i_col', '(-2)'], {}), '(i_col, -2)\n', (19982, 19993), True, 'import numpy as np\n'), ((20013, 20031), 'numpy.roll', 'np.roll', (['j_col', '(-2)'], {}), '(j_col, -2)\n', (20020, 20031), True, 'import numpy as np\n'), ((20104, 20167), 'numpy.stack', 'np.stack', 
(['[j_col[valid_next_2], j_next_2[valid_next_2]]'], {'axis': '(1)'}), '([j_col[valid_next_2], j_next_2[valid_next_2]], axis=1)\n', (20112, 20167), True, 'import numpy as np\n'), ((20187, 20246), 'numpy.concatenate', 'np.concatenate', (['[base_linkages, secondary_linkages]'], {'axis': '(0)'}), '([base_linkages, secondary_linkages], axis=0)\n', (20201, 20246), True, 'import numpy as np\n'), ((20900, 20918), 'numpy.roll', 'np.roll', (['i_col', '(-1)'], {}), '(i_col, -1)\n', (20907, 20918), True, 'import numpy as np\n'), ((20942, 20962), 'numpy.roll', 'np.roll', (['indices', '(-1)'], {}), '(indices, -1)\n', (20949, 20962), True, 'import numpy as np\n'), ((21025, 21090), 'numpy.stack', 'np.stack', (['[indices[valid_next], indices_next[valid_next]]'], {'axis': '(1)'}), '([indices[valid_next], indices_next[valid_next]], axis=1)\n', (21033, 21090), True, 'import numpy as np\n'), ((21110, 21128), 'numpy.roll', 'np.roll', (['i_col', '(-2)'], {}), '(i_col, -2)\n', (21117, 21128), True, 'import numpy as np\n'), ((21154, 21174), 'numpy.roll', 'np.roll', (['indices', '(-2)'], {}), '(indices, -2)\n', (21161, 21174), True, 'import numpy as np\n'), ((21246, 21317), 'numpy.stack', 'np.stack', (['[indices[valid_next_2], indices_next_2[valid_next_2]]'], {'axis': '(1)'}), '([indices[valid_next_2], indices_next_2[valid_next_2]], axis=1)\n', (21254, 21317), True, 'import numpy as np\n'), ((21345, 21402), 'numpy.concatenate', 'np.concatenate', (['[base_indices, secondary_indices]'], {'axis': '(0)'}), '([base_indices, secondary_indices], axis=0)\n', (21359, 21402), True, 'import numpy as np\n'), ((2794, 2811), 'numpy.roll', 'np.roll', (['i_col', '(1)'], {}), '(i_col, 1)\n', (2801, 2811), True, 'import numpy as np\n'), ((2833, 2851), 'numpy.roll', 'np.roll', (['i_col', '(-1)'], {}), '(i_col, -1)\n', (2840, 2851), True, 'import numpy as np\n'), ((2873, 2890), 'numpy.roll', 'np.roll', (['j_col', '(1)'], {}), '(j_col, 1)\n', (2880, 2890), True, 'import numpy as np\n'), ((2912, 2930), 
'numpy.roll', 'np.roll', (['j_col', '(-1)'], {}), '(j_col, -1)\n', (2919, 2930), True, 'import numpy as np\n'), ((5029, 5067), 'bipartitepandas.to_list', 'bpd.to_list', (['frame.reference_dict[col]'], {}), '(frame.reference_dict[col])\n', (5040, 5067), True, 'import bipartitepandas as bpd\n'), ((19220, 19238), 'numpy.roll', 'np.roll', (['i_col', '(-1)'], {}), '(i_col, -1)\n', (19227, 19238), True, 'import numpy as np\n'), ((6849, 6887), 'bipartitepandas.to_list', 'bpd.to_list', (['frame.reference_dict[col]'], {}), '(frame.reference_dict[col])\n', (6860, 6887), True, 'import bipartitepandas as bpd\n'), ((12697, 12803), 'bipartitepandas.compare_frames', 'bpd.compare_frames', (['frame_largest_cc', 'frame_cc'], {'size_variable': 'component_size_variable', 'operator': '"""geq"""'}), "(frame_largest_cc, frame_cc, size_variable=\n component_size_variable, operator='geq')\n", (12715, 12803), True, 'import bipartitepandas as bpd\n'), ((13193, 13299), 'bipartitepandas.compare_frames', 'bpd.compare_frames', (['frame_largest_cc', 'frame_cc'], {'size_variable': 'component_size_variable', 'operator': '"""geq"""'}), "(frame_largest_cc, frame_cc, size_variable=\n component_size_variable, operator='geq')\n", (13211, 13299), True, 'import bipartitepandas as bpd\n'), ((16758, 16866), 'bipartitepandas.compare_frames', 'bpd.compare_frames', (['frame_largest_bcc', 'frame_bcc'], {'size_variable': 'component_size_variable', 'operator': '"""geq"""'}), "(frame_largest_bcc, frame_bcc, size_variable=\n component_size_variable, operator='geq')\n", (16776, 16866), True, 'import bipartitepandas as bpd\n'), ((17562, 17670), 'bipartitepandas.compare_frames', 'bpd.compare_frames', (['frame_largest_bcc', 'frame_bcc'], {'size_variable': 'component_size_variable', 'operator': '"""geq"""'}), "(frame_largest_bcc, frame_bcc, size_variable=\n component_size_variable, operator='geq')\n", (17580, 17670), True, 'import bipartitepandas as bpd\n'), ((14799, 14904), 'bipartitepandas.compare_frames', 
'bpd.compare_frames', (['frame_cc', 'frame_largest_cc'], {'size_variable': 'component_size_variable', 'operator': '"""gt"""'}), "(frame_cc, frame_largest_cc, size_variable=\n component_size_variable, operator='gt')\n", (14817, 14904), True, 'import bipartitepandas as bpd\n'), ((18525, 18632), 'bipartitepandas.compare_frames', 'bpd.compare_frames', (['frame_bcc', 'frame_largest_bcc'], {'size_variable': 'component_size_variable', 'operator': '"""gt"""'}), "(frame_bcc, frame_largest_bcc, size_variable=\n component_size_variable, operator='gt')\n", (18543, 18632), True, 'import bipartitepandas as bpd\n'), ((4821, 4859), 'bipartitepandas.to_list', 'bpd.to_list', (["frame.reference_dict['t']"], {}), "(frame.reference_dict['t'])\n", (4832, 4859), True, 'import bipartitepandas as bpd\n'), ((22552, 22580), 'numpy.where', 'np.where', (['(index_first == obs)'], {}), '(index_first == obs)\n', (22560, 22580), True, 'import numpy as np\n'), ((22806, 22835), 'numpy.where', 'np.where', (['(index_second == obs)'], {}), '(index_second == obs)\n', (22814, 22835), True, 'import numpy as np\n')] |
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, lil_matrix
import numpy as np
import scipy.sparse as sps
from tqdm import tqdm
from course_lib.Data_manager.IncrementalSparseMatrix import IncrementalSparseMatrix
def mix_URM(URM_positive: csr_matrix, URM_negative: csr_matrix):
    """Stack the positive-sample URM on top of the negative-sample URM.

    :param URM_positive: URM of positive samples
    :param URM_negative: URM of negative samples
    :return: vertically stacked URM in CSR format
    """
    stacked = sps.vstack((URM_positive, URM_negative))
    return stacked.tocsr()
def format_URM_slice_uncompressed(users, items_per_users, max_user_id, n_cols):
    """Build an uncompressed FM design-matrix slice: one row per (user, item)
    pair, with a 1 in the user column and a 1 in the offset item column.

    :param users: array of user ids, one per row of items_per_users
    :param items_per_users: 2D array of item ids for each user
    :param max_user_id: offset added to item ids so item columns come after user columns
    :param n_cols: total number of columns of the resulting matrix
    :return: sparse matrix with two non-zero entries (user, item) per row
    """
    n_pairs = items_per_users.shape[0] * items_per_users.shape[1]
    builder = IncrementalSparseMatrix(n_cols=n_cols)
    # Each (user, item) pair occupies one row with exactly two entries
    row_list = np.repeat(np.arange(n_pairs), repeats=2)
    col_list = np.zeros(shape=n_pairs * 2)
    # Even slots hold the user column, odd slots the (offset) item column
    col_list[0::2] = np.repeat(users, repeats=items_per_users.shape[1])
    col_list[1::2] = np.array(items_per_users).flatten() + max_user_id
    builder.add_data_lists(row_list_to_add=row_list, col_list_to_add=col_list,
                           data_list_to_add=np.ones(len(row_list)))
    return builder.get_SparseMatrix()
#############################################################################
########################## INTEGRATING EXTERNAL INFORMATION##################
#############################################################################
def add_UCM_info(fm_matrix: csr_matrix, UCM: csr_matrix, user_offset):
    """
    Append UCM-derived user features to a matrix in FM format.

    Note: no group by items should be applied in this case.
    :param fm_matrix: matrix containing dataset for FM models (last column has no rating list)
    :param UCM: UCM information about users
    :param user_offset: starting column index for users in fm_matrix (should be 0)
    :return: new matrix containing also information about the UCM
    """
    # Project the one-hot user block of fm_matrix onto the UCM feature space
    user_block = fm_matrix[:, user_offset: user_offset + UCM.shape[0]].copy()
    ucm_features = user_block.dot(UCM)
    return sps.hstack([fm_matrix.copy(), ucm_features], format="csr")
def add_ICM_info(fm_matrix: csr_matrix, ICM: csr_matrix, item_offset):
    """
    Append ICM-derived item features to a matrix in FM format.

    Note: no group by users should be applied in this case.
    :param fm_matrix: matrix concerning dataset for FM models (last column has no rating list)
    :param ICM: ICM information about items
    :param item_offset: starting column index for items in fm_matrix (it should be URM_train.shape[0]
    of the URM used to construct the fm_matrix)
    :return: new matrix integrating ICM data
    """
    # Project the one-hot item block of fm_matrix onto the ICM feature space
    item_block = fm_matrix[:, item_offset: item_offset + ICM.shape[0]].copy()
    icm_features = item_block.dot(ICM)
    return sps.hstack([fm_matrix.copy(), icm_features], format="csr")
#################################################################################
########################## SAMPLING STRATEGIES ##################################
#################################################################################
def uniform_sampling_strategy(negative_sample_size, URM, check_replacement=False):
    """
    Sample negative samples uniformly from the given URM
    :param negative_sample_size: number of negative samples to be sampled
    :param URM: URM from which samples are taken
    :param check_replacement: whether to check for replacement or not. Checking is expensive
    :return: bi-dimensional array of shape (2, negative_sample_size): in the first dimensions row-samples are
    stored, while in the second one col-samples are stored. Therefore, in the i-th col of this returned array
    you can find a indices of a negative sample in the URM_train
    """
    max_row = URM.shape[0]
    max_col = URM.shape[1]
    # Pre-allocated (2, n) buffer: user indices in row 0, item indices in row 1.
    # NOTE(review): the buffer is zero-initialised, so with check_replacement=True
    # the pair (0, 0) compares equal to every unfilled slot and can never be
    # accepted — confirm whether that is acceptable.
    collected_samples = np.zeros(shape=(2, negative_sample_size))
    sampled = 0
    while sampled < negative_sample_size:
        # Progress report every 10k accepted samples
        if sampled % 10000 == 0:
            print("Sampled {} on {}".format(sampled, negative_sample_size))
        # Draw one candidate (user, item) pair uniformly at random
        t_row = np.random.randint(low=0, high=max_row, size=1)[0]
        t_col = np.random.randint(low=0, high=max_col, size=1)[0]
        t_sample = np.array([[t_row], [t_col]])
        if check_replacement:
            # Accept only pairs not drawn before (column-wise equality against
            # the buffer) that are true negatives in the URM
            if (not np.equal(collected_samples, t_sample).min(axis=0).max()) and (URM[t_row, t_col] == 0):
                collected_samples[:, sampled] = [t_row, t_col]
                sampled += 1
        else:
            # Accept any true negative; duplicates are collapsed at the end
            if URM[t_row, t_col] == 0:
                collected_samples[:, sampled] = [t_row, t_col]
                sampled += 1
    # NOTE(review): in the non-checking branch np.unique both sorts the columns
    # and may return fewer than negative_sample_size samples.
    return collected_samples if check_replacement else np.unique(collected_samples, axis=1)
def sample_negative_interactions_uniformly(negative_sample_size, URM, batch_size=10000):
    """
    Sample (user, item) pairs that do NOT appear in the URM, uniformly at random.

    Candidate pairs are drawn in batches and each pair is packed into a single
    integer (user OR item << (log2(n_users) + 1)) so that duplicate detection
    against already-chosen pairs reduces to fast 1-D set operations.

    :param negative_sample_size: number of negative pairs to collect
    :param URM: interaction matrix; its non-zeros are the forbidden pairs
    :param batch_size: number of candidate pairs drawn per iteration
    :return: csr_matrix of the sampled negative interactions (data all ones)
    """
    n_users = URM.shape[0]
    n_items = URM.shape[1]
    invalid_users = np.array(URM.tocoo().row, dtype=np.uint64)
    invalid_items = np.array(URM.tocoo().col, dtype=np.uint64)
    # Convert users and items into a unique integers
    shifted_invalid_items = np.left_shift(invalid_items, np.uint64(np.log2(n_users) + 1))
    invalid_tuples = np.bitwise_or(invalid_users, shifted_invalid_items)
    negative_URM_builder = IncrementalSparseMatrix(n_rows=n_users, n_cols=n_items)
    with tqdm(desc="Sampling negative interactions", total=negative_sample_size) as p_bar:
        sampled = 0
        while sampled < negative_sample_size:
            # Sample a batch of users and items
            users = np.random.randint(low=0, high=n_users, size=batch_size, dtype=np.uint64)
            items = np.random.randint(low=0, high=n_items, size=batch_size, dtype=np.uint64)
            # Convert into unique integers
            shifted_items = np.left_shift(items, np.uint64(np.log2(n_users) + 1))
            tuples = np.bitwise_or(users, shifted_items)
            # Deduplicate within the batch, remembering first-occurrence positions
            unique_tuples, indices = np.unique(tuples, return_index=True)
            # Remove couple of user and items which are already inside the chosen ones
            invalid_tuples_mask = np.in1d(unique_tuples, invalid_tuples, assume_unique=True)
            valid_indices = indices[~invalid_tuples_mask]
            valid_users = users[valid_indices]
            valid_items = items[valid_indices]
            # Cap the size of batch size if it is the last batch
            if sampled + len(valid_users) > negative_sample_size:
                remaining_sample_size = negative_sample_size - sampled
                valid_users = valid_users[:remaining_sample_size]
                valid_items = valid_items[:remaining_sample_size]
            # Update builder, sampled elements and progress bar
            negative_URM_builder.add_data_lists(valid_users, valid_items, np.ones(len(valid_users)))
            sampled += len(valid_users)
            p_bar.update(len(valid_users))
            # Update invalid users and items so later batches cannot resample them
            invalid_tuples = np.concatenate([invalid_tuples, tuples[valid_indices]])
    return negative_URM_builder.get_SparseMatrix().tocsr()
#################################################################################
########################## NEGATIVE RATING PREPARATION ##########################
#################################################################################
def format_URM_negative_sampling_user_compressed(URM: csr_matrix, negative_rate=1, check_replacement=False,
                                                 sampling_function=None):
    """
    Format negative interactions of an URM in the way that is needed for the FM model. Here, however, users
    and compressed w.r.t. the items they liked in the negative samples sampled
    In particular you will have:
     - #different_items_sampled @row
     - #users+items+1 @cols
     - #(negative_sample_size)*(different_items_sampled*2) @data
    :param URM: URM to be preprocessed and from which negative samples are taken
    :param negative_rate: how much negatives samples do you want in proportion to the negative one
    :param check_replacement: whether to check for replacement or not. Checking costs time
    :param sampling_function: sampling function that takes in input the negative sample size
    and the URM from which samples are taken. If None, uniform sampling will be applied
    :return: csr_matrix containing the negative interactions:
    """
    negative_sample_size = int(URM.data.size * negative_rate)
    new_train = URM.copy().tocoo()
    # Item columns start right after the user columns
    item_offset = URM.shape[0]
    print("Start sampling...")
    if sampling_function is None:
        collected_samples = uniform_sampling_strategy(negative_sample_size=negative_sample_size, URM=URM,
                                                      check_replacement=check_replacement)
    else:
        collected_samples = sampling_function(negative_sample_size=negative_sample_size, URM=URM,
                                              check_replacement=check_replacement)
    # Different items sampled
    different_items_sampled = np.unique(collected_samples[1])
    # One output row per distinct sampled item; last column is the rating slot
    fm_matrix = coo_matrix((different_items_sampled.size, URM.shape[0] + URM.shape[1] + 1), dtype=np.int8)
    # NOTE(review): the buffers are sized with the positive-interaction count,
    # but they are filled with per-item sampled-user counts — confirm that any
    # unused zero-initialised tail (pointing at cell (0, 0) with data 0) is
    # harmless after tocsr().
    row_v = np.zeros(new_train.data.size + (different_items_sampled.size * 2))
    col_v = np.zeros(new_train.data.size + (different_items_sampled.size * 2))
    data_v = np.zeros(new_train.data.size + (different_items_sampled.size * 2))
    print("Matrix builiding...", end="")
    # For all the items, set up its content
    j = 0  # Index to scan and modify the vectors
    URM_train_csc = URM.copy().tocsc()
    for i, item in enumerate(different_items_sampled):
        # Find all users sampled for that item
        item_mask = collected_samples[1] == item
        users_sampled_for_that_item = np.unique(collected_samples[0][item_mask])
        offset = users_sampled_for_that_item.size
        if offset > 0:
            # One-hot user columns for every sampled user of this item
            col_v[j:j + offset] = users_sampled_for_that_item
            row_v[j:j + offset] = i
            data_v[j:j + offset] = 1
            # One-hot item column
            col_v[j + offset] = item + item_offset
            row_v[j + offset] = i
            data_v[j + offset] = 1
            # Rating column (last one)
            col_v[j + offset + 1] = fm_matrix.shape[1] - 1
            row_v[j + offset + 1] = i
            data_v[j + offset + 1] = 1
            j = j + offset + 2
        else:
            # Unreachable: each item in different_items_sampled was sampled at least once
            raise RuntimeError("Illegal state")
    print("Done")
    # Setting new information
    fm_matrix.row = row_v
    fm_matrix.col = col_v
    fm_matrix.data = data_v
    return fm_matrix.tocsr()
def format_URM_negative_sampling_non_compressed(URM: csr_matrix, negative_rate=1,
                                                sampling_function=None, check_replacement=False):
    """
    Format negative interactions of an URM in the way that is needed for the FM model
     - We have #positive_interactions * negative_rate @rows
     - We have #users+items+1 @cols
     - We have 3 interactions in each row: one for the users, one for the item, and -1 for the rating
    :param URM: URM to be preprocessed and from which negative samples is taken
    :param check_replacement: whether to check for replacement while sampling or not
    :param negative_rate: how much negatives samples do you want in proportion to the negative one
    :param sampling_function: sampling function that takes in input the negative sample size
    and the URM from which samples are taken (and if you want to check for replacement).
    If None, uniform sampling will be applied
    :return: csr_matrix containing the negative interactions
    """
    # Initial set-up
    item_offset = URM.shape[0]
    last_col = URM.shape[0] + URM.shape[1]
    negative_sample_size = int(URM.data.size * negative_rate)
    print("Start sampling...")
    # Take samples
    if sampling_function is None:
        collected_samples = uniform_sampling_strategy(negative_sample_size=negative_sample_size,
                                                      URM=URM, check_replacement=check_replacement)
    else:
        collected_samples = sampling_function(negative_sample_size=negative_sample_size, URM=URM,
                                              check_replacement=check_replacement)
    # Matrix shape is fixed with the requested sample count before the actual
    # count is known; if the sampler returned fewer (deduplicated) samples the
    # trailing rows simply stay empty.
    fm_matrix = coo_matrix((negative_sample_size, URM.shape[0] + URM.shape[1] + 1), dtype=np.int8)
    # From here on, work with the number of samples actually collected
    negative_sample_size = collected_samples[0].size
    # Set up initial vectors
    row_v = np.zeros(negative_sample_size * 3)  # Row should have (i,i,i) repeated for all the size
    col_v = np.zeros(negative_sample_size * 3)  # This is the "harder" to set
    data_v = -np.ones(negative_sample_size * 3)  # Negative interactions are encoded with rating -1
    print("Set up row of COO...")
    # Setting row vector
    for i in range(0, negative_sample_size):
        row_v[3 * i] = i
        row_v[(3 * i) + 1] = i
        row_v[(3 * i) + 2] = i
    print("Set up col of COO...")
    # Setting col vector
    for i in range(0, negative_sample_size):
        # Retrieving information
        user = collected_samples[0, i]
        item = collected_samples[1, i]
        # Fixing col indices to be added to the new matrix
        user_index = user
        item_index = item + item_offset
        col_v[3 * i] = user_index
        col_v[(3 * i) + 1] = item_index
        col_v[(3 * i) + 2] = last_col
    # Setting new information
    fm_matrix.row = row_v
    fm_matrix.col = col_v
    fm_matrix.data = data_v
    return fm_matrix.tocsr()
#################################################################################
########################## POSITIVE RATING PREPARATION ##########################
#################################################################################
def format_URM_positive_user_compressed(URM: csr_matrix):
    """
    Format positive interactions of an URM in the way that is needed for the FM model.
    Here, however, users information are grouped w.r.t. items, meaning that, we will have:
     - We have #warm_items @row
     - We have #users+items+1 @cols
     - We have #(interactions)+(warm_items*2) @data
    Each row is representing a warm item and all users that interacted with that item are stored in that row.
    :param URM: URM to be preprocessed
    :return: preprocessed URM in sparse matrix csr format
    """
    # Warm items = items with at least one interaction (non-empty CSC column)
    warm_items_mask = np.ediff1d(URM.tocsc().indptr) > 0
    warm_items = np.arange(URM.shape[1])[warm_items_mask]
    new_train = URM.copy().tocoo()
    # One output row per warm item; last column is the rating slot
    fm_matrix = coo_matrix((warm_items.size, URM.shape[0] + URM.shape[1] + 1), dtype=np.int8)
    # Index offset
    item_offset = URM.shape[0]
    # Set up initial vectors
    row_v = np.zeros(new_train.data.size + (warm_items.size * 2))
    col_v = np.zeros(new_train.data.size + (warm_items.size * 2))
    data_v = np.zeros(new_train.data.size + (warm_items.size * 2))  # Already ok, nothing to be added
    # For all the items, set up its content
    j = 0  # Index to scan and modify the vectors
    URM_train_csc = URM.copy().tocsc()
    for i, item in enumerate(warm_items):
        # Find all users who liked that item
        users_who_liked_item = URM_train_csc[:, item].indices
        offset = users_who_liked_item.size
        if offset > 0:
            # One-hot user columns for every user of this item
            col_v[j:j + offset] = users_who_liked_item
            row_v[j:j + offset] = i
            data_v[j:j + offset] = 1
            # One-hot item column
            col_v[j + offset] = item + item_offset
            row_v[j + offset] = i
            data_v[j + offset] = 1
            # Rating column (last one)
            col_v[j + offset + 1] = fm_matrix.shape[1] - 1
            row_v[j + offset + 1] = i
            data_v[j + offset + 1] = 1
            j = j + offset + 2
        else:
            # Unreachable: warm_items only contains items with >= 1 interaction
            raise RuntimeError("Illegal state")
    # Setting new information
    fm_matrix.row = row_v
    fm_matrix.col = col_v
    fm_matrix.data = data_v
    return fm_matrix.tocsr()
def format_URM_positive_non_compressed(URM: csr_matrix):
    """
    Format positive interactions of an URM in the way that is needed for the FM model.
     - We have #num_ratings rows
     - The last column holds all the ratings (for an implicit dataset it is just a col full of 1)
     - In each row there are 3 interactions: 1 for the user, 1 for the item, and 1 for the rating
     - Only positive samples are encoded here
    Note: this method works only for implicit dataset
    :param URM: URM to be preprocessed
    :return: csr_matrix containing the URM preprocessed in the described way
    """
    new_train = URM.copy().tocoo()
    n_interactions = new_train.data.size
    # Column layout: [0, n_users) users | [n_users, n_users+n_items) items | last col = rating
    item_offset = URM.shape[0]
    last_col = URM.shape[0] + URM.shape[1]
    # Each interaction becomes one row with exactly three non-zeros
    # (user one-hot, item one-hot, rating). Built with vectorized slice
    # assignments instead of the two O(n) Python loops of the old version.
    row_v = np.repeat(np.arange(n_interactions), 3)
    col_v = np.empty(n_interactions * 3)
    col_v[0::3] = new_train.row
    col_v[1::3] = new_train.col + item_offset
    col_v[2::3] = last_col
    data_v = np.ones(n_interactions * 3)
    fm_matrix = sps.coo_matrix((n_interactions, last_col + 1), dtype=np.int8)
    # Setting new information
    fm_matrix.row = row_v
    fm_matrix.col = col_v
    fm_matrix.data = data_v
    return fm_matrix.tocsr()
def convert_URM_to_FM(URM: csr_matrix):
    """
    Convert the positive interactions of a URM into FM format: one row per
    interaction, carrying a one-hot user column and a one-hot item column
    (item columns are offset by the number of users).

    Note: this method works only for implicit datasets.

    :param URM: URM to be preprocessed
    :return: csr_matrix of shape (n_interactions, n_users + n_items)
    """
    n_users, n_items = URM.shape
    coo = URM.tocoo()
    n_interactions = len(URM.data)
    FM_matrix = sps.coo_matrix((n_interactions, n_users + n_items))
    # Two non-zeros per output row
    FM_matrix.row = np.repeat(np.arange(n_interactions), 2)
    # Interleave user and shifted-item columns: [u0, i0, u1, i1, ...]
    interleaved = np.empty(2 * n_interactions, dtype=coo.col.dtype)
    interleaved[0::2] = coo.row
    interleaved[1::2] = coo.col + n_users
    FM_matrix.col = interleaved
    FM_matrix.data = np.ones(2 * n_interactions, dtype=np.float32)
    return FM_matrix.tocsr()
| [
"tqdm.tqdm",
"numpy.in1d",
"numpy.concatenate",
"scipy.sparse.vstack",
"numpy.log2",
"numpy.zeros",
"numpy.ones",
"numpy.equal",
"course_lib.Data_manager.IncrementalSparseMatrix.IncrementalSparseMatrix",
"scipy.sparse.coo_matrix",
"numpy.arange",
"numpy.array",
"numpy.random.randint",
"num... | [((303, 357), 'scipy.sparse.vstack', 'sps.vstack', (['[URM_positive, URM_negative]'], {'format': '"""csr"""'}), "([URM_positive, URM_negative], format='csr')\n", (313, 357), True, 'import scipy.sparse as sps\n'), ((464, 502), 'course_lib.Data_manager.IncrementalSparseMatrix.IncrementalSparseMatrix', 'IncrementalSparseMatrix', ([], {'n_cols': 'n_cols'}), '(n_cols=n_cols)\n', (487, 502), False, 'from course_lib.Data_manager.IncrementalSparseMatrix import IncrementalSparseMatrix\n'), ((618, 689), 'numpy.zeros', 'np.zeros', ([], {'shape': '(items_per_users.shape[0] * items_per_users.shape[1] * 2)'}), '(shape=items_per_users.shape[0] * items_per_users.shape[1] * 2)\n', (626, 689), True, 'import numpy as np\n'), ((710, 760), 'numpy.repeat', 'np.repeat', (['users'], {'repeats': 'items_per_users.shape[1]'}), '(users, repeats=items_per_users.shape[1])\n', (719, 760), True, 'import numpy as np\n'), ((2185, 2242), 'scipy.sparse.hstack', 'sps.hstack', (['[fm_matrix_copy, UCM_fm_matrix]'], {'format': '"""csr"""'}), "([fm_matrix_copy, UCM_fm_matrix], format='csr')\n", (2195, 2242), True, 'import scipy.sparse as sps\n'), ((3035, 3092), 'scipy.sparse.hstack', 'sps.hstack', (['[fm_matrix_copy, ICM_fm_matrix]'], {'format': '"""csr"""'}), "([fm_matrix_copy, ICM_fm_matrix], format='csr')\n", (3045, 3092), True, 'import scipy.sparse as sps\n'), ((4098, 4139), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, negative_sample_size)'}), '(shape=(2, negative_sample_size))\n', (4106, 4139), True, 'import numpy as np\n'), ((5391, 5442), 'numpy.bitwise_or', 'np.bitwise_or', (['invalid_users', 'shifted_invalid_items'], {}), '(invalid_users, shifted_invalid_items)\n', (5404, 5442), True, 'import numpy as np\n'), ((5470, 5525), 'course_lib.Data_manager.IncrementalSparseMatrix.IncrementalSparseMatrix', 'IncrementalSparseMatrix', ([], {'n_rows': 'n_users', 'n_cols': 'n_items'}), '(n_rows=n_users, n_cols=n_items)\n', (5493, 5525), False, 'from course_lib.Data_manager.IncrementalSparseMatrix 
import IncrementalSparseMatrix\n'), ((9235, 9266), 'numpy.unique', 'np.unique', (['collected_samples[1]'], {}), '(collected_samples[1])\n', (9244, 9266), True, 'import numpy as np\n'), ((9284, 9378), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(different_items_sampled.size, URM.shape[0] + URM.shape[1] + 1)'], {'dtype': 'np.int8'}), '((different_items_sampled.size, URM.shape[0] + URM.shape[1] + 1),\n dtype=np.int8)\n', (9294, 9378), False, 'from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, lil_matrix\n'), ((9388, 9452), 'numpy.zeros', 'np.zeros', (['(new_train.data.size + different_items_sampled.size * 2)'], {}), '(new_train.data.size + different_items_sampled.size * 2)\n', (9396, 9452), True, 'import numpy as np\n'), ((9467, 9531), 'numpy.zeros', 'np.zeros', (['(new_train.data.size + different_items_sampled.size * 2)'], {}), '(new_train.data.size + different_items_sampled.size * 2)\n', (9475, 9531), True, 'import numpy as np\n'), ((9547, 9611), 'numpy.zeros', 'np.zeros', (['(new_train.data.size + different_items_sampled.size * 2)'], {}), '(new_train.data.size + different_items_sampled.size * 2)\n', (9555, 9611), True, 'import numpy as np\n'), ((12424, 12511), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(negative_sample_size, URM.shape[0] + URM.shape[1] + 1)'], {'dtype': 'np.int8'}), '((negative_sample_size, URM.shape[0] + URM.shape[1] + 1), dtype=\n np.int8)\n', (12434, 12511), False, 'from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, lil_matrix\n'), ((12602, 12636), 'numpy.zeros', 'np.zeros', (['(negative_sample_size * 3)'], {}), '(negative_sample_size * 3)\n', (12610, 12636), True, 'import numpy as np\n'), ((12702, 12736), 'numpy.zeros', 'np.zeros', (['(negative_sample_size * 3)'], {}), '(negative_sample_size * 3)\n', (12710, 12736), True, 'import numpy as np\n'), ((14636, 14713), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(warm_items.size, URM.shape[0] + URM.shape[1] + 1)'], {'dtype': 'np.int8'}), '((warm_items.size, URM.shape[0] + 
URM.shape[1] + 1), dtype=np.int8)\n', (14646, 14713), False, 'from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, lil_matrix\n'), ((14807, 14858), 'numpy.zeros', 'np.zeros', (['(new_train.data.size + warm_items.size * 2)'], {}), '(new_train.data.size + warm_items.size * 2)\n', (14815, 14858), True, 'import numpy as np\n'), ((14873, 14924), 'numpy.zeros', 'np.zeros', (['(new_train.data.size + warm_items.size * 2)'], {}), '(new_train.data.size + warm_items.size * 2)\n', (14881, 14924), True, 'import numpy as np\n'), ((14940, 14991), 'numpy.zeros', 'np.zeros', (['(new_train.data.size + warm_items.size * 2)'], {}), '(new_train.data.size + warm_items.size * 2)\n', (14948, 14991), True, 'import numpy as np\n'), ((16646, 16725), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(URM.data.size, URM.shape[0] + URM.shape[1] + 1)'], {'dtype': 'np.int8'}), '((URM.data.size, URM.shape[0] + URM.shape[1] + 1), dtype=np.int8)\n', (16660, 16725), True, 'import scipy.sparse as sps\n'), ((16878, 16911), 'numpy.zeros', 'np.zeros', (['(new_train.data.size * 3)'], {}), '(new_train.data.size * 3)\n', (16886, 16911), True, 'import numpy as np\n'), ((16977, 17010), 'numpy.zeros', 'np.zeros', (['(new_train.data.size * 3)'], {}), '(new_train.data.size * 3)\n', (16985, 17010), True, 'import numpy as np\n'), ((17055, 17087), 'numpy.ones', 'np.ones', (['(new_train.data.size * 3)'], {}), '(new_train.data.size * 3)\n', (17062, 17087), True, 'import numpy as np\n'), ((18364, 18409), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(n_sample, n_users + n_items)'], {}), '((n_sample, n_users + n_items))\n', (18378, 18409), True, 'import scipy.sparse as sps\n'), ((18676, 18710), 'numpy.concatenate', 'np.concatenate', (['[row, col]'], {'axis': '(1)'}), '([row, col], axis=1)\n', (18690, 18710), True, 'import numpy as np\n'), ((528, 590), 'numpy.arange', 'np.arange', (['(items_per_users.shape[0] * items_per_users.shape[1])'], {}), '(items_per_users.shape[0] * items_per_users.shape[1])\n', (537, 
590), True, 'import numpy as np\n'), ((4458, 4486), 'numpy.array', 'np.array', (['[[t_row], [t_col]]'], {}), '([[t_row], [t_col]])\n', (4466, 4486), True, 'import numpy as np\n'), ((4917, 4953), 'numpy.unique', 'np.unique', (['collected_samples'], {'axis': '(1)'}), '(collected_samples, axis=1)\n', (4926, 4953), True, 'import numpy as np\n'), ((5535, 5606), 'tqdm.tqdm', 'tqdm', ([], {'desc': '"""Sampling negative interactions"""', 'total': 'negative_sample_size'}), "(desc='Sampling negative interactions', total=negative_sample_size)\n", (5539, 5606), False, 'from tqdm import tqdm\n'), ((9979, 10021), 'numpy.unique', 'np.unique', (['collected_samples[0][item_mask]'], {}), '(collected_samples[0][item_mask])\n', (9988, 10021), True, 'import numpy as np\n'), ((12782, 12815), 'numpy.ones', 'np.ones', (['(negative_sample_size * 3)'], {}), '(negative_sample_size * 3)\n', (12789, 12815), True, 'import numpy as np\n'), ((14543, 14566), 'numpy.arange', 'np.arange', (['URM.shape[1]'], {}), '(URM.shape[1])\n', (14552, 14566), True, 'import numpy as np\n'), ((18460, 18479), 'numpy.arange', 'np.arange', (['n_sample'], {}), '(n_sample)\n', (18469, 18479), True, 'import numpy as np\n'), ((845, 907), 'numpy.arange', 'np.arange', (['(items_per_users.shape[0] * items_per_users.shape[1])'], {}), '(items_per_users.shape[0] * items_per_users.shape[1])\n', (854, 907), True, 'import numpy as np\n'), ((4323, 4369), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'max_row', 'size': '(1)'}), '(low=0, high=max_row, size=1)\n', (4340, 4369), True, 'import numpy as np\n'), ((4389, 4435), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'max_col', 'size': '(1)'}), '(low=0, high=max_col, size=1)\n', (4406, 4435), True, 'import numpy as np\n'), ((5751, 5823), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n_users', 'size': 'batch_size', 'dtype': 'np.uint64'}), '(low=0, high=n_users, size=batch_size, dtype=np.uint64)\n', 
(5768, 5823), True, 'import numpy as np\n'), ((5844, 5916), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n_items', 'size': 'batch_size', 'dtype': 'np.uint64'}), '(low=0, high=n_items, size=batch_size, dtype=np.uint64)\n', (5861, 5916), True, 'import numpy as np\n'), ((6064, 6099), 'numpy.bitwise_or', 'np.bitwise_or', (['users', 'shifted_items'], {}), '(users, shifted_items)\n', (6077, 6099), True, 'import numpy as np\n'), ((6137, 6173), 'numpy.unique', 'np.unique', (['tuples'], {'return_index': '(True)'}), '(tuples, return_index=True)\n', (6146, 6173), True, 'import numpy as np\n'), ((6296, 6354), 'numpy.in1d', 'np.in1d', (['unique_tuples', 'invalid_tuples'], {'assume_unique': '(True)'}), '(unique_tuples, invalid_tuples, assume_unique=True)\n', (6303, 6354), True, 'import numpy as np\n'), ((7166, 7221), 'numpy.concatenate', 'np.concatenate', (['[invalid_tuples, tuples[valid_indices]]'], {}), '([invalid_tuples, tuples[valid_indices]])\n', (7180, 7221), True, 'import numpy as np\n'), ((782, 807), 'numpy.array', 'np.array', (['items_per_users'], {}), '(items_per_users)\n', (790, 807), True, 'import numpy as np\n'), ((942, 1004), 'numpy.arange', 'np.arange', (['(items_per_users.shape[0] * items_per_users.shape[1])'], {}), '(items_per_users.shape[0] * items_per_users.shape[1])\n', (951, 1004), True, 'import numpy as np\n'), ((5347, 5363), 'numpy.log2', 'np.log2', (['n_users'], {}), '(n_users)\n', (5354, 5363), True, 'import numpy as np\n'), ((6020, 6036), 'numpy.log2', 'np.log2', (['n_users'], {}), '(n_users)\n', (6027, 6036), True, 'import numpy as np\n'), ((4538, 4575), 'numpy.equal', 'np.equal', (['collected_samples', 't_sample'], {}), '(collected_samples, t_sample)\n', (4546, 4575), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import re
from scipy.sparse import csr_matrix
import sparse_dot_topn.sparse_dot_topn as ct
from string import punctuation
import spacy
from collections import defaultdict
# Characters that must be stripped from raw list-like strings
chars_to_remove = ['"', "'", "[", "]"]


def clean_element(element):
    """Strip unwanted characters from a raw token and normalise it.

    Quotes and brackets are dropped, then surrounding whitespace is removed
    and the result is lower-cased.
    """
    for unwanted in chars_to_remove:
        element = element.replace(unwanted, '')
    return element.strip().lower()
def string_to_list(series):
    """Split each comma-separated string in *series* into cleaned tokens.

    :param series: iterable of comma-separated strings
    :return: list of lists, one per input string, holding the cleaned elements
    """
    # Nested comprehension replaces the two manual append loops (same output)
    return [[clean_element(element) for element in elements.split(',')]
            for elements in series]
def get_orgs(text, nlp):
    """
    Extract ORG/NORP entities from *text*, de-duplicated by entity text.

    :param text: raw text to analyse
    :param nlp: spaCy language pipeline (callable returning a Doc)
    :return: entities with unique texts, in order of first appearance
    :rtype: list
    """
    orgs = []
    # Track texts in a set instead of rebuilding a list per entity (was O(n^2));
    # also avoids the confusing shadowing of `ent` in the old comprehension.
    seen_texts = set()
    doc = nlp(text)
    for ent in doc.ents:
        if ent.label_ in ['ORG', 'NORP']:
            # Skip bare punctuation hits and duplicates of an already-seen name
            if ent.text not in punctuation and ent.text not in seen_texts:
                seen_texts.add(ent.text)
                orgs.append(ent)
    return orgs
def get_orgs_sent(text, nlp):
    """
    Map each ORG/NORP entity found in *text* to the first sentence it appears in.

    :param text: raw text to analyse
    :param nlp: spaCy language pipeline (callable returning a Doc)
    :return: dictionary mapping recognised entity text to one sentence
    :rtype: dict
    """
    sentences = [sentence.text for sentence in nlp(text).sents]
    occurrences = defaultdict(list)
    # Re-parse every sentence so entities are located sentence by sentence
    for sentence in sentences:
        for ent in nlp(sentence).ents:
            if ent.label_ in ['ORG', 'NORP'] and ent.text not in punctuation:
                occurrences[ent.text].append(sentence)
    # Keep only the first sentence in which each organisation was seen
    return {org: sents[0] for org, sents in occurrences.items()}
def ngrams_chars(string, n=3):
    """Return the list of character n-grams of a normalised name.

    The input is ASCII-folded, stripped of noisy punctuation, title-cased and
    padded with a single space on both sides before the n-grams are produced.
    Missing values (NaN/None) yield an empty list.
    """
    if pd.isna(string):
        string = ""
    # Drop non-ASCII characters, then normalise case
    string = string.encode("ascii", errors="ignore").decode()
    string = string.lower()
    # Strip noisy punctuation characters in a single regex pass
    noisy_chars = [")", "(", ".", "|", "[", "]", "{", "}", "'"]
    string = re.sub('[' + re.escape(''.join(noisy_chars)) + ']', '', string)
    string = string.replace('&', 'and').replace(',', ' ').replace('-', ' ')
    # Capitalise the first letter of each word
    string = string.title()
    # Collapse runs of spaces, then pad so boundary n-grams exist
    string = re.sub(' +', ' ', string).strip()
    string = ' ' + string + ' '
    string = re.sub(r'[,-./]|\sBD', r'', string)
    windows = zip(*(string[offset:] for offset in range(n)))
    return [''.join(window) for window in windows]
def awesome_cossim_top(A, B, ntop, lower_bound=0.0):
    """Compute A @ B keeping, for each row, only the ``ntop`` largest values.

    Thin wrapper around ``sparse_dot_topn.sparse_dot_topn``, which performs
    the sparse product natively and drops entries below ``lower_bound``.

    :param A: sparse matrix (converted to CSR)
    :param B: sparse matrix (converted to CSR)
    :param ntop: maximum number of entries kept per row of the product
    :param lower_bound: product entries below this value are discarded
    :return: csr_matrix of shape (A rows, B cols) with at most ``ntop``
        non-zeros per row
    """
    # force A and B as a CSR matrix.
    # If they have already been CSR, there is no overhead
    A = A.tocsr()
    B = B.tocsr()
    M, _ = A.shape
    _, N = B.shape
    idx_dtype = np.int32
    # Worst-case output size: ntop entries per row
    nnz_max = M * ntop
    # Output buffers filled in place by the native routine
    indptr = np.zeros(M + 1, dtype=idx_dtype)
    indices = np.zeros(nnz_max, dtype=idx_dtype)
    data = np.zeros(nnz_max, dtype=A.dtype)
    ct.sparse_dot_topn(
        M, N, np.asarray(A.indptr, dtype=idx_dtype),
        np.asarray(A.indices, dtype=idx_dtype),
        A.data,
        np.asarray(B.indptr, dtype=idx_dtype),
        np.asarray(B.indices, dtype=idx_dtype),
        B.data,
        ntop,
        lower_bound,
        indptr, indices, data)
    return csr_matrix((data, indices, indptr), shape=(M, N))
def resolve_org(dirty_name, vectorizer, clean_matrix, companies):
    """Resolve a raw organisation name against the known-company matrix.

    :param dirty_name: organisation name as extracted from free text
    :param vectorizer: fitted vectorizer used to embed the dirty name
    :param clean_matrix: vectorized representation of the known company names
    :param companies: DataFrame of companies; must expose a 'kvk_number' column
        whose rows align with the columns of ``clean_matrix``
    :return: set of candidate kvk numbers, or None when there is no match
        above the 0.8 similarity threshold (or when matching fails)
    """
    dirty_matrix = vectorizer.transform([dirty_name])
    try:
        # Keep at most 5 candidates with cosine similarity >= 0.8
        matches = awesome_cossim_top(dirty_matrix, clean_matrix.transpose(), 5, 0.8)
        non_zeros = matches.nonzero()
        sparsecols = non_zeros[1]
        if len(sparsecols) < 1:
            # print(f"Nothing detected for {dirty_name}")
            return None
        else:
            candidates = set()
            for col in sparsecols:
                # Column index maps back to a row of the companies frame
                hit = companies.iloc[col, :]
                kvk = hit['kvk_number']
                candidates.add(kvk)
            return candidates
    except Exception as e:
        # NOTE(review): deliberately best-effort — any failure during matching
        # is swallowed and treated as "no match"; consider logging the error.
        # print(f"Failed to resolve org {dirty_name} with error: {e}")
        return None
| [
"numpy.asarray",
"numpy.zeros",
"collections.defaultdict",
"scipy.sparse.csr_matrix",
"pandas.isna",
"re.sub"
] | [((1582, 1599), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1593, 1599), False, 'from collections import defaultdict\n'), ((2041, 2056), 'pandas.isna', 'pd.isna', (['string'], {}), '(string)\n', (2048, 2056), True, 'import pandas as pd\n'), ((2352, 2374), 're.sub', 're.sub', (['rx', '""""""', 'string'], {}), "(rx, '', string)\n", (2358, 2374), False, 'import re\n'), ((2792, 2826), 're.sub', 're.sub', (['"""[,-./]|\\\\sBD"""', '""""""', 'string'], {}), "('[,-./]|\\\\sBD', '', string)\n", (2798, 2826), False, 'import re\n'), ((3240, 3272), 'numpy.zeros', 'np.zeros', (['(M + 1)'], {'dtype': 'idx_dtype'}), '(M + 1, dtype=idx_dtype)\n', (3248, 3272), True, 'import numpy as np\n'), ((3287, 3321), 'numpy.zeros', 'np.zeros', (['nnz_max'], {'dtype': 'idx_dtype'}), '(nnz_max, dtype=idx_dtype)\n', (3295, 3321), True, 'import numpy as np\n'), ((3333, 3365), 'numpy.zeros', 'np.zeros', (['nnz_max'], {'dtype': 'A.dtype'}), '(nnz_max, dtype=A.dtype)\n', (3341, 3365), True, 'import numpy as np\n'), ((3697, 3746), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, indices, indptr)'], {'shape': '(M, N)'}), '((data, indices, indptr), shape=(M, N))\n', (3707, 3746), False, 'from scipy.sparse import csr_matrix\n'), ((3405, 3442), 'numpy.asarray', 'np.asarray', (['A.indptr'], {'dtype': 'idx_dtype'}), '(A.indptr, dtype=idx_dtype)\n', (3415, 3442), True, 'import numpy as np\n'), ((3452, 3490), 'numpy.asarray', 'np.asarray', (['A.indices'], {'dtype': 'idx_dtype'}), '(A.indices, dtype=idx_dtype)\n', (3462, 3490), True, 'import numpy as np\n'), ((3516, 3553), 'numpy.asarray', 'np.asarray', (['B.indptr'], {'dtype': 'idx_dtype'}), '(B.indptr, dtype=idx_dtype)\n', (3526, 3553), True, 'import numpy as np\n'), ((3563, 3601), 'numpy.asarray', 'np.asarray', (['B.indices'], {'dtype': 'idx_dtype'}), '(B.indices, dtype=idx_dtype)\n', (3573, 3601), True, 'import numpy as np\n'), ((2624, 2649), 're.sub', 're.sub', (['""" +"""', '""" """', 'string'], {}), "(' +', ' ', 
string)\n", (2630, 2649), False, 'import re\n')] |
"""
Module for splitting data into train/validation/test sets
"""
import numpy as np
import pandas as pd
def splitting_functions_factory(function_name):
    """Return the splitting function registered under *function_name*.

    :param function_name: name of the strategy; only "by_time" is supported
    :return: the splitting function
    :raises ValueError: if the name is unknown (previously the factory
        silently returned None, which crashed later with an opaque TypeError)
    """
    if function_name == "by_time":
        return split_by_time
    raise ValueError(f"Unknown splitting function: {function_name!r}")
def split_by_time(interactions, fraction_test, random_state=30):
    """
    Split interactions chronologically: the most recent ``fraction_test``
    share of timestamps becomes the test set.

    :return: tuple (train, test) of DataFrames
    """
    # Seeded for parity with the other splitting strategies (the percentile
    # cutoff itself is deterministic)
    np.random.seed(random_state)
    cutoff = np.percentile(
        interactions["timestamp"], 100 * (1 - fraction_test)
    )
    before_cutoff = interactions["timestamp"] < cutoff
    return interactions[before_cutoff], interactions[~before_cutoff]
def filtering_restrict_to_train_users(train, test):
    """
    Drop test rows whose user never appears in the train set.
    """
    known_users = set(train["user"])
    mask = test["user"].isin(known_users)
    return test[mask]
def filtering_already_interacted_items(train, test):
    """Remove test rows whose (user, item) pair already occurs in train."""
    original_columns = test.columns
    seen_pairs = train[["user", "item"]].drop_duplicates()
    # A left merge with indicator marks pairs present only in `test`.
    annotated = pd.merge(
        test, seen_pairs, on=["user", "item"], how="left", indicator=True
    )
    unseen = annotated[annotated["_merge"] == "left_only"]
    return unseen[original_columns]
def filtering_restrict_to_unique_user_item_pair(dataframe):
    """Collapse duplicate (user, item) rows into a single row.

    The events of all duplicates are gathered into a list; the earliest
    timestamp of the pair is kept.
    """
    aggregated = dataframe.groupby(["user", "item"]).agg(
        {"event": list, "timestamp": "min"}
    )
    return aggregated.reset_index()
def split(
    interactions,
    splitting_config=None,
    restrict_to_train_users=True,
    filter_out_already_interacted_items=True,
    restrict_train_to_unique_user_item_pairs=True,
    restrict_test_to_unique_user_item_pairs=True,
    replace_events_by_ones=True,
):
    """Split *interactions* into train and test sets.

    Parameters
    ----------
    interactions : pd.DataFrame
        Interactions dataframe.
    splitting_config : dict, optional
        ``{"name": ..., **kwargs}`` selecting and parameterizing the
        splitting function. Currently only name="by_time" is supported;
        defaults to a time-based split with 20% of interactions in test.
    restrict_to_train_users : bool, optional
        Keep only test users that also appear in the train set.
    filter_out_already_interacted_items : bool, optional
        Drop test (user, item) pairs already present in the train set.
    restrict_train_to_unique_user_item_pairs : bool, optional
        Collapse duplicate train (user, item) rows into one.
    restrict_test_to_unique_user_item_pairs : bool, optional
        Collapse duplicate test (user, item) rows into one.
    replace_events_by_ones : bool, optional
        Replace the ``event`` column by the constant 1 in both sets.

    Returns
    -------
    tuple of pd.DataFrame
        ``(train, test)``.
    """
    if splitting_config is None:
        splitting_config = {
            "name": "by_time",
            "fraction_test": 0.2,
        }
    # Copy so the caller's config dict is left untouched.
    params = dict(splitting_config)
    method_name = params.pop("name")
    train, test = splitting_functions_factory(method_name)(
        interactions=interactions, **params
    )
    if restrict_to_train_users:
        test = filtering_restrict_to_train_users(train, test)
    if filter_out_already_interacted_items:
        test = filtering_already_interacted_items(train, test)
    if restrict_train_to_unique_user_item_pairs:
        train = filtering_restrict_to_unique_user_item_pair(train)
    if restrict_test_to_unique_user_item_pairs:
        test = filtering_restrict_to_unique_user_item_pair(test)
    if replace_events_by_ones:
        train["event"] = 1
        test["event"] = 1
    return train, test
| [
"numpy.percentile",
"pandas.merge",
"numpy.random.seed"
] | [((438, 466), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (452, 466), True, 'import numpy as np\n'), ((493, 560), 'numpy.percentile', 'np.percentile', (["interactions['timestamp']", '(100 * (1 - fraction_test))'], {}), "(interactions['timestamp'], 100 * (1 - fraction_test))\n", (506, 560), True, 'import numpy as np\n'), ((1262, 1355), 'pandas.merge', 'pd.merge', (['test', 'already_interacted_items'], {'on': "['user', 'item']", 'how': '"""left"""', 'indicator': '(True)'}), "(test, already_interacted_items, on=['user', 'item'], how='left',\n indicator=True)\n", (1270, 1355), True, 'import pandas as pd\n')] |
from flask import url_for, request
from util import json_response
import requests
import urllib
import sys
from datetime import datetime, timedelta
import pandas as pd
from time import sleep
import ccxt
from Trade import get_bitfinex_candle_data,transfer_to_period_data,calcBolling,calcSince,calcEMA
from utility import saveJson
import pytz
import time
import json
from pprint import pprint,pformat
from TradeClient import TradeClient
import configparser
import numpy as np
from echarts_data import get_echarts_html
from Signals import signal_moving_average
# Module-level state shared by all route handlers.
config = configparser.ConfigParser()
config.read('config.ini')  # initial read; query_echarts() re-reads per request
tz = pytz.timezone('Asia/Shanghai') # UTC+8 (China Standard Time)
client = TradeClient(setProxy=True)  # exchange client used for live candle requests
def routes(app):
    """Register the Flask routes (index page and /query chart endpoint) on *app*."""
    @app.route('/')
    def index():
        """Index page; returns an empty JSON payload."""
        print('index')
        r = {}
        return json_response(200, r, True)
    @app.route("/query")
    def query_echarts():
        """Render candle data (plus optional Bollinger/EMA overlays) as an ECharts HTML page.

        Reads trading parameters from config.ini on every request, loads
        historical candles from CSV, optionally appends live candles from the
        exchange, resamples to the configured period, computes indicators and
        builds the chart via get_echarts_html().
        """
        print('query_log')
        config.read('config.ini')  # re-read so config edits take effect without a restart
        rule_type = config['trade']['rule_type']
        real_data = config['default']['real_data'] # whether live data is needed; '1': yes, otherwise: no
        symbol = config['trade']['symbol'] # trading pair
        # Optional query-string overrides for the displayed window / symbol.
        forward_num = request.args.get("forward") or ""
        backward_num = request.args.get("backward") or ""
        begin_time = request.args.get("begin_time") or ""
        end_time = request.args.get("end_time") or ""
        trade_symbol = request.args.get("trade_symbol") or ""
        filename = config['default']['filename']
        if (trade_symbol != ""): # trading-symbol override from the query string
            trade_symbol = trade_symbol.upper()
            print(trade_symbol)
            if trade_symbol == 'ETH':
                filename = config['default']['filename_eth']
            elif trade_symbol == 'BTC':
                filename = config['default']['filename_btc']
            symbol = trade_symbol + '/USDT'
        time_forward = int(config['trade']['time_forward'])
        time_interval = config['trade']['time_interval'] # candle interval; must not be below 5min, actually 15m
        since = client.milliseconds() - time_forward
        # Load the historical candles and find the newest stored timestamp.
        _all_data = pd.read_csv(filename)
        _all_data = _all_data.sort_values(by='candle_begin_time', ascending=False)
        last_time = _all_data.loc[0, 'candle_begin_time'] # most recent timestamp in the history file
        all_data = _all_data.copy()
        all_data['candle_begin_time'] = pd.to_datetime(all_data['candle_begin_time'])
        if real_data == "1": # live data requested: fetch recent candles and append
            df_real = get_bitfinex_candle_data(client.bitfinex1, symbol, time_interval, since=since, limit=1000)
            df_real.rename(columns={'candle_begin_time_GMT8':'candle_begin_time'}, inplace = True)
            df_real['candle_begin_time'] = pd.to_datetime(df_real['candle_begin_time'])
            df_real = df_real.sort_values(by='candle_begin_time', ascending=False)
            # Keep only candles newer than what the history file already has.
            df_real = df_real[df_real['candle_begin_time'] > last_time]
            all_data = all_data.append(df_real, ignore_index=True)
            all_data = all_data.sort_values(by='candle_begin_time', ascending=False)
        all_data = transfer_to_period_data(all_data, rule_type)
        print(all_data.columns.values.tolist())
        _forward_num = 0
        _backward_num = 0
        if (forward_num != ""):
            _forward_num = int(forward_num)
        if (backward_num != ""):
            _backward_num = int(backward_num)
        # Optional absolute time-window filtering.
        if (begin_time != ""):
            all_data = all_data[all_data['candle_begin_time'] >= pd.to_datetime(begin_time)]
        if (end_time != ""):
            all_data = all_data[all_data['candle_begin_time'] <= pd.to_datetime(end_time)]
        all_data.reset_index(inplace=True, drop=True)
        df = all_data.copy()
        # Default window: the most recent 1000 candles when nothing was requested.
        if (forward_num == "" and begin_time == "" and end_time == ""):
            _forward_num = 1000
        elif _forward_num != 0:
            df = df.iloc[-_forward_num:]
        if _backward_num != 0:
            df = df.iloc[_backward_num:]
        df['candle_begin_time'] = df['candle_begin_time'].apply(str)
        # Bollinger bands only when a [boll_param] section exists in config.
        if 'boll_param' in config:
            needBoll = True
            n = int(config['boll_param']['n'])
            m = float(config['boll_param']['m'])
            df = calcBolling(df,n,m)
        else:
            needBoll = False
            n = 0
            m = 0
            df['upper'] = np.nan
            df['lower'] = np.nan
            df['median'] = np.nan
        # EMA lines only when an [ema_param] section exists in config.
        if 'ema_param' in config:
            needEMA = True
            ema_short = int(config['ema_param']['ema_short'])
            ema_long = int(config['ema_param']['ema_long'])
            df["ema_short"] = calcEMA(df,ema_short)
            df["ema_long"] = calcEMA(df,ema_long)
        else:
            needEMA = False
            ema_short = 0
            ema_long = 0
            df['ema_short'] = np.nan
            df['ema_long'] = np.nan
        signal = '[],'
        # todo
        # following the pattern below, plug in your own trade-signal function
        # df = signal_moving_average(df)
        _df = df[['candle_begin_time','open','close','low','high']]
        _df_boll = df[['upper','lower','median','volume','ema_short','ema_long']]
        # NOTE(review): these fillna calls operate on column slices of a
        # selection and may trigger pandas SettingWithCopyWarning -- verify.
        _df_boll['upper'].fillna(value=0, inplace=True)
        _df_boll['lower'].fillna(value=0, inplace=True)
        _df_boll['median'].fillna(value=0, inplace=True)
        _df_boll['ema_short'].fillna(value=0, inplace=True)
        _df_boll['ema_long'].fillna(value=0, inplace=True)
        # Serialize the frames as Python-literal strings for the ECharts template.
        _df_list = np.array(_df).tolist()
        _df_boll_list= np.array(_df_boll).transpose().tolist()
        str_df_list = pformat(_df_list)
        str_df_boll_list = pformat(_df_boll_list)
        # Build ECharts markPoint entries for buy(1)/sell(-1)/close(other) signals.
        if 'signal' in df.columns.tolist():
            x = list(df[df['signal'].notnull()]['candle_begin_time'])
            y = list(df[df['signal'].notnull()]['high'])
            z = list(df[df['signal'].notnull()]['signal'])
            signal = '['
            for i in zip(x,y,z): #rgb(41,60,85)
                if i[2] ==1:
                    temp = "{coord:['"+str(i[0])+"',"+str(i[1]) + "], label:{ normal: { formatter: function (param) { return \"买\";}} } ,itemStyle: {normal: {color: 'rgb(214,18,165)'}}},"
                elif i[2] ==-1:
                    temp = "{coord:['" + str(i[0]) + "'," + str(
                        i[1]) + "] , label:{ normal: { formatter: function (param) { return \"卖\";}} } ,itemStyle: {normal: {color: 'rgb(0,0,255)'}}},"
                else:
                    temp = "{coord:['" + str(i[0]) + "'," + str(
                        i[1]) + "], label:{ normal: { formatter: function (param) { return \"平仓\";}} },itemStyle: {normal: {color: 'rgb(224,136,11)'}}},"
                signal += temp
            signal = signal.rstrip(',')
            signal += '],'
        _html = get_echarts_html(symbol,str_df_list,str_df_boll_list,signal)
        return _html
| [
"pprint.pformat",
"util.json_response",
"Trade.calcBolling",
"flask.request.args.get",
"Trade.calcEMA",
"pandas.read_csv",
"Trade.transfer_to_period_data",
"pandas.to_datetime",
"pytz.timezone",
"numpy.array",
"Trade.get_bitfinex_candle_data",
"echarts_data.get_echarts_html",
"configparser.C... | [((569, 596), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (594, 596), False, 'import configparser\n'), ((628, 658), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Shanghai"""'], {}), "('Asia/Shanghai')\n", (641, 658), False, 'import pytz\n'), ((673, 699), 'TradeClient.TradeClient', 'TradeClient', ([], {'setProxy': '(True)'}), '(setProxy=True)\n', (684, 699), False, 'from TradeClient import TradeClient\n'), ((855, 882), 'util.json_response', 'json_response', (['(200)', 'r', '(True)'], {}), '(200, r, True)\n', (868, 882), False, 'from util import json_response\n'), ((2134, 2155), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (2145, 2155), True, 'import pandas as pd\n'), ((2392, 2437), 'pandas.to_datetime', 'pd.to_datetime', (["all_data['candle_begin_time']"], {}), "(all_data['candle_begin_time'])\n", (2406, 2437), True, 'import pandas as pd\n'), ((3114, 3158), 'Trade.transfer_to_period_data', 'transfer_to_period_data', (['all_data', 'rule_type'], {}), '(all_data, rule_type)\n', (3137, 3158), False, 'from Trade import get_bitfinex_candle_data, transfer_to_period_data, calcBolling, calcSince, calcEMA\n'), ((5658, 5675), 'pprint.pformat', 'pformat', (['_df_list'], {}), '(_df_list)\n', (5665, 5675), False, 'from pprint import pprint, pformat\n'), ((5703, 5725), 'pprint.pformat', 'pformat', (['_df_boll_list'], {}), '(_df_boll_list)\n', (5710, 5725), False, 'from pprint import pprint, pformat\n'), ((6854, 6917), 'echarts_data.get_echarts_html', 'get_echarts_html', (['symbol', 'str_df_list', 'str_df_boll_list', 'signal'], {}), '(symbol, str_df_list, str_df_boll_list, signal)\n', (6870, 6917), False, 'from echarts_data import get_echarts_html\n'), ((1226, 1253), 'flask.request.args.get', 'request.args.get', (['"""forward"""'], {}), "('forward')\n", (1242, 1253), False, 'from flask import url_for, request\n'), ((1283, 1311), 'flask.request.args.get', 'request.args.get', (['"""backward"""'], {}), 
"('backward')\n", (1299, 1311), False, 'from flask import url_for, request\n'), ((1339, 1369), 'flask.request.args.get', 'request.args.get', (['"""begin_time"""'], {}), "('begin_time')\n", (1355, 1369), False, 'from flask import url_for, request\n'), ((1395, 1423), 'flask.request.args.get', 'request.args.get', (['"""end_time"""'], {}), "('end_time')\n", (1411, 1423), False, 'from flask import url_for, request\n'), ((1456, 1488), 'flask.request.args.get', 'request.args.get', (['"""trade_symbol"""'], {}), "('trade_symbol')\n", (1472, 1488), False, 'from flask import url_for, request\n'), ((2505, 2600), 'Trade.get_bitfinex_candle_data', 'get_bitfinex_candle_data', (['client.bitfinex1', 'symbol', 'time_interval'], {'since': 'since', 'limit': '(1000)'}), '(client.bitfinex1, symbol, time_interval, since=\n since, limit=1000)\n', (2529, 2600), False, 'from Trade import get_bitfinex_candle_data, transfer_to_period_data, calcBolling, calcSince, calcEMA\n'), ((2738, 2782), 'pandas.to_datetime', 'pd.to_datetime', (["df_real['candle_begin_time']"], {}), "(df_real['candle_begin_time'])\n", (2752, 2782), True, 'import pandas as pd\n'), ((4283, 4304), 'Trade.calcBolling', 'calcBolling', (['df', 'n', 'm'], {}), '(df, n, m)\n', (4294, 4304), False, 'from Trade import get_bitfinex_candle_data, transfer_to_period_data, calcBolling, calcSince, calcEMA\n'), ((4714, 4736), 'Trade.calcEMA', 'calcEMA', (['df', 'ema_short'], {}), '(df, ema_short)\n', (4721, 4736), False, 'from Trade import get_bitfinex_candle_data, transfer_to_period_data, calcBolling, calcSince, calcEMA\n'), ((4765, 4786), 'Trade.calcEMA', 'calcEMA', (['df', 'ema_long'], {}), '(df, ema_long)\n', (4772, 4786), False, 'from Trade import get_bitfinex_candle_data, transfer_to_period_data, calcBolling, calcSince, calcEMA\n'), ((5550, 5563), 'numpy.array', 'np.array', (['_df'], {}), '(_df)\n', (5558, 5563), True, 'import numpy as np\n'), ((3523, 3549), 'pandas.to_datetime', 'pd.to_datetime', (['begin_time'], {}), 
'(begin_time)\n', (3537, 3549), True, 'import pandas as pd\n'), ((3653, 3677), 'pandas.to_datetime', 'pd.to_datetime', (['end_time'], {}), '(end_time)\n', (3667, 3677), True, 'import pandas as pd\n'), ((5596, 5614), 'numpy.array', 'np.array', (['_df_boll'], {}), '(_df_boll)\n', (5604, 5614), True, 'import numpy as np\n')] |
import collections
import logging
import numpy as np
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import tensorflow as tf
from google.protobuf import text_format
import string_int_label_map_pb2
from PIL import Image
# Named colors used to give each detection class a stable box color:
# visualize_boxes_and_labels_on_image_array indexes this list by
# class_id % len(STANDARD_COLORS).
STANDARD_COLORS = [
    'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
    'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
    'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
    'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
    'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
    'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
    'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
    'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
    'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
    'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
    'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
    'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
    'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
    'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
    'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
    'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
    'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
    'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
    'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
    'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
    'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
    'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
    'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def load_image_into_numpy_array(image):
  """Convert a PIL-style RGB image into an (H, W, 3) uint8 numpy array."""
  width, height = image.size
  flat_pixels = np.array(image.getdata())
  return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
  """Alpha-blend a binary mask onto *image* in place using *color*.

  Args:
    image: uint8 numpy array of shape (H, W, 3), modified in place.
    mask: uint8 numpy array of shape (H, W) with values in {0, 1}.
    color: any color spec understood by PIL's ImageColor.
    alpha: blend opacity of the mask overlay.

  Raises:
    ValueError: on wrong dtypes, non-binary mask values, or shape mismatch.
  """
  if image.dtype != np.uint8:
    raise ValueError('`image` not of type np.uint8')
  if mask.dtype != np.uint8:
    raise ValueError('`mask` not of type np.uint8')
  if np.any(np.logical_and(mask != 1, mask != 0)):
    raise ValueError('`mask` elements should be in [0, 1]')
  if image.shape[:2] != mask.shape:
    raise ValueError('The image has spatial dimensions %s but the mask has '
                     'dimensions %s' % (image.shape[:2], mask.shape))
  rgb = ImageColor.getrgb(color)
  base = Image.fromarray(image)
  # A solid plane of the chosen color, shaped (H, W, 3).
  color_plane = np.expand_dims(
      np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
  overlay = Image.fromarray(np.uint8(color_plane)).convert('RGBA')
  blend_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
  composed = Image.composite(overlay, base, blend_mask)
  np.copyto(image, np.array(composed.convert('RGB')))
def draw_bounding_box_on_image_array(image,
                                     ymin,
                                     xmin,
                                     ymax,
                                     xmax,
                                     color='red',
                                     thickness=4,
                                     display_str_list=(),
                                     use_normalized_coordinates=True):
  """Draw one bounding box (with labels) onto a uint8 numpy image in place.

  Converts the array to a PIL image, delegates the drawing to
  draw_bounding_box_on_image, then copies the pixels back.
  """
  as_pil = Image.fromarray(np.uint8(image)).convert('RGB')
  draw_bounding_box_on_image(as_pil, ymin, xmin, ymax, xmax, color,
                             thickness, display_str_list,
                             use_normalized_coordinates)
  np.copyto(image, np.array(as_pil))
def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color='red',
                               thickness=4,
                               display_str_list=(),
                               use_normalized_coordinates=True):
  """Draw one bounding box with stacked label strings onto a PIL image.

  Coordinates are interpreted as [0, 1] fractions of the image size when
  use_normalized_coordinates is True, otherwise as absolute pixels.
  """
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  if use_normalized_coordinates:
    left = xmin * im_width
    right = xmax * im_width
    top = ymin * im_height
    bottom = ymax * im_height
  else:
    left, right, top, bottom = xmin, xmax, ymin, ymax
  outline = [(left, top), (left, bottom), (right, bottom),
             (right, top), (left, top)]
  draw.line(outline, width=thickness, fill=color)
  try:
    font = ImageFont.truetype('arial.ttf', 24)
  except IOError:
    font = ImageFont.load_default()
  # If the stacked labels would run off the top of the image, place them
  # below the bounding box instead of above it.
  label_heights = [font.getsize(text)[1] for text in display_str_list]
  # Each label gets a top and bottom margin of 0.05x its height.
  total_label_height = (1 + 2 * 0.05) * sum(label_heights)
  if top > total_label_height:
    text_bottom = top
  else:
    text_bottom = bottom + total_label_height
  # Render from the last string upwards.
  for text in reversed(display_str_list):
    text_width, text_height = font.getsize(text)
    margin = np.ceil(0.05 * text_height)
    draw.rectangle(
        [(left, text_bottom - text_height - 2 * margin),
         (left + text_width, text_bottom)],
        fill=color)
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        text,
        fill='black',
        font=font)
    text_bottom -= text_height - 2 * margin
def draw_keypoints_on_image(image,
                            keypoints,
                            color='red',
                            radius=2,
                            use_normalized_coordinates=True):
  """Draw a small filled circle for every (y, x) keypoint on a PIL image."""
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  xs = [point[1] for point in keypoints]
  ys = [point[0] for point in keypoints]
  if use_normalized_coordinates:
    xs = tuple(im_width * x for x in xs)
    ys = tuple(im_height * y for y in ys)
  for x, y in zip(xs, ys):
    bbox = [(x - radius, y - radius), (x + radius, y + radius)]
    draw.ellipse(bbox, outline=color, fill=color)
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
  """Draw keypoints onto a uint8 numpy image in place via a PIL round-trip."""
  as_pil = Image.fromarray(np.uint8(image)).convert('RGB')
  draw_keypoints_on_image(as_pil, keypoints, color, radius,
                          use_normalized_coordinates)
  np.copyto(image, np.array(as_pil))
def visualize_boxes_and_labels_on_image_array(
    image,
    boxes,
    classes,
    scores,
    category_index,
    instance_masks=None,
    instance_boundaries=None,
    keypoints=None,
    use_normalized_coordinates=False,
    max_boxes_to_draw=20,
    min_score_thresh=.5,
    agnostic_mode=False,
    line_thickness=4,
    groundtruth_box_visualization_color='black',
    skip_scores=False,
    skip_labels=False):
  """Overlay detection boxes (and optional masks/keypoints/labels) on *image*.

  Boxes at the same location are grouped: their label strings are stacked
  and they share one color. When `scores` is None the boxes are treated as
  groundtruth and drawn in `groundtruth_box_visualization_color`.

  Returns the same (mutated) image array.
  """
  # Per-box-location accumulators, keyed by the box coordinate tuple.
  display_map = collections.defaultdict(list)
  color_map = collections.defaultdict(str)
  masks_map = {}
  boundaries_map = {}
  keypoints_map = collections.defaultdict(list)
  if not max_boxes_to_draw:
    max_boxes_to_draw = boxes.shape[0]
  for idx in range(min(max_boxes_to_draw, boxes.shape[0])):
    if scores is not None and scores[idx] <= min_score_thresh:
      continue
    box = tuple(boxes[idx].tolist())
    if instance_masks is not None:
      masks_map[box] = instance_masks[idx]
    if instance_boundaries is not None:
      boundaries_map[box] = instance_boundaries[idx]
    if keypoints is not None:
      keypoints_map[box].extend(keypoints[idx])
    if scores is None:
      # Groundtruth boxes carry no label/score text.
      color_map[box] = groundtruth_box_visualization_color
      continue
    display_str = ''
    if not skip_labels and not agnostic_mode:
      if classes[idx] in category_index.keys():
        class_name = category_index[classes[idx]]['name']
      else:
        class_name = 'N/A'
      display_str = str(class_name)
    if not skip_scores:
      if not display_str:
        display_str = '{}%'.format(int(100*scores[idx]))
      else:
        display_str = '{}: {}%'.format(display_str, int(100*scores[idx]))
    display_map[box].append(display_str)
    if agnostic_mode:
      color_map[box] = 'DarkOrange'
    else:
      color_map[box] = STANDARD_COLORS[
          classes[idx] % len(STANDARD_COLORS)]
  # Render everything accumulated above onto the image.
  for box, color in color_map.items():
    ymin, xmin, ymax, xmax = box
    if instance_masks is not None:
      draw_mask_on_image_array(
          image,
          masks_map[box],
          color=color
      )
    if instance_boundaries is not None:
      draw_mask_on_image_array(
          image,
          boundaries_map[box],
          color='red',
          alpha=1.0
      )
    draw_bounding_box_on_image_array(
        image,
        ymin,
        xmin,
        ymax,
        xmax,
        color=color,
        thickness=line_thickness,
        display_str_list=display_map[box],
        use_normalized_coordinates=use_normalized_coordinates)
    if keypoints is not None:
      draw_keypoints_on_image_array(
          image,
          keypoints_map[box],
          color=color,
          radius=line_thickness / 2,
          use_normalized_coordinates=use_normalized_coordinates)
  return image
def _validate_label_map(label_map):
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
  """Build a dict mapping category id -> category dict."""
  return {category['id']: category for category in categories}
def get_max_label_map_index(label_map):
  """Return the largest id present in the label map."""
  return max(entry.id for entry in label_map.item)
def convert_label_map_to_categories(label_map,
                                    max_num_classes,
                                    use_display_name=True):
  """Convert a label-map proto into a list of {'id', 'name'} dicts.

  Entries whose id falls outside (0, max_num_classes] are skipped (logged),
  and duplicate ids are kept only once. When *label_map* is falsy, generic
  'category_N' names are generated for ids 1..max_num_classes.
  """
  if not label_map:
    offset = 1
    return [{'id': class_id + offset,
             'name': 'category_{}'.format(class_id + offset)}
            for class_id in range(max_num_classes)]
  categories = []
  seen_ids = []
  for entry in label_map.item:
    if not 0 < entry.id <= max_num_classes:
      logging.info(
          'Ignore item %d since it falls outside of requested '
          'label range.', entry.id)
      continue
    if use_display_name and entry.HasField('display_name'):
      name = entry.display_name
    else:
      name = entry.name
    if entry.id in seen_ids:
      continue
    seen_ids.append(entry.id)
    categories.append({'id': entry.id, 'name': name})
  return categories
def load_labelmap(path):
  """Load a StringIntLabelMap proto from *path*.

  Tries text-format parsing first and falls back to binary parsing, then
  validates the parsed map.

  NOTE(review): this function is defined twice in this file with identical
  bodies; the later definition shadows this one — consider removing one.
  """
  with tf.io.gfile.GFile(path, 'r') as fid:
    label_map_string = fid.read()
    label_map = string_int_label_map_pb2.StringIntLabelMap()
    try:
      text_format.Merge(label_map_string, label_map)
    except text_format.ParseError:
      # Not valid text format; assume a serialized binary proto.
      label_map.ParseFromString(label_map_string)
  _validate_label_map(label_map)
  return label_map
def load_labelmap(path):
  """Load a StringIntLabelMap proto from *path*.

  Tries text-format parsing first and falls back to binary parsing, then
  validates the parsed map.

  NOTE(review): exact duplicate of an earlier definition in this file;
  this second definition is the one that takes effect — consider removing
  the redundant copy.
  """
  with tf.io.gfile.GFile(path, 'r') as fid:
    label_map_string = fid.read()
    label_map = string_int_label_map_pb2.StringIntLabelMap()
    try:
      text_format.Merge(label_map_string, label_map)
    except text_format.ParseError:
      # Not valid text format; assume a serialized binary proto.
      label_map.ParseFromString(label_map_string)
  _validate_label_map(label_map)
  return label_map
def get_label_map_dict(label_map_path,
                       use_display_name=False,
                       fill_in_gaps_and_background=False):
  """Load a label map file and return a dict of name -> id.

  Args:
    label_map_path: path to the label map file.
    use_display_name: key the dict by display_name instead of name.
    fill_in_gaps_and_background: also add a 'background' -> 0 entry and
      synthetic 'class_N' entries for any missing ids between 1 and the
      maximum id, so the ids form a contiguous range.

  Raises:
    ValueError: if gap-filling is requested but the ids are not
      non-negative integers.
  """
  label_map = load_labelmap(label_map_path)
  label_map_dict = {}
  for entry in label_map.item:
    key = entry.display_name if use_display_name else entry.name
    label_map_dict[key] = entry.id
  if fill_in_gaps_and_background:
    values = set(label_map_dict.values())
    if 0 not in values:
      label_map_dict['background'] = 0
    if not all(isinstance(value, int) for value in values):
      raise ValueError('The values in label map must be integers in order to'
                       'fill_in_gaps_and_background.')
    if not all(value >= 0 for value in values):
      raise ValueError('The values in the label map must be positive.')
    if len(values) != max(values) + 1:
      # There are gaps in the ids; fill each one with a synthetic class.
      for value in range(1, max(values)):
        if value not in values:
          label_map_dict['class_' + str(value)] = value
  return label_map_dict
def create_categories_from_labelmap(label_map_path, use_display_name=True):
  """Load a label map file and convert it to a list of category dicts."""
  label_map = load_labelmap(label_map_path)
  num_classes = max(entry.id for entry in label_map.item)
  return convert_label_map_to_categories(label_map, num_classes,
                                         use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
  """Load a label map file and return a dict mapping category id -> category."""
  categories = create_categories_from_labelmap(label_map_path, use_display_name)
  return create_category_index(categories) | [
"numpy.uint8",
"numpy.ones_like",
"numpy.ceil",
"numpy.logical_and",
"PIL.ImageFont.load_default",
"PIL.Image.composite",
"PIL.ImageColor.getrgb",
"collections.defaultdict",
"PIL.ImageFont.truetype",
"string_int_label_map_pb2.StringIntLabelMap",
"logging.info",
"numpy.array",
"google.protobu... | [((2789, 2813), 'PIL.ImageColor.getrgb', 'ImageColor.getrgb', (['color'], {}), '(color)\n', (2806, 2813), True, 'import PIL.ImageColor as ImageColor\n'), ((2829, 2851), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (2844, 2851), False, 'from PIL import Image\n'), ((3119, 3172), 'PIL.Image.composite', 'Image.composite', (['pil_solid_color', 'pil_image', 'pil_mask'], {}), '(pil_solid_color, pil_image, pil_mask)\n', (3134, 3172), False, 'from PIL import Image\n'), ((4392, 4413), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (4406, 4413), True, 'import PIL.ImageDraw as ImageDraw\n'), ((6259, 6280), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (6273, 6280), True, 'import PIL.ImageDraw as ImageDraw\n'), ((7865, 7894), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (7888, 7894), False, 'import collections\n'), ((7917, 7945), 'collections.defaultdict', 'collections.defaultdict', (['str'], {}), '(str)\n', (7940, 7945), False, 'import collections\n'), ((8045, 8074), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (8068, 8074), False, 'import collections\n'), ((2494, 2530), 'numpy.logical_and', 'np.logical_and', (['(mask != 1)', '(mask != 0)'], {}), '(mask != 1, mask != 0)\n', (2508, 2530), True, 'import numpy as np\n'), ((3957, 3976), 'numpy.array', 'np.array', (['image_pil'], {}), '(image_pil)\n', (3965, 3976), True, 'import numpy as np\n'), ((4843, 4878), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', '(24)'], {}), "('arial.ttf', 24)\n", (4861, 4878), True, 'import PIL.ImageFont as ImageFont\n'), ((5617, 5644), 'numpy.ceil', 'np.ceil', (['(0.05 * text_height)'], {}), '(0.05 * text_height)\n', (5624, 5644), True, 'import numpy as np\n'), ((7252, 7271), 'numpy.array', 'np.array', (['image_pil'], {}), '(image_pil)\n', (7260, 7271), True, 'import numpy as np\n'), ((12111, 12139), 
'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['path', '"""r"""'], {}), "(path, 'r')\n", (12128, 12139), True, 'import tensorflow as tf\n'), ((12200, 12244), 'string_int_label_map_pb2.StringIntLabelMap', 'string_int_label_map_pb2.StringIntLabelMap', ([], {}), '()\n', (12242, 12244), False, 'import string_int_label_map_pb2\n'), ((12486, 12514), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['path', '"""r"""'], {}), "(path, 'r')\n", (12503, 12514), True, 'import tensorflow as tf\n'), ((12575, 12619), 'string_int_label_map_pb2.StringIntLabelMap', 'string_int_label_map_pb2.StringIntLabelMap', ([], {}), '()\n', (12617, 12619), False, 'import string_int_label_map_pb2\n'), ((2894, 2912), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (2906, 2912), True, 'import numpy as np\n'), ((4910, 4934), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (4932, 4934), True, 'import PIL.ImageFont as ImageFont\n'), ((11641, 11733), 'logging.info', 'logging.info', (['"""Ignore item %d since it falls outside of requested label range."""', 'item.id'], {}), "('Ignore item %d since it falls outside of requested label range.',\n item.id)\n", (11653, 11733), False, 'import logging\n'), ((12262, 12308), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['label_map_string', 'label_map'], {}), '(label_map_string, label_map)\n', (12279, 12308), False, 'from google.protobuf import text_format\n'), ((12637, 12683), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['label_map_string', 'label_map'], {}), '(label_map_string, label_map)\n', (12654, 12683), False, 'from google.protobuf import text_format\n'), ((2994, 3015), 'numpy.uint8', 'np.uint8', (['solid_color'], {}), '(solid_color)\n', (3002, 3015), True, 'import numpy as np\n'), ((3063, 3093), 'numpy.uint8', 'np.uint8', (['(255.0 * alpha * mask)'], {}), '(255.0 * alpha * mask)\n', (3071, 3093), True, 'import numpy as np\n'), ((3716, 3731), 'numpy.uint8', 'np.uint8', (['image'], 
{}), '(image)\n', (3724, 3731), True, 'import numpy as np\n'), ((7081, 7096), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (7089, 7096), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 2 20:46:53 2020
@author: Daniel
"""
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import statsmodels.api as sm
#from pyglmnet import GLM, simulate_glm
from sklearn import linear_model, model_selection
# Keep text editable in SVG exports instead of converting glyphs to paths.
plt.rcParams['svg.fonttype'] = 'none'
# Global seaborn theme applied to every figure produced by this script.
sns.set(context='paper',
        style='whitegrid',
        palette='colorblind',
        font='Arial',
        font_scale=2,
        color_codes=True)
# Load count and alignment data and merge them into one annotated dataframe
adata = sc.read_h5ad(r"E:\Dropbox\Dropbox\01_SST Project_daniel\033_PatchSeq CA3 SO\transcriptomics\Patch-Seq\count_exons_introns_full_named_postqc.h5ad")
full_df = pd.read_csv(r"C:\Users\Daniel\repos\PatchSeq\full_df.csv", index_col=0)
ephys_df = pd.read_csv(r"C:\Users\Daniel\repos\PatchSeq\ephys_df.csv", index_col=0)
adata.var_names_make_unique()
adata.obs_names_make_unique()
# Attach per-cell metadata to the AnnData observations (inner join on cell id).
adata.obs = pd.concat([adata.obs, full_df], axis=1, sort=False, join='inner')
adata.obs = adata.obs.loc[:, ~adata.obs.columns.duplicated()]  # drop duplicated metadata columns
# Keep only cells that have electrophysiology recordings.
adata = adata[adata.obs_names[adata.obs['ephys'] == 1],:]
sc.pp.log1p(adata, base=2, copy=False)  # log2(1 + x) transform of counts, in place
vg_channels = pd.read_csv(r"E:\Dropbox\Dropbox\01_SST Project_daniel\033_PatchSeq CA3 SO\transcriptomics\Patch-Seq\voltage_gated_ion_channels_list.txt",delimiter='\t')
vg_channels_names = [x.title() for x in vg_channels['Approved symbol']]
# Find genes that still exist in adata after quality control
vg_channels_names_exist = list(filter(lambda x: x in adata.var_names, vg_channels_names))
adata_vgcs = adata[:,vg_channels_names_exist]
np.random.seed(354536)  # fixed seed so the random control gene set is reproducible
# Random control: same number of genes as the voltage-gated channel set.
random_names = np.random.choice(adata.var_names, adata_vgcs.shape[1])
adata_randos = adata[:,random_names]
# Electrophysiological feature names (RS/FS/LS = first/fast/last spike metrics).
# NOTE(review): not referenced in the visible portion of this script — verify
# whether it is still needed.
col_names = [
    "Max. Freq. (Hz)",
    "Slow AHP (mV)",
    "Rheobase (pA)",
    "I at Max. Freq. (pA)",
    "Adaptation ratio",
    "Avg Spike Time (s)",
    "Input R (MOhm)",
    "Capacitance (pF)",
    "Sag Amplitude (mV)",
    "Resting (mV)",
    "RS AHP Amp. (mV)",
    "RS Max. Slope (mV/ms)",
    "RS Min. Slope (mV/ms)",
    "RS Peak (mV)",
    "RS Half Width (ms)",
    "RS Threshold (mV)",
    "FS AHP Amp. (mV)",
    "FS Max. Slope (mV/ms)",
    "FS Min. Slope (mV/ms)",
    "FS Peak (mV)",
    "FS Half Width (ms)",
    "FS Threshold (mV)",
    "LS AHP Amp. (mV)",
    "LS Max. Slope (mV/ms)",
    "LS Min. Slope (mV/ms)",
    "LS Peak (mV)",
    "LS Half Width (ms)",
    "LS Threshold (mV)"]
# Poisson GLM: predict max firing frequency from gene expression.
model = sm.families.Poisson()
parameter_name = "Max. Freq. (Hz)"
features = sm.add_constant(adata_vgcs.to_df())
classes = adata_vgcs.obs[parameter_name]
features_rnd = sm.add_constant(adata_randos.to_df())
# GLM to predict
poisson_model = sm.GLM(classes, features, family=model)
poisson_results = poisson_model.fit()
# In-sample prediction (no train/test split here — fit quality only).
prediction = poisson_results.predict(features)
fig, ax = plt.subplots(1)
ax.scatter(classes, prediction)
ax.plot([0, 300], [0, 300])  # identity line for reference
ax.set_title("Trained on VGCS")
ax.set_xlabel("Actual Max Freq.")
ax.set_ylabel("Predicted Max Freq.")
# Control: same model fitted on the random gene set.
poisson_model_rnd = sm.GLM(classes, features_rnd, family=model)
poisson_results_rnd = poisson_model_rnd.fit()
prediction_rnd = poisson_results_rnd.predict(features_rnd)
fig, ax = plt.subplots(1)
ax.scatter(classes, prediction_rnd)
ax.plot([0, 300], [0, 300])  # identity line for reference
ax.set_title("Trained on 95 Random Genes")
ax.set_xlabel("Actual Max Freq.")
ax.set_ylabel("Predicted Max Freq.")
# Use GLM only on n top genes
# Re-fit the model using only the genes with the smallest p-values.
pvalues = poisson_results.pvalues.drop('const')
n_top_genes = 50
# BUGFIX: the original selected genes with
#   pvalues[poisson_results.pvalues.argsort()[:n_top_genes]]
# i.e. positional indices computed on the FULL p-value series (which still
# contains 'const') applied to the const-dropped series — the positions are
# shifted and can even run past the end. nsmallest() picks the n smallest
# p-values directly and unambiguously.
top_gene_names = pvalues.nsmallest(n_top_genes).index
adata_top_genes = adata[:, top_gene_names]
features = sm.add_constant(adata_top_genes.to_df())
classes = adata_top_genes.obs[parameter_name]
# GLM to predict
poisson_model = sm.GLM(classes, features, family=model)
poisson_results = poisson_model.fit()
prediction = poisson_results.predict(features)
fig, ax = plt.subplots(1)
ax.scatter(classes, prediction)
ax.plot([0, 300], [0, 300])  # identity line for reference
ax.set_title("Trained on top genes")
ax.set_xlabel("Actual Max Freq.")
ax.set_ylabel("Predicted Max Freq.")
# Run the GLM again for random genes, same number as top genes
random_names = np.random.choice(adata.var_names, n_top_genes)
adata_randos = adata[:,random_names]
features_rnd = sm.add_constant(adata_randos.to_df())
poisson_model_rnd = sm.GLM(classes, features_rnd, model)
poisson_results_rnd = poisson_model_rnd.fit()
prediction_rnd = poisson_results_rnd.predict(features_rnd)
fig, ax = plt.subplots(1)
ax.scatter(classes, prediction_rnd)
ax.plot([0, 300], [0, 300])
ax.set_title("Trained on 50 Random Genes")
ax.set_xlabel("Actual Max Freq.")
ax.set_ylabel("Predicted Max Freq.")
"""
# GLM model on random genes as control
poisson_model_rnd = sm.GLM(y_train_rnd, X_train_rnd, family=sm.families.Poisson())
poisson_results_rnd = poisson_model_rnd.fit()
poisson_train_rnd_prediction = poisson_results_rnd.predict(X_train_rnd)
plt.figure()
plt.scatter(y_train_rnd, poisson_train_rnd_prediction)
poisson_test_rnd_prediction = poisson_results.predict(X_test_rnd)
plt.figure()
plt.scatter(y_test_rnd, poisson_test_rnd_prediction)
"""
#X_train, X_test, y_train, y_test = model_selection.train_test_split(features_vgcs, classes, test_size=1)
"""
# GLM to classify colocalizing cells
features = sm.add_constant(adata_vgcs.to_df())
classes = adata_vgcs.obs['Max. Freq. (Hz)']
poisson_model = sm.GLM(classes, features, family=sm.families.Gaussian())
poisson_results = poisson_model.fit()
print(poisson_results.summary())
poisson_out_df = pd.DataFrame({"classes": classes, "prediction": poisson_results.predict()})
fig, ax = plt.subplots(1)
sns.scatterplot(x="classes", y ="prediction", data=poisson_out_df)
# GLM to classify colocalizing cells
features = sm.add_constant(adata_randos.to_df())
classes = adata_randos.obs['Max. Freq. (Hz)']
poisson_model = sm.GLM(classes, features, family=sm.families.Gaussian())
poisson_results = poisson_model.fit()
print(poisson_results.summary())
poisson_out_df = pd.DataFrame({"classes": classes, "prediction": poisson_results.predict()})
fig, ax = plt.subplots(1)
sns.scatterplot(x="classes", y ="prediction", data=poisson_out_df)
""" | [
"statsmodels.api.GLM",
"numpy.random.seed",
"pandas.read_csv",
"scanpy.read_h5ad",
"matplotlib.pyplot.subplots",
"numpy.random.choice",
"statsmodels.api.families.Poisson",
"scanpy.pp.log1p",
"seaborn.set",
"pandas.concat"
] | [((357, 473), 'seaborn.set', 'sns.set', ([], {'context': '"""paper"""', 'style': '"""whitegrid"""', 'palette': '"""colorblind"""', 'font': '"""Arial"""', 'font_scale': '(2)', 'color_codes': '(True)'}), "(context='paper', style='whitegrid', palette='colorblind', font=\n 'Arial', font_scale=2, color_codes=True)\n", (364, 473), True, 'import seaborn as sns\n'), ((594, 756), 'scanpy.read_h5ad', 'sc.read_h5ad', (['"""E:\\\\Dropbox\\\\Dropbox\\\\01_SST Project_daniel\\\\033_PatchSeq CA3 SO\\\\transcriptomics\\\\Patch-Seq\\\\count_exons_introns_full_named_postqc.h5ad"""'], {}), "(\n 'E:\\\\Dropbox\\\\Dropbox\\\\01_SST Project_daniel\\\\033_PatchSeq CA3 SO\\\\transcriptomics\\\\Patch-Seq\\\\count_exons_introns_full_named_postqc.h5ad'\n )\n", (606, 756), True, 'import scanpy as sc\n'), ((751, 826), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Daniel\\\\repos\\\\PatchSeq\\\\full_df.csv"""'], {'index_col': '(0)'}), "('C:\\\\Users\\\\Daniel\\\\repos\\\\PatchSeq\\\\full_df.csv', index_col=0)\n", (762, 826), True, 'import pandas as pd\n'), ((834, 910), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Daniel\\\\repos\\\\PatchSeq\\\\ephys_df.csv"""'], {'index_col': '(0)'}), "('C:\\\\Users\\\\Daniel\\\\repos\\\\PatchSeq\\\\ephys_df.csv', index_col=0)\n", (845, 910), True, 'import pandas as pd\n'), ((980, 1045), 'pandas.concat', 'pd.concat', (['[adata.obs, full_df]'], {'axis': '(1)', 'sort': '(False)', 'join': '"""inner"""'}), "([adata.obs, full_df], axis=1, sort=False, join='inner')\n", (989, 1045), True, 'import pandas as pd\n'), ((1168, 1206), 'scanpy.pp.log1p', 'sc.pp.log1p', (['adata'], {'base': '(2)', 'copy': '(False)'}), '(adata, base=2, copy=False)\n', (1179, 1206), True, 'import scanpy as sc\n'), ((1222, 1392), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\Dropbox\\\\Dropbox\\\\01_SST Project_daniel\\\\033_PatchSeq CA3 SO\\\\transcriptomics\\\\Patch-Seq\\\\voltage_gated_ion_channels_list.txt"""'], {'delimiter': '"""\t"""'}), "(\n 
'E:\\\\Dropbox\\\\Dropbox\\\\01_SST Project_daniel\\\\033_PatchSeq CA3 SO\\\\transcriptomics\\\\Patch-Seq\\\\voltage_gated_ion_channels_list.txt'\n , delimiter='\\t')\n", (1233, 1392), True, 'import pandas as pd\n'), ((1648, 1670), 'numpy.random.seed', 'np.random.seed', (['(354536)'], {}), '(354536)\n', (1662, 1670), True, 'import numpy as np\n'), ((1686, 1740), 'numpy.random.choice', 'np.random.choice', (['adata.var_names', 'adata_vgcs.shape[1]'], {}), '(adata.var_names, adata_vgcs.shape[1])\n', (1702, 1740), True, 'import numpy as np\n'), ((2497, 2518), 'statsmodels.api.families.Poisson', 'sm.families.Poisson', ([], {}), '()\n', (2516, 2518), True, 'import statsmodels.api as sm\n'), ((2731, 2770), 'statsmodels.api.GLM', 'sm.GLM', (['classes', 'features'], {'family': 'model'}), '(classes, features, family=model)\n', (2737, 2770), True, 'import statsmodels.api as sm\n'), ((2868, 2883), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (2880, 2883), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3111), 'statsmodels.api.GLM', 'sm.GLM', (['classes', 'features_rnd'], {'family': 'model'}), '(classes, features_rnd, family=model)\n', (3074, 3111), True, 'import statsmodels.api as sm\n'), ((3229, 3244), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (3241, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3825, 3864), 'statsmodels.api.GLM', 'sm.GLM', (['classes', 'features'], {'family': 'model'}), '(classes, features, family=model)\n', (3831, 3864), True, 'import statsmodels.api as sm\n'), ((3962, 3977), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (3974, 3977), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4271), 'numpy.random.choice', 'np.random.choice', (['adata.var_names', 'n_top_genes'], {}), '(adata.var_names, n_top_genes)\n', (4241, 4271), True, 'import numpy as np\n'), ((4384, 4420), 'statsmodels.api.GLM', 'sm.GLM', (['classes', 'features_rnd', 'model'], {}), '(classes, features_rnd, 
model)\n', (4390, 4420), True, 'import statsmodels.api as sm\n'), ((4537, 4552), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (4549, 4552), True, 'import matplotlib.pyplot as plt\n')] |
import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
import pdb
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Split a 3x4 projection matrix into a 4x4 intrinsic matrix and a
    4x4 world-to-camera pose.

    When ``P`` is None the matrix is parsed from ``filename`` (whitespace
    separated rows, optionally preceded by a single header line).
    """
    if P is None:
        raw_lines = open(filename).read().splitlines()
        # A 4-line file carries one header line before the 3 matrix rows.
        if len(raw_lines) == 4:
            raw_lines = raw_lines[1:]
        tokens = (line.split(" ") for line in raw_lines)
        rows = [[tok[0], tok[1], tok[2], tok[3]] for tok in tokens]
        P = np.asarray(rows).astype(np.float32).squeeze()
    # OpenCV returns (cameraMatrix, rotMatrix, transVect, ...); keep the first three.
    K, R, t = cv.decomposeProjectionMatrix(P)[:3]
    K = K / K[2, 2]  # normalize so K[2, 2] == 1
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K
    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    # Camera center from the homogeneous translation vector.
    pose[:3, 3] = (t[:3] / t[3])[:, 0]
    return intrinsics, pose
def load_dtu_data(basedir, normalize=True, reso_level=2, mask=True, white_bg=True):
    """Load a DTU-style scene: images, (optional) masks, and camera poses.

    Args:
        basedir: scene root containing 'image/', 'mask/' and a cameras npz.
        normalize: if True, use 'cameras_sphere.npz' and apply the per-view
            scale matrices; otherwise use 'cameras_large.npz' unscaled.
        reso_level: integer downsampling factor applied to images and K.
        mask: if True, composite images against a constant background.
        white_bg: background value used when masking (1.0 white / 0.0 black).

    Returns:
        imgs, poses (w2c), render_poses, [H, W, focal], K, i_split
        (train/val/test index arrays), near, far, scale_mat of view 0.

    NOTE(review): with normalize=False, scale_mats_np is None, so the final
    `scale_mats_np[0]` raises TypeError — confirm callers always normalize.
    """
    rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*png')))
    mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*png')))
    render_cameras_name = 'cameras_sphere.npz' if normalize else 'cameras_large.npz'
    camera_dict = np.load(os.path.join(basedir, render_cameras_name))
    # world_mat_i: full projection matrix (world -> image) for view i.
    world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    if normalize:
        scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    else:
        scale_mats_np = None
    all_intrinsics = []
    all_poses = []
    all_imgs = []
    all_masks = []
    # OpenCV -> OpenGL axis flip (currently unused; see commented lines below).
    cv2gl = np.array([[1,0,0,0], [0,-1,0,0], [0,0,-1,0], [0,0,0,1]], dtype=np.float32)
    for i, (world_mat, im_name) in enumerate(zip(world_mats_np, rgb_paths)):
        if normalize:
            P = world_mat @ scale_mats_np[i]
        else:
            P = world_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P) # pose: w2c
        # pose = np.linalg.inv(pose) # convert to c2w
        # pose = pose @ cv2gl # convert from opencv to opengl
        all_intrinsics.append(intrinsics)
        all_poses.append(pose)
        if len(mask_paths) > 0:
            # Keep only RGB channels of the mask, scaled to [0, 1].
            all_masks.append((imageio.imread(mask_paths[i]) / 255.).astype(np.float32)[...,:3])
        all_imgs.append((imageio.imread(im_name) / 255.).astype(np.float32))
    # import ipdb; ipdb.set_trace()
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    H, W = imgs[0].shape[:2]
    # All views share the intrinsics of view 0.
    K = all_intrinsics[0]
    focal = all_intrinsics[0][0,0]
    print("Date original shape: ", H, W)
    if mask:
        assert len(mask_paths) > 0
        bg = 1. if white_bg else 0.
        masks = np.stack(all_masks, 0)
        # Alpha-composite each image over the constant background.
        imgs = imgs * masks + bg * (1 - masks)
    if reso_level > 1:
        H, W = int(H / reso_level), int(W / reso_level)
        # Downsample via torch interpolate (NHWC -> NCHW -> NHWC round trip).
        imgs = F.interpolate(torch.from_numpy(imgs).permute(0,3,1,2), size=(H, W)).permute(0,2,3,1).numpy()
        K[:2] /= reso_level
        focal /= reso_level
    # i_split = [np.arange(len(imgs)), np.arange(len(imgs))[::10], np.arange(len(imgs))[::10]]
    # Hard-coded DTU evaluation views; validation reuses the test split.
    i_test = [8, 13, 16, 21, 26, 31, 34]
    i_val = i_test
    i_train = list(set(np.arange(len(imgs))) - set(i_test))
    i_split = [np.array(i_train), np.array(i_val), np.array(i_test)]
    render_poses = poses[i_split[-1]]
    # near/far bounds from the training camera centers (currently constants).
    near, far = inward_nearfar_heuristic(poses[i_split[0], :3, 3])
    return imgs, poses, render_poses, [H, W, focal], K, i_split, near, far, scale_mats_np[0]
def inward_nearfar_heuristic(cam_o, ratio=0.05):
    """Return fixed (near, far) depth bounds.

    ``cam_o`` (camera origins) and ``ratio`` are accepted for interface
    compatibility but currently unused: the bounds are the constants (1, 5).
    """
    return 1, 5
class Dataset:
    """NeuS/IDR-style image dataset: loads images, masks and cameras from
    `data_dir` and normalizes the scene into a unit sphere at the origin."""
    def __init__(self, conf):
        """Read all images/masks and build per-view intrinsics and poses.

        `conf` is a config object exposing get_string/get_bool/get_float.
        """
        super(Dataset, self).__init__()
        print('Load data: Begin')
        self.device = torch.device('cuda')
        self.conf = conf
        self.data_dir = conf.get_string('data_dir')
        self.render_cameras_name = conf.get_string('render_cameras_name')
        self.object_cameras_name = conf.get_string('object_cameras_name')
        self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)
        self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)
        camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))
        self.camera_dict = camera_dict
        self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))
        self.n_images = len(self.images_lis)
        # Pixel values scaled to [0, 1) by dividing by 256.
        self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0
        self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))
        self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0
        # world_mat is a projection matrix from world to image
        self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
        self.scale_mats_np = []  # NOTE: immediately overwritten below
        # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.
        self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
        self.intrinsics_all = []
        self.pose_all = []
        for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):
            # Decompose the normalized projection into K and a w2c pose.
            P = world_mat @ scale_mat
            P = P[:3, :4]
            intrinsics, pose = load_K_Rt_from_P(None, P)
            self.intrinsics_all.append(torch.from_numpy(intrinsics).float())
            self.pose_all.append(torch.from_numpy(pose).float())
        self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu()  # [n_images, H, W, 3]
        self.masks  = torch.from_numpy(self.masks_np.astype(np.float32)).cpu()   # [n_images, H, W, 3]
        self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device)   # [n_images, 4, 4]
        self.intrinsics_all_inv = torch.inverse(self.intrinsics_all)  # [n_images, 4, 4]
        self.focal = self.intrinsics_all[0][0, 0]
        self.pose_all = torch.stack(self.pose_all).to(self.device)  # [n_images, 4, 4]
        self.H, self.W = self.images.shape[1], self.images.shape[2]
        self.image_pixels = self.H * self.W
        object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])
        object_bbox_max = np.array([ 1.01,  1.01,  1.01, 1.0])
        # Object scale mat: region of interest to **extract mesh**
        object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']
        # Map the bbox into normalized coordinates via inv(scale_0) @ object_scale.
        object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]
        object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]
        self.object_bbox_min = object_bbox_min[:3, 0]
        self.object_bbox_max = object_bbox_max[:3, 0]
        print('Load data: End')
    def near_far_from_sphere(self, rays_o, rays_d):
        """Per-ray near/far as the unit-sphere chord around the closest point."""
        a = torch.sum(rays_d**2, dim=-1, keepdim=True)
        b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)
        # mid = distance along the ray to the point closest to the origin.
        mid = 0.5 * (-b) / a
        near = mid - 1.0
        far = mid + 1.0
        return near, far
    def image_at(self, idx, resolution_level):
        """Return image `idx` downscaled by `resolution_level`, clipped to [0, 255]."""
        img = cv.imread(self.images_lis[idx])
        return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255) | [
"numpy.stack",
"torch.stack",
"torch.sum",
"numpy.asarray",
"imageio.imread",
"cv2.imread",
"numpy.array",
"numpy.linalg.inv",
"torch.device",
"numpy.eye",
"torch.inverse",
"os.path.join",
"cv2.decomposeProjectionMatrix",
"cv2.resize",
"torch.from_numpy"
] | [((648, 679), 'cv2.decomposeProjectionMatrix', 'cv.decomposeProjectionMatrix', (['P'], {}), '(P)\n', (676, 679), True, 'import cv2 as cv\n'), ((763, 772), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (769, 772), True, 'import numpy as np\n'), ((812, 839), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (818, 839), True, 'import numpy as np\n'), ((1688, 1779), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype=\n np.float32)\n', (1696, 1779), True, 'import numpy as np\n'), ((2480, 2501), 'numpy.stack', 'np.stack', (['all_imgs', '(0)'], {}), '(all_imgs, 0)\n', (2488, 2501), True, 'import numpy as np\n'), ((2514, 2536), 'numpy.stack', 'np.stack', (['all_poses', '(0)'], {}), '(all_poses, 0)\n', (2522, 2536), True, 'import numpy as np\n'), ((1275, 1317), 'os.path.join', 'os.path.join', (['basedir', 'render_cameras_name'], {}), '(basedir, render_cameras_name)\n', (1287, 1317), False, 'import os\n'), ((2768, 2790), 'numpy.stack', 'np.stack', (['all_masks', '(0)'], {}), '(all_masks, 0)\n', (2776, 2790), True, 'import numpy as np\n'), ((3314, 3331), 'numpy.array', 'np.array', (['i_train'], {}), '(i_train)\n', (3322, 3331), True, 'import numpy as np\n'), ((3333, 3348), 'numpy.array', 'np.array', (['i_val'], {}), '(i_val)\n', (3341, 3348), True, 'import numpy as np\n'), ((3350, 3366), 'numpy.array', 'np.array', (['i_test'], {}), '(i_test)\n', (3358, 3366), True, 'import numpy as np\n'), ((3805, 3825), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3817, 3825), False, 'import torch\n'), ((5941, 5975), 'torch.inverse', 'torch.inverse', (['self.intrinsics_all'], {}), '(self.intrinsics_all)\n', (5954, 5975), False, 'import torch\n'), ((6272, 6308), 'numpy.array', 'np.array', (['[-1.01, -1.01, -1.01, 1.0]'], {}), '([-1.01, -1.01, -1.01, 1.0])\n', (6280, 6308), True, 'import numpy as 
np\n'), ((6335, 6368), 'numpy.array', 'np.array', (['[1.01, 1.01, 1.01, 1.0]'], {}), '([1.01, 1.01, 1.01, 1.0])\n', (6343, 6368), True, 'import numpy as np\n'), ((6968, 7012), 'torch.sum', 'torch.sum', (['(rays_d ** 2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rays_d ** 2, dim=-1, keepdim=True)\n', (6977, 7012), False, 'import torch\n'), ((7243, 7274), 'cv2.imread', 'cv.imread', (['self.images_lis[idx]'], {}), '(self.images_lis[idx])\n', (7252, 7274), True, 'import cv2 as cv\n'), ((1054, 1092), 'os.path.join', 'os.path.join', (['basedir', '"""image"""', '"""*png"""'], {}), "(basedir, 'image', '*png')\n", (1066, 1092), False, 'import os\n'), ((1124, 1161), 'os.path.join', 'os.path.join', (['basedir', '"""mask"""', '"""*png"""'], {}), "(basedir, 'mask', '*png')\n", (1136, 1161), False, 'import os\n'), ((4252, 4305), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.render_cameras_name'], {}), '(self.data_dir, self.render_cameras_name)\n', (4264, 4305), False, 'import os\n'), ((7029, 7077), 'torch.sum', 'torch.sum', (['(rays_o * rays_d)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rays_o * rays_d, dim=-1, keepdim=True)\n', (7038, 7077), False, 'import torch\n'), ((4384, 4426), 'os.path.join', 'os.path.join', (['self.data_dir', '"""image/*.png"""'], {}), "(self.data_dir, 'image/*.png')\n", (4396, 4426), False, 'import os\n'), ((4606, 4647), 'os.path.join', 'os.path.join', (['self.data_dir', '"""mask/*.png"""'], {}), "(self.data_dir, 'mask/*.png')\n", (4618, 4647), False, 'import os\n'), ((5837, 5869), 'torch.stack', 'torch.stack', (['self.intrinsics_all'], {}), '(self.intrinsics_all)\n', (5848, 5869), False, 'import torch\n'), ((6070, 6096), 'torch.stack', 'torch.stack', (['self.pose_all'], {}), '(self.pose_all)\n', (6081, 6096), False, 'import torch\n'), ((6474, 6527), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.object_cameras_name'], {}), '(self.data_dir, self.object_cameras_name)\n', (6486, 6527), False, 'import os\n'), ((6570, 6606), 
'numpy.linalg.inv', 'np.linalg.inv', (['self.scale_mats_np[0]'], {}), '(self.scale_mats_np[0])\n', (6583, 6606), True, 'import numpy as np\n'), ((6679, 6715), 'numpy.linalg.inv', 'np.linalg.inv', (['self.scale_mats_np[0]'], {}), '(self.scale_mats_np[0])\n', (6692, 6715), True, 'import numpy as np\n'), ((7291, 7363), 'cv2.resize', 'cv.resize', (['img', '(self.W // resolution_level, self.H // resolution_level)'], {}), '(img, (self.W // resolution_level, self.H // resolution_level))\n', (7300, 7363), True, 'import cv2 as cv\n'), ((4509, 4527), 'cv2.imread', 'cv.imread', (['im_name'], {}), '(im_name)\n', (4518, 4527), True, 'import cv2 as cv\n'), ((4684, 4702), 'cv2.imread', 'cv.imread', (['im_name'], {}), '(im_name)\n', (4693, 4702), True, 'import cv2 as cv\n'), ((590, 607), 'numpy.asarray', 'np.asarray', (['lines'], {}), '(lines)\n', (600, 607), True, 'import numpy as np\n'), ((2376, 2399), 'imageio.imread', 'imageio.imread', (['im_name'], {}), '(im_name)\n', (2390, 2399), False, 'import imageio\n'), ((5497, 5525), 'torch.from_numpy', 'torch.from_numpy', (['intrinsics'], {}), '(intrinsics)\n', (5513, 5525), False, 'import torch\n'), ((5568, 5590), 'torch.from_numpy', 'torch.from_numpy', (['pose'], {}), '(pose)\n', (5584, 5590), False, 'import torch\n'), ((2285, 2314), 'imageio.imread', 'imageio.imread', (['mask_paths[i]'], {}), '(mask_paths[i])\n', (2299, 2314), False, 'import imageio\n'), ((2948, 2970), 'torch.from_numpy', 'torch.from_numpy', (['imgs'], {}), '(imgs)\n', (2964, 2970), False, 'import torch\n')] |
from random import randrange,seed
import numpy as np
import pandas as pd
from copy import deepcopy
from numpy import array,var,mean
from numpy import polyfit
import json
#parameter set up
start_ind = 20000      # first Id written to res.csv (re-read from testing.csv below)
col_sample_lim = 10    # number of candidate features sampled per split search
row_sample_lim = 800   # bootstrap sample size per tree (recomputed from the data later)
err_thres = 1e-7       # stop splitting when error improvement falls below this
forest_num = 150       # number of trees in the forest
max_depth = 100        # maximum depth of a single tree
D_array = []           # per-tree bootstrap datasets
tree_array = []        # fitted decision trees
second_fit_model = []  # polyfit coefficients mapping raw prediction -> response
original_D = []        # full training set {'x': features, 'y': labels}
oobCheckResult = []    # per-tree weights derived from training error
'''
take mean of label subset
'''
def avg(D):
    """Return the mean of a label subset, or 0 when the mean is undefined.

    Bug fix: the original guard compared ``res == np.nan``, which is always
    False (NaN never compares equal to anything, including itself), so the
    fallback never fired. ``np.isnan`` performs the intended check.
    """
    res = mean(D)
    # mean([]) yields NaN (with a RuntimeWarning); treat that as 0.
    if np.isnan(res):
        return 0
    return res
'''
calc reg_error by var*size
'''
def reg_error(D):
    """Regression error proxy for a subset: label variance times subset size.

    Returns 0 when the variance is undefined (empty subset -> NaN).
    Bug fix: the original compared ``res == np.nan``, which is always False;
    ``np.isnan`` performs the intended check.
    """
    res = var(D['y']) * len(D['y'])
    if np.isnan(res):
        return 0
    return res
'''
split the subset into two subsets of given feature
< split point, first subset
> split point, second subset
'''
def do_split(D, feature, split_point):
    """Partition dataset D on one feature at a threshold.

    Rows with x[feature] < split_point go to the first returned subset,
    the remaining rows to the second; labels travel with their rows.
    Feature matrices are returned as numpy arrays, labels as plain lists.
    """
    lo = {'x': [], 'y': []}
    hi = {'x': [], 'y': []}
    for row, label in zip(D['x'], D['y']):
        bucket = lo if row[feature] < split_point else hi
        bucket['x'].append(row)
        bucket['y'].append(label)
    return ({'x': array(lo['x']), 'y': lo['y']},
            {'x': array(hi['x']), 'y': hi['y']})
'''
find the best split point by trying all the values of randomly selected features
'''
def find_split_point(D):
    """Search a random subset of features for the split minimizing regression error.

    Samples col_sample_lim distinct feature indices, tries every observed
    value of each as a threshold, and scores candidates by the summed
    reg_error of the two resulting subsets.

    Returns:
        (feature_index, split_value) for the best split, or
        (None, mean label) when no split improves the current error by
        more than err_thres.
    """
    selected_feature = set([])
    # Keep drawing until col_sample_lim distinct features are collected.
    while(len(selected_feature)< col_sample_lim):
        selected_feature.add( randrange( len(D['x'][0] ) ) ) #pick up some features
    now_error = reg_error( D )
    sub_D_0 = []
    sub_D_1 = []
    best_feature = None
    best_split_point = None
    best_reg_error = 2147483647  # sentinel: larger than any real error
    for F in selected_feature:
        split_value = set( D['x'][ :, F] ) #all unique value for this feature
        for V in split_value:
            sub_D_0, sub_D_1 = do_split( D, F, V )
            new_eval = reg_error(sub_D_0) + reg_error(sub_D_1)
            if( new_eval < best_reg_error ):
                best_reg_error = new_eval
                best_feature = F
                best_split_point = V
    if( np.abs(best_reg_error - now_error) <= err_thres ): #no improvement, no need for split
        return None, mean(D['y'])
    else:
        return best_feature, best_split_point
'''
create a single decision tree traversely
'''
def create_tree(D, dep):
    """Recursively grow a regression tree over dataset D.

    Internal nodes are dicts with keys 'F' (feature), 'V' (threshold),
    'l_child' and 'r_child'; leaves are plain numbers (mean label).
    Recursion stops when no useful split exists, dep exceeds max_depth,
    or the subset holds at most two samples.
    """
    F,V = find_split_point(D)
    # When no split helps, V already holds mean(D['y']) — return it as a leaf.
    if( F == None ): return V
    if( (dep > max_depth) or (len(D['y'])<=2) ): return mean(D['y'])
    node = {}
    sub_D_0, sub_D_1 = do_split( D, F, V )
    node[ 'l_child' ] = create_tree(sub_D_0, dep+1)
    node[ 'r_child' ] = create_tree(sub_D_1, dep+1)
    node[ 'F' ] = F
    node[ 'V' ] = V
    #print(node)
    return node
'''
get the predicted value from a single decision tree
'''
def get_value(node, input):
    """Evaluate one decision tree on a feature vector and return the leaf value.

    Descends left when input[F] < V, right otherwise; if the preferred
    branch evaluates to None, the sibling branch is tried as a fallback.
    """
    # Leaf nodes carry the prediction directly.
    if not isinstance(node, dict):
        return node
    if input[node['F']] < node['V']:
        first, second = 'l_child', 'r_child'
    else:
        first, second = 'r_child', 'l_child'
    res = get_value(node[first], input)
    if res is None:
        res = get_value(node[second], input)
    return res
'''
randomly selected samples and build different decision trees
'''
def build_forest(D):
    """Grow forest_num trees by bagging: bootstrap-sample D and fit each tree.

    Side effects: rebuilds the module-level D_array (per-tree bootstrap
    subsets) and tree_array (fitted trees). Prints the tree index as a
    crude progress indicator.
    """
    global D_array
    global tree_array
    D_array = []
    tree_array = []
    for i in range( forest_num ):
        print(i)
        D_array.append({'x':[], 'y': []})
        # Draw row_sample_lim rows with replacement (bootstrap sample).
        for j in range( row_sample_lim ):
            ind = randrange( len(D['x'] ) )
            D_array[i]['x'].append( D['x'][ind] )
            D_array[i]['y'].append( deepcopy(D['y'][ind]) )
        D_array[i]['x'] = array( deepcopy(D_array[i]['x']) )
        #print(D_array[i])
        tree_array.append( create_tree(D_array[i], 0) )
'''
predict values of the given P input
if is_polyfit is true,
we are using training input to find the linear relation ship
between predicted value and actual value
'''
def do_prediction(P, is_polyfit = False):
    """Predict responses for input rows P with the fitted forest.

    With is_polyfit=True (calibration pass, P = training features):
      - dumps the forest to model_new.json,
      - accumulates each tree's absolute training error into oobCheckResult,
      - converts those errors into normalized inverse-error tree weights,
      - fits second_fit_model, a degree-1 polyfit mapping the raw forest
        mean to the actual response.
    With is_polyfit=False (test pass): computes a weighted forest prediction,
    applies second_fit_model, and appends Id,Response rows to res.csv,
    numbering rows from the module-level start_ind.
    """
    res = []
    global start_ind
    global second_fit_model
    global oobCheckResult
    if(is_polyfit):
        # One error accumulator per tree; also persist the forest to disk.
        for tree in tree_array:
            oobCheckResult.append(0)
        with open('model_new.json','w') as f:
            f.write( json.dumps(tree_array) )
    pred_ind = 0
    for i in P:
        sum = 0
        tree_ind = 0
        for tree in tree_array:
            temp = get_value( tree, i)
            if(is_polyfit):
                # Calibration: track each tree's absolute training error.
                oobCheckResult[tree_ind] += np.abs( original_D['y'][pred_ind] - temp )
            else:
                # Test: weight each tree's vote by its calibrated weight.
                temp *= oobCheckResult[tree_ind]
            sum += temp
            tree_ind += 1
        pred_ind += 1
        if( is_polyfit ):
            res.append( sum / forest_num )  # plain forest mean
        else:
            res.append(sum)  # already a weighted sum (weights sum to 1)
    if(is_polyfit):
        # Turn accumulated errors into normalized inverse-error weights.
        for i in range( len(oobCheckResult) ):
            oobCheckResult[i]/=len(original_D['x'])
            oobCheckResult[i] = 1/oobCheckResult[i]
        sum_of_oob = np.sum(oobCheckResult)
        for i in range( len(oobCheckResult) ):
            oobCheckResult[i]/=sum_of_oob
    if( not is_polyfit):
        print(oobCheckResult)
        f = open('res.csv', 'w')
        f.write('Id,Response\n')
        for i in res:
            #f.write( str(start_ind) + ',' + str( int(i+0.5) ) + '\n' )
            #print(i)
            # Evaluate the calibration polynomial at the raw prediction i.
            temp = 0
            for ind in range( len(second_fit_model) ):
                temp += (i** (len(second_fit_model) - ind - 1) ) * second_fit_model[ind]
            #print(temp)
            # Round to the nearest integer response class.
            f.write( str(start_ind) + ',' + str( int(temp+0.5) ) + '\n' )
            start_ind += 1
        f.close()
    else:
        # Linear map raw forest prediction -> actual response.
        second_fit_model = polyfit(res, original_D['y'], 1)
        print(oobCheckResult)
        print( second_fit_model )
start_ind = 0
'''
get the start ID
'''
# The second line of testing.csv is the first data row; its Id seeds start_ind.
with open('data/testing.csv') as f:
    l = f.readline()
    l = f.readline()
    start_ind = int(l.split(',')[0])
#preprocessing, see readme.txt
if(True):
    train = pd.read_csv("data/training.csv")
    test = pd.read_csv("data/testing.csv")
    banned_key = ["Id", "Response"]
    # Concatenate train+test so categorical encoding and fill are consistent.
    # NOTE(review): DataFrame.append is deprecated/removed in pandas >= 2.0;
    # pd.concat([train, test]) is the modern equivalent — confirm pandas version.
    original_D = train.append(test)
    original_D[ 'Product_Info_2' ] = pd.factorize(original_D["Product_Info_2"])[0]
    # Test rows have no Response; fillna(-1) marks them, so Response<0 == test.
    original_D.fillna(-1, inplace=True)
    original_D['Response'] = original_D['Response'].astype(int)
    train = original_D[ original_D['Response']>0 ].copy()
    test = original_D[original_D['Response']<0].copy()
    target_vars = [col for col in train.columns if col not in banned_key]
    #print( train['response'])
    # Bootstrap sample size: 20% of the training rows, at least 800.
    row_sample_lim = max( int(len(train["Response"])/5), 800 )
    original_D = {'x': array(train[target_vars]) , 'y':array(train["Response"])}
    build_forest( {'x': array(train[target_vars]) , 'y':array(train["Response"])} )
    # Calibration pass on the training rows, then the real test pass.
    do_prediction( array(train[target_vars]), True)
    do_prediction( array(test[target_vars]) )
| [
"copy.deepcopy",
"numpy.abs",
"numpy.sum",
"numpy.polyfit",
"pandas.read_csv",
"json.dumps",
"numpy.mean",
"numpy.array",
"pandas.factorize",
"numpy.var"
] | [((448, 455), 'numpy.mean', 'mean', (['D'], {}), '(D)\n', (452, 455), False, 'from numpy import array, var, mean\n'), ((6187, 6219), 'pandas.read_csv', 'pd.read_csv', (['"""data/training.csv"""'], {}), "('data/training.csv')\n", (6198, 6219), True, 'import pandas as pd\n'), ((6231, 6262), 'pandas.read_csv', 'pd.read_csv', (['"""data/testing.csv"""'], {}), "('data/testing.csv')\n", (6242, 6262), True, 'import pandas as pd\n'), ((577, 588), 'numpy.var', 'var', (["D['y']"], {}), "(D['y'])\n", (580, 588), False, 'from numpy import array, var, mean\n'), ((2116, 2150), 'numpy.abs', 'np.abs', (['(best_reg_error - now_error)'], {}), '(best_reg_error - now_error)\n', (2122, 2150), True, 'import numpy as np\n'), ((2495, 2507), 'numpy.mean', 'mean', (["D['y']"], {}), "(D['y'])\n", (2499, 2507), False, 'from numpy import array, var, mean\n'), ((5138, 5160), 'numpy.sum', 'np.sum', (['oobCheckResult'], {}), '(oobCheckResult)\n', (5144, 5160), True, 'import numpy as np\n'), ((5869, 5901), 'numpy.polyfit', 'polyfit', (['res', "original_D['y']", '(1)'], {}), "(res, original_D['y'], 1)\n", (5876, 5901), False, 'from numpy import polyfit\n'), ((6372, 6414), 'pandas.factorize', 'pd.factorize', (["original_D['Product_Info_2']"], {}), "(original_D['Product_Info_2'])\n", (6384, 6414), True, 'import pandas as pd\n'), ((6830, 6855), 'numpy.array', 'array', (['train[target_vars]'], {}), '(train[target_vars])\n', (6835, 6855), False, 'from numpy import array, var, mean\n'), ((6862, 6886), 'numpy.array', 'array', (["train['Response']"], {}), "(train['Response'])\n", (6867, 6886), False, 'from numpy import array, var, mean\n'), ((6992, 7017), 'numpy.array', 'array', (['train[target_vars]'], {}), '(train[target_vars])\n', (6997, 7017), False, 'from numpy import array, var, mean\n'), ((7045, 7069), 'numpy.array', 'array', (['test[target_vars]'], {}), '(test[target_vars])\n', (7050, 7069), False, 'from numpy import array, var, mean\n'), ((1187, 1198), 'numpy.array', 'array', (['div0'], {}), 
'(div0)\n', (1192, 1198), False, 'from numpy import array, var, mean\n'), ((1219, 1230), 'numpy.array', 'array', (['div1'], {}), '(div1)\n', (1224, 1230), False, 'from numpy import array, var, mean\n'), ((2223, 2235), 'numpy.mean', 'mean', (["D['y']"], {}), "(D['y'])\n", (2227, 2235), False, 'from numpy import array, var, mean\n'), ((3793, 3818), 'copy.deepcopy', 'deepcopy', (["D_array[i]['x']"], {}), "(D_array[i]['x'])\n", (3801, 3818), False, 'from copy import deepcopy\n'), ((4381, 4403), 'json.dumps', 'json.dumps', (['tree_array'], {}), '(tree_array)\n', (4391, 4403), False, 'import json\n'), ((6912, 6937), 'numpy.array', 'array', (['train[target_vars]'], {}), '(train[target_vars])\n', (6917, 6937), False, 'from numpy import array, var, mean\n'), ((6944, 6968), 'numpy.array', 'array', (["train['Response']"], {}), "(train['Response'])\n", (6949, 6968), False, 'from numpy import array, var, mean\n'), ((3736, 3757), 'copy.deepcopy', 'deepcopy', (["D['y'][ind]"], {}), "(D['y'][ind])\n", (3744, 3757), False, 'from copy import deepcopy\n'), ((4642, 4682), 'numpy.abs', 'np.abs', (["(original_D['y'][pred_ind] - temp)"], {}), "(original_D['y'][pred_ind] - temp)\n", (4648, 4682), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import time
from typing import Any, Dict, List, Tuple, NoReturn
class EKF(object):
    """Extended Kalman filter over a planar trajectory.

    State X is an 8x1 column vector [x, y, v_x, v_y, a_x, a_y, j_x, j_y]
    (position, velocity, acceleration, jerk); observations are 2D
    positions (x_obs, y_obs).
    """
    def __init__(self):
        self.init()
    def init(self):
        """(Re)set state, covariances and noise matrices to their defaults."""
        #state vector
        # NOTE(review): np.array of int zeros has integer dtype, so the float
        # assignments in process() truncate until _predict() rebuilds X as
        # float — confirm whether X should be created with dtype=float.
        self.X = np.array([[0], # x
                        [0], # y
                        [0], # v_x
                        [0], # v_y
                        [0], # a_x
                        [0], # a_y
                        [0], # j_x
                        [0]]) # j_y
        #identity matrix
        self.I = np.eye(8)
        #process covariance (large initial uncertainty)
        self.P = 1000*self.I
        #jacobian h(x): observation picks out (x, y) from the state
        self.H = np.array([[1., 0., 0., 0., 0., 0., 0., 0.],
                        [0., 1., 0., 0., 0., 0., 0., 0.]])
        #covariance noise process (per-component magnitudes)
        coeficient_Q = np.array([0.1, # x
                        0.1, # y
                        0.5, # v_x
                        0.5, # v_y
                        0.1, # a_x
                        0.1, # a_y
                        0.5, # j_x
                        0.5])# j_y
        self.Q = np.eye(8)*coeficient_Q
        #covariance noise observation
        self.R = np.array([[1., 0.], # x_obs
                        [0., 1.]]) # y_obs
    def _update(self, z:np.array) -> np.ndarray:
        """Correction step: fuse a position observation z = (x_obs, y_obs).

        Returns the updated state as a flat length-8 array.
        """
        z = z.reshape(2,1) # x_obs, y_obs
        # estimate innovation
        y = z - np.dot(self.H, self.X)
        # estimate innovation covariance
        S = np.dot(np.dot(self.H, self.P), np.transpose(self.H)) + self.R
        # estimate the near-optimal kalman gain
        K = np.dot(np.dot(self.P, np.transpose(self.H)), np.linalg.inv(S))
        # update state
        self.X = self.X + np.dot(K, y)
        # update process covariance
        self.P = np.dot((self.I - np.dot(K, self.H)), self.P)
        return self.X.reshape(8)
    def _predict(self, dt:float):
        """
        A) prediction step
        - Equations:
        x = xo + vx*dt
        y = yo + vy*dt
        v_x = vo_x + ax*t
        v_y = vo_y + ay*t
        a_x = (v_x - vo_x)/dt
        a_y = (v_y - vo_y)/dt
        j_x = (a_x - ao_x)/dt
        j_y = (a_y - ao_y)/dt
        """
        # # # x y v_x v_y a_x a_y j_x j_y
        # jac_X = [[1., 0., dt, 0., 0., 0., 0., 0.], # x
        #         [0., 1., 0., dt, 0., 0., 0., 0.], # y
        #         [0., 0., 1., 0., dt, 0., 0., 0.], # v_x
        #         [0., 0., 0., 1., 0., dt, 0., 0.], # v_y
        #         [0., 0., 1./dt, 0., 0., 0., 0., 0.], # a_x
        #         [0., 0., 0., 1./dt, 0., 0., 0., 0.], # a_y
        #         [0., 0., 0., 0., 1./dt, 0., 0., 0.], # j_x
        #         [0., 0., 0., 0., 0., 1./dt, 0., 0.]] # j_y
        # x y v_x v_y a_x a_y j_x j_y
        jac_X = [[1., 0., dt, 0., 0., 0., 0., 0.], # x
                [0., 1., 0., dt, 0., 0., 0., 0.], # y
                [1./dt, 0., 0., 0., 0., 0., 0., 0.], # v_x
                [0., 1./dt, 0., 0., 0., 0., 0., 0.], # v_y
                [0., 0., 1./dt, 0., 0., 0., 0., 0.], # a_x
                [0., 0., 0., 1./dt, 0., 0., 0., 0.], # a_y
                [0., 0., 0., 0., 1./dt, 0., 0., 0.], # j_x
                [0., 0., 0., 0., 0., 1./dt, 0., 0.]] # j_y
        # estimate P (process covariance) (without control input U)
        self.P = np.dot(np.dot(jac_X, self.P), np.transpose(jac_X))
        # estimate new state vector X (prediction)
        _x = self.X[0] + self.X[2]*dt
        _y = self.X[1] + self.X[3]*dt
        _v_x = (_x - self.X[0])/dt#self.X[2] + self.X[4]*dt
        _v_y = (_y - self.X[1])/dt#self.X[3] + self.X[5]*dt
        _a_x = (_v_x - self.X[2])/dt
        _a_y = (_v_y - self.X[3])/dt
        _j_x = (_a_x - self.X[4])/dt
        _j_y = (_a_y - self.X[5])/dt
        self.X = np.array([_x,
                        _y,
                        _v_x,
                        _v_y,
                        _a_x,
                        _a_y,
                        _j_x,
                        _j_y]).reshape((8,1))
    # def _predict2(self, dt:float):
    #     """
    #     A) prediction step
    #     - Equations:
    #     x = xo + vx*dt + (axt^2)/2
    #     y = yo + vy*dt + (ayt^2)/2
    #     v_x = vo_x + ax*t
    #     v_y = vo_y + ay*t
    #     a_x = (v_x - vo_x)/dt
    #     a_y = (v_y - vo_y)/dt
    #     j_x = (a_x - ao_x)/dt
    #     j_y = (a_y - ao_y)/dt
    #     """
    #     # x y v_x v_y a_x a_y j_x j_y
    #     jac_X = [[1., 0., dt, 0., (dt*dt)/2., 0. , 0., 0.], # x
    #             [0., 1., 0., dt, 0., (dt*dt)/2., 0., 0.], # y
    #             [0., 0., 1., 0., dt, 0. , 0., 0.], # v_x
    #             [0., 0., 0., 1., 0., dt, 0., 0.], # v_y
    #             [0., 0., 1./dt, 0., 0., 0., 0., 0.], # a_x
    #             [0., 0., 0., 1./dt, 0., 0., 0., 0.], # a_y
    #             [0., 0., 0., 0., 1./dt, 0., 0., 0.], # j_x
    #             [0., 0., 0., 0., 0., 1./dt, 0., 0.]] # j_y
    #     # estimate P (process covariance) (without control input U)
    #     self.P = np.dot(np.dot(jac_X, self.P), np.transpose(jac_X))
    #     # estimate new state vector X (prediction)
    #     _x = self.X[0,0] + self.X[2,0]*dt + (self.X[4,0]*dt*dt)/2.
    #     _y = self.X[1,0] + self.X[3,0]*dt + (self.X[5,0]*dt*dt)/2.
    #     _v_x = self.X[2,0] + self.X[4,0]*dt
    #     _v_y = self.X[3,0] + self.X[5,0]*dt
    #     _a_x = (_v_x - self.X[2,0])/dt
    #     _a_y = (_v_y - self.X[3,0])/dt
    #     _j_x = (_a_x - self.X[4,0])/dt
    #     _j_y = (_a_y - self.X[5,0])/dt
    #     self.X = np.array([_x,
    #                     _y,
    #                     _v_x,
    #                     _v_y,
    #                     _a_x,
    #                     _a_y,
    #                     _j_x,
    #                     _j_y]).reshape((8,1))
    def clean(self)->NoReturn:
        """Reset the filter to its initial state."""
        self.init()
    def process(self, traj:np.ndarray) -> np.ndarray:
        """Run the filter over a trajectory of rows (t, x, y).

        Seeds position/velocity from the first two samples (acceleration is
        seeded with unseeded np.random.randint, so results are
        non-deterministic), then alternates predict/update per sample.
        Returns the filtered states wrapped in one extra leading axis,
        shape (1, len(traj)-1, 8).
        """
        self.X[0] = traj[0, 1] #x
        self.X[1] = traj[0, 2] #y
        self.X[2] = (traj[1, 1] - traj[0,1])/(traj[1, 0] - traj[0,0]) #vx
        self.X[3] = (traj[1, 2] - traj[0,2])/(traj[1, 0] - traj[0,0]) #vy
        self.X[4] = np.random.randint(-2,2,1)
        self.X[5] = np.random.randint(-2,2,1)
        last_t = traj[0,0]
        result = []
        for tj in traj[1:,:]:
            dt = tj[0] - last_t
            self._predict(dt = dt)
            x = self._update(z=tj[1:])
            result.append(x)
            last_t = tj[0]
        return np.asarray([result])
| [
"numpy.eye",
"numpy.asarray",
"numpy.transpose",
"numpy.random.randint",
"numpy.array",
"numpy.linalg.inv",
"numpy.dot"
] | [((209, 259), 'numpy.array', 'np.array', (['[[0], [0], [0], [0], [0], [0], [0], [0]]'], {}), '([[0], [0], [0], [0], [0], [0], [0], [0]])\n', (217, 259), True, 'import numpy as np\n'), ((420, 429), 'numpy.eye', 'np.eye', (['(8)'], {}), '(8)\n', (426, 429), True, 'import numpy as np\n'), ((506, 605), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0]])\n', (514, 605), True, 'import numpy as np\n'), ((639, 689), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.5, 0.5, 0.1, 0.1, 0.5, 0.5]'], {}), '([0.1, 0.1, 0.5, 0.5, 0.1, 0.1, 0.5, 0.5])\n', (647, 689), True, 'import numpy as np\n'), ((873, 907), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (881, 907), True, 'import numpy as np\n'), ((5079, 5106), 'numpy.random.randint', 'np.random.randint', (['(-2)', '(2)', '(1)'], {}), '(-2, 2, 1)\n', (5096, 5106), True, 'import numpy as np\n'), ((5119, 5146), 'numpy.random.randint', 'np.random.randint', (['(-2)', '(2)', '(1)'], {}), '(-2, 2, 1)\n', (5136, 5146), True, 'import numpy as np\n'), ((5335, 5355), 'numpy.asarray', 'np.asarray', (['[result]'], {}), '([result])\n', (5345, 5355), True, 'import numpy as np\n'), ((807, 816), 'numpy.eye', 'np.eye', (['(8)'], {}), '(8)\n', (813, 816), True, 'import numpy as np\n'), ((1049, 1071), 'numpy.dot', 'np.dot', (['self.H', 'self.X'], {}), '(self.H, self.X)\n', (1055, 1071), True, 'import numpy as np\n'), ((1269, 1285), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (1282, 1285), True, 'import numpy as np\n'), ((1324, 1336), 'numpy.dot', 'np.dot', (['K', 'y'], {}), '(K, y)\n', (1330, 1336), True, 'import numpy as np\n'), ((2648, 2669), 'numpy.dot', 'np.dot', (['jac_X', 'self.P'], {}), '(jac_X, self.P)\n', (2654, 2669), True, 'import numpy as np\n'), ((2671, 2690), 'numpy.transpose', 'np.transpose', (['jac_X'], {}), 
'(jac_X)\n', (2683, 2690), True, 'import numpy as np\n'), ((1121, 1143), 'numpy.dot', 'np.dot', (['self.H', 'self.P'], {}), '(self.H, self.P)\n', (1127, 1143), True, 'import numpy as np\n'), ((1145, 1165), 'numpy.transpose', 'np.transpose', (['self.H'], {}), '(self.H)\n', (1157, 1165), True, 'import numpy as np\n'), ((1246, 1266), 'numpy.transpose', 'np.transpose', (['self.H'], {}), '(self.H)\n', (1258, 1266), True, 'import numpy as np\n'), ((1395, 1412), 'numpy.dot', 'np.dot', (['K', 'self.H'], {}), '(K, self.H)\n', (1401, 1412), True, 'import numpy as np\n'), ((3047, 3101), 'numpy.array', 'np.array', (['[_x, _y, _v_x, _v_y, _a_x, _a_y, _j_x, _j_y]'], {}), '([_x, _y, _v_x, _v_y, _a_x, _a_y, _j_x, _j_y])\n', (3055, 3101), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author: kerlomz <<EMAIL>>
import time
import random
import numpy as np
import tensorflow as tf
import framework
import utils
from config import *
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from PIL import ImageFile
# let PIL load images whose byte stream is truncated instead of raising
ImageFile.LOAD_TRUNCATED_IMAGES = True
tf.logging.set_verbosity(tf.logging.INFO)
def compile_graph(acc):
    """Freeze the latest checkpoint into a standalone inference ``.pb`` file.

    Rebuilds the prediction graph, restores the newest checkpoint from
    ``MODEL_PATH``, converts all variables to constants keeping only the
    ``dense_decoded`` output node, and writes the result next to
    ``COMPILE_MODEL_PATH`` with the accuracy embedded in the file name.

    :param acc: model accuracy; ``int(acc * 10000)`` is appended to the
        output file name and forwarded to :func:`generate_config`.
    """
    input_graph = tf.Graph()
    sess = tf.Session(graph=input_graph)
    with sess.graph.as_default():
        # rebuild the graph in prediction mode so it matches the checkpoint
        model = framework.GraphOCR(
            RunMode.Predict,
            NETWORK_MAP[NEU_CNN],
            NETWORK_MAP[NEU_RECURRENT]
        )
        model.build_graph()
        input_graph_def = sess.graph.as_graph_def()
        saver = tf.train.Saver(var_list=tf.global_variables())
        tf.logging.info(tf.train.latest_checkpoint(MODEL_PATH))
        saver.restore(sess, tf.train.latest_checkpoint(MODEL_PATH))
        # bake the restored variables into graph constants
        output_graph_def = convert_variables_to_constants(
            sess,
            input_graph_def,
            output_node_names=['dense_decoded']
        )
    last_compile_model_path = COMPILE_MODEL_PATH.replace('.pb', '_{}.pb'.format(int(acc * 10000)))
    with tf.gfile.GFile(last_compile_model_path, mode='wb') as gf:
        gf.write(output_graph_def.SerializeToString())
    generate_config(acc)
def train_process(mode=RunMode.Trains):
    """Build the OCR model and train until the configured stopping criteria
    (accuracy, epoch count and cost thresholds) are all met, then freeze the
    graph via :func:`compile_graph`.

    :param mode: run mode used to build the graph (defaults to training).
    """
    model = framework.GraphOCR(mode, NETWORK_MAP[NEU_CNN], NETWORK_MAP[NEU_RECURRENT])
    model.build_graph()
    # assemble the train/test sample sets, either from tfrecords or by
    # listing the raw sample directories on disk
    tf.logging.info('Loading Trains DataSet...')
    train_feeder = utils.DataIterator(mode=RunMode.Trains)
    if TRAINS_USE_TFRECORDS:
        train_feeder.read_sample_from_tfrecords(TRAINS_PATH)
        tf.logging.info('Loading Test DataSet...')
        test_feeder = utils.DataIterator(mode=RunMode.Test)
        test_feeder.read_sample_from_tfrecords(TEST_PATH)
    else:
        if isinstance(TRAINS_PATH, list):
            origin_list = []
            for trains_path in TRAINS_PATH:
                origin_list += [os.path.join(trains_path, trains) for trains in os.listdir(trains_path)]
        else:
            origin_list = [os.path.join(TRAINS_PATH, trains) for trains in os.listdir(TRAINS_PATH)]
        np.random.shuffle(origin_list)
        if not HAS_TEST_SET:
            # no dedicated test set: carve one out of the shuffled samples
            test_list = origin_list[:TEST_SET_NUM]
            trains_list = origin_list[TEST_SET_NUM:]
        else:
            if isinstance(TEST_PATH, list):
                test_list = []
                for test_path in TEST_PATH:
                    test_list += [os.path.join(test_path, test) for test in os.listdir(test_path)]
            else:
                test_list = [os.path.join(TEST_PATH, test) for test in os.listdir(TEST_PATH)]
            np.random.shuffle(test_list)
            trains_list = origin_list
        train_feeder.read_sample_from_files(trains_list)
        tf.logging.info('Loading Test DataSet...')
        test_feeder = utils.DataIterator(mode=RunMode.Test)
        test_feeder.read_sample_from_files(test_list)
    tf.logging.info('Total {} Trains DataSets'.format(train_feeder.size))
    tf.logging.info('Total {} Test DataSets'.format(test_feeder.size))
    if test_feeder.size >= train_feeder.size:
        exception("The number of training sets cannot be less than the test set.", )
    num_train_samples = train_feeder.size
    num_test_samples = test_feeder.size
    if num_test_samples < TEST_BATCH_SIZE:
        exception(
            "The number of test sets cannot be less than the test batch size.",
            ConfigException.INSUFFICIENT_SAMPLE
        )
    num_batches_per_epoch = int(num_train_samples / BATCH_SIZE)
    config = tf.ConfigProto(
        # allow_soft_placement=True,
        log_device_placement=False,
        gpu_options=tf.GPUOptions(
            allocator_type='BFC',
            allow_growth=True,  # it will cause fragmentation.
            per_process_gpu_memory_fraction=GPU_USAGE)
    )
    accuracy = 0
    epoch_count = 1
    with tf.Session(config=config) as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)
        train_writer = tf.summary.FileWriter('logs', sess.graph)
        # resume from the latest checkpoint when one exists
        try:
            saver.restore(sess, tf.train.latest_checkpoint(MODEL_PATH))
        except ValueError:
            pass
        tf.logging.info('Start training...')
        while 1:
            shuffle_trains_idx = np.random.permutation(num_train_samples)
            start_time = time.time()
            last_train_avg_cost = 0
            for cur_batch in range(num_batches_per_epoch):
                batch_time = time.time()
                index_list = [
                    shuffle_trains_idx[i % num_train_samples] for i in
                    range(cur_batch * BATCH_SIZE, (cur_batch + 1) * BATCH_SIZE)
                ]
                if TRAINS_USE_TFRECORDS:
                    classified_batch = train_feeder.generate_batch_by_tfrecords(sess)
                else:
                    classified_batch = train_feeder.generate_batch_by_files(index_list)
                step = 0
                class_num = len(classified_batch)
                avg_cost = 0
                # one optimization step per shape class in the batch
                for index, (shape, batch) in enumerate(classified_batch.items()):
                    batch_inputs, batch_seq_len, batch_labels = batch
                    feed = {
                        model.inputs: batch_inputs,
                        model.labels: batch_labels,
                    }
                    summary_str, batch_cost, step, _ = sess.run(
                        [model.merged_summary, model.cost, model.global_step, model.train_op],
                        feed_dict=feed
                    )
                    avg_cost += batch_cost
                    last_train_avg_cost = avg_cost / class_num
                    train_writer.add_summary(summary_str, step)
                # NOTE(review): the checks below rely on `step`/`index`/`shape`
                # leaking out of the loop above; the `step not in
                # range(class_num)` guard suppresses the very first steps
                if step % 100 == index and step not in range(class_num):
                    tf.logging.info('Step: {} Time: {:.3f} sec/batch, Cost = {:.5f}, {}-BatchSize: {}'.format(
                        step,
                        time.time() - batch_time,
                        batch_cost,
                        shape,
                        len(batch_inputs)
                    ))
                if step % TRAINS_SAVE_STEPS == index and index == (class_num - 1) and step not in range(class_num):
                    saver.save(sess, SAVE_MODEL, global_step=step)
                    # tf.logging.info('save checkpoint at step {0}'.format(step))
                if step % TRAINS_VALIDATION_STEPS == (class_num - 1) and step not in range(class_num):
                    # periodic validation pass over one test batch
                    shuffle_test_idx = np.random.permutation(num_test_samples)
                    batch_time = time.time()
                    index_test = [
                        shuffle_test_idx[i % num_test_samples] for i in
                        range(cur_batch * TEST_BATCH_SIZE, (cur_batch + 1) * TEST_BATCH_SIZE)
                    ]
                    if TRAINS_USE_TFRECORDS:
                        classified_batch = test_feeder.generate_batch_by_tfrecords(sess)
                    else:
                        classified_batch = test_feeder.generate_batch_by_files(index_test)
                    all_dense_decoded = []
                    lr = 0
                    for index, (shape, batch) in enumerate(classified_batch.items()):
                        test_inputs, batch_seq_len, test_labels = batch
                        val_feed = {
                            model.inputs: test_inputs,
                            model.labels: test_labels
                        }
                        dense_decoded, sub_lr = sess.run(
                            [model.dense_decoded, model.lrn_rate],
                            feed_dict=val_feed
                        )
                        all_dense_decoded += dense_decoded.tolist()
                        lr += sub_lr
                    accuracy = utils.accuracy_calculation(
                        test_feeder.labels,
                        all_dense_decoded,
                        ignore_value=[0, -1],
                    )
                    log = "Epoch: {}, Step: {}, Accuracy = {:.4f}, Cost = {:.5f}, " \
                          "Time = {:.3f} sec/batch, LearningRate: {}"
                    tf.logging.info(log.format(
                        epoch_count,
                        step,
                        accuracy,
                        last_train_avg_cost, time.time() - batch_time, lr / len(classified_batch)
                    ))
                    if accuracy >= TRAINS_END_ACC and epoch_count >= TRAINS_END_EPOCHS and last_train_avg_cost <= TRAINS_END_COST:
                        break
            # all stopping criteria met: freeze the graph and stop training
            if accuracy >= TRAINS_END_ACC and epoch_count >= TRAINS_END_EPOCHS and last_train_avg_cost <= TRAINS_END_COST:
                compile_graph(accuracy)
                tf.logging.info('Total Time: {} sec.'.format(time.time() - start_time))
                break
            epoch_count += 1
def generate_config(acc):
    """Write a copy of the model config with the accuracy baked into the
    model name.

    Reads ``MODEL_CONFIG_PATH``, rewrites the ``ModelName`` entry to
    ``<TARGET_MODEL>_<int(acc * 10000)>`` and saves the result to
    ``OUTPUT_PATH/<TARGET_MODEL>_model.yaml``.

    :param acc: model accuracy used to tag the model name.
    """
    with open(MODEL_CONFIG_PATH, "r", encoding="utf8") as src:
        content = "".join(src.readlines())
    old_tag = "ModelName: {}".format(TARGET_MODEL)
    new_tag = "ModelName: {}_{}".format(TARGET_MODEL, int(acc * 10000))
    content = content.replace(old_tag, new_tag)
    target = os.path.join(OUTPUT_PATH, "{}_model.yaml".format(TARGET_MODEL))
    with open(target, "w", encoding="utf8") as dst:
        dst.write(content)
def main(_):
    """Entry point for ``tf.app.run``: initialize and run training.

    :param _: unused positional argument supplied by ``tf.app.run``.
    """
    init()
    train_process()
    tf.logging.info('Training completed.')
if __name__ == '__main__':
    # verbosity is also set at import time above; repeating it here is
    # harmless and keeps the script explicit when run directly
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
| [
"tensorflow.logging.info",
"tensorflow.global_variables_initializer",
"utils.accuracy_calculation",
"tensorflow.Session",
"framework.GraphOCR",
"tensorflow.logging.set_verbosity",
"utils.DataIterator",
"time.time",
"tensorflow.global_variables",
"tensorflow.python.framework.graph_util.convert_vari... | [((342, 383), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (366, 383), True, 'import tensorflow as tf\n'), ((428, 438), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (436, 438), True, 'import tensorflow as tf\n'), ((450, 479), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'input_graph'}), '(graph=input_graph)\n', (460, 479), True, 'import tensorflow as tf\n'), ((962, 1057), 'tensorflow.python.framework.graph_util.convert_variables_to_constants', 'convert_variables_to_constants', (['sess', 'input_graph_def'], {'output_node_names': "['dense_decoded']"}), "(sess, input_graph_def, output_node_names=[\n 'dense_decoded'])\n", (992, 1057), False, 'from tensorflow.python.framework.graph_util import convert_variables_to_constants\n'), ((1385, 1459), 'framework.GraphOCR', 'framework.GraphOCR', (['mode', 'NETWORK_MAP[NEU_CNN]', 'NETWORK_MAP[NEU_RECURRENT]'], {}), '(mode, NETWORK_MAP[NEU_CNN], NETWORK_MAP[NEU_RECURRENT])\n', (1403, 1459), False, 'import framework\n'), ((1489, 1533), 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading Trains DataSet..."""'], {}), "('Loading Trains DataSet...')\n", (1504, 1533), True, 'import tensorflow as tf\n'), ((1553, 1592), 'utils.DataIterator', 'utils.DataIterator', ([], {'mode': 'RunMode.Trains'}), '(mode=RunMode.Trains)\n', (1571, 1592), False, 'import utils\n'), ((9584, 9622), 'tensorflow.logging.info', 'tf.logging.info', (['"""Training completed."""'], {}), "('Training completed.')\n", (9599, 9622), True, 'import tensorflow as tf\n'), ((9665, 9706), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (9689, 9706), True, 'import tensorflow as tf\n'), ((9711, 9723), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (9721, 9723), True, 'import tensorflow as tf\n'), ((531, 621), 'framework.GraphOCR', 'framework.GraphOCR', 
(['RunMode.Predict', 'NETWORK_MAP[NEU_CNN]', 'NETWORK_MAP[NEU_RECURRENT]'], {}), '(RunMode.Predict, NETWORK_MAP[NEU_CNN], NETWORK_MAP[\n NEU_RECURRENT])\n', (549, 621), False, 'import framework\n'), ((1192, 1242), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['last_compile_model_path'], {'mode': '"""wb"""'}), "(last_compile_model_path, mode='wb')\n", (1206, 1242), True, 'import tensorflow as tf\n'), ((1691, 1733), 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading Test DataSet..."""'], {}), "('Loading Test DataSet...')\n", (1706, 1733), True, 'import tensorflow as tf\n'), ((1756, 1793), 'utils.DataIterator', 'utils.DataIterator', ([], {'mode': 'RunMode.Test'}), '(mode=RunMode.Test)\n', (1774, 1793), False, 'import utils\n'), ((2204, 2234), 'numpy.random.shuffle', 'np.random.shuffle', (['origin_list'], {}), '(origin_list)\n', (2221, 2234), True, 'import numpy as np\n'), ((2856, 2898), 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading Test DataSet..."""'], {}), "('Loading Test DataSet...')\n", (2871, 2898), True, 'import tensorflow as tf\n'), ((2921, 2958), 'utils.DataIterator', 'utils.DataIterator', ([], {'mode': 'RunMode.Test'}), '(mode=RunMode.Test)\n', (2939, 2958), False, 'import utils\n'), ((3980, 4005), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3990, 4005), True, 'import tensorflow as tf\n'), ((4033, 4066), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4064, 4066), True, 'import tensorflow as tf\n'), ((4186, 4227), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""logs"""', 'sess.graph'], {}), "('logs', sess.graph)\n", (4207, 4227), True, 'import tensorflow as tf\n'), ((4365, 4401), 'tensorflow.logging.info', 'tf.logging.info', (['"""Start training..."""'], {}), "('Start training...')\n", (4380, 4401), True, 'import tensorflow as tf\n'), ((830, 868), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['MODEL_PATH'], 
{}), '(MODEL_PATH)\n', (856, 868), True, 'import tensorflow as tf\n'), ((898, 936), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (924, 936), True, 'import tensorflow as tf\n'), ((2724, 2752), 'numpy.random.shuffle', 'np.random.shuffle', (['test_list'], {}), '(test_list)\n', (2741, 2752), True, 'import numpy as np\n'), ((3760, 3861), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allocator_type': '"""BFC"""', 'allow_growth': '(True)', 'per_process_gpu_memory_fraction': 'GPU_USAGE'}), "(allocator_type='BFC', allow_growth=True,\n per_process_gpu_memory_fraction=GPU_USAGE)\n", (3773, 3861), True, 'import tensorflow as tf\n'), ((4125, 4146), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4144, 4146), True, 'import tensorflow as tf\n'), ((4453, 4493), 'numpy.random.permutation', 'np.random.permutation', (['num_train_samples'], {}), '(num_train_samples)\n', (4474, 4493), True, 'import numpy as np\n'), ((4519, 4530), 'time.time', 'time.time', ([], {}), '()\n', (4528, 4530), False, 'import time\n'), ((783, 804), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (802, 804), True, 'import tensorflow as tf\n'), ((4273, 4311), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (4299, 4311), True, 'import tensorflow as tf\n'), ((4655, 4666), 'time.time', 'time.time', ([], {}), '()\n', (4664, 4666), False, 'import time\n'), ((6755, 6794), 'numpy.random.permutation', 'np.random.permutation', (['num_test_samples'], {}), '(num_test_samples)\n', (6776, 6794), True, 'import numpy as np\n'), ((6828, 6839), 'time.time', 'time.time', ([], {}), '()\n', (6837, 6839), False, 'import time\n'), ((8050, 8141), 'utils.accuracy_calculation', 'utils.accuracy_calculation', (['test_feeder.labels', 'all_dense_decoded'], {'ignore_value': '[0, -1]'}), '(test_feeder.labels, all_dense_decoded,\n ignore_value=[0, -1])\n', (8076, 8141), 
False, 'import utils\n'), ((9045, 9056), 'time.time', 'time.time', ([], {}), '()\n', (9054, 9056), False, 'import time\n'), ((8583, 8594), 'time.time', 'time.time', ([], {}), '()\n', (8592, 8594), False, 'import time\n'), ((6161, 6172), 'time.time', 'time.time', ([], {}), '()\n', (6170, 6172), False, 'import time\n')] |
"""
Voronoi Kivy App
===================
Runs the Voronoi GUI app.
"""
from kivy.support import install_twisted_reactor
# must run before anything imports twisted so the twisted reactor is
# driven by the kivy event loop
install_twisted_reactor()
from itertools import cycle
import logging
import os
import numpy as np
import json
import csv
import math
from functools import cmp_to_key
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.togglebutton import ToggleButton
from kivy.lang import Builder
from kivy.app import App
from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, \
Rectangle
from kivy.graphics.tesselator import Tesselator, WINDING_ODD, TYPE_POLYGONS
from kivy.graphics import Color
import matplotlib.pyplot as plt
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import Metrics, dp
from kivy.properties import NumericProperty
from kivy.graphics.context_instructions import \
PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction
from kivy.uix.spinner import Spinner
from kivy.uix.scatter import Scatter
from kivy.resources import resource_find, resource_add_path
import distopia
from distopia.app.voronoi_data import GeoDataCounty, GeoDataPrecinct2017
from distopia.precinct import Precinct
from distopia.mapping.voronoi import VoronoiMapping
from distopia.app.ros import RosBridge
__all__ = ('VoronoiWidget', 'VoronoiApp')
class VoronoiWidget(Widget):
    """The widget through which we interact with the precincts and districts.
    """
    # the VoronoiMapping that owns the precincts/districts being displayed
    voronoi_mapping = None
    # per-fiducial canvas instructions (markers/labels)
    fiducial_graphics = {}
    # canvas instructions for the district boundary lines
    district_graphics = []
    # precinct -> [Color, Mesh..., Color, Line] canvas instructions
    precinct_graphics = {}
    # palette cycle used to assign district colors
    colors = []
    # district identity -> rgb color list
    fiducials_color = {}
    # True when driven by TUIO fiducials on a table, False for mouse GUI
    table_mode = False
    # optional 3x3 matrix mapping raw touch coords to screen coords
    align_mat = None
    screen_offset = 0, 0
    # touch.uid -> per-touch bookkeeping dict
    touches = {}
    ros_bridge = None
    # fiducial marker ids that represent district blocks
    district_blocks_fid = []
    # NOTE(review): class default is 8 while __init__ defaults to 0 — confirm
    # which is the intended focus marker id
    focus_block_fid = 8
    focus_block_logical_id = 8
    # the touch currently acting as the focus block (or False/None)
    _has_focus = False
    district_metrics_fn = None
    state_metrics_fn = None
    show_voronoi_boundaries = False
    # logical district id that new GUI touches create fiducials for
    current_fid_id = None
    # GUI-mode position of the focus marker, if placed
    focus_gui_pos = None
    # metric names shown in the focus legend column(s)
    focus_metrics = []
    focus_metric_width = 100
    focus_metric_height = 100
    screen_size = (1920, 1080)
    # pixel width of the focus legend on the left of the map
    focus_region_width = 0
    n_focus_rows = 0
    n_focus_cols = 0
    # BoxLayout of district buttons, only created when not in table mode
    gui_touch_focus_buttons = None
    current_focus_metric = ''
    max_fiducials_per_district = 5
    visualize_metric_data = True
def __init__(
self, voronoi_mapping=None, table_mode=False, align_mat=None,
screen_offset=(0, 0), ros_bridge=None, district_blocks_fid=None,
focus_block_fid=0, focus_block_logical_id=0, district_metrics_fn=None,
state_metrics_fn=None,
show_voronoi_boundaries=False, focus_metrics=[],
focus_metric_width=100, focus_metric_height=100,
screen_size=(1920, 1080), max_fiducials_per_district=5,
visualize_metric_data=True, **kwargs):
super(VoronoiWidget, self).__init__(**kwargs)
self.voronoi_mapping = voronoi_mapping
self.ros_bridge = ros_bridge
self.district_blocks_fid = district_blocks_fid
self.focus_block_fid = focus_block_fid
self.focus_block_logical_id = focus_block_logical_id
self.show_voronoi_boundaries = show_voronoi_boundaries
self.max_fiducials_per_district = max_fiducials_per_district
self.visualize_metric_data = visualize_metric_data
self.focus_metrics = focus_metrics
self.focus_metric_width = focus_metric_width
self.focus_metric_height = focus_metric_height
self.screen_size = screen_size
self.n_focus_rows = rows = int(screen_size[1] // focus_metric_height)
self.n_focus_cols = cols = int(math.ceil(len(focus_metrics) / rows))
self.focus_region_width = cols * focus_metric_width
self.show_district_selection()
self.show_focus_region()
self.fiducial_graphics = {}
self.fiducials_color = {}
self.colors = cycle(plt.get_cmap('tab10').colors)
self.table_mode = table_mode
self.align_mat = align_mat
self.district_graphics = []
self.district_metrics_fn = district_metrics_fn
self.state_metrics_fn = state_metrics_fn
self.screen_offset = screen_offset
self.touches = {}
with self.canvas.before:
PushMatrix()
Translate(*[v * Metrics.density for v in screen_offset])
with self.canvas.after:
PopMatrix()
self.show_precincts()
    def show_district_selection(self):
        """In GUI mode, build the column of toggle buttons used to choose
        which district new touches will create fiducials for.

        Does nothing in table mode (the physical fiducial encodes the
        district there).
        """
        if not self.table_mode:
            # 34dp per button plus 5dp spacing between buttons
            h = 34 * len(self.district_blocks_fid) + 5 * (
                len(self.district_blocks_fid) - 1)
            box = self.gui_touch_focus_buttons = BoxLayout(
                orientation='vertical', size=(dp(100), dp(h)),
                spacing=dp(5), pos=(self.focus_region_width, 0))
            for i, val in enumerate(self.district_blocks_fid):
                btn = ToggleButton(
                    text='District {}'.format(i + 1), group='focus',
                    allow_no_selection=False)
                box.add_widget(btn)
                # default args bind the current btn/val (avoids the
                # late-binding closure pitfall)
                def update_current_fid(*largs, button=btn, value=val):
                    if button.state == 'down':
                        self.current_fid_id = value
                btn.fbind('state', update_current_fid)
            # select the first district by default
            box.children[-1].state = 'down'
            self.add_widget(box)
    def show_focus_region(self):
        """Draw the focus-metric legend (a grid of labeled cells on the left)
        and, in GUI mode, add a "Focus" toggle button.

        Does nothing when no focus metrics are configured.
        """
        focus_metrics = self.focus_metrics
        if not focus_metrics:
            return
        if not self.table_mode:
            btn = ToggleButton(
                text='Focus', group='focus', allow_no_selection=False)
            self.gui_touch_focus_buttons.add_widget(btn)
            def update_current_fid(*largs, button=btn):
                if button.state == 'down':
                    self.current_fid_id = self.focus_block_logical_id
            btn.fbind('state', update_current_fid)
        # fill the legend column by column, row-fastest; this order must
        # match the col * rows + row lookup in handle_focus_block
        i = 0
        focus_metric_width = self.focus_metric_width
        focus_metric_height = self.focus_metric_height
        for col in range(self.n_focus_cols):
            for row in range(self.n_focus_rows):
                name = focus_metrics[i]
                x0 = col * focus_metric_width
                x1 = x0 + focus_metric_width
                y0 = row * focus_metric_height
                y1 = y0 + focus_metric_height
                self.add_widget(
                    Factory.SizedLabel(text=name, pos=(x0, y0)))
                with self.canvas:
                    Line(points=[x0, y0, x1, y0, x1, y1, x0, y1], width=2)
                i += 1
                if i >= len(focus_metrics):
                    break
            if i >= len(focus_metrics):
                break
    def show_precincts(self):
        """Tessellate and draw every precinct polygon.

        For each precinct a fill Color, its Meshes, an outline Color and an
        outline Line are stored in ``precinct_graphics`` so later repaints
        can just mutate the first Color instruction.
        """
        precinct_graphics = self.precinct_graphics = {}
        with self.canvas:
            PushMatrix()
            # precinct coordinates are in map space: shift past the legend
            # and scale by the display density
            Translate(self.focus_region_width, 0)
            Scale(Metrics.density)
            for precinct in self.voronoi_mapping.precincts:
                # a polygon needs at least 3 (x, y) pairs
                assert len(precinct.boundary) >= 6
                tess = Tesselator()
                tess.add_contour(precinct.boundary)
                tess.tesselate(WINDING_ODD, TYPE_POLYGONS)
                graphics = [
                    Color(rgba=(0, 0, 0, 1))]
                for vertices, indices in tess.meshes:
                    graphics.append(
                        Mesh(
                            vertices=vertices, indices=indices,
                            mode="triangle_fan"))
                # green outline on top of the fill
                graphics.append(Color(rgba=(0, 1, 0, 1)))
                graphics.append(Line(points=precinct.boundary, width=1))
                precinct_graphics[precinct] = graphics
            PopMatrix()
def on_touch_down(self, touch):
if not self.table_mode:
if self.gui_touch_focus_buttons.collide_point(*touch.pos):
return self.gui_touch_focus_buttons.on_touch_down(touch)
return self.gui_touch_down(touch)
return self.fiducial_down(touch)
def on_touch_move(self, touch):
if not self.table_mode and \
self.gui_touch_focus_buttons.collide_point(*touch.pos):
return self.gui_touch_focus_buttons.on_touch_down(touch)
if touch.uid not in self.touches:
return False
if self.table_mode:
return self.fiducial_move(touch)
return self.gui_touch_move(touch)
def on_touch_up(self, touch):
if not self.table_mode and \
self.gui_touch_focus_buttons.collide_point(*touch.pos):
return self.gui_touch_focus_buttons.on_touch_down(touch)
if touch.uid not in self.touches:
return False
if self.table_mode:
return self.fiducial_up(touch)
return self.gui_touch_up(touch)
def align_touch(self, pos):
if self.align_mat is not None:
pos = tuple(
np.dot(self.align_mat, np.array([pos[0], pos[1], 1]))[:2])
x0, y0 = self.screen_offset
pos = pos[0] - x0, pos[1] - y0
return pos
    def handle_focus_block(self, pos):
        """React to the focus block being placed, moved or removed.

        Over the legend it selects the metric under ``pos`` and repaints the
        precincts by that metric; over the map it reports the district under
        the block; ``pos=None`` means the block was removed and resets
        everything. Focus changes are forwarded over the ROS bridge when one
        is connected.
        """
        assert self.focus_metrics
        if pos is None:
            # focus block removed: clear metric view and notify
            self.current_focus_metric = ''
            if self.visualize_metric_data:
                self.paint_precinct_by_district(clear_error=False)
            if self.ros_bridge is not None:
                self.ros_bridge.update_tuio_focus(False, '')
            return
        x, y = pos
        # map-space coordinates (legend removed, density undone)
        x_ = (x - self.focus_region_width) / Metrics.density
        y_ = y / Metrics.density
        if x < self.focus_region_width:
            # block is over the legend: pick the metric cell, if any
            rows = self.n_focus_rows
            metric = ''
            if y < len(self.focus_metrics) * self.focus_metric_height:
                row = int(y / self.focus_metric_height)
                col = int(x / self.focus_metric_width)
                # same col-major, row-fastest order used by show_focus_region
                metric = self.focus_metrics[col * rows + row]
            self.current_focus_metric = metric
            if self.visualize_metric_data:
                if metric:
                    self.paint_precinct_by_metric()
                else:
                    self.paint_precinct_by_district(clear_error=False)
            if self.ros_bridge is not None:
                self.ros_bridge.update_tuio_focus(False, metric)
        else:
            # block is over the map: report the district under it
            self.current_focus_metric = ''
            if self.visualize_metric_data:
                self.paint_precinct_by_district(clear_error=False)
            try:
                district = self.voronoi_mapping.get_pos_district(
                    (x_, y_))
            except (IndexError, TypeError):
                district = None
            if self.ros_bridge is not None:
                # it's not on any district, send a no block present signal
                if district is None:
                    self.ros_bridge.update_tuio_focus(False, '')
                else:
                    self.ros_bridge.update_tuio_focus(True, district.identity)
    def focus_block_down(self, touch, pos):
        """Table mode: a focus fiducial was placed at ``pos``.

        Only one focus block is tracked at a time; extra blocks (or placing
        one with no focus metrics configured) are ignored.
        """
        # there's already a focus block on the table
        if self._has_focus or not self.focus_metrics:
            return True
        self._has_focus = touch
        # magenta marker showing where the focus block sits
        with self.canvas:
            color = Color(rgba=(1, 0, 1, 1))
            point = Point(points=pos, pointsize=7)
        info = {'fid': touch.fid, 'last_pos': pos, 'graphics': (color, point),
                'focus': True}
        self.touches[touch.uid] = info
        self.handle_focus_block(pos)
        return True
    def focus_block_move(self, touch, pos):
        """Only called in table mode and if the touch has been seen before
        and it is a focus block.
        """
        assert self.focus_metrics
        info = self.touches[touch.uid]
        # track the marker and re-evaluate what the block is pointing at
        info['last_pos'] = pos
        info['graphics'][1].points = pos
        self.handle_focus_block(pos)
        return True
    def focus_block_up(self, touch):
        """Only called in table mode and if the touch has been seen before
        and it is a focus block.
        """
        assert self.focus_metrics
        info = self.touches[touch.uid]
        # remove the magenta marker drawn for the focus block
        for item in info['graphics']:
            self.canvas.remove(item)
        del self.touches[touch.uid]
        self._has_focus = None
        # a None position signals "focus block left the table"
        self.handle_focus_block(None)
        return True
    def fiducial_down(self, touch):
        """Table mode: a fiducial marker was placed on the table.

        Focus markers are delegated to :meth:`focus_block_down`; district
        markers over the map create a voronoi fiducial and request a district
        reassignment. Unknown markers are ignored.
        """
        focus_id = self.focus_block_logical_id
        blocks_fid = self.district_blocks_fid
        # only accept known TUIO fiducial ids
        if 'markerid' not in touch.profile or (
                touch.fid not in blocks_fid and touch.fid != focus_id):
            return False
        x, y = pos = self.align_touch(touch.pos)
        # handle focus block
        if touch.fid == focus_id:
            return self.focus_block_down(touch, pos)
        # district blocks over the legend area are ignored
        if x < self.focus_region_width:
            return True
        with self.canvas:
            color = Color(rgba=(1, 1, 1, 1))
            point = Point(points=pos, pointsize=7)
        logical_id = blocks_fid.index(touch.fid)
        key = self.add_fiducial((x - self.focus_region_width, y), logical_id)
        info = {'fid': touch.fid, 'fiducial_key': key, 'last_pos': pos,
                'graphics': (color, point), 'logical_id': logical_id}
        self.touches[touch.uid] = info
        self.voronoi_mapping.request_reassignment(self.voronoi_callback)
        return True
    def fiducial_move(self, touch):
        """Only called in table mode and if the touch has been seen before.
        """
        info = self.touches[touch.uid]
        x, y = pos = self.align_touch(touch.pos)
        # ignore TUIO updates that did not actually move the marker
        if info['last_pos'] == pos:
            return True
        if 'focus' in info:
            return self.focus_block_move(touch, pos)
        info['last_pos'] = pos
        info['graphics'][1].points = pos
        self.voronoi_mapping.move_fiducial(
            info['fiducial_key'], (x - self.focus_region_width, y))
        self.voronoi_mapping.request_reassignment(self.voronoi_callback)
        return True
    def fiducial_up(self, touch):
        """Only called in table mode and if the touch has been seen before.
        """
        info = self.touches[touch.uid]
        if 'focus' in info:
            return self.focus_block_up(touch)
        del self.touches[touch.uid]
        # drop the marker graphics, then the mapping's fiducial itself
        for item in info['graphics']:
            self.canvas.remove(item)
        x, y = self.align_touch(touch.pos)
        self.remove_fiducial(
            info['fiducial_key'], (x - self.focus_region_width, y))
        self.voronoi_mapping.request_reassignment(self.voronoi_callback)
        return True
    def gui_touch_down(self, touch):
        """GUI (mouse) mode: a touch landed on the map area.

        A touch near an existing fiducial (or the focus marker) starts a
        drag of it; otherwise, depending on the selected toggle button, it
        either places the focus marker or creates a new district fiducial.
        """
        x, y = pos = self.align_touch(touch.pos)
        info = {'moved': False, 'fiducial_key': None}
        # are we near a voronoi touch?
        x_ = (x - self.focus_region_width) / Metrics.density
        y_ = y / Metrics.density
        for key, (x2, y2) in self.voronoi_mapping.get_fiducials().items():
            # within 10 map units: grab the existing fiducial for dragging
            if ((x_ - x2) ** 2 + (y_ - y2) ** 2) ** .5 < 10:
                info['fiducial_key'] = key
                self.touches[touch.uid] = info
                return True
        # are we near the focus touch?
        if self.focus_gui_pos:
            assert self.focus_metrics
            x2, y2 = self.focus_gui_pos
            if ((x - x2) ** 2 + (y - y2) ** 2) ** .5 < 10:
                info['focus'] = True
                self.touches[touch.uid] = info
                return True
        # handle focus down
        if self.current_fid_id is self.focus_block_logical_id:
            # only one focus marker may exist, and only with focus metrics
            if self.focus_gui_pos or not self.focus_metrics:
                return True
            self.focus_gui_pos = pos
            with self.canvas:
                color = Color(rgba=(1, 0, 1, 1))
                point = Point(points=pos, pointsize=7)
            self.fiducial_graphics['focus'] = color, point
            info['focus'] = True
            info['moved'] = True
            self.touches[touch.uid] = info
            self.handle_focus_block(pos)
            return True
        # touches over the legend area create nothing
        if x_ < 0:
            return True
        # with self.canvas:
        #     color = Color(rgba=(1, 1, 1, 1))
        #     point = Point(points=pos, pointsize=7)
        current_id = self.current_fid_id
        # enforce the per-district fiducial limit
        if len(
                [1 for val in self.voronoi_mapping.get_fiducial_ids().values()
                 if val == current_id]) >= self.max_fiducials_per_district:
            return True
        key = self.add_fiducial((x_, y_), current_id)
        # labeled marker showing the district number at the touch position
        label = self.fiducial_graphics[key] = Label(
            text=str(self.current_fid_id + 1),
            center=tuple(map(float, pos)),
            font_size='20dp')
        self.add_widget(label)
        info['fiducial_key'] = key
        info['moved'] = True
        self.touches[touch.uid] = info
        self.voronoi_mapping.request_reassignment(self.voronoi_callback)
        return True
    def gui_touch_move(self, touch):
        """Only called when not in table mode and if the touch has been seen
        before.
        """
        x, y = pos = self.align_touch(touch.pos)
        x_ = (x - self.focus_region_width) / Metrics.density
        y_ = y / Metrics.density
        info = self.touches[touch.uid]
        info['moved'] = True
        if 'focus' in info:
            # dragging the focus marker: re-evaluate what it points at
            if self.focus_gui_pos != pos:
                self.handle_focus_block(pos)
            self.focus_gui_pos = self.fiducial_graphics['focus'][1].points = pos
            return True
        key = info['fiducial_key']
        pos_ = (x_, y_)
        # only update when the fiducial actually moved in map space
        if self.voronoi_mapping.get_fiducials()[key] != pos_:
            self.fiducial_graphics[key].center = tuple(map(float, pos))
            self.voronoi_mapping.move_fiducial(key, pos_)
            self.voronoi_mapping.request_reassignment(self.voronoi_callback)
        return True
    def gui_touch_up(self, touch):
        """Only called when not in table mode and if the touch has been seen
        before.

        A touch that never moved acts as a toggle: releasing on an existing
        marker removes it. A dragged touch just finalizes the drag.
        """
        x, y = pos = self.align_touch(touch.pos)
        x_ = (x - self.focus_region_width) / Metrics.density
        y_ = y / Metrics.density
        info = self.touches.pop(touch.uid)
        if 'focus' in info:
            # if moved, we leave point on gui
            if info['moved']:
                if self.focus_gui_pos != pos:
                    self.handle_focus_block(pos)
                self.focus_gui_pos = self.fiducial_graphics['focus'][1].points = pos
                return True
            # if it didn't move, we remove the point
            for item in self.fiducial_graphics['focus']:
                self.canvas.remove(item)
            del self.fiducial_graphics['focus']
            self.focus_gui_pos = None
            self.handle_focus_block(None)
            return True
        key = info['fiducial_key']
        pos_ = (x_, y_)
        if info['moved']:
            # finalize the drag at the release position
            if self.voronoi_mapping.get_fiducials()[key] != pos_:
                self.fiducial_graphics[key].center = tuple(map(float, pos))
                self.voronoi_mapping.move_fiducial(key, pos_)
            self.voronoi_mapping.request_reassignment(self.voronoi_callback)
            return True
        # tap (no movement) on an existing fiducial removes it
        self.remove_widget(self.fiducial_graphics[key])
        # for item in self.fiducial_graphics[key]:
        #     self.canvas.remove(item)
        del self.fiducial_graphics[key]
        self.remove_fiducial(key, pos_)
        self.voronoi_mapping.request_reassignment(self.voronoi_callback)
        return True
def add_fiducial(self, location, identity):
fiducial = self.voronoi_mapping.add_fiducial(location, identity)
if identity not in self.fiducials_color:
self.fiducials_color[identity] = list(next(self.colors))
return fiducial
def remove_fiducial(self, fiducial, location):
self.voronoi_mapping.remove_fiducial(fiducial)
def voronoi_callback(self, *largs):
def _callback(dt):
self.process_voronoi_output(*largs)
Clock.schedule_once(_callback)
def clear_voronoi(self):
for graphics in self.precinct_graphics.values():
graphics[0].rgba = 0, 0, 0, 1
for item in self.district_graphics:
self.canvas.remove(item)
self.district_graphics = []
def paint_precinct_by_district(self, clear_error=True):
colors = self.fiducials_color
if not self.voronoi_mapping.districts:
if clear_error:
for graphics in self.precinct_graphics.values():
graphics[0].rgba = 0, 0, 0, 1
else:
for graphics in self.precinct_graphics.values():
graphics[0].rgba = 0, 0, 0, graphics[0].a
return
for district in self.voronoi_mapping.districts:
color = colors[district.identity]
for precinct in district.precincts:
p_color = self.precinct_graphics[precinct][0]
if clear_error:
p_color.rgba = color + [1., ]
else:
p_color.rgba = color + [p_color.a]
def paint_precinct_by_metric(self):
metric_name = self.current_focus_metric
assert metric_name
assert self.visualize_metric_data
metrics = [
precinct.metrics[metric_name].scalar_value for
precinct in self.voronoi_mapping.precincts]
min_ = min(metrics)
range_ = max(metrics) - min_
graphics = self.precinct_graphics
for precinct, metric in zip(self.voronoi_mapping.precincts, metrics):
val = (metric - min_) / range_
color = graphics[precinct][0]
color.rgba = 0, val * .333 + .176, val * .314 + .392, color.a
def process_voronoi_output(
self, districts, fiducial_identity, fiducial_pos, error=[],
post_callback=None, largs=(),
data_is_old=False):
if data_is_old:
return
if post_callback is not None:
post_callback(*largs)
if not error:
fid_ids = [self.district_blocks_fid[i] for i in fiducial_identity]
if self.ros_bridge is not None:
self.ros_bridge.update_voronoi(
fiducial_pos, fid_ids, fiducial_identity, districts,
self.district_metrics_fn, self.state_metrics_fn)
if self.visualize_metric_data and self.current_focus_metric:
for graphics in self.precinct_graphics.values():
graphics[0].a = 1. # undo possible previous error display
else:
if not districts:
self.clear_voronoi()
else:
self.paint_precinct_by_district()
if error:
for precinct in error:
self.precinct_graphics[precinct][0].a = 0
for item in self.district_graphics:
self.canvas.remove(item)
self.district_graphics = []
if self.show_voronoi_boundaries:
with self.canvas:
PushMatrix()
Translate(self.focus_region_width, 0)
Scale(Metrics.density)
self.district_graphics.append(Color(1, 1, 0, 1))
for district in districts:
if not district.boundary:
continue
self.district_graphics.append(
Line(points=district.boundary + district.boundary[:2],
width=2))
PopMatrix()
class VoronoiApp(App):
    """The Kivy application that creates the GUI.

    Loads the geo data, builds the voronoi widget and, when ``use_ros`` is
    enabled, connects to a ROS bridge for table-top interaction. Most of the
    class attributes below are user configuration and are persisted to / read
    back from ``data/config.json`` by :meth:`load_config`.
    """
    # runtime objects, created in load_data_create_voronoi / build
    voronoi_mapping = None
    ros_bridge = None
    use_county_dataset = True
    geo_data = None
    precincts = []
    # display / interaction configuration
    screen_size = (1900, 800)
    table_mode = False
    alignment_filename = 'alignment.txt'
    screen_offset = 0, 0
    show_precinct_id = False
    # physical fiducial ids for the district blocks and the focus block
    district_blocks_fid = [0, 1, 2, 3, 4, 5, 6, 7]
    focus_block_fid = 8
    focus_block_logical_id = 8
    # ROS configuration
    use_ros = False
    metrics = ['demographics', ]
    ros_host = 'localhost'
    ros_port = 9090
    show_voronoi_boundaries = False
    # focus-metric panel configuration
    focus_metrics = []
    focus_metric_width = 100
    focus_metric_height = 100
    metric_data = None
    log_data = False
    max_fiducials_per_district = 5
    scale = 1.
    # lon/lat containing rects for the two dataset variants
    county_containing_rect = [0, 0, 0, 0]
    precinct_2017_containing_rect = [0, 0, 0, 0]
    display_landmarks = True
    visualize_metric_data = True
    def load_data_create_voronoi(self):
        """Loads and initializes all the data and voronoi mapping.
        """
        if self.use_county_dataset:
            geo_data = self.geo_data = GeoDataCounty()
            geo_data.containing_rect = self.county_containing_rect
        else:
            geo_data = self.geo_data = GeoDataPrecinct2017()
            geo_data.containing_rect = self.precinct_2017_containing_rect
        geo_data.screen_size = self.screen_size
        try:
            # fast path: pre-processed polygon data
            geo_data.load_npz_data()
        except FileNotFoundError:
            # slow path: parse raw data and build the npz-equivalent state
            geo_data.load_data()
            geo_data.generate_polygons()
            geo_data.scale_to_screen()
            geo_data.smooth_vertices()
        self.voronoi_mapping = vor = VoronoiMapping()
        vor.start_processing_thread()
        vor.screen_size = self.screen_size
        self.precincts = precincts = []
        # one Precinct per record; location is the polygon's centroid
        for i, (name, polygons) in enumerate(
                zip(geo_data.get_ordered_record_names(), geo_data.polygons)):
            precinct = Precinct(
                name=name, boundary=polygons[0].reshape(-1).tolist(),
                identity=i, location=polygons[0].mean(axis=0).tolist())
            precincts.append(precinct)
        vor.set_precincts(precincts)
    def show_landmarks(self, widget):
        """Draw the landmark icons and labels loaded by the geo data onto
        ``widget``'s canvas (no-op when ``display_landmarks`` is off).
        """
        if not self.display_landmarks:
            return
        offset = widget.focus_region_width
        landmarks = self.geo_data.landmarks
        if not landmarks:
            return
        with widget.canvas:
            for x, y, size, name, label in landmarks:
                x, y = dp(x), dp(y)
                size = dp(size)
                # shift past the focus column on the left of the widget
                x += offset
                if name:
                    Color(1, 1, 1, .6)
                    Rectangle(
                        pos=(x - size / 2., y - size / 2.), size=(size, size),
                        source=resource_find('{}.png'.format(name)))
                if label:
                    label_wid = Label(
                        text=label, pos=(x - size / 2., y + size / 2.),
                        font_size=dp(15))
                    widget.add_widget(label_wid)
                    # keep the label sized to its text and centered on the
                    # landmark; defaults bind the current label_wid/x values
                    def set_size(*largs, obj=label_wid, center=x):
                        obj.size = obj.texture_size
                        obj.center_x = center
                    label_wid.fbind('texture_size', set_size)
    def show_precinct_labels(self, widget):
        """Add a numeric identity label at each precinct's location."""
        offset = widget.focus_region_width
        for i, precinct in enumerate(self.precincts):
            x, y = map(dp, precinct.location)
            x += offset
            label = Label(
                text=str(precinct.identity), center=(x, y),
                font_size='20dp')
            widget.add_widget(label)
    def load_config(self):
        """Read the json config into the matching class attributes, creating
        and/or re-writing the file (normalized) in the process.

        Raises ValueError when a focus metric is not also in ``metrics``.
        """
        keys = [
            'use_county_dataset', 'screen_size',
            'table_mode', 'alignment_filename', 'screen_offset',
            'show_precinct_id', 'focus_block_fid',
            'focus_block_logical_id', 'district_blocks_fid', 'use_ros',
            'metrics', 'ros_host', 'ros_port', 'show_voronoi_boundaries',
            'focus_metrics', 'focus_metric_width', 'focus_metric_height',
            'log_data', 'max_fiducials_per_district', 'scale',
            'county_containing_rect', 'precinct_2017_containing_rect',
            'display_landmarks', 'visualize_metric_data'
        ]
        fname = os.path.join(
            os.path.dirname(distopia.__file__), 'data', 'config.json')
        if not os.path.exists(fname):
            # first run: seed the file from the class defaults
            config = {key: getattr(self, key) for key in keys}
            with open(fname, 'w') as fp:
                json.dump(config, fp, indent=2, sort_keys=True)
        with open(fname, 'r') as fp:
            for key, val in json.load(fp).items():
                setattr(self, key, val)
        # write back the effective config (adds any newly introduced keys)
        config = {key: getattr(self, key) for key in keys}
        with open(fname, 'w') as fp:
            json.dump(config, fp, indent=2, sort_keys=True)
        for metric in self.focus_metrics:
            if metric not in self.metrics:
                raise ValueError(
                    'Cannot enable focus metric "{}" because it\'s not in '
                    'metrics "{}"'.format(metric, self.metrics))
    def build(self):
        """Builds the GUI.
        """
        resource_add_path(
            os.path.join(os.path.dirname(distopia.__file__), 'data', 'media'))
        self.load_config()
        # optional camera/table alignment matrix, loaded from csv
        mat = None
        if self.alignment_filename:
            fname = os.path.join(
                os.path.dirname(distopia.__file__), 'data',
                self.alignment_filename)
            try:
                mat = np.loadtxt(fname, delimiter=',', skiprows=3)
            except Exception as e:
                # alignment is optional; log and continue without it
                logging.exception("Not using alignment: {}".format(e))
        self.load_data_create_voronoi()
        self.metric_data = self.geo_data.load_metrics(
            self.metrics, self.precincts)
        self.voronoi_mapping.verify_adjacency = \
            self.geo_data.set_precinct_adjacency(self.precincts)
        self.geo_data.load_landmarks()
        widget = VoronoiWidget(
            voronoi_mapping=self.voronoi_mapping,
            table_mode=self.table_mode, align_mat=mat,
            screen_offset=list(map(dp, self.screen_offset)),
            ros_bridge=self.ros_bridge,
            district_blocks_fid=self.district_blocks_fid,
            focus_block_fid=self.focus_block_fid,
            focus_block_logical_id=self.focus_block_logical_id,
            district_metrics_fn=self.metric_data.compute_district_metrics,
            state_metrics_fn=self.metric_data.create_state_metrics,
            show_voronoi_boundaries=self.show_voronoi_boundaries,
            focus_metrics=self.focus_metrics,
            screen_size=list(map(dp, self.screen_size)),
            focus_metric_height=dp(self.focus_metric_height),
            focus_metric_width=dp(self.focus_metric_width),
            max_fiducials_per_district=self.max_fiducials_per_district,
            visualize_metric_data=self.visualize_metric_data
        )
        if self.use_ros:
            # show a placeholder label until the ROS bridge reports ready,
            # then swap in the real voronoi widget
            box = BoxLayout()
            voronoi_widget = widget
            err = Label(text='No ROS bridge. Please set use_ros to False')
            widget = box
            box.add_widget(err)
            def enable_widget(*largs):
                box.remove_widget(err)
                box.add_widget(voronoi_widget)
                voronoi_widget.ros_bridge = self.ros_bridge
                if self.show_precinct_id:
                    self.show_precinct_labels(voronoi_widget)
                self.show_landmarks(voronoi_widget)
            self.ros_bridge = RosBridge(
                host=self.ros_host, port=self.ros_port,
                ready_callback=Clock.create_trigger(enable_widget),
                log_data=self.log_data)
        else:
            if self.show_precinct_id:
                self.show_precinct_labels(widget)
            self.show_landmarks(widget)
        # wrap everything in a (locked) Scatter so the whole UI can be
        # uniformly scaled by self.scale
        size = list(map(dp, self.screen_size))
        size = [v / self.scale for v in size]
        scatter = Scatter(
            do_rotation=False, do_scale=False, do_translation_y=False,
            do_translation_x=False, scale=self.scale,
            do_collide_after_children=False)
        scatter.add_widget(widget)
        widget.size_hint = None, None
        scatter.size_hint = None, None
        # pin the scatter to the origin even if something tries to move it
        scatter.fbind('pos', lambda *l: setattr(scatter, 'pos', (0, 0)))
        scatter.pos = 0, 0
        scatter.size = size
        widget.size = size
        return scatter
Builder.load_string("""
<SizedLabel@Label>:
size: self.texture_size
""")
if __name__ == '__main__':
    app = VoronoiApp()
    try:
        app.run()
    finally:
        # Shut down worker threads even when run() raises. Guard against a
        # crash before build() finished: voronoi_mapping would then still be
        # the class default None, and calling stop_thread() on it would raise
        # an AttributeError that masks the original exception.
        if app.voronoi_mapping is not None:
            app.voronoi_mapping.stop_thread()
        if app.ros_bridge:
            app.ros_bridge.stop_threads()
| [
"distopia.app.voronoi_data.GeoDataPrecinct2017",
"kivy.clock.Clock.create_trigger",
"distopia.app.voronoi_data.GeoDataCounty",
"kivy.clock.Clock.schedule_once",
"kivy.graphics.context_instructions.Scale",
"kivy.graphics.Color",
"kivy.graphics.vertex_instructions.Mesh",
"os.path.dirname",
"os.path.ex... | [((122, 147), 'kivy.support.install_twisted_reactor', 'install_twisted_reactor', ([], {}), '()\n', (145, 147), False, 'from kivy.support import install_twisted_reactor\n'), ((32025, 32101), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['"""\n<SizedLabel@Label>:\n size: self.texture_size\n"""'], {}), '("""\n<SizedLabel@Label>:\n size: self.texture_size\n""")\n', (32044, 32101), False, 'from kivy.lang import Builder\n'), ((19956, 19986), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['_callback'], {}), '(_callback)\n', (19975, 19986), False, 'from kivy.clock import Clock\n'), ((25191, 25207), 'distopia.mapping.voronoi.VoronoiMapping', 'VoronoiMapping', ([], {}), '()\n', (25205, 25207), False, 'from distopia.mapping.voronoi import VoronoiMapping\n'), ((31553, 31698), 'kivy.uix.scatter.Scatter', 'Scatter', ([], {'do_rotation': '(False)', 'do_scale': '(False)', 'do_translation_y': '(False)', 'do_translation_x': '(False)', 'scale': 'self.scale', 'do_collide_after_children': '(False)'}), '(do_rotation=False, do_scale=False, do_translation_y=False,\n do_translation_x=False, scale=self.scale, do_collide_after_children=False)\n', (31560, 31698), False, 'from kivy.uix.scatter import Scatter\n'), ((4296, 4308), 'kivy.graphics.context_instructions.PushMatrix', 'PushMatrix', ([], {}), '()\n', (4306, 4308), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((4321, 4379), 'kivy.graphics.context_instructions.Translate', 'Translate', (['*[(v * Metrics.density) for v in screen_offset]'], {}), '(*[(v * Metrics.density) for v in screen_offset])\n', (4330, 4379), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((4422, 4433), 'kivy.graphics.context_instructions.PopMatrix', 'PopMatrix', ([], {}), '()\n', (4431, 4433), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, 
Rotate, Translate, Scale, MatrixInstruction\n'), ((5565, 5632), 'kivy.uix.togglebutton.ToggleButton', 'ToggleButton', ([], {'text': '"""Focus"""', 'group': '"""focus"""', 'allow_no_selection': '(False)'}), "(text='Focus', group='focus', allow_no_selection=False)\n", (5577, 5632), False, 'from kivy.uix.togglebutton import ToggleButton\n'), ((6858, 6870), 'kivy.graphics.context_instructions.PushMatrix', 'PushMatrix', ([], {}), '()\n', (6868, 6870), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((6883, 6920), 'kivy.graphics.context_instructions.Translate', 'Translate', (['self.focus_region_width', '(0)'], {}), '(self.focus_region_width, 0)\n', (6892, 6920), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((6933, 6955), 'kivy.graphics.context_instructions.Scale', 'Scale', (['Metrics.density'], {}), '(Metrics.density)\n', (6938, 6955), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((7724, 7735), 'kivy.graphics.context_instructions.PopMatrix', 'PopMatrix', ([], {}), '()\n', (7733, 7735), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((11239, 11263), 'kivy.graphics.Color', 'Color', ([], {'rgba': '(1, 0, 1, 1)'}), '(rgba=(1, 0, 1, 1))\n', (11244, 11263), False, 'from kivy.graphics import Color\n'), ((11284, 11314), 'kivy.graphics.vertex_instructions.Point', 'Point', ([], {'points': 'pos', 'pointsize': '(7)'}), '(points=pos, pointsize=7)\n', (11289, 11314), False, 'from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, Rectangle\n'), ((12877, 12901), 'kivy.graphics.Color', 'Color', ([], {'rgba': '(1, 1, 1, 1)'}), '(rgba=(1, 1, 1, 1))\n', (12882, 12901), False, 'from kivy.graphics import Color\n'), ((12922, 12952), 
'kivy.graphics.vertex_instructions.Point', 'Point', ([], {'points': 'pos', 'pointsize': '(7)'}), '(points=pos, pointsize=7)\n', (12927, 12952), False, 'from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, Rectangle\n'), ((24636, 24651), 'distopia.app.voronoi_data.GeoDataCounty', 'GeoDataCounty', ([], {}), '()\n', (24649, 24651), False, 'from distopia.app.voronoi_data import GeoDataCounty, GeoDataPrecinct2017\n'), ((24772, 24793), 'distopia.app.voronoi_data.GeoDataPrecinct2017', 'GeoDataPrecinct2017', ([], {}), '()\n', (24791, 24793), False, 'from distopia.app.voronoi_data import GeoDataCounty, GeoDataPrecinct2017\n'), ((27859, 27893), 'os.path.dirname', 'os.path.dirname', (['distopia.__file__'], {}), '(distopia.__file__)\n', (27874, 27893), False, 'import os\n'), ((27933, 27954), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (27947, 27954), False, 'import os\n'), ((28362, 28409), 'json.dump', 'json.dump', (['config', 'fp'], {'indent': '(2)', 'sort_keys': '(True)'}), '(config, fp, indent=2, sort_keys=True)\n', (28371, 28409), False, 'import json\n'), ((30570, 30581), 'kivy.uix.boxlayout.BoxLayout', 'BoxLayout', ([], {}), '()\n', (30579, 30581), False, 'from kivy.uix.boxlayout import BoxLayout\n'), ((30636, 30692), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""No ROS bridge. Please set use_ros to False"""'}), "(text='No ROS bridge. 
Please set use_ros to False')\n", (30641, 30692), False, 'from kivy.uix.label import Label\n'), ((3938, 3959), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (3950, 3959), True, 'import matplotlib.pyplot as plt\n'), ((7090, 7102), 'kivy.graphics.tesselator.Tesselator', 'Tesselator', ([], {}), '()\n', (7100, 7102), False, 'from kivy.graphics.tesselator import Tesselator, WINDING_ODD, TYPE_POLYGONS\n'), ((15696, 15720), 'kivy.graphics.Color', 'Color', ([], {'rgba': '(1, 0, 1, 1)'}), '(rgba=(1, 0, 1, 1))\n', (15701, 15720), False, 'from kivy.graphics import Color\n'), ((15745, 15775), 'kivy.graphics.vertex_instructions.Point', 'Point', ([], {'points': 'pos', 'pointsize': '(7)'}), '(points=pos, pointsize=7)\n', (15750, 15775), False, 'from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, Rectangle\n'), ((23001, 23013), 'kivy.graphics.context_instructions.PushMatrix', 'PushMatrix', ([], {}), '()\n', (23011, 23013), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((23030, 23067), 'kivy.graphics.context_instructions.Translate', 'Translate', (['self.focus_region_width', '(0)'], {}), '(self.focus_region_width, 0)\n', (23039, 23067), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((23084, 23106), 'kivy.graphics.context_instructions.Scale', 'Scale', (['Metrics.density'], {}), '(Metrics.density)\n', (23089, 23106), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((23480, 23491), 'kivy.graphics.context_instructions.PopMatrix', 'PopMatrix', ([], {}), '()\n', (23489, 23491), False, 'from kivy.graphics.context_instructions import PushMatrix, PopMatrix, Rotate, Translate, Scale, MatrixInstruction\n'), ((26078, 26086), 'kivy.metrics.dp', 'dp', (['size'], {}), '(size)\n', (26080, 26086), False, 
'from kivy.metrics import Metrics, dp\n'), ((28076, 28123), 'json.dump', 'json.dump', (['config', 'fp'], {'indent': '(2)', 'sort_keys': '(True)'}), '(config, fp, indent=2, sort_keys=True)\n', (28085, 28123), False, 'import json\n'), ((28784, 28818), 'os.path.dirname', 'os.path.dirname', (['distopia.__file__'], {}), '(distopia.__file__)\n', (28799, 28818), False, 'import os\n'), ((28971, 29005), 'os.path.dirname', 'os.path.dirname', (['distopia.__file__'], {}), '(distopia.__file__)\n', (28986, 29005), False, 'import os\n'), ((29095, 29139), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'delimiter': '""","""', 'skiprows': '(3)'}), "(fname, delimiter=',', skiprows=3)\n", (29105, 29139), True, 'import numpy as np\n'), ((30293, 30321), 'kivy.metrics.dp', 'dp', (['self.focus_metric_height'], {}), '(self.focus_metric_height)\n', (30295, 30321), False, 'from kivy.metrics import Metrics, dp\n'), ((30354, 30381), 'kivy.metrics.dp', 'dp', (['self.focus_metric_width'], {}), '(self.focus_metric_width)\n', (30356, 30381), False, 'from kivy.metrics import Metrics, dp\n'), ((4793, 4798), 'kivy.metrics.dp', 'dp', (['(5)'], {}), '(5)\n', (4795, 4798), False, 'from kivy.metrics import Metrics, dp\n'), ((6423, 6466), 'kivy.factory.Factory.SizedLabel', 'Factory.SizedLabel', ([], {'text': 'name', 'pos': '(x0, y0)'}), '(text=name, pos=(x0, y0))\n', (6441, 6466), False, 'from kivy.factory import Factory\n'), ((6522, 6576), 'kivy.graphics.vertex_instructions.Line', 'Line', ([], {'points': '[x0, y0, x1, y0, x1, y1, x0, y1]', 'width': '(2)'}), '(points=[x0, y0, x1, y0, x1, y1, x0, y1], width=2)\n', (6526, 6576), False, 'from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, Rectangle\n'), ((7264, 7288), 'kivy.graphics.Color', 'Color', ([], {'rgba': '(0, 0, 0, 1)'}), '(rgba=(0, 0, 0, 1))\n', (7269, 7288), False, 'from kivy.graphics import Color\n'), ((7558, 7582), 'kivy.graphics.Color', 'Color', ([], {'rgba': '(0, 1, 0, 1)'}), '(rgba=(0, 1, 0, 1))\n', (7563, 7582), False, 
'from kivy.graphics import Color\n'), ((7616, 7655), 'kivy.graphics.vertex_instructions.Line', 'Line', ([], {'points': 'precinct.boundary', 'width': '(1)'}), '(points=precinct.boundary, width=1)\n', (7620, 7655), False, 'from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, Rectangle\n'), ((23153, 23170), 'kivy.graphics.Color', 'Color', (['(1)', '(1)', '(0)', '(1)'], {}), '(1, 1, 0, 1)\n', (23158, 23170), False, 'from kivy.graphics import Color\n'), ((26042, 26047), 'kivy.metrics.dp', 'dp', (['x'], {}), '(x)\n', (26044, 26047), False, 'from kivy.metrics import Metrics, dp\n'), ((26049, 26054), 'kivy.metrics.dp', 'dp', (['y'], {}), '(y)\n', (26051, 26054), False, 'from kivy.metrics import Metrics, dp\n'), ((26161, 26180), 'kivy.graphics.Color', 'Color', (['(1)', '(1)', '(1)', '(0.6)'], {}), '(1, 1, 1, 0.6)\n', (26166, 26180), False, 'from kivy.graphics import Color\n'), ((28190, 28203), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (28199, 28203), False, 'import json\n'), ((31221, 31256), 'kivy.clock.Clock.create_trigger', 'Clock.create_trigger', (['enable_widget'], {}), '(enable_widget)\n', (31241, 31256), False, 'from kivy.clock import Clock\n'), ((4752, 4759), 'kivy.metrics.dp', 'dp', (['(100)'], {}), '(100)\n', (4754, 4759), False, 'from kivy.metrics import Metrics, dp\n'), ((4761, 4766), 'kivy.metrics.dp', 'dp', (['h'], {}), '(h)\n', (4763, 4766), False, 'from kivy.metrics import Metrics, dp\n'), ((7405, 7466), 'kivy.graphics.vertex_instructions.Mesh', 'Mesh', ([], {'vertices': 'vertices', 'indices': 'indices', 'mode': '"""triangle_fan"""'}), "(vertices=vertices, indices=indices, mode='triangle_fan')\n", (7409, 7466), False, 'from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, Rectangle\n'), ((8964, 8993), 'numpy.array', 'np.array', (['[pos[0], pos[1], 1]'], {}), '([pos[0], pos[1], 1])\n', (8972, 8993), True, 'import numpy as np\n'), ((23370, 23433), 'kivy.graphics.vertex_instructions.Line', 'Line', ([], {'points': 
'(district.boundary + district.boundary[:2])', 'width': '(2)'}), '(points=district.boundary + district.boundary[:2], width=2)\n', (23374, 23433), False, 'from kivy.graphics.vertex_instructions import Line, Point, Mesh, Ellipse, Rectangle\n'), ((26530, 26536), 'kivy.metrics.dp', 'dp', (['(15)'], {}), '(15)\n', (26532, 26536), False, 'from kivy.metrics import Metrics, dp\n')] |
import numpy as np
import pandas as pd
import scanpy as sc
import scanpy.external as sce
def create_cluster_annotation_overview(
adata,
n_levels,
cluster_label,
min_fraction_for_dominancy=0.80,
min_fraction_annotated=0.5,
compartment_of_interest=None,
):
"""Function to calculate for each cluster, for each annotation level, if it is
dominated by a cell type.
Args:
adata - scanpy AnnData object
n_levels - number of annotation levels (named "ann_level_[number]" in adata.obs)
cluster_label - column name of cluster column in adata.obs
min_fraction_for_dominancy - minimum fraction of annotated cells to belong to
one cell type, to be called "dominant". Should be
higher than 0.5.
min_fraction_annotated - minumum fraction of cells in cluster that need to be
annotated, before "dominancy" analysis is possible
compartment_of_interest - ann_level_1 compartment to which to limit the cluster
analysis. Only clusters that belong to multiple
compartments or this specific compartment are included
in the output df.
Returns:
cluster df - pandas dataframe with for each cluster information on what is the
dominant cluster (if there is one), and the fraction of annotated
cells belonging to the dominant cluster
"""
cluster_df = pd.DataFrame(
index=adata.obs[cluster_label].cat.categories,
columns=zip(
["ann{}_dom_type".format(level) for level in range(1, n_levels + 1)],
["ann{}_dom_fract".format(level) for level in range(1, n_levels + 1)],
),
)
for level in range(1, n_levels + 1):
level_name = "ann_level_" + str(level)
clust_cell_types = adata.obs.groupby([cluster_label, level_name]).agg(
{level_name: "count"}
)
# count fraction of cells that is annotated at this level:
clust_cell_types["annotated"] = [
"no" if celltype[:2] in ["1_", "2_", "3_", "4_"] else "yes"
for celltype in clust_cell_types.index.get_level_values(1)
]
number_annotated = clust_cell_types.groupby([cluster_label, "annotated"]).agg(
{level_name: "sum"}
)
fraction_annotated = number_annotated.groupby(level=0).apply(
lambda x: x / float(x.sum())
)
# keep only cells that are annotated at this level:
rows_to_keep = [
rownumber
for rownumber, rowname in enumerate(clust_cell_types.index.get_level_values(1))
if not rowname[:2] in ["1_", "2_", "3_", "4_"]
]
clust_cell_types = clust_cell_types.iloc[rows_to_keep, :]
# convert to proportions
clust_cell_types = clust_cell_types.groupby(level=0)[level_name].apply(
lambda x: x / float(x.sum())
)
# add "dominant" annotation:
dominant_types = clust_cell_types.index[
clust_cell_types > min_fraction_for_dominancy
]
dominant_fractions = clust_cell_types[clust_cell_types > min_fraction_for_dominancy]
# copy dominant types to cluster_df:
cluster_df.loc[
dominant_types.get_level_values(0), "ann{}_dom_type".format(level)
] = dominant_types.get_level_values(1)
# copy dominance fractions to cluster_df
cluster_df.loc[
dominant_fractions.index.get_level_values(0), "ann{}_dom_fract".format(level)
] = dominant_fractions.values
# set underannotated entries to "underann"
# first, make sure columns are not categorical (they would not accept new cat)
for cat in ["ann{}_dom_type".format(level), "ann{}_dom_fract".format(level)]:
cluster_df[cat] = cluster_df[cat].tolist()
idx = pd.IndexSlice
underannotated_boolean = (
fraction_annotated.loc[idx[:, "yes"], :] < min_fraction_annotated
)
cluster_df.loc[
underannotated_boolean[level_name].values,
["ann{}_dom_type".format(level), "ann{}_dom_fract".format(level)],
] = "underann"
if compartment_of_interest != None:
# subset epithelial and split clusters
cluster_df = cluster_df.loc[
[
main_type == compartment_of_interest or split_cluster
for main_type, split_cluster in zip(
cluster_df.ann1_dom_type, cluster_df.ann1_dom_type.isnull()
)
],
:,
]
return cluster_df
def add_nested_clustering(
    adata,
    cluster_df,
    cluster_label_previous,
    cluster_label_new,
    cluster_res=0.2,
    min_cluster_size=100,
    verbose=True,
):
    """Run one round of sub-clustering on the not-yet-resolved clusters.

    Every cluster in ``cluster_df`` that does not have a dominant cell type
    at all annotation levels (i.e. has any null entry in its row) is
    re-clustered on its own with Leiden; the resulting sub-clusters are named
    "<parent>.<child>". Fully resolved or too-small clusters are left as-is.

    Returns adata with the new clustering stored as an ordered categorical
    under adata.obs[cluster_label_new].
    """
    # start from a plain (non-categorical) copy of the parent clustering
    adata.obs[cluster_label_new] = adata.obs[cluster_label_previous].tolist()
    for parent in cluster_df.index:
        if verbose:
            print("Cluster:", parent)
        if not cluster_df.loc[parent, :].isnull().any():
            # dominant type known at every level: nothing left to resolve
            if verbose:
                print("clustered to full resolution!\n")
            continue
        subadata = adata[adata.obs[cluster_label_previous] == parent, :].copy()
        if subadata.shape[0] < min_cluster_size:
            if verbose:
                print("cluster size smaller than", min_cluster_size, "\n")
            continue
        if verbose:
            print("reclustering...\n")
        sc.tl.pca(subadata)
        sc.tl.leiden(subadata, resolution=cluster_res, key_added=cluster_label_new)
        # prefix sub-cluster ids with the parent id and write them back
        subadata.obs[cluster_label_new] = [
            "{}.{}".format(parent, child)
            for child in subadata.obs[cluster_label_new]
        ]
        adata.obs.loc[subadata.obs.index, cluster_label_new] = subadata.obs[
            cluster_label_new
        ]
    # order categories "numerically" (so not 1, 10, 11 but 1, 2, ..., 10, 11)
    names = list(sorted(set(adata.obs[cluster_label_new])))
    order = np.argsort([float(name.split(".")[0]) for name in names])
    adata.obs[cluster_label_new] = pd.Categorical(
        adata.obs[cluster_label_new], categories=[names[i] for i in order]
    )
    return adata
def add_nested_clustering_blind(
    adata,
    cluster_label_previous,
    cluster_label_new,
    use_rep,
    cluster_alg="leiden",
    cluster_res=0.2,
    cluster_k=30,
    min_cluster_size=50,
    redo_pca=True,
    verbose=True,
):
    """Run one round of sub-clustering on every existing cluster.

    "Blind" because each parent cluster is re-clustered regardless of its
    annotation purity; only clusters smaller than ``min_cluster_size`` are
    skipped. Sub-clusters are named "<parent>.<child>".

    Args:
        adata - anndata object to be clustered
        cluster_label_previous - parent cluster label
        cluster_label_new - label for new clustering
        use_rep - name of .obsm object to be used for neighbor graph
        cluster_alg - <"leiden","phenograph">
        cluster_res - only applicable when using "leiden" as cluster_alg
        cluster_k - only applicable when using "phenograph" as cluster_alg.
        min_cluster_size - minimum cluster size to be re-clustered.
                           Make sure that cluster_k < min_cluster_size
        redo_pca - boolean. whether to re-calculate PCA for subclusters
        verbose - boolean
    Returns adata with new clustering (under adata.obs[cluster_label_new]).
    """
    parent_labels = adata.obs[cluster_label_previous].tolist()
    # start from a plain copy of the parent clustering; replaced per cluster
    adata.obs[cluster_label_new] = parent_labels
    if not redo_pca:
        print("Not re-doing pca before nested clustering iterations!")
    for parent in sorted(set(parent_labels)):
        if verbose:
            print("Cluster:", parent)
        subadata = adata[adata.obs[cluster_label_previous] == parent, :].copy()
        if subadata.shape[0] < min_cluster_size:
            if verbose:
                print("cluster size smaller than", min_cluster_size, "\n")
            continue
        if verbose:
            print("reclustering...\n")
        if redo_pca:
            if verbose:
                print("running pca...")
            sc.tl.pca(subadata)
        if cluster_alg == "leiden":
            if verbose:
                print("calculating 30 nearest neighbors")
                print("using rep:", use_rep)
            sc.pp.neighbors(subadata, n_neighbors=30, use_rep=use_rep)
            if verbose:
                print("clustering")
            sc.tl.leiden(subadata, resolution=cluster_res, key_added=cluster_label_new)
        elif cluster_alg == "phenograph":
            # phenograph returns (labels, graph, modularity); keep the labels
            sub_labels = sce.tl.phenograph(subadata.obsm[use_rep], k=cluster_k)[0]
            subadata.obs[cluster_label_new] = pd.Categorical(sub_labels)
        else:
            raise ValueError("Your cluster_alg argument is incorrect.")
        # prefix sub-cluster ids with the parent id and write them back
        subadata.obs[cluster_label_new] = [
            "{}.{}".format(parent, child)
            for child in subadata.obs[cluster_label_new]
        ]
        adata.obs.loc[subadata.obs.index, cluster_label_new] = subadata.obs[
            cluster_label_new
        ]
    # order categories "numerically" (so not 1, 10, 11 but 1, 2, ..., 10, 11);
    # first normalize everything to strings (phenograph labels are ints)
    adata.obs[cluster_label_new] = [
        str(clust) for clust in adata.obs[cluster_label_new]
    ]
    names = list(sorted(set(adata.obs[cluster_label_new])))
    order = np.argsort([float(name.split(".")[0]) for name in names])
    adata.obs[cluster_label_new] = pd.Categorical(
        adata.obs[cluster_label_new], categories=[names[i] for i in order]
    )
    return adata
def get_cluster_markers(adata, cluster_label, marker_ref, ngenes=100, verbose=True):
"""
Calculates markers for every cluster, using either all other cells or
the parent cluster as a reference (i.e. for cluster 00.00.01, it
uses all clusters starting with 00.00 as reference. For cluster
00, it uses all cells as reference).
sc.tl.rank_genes is used for marker gene calculation.
Arguments:
adata - AnnData object
cluster_label - string
label in adata.obs that contains nested-cluster names
marker_ref - either "all" or "sisters". Which clusters to compare with.
ngenes - number of marker genes to get per cluster
Returns:
cluster_markers - pd.DataFrame
dataframe with, for each cluster, 100 highest scoring genes,
plus matching logfc and adj pvalue
"""
# input check:
if marker_ref == "all":
print("Doing one versus all differential expression analysis.")
elif marker_ref == "sisters":
print("Doing one versus sisters differential expression analysis.")
else:
raise ValueError("marker_ref argument should be set to either 'all' or 'sisters'.")
# convert clusters to strings:
adata.obs[cluster_label] = [str(cl) for cl in adata.obs[cluster_label]]
# store cluster set
clusters = sorted(set(adata.obs[cluster_label]))
colnames_nested = [
[clust + "_gene", clust + "_logfc", clust + "_pval_adj"] for clust in clusters
]
colnames = [item for sublist in colnames_nested for item in sublist]
cluster_markers = pd.DataFrame(index=range(100), columns=colnames)
parents_tested = list()
for clust in clusters:
clust_depth = len(clust.split("."))
if clust_depth == 1:
parent = "all"
if parent not in parents_tested:
if verbose:
print("ranking genes for parent group", parent)
parents_tested.append(parent)
sc.tl.rank_genes_groups(adata, groupby=cluster_label, n_genes=ngenes)
# store results for all clusters from this parent
# i.e. all clusters of depth 1
for d1_cluster in [
clust for clust in clusters if len(clust.split(".")) == 1
]:
# create a subdf that will allow us to sort genes per cluster
submarker_df = pd.DataFrame(
index=range(ngenes),
columns=[
d1_cluster + "_gene",
d1_cluster + "_logfc",
d1_cluster + "_pval_adj",
],
)
submarker_df[d1_cluster + "_gene"] = adata.uns["rank_genes_groups"][
"names"
][d1_cluster]
submarker_df[d1_cluster + "_logfc"] = adata.uns[
"rank_genes_groups"
]["logfoldchanges"][d1_cluster]
submarker_df[d1_cluster + "_pval_adj"] = adata.uns[
"rank_genes_groups"
]["pvals_adj"][d1_cluster]
# sort values:
submarker_df.sort_values(
by=[d1_cluster + "_pval_adj", d1_cluster + "_logfc"],
ascending=[True, False],
inplace=True,
)
submarker_df = submarker_df.reset_index().drop(columns="index")
# and add to big dataframe
cluster_markers.loc[
submarker_df.index, submarker_df.columns
] = submarker_df.values
else:
parent = ".".join(clust.split(".")[: clust_depth - 1])
if parent not in parents_tested:
# depending on reference choice, use whole adata as reference
# or only the parent cluster.
if marker_ref == "all":
subadata = adata
elif marker_ref == "sisters":
subadata = adata[[cl.startswith(parent) for cl in adata.obs[cluster_label]],:].copy()
if verbose:
print("ranking genes for parent group", parent)
parents_tested.append(parent)
siblings = [c for c in clusters if c.startswith(parent)]
if len(siblings) < 2 and marker_ref == "sisters":
print("Cluster {} has only one subcluster. Skipping DEA for this parent.".format(parent))
else:
sc.tl.rank_genes_groups(subadata, groupby=cluster_label, groups=siblings, n_genes=ngenes)
for same_depth_sibling in [
sib for sib in siblings if len(clust.split(".")) == clust_depth
]:
# create a subdf that will allow us to sort genes per cluster
submarker_df = pd.DataFrame(
index=range(ngenes),
columns=[
same_depth_sibling + "_gene",
same_depth_sibling + "_logfc",
same_depth_sibling + "_pval_adj",
],
)
submarker_df[same_depth_sibling + "_gene"] = subadata.uns[
"rank_genes_groups"
]["names"][same_depth_sibling]
submarker_df[same_depth_sibling + "_logfc"] = subadata.uns[
"rank_genes_groups"
]["logfoldchanges"][same_depth_sibling]
submarker_df[same_depth_sibling + "_pval_adj"] = subadata.uns[
"rank_genes_groups"
]["pvals_adj"][same_depth_sibling]
# sort values:
submarker_df.sort_values(
by=[
same_depth_sibling + "_pval_adj",
same_depth_sibling + "_logfc",
],
ascending=[True, False],
inplace=True,
)
submarker_df = submarker_df.reset_index().drop(columns="index")
# add to big dataframe
cluster_markers.loc[
submarker_df.index, submarker_df.columns
] = submarker_df.values
return cluster_markers
def create_cluster_mapping_overview(
adata,
n_levels,
cluster_label_to_decompose,
cluster_label_to_count_prefix,
min_fraction_for_dominancy=0.5,
index_name=None,
):
"""Function to calculate for a new clustering, which clusters from an old
clustering are the dominant ones in your new clustering (or vice versa).
Args:
adata - scanpy AnnData object
n_levels - number of annotation levels (named "ann_level_[number]" in adata.obs)
cluster_label_to_decompose - column name of cluster column in adata.obs
for which we want to know of what clusters it consists
cluster_label_to_count_prefix - column name (excluding level number) of
clusters by which we want to define our cluster-to-decompose
min_fraction_for_dominancy - minimum fraction of annotated cells to belong to
one cell type, to be called "dominant". Should be
higher than 0.5.
index_name - name to give to index column
Returns:
cluster df - pandas dataframe with for each cluster information on what is the
dominant cluster (if there is one), and the fraction of annotated
cells belonging to the dominant cluster
"""
# set up dataframe with one row per new cluster
cluster_df = pd.DataFrame(
index=adata.obs[cluster_label_to_decompose].cat.categories,
columns=zip(
[
f"{cluster_label_to_count_prefix}{level}_dom_type"
for level in range(1, n_levels + 1)
],
[
f"{cluster_label_to_count_prefix}{level}_dom_fract"
for level in range(1, n_levels + 1)
],
),
)
# loop through cluster-to-count levels
for level in range(1, n_levels + 1):
cluster_to_count_level_name = f"{cluster_label_to_count_prefix}{level}"
clust_cell_types = adata.obs.groupby(
[cluster_label_to_decompose, cluster_to_count_level_name]
).agg({cluster_to_count_level_name: "count"})
# convert to proportions
clust_cell_types = clust_cell_types.groupby(level=0)[
cluster_to_count_level_name
].apply(lambda x: x / float(x.sum()))
# add "dominant" annotation:
dominant_types = clust_cell_types.index[
clust_cell_types > min_fraction_for_dominancy
]
dominant_fractions = clust_cell_types[
clust_cell_types > min_fraction_for_dominancy
]
# copy dominant types to cluster_df:
cluster_df.loc[
dominant_types.get_level_values(0),
f"{cluster_to_count_level_name}_dom_type",
] = dominant_types.get_level_values(1)
# copy dominance fractions to cluster_df
cluster_df.loc[
dominant_fractions.index.get_level_values(0),
f"{cluster_to_count_level_name}_dom_fract",
] = dominant_fractions.values
if not pd.isnull(index_name):
cluster_df.index.name = index_name
return cluster_df
| [
"scanpy.tl.pca",
"scanpy.pp.neighbors",
"pandas.isnull",
"numpy.argsort",
"scanpy.external.tl.phenograph",
"scanpy.tl.leiden",
"scanpy.tl.rank_genes_groups",
"pandas.Categorical"
] | [((6743, 6828), 'pandas.Categorical', 'pd.Categorical', (['adata.obs[cluster_label_new]'], {'categories': 'cluster_numbers_ordered'}), '(adata.obs[cluster_label_new], categories=cluster_numbers_ordered\n )\n', (6757, 6828), True, 'import pandas as pd\n'), ((10366, 10451), 'pandas.Categorical', 'pd.Categorical', (['adata.obs[cluster_label_new]'], {'categories': 'cluster_numbers_ordered'}), '(adata.obs[cluster_label_new], categories=cluster_numbers_ordered\n )\n', (10380, 10451), True, 'import pandas as pd\n'), ((20147, 20168), 'pandas.isnull', 'pd.isnull', (['index_name'], {}), '(index_name)\n', (20156, 20168), True, 'import pandas as pd\n'), ((5869, 5888), 'scanpy.tl.pca', 'sc.tl.pca', (['subadata'], {}), '(subadata)\n', (5878, 5888), True, 'import scanpy as sc\n'), ((5901, 5976), 'scanpy.tl.leiden', 'sc.tl.leiden', (['subadata'], {'resolution': 'cluster_res', 'key_added': 'cluster_label_new'}), '(subadata, resolution=cluster_res, key_added=cluster_label_new)\n', (5913, 5976), True, 'import scanpy as sc\n'), ((6675, 6701), 'numpy.argsort', 'np.argsort', (['prefix_cluster'], {}), '(prefix_cluster)\n', (6685, 6701), True, 'import numpy as np\n'), ((8857, 8876), 'scanpy.tl.pca', 'sc.tl.pca', (['subadata'], {}), '(subadata)\n', (8866, 8876), True, 'import scanpy as sc\n'), ((9052, 9110), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['subadata'], {'n_neighbors': '(30)', 'use_rep': 'use_rep'}), '(subadata, n_neighbors=30, use_rep=use_rep)\n', (9067, 9110), True, 'import scanpy as sc\n'), ((9183, 9258), 'scanpy.tl.leiden', 'sc.tl.leiden', (['subadata'], {'resolution': 'cluster_res', 'key_added': 'cluster_label_new'}), '(subadata, resolution=cluster_res, key_added=cluster_label_new)\n', (9195, 9258), True, 'import scanpy as sc\n'), ((10298, 10324), 'numpy.argsort', 'np.argsort', (['prefix_cluster'], {}), '(prefix_cluster)\n', (10308, 10324), True, 'import numpy as np\n'), ((12460, 12529), 'scanpy.tl.rank_genes_groups', 'sc.tl.rank_genes_groups', (['adata'], {'groupby': 
'cluster_label', 'n_genes': 'ngenes'}), '(adata, groupby=cluster_label, n_genes=ngenes)\n', (12483, 12529), True, 'import scanpy as sc\n'), ((15134, 15227), 'scanpy.tl.rank_genes_groups', 'sc.tl.rank_genes_groups', (['subadata'], {'groupby': 'cluster_label', 'groups': 'siblings', 'n_genes': 'ngenes'}), '(subadata, groupby=cluster_label, groups=siblings,\n n_genes=ngenes)\n', (15157, 15227), True, 'import scanpy as sc\n'), ((9379, 9433), 'scanpy.external.tl.phenograph', 'sce.tl.phenograph', (['subadata.obsm[use_rep]'], {'k': 'cluster_k'}), '(subadata.obsm[use_rep], k=cluster_k)\n', (9396, 9433), True, 'import scanpy.external as sce\n')] |
import numpy as np
import cv2
import torch
import torch.nn as nn
def remap_using_flow_fields(image, disp_x, disp_y, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_CONSTANT):
"""
Opencv remap
map_x contains the index of the matching horizontal position of each pixel [i,j] while map_y contains the
index of the matching vertical position of each pixel [i,j]
All arrays are numpy
args:
image: image to remap, HxWxC
disp_x: displacement in the horizontal direction to apply to each pixel. must be float32. HxW
disp_y: displacement in the vertical direction to apply to each pixel. must be float32. HxW
interpolation
border_mode
output:
remapped image. HxWxC
"""
h_scale, w_scale=disp_x.shape[:2]
# estimate the grid
X, Y = np.meshgrid(np.linspace(0, w_scale - 1, w_scale),
np.linspace(0, h_scale - 1, h_scale))
map_x = (X+disp_x).astype(np.float32)
map_y = (Y+disp_y).astype(np.float32)
remapped_image = cv2.remap(image, map_x, map_y, interpolation=interpolation, borderMode=border_mode)
return remapped_image
def remap_using_correspondence_map(image, map_x, map_y, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_CONSTANT):
"""
Opencv remap
map_x contains the index of the matching horizontal position of each pixel [i,j] while map_y contains the
index of the matching vertical position of each pixel [i,j]
All arrays are numpy
args:
image: image to remap, HxWxC
map_x: mapping in the horizontal direction to apply to each pixel. must be float32. HxW
map_y: mapping in the vertical direction to apply to each pixel. must be float32. HxW
interpolation
border_mode
output:
remapped image. HxWxC
"""
remapped_image = cv2.remap(image, map_x, map_y, interpolation=interpolation, borderMode=border_mode)
return remapped_image
def warp(x, flo):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
args:
x: [B, C, H, W]
flo: [B, 2, H, W] flow
outputs:
output: warped x [B, C, H, W]
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = grid + flo
# makes a mapping out of the flow
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
if float(torch.__version__[:3]) >= 1.3:
output = nn.functional.grid_sample(x, vgrid, align_corners=True)
else:
output = nn.functional.grid_sample(x, vgrid)
return output
def warp_with_mapping(x, vgrid):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
args:
x: [B, C, H, W] (im2)
vgrid: [B, 2, H, W] mapping instead of flow
outputs:
output: warped x [B, C, H, W]
"""
B, C, H, W = x.size()
# mesh grid
vgrid = vgrid.clone()
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
if float(torch.__version__[:3]) >= 1.3:
output = nn.functional.grid_sample(x, vgrid, align_corners=True)
else:
output = nn.functional.grid_sample(x, vgrid)
return output | [
"torch.nn.functional.grid_sample",
"torch.cat",
"cv2.remap",
"torch.arange",
"numpy.linspace"
] | [((1040, 1128), 'cv2.remap', 'cv2.remap', (['image', 'map_x', 'map_y'], {'interpolation': 'interpolation', 'borderMode': 'border_mode'}), '(image, map_x, map_y, interpolation=interpolation, borderMode=\n border_mode)\n', (1049, 1128), False, 'import cv2\n'), ((1850, 1938), 'cv2.remap', 'cv2.remap', (['image', 'map_x', 'map_y'], {'interpolation': 'interpolation', 'borderMode': 'border_mode'}), '(image, map_x, map_y, interpolation=interpolation, borderMode=\n border_mode)\n', (1859, 1938), False, 'import cv2\n'), ((836, 872), 'numpy.linspace', 'np.linspace', (['(0)', '(w_scale - 1)', 'w_scale'], {}), '(0, w_scale - 1, w_scale)\n', (847, 872), True, 'import numpy as np\n'), ((897, 933), 'numpy.linspace', 'np.linspace', (['(0)', '(h_scale - 1)', 'h_scale'], {}), '(0, h_scale - 1, h_scale)\n', (908, 933), True, 'import numpy as np\n'), ((2865, 2920), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['x', 'vgrid'], {'align_corners': '(True)'}), '(x, vgrid, align_corners=True)\n', (2890, 2920), True, 'import torch.nn as nn\n'), ((2948, 2983), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['x', 'vgrid'], {}), '(x, vgrid)\n', (2973, 2983), True, 'import torch.nn as nn\n'), ((3622, 3677), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['x', 'vgrid'], {'align_corners': '(True)'}), '(x, vgrid, align_corners=True)\n', (3647, 3677), True, 'import torch.nn as nn\n'), ((3705, 3740), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['x', 'vgrid'], {}), '(x, vgrid)\n', (3730, 3740), True, 'import torch.nn as nn\n'), ((2442, 2464), 'torch.cat', 'torch.cat', (['(xx, yy)', '(1)'], {}), '((xx, yy), 1)\n', (2451, 2464), False, 'import torch\n'), ((2238, 2256), 'torch.arange', 'torch.arange', (['(0)', 'W'], {}), '(0, W)\n', (2250, 2256), False, 'import torch\n'), ((2291, 2309), 'torch.arange', 'torch.arange', (['(0)', 'H'], {}), '(0, H)\n', (2303, 2309), False, 'import torch\n')] |
"""
Example network for the living machines paper
<NAME>
January 27th 2022
"""
import numpy as np
import matplotlib.pyplot as plt
from sns_toolbox.design.neurons import NonSpikingNeuron
from sns_toolbox.design.connections import NonSpikingSynapse
from sns_toolbox.design.networks import Network
from sns_toolbox.simulate.backends import SNS_Numpy
spiking = True
delay = True
neuron_type = NonSpikingNeuron()
slow_neuron_type = NonSpikingNeuron(membrane_capacitance=50.0)
synapse_excitatory = NonSpikingSynapse(relative_reversal_potential=40.0)
synapse_inhibitory = NonSpikingSynapse(max_conductance=1.0,relative_reversal_potential=-40.0)
synapse_modulatory = NonSpikingSynapse(relative_reversal_potential=0.0)
net = Network(name='Network')
net.add_neuron(neuron_type,name='0',color='cornflowerblue')
net.add_neuron(neuron_type,name='1',color='darkorange')
net.add_neuron(slow_neuron_type,name='2',color='firebrick')
net.add_connection(synapse_excitatory,'0','1')
net.add_connection(synapse_excitatory,'0','2')
net.add_connection(synapse_modulatory,'1','1')
net.add_connection(synapse_inhibitory,'2','0')
net.add_input('0',name='Iapp')
net.add_output('0',name='O0')
net.add_output('1',name='O1')
net.add_output('2',name='O2')
# net.render_graph(view=True,imgFormat='svg')
# Set simulation parameters
dt = 0.01
t_max = 50
# Initialize a vector of timesteps
t = np.arange(0, t_max, dt)
# Initialize vectors which store the input to our network, and for data to be written to during simulation from outputs
inputs = np.zeros([len(t),1])+20.0 # Input vector must be 2d, even if second dimension is 1
data = np.zeros([len(t),3])
# Compile the network to use the Numpy CPU backend (if you want to see what's happening, set debug to true)
model = SNS_Numpy(net, delay=delay, spiking=spiking, dt=dt, debug=False)
"""Simulate the network"""
# At every step, apply the current input to a forward pass of the network and store the results in 'data'
for i in range(len(t)):
data[i,:] = model.forward(inputs[i,:])
"""Plot the data"""
# First section
plt.figure()
# plt.title('First Section')
plt.plot(t,data.transpose()[:][0],label='SourceNrn',color='blue') # When plotting, all data needs to be transposed first
plt.plot(t,data.transpose()[:][1],label='SourceNrn',color='orange',linestyle='dashed')
plt.plot(t,data.transpose()[:][2],label='SourceNrn',color='red',linestyle='dotted')
# plt.legend()
# # Second section
# plt.figure()
# # plt.title('Second Section')
# plt.plot(t,data.transpose()[:][1],label='SourceNrn',color='orange')
# # plt.legend()
#
# # Third section
# plt.figure()
# # plt.title('Third Section')
# plt.plot(t,data.transpose()[:][0],label='SourceNrn',color='red')
# # plt.legend()
plt.show() # Show the plots | [
"sns_toolbox.design.neurons.NonSpikingNeuron",
"sns_toolbox.design.connections.NonSpikingSynapse",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.arange",
"sns_toolbox.design.networks.Network",
"sns_toolbox.simulate.backends.SNS_Numpy"
] | [((393, 411), 'sns_toolbox.design.neurons.NonSpikingNeuron', 'NonSpikingNeuron', ([], {}), '()\n', (409, 411), False, 'from sns_toolbox.design.neurons import NonSpikingNeuron\n'), ((431, 474), 'sns_toolbox.design.neurons.NonSpikingNeuron', 'NonSpikingNeuron', ([], {'membrane_capacitance': '(50.0)'}), '(membrane_capacitance=50.0)\n', (447, 474), False, 'from sns_toolbox.design.neurons import NonSpikingNeuron\n'), ((496, 547), 'sns_toolbox.design.connections.NonSpikingSynapse', 'NonSpikingSynapse', ([], {'relative_reversal_potential': '(40.0)'}), '(relative_reversal_potential=40.0)\n', (513, 547), False, 'from sns_toolbox.design.connections import NonSpikingSynapse\n'), ((569, 642), 'sns_toolbox.design.connections.NonSpikingSynapse', 'NonSpikingSynapse', ([], {'max_conductance': '(1.0)', 'relative_reversal_potential': '(-40.0)'}), '(max_conductance=1.0, relative_reversal_potential=-40.0)\n', (586, 642), False, 'from sns_toolbox.design.connections import NonSpikingSynapse\n'), ((663, 713), 'sns_toolbox.design.connections.NonSpikingSynapse', 'NonSpikingSynapse', ([], {'relative_reversal_potential': '(0.0)'}), '(relative_reversal_potential=0.0)\n', (680, 713), False, 'from sns_toolbox.design.connections import NonSpikingSynapse\n'), ((721, 744), 'sns_toolbox.design.networks.Network', 'Network', ([], {'name': '"""Network"""'}), "(name='Network')\n", (728, 744), False, 'from sns_toolbox.design.networks import Network\n'), ((1369, 1392), 'numpy.arange', 'np.arange', (['(0)', 't_max', 'dt'], {}), '(0, t_max, dt)\n', (1378, 1392), True, 'import numpy as np\n'), ((1752, 1816), 'sns_toolbox.simulate.backends.SNS_Numpy', 'SNS_Numpy', (['net'], {'delay': 'delay', 'spiking': 'spiking', 'dt': 'dt', 'debug': '(False)'}), '(net, delay=delay, spiking=spiking, dt=dt, debug=False)\n', (1761, 1816), False, 'from sns_toolbox.simulate.backends import SNS_Numpy\n'), ((2055, 2067), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2065, 2067), True, 'import matplotlib.pyplot 
as plt\n'), ((2710, 2720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2718, 2720), True, 'import matplotlib.pyplot as plt\n')] |
# read in all LAMOST labels
import numpy as np
from matplotlib import rc
from matplotlib import cm
import matplotlib as mpl
rc('font', family='serif')
rc('text', usetex=True)
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
direc = "/home/annaho/aida41040/annaho/TheCannon/examples"
teff = np.loadtxt(
"%s/lamost_dr2/lamost_labels_all_dates.csv" %direc, delimiter=',',
dtype='float', usecols=(1,), skiprows=1)
logg = np.loadtxt(
"%s/lamost_dr2/lamost_labels_all_dates.csv" %direc, delimiter=',',
dtype='float', usecols=(2,), skiprows=1)
feh = np.loadtxt(
"%s/lamost_dr2/lamost_labels_all_dates.csv" %direc, delimiter=',',
dtype='float', usecols=(3,), skiprows=1)
# read in cannon labels
#labels_cannon = np.load("%s/test_training_overlap/test_labels.npz" %direc)['arr_0']
labels_cannon = np.load("../run_9_more_metal_poor/all_cannon_labels.npz")['arr_0']
cannon_teff = labels_cannon[:,0]
cannon_logg = labels_cannon[:,1]
cannon_feh = labels_cannon[:,2]
# read in apogee labels
direc_apogee = "../run_9_more_metal_poor/"
tr_IDs = np.load("%s/tr_id.npz" %direc_apogee)['arr_0']
labels_apogee = np.load("%s/tr_label.npz" %direc_apogee)['arr_0']
apogee_teff = labels_apogee[:,0]
apogee_logg = labels_apogee[:,1]
apogee_feh = labels_apogee[:,2]
# read in lamost labels
IDs_lamost = np.loadtxt(
"%s/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt" %direc,
usecols=(0,), dtype=(str))
labels_all_lamost = np.loadtxt(
"%s/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt" %direc,
usecols=(3,4,5), dtype=(float))
inds = np.array([np.where(IDs_lamost==a)[0][0] for a in tr_IDs])
labels_lamost = labels_all_lamost[inds,:]
lamost_teff = labels_lamost[:,0]
lamost_logg = labels_lamost[:,1]
lamost_feh = labels_lamost[:,2]
# plot all
fig, (ax0,ax1) = plt.subplots(ncols=2, figsize=(12,6),
sharex=True, sharey=True)
plt.subplots_adjust(wspace=0.3)
def dr1(ax):
ax.hist2d(teff,logg,bins=1000,norm=LogNorm(), cmap="Greys")
ax.set_ylim(ax0.get_ylim()[1],ax0.get_ylim()[0])
ax.set_xlim(ax0.get_xlim()[1], ax0.get_xlim()[0])
ax.set_xlim(7500, 3800)
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
dr1(ax0)
dr1(ax1)
cmap = cm.plasma
# plot training set, lamost
lamost_feh[lamost_feh>0.25]=0.25
lamost_feh[lamost_feh<-1.1]=-1.1
im = ax0.scatter(lamost_teff,lamost_logg,c=lamost_feh, s=1, lw=0, cmap=cmap)
cbar = plt.colorbar(im, ax=ax0, label="[Fe/H] [dex] from LAMOST DR2")
cbar.ax.tick_params(labelsize=16)
cbar.set_clim(-1.1,0.25)
ax0.set_xlabel("$\mbox{T}_{\mbox{eff}}$ [K]", fontsize=16)
ax0.set_ylabel("log g [dex]", fontsize=16)
ax0.text(0.05, 0.95, "Colored Points: reference set\nwith their LAMOST labels",
horizontalalignment='left', verticalalignment='top', transform=ax0.transAxes,
fontsize=16)
ax0.text(0.05, 0.80, "Black Points: \n Full LAMOST DR2", transform=ax0.transAxes,
fontsize=16, verticalalignment='top', horizontalalignment='left')
ax0.locator_params(nbins=5)
# plot training set, apogee
apogee_feh[apogee_feh>0.25] = 0.25
apogee_feh[apogee_feh<-1.1] = -1.1
im = ax1.scatter(apogee_teff,apogee_logg,c=apogee_feh, s=1, lw=0, cmap=cmap)
cbar = plt.colorbar(im, ax=ax1, label="[Fe/H] [dex] from APOGEE DR12")
cbar.ax.tick_params(labelsize=16)
cbar.set_clim(-1.1,0.25)
ax1.set_xlabel("${\mbox{T}_{\mbox{eff}}}$ [K]", fontsize=16)
ax1.set_ylabel("log g [dex]", fontsize=16)
ax1.locator_params(nbins=5)
ax1.text(0.05, 0.95, "Colored Points: reference set\nwith their APOGEE labels",
horizontalalignment='left', verticalalignment='top', transform=ax1.transAxes,
fontsize=16)
ax1.text(0.05, 0.80, "Black Points: \n Full LAMOST DR2", transform=ax1.transAxes,
fontsize=16, verticalalignment='top', horizontalalignment='left')
plt.subplots_adjust(top=0.85)
#plt.show()
plt.savefig("ts_in_full_lamost_label_space.png")
plt.close()
| [
"matplotlib.rc",
"numpy.load",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"matplotlib.colors.LogNorm",
"numpy.where",
"numpy.loadtxt",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((125, 151), 'matplotlib.rc', 'rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (127, 151), False, 'from matplotlib import rc\n'), ((152, 175), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (154, 175), False, 'from matplotlib import rc\n'), ((314, 438), 'numpy.loadtxt', 'np.loadtxt', (["('%s/lamost_dr2/lamost_labels_all_dates.csv' % direc)"], {'delimiter': '""","""', 'dtype': '"""float"""', 'usecols': '(1,)', 'skiprows': '(1)'}), "('%s/lamost_dr2/lamost_labels_all_dates.csv' % direc, delimiter=\n ',', dtype='float', usecols=(1,), skiprows=1)\n", (324, 438), True, 'import numpy as np\n'), ((458, 582), 'numpy.loadtxt', 'np.loadtxt', (["('%s/lamost_dr2/lamost_labels_all_dates.csv' % direc)"], {'delimiter': '""","""', 'dtype': '"""float"""', 'usecols': '(2,)', 'skiprows': '(1)'}), "('%s/lamost_dr2/lamost_labels_all_dates.csv' % direc, delimiter=\n ',', dtype='float', usecols=(2,), skiprows=1)\n", (468, 582), True, 'import numpy as np\n'), ((609, 733), 'numpy.loadtxt', 'np.loadtxt', (["('%s/lamost_dr2/lamost_labels_all_dates.csv' % direc)"], {'delimiter': '""","""', 'dtype': '"""float"""', 'usecols': '(3,)', 'skiprows': '(1)'}), "('%s/lamost_dr2/lamost_labels_all_dates.csv' % direc, delimiter=\n ',', dtype='float', usecols=(3,), skiprows=1)\n", (619, 733), True, 'import numpy as np\n'), ((1366, 1486), 'numpy.loadtxt', 'np.loadtxt', (["('%s/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt' % direc)"], {'usecols': '(0,)', 'dtype': 'str'}), "(\n '%s/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt' %\n direc, usecols=(0,), dtype=str)\n", (1376, 1486), True, 'import numpy as np\n'), ((1529, 1656), 'numpy.loadtxt', 'np.loadtxt', (["('%s/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt' % direc)"], {'usecols': '(3, 4, 5)', 'dtype': 'float'}), "(\n '%s/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt' %\n direc, usecols=(3, 4, 5), dtype=float)\n", 
(1539, 1656), True, 'import numpy as np\n'), ((1912, 1976), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(12, 6)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=2, figsize=(12, 6), sharex=True, sharey=True)\n', (1924, 1976), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2038), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.3)'}), '(wspace=0.3)\n', (2026, 2038), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2617), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax0', 'label': '"""[Fe/H] [dex] from LAMOST DR2"""'}), "(im, ax=ax0, label='[Fe/H] [dex] from LAMOST DR2')\n", (2567, 2617), True, 'import matplotlib.pyplot as plt\n'), ((3327, 3390), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax1', 'label': '"""[Fe/H] [dex] from APOGEE DR12"""'}), "(im, ax=ax1, label='[Fe/H] [dex] from APOGEE DR12')\n", (3339, 3390), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3949), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.85)'}), '(top=0.85)\n', (3939, 3949), True, 'import matplotlib.pyplot as plt\n'), ((3962, 4010), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ts_in_full_lamost_label_space.png"""'], {}), "('ts_in_full_lamost_label_space.png')\n", (3973, 4010), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4022), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4020, 4022), True, 'import matplotlib.pyplot as plt\n'), ((872, 929), 'numpy.load', 'np.load', (['"""../run_9_more_metal_poor/all_cannon_labels.npz"""'], {}), "('../run_9_more_metal_poor/all_cannon_labels.npz')\n", (879, 929), True, 'import numpy as np\n'), ((1116, 1154), 'numpy.load', 'np.load', (["('%s/tr_id.npz' % direc_apogee)"], {}), "('%s/tr_id.npz' % direc_apogee)\n", (1123, 1154), True, 'import numpy as np\n'), ((1179, 1220), 'numpy.load', 'np.load', (["('%s/tr_label.npz' % direc_apogee)"], {}), "('%s/tr_label.npz' % direc_apogee)\n", (1186, 
1220), True, 'import numpy as np\n'), ((2092, 2101), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (2099, 2101), False, 'from matplotlib.colors import LogNorm\n'), ((1694, 1719), 'numpy.where', 'np.where', (['(IDs_lamost == a)'], {}), '(IDs_lamost == a)\n', (1702, 1719), True, 'import numpy as np\n')] |
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
file = sys.argv[1]
bins = int(sys.argv[2])
npix = int(sys.argv[3])
psf = np.zeros( [ bins, npix ] )
prow, pcol, pval = np.loadtxt( file, usecols=(0,1,2), unpack=True )
for vals in zip ( prow, pcol, pval ):
psf[vals[0],vals[1]] += vals[2]
plt.figure(1)
plt.imshow( psf, interpolation='nearest', cmap=cm.Oranges, aspect='auto', vmin=0, vmax=1.0e-6 )
mappng = file + '.png'
plt.savefig ( mappng )
plt.clf()
plt.close()
| [
"matplotlib.pyplot.clf",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.savefig"
] | [((164, 186), 'numpy.zeros', 'np.zeros', (['[bins, npix]'], {}), '([bins, npix])\n', (172, 186), True, 'import numpy as np\n'), ((211, 259), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'usecols': '(0, 1, 2)', 'unpack': '(True)'}), '(file, usecols=(0, 1, 2), unpack=True)\n', (221, 259), True, 'import numpy as np\n'), ((336, 349), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (346, 349), True, 'import matplotlib.pyplot as plt\n'), ((351, 447), 'matplotlib.pyplot.imshow', 'plt.imshow', (['psf'], {'interpolation': '"""nearest"""', 'cmap': 'cm.Oranges', 'aspect': '"""auto"""', 'vmin': '(0)', 'vmax': '(1e-06)'}), "(psf, interpolation='nearest', cmap=cm.Oranges, aspect='auto',\n vmin=0, vmax=1e-06)\n", (361, 447), True, 'import matplotlib.pyplot as plt\n'), ((471, 490), 'matplotlib.pyplot.savefig', 'plt.savefig', (['mappng'], {}), '(mappng)\n', (482, 490), True, 'import matplotlib.pyplot as plt\n'), ((494, 503), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (501, 503), True, 'import matplotlib.pyplot as plt\n'), ((505, 516), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (514, 516), True, 'import matplotlib.pyplot as plt\n')] |
from src.utils.pose import Pose2D, PoseConfig
import random
import cv2
import numpy as np
class DataAugmentation:
    """Random photometric and geometric augmentation for 2D pose samples.

    ``apply`` color-distorts the image and, with probability 0.5, mirrors
    the image and its poses horizontally (swapping left/right joints).
    """

    def __init__(self):
        # Start from the identity permutation, then swap each left/right
        # joint pair so that a horizontally mirrored image keeps consistent
        # joint semantics.  (Replaces 12 repetitive assignments.)
        self.sym_permutation = list(range(len(PoseConfig.NAMES)))
        mirrored_pairs = [
            (PoseConfig.L_SHOULDER, PoseConfig.R_SHOULDER),
            (PoseConfig.L_ELBOW, PoseConfig.R_ELBOW),
            (PoseConfig.L_WRIST, PoseConfig.R_WRIST),
            (PoseConfig.L_HIP, PoseConfig.R_HIP),
            (PoseConfig.L_KNEE, PoseConfig.R_KNEE),
            (PoseConfig.L_ANKLE, PoseConfig.R_ANKLE),
        ]
        for left, right in mirrored_pairs:
            self.sym_permutation[left] = right
            self.sym_permutation[right] = left

    def apply(self, image, poses):
        """Return a color-distorted (and possibly mirrored) copy of the sample."""
        image = self._distort_image(image)
        if random.random() > 0.5:
            image, poses = self._symetry(image, poses)
        return image, poses

    def random_subsample(self, image):
        """With probability 0.2, down- then up-sample the image (blurring it)."""
        if random.random() < 0.20:
            # Shrink to 50-100% of the original size, then restore it.
            size_reduction = 0.5 + 0.5 * random.random()
            init_width = image.shape[1]
            init_height = image.shape[0]
            width = int(image.shape[1] * size_reduction)
            height = int(image.shape[0] * size_reduction)
            image = cv2.resize(image, (width, height))
            image = cv2.resize(image, (init_width, init_height))
        return image

    def _rand_scale(self, s):
        """Return a random scale in [1/s, s], equally likely above or below 1."""
        scale = random.uniform(1, s)
        if random.randint(1, 10000) % 2:
            return scale
        return 1. / scale

    def _distort_image(self, image):
        """Randomly darken the image in HSV space and maybe blur it."""
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        image = image.astype(np.float32)
        if random.random() < 0.50:
            # Scale the V (brightness) channel by a random factor < 1.
            if random.random() < 0.50:
                image[:, :, 2] = image[:, :, 2] * (0.4 + 0.40 * random.random())
            else:
                image[:, :, 2] = image[:, :, 2] * (0.4 + 0.60 * random.random())
        image = np.clip(image, 0, 255)
        image = image.astype(np.uint8)
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        image = self.random_subsample(image)
        return image

    def _symetry(self, image, poses):
        """Mirror the image horizontally and relabel left/right joints.

        (Method name kept as-is -- "symetry" [sic] -- for compatibility.)
        """
        image = cv2.flip(image, 1)
        new_poses = []
        for pose in poses:
            joints = pose.get_joints()
            is_active_joints = pose.get_active_joints()
            # Mirror x of the annotated joints, then swap left/right rows.
            joints[is_active_joints, 0] = 1.0 - joints[is_active_joints, 0]
            joints = joints[self.sym_permutation, :]
            new_poses.append(Pose2D(joints))
        return image, new_poses
| [
"random.randint",
"random.uniform",
"cv2.cvtColor",
"numpy.clip",
"random.random",
"cv2.flip",
"src.utils.pose.Pose2D",
"cv2.resize"
] | [((1775, 1795), 'random.uniform', 'random.uniform', (['(1)', 's'], {}), '(1, s)\n', (1789, 1795), False, 'import random\n'), ((1947, 1985), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, cv2.COLOR_RGB2HSV)\n', (1959, 1985), False, 'import cv2\n'), ((2298, 2320), 'numpy.clip', 'np.clip', (['image', '(0)', '(255)'], {}), '(image, 0, 255)\n', (2305, 2320), True, 'import numpy as np\n'), ((2378, 2416), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2RGB'], {}), '(image, cv2.COLOR_HSV2RGB)\n', (2390, 2416), False, 'import cv2\n'), ((2541, 2559), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2549, 2559), False, 'import cv2\n'), ((1152, 1167), 'random.random', 'random.random', ([], {}), '()\n', (1165, 1167), False, 'import random\n'), ((1312, 1327), 'random.random', 'random.random', ([], {}), '()\n', (1325, 1327), False, 'import random\n'), ((1605, 1639), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {}), '(image, (width, height))\n', (1615, 1639), False, 'import cv2\n'), ((1660, 1702), 'cv2.resize', 'cv2.resize', (['image', '(initWidth, initHeight)'], {}), '(image, (initWidth, initHeight))\n', (1670, 1702), False, 'import cv2\n'), ((1808, 1832), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (1822, 1832), False, 'import random\n'), ((2039, 2054), 'random.random', 'random.random', ([], {}), '()\n', (2052, 2054), False, 'import random\n'), ((2076, 2091), 'random.random', 'random.random', ([], {}), '()\n', (2089, 2091), False, 'import random\n'), ((2864, 2878), 'src.utils.pose.Pose2D', 'Pose2D', (['joints'], {}), '(joints)\n', (2870, 2878), False, 'from src.utils.pose import Pose2D, PoseConfig\n'), ((1376, 1391), 'random.random', 'random.random', ([], {}), '()\n', (1389, 1391), False, 'import random\n'), ((2164, 2179), 'random.random', 'random.random', ([], {}), '()\n', (2177, 2179), False, 'import random\n'), ((2263, 2278), 'random.random', 'random.random', ([], 
{}), '()\n', (2276, 2278), False, 'import random\n')] |
# Modified version of scikit-learn's DictVectorizer
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.externals import six
from sklearn.externals.six.moves import xrange
from sklearn.utils import check_array, tosequence
from sklearn.utils.fixes import frombuffer_empty
# String renderings of values that must be treated as missing/invalid and
# therefore never emitted as features.
bad_vals_as_strings = {
    str(float('nan')), str(float('inf')), str(float('-inf')),
    'None', 'none', 'NaN', 'NAN', 'nan', 'NULL', 'null', '', 'inf', '-inf',
}
class DataFrameVectorizer(BaseEstimator, TransformerMixin):
    """Transforms a DataFrame to vectors.

    Just like scikit-learn's DictVectorizer, but adjusted to take a DataFrame
    as input, instead of a list of dictionaries.

    This transformer turns a DataFrame into Numpy arrays or scipy.sparse
    matrices for use with scikit-learn estimators.

    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".

    However, note that this transformer will only do a binary one-hot encoding
    when feature values are of type string. If categorical features are
    represented as numeric values such as int, the DictVectorizer can be
    followed by OneHotEncoder to complete binary one-hot encoding.

    Features that do not occur in a row will have a zero value
    in the resulting array/matrix.

    Read more in the :ref:`User Guide <dict_feature_extraction>`.

    Parameters
    ----------
    column_descriptions : dict, optional
        Maps a column name to its role.  Columns whose role is one of
        'ignore', 'output', 'regressor' or 'classifier' are never vectorized;
        a role of 'categorical' forces one-hot encoding of that column.
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator : string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse : boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort : boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
        True by default.

    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").

    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
      encoded as columns of integers.
    """

    def __init__(self, column_descriptions=None, dtype=np.float32, separator="=", sparse=True, sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort
        # Fixed: compare against None with `is`, not `==` (PEP 8, E711);
        # `==` would invoke arbitrary __eq__ implementations.
        if column_descriptions is None:
            column_descriptions = {}
        self.column_descriptions = column_descriptions
        # Column roles that must never be turned into features.
        self.vals_to_drop = set(['ignore', 'output', 'regressor', 'classifier'])

    def fit(self, X, y=None):
        """Learn a list of column_name -> indices mappings.

        Parameters
        ----------
        X : DataFrame
        y : (ignored)

        Returns
        -------
        self
        """
        feature_names = []
        vocab = {}

        for col_name in X.columns:
            # Skip columns whose declared role marks them as non-features
            # ('ignore', 'output', 'regressor', 'classifier').
            if self.column_descriptions.get(col_name, False) not in self.vals_to_drop:
                if X[col_name].dtype == 'object' or self.column_descriptions.get(col_name, False) == 'categorical':
                    # If this is a categorical column, or the dtype continues to be object,
                    # iterate through each row to get all the possible values that we are
                    # one-hot-encoding.
                    for val in X[col_name]:
                        try:
                            feature_name = col_name + self.separator + str(val)
                        except UnicodeEncodeError:
                            # Fall back to an ASCII-only rendering of the value.
                            str_val = val.encode('ascii', 'ignore').decode('ascii')
                            feature_name = col_name + self.separator + str_val
                        if feature_name not in vocab:
                            feature_names.append(feature_name)
                            vocab[feature_name] = len(vocab)

                # Ideally we shouldn't have to check for duplicate columns, but in case
                # we're passed a DataFrame with duplicate columns, consolidate down to a
                # single column; the reasonable onus is on the user to not pass in two
                # data columns with different meanings but the same name.  And if this is
                # a categorical column, do not include the column name itself, just the
                # feature_names computed above.
                elif col_name not in vocab:
                    feature_names.append(col_name)
                    vocab[col_name] = len(vocab)

        if self.sort:
            feature_names.sort()
            vocab = dict((f, i) for i, f in enumerate(feature_names))

        self.feature_names_ = feature_names
        self.vocabulary_ = vocab

        return self

    def _transform(self, X):
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")

        dtype = self.dtype
        feature_names = self.feature_names_
        vocab = self.vocabulary_

        # Build CSR components incrementally regardless of the `sparse`
        # setting; densify at the end if requested.
        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []

        if isinstance(X, dict):
            # Single-row dict input: one CSR row.
            for f, val in X.items():
                if isinstance(val, six.string_types):
                    f = f + self.separator + val
                    val = 1

                if f in vocab and str(val) not in bad_vals_as_strings:
                    # Get the index position from vocab, then append that index
                    # position to indices.
                    indices.append(vocab[f])
                    # Convert the val to the correct dtype, then append to our
                    # values list.
                    values.append(dtype(val))

            indptr.append(len(indices))

            if len(indptr) == 1:
                raise ValueError('The dictionary passed into DataFrameVectorizer is empty')

        else:
            # DataFrame input: one CSR row per DataFrame row.
            for row_idx, row in X.iterrows():
                for col_idx, val in enumerate(row):
                    f = X.columns[col_idx]

                    if isinstance(val, six.string_types):
                        f = f + self.separator + val
                        val = 1

                    # Only include this in our output if it was part of our
                    # training data.  Silently ignore it otherwise.
                    if f in vocab and str(val) not in bad_vals_as_strings:
                        indices.append(vocab[f])
                        values.append(dtype(val))

                indptr.append(len(indices))

            if len(indptr) == 1:
                raise ValueError('The DataFrame passed into DataFrameVectorizer is empty')

        indices = frombuffer_empty(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))

        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)

        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()

        return result_matrix

    def transform(self, X, y=None):
        """Transform DataFrame to array or sparse matrix.

        Columns (or string values in categorical columns) not encountered
        during fit will be silently ignored.

        Parameters
        ----------
        X : DataFrame where all values are strings or convertible to dtype.
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X)

    def get_feature_names(self):
        """Returns a list of feature names, ordered by their indices.

        If one-of-K coding is applied to categorical features, this will
        include the constructed feature names but not the original ones.
        """
        return self.feature_names_

    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.

        This function modifies the estimator in-place.

        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : boolean, optional
            Whether support is a list of indices.

        Returns
        -------
        self

        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names()
        ['bar', 'baz', 'foo']
        >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
        DictVectorizer(dtype=..., separator='=', sort=True,
                sparse=True)
        >>> v.get_feature_names()
        ['bar', 'foo']
        """
        if not indices:
            # Convert a boolean mask into index positions.
            support = np.where(support)[0]

        names = self.feature_names_
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)

        self.vocabulary_ = new_vocab
        self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                    key=itemgetter(1))]

        return self
| [
"numpy.frombuffer",
"sklearn.utils.fixes.frombuffer_empty",
"scipy.sparse.csr_matrix",
"numpy.where",
"array.array",
"sklearn.externals.six.iteritems",
"operator.itemgetter"
] | [((6245, 6255), 'array.array', 'array', (['"""i"""'], {}), "('i')\n", (6250, 6255), False, 'from array import array\n'), ((6273, 6288), 'array.array', 'array', (['"""i"""', '[0]'], {}), "('i', [0])\n", (6278, 6288), False, 'from array import array\n'), ((8263, 8303), 'sklearn.utils.fixes.frombuffer_empty', 'frombuffer_empty', (['indices'], {'dtype': 'np.intc'}), '(indices, dtype=np.intc)\n', (8279, 8303), False, 'from sklearn.utils.fixes import frombuffer_empty\n'), ((8321, 8357), 'numpy.frombuffer', 'np.frombuffer', (['indptr'], {'dtype': 'np.intc'}), '(indptr, dtype=np.intc)\n', (8334, 8357), True, 'import numpy as np\n'), ((8429, 8495), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(values, indices, indptr)'], {'shape': 'shape', 'dtype': 'dtype'}), '((values, indices, indptr), shape=shape, dtype=dtype)\n', (8442, 8495), True, 'import scipy.sparse as sp\n'), ((5810, 5820), 'array.array', 'array', (['"""i"""'], {}), "('i')\n", (5815, 5820), False, 'from array import array\n'), ((11858, 11875), 'numpy.where', 'np.where', (['support'], {}), '(support)\n', (11866, 11875), True, 'import numpy as np\n'), ((12104, 12128), 'sklearn.externals.six.iteritems', 'six.iteritems', (['new_vocab'], {}), '(new_vocab)\n', (12117, 12128), False, 'from sklearn.externals import six\n'), ((12186, 12199), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (12196, 12199), False, 'from operator import itemgetter\n')] |
# -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
from matplotlib import animation
# Import from package
from seagull import lifeforms as lf
import seagull as sg
def test_simulator_run():
    """Check that run() reports the computed statistics as a dict."""
    board = sg.Board(size=(10, 10))
    board.add(lf.Blinker(length=3), loc=(0, 1))
    simulator = sg.Simulator(board)
    history_stats = simulator.run(sg.rules.conway_classic, iters=10)
    assert isinstance(history_stats, dict)
@pytest.mark.parametrize("exclude_init", [True, False])
def test_simulator_get_history_shape(exclude_init):
    """Verify the dimensions of the array returned by get_history()."""
    board = sg.Board(size=(10, 10))
    board.add(lf.Blinker(length=3), loc=(0, 1))
    simulator = sg.Simulator(board)
    simulator.run(sg.rules.conway_classic, iters=10)
    # The initial state adds one extra frame unless excluded.
    depth = 10 if exclude_init else 11
    assert simulator.get_history(exclude_init).shape == (depth, 10, 10)
def test_simulator_animate():
    """Animating after a run should yield a matplotlib FuncAnimation."""
    board = sg.Board(size=(10, 10))
    board.add(lf.Blinker(length=3), loc=(0, 1))
    simulator = sg.Simulator(board)
    simulator.run(sg.rules.conway_classic, iters=10)
    assert isinstance(simulator.animate(), animation.FuncAnimation)
def test_simulator_animate_without_run():
    """Calling animate() before run() must raise a ValueError."""
    board = sg.Board(size=(10, 10))
    board.add(lf.Blinker(length=3), loc=(0, 1))
    simulator = sg.Simulator(board)
    with pytest.raises(ValueError):
        simulator.animate()
def test_compute_statistics():
    """compute_statistics() should summarize a history into a dict."""
    board = sg.Board(size=(10, 10))
    board.add(lf.Blinker(length=3), loc=(0, 1))
    simulator = sg.Simulator(board)
    simulator.run(sg.rules.conway_classic, iters=10)
    summary = simulator.compute_statistics(simulator.get_history())
    assert isinstance(summary, dict)
def test_simulator_inplace():
    """Running a simulation must leave the board's state untouched."""
    board = sg.Board(size=(10, 10))
    board.add(lf.Glider(), loc=(0, 0))
    # Snapshot of the initial board state; must be the same afterwards.
    snapshot = board.state.copy()
    simulator = sg.Simulator(board)
    simulator.run(sg.rules.conway_classic, iters=10)
    assert np.array_equal(board.state, snapshot)
| [
"seagull.lifeforms.Glider",
"seagull.lifeforms.Blinker",
"seagull.Simulator",
"pytest.raises",
"numpy.array_equal",
"pytest.mark.parametrize",
"seagull.Board"
] | [((491, 545), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""exclude_init"""', '[True, False]'], {}), "('exclude_init', [True, False])\n", (514, 545), False, 'import pytest\n'), ((296, 319), 'seagull.Board', 'sg.Board', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (304, 319), True, 'import seagull as sg\n'), ((378, 397), 'seagull.Simulator', 'sg.Simulator', (['board'], {}), '(board)\n', (390, 397), True, 'import seagull as sg\n'), ((673, 696), 'seagull.Board', 'sg.Board', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (681, 696), True, 'import seagull as sg\n'), ((755, 774), 'seagull.Simulator', 'sg.Simulator', (['board'], {}), '(board)\n', (767, 774), True, 'import seagull as sg\n'), ((1064, 1087), 'seagull.Board', 'sg.Board', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (1072, 1087), True, 'import seagull as sg\n'), ((1146, 1165), 'seagull.Simulator', 'sg.Simulator', (['board'], {}), '(board)\n', (1158, 1165), True, 'import seagull as sg\n'), ((1423, 1446), 'seagull.Board', 'sg.Board', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (1431, 1446), True, 'import seagull as sg\n'), ((1505, 1524), 'seagull.Simulator', 'sg.Simulator', (['board'], {}), '(board)\n', (1517, 1524), True, 'import seagull as sg\n'), ((1688, 1711), 'seagull.Board', 'sg.Board', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (1696, 1711), True, 'import seagull as sg\n'), ((1770, 1789), 'seagull.Simulator', 'sg.Simulator', (['board'], {}), '(board)\n', (1782, 1789), True, 'import seagull as sg\n'), ((2037, 2060), 'seagull.Board', 'sg.Board', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (2045, 2060), True, 'import seagull as sg\n'), ((2219, 2238), 'seagull.Simulator', 'sg.Simulator', (['board'], {}), '(board)\n', (2231, 2238), True, 'import seagull as sg\n'), ((2297, 2336), 'numpy.array_equal', 'np.array_equal', (['board.state', 'init_board'], {}), '(board.state, init_board)\n', (2311, 2336), True, 'import numpy as np\n'), ((334, 354), 
'seagull.lifeforms.Blinker', 'lf.Blinker', ([], {'length': '(3)'}), '(length=3)\n', (344, 354), True, 'from seagull import lifeforms as lf\n'), ((711, 731), 'seagull.lifeforms.Blinker', 'lf.Blinker', ([], {'length': '(3)'}), '(length=3)\n', (721, 731), True, 'from seagull import lifeforms as lf\n'), ((1102, 1122), 'seagull.lifeforms.Blinker', 'lf.Blinker', ([], {'length': '(3)'}), '(length=3)\n', (1112, 1122), True, 'from seagull import lifeforms as lf\n'), ((1461, 1481), 'seagull.lifeforms.Blinker', 'lf.Blinker', ([], {'length': '(3)'}), '(length=3)\n', (1471, 1481), True, 'from seagull import lifeforms as lf\n'), ((1534, 1559), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1547, 1559), False, 'import pytest\n'), ((1726, 1746), 'seagull.lifeforms.Blinker', 'lf.Blinker', ([], {'length': '(3)'}), '(length=3)\n', (1736, 1746), True, 'from seagull import lifeforms as lf\n'), ((2075, 2086), 'seagull.lifeforms.Glider', 'lf.Glider', ([], {}), '()\n', (2084, 2086), True, 'from seagull import lifeforms as lf\n')] |
"""
Implementation of global motion estimators.
"""
import numpy as np
import os
import scipy.ndimage
import scipy.spatial
from ..utils import *
def _hausdorff_distance(E_1, E_2):
# binary structure
diamond = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
# extract only 1-pixel border line of objects
E_1_per = E_1 - scipy.ndimage.morphology.binary_erosion(E_1, structure=diamond)
E_2_per = E_2 - scipy.ndimage.morphology.binary_erosion(E_2, structure=diamond)
A = scipy.ndimage.morphology.distance_transform_edt(~E_2_per)[E_1_per].max()
B = scipy.ndimage.morphology.distance_transform_edt(~E_1_per)[E_2_per].max()
return np.max((A, B))
def globalEdgeMotion(frame1, frame2, r=6, method='hamming'):
    """Global motion estimation using edge features

    Given two frames, find a robust global translation vector
    found using edge information.

    Parameters
    ----------
    frame1 : ndarray
        first input frame, shape (1, M, N, C), (1, M, N), (M, N, C) or (M, N).
        A boolean array is treated as a precomputed edge map.
    frame2 : ndarray
        second input frame, same conventions as ``frame1``.
    r : int
        Search radius for measuring correspondences.
    method : string
        "hamming" --> use Hamming distance when measuring edge correspondence distances. The distance used in the census transform. [#f1]_
        "hausdorff" --> use Hausdorff distance when measuring edge correspondence distances. [#f2]_

    Returns
    ----------
    globalMotionVector : ndarray, shape (2,)
        The motion to minimize edge distances by moving frame2 with respect to frame1.

    References
    ----------
    .. [#f1] <NAME> and <NAME>. Non-parametric local transforms for computing visual correspondence. Computer Vision-ECCV, 151-158, 1994.
    .. [#f2] <NAME>, <NAME>, and <NAME>. Feature-based algorithms for detecting and classifying scene breaks. Cornell University, 1995.
    """
    # If the inputs are already bool they are edge maps; no need to convert.
    # (Fixed: `np.bool` was removed in NumPy 1.24 -- compare against the
    # builtin `bool` instead.)
    if frame1.dtype == bool:
        E_1 = frame1
    else:
        E_1 = canny(frame1)
    if frame2.dtype == bool:
        E_2 = frame2
    else:
        E_2 = canny(frame2)

    distances = []
    displacements = []
    for dx in range(-r, r + 1, 1):
        for dy in range(-r, r + 1, 1):
            # Candidate: frame2's edges translated by (dx, dy), with wrap-around.
            cimage = np.roll(E_2, dx, axis=0)
            cimage = np.roll(cimage, dy, axis=1)

            if method == 'hamming':
                # Fraction of mismatching edge pixels (census-transform style).
                distance = scipy.spatial.distance.hamming(np.ravel(cimage), np.ravel(E_1))
            elif method == 'hausdorff':
                # Fixed: compare the shifted frame2 edges against *frame1*'s
                # edges.  The original compared them against E_2 itself, which
                # always favors zero motion.
                distance = _hausdorff_distance(cimage, E_1)
            else:
                # Fixed: `Notimplemented` was an undefined name (NameError).
                raise NotImplementedError(
                    "method must be 'hamming' or 'hausdorff', got %r" % (method,))

            distances.append(distance)
            displacements.append([dx, dy])

    # Displacement whose shifted edges best match frame1.
    idx = np.argmin(distances)
    return displacements[idx]
| [
"numpy.ravel",
"numpy.roll",
"numpy.argmin",
"numpy.max",
"numpy.array"
] | [((222, 265), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 1, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 1, 1], [0, 1, 0]])\n', (230, 265), True, 'import numpy as np\n'), ((659, 673), 'numpy.max', 'np.max', (['(A, B)'], {}), '((A, B))\n', (665, 673), True, 'import numpy as np\n'), ((2886, 2906), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (2895, 2906), True, 'import numpy as np\n'), ((2318, 2342), 'numpy.roll', 'np.roll', (['E_2', 'dx'], {'axis': '(0)'}), '(E_2, dx, axis=0)\n', (2325, 2342), True, 'import numpy as np\n'), ((2364, 2391), 'numpy.roll', 'np.roll', (['cimage', 'dy'], {'axis': '(1)'}), '(cimage, dy, axis=1)\n', (2371, 2391), True, 'import numpy as np\n'), ((2560, 2576), 'numpy.ravel', 'np.ravel', (['cimage'], {}), '(cimage)\n', (2568, 2576), True, 'import numpy as np\n'), ((2578, 2591), 'numpy.ravel', 'np.ravel', (['E_1'], {}), '(E_1)\n', (2586, 2591), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
import math
import numpy as np
import torchvision
import cv2
import os
import matplotlib
# Select a non-interactive backend so figures can be rendered and saved
# without a display (must run before pyplot is imported).
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def save_batch_image_with_joints_multi(batch_image,
                                       batch_joints,
                                       batch_joints_vis,
                                       num_person,
                                       file_name,
                                       nrow=8,
                                       padding=2):
    '''
    Draw every person's visible 2D joints onto a grid of the batch images
    and write the result to ``file_name``.

    batch_image: [batch_size, channel, height, width]
    batch_joints: [batch_size, num_person, num_joints, 3]
    batch_joints_vis: [batch_size, num_person, num_joints, 1]
    num_person: [batch_size]
    nrow: images per grid row
    padding: pixel padding between grid cells

    NOTE(review): joint coordinates are shifted in place below, so the
    caller's ``batch_joints`` tensor is mutated -- confirm callers do not
    reuse it afterwards.
    '''
    # Reverse the channel dimension -- presumably RGB -> BGR so cv2.imwrite
    # stores the colors correctly; confirm the upstream channel order.
    batch_image = batch_image.flip(1)
    grid = torchvision.utils.make_grid(batch_image, nrow, padding, True)
    # Grid tensor -> contiguous HxWxC uint8 numpy image.
    ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
    ndarr = ndarr.copy()

    nmaps = batch_image.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    # Size of one grid cell (image plus padding).
    height = int(batch_image.size(2) + padding)
    width = int(batch_image.size(3) + padding)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            for n in range(num_person[k]):
                joints = batch_joints[k, n]
                joints_vis = batch_joints_vis[k, n]

                for joint, joint_vis in zip(joints, joints_vis):
                    # Translate joint coordinates into grid coordinates
                    # (mutates the input tensor -- see note above).
                    joint[0] = x * width + padding + joint[0]
                    joint[1] = y * height + padding + joint[1]
                    if joint_vis[0]:
                        cv2.circle(ndarr, (int(joint[0]), int(joint[1])), 2,
                                   [0, 255, 255], 2)
            k = k + 1
    cv2.imwrite(file_name, ndarr)
def save_batch_heatmaps_multi(batch_image, batch_heatmaps, file_name, normalize=True):
    '''
    Save a debug image with one row per sample: the (resized) input image
    followed by each joint heatmap blended over it.

    batch_image: [batch_size, channel, height, width]
    batch_heatmaps: [batch_size, num_joints, height, width]
    file_name: saved file name
    normalize: if True, min-max normalize the images first
    '''
    if normalize:
        # Clone so the caller's tensor is not modified by the in-place ops.
        batch_image = batch_image.clone()
        # NOTE(review): `min`/`max` shadow the builtins here.
        min = float(batch_image.min())
        max = float(batch_image.max())

        # Min-max normalize over the whole batch (epsilon avoids div by 0).
        batch_image.add_(-min).div_(max - min + 1e-5)
    # Reverse the channel dimension -- presumably RGB -> BGR for cv2.imwrite;
    # confirm the upstream channel order.
    batch_image = batch_image.flip(1)
    batch_size = batch_heatmaps.size(0)
    num_joints = batch_heatmaps.size(1)
    heatmap_height = batch_heatmaps.size(2)
    heatmap_width = batch_heatmaps.size(3)

    # Output canvas: one row per sample, one column per joint plus one
    # leading column for the input image.
    grid_image = np.zeros(
        (batch_size * heatmap_height, (num_joints + 1) * heatmap_width, 3),
        dtype=np.uint8)

    for i in range(batch_size):
        # Tensor -> HxWxC uint8 numpy image.
        image = batch_image[i].mul(255)\
            .clamp(0, 255)\
            .byte()\
            .permute(1, 2, 0)\
            .cpu().numpy()
        heatmaps = batch_heatmaps[i].mul(255)\
            .clamp(0, 255)\
            .byte()\
            .cpu().numpy()

        # Match the image to the heatmap resolution for blending.
        resized_image = cv2.resize(image,
                                   (int(heatmap_width), int(heatmap_height)))

        height_begin = heatmap_height * i
        height_end = heatmap_height * (i + 1)
        for j in range(num_joints):
            heatmap = heatmaps[j, :, :]
            colored_heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
            # Alpha-blend the colored heatmap over the image.
            masked_image = colored_heatmap * 0.7 + resized_image * 0.3
            width_begin = heatmap_width * (j + 1)
            width_end = heatmap_width * (j + 2)
            grid_image[height_begin:height_end, width_begin:width_end, :] = \
                masked_image
            # grid_image[height_begin:height_end, width_begin:width_end, :] = \
            #     colored_heatmap*0.7 + resized_image*0.3

        # First column: the plain (resized) input image.
        grid_image[height_begin:height_end, 0:heatmap_width, :] = resized_image

    cv2.imwrite(file_name, grid_image)
def save_debug_images_multi(config, input, meta, target, output, prefix):
    """Write joint-overlay and heatmap debug images for one batch.

    Honors the DEBUG flags on ``config``; files are written next to
    ``prefix`` under ``image_with_joints`` and ``batch_heatmaps``.
    """
    if not config.DEBUG.DEBUG:
        return

    base = os.path.basename(prefix)
    parent = os.path.dirname(prefix)
    joints_dir = os.path.join(parent, 'image_with_joints')
    heatmaps_dir = os.path.join(parent, 'batch_heatmaps')
    for folder in (joints_dir, heatmaps_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)
    joints_prefix = os.path.join(joints_dir, base)
    heatmaps_prefix = os.path.join(heatmaps_dir, base)

    if config.DEBUG.SAVE_BATCH_IMAGES_GT:
        save_batch_image_with_joints_multi(
            input, meta['joints'], meta['joints_vis'], meta['num_person'],
            '{}_gt.jpg'.format(joints_prefix))
    if config.DEBUG.SAVE_HEATMAPS_GT:
        save_batch_heatmaps_multi(input, target, '{}_hm_gt.jpg'.format(heatmaps_prefix))
    if config.DEBUG.SAVE_HEATMAPS_PRED:
        save_batch_heatmaps_multi(input, output, '{}_hm_pred.jpg'.format(heatmaps_prefix))
# Skeleton limb definitions: each entry is a [start, end] pair of joint
# indices to connect when drawing a pose.  The numeric suffix is the joint
# count of the dataset; save_debug_3d_images selects a list by joint count.
# panoptic
LIMBS15 = [[0, 1], [0, 2], [0, 3], [3, 4], [4, 5], [0, 9], [9, 10],
           [10, 11], [2, 6], [2, 12], [6, 7], [7, 8], [12, 13], [13, 14]]
# # h36m (alternative 17-joint definition, kept for reference)
# LIMBS17 = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8],
#            [8, 9], [9, 10], [8, 14], [14, 15], [15, 16], [8, 11], [11, 12], [12, 13]]
# coco17
LIMBS17 = [[0, 1], [0, 2], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7], [7, 9], [6, 8], [8, 10], [5, 11], [11, 13], [13, 15],
           [6, 12], [12, 14], [14, 16], [5, 6], [11, 12]]
# shelf / campus
LIMBS14 = [[0, 1], [1, 2], [3, 4], [4, 5], [2, 3], [6, 7], [7, 8], [9, 10],
           [10, 11], [2, 8], [3, 9], [8, 12], [9, 12], [12, 13]]
def save_debug_3d_images(config, meta, preds, prefix):
    """Plot ground-truth (red) and predicted (per-person colored) 3D skeletons
    for every sample of the batch and save the figure under ``3d_joints``.
    """
    if not config.DEBUG.DEBUG:
        return

    basename = os.path.basename(prefix)
    dirname = os.path.dirname(prefix)
    dirname1 = os.path.join(dirname, '3d_joints')  # output folder for the 3D joint plots

    if not os.path.exists(dirname1):
        os.makedirs(dirname1)
    prefix = os.path.join(dirname1, basename)
    file_name = prefix + "_3d.png"

    # preds = preds.cpu().numpy()
    batch_size = meta['num_person'].shape[0]
    # Lay out at most 4 subplots per row.
    xplot = min(4, batch_size)
    yplot = int(math.ceil(float(batch_size) / xplot))

    width = 4.0 * xplot
    height = 4.0 * yplot
    fig = plt.figure(0, figsize=(width, height))
    plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05,
                        top=0.95, wspace=0.05, hspace=0.15)
    for i in range(batch_size):
        num_person = meta['num_person'][i]
        joints_3d = meta['joints_3d'][i]
        joints_3d_vis = meta['joints_3d_vis'][i]
        ax = plt.subplot(yplot, xplot, i + 1, projection='3d')
        for n in range(num_person):
            joint = joints_3d[n]
            joint_vis = joints_3d_vis[n]
            # Select the limb list matching the joint count.
            # NOTE(review): eval() dispatches on the module-level LIMBS*
            # constants; a dict lookup would be safer.
            for k in eval("LIMBS{}".format(len(joint))):
                if joint_vis[k[0], 0] and joint_vis[k[1], 0]:
                    # Both endpoints visible: solid red limb.
                    x = [float(joint[k[0], 0]), float(joint[k[1], 0])]
                    y = [float(joint[k[0], 1]), float(joint[k[1], 1])]
                    z = [float(joint[k[0], 2]), float(joint[k[1], 2])]
                    ax.plot(x, y, z, c='r', lw=1.5, marker='o', markerfacecolor='w', markersize=2,
                            markeredgewidth=1)
                else:
                    # At least one endpoint not visible: dashed red limb.
                    x = [float(joint[k[0], 0]), float(joint[k[1], 0])]
                    y = [float(joint[k[0], 1]), float(joint[k[1], 1])]
                    z = [float(joint[k[0], 2]), float(joint[k[1], 2])]
                    ax.plot(x, y, z, c='r', ls='--', lw=1.5, marker='o', markerfacecolor='w', markersize=2,
                            markeredgewidth=1)

        # One color per predicted person (cycled modulo 10).
        colors = ['b', 'g', 'c', 'y', 'm', 'orange', 'pink', 'royalblue', 'lightgreen', 'gold']
        if preds is not None:
            pred = preds[i]
            for n in range(len(pred)):
                joint = pred[n]
                # A negative score in column 3 marks an invalid prediction
                # -- presumably a confidence/validity flag; confirm upstream.
                if joint[0, 3] >= 0:
                    for k in eval("LIMBS{}".format(len(joint))):
                        x = [float(joint[k[0], 0]), float(joint[k[1], 0])]
                        y = [float(joint[k[0], 1]), float(joint[k[1], 1])]
                        z = [float(joint[k[0], 2]), float(joint[k[1], 2])]
                        ax.plot(x, y, z, c=colors[int(n % 10)], lw=1.5, marker='o', markerfacecolor='w', markersize=2,
                                markeredgewidth=1)
    plt.savefig(file_name)
    plt.close(0)
def save_debug_3d_cubes(config, meta, root, prefix):
    """Scatter-plot ground-truth root positions (red) against predicted roots
    (blue) for every sample of the batch and save the figure under
    ``root_cubes``.
    """
    if not config.DEBUG.DEBUG:
        return

    basename = os.path.basename(prefix)
    dirname = os.path.dirname(prefix)
    dirname1 = os.path.join(dirname, 'root_cubes')

    if not os.path.exists(dirname1):
        os.makedirs(dirname1)
    prefix = os.path.join(dirname1, basename)
    file_name = prefix + "_root.png"

    batch_size = root.shape[0]
    root_id = config.DATASET.ROOTIDX  # NOTE(review): unused in this function

    # Lay out at most 4 subplots per row.
    xplot = min(4, batch_size)
    yplot = int(math.ceil(float(batch_size) / xplot))

    width = 6.0 * xplot
    height = 4.0 * yplot
    fig = plt.figure(0, figsize=(width, height))
    plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05,
                        top=0.95, wspace=0.05, hspace=0.15)
    for i in range(batch_size):
        roots_gt = meta['roots_3d'][i]
        num_person = meta['num_person'][i]
        roots_pred = root[i]
        ax = plt.subplot(yplot, xplot, i + 1, projection='3d')

        # Ground-truth roots in red.
        x = roots_gt[:num_person, 0].cpu()
        y = roots_gt[:num_person, 1].cpu()
        z = roots_gt[:num_person, 2].cpu()
        ax.scatter(x, y, z, c='r')

        # Predicted roots in blue -- a negative value in column 3 marks an
        # invalid prediction (presumably a confidence flag; confirm upstream).
        index = roots_pred[:, 3] >= 0
        x = roots_pred[index, 0].cpu()
        y = roots_pred[index, 1].cpu()
        z = roots_pred[index, 2].cpu()
        ax.scatter(x, y, z, c='b')

        # Fix the axes to the configured capture volume so plots line up.
        space_size = config.MULTI_PERSON.SPACE_SIZE
        space_center = config.MULTI_PERSON.SPACE_CENTER
        ax.set_xlim(space_center[0] - space_size[0] / 2, space_center[0] + space_size[0] / 2)
        ax.set_ylim(space_center[1] - space_size[1] / 2, space_center[1] + space_size[1] / 2)
        ax.set_zlim(space_center[2] - space_size[2] / 2, space_center[2] + space_size[2] / 2)

    plt.savefig(file_name)
    plt.close(0)
| [
"matplotlib.pyplot.subplot",
"os.makedirs",
"os.path.basename",
"cv2.imwrite",
"os.path.dirname",
"matplotlib.pyplot.close",
"numpy.zeros",
"os.path.exists",
"torchvision.utils.make_grid",
"matplotlib.pyplot.figure",
"matplotlib.use",
"cv2.applyColorMap",
"matplotlib.pyplot.subplots_adjust",... | [((346, 367), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (360, 367), False, 'import matplotlib\n'), ((1048, 1109), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['batch_image', 'nrow', 'padding', '(True)'], {}), '(batch_image, nrow, padding, True)\n', (1075, 1109), False, 'import torchvision\n'), ((2059, 2088), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'ndarr'], {}), '(file_name, ndarr)\n', (2070, 2088), False, 'import cv2\n'), ((2757, 2853), 'numpy.zeros', 'np.zeros', (['(batch_size * heatmap_height, (num_joints + 1) * heatmap_width, 3)'], {'dtype': 'np.uint8'}), '((batch_size * heatmap_height, (num_joints + 1) * heatmap_width, 3),\n dtype=np.uint8)\n', (2765, 2853), True, 'import numpy as np\n'), ((4177, 4211), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'grid_image'], {}), '(file_name, grid_image)\n', (4188, 4211), False, 'import cv2\n'), ((4350, 4374), 'os.path.basename', 'os.path.basename', (['prefix'], {}), '(prefix)\n', (4366, 4374), False, 'import os\n'), ((4389, 4412), 'os.path.dirname', 'os.path.dirname', (['prefix'], {}), '(prefix)\n', (4404, 4412), False, 'import os\n'), ((4428, 4470), 'os.path.join', 'os.path.join', (['dirname', '"""image_with_joints"""'], {}), "(dirname, 'image_with_joints')\n", (4440, 4470), False, 'import os\n'), ((4486, 4525), 'os.path.join', 'os.path.join', (['dirname', '"""batch_heatmaps"""'], {}), "(dirname, 'batch_heatmaps')\n", (4498, 4525), False, 'import os\n'), ((4644, 4676), 'os.path.join', 'os.path.join', (['dirname1', 'basename'], {}), '(dirname1, basename)\n', (4656, 4676), False, 'import os\n'), ((4691, 4723), 'os.path.join', 'os.path.join', (['dirname2', 'basename'], {}), '(dirname2, basename)\n', (4703, 4723), False, 'import os\n'), ((5939, 5963), 'os.path.basename', 'os.path.basename', (['prefix'], {}), '(prefix)\n', (5955, 5963), False, 'import os\n'), ((5978, 6001), 'os.path.dirname', 'os.path.dirname', (['prefix'], {}), 
'(prefix)\n', (5993, 6001), False, 'import os\n'), ((6017, 6051), 'os.path.join', 'os.path.join', (['dirname', '"""3d_joints"""'], {}), "(dirname, '3d_joints')\n", (6029, 6051), False, 'import os\n'), ((6149, 6181), 'os.path.join', 'os.path.join', (['dirname1', 'basename'], {}), '(dirname1, basename)\n', (6161, 6181), False, 'import os\n'), ((6442, 6480), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(width, height)'}), '(0, figsize=(width, height))\n', (6452, 6480), True, 'from matplotlib import pyplot as plt\n'), ((6485, 6581), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.05)', 'right': '(0.95)', 'bottom': '(0.05)', 'top': '(0.95)', 'wspace': '(0.05)', 'hspace': '(0.15)'}), '(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=\n 0.05, hspace=0.15)\n', (6504, 6581), True, 'from matplotlib import pyplot as plt\n'), ((8534, 8556), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (8545, 8556), True, 'from matplotlib import pyplot as plt\n'), ((8561, 8573), 'matplotlib.pyplot.close', 'plt.close', (['(0)'], {}), '(0)\n', (8570, 8573), True, 'from matplotlib import pyplot as plt\n'), ((8691, 8715), 'os.path.basename', 'os.path.basename', (['prefix'], {}), '(prefix)\n', (8707, 8715), False, 'import os\n'), ((8730, 8753), 'os.path.dirname', 'os.path.dirname', (['prefix'], {}), '(prefix)\n', (8745, 8753), False, 'import os\n'), ((8769, 8804), 'os.path.join', 'os.path.join', (['dirname', '"""root_cubes"""'], {}), "(dirname, 'root_cubes')\n", (8781, 8804), False, 'import os\n'), ((8887, 8919), 'os.path.join', 'os.path.join', (['dirname1', 'basename'], {}), '(dirname1, basename)\n', (8899, 8919), False, 'import os\n'), ((9172, 9210), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(width, height)'}), '(0, figsize=(width, height))\n', (9182, 9210), True, 'from matplotlib import pyplot as plt\n'), ((9215, 9311), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], 
{'left': '(0.05)', 'right': '(0.95)', 'bottom': '(0.05)', 'top': '(0.95)', 'wspace': '(0.05)', 'hspace': '(0.15)'}), '(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=\n 0.05, hspace=0.15)\n', (9234, 9311), True, 'from matplotlib import pyplot as plt\n'), ((10289, 10311), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (10300, 10311), True, 'from matplotlib import pyplot as plt\n'), ((10316, 10328), 'matplotlib.pyplot.close', 'plt.close', (['(0)'], {}), '(0)\n', (10325, 10328), True, 'from matplotlib import pyplot as plt\n'), ((6079, 6103), 'os.path.exists', 'os.path.exists', (['dirname1'], {}), '(dirname1)\n', (6093, 6103), False, 'import os\n'), ((6113, 6134), 'os.makedirs', 'os.makedirs', (['dirname1'], {}), '(dirname1)\n', (6124, 6134), False, 'import os\n'), ((6779, 6828), 'matplotlib.pyplot.subplot', 'plt.subplot', (['yplot', 'xplot', '(i + 1)'], {'projection': '"""3d"""'}), "(yplot, xplot, i + 1, projection='3d')\n", (6790, 6828), True, 'from matplotlib import pyplot as plt\n'), ((8817, 8841), 'os.path.exists', 'os.path.exists', (['dirname1'], {}), '(dirname1)\n', (8831, 8841), False, 'import os\n'), ((8851, 8872), 'os.makedirs', 'os.makedirs', (['dirname1'], {}), '(dirname1)\n', (8862, 8872), False, 'import os\n'), ((9487, 9536), 'matplotlib.pyplot.subplot', 'plt.subplot', (['yplot', 'xplot', '(i + 1)'], {'projection': '"""3d"""'}), "(yplot, xplot, i + 1, projection='3d')\n", (9498, 9536), True, 'from matplotlib import pyplot as plt\n'), ((3631, 3675), 'cv2.applyColorMap', 'cv2.applyColorMap', (['heatmap', 'cv2.COLORMAP_JET'], {}), '(heatmap, cv2.COLORMAP_JET)\n', (3648, 3675), False, 'import cv2\n'), ((4579, 4598), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (4593, 4598), False, 'import os\n'), ((4612, 4628), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (4623, 4628), False, 'import os\n')] |
from itertools import product
from typing import IO, AnyStr, Generator, Set, Tuple
import matplotlib.axes._axes
import numpy as np
import pandas as pd
from sequencing_tools.stats_tools import hamming_distance
class IUPAC:
    """
    Working with IUPAC degenerate bases.

    Usage::

        iupac = IUPAC()
        iupac.check_degenerate('ACTGN') # {'N'}
        iupac.check_degenerate('ACTGY') # {'Y'}
        list(IUPAC().expand('ACTGN')) #['ACTGA', 'ACTGC', 'ACTGT', 'ACTGG']
        list(IUPAC().expand('ACTGY')) #['ACTGC', 'ACTGT']
    """

    def __init__(self) -> None:
        # Mapping from each IUPAC degenerate symbol to the concrete
        # nucleotides it can stand for.
        self.IUPAC = {
            "R": ["A", "G"],
            "Y": ["C", "T"],
            "S": ["G", "C"],
            "W": ["A", "T"],
            "K": ["G", "T"],
            "M": ["A", "C"],
            "B": ["C", "G", "T"],
            "D": ["A", "G", "T"],
            "H": ["A", "C", "T"],
            "V": ["A", "C", "G"],
            "N": ["A", "C", "T", "G"],
        }

    def check_degenerate(self, seq: str) -> Set[str]:
        """
        Find the degenerate IUPAC symbols present in a sequence.

        Args:
            seq (str): sequence possibly containing degenerate bases

        Returns:
            set: all degenerate symbols occurring in this sequence
        """
        return set(self.IUPAC.keys()).intersection(set(seq))

    def expand(self, seq: str) -> Generator[str, None, None]:
        """
        Yield every concrete sequence encoded by a degenerate sequence.

        Each degenerate position is expanded independently, so e.g.
        'NN' yields all 16 dinucleotides.  (The previous implementation
        used ``str.replace``, which substituted every occurrence of a
        repeated degenerate symbol with the same base and therefore
        missed mixed combinations.)

        Args:
            seq (str): sequence possibly containing degenerate bases

        Returns:
            Generator[str]: all possible concrete DNA sequences
        """
        # One candidate list per position; a concrete base maps to itself.
        per_position = [self.IUPAC.get(base, [base]) for base in seq]
        for combination in product(*per_position):
            new_seq = "".join(combination)
            assert set(new_seq) - {"A", "C", "T", "G"} == set()
            yield new_seq
def readfa(file_handle: IO[AnyStr]) -> Generator[Tuple[str, str], None, None]:
    """
    Iterate over the records of a fasta file.

    Args:
        file_handle: an open fasta file (any iterable of lines works)

    Returns:
        Generator[(str, str)]: (sequence id, sequence) pairs

    Usage::

        with open('test.fa') as fasta:
            for seq_id, seq in readfq(fasta):
                print(seq_id, seq)
    """
    record_id = ""
    record_seq = ""
    header_count = 0
    for raw_line in file_handle:
        if not raw_line.startswith(">"):
            # sequence line: accumulate onto the current record
            record_seq += raw_line.strip()
            continue
        header_count += 1
        if header_count > 1:
            # a new header closes the previous record
            yield record_id, record_seq
            record_seq = ""
        record_id = raw_line[1:].strip()
    # emit the final (possibly empty) record
    yield record_id, record_seq
class MultiAlignments:
    """
    Load, plot and summarise a multiple-sequence alignment stored in a
    fasta file.  All sequences are expected to have the same length.
    """

    def __init__(self, fa_file: str, RNA: bool = False) -> None:
        """
        Plotting multiple-alignment fasta, sequences must be of the same length

        Args:
            fa_file (str): fasta file path
            RNA (bool): if True, transcribe T/t to U/u when loading

        Example::

            # $ cat test.fa
            # >1
            # GGGGAATTAGCTCAAGCGGTAGAGCGCTTGCTTAGCATGCAAGAGGTAGTGGGATCGATG
            # >2
            # GGGGAATTAGCTCAAGCGGTAGAGCGCTTGCTTAGCATGCAAGAGGTAGTGGGATCGATG

            ma = MultiAlignments('test.fa')
            ax = plt.subplot()
            ma.plot(ax = ax)
            seq, scores = ma.concensus()
            # seq: majority-vote consensus string
            # scores[i]: array of per-base frequencies at position i
        """
        # each record is [seq_id, base0, base1, ...]
        self.records = []
        with open(fa_file) as fa:
            for seqid, seq in readfa(fa):
                if RNA:
                    seq = seq.replace("T", "U").replace("t", "u")
                self.records.append([seqid] + list(seq))
        self.mul_df = pd.DataFrame(self.records).rename(
            columns={0: "seq_id"}
        )  #: sequence matrix; column "seq_id" plus one integer column per position (starting at 1), nucleotide as value
        self.pairwise = None  #: pairwise distance table filled in by :py:meth:`PairMatrix`
        self.colors = {
            "A": "red",
            "C": "blue",
            "U": "green",
            "G": "orange",
            "-": "black",
            "a": "red",
            "c": "blue",
            "u": "green",
            "g": "orange",
            "t": "green",
        }  #: color dictionary guiding the multiple-alignment plotting

    def plot(
        self,
        ax: matplotlib.axes._axes.Axes,
        min_pos: float = 0,
        max_pos: float = None,
        fontsize: float = 20,
        labelsize: float = 20,
        sample_regex: str = "[A-Za-z0-9_-]+",
    ) -> None:
        """
        Draw the alignment as colored monospace letters, one row per
        sequence, one column per position.

        Args:
            ax (plt.axes): matplotlib axes
            min_pos (float): start position to plot on the multialignments
            max_pos (float): end position to plot (defaults to the full width)
            fontsize (int): fontsize for the nucleotides
            labelsize (int): fontsize for sequence id
            sample_regex (str): only sequence ids matching this regex are drawn
        """
        if not max_pos:
            max_pos = self.mul_df.shape[1]
        # invisible line to fix the axes' data limits
        ax.plot([min_pos, max_pos], [0, self.mul_df.shape[0]], alpha=0)
        for i, (id, seq) in enumerate(
            self.mul_df.pipe(lambda d: d[d.seq_id.str.contains(sample_regex)])
            .set_index("seq_id")
            .iterrows()
        ):
            # sequence id label to the left of the row
            ax.text(min_pos - 1, i, id, fontsize=labelsize, ha="right", va="center")
            for j, b in seq.items():
                # j is the integer column label (positions start at 1)
                if min_pos <= j <= max_pos:
                    b = "U" if b == "T" else b
                    t = ax.text(
                        j,
                        i,
                        b,
                        fontsize=fontsize,
                        family="monospace",
                        va="center",
                        ha="center",
                        color=self.colors[b],
                    )
        # strip all axis decoration; only the letters remain
        [ax.spines[s].set_visible(False) for s in ["top", "right", "left", "bottom"]]
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        ax.tick_params(axis=u"both", which=u"both", length=0)

    def concensus(self) -> Tuple[str, np.ndarray]:
        """
        Compute a consensus sequence from the highest-frequency base at
        each position.

        Returns:
            tuple(str, list of numpy array): (consensus sequence,
            per-position arrays of base fractions; each array holds the
            frequency of every base observed at that column)
        """
        # drop the seq_id column; rows are sequences, columns positions
        sequence_matrix = np.array(self.records)[:, 1:]
        consensus_seq = ""
        scores = []
        for pos in range(sequence_matrix.shape[1]):
            bases = sequence_matrix[:, pos]
            # NOTE: gap characters ('-') are counted like any other base
            b, bcount = np.unique(bases, return_counts=True)
            cb = b[bcount.argmax()]
            consensus_seq += cb
            score = bcount / bcount.sum()
            scores.append(score)
        return consensus_seq, scores

    def PairMatrix(self) -> None:
        """
        Calculate the hamming distance between every (ordered) pair of
        sequences and store the result in ``self.pairwise`` as a
        DataFrame with columns id1, id2, distance.
        """
        pairwise = []
        # NOTE(review): O(n^2) full cross-product including self-pairs and
        # both orderings; set_index is also recomputed per outer iteration
        for id1, seq1 in self.mul_df.set_index("seq_id").iterrows():
            for id2, seq2 in self.mul_df.set_index("seq_id").iterrows():
                seq1 = "".join(seq1)
                seq2 = "".join(seq2)
                record = (id1, id2, hamming_distance(seq1, seq2))
                pairwise.append(record)
        self.pairwise = pd.DataFrame(pairwise, columns=["id1", "id2", "distance"])
| [
"pandas.DataFrame",
"sequencing_tools.stats_tools.hamming_distance",
"numpy.array",
"itertools.product",
"numpy.unique"
] | [((1734, 1759), 'itertools.product', 'product', (['*expandable_list'], {}), '(*expandable_list)\n', (1741, 1759), False, 'from itertools import product\n'), ((9628, 9686), 'pandas.DataFrame', 'pd.DataFrame', (['pairwise'], {'columns': "['id1', 'id2', 'distance']"}), "(pairwise, columns=['id1', 'id2', 'distance'])\n", (9640, 9686), True, 'import pandas as pd\n'), ((8506, 8528), 'numpy.array', 'np.array', (['self.records'], {}), '(self.records)\n', (8514, 8528), True, 'import numpy as np\n'), ((8743, 8779), 'numpy.unique', 'np.unique', (['bases'], {'return_counts': '(True)'}), '(bases, return_counts=True)\n', (8752, 8779), True, 'import numpy as np\n'), ((5790, 5816), 'pandas.DataFrame', 'pd.DataFrame', (['self.records'], {}), '(self.records)\n', (5802, 5816), True, 'import pandas as pd\n'), ((9533, 9561), 'sequencing_tools.stats_tools.hamming_distance', 'hamming_distance', (['seq1', 'seq2'], {}), '(seq1, seq2)\n', (9549, 9561), False, 'from sequencing_tools.stats_tools import hamming_distance\n')] |
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv2D
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
import keras
import numpy
import pickle
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
import os
def remake(single_data, x_list): #DEBUG ONLY
    """
    Print the word sequence encoded by a one-hot matrix (debug helper).

    Args:
        single_data: 2-D matrix of 0/1 values, one row per vocabulary
            word and one column per token position
        x_list: vocabulary; x_list[row] is the word for that row

    Returns:
        None (the reconstructed token list is printed to stdout)
    """
    n_positions = len(single_data[0])
    output = []
    for position in range(n_positions):
        for word_idx, row in enumerate(single_data):
            if row[position] == 1:
                output.append(x_list[word_idx])
                break
        else:
            # bug fix: was "N\A" — an invalid escape sequence that kept a
            # literal backslash; "N/A" was clearly intended
            output.append("N/A")
    print(output)
    print(output[4:8])
    return
def clean(temp_raw_data):
    """
    Normalise raw PDF text into a token list for the model.

    Lowercases, replaces non-ASCII characters with spaces, tokenizes,
    merges hyphen-split words, and replaces numeric tokens with the
    literal token "NUMBER".

    Args:
        temp_raw_data (str): raw text extracted from one PDF

    Returns:
        (tokens, number_array): the cleaned token list and a list of
        [token_index, original_number_token] pairs for every token that
        was replaced by "NUMBER"
    """
    temp_raw_data = temp_raw_data.lower()
    raw_data = ""
    for char in temp_raw_data:
        if(ord(char) < 128):
            raw_data += char
        else:
            raw_data += " "  # non-ASCII characters become spaces
    raw_data = raw_data.strip()
    #########################################################
    # NOTE(review): long_string is built here but never used below —
    # tokenization runs on raw_data, so this table/diagram filter is
    # currently dead code.  TODO confirm whether long_string was meant
    # to replace raw_data.
    long_string = ""
    start = 0
    space_count = 0
    last_char = ""
    for char_num, character in enumerate(raw_data): #remove tables/diagrams ##TODO IMPROVE
        if(character == "\n" and last_char == "\n"):
            if(space_count >= 2):
                long_string += raw_data[start:char_num]
            start = char_num + 1
            space_count = 0
        elif(character == " "):
            space_count += 1
        last_char = character
    ###########################################################
    tokens = word_tokenize(raw_data)
    #print(len(tokens))
    ###################################################################
    last_item = ""
    for item_num, item in enumerate(tokens): #fix hyphenated words
        # NOTE(review): this deletes from `tokens` while enumerating it
        # (skipping elements) and never writes the merged word back into
        # the list — the hyphen repair is likely incomplete; verify.
        if(len(last_item) > 1 and last_item[-1] == "-"):
            item = last_item[:-1] + item
            del tokens[item_num - 1]
        last_item = item
    #################################################################
    ############Replace numbers with NUMBER###########################
    number_array = []
    for item_num, item in enumerate(tokens):
        # a token counts as numeric when it contains a digit and at most
        # one letter (so unit suffixes like "5v" still qualify)
        has_num = False
        alpha_count = 0
        for char in item:
            if(char.isdigit()):
                has_num = True
            elif(char.isalpha()):
                alpha_count += 1
        if(has_num and alpha_count < 2):
            number_array.append([item_num, item])
            tokens[item_num] = "NUMBER"
    return tokens, number_array
##################################################################
def convert(fname):
    """
    Extract the text of the first page of a PDF using pdfminer.

    Args:
        fname: path to the PDF file

    Returns:
        str: text content of page 0
    """
    print("PDF START: ", fname, " -------------------------------------------------------------------------------------")
    output = StringIO()
    manager = PDFResourceManager()
    converter = TextConverter(manager, output, 'utf-8', laparams=LAParams())
    interpreter = PDFPageInterpreter(manager, converter)
    try:
        with open(fname, 'rb') as infile:
            for page_num, page in enumerate(PDFPage.get_pages(infile)):
                if(page_num < 1): #first x page(s)
                    interpreter.process_page(page)
                else:
                    break  # no need to walk the remaining pages
        text = output.getvalue()
    finally:
        converter.close()
        # bug fix: was `output.close` (attribute access, never invoked),
        # so the buffer was never actually closed
        output.close()
    return text
def arrays_creater(single_pdf, x_list, output_window, type_array):
    """
    Slice a token stream into overlapping 12-token one-hot windows.

    Removed dead code from the original: `split_array` and the
    full-document `single_array2D` one-hot matrix were computed in
    O(len(single_pdf) * len(x_list)) time and then discarded.

    Args:
        single_pdf: list of tokens from one PDF
        x_list: vocabulary; each word becomes one row of a window matrix
        output_window: stride between consecutive windows
        type_array: one-hot device-type vector attached to every window
            (the same list object is shared by all windows)

    Returns:
        list of [window_matrix, type_array] pairs; window_matrix has
        len(x_list) rows and 12 columns of 0/1 token-presence flags
    """
    massive_array = []
    for offset in range(0, len(single_pdf) - 11, output_window):
        window = []
        for word in x_list:
            row = [1 if single_pdf[offset + pos] == word else 0 for pos in range(12)]
            window.append(row)
        massive_array.append([window, type_array])
    return massive_array
def phrase_locator(input_data, many_pharses, par_num, output_window): #one pdf
    """
    Build per-window training labels by locating known parameter phrases
    inside one PDF's token stream.

    Args:
        input_data: cleaned token list for one PDF (from clean())
        many_pharses: list of [class_id, phrase, phrase, ...] rows
            (from conversion())
        par_num: number of parameter classes
        output_window: number of label positions per window

    Returns:
        list of par_num x output_window 0/1 matrices, one per window,
        marking the positions where a parameter's NUMBER token occurs
    """
    # temp_array[i] holds the class id whose phrase ends at token i,
    # or -1 when no phrase matched there
    temp_array = [-1 for x in range(len(input_data))]
    for pharses in many_pharses:
        for pharse in pharses[1:]:
            pharse = pharse.lower()
            pharse = pharse.strip()
            raw_pharse = ""
            for char in pharse:
                if(ord(char) < 128):
                    raw_pharse += char
                else:
                    raw_pharse += " "  # same ASCII normalisation as clean()
            tokens = word_tokenize(raw_pharse)
            useful_phrase = False #Actually fix cases like 18-bit TODODODOODODODOD0
            for item_num, item in enumerate(tokens): #modify the pharse to be in the same format
                has_num = False
                alpha_count = 0
                for char in item:
                    if(char.isdigit()):
                        has_num = True
                    elif(char.isalpha()):
                        alpha_count += 1
                if(has_num and alpha_count < 2):
                    useful_phrase = True
                    tokens[item_num] = "NUMBER"
            # only phrases that contain a number and are long enough to
            # be unambiguous are searched for
            if (useful_phrase and len(tokens) >= 3):
                num_loc = tokens.index("NUMBER")
                # distance from the phrase's NUMBER token back from its end
                subtract_len = (len(tokens)) - num_loc
                match = 0
                for word_count, word in enumerate(input_data):
                    # `match` counts how many consecutive phrase tokens have
                    # matched so far; a full match labels the NUMBER position
                    if(match >= len(tokens)):
                        temp_array[word_count - subtract_len] = pharses[0]
                        match = 0
                        print("PHRASE: ", tokens)
                        print(word_count - subtract_len, " ", pharses[0])
                    else:
                        # NOTE(review): substring containment, not equality —
                        # "a" matches "NUMBER"; confirm this is intended
                        if(tokens[match] in input_data[word_count]):
                            match += 1
                        else:
                            match = 0
    # slice temp_array into centred windows aligned with arrays_creater()
    massive_array = []
    center_finder = int((12-output_window)/2)
    for iter in range(center_finder,len(temp_array)-7,output_window): #TODO 7 IS BAD FIND A GOOD NUMBER WITH OUTPUTWINDOW INVOLVED
        small_array = [[0 for x in range(output_window)] for y in range(par_num)]
        for item_count in range(0,output_window):
            par_identify = temp_array[item_count+iter]
            if(par_identify > -1):
                small_array[par_identify][item_count] = 1
        massive_array.append(small_array)
    return massive_array
def conversion(retrieve_labels, parameter_num):
    """
    Translate a {parameter name: value tokens} mapping into numeric
    class-label rows.

    Args:
        retrieve_labels: dict keyed by parameter name
        parameter_num: only the first parameter_num known keys count

    Returns:
        list of [class_id, *value_tokens] rows for each parameter found
    """
    print(retrieve_labels)
    keys = ["Input Supply Voltage Range", "Resolution", "Sampling Frequency", "Temperature", "SNR",
            "INL", "DNL", "Conversion Rate", "Input Voltage", "Output Voltage",
            "Load Current", "Input Capacitance Range", "Output Frequency", "Reference Frequency", "Bandwidth",
            "Dropout Voltage", "Quiescent Current in Shutdown", "PSRR", "Output Current", "RMS",
            "Row", "Column", "Access Time", "Temperature Range", "Temperature Resolution"]
    return [
        [class_id] + retrieve_labels[name]
        for class_id, name in enumerate(keys[:parameter_num])
        if name in retrieve_labels
    ]
def reversion(number):
    """Inverse of conversion(): return the parameter name for class id *number*."""
    keys = ("Input Supply Voltage Range", "Resolution", "Sampling Frequency", "Temperature", "SNR",
            "INL", "DNL", "Conversion Rate", "Input Voltage", "Output Voltage",
            "Load Current", "Input Capacitance Range", "Output Frequency", "Reference Frequency", "Bandwidth",
            "Dropout Voltage", "Quiescent Current in Shutdown", "PSRR", "Output Current", "RMS",
            "Row", "Column", "Access Time", "Temperature Range", "Temperature Resolution")
    return keys[number]
#with open(r"C:\Users\Zach\Downloads\training_set_ADC\3ad4002-4006-4010.p", "rb") as file:
#    temp = (pickle.load(file))
#    print("Here: ", temp)
#stop = input()
#############START############################
# ---- configuration ----
unique_words = 2500  # vocabulary size used for the one-hot encoding
output_window = 4 #need to change crop_amount manually to change this
parameter_num = 25 #MAX 25
reConvert = False  # True: re-extract text from PDFs; False: load pickled text
reBuild = True  # True: rebuild dataset and retrain; False: load saved model
Type = ["ADC", "CDC", "DCDC", "PLL", "LDO", "SRAM", "Temp_Sen"]
folder_locs = [os.path.join(r"C:\Users\Zach\Downloads\Text_extract", Type_iter, "PDFs") for Type_iter in Type]
result_locs = [os.path.join(r"C:\Users\Zach\Downloads\Text_extract", Type_iter, "Results") for Type_iter in Type]
# ---- collect per-PDF paths and their device-type one-hot vectors ----
pdf_file_locs = [] #2D array of pdf file locations
full_type_array = []
for folder_iter, folder in enumerate(folder_locs):
    type_array = [0 for x in range(len(Type))]
    type_array[folder_iter] = 1
    for file in os.listdir(folder):
        pdf_file_locs.append(os.path.join(folder, file))
        # NOTE: the same type_array list object is shared by every PDF
        # of this folder
        full_type_array.append(type_array)
# ---- load hand-labelled results and map them to numeric class rows ----
retrieve_labels = []
for folder in result_locs:
    for file in os.listdir(folder):
        with open(os.path.join(folder, file), "rb") as file_loc:
            retrieve_labels.append(conversion(pickle.load(file_loc), parameter_num))
print(retrieve_labels)
if(reConvert):
    # extract raw text from every PDF and cache it with pickle
    raw_data = []
    for pdf_file in pdf_file_locs:
        try:
            raw_data.append(convert(pdf_file))
        # NOTE(review): bare except silently drops failed PDFs, which
        # desynchronises raw_data from full_type_array/retrieve_labels
        except:
            print("FAIL") #If one fails they entire program will break
    with open(r"C:\Users\Zach\Downloads\Text_extract\raw_data.txt", "wb") as file:
        pickle.dump(raw_data, file)
    with open(r"C:\Users\Zach\Downloads\Text_extract\full_type_array.txt", "wb") as file:
        pickle.dump(full_type_array, file)
else:
    # reuse the cached extraction from a previous run
    with open(r"C:\Users\Zach\Downloads\Text_extract\raw_data.txt", "rb") as file:
        raw_data = pickle.load(file)
    with open(r"C:\Users\Zach\Downloads\Text_extract\full_type_array.txt", "rb") as file:
        full_type_array = pickle.load(file)
#print(full_type_array)
if(reBuild):
    if(1): #temp
        # ---- build the vocabulary from the most frequent tokens ----
        word_list = []
        clean_data = []
        temp_nums_array = []
        for raw in raw_data:
            temp_clean, temp_nums = clean(raw)
            clean_data.append(temp_clean)
            temp_nums_array += temp_nums
            word_list += temp_clean
        freq = (nltk.FreqDist(word_list)).most_common(unique_words)
        x_list = []
        for tup in freq:
            x_list.append(str(tup[0]))
        print(x_list)
        # ---- encode every PDF into windows + labels ----
        data = []
        labels = []
        for item_count, item in enumerate(clean_data):
            labels += phrase_locator(item, retrieve_labels[item_count], parameter_num, output_window)
            data += arrays_creater(item, x_list, output_window, full_type_array[item_count])
        #print(clean_data)
        #print(labels)
        labels = numpy.array(labels)
        # ---- rebalance: keep every positive window plus a buffer of
        # following negative windows ----
        pos_data_buffer = 0
        trimmed_labels = []
        trimmed_data = []
        # NOTE(review): `iter` shadows the builtin here
        for iter_num, iter in enumerate(labels): #1 useful : 7 useless
            if(1 in iter):
                pos_data_buffer += 15 #Change this for debugging
                trimmed_labels.append(iter)
                trimmed_data.append(data[iter_num])
            else:
                if(pos_data_buffer >= 1):
                    trimmed_labels.append(iter)
                    trimmed_data.append(data[iter_num])
                    pos_data_buffer -= 1
        trimmed_labels = numpy.array(trimmed_labels)
        trimmed_data = numpy.array(trimmed_data)
        # cache the prepared dataset
        numpy.save(r"C:\Users\Zach\Downloads\Text_extract\DATA", trimmed_data)
        numpy.save(r"C:\Users\Zach\Downloads\Text_extract\LABELS", trimmed_labels)
    else:
        trimmed_data = numpy.load(r"C:\Users\Zach\Downloads\Text_extract\DATA.npy")
        trimmed_labels = numpy.load(r"C:\Users\Zach\Downloads\Text_extract\LABELS.npy")
    #print(trimmed_labels)
    x_train, x_valid, y_train, y_valid = train_test_split(trimmed_data, trimmed_labels, test_size = 0.2, shuffle = True)
    # split each sample into its two model inputs: window matrix ([0])
    # and device-type one-hot ([1])
    double_train = [[],[]]
    for array in x_train:
        double_train[0].append(array[0])
        double_train[1].append(array[1])
    double_train[0] = numpy.expand_dims(numpy.array(double_train[0]), axis = 3)
    double_train[1] = numpy.array(double_train[1])
    double_valid = [[],[]]
    for array in x_valid:
        double_valid[0].append(array[0])
        double_valid[1].append(array[1])
    double_valid[0] = numpy.expand_dims(numpy.array(double_valid[0]), axis = 3)
    double_valid[1] = numpy.array(double_valid[1])
    #crop_amount = int((12 - output_window) / 2) #I HAVE TO HARD CODE THIS TO SAVE THE MODEL
    print("Starting Convolution")
    # ---- functional-API model: two inputs ----
    keras_input = keras.layers.Input(shape=(unique_words,12,1), name='keras_input')
    keras_input2 = keras.layers.Input(shape = (len(Type),), name='keras_input2' )
    #########Type######### parameter-classification branch, gated by device type
    Type_shutdown_net = (Dense(parameter_num, activation='sigmoid'))(keras_input2)
    # parallel convolutions over phrase spans of width 2/4/6/8 tokens
    pharse_check_a = Conv2D(512, kernel_size=(unique_words, 2), strides = (1,1), activation='relu')(keras_input)
    pharse_check_a2 = keras.layers.MaxPooling2D(pool_size=(1,11), strides = (1,1))(pharse_check_a) #TODO TRY WITHOUT
    pharse_check_a3 = keras.layers.Flatten()(pharse_check_a2)
    pharse_check_b = Conv2D(512, kernel_size=(unique_words, 4), strides = (1,1), activation='relu')(keras_input)
    pharse_check_b2 = keras.layers.MaxPooling2D(pool_size=(1,9), strides = (1,1))(pharse_check_b)
    pharse_check_b3 = keras.layers.Flatten()(pharse_check_b2)
    pharse_check_c = Conv2D(512, kernel_size=(unique_words, 6), strides = (1,1), activation='relu')(keras_input)
    pharse_check_c2 = keras.layers.MaxPooling2D(pool_size=(1,7), strides = (1,1))(pharse_check_c)
    pharse_check_c3 = keras.layers.Flatten()(pharse_check_c2)
    pharse_check_d = Conv2D(512, kernel_size=(unique_words, 8), strides = (1,1), activation='relu')(keras_input)
    pharse_check_d2 = keras.layers.MaxPooling2D(pool_size=(1,5), strides = (1,1))(pharse_check_d)
    pharse_check_d3 = keras.layers.Flatten()(pharse_check_d2)
    merged_phase_check = keras.layers.concatenate([pharse_check_a3, pharse_check_b3, pharse_check_c3, pharse_check_d3, keras_input2], axis=-1)
    pharse_check3 = (Dense(2048, activation='relu'))(merged_phase_check)
    pharse_check4 = (Dense(2048, activation='relu'))(pharse_check3)
    pharse_check5 = (Dense(parameter_num, activation='sigmoid'))(pharse_check4)
    pharse_check6 = keras.layers.multiply([Type_shutdown_net,pharse_check5])
    #########Type#########
    #########POS############# position-detection branch
    number_loc = keras.layers.Lambda(lambda input : input[:,0,4:12-4,0])(keras_input) #REQUIRE A NUMBER/// This needs NUMBER to be in the top slot //THE 4s should be crop amount
    cropped_data = keras.layers.Cropping2D(cropping=((0, 0), (4, 4)))(keras_input) #removes data outside of the output window //The 4s should be crop amont
    real_data_a = Conv2D(256, kernel_size=(unique_words, 1), strides = (1,1), activation='relu')(cropped_data)
    real_data_a2 = keras.layers.Flatten()(real_data_a)
    real_data_b = Conv2D(256, kernel_size=(unique_words, 2), strides = (1,1), activation='relu')(cropped_data)
    real_data_b2 = keras.layers.Flatten()(real_data_b)
    real_data_c = keras.layers.Flatten()(cropped_data)
    real_data_c2 = (Dense(2048, activation='relu'))(real_data_c)
    large_data = Conv2D(512, kernel_size=(unique_words, 4), activation='relu')(keras_input)
    large_data2 = keras.layers.Flatten()(large_data) #input not cropped
    merged_phase_check = keras.layers.concatenate([real_data_a2, real_data_b2, real_data_c2, large_data2], axis=-1)
    real_data3 = (Dense(1024, activation='relu'))(merged_phase_check)
    merged_phase_check2 = keras.layers.concatenate([real_data3, pharse_check6, keras_input2], axis=-1)
    real_data4 = (Dense(512, activation='relu'))(merged_phase_check2)
    real_data5 = (Dense(512, activation='relu'))(real_data4)
    real_data6 = (Dense(output_window, activation='sigmoid'))(real_data5)
    real_data7 = keras.layers.multiply([number_loc,real_data6])
    #########POS############
    # outer product of position scores and parameter scores ->
    # (parameter_num x output_window) output grid
    # NOTE(review): `type` shadows the builtin here
    exact_loc = keras.layers.RepeatVector(parameter_num)(real_data7)
    type = keras.layers.RepeatVector(output_window)(pharse_check6)
    type2 = keras.layers.Permute((2,1))(type)
    merged = keras.layers.multiply([exact_loc,type2])
    model = keras.models.Model(inputs=[keras_input, keras_input2], outputs=merged)
    model.compile(loss=keras.losses.binary_crossentropy, optimizer = 'adadelta', metrics=['accuracy'])
    model.fit({'keras_input': double_train[0], 'keras_input2' : double_train[1]}, y_train, validation_data = ({'keras_input': double_valid[0], 'keras_input2' : double_valid[1]}, y_valid), epochs = 6000, batch_size = 256)
    # Save the weights
    model.save_weights(r"C:\Users\Zach\Downloads\Text_extract\model_weights.h5")
    # Save the model architecture
    with open(r"C:\Users\Zach\Downloads\Text_extract\model_architecture.json", 'w') as f:
        f.write(model.to_json())
    with open(r"C:\Users\Zach\Downloads\Text_extract\x_list.txt", "wb") as file:
        pickle.dump(x_list, file)
    numpy.save(r"C:\Users\Zach\Downloads\Text_extract\x_valid0", double_valid[0])
    numpy.save(r"C:\Users\Zach\Downloads\Text_extract\x_valid1", double_valid[1])
    numpy.save(r"C:\Users\Zach\Downloads\Text_extract\y_valid", y_valid)
else:
    # reload a previously trained model and its validation data
    with open(r"C:\Users\Zach\Downloads\Text_extract\model_architecture.json", 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(r"C:\Users\Zach\Downloads\Text_extract\model_weights.h5")
    with open(r"C:\Users\Zach\Downloads\Text_extract\x_list.txt", "rb") as file:
        x_list = pickle.load(file)
    double_valid = [numpy.load(r"C:\Users\Zach\Downloads\Text_extract\x_valid0.npy"), numpy.load(r"C:\Users\Zach\Downloads\Text_extract\x_valid1.npy")]
    y_valid = numpy.load(r"C:\Users\Zach\Downloads\Text_extract\y_valid.npy")
model.summary()
#################TEST############### per-cell precision/recall on the validation set
final_results = model.predict(double_valid)
TP = 0
TN = 0
FP = 0
FN = 0
for sr_num, single_result in enumerate(final_results):
    large_index = sr_num * output_window
    print("------------------------------------ " + str(sr_num))
    for par_num, par in enumerate(single_result):
        for res_num, result in enumerate(par):
            # threshold each (parameter, position) score at 0.5
            if((result >= .5 and y_valid[sr_num][par_num][res_num] == 1)):
                print("SUCCESS")
                TP += 1
            elif((result < .5 and y_valid[sr_num][par_num][res_num] == 1)):
                print("FAIL_FN")
                FN += 1
                #print(single_result)
                #print(y_valid[sr_num])
                #print("")
            elif(result >= .5 and y_valid[sr_num][par_num][res_num] == 0):
                print("FAIL_FP")
                FP += 1
                #print(single_result)
                #print(y_valid[sr_num])
                #print("")
            else:
                TN += 1
print("Precision: ", TP/(TP+FP))
print("Recall: ", TP/(TP+FN))
print("Accuracy: ", (TP+TN)/(TP+TN+FP+FN))
# interactive inspection of individual validation windows (-1 to quit)
while(1):
    num = int(input("Enter a number: "))
    if(num == -1):
        break
    print(Type[numpy.argmax(double_valid[1][num])])
    print(y_valid[num])
    remake(double_valid[0][num], x_list)
    for item_iter, item in enumerate(final_results[num]):
        print(reversion(item_iter), " ", item)
    print("")
####################################
#########DEBUG################################## run the full pipeline on one unseen PDF
loc = "124si3500"
test_pdf = os.path.join(r"C:\Users\Zach\Downloads\Text\DCDC", (loc + ".pdf")) #other location
#test_pdf = os.path.join(r"C:\Users\Zach\Downloads\Text_extract\ADC\PDFs", (loc + ".pdf"))
test_clean, temp_nums = clean(convert(test_pdf))
print(test_clean)
# device-type one-hot is hard-coded here (first slot = ADC in `Type`)
test_data = arrays_creater(test_clean, x_list, output_window, [1,0,0,0,0,0,0])
double_final = [[],[]]
for array in test_data:
    double_final[0].append(array[0])
    double_final[1].append(array[1])
double_final[0] = numpy.expand_dims(numpy.array(double_final[0]), axis = 3)
double_final[1] = numpy.array(double_final[1])
results = model.predict(double_final)
#print(test_clean)
# token indices of the tokens that clean() replaced with "NUMBER"
first_ele = [i[0] for i in temp_nums]
#with open(os.path.join(r"C:\Users\Zach\Downloads\Text_extract\ADC\Results", (loc + ".p")), "rb") as file_loc:
#    test_labels = pickle.load(file_loc)
#print(test_labels)
#test_labels = conversion(test_labels, parameter_num)
#phrase_locator(test_clean, test_labels, parameter_num, output_window) #here for debug only
# map each confident prediction back to the original numeric token
for sr_num, single_result in enumerate(results):
    large_index = sr_num * 4
    for par_num, par in enumerate(single_result):
        for res_num, result in enumerate(par):
            if(result > .5):
                index = first_ele.index(large_index + res_num + 4) #need to offset
                print(reversion(par_num), " ", temp_nums[index])
#########DEBUG##################################
| [
"numpy.load",
"pickle.dump",
"keras.layers.Cropping2D",
"numpy.argmax",
"sklearn.model_selection.train_test_split",
"pdfminer.layout.LAParams",
"keras.models.Model",
"pickle.load",
"keras.layers.Input",
"keras.layers.concatenate",
"os.path.join",
"pdfminer.pdfinterp.PDFPageInterpreter",
"ker... | [((20609, 20677), 'os.path.join', 'os.path.join', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text\\\\DCDC"""', "(loc + '.pdf')"], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text\\\\DCDC', loc + '.pdf')\n", (20621, 20677), False, 'import os\n'), ((21168, 21196), 'numpy.array', 'numpy.array', (['double_final[1]'], {}), '(double_final[1])\n', (21179, 21196), False, 'import numpy\n'), ((1970, 1993), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['raw_data'], {}), '(raw_data)\n', (1983, 1993), False, 'from nltk.tokenize import word_tokenize\n'), ((3202, 3212), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3210, 3212), False, 'from io import StringIO\n'), ((3228, 3248), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (3246, 3248), False, 'from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\n'), ((3346, 3384), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['manager', 'converter'], {}), '(manager, converter)\n', (3364, 3384), False, 'from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\n'), ((8879, 8954), 'os.path.join', 'os.path.join', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract"""', 'Type_iter', '"""PDFs"""'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract', Type_iter, 'PDFs')\n", (8891, 8954), False, 'import os\n'), ((8991, 9069), 'os.path.join', 'os.path.join', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract"""', 'Type_iter', '"""Results"""'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract', Type_iter, 'Results')\n", (9003, 9069), False, 'import os\n'), ((9320, 9338), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (9330, 9338), False, 'import os\n'), ((9511, 9529), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (9521, 9529), False, 'import os\n'), ((12832, 12907), 'sklearn.model_selection.train_test_split', 'train_test_split', (['trimmed_data', 'trimmed_labels'], {'test_size': '(0.2)', 'shuffle': '(True)'}), 
'(trimmed_data, trimmed_labels, test_size=0.2, shuffle=True)\n', (12848, 12907), False, 'from sklearn.model_selection import train_test_split\n'), ((13159, 13187), 'numpy.array', 'numpy.array', (['double_train[1]'], {}), '(double_train[1])\n', (13170, 13187), False, 'import numpy\n'), ((13437, 13465), 'numpy.array', 'numpy.array', (['double_valid[1]'], {}), '(double_valid[1])\n', (13448, 13465), False, 'import numpy\n'), ((13621, 13688), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(unique_words, 12, 1)', 'name': '"""keras_input"""'}), "(shape=(unique_words, 12, 1), name='keras_input')\n", (13639, 13688), False, 'import keras\n'), ((15046, 15167), 'keras.layers.concatenate', 'keras.layers.concatenate', (['[pharse_check_a3, pharse_check_b3, pharse_check_c3, pharse_check_d3,\n keras_input2]'], {'axis': '(-1)'}), '([pharse_check_a3, pharse_check_b3, pharse_check_c3,\n pharse_check_d3, keras_input2], axis=-1)\n', (15070, 15167), False, 'import keras\n'), ((15410, 15467), 'keras.layers.multiply', 'keras.layers.multiply', (['[Type_shutdown_net, pharse_check5]'], {}), '([Type_shutdown_net, pharse_check5])\n', (15431, 15467), False, 'import keras\n'), ((16522, 16616), 'keras.layers.concatenate', 'keras.layers.concatenate', (['[real_data_a2, real_data_b2, real_data_c2, large_data2]'], {'axis': '(-1)'}), '([real_data_a2, real_data_b2, real_data_c2,\n large_data2], axis=-1)\n', (16546, 16616), False, 'import keras\n'), ((16711, 16787), 'keras.layers.concatenate', 'keras.layers.concatenate', (['[real_data3, pharse_check6, keras_input2]'], {'axis': '(-1)'}), '([real_data3, pharse_check6, keras_input2], axis=-1)\n', (16735, 16787), False, 'import keras\n'), ((17018, 17065), 'keras.layers.multiply', 'keras.layers.multiply', (['[number_loc, real_data6]'], {}), '([number_loc, real_data6])\n', (17039, 17065), False, 'import keras\n'), ((17298, 17339), 'keras.layers.multiply', 'keras.layers.multiply', (['[exact_loc, type2]'], {}), '([exact_loc, type2])\n', (17319, 
17339), False, 'import keras\n'), ((17354, 17424), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': '[keras_input, keras_input2]', 'outputs': 'merged'}), '(inputs=[keras_input, keras_input2], outputs=merged)\n', (17372, 17424), False, 'import keras\n'), ((18152, 18237), 'numpy.save', 'numpy.save', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid0"""', 'double_valid[0]'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid0',\n double_valid[0])\n", (18162, 18237), False, 'import numpy\n'), ((18235, 18320), 'numpy.save', 'numpy.save', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid1"""', 'double_valid[1]'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid1',\n double_valid[1])\n", (18245, 18320), False, 'import numpy\n'), ((18318, 18390), 'numpy.save', 'numpy.save', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\y_valid"""', 'y_valid'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\y_valid', y_valid)\n", (18328, 18390), False, 'import numpy\n'), ((18896, 18963), 'numpy.load', 'numpy.load', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\y_valid.npy"""'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\y_valid.npy')\n", (18906, 18963), False, 'import numpy\n'), ((21109, 21137), 'numpy.array', 'numpy.array', (['double_final[0]'], {}), '(double_final[0])\n', (21120, 21137), False, 'import numpy\n'), ((10026, 10053), 'pickle.dump', 'pickle.dump', (['raw_data', 'file'], {}), '(raw_data, file)\n', (10037, 10053), False, 'import pickle\n'), ((10154, 10188), 'pickle.dump', 'pickle.dump', (['full_type_array', 'file'], {}), '(full_type_array, file)\n', (10165, 10188), False, 'import pickle\n'), ((10300, 10317), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (10311, 10317), False, 'import pickle\n'), ((10436, 10453), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (10447, 10453), False, 'import pickle\n'), ((11518, 11537), 
'numpy.array', 'numpy.array', (['labels'], {}), '(labels)\n', (11529, 11537), False, 'import numpy\n'), ((12244, 12271), 'numpy.array', 'numpy.array', (['trimmed_labels'], {}), '(trimmed_labels)\n', (12255, 12271), False, 'import numpy\n'), ((12296, 12321), 'numpy.array', 'numpy.array', (['trimmed_data'], {}), '(trimmed_data)\n', (12307, 12321), False, 'import numpy\n'), ((12416, 12490), 'numpy.save', 'numpy.save', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\DATA"""', 'trimmed_data'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\DATA', trimmed_data)\n", (12426, 12490), False, 'import numpy\n'), ((12496, 12574), 'numpy.save', 'numpy.save', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\LABELS"""', 'trimmed_labels'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\LABELS', trimmed_labels)\n", (12506, 12574), False, 'import numpy\n'), ((12606, 12670), 'numpy.load', 'numpy.load', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\DATA.npy"""'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\DATA.npy')\n", (12616, 12670), False, 'import numpy\n'), ((12693, 12759), 'numpy.load', 'numpy.load', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\LABELS.npy"""'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\LABELS.npy')\n", (12703, 12759), False, 'import numpy\n'), ((13096, 13124), 'numpy.array', 'numpy.array', (['double_train[0]'], {}), '(double_train[0])\n', (13107, 13124), False, 'import numpy\n'), ((13374, 13402), 'numpy.array', 'numpy.array', (['double_valid[0]'], {}), '(double_valid[0])\n', (13385, 13402), False, 'import numpy\n'), ((13828, 13870), 'keras.layers.Dense', 'Dense', (['parameter_num'], {'activation': '"""sigmoid"""'}), "(parameter_num, activation='sigmoid')\n", (13833, 13870), False, 'from keras.layers import Dense\n'), ((13910, 13987), 'keras.layers.Conv2D', 'Conv2D', (['(512)'], {'kernel_size': '(unique_words, 2)', 'strides': '(1, 1)', 'activation': 
'"""relu"""'}), "(512, kernel_size=(unique_words, 2), strides=(1, 1), activation='relu')\n", (13916, 13987), False, 'from keras.layers import Conv2D\n'), ((14025, 14085), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(1, 11)', 'strides': '(1, 1)'}), '(pool_size=(1, 11), strides=(1, 1))\n', (14050, 14085), False, 'import keras\n'), ((14143, 14165), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (14163, 14165), False, 'import keras\n'), ((14208, 14285), 'keras.layers.Conv2D', 'Conv2D', (['(512)'], {'kernel_size': '(unique_words, 4)', 'strides': '(1, 1)', 'activation': '"""relu"""'}), "(512, kernel_size=(unique_words, 4), strides=(1, 1), activation='relu')\n", (14214, 14285), False, 'from keras.layers import Conv2D\n'), ((14323, 14382), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(1, 9)', 'strides': '(1, 1)'}), '(pool_size=(1, 9), strides=(1, 1))\n', (14348, 14382), False, 'import keras\n'), ((14422, 14444), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (14442, 14444), False, 'import keras\n'), ((14486, 14563), 'keras.layers.Conv2D', 'Conv2D', (['(512)'], {'kernel_size': '(unique_words, 6)', 'strides': '(1, 1)', 'activation': '"""relu"""'}), "(512, kernel_size=(unique_words, 6), strides=(1, 1), activation='relu')\n", (14492, 14563), False, 'from keras.layers import Conv2D\n'), ((14601, 14660), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(1, 7)', 'strides': '(1, 1)'}), '(pool_size=(1, 7), strides=(1, 1))\n', (14626, 14660), False, 'import keras\n'), ((14700, 14722), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (14720, 14722), False, 'import keras\n'), ((14764, 14841), 'keras.layers.Conv2D', 'Conv2D', (['(512)'], {'kernel_size': '(unique_words, 8)', 'strides': '(1, 1)', 'activation': '"""relu"""'}), "(512, kernel_size=(unique_words, 8), strides=(1, 1), activation='relu')\n", (14770, 14841), False, 'from 
keras.layers import Conv2D\n'), ((14879, 14938), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(1, 5)', 'strides': '(1, 1)'}), '(pool_size=(1, 5), strides=(1, 1))\n', (14904, 14938), False, 'import keras\n'), ((14978, 15000), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (14998, 15000), False, 'import keras\n'), ((15186, 15216), 'keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), "(2048, activation='relu')\n", (15191, 15216), False, 'from keras.layers import Dense\n'), ((15260, 15290), 'keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), "(2048, activation='relu')\n", (15265, 15290), False, 'from keras.layers import Dense\n'), ((15330, 15372), 'keras.layers.Dense', 'Dense', (['parameter_num'], {'activation': '"""sigmoid"""'}), "(parameter_num, activation='sigmoid')\n", (15335, 15372), False, 'from keras.layers import Dense\n'), ((15548, 15607), 'keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda input: input[:, 0, 4:12 - 4, 0])'], {}), '(lambda input: input[:, 0, 4:12 - 4, 0])\n', (15567, 15607), False, 'import keras\n'), ((15731, 15781), 'keras.layers.Cropping2D', 'keras.layers.Cropping2D', ([], {'cropping': '((0, 0), (4, 4))'}), '(cropping=((0, 0), (4, 4)))\n', (15754, 15781), False, 'import keras\n'), ((15887, 15964), 'keras.layers.Conv2D', 'Conv2D', (['(256)'], {'kernel_size': '(unique_words, 1)', 'strides': '(1, 1)', 'activation': '"""relu"""'}), "(256, kernel_size=(unique_words, 1), strides=(1, 1), activation='relu')\n", (15893, 15964), False, 'from keras.layers import Conv2D\n'), ((16000, 16022), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (16020, 16022), False, 'import keras\n'), ((16055, 16132), 'keras.layers.Conv2D', 'Conv2D', (['(256)'], {'kernel_size': '(unique_words, 2)', 'strides': '(1, 1)', 'activation': '"""relu"""'}), "(256, kernel_size=(unique_words, 2), strides=(1, 1), activation='relu')\n", (16061, 16132), False, 'from 
keras.layers import Conv2D\n'), ((16168, 16190), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (16188, 16190), False, 'import keras\n'), ((16223, 16245), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (16243, 16245), False, 'import keras\n'), ((16281, 16311), 'keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), "(2048, activation='relu')\n", (16286, 16311), False, 'from keras.layers import Dense\n'), ((16346, 16407), 'keras.layers.Conv2D', 'Conv2D', (['(512)'], {'kernel_size': '(unique_words, 4)', 'activation': '"""relu"""'}), "(512, kernel_size=(unique_words, 4), activation='relu')\n", (16352, 16407), False, 'from keras.layers import Conv2D\n'), ((16440, 16462), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (16460, 16462), False, 'import keras\n'), ((16632, 16662), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (16637, 16662), False, 'from keras.layers import Dense\n'), ((16809, 16838), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (16814, 16838), False, 'from keras.layers import Dense\n'), ((16881, 16910), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (16886, 16910), False, 'from keras.layers import Dense\n'), ((16944, 16986), 'keras.layers.Dense', 'Dense', (['output_window'], {'activation': '"""sigmoid"""'}), "(output_window, activation='sigmoid')\n", (16949, 16986), False, 'from keras.layers import Dense\n'), ((17114, 17154), 'keras.layers.RepeatVector', 'keras.layers.RepeatVector', (['parameter_num'], {}), '(parameter_num)\n', (17139, 17154), False, 'import keras\n'), ((17179, 17219), 'keras.layers.RepeatVector', 'keras.layers.RepeatVector', (['output_window'], {}), '(output_window)\n', (17204, 17219), False, 'import keras\n'), ((17248, 17276), 'keras.layers.Permute', 'keras.layers.Permute', (['(2, 1)'], {}), 
'((2, 1))\n', (17268, 17276), False, 'import keras\n'), ((18121, 18146), 'pickle.dump', 'pickle.dump', (['x_list', 'file'], {}), '(x_list, file)\n', (18132, 18146), False, 'import pickle\n'), ((18710, 18727), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (18721, 18727), False, 'import pickle\n'), ((18749, 18817), 'numpy.load', 'numpy.load', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid0.npy"""'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid0.npy')\n", (18759, 18817), False, 'import numpy\n'), ((18815, 18883), 'numpy.load', 'numpy.load', (['"""C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid1.npy"""'], {}), "('C:\\\\Users\\\\Zach\\\\Downloads\\\\Text_extract\\\\x_valid1.npy')\n", (18825, 18883), False, 'import numpy\n'), ((3315, 3325), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (3323, 3325), False, 'from pdfminer.layout import LAParams\n'), ((3467, 3492), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['infile'], {}), '(infile)\n', (3484, 3492), False, 'from pdfminer.pdfpage import PDFPage\n'), ((5114, 5139), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['raw_pharse'], {}), '(raw_pharse)\n', (5127, 5139), False, 'from nltk.tokenize import word_tokenize\n'), ((9370, 9396), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (9382, 9396), False, 'import os\n'), ((20252, 20286), 'numpy.argmax', 'numpy.argmax', (['double_valid[1][num]'], {}), '(double_valid[1][num])\n', (20264, 20286), False, 'import numpy\n'), ((9550, 9576), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (9562, 9576), False, 'import os\n'), ((10916, 10940), 'nltk.FreqDist', 'nltk.FreqDist', (['word_list'], {}), '(word_list)\n', (10929, 10940), False, 'import nltk\n'), ((9644, 9665), 'pickle.load', 'pickle.load', (['file_loc'], {}), '(file_loc)\n', (9655, 9665), False, 'import pickle\n')] |
from scipy.stats import norm, randint
import pandas as pd
import numpy as np
import json
from datetime import datetime
import tempfile
from itertools import islice, cycle
import pytest
from thermostat_nw.multiple import (
multiple_thermostat_calculate_epa_field_savings_metrics,
)
from thermostat_nw.exporters import certification_to_csv
from thermostat_nw.columns import EXPORT_COLUMNS
from thermostat_nw.stats import (
combine_output_dataframes,
compute_summary_statistics,
summary_statistics_to_csv,
)
from .fixtures.single_stage import thermostat_emg_aux_constant_on_outlier
def get_fake_output_df(n_columns):
    """Build a synthetic per-thermostat metrics DataFrame for testing.

    Despite the name, ``n_columns`` is the number of ROWS generated; the
    columns are always the fixed ``EXPORT_COLUMNS`` set.

    Numeric columns are mostly constant (0, 1) or standard-normal draws,
    with a small random fraction replaced by ``None`` or ``np.inf`` to
    exercise the downstream filtering/summary code against missing and
    non-finite values.

    Parameters
    ----------
    n_columns : int
        Number of rows (fake thermostats) to generate.

    Returns
    -------
    pandas.DataFrame
        Frame with ``EXPORT_COLUMNS`` columns and ``n_columns`` rows.
    """
    columns = EXPORT_COLUMNS
    string_placeholder = ["PLACEHOLDER"] * n_columns
    # ~1/30 chance per entry of being replaced by None or inf; the
    # randint.rvs(0, 1, size=...) draw is only used to iterate n_columns times.
    zero_column = [
        0 if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
        for i in randint.rvs(0, 1, size=n_columns)
    ]
    one_column = [
        1 if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
        for i in randint.rvs(0, 1, size=n_columns)
    ]
    float_column = [
        i if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
        for i in norm.rvs(size=n_columns)
    ]
    # Cycle through a fixed set of zipcodes / core-day-set names so every
    # row gets a valid-looking categorical value.
    zipcodes = ["01234", "12345", "23456", "34567", "43210", "54321", "65432", "76543"]
    zipcode_column = [i for i in islice(cycle(zipcodes), None, n_columns)]
    core_day_set_names = ["cooling_2012", "heating_2012-2013", "cooling_2013"]
    core_day_set_name_column = [
        i for i in islice(cycle(core_day_set_names), None, n_columns)
    ]
    # One entry per EXPORT_COLUMNS field; placeholder strings for
    # identifiers, cycled categoricals, and random numeric columns.
    data = {
        "sw_version": string_placeholder,
        "ct_identifier": string_placeholder,
        "heat_type": string_placeholder,
        "heat_stage": string_placeholder,
        "cool_type": string_placeholder,
        "cool_stage": string_placeholder,
        "heating_or_cooling": core_day_set_name_column,
        "station": string_placeholder,
        "zipcode": zipcode_column,
        "climate_zone": string_placeholder,
        "start_date": datetime(2011, 1, 1),
        "end_date": datetime(2012, 1, 1),
        "n_days_both_heating_and_cooling": one_column,
        "n_days_in_inputfile_date_range": one_column,
        "n_days_insufficient_data": zero_column,
        "n_core_heating_days": one_column,
        "baseline_percentile_core_cooling_comfort_temperature": float_column,
        "baseline_percentile_core_heating_comfort_temperature": float_column,
        "regional_average_baseline_cooling_comfort_temperature": float_column,
        "regional_average_baseline_heating_comfort_temperature": float_column,
        "percent_savings_baseline_percentile": float_column,
        "avoided_daily_mean_core_day_runtime_baseline_percentile": float_column,
        "avoided_total_core_day_runtime_baseline_percentile": float_column,
        "baseline_daily_mean_core_day_runtime_baseline_percentile": float_column,
        "baseline_total_core_day_runtime_baseline_percentile": float_column,
        "_daily_mean_core_day_demand_baseline_baseline_percentile": float_column,
        "percent_savings_baseline_regional": float_column,
        "avoided_daily_mean_core_day_runtime_baseline_regional": float_column,
        "avoided_total_core_day_runtime_baseline_regional": float_column,
        "baseline_daily_mean_core_day_runtime_baseline_regional": float_column,
        "baseline_total_core_day_runtime_baseline_regional": float_column,
        "_daily_mean_core_day_demand_baseline_baseline_regional": float_column,
        "percent_savings_baseline_hourly_regional": float_column,
        "avoided_daily_mean_core_day_runtime_baseline_hourly_regional": float_column,
        "avoided_total_core_day_runtime_baseline_hourly_regional": float_column,
        "baseline_daily_mean_core_day_runtime_baseline_hourly_regional": float_column,
        "baseline_total_core_day_runtime_baseline_hourly_regional": float_column,
        "_daily_mean_core_day_demand_baseline_baseline_hourly_regional": float_column,
        "mean_demand": float_column,
        "alpha": float_column,
        "tau": float_column,
        "mean_sq_err": float_column,
        "root_mean_sq_err": float_column,
        "cv_root_mean_sq_err": float_column,
        "mean_abs_err": float_column,
        "mean_abs_pct_err": float_column,
        "cov_x": string_placeholder,
        "nfev": float_column,
        "mesg": string_placeholder,
        "total_core_cooling_runtime": float_column,
        "total_core_heating_runtime": float_column,
        "total_auxiliary_heating_core_day_runtime": float_column,
        "total_emergency_heating_core_day_runtime": float_column,
        "daily_mean_core_cooling_runtime": float_column,
        "daily_mean_core_heating_runtime": float_column,
        "core_cooling_days_mean_indoor_temperature": float_column,
        "core_cooling_days_mean_outdoor_temperature": float_column,
        "core_heating_days_mean_indoor_temperature": float_column,
        "core_heating_days_mean_outdoor_temperature": float_column,
        "core_mean_indoor_temperature": float_column,
        "core_mean_outdoor_temperature": float_column,
        "heat_gain_constant": float_column,
        "heat_loss_constant": float_column,
        "hvac_constant": float_column,
        "overall_temperature_variance": float_column,
        "weekly_temperature_variance": float_column,
        "avg_daily_cooling_runtime": float_column,
        "avg_daily_heating_runtime": float_column,
        "avg_daily_auxiliary_runtime": float_column,
        "avg_daily_emergency_runtime": float_column,
        "lm_intercept": float_column,
        "lm_intercept_se": float_column,
        "lm_main_slope": float_column,
        "lm_main_slope_se": float_column,
        "lm_secondary_slope": float_column,
        "lm_secondary_slope_se": float_column,
        "lm_cvrmse": float_column,
        "lm_rsquared": float_column,
        "excess_resistance_score_1hr": float_column,
        "excess_resistance_score_2hr": float_column,
        "excess_resistance_score_3hr": float_column,
        "dnru_daily": float_column,
        "dnru_reduction_daily": float_column,
        "mu_estimate_daily": float_column,
        "sigma_estimate_daily": float_column,
        "sigmoid_model_error_daily": float_column,
        "sigmoid_integral_daily": float_column,
        "aux_exceeds_heat_runtime_daily": string_placeholder,
        "dnru_hourly": float_column,
        "dnru_reduction_hourly": float_column,
        "mu_estimate_hourly": float_column,
        "sigma_estimate_hourly": float_column,
        "sigmoid_model_error_hourly": float_column,
        "sigmoid_integral_hourly": float_column,
        "aux_exceeds_heat_runtime_hourly": string_placeholder,
        "rhu1_aux_duty_cycle": float_column,
        "rhu1_emg_duty_cycle": float_column,
        "rhu1_compressor_duty_cycle": float_column,
        "rhu1_00F_to_05F": float_column,
        "rhu1_05F_to_10F": float_column,
        "rhu1_10F_to_15F": float_column,
        "rhu1_15F_to_20F": float_column,
        "rhu1_20F_to_25F": float_column,
        "rhu1_25F_to_30F": float_column,
        "rhu1_30F_to_35F": float_column,
        "rhu1_35F_to_40F": float_column,
        "rhu1_40F_to_45F": float_column,
        "rhu1_45F_to_50F": float_column,
        "rhu1_50F_to_55F": float_column,
        "rhu1_55F_to_60F": float_column,
        "rhu1_00F_to_05F_aux_duty_cycle": float_column,
        "rhu1_05F_to_10F_aux_duty_cycle": float_column,
        "rhu1_10F_to_15F_aux_duty_cycle": float_column,
        "rhu1_15F_to_20F_aux_duty_cycle": float_column,
        "rhu1_20F_to_25F_aux_duty_cycle": float_column,
        "rhu1_25F_to_30F_aux_duty_cycle": float_column,
        "rhu1_30F_to_35F_aux_duty_cycle": float_column,
        "rhu1_35F_to_40F_aux_duty_cycle": float_column,
        "rhu1_40F_to_45F_aux_duty_cycle": float_column,
        "rhu1_45F_to_50F_aux_duty_cycle": float_column,
        "rhu1_50F_to_55F_aux_duty_cycle": float_column,
        "rhu1_55F_to_60F_aux_duty_cycle": float_column,
        "rhu1_00F_to_05F_emg_duty_cycle": float_column,
        "rhu1_05F_to_10F_emg_duty_cycle": float_column,
        "rhu1_10F_to_15F_emg_duty_cycle": float_column,
        "rhu1_15F_to_20F_emg_duty_cycle": float_column,
        "rhu1_20F_to_25F_emg_duty_cycle": float_column,
        "rhu1_25F_to_30F_emg_duty_cycle": float_column,
        "rhu1_30F_to_35F_emg_duty_cycle": float_column,
        "rhu1_35F_to_40F_emg_duty_cycle": float_column,
        "rhu1_40F_to_45F_emg_duty_cycle": float_column,
        "rhu1_45F_to_50F_emg_duty_cycle": float_column,
        "rhu1_50F_to_55F_emg_duty_cycle": float_column,
        "rhu1_55F_to_60F_emg_duty_cycle": float_column,
        "rhu1_00F_to_05F_compressor_duty_cycle": float_column,
        "rhu1_05F_to_10F_compressor_duty_cycle": float_column,
        "rhu1_10F_to_15F_compressor_duty_cycle": float_column,
        "rhu1_15F_to_20F_compressor_duty_cycle": float_column,
        "rhu1_20F_to_25F_compressor_duty_cycle": float_column,
        "rhu1_25F_to_30F_compressor_duty_cycle": float_column,
        "rhu1_30F_to_35F_compressor_duty_cycle": float_column,
        "rhu1_35F_to_40F_compressor_duty_cycle": float_column,
        "rhu1_40F_to_45F_compressor_duty_cycle": float_column,
        "rhu1_45F_to_50F_compressor_duty_cycle": float_column,
        "rhu1_50F_to_55F_compressor_duty_cycle": float_column,
        "rhu1_55F_to_60F_compressor_duty_cycle": float_column,
        "rhu2_aux_duty_cycle": float_column,
        "rhu2_emg_duty_cycle": float_column,
        "rhu2_compressor_duty_cycle": float_column,
        "rhu2_00F_to_05F": float_column,
        "rhu2_05F_to_10F": float_column,
        "rhu2_10F_to_15F": float_column,
        "rhu2_15F_to_20F": float_column,
        "rhu2_20F_to_25F": float_column,
        "rhu2_25F_to_30F": float_column,
        "rhu2_30F_to_35F": float_column,
        "rhu2_35F_to_40F": float_column,
        "rhu2_40F_to_45F": float_column,
        "rhu2_45F_to_50F": float_column,
        "rhu2_50F_to_55F": float_column,
        "rhu2_55F_to_60F": float_column,
        "rhu2_00F_to_05F_aux_duty_cycle": float_column,
        "rhu2_05F_to_10F_aux_duty_cycle": float_column,
        "rhu2_10F_to_15F_aux_duty_cycle": float_column,
        "rhu2_15F_to_20F_aux_duty_cycle": float_column,
        "rhu2_20F_to_25F_aux_duty_cycle": float_column,
        "rhu2_25F_to_30F_aux_duty_cycle": float_column,
        "rhu2_30F_to_35F_aux_duty_cycle": float_column,
        "rhu2_35F_to_40F_aux_duty_cycle": float_column,
        "rhu2_40F_to_45F_aux_duty_cycle": float_column,
        "rhu2_45F_to_50F_aux_duty_cycle": float_column,
        "rhu2_50F_to_55F_aux_duty_cycle": float_column,
        "rhu2_55F_to_60F_aux_duty_cycle": float_column,
        "rhu2_00F_to_05F_emg_duty_cycle": float_column,
        "rhu2_05F_to_10F_emg_duty_cycle": float_column,
        "rhu2_10F_to_15F_emg_duty_cycle": float_column,
        "rhu2_15F_to_20F_emg_duty_cycle": float_column,
        "rhu2_20F_to_25F_emg_duty_cycle": float_column,
        "rhu2_25F_to_30F_emg_duty_cycle": float_column,
        "rhu2_30F_to_35F_emg_duty_cycle": float_column,
        "rhu2_35F_to_40F_emg_duty_cycle": float_column,
        "rhu2_40F_to_45F_emg_duty_cycle": float_column,
        "rhu2_45F_to_50F_emg_duty_cycle": float_column,
        "rhu2_50F_to_55F_emg_duty_cycle": float_column,
        "rhu2_55F_to_60F_emg_duty_cycle": float_column,
        "rhu2_00F_to_05F_compressor_duty_cycle": float_column,
        "rhu2_05F_to_10F_compressor_duty_cycle": float_column,
        "rhu2_10F_to_15F_compressor_duty_cycle": float_column,
        "rhu2_15F_to_20F_compressor_duty_cycle": float_column,
        "rhu2_20F_to_25F_compressor_duty_cycle": float_column,
        "rhu2_25F_to_30F_compressor_duty_cycle": float_column,
        "rhu2_30F_to_35F_compressor_duty_cycle": float_column,
        "rhu2_35F_to_40F_compressor_duty_cycle": float_column,
        "rhu2_40F_to_45F_compressor_duty_cycle": float_column,
        "rhu2_45F_to_50F_compressor_duty_cycle": float_column,
        "rhu2_50F_to_55F_compressor_duty_cycle": float_column,
        "rhu2_55F_to_60F_compressor_duty_cycle": float_column,
        "rhu2_30F_to_45F": float_column,
        "rhu2_30F_to_45F_aux_duty_cycle": float_column,
        "rhu2_30F_to_45F_emg_duty_cycle": float_column,
        "rhu2_30F_to_45F_compressor_duty_cycle": float_column,
    }
    df = pd.DataFrame(data, columns=columns)
    return df
@pytest.fixture
def dataframes():
    """Provide two independent 10-row fake output frames."""
    return [get_fake_output_df(10) for _ in range(2)]
@pytest.fixture
def combined_dataframe():
    """Provide a single 100-row fake output frame."""
    return get_fake_output_df(100)
def test_combine_output_dataframes(dataframes):
    """Two 10-row frames combine into one frame of 20 rows x 122 columns."""
    result = combine_output_dataframes(dataframes)
    assert result.shape == (20, 122)
def test_compute_summary_statistics(combined_dataframe):
    """Default filtering yields 8 stats groups with the expected sizes."""
    stats = compute_summary_statistics(combined_dataframe)
    # Four filter-level summaries of 49 fields, then two (3057, 1657) pairs.
    assert [len(group) for group in stats] == [49] * 4 + [3057, 1657] * 2
def test_compute_summary_statistics_advanced(combined_dataframe):
    """Advanced filtering doubles the stats groups: 16 with expected sizes."""
    stats = compute_summary_statistics(
        combined_dataframe, advanced_filtering=True
    )
    # Eight filter-level summaries of 49 fields, then four (3057, 1657) pairs.
    assert [len(group) for group in stats] == [49] * 8 + [3057, 1657] * 4
def test_summary_statistics_to_csv(combined_dataframe):
    """Export returns a DataFrame and writes a 3225x5 CSV to disk."""
    stats = compute_summary_statistics(combined_dataframe)
    _, csv_path = tempfile.mkstemp()
    exported = summary_statistics_to_csv(stats, csv_path, "FAKE")
    assert isinstance(exported, pd.DataFrame)
    # Round-trip through the file to confirm what was actually written.
    assert pd.read_csv(csv_path).shape == (3225, 5)
def test_certification(combined_dataframe):
    """Certification export produces a 5x8 DataFrame."""
    product_id = "FAKE"
    _, stats_path = tempfile.mkstemp()
    _, cert_path = tempfile.mkstemp()
    summary = compute_summary_statistics(combined_dataframe)
    cert_df = certification_to_csv(summary, cert_path, product_id)
    assert cert_df.shape == (5, 8)
def test_iqr_filtering(thermostat_emg_aux_constant_on_outlier):
    """IQR filtering should neutralize the outlier thermostat.

    Runs the metrics pipeline twice — once with the known outlier present
    and once with it removed — and checks that every IQR-filtered field
    matches its unfiltered counterpart from the outlier-free run.
    """
    with_outlier = list(thermostat_emg_aux_constant_on_outlier)

    # Metrics / statistics with the outlier thermostat included.
    metrics_with = multiple_thermostat_calculate_epa_field_savings_metrics(
        with_outlier, how="entire_dataset"
    )
    stats_with = compute_summary_statistics(
        pd.DataFrame(metrics_with, columns=EXPORT_COLUMNS)
    )

    # Drop the outlier thermostat by id.
    without_outlier = [
        t
        for t in with_outlier
        if t.thermostat_id != "thermostat_single_emg_aux_constant_on_outlier"
    ]
    if len(without_outlier) == 5:
        raise ValueError("Try again")

    # Metrics / statistics recomputed with the outlier removed.
    metrics_without = multiple_thermostat_calculate_epa_field_savings_metrics(
        without_outlier, how="entire_dataset"
    )
    stats_without = compute_summary_statistics(
        pd.DataFrame(metrics_without, columns=EXPORT_COLUMNS)
    )

    # Each IQR-filtered field must equal the corresponding unfiltered
    # field from the run where the outlier was removed up-front.
    for group_with, group_without in zip(stats_with, stats_without):
        for field in (name for name in group_with if "IQFLT" in name):
            counterpart = field.replace("rhu2IQFLT", "rhu2")
            lhs = group_with[field]
            rhs = group_without[counterpart]
            if np.isnan(lhs) or np.isnan(rhs):
                # NaN never compares equal — require both sides to be NaN.
                assert np.isnan(lhs) and np.isnan(rhs)
            else:
                assert lhs == rhs
| [
"pandas.DataFrame",
"thermostat_nw.stats.compute_summary_statistics",
"tempfile.mkstemp",
"pandas.read_csv",
"scipy.stats.norm.rvs",
"thermostat_nw.stats.combine_output_dataframes",
"thermostat_nw.multiple.multiple_thermostat_calculate_epa_field_savings_metrics",
"scipy.stats.randint.rvs",
"numpy.is... | [((12410, 12445), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (12422, 12445), True, 'import pandas as pd\n'), ((12754, 12791), 'thermostat_nw.stats.combine_output_dataframes', 'combine_output_dataframes', (['dataframes'], {}), '(dataframes)\n', (12779, 12791), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((12915, 12961), 'thermostat_nw.stats.compute_summary_statistics', 'compute_summary_statistics', (['combined_dataframe'], {}), '(combined_dataframe)\n', (12941, 12961), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((13819, 13837), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (13835, 13837), False, 'import tempfile\n'), ((13877, 13941), 'thermostat_nw.stats.summary_statistics_to_csv', 'summary_statistics_to_csv', (['summary_statistics', 'fname', 'product_id'], {}), '(summary_statistics, fname, product_id)\n', (13902, 13941), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((14011, 14029), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (14022, 14029), True, 'import pandas as pd\n'), ((14143, 14161), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (14159, 14161), False, 'import tempfile\n'), ((14182, 14200), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (14198, 14200), False, 'import tempfile\n'), ((14240, 14286), 'thermostat_nw.stats.compute_summary_statistics', 'compute_summary_statistics', (['combined_dataframe'], {}), '(combined_dataframe)\n', (14266, 14286), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((14310, 14364), 'thermostat_nw.exporters.certification_to_csv', 'certification_to_csv', (['stats_df', 'fname_cert', 
'product_id'], {}), '(stats_df, fname_cert, product_id)\n', (14330, 14364), False, 'from thermostat_nw.exporters import certification_to_csv\n'), ((14637, 14737), 'thermostat_nw.multiple.multiple_thermostat_calculate_epa_field_savings_metrics', 'multiple_thermostat_calculate_epa_field_savings_metrics', (['thermostats_iqflt'], {'how': '"""entire_dataset"""'}), "(thermostats_iqflt,\n how='entire_dataset')\n", (14692, 14737), False, 'from thermostat_nw.multiple import multiple_thermostat_calculate_epa_field_savings_metrics\n'), ((14777, 14828), 'pandas.DataFrame', 'pd.DataFrame', (['iqflt_metrics'], {'columns': 'EXPORT_COLUMNS'}), '(iqflt_metrics, columns=EXPORT_COLUMNS)\n', (14789, 14828), True, 'import pandas as pd\n'), ((14860, 14910), 'thermostat_nw.stats.compute_summary_statistics', 'compute_summary_statistics', (['iqflt_output_dataframe'], {}), '(iqflt_output_dataframe)\n', (14886, 14910), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((15325, 15424), 'thermostat_nw.multiple.multiple_thermostat_calculate_epa_field_savings_metrics', 'multiple_thermostat_calculate_epa_field_savings_metrics', (['thermostats_noiq'], {'how': '"""entire_dataset"""'}), "(thermostats_noiq,\n how='entire_dataset')\n", (15380, 15424), False, 'from thermostat_nw.multiple import multiple_thermostat_calculate_epa_field_savings_metrics\n'), ((15463, 15513), 'pandas.DataFrame', 'pd.DataFrame', (['noiq_metrics'], {'columns': 'EXPORT_COLUMNS'}), '(noiq_metrics, columns=EXPORT_COLUMNS)\n', (15475, 15513), True, 'import pandas as pd\n'), ((15544, 15593), 'thermostat_nw.stats.compute_summary_statistics', 'compute_summary_statistics', (['noiq_output_dataframe'], {}), '(noiq_output_dataframe)\n', (15570, 15593), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((2009, 2029), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 
1, 1)\n', (2017, 2029), False, 'from datetime import datetime\n'), ((2051, 2071), 'datetime.datetime', 'datetime', (['(2012)', '(1)', '(1)'], {}), '(2012, 1, 1)\n', (2059, 2071), False, 'from datetime import datetime\n'), ((13225, 13296), 'thermostat_nw.stats.compute_summary_statistics', 'compute_summary_statistics', (['combined_dataframe'], {'advanced_filtering': '(True)'}), '(combined_dataframe, advanced_filtering=True)\n', (13251, 13296), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((840, 873), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(1)'], {'size': 'n_columns'}), '(0, 1, size=n_columns)\n', (851, 873), False, 'from scipy.stats import norm, randint\n'), ((1001, 1034), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(1)'], {'size': 'n_columns'}), '(0, 1, size=n_columns)\n', (1012, 1034), False, 'from scipy.stats import norm, randint\n'), ((1164, 1188), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'size': 'n_columns'}), '(size=n_columns)\n', (1172, 1188), False, 'from scipy.stats import norm, randint\n'), ((13756, 13802), 'thermostat_nw.stats.compute_summary_statistics', 'compute_summary_statistics', (['combined_dataframe'], {}), '(combined_dataframe)\n', (13782, 13802), False, 'from thermostat_nw.stats import combine_output_dataframes, compute_summary_statistics, summary_statistics_to_csv\n'), ((751, 769), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(30)'], {}), '(0, 30)\n', (762, 769), False, 'from scipy.stats import norm, randint\n'), ((912, 930), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(30)'], {}), '(0, 30)\n', (923, 930), False, 'from scipy.stats import norm, randint\n'), ((1075, 1093), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(30)'], {}), '(0, 30)\n', (1086, 1093), False, 'from scipy.stats import norm, randint\n'), ((1323, 1338), 'itertools.cycle', 'cycle', (['zipcodes'], {}), '(zipcodes)\n', (1328, 1338), False, 'from itertools 
import islice, cycle\n'), ((1496, 1521), 'itertools.cycle', 'cycle', (['core_day_set_names'], {}), '(core_day_set_names)\n', (1501, 1521), False, 'from itertools import islice, cycle\n'), ((16136, 16155), 'numpy.isnan', 'np.isnan', (['left_side'], {}), '(left_side)\n', (16144, 16155), True, 'import numpy as np\n'), ((16159, 16179), 'numpy.isnan', 'np.isnan', (['right_side'], {}), '(right_side)\n', (16167, 16179), True, 'import numpy as np\n'), ((788, 805), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(2)'], {}), '(0, 2)\n', (799, 805), False, 'from scipy.stats import norm, randint\n'), ((949, 966), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(2)'], {}), '(0, 2)\n', (960, 966), False, 'from scipy.stats import norm, randint\n'), ((1112, 1129), 'scipy.stats.randint.rvs', 'randint.rvs', (['(0)', '(2)'], {}), '(0, 2)\n', (1123, 1129), False, 'from scipy.stats import norm, randint\n'), ((16204, 16223), 'numpy.isnan', 'np.isnan', (['left_side'], {}), '(left_side)\n', (16212, 16223), True, 'import numpy as np\n'), ((16228, 16248), 'numpy.isnan', 'np.isnan', (['right_side'], {}), '(right_side)\n', (16236, 16248), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
__author__ = 'carlos.diaz'
# Density estimation in spectropolarimetric inversions
import numpy as np
import matplotlib.pyplot as plt
from torch import nn
import torch
import time
import os
import nde_utils
import nde_nflow
from tqdm import tqdm
import sys
from ipdb import set_trace as stop
# =========================================================================
def fix_leakage(prior, samples, logprob=None):
    """Remove posterior samples that leaked outside the prior box.

    Parameters
    ----------
    prior : sequence of two array-likes
        ``prior[0]`` holds the lower bounds and ``prior[1]`` the upper
        bounds, one entry per parameter (i.e. per column of ``samples``).
    samples : ndarray, shape (n_samples, n_params)
        Samples drawn from the flow.
    logprob : ndarray, shape (n_samples,), optional
        Log-probabilities associated with each sample row.

    Returns
    -------
    samples restricted to rows inside the prior; when ``logprob`` is
    given, a ``(samples, logprob)`` pair filtered consistently.
    """
    lower = np.asarray(prior[0])
    upper = np.asarray(prior[1])
    samples = np.asarray(samples)
    # Keep rows where every parameter lies within [lower, upper].
    # Strict < / > rejection in the original means boundary values are kept,
    # hence >= / <= here.
    inside = np.all((samples >= lower) & (samples <= upper), axis=1)
    samples = samples[inside]
    if logprob is not None:
        return samples, np.asarray(logprob)[inside]
    return samples
class bayes_inversion(object):
    """Amortized Bayesian inversion of spectral profiles with a normalizing flow.

    Trains an ``nde_nflow.NFLOW`` model to map an observed profile to
    posterior samples of the atmospheric parameters.  The parameter vector
    is the concatenation of three quantities (temperature, LOS velocity,
    microturbulence), each sampled at ``lentau`` optical-depth nodes —
    see the slicing in ``test_plots``.
    """
    # =========================================================================
    def __init__(self, directory = 'bayes_inversion_output_final/', device = 'cpu'):
        """Store the run configuration and create the output directory.

        directory: folder where network weights and diagnostic plots are
                   written (created if missing).
        device:    'cpu' or 'cuda'; on 'cuda' the data loaders use one
                   worker with pinned memory.
        """
        # Configuration
        self.args = nde_utils.dotdict()
        self.args.kwargs = {'num_workers': 1, 'pin_memory': True} if device=="cuda" else {}
        self.args.directory = directory
        self.device = device
        if not os.path.exists(self.args.directory): os.makedirs(self.args.directory)
    # =========================================================================
    def create_database(self, batch_size = 100, tauvalues = 15, spectral_range=0, noise=5e-4,size=1e6):
        """Load the precomputed train/validation/test databases from disk.

        batch_size:     mini-batch size used by all three loaders.
        tauvalues:      NOTE(review): accepted but never used in this body;
                        the tau grid actually comes from 'train_mltau.npy'.
        spectral_range: 5 keeps the full wavelength grid; 0 drops the first
                        21 wavelength points.  Any other value leaves
                        ``spc_idx`` undefined and raises NameError.
        noise:          noise amplitude forwarded to the loaders.
        size:           maximum number of training profiles to read.
        """
        import sparsetools as sp  # NOTE(review): imported but unused in this method
        print('[INFO] Using spectral range '+str(spectral_range))
        print('[INFO] Reading database')
        mdir = '../gaussian_model/'
        # Truncate the training database to the first `size` profiles.
        lines = np.load(mdir+'trainfixe_lines.npy')[:int(size),:]
        values = np.load(mdir+'trainfixe_values.npy')[:int(size),:]
        self.waves_info = np.load(mdir+'train_waves_info.npy')
        self.waves = np.load(mdir+'train_waves.npy')
        self.lenwave = len(self.waves)
        self.spectral_range = spectral_range
        if self.spectral_range == 5:
            spc_idx = range(self.lenwave)
        elif self.spectral_range == 0:
            spc_idx = range(21,self.lenwave )
        self.ltau = np.load(mdir+'train_ltau.npy')
        self.mltau = np.load(mdir+'train_mltau.npy')
        self.lentau = len(self.mltau)
        self.spectral_idx = np.load(mdir+'train_spectral_idx.npy')
        # Restrict every wavelength-dependent quantity to the selected range.
        self.waves_info = self.waves_info[spc_idx]
        self.waves = self.waves[spc_idx]
        self.lenwave = len(self.waves)
        self.spectral_idx = self.spectral_idx[spc_idx]
        lines = lines[:,spc_idx]
        # Random 90/10 train/validation split over shuffled profile indices.
        split = 0.9
        train_split = int(lines.shape[0]*split)
        wholedataset = np.arange(lines.shape[0])
        np.random.shuffle(wholedataset)
        # Per-parameter noise amplitude for the model parameters
        # (27 entries; presumably 3 quantities x 9 tau nodes — TODO confirm).
        mdd = np.ones(27,dtype=np.float32)*1e-2
        self.args.batch_size = batch_size
        self.train_loader = nde_utils.basicLoader(lines[wholedataset[:train_split],:], values[wholedataset[:train_split],:], noise=noise, batch_size=self.args.batch_size, shuffle=True, xnoise=1.0,amplitude=mdd, **self.args.kwargs)
        self.vali_loader = nde_utils.basicLoader(lines[wholedataset[train_split:],:], values[wholedataset[train_split:],:], noise=noise, batch_size=self.args.batch_size, shuffle=True, xnoise=1.0,amplitude=mdd, **self.args.kwargs)
        print("[INFO] len(ltau):", self.lentau)
        print("[INFO] len(waves):", self.lenwave)
        print('[INFO] Datasize obsdata: ',lines.shape)
        print('[INFO] Datasize params: ',values.shape)
        print('[INFO] Train/valid split: ',train_split,int(lines.shape[0]*(1.0-split)))
        #vali cube:
        print('[INFO] Reading test database')
        mdir = '../gaussian_model/'
        lines = np.load(mdir+'test_lines_exp.npy')[:,spc_idx]
        values = np.load(mdir+'test_values_exp.npy')
        self.test_loader = nde_utils.basicLoader(lines, values, noise=noise, batch_size=self.args.batch_size, shuffle=True, **self.args.kwargs)
    # =========================================================================
    def train_network(self, num_epochs = 2000, learning_rate = 1e-6, log_interval = 1, continueTraining=True, name_posterior= 'posterior',num_flows=5,num_blocks=1,mhidden_features=32,transformtype="rq-coupling",num_bins=8):
        """Train the flow; save the best model and per-epoch diagnostics.

        Runs up to ``num_epochs`` epochs with Adam, tracking the validation
        loss.  The best (lowest validation loss) model is checkpointed to
        ``<directory>/<name_posterior>_best.pth``; training stops early via
        ``sys.exit()`` after ``maxiloop`` epochs without improvement.
        ``create_database`` must have been called first (uses the loaders
        and ``lentau``/``lenwave``).
        """
        # The spectral range is encoded into the checkpoint name.
        name_posterior = name_posterior+'_sp'+str(self.spectral_range)
        # Flow dimensionality: 3 atmospheric quantities per tau node,
        # conditioned on the observed spectrum.
        self.args.y_size = self.lentau*3
        self.args.x_size = self.lenwave
        self.model = nde_nflow.NFLOW(self.args.y_size, self.args.x_size,num_flows=num_flows, mhidden_features=mhidden_features, num_blocks=num_blocks,
                            train_loader=self.train_loader, embedding_net=None, transformtype=transformtype,num_bins=num_bins)
        nde_utils.get_params(self.model)
        self.args.learning_rate = learning_rate
        self.args.num_epochs = num_epochs
        self.args.log_interval = log_interval
        self.args.name_posterior = name_posterior
        print('[INFO] name_posterior: ',name_posterior)
        # Optionally resume from the last best checkpoint.
        if continueTraining: self.model = torch.load(self.args.directory+name_posterior+'_best.pth'); print('Loading previous weigths...')
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        train_loss_avg = []
        vali_loss_avg = []
        time0 = time.time()
        from tqdm import trange
        t = trange(num_epochs, desc='', leave=True)
        # Early-stopping state: best validation loss seen and epochs since.
        self.valimin = 1e3
        self.count = 0
        self.maxiloop = 100
        for epoch in t:
            self.model.train()
            avgloss = 0
            for batch_idx, (params, data) in enumerate(tqdm(self.train_loader, desc='', leave=False)):
                data = data.to(self.device)
                params = params.to(self.device)
                optimizer.zero_grad()
                # model.forward returns the training loss (negative log-prob).
                loss = self.model.forward(params, data)
                loss.backward()
                optimizer.step()
                avgloss += loss.item()
            avgloss /= (batch_idx +1)
            train_loss_avg.append(avgloss)
            # Validation pass (no gradient updates).
            self.model.eval()
            avgloss2 = 0
            for batch_idx, (params, data) in enumerate(self.vali_loader):
                data = data.to(self.device)
                params = params.to(self.device)
                loss = self.model.forward(params, data)
                avgloss2 += loss.item()
            avgloss2 /= (batch_idx +1)
            vali_loss_avg.append(avgloss2)
            # Diagnostics: loss curves centred on the best validation epoch.
            # NOTE(review): when argminiv == 0 the slice below starts at -1
            # and can be empty, making np.mean return NaN.
            argminiv = np.argmin(vali_loss_avg)
            miniv = np.mean(vali_loss_avg[argminiv-1:argminiv+1+1])
            fig = plt.figure(); plt.plot(train_loss_avg); plt.plot(vali_loss_avg)
            plt.axhline(np.mean(train_loss_avg[-10:]),color='C0',ls='--')
            plt.axhline(np.mean(train_loss_avg[-10:]),color='k',ls='--',alpha=0.5)
            plt.axvline(argminiv,color='k',ls='--',alpha=0.5)
            plt.axhline(miniv,color='C1',ls='--')
            plt.axhline(miniv,color='k',ls='--',alpha=0.5)
            plt.title('loss_final: {0:2.2f} / {1:2.2f}'.format( np.mean(train_loss_avg[-10:]), miniv ))
            plt.xlabel('Epochs'); plt.ylabel('Loss')
            plt.savefig(self.args.directory+self.args.name_posterior+'_train_loss_avg.pdf'); plt.close(fig)
            # Posterior plots for three fixed test profiles, refreshed per epoch.
            self.test_plots(8160)
            self.test_plots(17954)
            self.test_plots(11387)
            t.set_postfix({'loss': '{:.2f}'.format(avgloss)})
            t.refresh()
            # Checkpoint on improvement; otherwise count towards early stop.
            if avgloss2 < self.valimin:
                self.valimin = np.copy(avgloss2)
                self.count = 0
                torch.save(self.model, self.args.directory+self.args.name_posterior+'_best.pth')
            else:
                self.count += 1
            if self.count > self.maxiloop:
                print('[INFO] Done')
                print('[INFO] name_posterior: ',name_posterior)
                sys.exit()
    # =========================================================================
    def test_plots(self, testindex=0,nsamples = 1000):
        """Plot posterior bands vs. truth for one test profile.

        Draws ``nsamples`` posterior samples for test profile ``testindex``
        and saves a 4-panel figure (T, v_LOS, v_turb vs log tau, plus the
        observed profile) to the output directory.
        """
        import mathtools as mt  # NOTE(review): imported but unused in this method
        mltau = self.mltau
        waves = self.waves
        testvalue = self.test_loader.dataset.modelparameters[testindex,:]
        testobs = self.test_loader.dataset.observations[testindex,:]
        samples_histo = self.model.obtain_samples(testobs,nsamples).data.cpu().numpy()
        # Parameter vector layout: [T | v_LOS | v_turb], lentau nodes each.
        samples_temp = samples_histo[:,self.lentau*0:self.lentau*1]
        samples_vlos = samples_histo[:,self.lentau*1:self.lentau*2]
        samples_vturb = samples_histo[:,self.lentau*2:self.lentau*3]
        fig3 = plt.figure(figsize=(8,16))
        # Panel 1: temperature with 1-sigma and 2-sigma percentile bands.
        plt.subplot(411)
        plt.fill_between(mltau,np.percentile(samples_temp, 2.5, axis=0),np.percentile(samples_temp, 97.5, axis=0),alpha=0.2,color='C1')
        plt.fill_between(mltau,np.percentile(samples_temp, 16, axis=0),np.percentile(samples_temp, 84, axis=0),alpha=0.4,color='C1')
        plt.plot(mltau,np.percentile(samples_temp, 50, axis=0),'.--',color='C1',label='fit (sigma 1&2)')
        plt.plot(mltau,testvalue[self.lentau*0:self.lentau*1], "k", marker='s', markersize=2, label="truth", ls='none')
        plt.xlabel(r"log($\tau$)")
        plt.ylabel("T [kK]");
        plt.ylim(3.0,15.0)
        plt.legend(fontsize=14)
        # Panel 2: line-of-sight velocity.
        plt.subplot(412)
        plt.fill_between(mltau,np.percentile(samples_vlos, 2.5, axis=0),np.percentile(samples_vlos, 97.5, axis=0),alpha=0.2,color='C1')
        plt.fill_between(mltau,np.percentile(samples_vlos, 16, axis=0),np.percentile(samples_vlos, 84, axis=0),alpha=0.4,color='C1')
        plt.plot(mltau,np.percentile(samples_vlos, 50, axis=0),'.--',color='C1',label='fit (sigma 1&2)')
        plt.plot(mltau,testvalue[self.lentau*1:self.lentau*2], "k", marker='s', markersize=2, label="truth", ls='none')
        plt.xlabel(r"log($\tau$)")
        plt.ylabel(r"v$_{LOS}$ [km/s]");
        plt.ylim(-12.0,+12.0)
        plt.legend(fontsize=14)
        # Panel 3: microturbulent velocity.
        plt.subplot(413)
        plt.fill_between(mltau,np.percentile(samples_vturb, 2.5, axis=0),np.percentile(samples_vturb, 97.5, axis=0),alpha=0.2,color='C1')
        plt.fill_between(mltau,np.percentile(samples_vturb, 16, axis=0),np.percentile(samples_vturb, 84, axis=0),alpha=0.4,color='C1')
        plt.plot(mltau,np.percentile(samples_vturb, 50, axis=0),'.--',color='C1',label='fit (sigma 1&2)')
        plt.plot(mltau,testvalue[self.lentau*2:self.lentau*3], "k", marker='s', markersize=2, label="truth", ls='none')
        plt.xlabel(r"log($\tau$)")
        plt.ylabel(r"v$_{TURB}$ [km/s]");
        plt.ylim(0.0,+10.0)
        plt.legend(fontsize=14)
        # Panel 4: the observed profile being inverted.
        plt.subplot(414)
        plt.plot(waves, testobs,'.--',color='C1',label='Full line')
        plt.plot(waves, testobs, "k", marker='s', markersize=5, label="Used points", ls='none')
        plt.xlabel(r"$\lambda - \lambda_0 [\AA]$")
        plt.ylabel(r"I/I$_{C(QS)}$");
        plt.legend(fontsize=14)
        plt.savefig(self.args.directory+self.args.name_posterior+'_'+str(testindex)+'_im_plot_nn.pdf')
        plt.close(fig3)
if __name__ == "__main__":
    # Script entry point: build the full-wavelength (spectral_range=5)
    # database and train a 15-flow / 10-block / 64-feature posterior
    # network from scratch (continueTraining=False).
    myflow = bayes_inversion()
    myflow.create_database(spectral_range=5, tauvalues = 9, noise=1e-2, size=1e6)
    myflow.train_network(num_epochs=3000,continueTraining=False,learning_rate = 1e-4,name_posterior= 'posterior_15_10_64_t9_1e-2g_1e6',num_flows=15,num_blocks=10,mhidden_features=64)
"numpy.load",
"numpy.ones",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"nde_utils.basicLoader",
"matplotlib.pyplot.axvline",
"numpy.copy",
"matplotlib.pyplot.close",
"torch.load",
"os.path.exists",
"numpy.random.shuffle",
"matplotlib.pyplot.axhline",
"nde_n... | [((889, 928), 'numpy.delete', 'np.delete', (['samples', 'final_index'], {'axis': '(0)'}), '(samples, final_index, axis=0)\n', (898, 928), True, 'import numpy as np\n'), ((1442, 1461), 'nde_utils.dotdict', 'nde_utils.dotdict', ([], {}), '()\n', (1459, 1461), False, 'import nde_utils\n'), ((2234, 2272), 'numpy.load', 'np.load', (["(mdir + 'train_waves_info.npy')"], {}), "(mdir + 'train_waves_info.npy')\n", (2241, 2272), True, 'import numpy as np\n'), ((2292, 2325), 'numpy.load', 'np.load', (["(mdir + 'train_waves.npy')"], {}), "(mdir + 'train_waves.npy')\n", (2299, 2325), True, 'import numpy as np\n'), ((2594, 2626), 'numpy.load', 'np.load', (["(mdir + 'train_ltau.npy')"], {}), "(mdir + 'train_ltau.npy')\n", (2601, 2626), True, 'import numpy as np\n'), ((2646, 2679), 'numpy.load', 'np.load', (["(mdir + 'train_mltau.npy')"], {}), "(mdir + 'train_mltau.npy')\n", (2653, 2679), True, 'import numpy as np\n'), ((2744, 2784), 'numpy.load', 'np.load', (["(mdir + 'train_spectral_idx.npy')"], {}), "(mdir + 'train_spectral_idx.npy')\n", (2751, 2784), True, 'import numpy as np\n'), ((3096, 3121), 'numpy.arange', 'np.arange', (['lines.shape[0]'], {}), '(lines.shape[0])\n', (3105, 3121), True, 'import numpy as np\n'), ((3130, 3161), 'numpy.random.shuffle', 'np.random.shuffle', (['wholedataset'], {}), '(wholedataset)\n', (3147, 3161), True, 'import numpy as np\n'), ((3282, 3497), 'nde_utils.basicLoader', 'nde_utils.basicLoader', (['lines[wholedataset[:train_split], :]', 'values[wholedataset[:train_split], :]'], {'noise': 'noise', 'batch_size': 'self.args.batch_size', 'shuffle': '(True)', 'xnoise': '(1.0)', 'amplitude': 'mdd'}), '(lines[wholedataset[:train_split], :], values[\n wholedataset[:train_split], :], noise=noise, batch_size=self.args.\n batch_size, shuffle=True, xnoise=1.0, amplitude=mdd, **self.args.kwargs)\n', (3303, 3497), False, 'import nde_utils\n'), ((3513, 3728), 'nde_utils.basicLoader', 'nde_utils.basicLoader', (['lines[wholedataset[train_split:], :]', 
'values[wholedataset[train_split:], :]'], {'noise': 'noise', 'batch_size': 'self.args.batch_size', 'shuffle': '(True)', 'xnoise': '(1.0)', 'amplitude': 'mdd'}), '(lines[wholedataset[train_split:], :], values[\n wholedataset[train_split:], :], noise=noise, batch_size=self.args.\n batch_size, shuffle=True, xnoise=1.0, amplitude=mdd, **self.args.kwargs)\n', (3534, 3728), False, 'import nde_utils\n'), ((4197, 4234), 'numpy.load', 'np.load', (["(mdir + 'test_values_exp.npy')"], {}), "(mdir + 'test_values_exp.npy')\n", (4204, 4234), True, 'import numpy as np\n'), ((4260, 4381), 'nde_utils.basicLoader', 'nde_utils.basicLoader', (['lines', 'values'], {'noise': 'noise', 'batch_size': 'self.args.batch_size', 'shuffle': '(True)'}), '(lines, values, noise=noise, batch_size=self.args.\n batch_size, shuffle=True, **self.args.kwargs)\n', (4281, 4381), False, 'import nde_utils\n'), ((4866, 5109), 'nde_nflow.NFLOW', 'nde_nflow.NFLOW', (['self.args.y_size', 'self.args.x_size'], {'num_flows': 'num_flows', 'mhidden_features': 'mhidden_features', 'num_blocks': 'num_blocks', 'train_loader': 'self.train_loader', 'embedding_net': 'None', 'transformtype': 'transformtype', 'num_bins': 'num_bins'}), '(self.args.y_size, self.args.x_size, num_flows=num_flows,\n mhidden_features=mhidden_features, num_blocks=num_blocks, train_loader=\n self.train_loader, embedding_net=None, transformtype=transformtype,\n num_bins=num_bins)\n', (4881, 5109), False, 'import nde_nflow\n'), ((5133, 5165), 'nde_utils.get_params', 'nde_utils.get_params', (['self.model'], {}), '(self.model)\n', (5153, 5165), False, 'import nde_utils\n'), ((5711, 5722), 'time.time', 'time.time', ([], {}), '()\n', (5720, 5722), False, 'import time\n'), ((5776, 5815), 'tqdm.trange', 'trange', (['num_epochs'], {'desc': '""""""', 'leave': '(True)'}), "(num_epochs, desc='', leave=True)\n", (5782, 5815), False, 'from tqdm import trange\n'), ((8998, 9025), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 16)'}), '(figsize=(8, 
16))\n', (9008, 9025), True, 'import matplotlib.pyplot as plt\n'), ((9033, 9049), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(411)'], {}), '(411)\n', (9044, 9049), True, 'import matplotlib.pyplot as plt\n'), ((9432, 9552), 'matplotlib.pyplot.plot', 'plt.plot', (['mltau', 'testvalue[self.lentau * 0:self.lentau * 1]', '"""k"""'], {'marker': '"""s"""', 'markersize': '(2)', 'label': '"""truth"""', 'ls': '"""none"""'}), "(mltau, testvalue[self.lentau * 0:self.lentau * 1], 'k', marker='s',\n markersize=2, label='truth', ls='none')\n", (9440, 9552), True, 'import matplotlib.pyplot as plt\n'), ((9552, 9578), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log($\\\\tau$)"""'], {}), "('log($\\\\tau$)')\n", (9562, 9578), True, 'import matplotlib.pyplot as plt\n'), ((9587, 9607), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T [kK]"""'], {}), "('T [kK]')\n", (9597, 9607), True, 'import matplotlib.pyplot as plt\n'), ((9617, 9636), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(3.0)', '(15.0)'], {}), '(3.0, 15.0)\n', (9625, 9636), True, 'import matplotlib.pyplot as plt\n'), ((9644, 9667), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (9654, 9667), True, 'import matplotlib.pyplot as plt\n'), ((9685, 9701), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(412)'], {}), '(412)\n', (9696, 9701), True, 'import matplotlib.pyplot as plt\n'), ((10084, 10204), 'matplotlib.pyplot.plot', 'plt.plot', (['mltau', 'testvalue[self.lentau * 1:self.lentau * 2]', '"""k"""'], {'marker': '"""s"""', 'markersize': '(2)', 'label': '"""truth"""', 'ls': '"""none"""'}), "(mltau, testvalue[self.lentau * 1:self.lentau * 2], 'k', marker='s',\n markersize=2, label='truth', ls='none')\n", (10092, 10204), True, 'import matplotlib.pyplot as plt\n'), ((10204, 10230), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log($\\\\tau$)"""'], {}), "('log($\\\\tau$)')\n", (10214, 10230), True, 'import matplotlib.pyplot as plt\n'), ((10239, 10269), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""v$_{LOS}$ [km/s]"""'], {}), "('v$_{LOS}$ [km/s]')\n", (10249, 10269), True, 'import matplotlib.pyplot as plt\n'), ((10280, 10302), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-12.0)', '(+12.0)'], {}), '(-12.0, +12.0)\n', (10288, 10302), True, 'import matplotlib.pyplot as plt\n'), ((10310, 10333), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (10320, 10333), True, 'import matplotlib.pyplot as plt\n'), ((10343, 10359), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(413)'], {}), '(413)\n', (10354, 10359), True, 'import matplotlib.pyplot as plt\n'), ((10747, 10867), 'matplotlib.pyplot.plot', 'plt.plot', (['mltau', 'testvalue[self.lentau * 2:self.lentau * 3]', '"""k"""'], {'marker': '"""s"""', 'markersize': '(2)', 'label': '"""truth"""', 'ls': '"""none"""'}), "(mltau, testvalue[self.lentau * 2:self.lentau * 3], 'k', marker='s',\n markersize=2, label='truth', ls='none')\n", (10755, 10867), True, 'import matplotlib.pyplot as plt\n'), ((10867, 10893), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log($\\\\tau$)"""'], {}), "('log($\\\\tau$)')\n", (10877, 10893), True, 'import matplotlib.pyplot as plt\n'), ((10902, 10933), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""v$_{TURB}$ [km/s]"""'], {}), "('v$_{TURB}$ [km/s]')\n", (10912, 10933), True, 'import matplotlib.pyplot as plt\n'), ((10944, 10964), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(+10.0)'], {}), '(0.0, +10.0)\n', (10952, 10964), True, 'import matplotlib.pyplot as plt\n'), ((10972, 10995), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (10982, 10995), True, 'import matplotlib.pyplot as plt\n'), ((11021, 11037), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(414)'], {}), '(414)\n', (11032, 11037), True, 'import matplotlib.pyplot as plt\n'), ((11046, 11108), 'matplotlib.pyplot.plot', 'plt.plot', (['waves', 'testobs', '""".--"""'], {'color': '"""C1"""', 'label': '"""Full line"""'}), "(waves, testobs, 
'.--', color='C1', label='Full line')\n", (11054, 11108), True, 'import matplotlib.pyplot as plt\n'), ((11114, 11205), 'matplotlib.pyplot.plot', 'plt.plot', (['waves', 'testobs', '"""k"""'], {'marker': '"""s"""', 'markersize': '(5)', 'label': '"""Used points"""', 'ls': '"""none"""'}), "(waves, testobs, 'k', marker='s', markersize=5, label='Used points',\n ls='none')\n", (11122, 11205), True, 'import matplotlib.pyplot as plt\n'), ((11210, 11254), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda - \\\\lambda_0 [\\\\AA]$"""'], {}), "('$\\\\lambda - \\\\lambda_0 [\\\\AA]$')\n", (11220, 11254), True, 'import matplotlib.pyplot as plt\n'), ((11261, 11288), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""I/I$_{C(QS)}$"""'], {}), "('I/I$_{C(QS)}$')\n", (11271, 11288), True, 'import matplotlib.pyplot as plt\n'), ((11299, 11322), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (11309, 11322), True, 'import matplotlib.pyplot as plt\n'), ((11434, 11449), 'matplotlib.pyplot.close', 'plt.close', (['fig3'], {}), '(fig3)\n', (11443, 11449), True, 'import matplotlib.pyplot as plt\n'), ((757, 799), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['index_param'], {}), '(index_param)\n', (786, 799), False, 'import itertools\n'), ((981, 1020), 'numpy.delete', 'np.delete', (['logprob', 'final_index'], {'axis': '(0)'}), '(logprob, final_index, axis=0)\n', (990, 1020), True, 'import numpy as np\n'), ((1638, 1673), 'os.path.exists', 'os.path.exists', (['self.args.directory'], {}), '(self.args.directory)\n', (1652, 1673), False, 'import os\n'), ((1675, 1707), 'os.makedirs', 'os.makedirs', (['self.args.directory'], {}), '(self.args.directory)\n', (1686, 1707), False, 'import os\n'), ((2089, 2126), 'numpy.load', 'np.load', (["(mdir + 'trainfixe_lines.npy')"], {}), "(mdir + 'trainfixe_lines.npy')\n", (2096, 2126), True, 'import numpy as np\n'), ((2156, 2194), 'numpy.load', 'np.load', (["(mdir + 'trainfixe_values.npy')"], 
{}), "(mdir + 'trainfixe_values.npy')\n", (2163, 2194), True, 'import numpy as np\n'), ((3177, 3206), 'numpy.ones', 'np.ones', (['(27)'], {'dtype': 'np.float32'}), '(27, dtype=np.float32)\n', (3184, 3206), True, 'import numpy as np\n'), ((4134, 4170), 'numpy.load', 'np.load', (["(mdir + 'test_lines_exp.npy')"], {}), "(mdir + 'test_lines_exp.npy')\n", (4141, 4170), True, 'import numpy as np\n'), ((5452, 5514), 'torch.load', 'torch.load', (["(self.args.directory + name_posterior + '_best.pth')"], {}), "(self.args.directory + name_posterior + '_best.pth')\n", (5462, 5514), False, 'import torch\n'), ((6908, 6932), 'numpy.argmin', 'np.argmin', (['vali_loss_avg'], {}), '(vali_loss_avg)\n', (6917, 6932), True, 'import numpy as np\n'), ((6953, 7006), 'numpy.mean', 'np.mean', (['vali_loss_avg[argminiv - 1:argminiv + 1 + 1]'], {}), '(vali_loss_avg[argminiv - 1:argminiv + 1 + 1])\n', (6960, 7006), True, 'import numpy as np\n'), ((7021, 7033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7031, 7033), True, 'import matplotlib.pyplot as plt\n'), ((7035, 7059), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_avg'], {}), '(train_loss_avg)\n', (7043, 7059), True, 'import matplotlib.pyplot as plt\n'), ((7061, 7084), 'matplotlib.pyplot.plot', 'plt.plot', (['vali_loss_avg'], {}), '(vali_loss_avg)\n', (7069, 7084), True, 'import matplotlib.pyplot as plt\n'), ((7254, 7306), 'matplotlib.pyplot.axvline', 'plt.axvline', (['argminiv'], {'color': '"""k"""', 'ls': '"""--"""', 'alpha': '(0.5)'}), "(argminiv, color='k', ls='--', alpha=0.5)\n", (7265, 7306), True, 'import matplotlib.pyplot as plt\n'), ((7316, 7355), 'matplotlib.pyplot.axhline', 'plt.axhline', (['miniv'], {'color': '"""C1"""', 'ls': '"""--"""'}), "(miniv, color='C1', ls='--')\n", (7327, 7355), True, 'import matplotlib.pyplot as plt\n'), ((7366, 7415), 'matplotlib.pyplot.axhline', 'plt.axhline', (['miniv'], {'color': '"""k"""', 'ls': '"""--"""', 'alpha': '(0.5)'}), "(miniv, color='k', ls='--', alpha=0.5)\n", 
(7377, 7415), True, 'import matplotlib.pyplot as plt\n'), ((7529, 7549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (7539, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7551, 7569), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (7561, 7569), True, 'import matplotlib.pyplot as plt\n'), ((7582, 7669), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.args.directory + self.args.name_posterior + '_train_loss_avg.pdf')"], {}), "(self.args.directory + self.args.name_posterior +\n '_train_loss_avg.pdf')\n", (7593, 7669), True, 'import matplotlib.pyplot as plt\n'), ((7663, 7677), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7672, 7677), True, 'import matplotlib.pyplot as plt\n'), ((9081, 9121), 'numpy.percentile', 'np.percentile', (['samples_temp', '(2.5)'], {'axis': '(0)'}), '(samples_temp, 2.5, axis=0)\n', (9094, 9121), True, 'import numpy as np\n'), ((9122, 9163), 'numpy.percentile', 'np.percentile', (['samples_temp', '(97.5)'], {'axis': '(0)'}), '(samples_temp, 97.5, axis=0)\n', (9135, 9163), True, 'import numpy as np\n'), ((9217, 9256), 'numpy.percentile', 'np.percentile', (['samples_temp', '(16)'], {'axis': '(0)'}), '(samples_temp, 16, axis=0)\n', (9230, 9256), True, 'import numpy as np\n'), ((9257, 9296), 'numpy.percentile', 'np.percentile', (['samples_temp', '(84)'], {'axis': '(0)'}), '(samples_temp, 84, axis=0)\n', (9270, 9296), True, 'import numpy as np\n'), ((9342, 9381), 'numpy.percentile', 'np.percentile', (['samples_temp', '(50)'], {'axis': '(0)'}), '(samples_temp, 50, axis=0)\n', (9355, 9381), True, 'import numpy as np\n'), ((9733, 9773), 'numpy.percentile', 'np.percentile', (['samples_vlos', '(2.5)'], {'axis': '(0)'}), '(samples_vlos, 2.5, axis=0)\n', (9746, 9773), True, 'import numpy as np\n'), ((9774, 9815), 'numpy.percentile', 'np.percentile', (['samples_vlos', '(97.5)'], {'axis': '(0)'}), '(samples_vlos, 97.5, axis=0)\n', (9787, 9815), True, 'import 
numpy as np\n'), ((9869, 9908), 'numpy.percentile', 'np.percentile', (['samples_vlos', '(16)'], {'axis': '(0)'}), '(samples_vlos, 16, axis=0)\n', (9882, 9908), True, 'import numpy as np\n'), ((9909, 9948), 'numpy.percentile', 'np.percentile', (['samples_vlos', '(84)'], {'axis': '(0)'}), '(samples_vlos, 84, axis=0)\n', (9922, 9948), True, 'import numpy as np\n'), ((9994, 10033), 'numpy.percentile', 'np.percentile', (['samples_vlos', '(50)'], {'axis': '(0)'}), '(samples_vlos, 50, axis=0)\n', (10007, 10033), True, 'import numpy as np\n'), ((10391, 10432), 'numpy.percentile', 'np.percentile', (['samples_vturb', '(2.5)'], {'axis': '(0)'}), '(samples_vturb, 2.5, axis=0)\n', (10404, 10432), True, 'import numpy as np\n'), ((10433, 10475), 'numpy.percentile', 'np.percentile', (['samples_vturb', '(97.5)'], {'axis': '(0)'}), '(samples_vturb, 97.5, axis=0)\n', (10446, 10475), True, 'import numpy as np\n'), ((10529, 10569), 'numpy.percentile', 'np.percentile', (['samples_vturb', '(16)'], {'axis': '(0)'}), '(samples_vturb, 16, axis=0)\n', (10542, 10569), True, 'import numpy as np\n'), ((10570, 10610), 'numpy.percentile', 'np.percentile', (['samples_vturb', '(84)'], {'axis': '(0)'}), '(samples_vturb, 84, axis=0)\n', (10583, 10610), True, 'import numpy as np\n'), ((10656, 10696), 'numpy.percentile', 'np.percentile', (['samples_vturb', '(50)'], {'axis': '(0)'}), '(samples_vturb, 50, axis=0)\n', (10669, 10696), True, 'import numpy as np\n'), ((571, 620), 'numpy.where', 'np.where', (['(samples[:, i_param] < prior[0][i_param])'], {}), '(samples[:, i_param] < prior[0][i_param])\n', (579, 620), True, 'import numpy as np\n'), ((651, 700), 'numpy.where', 'np.where', (['(samples[:, i_param] > prior[1][i_param])'], {}), '(samples[:, i_param] > prior[1][i_param])\n', (659, 700), True, 'import numpy as np\n'), ((6028, 6073), 'tqdm.tqdm', 'tqdm', (['self.train_loader'], {'desc': '""""""', 'leave': '(False)'}), "(self.train_loader, desc='', leave=False)\n", (6032, 6073), False, 'from tqdm 
import tqdm\n'), ((7109, 7138), 'numpy.mean', 'np.mean', (['train_loss_avg[-10:]'], {}), '(train_loss_avg[-10:])\n', (7116, 7138), True, 'import numpy as np\n'), ((7183, 7212), 'numpy.mean', 'np.mean', (['train_loss_avg[-10:]'], {}), '(train_loss_avg[-10:])\n', (7190, 7212), True, 'import numpy as np\n'), ((7942, 7959), 'numpy.copy', 'np.copy', (['avgloss2'], {}), '(avgloss2)\n', (7949, 7959), True, 'import numpy as np\n'), ((8007, 8095), 'torch.save', 'torch.save', (['self.model', "(self.args.directory + self.args.name_posterior + '_best.pth')"], {}), "(self.model, self.args.directory + self.args.name_posterior +\n '_best.pth')\n", (8017, 8095), False, 'import torch\n'), ((8311, 8321), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8319, 8321), False, 'import sys\n'), ((7477, 7506), 'numpy.mean', 'np.mean', (['train_loss_avg[-10:]'], {}), '(train_loss_avg[-10:])\n', (7484, 7506), True, 'import numpy as np\n')] |
import time
import sys
def get_close_matches_Levenshtein(
        word,
        possibilities,
        n=3,
        cutoff=0.6,
        full=False):
    '''Replaces difflib.get_close_matches with faster algorithm based on
    Levenshtein.ratio.

    Parameters
    ----------
    word : str
        String to match.
    possibilities : str or iterable of str
        Candidate strings (a single string is treated as one candidate).
    n : int
        Maximum number of matches returned.
    cutoff : float
        Minimum Levenshtein ratio for a candidate to count as a match.
    full : bool
        When True also return the similarity scores.

    Returns
    -------
    A list of up to ``n`` best matches sorted by decreasing similarity,
    plus the list of their similarities when ``full`` is True.  When no
    candidate reaches ``cutoff``: ``([], 0)`` if ``full`` else ``[]``
    (the 0 is kept for backward compatibility).

    HINT: Similarity increase significatively after lower() and unidecode()
    Refs: https://en.wikipedia.org/wiki/Levenshtein_distance
    '''
    import Levenshtein
    if isinstance(possibilities, str):
        possibilities = [possibilities]
    # Score candidates into a plain list instead of appending row-by-row to
    # a DataFrame: DataFrame.append was deprecated and removed in pandas 2.0.
    matches = [(Levenshtein.ratio(word, p), p)
               for p in possibilities]
    matches = [(s, p) for s, p in matches if s >= cutoff]
    if not matches:
        return ([], 0) if full else []
    matches.sort(key=lambda sp: sp[0], reverse=True)
    best = matches[:n]
    if full:
        return [p for _, p in best], [s for s, _ in best]
    return [p for _, p in best]
def check_hash(df, hashseries, in_hash, min_match=10):
    ''' hashseries obtained from dataframe df, e.g
    hashseries=df.some_column.str.replace(r'\W+','').str.lower().map(unicode)
    within which in_hash will be searched for: progressively shorter
    prefixes of in_hash are tried until some row of hashseries matches.

    Returns
    -------
    (bool, DataFrame): the flag is False when the search had to go below
    ``min_match`` characters before finding a match (or never found one);
    the DataFrame holds the matching rows, or is empty when nothing ever
    matched (e.g. for an empty ``df``).
    '''
    # Local import: the module top only imports time/sys, so without this
    # the empty-result path below raised NameError on `pd`.
    import pandas as pd
    comparision = True
    for si in reversed(range(0, len(in_hash)+1)):
        chk = df[hashseries.str.match(in_hash[:si])]
        if chk.shape[0] > 0:
            return comparision, chk
        if si < min_match:
            comparision = False
    return comparision, pd.DataFrame()
def columns_add_prefix(df, prefix):
    """Return *df* with every column renamed to ``<prefix>_<column>``."""
    renamed = {column: prefix + '_' + column for column in df.columns}
    return df.rename(renamed, axis=1)
def fill_NaN(df):
    '''Fill NaN entries with type-appropriate empty values, column by
    column (in place), and return the dataframe.

    Type : dtype     : Fill with
    string: "O"      : ''
    float : "float64": 0.0
    Other dtypes are left untouched.
    '''
    for column in df.columns:
        kind = df[column].dtype
        if kind == 'O':
            df[column] = df[column].fillna('')
        elif kind == 'float64':
            df[column] = df[column].fillna(0.0)
    return df
def read_excel_fill_NaN(*args, **kwargs):
    '''Read an Excel file with ``pandas.read_excel`` and fill NaN entries
    with proper empty values (see ``fill_NaN``):

    Type : dtype     : Fill with
    string: "O"      : ''
    float : "float64": 0.0

    All arguments are forwarded verbatim to ``pandas.read_excel``.
    '''
    # Local import: the module top only imports time/sys, so `pd` was a
    # NameError here before.
    import pandas as pd
    df = pd.read_excel(*args, **kwargs)
    return fill_NaN(df)
# To add to main publications object:
def add_sjr_info_from_issn(
        self,
        SJR,
        column_issn='SN',
        SJR_column_journal='SJR_Title',
        SJR_column_issn='SJR_Issn'):
    '''self is an publication object and SJR is the info for a journal in
    column SJR_Issn.

    Second-pass lookup: only articles that the exact-title pass
    (``add_sjr_info``) left without a ``SJR_column_journal`` value are
    matched here, by ISSN (with dashes stripped).  Every SJR column of an
    unambiguous match is copied onto the article rows.  Returns ``self``.
    '''
    if SJR_column_journal not in self.articles.columns:
        # Fixed duplicated word in the message (matches the sibling
        # add_sjr_info_from_journal).
        sys.exit("Run first the more exact and fast add_sjr_info")
    self.articles = fill_NaN(self.articles)
    # Articles still unresolved after the exact journal-title match.
    kk = self.articles[self.articles[SJR_column_journal] == '']
    for issn in kk[column_issn].str.replace('-', '').unique():
        mtch = SJR[SJR[SJR_column_issn].str.contains(
            issn)].reset_index(drop=True)
        if mtch.shape[0] == 1:
            moa = kk[kk[column_issn].str.replace('-', '') == issn]
            if moa.shape[0] >= 1:
                # DEBUG: more filters if
                for key in SJR.columns.values:
                    # .ix was removed from pandas; use .iloc instead.
                    self.articles.loc[moa.index.values, key] = mtch.iloc[0][key]
    return self
def add_sjr_info_from_journal(
        self,
        SJR,
        column_journal='SO',
        SJR_column_journal='SJR_Title'):
    '''self is an publication object and SJR is the info for a journal in
    column SJR_Title.

    Second-pass lookup: articles left unresolved by the exact-title pass
    (``add_sjr_info``) are matched here by a normalised-title prefix
    search (``check_hash``); ambiguous matches are retried with a
    word-boundary match on the raw titles.  Returns ``self``.
    '''
    # Local imports: the module top only imports time/sys, so `re` and
    # `unidecode` were NameErrors here before.
    import re
    from unidecode import unidecode
    if SJR_column_journal not in self.articles.columns:
        sys.exit("Run first the more exact and fast add_sjr_info")
    self.articles = fill_NaN(self.articles)
    kk = self.articles[self.articles[SJR_column_journal] == '']
    # Normalised (alphanumeric, lower-case, ASCII) SJR journal titles.
    SJR_hash_Title = SJR[SJR_column_journal].str.replace(
        r'\W+', '').str.lower().str.strip().map(unidecode)
    for title in kk[column_journal].str.lower().str.strip().unique():
        hash_match, mtch = check_hash(
            SJR, SJR_hash_Title, re.sub(r'\W+', '', title).lower().strip())
        if hash_match:
            mtch = mtch.reset_index(drop=True)
            if mtch.shape[0] > 1:
                # Ambiguous prefix match: retry with a word-boundary
                # match on the original titles.
                newtitle = re.sub(r'\W+', ' ', title)
                mtch = SJR[SJR[SJR_column_journal].str.lower(
                ).str.strip().str.match('%s ' % newtitle)]
                if mtch.shape[0]:
                    mtch = mtch.reset_index(drop=True)
            if mtch.shape[0] == 1:
                moa = kk[kk[column_journal].str.lower().str.strip() == title]
                if moa.shape[0] >= 1:
                    for key in SJR.columns.values:
                        # .ix was removed from pandas; use .iloc instead.
                        self.articles.loc[moa.index.values,
                                          key] = mtch.iloc[0][key]
    return self
def add_sjr_info(self, SJR, column_journal='SO',
SJR_column_journal='SJR_Title'):
'''self is an publication object and SJR is the info for a journal in column SJR_Title'''
self.articles = self.articles.reset_index(drop=True)
for joa in np.intersect1d(
self.articles[column_journal].str.lower().str.strip().unique(),
SJR[SJR_column_journal].str.lower().str.strip().unique()):
moa = self.articles[self.articles[column_journal].str.lower() == joa]
if moa.shape[0]:
mtch = SJR[SJR[SJR_column_journal].str.lower(
).str.strip() == joa].reset_index(drop=True)
if mtch.shape[0] == 1:
# DEBUG: filter by ISSN if >1:
for key in SJR.columns.values:
self.articles.loc[moa.index.values, key] = mtch.ix[0][key]
return self
def merge_with_close_matches(
left,
right,
left_on='ST',
right_on='UDEA_simple_título',
left_extra_on='SO',
right_extra_on='UDEA_nombre revista o premio',
how='inner',
n=1,
cutoff=0.6,
full=True,
cutoff_extra=0.6):
'''For each entry of the column: left_on of DataFrame left (cannot have empty fields),
try to find the close match inside each row of right DataFrame, by comparing with
the right_on entry of the row. When a row match is found, the full right row is appended
to the matched row in the left DataFrame.
If the similarity between the entries at left_on and right_on is less than 0.8,
an additional check is performed between the entries left_extra_on and right_extra_on
of the matched row.
how implemented: inner and left (Default: inner)
'''
import numpy as np
from unidecode import unidecode
import pandas as pd
# import sys #import globally
# print(left[left_on][0])
# sys.exit()
words = left[left_on].str.lower().map(unidecode)
possibilities = right[right_on].str.lower().map(unidecode)
joined = pd.DataFrame()
mi = np.array([])
for i in left.index:
if i % 100 == 0:
print('.', end="")
joined_series = left.loc[i]
#joined_series=joined_series.append(pd.Series( {similarity_column:0} ))
title, similarity = get_close_matches_Levenshtein(
words[i], possibilities, n=n, cutoff=cutoff, full=full)
# print(i,words[i],title,similarity) #cutuff 0.6 0.7 0.8 0.85 0.91 0.95
# sys.exit()
if title:
mtch = right[possibilities == title[0]]
# >=cutoff, e.g 0.65 0.95 0.81 0.86 0.9 0.96
chk_cutoff = similarity[0]
crosscheck = cutoff + 0.2 # 0.8 # e.g. 0.8 0.9 0.9 0.9 0.9 0.9
if crosscheck >= 1:
# force check if match worst than this (by experience)
crosscheck = 0.95
if chk_cutoff < crosscheck: # e.g 0.65<0.8 0.95~<0.9 0.81~<0.0 0.86<0.9 0.91<~0.9 0.96~<0.9
if get_close_matches_Levenshtein(unidecode(left[left_extra_on][i].lower()), [unidecode(
mtch[right_extra_on][mtch.index[0]].lower())], cutoff=cutoff_extra): # cutoff=0.6
chk_cutoff = crosscheck + 0.1
if chk_cutoff >= crosscheck:
joined_series = joined_series.append(mtch.loc[mtch.index[0]])
if how == 'outer':
mi = np.concatenate((mi, mtch.index.values))
# joined_series[similarity_column]=similarity[0]
if how == 'inner':
joined = joined.append(joined_series, ignore_index=True)
if how == 'left' or 'outer':
joined = joined.append(joined_series, ignore_index=True)
if how == 'outer':
joined = joined.append(right.drop(
right.index[list(mi.astype(int))]).reset_index(drop=True))
return joined
def merge_udea_points(
original_df,
target_df,
check_columns=None,
check_against_colums=None,
drop_not_UDEA_columns=True,
old_extra_column='UDEA_nombre revista o premio',
new_extra_column='SO',
DEBUG=False):
'''
# STEPS: 0:Simplified, 1:full title including translations, 2:reverse translation in UDEA
drop_not_UDEA_columns=True: Remove other columns, if False remove UDEA_ ones
'''
if check_columns is None:
check_columns = ['UDEA_simple_title', 'UDEA_título', 'UDEA_título']
if check_against_colums is None:
check_against_colums = ['TI', 'SCP_Title', 'TI']
# Specific of STEP
STEP = 0
old = original_df # 100
old_column = check_columns[STEP]
new = target_df[target_df[check_against_colums[STEP]] != '']
new_column = check_against_colums[STEP]
st = time.time()
joined = merge_with_close_matches(
old,
new,
old_column,
new_column,
old_extra_column,
new_extra_column,
n=1,
cutoff=0.6,
full=True)
print(time.time() - st)
joined = fill_NaN(joined)
if DEBUG:
print('check final shape after STEP %d: %d' % (STEP, joined.shape[0]))
udea_found = joined[joined[new_column] != ''].reset_index(drop=True) # 42
original_df = joined[joined[new_column] == ''].reset_index(drop=True) # 58
if DEBUG:
print(STEP, '->', udea_found.shape, original_df.shape,
udea_found.shape[0] + original_df.shape[0])
original_df_not_scp_title = original_df[original_df.SCP_Title == ''].reset_index(
drop=True) # 33
original_df = original_df[original_df.SCP_Title !=
''].reset_index(drop=True) # 25
print(STEP, ':', udea_found.shape[0],
original_df_not_scp_title.shape[0], original_df.shape[0])
if DEBUG:
print(STEP, '->', original_df_not_scp_title.shape, original_df.shape,
original_df_not_scp_title.shape[0] + original_df.shape[0])
for STEP in [1, 2]:
for k in original_df.columns:
if drop_not_UDEA_columns:
if k.find('UDEA') == -1:
original_df = original_df.drop(k, axis=1)
else:
if k.find('UDEA') > -1:
original_df = original_df.drop(k, axis=1)
old = original_df # 1:25; #2: 58
old_column = check_columns[STEP]
new = target_df[target_df[check_against_colums[STEP]] != '']
new_column = check_against_colums[STEP]
st = time.time()
joined = merge_with_close_matches(
old,
new,
old_column,
new_column,
old_extra_column,
new_extra_column,
n=1,
cutoff=0.6,
full=True)
print(time.time() - st)
joined = fill_NaN(joined)
if STEP == 1:
original_df = original_df_not_scp_title # 1: 33
if new_column in joined: # 1: False; #2: True
udea_found = udea_found.append(
joined[joined[new_column] != ''], ignore_index=True) # 2:7+42=49
if STEP == 1:
original_df = original_df.append(
joined[joined[new_column] == ''], ignore_index=True)
else:
original_df = joined[joined[new_column] == ''] # 2:51
if DEBUG:
print(STEP,
':::>',
joined[joined[new_column] != ''].shape[0],
joined[joined[new_column] == ''].shape[0])
else: # 1: True; 2: False
# udea found is the same because not new mathc was found
if STEP == 1:
original_df = original_df.append(
joined, ignore_index=True) # 1: 33+25=58
print(STEP, ':', udea_found.shape[0], original_df.shape[0])
target_df_UDEA = udea_found # 2: 49
target_df_UDEA = target_df_UDEA.append(
original_df, ignore_index=True) # 49+51
target_df_UDEA = fill_NaN(target_df_UDEA)
print(udea_found.shape[0], original_df.shape[0], target_df_UDEA.shape[0])
return target_df_UDEA
def merge_udea_points_new(
original_df,
target_df,
check_columns=None,
check_against_colums=None,
drop_not_UDEA_columns=True,
old_extra_column='UDEA_nombre revista o premio',
new_extra_column='SO',
how='inner',
DEBUG=False):
'''
# STEPS: 0:Simplified, 1:full title including translations, 2:reverse translation in UDEA
drop_not_UDEA_columns=True: Remove other columns, if False remove UDEA_ ones
'''
if check_columns is None:
check_columns = ['UDEA_simple_title', 'UDEA_título', 'UDEA_título']
if check_against_colums is None:
check_against_colums = ['TI', 'SCP_Title', 'TI']
# Specific of STEP
STEP = 0
old = original_df # 100
old_column = check_columns[STEP]
new = target_df[target_df[check_against_colums[STEP]] != '']
new_column = check_against_colums[STEP]
st = time.time()
joined = merge_with_close_matches(
old,
new,
old_column,
new_column,
old_extra_column,
new_extra_column,
how=how,
n=1,
cutoff=0.6,
full=True)
print(time.time() - st)
joined = fill_NaN(joined)
if DEBUG:
print('check final shape after STEP %d: %d' % (STEP, joined.shape[0]))
udea_found = joined[joined[new_column] != ''].reset_index(drop=True) # 42
original_df = joined[joined[new_column] == ''].reset_index(drop=True) # 58
if DEBUG:
print(STEP, '->', udea_found.shape, original_df.shape,
udea_found.shape[0] + original_df.shape[0])
original_df_not_scp_title = original_df[original_df.SCP_Title == ''].reset_index(
drop=True) # 33
original_df = original_df[original_df.SCP_Title !=
''].reset_index(drop=True) # 25
print(STEP, ':', udea_found.shape[0],
original_df_not_scp_title.shape[0], original_df.shape[0])
if DEBUG:
print(STEP, '->', original_df_not_scp_title.shape, original_df.shape,
original_df_not_scp_title.shape[0] + original_df.shape[0])
for STEP in [1, 2]:
for k in original_df.columns:
if drop_not_UDEA_columns:
if k.find('UDEA') == -1:
original_df = original_df.drop(k, axis=1)
else:
if k.find('UDEA') > -1:
original_df = original_df.drop(k, axis=1)
old = original_df # 1:25; #2: 58
old_column = check_columns[STEP]
new = target_df[target_df[check_against_colums[STEP]] != '']
new_column = check_against_colums[STEP]
st = time.time()
joined = merge_with_close_matches(
old,
new,
old_column,
new_column,
old_extra_column,
new_extra_column,
how=how,
n=1,
cutoff=0.6,
full=True)
print(time.time() - st)
joined = fill_NaN(joined)
if STEP == 1:
original_df = original_df_not_scp_title # 1: 33
if new_column in joined: # 1: False; #2: True
udea_found = udea_found.append(
joined[joined[new_column] != ''], ignore_index=True) # 2:7+42=49
if STEP == 1:
original_df = original_df.append(
joined[joined[new_column] == ''], ignore_index=True)
else:
original_df = joined[joined[new_column] == ''] # 2:51
if DEBUG:
print(STEP,
':::>',
joined[joined[new_column] != ''].shape[0],
joined[joined[new_column] == ''].shape[0])
else: # 1: True; 2: False
# udea found is the same because not new mathc was found
if STEP == 1:
original_df = original_df.append(
joined, ignore_index=True) # 1: 33+25=58
print(STEP, ':', udea_found.shape[0], original_df.shape[0])
target_df_UDEA = udea_found # 2: 49
target_df_UDEA = target_df_UDEA.append(
original_df, ignore_index=True) # 49+51
target_df_UDEA = fill_NaN(target_df_UDEA)
print(udea_found.shape[0], original_df.shape[0], target_df_UDEA.shape[0])
return target_df_UDEA
def get_doi(
surname=None, # 'florez',
title=r'Baryonic violation of R-parity from anomalous $U(1)_H$',
other='',
DOI=None,
check_text=None,
check_mesagges_key=None,
similarity=0.6,
JSON=False):
'''
Search for a DOI and check against the full DOI info. If JSON is set True, the full
info is returned as a python dictionary
Implementations:
1) Search and check for a DOI by surname and title or if only DOI is given, just check the DOI.
The checking is doing by comparing check_text with the check_mesagges_key from the full info.
By default the given 'title' is used for the check.
For other possible check_mesagges_key's, see in a browser the several keys of the 'mesagges'
dictionary at:
https://api.crossref.org/v1/works/DOI,
for example:
https://api.crossref.org/v1/works/10.1103/physrevd.87.095010
2) if only DOI is given, just get full info about the DOI without any checks.
(only the key 'messages' is checed to exists)
Examples:
2) get_doi(surname='Florez',title='Baryonic violation of R-parity from anomalous U(1)H',JSON=True)
3) get_doi(DOI)
'''
import requests
# import time imported globally
import numpy as np
if not check_text:
check_text = title.lower()
if not check_mesagges_key:
check_mesagges_key = 'title' # 'container-title'
if not surname and DOI:
if not check_text and not check_mesagges_key:
similarity = 0.1
check_text = 'http://dx.doi.org/'
check_mesagges_key = 'DOI'
JSON = True
doi = ''
if JSON:
doi = {}
if not DOI:
search = ''
if surname:
search = surname
if title:
if len(search) > 0:
search = search + ', ' + title
else:
search = search + title
if other:
if len(search) > 0:
search = search + ', ' + other
r = requests.get('http://search.crossref.org/?q=%s' % search)
urldoi = 'http://dx.doi.org/'
DOI = ''
try:
DOI = r.text.split(urldoi)[1].split("\',")[0].split('>\n')[0]
except IndexError:
DOI = ''
if DOI:
json = 'https://api.crossref.org/v1/works/'
rr = requests.get(json + DOI)
if rr.status_code == 200:
if 'message' in rr.json():
if check_mesagges_key in rr.json()['message']:
if len(rr.json()["message"][check_mesagges_key]):
chk = get_close_matches_Levenshtein(
check_text,
rr.json()[
"message"][check_mesagges_key][0].lower(),
n=1,
cutoff=similarity)
if chk:
if "DOI" in rr.json()["message"]:
doi = rr.json()["message"]["DOI"]
if JSON: # Overwrite upon previous doi
doi = rr.json()["message"]
time.sleep(np.random.randint(1, 3))
return doi
| [
"pandas.DataFrame",
"numpy.concatenate",
"time.time",
"pandas.read_excel",
"numpy.random.randint",
"numpy.array",
"Levenshtein.ratio",
"requests.get",
"sys.exit"
] | [((537, 551), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (549, 551), True, 'import pandas as pd\n'), ((2522, 2552), 'pandas.read_excel', 'pd.read_excel', (['*args'], {}), '(*args, **kwargs)\n', (2535, 2552), True, 'import pandas as pd\n'), ((7225, 7239), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7237, 7239), True, 'import pandas as pd\n'), ((7249, 7261), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7257, 7261), True, 'import numpy as np\n'), ((9970, 9981), 'time.time', 'time.time', ([], {}), '()\n', (9979, 9981), False, 'import time\n'), ((14237, 14248), 'time.time', 'time.time', ([], {}), '()\n', (14246, 14248), False, 'import time\n'), ((619, 645), 'Levenshtein.ratio', 'Levenshtein.ratio', (['word', 'p'], {}), '(word, p)\n', (636, 645), False, 'import Levenshtein\n'), ((2945, 3007), 'sys.exit', 'sys.exit', (['"""Run first the the more exact and fast add_sjr_info"""'], {}), "('Run first the the more exact and fast add_sjr_info')\n", (2953, 3007), False, 'import sys\n'), ((3879, 3937), 'sys.exit', 'sys.exit', (['"""Run first the more exact and fast add_sjr_info"""'], {}), "('Run first the more exact and fast add_sjr_info')\n", (3887, 3937), False, 'import sys\n'), ((11683, 11694), 'time.time', 'time.time', ([], {}), '()\n', (11692, 11694), False, 'import time\n'), ((15967, 15978), 'time.time', 'time.time', ([], {}), '()\n', (15976, 15978), False, 'import time\n'), ((19694, 19751), 'requests.get', 'requests.get', (["('http://search.crossref.org/?q=%s' % search)"], {}), "('http://search.crossref.org/?q=%s' % search)\n", (19706, 19751), False, 'import requests\n'), ((20020, 20044), 'requests.get', 'requests.get', (['(json + DOI)'], {}), '(json + DOI)\n', (20032, 20044), False, 'import requests\n'), ((20825, 20848), 'numpy.random.randint', 'np.random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (20842, 20848), True, 'import numpy as np\n'), ((10202, 10213), 'time.time', 'time.time', ([], {}), '()\n', (10211, 10213), False, 'import 
time\n'), ((14486, 14497), 'time.time', 'time.time', ([], {}), '()\n', (14495, 14497), False, 'import time\n'), ((1845, 1859), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1857, 1859), True, 'import pandas as pd\n'), ((11959, 11970), 'time.time', 'time.time', ([], {}), '()\n', (11968, 11970), False, 'import time\n'), ((16264, 16275), 'time.time', 'time.time', ([], {}), '()\n', (16273, 16275), False, 'import time\n'), ((8611, 8650), 'numpy.concatenate', 'np.concatenate', (['(mi, mtch.index.values)'], {}), '((mi, mtch.index.values))\n', (8625, 8650), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
from numbers import Integral, Number
from time import time
import numpy as np
from optuna import TrialPruned # NOQA
import sklearn
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.model_selection import BaseCrossValidator # NOQA
from sklearn.model_selection import check_cv, cross_validate
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.utils import check_random_state
from sklearn.utils.metaestimators import _safe_split
try:
from sklearn.utils import _safe_indexing as sklearn_safe_indexing
except:
from sklearn.utils import safe_indexing as sklearn_safe_indexing
try:
from sklearn.metrics import check_scoring
except:
from sklearn.metrics.scorer import check_scoring
class _Objective(object):
"""
Callable that implements objective function.
Parameters
----------
model:
Object to use to fit the data. This is assumed to implement the
scikit-learn estimator interface. Either this needs to provide
``score``, or ``scoring`` must be passed.
X:
Training data.
y:
Target variable.
cv:
Cross-validation strategy.
enable_pruning:
If :obj:`True`, pruning is performed in the case where the
underlying estimator supports ``partial_fit``.
error_score:
Value to assign to the score if an error occurs in fitting. If
'raise', the error is raised. If numeric,
``sklearn.exceptions.FitFailedWarning`` is raised. This does not
affect the refit step, which will always raise the error.
fit_params:
Parameters passed to ``fit`` one the estimator.
groups:
Group labels for the samples used while splitting the dataset into
train/validation set.
max_iter,
max number of iteration for ``partial_fit``
return_train_score:
If :obj:`True`, training scores will be included. Computing
training scores is used to get insights on how different
hyperparameter settings impact the overfitting/underfitting
trade-off. However computing training scores can be
computationally expensive and is not strictly required to select
the hyperparameters that yield the best generalization
performance. Hence, we default it to be False and do not expose
this parameter to the users.
scoring:
Scorer function.
scoring_name:
name of the Scorer function.
step_name:
step name of the estimator in a pipeline
Returns
-------
the objective score
"""
def __init__(
self,
model, # type: Union[BaseEstimator, Pipeline]
param_distributions, # type: Mapping[str, distributions.BaseDistribution]
cv, # type: BaseCrossValidator
enable_pruning, # type: bool
error_score, # type: Union[Number, str]
fit_params, # type: Dict[str, Any]
groups, # type: Optional[OneDimArrayLikeType]
max_iter, # type: int
return_train_score, # type: bool
scoring, # type: Callable[..., Number]
scoring_name, # type: str
step_name, # type: str
):
# type: (...) -> None
self.model = model
self.param_distributions = param_distributions
self.cv = cv
self.enable_pruning = enable_pruning
self.error_score = error_score
self.fit_params = fit_params
self.groups = groups
self.max_iter = max_iter
self.return_train_score = return_train_score
self.scoring = scoring
self.scoring_name = scoring_name
self.step_name = step_name
def __call__(self, X, y, trial):
# type: (trial_module.Trial) -> float
estimator = clone(self.model)
params = self._get_params(trial, self.param_distributions)
params = self._extract_max_iter(params)
estimator.set_params(**params)
if self.enable_pruning:
scores = self._cross_validate_with_pruning(X, y, trial, estimator)
else:
scores = cross_validate(
estimator,
X,
y,
cv=self.cv,
error_score=self.error_score,
fit_params=self.fit_params,
groups=self.groups,
return_train_score=self.return_train_score,
scoring=self.scoring,
)
self._store_scores(trial, scores, self.scoring_name)
return trial.user_attrs["mean_test_score"]
def _extract_max_iter(self, params):
if self.enable_pruning:
max_iter_name = "max_iter"
if self.step_name:
max_iter_name = self.step_name + "__" + max_iter_name
if max_iter_name in params:
self.max_iter = params.pop(max_iter_name)
return params
def _cross_validate_with_pruning(
self,
X,
y,
trial, # type: trial_module.Trial
estimator, # type: BaseEstimator
):
# type: (...) -> Dict[str, OneDimArrayLikeType]
if is_classifier(estimator):
partial_fit_params = self.fit_params.copy()
classes = np.unique(y)
partial_fit_params.setdefault("classes", classes)
else:
partial_fit_params = self.fit_params.copy()
n_splits = self.cv.get_n_splits(X, y, groups=self.groups)
estimators = [clone(estimator) for _ in range(n_splits)]
scores = {
"fit_time": np.zeros(n_splits),
"score_time": np.zeros(n_splits),
"test_score": np.empty(n_splits),
}
if self.return_train_score:
scores["train_score"] = np.empty(n_splits)
for step in range(self.max_iter):
for i, (train, test) in enumerate(self.cv.split(X, y, groups=self.groups)):
out = self._partial_fit_and_score(
X, y, estimators[i], train, test, partial_fit_params
)
if self.return_train_score:
scores["train_score"][i] = out.pop(0)
scores["test_score"][i] = out[0]
scores["fit_time"][i] += out[1]
scores["score_time"][i] += out[2]
intermediate_value = np.nanmean(scores["test_score"])
trial.report(intermediate_value, step=step)
if trial.should_prune():
self._store_scores(trial, scores, self.scoring_name)
raise TrialPruned("trial was pruned at iteration {}.".format(step))
return scores
def _get_params(self, trial, param_distributions):
# type: (trial_module.Trial) -> Dict[str, Any]
return {
name: trial._suggest(name, distribution.get_distribution())
for name, distribution in param_distributions.items()
}
def _partial_fit_and_score(
self,
X,
y,
estimator, # type: BaseEstimator
train, # type: List[int]
test, # type: List[int]
partial_fit_params, # type: Dict[str, Any]
):
# type: (...) -> List[Number]
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train_indices=train)
start_time = time()
try:
estimator.partial_fit(X_train, y_train, **partial_fit_params)
except Exception as e:
if self.error_score == "raise":
raise e
elif isinstance(self.error_score, Number):
fit_time = time() - start_time
test_score = self.error_score
score_time = 0.0
if self.return_train_score:
train_score = self.error_score
else:
raise ValueError("error_score must be 'raise' or numeric.")
else:
fit_time = time() - start_time
# Required for type checking but is never expected to fail.
assert isinstance(fit_time, Number)
scoring_start_time = time()
test_score = self.scoring(estimator, X_test, y_test)
score_time = time() - scoring_start_time
# Required for type checking but is never expected to fail.
assert isinstance(score_time, Number)
if self.return_train_score:
train_score = self.scoring(estimator, X_train, y_train)
ret = [test_score, fit_time, score_time]
if self.return_train_score:
ret.insert(0, train_score)
return ret
def _store_scores(self, trial, scores, scoring_name):
# type: (trial_module.Trial, Dict[str, OneDimArrayLikeType]) -> None
trial.set_user_attr("metric", scoring_name)
for name, array in scores.items():
if name in ["test_score", "train_score"]:
for i, score in enumerate(array):
trial.set_user_attr("split{}_{}".format(i, name), score)
trial.set_user_attr("mean_{}".format(name), np.nanmean(array))
trial.set_user_attr("std_{}".format(name), np.nanstd(array))
| [
"sklearn.model_selection.cross_validate",
"numpy.empty",
"numpy.nanstd",
"numpy.zeros",
"time.time",
"sklearn.utils.metaestimators._safe_split",
"sklearn.base.is_classifier",
"sklearn.base.clone",
"numpy.unique",
"numpy.nanmean"
] | [((4141, 4158), 'sklearn.base.clone', 'clone', (['self.model'], {}), '(self.model)\n', (4146, 4158), False, 'from sklearn.base import BaseEstimator, clone, is_classifier\n'), ((5490, 5514), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (5503, 5514), False, 'from sklearn.base import BaseEstimator, clone, is_classifier\n'), ((7575, 7610), 'sklearn.utils.metaestimators._safe_split', '_safe_split', (['estimator', 'X', 'y', 'train'], {}), '(estimator, X, y, train)\n', (7586, 7610), False, 'from sklearn.utils.metaestimators import _safe_split\n'), ((7636, 7691), 'sklearn.utils.metaestimators._safe_split', '_safe_split', (['estimator', 'X', 'y', 'test'], {'train_indices': 'train'}), '(estimator, X, y, test, train_indices=train)\n', (7647, 7691), False, 'from sklearn.utils.metaestimators import _safe_split\n'), ((7714, 7720), 'time.time', 'time', ([], {}), '()\n', (7718, 7720), False, 'from time import time\n'), ((4460, 4656), 'sklearn.model_selection.cross_validate', 'cross_validate', (['estimator', 'X', 'y'], {'cv': 'self.cv', 'error_score': 'self.error_score', 'fit_params': 'self.fit_params', 'groups': 'self.groups', 'return_train_score': 'self.return_train_score', 'scoring': 'self.scoring'}), '(estimator, X, y, cv=self.cv, error_score=self.error_score,\n fit_params=self.fit_params, groups=self.groups, return_train_score=self\n .return_train_score, scoring=self.scoring)\n', (4474, 4656), False, 'from sklearn.model_selection import check_cv, cross_validate\n'), ((5594, 5606), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5603, 5606), True, 'import numpy as np\n'), ((5831, 5847), 'sklearn.base.clone', 'clone', (['estimator'], {}), '(estimator)\n', (5836, 5847), False, 'from sklearn.base import BaseEstimator, clone, is_classifier\n'), ((5917, 5935), 'numpy.zeros', 'np.zeros', (['n_splits'], {}), '(n_splits)\n', (5925, 5935), True, 'import numpy as np\n'), ((5963, 5981), 'numpy.zeros', 'np.zeros', (['n_splits'], {}), 
'(n_splits)\n', (5971, 5981), True, 'import numpy as np\n'), ((6009, 6027), 'numpy.empty', 'np.empty', (['n_splits'], {}), '(n_splits)\n', (6017, 6027), True, 'import numpy as np\n'), ((6112, 6130), 'numpy.empty', 'np.empty', (['n_splits'], {}), '(n_splits)\n', (6120, 6130), True, 'import numpy as np\n'), ((6690, 6722), 'numpy.nanmean', 'np.nanmean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (6700, 6722), True, 'import numpy as np\n'), ((8493, 8499), 'time.time', 'time', ([], {}), '()\n', (8497, 8499), False, 'from time import time\n'), ((8320, 8326), 'time.time', 'time', ([], {}), '()\n', (8324, 8326), False, 'from time import time\n'), ((8590, 8596), 'time.time', 'time', ([], {}), '()\n', (8594, 8596), False, 'from time import time\n'), ((9469, 9486), 'numpy.nanmean', 'np.nanmean', (['array'], {}), '(array)\n', (9479, 9486), True, 'import numpy as np\n'), ((9543, 9559), 'numpy.nanstd', 'np.nanstd', (['array'], {}), '(array)\n', (9552, 9559), True, 'import numpy as np\n'), ((7992, 7998), 'time.time', 'time', ([], {}), '()\n', (7996, 7998), False, 'from time import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 13:04:43 2021
Three-state two-mode linear vibronic model of pyrazine (S0/S1/S2)
@author: bing
"""
import numpy as np
import numba
from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron
import sys
# sys.path.append(r'C:\Users\Bing\Google Drive\lime')
#sys.path.append(r'/Users/bing/Google Drive/lime')
from lime.phys import boson
from lime.style import set_style
from lime.units import au2ev, wavenumber2hartree, wavenum2au
def pos(n_vib):
"""
position matrix elements <n|Q|n'>
"""
X = np.zeros((n_vib, n_vib))
for i in range(1, n_vib):
X[i, i-1] = np.sqrt(i/2.)
for i in range(n_vib-1):
X[i, i+1] = np.sqrt((i+1)/2.)
return X
def polariton_hamiltonian(n_el, n_vc, n_vt, cav, g):
"""
contruct vibronic-cavity basis for polaritonic states, that is a
direct product of electron-vibration-photon space
"""
#n_basis = n_el * n_cav * n_vc * n_vt
freq_vc = 952. * wavenumber2hartree
freq_vt = 597. * wavenumber2hartree
Eshift = np.array([0.0, 31800.0, 39000]) * wavenumber2hartree
kappa = np.array([0.0, -847.0, 1202.]) * wavenumber2hartree
coup = 2110.0 * wavenumber2hartree # inter-state coupling lambda
# indentity matrices in each subspace
I_el = identity(n_el)
I_cav = cav.idm
I_vc = identity(n_vc)
I_vt = identity(n_vt)
h_cav = cav.ham()
h_vt = boson(freq_vt, n_vt, ZPE=False)
h_vc = boson(freq_vc, n_vc, ZPE=False)
h_el = np.diagflat(Eshift)
# the bare term in the system Hamiltonian
h0 = kron(h_el, kron(I_cav, kron(I_vc, I_vt))) + \
kron(I_el, kron(h_cav, kron(I_vc, I_vt))) \
+ kron(I_el, kron(I_cav, kron(h_vc, I_vt))) +\
kron(I_el, kron(I_cav, kron(I_vc, h_vt))) \
X = pos(n_vt)
h1 = kron(np.diagflat(kappa), kron(I_cav, kron(I_vc, X)))
Xc = pos(n_vc)
trans_el = np.zeros((n_el, n_el)) # electronic excitation operator
#deex = np.zeros((n_el, n_el)) # deexcitation
#deex[2, 1] = 1.0
#ex[1, 2] = 1.0
trans_el[1,2] = trans_el[2,1] = 1.0 #= ex + deex
#h_fake = kron(np.diagflat(kappa), kron(I_cav, kron(Xc, I_vt)))
###
# h_m = np.zeros((n_basis, n_basis))
#
# for m, b0 in enumerate(basis_set):
# for n, b1 in enumerate(basis_set):
## h_m[m,n] = h_el[b0.n_el, b1.n_el] * h_cav[b0.n_cav, b1.n_cav] * h_vc[b0.n_vc, b1.n_vc]
# h_m[m,n] = trans_el[b0.n_el, b1.n_el] * I_cav[b0.n_cav, b1.n_cav] \
# * Xc[b0.n_vc, b1.n_vc] * I_vt[b0.n_vt, b1.n_vt]
h2 = coup * kron(trans_el, kron(I_cav, kron(Xc, I_vt)), format='csr')
# if n_cav = n _el
deex_cav = cav.get_annihilate()
ex_cav = cav.get_create()
d_ex = np.zeros((n_el, n_el)) # electronic excitation operator
d_deex = np.zeros((n_el, n_el)) # deexcitation
d_deex[0, 2] = 1.0
d_ex[2, 0] = 1.0
dip = d_deex + d_ex
h3 = g * kron(dip, kron(deex_cav + ex_cav, kron(I_vc, I_vt)))
h_s = h0 + h1 + h2 + h3
# polaritonic states can be obtained by diagonalizing H_S
# v is the basis transformation matrix, v[i,j] = <old basis i| polaritonic state j>
#eigvals, v = np.linalg.eigh(h_s)
#h_s = csr_matrix(h_s)
# collapse operators in dissipative dynamics
Sc = kron(I_el, kron(I_cav, kron(Xc, I_vt)), format='csr')
St = kron(I_el, kron(I_cav, kron(I_vc, X)), format='csr')
# St = csr_matrix(St)
# Sc = csr_matrix(Sc)
return h_s, Sc, St
def vibronic_hamiltonian(n_el, n_vc, n_vt):
"""
contruct vibronic-cavity basis for polaritonic states, that is a
direct product of electron-vibration-photon space
"""
#n_basis = n_el * n_cav * n_vc * n_vt
freq_vc = 952. * wavenumber2hartree
freq_vt = 597. * wavenumber2hartree
Eshift = np.array([0.0, 31800.0, 39000]) * wavenumber2hartree
kappa = np.array([0.0, -847.0, 1202.]) * wavenumber2hartree
coup = 2110.0 * wavenumber2hartree # inter-state coupling lambda
# indentity matrices in each subspace
I_el = identity(n_el)
I_vc = identity(n_vc)
I_vt = identity(n_vt)
h_vt = boson(freq_vt, n_vt, ZPE=False)
h_vc = boson(freq_vc, n_vc, ZPE=False)
h_el = np.diagflat(Eshift)
# the bare term in the system Hamiltonian
h0 = kron(h_el, kron(I_vc, I_vt)) + kron(I_el, kron(h_vc, I_vt)) +\
kron(I_el, kron(I_vc, h_vt))
X = pos(n_vt)
h1 = kron(np.diagflat(kappa), kron(I_vc, X))
Xc = pos(n_vc)
trans_el = np.zeros((n_el, n_el)) # electronic excitation operator
#deex = np.zeros((n_el, n_el)) # deexcitation
#deex[2, 1] = 1.0
#ex[1, 2] = 1.0
trans_el[1,2] = trans_el[2,1] = 1.0 #= ex + deex
#h_fake = kron(np.diagflat(kappa), kron(I_cav, kron(Xc, I_vt)))
###
# h_m = np.zeros((n_basis, n_basis))
#
# for m, b0 in enumerate(basis_set):
# for n, b1 in enumerate(basis_set):
## h_m[m,n] = h_el[b0.n_el, b1.n_el] * h_cav[b0.n_cav, b1.n_cav] * h_vc[b0.n_vc, b1.n_vc]
# h_m[m,n] = trans_el[b0.n_el, b1.n_el] * I_cav[b0.n_cav, b1.n_cav] \
# * Xc[b0.n_vc, b1.n_vc] * I_vt[b0.n_vt, b1.n_vt]
h2 = coup * kron(trans_el, kron(Xc, I_vt), format='csr')
h_s = h0 + h1 + h2
# polaritonic states can be obtained by diagonalizing H_S
# v is the basis transformation matrix, v[i,j] = <old basis i| polaritonic state j>
#eigvals, v = np.linalg.eigh(h_s)
# collapse operators in dissipative dynamics
# Sc = kron(I_el, kron(Xc, I_vt), format='csr')
# St = kron(I_el, kron(I_vc, X), format='csr')
return h_s
def DPES(x, y, nstates=3):
"""
Diabatic PES
Parameters
----------
x : TYPE
qc coupling mode coordinate
y : TYPE
qt tuning mode coordinate
Returns
-------
2D array
molecular Hamiltonian
"""
freq_vc = 952. * wavenumber2hartree
freq_vt = 597. * wavenumber2hartree
Eshift = np.array([31800.0, 39000]) * wavenumber2hartree
kappa = np.array([-847.0, 1202.]) * wavenumber2hartree
V0 = freq_vc * x**2/2. + freq_vt * y**2/2 + kappa[0] * y + Eshift[0]
V1 = freq_vc * x**2/2 + freq_vt * y**2/2 + kappa[1] * y + Eshift[1]
coup = 2110 * x * wavenumber2hartree
Vg = freq_vc * x**2/2. + freq_vt * y**2/2
hmol = np.zeros((nstates, nstates))
hmol[0, 0] = Vg
hmol[1, 1] = V0
hmol[2, 2] = V1
hmol[1,2] = hmol[2,1] = coup
return hmol
def get_apes(x, y):
"""
diabatic PES
input:
R: 1d array with length n_dof
output:
V: same size as R, potential energy
"""
freq_vc = 952. * wavenum2au
freq_vt = 597. * wavenum2au
Eshift = np.array([31800.0, 39000]) * wavenum2au
kappa = np.array([-847.0, 1202.]) * wavenum2au
V0 = freq_vc * x**2/2. + freq_vt * y**2/2 + kappa[0] * y + Eshift[0]
V1 = freq_vc * x**2/2 + freq_vt * y**2/2 + kappa[1] * y + Eshift[1]
coup = 2110 * x * wavenum2au
Vg = freq_vc * x**2/2. + freq_vt * y**2/2.
#A0 = np.zeros(len(x))
#A1 = np.zeros(len(x))
#for i in range(len(x)):
V = np.array([[V0, coup], [coup, V1]])
eigs = np.linalg.eigvalsh(V)
return Vg, eigs
def cut():
    """Plot a 1D cut of the diabatic surfaces along the tuning mode at x = 0."""
    qc = 0
    qt = np.linspace(-8, 6, 100)
    surfaces = DPES(qc, qt)
    fig, ax = plt.subplots(figsize=(4, 4))
    set_style(13)
    for v in surfaces:
        ax.plot(qt, v * au2ev, lw=2)
    ax.set_ylabel('Energy (eV)')
    ax.set_xlabel('Tuning mode')
    # hide the right and top spines for a cleaner frame
    for side in ('right', 'top'):
        ax.spines[side].set_visible(False)
    plt.savefig('dpes.pdf', dpi=1200, transparent=True)
    plt.show()
def plot3d():
    """Render the ground and two adiabatic excited surfaces as a 3D plot.

    Evaluates ``get_apes`` on a 50x50 grid (np.linspace default length)
    and writes the figure to 'apes_3d.pdf'.
    """
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(5, 4))
    set_style(fontsize=14)
    ax = fig.add_subplot(111, projection='3d')

    x = np.linspace(-4, 4)
    y = np.linspace(-4, 4)
    nx, ny = len(x), len(y)
    ground = np.zeros((nx, ny))
    lower = np.zeros((nx, ny))
    upper = np.zeros((nx, ny))
    for i in range(nx):
        for j in range(ny):
            ground[i, j], (lower[i, j], upper[i, j]) = get_apes(x[i], y[j])

    X, Y = np.meshgrid(x, y)
    for z in (ground, lower, upper):
        ax.plot_surface(X, Y, z * au2ev, rstride=1, cstride=1, cmap='viridis',
                        edgecolor='k',
                        linewidth=0.1)

    ax.view_init(10, -60)
    ax.set_zlim(0, 7)
    ax.set_xlabel(r'Couping mode')
    ax.set_ylabel(r'Tuning mode')
    ax.zaxis.set_rotate_label(False)  # keep the z label upright
    ax.set_zlabel('Energy (eV)', rotation=90)
    plt.savefig('apes_3d.pdf')
    plt.show()
def contour():
    """Contour maps of the ground and two adiabatic surfaces.

    Evaluates ``get_apes`` on a 200 x 200 grid and writes one contour
    figure per surface ('apes0_contour.pdf', 'apes1_contour.pdf', ...).
    """
    x = np.linspace(-6, 6, 200)
    y = np.linspace(-4, 4, 200)
    apes = np.zeros((len(x), len(y)))
    apes1 = np.zeros((len(x), len(y)))
    apes2 = np.zeros((len(x), len(y)))
    for i in range(len(x)):
        for j in range(len(y)):
            apes[i, j], [apes1[i, j], apes2[i, j]] = get_apes(x[i], y[j])
    # NOTE: the unused `X, Y = np.meshgrid(x, y)` from the original was
    # removed — the meshgrid result was never referenced.
    for j, surface in enumerate([apes, apes1, apes2]):
        # surface is indexed [x, y]; transpose so rows correspond to y
        fig, ax = matplot(x, y, surface.T * au2ev, cmap='inferno')
        ax.set_xlabel(r'Tuning mode $Q_\mathrm{t}$')
        ax.set_ylabel(r'Coupling mode $Q_\mathrm{c}$')
        fig.subplots_adjust(top=0.95, bottom=0.16, left=0.16, right=0.95)
        plt.savefig('apes{}_contour.pdf'.format(j))
    return
def mayavi(surfaces):
    """Render two potential-energy surfaces interactively with Mayavi.

    Parameters
    ----------
    surfaces : tuple of two 2D arrays
        the two surfaces (in atomic units) to display
    """
    from mayavi import mlab
    lower, upper = surfaces
    mlab.figure()
    # warp_scale exaggerates the height so features stay visible
    mlab.surf(lower * au2ev, warp_scale=20)
    mlab.surf(upper * au2ev, warp_scale=20)
    mlab.axes(xlabel='Coupling mode', ylabel='Tuning mode')
    mlab.show()
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import rcParams
    # global padding tweaks so axis/tick labels do not collide in 3D plots
    rcParams['axes.labelpad'] = 6
    rcParams['xtick.major.pad']='2'
    rcParams['ytick.major.pad']='2'
    # alternative entry points, kept for convenience:
    # mayavi()
    # contour()
    #cut()
    plot3d()
| [
"mayavi.mlab.figure",
"scipy.sparse.kron",
"numpy.diagflat",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"lime.style.set_style",
"mayavi.mlab.axes",
"mayavi.mlab.surf",
"lime.phys.boson",
"scipy.sparse.identity",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"... | [((603, 627), 'numpy.zeros', 'np.zeros', (['(n_vib, n_vib)'], {}), '((n_vib, n_vib))\n', (611, 627), True, 'import numpy as np\n'), ((1347, 1361), 'scipy.sparse.identity', 'identity', (['n_el'], {}), '(n_el)\n', (1355, 1361), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((1394, 1408), 'scipy.sparse.identity', 'identity', (['n_vc'], {}), '(n_vc)\n', (1402, 1408), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((1420, 1434), 'scipy.sparse.identity', 'identity', (['n_vt'], {}), '(n_vt)\n', (1428, 1434), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((1469, 1500), 'lime.phys.boson', 'boson', (['freq_vt', 'n_vt'], {'ZPE': '(False)'}), '(freq_vt, n_vt, ZPE=False)\n', (1474, 1500), False, 'from lime.phys import boson\n'), ((1512, 1543), 'lime.phys.boson', 'boson', (['freq_vc', 'n_vc'], {'ZPE': '(False)'}), '(freq_vc, n_vc, ZPE=False)\n', (1517, 1543), False, 'from lime.phys import boson\n'), ((1556, 1575), 'numpy.diagflat', 'np.diagflat', (['Eshift'], {}), '(Eshift)\n', (1567, 1575), True, 'import numpy as np\n'), ((1957, 1979), 'numpy.zeros', 'np.zeros', (['(n_el, n_el)'], {}), '((n_el, n_el))\n', (1965, 1979), True, 'import numpy as np\n'), ((2790, 2812), 'numpy.zeros', 'np.zeros', (['(n_el, n_el)'], {}), '((n_el, n_el))\n', (2798, 2812), True, 'import numpy as np\n'), ((2859, 2881), 'numpy.zeros', 'np.zeros', (['(n_el, n_el)'], {}), '((n_el, n_el))\n', (2867, 2881), True, 'import numpy as np\n'), ((4092, 4106), 'scipy.sparse.identity', 'identity', (['n_el'], {}), '(n_el)\n', (4100, 4106), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((4118, 4132), 'scipy.sparse.identity', 'identity', (['n_vc'], {}), '(n_vc)\n', (4126, 4132), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((4144, 4158), 'scipy.sparse.identity', 'identity', (['n_vt'], {}), '(n_vt)\n', 
(4152, 4158), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((4171, 4202), 'lime.phys.boson', 'boson', (['freq_vt', 'n_vt'], {'ZPE': '(False)'}), '(freq_vt, n_vt, ZPE=False)\n', (4176, 4202), False, 'from lime.phys import boson\n'), ((4214, 4245), 'lime.phys.boson', 'boson', (['freq_vc', 'n_vc'], {'ZPE': '(False)'}), '(freq_vc, n_vc, ZPE=False)\n', (4219, 4245), False, 'from lime.phys import boson\n'), ((4258, 4277), 'numpy.diagflat', 'np.diagflat', (['Eshift'], {}), '(Eshift)\n', (4269, 4277), True, 'import numpy as np\n'), ((4540, 4562), 'numpy.zeros', 'np.zeros', (['(n_el, n_el)'], {}), '((n_el, n_el))\n', (4548, 4562), True, 'import numpy as np\n'), ((6347, 6375), 'numpy.zeros', 'np.zeros', (['(nstates, nstates)'], {}), '((nstates, nstates))\n', (6355, 6375), True, 'import numpy as np\n'), ((7136, 7170), 'numpy.array', 'np.array', (['[[V0, coup], [coup, V1]]'], {}), '([[V0, coup], [coup, V1]])\n', (7144, 7170), True, 'import numpy as np\n'), ((7183, 7204), 'numpy.linalg.eigvalsh', 'np.linalg.eigvalsh', (['V'], {}), '(V)\n', (7201, 7204), True, 'import numpy as np\n'), ((7257, 7280), 'numpy.linspace', 'np.linspace', (['(-8)', '(6)', '(100)'], {}), '(-8, 6, 100)\n', (7268, 7280), True, 'import numpy as np\n'), ((7317, 7345), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (7329, 7345), True, 'import matplotlib.pyplot as plt\n'), ((7349, 7362), 'lime.style.set_style', 'set_style', (['(13)'], {}), '(13)\n', (7358, 7362), False, 'from lime.style import set_style\n'), ((7798, 7849), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dpes.pdf"""'], {'dpi': '(1200)', 'transparent': '(True)'}), "('dpes.pdf', dpi=1200, transparent=True)\n", (7809, 7849), True, 'import matplotlib.pyplot as plt\n'), ((7854, 7864), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7862, 7864), True, 'import matplotlib.pyplot as plt\n'), ((8001, 8027), 'matplotlib.pyplot.figure', 'plt.figure', 
([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (8011, 8027), True, 'import matplotlib.pyplot as plt\n'), ((8031, 8053), 'lime.style.set_style', 'set_style', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (8040, 8053), False, 'from lime.style import set_style\n'), ((8130, 8148), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)'], {}), '(-4, 4)\n', (8141, 8148), True, 'import numpy as np\n'), ((8157, 8175), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)'], {}), '(-4, 4)\n', (8168, 8175), True, 'import numpy as np\n'), ((8437, 8454), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (8448, 8454), True, 'import numpy as np\n'), ((9247, 9273), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""apes_3d.pdf"""'], {}), "('apes_3d.pdf')\n", (9258, 9273), True, 'import matplotlib.pyplot as plt\n'), ((9279, 9289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9287, 9289), True, 'import matplotlib.pyplot as plt\n'), ((9385, 9408), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(200)'], {}), '(-6, 6, 200)\n', (9396, 9408), True, 'import numpy as np\n'), ((9417, 9440), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(200)'], {}), '(-4, 4, 200)\n', (9428, 9440), True, 'import numpy as np\n'), ((9702, 9719), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (9713, 9719), True, 'import numpy as np\n'), ((10654, 10667), 'mayavi.mlab.figure', 'mlab.figure', ([], {}), '()\n', (10665, 10667), False, 'from mayavi import mlab\n'), ((10681, 10719), 'mayavi.mlab.surf', 'mlab.surf', (['(apes * au2ev)'], {'warp_scale': '(20)'}), '(apes * au2ev, warp_scale=20)\n', (10690, 10719), False, 'from mayavi import mlab\n'), ((10732, 10771), 'mayavi.mlab.surf', 'mlab.surf', (['(apes1 * au2ev)'], {'warp_scale': '(20)'}), '(apes1 * au2ev, warp_scale=20)\n', (10741, 10771), False, 'from mayavi import mlab\n'), ((10825, 10880), 'mayavi.mlab.axes', 'mlab.axes', ([], {'xlabel': '"""Coupling mode"""', 'ylabel': '"""Tuning mode"""'}), "(xlabel='Coupling mode', 
ylabel='Tuning mode')\n", (10834, 10880), False, 'from mayavi import mlab\n'), ((10937, 10948), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (10946, 10948), False, 'from mayavi import mlab\n'), ((679, 695), 'numpy.sqrt', 'np.sqrt', (['(i / 2.0)'], {}), '(i / 2.0)\n', (686, 695), True, 'import numpy as np\n'), ((742, 764), 'numpy.sqrt', 'np.sqrt', (['((i + 1) / 2.0)'], {}), '((i + 1) / 2.0)\n', (749, 764), True, 'import numpy as np\n'), ((1106, 1137), 'numpy.array', 'np.array', (['[0.0, 31800.0, 39000]'], {}), '([0.0, 31800.0, 39000])\n', (1114, 1137), True, 'import numpy as np\n'), ((1171, 1202), 'numpy.array', 'np.array', (['[0.0, -847.0, 1202.0]'], {}), '([0.0, -847.0, 1202.0])\n', (1179, 1202), True, 'import numpy as np\n'), ((1872, 1890), 'numpy.diagflat', 'np.diagflat', (['kappa'], {}), '(kappa)\n', (1883, 1890), True, 'import numpy as np\n'), ((3851, 3882), 'numpy.array', 'np.array', (['[0.0, 31800.0, 39000]'], {}), '([0.0, 31800.0, 39000])\n', (3859, 3882), True, 'import numpy as np\n'), ((3916, 3947), 'numpy.array', 'np.array', (['[0.0, -847.0, 1202.0]'], {}), '([0.0, -847.0, 1202.0])\n', (3924, 3947), True, 'import numpy as np\n'), ((4469, 4487), 'numpy.diagflat', 'np.diagflat', (['kappa'], {}), '(kappa)\n', (4480, 4487), True, 'import numpy as np\n'), ((4489, 4502), 'scipy.sparse.kron', 'kron', (['I_vc', 'X'], {}), '(I_vc, X)\n', (4493, 4502), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((5993, 6019), 'numpy.array', 'np.array', (['[31800.0, 39000]'], {}), '([31800.0, 39000])\n', (6001, 6019), True, 'import numpy as np\n'), ((6053, 6079), 'numpy.array', 'np.array', (['[-847.0, 1202.0]'], {}), '([-847.0, 1202.0])\n', (6061, 6079), True, 'import numpy as np\n'), ((6724, 6750), 'numpy.array', 'np.array', (['[31800.0, 39000]'], {}), '([31800.0, 39000])\n', (6732, 6750), True, 'import numpy as np\n'), ((6776, 6802), 'numpy.array', 'np.array', (['[-847.0, 1202.0]'], {}), '([-847.0, 1202.0])\n', (6784, 6802), 
True, 'import numpy as np\n'), ((1904, 1917), 'scipy.sparse.kron', 'kron', (['I_vc', 'X'], {}), '(I_vc, X)\n', (1908, 1917), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((3361, 3375), 'scipy.sparse.kron', 'kron', (['Xc', 'I_vt'], {}), '(Xc, I_vt)\n', (3365, 3375), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((3424, 3437), 'scipy.sparse.kron', 'kron', (['I_vc', 'X'], {}), '(I_vc, X)\n', (3428, 3437), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((4417, 4433), 'scipy.sparse.kron', 'kron', (['I_vc', 'h_vt'], {}), '(I_vc, h_vt)\n', (4421, 4433), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((5228, 5242), 'scipy.sparse.kron', 'kron', (['Xc', 'I_vt'], {}), '(Xc, I_vt)\n', (5232, 5242), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((1816, 1832), 'scipy.sparse.kron', 'kron', (['I_vc', 'h_vt'], {}), '(I_vc, h_vt)\n', (1820, 1832), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((2657, 2671), 'scipy.sparse.kron', 'kron', (['Xc', 'I_vt'], {}), '(Xc, I_vt)\n', (2661, 2671), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((3014, 3030), 'scipy.sparse.kron', 'kron', (['I_vc', 'I_vt'], {}), '(I_vc, I_vt)\n', (3018, 3030), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((4345, 4361), 'scipy.sparse.kron', 'kron', (['I_vc', 'I_vt'], {}), '(I_vc, I_vt)\n', (4349, 4361), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((4376, 4392), 'scipy.sparse.kron', 'kron', (['h_vc', 'I_vt'], {}), '(h_vc, I_vt)\n', (4380, 4392), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((1763, 1779), 'scipy.sparse.kron', 'kron', (['h_vc', 'I_vt'], {}), '(h_vc, I_vt)\n', (1767, 1779), 
False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((1655, 1671), 'scipy.sparse.kron', 'kron', (['I_vc', 'I_vt'], {}), '(I_vc, I_vt)\n', (1659, 1671), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n'), ((1709, 1725), 'scipy.sparse.kron', 'kron', (['I_vc', 'I_vt'], {}), '(I_vc, I_vt)\n', (1713, 1725), False, 'from scipy.sparse import identity, coo_matrix, lil_matrix, csr_matrix, kron\n')] |
import pandas as pd
import numpy as np
import re
import os
from pandas import json_normalize
import json
from alive_progress import alive_bar
class PrepareNSMCLogs:
    """Convert raw one-JSON-object-per-line NSMC (Kibana) logs to a cleaned CSV.

    Configuration is read from a ``config`` object exposing
    ``raw_logs_dir``, ``prepared_logs_dir`` and ``filename``.
    """

    def __init__(self, config):
        self.raw_logs_dir = config.raw_logs_dir
        self.prepared_logs_dir = config.prepared_logs_dir
        self.filename = config.filename

    @staticmethod
    def starts_with_timestamp(line):
        """Return True if *line* begins with an ISO-8601-style timestamp."""
        # raw string: '\d' in a plain string is an invalid escape
        # (DeprecationWarning / SyntaxWarning on modern Python)
        pattern = re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})")
        return bool(pattern.match(line))

    def multiline_logs_processing(self, fpath):
        """Parse a file of JSON log lines into a flat DataFrame and save it as CSV.

        Returns the concatenated DataFrame (also written to
        ``prepared_logs_dir``).
        """
        dfs = []
        with open(fpath) as f:
            logs = f.readlines()
        with alive_bar(len(logs), title="Parsing json to csv") as bar:
            for log in logs:
                json_log = json.loads(log)
                df = json_normalize(json_log)
                dfs.append(df)
                bar()
        all_logs_df = pd.concat(dfs)
        # drop columns that are empty in every row
        all_logs_df.dropna(how='all', axis=1, inplace=True)
        all_logs_df.to_csv(f'{self.prepared_logs_dir}{self.filename}', index=False)
        return all_logs_df

    def prepare_raw_nsmc_data(self):
        """Full preparation pipeline: parse raw logs, drop noise columns, normalize fields.

        Returns the cleaned DataFrame.
        """
        fpath = os.path.join(self.raw_logs_dir, self.filename)
        self.multiline_logs_processing(fpath)
        print("Logs are prepared in csv format and saved to: ", self.prepared_logs_dir)
        df = pd.read_csv(f'{self.prepared_logs_dir}{self.filename}')
        # drop request/response metadata columns that are not used downstream
        df = df.drop(['type', 'tags', 'pid', 'method', 'statusCode', 'req.url', 'req.method', 'res.responseTime',
                      'req.headers.accept', 'req.remoteAddress', 'req.userAgent', 'res.statusCode', 'res.contentLength',
                      'req.headers.x-request-id', 'req.headers.x-real-ip', 'req.headers.x-forwarded-for',
                      'req.headers.x-forwarded-host', 'req.headers.x-forwarded-proto', 'req.headers.x-original-uri',
                      'req.headers.x-scheme', 'req.headers.content-length', 'req.headers.accept-language',
                      'req.headers.accept-encoding', 'req.headers.kbn-version', 'req.headers.origin',
                      'req.headers.referer', 'req.headers.sec-fetch-dest', 'req.headers.sec-fetch-mode',
                      'req.headers.sec-fetch-site', 'req.headers.netguard-proxy-roles', 'req.headers.username',
                      'req.referer', 'req.headers.content-type', 'req.headers.sec-ch-ua',
                      'req.headers.sec-ch-ua-mobile',
                      'req.headers.sec-ch-ua-platform', 'req.headers.upgrade-insecure-requests',
                      'req.headers.sec-fetch-user', 'req.headers.x-requested-with', 'req.headers.cache-control',
                      'state', 'prevState', 'prevMsg', 'req.headers.if-none-match', 'req.headers.if-modified-since',
                      'req.headers.dnt', 'req.headers.kbn-xsrf'], axis=1)
        # normalize column names: '.', '-' -> '_', drop '@'
        # (raw strings here too — '\.' was an invalid escape in the original)
        df.columns = [re.sub(r'\.', '_', col) for col in df.columns]
        df.columns = [re.sub('-', '_', col) for col in df.columns]
        df.columns = [re.sub('@', '', col) for col in df.columns]
        for idx, row in df.iterrows():
            message_groups = re.match(
                r'^(\w*) /(\w*)/(\w*)/(\w*).=(.+) ([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\.[0-9]+B)',
                row.message)
            '''
            this regex if for parsing data like this:
            POST /api/saved_objects/_bulk_get?=%2Fvar%2Flib%2Fmlocate.db 200 6ms - 9.0B
            '''
            if message_groups:
                message_groups = list(message_groups.groups())
                url = message_groups[4]
                url = f'url=[={url}]'
                message_groups[4] = url
                df.loc[idx, 'message'] = ' '.join(message_groups)
            else:
                message_groups = re.match(r'^(\w*) /(\w*)/(\w*).*([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\.[0-9]+B)',
                                          row.message)
                '''
                this regex if for parsing data like this:
                GET /api/status?pretty= 200 8ms - 9.0B
                '''
                if message_groups is None:
                    ''' this is for even shorter messages '''
                    message_groups = re.match(r'^(\w*).*([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\.[0-9]+B)', row.message)
                if message_groups:
                    message_groups = list(message_groups.groups())
                    # if match, change the message, if not, leave it as it is
                    df.loc[idx, 'message'] = ' '.join(message_groups)
            # change host value to format that is easy to parse
            host = row.req_headers_host
            host = f'host=[{host}]'
            df.loc[idx, 'req_headers_host'] = host
            # change req_headers_user_agent values to popular services names
            req_headers_user_agent_groups = re.match(r'^(\w*)/', str(row.req_headers_user_agent))
            if req_headers_user_agent_groups:
                df.loc[idx, 'req_headers_user_agent'] = ' '.join(req_headers_user_agent_groups.groups())
            # map well-known port numbers onto protocol names
            if '443' in str(row.req_headers_x_forwarded_port):
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'HTTPS'
            elif '80' in str(row.req_headers_x_forwarded_port):
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'HTTP'
            elif '21' in str(row.req_headers_x_forwarded_port):
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'FTP'
            elif '22' in str(row.req_headers_x_forwarded_port):
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'SSH'
            elif '25' in str(row.req_headers_x_forwarded_port):
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'SMTP'
            elif '53' in str(row.req_headers_x_forwarded_port):
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'DNS'
            elif '8080' in str(row.req_headers_x_forwarded_port):
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'HTTP'
            else:
                df.loc[idx, 'req_headers_x_forwarded_port'] = 'UNKNOWN'
            # change timestamp to datetime format ('date time' separated by a space)
            timestamp_groups = re.match(r'([0-9].*-[0-9].*-[0-9].*)T([0-9].*:[0-9].*:[0-9].*)Z', str(row.timestamp))
            if timestamp_groups:
                df.loc[idx, 'timestamp'] = ' '.join(timestamp_groups.groups())
            # if there is no user, set it to '-'
            if pd.isnull(row.req_headers_netguard_proxy_user):
                df.loc[idx, 'req_headers_netguard_proxy_user'] = '-'
        return df

    def save_prepared_data(self, df):
        """Write the prepared DataFrame values as plain text rows."""
        np.savetxt(f'{self.prepared_logs_dir}{self.filename}', df.values, fmt="%s")
| [
"json.loads",
"pandas.read_csv",
"pandas.json_normalize",
"numpy.savetxt",
"re.match",
"pandas.isnull",
"os.path.join",
"pandas.concat",
"re.sub",
"re.compile"
] | [((419, 477), 're.compile', 're.compile', (['"""^(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2})"""'], {}), "('^(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2})')\n", (429, 477), False, 'import re\n'), ((940, 954), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (949, 954), True, 'import pandas as pd\n'), ((1180, 1226), 'os.path.join', 'os.path.join', (['self.raw_logs_dir', 'self.filename'], {}), '(self.raw_logs_dir, self.filename)\n', (1192, 1226), False, 'import os\n'), ((1375, 1430), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.prepared_logs_dir}{self.filename}"""'], {}), "(f'{self.prepared_logs_dir}{self.filename}')\n", (1386, 1430), True, 'import pandas as pd\n'), ((6663, 6738), 'numpy.savetxt', 'np.savetxt', (['f"""{self.prepared_logs_dir}{self.filename}"""', 'df.values'], {'fmt': '"""%s"""'}), "(f'{self.prepared_logs_dir}{self.filename}', df.values, fmt='%s')\n", (6673, 6738), True, 'import numpy as np\n'), ((2937, 2960), 're.sub', 're.sub', (['"""\\\\."""', '"""_"""', 'col'], {}), "('\\\\.', '_', col)\n", (2943, 2960), False, 'import re\n'), ((3005, 3026), 're.sub', 're.sub', (['"""-"""', '"""_"""', 'col'], {}), "('-', '_', col)\n", (3011, 3026), False, 'import re\n'), ((3072, 3092), 're.sub', 're.sub', (['"""@"""', '""""""', 'col'], {}), "('@', '', col)\n", (3078, 3092), False, 'import re\n'), ((3185, 3305), 're.match', 're.match', (['"""^(\\\\w*) /(\\\\w*)/(\\\\w*)/(\\\\w*).=(.+) ([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\\\\.[0-9]+B)"""', 'row.message'], {}), "(\n '^(\\\\w*) /(\\\\w*)/(\\\\w*)/(\\\\w*).=(.+) ([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\\\\.[0-9]+B)'\n , row.message)\n", (3193, 3305), False, 'import re\n'), ((6480, 6526), 'pandas.isnull', 'pd.isnull', (['row.req_headers_netguard_proxy_user'], {}), '(row.req_headers_netguard_proxy_user)\n', (6489, 6526), True, 'import pandas as pd\n'), ((3829, 3937), 're.match', 're.match', (['"""^(\\\\w*) /(\\\\w*)/(\\\\w*).*([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\\\\.[0-9]+B)"""', 
'row.message'], {}), "(\n '^(\\\\w*) /(\\\\w*)/(\\\\w*).*([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\\\\.[0-9]+B)'\n , row.message)\n", (3837, 3937), False, 'import re\n'), ((791, 806), 'json.loads', 'json.loads', (['log'], {}), '(log)\n', (801, 806), False, 'import json\n'), ((832, 856), 'pandas.json_normalize', 'json_normalize', (['json_log'], {}), '(json_log)\n', (846, 856), False, 'from pandas import json_normalize\n'), ((4263, 4351), 're.match', 're.match', (['"""^(\\\\w*).*([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\\\\.[0-9]+B)"""', 'row.message'], {}), "('^(\\\\w*).*([0-9][0-9][0-9]) ([0-9]+ms) - ([0-9]+\\\\.[0-9]+B)', row.\n message)\n", (4271, 4351), False, 'import re\n')] |
import re, string
import numpy as np
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# pre-compiled pattern matching ASCII punctuation plus common typographic marks
re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')


def tokenize(s):
    """Split *s* on whitespace, treating each punctuation mark as its own token."""
    padded = re_tok.sub(r' \1 ', s)
    return padded.split()
class NBTfidfVectorizer(TfidfVectorizer):
    """Tf-idf vectorizer augmented with Naive Bayes log-count ratios.

    ``fit`` learns both the tf-idf vocabulary and the NB ratio
    r = log(p/q) from binary labels; ``transform`` then returns the
    tf-idf matrix scaled elementwise by r. Can also be used to generate
    tf-idf only.
    """

    def __init__(self):
        super().__init__(
            ngram_range=(1,2), tokenizer=tokenize,
            min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,
            smooth_idf=1, sublinear_tf=1)
        # NB log-count ratio, populated by fit()
        self._r = None

    def fit(self, X, y):
        """Learn tf-idf vocabulary and the NB ratio from (X, y)."""
        tfidf = super().fit_transform(X)
        # smoothed per-feature sums for positive (p) and negative (q) class
        p = (tfidf[y == 1].sum(0) + 1) / ((y == 1).sum() + 1)
        q = (tfidf[y == 0].sum(0) + 1) / ((y == 0).sum() + 1)
        self._r = np.log(p / q)

    def transform(self, X):
        """Return NB-ratio-scaled tf-idf features for X."""
        return super().transform(X).multiply(self._r)

    def fit_transform(self, X, y):
        """Convenience wrapper: fit on (X, y), then transform X."""
        self.fit(X, y)
        return self.transform(X)
class NBLogisticRegression(LogisticRegression, NBTfidfVectorizer):
    """Logistic regression on NB-weighted tf-idf features (NB-SVM style baseline).

    NOTE(review): the base classes are inherited but never initialised via
    super(); the class actually operates by composition through the
    ``regressor`` and ``vectorizer`` attributes.
    """

    def __init__(self):
        self.regressor = LogisticRegression(C=4, dual=True)
        self.vectorizer = NBTfidfVectorizer()

    def fit(self, X, y):
        """Fit the vectorizer on (X, y), then the classifier on its features."""
        print('Fitting NBTfidf')
        features = self.vectorizer.fit_transform(X, y)
        print('Fitting LogisticRegression')
        self.regressor.fit(features, y)

    def predict_proba(self, X):
        """Probability of the positive class for every sample in X."""
        features = self.vectorizer.transform(X)
        return self.regressor.predict_proba(features)[:,1]

    def predict(self, X):
        """Hard class predictions for X."""
        features = self.vectorizer.transform(X)
        return self.regressor.predict(features)
if __name__ == '__main__':
    # Code from https://www.kaggle.com/jhoward/nb-svm-strong-linear-baseline
    data = pd.read_csv('../datasets/Kaggle_Toxic/data/train.csv')
    # collapse the six label columns into a single binary "toxic" flag
    data['toxic'] = data['toxic'] + data['insult'] + data['obscene'] + data['severe_toxic'] + data['identity_hate'] + data['threat']
    # use .loc instead of chained indexing — chained assignment is unreliable
    # (SettingWithCopyWarning; a no-op under pandas copy-on-write)
    data.loc[data['toxic'] != 0, 'toxic'] = 1

    train, test = train_test_split(data, test_size=0.25)
    # work on copies so the column assignments below do not hit views of `data`
    train = train.copy()
    test = test.copy()
    train['none'] = 1 - train['toxic']
    print('{} none labels out of {} comments'.format(train['none'].sum(), train.shape[0]))
    print('so {} of the comments are non toxic'.format(train['none'].sum() / train.shape[0]))

    COMMENT = 'comment_text'
    # replace missing comments with an explicit unknown token
    # (assignment instead of inplace fillna on a potential view)
    train[COMMENT] = train[COMMENT].fillna("<unk>")
    test[COMMENT] = test[COMMENT].fillna("<unk>")

    logistic = NBLogisticRegression()
    logistic.fit(train[COMMENT], train['toxic'].values)
    train_preds = logistic.predict(train[COMMENT])
    test_preds = logistic.predict(test[COMMENT])

    print('Train accuracy is: {:.3f}'.format(accuracy_score(train['toxic'], train_preds)))
    print('Train recall (True positive) is {:.3f}'.format(recall_score(train['toxic'], train_preds)))
    print('Train precision is {:.3f}'.format(precision_score(train['toxic'], train_preds)))
    print('Train F1 is {:3f}'.format(f1_score(train['toxic'], train_preds)))
    print('*' * 20)
    print('*' * 20)
    print('Test accuracy is: {:.3f}'.format(accuracy_score(test['toxic'], test_preds)))
    print('Test recall (True positive) is {:.3f}'.format(recall_score(test['toxic'], test_preds)))
    print('Test precision is {:.3f}'.format(precision_score(test['toxic'], test_preds)))
    print('Test F1 is {:3f}'.format(f1_score(test['toxic'], test_preds)))
    print('#' * 20)
    print('#' * 20)
    print('Training model on full data')
    # retrain on the full dataset for the deployed model
    logistic = NBLogisticRegression()
    logistic.fit(data[COMMENT], data['toxic'].values)
    print('Saving trained toxicity model')
    with open('toxicity_model.pkl', 'wb') as f:
        pickle.dump(logistic, f)
| [
"pickle.dump",
"numpy.log",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.recall_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.f1_score",
"sklearn.metrics.precision_score",
"re.compile"
] | [((341, 398), 're.compile', 're.compile', (['f"""([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])"""'], {}), "(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')\n", (351, 398), False, 'import re, string\n'), ((2285, 2339), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/Kaggle_Toxic/data/train.csv"""'], {}), "('../datasets/Kaggle_Toxic/data/train.csv')\n", (2296, 2339), True, 'import pandas as pd\n'), ((2538, 2576), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.25)'}), '(data, test_size=0.25)\n', (2554, 2576), False, 'from sklearn.model_selection import train_test_split\n'), ((1252, 1265), 'numpy.log', 'np.log', (['(p / q)'], {}), '(p / q)\n', (1258, 1265), True, 'import numpy as np\n'), ((1598, 1632), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(4)', 'dual': '(True)'}), '(C=4, dual=True)\n', (1616, 1632), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4187, 4211), 'pickle.dump', 'pickle.dump', (['logistic', 'f'], {}), '(logistic, f)\n', (4198, 4211), False, 'import pickle\n'), ((3185, 3228), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["train['toxic']", 'train_preds'], {}), "(train['toxic'], train_preds)\n", (3199, 3228), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((3290, 3331), 'sklearn.metrics.recall_score', 'recall_score', (["train['toxic']", 'train_preds'], {}), "(train['toxic'], train_preds)\n", (3302, 3331), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((3380, 3424), 'sklearn.metrics.precision_score', 'precision_score', (["train['toxic']", 'train_preds'], {}), "(train['toxic'], train_preds)\n", (3395, 3424), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((3465, 3502), 'sklearn.metrics.f1_score', 'f1_score', (["train['toxic']", 'train_preds'], {}), "(train['toxic'], train_preds)\n", (3473, 3502), 
False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((3594, 3635), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["test['toxic']", 'test_preds'], {}), "(test['toxic'], test_preds)\n", (3608, 3635), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((3696, 3735), 'sklearn.metrics.recall_score', 'recall_score', (["test['toxic']", 'test_preds'], {}), "(test['toxic'], test_preds)\n", (3708, 3735), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((3783, 3825), 'sklearn.metrics.precision_score', 'precision_score', (["test['toxic']", 'test_preds'], {}), "(test['toxic'], test_preds)\n", (3798, 3825), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((3865, 3900), 'sklearn.metrics.f1_score', 'f1_score', (["test['toxic']", 'test_preds'], {}), "(test['toxic'], test_preds)\n", (3873, 3900), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 10:11:15 2018
@author: nsde
"""
#%%
from dlmnn.helper.neighbor_funcs import findTargetNeighbours, knnClassifier
from dlmnn.helper.tf_funcs import tf_makePairwiseFunc, tf_findImposters
from dlmnn.helper.tf_funcs import tf_LMNN_loss, tf_featureExtractor
from dlmnn.helper.layers import L2normalize
from dlmnn.helper.logger import stat_logger
from dlmnn.helper.utility import get_optimizer
from dlmnn.helper.embeddings import embedding_projector
import tensorflow as tf
from tensorflow.python.keras import Sequential
import numpy as np
import datetime, os
#%%
class lmnnredo(object):
""" """
def __init__(self, session=None, dir_loc=None):
# Initilize session and tensorboard dirs
self.session = tf.Session() if session is None else session
self.dir_loc = './logs' if dir_loc is None else dir_loc
self._writer = None
# Initialize feature extractor
self.extractor = Sequential()
# Set idication for when model is build
self.built = False
#%%
    def add(self, layer):
        """ Add layer to extractor """
        # delegate directly to the underlying keras Sequential model
        self.extractor.add(layer)
#%%
    def compile(self, k=1, optimizer='adam', learning_rate = 1e-4,
                mu=0.5, margin=1, normalize=False):
        """ Builds the tensorflow graph that is evaluated in the fit method.

        Parameters
        ----------
        k : int
            number of target neighbours per sample
        optimizer : str
            name of the optimizer resolved by get_optimizer()
        learning_rate : float
            learning rate passed to the optimizer
        mu : float
            weighting between the pull and push terms of the LMNN loss
        margin : float
            LMNN margin separating target neighbours from imposters
        normalize : bool
            if True, append an L2-normalization layer to the extractor
        """
        assert len(self.extractor.layers)!=0, '''Layers must be added with the
            lmnn.add() method before this function is called '''
        self.built = True
        # Set number of neighbours
        self.k = k
        # Normalize extracted features if asked for
        if normalize: self.extractor.add(L2normalize())
        # Shapes
        self.input_shape = self.extractor.input_shape
        self.output_shape = self.extractor.output_shape
        # Placeholders for data (TF1-style graph inputs)
        self.global_step = tf.Variable(0, trainable=False)
        self.Xp = tf.placeholder(tf.float32, shape=self.input_shape, name='In_features')
        self.yp = tf.placeholder(tf.int32, shape=(None,), name='In_targets')
        self.tNp = tf.placeholder(tf.int32, shape=(None, 2), name='In_targetNeighbours')
        # Feature extraction function and pairwise distance function
        self.extractor_func = tf_featureExtractor(self.extractor)
        self.dist_func = tf_makePairwiseFunc(self.extractor_func)
        # Build graph: pairwise distances -> imposter tuples -> LMNN loss
        # (D_1: pull distances, D_2/D_3: push-term distance pairs)
        D = self.dist_func(self.Xp, self.Xp)
        tup = tf_findImposters(D, self.yp, self.tNp, margin=margin)
        self._LMNN_loss, D_1, D_2, D_3 = tf_LMNN_loss(D, self.tNp, tup, mu, margin=margin)
        # Construct training operation
        self.optimizer = get_optimizer(optimizer)(learning_rate=learning_rate)
        self._trainer = self.optimizer.minimize(self._LMNN_loss,
                                                global_step=self.global_step)
        # Summaries written to tensorboard during training
        self._n_tup = tf.shape(tup)[0]
        true_imp = tf.cast(tf.less(D_3, D_2), tf.float32)
        features = self.extractor_func(self.Xp)
        tf.summary.scalar('Loss', self._LMNN_loss)
        tf.summary.scalar('Num_imp', self._n_tup)
        tf.summary.scalar('Loss_pull', tf.reduce_sum(D_1))
        tf.summary.scalar('Loss_push', tf.reduce_sum(margin + D_2 - D_3))
        tf.summary.scalar('True_imp', tf.reduce_sum(true_imp))
        tf.summary.scalar('Frac_true_imp', tf.reduce_mean(true_imp))
        # sparsity diagnostics of the extracted feature vectors
        tf.summary.scalar('Sparsity_tanh', tf.reduce_mean(tf.reduce_sum(
                tf.tanh(tf.pow(features, 2.0)), axis=1)))
        tf.summary.scalar('Sparsity_l0', tf.reduce_mean(tf.reduce_sum(
                tf.cast(tf.equal(features, 0), tf.int32), axis=1)))
        self._summary = tf.summary.merge_all()
        # Initilize session
        init = tf.global_variables_initializer()
        self.session.run(init)
        # Create callable functions (fast repeated evaluation of sub-graphs)
        self._transformer = self.session.make_callable(
                self.extractor_func(self.Xp), [self.Xp])
        self._distances = self.session.make_callable(
                self.dist_func(self.Xp, self.Xp), [self.Xp])
#%%
def reintialize(self):
self._assert_if_build()
init = tf.global_variables_initializer()
self.session.run(init)
#%%
    def fit(self, Xtrain, ytrain, maxEpoch=100, batch_size=50, tN=None,
            run_id=None, verbose=2, snapshot=10, val_set=None, tN_val=None,
            redo_step=5):
        """ Fit the metric to the training data.
        Arguments:
            Xtrain: N x ?, matrix/tensor of training features
            ytrain: N x 1, vector of training labels
            maxEpoch: integer, maximum number of epochs to train
            batch_size: integer, batch size (self.k pairs per sample per batch)
            tN: optional precomputed target neighbours for the training set
            run_id: string used as folder name for logs/weights
                (defaults to a timestamp)
            verbose: verbosity level; 2 additionally writes tensorboard summaries
            snapshot: run validation every `snapshot` epochs
            val_set: optional (Xval, yval) tuple for validation
            tN_val: optional precomputed target neighbours for the validation set
            redo_step: recompute target neighbours every `redo_step` epochs
        Returns:
            stats: the stat_logger object holding the recorded training stats
        """
        self._assert_if_build()
        # Tensorboard file writers
        run_id = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M') if run_id \
                    is None else run_id
        self.current_loc = self.dir_loc + '/' + run_id
        if not os.path.exists(self.dir_loc): os.makedirs(self.dir_loc)
        if verbose == 2:
            self._writer = tf.summary.FileWriter(self.current_loc)
            self._writer.add_graph(self.session.graph)
        # Check for validation set
        validation = False
        if val_set:
            validation = True
            Xval, yval = val_set
        # Training parameters
        Xtrain = Xtrain.astype('float32')
        ytrain = ytrain.astype('int32')
        N_train = Xtrain.shape[0]
        n_batch_train = int(np.ceil(N_train / batch_size))
        print(70*'-')
        print('Number of training samples: ', N_train)
        if validation:
            Xval = Xval.astype('float32')
            yval = yval.astype('int32')
            N_val = Xval.shape[0]
            n_batch_val = int(np.ceil(N_val / batch_size))
            print('Number of validation samples: ', N_val)
        print(70*'-')
        # Target neighbours (computed once up front if not supplied)
        if tN is None:
            tN = findTargetNeighbours(Xtrain, ytrain, self.k, name='Training')
        if validation and tN_val is None:
            tN_val = findTargetNeighbours(Xval, yval, self.k, name='Validation')
        # Training loop
        stats = stat_logger(maxEpoch, n_batch_train, verbose=verbose)
        stats.on_train_begin() # Start training
        for e in range(maxEpoch):
            stats.on_epoch_begin() # Start epoch
            # At redo step, recompute target neighbours in the *current*
            # (already transformed) feature space
            if (e+1) % redo_step == 0:
                tN_old = tN
                Xtrans = self.transform(Xtrain, batch_size=batch_size)
                tN = findTargetNeighbours(Xtrans, ytrain, self.k, name='Training', do_pca=False)
                tN_change = self._tN_change(tN_old, tN)
                stats.add_stat('tN_change', tN_change)
                # Write stats to summary protocol buffer
                # NOTE(review): self._writer only exists when verbose == 2;
                # this branch would raise AttributeError otherwise -- confirm.
                summ = tf.Summary(value=[tf.Summary.Value(tag='tN_change', simple_value=tN_change)])
                self._writer.add_summary(summ, global_step=n_batch_train*e)
            # Permute target neighbours (shuffles batch composition each epoch)
            tN = np.random.permutation(tN)
            # Do backpropagation
            for b in range(n_batch_train):
                stats.on_batch_begin() # Start batch
                # Get data (self.k target-neighbour pairs per sample)
                feed_dict = self._get_feed_dict(self.k*batch_size*b,
                                                self.k*batch_size*(b+1),
                                                Xtrain, ytrain, tN)
                # Evaluate graph
                _, loss_out, ntup_out, summ = self.session.run(
                        [self._trainer, self._LMNN_loss,
                         self._n_tup, self._summary],
                        feed_dict=feed_dict)
                # Save stats
                stats.add_stat('loss', loss_out)
                stats.add_stat('#imp', ntup_out)
                # Save to tensorboard
                if verbose==2:
                    self._writer.add_summary(summ, global_step=b+n_batch_train*e)
                stats.on_batch_end() # End batch
            # If we are at an snapshot epoch and are doing validation
            if validation and ((e+1) % snapshot == 0 or (e+1) == maxEpoch or e==0):
                # Evaluate loss and tuples on val data
                tN_val = np.random.permutation(tN_val)
                for b in range(n_batch_val):
                    feed_dict = self._get_feed_dict(self.k*batch_size*b,
                                                    self.k*batch_size*(b+1),
                                                    Xval, yval, tN_val)
                    loss_out= self.session.run(self._LMNN_loss, feed_dict=feed_dict)
                    stats.add_stat('loss_val', loss_out)
                # Compute accuracy (k-NN in the learned feature space)
                acc = self.evaluate(Xval, yval, Xtrain, ytrain, batch_size=batch_size)
                stats.add_stat('acc_val', acc)
                if verbose==2:
                    # Write stats to summary protocol buffer
                    summ = tf.Summary(value=[
                        tf.Summary.Value(tag='Loss_val', simple_value=np.mean(stats.get_stat('loss_val'))),
                        tf.Summary.Value(tag='Accuracy_val', simple_value=np.mean(stats.get_stat('acc_val')))])
                    # Save to tensorboard
                    self._writer.add_summary(summ, global_step=n_batch_train*e)
            stats.on_epoch_end() # End epoch
            # Check if we should terminate
            if stats.terminate: break
            # Write stats to console (if verbose=True)
            stats.write_stats()
        stats.on_train_end() # End training
        # Save variables and training stats
        self.save_weights(run_id + '/trained_metric')
        stats.save(self.current_loc + '/training_stats')
        return stats
#%%
def transform(self, X, batch_size=64):
''' Transform the data in X
Arguments:
X: N x ?, matrix or tensor of data
batch_size: scalar, number of samples to transform in parallel
Output:
X_trans: N x ?, matrix or tensor with the transformed data
'''
self._assert_if_build()
# Parameters for transformer
N = X.shape[0]
n_batch = int(np.ceil(N / batch_size))
X_trans = np.zeros((N, *self.output_shape[1:]))
# Transform data in batches
for b in range(n_batch):
X_batch = X[batch_size*b:batch_size*(b+1)]
X_trans[batch_size*b:batch_size*(b+1)] = self._transformer(X_batch)
return X_trans
#%%
def predict(self, Xtest, Xtrain, ytrain, batch_size=64):
self._assert_if_build()
Xtest = self.transform(Xtest, batch_size=batch_size)
Xtrain = self.transform(Xtrain, batch_size=batch_size)
pred = knnClassifier(Xtest, Xtrain, ytrain, self.k)
return pred
#%%
def evaluate(self, Xtest, ytest, Xtrain, ytrain, batch_size=64):
''' Evaluates the current metric
Arguments:
Xtest: M x ? metrix or tensor with test data for which we want to
predict its classes for
Xtrain: N x ? matrix or tensor with training data
ytrain: N x 1 vector with class labels for the training data
k: scalar, number of neighbours to look at
batch_size: integer, number of samples to transform in parallel
Output
accuracy: scalar, accuracy of the prediction for the current metric
'''
self._assert_if_build()
pred = self.predict(Xtest, Xtrain, ytrain, batch_size=batch_size)
accuracy = np.mean(pred == ytest)
return accuracy
#%%
def save_weights(self, filename, step=None):
''' Save all weights/variables in the current session to a file
Arguments:
filename: str, name of the file to write to
step: integer, appended to the filename to distingues different saved
files from each other
'''
self._assert_if_build()
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
saver.save(self.session, self.dir_loc+'/'+filename, global_step = step)
#%%
def save_embeddings(self, data, direc=None, labels=None):
""" Embed some data with the current network, and save these to
tensorboard for vizualization
Arguments:
data: data to embed, shape must be equal to model.input_shape
direc: directory to save data to
labels: if data has labels, supply these for vizualization
"""
self._assert_if_build()
direc = self.current_loc if direc is None else '.'
embeddings = self.transform(data)
imgs = data if (data.ndim==4 and (data.shape[-1]==1 or data.shape[-1]==3)) else None
embedding_projector(embeddings, direc, name='embedding',
imgs=imgs, labels=labels, writer=self._writer)
#%%
def get_weights(self):
""" Returns a list of weights in the current graph """
self._assert_if_build()
weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
return self.session.run(weights)
#%%
    def summary(self):
        # Print a short human-readable description of the model: its name,
        # the extractor's input shape, and the extractor's own layer summary.
        self._assert_if_build()
        print('Model: lmnn')
        print('='*65)
        print('Input shape: ', self.extractor.input_shape)
        self.extractor.summary()
#%%
def _get_feed_dict(self, idx_start, idx_end, X, y, tN):
tN_batch = tN[idx_start:idx_end]
idx, inv_idx = np.unique(tN_batch, return_inverse=True)
inv_idx = np.reshape(inv_idx, (-1, 2))
X_batch = X[idx]
y_batch = y[idx]
feed_dict = {self.Xp: X_batch, self.yp: y_batch, self.tNp: inv_idx}
return feed_dict
#%%
    def _assert_if_build(self):
        # Guard used by the public methods: fail fast if the graph has not
        # been constructed yet (i.e. compile() was never called).
        assert self.built, '''Model is not build, call lmnn.compile()
                            before this function is called '''
#%%
def _tN_change(self, tN1, tN2):
N = int(len(tN1)/self.k)
idx1=np.argsort(tN1[:,0])
idx2=np.argsort(tN2[:,0])
tN1_sort=tN1[idx1]
tN2_sort=tN2[idx2]
count = 0
for i in range(N):
count += len(np.intersect1d(tN1_sort[self.k*i:self.k*(i+1),1],
tN2_sort[self.k*i:self.k*(i+1),1]))
tN_change = 1-count/(N*self.k)
return tN_change
#%%
if __name__ == '__main__':
    # Construct model
    # NOTE(review): `lmnnredo` is not defined or imported anywhere in the
    # visible scope of this file -- confirm its origin before running.
    model = lmnnredo()
| [
"tensorflow.reduce_sum",
"tensorflow.get_collection",
"dlmnn.helper.tf_funcs.tf_LMNN_loss",
"numpy.argsort",
"tensorflow.Variable",
"numpy.mean",
"numpy.unique",
"tensorflow.less",
"os.path.exists",
"tensorflow.placeholder",
"dlmnn.helper.tf_funcs.tf_findImposters",
"tensorflow.summary.FileWri... | [((1007, 1019), 'tensorflow.python.keras.Sequential', 'Sequential', ([], {}), '()\n', (1017, 1019), False, 'from tensorflow.python.keras import Sequential\n'), ((1990, 2021), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (2001, 2021), True, 'import tensorflow as tf\n'), ((2040, 2110), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.input_shape', 'name': '"""In_features"""'}), "(tf.float32, shape=self.input_shape, name='In_features')\n", (2054, 2110), True, 'import tensorflow as tf\n'), ((2129, 2187), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None,)', 'name': '"""In_targets"""'}), "(tf.int32, shape=(None,), name='In_targets')\n", (2143, 2187), True, 'import tensorflow as tf\n'), ((2207, 2276), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, 2)', 'name': '"""In_targetNeighbours"""'}), "(tf.int32, shape=(None, 2), name='In_targetNeighbours')\n", (2221, 2276), True, 'import tensorflow as tf\n'), ((2385, 2420), 'dlmnn.helper.tf_funcs.tf_featureExtractor', 'tf_featureExtractor', (['self.extractor'], {}), '(self.extractor)\n', (2404, 2420), False, 'from dlmnn.helper.tf_funcs import tf_LMNN_loss, tf_featureExtractor\n'), ((2446, 2486), 'dlmnn.helper.tf_funcs.tf_makePairwiseFunc', 'tf_makePairwiseFunc', (['self.extractor_func'], {}), '(self.extractor_func)\n', (2465, 2486), False, 'from dlmnn.helper.tf_funcs import tf_makePairwiseFunc, tf_findImposters\n'), ((2577, 2630), 'dlmnn.helper.tf_funcs.tf_findImposters', 'tf_findImposters', (['D', 'self.yp', 'self.tNp'], {'margin': 'margin'}), '(D, self.yp, self.tNp, margin=margin)\n', (2593, 2630), False, 'from dlmnn.helper.tf_funcs import tf_makePairwiseFunc, tf_findImposters\n'), ((2672, 2721), 'dlmnn.helper.tf_funcs.tf_LMNN_loss', 'tf_LMNN_loss', (['D', 'self.tNp', 'tup', 'mu'], {'margin': 'margin'}), '(D, self.tNp, tup, mu, margin=margin)\n', (2684, 
2721), False, 'from dlmnn.helper.tf_funcs import tf_LMNN_loss, tf_featureExtractor\n'), ((3175, 3217), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'self._LMNN_loss'], {}), "('Loss', self._LMNN_loss)\n", (3192, 3217), True, 'import tensorflow as tf\n'), ((3227, 3268), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Num_imp"""', 'self._n_tup'], {}), "('Num_imp', self._n_tup)\n", (3244, 3268), True, 'import tensorflow as tf\n'), ((3828, 3850), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3848, 3850), True, 'import tensorflow as tf\n'), ((3910, 3943), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3941, 3943), True, 'import tensorflow as tf\n'), ((4339, 4372), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4370, 4372), True, 'import tensorflow as tf\n'), ((6118, 6171), 'dlmnn.helper.logger.stat_logger', 'stat_logger', (['maxEpoch', 'n_batch_train'], {'verbose': 'verbose'}), '(maxEpoch, n_batch_train, verbose=verbose)\n', (6129, 6171), False, 'from dlmnn.helper.logger import stat_logger\n'), ((10511, 10548), 'numpy.zeros', 'np.zeros', (['(N, *self.output_shape[1:])'], {}), '((N, *self.output_shape[1:]))\n', (10519, 10548), True, 'import numpy as np\n'), ((11034, 11078), 'dlmnn.helper.neighbor_funcs.knnClassifier', 'knnClassifier', (['Xtest', 'Xtrain', 'ytrain', 'self.k'], {}), '(Xtest, Xtrain, ytrain, self.k)\n', (11047, 11078), False, 'from dlmnn.helper.neighbor_funcs import findTargetNeighbours, knnClassifier\n'), ((11878, 11900), 'numpy.mean', 'np.mean', (['(pred == ytest)'], {}), '(pred == ytest)\n', (11885, 11900), True, 'import numpy as np\n'), ((13116, 13224), 'dlmnn.helper.embeddings.embedding_projector', 'embedding_projector', (['embeddings', 'direc'], {'name': '"""embedding"""', 'imgs': 'imgs', 'labels': 'labels', 'writer': 'self._writer'}), "(embeddings, direc, name='embedding', imgs=imgs, labels=\n 
labels, writer=self._writer)\n", (13135, 13224), False, 'from dlmnn.helper.embeddings import embedding_projector\n'), ((13401, 13452), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (13418, 13452), True, 'import tensorflow as tf\n'), ((13852, 13892), 'numpy.unique', 'np.unique', (['tN_batch'], {'return_inverse': '(True)'}), '(tN_batch, return_inverse=True)\n', (13861, 13892), True, 'import numpy as np\n'), ((13911, 13939), 'numpy.reshape', 'np.reshape', (['inv_idx', '(-1, 2)'], {}), '(inv_idx, (-1, 2))\n', (13921, 13939), True, 'import numpy as np\n'), ((14352, 14373), 'numpy.argsort', 'np.argsort', (['tN1[:, 0]'], {}), '(tN1[:, 0])\n', (14362, 14373), True, 'import numpy as np\n'), ((14386, 14407), 'numpy.argsort', 'np.argsort', (['tN2[:, 0]'], {}), '(tN2[:, 0])\n', (14396, 14407), True, 'import numpy as np\n'), ((797, 809), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (807, 809), True, 'import tensorflow as tf\n'), ((2795, 2819), 'dlmnn.helper.utility.get_optimizer', 'get_optimizer', (['optimizer'], {}), '(optimizer)\n', (2808, 2819), False, 'from dlmnn.helper.utility import get_optimizer\n'), ((3044, 3057), 'tensorflow.shape', 'tf.shape', (['tup'], {}), '(tup)\n', (3052, 3057), True, 'import tensorflow as tf\n'), ((3088, 3105), 'tensorflow.less', 'tf.less', (['D_3', 'D_2'], {}), '(D_3, D_2)\n', (3095, 3105), True, 'import tensorflow as tf\n'), ((3308, 3326), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['D_1'], {}), '(D_1)\n', (3321, 3326), True, 'import tensorflow as tf\n'), ((3367, 3400), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(margin + D_2 - D_3)'], {}), '(margin + D_2 - D_3)\n', (3380, 3400), True, 'import tensorflow as tf\n'), ((3440, 3463), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['true_imp'], {}), '(true_imp)\n', (3453, 3463), True, 'import tensorflow as tf\n'), ((3508, 3532), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['true_imp'], {}), 
'(true_imp)\n', (3522, 3532), True, 'import tensorflow as tf\n'), ((4879, 4907), 'os.path.exists', 'os.path.exists', (['self.dir_loc'], {}), '(self.dir_loc)\n', (4893, 4907), False, 'import datetime, os\n'), ((4909, 4934), 'os.makedirs', 'os.makedirs', (['self.dir_loc'], {}), '(self.dir_loc)\n', (4920, 4934), False, 'import datetime, os\n'), ((4988, 5027), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.current_loc'], {}), '(self.current_loc)\n', (5009, 5027), True, 'import tensorflow as tf\n'), ((5420, 5449), 'numpy.ceil', 'np.ceil', (['(N_train / batch_size)'], {}), '(N_train / batch_size)\n', (5427, 5449), True, 'import numpy as np\n'), ((5888, 5949), 'dlmnn.helper.neighbor_funcs.findTargetNeighbours', 'findTargetNeighbours', (['Xtrain', 'ytrain', 'self.k'], {'name': '"""Training"""'}), "(Xtrain, ytrain, self.k, name='Training')\n", (5908, 5949), False, 'from dlmnn.helper.neighbor_funcs import findTargetNeighbours, knnClassifier\n'), ((6013, 6072), 'dlmnn.helper.neighbor_funcs.findTargetNeighbours', 'findTargetNeighbours', (['Xval', 'yval', 'self.k'], {'name': '"""Validation"""'}), "(Xval, yval, self.k, name='Validation')\n", (6033, 6072), False, 'from dlmnn.helper.neighbor_funcs import findTargetNeighbours, knnClassifier\n'), ((7043, 7068), 'numpy.random.permutation', 'np.random.permutation', (['tN'], {}), '(tN)\n', (7064, 7068), True, 'import numpy as np\n'), ((10468, 10491), 'numpy.ceil', 'np.ceil', (['(N / batch_size)'], {}), '(N / batch_size)\n', (10475, 10491), True, 'import numpy as np\n'), ((12330, 12381), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (12347, 12381), True, 'import tensorflow as tf\n'), ((1779, 1792), 'dlmnn.helper.layers.L2normalize', 'L2normalize', ([], {}), '()\n', (1790, 1792), False, 'from dlmnn.helper.layers import L2normalize\n'), ((5700, 5727), 'numpy.ceil', 'np.ceil', (['(N_val / batch_size)'], {}), '(N_val / batch_size)\n', 
(5707, 5727), True, 'import numpy as np\n'), ((6531, 6606), 'dlmnn.helper.neighbor_funcs.findTargetNeighbours', 'findTargetNeighbours', (['Xtrans', 'ytrain', 'self.k'], {'name': '"""Training"""', 'do_pca': '(False)'}), "(Xtrans, ytrain, self.k, name='Training', do_pca=False)\n", (6551, 6606), False, 'from dlmnn.helper.neighbor_funcs import findTargetNeighbours, knnClassifier\n'), ((8347, 8376), 'numpy.random.permutation', 'np.random.permutation', (['tN_val'], {}), '(tN_val)\n', (8368, 8376), True, 'import numpy as np\n'), ((14531, 14633), 'numpy.intersect1d', 'np.intersect1d', (['tN1_sort[self.k * i:self.k * (i + 1), 1]', 'tN2_sort[self.k * i:self.k * (i + 1), 1]'], {}), '(tN1_sort[self.k * i:self.k * (i + 1), 1], tN2_sort[self.k *\n i:self.k * (i + 1), 1])\n', (14545, 14633), True, 'import numpy as np\n'), ((4709, 4732), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4730, 4732), False, 'import datetime, os\n'), ((3631, 3652), 'tensorflow.pow', 'tf.pow', (['features', '(2.0)'], {}), '(features, 2.0)\n', (3637, 3652), True, 'import tensorflow as tf\n'), ((3760, 3781), 'tensorflow.equal', 'tf.equal', (['features', '(0)'], {}), '(features, 0)\n', (3768, 3781), True, 'import tensorflow as tf\n'), ((6833, 6890), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""tN_change"""', 'simple_value': 'tN_change'}), "(tag='tN_change', simple_value=tN_change)\n", (6849, 6890), True, 'import tensorflow as tf\n')] |
import os
from scipy.stats import truncnorm
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import cv2
from skimage import transform
import numpy as np
import matplotlib.pyplot as plt
import loadModel
import tensorflow as tf
import math
import OpenEXR
import Imath
import exr2png
import array
def genSynthetic(exrPath, maskPath):
    """Zero out the non-sky region of an image using a binary mask.

    Arguments:
        exrPath: path to the source image (read with cv2, cast to float32)
        maskPath: path to the mask image (resized to 64x128; pixels == 0 mark
            the region to erase)
    Returns:
        float32 image with all masked-out (mask == 0) pixels set to 0
    NOTE(review): this definition is shadowed by a later
    genSynthetic(img, maskPath, gray) redefinition further down the file.
    """
    # Fixes: removed the unused local `down_size` and dead commented-out code.
    maskImage = cv2.imread(maskPath)
    maskImage = transform.resize(maskImage, (64, 128))
    source = cv2.imread(exrPath).astype('float32')
    source[maskImage == 0] = 0
    return source
def cv2plt(img):
    """Convert an OpenCV BGR image to RGB channel order for matplotlib."""
    blue, green, red = cv2.split(img)
    return cv2.merge([red, green, blue])
def get_truncnorm(mean, sd, l, r):
    """Return a frozen scipy truncnorm distribution restricted to [l, r].

    The bounds are converted to standard-normal units, as scipy expects.
    """
    a = (l - mean) / sd
    b = (r - mean) / sd
    return truncnorm(a, b, loc=mean, scale=sd)
def truncated_normal(mean, sig, bound):
    """Draw a single sample from N(mean, sig) truncated to bound = [lo, hi]."""
    dist = get_truncnorm(mean, sig, bound[0], bound[1])
    return dist.rvs()
def truncated_lognormal(mean, sig, bound):
    """Draw a lognormal sample: exp of a truncated normal in log-space.

    `bound` gives the bounds of the *final* sample, so they are mapped into
    log-space before truncation.
    """
    lo, hi = math.log(bound[0]), math.log(bound[1])
    draw = get_truncnorm(mean, sig, lo, hi).rvs()
    return math.e ** draw
def radiometric_distortions(image):
    """Apply random radiometric distortions (exposure, per-channel scale, gamma).

    Random draws happen on every call; the draw order (exposure, then the
    three channel weights R/G/B, then gamma) is preserved from the original.
    Returns a TF tensor of the same shape as `image`.
    """
    image = np.array(image)
    image[image < 0] = 0
    # Global exposure factor.
    exposure = truncated_lognormal(0.2, math.sqrt(0.2), [0.1, 10])
    image = exposure * image
    # Per-channel weight bounds, in channel order (R, G, B).
    weight_bounds = [[0.1, 0.2], [0.3, 0.4], [0.5, 2.0]]
    channels = []
    for channel, bounds in enumerate(weight_bounds):
        weight = truncated_lognormal(0, 0.06, bounds)
        channels.append(weight * image[..., channel])
    image = tf.stack(channels, axis=-1)
    # Random gamma correction.
    gamma = truncated_lognormal(0.0035, math.sqrt(0.2), [0.85, 1.2])
    return image ** (1.0 / gamma)
def load(image_file):
    """Load an OpenEXR image and return it as a 64x128 float32 TF tensor.

    Negative pixel values are clamped to 0 before resizing.
    """
    exr = OpenEXR.InputFile(image_file)
    window = exr.header()['dataWindow']
    width = window.max.x - window.min.x + 1
    height = window.max.y - window.min.y + 1
    pixel_type = Imath.PixelType(Imath.PixelType.FLOAT)
    img = np.zeros((height, width, 3), dtype=np.float32)
    # Read the three colour channels into the (H, W, 3) buffer.
    for channel_idx, channel in enumerate(("R", "G", "B")):
        values = array.array('f', exr.channel(channel, pixel_type)).tolist()
        img[:, :, channel_idx] = np.reshape(values, (height, width))
    img[img < 0] = 0
    tensor = tf.constant(img, dtype=tf.float32)
    return tf.image.resize(tensor, [64, 128])
def exr2png(gray, img):
    """Tone-map a linear HDR image tensor to a [0, 255] float image.

    Arguments:
        gray: scalar key value controlling overall brightness
        img: tensor-like object exposing .numpy() (e.g. a TF/torch tensor)
    Returns:
        numpy array of the same shape as img, scaled to [0, 255)
    NOTE(review): img.numpy() may be a view; the clamp below can mutate the
    caller's tensor in place, matching the original behaviour.
    """
    hdr = img.numpy()
    hdr[hdr < 0] = 0
    delta = 0.001
    n_pixels = hdr.shape[0] * hdr.shape[1]
    # Log-average luminance over all channels (delta avoids log(0)).
    log_mean = np.log(delta + hdr).sum() / (3 * n_pixels)
    key = np.exp(log_mean)
    scaled = gray / key * hdr
    # Compress to [0, 1) via x / (1 + x), then scale to 8-bit range.
    mapped = scaled / (1. + scaled)
    return mapped * 255
from PIL import Image
from PIL import ImageEnhance
def enhance(src):
    """Load an image and return a contrast-enhanced copy (factor 1.5).

    Also opens the enhanced image in PIL's default viewer as a side effect.
    """
    original = Image.open(src)
    # Contrast enhancement (comment translated from Chinese).
    enhancer = ImageEnhance.Contrast(original)
    enhanced = enhancer.enhance(1.5)
    enhanced.show()
    return enhanced
import cv2
import matplotlib
from skimage import transform,data
import matplotlib.pyplot as plt
import os
import numpy as np
def genSynthetic(img, maskPath, gray):
    """Tone-map a skybox tensor and black out a downward-shifted mask region.

    Arguments:
        img: HDR image tensor (passed to exr2png)
        maskPath: path to the mask image (resized to 64x128)
        gray: key value forwarded to exr2png
    Returns:
        integer image with masked pixels set to 0
    """
    mask = transform.resize(cv2.imread(maskPath), (64, 128))
    png = exr2png(gray, img).astype(int)
    shift = 10
    # Shift the mask down by `shift` rows; the exposed top rows keep the
    # default value 1 (i.e. nothing is erased there).
    shifted = np.ones((64, 128, 3))
    shifted[shift:, ...] = mask[0:64 - shift, ...]
    png[shifted == 0] = 0
    return png
import glob
if __name__ == '__main__':
    # Input directories: building masks and HDR skybox images.
    mask_dir = '/media/czy/DataDisk/czy/40000data/mask'
    skybox_dir = '/media/czy/DataDisk/czy/40000data/skybox'
    mask_files = os.listdir(mask_dir)
    skybox_files = os.listdir(skybox_dir)
    mask_files.sort()
    skybox_files.sort()
    # Output directories for the composited and plain tone-mapped images.
    SynDir= '/media/czy/DataDisk/czy/newDeepBlue2019.12.28/Syn'
    SkyboxSaveDir = '/media/czy/DataDisk/czy/newDeepBlue2019.12.28/skybox/'
    index = 2000000
    for skybox_path in skybox_files:
        #get the skybox
        skybox = load(skybox_dir +'/'+skybox_path)
        # Two randomized variants per skybox (random distortions, mask, key).
        for i in range(2):
            radio_skybox = radiometric_distortions(skybox)
            rnd = np.random.randint(1,40000,1)
            gray = np.random.uniform(0.1,0.4)
            radio_skybox_png = exr2png(gray,radio_skybox)
            synthetic_pic = genSynthetic(radio_skybox,mask_dir+'/'+mask_files[rnd[0]],gray)
            index +=1
            # Save the masked composite and the unmasked tone-mapped skybox
            # under the same running index so they stay paired.
            cv2.imwrite(SynDir+'/'+str(index)+'.png',cv2plt(synthetic_pic))
            cv2.imwrite(SkyboxSaveDir+'/' +str(index)+'.png',cv2plt(radio_skybox_png))
            print(index)
| [
"scipy.stats.truncnorm",
"numpy.ones",
"numpy.random.randint",
"skimage.transform.resize",
"numpy.exp",
"tensorflow.stack",
"cv2.split",
"numpy.reshape",
"math.log",
"Imath.PixelType",
"math.sqrt",
"tensorflow.constant",
"cv2.merge",
"os.listdir",
"numpy.random.uniform",
"OpenEXR.Input... | [((446, 466), 'cv2.imread', 'cv2.imread', (['maskPath'], {}), '(maskPath)\n', (456, 466), False, 'import cv2\n'), ((484, 522), 'skimage.transform.resize', 'transform.resize', (['maskImage', '(64, 128)'], {}), '(maskImage, (64, 128))\n', (500, 522), False, 'from skimage import transform, data\n'), ((702, 716), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (711, 716), False, 'import cv2\n'), ((727, 747), 'cv2.merge', 'cv2.merge', (['[r, g, b]'], {}), '([r, g, b])\n', (736, 747), False, 'import cv2\n'), ((810, 873), 'scipy.stats.truncnorm', 'truncnorm', (['((l - mean) / sd)', '((r - mean) / sd)'], {'loc': 'mean', 'scale': 'sd'}), '((l - mean) / sd, (r - mean) / sd, loc=mean, scale=sd)\n', (819, 873), False, 'from scipy.stats import truncnorm\n'), ((1184, 1199), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1192, 1199), True, 'import numpy as np\n'), ((1727, 1749), 'tensorflow.stack', 'tf.stack', (['lis'], {'axis': '(-1)'}), '(lis, axis=-1)\n', (1735, 1749), True, 'import tensorflow as tf\n'), ((2004, 2033), 'OpenEXR.InputFile', 'OpenEXR.InputFile', (['image_file'], {}), '(image_file)\n', (2021, 2033), False, 'import OpenEXR\n'), ((2145, 2183), 'Imath.PixelType', 'Imath.PixelType', (['Imath.PixelType.FLOAT'], {}), '(Imath.PixelType.FLOAT)\n', (2160, 2183), False, 'import Imath\n'), ((2292, 2321), 'numpy.reshape', 'np.reshape', (['R', '(sz[1], sz[0])'], {}), '(R, (sz[1], sz[0]))\n', (2302, 2321), True, 'import numpy as np\n'), ((2330, 2359), 'numpy.reshape', 'np.reshape', (['G', '(sz[1], sz[0])'], {}), '(G, (sz[1], sz[0]))\n', (2340, 2359), True, 'import numpy as np\n'), ((2368, 2397), 'numpy.reshape', 'np.reshape', (['B', '(sz[1], sz[0])'], {}), '(B, (sz[1], sz[0]))\n', (2378, 2397), True, 'import numpy as np\n'), ((2407, 2452), 'numpy.zeros', 'np.zeros', (['(sz[1], sz[0], 3)'], {'dtype': 'np.float32'}), '((sz[1], sz[0], 3), dtype=np.float32)\n', (2415, 2452), True, 'import numpy as np\n'), ((2543, 2576), 'tensorflow.constant', 
'tf.constant', (['re'], {'dtype': 'tf.float32'}), '(re, dtype=tf.float32)\n', (2554, 2576), True, 'import tensorflow as tf\n'), ((2586, 2616), 'tensorflow.image.resize', 'tf.image.resize', (['re', '[64, 128]'], {}), '(re, [64, 128])\n', (2601, 2616), True, 'import tensorflow as tf\n'), ((2834, 2852), 'numpy.log', 'np.log', (['(delt + img)'], {}), '(delt + img)\n', (2840, 2852), True, 'import numpy as np\n'), ((2921, 2937), 'numpy.exp', 'np.exp', (['logValue'], {}), '(logValue)\n', (2927, 2937), True, 'import numpy as np\n'), ((3227, 3242), 'PIL.Image.open', 'Image.open', (['src'], {}), '(src)\n', (3237, 3242), False, 'from PIL import Image\n'), ((3269, 3295), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['img'], {}), '(img)\n', (3290, 3295), False, 'from PIL import ImageEnhance\n'), ((3653, 3673), 'cv2.imread', 'cv2.imread', (['maskPath'], {}), '(maskPath)\n', (3663, 3673), False, 'import cv2\n'), ((3691, 3729), 'skimage.transform.resize', 'transform.resize', (['maskImage', '(64, 128)'], {}), '(maskImage, (64, 128))\n', (3707, 3729), False, 'from skimage import transform, data\n'), ((3743, 3761), 'exr2png', 'exr2png', (['gray', 'img'], {}), '(gray, img)\n', (3750, 3761), False, 'import exr2png\n'), ((3829, 3850), 'numpy.ones', 'np.ones', (['(64, 128, 3)'], {}), '((64, 128, 3))\n', (3836, 3850), True, 'import numpy as np\n'), ((4165, 4185), 'os.listdir', 'os.listdir', (['mask_dir'], {}), '(mask_dir)\n', (4175, 4185), False, 'import os\n'), ((4205, 4227), 'os.listdir', 'os.listdir', (['skybox_dir'], {}), '(skybox_dir)\n', (4215, 4227), False, 'import os\n'), ((1258, 1272), 'math.sqrt', 'math.sqrt', (['(0.2)'], {}), '(0.2)\n', (1267, 1272), False, 'import math\n'), ((1811, 1825), 'math.sqrt', 'math.sqrt', (['(0.2)'], {}), '(0.2)\n', (1820, 1825), False, 'import math\n'), ((536, 555), 'cv2.imread', 'cv2.imread', (['exrPath'], {}), '(exrPath)\n', (546, 555), False, 'import cv2\n'), ((4650, 4680), 'numpy.random.randint', 'np.random.randint', (['(1)', '(40000)', 
'(1)'], {}), '(1, 40000, 1)\n', (4667, 4680), True, 'import numpy as np\n'), ((4699, 4726), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(0.4)'], {}), '(0.1, 0.4)\n', (4716, 4726), True, 'import numpy as np\n'), ((4757, 4784), 'exr2png', 'exr2png', (['gray', 'radio_skybox'], {}), '(gray, radio_skybox)\n', (4764, 4784), False, 'import exr2png\n'), ((1065, 1083), 'math.log', 'math.log', (['bound[0]'], {}), '(bound[0])\n', (1073, 1083), False, 'import math\n'), ((1085, 1103), 'math.log', 'math.log', (['bound[1]'], {}), '(bound[1])\n', (1093, 1103), False, 'import math\n')] |
'''
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2018, STEREOLABS.
//
// All rights reserved.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
/*****************************************************************************************
** This sample demonstrates how to capture stereo images and calibration parameters **
** from the ZED camera with OpenCV without using the ZED SDK. **
*****************************************************************************************/
'''
import numpy as np
import os
import argparse
import configparser
import sys
import cv2
import wget
from math import sin, cos, radians, pi
import websocket
import json
import threading
from datetime import datetime
from threading import Thread
from reachy import Reachy, parts
from reachy.trajectory.player import TrajectoryPlayer
from scipy.spatial.transform import Rotation as R
# --- Camera / vision configuration ---------------------------------------
camera_upside_down = True
rectify = False
capture_files = False
find_circles = True
color_buffer = 90 # +/- this value to filter color in different lighting
# Last detected (x, y) position of each of the 9 grid stickers; all start
# at (0, 0) until detection fills them in.
circle_coordinates = {
    1: (0, 0),
    2: (0, 0),
    3: (0, 0),
    4: (0, 0),
    5: (0, 0),
    6: (0, 0),
    7: (0, 0),
    8: (0, 0),
    9: (0, 0)
}
# Default sticker-to-button distance (presumably pixels, like
# maschine_button_distances below -- TODO confirm).
distance_to_button = 100
# Per-sticker angles for grid stickers 1..9 (presumably degrees -- confirm
# against the code that consumes them).
grid_sticker_angles = [
    55, # 1
    57, # 2
    64, # 3
    70, # 4
    80, # 5
    90, # 6
    100, # 7
    112, # 8
    122 # 9
]
# --- Runtime state ---------------------------------------------------------
current_mode = 'play' # 'play' or 'build'
song_playing = False
current_genre = ''
current_group = 'button_a'
play_songs_group = 'button_e'
reachy_build_song_queue = []
reachy_play_song_queue = []
current_button = 0
# Flat button number -> (group button, pad button) pair.
button_number_group_mapping = {
    'button_1': ('button_a', 'button_1'),
    'button_2': ('button_a', 'button_2'),
    'button_3': ('button_a', 'button_3'),
    'button_4': ('button_a', 'button_4'),
    'button_5': ('button_b', 'button_1'),
    'button_6': ('button_b', 'button_2'),
    'button_7': ('button_b', 'button_3'),
    'button_8': ('button_b', 'button_4'),
    'button_9': ('button_c', 'button_1'),
    'button_10': ('button_c', 'button_2'),
    'button_11': ('button_c', 'button_3'),
    'button_12': ('button_c', 'button_4'),
    'button_13': ('button_d', 'button_1'),
    'button_14': ('button_d', 'button_2'),
    'button_15': ('button_d', 'button_3'),
    'button_16': ('button_d', 'button_4')
}
# Song title -> button that triggers it.
song_mapping = {
    'One More Time': 'button_1',
    'Robot Rock': 'button_2',
}
# All Maschine controller button IDs: 8 group buttons + 16 sample pads.
maschine_buttons = ["GROUP_A",
                    "GROUP_B",
                    "GROUP_C",
                    "GROUP_D",
                    "GROUP_E",
                    "GROUP_F",
                    "GROUP_G",
                    "GROUP_H",
                    "SAMPLE_13",
                    "SAMPLE_14",
                    "SAMPLE_15",
                    "SAMPLE_16",
                    "SAMPLE_9",
                    "SAMPLE_10",
                    "SAMPLE_11",
                    "SAMPLE_12",
                    "SAMPLE_5",
                    "SAMPLE_6",
                    "SAMPLE_7",
                    "SAMPLE_8",
                    "SAMPLE_1",
                    "SAMPLE_2",
                    "SAMPLE_3",
                    "SAMPLE_4"]
maschine_button_columns = { # button ID -> column index 1 through 9
    "GROUP_A": 1,
    "GROUP_B": 2,
    "GROUP_C": 3,
    "GROUP_D": 4,
    "GROUP_E": 1,
    "GROUP_F": 2,
    "GROUP_G": 3,
    "GROUP_H": 4,
    "SAMPLE_13": 6,
    "SAMPLE_14": 7,
    "SAMPLE_15": 8,
    "SAMPLE_16": 9,
    "SAMPLE_9": 6,
    "SAMPLE_10": 7,
    "SAMPLE_11": 8,
    "SAMPLE_12": 9,
    "SAMPLE_5": 6,
    "SAMPLE_6": 7,
    "SAMPLE_7": 8,
    "SAMPLE_8": 9,
    "SAMPLE_1": 6,
    "SAMPLE_2": 7,
    "SAMPLE_3": 8,
    "SAMPLE_4": 9
}
maschine_button_distances = { # button ID -> approximate distance in pixels from sticker to button
    "GROUP_A": 80,
    "GROUP_B": 79,
    "GROUP_C": 78,
    "GROUP_D": 77,
    "GROUP_E": 65,
    "GROUP_F": 64,
    "GROUP_G": 63,
    "GROUP_H": 62,
    "SAMPLE_13": 104,
    "SAMPLE_14": 105,
    "SAMPLE_15": 109,
    "SAMPLE_16": 117,
    "SAMPLE_9": 86,
    "SAMPLE_10": 86,
    "SAMPLE_11": 90,
    "SAMPLE_12": 94,
    "SAMPLE_5": 66,
    "SAMPLE_6": 67,
    "SAMPLE_7": 68,
    "SAMPLE_8": 72,
    "SAMPLE_1": 40,
    "SAMPLE_2": 41,
    "SAMPLE_3": 43,
    "SAMPLE_4": 46,
}
class RobotMode:
    """String constants selecting real hardware vs. simulator operation."""
    REAL = 'real'
    SIM = 'sim'
# crude way to work around old way to initialize Reachy
# actual initialization happens in __main__
# TODO: major refactor to not make Reachy a global variable that happens on startup
reachy = None  # global robot handle; assigned during startup in __main__
def relax(arm):
    """Make every motor of the chosen arm compliant (limp).

    Arguments:
        arm: 'left' or 'right'
    """
    assert arm == 'left' or arm == 'right'
    motors = reachy.left_arm.motors if arm == 'left' else reachy.right_arm.motors
    for motor in motors:
        motor.compliant = True
def stiffen(arm):
    """Make every motor of the chosen arm non-compliant (stiff/holding).

    Arguments:
        arm: 'left' or 'right'
    """
    assert arm == 'left' or arm == 'right'
    motors = reachy.left_arm.motors if arm == 'left' else reachy.right_arm.motors
    for motor in motors:
        motor.compliant = False
def goto_arm_joint_solution(arm_choice, joint_solution, duration, wait):
    """Drive one arm to a joint-space target.

    parameters:
        arm_choice (str): 'left' selects the left arm, anything else the right arm
        joint_solution (array): target joint positions, paired with the arm's motors in order
        duration (int): seconds allowed for the motion
        wait (bool): block until the motion finishes when True
    """
    motors = reachy.left_arm.motors if arm_choice == "left" else reachy.right_arm.motors
    goal = {motor.name: position for position, motor in zip(joint_solution, motors)}
    reachy.goto(goal, duration=duration, wait=wait)
# Buttons the right arm can press: the 16 numbered sample pads plus the choke control.
right_buttons = [f'button_{n}' for n in range(1, 17)] + ['control_choke']
# Buttons the left arm can press: the 8 group pads plus transport/utility controls.
# BUG FIX: a missing comma after 'control_stop' fused it with 'control_shift'
# into the bogus entry 'control_stopcontrol_shift' via implicit string
# concatenation, so membership asserts rejected both real button names.
left_buttons = ['button_a', 'button_b', 'button_c', 'button_d',
                'button_e', 'button_f', 'button_g', 'button_h',
                'control_play', 'control_record', 'control_stop',
                'control_shift', 'control_mute', 'control_pattern']
def left_button_press(button_letters):
    """Press each listed left-hand button by replaying its recorded trajectories.

    Each button has two trajectory files in the working directory:
    '<button>1.npz' (approach/press) and '<button>2.npz' (retract).  Pressing a
    group pad also updates the global current_group tracker.  The arm is left
    compliant afterwards.
    """
    global current_group
    # Validate the whole batch before moving anything.
    for b in button_letters:
        assert b in left_buttons
    for button_letter in button_letters:
        stiffen(arm='left')
        my_loaded_trajectory = np.load(button_letter + '1.npz')
        trajectory_player = TrajectoryPlayer(reachy, my_loaded_trajectory)
        trajectory_player.play(wait=True, fade_in_duration=0.4)
        reachy.left_arm.hand.open(end_pos=1, duration=0.3)
        # Group pads change which sample bank is active, so track the selection.
        if button_letter in ['button_a', 'button_b', 'button_c', 'button_d',
                             'button_e', 'button_f', 'button_g', 'button_h']:
            current_group = button_letter
        # close left-hand gripper before replaying the retract trajectory
        reachy.left_arm.hand.close(duration=0.3)
        my_loaded_trajectory = np.load(button_letter + '2.npz')
        trajectory_player = TrajectoryPlayer(reachy, my_loaded_trajectory)
        trajectory_player.play(wait=True, fade_in_duration=0.1)
    relax(arm='left')
def play_song(song_name):
    """Start playing `song_name`: choke any current song, switch to the
    play-songs group if needed, then press the song's mapped button.

    Sets the global reachy_moving flag for the duration of the motion and
    marks song_playing afterwards.
    """
    global reachy_moving
    global current_group
    global song_playing
    # BUG FIX: this used to assign a misspelled local `reach_moving`, so the
    # global reachy_moving flag was never raised while the arms were in motion
    # and the main loop could dispatch overlapping motions.
    reachy_moving = True
    assert song_name in song_mapping
    button = song_mapping[song_name]
    assert button in right_buttons
    if song_playing:
        choke()
    if current_group != play_songs_group:
        left_button_press([play_songs_group])
    right_button_press([button])
    song_playing = True
    relax(arm='left')
    reachy_moving = False
def select_pattern(button_numbers, hold_button='control_pattern'):
    """Hold `hold_button` with the left arm while the right arm presses each
    pattern button in `button_numbers`, switching groups first when needed.

    parameters:
        button_numbers (list[str]): right-hand buttons to press, each mapped via
            button_number_group_mapping to a (group, second_button) pair
        hold_button (str): left-hand button to hold during each press
    """
    global current_group
    global reachy_moving
    reachy_moving = True
    # Validate everything before moving.
    for b in button_numbers:
        assert b in right_buttons
    assert hold_button in left_buttons
    for button_number in button_numbers:
        button_group = button_number_group_mapping[button_number][0]
        second_button = button_number_group_mapping[button_number][1]
        print('mapping ' + button_number + ' to group ' + button_group + ' and second button ' + second_button)
        if current_group != button_group:
            left_button_press([button_group])
        stiffen(arm='left')
        # BUG FIX: this compared against the literal 'mute', which can never
        # equal a value from left_buttons (the assert above guarantees
        # membership), so the gripper-open branch was dead code.
        if hold_button == 'control_mute':
            reachy.left_arm.hand.open()
        my_loaded_trajectory = np.load(hold_button + '1.npz')
        trajectory_player = TrajectoryPlayer(reachy, my_loaded_trajectory)
        trajectory_player.play(wait=True, fade_in_duration=0.4)
        right_button_press([second_button])
        my_loaded_trajectory = np.load(hold_button + '2.npz')
        trajectory_player = TrajectoryPlayer(reachy, my_loaded_trajectory)
        trajectory_player.play(wait=True, fade_in_duration=0.1)
        reachy.left_arm.hand.close()
        relax(arm='left')
    reachy_moving = False
def right_button_press(button_letters):
    """Press each listed right-hand button by replaying its recorded trajectory
    ('<button>.npz' in the working directory), then relax the right arm."""
    # Validate the whole batch up front.
    for name in button_letters:
        assert name in right_buttons
    for name in button_letters:
        stiffen(arm='right')
        trajectory = np.load(name + '.npz')
        TrajectoryPlayer(reachy, trajectory).play(wait=True, fade_in_duration=0.4)
    relax(arm='right')
# Holding shift while pressing the choke control stops ("chokes") the current sample.
def choke():
    """Hold control_shift with the left arm and press control_choke with the
    right arm.  The left arm is deliberately left stiff afterwards."""
    shift = 'control_shift'
    stiffen(arm='left')
    # Approach-and-hold trajectory for the shift button.
    TrajectoryPlayer(reachy, np.load(shift + '1.npz')).play(wait=True, fade_in_duration=0.4)
    right_button_press(['control_choke'])
    # Retract trajectory releasing the shift button.
    TrajectoryPlayer(reachy, np.load(shift + '2.npz')).play(wait=True, fade_in_duration=0.1)
def reset_reachy():
    """Clear both button queues and choke any song that is still playing."""
    # TODO add any extra reset steps
    global reachy_play_song_queue
    global reachy_build_song_queue
    global song_playing
    global reachy_moving
    reachy_play_song_queue = []
    reachy_build_song_queue = []
    if song_playing:
        # Guard the motion with the reachy_moving flag so the main loop does
        # not dispatch new motions while the choke is in progress.
        reachy_moving = True
        choke()
        reachy_moving = False
        song_playing = False
def change_mode(new_mode):
    """Switch between modes (e.g. 'play'/'build'), dropping the outgoing mode's
    pending queue and resetting the robot.  No-op when the mode is unchanged."""
    global reachy_play_song_queue
    global reachy_build_song_queue
    global current_mode
    # Destroy any remaining items in the queue
    if current_mode != new_mode:
        if current_mode == 'play':
            reachy_play_song_queue = []
        elif current_mode == 'build':
            reachy_build_song_queue = []
        # else do nothing...
        current_mode = new_mode
        reset_reachy()
def change_genre(new_genre):
    """Record the genre selected by the remote UI in the global current_genre."""
    global current_genre
    current_genre = new_genre
def connect_websocket():
    """Connect to the control websocket and pump it on a background daemon
    thread so the main loop keeps running."""
    # websocket.enableTrace(True)
    app = websocket.WebSocketApp(
        "wss://3q99jw33n1.execute-api.us-east-1.amazonaws.com/prod",
        on_open=on_open,
        on_message=on_message,
        on_error=on_error,
        on_close=on_close,
    )
    worker = threading.Thread(target=app.run_forever, daemon=True)
    worker.start()
def on_message(ws, message):
    """Websocket callback: dispatch a JSON control message.

    Payload keys are checked in priority order: 'button' (queue a build press),
    'session' (reset on 'stop'), 'mode', 'genre', 'song' (queue a song).
    Anything else is logged as an error.
    """
    global current_mode
    # print(message)
    payload = json.loads(message)
    if 'button' in payload:
        pressed = payload['button']
        print('adding button to queue: ' + pressed)
        reachy_build_song_queue.append(pressed)
    elif 'session' in payload:
        action = payload['session']
        print('session: ' + str(action))
        if action == 'stop':
            reset_reachy()
    elif 'mode' in payload:
        change_mode(new_mode=payload['mode'])
        print('switched to mode: ' + str(current_mode))
    elif 'genre' in payload:
        change_genre(new_genre=payload['genre'])
        print('switched to genre: ' + str(payload['genre']))
    elif 'song' in payload:
        reachy_play_song_queue.append(payload['song'])
        print('adding song to queue: ' + str(payload['song']))
    else:
        print('ERROR: Unknown message received' + str(message))
def on_error(ws, error):
    """Websocket error callback: just log the error."""
    print(error)
def on_close(ws, close_status_code, close_msg):
    """Websocket close callback: log a timestamp and immediately reconnect."""
    # print('### websocket closed ###')
    print('### Reconnecting Websocket ' + datetime.now().strftime("%m/%d/%Y, %H:%M:%S") + ' ###')
    connect_websocket()
def on_open(ws):
    """Websocket open callback: log that the connection is up."""
    print('### websocket opened ###')
def point_pos(x0, y0, d, theta):
    """Return integer (x, y) coordinates displaced distance `d` from (x0, y0)
    at heading `theta` degrees, where theta=0 points straight up (+y)."""
    angle = pi / 2 - radians(theta)
    x = int(round(x0 + d * cos(angle)))
    y = int(round(y0 + d * sin(angle)))
    return x, y
def download_calibration_file(serial_number):
    """Return the path to the ZED calibration file for `serial_number`,
    downloading it from Stereolabs into the per-OS settings directory when it
    is not already cached.  Returns "" when no file could be obtained.
    """
    if os.name == 'nt':
        # NOTE(review): assumes APPDATA is set; os.getenv returns None otherwise
        # and the concatenation would raise — confirm acceptable on Windows.
        hidden_path = os.getenv('APPDATA') + '\\Stereolabs\\settings\\'
    else:
        hidden_path = '/usr/local/zed/settings/'
    calibration_file = hidden_path + 'SN' + str(serial_number) + '.conf'
    if not os.path.isfile(calibration_file):
        url = 'http://calib.stereolabs.com/?SN='
        # wget writes straight to calibration_file; the unused return value was
        # previously bound to a dead local.
        wget.download(url=url + str(serial_number), out=calibration_file)
        if not os.path.isfile(calibration_file):
            print('Invalid Calibration File')
            return ""
    return calibration_file
def init_calibration(calibration_file, image_size):
    """Build stereo rectification data from a Stereolabs SN*.conf file.

    parameters:
        calibration_file (str): path to the factory calibration file
        image_size (Resolution): per-eye image size being captured

    returns:
        (cameraMatrix_left, cameraMatrix_right,
         map_left_x, map_left_y, map_right_x, map_right_y)
        where the matrices are the rectified projection matrices (P1, P2) and
        the maps are suitable for cv2.remap() on each eye.

    Replaces ~20 copy-pasted float(...) lookups with a single helper, and
    drops dead code (misspelled `cameraMarix_left` init, unused `check_data`,
    unused p3 coefficients, pointless R1/R2/P1/P2 pre-initialisation).
    """
    config = configparser.ConfigParser()
    config.read(calibration_file)

    # Map the requested width onto the Stereolabs section-name suffix,
    # falling back to HD for unknown sizes (matching the stock sample code).
    if image_size.width == 2208:
        resolution_str = '2K'
    elif image_size.width == 1920:
        resolution_str = 'FHD'
    elif image_size.width == 1280:
        resolution_str = 'HD'
    elif image_size.width == 672:
        resolution_str = 'VGA'
    else:
        resolution_str = 'HD'

    def param(section, key):
        # Missing entries default to 0, as in the Stereolabs reference code.
        return float(config[section][key] if key in config[section] else 0)

    # Translation between the two sensors (baseline is stored positive).
    T_ = np.array([-param('STEREO', 'Baseline'),
                   param('STEREO', 'TY_' + resolution_str),
                   param('STEREO', 'TZ_' + resolution_str)])

    left_sec = 'LEFT_CAM_' + resolution_str
    right_sec = 'RIGHT_CAM_' + resolution_str
    cameraMatrix_left = np.array([[param(left_sec, 'fx'), 0, param(left_sec, 'cx')],
                                  [0, param(left_sec, 'fy'), param(left_sec, 'cy')],
                                  [0, 0, 1]])
    cameraMatrix_right = np.array([[param(right_sec, 'fx'), 0, param(right_sec, 'cx')],
                                   [0, param(right_sec, 'fy'), param(right_sec, 'cy')],
                                   [0, 0, 1]])
    # Distortion coefficients in OpenCV order (k1, k2, p1, p2, k3) as a 5x1 array.
    distCoeffs_left = np.array([[param(left_sec, k)] for k in ('k1', 'k2', 'p1', 'p2', 'k3')])
    distCoeffs_right = np.array([[param(right_sec, k)] for k in ('k1', 'k2', 'p1', 'p2', 'k3')])

    # Inter-camera rotation stored as a Rodrigues vector -> rotation matrix.
    R_zed = np.array([param('STEREO', 'RX_' + resolution_str),
                      param('STEREO', 'CV_' + resolution_str),
                      param('STEREO', 'RZ_' + resolution_str)])
    R, _ = cv2.Rodrigues(R_zed)
    T = np.array([[T_[0]], [T_[1]], [T_[2]]])

    R1, R2, P1, P2 = cv2.stereoRectify(cameraMatrix1=cameraMatrix_left,
                                       cameraMatrix2=cameraMatrix_right,
                                       distCoeffs1=distCoeffs_left,
                                       distCoeffs2=distCoeffs_right,
                                       R=R, T=T,
                                       flags=cv2.CALIB_ZERO_DISPARITY,
                                       alpha=0,
                                       imageSize=(image_size.width, image_size.height),
                                       newImageSize=(image_size.width, image_size.height))[0:4]

    map_left_x, map_left_y = cv2.initUndistortRectifyMap(
        cameraMatrix_left, distCoeffs_left, R1, P1,
        (image_size.width, image_size.height), cv2.CV_32FC1)
    map_right_x, map_right_y = cv2.initUndistortRectifyMap(
        cameraMatrix_right, distCoeffs_right, R2, P2,
        (image_size.width, image_size.height), cv2.CV_32FC1)

    # Downstream code works in rectified image space, so return the rectified
    # projection matrices as the camera matrices (as the original did).
    return P1, P2, map_left_x, map_left_y, map_right_x, map_right_y
class Resolution:
    """Mutable image-size holder; defaults are the ZED HD720 per-eye size."""

    width, height = 1280, 720
def main(args, robot):
    """Main capture/dispatch loop.

    Every iteration: (1) if a robot is attached, pop one pending item from the
    active mode's queue and press it on a worker thread; (2) if a real camera
    is attached, grab a stereo frame, detect the orange sticker circles, and
    draw debug overlays.  Runs until a key press (other than the tuning keys)
    breaks the loop.  Relies on several module-level flags defined earlier in
    the file (camera_upside_down, rectify, capture_files, find_circles,
    grid_sticker_angles, current_mode, the two queues) — presumably set near
    the top of the file; confirm before reuse.
    """
    global color_buffer
    global current_button
    global circle_coordinates
    global distance_to_button
    global reachy_moving
    # Open the ZED camera
    cap = cv2.VideoCapture(0)
    if cap.isOpened() == 0:
        exit(-1)
    image_size = Resolution()
    image_size.width = 1280
    image_size.height = 720
    # Set the video resolution to HD720
    # The ZED exposes both eyes side by side, hence double width.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, image_size.width * 2)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, image_size.height)
    calibration_file = args.camera_config_path
    if args.camera_config_path is None:
        serial_number = args.camera_id
        calibration_file = download_calibration_file(serial_number)
        if calibration_file == "":
            print("No camera calibration file found. Exiting.")
            exit(1)
    print("Calibration file found. Loading...")
    camera_matrix_left, camera_matrix_right, map_left_x, map_left_y, map_right_x, map_right_y = init_calibration(
        calibration_file, image_size)
    # Frame counter, used by the capture_files branch below.
    # NOTE(review): this `i` is clobbered by the circle-enumeration `i = 1`
    # inside the find_circles branch, so the `i > 90` capture trigger is
    # unreliable whenever find_circles is on — confirm and rename one of them.
    i = 0
    while True:
        if robot:
            # check queue
            if not reachy_moving and current_mode == 'play' and len(reachy_play_song_queue) > 0:
                reachy_moving = True
                next_item = reachy_play_song_queue.pop(0)
                print('playing next song in queue: ' + next_item + ', remaining queue items: ' + ', '.join(reachy_play_song_queue))
                Thread(target=play_song, args=(next_item,)).start()
            elif not reachy_moving and current_mode == 'build' and len(reachy_build_song_queue) > 0:
                reachy_moving = True
                next_item = reachy_build_song_queue.pop(0)
                print('pressing button in queue: ' + next_item + ', remaining queue items: ' + ', '.join(reachy_build_song_queue))
                Thread(target=select_pattern, args=([next_item],)).start()
        if args.camera_mode == RobotMode.REAL:
            # region camera handling and computer vision
            # Get a new frame from camera
            retval, frame = cap.read()
            # Extract left and right images from side-by-side
            left_right_image = np.split(frame, 2, axis=1)
            if camera_upside_down:
                left_image_raw = cv2.flip(left_right_image[0], -1)
                right_image_raw = cv2.flip(left_right_image[1], -1)
            else:
                left_image_raw = left_right_image[0]
                right_image_raw = left_right_image[1]
            # Display images
            cv2.imshow("left RAW", left_image_raw)
            cv2.imshow("right RAW", right_image_raw)
            if rectify:
                left_rect = cv2.remap(left_image_raw, map_left_x, map_left_y, interpolation=cv2.INTER_LINEAR)
                right_rect = cv2.remap(right_image_raw, map_right_x, map_right_y, interpolation=cv2.INTER_LINEAR)
                cv2.imshow("left RECT", left_rect)
                cv2.imshow("right RECT", right_rect)
                # One-shot capture of the current frame pair after a warm-up of ~90 frames.
                if capture_files and i > 90:
                    cv2.imwrite('left_raw2.jpg', left_image_raw)
                    cv2.imwrite('right_raw2.jpg', right_image_raw)
                    cv2.imwrite('left_rect2.jpg', left_rect)
                    cv2.imwrite('right_rect2.jpg', right_rect)
                    break
            if find_circles:
                image = left_image_raw
                # font
                font = cv2.FONT_HERSHEY_SIMPLEX
                # fontScale
                fontScale = 1
                # Blue color in BGR
                color = (255, 0, 0)
                color_arm = (0, 0, 255)
                # Line thickness of 2 px
                thickness = 2
                board_min_y = args.board_edge_bottom
                board_max_y = args.board_edge_top
                arm_min_y = 0
                arm_max_y = 90
                # Crop to the region of interest around the board.
                y = 173
                x = 0
                h = 200
                w = 670
                # HSV bounds for the orange sticker colour (pre-buffer).
                lower_red = np.array([1, 200, 70])
                upper_red = np.array([15, 255, 175])
                image = image[y:y + h, x:x + w]
                # NOTE(review): importing and defining a helper inside the frame
                # loop re-executes every iteration; hoist to module level.
                import heapq
                def closest_points(list_of_tuples, x_value, n=9):
                    return heapq.nsmallest(n, list_of_tuples, lambda pnt: abs(pnt[0] - x_value))
                output = image.copy()
                # RGB
                # bottom = [5, 20, 76]
                # upper = [21, 60, 167]
                # HSV
                bottom = [2, 207, 80]
                upper = [9, 243, 166]
                # Widen the HSV window by the interactively tuned color_buffer,
                # clamped to valid HSV ranges.
                bottom = [max(bottom[0] - color_buffer, 0), max(bottom[1] - color_buffer, 0),
                          max(bottom[2] - color_buffer, 0)]
                upper = [min(upper[0] + color_buffer, 179), min(upper[1] + color_buffer, 255),
                         min(upper[2] + color_buffer, 255)]
                # print(bottom)
                # print(upper)
                hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
                mask_red = cv2.inRange(hsv, np.array(bottom), np.array(upper))
                res_red = cv2.bitwise_and(image, image, mask=mask_red)
                gray_masked = cv2.cvtColor(res_red, cv2.COLOR_BGR2GRAY)
                cv2.imshow('res_red', res_red)
                cv2.imshow('gray_masked', gray_masked)
                # cv2.waitKey(0)
                # detect circles in the image
                circles = cv2.HoughCircles(gray_masked, cv2.HOUGH_GRADIENT,
                                           minDist=33,
                                           dp=1.1,
                                           param1=130,
                                           param2=8,
                                           minRadius=4,
                                           maxRadius=12)
                # ensure at least some circles were found
                if circles is not None:
                    # convert the (x, y) coordinates and radius of the circles to integers
                    circles = np.round(circles[0, :]).astype("int")
                    board_circles = [(x, y, r) for (x, y, r) in circles if board_min_y <= y <= board_max_y]
                    board_circles = closest_points(list_of_tuples=board_circles, x_value=278, n=9)
                    board_circles = sorted(
                        [(i[0], i[1], i[2]) for i in board_circles if board_min_y <= i[1] <= board_max_y],
                        key=lambda l: l[0])
                    # loop over the (x, y) coordinates and radius of the circles
                    # NOTE(review): reuses (and clobbers) the outer frame counter `i`.
                    i = 1
                    if len(board_circles) == 9:
                        for (x, y, r) in board_circles:
                            # draw the circle in the output image, then draw a rectangle
                            # corresponding to the center of the circle
                            cv2.circle(output, (x, y), r, (0, 255, 0), 4)
                            cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
                            # print(str(x) + ',' + str(y) + ': ' + str(image[y,x]))
                            cv2.putText(output, str(i), (x - 10, y + 40), font, fontScale, color, thickness, cv2.LINE_AA)
                            circle_coordinates[i] = (x,y)
                            # x1, y1 = point_pos(x0=x, y0=y, d=100, theta=grid_sticker_angles[i - 1] + 90)
                            # cv2.line(output, (x, y), (x1, y1), 255, 2)
                            i += 1
                        # Project the currently selected button from its column's sticker.
                        current_button_str = maschine_buttons[current_button]
                        column_num = maschine_button_columns[current_button_str]
                        origin_coordinates = circle_coordinates[column_num]
                        x0 = origin_coordinates[0]
                        y0 = origin_coordinates[1]
                        button_distance = maschine_button_distances[current_button_str]
                        # button_distance = distance_to_button
                        theta = grid_sticker_angles[column_num - 1] + 90
                        x1, y1 = point_pos(x0=x0, y0=y0, d=button_distance, theta=theta)
                        cv2.line(output, (origin_coordinates[0], origin_coordinates[1]), (x1, y1), 255, 2)
                    # Stickers on the robot arm live in a separate (upper) y-band.
                    arm_circles = sorted([(i[0], i[1], i[2]) for i in circles if arm_min_y <= i[1] <= arm_max_y],
                                         key=lambda l: l[0])
                    for (x, y, r) in arm_circles:
                        # draw the circle in the output image, then draw a rectangle
                        # corresponding to the center of the circle
                        cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 255, 255), -1)
                    if len(arm_circles) == 4:
                        arm_l1 = [arm_circles[0][0], arm_circles[0][1]]
                        arm_l2 = [arm_circles[1][0], arm_circles[1][1]]
                        arm_r1 = [arm_circles[3][0], arm_circles[3][1]]
                        arm_r2 = [arm_circles[2][0], arm_circles[2][1]]
                        cv2.line(output, (arm_l1[0], arm_l1[1]), (arm_l2[0], arm_l2[1]), 255, 2)
                        cv2.line(output, (arm_r1[0], arm_r1[1]), (arm_r2[0], arm_r2[1]), 255, 2)
                        cv2.putText(output, 'L1', (arm_l1[0], arm_l1[1]), font, fontScale, color_arm, thickness,
                                    cv2.LINE_AA)
                        cv2.putText(output, 'L2', (arm_l2[0], arm_l2[1]), font, fontScale, color_arm, thickness,
                                    cv2.LINE_AA)
                        cv2.putText(output, 'R1', (arm_r1[0], arm_r1[1]), font, fontScale, color_arm, thickness,
                                    cv2.LINE_AA)
                        cv2.putText(output, 'R2', (arm_r2[0], arm_r2[1]), font, fontScale, color_arm, thickness,
                                    cv2.LINE_AA)
                    else:
                        # print('Cannot find orange arm dot coordinates')
                        ok = True
                # show the output image
                cv2.imshow("output", np.hstack([image, output]))
                # cv2.waitKey(0)
                # Interactive tuning: i/l adjust the HSV buffer, j/k the button
                # distance, t cycles the highlighted button; any other key quits.
                key = cv2.waitKey(30)
                if key == 105:  # i on keyboard
                    color_buffer += 5
                    print('color_buffer' + str(color_buffer))
                elif key == 108:  # l on keyboard
                    color_buffer -= 5
                    print('color_buffer' + str(color_buffer))
                elif key == 106:  # j on keyboard
                    distance_to_button += 1
                    print('distance ' + str(distance_to_button))
                elif key == 107:  # k on keyboard
                    distance_to_button -= 1
                    print('distance ' + str(distance_to_button))
                elif key == 116:  # t on keyboard
                    if current_button == len(maschine_buttons) - 1:
                        current_button = 0
                    else:
                        current_button += 1
                    current_button_str = maschine_buttons[current_button]
                    column_num = maschine_button_columns[current_button_str]
                    origin_coordinates = circle_coordinates[column_num]
                    print(current_button_str)
                    print(column_num)
                    print(origin_coordinates)
                elif key >= 0:
                    break
            i += 1
            # endregion
    exit(0)
if __name__ == "__main__":
    # Command-line interface: one entry per flag, expanded into argparse below.
    cli_commands = {
        'robot_mode': {
            'default': RobotMode.SIM,
            'type': str,
            'help': "Mode to run the robot in. Options: 'SIM' for simulator and 'REAL' for real hardware."
        },
        'camera_mode': {
            'default': RobotMode.SIM,
            'type': str,
            'help': "Mode to run the camera in. Options: Use 'SIM' when no camera is available " \
                    "(which currently just ignores camera part of code, but might add in preloaded stream someday)" \
                    "and 'REAL' for when real camera is available."
        },
        'camera_id': {
            # BUG FIX: the default used to be the string '15618'; argparse does
            # not pass defaults through `type`, so args.camera_id was a str when
            # defaulted but an int when supplied on the command line.
            'default': 15618,
            'type': int,
            'help': 'Camera serial number.'
        },
        'camera_config_path': {
            'default': './SN15618.conf',
            'type': str,
            'help': 'Path to camera config file if manually supplying one.'
        },
        'board_edge_top': {
            'default': 200,
            'type': int,
            'help': '(Max Y-value): Pixel value of the top edge of the board face with the orange circles.'
        },
        'board_edge_bottom': {
            'default': 90,
            'type': int,
            'help': '(Min Y-value): Pixel value of the bottom edge of the board face with the orange circles.'
        },
    }
    parser = argparse.ArgumentParser(description='Input for Reachy with Zed camera.')
    for command, values in cli_commands.items():
        parser.add_argument(
            f"--{command}",
            default=values['default'],
            type=values['type'],
            help=values['help']
        )
    args = parser.parse_args()
    connect_websocket()
    if args.robot_mode == RobotMode.REAL:
        # Lazily construct the global Reachy handle (see the module-level
        # TODO about not keeping it global).
        if reachy is None:
            reachy = Reachy(
                right_arm=parts.RightArm(
                    io='/dev/ttyUSB*',
                    hand='force_gripper',
                ),
                left_arm=parts.LeftArm(
                    io='/dev/ttyUSB*',
                    hand='force_gripper')
            )
        # Exercise both arms once so the motors are in a known state, then
        # leave them compliant until a command arrives.
        stiffen(arm='left')
        stiffen(arm='right')
        relax(arm='left')
        relax(arm='right')
    main(args, reachy)
| [
"numpy.load",
"argparse.ArgumentParser",
"cv2.bitwise_and",
"cv2.stereoRectify",
"cv2.remap",
"reachy.parts.LeftArm",
"os.path.isfile",
"cv2.rectangle",
"cv2.imshow",
"numpy.round",
"reachy.trajectory.player.TrajectoryPlayer",
"websocket.WebSocketApp",
"cv2.line",
"json.loads",
"math.rad... | [((10556, 10586), 'numpy.load', 'np.load', (["(hold_button + '1.npz')"], {}), "(hold_button + '1.npz')\n", (10563, 10586), True, 'import numpy as np\n'), ((10611, 10657), 'reachy.trajectory.player.TrajectoryPlayer', 'TrajectoryPlayer', (['reachy', 'my_loaded_trajectory'], {}), '(reachy, my_loaded_trajectory)\n', (10627, 10657), False, 'from reachy.trajectory.player import TrajectoryPlayer\n'), ((10789, 10819), 'numpy.load', 'np.load', (["(hold_button + '2.npz')"], {}), "(hold_button + '2.npz')\n", (10796, 10819), True, 'import numpy as np\n'), ((10844, 10890), 'reachy.trajectory.player.TrajectoryPlayer', 'TrajectoryPlayer', (['reachy', 'my_loaded_trajectory'], {}), '(reachy, my_loaded_trajectory)\n', (10860, 10890), False, 'from reachy.trajectory.player import TrajectoryPlayer\n'), ((11902, 12073), 'websocket.WebSocketApp', 'websocket.WebSocketApp', (['"""wss://3q99jw33n1.execute-api.us-east-1.amazonaws.com/prod"""'], {'on_open': 'on_open', 'on_message': 'on_message', 'on_error': 'on_error', 'on_close': 'on_close'}), "(\n 'wss://3q99jw33n1.execute-api.us-east-1.amazonaws.com/prod', on_open=\n on_open, on_message=on_message, on_error=on_error, on_close=on_close)\n", (11924, 12073), False, 'import websocket\n'), ((12203, 12242), 'threading.Thread', 'threading.Thread', ([], {'target': 'ws.run_forever'}), '(target=ws.run_forever)\n', (12219, 12242), False, 'import threading\n'), ((12368, 12387), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (12378, 12387), False, 'import json\n'), ((14428, 14440), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14436, 14440), True, 'import numpy as np\n'), ((14455, 14482), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (14480, 14482), False, 'import configparser\n'), ((18297, 18317), 'cv2.Rodrigues', 'cv2.Rodrigues', (['R_zed'], {}), '(R_zed)\n', (18310, 18317), False, 'import cv2\n'), ((18342, 18429), 'numpy.array', 'np.array', (['[[left_cam_fx, 0, left_cam_cx], [0, 
left_cam_fy, left_cam_cy], [0, 0, 1]]'], {}), '([[left_cam_fx, 0, left_cam_cx], [0, left_cam_fy, left_cam_cy], [0,\n 0, 1]])\n', (18350, 18429), True, 'import numpy as np\n'), ((18520, 18611), 'numpy.array', 'np.array', (['[[right_cam_fx, 0, right_cam_cx], [0, right_cam_fy, right_cam_cy], [0, 0, 1]]'], {}), '([[right_cam_fx, 0, right_cam_cx], [0, right_cam_fy, right_cam_cy],\n [0, 0, 1]])\n', (18528, 18611), True, 'import numpy as np\n'), ((18701, 18791), 'numpy.array', 'np.array', (['[[left_cam_k1], [left_cam_k2], [left_cam_p1], [left_cam_p2], [left_cam_k3]]'], {}), '([[left_cam_k1], [left_cam_k2], [left_cam_p1], [left_cam_p2], [\n left_cam_k3]])\n', (18709, 18791), True, 'import numpy as np\n'), ((18811, 18906), 'numpy.array', 'np.array', (['[[right_cam_k1], [right_cam_k2], [right_cam_p1], [right_cam_p2], [right_cam_k3]\n ]'], {}), '([[right_cam_k1], [right_cam_k2], [right_cam_p1], [right_cam_p2], [\n right_cam_k3]])\n', (18819, 18906), True, 'import numpy as np\n'), ((18911, 18948), 'numpy.array', 'np.array', (['[[T_[0]], [T_[1]], [T_[2]]]'], {}), '([[T_[0]], [T_[1]], [T_[2]]])\n', (18919, 18948), True, 'import numpy as np\n'), ((18973, 18985), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18981, 18985), True, 'import numpy as np\n'), ((19651, 19780), 'cv2.initUndistortRectifyMap', 'cv2.initUndistortRectifyMap', (['cameraMatrix_left', 'distCoeffs_left', 'R1', 'P1', '(image_size.width, image_size.height)', 'cv2.CV_32FC1'], {}), '(cameraMatrix_left, distCoeffs_left, R1, P1, (\n image_size.width, image_size.height), cv2.CV_32FC1)\n', (19678, 19780), False, 'import cv2\n'), ((19864, 19995), 'cv2.initUndistortRectifyMap', 'cv2.initUndistortRectifyMap', (['cameraMatrix_right', 'distCoeffs_right', 'R2', 'P2', '(image_size.width, image_size.height)', 'cv2.CV_32FC1'], {}), '(cameraMatrix_right, distCoeffs_right, R2, P2, (\n image_size.width, image_size.height), cv2.CV_32FC1)\n', (19891, 19995), False, 'import cv2\n'), ((20457, 20476), 'cv2.VideoCapture', 
'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (20473, 20476), False, 'import cv2\n'), ((32980, 33052), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Input for Reachy with Zed camera."""'}), "(description='Input for Reachy with Zed camera.')\n", (33003, 33052), False, 'import argparse\n'), ((7545, 7577), 'numpy.load', 'np.load', (["(button_letter + '1.npz')"], {}), "(button_letter + '1.npz')\n", (7552, 7577), True, 'import numpy as np\n'), ((7606, 7652), 'reachy.trajectory.player.TrajectoryPlayer', 'TrajectoryPlayer', (['reachy', 'my_loaded_trajectory'], {}), '(reachy, my_loaded_trajectory)\n', (7622, 7652), False, 'from reachy.trajectory.player import TrajectoryPlayer\n'), ((8090, 8122), 'numpy.load', 'np.load', (["(button_letter + '2.npz')"], {}), "(button_letter + '2.npz')\n", (8097, 8122), True, 'import numpy as np\n'), ((8151, 8197), 'reachy.trajectory.player.TrajectoryPlayer', 'TrajectoryPlayer', (['reachy', 'my_loaded_trajectory'], {}), '(reachy, my_loaded_trajectory)\n', (8167, 8197), False, 'from reachy.trajectory.player import TrajectoryPlayer\n'), ((9516, 9546), 'numpy.load', 'np.load', (["(hold_button + '1.npz')"], {}), "(hold_button + '1.npz')\n", (9523, 9546), True, 'import numpy as np\n'), ((9575, 9621), 'reachy.trajectory.player.TrajectoryPlayer', 'TrajectoryPlayer', (['reachy', 'my_loaded_trajectory'], {}), '(reachy, my_loaded_trajectory)\n', (9591, 9621), False, 'from reachy.trajectory.player import TrajectoryPlayer\n'), ((9763, 9793), 'numpy.load', 'np.load', (["(hold_button + '2.npz')"], {}), "(hold_button + '2.npz')\n", (9770, 9793), True, 'import numpy as np\n'), ((9822, 9868), 'reachy.trajectory.player.TrajectoryPlayer', 'TrajectoryPlayer', (['reachy', 'my_loaded_trajectory'], {}), '(reachy, my_loaded_trajectory)\n', (9838, 9868), False, 'from reachy.trajectory.player import TrajectoryPlayer\n'), ((10229, 10260), 'numpy.load', 'np.load', (["(button_letter + '.npz')"], {}), "(button_letter + '.npz')\n", (10236, 
10260), True, 'import numpy as np\n'), ((10289, 10335), 'reachy.trajectory.player.TrajectoryPlayer', 'TrajectoryPlayer', (['reachy', 'my_loaded_trajectory'], {}), '(reachy, my_loaded_trajectory)\n', (10305, 10335), False, 'from reachy.trajectory.player import TrajectoryPlayer\n'), ((13564, 13578), 'math.radians', 'radians', (['theta'], {}), '(theta)\n', (13571, 13578), False, 'from math import sin, cos, radians, pi\n'), ((13947, 13979), 'os.path.isfile', 'os.path.isfile', (['calibration_file'], {}), '(calibration_file)\n', (13961, 13979), False, 'import os\n'), ((19008, 19322), 'cv2.stereoRectify', 'cv2.stereoRectify', ([], {'cameraMatrix1': 'cameraMatrix_left', 'cameraMatrix2': 'cameraMatrix_right', 'distCoeffs1': 'distCoeffs_left', 'distCoeffs2': 'distCoeffs_right', 'R': 'R', 'T': 'T', 'flags': 'cv2.CALIB_ZERO_DISPARITY', 'alpha': '(0)', 'imageSize': '(image_size.width, image_size.height)', 'newImageSize': '(image_size.width, image_size.height)'}), '(cameraMatrix1=cameraMatrix_left, cameraMatrix2=\n cameraMatrix_right, distCoeffs1=distCoeffs_left, distCoeffs2=\n distCoeffs_right, R=R, T=T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=0,\n imageSize=(image_size.width, image_size.height), newImageSize=(\n image_size.width, image_size.height))\n', (19025, 19322), False, 'import cv2\n'), ((13757, 13777), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (13766, 13777), False, 'import os\n'), ((14136, 14168), 'os.path.isfile', 'os.path.isfile', (['calibration_file'], {}), '(calibration_file)\n', (14150, 14168), False, 'import os\n'), ((22427, 22453), 'numpy.split', 'np.split', (['frame', '(2)'], {'axis': '(1)'}), '(frame, 2, axis=1)\n', (22435, 22453), True, 'import numpy as np\n'), ((22791, 22829), 'cv2.imshow', 'cv2.imshow', (['"""left RAW"""', 'left_image_raw'], {}), "('left RAW', left_image_raw)\n", (22801, 22829), False, 'import cv2\n'), ((22842, 22882), 'cv2.imshow', 'cv2.imshow', (['"""right RAW"""', 'right_image_raw'], {}), "('right RAW', 
right_image_raw)\n", (22852, 22882), False, 'import cv2\n'), ((30373, 30388), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (30384, 30388), False, 'import cv2\n'), ((22522, 22555), 'cv2.flip', 'cv2.flip', (['left_right_image[0]', '(-1)'], {}), '(left_right_image[0], -1)\n', (22530, 22555), False, 'import cv2\n'), ((22590, 22623), 'cv2.flip', 'cv2.flip', (['left_right_image[1]', '(-1)'], {}), '(left_right_image[1], -1)\n', (22598, 22623), False, 'import cv2\n'), ((22936, 23022), 'cv2.remap', 'cv2.remap', (['left_image_raw', 'map_left_x', 'map_left_y'], {'interpolation': 'cv2.INTER_LINEAR'}), '(left_image_raw, map_left_x, map_left_y, interpolation=cv2.\n INTER_LINEAR)\n', (22945, 23022), False, 'import cv2\n'), ((23047, 23136), 'cv2.remap', 'cv2.remap', (['right_image_raw', 'map_right_x', 'map_right_y'], {'interpolation': 'cv2.INTER_LINEAR'}), '(right_image_raw, map_right_x, map_right_y, interpolation=cv2.\n INTER_LINEAR)\n', (23056, 23136), False, 'import cv2\n'), ((23149, 23183), 'cv2.imshow', 'cv2.imshow', (['"""left RECT"""', 'left_rect'], {}), "('left RECT', left_rect)\n", (23159, 23183), False, 'import cv2\n'), ((23200, 23236), 'cv2.imshow', 'cv2.imshow', (['"""right RECT"""', 'right_rect'], {}), "('right RECT', right_rect)\n", (23210, 23236), False, 'import cv2\n'), ((24236, 24258), 'numpy.array', 'np.array', (['[1, 200, 70]'], {}), '([1, 200, 70])\n', (24244, 24258), True, 'import numpy as np\n'), ((24287, 24311), 'numpy.array', 'np.array', (['[15, 255, 175]'], {}), '([15, 255, 175])\n', (24295, 24311), True, 'import numpy as np\n'), ((25187, 25225), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (25199, 25225), False, 'import cv2\n'), ((25331, 25375), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask_red'}), '(image, image, mask=mask_red)\n', (25346, 25375), False, 'import cv2\n'), ((25406, 25447), 'cv2.cvtColor', 'cv2.cvtColor', (['res_red', 'cv2.COLOR_BGR2GRAY'], {}), 
'(res_red, cv2.COLOR_BGR2GRAY)\n', (25418, 25447), False, 'import cv2\n'), ((25464, 25494), 'cv2.imshow', 'cv2.imshow', (['"""res_red"""', 'res_red'], {}), "('res_red', res_red)\n", (25474, 25494), False, 'import cv2\n'), ((25511, 25549), 'cv2.imshow', 'cv2.imshow', (['"""gray_masked"""', 'gray_masked'], {}), "('gray_masked', gray_masked)\n", (25521, 25549), False, 'import cv2\n'), ((25656, 25778), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray_masked', 'cv2.HOUGH_GRADIENT'], {'minDist': '(33)', 'dp': '(1.1)', 'param1': '(130)', 'param2': '(8)', 'minRadius': '(4)', 'maxRadius': '(12)'}), '(gray_masked, cv2.HOUGH_GRADIENT, minDist=33, dp=1.1,\n param1=130, param2=8, minRadius=4, maxRadius=12)\n', (25672, 25778), False, 'import cv2\n'), ((23303, 23347), 'cv2.imwrite', 'cv2.imwrite', (['"""left_raw2.jpg"""', 'left_image_raw'], {}), "('left_raw2.jpg', left_image_raw)\n", (23314, 23347), False, 'import cv2\n'), ((23368, 23414), 'cv2.imwrite', 'cv2.imwrite', (['"""right_raw2.jpg"""', 'right_image_raw'], {}), "('right_raw2.jpg', right_image_raw)\n", (23379, 23414), False, 'import cv2\n'), ((23435, 23475), 'cv2.imwrite', 'cv2.imwrite', (['"""left_rect2.jpg"""', 'left_rect'], {}), "('left_rect2.jpg', left_rect)\n", (23446, 23475), False, 'import cv2\n'), ((23496, 23538), 'cv2.imwrite', 'cv2.imwrite', (['"""right_rect2.jpg"""', 'right_rect'], {}), "('right_rect2.jpg', right_rect)\n", (23507, 23538), False, 'import cv2\n'), ((25270, 25286), 'numpy.array', 'np.array', (['bottom'], {}), '(bottom)\n', (25278, 25286), True, 'import numpy as np\n'), ((25288, 25303), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (25296, 25303), True, 'import numpy as np\n'), ((28334, 28421), 'cv2.line', 'cv2.line', (['output', '(origin_coordinates[0], origin_coordinates[1])', '(x1, y1)', '(255)', '(2)'], {}), '(output, (origin_coordinates[0], origin_coordinates[1]), (x1, y1), \n 255, 2)\n', (28342, 28421), False, 'import cv2\n'), ((33453, 33508), 'reachy.parts.RightArm', 
'parts.RightArm', ([], {'io': '"""/dev/ttyUSB*"""', 'hand': '"""force_gripper"""'}), "(io='/dev/ttyUSB*', hand='force_gripper')\n", (33467, 33508), False, 'from reachy import Reachy, parts\n'), ((33594, 33648), 'reachy.parts.LeftArm', 'parts.LeftArm', ([], {'io': '"""/dev/ttyUSB*"""', 'hand': '"""force_gripper"""'}), "(io='/dev/ttyUSB*', hand='force_gripper')\n", (33607, 33648), False, 'from reachy import Reachy, parts\n'), ((13367, 13381), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13379, 13381), False, 'from datetime import datetime\n'), ((13609, 13623), 'math.cos', 'cos', (['theta_rad'], {}), '(theta_rad)\n', (13612, 13623), False, 'from math import sin, cos, radians, pi\n'), ((13646, 13660), 'math.sin', 'sin', (['theta_rad'], {}), '(theta_rad)\n', (13649, 13660), False, 'from math import sin, cos, radians, pi\n'), ((21693, 21736), 'threading.Thread', 'Thread', ([], {'target': 'play_song', 'args': '(next_item,)'}), '(target=play_song, args=(next_item,))\n', (21699, 21736), False, 'from threading import Thread\n'), ((28819, 28891), 'cv2.rectangle', 'cv2.rectangle', (['output', '(x - 5, y - 5)', '(x + 5, y + 5)', '(0, 255, 255)', '(-1)'], {}), '(output, (x - 5, y - 5), (x + 5, y + 5), (0, 255, 255), -1)\n', (28832, 28891), False, 'import cv2\n'), ((29251, 29323), 'cv2.line', 'cv2.line', (['output', '(arm_l1[0], arm_l1[1])', '(arm_l2[0], arm_l2[1])', '(255)', '(2)'], {}), '(output, (arm_l1[0], arm_l1[1]), (arm_l2[0], arm_l2[1]), 255, 2)\n', (29259, 29323), False, 'import cv2\n'), ((29348, 29420), 'cv2.line', 'cv2.line', (['output', '(arm_r1[0], arm_r1[1])', '(arm_r2[0], arm_r2[1])', '(255)', '(2)'], {}), '(output, (arm_r1[0], arm_r1[1]), (arm_r2[0], arm_r2[1]), 255, 2)\n', (29356, 29420), False, 'import cv2\n'), ((29446, 29551), 'cv2.putText', 'cv2.putText', (['output', '"""L1"""', '(arm_l1[0], arm_l1[1])', 'font', 'fontScale', 'color_arm', 'thickness', 'cv2.LINE_AA'], {}), "(output, 'L1', (arm_l1[0], arm_l1[1]), font, fontScale,\n color_arm, 
thickness, cv2.LINE_AA)\n", (29457, 29551), False, 'import cv2\n'), ((29608, 29713), 'cv2.putText', 'cv2.putText', (['output', '"""L2"""', '(arm_l2[0], arm_l2[1])', 'font', 'fontScale', 'color_arm', 'thickness', 'cv2.LINE_AA'], {}), "(output, 'L2', (arm_l2[0], arm_l2[1]), font, fontScale,\n color_arm, thickness, cv2.LINE_AA)\n", (29619, 29713), False, 'import cv2\n'), ((29770, 29875), 'cv2.putText', 'cv2.putText', (['output', '"""R1"""', '(arm_r1[0], arm_r1[1])', 'font', 'fontScale', 'color_arm', 'thickness', 'cv2.LINE_AA'], {}), "(output, 'R1', (arm_r1[0], arm_r1[1]), font, fontScale,\n color_arm, thickness, cv2.LINE_AA)\n", (29781, 29875), False, 'import cv2\n'), ((29932, 30037), 'cv2.putText', 'cv2.putText', (['output', '"""R2"""', '(arm_r2[0], arm_r2[1])', 'font', 'fontScale', 'color_arm', 'thickness', 'cv2.LINE_AA'], {}), "(output, 'R2', (arm_r2[0], arm_r2[1]), font, fontScale,\n color_arm, thickness, cv2.LINE_AA)\n", (29943, 30037), False, 'import cv2\n'), ((30290, 30316), 'numpy.hstack', 'np.hstack', (['[image, output]'], {}), '([image, output])\n', (30299, 30316), True, 'import numpy as np\n'), ((22089, 22139), 'threading.Thread', 'Thread', ([], {'target': 'select_pattern', 'args': '([next_item],)'}), '(target=select_pattern, args=([next_item],))\n', (22095, 22139), False, 'from threading import Thread\n'), ((26234, 26257), 'numpy.round', 'np.round', (['circles[0, :]'], {}), '(circles[0, :])\n', (26242, 26257), True, 'import numpy as np\n'), ((27074, 27119), 'cv2.circle', 'cv2.circle', (['output', '(x, y)', 'r', '(0, 255, 0)', '(4)'], {}), '(output, (x, y), r, (0, 255, 0), 4)\n', (27084, 27119), False, 'import cv2\n'), ((27148, 27220), 'cv2.rectangle', 'cv2.rectangle', (['output', '(x - 5, y - 5)', '(x + 5, y + 5)', '(0, 128, 255)', '(-1)'], {}), '(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)\n', (27161, 27220), False, 'import cv2\n')] |
"""Testing things."""
import os
import shutil
import uuid
import difflib
import importlib
import contextlib
import warnings
import unittest
import numpy as np
import pandas as pd
import threading
import psutil
import copy
import pprint
from yggdrasil.config import ygg_cfg, cfg_logging
from yggdrasil import tools, backwards, platform, units
from yggdrasil.communication import cleanup_comms
from yggdrasil.components import import_component
# Test data
# Absolute paths to static data files shipped with the tests, keyed by kind.
data_dir = os.path.join(os.path.dirname(__file__), 'data')
data_list = [
    ('txt', 'ascii_file.txt'),
    ('table', 'ascii_table.txt')]
data = {k: os.path.join(data_dir, v) for k, v in data_list}
# Test scripts
# Model scripts for each supported language, keyed by language/driver name.
script_dir = os.path.join(os.path.dirname(__file__), 'scripts')
script_list = [
    ('c', ['gcc_model.c', 'hellofunc.c']),
    ('c++', ['gcc_model.cpp', 'hellofunc.c']),
    ('make', 'gcc_model'),
    ('cmake', 'gcc_model'),
    ('matlab', 'matlab_model.m'),
    ('matlab_error', 'matlab_error_model.m'),
    ('python', 'python_model.py'),
    ('error', 'error_model.py'),
    ('lpy', 'lpy_model.lpy'),
    ('r', 'r_model.R')]
scripts = {}
for k, v in script_list:
    # An entry may name a single file or a list of files; expand each
    # to a full path under script_dir.
    if isinstance(v, list):
        scripts[k] = [os.path.join(script_dir, iv) for iv in v]
    else:
        scripts[k] = os.path.join(script_dir, v)
# scripts = {k: os.path.join(script_dir, v) for k, v in script_list}
if platform._is_win:  # pragma: windows
    scripts['executable'] = ['timeout', '0']
else:
    # NOTE(review): 0.1 is a float here while the windows branch uses the
    # string '0'; confirm consumers of scripts['executable'] coerce args.
    scripts['executable'] = ['sleep', 0.1]
# Test yamls
# YAML model specifications matching the scripts above, keyed by language.
yaml_dir = os.path.join(os.path.dirname(__file__), 'yamls')
yaml_list = [
    ('c', 'gcc_model.yml'),
    ('cpp', 'gpp_model.yml'),
    ('make', 'make_model.yml'),
    ('cmake', 'cmake_model.yml'),
    ('matlab', 'matlab_model.yml'),
    ('python', 'python_model.yml'),
    ('error', 'error_model.yml'),
    ('lpy', 'lpy_model.yml')]
yamls = {k: os.path.join(yaml_dir, v) for k, v in yaml_list}
# Makefile
# Copy the platform-specific Makefile into place. This runs at import
# time, so importing this module has a filesystem side effect.
if platform._is_win:  # pragma: windows
    makefile0 = os.path.join(script_dir, "Makefile_windows")
else:
    makefile0 = os.path.join(script_dir, "Makefile_linux")
shutil.copy(makefile0, os.path.join(script_dir, "Makefile"))
# Flag for enabling tests that take a long time or are for extra examples
enable_long_tests = tools.check_environ_bool("YGG_ENABLE_LONG_TESTS")
skip_extra_examples = tools.check_environ_bool("YGG_SKIP_EXTRA_EXAMPLES")
# Wrapped class to allow handling of arrays
class WrappedTestCase(unittest.TestCase):  # pragma: no cover
    r"""unittest.TestCase subclass whose equality assertions also handle
    numpy arrays, pandas DataFrames, and objects carrying units (as
    recognized by yggdrasil's units module), including when such objects
    are nested inside lists, tuples, or dictionaries."""
    def __init__(self, *args, **kwargs):
        r"""Register type-specific equality methods with unittest's
        dispatch (used by assertEqual when both operands share a type)."""
        super(WrappedTestCase, self).__init__(*args, **kwargs)
        self.addTypeEqualityFunc(units._unit_quantity, 'assertUnitsEqual')
        self.addTypeEqualityFunc(units._unit_array, 'assertUnitsEqual')
        self.addTypeEqualityFunc(np.ndarray, 'assertArrayEqual')
        self.addTypeEqualityFunc(pd.DataFrame, 'assertArrayEqual')
    def has_units(self, obj):
        r"""Return True if obj (or any element of a list/tuple, or any
        value of a dict) carries units; otherwise False."""
        if isinstance(obj, (list, tuple)):
            for x in obj:
                if self.has_units(x):
                    return True
        elif isinstance(obj, dict):
            for x in obj.values():
                if self.has_units(x):
                    return True
        else:
            return units.has_units(obj)
        return False
    def _getAssertEqualityFunc(self, first, second):
        r"""Select the comparison method for a pair of operands,
        overriding unittest's exact-type dispatch for two cases."""
        # Allow comparison of tuple to list and units to anything
        if (type(first), type(second)) in [(list, tuple), (tuple, list)]:
            return self.assertSequenceEqual
        elif units.has_units(first) or units.has_units(second):
            return self.assertUnitsEqual
        return super(WrappedTestCase, self)._getAssertEqualityFunc(first, second)
    def assertEqual(self, first, second, msg=None, dont_nest=False):
        r"""Fail if the two objects are unequal as determined by the '=='
        operator."""
        if (not dont_nest):
            # Do nested evaluation for objects containing units
            if (self.has_units(first) or self.has_units(second)):
                self.assertEqualNested(first, second, msg=msg)
                return
        try:
            super(WrappedTestCase, self).assertEqual(first, second, msg=msg)
        except ValueError:
            # e.g. arrays raise ValueError on truth-testing; fall back to
            # element-wise nested comparison.
            if dont_nest:
                raise
            self.assertEqualNested(first, second, msg=msg)
    def assertEqualNested(self, first, second, msg=None):
        r"""Fail if the two objects are unequal as determined by descending
        recursively into the object if it is a list, tuple, or dictionary."""
        if isinstance(first, list):
            self.assertSequenceEqualNested(first, second, msg=msg, seq_type=list)
        elif isinstance(first, tuple):
            self.assertSequenceEqualNested(first, second, msg=msg, seq_type=tuple)
        elif isinstance(first, dict):
            self.assertDictEqualNested(first, second, msg=msg)
        else:
            # dont_nest=True prevents infinite recursion for leaf values.
            self.assertEqual(first, second, msg=msg, dont_nest=True)
    def assertSequenceEqualNested(self, seq1, seq2, msg=None, seq_type=None):
        r"""Element-wise nested comparison of two sequences, building a
        failure message in the same style as unittest's
        assertSequenceEqual. Elements are compared via assertEqualNested
        so nested containers and unit-bearing values are handled."""
        if seq_type is not None:
            seq_type_name = seq_type.__name__
            # Currently it makes more sense to allow equality between lists and
            # tuples
            # if not isinstance(seq1, seq_type):
            #     raise self.failureException(
            #         'First sequence is not a %s: %s'
            #         % (seq_type_name, unittest.util.safe_repr(seq1)))
            # if not isinstance(seq2, seq_type):
            #     raise self.failureException(
            #         'Second sequence is not a %s: %s'
            #         % (seq_type_name, unittest.util.safe_repr(seq2)))
        else:
            seq_type_name = "sequence"
        differing = None
        try:
            len1 = len(seq1)
        except (TypeError, NotImplementedError):
            differing = 'First %s has no length.    Non-sequence?' % (
                seq_type_name)
        if differing is None:
            try:
                len2 = len(seq2)
            except (TypeError, NotImplementedError):
                differing = 'Second %s has no length.    Non-sequence?' % (
                    seq_type_name)
        if differing is None:
            seq1_repr = unittest.util.safe_repr(seq1)
            seq2_repr = unittest.util.safe_repr(seq2)
            if len(seq1_repr) > 30:
                seq1_repr = seq1_repr[:30] + '...'
            if len(seq2_repr) > 30:
                seq2_repr = seq2_repr[:30] + '...'
            elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
            differing = '%ss differ: %s != %s\n' % elements
            for i in range(min(len1, len2)):
                try:
                    item1 = seq1[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of first %s\n'
                                  % (i, seq_type_name))
                    break
                try:
                    item2 = seq2[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of second %s\n'
                                  % (i, seq_type_name))
                    break
                # A mismatching element raises here with the accumulated
                # context message.
                self.assertEqualNested(
                    item1, item2,
                    msg=differing + '\nFirst differing element at index %d' % i)
            else:
                # All shared elements matched; equal lengths means the
                # sequences are equal.
                if (len1 == len2):
                    return
            if len1 > len2:
                differing += ('\nFirst %s contains %d additional '
                              'elements.\n' % (seq_type_name, len1 - len2))
                try:
                    differing += ('First extra element %d:\n%s\n' %
                                  (len2, unittest.util.safe_repr(seq1[len2])))
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('Unable to index element %d '
                                  'of first %s\n' % (len2, seq_type_name))
            elif len1 < len2:
                differing += ('\nSecond %s contains %d additional '
                              'elements.\n' % (seq_type_name, len2 - len1))
                try:
                    differing += ('First extra element %d:\n%s\n' %
                                  (len1, unittest.util.safe_repr(seq2[len1])))
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('Unable to index element %d '
                                  'of second %s\n' % (len1, seq_type_name))
        standardMsg = differing
        diffMsg = '\n' + '\n'.join(
            difflib.ndiff(pprint.pformat(seq1).splitlines(),
                          pprint.pformat(seq2).splitlines()))
        standardMsg = self._truncateMessage(standardMsg, diffMsg)
        msg = self._formatMessage(msg, standardMsg)
        self.fail(msg)
    def assertDictEqualNested(self, d1, d2, msg=None):
        r"""Compare two dictionaries key-by-key using assertEqualNested
        for the values (keys must match exactly)."""
        self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
        self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
        self.assertEqual(sorted(list(d1.keys())), sorted(list(d2.keys())),
                         'Dictionaries do not have the same keys')
        for k in d1.keys():
            standardMsg = 'Value for key %s differs' % k
            msg_k = self._formatMessage(msg, standardMsg)
            self.assertEqual(d1[k], d2[k], msg=msg_k)
    def assertUnitsEqual(self, first, second, msg=None):
        r"""Assertion for equality in case of objects with units."""
        if units.has_units(first) and units.has_units(second):
            # Convert to a common unit before comparing magnitudes.
            first = units.convert_to(first, units.get_units(second))
        self.assertEqual(units.get_data(first), units.get_data(second))
    def assertArrayEqual(self, first, second, msg=None):
        r"""Assertion for equality in case of arrays."""
        try:
            np.testing.assert_array_equal(first, second)
        except AssertionError as e:
            # Re-raise as a unittest failure with the formatted message.
            standardMsg = str(e)
            msg = self._formatMessage(msg, standardMsg)
            raise self.failureException(msg)
# Build the module-level ``ut`` accessor that the assert_* helpers below
# delegate to. On Python 3 it is a plain WrappedTestCase instance; on
# Python 2 it is a static accessor class whose metaclass forwards any
# attribute lookup to a dummy instance.
if backwards.PY2:  # pragma: Python 2
    # Dummy TestCase instance, so we can initialize an instance
    # and access the assert instance methods
    class DummyTestCase(WrappedTestCase):  # pragma: no cover
        def __init__(self):
            # '_dummy' is the methodName passed to TestCase.__init__.
            super(DummyTestCase, self).__init__('_dummy')
        def _dummy(self):
            pass
    # A metaclass that makes __getattr__ static
    class AssertsAccessorType(type):  # pragma: no cover
        dummy = DummyTestCase()
        def __getattr__(cls, key):
            return getattr(AssertsAccessor.dummy, key)
    # The actual accessor, a static class, that redirect the asserts
    class AssertsAccessor(object):  # pragma: no cover
        __metaclass__ = AssertsAccessorType
    ut = AssertsAccessor
else:  # pragma: Python 3
    ut = WrappedTestCase()
# Decorators for conditionally skipping tests based on the environment
# flags computed above.
long_running = unittest.skipIf(not enable_long_tests, "Long tests not enabled.")
extra_example = unittest.skipIf(skip_extra_examples, "Extra examples not enabled.")
# def long_running(func):
#     r"""Decorator for marking long tests that should be skipped if
#     YGG_ENABLE_LONG_TESTS is set.
#     Args:
#         func (callable): Test function or method.
#     """
#     return unittest.skipIf(not enable_long_tests, "Long tests not enabled.")(func)
def assert_raises(exception, *args, **kwargs):
    r"""Check that a call raises the expected exception.

    Delegates to the module-level unittest accessor, so it can be used
    either with a callable (``assert_raises(Err, func, *args)``) or as
    a context manager (``with assert_raises(Err):``) when no callable
    is supplied.

    Args:
        exception (Exception): Exception class that is expected.
        *args: Optional callable followed by its positional arguments.
        **kwargs: Keyword arguments forwarded to the callable.

    Raises:
        AssertionError: If the expected exception is not raised.
    """
    return ut.assertRaises(exception, *args, **kwargs)
@contextlib.contextmanager
def assert_warns(warning, *args, **kwargs):
    r"""Assert that a call (or context) raises an exception.
    Args:
        warning (Warning): Warning class that should be raised.
        callable (function, class, optional): Function that should raise
            the warning. If not provided, a context manager is returned.
        *args: Additional arguments are passed to the callable.
        **kwargs: Additional keyword arguments are passed to the callable.
    Raises:
        AssertionError: If the correct warning is not caught.
    """
    if backwards.PY2:  # pragma: Python 2
        # Python 2 has no assertWarns, so capture warnings manually and
        # verify their categories after the block/callable completes.
        if args and args[0] is None:  # pragma: debug
            warnings.warn("callable is None",
                          DeprecationWarning, 3)
            args = ()
        with warnings.catch_warnings(record=True) as w:
            # "always" ensures duplicate warnings are not suppressed.
            warnings.simplefilter("always")
            try:
                if not args:
                    yield w
                else:  # pragma: debug
                    callable_obj = args[0]
                    args = args[1:]
                    callable_obj(*args, **kwargs)
            finally:
                # At least one warning of the expected category must have
                # been emitted inside the context.
                assert(len(w) >= 1)
                for iw in w:
                    assert(issubclass(iw.category, warning))
    else:  # pragma: Python 3
        yield ut.assertWarns(warning, *args, **kwargs)
def assert_equal(x, y):
    r"""Verify that two objects compare as equivalent, delegating to the
    module-level unittest accessor (which handles arrays and units).

    Args:
        x (object): First object to compare.
        y (object): Second object to compare.

    Raises:
        AssertionError: If the two objects are not equivalent.
    """
    ut.assertEqual(x, y)
def assert_not_equal(x, y):
    r"""Verify that two objects do NOT compare as equivalent, delegating
    to the module-level unittest accessor.

    Args:
        x (object): First object to compare.
        y (object): Second object to compare.

    Raises:
        AssertionError: If the two objects are equivalent.
    """
    ut.assertNotEqual(x, y)
class YggTestBase(unittest.TestCase):
    r"""Wrapper for unittest.TestCase that allows use of setup and
    teardown methods along with description prefix.
    Args:
        description_prefix (str, optional): String to prepend docstring
            test message with. Default to empty string.
        skip_unittest (bool, optional): If True, the unittest parent
            class will not be initialized. Defaults to False.
    Attributes:
        uuid (str): Random unique identifier.
        attr_list (list): List of attributes that should be checked for after
            initialization.
        timeout (float): Maximum time in seconds for timeouts.
        sleeptime (float): Time in seconds that should be waited for sleeps.
    """
    attr_list = list()
    def __init__(self, *args, **kwargs):
        # Default prefix is the bare class name extracted from str(type).
        self._description_prefix = kwargs.pop('description_prefix',
                                              str(self.__class__).split("'")[1])
        self.uuid = str(uuid.uuid4())
        self.timeout = 10.0
        self.sleeptime = 0.01
        # Deep copy so per-instance mutation does not alter the class list.
        self.attr_list = copy.deepcopy(self.__class__.attr_list)
        self._teardown_complete = False
        self._new_default_comm = None
        self._old_default_comm = None
        self._old_loglevel = None
        self._old_encoding = None
        self.debug_flag = False
        self._first_test = True
        skip_unittest = kwargs.pop('skip_unittest', False)
        if not skip_unittest:
            super(YggTestBase, self).__init__(*args, **kwargs)
    def assert_equal(self, x, y):
        r"""Assert that two values are equal."""
        return assert_equal(x, y)
    def assert_less_equal(self, x, y):
        r"""Assert that one value is less than or equal to another."""
        return self.assertLessEqual(x, y)
    def assert_greater(self, x, y):
        r"""Assert that one value is greater than another."""
        return self.assertGreater(x, y)
    def assert_raises(self, *args, **kwargs):
        r"""Assert that a function raises an error."""
        return self.assertRaises(*args, **kwargs)
    @property
    def comm_count(self):
        r"""int: The number of comms."""
        out = 0
        # Sum the open-comm count over every comm class that this test
        # is responsible for cleaning up.
        for k in self.cleanup_comm_classes:
            cls = import_component('comm', k)
            out += cls.comm_count()
        return out
    @property
    def fd_count(self):
        r"""int: The number of open file descriptors."""
        proc = psutil.Process()
        if platform._is_win:  # pragma: windows
            out = proc.num_handles()
        else:
            out = proc.num_fds()
        # print(proc.num_fds(), proc.num_threads(), len(proc.connections("all")),
        #       len(proc.open_files()))
        return out
    @property
    def thread_count(self):
        r"""int: The number of active threads."""
        return threading.active_count()
    def set_utf8_encoding(self):
        r"""Set the encoding to utf-8 if it is not already."""
        old_lang = os.environ.get('LANG', '')
        if 'UTF-8' not in old_lang:  # pragma: debug
            # Remember the previous value so reset_encoding can restore it.
            self._old_encoding = old_lang
            os.environ['LANG'] = 'en_US.UTF-8'
    def reset_encoding(self):
        r"""Reset the encoding to the original value before the test."""
        if self._old_encoding is not None:  # pragma: debug
            os.environ['LANG'] = self._old_encoding
            self._old_encoding = None
    def debug_log(self):  # pragma: debug
        r"""Turn on debugging."""
        self._old_loglevel = ygg_cfg.get('debug', 'ygg')
        ygg_cfg.set('debug', 'ygg', 'DEBUG')
        cfg_logging()
    def reset_log(self):  # pragma: debug
        r"""Resetting logging to prior value."""
        if self._old_loglevel is not None:
            ygg_cfg.set('debug', 'ygg', self._old_loglevel)
            cfg_logging()
            self._old_loglevel = None
    def set_default_comm(self, default_comm=None):
        r"""Set the default comm."""
        self._old_default_comm = os.environ.get('YGG_DEFAULT_COMM', None)
        if default_comm is None:
            default_comm = self._new_default_comm
        if default_comm is not None:
            from yggdrasil.communication.DefaultComm import DefaultComm
            os.environ['YGG_DEFAULT_COMM'] = default_comm
            DefaultComm._reset_alias()
    def reset_default_comm(self):
        r"""Reset the default comm to the original value."""
        if self._old_default_comm is None:
            if 'YGG_DEFAULT_COMM' in os.environ:
                del os.environ['YGG_DEFAULT_COMM']
        else:  # pragma: debug
            os.environ['YGG_DEFAULT_COMM'] = self._old_default_comm
    def setUp(self, *args, **kwargs):
        r"""unittest hook; defers to the overridable setup method."""
        self.setup(*args, **kwargs)
    def tearDown(self, *args, **kwargs):
        r"""unittest hook; defers to the overridable teardown method."""
        self.teardown(*args, **kwargs)
    def setup(self, nprev_comm=None, nprev_thread=None, nprev_fd=None):
        r"""Record the number of open comms, threads, and file descriptors.
        Args:
            nprev_comm (int, optional): Number of previous comm channels.
                If not provided, it is determined to be the present number of
                default comms.
            nprev_thread (int, optional): Number of previous threads.
                If not provided, it is determined to be the present number of
                threads.
            nprev_fd (int, optional): Number of previous open file descriptors.
                If not provided, it is determined to be the present number of
                open file descriptors.
        """
        self.set_default_comm()
        self.set_utf8_encoding()
        if self.debug_flag:  # pragma: debug
            self.debug_log()
        if nprev_comm is None:
            nprev_comm = self.comm_count
        if nprev_thread is None:
            nprev_thread = self.thread_count
        if nprev_fd is None:
            nprev_fd = self.fd_count
        self.nprev_comm = nprev_comm
        self.nprev_thread = nprev_thread
        self.nprev_fd = nprev_fd
    def teardown(self, ncurr_comm=None, ncurr_thread=None, ncurr_fd=None):
        r"""Check the number of open comms, threads, and file descriptors.
        Args:
            ncurr_comm (int, optional): Number of current comms. If not
                provided, it is determined to be the present number of comms.
            ncurr_thread (int, optional): Number of current threads. If not
                provided, it is determined to be the present number of threads.
            ncurr_fd (int, optional): Number of current open file descriptors.
                If not provided, it is determined to be the present number of
                open file descriptors.
        """
        self._teardown_complete = True
        # Dummy YggClass used only for its timeout/sleep machinery.
        x = tools.YggClass('dummy', timeout=self.timeout, sleeptime=self.sleeptime)
        # Give comms time to close
        if ncurr_comm is None:
            Tout = x.start_timeout()
            while ((not Tout.is_out)
                   and (self.comm_count > self.nprev_comm)):  # pragma: debug
                x.sleep()
            x.stop_timeout()
            ncurr_comm = self.comm_count
        self.assert_less_equal(ncurr_comm, self.nprev_comm)
        # Give threads time to close
        if ncurr_thread is None:
            Tout = x.start_timeout()
            while ((not Tout.is_out)
                   and (self.thread_count > self.nprev_thread)):  # pragma: debug
                x.sleep()
            x.stop_timeout()
            ncurr_thread = self.thread_count
        self.assert_less_equal(ncurr_thread, self.nprev_thread)
        # Give files time to close
        self.cleanup_comms()
        if ncurr_fd is None:
            # fd comparisons are skipped on the first test of a session
            # (import-time descriptors would be counted as leaks).
            if not self._first_test:
                Tout = x.start_timeout()
                while ((not Tout.is_out)
                       and (self.fd_count > self.nprev_fd)):  # pragma: debug
                    x.sleep()
                x.stop_timeout()
            ncurr_fd = self.fd_count
        fds_created = ncurr_fd - self.nprev_fd
        # print("FDS CREATED: %d" % fds_created)
        if not self._first_test:
            self.assert_equal(fds_created, 0)
        # Reset the log, encoding, and default comm
        self.reset_log()
        self.reset_encoding()
        self.reset_default_comm()
        self._first_test = False
    @property
    def cleanup_comm_classes(self):
        r"""list: Comm classes that should be cleaned up following the test."""
        return [tools.get_default_comm()]
    def cleanup_comms(self):
        r"""Cleanup all comms."""
        for k in self.cleanup_comm_classes:
            cleanup_comms(k)
    @property
    def description_prefix(self):
        r"""String prefix to prepend docstr test message with."""
        return self._description_prefix
    def shortDescription(self):
        r"""Prefix first line of doc string."""
        out = super(YggTestBase, self).shortDescription()
        if self.description_prefix:
            out = '%s: %s' % (self.description_prefix, out)
        return out
    def check_file_exists(self, fname):
        r"""Check that a file exists.
        Args:
            fname (str): Full path to the file that should be checked.
        """
        # NOTE(review): start_timeout/sleep/stop_timeout are not defined on
        # this class; presumably subclasses mix in tools.YggClass — confirm.
        Tout = self.start_timeout(2)
        while (not Tout.is_out) and (not os.path.isfile(fname)):  # pragma: debug
            self.sleep()
        self.stop_timeout()
        if not os.path.isfile(fname):  # pragma: debug
            raise AssertionError("File '%s' dosn't exist." % fname)
    def check_file_size(self, fname, fsize):
        r"""Check that file is the correct size.
        Args:
            fname (str): Full path to the file that should be checked.
            fsize (int): Size that the file should be in bytes.
        """
        result = None
        # A string fsize is treated as the expected contents; compare
        # against its length instead.
        if isinstance(fsize, backwards.string_types):
            result = fsize
            fsize = len(result)
        Tout = self.start_timeout(2)
        if (os.stat(fname).st_size != fsize):  # pragma: debug
            print('file sizes not equal', os.stat(fname).st_size, fsize)
        while ((not Tout.is_out)
               and (os.stat(fname).st_size != fsize)):  # pragma: debug
            self.sleep()
        self.stop_timeout()
        if os.stat(fname).st_size != fsize:  # pragma: debug
            if (result is not None) and (fsize < 200):
                print("Expected:")
                print(result)
                print("Actual:")
                with open(fname, 'r') as fd:
                    print(fd.read())
            raise AssertionError("File size (%d), dosn't match expected size (%d)."
                                 % (os.stat(fname).st_size, fsize))
    def check_file_contents(self, fname, result):
        r"""Check that the contents of a file are correct.
        Args:
            fname (str): Full path to the file that should be checked.
            result (str): Contents of the file.
        """
        with open(fname, 'r') as fd:
            ocont = fd.read()
        if ocont != result:  # pragma: debug
            odiff = '\n'.join(list(difflib.Differ().compare(ocont, result)))
            raise AssertionError(('File contents do not match expected result.'
                                  'Diff:\n%s') % odiff)
    def check_file(self, fname, result):
        r"""Check that a file exists, is the correct size, and has the correct
        contents.
        Args:
            fname (str): Full path to the file that should be checked.
            result (str): Contents of the file.
        """
        self.check_file_exists(fname)
        self.check_file_size(fname, len(result))
        self.check_file_contents(fname, result)
class YggTestClass(YggTestBase):
    r"""Test class for a YggClass."""
    # Keyword arguments forwarded to get_testing_options.
    testing_option_kws = {}
    # Absolute module path of the class under test (set by subclasses).
    _mod = None
    # Name of the class under test within _mod (set by subclasses).
    _cls = None
    # If True, no instance is created during setup.
    skip_init = False
    def __init__(self, *args, **kwargs):
        # Per-instance constructor arguments for the class under test.
        self._inst_args = list()
        self._inst_kwargs = dict()
        self._extra_instances = []
        super(YggTestClass, self).__init__(*args, **kwargs)
    def setup(self, *args, **kwargs):
        r"""Create an instance of the class."""
        super(YggTestClass, self).setup(*args, **kwargs)
        if not self.skip_init:
            self._instance = self.create_instance()
    def teardown(self, *args, **kwargs):
        r"""Remove the instance."""
        self.clear_instance()
        super(YggTestClass, self).teardown(*args, **kwargs)
        # Dispose of any extra instances created via create_error_instance.
        for i in range(len(self._extra_instances)):
            inst = self._extra_instances[i]
            self._extra_instances[i] = None
            self.remove_instance(inst)
            del inst
        self._extra_instances = []
    @property
    def description_prefix(self):
        r"""String prefix to prepend docstr test message with."""
        if self.cls is None:
            return super(YggTestClass, self).description_prefix
        else:
            return self.cls
    @property
    def cls(self):
        r"""str: Class to be tested."""
        return self._cls
    @property
    def mod(self):
        r"""str: Absolute name of module containing class to be tested."""
        return self._mod
    @property
    def inst_args(self):
        r"""list: Arguments for creating a class instance."""
        return self._inst_args
    @property
    def inst_kwargs(self):
        r"""dict: Keyword arguments for creating a class instance."""
        out = self._inst_kwargs
        return out
    @property
    def import_cls(self):
        r"""Import the tested class from its module"""
        if self.mod is None:
            raise Exception("No module registered.")
        if self.cls is None:
            raise Exception("No class registered.")
        mod = importlib.import_module(self.mod)
        cls = getattr(mod, self.cls)
        return cls
    def get_options(self):
        r"""Get testing options."""
        if self.mod is None:  # pragma: debug
            return {}
        return self.import_cls.get_testing_options(**self.testing_option_kws)
    @property
    def testing_options(self):
        r"""dict: Testing options."""
        # Computed lazily and cached on first access.
        if getattr(self, '_testing_options', None) is None:
            self._testing_options = self.get_options()
        return self._testing_options
    @property
    def instance(self):
        r"""object: Instance of the test driver."""
        if self._teardown_complete:
            raise RuntimeError("Instance referenced after teardown.")
        if self.skip_init:  # pragma: debug
            raise RuntimeError("skip_init is True, so instance cannot be used.")
        if not hasattr(self, '_instance'):  # pragma: debug
            self._instance = self.create_instance()
        return self._instance
    def create_error_instance(self, inst_class=None, args=None, kwargs=None,
                              error_class=None, error_on_init=False):  # pragma: no cover
        r"""Create a new instance of the class that is wrapped in ErrorClass."""
        if inst_class is None:
            inst_class = self.import_cls
        if args is None:
            args = self.inst_args
        if kwargs is None:
            kwargs = self.inst_kwargs
        if error_class is None:
            error_class = ErrorClass
        if error_class == ErrorClass:
            # This could be a normal class that contains error classes
            # NOTE(review): args may alias self.inst_args here, so this
            # insert mutates the stored list — confirm this is intended.
            args.insert(0, inst_class)
            kwargs['error_on_init'] = error_on_init
        error_kwargs = dict(inst_class=error_class, args=args, kwargs=kwargs)
        if error_on_init:
            self.assert_raises(MagicTestError, self.create_instance, **error_kwargs)
        else:
            out = self.create_instance(**error_kwargs)
            self._extra_instances.append(out)
            return out
    def create_instance(self, inst_class=None, args=None, kwargs=None):
        r"""Create a new instance of the class."""
        if inst_class is None:
            inst_class = self.import_cls
        if args is None:
            args = self.inst_args
        if kwargs is None:
            kwargs = self.inst_kwargs
        inst = inst_class(*args, **kwargs)
        return inst
    def remove_instance(self, inst):
        r"""Remove an instance of the class."""
        pass
    def clear_instance(self):
        r"""Clear the instance."""
        if hasattr(self, '_instance'):
            inst = self._instance
            self._instance = None
            self.remove_instance(inst)
            delattr(self, '_instance')
class IOInfo(object):
    r"""Mixin collecting attributes that describe formatted I/O streams
    (field labels/units stored as bytes, plus table formatting bytes)."""
    def __init__(self):
        # Human-readable labels/units; converted to bytes below.
        str_names = ['name', 'count', 'size']
        str_units = ['n/a', 'umol', 'cm']
        self.field_names = str_names
        self.field_units = str_units
        self.nfields = len(str_names)
        self.comment = b'# '
        self.delimiter = b'\t'
        self.newline = b'\n'
        self.field_names = [backwards.as_bytes(name) for name in str_names]
        self.field_units = [backwards.as_bytes(unit) for unit in str_units]
class YggTestClassInfo(YggTestClass, IOInfo):
    r"""Test class for a YggClass with IOInfo available."""
    def __init__(self, *args, **kwargs):
        super(YggTestClassInfo, self).__init__(*args, **kwargs)
        # IOInfo is initialized explicitly because YggTestClass's __init__
        # chain does not call it.
        IOInfo.__init__(self)
class MagicTestError(Exception):
    r"""Exception raised deliberately by errored test doubles so tests can
    distinguish intentional failures from real ones."""
def ErrorClass(base_class, *args, **kwargs):
    r"""Build an instance of a subclass of ``base_class`` whose methods can
    be swapped for error-raising or no-op stand-ins.

    Args:
        base_class (class): Class to derive the errored variant from.
        *args: Additional arguments are passed to the class constructor.
        **kwargs: Additional keyword arguments are passed to the class
            constructor.
    """
    class ErrorClass(base_class):
        r"""Subclass whose methods can be temporarily replaced with
        stand-ins that raise MagicTestError or do nothing.

        Args:
            error_on_init (bool, optional): If True, an error will be raised
                in place of the base class's __init__ method. Defaults to False.
            *args: Additional arguments are passed to the parent class.
            **kwargs: Additional keyword arguments are passed to the parent class.
        """
        _is_error_class = True
        def __init__(self, *args, **kwargs):
            fail_now = kwargs.pop('error_on_init', False)
            if fail_now:
                self.error_method()
            self._replaced_methods = dict()
            super(ErrorClass, self).__init__(*args, **kwargs)
        def empty_method(self, *args, **kwargs):
            r"""Stand-in that accepts anything and does nothing."""
            pass
        def error_method(self, *args, **kwargs):
            r"""Stand-in that always raises MagicTestError."""
            raise MagicTestError("This is a test error.")
        def getattr(self, attr):
            r"""Return the raw object stored under ``attr``, searching the
            instance dict and then each class dict along the MRO."""
            for holder in [self] + self.__class__.mro():
                try:
                    return holder.__dict__[attr]
                except KeyError:
                    continue
            raise AttributeError  # pragma: debug
        def setattr(self, attr, value):
            r"""Bind ``value`` under ``attr`` on the class itself."""
            setattr(self.__class__, attr, value)
        def replace_method(self, method_name, replacement):
            r"""Swap a method for ``replacement``, remembering the original."""
            self._replaced_methods[method_name] = self.getattr(method_name)
            self.setattr(method_name, replacement)
        def restore_method(self, method_name):
            r"""Reinstall the original implementation of a replaced method."""
            self.setattr(method_name, self._replaced_methods.pop(method_name))
        def restore_all(self):
            r"""Reinstall every method that was replaced."""
            for name in list(self._replaced_methods.keys()):
                self.restore_method(name)
        def empty_replace(self, method_name, **kwargs):
            r"""Swap a method for the do-nothing stand-in."""
            self.replace_method(method_name, self.empty_method, **kwargs)
        def error_replace(self, method_name, **kwargs):
            r"""Swap a method for the error-raising stand-in."""
            self.replace_method(method_name, self.error_method, **kwargs)
    return ErrorClass(*args, **kwargs)
# Public API for ``from yggdrasil.tests import *``.
# Fix: the previous list exported 'YggTestBaseInfo', which is not defined
# anywhere in this module, so a star import would raise AttributeError.
__all__ = ['data', 'scripts', 'yamls', 'IOInfo', 'ErrorClass',
           'YggTestBase', 'YggTestClass',
           'YggTestClassInfo']
| [
"threading.active_count",
"pprint.pformat",
"difflib.Differ",
"yggdrasil.communication.DefaultComm.DefaultComm._reset_alias",
"yggdrasil.units.has_units",
"os.path.isfile",
"yggdrasil.tools.check_environ_bool",
"unittest.util.safe_repr",
"os.path.join",
"unittest.skipIf",
"warnings.simplefilter"... | [((2237, 2286), 'yggdrasil.tools.check_environ_bool', 'tools.check_environ_bool', (['"""YGG_ENABLE_LONG_TESTS"""'], {}), "('YGG_ENABLE_LONG_TESTS')\n", (2261, 2286), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((2309, 2360), 'yggdrasil.tools.check_environ_bool', 'tools.check_environ_bool', (['"""YGG_SKIP_EXTRA_EXAMPLES"""'], {}), "('YGG_SKIP_EXTRA_EXAMPLES')\n", (2333, 2360), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((11014, 11079), 'unittest.skipIf', 'unittest.skipIf', (['(not enable_long_tests)', '"""Long tests not enabled."""'], {}), "(not enable_long_tests, 'Long tests not enabled.')\n", (11029, 11079), False, 'import unittest\n'), ((11096, 11163), 'unittest.skipIf', 'unittest.skipIf', (['skip_extra_examples', '"""Extra examples not enabled."""'], {}), "(skip_extra_examples, 'Extra examples not enabled.')\n", (11111, 11163), False, 'import unittest\n'), ((480, 505), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (495, 505), False, 'import os\n'), ((605, 630), 'os.path.join', 'os.path.join', (['data_dir', 'v'], {}), '(data_dir, v)\n', (617, 630), False, 'import os\n'), ((696, 721), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (711, 721), False, 'import os\n'), ((1531, 1556), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1546, 1556), False, 'import os\n'), ((1853, 1878), 'os.path.join', 'os.path.join', (['yaml_dir', 'v'], {}), '(yaml_dir, v)\n', (1865, 1878), False, 'import os\n'), ((1970, 2014), 'os.path.join', 'os.path.join', (['script_dir', '"""Makefile_windows"""'], {}), "(script_dir, 'Makefile_windows')\n", (1982, 2014), False, 'import os\n'), ((2037, 2079), 'os.path.join', 'os.path.join', (['script_dir', '"""Makefile_linux"""'], {}), "(script_dir, 'Makefile_linux')\n", (2049, 2079), False, 'import os\n'), ((2103, 2139), 'os.path.join', 'os.path.join', (['script_dir', '"""Makefile"""'], {}), 
"(script_dir, 'Makefile')\n", (2115, 2139), False, 'import os\n'), ((1258, 1285), 'os.path.join', 'os.path.join', (['script_dir', 'v'], {}), '(script_dir, v)\n', (1270, 1285), False, 'import os\n'), ((15143, 15182), 'copy.deepcopy', 'copy.deepcopy', (['self.__class__.attr_list'], {}), '(self.__class__.attr_list)\n', (15156, 15182), False, 'import copy\n'), ((16499, 16515), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (16513, 16515), False, 'import psutil\n'), ((16896, 16920), 'threading.active_count', 'threading.active_count', ([], {}), '()\n', (16918, 16920), False, 'import threading\n'), ((17037, 17063), 'os.environ.get', 'os.environ.get', (['"""LANG"""', '""""""'], {}), "('LANG', '')\n", (17051, 17063), False, 'import os\n'), ((17578, 17605), 'yggdrasil.config.ygg_cfg.get', 'ygg_cfg.get', (['"""debug"""', '"""ygg"""'], {}), "('debug', 'ygg')\n", (17589, 17605), False, 'from yggdrasil.config import ygg_cfg, cfg_logging\n'), ((17614, 17650), 'yggdrasil.config.ygg_cfg.set', 'ygg_cfg.set', (['"""debug"""', '"""ygg"""', '"""DEBUG"""'], {}), "('debug', 'ygg', 'DEBUG')\n", (17625, 17650), False, 'from yggdrasil.config import ygg_cfg, cfg_logging\n'), ((17659, 17672), 'yggdrasil.config.cfg_logging', 'cfg_logging', ([], {}), '()\n', (17670, 17672), False, 'from yggdrasil.config import ygg_cfg, cfg_logging\n'), ((18054, 18094), 'os.environ.get', 'os.environ.get', (['"""YGG_DEFAULT_COMM"""', 'None'], {}), "('YGG_DEFAULT_COMM', None)\n", (18068, 18094), False, 'import os\n'), ((20806, 20877), 'yggdrasil.tools.YggClass', 'tools.YggClass', (['"""dummy"""'], {'timeout': 'self.timeout', 'sleeptime': 'self.sleeptime'}), "('dummy', timeout=self.timeout, sleeptime=self.sleeptime)\n", (20820, 20877), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((27791, 27824), 'importlib.import_module', 'importlib.import_module', (['self.mod'], {}), '(self.mod)\n', (27814, 27824), False, 'import importlib\n'), ((1185, 1213), 'os.path.join', 'os.path.join', 
(['script_dir', 'iv'], {}), '(script_dir, iv)\n', (1197, 1213), False, 'import os\n'), ((6226, 6255), 'unittest.util.safe_repr', 'unittest.util.safe_repr', (['seq1'], {}), '(seq1)\n', (6249, 6255), False, 'import unittest\n'), ((6280, 6309), 'unittest.util.safe_repr', 'unittest.util.safe_repr', (['seq2'], {}), '(seq2)\n', (6303, 6309), False, 'import unittest\n'), ((9605, 9627), 'yggdrasil.units.has_units', 'units.has_units', (['first'], {}), '(first)\n', (9620, 9627), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((9632, 9655), 'yggdrasil.units.has_units', 'units.has_units', (['second'], {}), '(second)\n', (9647, 9655), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((9751, 9772), 'yggdrasil.units.get_data', 'units.get_data', (['first'], {}), '(first)\n', (9765, 9772), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((9774, 9796), 'yggdrasil.units.get_data', 'units.get_data', (['second'], {}), '(second)\n', (9788, 9796), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((9946, 9990), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['first', 'second'], {}), '(first, second)\n', (9975, 9990), True, 'import numpy as np\n'), ((12750, 12806), 'warnings.warn', 'warnings.warn', (['"""callable is None"""', 'DeprecationWarning', '(3)'], {}), "('callable is None', DeprecationWarning, 3)\n", (12763, 12806), False, 'import warnings\n'), ((12868, 12904), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (12891, 12904), False, 'import warnings\n'), ((12923, 12954), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (12944, 12954), False, 'import warnings\n'), ((15046, 15058), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (15056, 15058), False, 'import uuid\n'), ((16305, 16332), 'yggdrasil.components.import_component', 'import_component', (['"""comm"""', 'k'], {}), "('comm', k)\n", 
(16321, 16332), False, 'from yggdrasil.components import import_component\n'), ((17820, 17867), 'yggdrasil.config.ygg_cfg.set', 'ygg_cfg.set', (['"""debug"""', '"""ygg"""', 'self._old_loglevel'], {}), "('debug', 'ygg', self._old_loglevel)\n", (17831, 17867), False, 'from yggdrasil.config import ygg_cfg, cfg_logging\n'), ((17880, 17893), 'yggdrasil.config.cfg_logging', 'cfg_logging', ([], {}), '()\n', (17891, 17893), False, 'from yggdrasil.config import ygg_cfg, cfg_logging\n'), ((18357, 18383), 'yggdrasil.communication.DefaultComm.DefaultComm._reset_alias', 'DefaultComm._reset_alias', ([], {}), '()\n', (18381, 18383), False, 'from yggdrasil.communication.DefaultComm import DefaultComm\n'), ((22528, 22552), 'yggdrasil.tools.get_default_comm', 'tools.get_default_comm', ([], {}), '()\n', (22550, 22552), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((22674, 22690), 'yggdrasil.communication.cleanup_comms', 'cleanup_comms', (['k'], {}), '(k)\n', (22687, 22690), False, 'from yggdrasil.communication import cleanup_comms\n'), ((23465, 23486), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (23479, 23486), False, 'import os\n'), ((30928, 30949), 'yggdrasil.backwards.as_bytes', 'backwards.as_bytes', (['x'], {}), '(x)\n', (30946, 30949), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((31005, 31026), 'yggdrasil.backwards.as_bytes', 'backwards.as_bytes', (['x'], {}), '(x)\n', (31023, 31026), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((3196, 3216), 'yggdrasil.units.has_units', 'units.has_units', (['obj'], {}), '(obj)\n', (3211, 3216), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((3489, 3511), 'yggdrasil.units.has_units', 'units.has_units', (['first'], {}), '(first)\n', (3504, 3511), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((3515, 3538), 'yggdrasil.units.has_units', 'units.has_units', (['second'], {}), '(second)\n', (3530, 3538), False, 
'from yggdrasil import tools, backwards, platform, units\n'), ((9701, 9724), 'yggdrasil.units.get_units', 'units.get_units', (['second'], {}), '(second)\n', (9716, 9724), False, 'from yggdrasil import tools, backwards, platform, units\n'), ((23356, 23377), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (23370, 23377), False, 'import os\n'), ((24015, 24029), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (24022, 24029), False, 'import os\n'), ((24308, 24322), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (24315, 24322), False, 'import os\n'), ((24108, 24122), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (24115, 24122), False, 'import os\n'), ((24192, 24206), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (24199, 24206), False, 'import os\n'), ((7778, 7813), 'unittest.util.safe_repr', 'unittest.util.safe_repr', (['seq1[len2]'], {}), '(seq1[len2])\n', (7801, 7813), False, 'import unittest\n'), ((8669, 8689), 'pprint.pformat', 'pprint.pformat', (['seq1'], {}), '(seq1)\n', (8683, 8689), False, 'import pprint\n'), ((8730, 8750), 'pprint.pformat', 'pprint.pformat', (['seq2'], {}), '(seq2)\n', (8744, 8750), False, 'import pprint\n'), ((24713, 24727), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (24720, 24727), False, 'import os\n'), ((25149, 25165), 'difflib.Differ', 'difflib.Differ', ([], {}), '()\n', (25163, 25165), False, 'import difflib\n'), ((8328, 8363), 'unittest.util.safe_repr', 'unittest.util.safe_repr', (['seq2[len1]'], {}), '(seq2[len1])\n', (8351, 8363), False, 'import unittest\n')] |
import pybullet as p
import os
from Buggy_cloth_contact.helper import create_spheres, get_quaternion
import numpy as np
# --- configuration ---------------------------------------------------------
width = height = 720             # GUI window resolution in pixels
hz = 480                         # physics frequency: simulation steps per second
gravity = 9.8                    # gravity magnitude, applied along -z
simulation_steps = 1000          # total number of physics steps to run
contact_visual_sphere_num = 500  # upper bound on visualized contact markers

# --- pybullet session ------------------------------------------------------
# Open a GUI client with a light-blue background sized to the window above.
# (Swap p.GUI for p.DIRECT to run headless.)
_connect_opts = ('--background_color_red=0.8 --background_color_green=0.9 '
                 '--background_color_blue=1.0 --width=%d --height=%d' % (width, height))
p_id = p.connect(p.GUI, options=_connect_opts)
# The deformable world is required for soft bodies such as the cloth below.
p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD, physicsClientId=p_id)
p.setTimeStep(1.0 / hz)
p.setRealTimeSimulation(0, physicsClientId=p_id)  # step manually, not in wall-clock time
p.setGravity(0, 0, -gravity, physicsClientId=p_id)
# Camera and GUI chrome: look down toward the table area; disable mouse
# picking and the debug side panels.
p.resetDebugVisualizerCamera(cameraDistance=1.75, cameraYaw=-25, cameraPitch=-45,
                             cameraTargetPosition=[-0.2, 0, 0.4], physicsClientId=p_id)
p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0, physicsClientId=p_id)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0, physicsClientId=p_id)
# --- scene: ground plane and cloth ----------------------------------------
plane = p.loadURDF('Buggy_cloth_contact/assets/plane.urdf')
# Hide rendering while the remaining bodies are created, so the user does not
# see objects popping in one by one.
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0, physicsClientId=p_id)
# Mass-spring cloth with self-collision enabled so folds cannot pass through
# themselves; bending stiffness is zero, so only bending *springs* resist folds.
_cloth_params = dict(
    scale=0.2,
    mass=1,
    useBendingSprings=1,
    useMassSpring=1,
    springElasticStiffness=40,
    springDampingStiffness=0.1,
    springDampingAllDirections=0,
    springBendingStiffness=0,
    useNeoHookean=0,
    useSelfCollision=1,
    collisionMargin=0.0001,
    frictionCoeff=1.0,
    useFaceContact=1,
)
cloth = p.loadSoftBody('Buggy_cloth_contact/assets/bl_cloth_25_cuts.obj',
                       physicsClientId=p_id, **_cloth_params)
# Semi-transparent and double-sided so both faces of the cloth are visible.
p.changeVisualShape(cloth, -1, rgbaColor=[1, 1, 1, 0.5], flags=0, physicsClientId=p_id)
p.changeVisualShape(cloth, -1, flags=p.VISUAL_SHAPE_DOUBLE_SIDED, physicsClientId=p_id)
p.setPhysicsEngineParameter(numSubSteps=5, physicsClientId=p_id)
# Start the cloth rotated 90 degrees about x (lying flat) 0.8 m above ground,
# then take a single step to settle the initial state.
euler = [np.pi / 2, 0, 0]
p.resetBasePositionAndOrientation(cloth, [0, 0, 0.8], get_quaternion(euler))
p.stepSimulation(physicsClientId=p_id)
# Load the table the cloth will fall onto.
table = p.loadURDF('Buggy_cloth_contact/assets/table_tall.urdf', basePosition=[-0.2, -0.3, 0],
                   baseOrientation=[0, 0, 0, 1], physicsClientId=p_id)
# Pre-allocate marker spheres for contact-point visualization; each starts
# parked far from the origin (out of the camera view) until the simulation
# loop moves it onto an actual contact point.
batch_positions = [np.array([100, 100 + i, 100]) for i in range(contact_visual_sphere_num)]
visual_points = create_spheres(p_id, radius=0.01, mass=0, batch_positions=batch_positions,
                              visual=True, collision=False, rgba=[1, 1, 1, 1])
# Scene creation is finished: turn rendering back on.
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=p_id)
# Drop the cloth onto the table; each step, move the marker spheres onto the
# reported contact points and print a summary of the contact forces.
for step in range(simulation_steps):
    print("=" * 20, "simulation step {}".format(step), "=" * 20)
    p.stepSimulation(physicsClientId=p_id)

    # Per-node cloth data: vertex positions (x, y, z), contact positions
    # (cx, cy, cz) and contact forces (fx, fy, fz), one entry per contact.
    x, y, z, cx, cy, cz, fx, fy, fz = p.getSoftBodyData(cloth, physicsClientId=p_id)
    forces = np.stack([fx, fy, fz], axis=-1)
    contact_positions = np.stack([cx, cy, cz], axis=-1)
    total_contact_num = forces.shape[0]

    total_force = np.zeros(3)
    zero_contact_point = 0
    non_zero_count = 0
    if total_contact_num > 0:
        for idx, (cp, f) in enumerate(zip(contact_positions, forces)):
            if idx >= len(visual_points):
                break  # out of marker spheres; remaining contacts are not drawn
            marker = visual_points[idx]
            p.resetBasePositionAndOrientation(marker, cp, [0, 0, 0, 1], physicsClientId=p_id)
            if np.array_equal(f, np.zeros(3)):
                # zero-force contacts are shown as translucent blue dots
                zero_contact_point += 1
                color = np.array([0, 0, 1, 0.2])
            else:
                # force-carrying contacts are shown as solid red dots
                color = np.array([1, 0, 0, 1])
                non_zero_count += 1
            p.changeVisualShape(marker, -1, rgbaColor=color, flags=0, physicsClientId=p_id)
            if np.linalg.norm(f) > 0:
                total_force += f
        print("there are {} contact points, {} contact points have zero force".format(
            total_contact_num, zero_contact_point
        ))
        if non_zero_count > 0:
            print('average non-zero contact force is: ', total_force / non_zero_count)
        print('total force:', total_force)
"pybullet.loadSoftBody",
"pybullet.resetSimulation",
"pybullet.resetDebugVisualizerCamera",
"numpy.linalg.norm",
"pybullet.connect",
"pybullet.setRealTimeSimulation",
"pybullet.setGravity",
"pybullet.getSoftBodyData",
"pybullet.setTimeStep",
"pybullet.resetBasePositionAndOrientation",
"Buggy_clo... | [((261, 423), 'pybullet.connect', 'p.connect', (['p.GUI'], {'options': "('--background_color_red=0.8 --background_color_green=0.9 --background_color_blue=1.0 --width=%d --height=%d'\n % (width, height))"}), "(p.GUI, options=\n '--background_color_red=0.8 --background_color_green=0.9 --background_color_blue=1.0 --width=%d --height=%d'\n % (width, height))\n", (270, 423), True, 'import pybullet as p\n'), ((453, 522), 'pybullet.resetSimulation', 'p.resetSimulation', (['p.RESET_USE_DEFORMABLE_WORLD'], {'physicsClientId': 'p_id'}), '(p.RESET_USE_DEFORMABLE_WORLD, physicsClientId=p_id)\n', (470, 522), True, 'import pybullet as p\n'), ((523, 546), 'pybullet.setTimeStep', 'p.setTimeStep', (['(1.0 / hz)'], {}), '(1.0 / hz)\n', (536, 546), True, 'import pybullet as p\n'), ((547, 691), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.75)', 'cameraYaw': '(-25)', 'cameraPitch': '(-45)', 'cameraTargetPosition': '[-0.2, 0, 0.4]', 'physicsClientId': 'p_id'}), '(cameraDistance=1.75, cameraYaw=-25,\n cameraPitch=-45, cameraTargetPosition=[-0.2, 0, 0.4], physicsClientId=p_id)\n', (575, 691), True, 'import pybullet as p\n'), ((688, 767), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_MOUSE_PICKING', '(0)'], {'physicsClientId': 'p_id'}), '(p.COV_ENABLE_MOUSE_PICKING, 0, physicsClientId=p_id)\n', (714, 767), True, 'import pybullet as p\n'), ((768, 837), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_GUI', '(0)'], {'physicsClientId': 'p_id'}), '(p.COV_ENABLE_GUI, 0, physicsClientId=p_id)\n', (794, 837), True, 'import pybullet as p\n'), ((838, 886), 'pybullet.setRealTimeSimulation', 'p.setRealTimeSimulation', (['(0)'], {'physicsClientId': 'p_id'}), '(0, physicsClientId=p_id)\n', (861, 886), True, 'import pybullet as p\n'), ((887, 937), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-gravity)'], {'physicsClientId': 'p_id'}), '(0, 0, -gravity, 
physicsClientId=p_id)\n', (899, 937), True, 'import pybullet as p\n'), ((972, 1023), 'pybullet.loadURDF', 'p.loadURDF', (['"""Buggy_cloth_contact/assets/plane.urdf"""'], {}), "('Buggy_cloth_contact/assets/plane.urdf')\n", (982, 1023), True, 'import pybullet as p\n'), ((1060, 1135), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(0)'], {'physicsClientId': 'p_id'}), '(p.COV_ENABLE_RENDERING, 0, physicsClientId=p_id)\n', (1086, 1135), True, 'import pybullet as p\n'), ((1176, 1549), 'pybullet.loadSoftBody', 'p.loadSoftBody', (['"""Buggy_cloth_contact/assets/bl_cloth_25_cuts.obj"""'], {'scale': '(0.2)', 'mass': '(1)', 'useBendingSprings': '(1)', 'useMassSpring': '(1)', 'springElasticStiffness': '(40)', 'springDampingStiffness': '(0.1)', 'springDampingAllDirections': '(0)', 'springBendingStiffness': '(0)', 'useNeoHookean': '(0)', 'useSelfCollision': '(1)', 'collisionMargin': '(0.0001)', 'frictionCoeff': '(1.0)', 'useFaceContact': '(1)', 'physicsClientId': 'p_id'}), "('Buggy_cloth_contact/assets/bl_cloth_25_cuts.obj', scale=0.2,\n mass=1, useBendingSprings=1, useMassSpring=1, springElasticStiffness=40,\n springDampingStiffness=0.1, springDampingAllDirections=0,\n springBendingStiffness=0, useNeoHookean=0, useSelfCollision=1,\n collisionMargin=0.0001, frictionCoeff=1.0, useFaceContact=1,\n physicsClientId=p_id)\n", (1190, 1549), True, 'import pybullet as p\n'), ((1724, 1815), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['cloth', '(-1)'], {'rgbaColor': '[1, 1, 1, 0.5]', 'flags': '(0)', 'physicsClientId': 'p_id'}), '(cloth, -1, rgbaColor=[1, 1, 1, 0.5], flags=0,\n physicsClientId=p_id)\n', (1743, 1815), True, 'import pybullet as p\n'), ((1812, 1903), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['cloth', '(-1)'], {'flags': 'p.VISUAL_SHAPE_DOUBLE_SIDED', 'physicsClientId': 'p_id'}), '(cloth, -1, flags=p.VISUAL_SHAPE_DOUBLE_SIDED,\n physicsClientId=p_id)\n', (1831, 1903), True, 'import pybullet as p\n'), 
((1900, 1964), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'numSubSteps': '(5)', 'physicsClientId': 'p_id'}), '(numSubSteps=5, physicsClientId=p_id)\n', (1927, 1964), True, 'import pybullet as p\n'), ((2068, 2106), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {'physicsClientId': 'p_id'}), '(physicsClientId=p_id)\n', (2084, 2106), True, 'import pybullet as p\n'), ((2131, 2273), 'pybullet.loadURDF', 'p.loadURDF', (['"""Buggy_cloth_contact/assets/table_tall.urdf"""'], {'basePosition': '[-0.2, -0.3, 0]', 'baseOrientation': '[0, 0, 0, 1]', 'physicsClientId': 'p_id'}), "('Buggy_cloth_contact/assets/table_tall.urdf', basePosition=[-0.2,\n -0.3, 0], baseOrientation=[0, 0, 0, 1], physicsClientId=p_id)\n", (2141, 2273), True, 'import pybullet as p\n'), ((2449, 2576), 'Buggy_cloth_contact.helper.create_spheres', 'create_spheres', (['p_id'], {'radius': '(0.01)', 'mass': '(0)', 'batch_positions': 'batch_positions', 'visual': '(True)', 'collision': '(False)', 'rgba': '[1, 1, 1, 1]'}), '(p_id, radius=0.01, mass=0, batch_positions=batch_positions,\n visual=True, collision=False, rgba=[1, 1, 1, 1])\n', (2463, 2576), False, 'from Buggy_cloth_contact.helper import create_spheres, get_quaternion\n'), ((2583, 2658), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(1)'], {'physicsClientId': 'p_id'}), '(p.COV_ENABLE_RENDERING, 1, physicsClientId=p_id)\n', (2609, 2658), True, 'import pybullet as p\n'), ((2045, 2066), 'Buggy_cloth_contact.helper.get_quaternion', 'get_quaternion', (['euler'], {}), '(euler)\n', (2059, 2066), False, 'from Buggy_cloth_contact.helper import create_spheres, get_quaternion\n'), ((2840, 2878), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {'physicsClientId': 'p_id'}), '(physicsClientId=p_id)\n', (2856, 2878), True, 'import pybullet as p\n'), ((2961, 3007), 'pybullet.getSoftBodyData', 'p.getSoftBodyData', (['cloth'], {'physicsClientId': 'p_id'}), '(cloth, 
physicsClientId=p_id)\n', (2978, 3007), True, 'import pybullet as p\n'), ((3340, 3351), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3348, 3351), True, 'import numpy as np\n'), ((2404, 2433), 'numpy.array', 'np.array', (['[100, 100 + i, 100]'], {}), '([100, 100 + i, 100])\n', (2412, 2433), True, 'import numpy as np\n'), ((3037, 3064), 'numpy.expand_dims', 'np.expand_dims', (['fx'], {'axis': '(-1)'}), '(fx, axis=-1)\n', (3051, 3064), True, 'import numpy as np\n'), ((3066, 3093), 'numpy.expand_dims', 'np.expand_dims', (['fy'], {'axis': '(-1)'}), '(fy, axis=-1)\n', (3080, 3093), True, 'import numpy as np\n'), ((3095, 3122), 'numpy.expand_dims', 'np.expand_dims', (['fz'], {'axis': '(-1)'}), '(fz, axis=-1)\n', (3109, 3122), True, 'import numpy as np\n'), ((3174, 3201), 'numpy.expand_dims', 'np.expand_dims', (['cx'], {'axis': '(-1)'}), '(cx, axis=-1)\n', (3188, 3201), True, 'import numpy as np\n'), ((3203, 3230), 'numpy.expand_dims', 'np.expand_dims', (['cy'], {'axis': '(-1)'}), '(cy, axis=-1)\n', (3217, 3230), True, 'import numpy as np\n'), ((3232, 3259), 'numpy.expand_dims', 'np.expand_dims', (['cz'], {'axis': '(-1)'}), '(cz, axis=-1)\n', (3246, 3259), True, 'import numpy as np\n'), ((3560, 3655), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['visual_points[i]', 'cp', '[0, 0, 0, 1]'], {'physicsClientId': 'p_id'}), '(visual_points[i], cp, [0, 0, 0, 1],\n physicsClientId=p_id)\n', (3593, 3655), True, 'import pybullet as p\n'), ((3989, 4082), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['visual_points[i]', '(-1)'], {'rgbaColor': 'color', 'flags': '(0)', 'physicsClientId': 'p_id'}), '(visual_points[i], -1, rgbaColor=color, flags=0,\n physicsClientId=p_id)\n', (4008, 4082), True, 'import pybullet as p\n'), ((3685, 3696), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3693, 3696), True, 'import numpy as np\n'), ((3805, 3829), 'numpy.array', 'np.array', (['[0, 0, 1, 0.2]'], {}), '([0, 0, 1, 0.2])\n', (3813, 3829), 
True, 'import numpy as np\n'), ((3917, 3939), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (3925, 3939), True, 'import numpy as np\n'), ((4094, 4111), 'numpy.linalg.norm', 'np.linalg.norm', (['f'], {}), '(f)\n', (4108, 4111), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.