code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import torch
import numpy as np
import os
import timeit
from PIL import Image
from torchvision.utils import save_image
import torch.cuda as cutorch
from utils import SimpleProgressBar, IMGs_dataset
from opts import parse_opts
from DiffAugment_pytorch import DiffAugment
''' Settings '''
args = parse_opts()  # parse command-line options once at module import

# some parameters in opts
gan_arch = args.GAN_arch                  # generator/discriminator architecture name
loss_type = args.loss_type_gan            # GAN loss: "vanilla" or "hinge" (see train_ccgan)
niters = args.niters_gan                  # total number of GAN training iterations
resume_niters = args.resume_niters_gan    # resume from this iteration's checkpoint (0 = fresh run)
dim_gan = args.dim_gan                    # dimensionality of the generator's latent z
lr_g = args.lr_g_gan                      # Adam learning rate for the generator
lr_d = args.lr_d_gan                      # Adam learning rate for the discriminator
save_niters_freq = args.save_niters_freq  # checkpoint every this many iterations
batch_size_disc = args.batch_size_disc    # batch size for discriminator updates
batch_size_gene = args.batch_size_gene    # batch size for generator updates
# batch_size_max = max(batch_size_disc, batch_size_gene)
num_D_steps = args.num_D_steps            # discriminator steps per generator step
visualize_freq = args.visualize_freq      # dump a sample-image grid every this many iterations
num_workers = args.num_workers            # dataloader workers (not used in this chunk -- TODO confirm)
threshold_type = args.threshold_type      # vicinity type: "hard" or soft (SVDL) -- see train_ccgan
nonzero_soft_weight_threshold = args.nonzero_soft_weight_threshold  # cut-off below which soft vicinal weights count as zero
num_channels = args.num_channels          # image channels (not used in this chunk -- TODO confirm)
img_size = args.img_size                  # image side length (not used in this chunk -- TODO confirm)
max_label = args.max_label                # label normalization constant (not used in this chunk -- TODO confirm)
use_DiffAugment = args.gan_DiffAugment    # enable differentiable augmentation on D/G inputs
policy = args.gan_DiffAugment_policy      # DiffAugment policy string
## horizontal flip images
def hflip_images(batch_images):
    """Randomly mirror roughly half of a numpy image batch horizontally.

    A fair coin is flipped per image; selected images are replaced by their
    mirror along axis 3 (the batch is therefore assumed 4-D, e.g. NCHW --
    TODO confirm with callers). The array is modified in place and returned
    for convenience.
    """
    flip_mask = np.random.uniform(0, 1, len(batch_images)) > 0.5
    batch_images[flip_mask] = np.flip(batch_images[flip_mask], axis=3)
    return batch_images
# def hflip_images(batch_images):
# ''' for torch tensors '''
# uniform_threshold = np.random.uniform(0,1,len(batch_images))
# indx_gt = np.where(uniform_threshold>0.5)[0]
# batch_images[indx_gt] = torch.flip(batch_images[indx_gt], dims=[3])
# return batch_images
## normalize images
def normalize_images(batch_images):
    """Map pixel intensities from [0, 255] to [-1, 1].

    Works element-wise on scalars or numpy arrays; the input itself is not
    modified (the arithmetic produces a new value/array).
    """
    scaled = batch_images / 255.0      # -> [0, 1]
    return (scaled - 0.5) / 0.5        # -> [-1, 1]
def train_ccgan(kernel_sigma, kappa, train_images, train_labels, netG, netD, net_y2h, save_images_folder, save_models_folder = None, clip_label=False):
    '''
    Train a Continuous conditional GAN (CcGAN) with vicinal D/G losses.

    Note that train_images are not normalized to [-1,1]; they are expected in
    [0,255] and are normalized on the fly every iteration.

    kernel_sigma: std-dev of the Gaussian noise added to sampled target labels.
    kappa: vicinity parameter (hard-threshold radius, or soft-weight scale).
    train_images: numpy image array indexed along axis 0 (assumed 4-D since
        hflip_images flips axis=3 -- TODO confirm layout).
    train_labels: 1-D numpy array of regression labels.
    netG, netD: generator / discriminator modules (moved to GPU here).
    net_y2h: pretrained label-embedding network (kept frozen, eval mode).
    save_images_folder: directory for periodic sample grids.
    save_models_folder: directory for checkpoints to save/resume (optional).
    clip_label: if True, clip re-sampled target labels into [0.0, 1.0].

    Returns the trained (netG, netD) pair.
    '''
    netG = netG.cuda()
    netD = netD.cuda()
    net_y2h = net_y2h.cuda()
    net_y2h.eval()  # label-embedding net is fixed during GAN training
    optimizerG = torch.optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
    optimizerD = torch.optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
    # Optionally resume nets, optimizers and RNG state from an in-training checkpoint.
    if save_models_folder is not None and resume_niters>0:
        save_file = save_models_folder + "/CcGAN_{}_{}_nDsteps_{}_checkpoint_intrain/CcGAN_checkpoint_niters_{}.pth".format(gan_arch, threshold_type, num_D_steps, resume_niters)
        checkpoint = torch.load(save_file)
        netG.load_state_dict(checkpoint['netG_state_dict'])
        netD.load_state_dict(checkpoint['netD_state_dict'])
        optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
        optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
    #end if

    #################
    unique_train_labels = np.sort(np.array(list(set(train_labels))))

    # printed images with labels between the 5-th quantile and 95-th quantile of training labels
    n_row=10; n_col = n_row
    z_fixed = torch.randn(n_row*n_col, dim_gan, dtype=torch.float).cuda()
    start_label = np.quantile(train_labels, 0.05)
    end_label = np.quantile(train_labels, 0.95)
    selected_labels = np.linspace(start_label, end_label, num=n_row)
    y_fixed = np.zeros(n_row*n_col)
    for i in range(n_row):
        curr_label = selected_labels[i]
        for j in range(n_col):
            y_fixed[i*n_col+j] = curr_label  # every image in a grid row shares one label
    print(y_fixed)
    y_fixed = torch.from_numpy(y_fixed).type(torch.float).view(-1,1).cuda()

    start_time = timeit.default_timer()
    for niter in range(resume_niters, niters):

        ''' Train Discriminator '''
        for _ in range(num_D_steps):
            ## randomly draw batch_size_disc y's from unique_train_labels
            batch_target_labels_in_dataset = np.random.choice(unique_train_labels, size=batch_size_disc, replace=True)
            ## add Gaussian noise; we estimate image distribution conditional on these labels
            batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_disc)
            batch_target_labels = batch_target_labels_in_dataset + batch_epsilons

            ## find index of real images with labels in the vicinity of batch_target_labels
            ## generate labels for fake image generation; these labels are also in the vicinity of batch_target_labels
            batch_real_indx = np.zeros(batch_size_disc, dtype=int) #index of images in the datata; the labels of these images are in the vicinity
            batch_fake_labels = np.zeros(batch_size_disc)

            for j in range(batch_size_disc):
                ## index for real images
                if threshold_type == "hard":
                    indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
                else:
                    # reverse the weight function for SVDL
                    indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]

                ## if the max gap between two consecutive ordered unique labels is large, it is possible that len(indx_real_in_vicinity)<1
                # keep re-drawing the noisy target label until at least one real sample
                # falls inside its vicinity
                while len(indx_real_in_vicinity)<1:
                    batch_epsilons_j = np.random.normal(0, kernel_sigma, 1)
                    batch_target_labels[j] = batch_target_labels_in_dataset[j] + batch_epsilons_j
                    if clip_label:
                        batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
                    ## index for real images
                    if threshold_type == "hard":
                        indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
                    else:
                        # reverse the weight function for SVDL
                        indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]
                #end while len(indx_real_in_vicinity)<1

                assert len(indx_real_in_vicinity)>=1
                batch_real_indx[j] = np.random.choice(indx_real_in_vicinity, size=1)[0]

                ## labels for fake images generation
                if threshold_type == "hard":
                    lb = batch_target_labels[j] - kappa
                    ub = batch_target_labels[j] + kappa
                else:
                    # soft vicinity: radius at which the SVDL weight drops to the threshold
                    lb = batch_target_labels[j] - np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
                    ub = batch_target_labels[j] + np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
                lb = max(0.0, lb); ub = min(ub, 1.0)
                assert lb<=ub
                assert lb>=0 and ub>=0
                assert lb<=1 and ub<=1
                batch_fake_labels[j] = np.random.uniform(lb, ub, size=1)[0]
            #end for j

            ## draw real image/label batch from the training set
            batch_real_images = torch.from_numpy(normalize_images(hflip_images(train_images[batch_real_indx])))
            batch_real_images = batch_real_images.type(torch.float).cuda()
            batch_real_labels = train_labels[batch_real_indx]
            batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).cuda()

            ## generate the fake image batch
            batch_fake_labels = torch.from_numpy(batch_fake_labels).type(torch.float).cuda()
            z = torch.randn(batch_size_disc, dim_gan, dtype=torch.float).cuda()
            batch_fake_images = netG(z, net_y2h(batch_fake_labels))

            ## target labels on gpu
            batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).cuda()

            ## weight vector
            if threshold_type == "soft":
                real_weights = torch.exp(-kappa*(batch_real_labels-batch_target_labels)**2).cuda()
                fake_weights = torch.exp(-kappa*(batch_fake_labels-batch_target_labels)**2).cuda()
            else:
                # hard threshold: every sample in the vicinity gets full weight
                real_weights = torch.ones(batch_size_disc, dtype=torch.float).cuda()
                fake_weights = torch.ones(batch_size_disc, dtype=torch.float).cuda()
            #end if threshold type

            # forward pass
            if use_DiffAugment:
                real_dis_out = netD(DiffAugment(batch_real_images, policy=policy), net_y2h(batch_target_labels))
                fake_dis_out = netD(DiffAugment(batch_fake_images.detach(), policy=policy), net_y2h(batch_target_labels))
            else:
                real_dis_out = netD(batch_real_images, net_y2h(batch_target_labels))
                fake_dis_out = netD(batch_fake_images.detach(), net_y2h(batch_target_labels))

            if loss_type == "vanilla":
                real_dis_out = torch.nn.Sigmoid()(real_dis_out)
                fake_dis_out = torch.nn.Sigmoid()(fake_dis_out)
                # +1e-20 guards log(0)
                d_loss_real = - torch.log(real_dis_out+1e-20)
                d_loss_fake = - torch.log(1-fake_dis_out+1e-20)
            elif loss_type == "hinge":
                d_loss_real = torch.nn.ReLU()(1.0 - real_dis_out)
                d_loss_fake = torch.nn.ReLU()(1.0 + fake_dis_out)
            else:
                raise ValueError('Not supported loss type!!!')

            # per-sample vicinal weights are applied before reduction
            d_loss = torch.mean(real_weights.view(-1) * d_loss_real.view(-1)) + torch.mean(fake_weights.view(-1) * d_loss_fake.view(-1))

            optimizerD.zero_grad()
            d_loss.backward()
            optimizerD.step()
        #end for step_D_index

        ''' Train Generator '''
        netG.train()

        # generate fake images
        ## randomly draw batch_size_gene y's from unique_train_labels
        batch_target_labels_in_dataset = np.random.choice(unique_train_labels, size=batch_size_gene, replace=True)
        ## add Gaussian noise; we estimate image distribution conditional on these labels
        batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_gene)
        batch_target_labels = batch_target_labels_in_dataset + batch_epsilons
        batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).cuda()

        z = torch.randn(batch_size_gene, dim_gan, dtype=torch.float).cuda()
        batch_fake_images = netG(z, net_y2h(batch_target_labels))

        # loss
        if use_DiffAugment:
            dis_out = netD(DiffAugment(batch_fake_images, policy=policy), net_y2h(batch_target_labels))
        else:
            dis_out = netD(batch_fake_images, net_y2h(batch_target_labels))
        if loss_type == "vanilla":
            dis_out = torch.nn.Sigmoid()(dis_out)
            g_loss = - torch.mean(torch.log(dis_out+1e-20))
        elif loss_type == "hinge":
            g_loss = - dis_out.mean()

        # backward
        optimizerG.zero_grad()
        g_loss.backward()
        optimizerG.step()

        # print loss
        if (niter+1) % 20 == 0:
            print ("CcGAN,%s: [Iter %d/%d] [D loss: %.4e] [G loss: %.4e] [real prob: %.3f] [fake prob: %.3f] [Time: %.4f]" % (gan_arch, niter+1, niters, d_loss.item(), g_loss.item(), real_dis_out.mean().item(), fake_dis_out.mean().item(), timeit.default_timer()-start_time))

        # periodically render the fixed z / fixed-label grid for visual inspection
        if (niter+1) % visualize_freq == 0:
            netG.eval()
            with torch.no_grad():
                gen_imgs = netG(z_fixed, net_y2h(y_fixed))
                gen_imgs = gen_imgs.detach().cpu()
                save_image(gen_imgs.data, save_images_folder + '/{}.png'.format(niter+1), nrow=n_row, normalize=True)

        # periodic (and final) checkpoint, including optimizer and RNG state for exact resume
        if save_models_folder is not None and ((niter+1) % save_niters_freq == 0 or (niter+1) == niters):
            save_file = save_models_folder + "/CcGAN_{}_{}_nDsteps_{}_checkpoint_intrain/CcGAN_checkpoint_niters_{}.pth".format(gan_arch, threshold_type, num_D_steps, niter+1)
            os.makedirs(os.path.dirname(save_file), exist_ok=True)
            torch.save({
                    'netG_state_dict': netG.state_dict(),
                    'netD_state_dict': netD.state_dict(),
                    'optimizerG_state_dict': optimizerG.state_dict(),
                    'optimizerD_state_dict': optimizerD.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for niter
    return netG, netD
def sample_ccgan_given_labels(netG, net_y2h, labels, batch_size = 500, to_numpy=True, denorm=True, verbose=True):
    '''
    Sample fake images from a trained CcGAN generator at the given labels.

    netG: pretrained generator network
    net_y2h: pretrained label-embedding network
    labels: float. normalized labels. One image is generated per label.
    batch_size: generation batch size (clamped down to len(labels)).
    to_numpy: if True, return images as a numpy array instead of a tensor.
    denorm: if True, map generator outputs from [-1,1] back to uint8 [0,255].
    verbose: if True, display a progress bar.

    Returns (fake_images, fake_labels) with exactly len(labels) entries.
    '''
    nfake = len(labels)
    if batch_size>nfake:
        batch_size=nfake
    fake_images = []
    # pad the label list so the last batch is full; extras are trimmed below
    fake_labels = np.concatenate((labels, labels[0:batch_size]))
    netG=netG.cuda()
    netG.eval()
    net_y2h = net_y2h.cuda()
    net_y2h.eval()
    with torch.no_grad():
        if verbose:
            pb = SimpleProgressBar()
        n_img_got = 0
        while n_img_got < nfake:
            z = torch.randn(batch_size, dim_gan, dtype=torch.float).cuda()
            y = torch.from_numpy(fake_labels[n_img_got:(n_img_got+batch_size)]).type(torch.float).view(-1,1).cuda()
            batch_fake_images = netG(z, net_y2h(y))
            if denorm: #denorm imgs to save memory
                assert batch_fake_images.max().item()<=1.0 and batch_fake_images.min().item()>=-1.0
                batch_fake_images = batch_fake_images*0.5+0.5
                batch_fake_images = batch_fake_images*255.0
                batch_fake_images = batch_fake_images.type(torch.uint8)
                # assert batch_fake_images.max().item()>1
            fake_images.append(batch_fake_images.cpu())
            n_img_got += batch_size
            if verbose:
                pb.update(min(float(n_img_got)/nfake, 1)*100)
        ##end while
    fake_images = torch.cat(fake_images, dim=0)
    #remove extra entries
    fake_images = fake_images[0:nfake]
    fake_labels = fake_labels[0:nfake]
    if to_numpy:
        fake_images = fake_images.numpy()
    # NOTE(review): the trailing '| [' below is dataset-table residue from the
    # dump this file was extracted from, not Python code.
    return fake_images, fake_labels | [
"numpy.clip",
"torch.nn.ReLU",
"numpy.log",
"torch.exp",
"torch.from_numpy",
"torch.get_rng_state",
"numpy.flip",
"torch.nn.Sigmoid",
"numpy.where",
"opts.parse_opts",
"numpy.linspace",
"numpy.concatenate",
"DiffAugment_pytorch.DiffAugment",
"torch.randn",
"numpy.random.normal",
"numpy... | [((296, 308), 'opts.parse_opts', 'parse_opts', ([], {}), '()\n', (306, 308), False, 'from opts import parse_opts\n'), ((1297, 1335), 'numpy.flip', 'np.flip', (['batch_images[indx_gt]'], {'axis': '(3)'}), '(batch_images[indx_gt], axis=3)\n', (1304, 1335), True, 'import numpy as np\n'), ((3218, 3249), 'numpy.quantile', 'np.quantile', (['train_labels', '(0.05)'], {}), '(train_labels, 0.05)\n', (3229, 3249), True, 'import numpy as np\n'), ((3266, 3297), 'numpy.quantile', 'np.quantile', (['train_labels', '(0.95)'], {}), '(train_labels, 0.95)\n', (3277, 3297), True, 'import numpy as np\n'), ((3320, 3366), 'numpy.linspace', 'np.linspace', (['start_label', 'end_label'], {'num': 'n_row'}), '(start_label, end_label, num=n_row)\n', (3331, 3366), True, 'import numpy as np\n'), ((3381, 3404), 'numpy.zeros', 'np.zeros', (['(n_row * n_col)'], {}), '(n_row * n_col)\n', (3389, 3404), True, 'import numpy as np\n'), ((3659, 3681), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3679, 3681), False, 'import timeit\n'), ((12649, 12695), 'numpy.concatenate', 'np.concatenate', (['(labels, labels[0:batch_size])'], {}), '((labels, labels[0:batch_size]))\n', (12663, 12695), True, 'import numpy as np\n'), ((13782, 13811), 'torch.cat', 'torch.cat', (['fake_images'], {'dim': '(0)'}), '(fake_images, dim=0)\n', (13791, 13811), False, 'import torch\n'), ((1234, 1267), 'numpy.where', 'np.where', (['(uniform_threshold > 0.5)'], {}), '(uniform_threshold > 0.5)\n', (1242, 1267), True, 'import numpy as np\n'), ((2557, 2578), 'torch.load', 'torch.load', (['save_file'], {}), '(save_file)\n', (2567, 2578), False, 'import torch\n'), ((2851, 2895), 'torch.set_rng_state', 'torch.set_rng_state', (["checkpoint['rng_state']"], {}), "(checkpoint['rng_state'])\n", (2870, 2895), False, 'import torch\n'), ((9799, 9872), 'numpy.random.choice', 'np.random.choice', (['unique_train_labels'], {'size': 'batch_size_gene', 'replace': '(True)'}), '(unique_train_labels, size=batch_size_gene, 
replace=True)\n', (9815, 9872), True, 'import numpy as np\n'), ((9988, 10038), 'numpy.random.normal', 'np.random.normal', (['(0)', 'kernel_sigma', 'batch_size_gene'], {}), '(0, kernel_sigma, batch_size_gene)\n', (10004, 10038), True, 'import numpy as np\n'), ((12790, 12805), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12803, 12805), False, 'import torch\n'), ((3140, 3194), 'torch.randn', 'torch.randn', (['(n_row * n_col)', 'dim_gan'], {'dtype': 'torch.float'}), '(n_row * n_col, dim_gan, dtype=torch.float)\n', (3151, 3194), False, 'import torch\n'), ((3926, 3999), 'numpy.random.choice', 'np.random.choice', (['unique_train_labels'], {'size': 'batch_size_disc', 'replace': '(True)'}), '(unique_train_labels, size=batch_size_disc, replace=True)\n', (3942, 3999), True, 'import numpy as np\n'), ((4123, 4173), 'numpy.random.normal', 'np.random.normal', (['(0)', 'kernel_sigma', 'batch_size_disc'], {}), '(0, kernel_sigma, batch_size_disc)\n', (4139, 4173), True, 'import numpy as np\n'), ((4498, 4534), 'numpy.zeros', 'np.zeros', (['batch_size_disc'], {'dtype': 'int'}), '(batch_size_disc, dtype=int)\n', (4506, 4534), True, 'import numpy as np\n'), ((4646, 4671), 'numpy.zeros', 'np.zeros', (['batch_size_disc'], {}), '(batch_size_disc)\n', (4654, 4671), True, 'import numpy as np\n'), ((12844, 12863), 'utils.SimpleProgressBar', 'SimpleProgressBar', ([], {}), '()\n', (12861, 12863), False, 'from utils import SimpleProgressBar, IMGs_dataset\n'), ((10223, 10279), 'torch.randn', 'torch.randn', (['batch_size_gene', 'dim_gan'], {'dtype': 'torch.float'}), '(batch_size_gene, dim_gan, dtype=torch.float)\n', (10234, 10279), False, 'import torch\n'), ((10424, 10469), 'DiffAugment_pytorch.DiffAugment', 'DiffAugment', (['batch_fake_images'], {'policy': 'policy'}), '(batch_fake_images, policy=policy)\n', (10435, 10469), False, 'from DiffAugment_pytorch import DiffAugment\n'), ((10648, 10666), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (10664, 10666), False, 'import 
torch\n'), ((11327, 11342), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11340, 11342), False, 'import torch\n'), ((11879, 11905), 'os.path.dirname', 'os.path.dirname', (['save_file'], {}), '(save_file)\n', (11894, 11905), False, 'import os\n'), ((5371, 5407), 'numpy.random.normal', 'np.random.normal', (['(0)', 'kernel_sigma', '(1)'], {}), '(0, kernel_sigma, 1)\n', (5387, 5407), True, 'import numpy as np\n'), ((6220, 6267), 'numpy.random.choice', 'np.random.choice', (['indx_real_in_vicinity'], {'size': '(1)'}), '(indx_real_in_vicinity, size=1)\n', (6236, 6267), True, 'import numpy as np\n'), ((6912, 6945), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {'size': '(1)'}), '(lb, ub, size=1)\n', (6929, 6945), True, 'import numpy as np\n'), ((7536, 7592), 'torch.randn', 'torch.randn', (['batch_size_disc', 'dim_gan'], {'dtype': 'torch.float'}), '(batch_size_disc, dim_gan, dtype=torch.float)\n', (7547, 7592), False, 'import torch\n'), ((8390, 8435), 'DiffAugment_pytorch.DiffAugment', 'DiffAugment', (['batch_real_images'], {'policy': 'policy'}), '(batch_real_images, policy=policy)\n', (8401, 8435), False, 'from DiffAugment_pytorch import DiffAugment\n'), ((8857, 8875), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (8873, 8875), False, 'import torch\n'), ((8921, 8939), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (8937, 8939), False, 'import torch\n'), ((8986, 9017), 'torch.log', 'torch.log', (['(real_dis_out + 1e-20)'], {}), '(real_dis_out + 1e-20)\n', (8995, 9017), False, 'import torch\n'), ((9048, 9083), 'torch.log', 'torch.log', (['(1 - fake_dis_out + 1e-20)'], {}), '(1 - fake_dis_out + 1e-20)\n', (9057, 9083), False, 'import torch\n'), ((10710, 10736), 'torch.log', 'torch.log', (['(dis_out + 1e-20)'], {}), '(dis_out + 1e-20)\n', (10719, 10736), False, 'import torch\n'), ((12236, 12257), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (12255, 12257), False, 'import torch\n'), ((12935, 12986), 'torch.randn', 
'torch.randn', (['batch_size', 'dim_gan'], {'dtype': 'torch.float'}), '(batch_size, dim_gan, dtype=torch.float)\n', (12946, 12986), False, 'import torch\n'), ((5587, 5625), 'numpy.clip', 'np.clip', (['batch_target_labels', '(0.0)', '(1.0)'], {}), '(batch_target_labels, 0.0, 1.0)\n', (5594, 5625), True, 'import numpy as np\n'), ((7904, 7970), 'torch.exp', 'torch.exp', (['(-kappa * (batch_real_labels - batch_target_labels) ** 2)'], {}), '(-kappa * (batch_real_labels - batch_target_labels) ** 2)\n', (7913, 7970), False, 'import torch\n'), ((8003, 8069), 'torch.exp', 'torch.exp', (['(-kappa * (batch_fake_labels - batch_target_labels) ** 2)'], {}), '(-kappa * (batch_fake_labels - batch_target_labels) ** 2)\n', (8012, 8069), False, 'import torch\n'), ((8120, 8166), 'torch.ones', 'torch.ones', (['batch_size_disc'], {'dtype': 'torch.float'}), '(batch_size_disc, dtype=torch.float)\n', (8130, 8166), False, 'import torch\n'), ((8205, 8251), 'torch.ones', 'torch.ones', (['batch_size_disc'], {'dtype': 'torch.float'}), '(batch_size_disc, dtype=torch.float)\n', (8215, 8251), False, 'import torch\n'), ((9149, 9164), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (9162, 9164), False, 'import torch\n'), ((9215, 9230), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (9228, 9230), False, 'import torch\n'), ((10147, 10184), 'torch.from_numpy', 'torch.from_numpy', (['batch_target_labels'], {}), '(batch_target_labels)\n', (10163, 10184), False, 'import torch\n'), ((3578, 3603), 'torch.from_numpy', 'torch.from_numpy', (['y_fixed'], {}), '(y_fixed)\n', (3594, 3603), False, 'import torch\n'), ((7319, 7354), 'torch.from_numpy', 'torch.from_numpy', (['batch_real_labels'], {}), '(batch_real_labels)\n', (7335, 7354), False, 'import torch\n'), ((7459, 7494), 'torch.from_numpy', 'torch.from_numpy', (['batch_fake_labels'], {}), '(batch_fake_labels)\n', (7475, 7494), False, 'import torch\n'), ((7739, 7776), 'torch.from_numpy', 'torch.from_numpy', (['batch_target_labels'], {}), 
'(batch_target_labels)\n', (7755, 7776), False, 'import torch\n'), ((11205, 11227), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11225, 11227), False, 'import timeit\n'), ((4857, 4902), 'numpy.abs', 'np.abs', (['(train_labels - batch_target_labels[j])'], {}), '(train_labels - batch_target_labels[j])\n', (4863, 4902), True, 'import numpy as np\n'), ((5777, 5822), 'numpy.abs', 'np.abs', (['(train_labels - batch_target_labels[j])'], {}), '(train_labels - batch_target_labels[j])\n', (5783, 5822), True, 'import numpy as np\n'), ((6563, 6600), 'numpy.log', 'np.log', (['nonzero_soft_weight_threshold'], {}), '(nonzero_soft_weight_threshold)\n', (6569, 6600), True, 'import numpy as np\n'), ((6667, 6704), 'numpy.log', 'np.log', (['nonzero_soft_weight_threshold'], {}), '(nonzero_soft_weight_threshold)\n', (6673, 6704), True, 'import numpy as np\n'), ((13010, 13073), 'torch.from_numpy', 'torch.from_numpy', (['fake_labels[n_img_got:n_img_got + batch_size]'], {}), '(fake_labels[n_img_got:n_img_got + batch_size])\n', (13026, 13073), False, 'import torch\n'), ((5092, 5129), 'numpy.log', 'np.log', (['nonzero_soft_weight_threshold'], {}), '(nonzero_soft_weight_threshold)\n', (5098, 5129), True, 'import numpy as np\n'), ((6024, 6061), 'numpy.log', 'np.log', (['nonzero_soft_weight_threshold'], {}), '(nonzero_soft_weight_threshold)\n', (6030, 6061), True, 'import numpy as np\n')] |
import tensorflow as tf
#import tensorlayer as tl
import numpy as np
class DQN(object):
    """Deep Q-Network regression head built with TensorFlow 1.x graph APIs.

    Builds a stack of dense layers mapping a feature vector to one Q-estimate
    per vocabulary entry (optionally as a dueling network), plus the train op,
    inference op and target-network weight-update ops. Hyper-parameters come
    from the `hps` namespace (dqn_input_feature_len, vocab_size, dqn_layers,
    dueling_net, lr, max_grad_norm, dqn_gpu_num, dqn_target_update,
    dqn_polyak_averaging).
    """

    def __init__(self, hps, name_variable):
        # name_variable scopes every layer/variable, so two copies of the
        # network (e.g. online + target) can coexist in one graph.
        self._hps = hps
        self._name_variable = name_variable

    def variable_summaries(self, var_name, var):
        """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
        with tf.name_scope('summaries_{}'.format(var_name)):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
            tf.summary.histogram('histogram', var)

    def _add_placeholders(self):
        """Add placeholders to the graph. These are entry points for any input data."""
        self._x = tf.placeholder(tf.float32, [None, self._hps.dqn_input_feature_len], name='x') # size (dataset_len, input_feature_len)
        self._y = tf.placeholder(tf.float32, [None, self._hps.vocab_size], name='y') # size (dataset_len, 1)
        self._train_step = tf.placeholder(tf.int32, None,name='train_step')

    def _make_feed_dict(self, batch):
        # Map a batch object's _x/_y arrays onto the graph placeholders.
        feed_dict = {}
        feed_dict[self._x] = batch._x
        feed_dict[self._y] = batch._y
        return feed_dict

    def _add_tf_layers(self):
        """ Based on the dqn_layers flag, it creates multiple dense layers to do the regression. """
        h = tf.layers.dense(self._x, units = self._hps.dqn_input_feature_len, activation=tf.nn.relu, name='{}_input_layer'.format(self._name_variable))
        # dqn_layers is a comma-separated list of hidden-layer widths
        for i, layer in enumerate(self._hps.dqn_layers.split(',')):
            h = tf.layers.dense(h, units = int(layer), activation = tf.nn.relu, name='{}_h_{}'.format(self._name_variable, i))
        self.advantage_layer = tf.layers.dense(h, units = self._hps.vocab_size, activation = tf.nn.softmax, name='{}_advantage'.format(self._name_variable))
        if self._hps.dueling_net:
            # in dueling net, we have two extra output layers; one for value function estimation
            # and the other for advantage estimation, we then use the difference between these two layers
            # to calculate the q-estimation
            self_layer = tf.layers.dense(h, units = 1, activation = tf.identity, name='{}_value'.format(self._name_variable))
            normalized_al = self.advantage_layer-tf.reshape(tf.reduce_mean(self.advantage_layer,axis=1),[-1,1]) # equation 9 in https://arxiv.org/pdf/1511.06581.pdf
            value_extended = tf.concat([self_layer] * self._hps.vocab_size, axis=1)
            self.output = value_extended + normalized_al
        else:
            self.output = self.advantage_layer

    def _add_train_op(self):
        # In regression, the objective loss is Mean Squared Error (MSE).
        self.loss = tf.losses.mean_squared_error(labels = self._y, predictions = self.output)
        tvars = tf.trainable_variables()
        gradients = tf.gradients(self.loss, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
        # Clip the gradients
        with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
            grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)
        # Add a summary
        tf.summary.scalar('global_norm', global_norm)
        # Apply adagrad optimizer
        optimizer = tf.train.AdamOptimizer(self._hps.lr)
        with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
            self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')
        self.variable_summaries('dqn_loss',self.loss)

    def _add_update_weights_op(self):
        """ Updates the weight of the target network based on the current network. """
        self.model_trainables = tf.trainable_variables(scope='{}_relay_network'.format(self._name_variable)) # target variables
        # one placeholder per trainable variable, fed by run_update_weights
        self._new_trainables = [tf.placeholder(tf.float32, None,name='trainables_{}'.format(i)) for i in range(len(self.model_trainables))]
        self.assign_ops = []
        if self._hps.dqn_polyak_averaging: # target parameters are slowly updating using: \phi_target = \tau * \phi_target + (1-\tau) * \phi_new
            tau = (tf.cast(self._train_step,tf.float32) % self._hps.dqn_target_update)/float(self._hps.dqn_target_update)
            for i, mt in enumerate(self.model_trainables):
                nt = self._new_trainables[i]
                self.assign_ops.append(mt.assign(tau * mt + (1-tau) * nt))
        else:
            # NOTE(review): self._train_step is a tf.placeholder here, so this
            # Python-level `%` / `== 0` comparison is evaluated once at graph
            # construction time, not per training step -- confirm this branch
            # builds the assign ops as intended.
            if self._train_step % self._hps.dqn_target_update == 0:
                for i, mt in enumerate(self.model_trainables):
                    nt = self._new_trainables[i]
                    self.assign_ops.append(mt.assign(nt))

    def build_graph(self):
        """Assemble placeholders, layers, train op and weight-update ops under
        this network's variable scope on the configured GPU."""
        with tf.variable_scope('{}_relay_network'.format(self._name_variable)), tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            self._add_placeholders()
            self._add_tf_layers()
            self._add_train_op()
            self._add_update_weights_op()
            self._summaries = tf.summary.merge_all()

    def run_train_steps(self, sess, batch):
        # One optimization step; returns summaries/loss/global_step for logging.
        feed_dict = self._make_feed_dict(batch)
        to_return = {'train_op': self.train_op,
            'summaries': self._summaries,
            'loss': self.loss,
            'global_step': self.global_step}
        return sess.run(to_return, feed_dict)

    def run_test_steps(self, sess, x, y=None, return_loss=False, return_best_action=False):
        # when return_loss is True, the model will return the loss of the prediction
        # return_loss should be False, during estimation (decoding)
        feed_dict = {self._x:x}
        to_return = {'estimates': self.output}
        if return_loss:
            feed_dict.update({self._y:y})
            to_return.update({'loss': self.loss})
        output = sess.run(to_return, feed_dict)
        if return_best_action:
            # greedy action = argmax over the per-vocab Q-estimates
            output['best_action']=np.argmax(output['estimates'],axis=1)
        return output

    def run_update_weights(self, sess, train_step, weights):
        # Feed the online network's weights into the placeholder slots and run
        # the assign ops built by _add_update_weights_op.
        feed_dict = {self._train_step:train_step}
        for i, w in enumerate(weights):
            feed_dict.update({self._new_trainables[i]:w})
        _ = sess.run(self.assign_ops, feed_dict)
| [
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_min",
"tensorflow.summary.merge_all",
"tensorflow.Variable",
"tensorflow.placeholder",
"numpy.argmax",
"tensorflow.reduce_max",
"tensorflow.gradients",
"tensorflow.summary.scalar",
"tensorflow.concat",
"tensorflow.summary.histogram",
"tensor... | [((947, 1024), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self._hps.dqn_input_feature_len]'], {'name': '"""x"""'}), "(tf.float32, [None, self._hps.dqn_input_feature_len], name='x')\n", (961, 1024), True, 'import tensorflow as tf\n'), ((1083, 1149), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self._hps.vocab_size]'], {'name': '"""y"""'}), "(tf.float32, [None, self._hps.vocab_size], name='y')\n", (1097, 1149), True, 'import tensorflow as tf\n'), ((1201, 1250), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'None'], {'name': '"""train_step"""'}), "(tf.int32, None, name='train_step')\n", (1215, 1250), True, 'import tensorflow as tf\n'), ((2948, 3017), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'self._y', 'predictions': 'self.output'}), '(labels=self._y, predictions=self.output)\n', (2976, 3017), True, 'import tensorflow as tf\n'), ((3039, 3063), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3061, 3063), True, 'import tensorflow as tf\n'), ((3084, 3178), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'tvars'], {'aggregation_method': 'tf.AggregationMethod.EXPERIMENTAL_TREE'}), '(self.loss, tvars, aggregation_method=tf.AggregationMethod.\n EXPERIMENTAL_TREE)\n', (3096, 3178), True, 'import tensorflow as tf\n'), ((3394, 3439), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""global_norm"""', 'global_norm'], {}), "('global_norm', global_norm)\n", (3411, 3439), True, 'import tensorflow as tf\n'), ((3495, 3531), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self._hps.lr'], {}), '(self._hps.lr)\n', (3517, 3531), True, 'import tensorflow as tf\n'), ((416, 435), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (430, 435), True, 'import tensorflow as tf\n'), ((448, 479), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean"""', 'mean'], {}), "('mean', mean)\n", (465, 
479), True, 'import tensorflow as tf\n'), ((606, 641), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stddev"""', 'stddev'], {}), "('stddev', stddev)\n", (623, 641), True, 'import tensorflow as tf\n'), ((768, 806), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'var'], {}), "('histogram', var)\n", (788, 806), True, 'import tensorflow as tf\n'), ((2652, 2706), 'tensorflow.concat', 'tf.concat', (['([self_layer] * self._hps.vocab_size)'], {'axis': '(1)'}), '([self_layer] * self._hps.vocab_size, axis=1)\n', (2661, 2706), True, 'import tensorflow as tf\n'), ((3302, 3360), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'self._hps.max_grad_norm'], {}), '(gradients, self._hps.max_grad_norm)\n', (3324, 3360), True, 'import tensorflow as tf\n'), ((5075, 5126), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (5086, 5126), True, 'import tensorflow as tf\n'), ((5303, 5325), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5323, 5325), True, 'import tensorflow as tf\n'), ((6173, 6211), 'numpy.argmax', 'np.argmax', (["output['estimates']"], {'axis': '(1)'}), "(output['estimates'], axis=1)\n", (6182, 6211), True, 'import numpy as np\n'), ((497, 520), 'tensorflow.name_scope', 'tf.name_scope', (['"""stddev"""'], {}), "('stddev')\n", (510, 520), True, 'import tensorflow as tf\n'), ((679, 697), 'tensorflow.reduce_max', 'tf.reduce_max', (['var'], {}), '(var)\n', (692, 697), True, 'import tensorflow as tf\n'), ((736, 754), 'tensorflow.reduce_min', 'tf.reduce_min', (['var'], {}), '(var)\n', (749, 754), True, 'import tensorflow as tf\n'), ((2518, 2562), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.advantage_layer'], {'axis': '(1)'}), '(self.advantage_layer, axis=1)\n', (2532, 2562), True, 'import tensorflow as tf\n'), ((4364, 4401), 'tensorflow.cast', 'tf.cast', (['self._train_step', 
'tf.float32'], {}), '(self._train_step, tf.float32)\n', (4371, 4401), True, 'import tensorflow as tf\n'), ((570, 591), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (579, 591), True, 'import tensorflow as tf\n')] |
from pdb import set_trace as br
import csv
import numpy as np
# Day-6 "lanternfish" simulation: a fish is fully described by its spawn
# timer (0-8), so the whole population reduces to nine bucket counts.
ages = np.array([])
with open('input.txt') as f:
    c = csv.reader(f, delimiter=' ', skipinitialspace=True)
    for row in c:
        # single comma-separated line of initial timers, e.g. "3,4,3,1,2"
        ages = [int(age) for age in row[0].split(',')]
ages = np.array(ages)
# populations[i] = number of fish whose timer currently reads i
populations = [len(ages[ages == i]) for i in range(9)]
print(populations)
# One day: every timer decreases by one; each timer-0 fish resets to 6 and
# spawns a newborn with timer 8.  Rotating the bucket list expresses the
# original hand-unrolled nine-assignment shift in three lines.
for _ in range(256):
    spawning = populations.pop(0)   # fish whose timer hit zero today
    populations[6] += spawning      # parents restart at timer 6
    populations.append(spawning)    # newborns enter at timer 8
print(sum(populations))
"numpy.array",
"csv.reader"
] | [((68, 80), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (76, 80), True, 'import numpy as np\n'), ((270, 284), 'numpy.array', 'np.array', (['ages'], {}), '(ages)\n', (278, 284), True, 'import numpy as np\n'), ((118, 169), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""" """', 'skipinitialspace': '(True)'}), "(f, delimiter=' ', skipinitialspace=True)\n", (128, 169), False, 'import csv\n')] |
# -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
from tqdm import tqdm
import numpy as np
from os import remove, path
from tpot import TPOTClassifier, TPOTRegressor
from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name
from tpot.operator_utils import TPOTOperatorClassFactory
from tpot.config.classifier import classifier_config_dict
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from deap import creator
from nose.tools import assert_raises, assert_equal
# Operator keys under test: one operator with a callable hyperparameter
# (SelectPercentile) and one wrapping a BaseEstimator (SelectFromModel).
test_operator_key_1 = 'sklearn.feature_selection.SelectPercentile'
test_operator_key_2 = 'sklearn.feature_selection.SelectFromModel'
TPOTSelectPercentile, TPOTSelectPercentile_args = TPOTOperatorClassFactory(
    test_operator_key_1,
    classifier_config_dict[test_operator_key_1]
)
TPOTSelectFromModel, TPOTSelectFromModel_args = TPOTOperatorClassFactory(
    test_operator_key_2,
    classifier_config_dict[test_operator_key_2]
)
# Shared fixtures: the digits dataset plus pre-initialised classifier and
# regressor instances reused by the tests below.
mnist_data = load_digits()
training_features, testing_features, training_target, testing_target = \
    train_test_split(mnist_data.data.astype(np.float64), mnist_data.target.astype(np.float64), random_state=42)
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
tpot_obj_reg = TPOTRegressor()
tpot_obj_reg._fit_init()
def test_export_random_ind():
    """Assert that the TPOTClassifier can generate the same pipeline export with random seed of 39."""
    tpot_obj = TPOTClassifier(random_state=39)
    tpot_obj._fit_init()
    # a disabled progress bar is still required by internals touched during export
    tpot_obj._pbar = tqdm(total=1, disable=True)
    pipeline = tpot_obj._toolbox.individual()
    expected_code = """import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=39)
exported_pipeline = make_pipeline(
    SelectPercentile(score_func=f_classif, percentile=65),
    DecisionTreeClassifier(criterion="gini", max_depth=7, min_samples_leaf=4, min_samples_split=18)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    # with a fixed seed the export must be byte-for-byte reproducible
    assert expected_code == export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset, random_state=tpot_obj.random_state)
def test_export():
    """Assert that TPOT's export function throws a RuntimeError when no optimized pipeline exists."""
    assert_raises(RuntimeError, tpot_obj.export, "test_export.py")
    pipeline_string = (
        'KNeighborsClassifier(CombineDFs('
        'DecisionTreeClassifier(input_matrix, DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8,DecisionTreeClassifier__min_samples_leaf=5,'
        'DecisionTreeClassifier__min_samples_split=5), ZeroCount(input_matrix))'
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1,KNeighborsClassifier__weights=uniform'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    tpot_obj._optimized_pipeline = pipeline
    tpot_obj.export("test_export.py")
    # BUG FIX: previously the exported file was only removed after a passing
    # assertion, so a failure left test_export.py behind and could affect
    # later runs.  Cleanup now always happens.
    try:
        assert path.isfile("test_export.py")
    finally:
        remove("test_export.py")  # clean up exported file
def test_generate_pipeline_code():
    """Assert that generate_pipeline_code() returns the correct code given a specific pipeline."""
    tpot_obj._fit_init()
    # Nested-list pipeline form: [operator_name, <input or sub-pipeline(s)>, *hyperparameters]
    pipeline = [
        'KNeighborsClassifier',
        [
            'CombineDFs',
            [
                'GradientBoostingClassifier',
                'input_matrix',
                38.0,
                5,
                5,
                5,
                0.05,
                0.5],
            [
                'GaussianNB',
                [
                    'ZeroCount',
                    'input_matrix'
                ]
            ]
        ],
        18,
        'uniform',
        2
    ]
    expected_code = """make_pipeline(
    make_union(
        StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)),
        StackingEstimator(estimator=make_pipeline(
            ZeroCount(),
            GaussianNB()
        ))
    ),
    KNeighborsClassifier(n_neighbors=18, p="uniform", weights=2)
)"""
    assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators)
def test_generate_pipeline_code_2():
    """Assert that generate_pipeline_code() returns the correct code given a specific pipeline with two CombineDFs."""
    # Nested-list pipeline form with a CombineDFs nested inside another CombineDFs.
    pipeline = [
        'KNeighborsClassifier',
        [
            'CombineDFs',
            [
                'GradientBoostingClassifier',
                'input_matrix',
                38.0,
                5,
                5,
                5,
                0.05,
                0.5],
            [
                'CombineDFs',
                [
                    'MinMaxScaler',
                    'input_matrix'
                ],
                ['ZeroCount',
                    [
                        'MaxAbsScaler',
                        'input_matrix'
                    ]
                ]
            ]
        ],
        18,
        'uniform',
        2
    ]
    expected_code = """make_pipeline(
    make_union(
        StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)),
        make_union(
            MinMaxScaler(),
            make_pipeline(
                MaxAbsScaler(),
                ZeroCount()
            )
        )
    ),
    KNeighborsClassifier(n_neighbors=18, p="uniform", weights=2)
)"""
    assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators)
def test_generate_import_code():
    """Assert that generate_import_code() returns the correct set of dependancies for a given pipeline."""
    # A minimal two-operator pipeline exercises import collection for both steps.
    individual = creator.Individual.from_string('GaussianNB(RobustScaler(input_matrix))', tpot_obj._pset)
    import_code = generate_import_code(individual, tpot_obj.operators)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
"""
    assert import_code == expected_code
def test_generate_import_code_2():
    """Assert that generate_import_code() returns the correct set of dependancies and dependancies are importable."""
    pipeline_string = (
        'KNeighborsClassifier(CombineDFs('
        'DecisionTreeClassifier(input_matrix, DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8,DecisionTreeClassifier__min_samples_leaf=5,'
        'DecisionTreeClassifier__min_samples_split=5), ZeroCount(input_matrix))'
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1,KNeighborsClassifier__weights=uniform'
    )
    individual = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    generated = generate_import_code(individual, tpot_obj.operators)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline, make_union
from sklearn.tree import DecisionTreeClassifier
from tpot.builtins import StackingEstimator, ZeroCount
"""
    # Executing the generated import block must not raise (importability check).
    exec(generated)
    assert generated == expected_code
def test_operators():
    """Assert that the TPOT operators match the output of their sklearn counterparts."""
    # nose-style generator test: yields one check per configured operator
    for operator in tpot_obj.operators:
        check_export.description = (
            "Assert that the TPOT {} operator exports as expected".format(operator.__name__)
        )
        yield check_export, operator, tpot_obj
def check_export(op, tpot_obj):
    """Assert that a TPOT operator exports as a class constructor."""
    rng = np.random.RandomState(42)
    np.random.seed(42)  # also seed the global RNG used by some operators
    # Draw one random value for every hyperparameter slot after the input slot.
    arg_values = [
        rng.choice(tpot_obj._pset.terminals[arg_type]).value
        for arg_type in op.parameter_types()[0][1:]
    ]
    exported = op.export(*arg_values)
    # Export must look like a constructor call: "<OperatorName>(...)"
    assert exported.startswith(op.__name__ + "(") and exported.endswith(")")
def test_export_pipeline():
    """Assert that exported_pipeline() generated a compile source file as expected given a fixed pipeline."""
    # DEAP string form of a CombineDFs pipeline feeding KNeighborsClassifier.
    pipeline_string = (
        'KNeighborsClassifier(CombineDFs('
        'DecisionTreeClassifier(input_matrix, DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8,DecisionTreeClassifier__min_samples_leaf=5,'
        'DecisionTreeClassifier__min_samples_split=5),SelectPercentile(input_matrix, SelectPercentile__percentile=20))'
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1,KNeighborsClassifier__weights=uniform'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline, make_union
from sklearn.tree import DecisionTreeClassifier
from tpot.builtins import StackingEstimator
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=None)
exported_pipeline = make_pipeline(
    make_union(
        StackingEstimator(estimator=DecisionTreeClassifier(criterion="gini", max_depth=8, min_samples_leaf=5, min_samples_split=5)),
        SelectPercentile(score_func=f_classif, percentile=20)
    ),
    KNeighborsClassifier(n_neighbors=10, p=1, weights="uniform")
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    assert expected_code == export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset)
def test_export_pipeline_2():
    """Assert that exported_pipeline() generated a compile source file as expected given a fixed simple pipeline (only one classifier)."""
    # Single-operator pipeline: no make_pipeline wrapper should appear.
    pipeline_string = (
        'KNeighborsClassifier('
        'input_matrix, '
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1, '
        'KNeighborsClassifier__weights=uniform'
        ')'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=None)
exported_pipeline = KNeighborsClassifier(n_neighbors=10, p=1, weights="uniform")
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    assert expected_code == export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset)
def test_export_pipeline_3():
    """Assert that exported_pipeline() generated a compile source file as expected given a fixed simple pipeline with a preprocessor."""
    # Preprocessor (SelectPercentile) feeding a single classifier.
    pipeline_string = (
        'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),'
        'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,'
        'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=None)
exported_pipeline = make_pipeline(
    SelectPercentile(score_func=f_classif, percentile=20),
    DecisionTreeClassifier(criterion="gini", max_depth=8, min_samples_leaf=5, min_samples_split=5)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    assert expected_code == export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset)
def test_export_pipeline_4():
    """Assert that exported_pipeline() generated a compile source file as expected given a fixed simple pipeline with input_matrix in CombineDFs."""
    # A raw input_matrix branch inside CombineDFs must export as
    # FunctionTransformer(copy) in the generated union.
    pipeline_string = (
        'KNeighborsClassifier(CombineDFs('
        'DecisionTreeClassifier(input_matrix, DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8,DecisionTreeClassifier__min_samples_leaf=5,'
        'DecisionTreeClassifier__min_samples_split=5),input_matrix)'
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1,KNeighborsClassifier__weights=uniform'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline, make_union
from sklearn.tree import DecisionTreeClassifier
from tpot.builtins import StackingEstimator
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=None)
exported_pipeline = make_pipeline(
    make_union(
        StackingEstimator(estimator=DecisionTreeClassifier(criterion="gini", max_depth=8, min_samples_leaf=5, min_samples_split=5)),
        FunctionTransformer(copy)
    ),
    KNeighborsClassifier(n_neighbors=10, p=1, weights="uniform")
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    assert expected_code == export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset)
def test_export_pipeline_5():
    """Assert that exported_pipeline() generated a compile source file as expected given a fixed simple pipeline with SelectFromModel."""
    # Regressor pipeline (uses tpot_obj_reg): SelectFromModel wraps an
    # ExtraTreesRegressor sub-estimator in the exported source.
    pipeline_string = (
        'DecisionTreeRegressor(SelectFromModel(input_matrix, '
        'SelectFromModel__ExtraTreesRegressor__max_features=0.05, SelectFromModel__ExtraTreesRegressor__n_estimators=100, '
        'SelectFromModel__threshold=0.05), DecisionTreeRegressor__max_depth=8,'
        'DecisionTreeRegressor__min_samples_leaf=5, DecisionTreeRegressor__min_samples_split=5)'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj_reg._pset)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeRegressor
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=None)
exported_pipeline = make_pipeline(
    SelectFromModel(estimator=ExtraTreesRegressor(max_features=0.05, n_estimators=100), threshold=0.05),
    DecisionTreeRegressor(max_depth=8, min_samples_leaf=5, min_samples_split=5)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    assert expected_code == export_pipeline(pipeline, tpot_obj_reg.operators, tpot_obj_reg._pset)
def test_operator_export():
    """Assert that a TPOT operator can export properly with a callable function as a parameter."""
    # The factory must preserve the configured argument order.
    assert TPOTSelectPercentile_args == list(TPOTSelectPercentile.arg_types)
    exported = TPOTSelectPercentile.export(5)
    assert exported == "SelectPercentile(score_func=f_classif, percentile=5)"
def test_operator_export_2():
    """Assert that a TPOT operator can export properly with a BaseEstimator as a parameter."""
    assert list(TPOTSelectFromModel.arg_types) == TPOTSelectFromModel_args
    export_string = TPOTSelectFromModel.export('gini', 0.10, 100, 0.10)
    expected_string = ("SelectFromModel(estimator=ExtraTreesClassifier(criterion=\"gini\","
                       " max_features=0.1, n_estimators=100), threshold=0.1)")
    # (a leftover debug print(export_string) was removed here; tests should
    # not write to stdout)
    assert export_string == expected_string
def test_get_by_name():
    """Assert that the Operator class returns operators by name appropriately."""
    # Both factory-built operators must be retrievable by their short names.
    percentile_op = get_by_name("SelectPercentile", tpot_obj.operators)
    assert percentile_op.__class__ == TPOTSelectPercentile.__class__
    from_model_op = get_by_name("SelectFromModel", tpot_obj.operators)
    assert from_model_op.__class__ == TPOTSelectFromModel.__class__
def test_get_by_name_2():
    """Assert that get_by_name raises TypeError with a incorrect operator name."""
    assert_raises(TypeError, get_by_name, "RandomForestRegressor", tpot_obj.operators)
    # use correct name: the lookup itself must succeed without raising
    # (the result was previously bound to an unused local variable)
    get_by_name("RandomForestClassifier", tpot_obj.operators)
def test_get_by_name_3():
    """Assert that get_by_name raises ValueError with duplicate operators in operator dictionary."""
    # no duplicate
    get_by_name("SelectPercentile", tpot_obj.operators)
    # BUG FIX: previously the duplicate was appended to the shared
    # module-level tpot_obj.operators list and never removed, polluting every
    # test that runs afterwards.  Build the duplicated list as a copy instead.
    operators_with_duplicate = tpot_obj.operators + [TPOTSelectPercentile]
    assert_raises(ValueError, get_by_name, "SelectPercentile", operators_with_duplicate)
def test_indent():
    """Assert that indenting a multiline string by 4 spaces prepends 4 spaces before each new line."""
    # The literal content starts at column 0 on purpose so the exact
    # whitespace passed to / expected from _indent() is unambiguous.
    multiline_string = """test
test1
test2
test3"""
    indented_multiline_string = """    test
    test1
    test2
    test3"""
    assert indented_multiline_string == _indent(multiline_string, 4)
def test_pipeline_score_save():
    """Assert that the TPOTClassifier can generate a scored pipeline export correctly."""
    tpot_obj = TPOTClassifier()
    tpot_obj._fit_init()
    tpot_obj._pbar = tqdm(total=1, disable=True)
    pipeline_string = (
        'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),'
        'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,'
        'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=None)
# Average CV score on the training set was:0.929813743
exported_pipeline = make_pipeline(
    SelectPercentile(score_func=f_classif, percentile=20),
    DecisionTreeClassifier(criterion="gini", max_depth=8, min_samples_leaf=5, min_samples_split=5)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    # pipeline_score must be embedded as the "Average CV score" comment above
    assert_equal(expected_code, export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset, pipeline_score=0.929813743))
def test_imputer_in_export():
    """Assert that TPOT exports a pipeline with an imputation step if imputation was used in fit()."""
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=1,
        offspring_size=2,
        generations=1,
        verbosity=0,
        config_dict='TPOT light'
    )
    # Inject a NaN so fit() is forced down the imputation code path.
    features_with_nan = np.copy(training_features)
    features_with_nan[0][0] = float('nan')
    tpot_obj.fit(features_with_nan, training_target)
    # use fixed pipeline since the random.seed() performs differently in python 2.* and 3.*
    pipeline_string = (
        'KNeighborsClassifier('
        'input_matrix, '
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1, '
        'KNeighborsClassifier__weights=uniform'
        ')'
    )
    tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    export_code = export_pipeline(tpot_obj._optimized_pipeline, tpot_obj.operators, tpot_obj._pset, tpot_obj._imputed)
    expected_code = """import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import Imputer
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \\
            train_test_split(features, tpot_data['target'].values, random_state=None)
imputer = Imputer(strategy="median")
imputer.fit(training_features)
training_features = imputer.transform(training_features)
testing_features = imputer.transform(testing_features)
exported_pipeline = KNeighborsClassifier(n_neighbors=10, p=1, weights="uniform")
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
"""
    assert_equal(export_code, expected_code)
| [
"tpot.export_utils.get_by_name",
"tpot.operator_utils.TPOTOperatorClassFactory",
"numpy.copy",
"tpot.export_utils.export_pipeline",
"tqdm.tqdm",
"deap.creator.Individual.from_string",
"sklearn.datasets.load_digits",
"tpot.export_utils.generate_import_code",
"nose.tools.assert_raises",
"os.path.isf... | [((1574, 1669), 'tpot.operator_utils.TPOTOperatorClassFactory', 'TPOTOperatorClassFactory', (['test_operator_key_1', 'classifier_config_dict[test_operator_key_1]'], {}), '(test_operator_key_1, classifier_config_dict[\n test_operator_key_1])\n', (1598, 1669), False, 'from tpot.operator_utils import TPOTOperatorClassFactory\n'), ((1724, 1819), 'tpot.operator_utils.TPOTOperatorClassFactory', 'TPOTOperatorClassFactory', (['test_operator_key_2', 'classifier_config_dict[test_operator_key_2]'], {}), '(test_operator_key_2, classifier_config_dict[\n test_operator_key_2])\n', (1748, 1819), False, 'from tpot.operator_utils import TPOTOperatorClassFactory\n'), ((1839, 1852), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (1850, 1852), False, 'from sklearn.datasets import load_digits\n'), ((2050, 2066), 'tpot.TPOTClassifier', 'TPOTClassifier', ([], {}), '()\n', (2064, 2066), False, 'from tpot import TPOTClassifier, TPOTRegressor\n'), ((2104, 2119), 'tpot.TPOTRegressor', 'TPOTRegressor', ([], {}), '()\n', (2117, 2119), False, 'from tpot import TPOTClassifier, TPOTRegressor\n'), ((2294, 2325), 'tpot.TPOTClassifier', 'TPOTClassifier', ([], {'random_state': '(39)'}), '(random_state=39)\n', (2308, 2325), False, 'from tpot import TPOTClassifier, TPOTRegressor\n'), ((2372, 2399), 'tqdm.tqdm', 'tqdm', ([], {'total': '(1)', 'disable': '(True)'}), '(total=1, disable=True)\n', (2376, 2399), False, 'from tqdm import tqdm\n'), ((3652, 3714), 'nose.tools.assert_raises', 'assert_raises', (['RuntimeError', 'tpot_obj.export', '"""test_export.py"""'], {}), "(RuntimeError, tpot_obj.export, 'test_export.py')\n", (3665, 3714), False, 'from nose.tools import assert_raises, assert_equal\n'), ((4186, 4249), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (4216, 4249), False, 'from deap import creator\n'), ((4343, 4372), 'os.path.isfile', 'path.isfile', 
(['"""test_export.py"""'], {}), "('test_export.py')\n", (4354, 4372), False, 'from os import remove, path\n'), ((4377, 4401), 'os.remove', 'remove', (['"""test_export.py"""'], {}), "('test_export.py')\n", (4383, 4401), False, 'from os import remove, path\n'), ((7171, 7263), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['"""GaussianNB(RobustScaler(input_matrix))"""', 'tpot_obj._pset'], {}), "('GaussianNB(RobustScaler(input_matrix))',\n tpot_obj._pset)\n", (7201, 7263), False, 'from deap import creator\n'), ((8219, 8282), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (8249, 8282), False, 'from deap import creator\n'), ((8301, 8351), 'tpot.export_utils.generate_import_code', 'generate_import_code', (['pipeline', 'tpot_obj.operators'], {}), '(pipeline, tpot_obj.operators)\n', (8321, 8351), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((9222, 9247), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (9243, 9247), True, 'import numpy as np\n'), ((9252, 9270), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (9266, 9270), True, 'import numpy as np\n'), ((10180, 10243), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (10210, 10243), False, 'from deap import creator\n'), ((11941, 12004), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (11971, 12004), False, 'from deap import creator\n'), ((13328, 13391), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (13358, 13391), False, 'from deap import 
creator\n'), ((15078, 15141), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (15108, 15141), False, 'from deap import creator\n'), ((16979, 17046), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj_reg._pset'], {}), '(pipeline_string, tpot_obj_reg._pset)\n', (17009, 17046), False, 'from deap import creator\n'), ((19438, 19525), 'nose.tools.assert_raises', 'assert_raises', (['TypeError', 'get_by_name', '"""RandomForestRegressor"""', 'tpot_obj.operators'], {}), "(TypeError, get_by_name, 'RandomForestRegressor', tpot_obj.\n operators)\n", (19451, 19525), False, 'from nose.tools import assert_raises, assert_equal\n'), ((19563, 19620), 'tpot.export_utils.get_by_name', 'get_by_name', (['"""RandomForestClassifier"""', 'tpot_obj.operators'], {}), "('RandomForestClassifier', tpot_obj.operators)\n", (19574, 19620), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((19789, 19840), 'tpot.export_utils.get_by_name', 'get_by_name', (['"""SelectPercentile"""', 'tpot_obj.operators'], {}), "('SelectPercentile', tpot_obj.operators)\n", (19800, 19840), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((19957, 20035), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'get_by_name', '"""SelectPercentile"""', 'tpot_obj.operators'], {}), "(ValueError, get_by_name, 'SelectPercentile', tpot_obj.operators)\n", (19970, 20035), False, 'from nose.tools import assert_raises, assert_equal\n'), ((20499, 20515), 'tpot.TPOTClassifier', 'TPOTClassifier', ([], {}), '()\n', (20513, 20515), False, 'from tpot import TPOTClassifier, TPOTRegressor\n'), ((20562, 20589), 'tqdm.tqdm', 'tqdm', ([], {'total': '(1)', 'disable': '(True)'}), '(total=1, disable=True)\n', (20566, 20589), 
False, 'from tqdm import tqdm\n'), ((20919, 20982), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (20949, 20982), False, 'from deap import creator\n'), ((22265, 22391), 'tpot.TPOTClassifier', 'TPOTClassifier', ([], {'random_state': '(42)', 'population_size': '(1)', 'offspring_size': '(2)', 'generations': '(1)', 'verbosity': '(0)', 'config_dict': '"""TPOT light"""'}), "(random_state=42, population_size=1, offspring_size=2,\n generations=1, verbosity=0, config_dict='TPOT light')\n", (22279, 22391), False, 'from tpot import TPOTClassifier, TPOTRegressor\n'), ((22466, 22492), 'numpy.copy', 'np.copy', (['training_features'], {}), '(training_features)\n', (22473, 22492), True, 'import numpy as np\n'), ((22951, 23014), 'deap.creator.Individual.from_string', 'creator.Individual.from_string', (['pipeline_string', 'tpot_obj._pset'], {}), '(pipeline_string, tpot_obj._pset)\n', (22981, 23014), False, 'from deap import creator\n'), ((23034, 23139), 'tpot.export_utils.export_pipeline', 'export_pipeline', (['tpot_obj._optimized_pipeline', 'tpot_obj.operators', 'tpot_obj._pset', 'tpot_obj._imputed'], {}), '(tpot_obj._optimized_pipeline, tpot_obj.operators, tpot_obj.\n _pset, tpot_obj._imputed)\n', (23049, 23139), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((24098, 24138), 'nose.tools.assert_equal', 'assert_equal', (['export_code', 'expected_code'], {}), '(export_code, expected_code)\n', (24110, 24138), False, 'from nose.tools import assert_raises, assert_equal\n'), ((3427, 3529), 'tpot.export_utils.export_pipeline', 'export_pipeline', (['pipeline', 'tpot_obj.operators', 'tpot_obj._pset'], {'random_state': 'tpot_obj.random_state'}), '(pipeline, tpot_obj.operators, tpot_obj._pset, random_state=\n tpot_obj.random_state)\n', (3442, 3529), False, 'from tpot.export_utils import export_pipeline, 
generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((5557, 5609), 'tpot.export_utils.generate_pipeline_code', 'generate_pipeline_code', (['pipeline', 'tpot_obj.operators'], {}), '(pipeline, tpot_obj.operators)\n', (5579, 5609), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((6960, 7012), 'tpot.export_utils.generate_pipeline_code', 'generate_pipeline_code', (['pipeline', 'tpot_obj.operators'], {}), '(pipeline, tpot_obj.operators)\n', (6982, 7012), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((7541, 7591), 'tpot.export_utils.generate_import_code', 'generate_import_code', (['pipeline', 'tpot_obj.operators'], {}), '(pipeline, tpot_obj.operators)\n', (7561, 7591), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((11458, 11519), 'tpot.export_utils.export_pipeline', 'export_pipeline', (['pipeline', 'tpot_obj.operators', 'tpot_obj._pset'], {}), '(pipeline, tpot_obj.operators, tpot_obj._pset)\n', (11473, 11519), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((12767, 12828), 'tpot.export_utils.export_pipeline', 'export_pipeline', (['pipeline', 'tpot_obj.operators', 'tpot_obj._pset'], {}), '(pipeline, tpot_obj.operators, tpot_obj._pset)\n', (12782, 12828), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((14375, 14436), 'tpot.export_utils.export_pipeline', 'export_pipeline', (['pipeline', 'tpot_obj.operators', 'tpot_obj._pset'], {}), '(pipeline, tpot_obj.operators, tpot_obj._pset)\n', (14390, 14436), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((16338, 16399), 
'tpot.export_utils.export_pipeline', 'export_pipeline', (['pipeline', 'tpot_obj.operators', 'tpot_obj._pset'], {}), '(pipeline, tpot_obj.operators, tpot_obj._pset)\n', (16353, 16399), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((18092, 18161), 'tpot.export_utils.export_pipeline', 'export_pipeline', (['pipeline', 'tpot_obj_reg.operators', 'tpot_obj_reg._pset'], {}), '(pipeline, tpot_obj_reg.operators, tpot_obj_reg._pset)\n', (18107, 18161), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((20331, 20359), 'tpot.export_utils._indent', '_indent', (['multiline_string', '(4)'], {}), '(multiline_string, 4)\n', (20338, 20359), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((22024, 22117), 'tpot.export_utils.export_pipeline', 'export_pipeline', (['pipeline', 'tpot_obj.operators', 'tpot_obj._pset'], {'pipeline_score': '(0.929813743)'}), '(pipeline, tpot_obj.operators, tpot_obj._pset,\n pipeline_score=0.929813743)\n', (22039, 22117), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((19121, 19172), 'tpot.export_utils.get_by_name', 'get_by_name', (['"""SelectPercentile"""', 'tpot_obj.operators'], {}), "('SelectPercentile', tpot_obj.operators)\n", (19132, 19172), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n'), ((19228, 19278), 'tpot.export_utils.get_by_name', 'get_by_name', (['"""SelectFromModel"""', 'tpot_obj.operators'], {}), "('SelectFromModel', tpot_obj.operators)\n", (19239, 19278), False, 'from tpot.export_utils import export_pipeline, generate_import_code, _indent, generate_pipeline_code, get_by_name\n')] |
import numpy as np
import tensorflow as tf
from read_data import DataSet
from mytensorflow import padded_reshape
def argmax(x):
    """Return the position of the largest entry of *x* as a coordinate tuple."""
    flat_index = np.argmax(x)
    return np.unravel_index(flat_index, x.shape)
class Evaluation(object):
    """Container for raw model predictions on one data split.

    Instances support ``sum(...)`` (via ``__add__``/``__radd__``) so that
    per-batch evaluations can be merged into a single object.
    """
    def __init__(self, data_type, global_step, idxs, yp, tensor_dict=None):
        """
        Args:
            data_type: Name of the data split (e.g. 'train', 'dev').
            global_step: Training step at which predictions were produced.
            idxs: Example indices corresponding to each prediction.
            yp: Model outputs, one entry per example.
            tensor_dict: Optional mapping of tensor name -> array-like values;
                values are stored as plain lists.
        """
        self.data_type = data_type
        self.global_step = global_step
        self.idxs = idxs
        self.yp = yp
        self.num_examples = len(yp)
        self.tensor_dict = None
        self.dict = {'data_type': data_type,
                     'global_step': global_step,
                     'yp': yp,
                     'idxs': idxs,
                     'num_examples': self.num_examples}
        if tensor_dict is not None:
            # Values may already be plain lists (e.g. when re-constructing the
            # object inside __add__); calling .tolist() unconditionally crashed
            # in that case, so only convert objects that provide it.
            self.tensor_dict = {key: val.tolist() if hasattr(val, 'tolist') else val
                                for key, val in tensor_dict.items()}
            for key, val in self.tensor_dict.items():
                self.dict[key] = val
        self.summaries = None

    def __repr__(self):
        return "{} step {}".format(self.data_type, self.global_step)

    def __add__(self, other):
        # Support sum(), which starts its accumulation from 0.
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_yp = self.yp + other.yp
        new_idxs = self.idxs + other.idxs
        new_tensor_dict = None
        if self.tensor_dict is not None:
            # Values are lists here, so '+' concatenates per key.
            new_tensor_dict = {key: val + other.tensor_dict[key] for key, val in self.tensor_dict.items()}
        return Evaluation(self.data_type, self.global_step, new_idxs, new_yp, tensor_dict=new_tensor_dict)

    def __radd__(self, other):
        return self.__add__(other)
class LabeledEvaluation(Evaluation):
    """Evaluation that additionally carries the ground-truth labels ``y``."""
    def __init__(self, data_type, global_step, idxs, yp, y, tensor_dict=None):
        super(LabeledEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
        self.y = y
        self.dict['y'] = y

    def __add__(self, other):
        # Support sum(), which starts its accumulation from 0.
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_yp = self.yp + other.yp
        new_y = self.y + other.y
        new_idxs = self.idxs + other.idxs
        # Fix: previously new_tensor_dict was only assigned inside the
        # conditional below, so merging evaluations without a tensor_dict
        # raised NameError at the return statement.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return LabeledEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, tensor_dict=new_tensor_dict)
class AccuracyEvaluation(LabeledEvaluation):
    """Labeled evaluation with per-example correctness, accuracy and loss,
    plus TensorBoard summaries for both scalars."""
    def __init__(self, data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=None):
        super(AccuracyEvaluation, self).__init__(data_type, global_step, idxs, yp, y, tensor_dict=tensor_dict)
        self.loss = loss
        self.correct = correct
        # Fraction of correctly classified examples (correct is a 0/1 or bool list).
        self.acc = sum(correct) / len(correct)
        self.dict['loss'] = loss
        self.dict['correct'] = correct
        self.dict['acc'] = self.acc
        loss_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/loss'.format(data_type), simple_value=self.loss)])
        acc_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/acc'.format(data_type), simple_value=self.acc)])
        self.summaries = [loss_summary, acc_summary]

    def __repr__(self):
        return "{} step {}: accuracy={}, loss={}".format(self.data_type, self.global_step, self.acc, self.loss)

    def __add__(self, other):
        # Support sum(), which starts its accumulation from 0.
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        new_y = self.y + other.y
        new_correct = self.correct + other.correct
        # Example-count-weighted average of the two batch losses.
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
        # Fix: previously new_tensor_dict was only assigned inside the
        # conditional below, so merging evaluations without a tensor_dict
        # raised NameError at the return statement.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return AccuracyEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, new_correct, new_loss, tensor_dict=new_tensor_dict)
class Evaluator(object):
    """Runs a model over batches in a TF session and wraps the raw logits
    in Evaluation objects."""
    def __init__(self, config, model, tensor_dict=None):
        self.config = config
        self.model = model
        self.global_step = model.global_step
        self.logits = model.logits
        # Extra tensors to fetch alongside the logits on every run.
        self.tensor_dict = {} if tensor_dict is None else tensor_dict

    def get_evaluation(self, sess, batch):
        """Evaluate one (idxs, data_set) batch and return an Evaluation."""
        idxs, data_set = batch
        feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
        global_step, logits, vals = sess.run([self.global_step, self.logits, list(self.tensor_dict.values())], feed_dict=feed_dict)
        # The feed may be padded to a fixed batch size; keep only real examples.
        logits = logits[:data_set.num_examples]
        tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
        e = Evaluation(data_set.data_type, int(global_step), idxs, logits.tolist(), tensor_dict=tensor_dict)
        return e

    def get_evaluation_from_batches(self, sess, batches):
        """Merge per-batch evaluations into one via Evaluation.__add__."""
        e = sum(self.get_evaluation(sess, batch) for batch in batches)
        return e
class LabeledEvaluator(Evaluator):
    """Evaluator that also extracts the label tensor ``z`` from the feed and
    returns LabeledEvaluation objects."""
    def __init__(self, config, model, tensor_dict=None):
        super(LabeledEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.z = model.z

    def get_evaluation(self, sess, batch):
        """Evaluate one (idxs, data_set) batch and return a LabeledEvaluation."""
        idxs, data_set = batch
        feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
        global_step, logits, vals = sess.run([self.global_step, self.logits, list(self.tensor_dict.values())], feed_dict=feed_dict)
        # Drop any padding examples beyond the real batch size.
        logits = logits[:data_set.num_examples]
        # Ground-truth labels are read back from the feed dict keyed by the placeholder.
        z = feed_dict[self.z]
        tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
        e = LabeledEvaluation(data_set.data_type, int(global_step), idxs, logits.tolist(), z.tolist(), tensor_dict=tensor_dict)
        return e
class AccuracyEvaluator(LabeledEvaluator):
    """Evaluator that additionally fetches the loss and computes per-example
    argmax correctness against the dataset's 'z_list' labels."""
    def __init__(self, config, model, tensor_dict=None):
        super(AccuracyEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.loss = model.loss

    def get_evaluation(self, sess, batch):
        """Evaluate one batch and return an AccuracyEvaluation."""
        idxs, data_set = self._split_batch(batch)
        assert isinstance(data_set, DataSet)
        feed_dict = self.model.get_feed_dict(data_set, False)
        global_step, logits, loss, vals = sess.run([self.global_step, self.logits, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
        # Ground truth comes from the dataset itself rather than the feed here.
        z = data_set.data['z_list']
        logits = logits[:data_set.num_examples]
        correct = [self.__class__.compare(yi, ypi) for yi, ypi in zip(z, logits)]
        tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
        e = AccuracyEvaluation(data_set.data_type, int(global_step), idxs, logits.tolist(), z, correct, float(loss), tensor_dict=tensor_dict)
        return e

    def _split_batch(self, batch):
        # Hook for subclasses (e.g. multi-GPU) to merge several sub-batches.
        return batch

    def _get_feed_dict(self, batch):
        return self.model.get_feed_dict(batch[1], False)

    @staticmethod
    def compare(yi, ypi):
        """Return True iff prediction and target share the same argmax index."""
        if int(np.argmax(yi)) == int(np.argmax(ypi)):
            return True
        return False
class MultiGPUEvaluator(AccuracyEvaluator):
    """Accuracy evaluator over several model replicas (one per GPU);
    logits are concatenated and losses averaged across replicas."""
    def __init__(self, config, models, tensor_dict=None):
        super(MultiGPUEvaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
        self.models = models
        with tf.name_scope("eval_concat"):
            self.logits = tf.concat(axis=0, values=[model.logits for model in models])
            self.loss = tf.add_n([model.loss for model in models])/len(models)

    def _split_batch(self, batches):
        """Merge the per-replica (idxs, data_set) batches into a single pair."""
        idxs_list, data_sets = zip(*batches)
        # idxs are tuples, so sum() with a tuple start concatenates them.
        idxs = sum(idxs_list, ())
        # DataSet presumably supports '+' for merging — TODO confirm in read_data.
        data_set = sum(data_sets, data_sets[0].get_empty())
        return idxs, data_set

    def _get_feed_dict(self, batches):
        """Combine one feed dict per replica into a single run feed."""
        feed_dict = {}
        for model, (_, data_set) in zip(self.models, batches):
            feed_dict.update(model.get_feed_dict(data_set, False))
        return feed_dict
| [
"numpy.argmax",
"tensorflow.concat",
"tensorflow.add_n",
"tensorflow.name_scope",
"numpy.concatenate"
] | [((7385, 7413), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval_concat"""'], {}), "('eval_concat')\n", (7398, 7413), True, 'import tensorflow as tf\n'), ((7441, 7501), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': '[model.logits for model in models]'}), '(axis=0, values=[model.logits for model in models])\n', (7450, 7501), True, 'import tensorflow as tf\n'), ((2275, 2328), 'numpy.concatenate', 'np.concatenate', (['(val, other.tensor_dict[key])'], {'axis': '(0)'}), '((val, other.tensor_dict[key]), axis=0)\n', (2289, 2328), True, 'import numpy as np\n'), ((3899, 3952), 'numpy.concatenate', 'np.concatenate', (['(val, other.tensor_dict[key])'], {'axis': '(0)'}), '((val, other.tensor_dict[key]), axis=0)\n', (3913, 3952), True, 'import numpy as np\n'), ((7063, 7076), 'numpy.argmax', 'np.argmax', (['yi'], {}), '(yi)\n', (7072, 7076), True, 'import numpy as np\n'), ((7085, 7099), 'numpy.argmax', 'np.argmax', (['ypi'], {}), '(ypi)\n', (7094, 7099), True, 'import numpy as np\n'), ((7526, 7568), 'tensorflow.add_n', 'tf.add_n', (['[model.loss for model in models]'], {}), '([model.loss for model in models])\n', (7534, 7568), True, 'import tensorflow as tf\n')] |
import numpy as np
import h5py
import illustris_python as il
import matplotlib.pyplot as plt
def SelectDisk(snap_num):
    '''
    Select disk galaxies and return their subhalo IDs.

    Args:
        snap_num: Snapshot number, e.g. snap_num = 99 (z=0).

    Returns:
        Array of subfind IDs of disk galaxies with >= 40000 stellar
        particles, circularity fraction > 0.2 and flatness < 0.7.
    '''
    #select halo stellar particles > 40000
    with h5py.File('/Raid0/zhouzb/TNGdata/offsets_0%d.hdf5'%snap_num,'r') as offset:
        # Column 4 = stellar-particle offset of each subhalo.
        haloSBT = (np.array(offset['Subhalo']['SnapByType']))[:,4]
    #Total halo number, it also be an index of haloID
    ids = np.arange(len(haloSBT))
    # Per-subhalo stellar particle counts = difference of consecutive offsets;
    # NOTE(review): the last entry is padded with halolen.max() — confirm this
    # is the intended sentinel for the final subhalo.
    halolen = haloSBT[1:]
    halolen = np.append(halolen, halolen.max()) - haloSBT
    with h5py.File('/Raid0/zhouzb/TNGdata/stellar_circs.hdf5','r') as cir:
        haloID = np.array(cir['Snapshot_%d'%snap_num]['SubfindID'])
        cir07frac = np.array(cir['Snapshot_%d'%snap_num]['CircAbove07Frac'])
        MassTensor = np.array(cir['Snapshot_%d'%snap_num]['MassTensorEigenVals'])
    #circularity parameter ϵ > 0.2
    cir_mask = cir07frac > 0.2
    #flatness of the galaxy is defined as the ratio M1=(M2M3)**0.5 , disk galaxy's flatness < 0.7
    MassTensor = MassTensor[cir_mask]
    haloID = haloID[cir_mask]
    flat = MassTensor[:,0]/(MassTensor[:,1]*MassTensor[:,2])**0.5
    flat_mask = flat < 0.7
    haloID = haloID[flat_mask]
    # Keep only well-resolved galaxies (>= 40000 stellar particles).
    mas_mask = halolen[haloID] >= 40000
    haloID = haloID[mas_mask]
    return haloID
# Scan all z=0 disk galaxies and classify them by maximum bar strength A2:
# A2 > 0.2 -> barred, A2 > 0.4 -> strongly barred.
snap_num = 99
ids = SelectDisk(99)
# Column 4 = stellar half-mass radius of each subhalo.
StellarHalfmassRads = (il.groupcat.loadSubhalos('/Raid1/Illustris/TNG-100',snap_num,'SubhaloHalfmassRadType'))[:,4]
new_bar02 = 0
new_bar04 = 0
# [haloIDs, A2 maxima] for barred / strongly barred galaxies.
new_bar = [ [], [] ]
new_Strong = [ [], [] ]
new_disk = []
did = 0
for haloID in ids:
    did += 1
    if did % 10 ==0:
        print('%d halo finished'%did)
    # Precomputed [A2 profile, radius list] for this halo.
    tmp = np.load('/Raid0/zhouzb/TNG_a2/snap_%d/%d.npy'%(snap_num ,haloID),'r')
    A2 = np.array(tmp[0])
    rlist = np.array(tmp[1])
    half_r = StellarHalfmassRads[haloID]
    # Ignore the innermost stellar particles; NOTE(review): int(len/100)
    # drops the first 1%, while the original comment said 0.1% — confirm.
    A2 = A2[int(len(A2) / 100):]
    rlist = rlist[int(len(rlist) / 100):]
    new_disk.append(A2.max())
    if A2.max() > 0.2:
        new_bar[0].append(haloID)
        new_bar[1].append(A2.max())
        new_bar02 += 1
    if A2.max() > 0.4:
        new_Strong[0].append(haloID)
        new_Strong[1].append(A2.max())
        new_bar04 += 1
print('Disk halo number: %d'%len(ids))
print('A2 > 0.2 number: '+str(new_bar02))
print('A2 > 0.4 number: '+str(new_bar04))
| [
"illustris_python.groupcat.loadSubhalos",
"numpy.array",
"numpy.load",
"h5py.File"
] | [((1468, 1560), 'illustris_python.groupcat.loadSubhalos', 'il.groupcat.loadSubhalos', (['"""/Raid1/Illustris/TNG-100"""', 'snap_num', '"""SubhaloHalfmassRadType"""'], {}), "('/Raid1/Illustris/TNG-100', snap_num,\n 'SubhaloHalfmassRadType')\n", (1492, 1560), True, 'import illustris_python as il\n'), ((1772, 1844), 'numpy.load', 'np.load', (["('/Raid0/zhouzb/TNG_a2/snap_%d/%d.npy' % (snap_num, haloID))", '"""r"""'], {}), "('/Raid0/zhouzb/TNG_a2/snap_%d/%d.npy' % (snap_num, haloID), 'r')\n", (1779, 1844), True, 'import numpy as np\n'), ((1852, 1868), 'numpy.array', 'np.array', (['tmp[0]'], {}), '(tmp[0])\n', (1860, 1868), True, 'import numpy as np\n'), ((1882, 1898), 'numpy.array', 'np.array', (['tmp[1]'], {}), '(tmp[1])\n', (1890, 1898), True, 'import numpy as np\n'), ((300, 367), 'h5py.File', 'h5py.File', (["('/Raid0/zhouzb/TNGdata/offsets_0%d.hdf5' % snap_num)", '"""r"""'], {}), "('/Raid0/zhouzb/TNGdata/offsets_0%d.hdf5' % snap_num, 'r')\n", (309, 367), False, 'import h5py\n'), ((640, 698), 'h5py.File', 'h5py.File', (['"""/Raid0/zhouzb/TNGdata/stellar_circs.hdf5"""', '"""r"""'], {}), "('/Raid0/zhouzb/TNGdata/stellar_circs.hdf5', 'r')\n", (649, 698), False, 'import h5py\n'), ((724, 776), 'numpy.array', 'np.array', (["cir['Snapshot_%d' % snap_num]['SubfindID']"], {}), "(cir['Snapshot_%d' % snap_num]['SubfindID'])\n", (732, 776), True, 'import numpy as np\n'), ((796, 854), 'numpy.array', 'np.array', (["cir['Snapshot_%d' % snap_num]['CircAbove07Frac']"], {}), "(cir['Snapshot_%d' % snap_num]['CircAbove07Frac'])\n", (804, 854), True, 'import numpy as np\n'), ((875, 937), 'numpy.array', 'np.array', (["cir['Snapshot_%d' % snap_num]['MassTensorEigenVals']"], {}), "(cir['Snapshot_%d' % snap_num]['MassTensorEigenVals'])\n", (883, 937), True, 'import numpy as np\n'), ((396, 437), 'numpy.array', 'np.array', (["offset['Subhalo']['SnapByType']"], {}), "(offset['Subhalo']['SnapByType'])\n", (404, 437), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Execution times (single measurement each) for runs with 1-4 threads.
thread_4=np.array([6246])
thread_3=np.array([6339])
thread_2=np.array([6459])
thread_1 =np.array([6488])
x=[]
x.append(thread_1)
x.append(thread_2)
x.append(thread_3)
x.append(thread_4)
# Shape (4, 1): one row per thread count, one sample per row.
x=np.array(x)
print(x)
#x = np.random.randn(100, 8)
# Per-row statistics; with a single sample per row, mins == maxes == means
# and std is zero.
mins = x.min(1)
print(mins)
maxes = x.max(1)
means = x.mean(1)
std = x.std(1)
print(x.shape)
# create stacked errorbars:
# plt.errorbar(np.arange(4), means, std, fmt='ok', lw=3)
# plt.errorbar(np.arange(4), means, [means - mins, maxes - means],
#              fmt='.k', ecolor='gray', lw=1)
labels=["1","2","3","4"]
# plt.xticks(range(len(labels)),labels)
#ax = plt.subplots(1,1)
#ax.set_xticks(labels)
plt.xlabel('Number of threads', fontsize=18)
plt.ylabel('Exec. Time', fontsize=16)
plt.xlim(-1, 4)
#plt.show()
print(means)
# NOTE(review): min_mean is assigned but never used afterwards.
min_mean=means[0]
print(means)
# Red dots: mean execution time per thread count.
plt.plot(labels,means,'ro')
print(means)
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
] | [((60, 76), 'numpy.array', 'np.array', (['[6246]'], {}), '([6246])\n', (68, 76), True, 'import numpy as np\n'), ((86, 102), 'numpy.array', 'np.array', (['[6339]'], {}), '([6339])\n', (94, 102), True, 'import numpy as np\n'), ((112, 128), 'numpy.array', 'np.array', (['[6459]'], {}), '([6459])\n', (120, 128), True, 'import numpy as np\n'), ((139, 155), 'numpy.array', 'np.array', (['[6488]'], {}), '([6488])\n', (147, 155), True, 'import numpy as np\n'), ((239, 250), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (247, 250), True, 'import numpy as np\n'), ((698, 742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads"""'], {'fontsize': '(18)'}), "('Number of threads', fontsize=18)\n", (708, 742), True, 'import matplotlib.pyplot as plt\n'), ((743, 780), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exec. Time"""'], {'fontsize': '(16)'}), "('Exec. Time', fontsize=16)\n", (753, 780), True, 'import matplotlib.pyplot as plt\n'), ((781, 796), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(4)'], {}), '(-1, 4)\n', (789, 796), True, 'import matplotlib.pyplot as plt\n'), ((855, 884), 'matplotlib.pyplot.plot', 'plt.plot', (['labels', 'means', '"""ro"""'], {}), "(labels, means, 'ro')\n", (863, 884), True, 'import matplotlib.pyplot as plt\n'), ((896, 906), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (904, 906), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import random
from scipy import stats
from utils import features, class_names
from rcviz import viz
# for split between two integer values
HALF = 0.5
class DecisionTree(object):
    # CART-style binary decision tree: splits on one feature/threshold per
    # node using information gain ('ig') or Gini index ('gini'), with
    # optional stopping criteria (max_depth / node_purity / min_gain).

    class Node(object):
        def __init__(self, label):
            """
            The node in a decision tree.
            Args:
                label: The class label of a node (majority class of the
                    training samples that reached it).
            """
            self.label = label
            self.left = None
            self.right = None
            self.idx = None
            self.thresh = None

        def set_l(self, node):
            """
            Set NODE as current left child.
            Args:
                node: The left child.
            """
            self.left = node

        def set_r(self, node):
            """
            Set NODE as current right child.
            Args:
                node: The right child.
            """
            self.right = node

        def set_idx(self, idx):
            """
            Set feature to split.
            Args:
                idx: The column index of the feature to split.
            """
            self.idx = idx

        def set_thr(self, thresh):
            """
            Set split threshold.
            Args:
                thresh: The threshold to split the data.
                        If feature <= threshold, then comes
                        to the left subtree, else, the right
                        subtree.
            """
            self.thresh = thresh

        def __str__(self):
            # Internal nodes print their split rule; leaves print "leaf".
            if self.idx is not None and self.thresh is not None:
                return str(features[self.idx]) + " thr: " + str(self.thresh)
            else:
                return "leaf"

    def __init__(self, X, y, mode, criteria, seed=1, feature_rate=1):
        """
        A decision tree.
        Args:
            X: The original features to train.
            y: The original labels.
            mode: Based on either 'ig' - information gain, or,
                  'gini' - gini index.
            criteria: dict, specify the stopping criteria.
            seed: Random seed used when sub-sampling features.
            feature_rate: Helper argument for random forest to random
                          select some features from the original features.
        Raises:
            ValueError: If mode is neither 'ig' nor 'gini'.
        """
        if mode not in ["ig", "gini"]:
            raise ValueError("mode should be either 'ig' or 'gini', "
                             "but found %s" % mode)
        self.tree = None
        self.n_features = X.shape[1]
        self.n_classes = len(set(y))
        self.mode = mode
        self.max_depth = criteria.get("max_depth", None)
        self.node_purity = criteria.get("node_purity", None)
        self.min_gain = criteria.get("min_gain", None)
        self.seed = seed
        self.feature_rate = feature_rate

    def set_criteria(self, criteria):
        """
        Change the criteria of current decision tree.
        """
        self.max_depth = criteria.get("max_depth", None)
        self.node_purity = criteria.get("node_purity", None)
        self.min_gain = criteria.get("min_gain", None)

    def feature_selector(self):
        """
        Return a list of index of features to be considered to
        split during training.
        """
        idx = list(range(self.n_features))
        if self.feature_rate == 1:
            return idx
        # NOTE(review): reseeding on every call makes each node consider
        # the SAME feature subset — presumably intended for reproducibility,
        # but confirm for random-forest use where per-node subsets vary.
        random.seed(self.seed)
        feature_idx = random.sample(
            idx, int(self.feature_rate * self.n_features))
        return sorted(feature_idx)

    @staticmethod
    def entropy(y):
        # Shannon entropy (base 2) of the label distribution.
        _, counts = np.unique(y, return_counts=True)
        return stats.entropy(counts, base=2)

    @staticmethod
    def information_gain(X, y, thresh):
        """Entropy reduction of splitting feature vector X at thresh."""
        en = DecisionTree.entropy(y)
        num_d = y.shape[0]
        left_indicies = X <= thresh
        # left partition
        left_sub = y[left_indicies]
        en_left = DecisionTree.entropy(left_sub)
        en_left = (left_sub.shape[0] / num_d) * en_left
        # right partition
        right_sub = y[~left_indicies]
        en_right = DecisionTree.entropy(right_sub)
        en_right = (right_sub.shape[0] / num_d) * en_right
        # information gain
        ig = en - en_left - en_right
        return ig

    @staticmethod
    def gini_impurity(y):
        # Gini impurity: 1 - sum(p_i^2) over class proportions.
        total = y.shape[0]
        _, counts = np.unique(y, return_counts=True)
        return 1 - np.sum(np.square(counts / total))

    @staticmethod
    def gini_purification(X, y, thresh):
        """Size-weighted Gini index of splitting feature vector X at thresh."""
        num_d = y.shape[0]
        left_indicies = X <= thresh
        # left partition
        left_sub = y[left_indicies]
        gini_left = DecisionTree.gini_impurity(left_sub)
        gini_left = (left_sub.shape[0] / num_d) * gini_left
        # right partition
        right_sub = y[~left_indicies]
        gini_right = DecisionTree.gini_impurity(right_sub)
        gini_right = (right_sub.shape[0] / num_d) * gini_right
        # gini index
        gini_index = gini_left + gini_right
        return gini_index

    def split(self, X, y, idx, thresh):
        """
        Split the data given the index and threshold.
        Args:
            X: Data to split.
            y: Labels corresponding to the data.
            idx: int, specify a vector of feature.
            thresh: float, used for splitting the data into
                    two branches.
        Returns:
            Tuple of (left_x, left_y), (right_x, right_y).
        """
        feature = X[:, idx]
        left_indices = feature <= thresh
        left_x = X[left_indices]
        left_y = y[left_indices]
        right_x = X[~left_indices]
        right_y = y[~left_indices]
        return (left_x, left_y), (right_x, right_y)

    def segmenter(self, X, y):
        """
        Find the best feature and threshold to split.
        Returns (None, None) when the best information gain is below
        min_gain (ig mode only).
        """
        best_idx = 0
        best_thresh = 0
        # ig is maximized, gini index is minimized.
        best_criterion = -np.inf if self.mode == "ig" else np.inf
        for idx in self.feature_selector():
            feature = X[:, idx]
            values = np.unique(feature)
            for value in values:
                # Candidate thresholds sit half-way between integer values.
                if self.mode == "ig":
                    c = self.information_gain(feature, y, value + HALF)
                    if c > best_criterion:
                        best_criterion = c
                        best_idx = idx
                        best_thresh = value + HALF
                else:
                    c = self.gini_purification(feature, y, value + HALF)
                    if c < best_criterion:
                        best_criterion = c
                        best_idx = idx
                        best_thresh = value + HALF
        if self.min_gain and self.mode == "ig" \
                and best_criterion < self.min_gain:
            return None, None
        return best_idx, best_thresh

    def train(self, X, y, verbose=True):
        """
        Train the decision tree given training data X and y.
        If verbose, after training, return the training evaluation.
        """
        self.tree = self.__train(X, y)
        if verbose:
            print("#Train\t", end="")
            return self.validate(X, y)

    def __train(self, X, y, depth=0):
        """
        Recursively split the data and create nodes to build the
        decision tree.
        """
        counts = [np.sum(y == i) for i in range(self.n_classes)]
        label = np.argmax(counts)
        node = self.Node(label)
        # Check if the node has no data to split
        if X.shape[0] == 0:
            return node
        # NOTE(review): with >2 classes this stops splitting as soon as ANY
        # class is absent, not only when the node is pure — confirm intent.
        if np.min(counts) == 0:
            return node
        # Check the node purity stopping criteria.
        if self.node_purity:
            proportion = [i / X.shape[0] for i in counts]
            if np.max(proportion) >= self.node_purity:
                return node
        # Check the max depth stopping criteria.
        if not self.max_depth or depth < self.max_depth:
            idx, thr = self.segmenter(X, y)
            # Check the min information gain stopping criteria.
            if idx is None and thr is None:
                return node
            # Check if X[:, idx] have the same value.
            if np.unique(X[:, idx]).shape[0] == 1:
                return node
            node.set_idx(idx)
            node.set_thr(thr)
            sub_l, sub_r = self.split(X, y, idx, thr)
            node.set_l(self.__train(*sub_l, depth=depth + 1))
            node.set_r(self.__train(*sub_r, depth=depth + 1))
        return node

    def predict(self, X):
        """
        Predict the label given X, each row of X represents
        a sample.
        """
        return [self.__predict(sample) for sample in X]

    def __predict(self, sample):
        """
        Predict the label given X, one sample.
        """
        node = self.tree
        # Descend until a leaf (a node with no children) is reached.
        while node.left:
            if sample[node.idx] <= node.thresh:
                node = node.left
            else:
                node = node.right
        return node.label

    def validate(self, val_X, val_y):
        """
        Validate the performance given X and y.
        Returns the accuracy on (val_X, val_y) and prints a summary line.
        """
        pre_y = self.predict(val_X)
        correct = [1 if val_y[i] == pre_y[i] else 0 for i in range(len(val_y))]
        rate = sum(correct) / val_y.shape[0]
        print("Decision Tree | MD: %s | NP: %s | MG: %s |"
              " mode: %s | val rate: %.4f" %
              (self.max_depth, self.node_purity,
               self.min_gain, self.mode, rate))
        return rate

    @staticmethod
    @viz
    def n(tree):
        """
        For visualization usage (rcviz renders the recursive call graph).
        """
        node = tree
        if node.left is None and node.right is None:
            return "*label: " + str(class_names[node.label])
        DecisionTree.n(node.left)
        DecisionTree.n(node.right)

    @staticmethod
    def __inorder(node, seq):
        # Append string forms of nodes in in-order traversal.
        if node:
            DecisionTree.__inorder(node.left, seq)
            seq.append(" | " + str(node) + " | ")
            DecisionTree.__inorder(node.right, seq)

    @staticmethod
    def __preorder(node, seq):
        # Append string forms of nodes in pre-order traversal.
        if node:
            seq.append(" | " + str(node) + " | ")
            DecisionTree.__preorder(node.left, seq)
            DecisionTree.__preorder(node.right, seq)

    def __repr__(self):
        seq = []
        DecisionTree.__inorder(self.tree, seq)
        string_in = "".join(seq)
        seq = []
        DecisionTree.__preorder(self.tree, seq)
        string_pre = "".join(seq)
        return "Inorder: \n" + string_in + "\n\nPreorder: \n" + string_pre
| [
"scipy.stats.entropy",
"numpy.unique",
"numpy.argmax",
"random.seed",
"numpy.square",
"numpy.max",
"numpy.sum",
"numpy.min"
] | [((3379, 3401), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (3390, 3401), False, 'import random\n'), ((3592, 3624), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (3601, 3624), True, 'import numpy as np\n'), ((3640, 3669), 'scipy.stats.entropy', 'stats.entropy', (['counts'], {'base': '(2)'}), '(counts, base=2)\n', (3653, 3669), False, 'from scipy import stats\n'), ((4343, 4375), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (4352, 4375), True, 'import numpy as np\n'), ((7271, 7288), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (7280, 7288), True, 'import numpy as np\n'), ((5931, 5949), 'numpy.unique', 'np.unique', (['feature'], {}), '(feature)\n', (5940, 5949), True, 'import numpy as np\n'), ((7208, 7222), 'numpy.sum', 'np.sum', (['(y == i)'], {}), '(y == i)\n', (7214, 7222), True, 'import numpy as np\n'), ((7433, 7447), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (7439, 7447), True, 'import numpy as np\n'), ((4402, 4427), 'numpy.square', 'np.square', (['(counts / total)'], {}), '(counts / total)\n', (4411, 4427), True, 'import numpy as np\n'), ((7631, 7649), 'numpy.max', 'np.max', (['proportion'], {}), '(proportion)\n', (7637, 7649), True, 'import numpy as np\n'), ((8054, 8074), 'numpy.unique', 'np.unique', (['X[:, idx]'], {}), '(X[:, idx])\n', (8063, 8074), True, 'import numpy as np\n')] |
import numpy as np
# The eight (row, col) offsets of the cells surrounding a position,
# in row-major order with the centre (0, 0) excluded.
neighbouringIndex = [[dr, dc]
                     for dr in (-1, 0, 1)
                     for dc in (-1, 0, 1)
                     if (dr, dc) != (0, 0)]
def occupied(mStart, nStart, dataSet):
count = 0
for m, n in neighbouringIndex:
try:
if mStart+m >= 0 and nStart+n >= 0:
if dataSet[mStart+m, nStart+n] == '#':
count += 1
except IndexError:
continue
return count
def occupied2(mStart, nStart, dataSet):
    """Count how many of the 8 directions show an occupied seat first.

    From (mStart, nStart), walk outward along each of the eight rays:
    the first seat seen decides the ray — '#' counts as occupied,
    'L' blocks the view, '.' is looked through.

    Args:
        mStart: Row index of the centre cell.
        nStart: Column index of the centre cell.
        dataSet: 2-D numpy array of single characters ('.', 'L', '#').

    Returns:
        Number of directions whose first visible seat is '#' (0..8).
    """
    # Bounds are checked explicitly against the grid shape instead of the
    # original per-step try/except-IndexError control flow, and the ray
    # directions are generated locally rather than read from the
    # module-level neighbouringIndex.
    n_rows, n_cols = dataSet.shape
    count = 0
    for dm in (-1, 0, 1):
        for dn in (-1, 0, 1):
            if dm == 0 and dn == 0:
                continue
            r, c = mStart + dm, nStart + dn
            while 0 <= r < n_rows and 0 <= c < n_cols:
                cell = dataSet[r, c]
                if cell == '#':
                    count += 1
                    break
                if cell == 'L':
                    break
                r += dm
                c += dn
    return count
if __name__ == '__main__':
    # Advent of Code 2020 day 11 (part 2): iterate the seating rules until
    # the layout stabilises, then count occupied seats.
    with open('adventOfCode\\11data.txt', 'r') as f:
        lines = f.read().splitlines()
    # 2-D grid of single characters: '.', 'L' (empty seat), '#' (occupied).
    data = np.char.array([[c for c in line] for line in lines])
    steps = 0
    while True:
        changes = 0
        # All updates are computed from the old grid and written into temp
        # so that every seat changes simultaneously.
        temp = data.copy()
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                element = data[i, j]
                if element == '.':
                    continue
                occ = occupied2(i, j, data)
                # Empty seat with no visible occupied seat becomes occupied.
                if element == 'L' and occ == 0:
                    temp[i, j] = '#'
                    changes += 1
                # Occupied seat seeing 5+ occupied seats empties (part-2 rule).
                elif element == '#' and occ >= 5:
                    temp[i, j] = 'L'
                    changes += 1
        if changes < 1:
            break
        steps += 1
        # print(f'step {steps}')
        # print(temp)
        data = temp.copy()
    print('steps =', steps)
    print('occupied seats =', np.sum(data=='#'))
#
# testlines = '.......#.\n' \
# '...#.....\n' \
# '.#.......\n' \
# '.........\n' \
# '..#L....#\n' \
# '....#....\n' \
# '.........\n' \
# '#........\n' \
# '...#.....'
# data2 = np.char.array([[c for c in line] for line in testlines.splitlines()])
# occupied2(4, 3, data2) | [
"numpy.sum",
"numpy.char.array"
] | [((843, 895), 'numpy.char.array', 'np.char.array', (['[[c for c in line] for line in lines]'], {}), '([[c for c in line] for line in lines])\n', (856, 895), True, 'import numpy as np\n'), ((1430, 1449), 'numpy.sum', 'np.sum', (["(data == '#')"], {}), "(data == '#')\n", (1436, 1449), True, 'import numpy as np\n')] |
import numpy as np
def test_append():
    """append() should stack two single points into a (2, 3) point set."""
    from hexgrid import HexPoints, append

    first = HexPoints(1, 0, -1)
    second = HexPoints(-1, 0, 1)
    combined = append(first, second)

    expected = np.array([[1, 0, -1], [-1, 0, 1]])
    assert len(combined) == 2
    assert combined.points.shape == (2, 3)
    assert np.all(combined.points == expected)
def test_concatenate():
    """concatenate() should stack three single points into a (3, 3) point set."""
    from hexgrid import HexPoints, concatenate

    singles = [HexPoints(1, 0, -1), HexPoints(-1, 0, 1), HexPoints(-1, 2, -1)]
    stacked = concatenate(singles)

    expected = np.array([[1, 0, -1], [-1, 0, 1], [-1, 2, -1]])
    assert len(stacked) == 3
    assert stacked.points.shape == (3, 3)
    assert np.all(stacked.points == expected)
| [
"hexgrid.append",
"numpy.array",
"hexgrid.concatenate",
"hexgrid.HexPoints"
] | [((91, 110), 'hexgrid.HexPoints', 'HexPoints', (['(1)', '(0)', '(-1)'], {}), '(1, 0, -1)\n', (100, 110), False, 'from hexgrid import HexPoints, concatenate\n'), ((120, 139), 'hexgrid.HexPoints', 'HexPoints', (['(-1)', '(0)', '(1)'], {}), '(-1, 0, 1)\n', (129, 139), False, 'from hexgrid import HexPoints, concatenate\n'), ((149, 163), 'hexgrid.append', 'append', (['h1', 'h2'], {}), '(h1, h2)\n', (155, 163), False, 'from hexgrid import HexPoints, append\n'), ((371, 390), 'hexgrid.HexPoints', 'HexPoints', (['(1)', '(0)', '(-1)'], {}), '(1, 0, -1)\n', (380, 390), False, 'from hexgrid import HexPoints, concatenate\n'), ((400, 419), 'hexgrid.HexPoints', 'HexPoints', (['(-1)', '(0)', '(1)'], {}), '(-1, 0, 1)\n', (409, 419), False, 'from hexgrid import HexPoints, concatenate\n'), ((429, 449), 'hexgrid.HexPoints', 'HexPoints', (['(-1)', '(2)', '(-1)'], {}), '(-1, 2, -1)\n', (438, 449), False, 'from hexgrid import HexPoints, concatenate\n'), ((459, 484), 'hexgrid.concatenate', 'concatenate', (['[h1, h2, h3]'], {}), '([h1, h2, h3])\n', (470, 484), False, 'from hexgrid import HexPoints, concatenate\n'), ((253, 287), 'numpy.array', 'np.array', (['[[1, 0, -1], [-1, 0, 1]]'], {}), '([[1, 0, -1], [-1, 0, 1]])\n', (261, 287), True, 'import numpy as np\n'), ((574, 621), 'numpy.array', 'np.array', (['[[1, 0, -1], [-1, 0, 1], [-1, 2, -1]]'], {}), '([[1, 0, -1], [-1, 0, 1], [-1, 2, -1]])\n', (582, 621), True, 'import numpy as np\n')] |
"""
train deep feature embedding with SoftmaxLoss/CenterLoss/ASoftmaxLoss/ArcLoss
DenseNet121 as backbone
@Author: LucasX
"""
import ast
import copy
import os
import sys
import time
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import confusion_matrix
from torch.optim import lr_scheduler
from research.cbir.densenet import DenseNet121
from research.cbir.losses import AngularLoss, ArcLoss, CenterLoss
sys.path.append('../')
from research.cbir import data_loader
from research.cbir.cfg import cfg
from research.cbir.file_utils import mkdir_if_not_exist
parser = argparse.ArgumentParser()
# Loss applied on top of the deep embedding.
parser.add_argument('-loss', type=str, choices=['SoftmaxLoss', 'CenterLoss', 'ASoftmaxLoss', 'ArcLoss'])
# Backbone architecture.
parser.add_argument('-arch', type=str, choices=['DenseNet121'])
# Inference-only flag ("True"/"False" parsed via ast.literal_eval).
# Fix: the original passed dest='flag', which stored the value under
# args['flag'] while the training code reads args['infer'] (KeyError).
# The default dest derived from '-infer' is 'infer', as required.
parser.add_argument('-infer', type=ast.literal_eval)
parser.add_argument('-dim', type=int, default=1024, help='embedding dimension size')
args = vars(parser.parse_args())
def train_model_with_modified_softmax_loss(model, dataloaders, criterion, optimizer, scheduler):
    """
    train model with modified SoftmaxLoss, such as vanilla Softmax and ASoftmax Loss

    When args['infer'] is falsy: train + validate for cfg['epoch'] epochs,
    checkpointing whenever val accuracy improves, and save the best weights
    to './model/<name>.pth'.  Otherwise: load that checkpoint and evaluate on
    the test set, writing per-sample predictions to './<name>.csv'.

    :param model: network whose forward returns (features, outputs); the head
        outputs are indexed as outputs[0]=cos_theta, outputs[1]=phi_theta
    :param dataloaders: dict of 'train'/'val'/'test' loaders yielding dicts
        with 'image', 'type' (and 'filename' for val/test batches)
    :param criterion: loss applied to the raw head outputs
    :param optimizer:
    :param scheduler: LR scheduler, stepped once per training epoch
    :return:
    """
    print(model)
    model_name = model.__class__.__name__
    model = model.float()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model = model.to(device)
    # NOTE(review): len(loader) * batch_size overestimates the size when the
    # last batch is partial — acceptable for the running averages below.
    dataset_sizes = {x: dataloaders[x].__len__() * cfg['batch_size'] for x in ['train', 'val', 'test']}
    for k, v in dataset_sizes.items():
        print('Dataset size of {0} is {1}...'.format(k, v))
    if not args['infer']:
        print('Start training %s...' % model_name)
        since = time.time()
        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0
        for epoch in range(cfg['epoch']):
            print('-' * 100)
            print('Epoch {}/{}'.format(epoch, cfg['epoch'] - 1))
            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    # torch <= 1.1.0 expected scheduler.step() BEFORE the
                    # optimizer steps; newer versions step after the epoch.
                    if torch.__version__ <= '1.1.0':
                        scheduler.step()
                    model.train()  # Set model to training mode
                else:
                    model.eval()  # Set model to evaluate mode
                running_loss = 0.0
                running_corrects = 0
                # Iterate over data.
                # for data in dataloaders[phase]:
                for i, data in enumerate(dataloaders[phase], 0):
                    inputs, labels = data['image'], data['type']
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    # zero the parameter gradients
                    optimizer.zero_grad()
                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == 'train'):
                        feats, outputs = model(inputs)
                        loss = criterion(outputs, labels)
                        outputs = outputs[0]  # 0=cos_theta 1=phi_theta
                        _, preds = torch.max(outputs, 1)
                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                    # statistics
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)
                if phase == 'train':
                    if torch.__version__ >= '1.1.0':
                        scheduler.step()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    # Re-run the whole val set to report per-class metrics
                    # for the new best model.
                    tmp_correct = 0
                    tmp_total = 0
                    tmp_y_pred = []
                    tmp_y_true = []
                    tmp_filenames = []
                    for data in dataloaders['val']:
                        images, labels, filename = data['image'], data['type'], data['filename']
                        images = images.to(device)
                        labels = labels.to(device)
                        feats, outputs = model(images)
                        outputs = outputs[0]  # 0=cos_theta 1=phi_theta
                        _, predicted = torch.max(outputs.data, 1)
                        tmp_total += labels.size(0)
                        tmp_correct += (predicted == labels).sum().item()
                        tmp_y_pred += predicted.to("cpu").detach().numpy().tolist()
                        tmp_y_true += labels.to("cpu").detach().numpy().tolist()
                        tmp_filenames += filename
                    tmp_acc = tmp_correct / tmp_total
                    print('Confusion Matrix of {0} on val set: '.format(model_name))
                    cm = confusion_matrix(tmp_y_true, tmp_y_pred)
                    print(cm)
                    cm = np.array(cm)
                    print('Accuracy = {0}'.format(tmp_acc))
                    precisions = []
                    recalls = []
                    for i in range(len(cm)):
                        precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
                        recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
                    print("Precision of {0} on val set = {1}".format(model_name, sum(precisions) / len(precisions)))
                    print("Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                    model.load_state_dict(best_model_wts)
                    model_path_dir = './model'
                    mkdir_if_not_exist(model_path_dir)
                    # Under DataParallel the real module lives in model.module.
                    if torch.cuda.device_count() > 1:
                        torch.save(model.module.state_dict(),
                                   './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
                    else:
                        torch.save(model.state_dict(),
                                   './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
        # load best model weights
        model.load_state_dict(best_model_wts)
        model_path_dir = './model'
        mkdir_if_not_exist(model_path_dir)
        if torch.cuda.device_count() > 1:
            torch.save(model.module.state_dict(), './model/%s.pth' % model_name)
        else:
            torch.save(model.state_dict(), './model/%s.pth' % model_name)
    else:
        print('Start testing %s...' % model.__class__.__name__)
        model.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model_name)))
        model.eval()
        correct = 0
        total = 0
        y_pred = []
        y_true = []
        filenames = []
        probs = []
        with torch.no_grad():
            for data in dataloaders['test']:
                images, labels, filename = data['image'], data['type'], data['filename']
                images = images.to(device)
                labels = labels.to(device)
                feats, outputs = model(images)
                outputs = outputs[0]  # 0=cos_theta 1=phi_theta
                _, predicted = torch.max(outputs.data, 1)
                # NOTE(review): F.softmax without dim= relies on deprecated
                # implicit-dim behavior — presumably dim=1 is intended; confirm.
                outputs = F.softmax(outputs)
                # get TOP-K output labels and corresponding probabilities
                topK_prob, topK_label = torch.topk(outputs, 1)
                probs += topK_prob.to("cpu").detach().numpy().tolist()
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                y_pred += predicted.to("cpu").detach().numpy().tolist()
                y_true += labels.to("cpu").detach().numpy().tolist()
                filenames += filename
        print('Accuracy of {0} on test set: {1}% '.format(model_name, 100 * correct / total))
        print(
            'Confusion Matrix of {0} on test set: '.format(model_name))
        cm = confusion_matrix(y_true, y_pred)
        print(cm)
        cm = np.array(cm)
        precisions = []
        recalls = []
        for i in range(len(cm)):
            precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
            recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
        print('Precision List: ')
        print(precisions)
        print('Recall List: ')
        print(recalls)
        print("Precision of {0} on val set = {1}".format(model_name,
                                                         sum(precisions) / len(precisions)))
        print(
            "Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
        print('Output CSV...')
        col = ['filename', 'gt', 'pred', 'prob']
        df = pd.DataFrame([[filenames[i], y_true[i], y_pred[i], probs[i][0]] for i in range(len(filenames))],
                          columns=col)
        df.to_csv("./%s.csv" % model_name, index=False)
        print('CSV has been generated...')
def train_model_for_centerloss(model, dataloaders, criterion_xent, criterion_cent, optimizer_model, optimizer_centloss,
                               scheduler):
    """
    Train (or, when args['infer'] is set, test) a model under the joint supervision
    of cross-entropy loss and CenterLoss.

    The best-on-val weights are checkpointed under ./model during training; in test
    mode the saved weights are loaded, metrics are printed, and a per-sample CSV
    (filename, ground truth, prediction, top-1 probability) is written.

    :param model: network returning a (features, logits) tuple per forward pass
    :param dataloaders: dict with 'train'/'val'/'test' loaders; each batch is a dict
        with 'image', 'type' (label) and 'filename' keys
    :param criterion_xent: cross-entropy (softmax) loss on the logits
    :param criterion_cent: CenterLoss instance with its own learnable centers
    :param optimizer_model: optimizer for the model parameters
    :param optimizer_centloss: separate optimizer for the CenterLoss centers
    :param scheduler: learning-rate scheduler for optimizer_model
    :return: None
    """
    print(model)
    model_name = model.__class__.__name__
    model = model.float()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model = model.to(device)
    # NOTE(review): len(loader) * batch_size overcounts when the final batch is
    # partial — confirm loaders drop the last incomplete batch.
    dataset_sizes = {x: dataloaders[x].__len__() * cfg['batch_size'] for x in ['train', 'val', 'test']}
    for k, v in dataset_sizes.items():
        print('Dataset size of {0} is {1}...'.format(k, v))
    if not args['infer']:
        print('Start training %s...' % model_name)
        since = time.time()
        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0
        for epoch in range(cfg['epoch']):
            print('-' * 100)
            print('Epoch {}/{}'.format(epoch, cfg['epoch'] - 1))
            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    # torch <= 1.1.0 expects scheduler.step() before the epoch's
                    # optimizer steps; newer versions expect it after (see below).
                    if torch.__version__ <= '1.1.0':
                        scheduler.step()
                    model.train()  # Set model to training mode
                else:
                    model.eval()  # Set model to evaluate mode
                running_loss = 0.0
                running_corrects = 0
                # Iterate over data.
                # for data in dataloaders[phase]:
                for i, data in enumerate(dataloaders[phase], 0):
                    inputs, labels = data['image'], data['type']
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    # zero the parameter gradients
                    optimizer_model.zero_grad()
                    optimizer_centloss.zero_grad()
                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == 'train'):
                        feats, outputs = model(inputs)
                        _, preds = torch.max(outputs, 1)
                        xent_loss = criterion_xent(outputs, labels)
                        # total loss = xent + 0.001 * center loss (fixed weighting)
                        loss = criterion_cent(feats, labels) * 0.001 + xent_loss
                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer_model.step()
                            # multiple (1./alpha) in order to remove the effect of alpha on updating centers
                            for param in criterion_cent.parameters():
                                param.grad.data *= (1. / 1.)
                            optimizer_centloss.step()
                    # statistics
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)
                if phase == 'train':
                    if torch.__version__ >= '1.1.0':
                        scheduler.step()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    tmp_correct = 0
                    tmp_total = 0
                    tmp_y_pred = []
                    tmp_y_true = []
                    tmp_filenames = []
                    # Re-run the full val set to report a confusion matrix for the new best model
                    for data in dataloaders['val']:
                        images, labels, filename = data['image'], data['type'], data['filename']
                        images = images.to(device)
                        labels = labels.to(device)
                        feats, outputs = model(images)
                        _, predicted = torch.max(outputs.data, 1)
                        tmp_total += labels.size(0)
                        tmp_correct += (predicted == labels).sum().item()
                        tmp_y_pred += predicted.to("cpu").detach().numpy().tolist()
                        tmp_y_true += labels.to("cpu").detach().numpy().tolist()
                        tmp_filenames += filename
                    tmp_acc = tmp_correct / tmp_total
                    print('Confusion Matrix of {0} on val set: '.format(model_name))
                    cm = confusion_matrix(tmp_y_true, tmp_y_pred)
                    print(cm)
                    cm = np.array(cm)
                    print('Accuracy = {0}'.format(tmp_acc))
                    precisions = []
                    recalls = []
                    # Per-class precision/recall from the confusion matrix diagonal
                    for i in range(len(cm)):
                        precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
                        recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
                    print("Precision of {0} on val set = {1}".format(model_name, sum(precisions) / len(precisions)))
                    print("Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                    model.load_state_dict(best_model_wts)
                    model_path_dir = './model'
                    mkdir_if_not_exist(model_path_dir)
                    if torch.cuda.device_count() > 1:
                        torch.save(model.module.state_dict(),
                                   './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
                    else:
                        torch.save(model.state_dict(), './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
        # load best model weights
        model.load_state_dict(best_model_wts)
        model_path_dir = './model'
        mkdir_if_not_exist(model_path_dir)
        if torch.cuda.device_count() > 1:
            torch.save(model.module.state_dict(), './model/%s.pth' % model_name)
        else:
            torch.save(model.state_dict(), './model/%s.pth' % model_name)
    else:
        print('Start testing %s...' % model.__class__.__name__)
        model.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model_name)))
        model.eval()
        correct = 0
        total = 0
        y_pred = []
        y_true = []
        filenames = []
        probs = []
        with torch.no_grad():
            for data in dataloaders['test']:
                images, labels, filename = data['image'], data['type'], data['filename']
                images = images.to(device)
                labels = labels.to(device)
                feats, outputs = model(images)
                outputs = F.softmax(outputs)
                # get TOP-K output labels and corresponding probabilities
                topK_prob, topK_label = torch.topk(outputs, 2)
                probs += topK_prob.to("cpu").detach().numpy().tolist()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                y_pred += predicted.to("cpu").detach().numpy().tolist()
                y_true += labels.to("cpu").detach().numpy().tolist()
                filenames += filename
        print('Accuracy of {0} on test set: {1}% '.format(model_name, 100 * correct / total))
        print(
            'Confusion Matrix of {0} on test set: '.format(model_name))
        cm = confusion_matrix(y_true, y_pred)
        print(cm)
        cm = np.array(cm)
        precisions = []
        recalls = []
        for i in range(len(cm)):
            precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
            recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
        print('Precision List: ')
        print(precisions)
        print('Recall List: ')
        print(recalls)
        print("Precision of {0} on val set = {1}".format(model_name,
                                                         sum(precisions) / len(precisions)))
        print(
            "Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
        print('Output CSV...')
        # One row per test sample: filename, ground truth, prediction, top-1 probability
        col = ['filename', 'gt', 'pred', 'prob']
        df = pd.DataFrame([[filenames[i], y_true[i], y_pred[i], probs[i][0]] for i in range(len(filenames))],
                          columns=col)
        df.to_csv("./%s.csv" % model_name, index=False)
        print('CSV has been generated...')
def main_with_centerloss(model):
    """
    Launch training/testing of *model* supervised by cross-entropy plus CenterLoss.

    :param model: network to train; must return (features, logits)
    :return: None
    """
    xent = nn.CrossEntropyLoss()
    center = CenterLoss(num_classes=cfg['out_num'], feat_dim=1024)
    # Two optimizers: one for the network, one for the CenterLoss centers.
    model_opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
    center_opt = optim.SGD(center.parameters(), lr=0.5)
    warm_restart_sched = lr_scheduler.CosineAnnealingWarmRestarts(
        model_opt, T_0=10, T_mult=10, eta_min=0, last_epoch=-1)
    print('start loading ImageDataset...')
    loaders = data_loader.load_imagedataset_data()
    dataloaders = dict(zip(('train', 'val', 'test'), loaders))
    train_model_for_centerloss(model=model, dataloaders=dataloaders, criterion_xent=xent,
                               criterion_cent=center,
                               optimizer_model=model_opt, optimizer_centloss=center_opt,
                               scheduler=warm_restart_sched)
def train_model_for_arcloss(model, dataloaders, criterion, optimizer, metric, scheduler):
    """
    Train (or, when args['infer'] is set, test) a model supervised by ArcLoss.

    The best-on-val weights are checkpointed under ./model during training; in test
    mode the saved weights are loaded, metrics are printed, and a per-sample CSV
    (filename, ground truth, prediction, top-1 probability) is written.

    :param model: network returning embedding logits per forward pass
    :param dataloaders: dict with 'train'/'val'/'test' loaders; each batch is a dict
        with 'image', 'type' (label) and 'filename' keys
    :param criterion: classification loss applied to the margin-adjusted logits
    :param optimizer: optimizer for the model parameters
    :param metric: ArcLoss margin head mapping (embeddings, labels) -> thetas
    :param scheduler: learning-rate scheduler stepped at the start of each train phase
    :return: None
    """
    print(model)
    model_name = model.__class__.__name__
    model = model.float()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model = model.to(device)
    dataset_sizes = {x: dataloaders[x].__len__() * cfg['batch_size'] for x in ['train', 'val', 'test']}
    for k, v in dataset_sizes.items():
        print('Dataset size of {0} is {1}...'.format(k, v))
    if not args['infer']:
        print('Start training %s...' % model_name)
        since = time.time()
        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0
        # BUGFIX: the epoch budget lives in the cfg dict (as in
        # train_model_for_centerloss). The previous arch['epoch'] indexed the
        # global `arch`, which __main__ binds to the DenseNet121 model instance,
        # and would raise TypeError at runtime.
        for epoch in range(cfg['epoch']):
            print('-' * 100)
            print('Epoch {}/{}'.format(epoch, cfg['epoch'] - 1))
            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    scheduler.step()
                    model.train()  # Set model to training mode
                else:
                    model.eval()  # Set model to evaluate mode
                running_loss = 0.0
                running_corrects = 0
                # Iterate over data.
                # for data in dataloaders[phase]:
                for i, data in enumerate(dataloaders[phase], 0):
                    inputs, labels = data['image'], data['type']
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    # zero the parameter gradients
                    optimizer.zero_grad()
                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == 'train'):
                        # feats, outputs = model(inputs)
                        outputs = model(inputs)
                        # ArcLoss margin head; predictions are taken from thetas
                        thetas = metric(outputs, labels)
                        loss = criterion(thetas, labels)
                        _, preds = torch.max(thetas, 1)
                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                    # statistics
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    tmp_correct = 0
                    tmp_total = 0
                    tmp_y_pred = []
                    tmp_y_true = []
                    tmp_filenames = []
                    # Re-run the full val set to report a confusion matrix for the new best model
                    for data in dataloaders['val']:
                        images, labels, filename = data['image'], data['type'], data['filename']
                        images = images.to(device)
                        labels = labels.to(device)
                        outputs = model(images)
                        thetas = metric(outputs, labels)
                        _, predicted = torch.max(thetas.data, 1)
                        tmp_total += labels.size(0)
                        tmp_correct += (predicted == labels).sum().item()
                        tmp_y_pred += predicted.to("cpu").detach().numpy().tolist()
                        tmp_y_true += labels.to("cpu").detach().numpy().tolist()
                        tmp_filenames += filename
                    tmp_acc = tmp_correct / tmp_total
                    print('Confusion Matrix of {0} on val set: '.format(model_name))
                    cm = confusion_matrix(tmp_y_true, tmp_y_pred)
                    print(cm)
                    cm = np.array(cm)
                    print('Accuracy = {0}'.format(tmp_acc))
                    precisions = []
                    recalls = []
                    # Per-class precision/recall from the confusion matrix diagonal
                    for i in range(len(cm)):
                        precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
                        recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
                    print("Precision of {0} on val set = {1}".format(model_name, sum(precisions) / len(precisions)))
                    print("Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                    model.load_state_dict(best_model_wts)
                    model_path_dir = './model'
                    mkdir_if_not_exist(model_path_dir)
                    if torch.cuda.device_count() > 1:
                        torch.save(model.module.state_dict(),
                                   './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
                    else:
                        torch.save(model.state_dict(), './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
        # load best model weights
        model.load_state_dict(best_model_wts)
        model_path_dir = './model'
        mkdir_if_not_exist(model_path_dir)
        if torch.cuda.device_count() > 1:
            torch.save(model.module.state_dict(), './model/%s.pth' % model_name)
        else:
            torch.save(model.state_dict(), './model/%s.pth' % model_name)
    else:
        print('Start testing %s...' % model.__class__.__name__)
        model.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model_name)))
        model.eval()
        correct = 0
        total = 0
        y_pred = []
        y_true = []
        filenames = []
        probs = []
        with torch.no_grad():
            for data in dataloaders['test']:
                images, labels, filename = data['image'], data['type'], data['filename']
                images = images.to(device)
                labels = labels.to(device)
                # feats, outputs = model(images)
                outputs = model(images)
                thetas = metric(outputs, labels)
                _, predicted = torch.max(thetas.data, 1)
                outputs = F.softmax(outputs)
                # get TOP-K output labels and corresponding probabilities
                topK_prob, topK_label = torch.topk(outputs, 1)
                probs += topK_prob.to("cpu").detach().numpy().tolist()
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                y_pred += predicted.to("cpu").detach().numpy().tolist()
                y_true += labels.to("cpu").detach().numpy().tolist()
                filenames += filename
        print('Accuracy of {0} on test set: {1}% '.format(model_name, 100 * correct / total))
        print(
            'Confusion Matrix of {0} on test set: '.format(model_name))
        cm = confusion_matrix(y_true, y_pred)
        print(cm)
        cm = np.array(cm)
        precisions = []
        recalls = []
        for i in range(len(cm)):
            precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
            recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
        print('Precision List: ')
        print(precisions)
        print('Recall List: ')
        print(recalls)
        print("Precision of {0} on val set = {1}".format(model_name,
                                                         sum(precisions) / len(precisions)))
        print(
            "Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
        print('Output CSV...')
        # One row per test sample: filename, ground truth, prediction, top-1 probability
        col = ['filename', 'gt', 'pred', 'prob']
        df = pd.DataFrame([[filenames[i], y_true[i], y_pred[i], probs[i][0]] for i in range(len(filenames))],
                          columns=col)
        df.to_csv("./%s.csv" % model_name, index=False)
        print('CSV has been generated...')
def main_with_softmaxloss(model):
    """
    Launch training of *model* supervised by the vanilla softmax (cross-entropy) loss.

    :param model: network to train
    :return: None
    """
    loss_fn = nn.CrossEntropyLoss()
    sgd = optim.SGD(model.parameters(), lr=cfg['init_lr'], momentum=0.9, weight_decay=cfg['weight_decay'])
    sched = lr_scheduler.CosineAnnealingWarmRestarts(
        sgd, T_0=10, T_mult=10, eta_min=0, last_epoch=-1)
    print('start loading ImageDataset...')
    loaders = data_loader.load_imagedataset_data()
    dataloaders = dict(zip(('train', 'val', 'test'), loaders))
    train_model_with_modified_softmax_loss(model=model, dataloaders=dataloaders, criterion=loss_fn,
                                           optimizer=sgd, scheduler=sched)
def main_with_asoftmaxloss(model):
    """
    Launch training of *model* supervised by the vanilla A-Softmax (angular) loss.

    :param model: network to train
    :return: None
    """
    loss_fn = AngularLoss()
    sgd = optim.SGD(model.parameters(), lr=cfg['init_lr'], momentum=0.9, weight_decay=cfg['weight_decay'])
    sched = lr_scheduler.CosineAnnealingWarmRestarts(
        sgd, T_0=10, T_mult=10, eta_min=0, last_epoch=-1)
    print('start loading ImageDataset...')
    loaders = data_loader.load_imagedataset_data()
    dataloaders = dict(zip(('train', 'val', 'test'), loaders))
    train_model_with_modified_softmax_loss(model=model, dataloaders=dataloaders, criterion=loss_fn,
                                           optimizer=sgd, scheduler=sched)
def main_with_arcloss(model):
    """
    Launch training of *model* supervised by ArcLoss.

    :param model: network to train
    :return: None
    """
    xent = nn.CrossEntropyLoss()
    margin_head = ArcLoss(args['dim'], cfg['out_num'])
    sgd = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
    step_sched = lr_scheduler.StepLR(sgd, step_size=60, gamma=0.1)
    print('start loading ImageDataset...')
    loaders = data_loader.load_imagedataset_data()
    dataloaders = dict(zip(('train', 'val', 'test'), loaders))
    train_model_for_arcloss(model=model, dataloaders=dataloaders, criterion=xent,
                            optimizer=sgd, metric=margin_head,
                            scheduler=step_sched)
if __name__ == '__main__':
    if args['arch'] == 'DenseNet121':
        arch = DenseNet121(num_cls=cfg['out_num'])
        # Dispatch on the requested supervision loss; an unrecognized name is a
        # silent no-op, exactly like the original if/elif chain.
        launchers = {
            'SoftmaxLoss': main_with_softmaxloss,
            'ASoftmaxLoss': main_with_asoftmaxloss,
            'ArcLoss': main_with_arcloss,
            'CenterLoss': main_with_centerloss,
        }
        launcher = launchers.get(args['loss'])
        if launcher is not None:
            launcher(model=arch)
| [
"torch.nn.CrossEntropyLoss",
"research.cbir.losses.ArcLoss",
"torch.max",
"torch.cuda.device_count",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"research.cbir.densenet.DenseNet121",
"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"sys.path.append",
"torch.nn.functional.softma... | [((530, 552), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (545, 552), False, 'import sys\n'), ((691, 716), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (714, 716), False, 'import argparse\n'), ((8744, 8776), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8760, 8776), False, 'from sklearn.metrics import confusion_matrix\n'), ((8800, 8812), 'numpy.array', 'np.array', (['cm'], {}), '(cm)\n', (8808, 8812), True, 'import numpy as np\n'), ((17668, 17700), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (17684, 17700), False, 'from sklearn.metrics import confusion_matrix\n'), ((17725, 17737), 'numpy.array', 'np.array', (['cm'], {}), '(cm)\n', (17733, 17737), True, 'import numpy as np\n'), ((18744, 18765), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (18763, 18765), True, 'import torch.nn as nn\n'), ((18787, 18840), 'research.cbir.losses.CenterLoss', 'CenterLoss', ([], {'num_classes': "cfg['out_num']", 'feat_dim': '(1024)'}), "(num_classes=cfg['out_num'], feat_dim=1024)\n", (18797, 18840), False, 'from research.cbir.losses import AngularLoss, ArcLoss, CenterLoss\n'), ((19048, 19154), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 'lr_scheduler.CosineAnnealingWarmRestarts', (['optimizer_model'], {'T_0': '(10)', 'T_mult': '(10)', 'eta_min': '(0)', 'last_epoch': '(-1)'}), '(optimizer_model, T_0=10, T_mult=10,\n eta_min=0, last_epoch=-1)\n', (19088, 19154), False, 'from torch.optim import lr_scheduler\n'), ((19317, 19353), 'research.cbir.data_loader.load_imagedataset_data', 'data_loader.load_imagedataset_data', ([], {}), '()\n', (19351, 19353), False, 'from research.cbir import data_loader\n'), ((27214, 27246), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (27230, 27246), False, 
'from sklearn.metrics import confusion_matrix\n'), ((27271, 27283), 'numpy.array', 'np.array', (['cm'], {}), '(cm)\n', (27279, 27283), True, 'import numpy as np\n'), ((28297, 28318), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (28316, 28318), True, 'import torch.nn as nn\n'), ((28472, 28572), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 'lr_scheduler.CosineAnnealingWarmRestarts', (['optimizer'], {'T_0': '(10)', 'T_mult': '(10)', 'eta_min': '(0)', 'last_epoch': '(-1)'}), '(optimizer, T_0=10, T_mult=10,\n eta_min=0, last_epoch=-1)\n', (28512, 28572), False, 'from torch.optim import lr_scheduler\n'), ((28735, 28771), 'research.cbir.data_loader.load_imagedataset_data', 'data_loader.load_imagedataset_data', ([], {}), '()\n', (28769, 28771), False, 'from research.cbir import data_loader\n'), ((29312, 29325), 'research.cbir.losses.AngularLoss', 'AngularLoss', ([], {}), '()\n', (29323, 29325), False, 'from research.cbir.losses import AngularLoss, ArcLoss, CenterLoss\n'), ((29479, 29579), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 'lr_scheduler.CosineAnnealingWarmRestarts', (['optimizer'], {'T_0': '(10)', 'T_mult': '(10)', 'eta_min': '(0)', 'last_epoch': '(-1)'}), '(optimizer, T_0=10, T_mult=10,\n eta_min=0, last_epoch=-1)\n', (29519, 29579), False, 'from torch.optim import lr_scheduler\n'), ((29742, 29778), 'research.cbir.data_loader.load_imagedataset_data', 'data_loader.load_imagedataset_data', ([], {}), '()\n', (29776, 29778), False, 'from research.cbir import data_loader\n'), ((30289, 30310), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (30308, 30310), True, 'import torch.nn as nn\n'), ((30328, 30364), 'research.cbir.losses.ArcLoss', 'ArcLoss', (["args['dim']", "cfg['out_num']"], {}), "(args['dim'], cfg['out_num'])\n", (30335, 30364), False, 'from research.cbir.losses import AngularLoss, ArcLoss, CenterLoss\n'), ((30476, 30531), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', 
(['optimizer'], {'step_size': '(60)', 'gamma': '(0.1)'}), '(optimizer, step_size=60, gamma=0.1)\n', (30495, 30531), False, 'from torch.optim import lr_scheduler\n'), ((30617, 30653), 'research.cbir.data_loader.load_imagedataset_data', 'data_loader.load_imagedataset_data', ([], {}), '()\n', (30651, 30653), False, 'from research.cbir import data_loader\n'), ((1561, 1586), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1584, 1586), False, 'import torch\n'), ((1671, 1693), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (1686, 1693), True, 'import torch.nn as nn\n'), ((2022, 2033), 'time.time', 'time.time', ([], {}), '()\n', (2031, 2033), False, 'import time\n'), ((7149, 7183), 'research.cbir.file_utils.mkdir_if_not_exist', 'mkdir_if_not_exist', (['model_path_dir'], {}), '(model_path_dir)\n', (7167, 7183), False, 'from research.cbir.file_utils import mkdir_if_not_exist\n'), ((7683, 7698), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7696, 7698), False, 'import torch\n'), ((10276, 10301), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10299, 10301), False, 'import torch\n'), ((10386, 10408), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (10401, 10408), True, 'import torch.nn as nn\n'), ((10737, 10748), 'time.time', 'time.time', ([], {}), '()\n', (10746, 10748), False, 'import time\n'), ((16135, 16169), 'research.cbir.file_utils.mkdir_if_not_exist', 'mkdir_if_not_exist', (['model_path_dir'], {}), '(model_path_dir)\n', (16153, 16169), False, 'from research.cbir.file_utils import mkdir_if_not_exist\n'), ((16668, 16683), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16681, 16683), False, 'import torch\n'), ((20223, 20248), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (20246, 20248), False, 'import torch\n'), ((20333, 20355), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (20348, 20355), True, 'import 
torch.nn as nn\n'), ((20684, 20695), 'time.time', 'time.time', ([], {}), '()\n', (20693, 20695), False, 'import time\n'), ((25599, 25633), 'research.cbir.file_utils.mkdir_if_not_exist', 'mkdir_if_not_exist', (['model_path_dir'], {}), '(model_path_dir)\n', (25617, 25633), False, 'from research.cbir.file_utils import mkdir_if_not_exist\n'), ((26132, 26147), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26145, 26147), False, 'import torch\n'), ((31068, 31103), 'research.cbir.densenet.DenseNet121', 'DenseNet121', ([], {'num_cls': "cfg['out_num']"}), "(num_cls=cfg['out_num'])\n", (31079, 31103), False, 'from research.cbir.densenet import DenseNet121\n'), ((1515, 1540), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1538, 1540), False, 'import torch\n'), ((1619, 1644), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1642, 1644), False, 'import torch\n'), ((6838, 6849), 'time.time', 'time.time', ([], {}), '()\n', (6847, 6849), False, 'import time\n'), ((7195, 7220), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (7218, 7220), False, 'import torch\n'), ((8036, 8062), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (8045, 8062), False, 'import torch\n'), ((8086, 8104), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {}), '(outputs)\n', (8095, 8104), True, 'import torch.nn.functional as F\n'), ((8211, 8233), 'torch.topk', 'torch.topk', (['outputs', '(1)'], {}), '(outputs, 1)\n', (8221, 8233), False, 'import torch\n'), ((10230, 10255), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10253, 10255), False, 'import torch\n'), ((10334, 10359), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10357, 10359), False, 'import torch\n'), ((15824, 15835), 'time.time', 'time.time', ([], {}), '()\n', (15833, 15835), False, 'import time\n'), ((16181, 16206), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], 
{}), '()\n', (16204, 16206), False, 'import torch\n'), ((16956, 16974), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {}), '(outputs)\n', (16965, 16974), True, 'import torch.nn.functional as F\n'), ((17081, 17103), 'torch.topk', 'torch.topk', (['outputs', '(2)'], {}), '(outputs, 2)\n', (17091, 17103), False, 'import torch\n'), ((17199, 17225), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (17208, 17225), False, 'import torch\n'), ((20177, 20202), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (20200, 20202), False, 'import torch\n'), ((20281, 20306), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (20304, 20306), False, 'import torch\n'), ((25288, 25299), 'time.time', 'time.time', ([], {}), '()\n', (25297, 25299), False, 'import time\n'), ((25645, 25670), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (25668, 25670), False, 'import torch\n'), ((26507, 26532), 'torch.max', 'torch.max', (['thetas.data', '(1)'], {}), '(thetas.data, 1)\n', (26516, 26532), False, 'import torch\n'), ((26556, 26574), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {}), '(outputs)\n', (26565, 26574), True, 'import torch.nn.functional as F\n'), ((26681, 26703), 'torch.topk', 'torch.topk', (['outputs', '(1)'], {}), '(outputs, 1)\n', (26691, 26703), False, 'import torch\n'), ((7512, 7555), 'os.path.join', 'os.path.join', (["('./model/%s.pth' % model_name)"], {}), "('./model/%s.pth' % model_name)\n", (7524, 7555), False, 'import os\n'), ((16497, 16540), 'os.path.join', 'os.path.join', (["('./model/%s.pth' % model_name)"], {}), "('./model/%s.pth' % model_name)\n", (16509, 16540), False, 'import os\n'), ((25961, 26004), 'os.path.join', 'os.path.join', (["('./model/%s.pth' % model_name)"], {}), "('./model/%s.pth' % model_name)\n", (25973, 26004), False, 'import os\n'), ((3859, 3890), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == 
labels.data)\n', (3868, 3890), False, 'import torch\n'), ((5491, 5531), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['tmp_y_true', 'tmp_y_pred'], {}), '(tmp_y_true, tmp_y_pred)\n', (5507, 5531), False, 'from sklearn.metrics import confusion_matrix\n'), ((5587, 5599), 'numpy.array', 'np.array', (['cm'], {}), '(cm)\n', (5595, 5599), True, 'import numpy as np\n'), ((6392, 6426), 'research.cbir.file_utils.mkdir_if_not_exist', 'mkdir_if_not_exist', (['model_path_dir'], {}), '(model_path_dir)\n', (6410, 6426), False, 'from research.cbir.file_utils import mkdir_if_not_exist\n'), ((12952, 12983), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (12961, 12983), False, 'import torch\n'), ((14512, 14552), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['tmp_y_true', 'tmp_y_pred'], {}), '(tmp_y_true, tmp_y_pred)\n', (14528, 14552), False, 'from sklearn.metrics import confusion_matrix\n'), ((14608, 14620), 'numpy.array', 'np.array', (['cm'], {}), '(cm)\n', (14616, 14620), True, 'import numpy as np\n'), ((15413, 15447), 'research.cbir.file_utils.mkdir_if_not_exist', 'mkdir_if_not_exist', (['model_path_dir'], {}), '(model_path_dir)\n', (15431, 15447), False, 'from research.cbir.file_utils import mkdir_if_not_exist\n'), ((22499, 22530), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (22508, 22530), False, 'import torch\n'), ((23976, 24016), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['tmp_y_true', 'tmp_y_pred'], {}), '(tmp_y_true, tmp_y_pred)\n', (23992, 24016), False, 'from sklearn.metrics import confusion_matrix\n'), ((24072, 24084), 'numpy.array', 'np.array', (['cm'], {}), '(cm)\n', (24080, 24084), True, 'import numpy as np\n'), ((24877, 24911), 'research.cbir.file_utils.mkdir_if_not_exist', 'mkdir_if_not_exist', (['model_path_dir'], {}), '(model_path_dir)\n', (24895, 24911), False, 'from research.cbir.file_utils import mkdir_if_not_exist\n'), ((3228, 3268), 
'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (3250, 3268), False, 'import torch\n'), ((3491, 3512), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (3500, 3512), False, 'import torch\n'), ((4956, 4982), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4965, 4982), False, 'import torch\n'), ((6450, 6475), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6473, 6475), False, 'import torch\n'), ((12000, 12040), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (12022, 12040), False, 'import torch\n'), ((12132, 12153), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (12141, 12153), False, 'import torch\n'), ((13977, 14003), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (13986, 14003), False, 'import torch\n'), ((15471, 15496), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15494, 15496), False, 'import torch\n'), ((21835, 21875), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (21857, 21875), False, 'import torch\n'), ((22132, 22152), 'torch.max', 'torch.max', (['thetas', '(1)'], {}), '(thetas, 1)\n', (22141, 22152), False, 'import torch\n'), ((23442, 23467), 'torch.max', 'torch.max', (['thetas.data', '(1)'], {}), '(thetas.data, 1)\n', (23451, 23467), False, 'import torch\n'), ((24935, 24960), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (24958, 24960), False, 'import torch\n')] |
#!/usr/bin/python
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Reference for the code of this script
# Block 0 - Library Imports
# Block 1.0 - Define And Create Gaussians
# Block 2.0 - Write CSV File
"""
Summary:
This script produces 5 dataframes for my simple example: 1 signal dataframe and 4 background dataframes.
All 5 consist of N_events datapoints, following a 2-dimensional Gaussian distribution with
different means and standard deviations. This results in two variables which I simply called X and Y.
Furthermore, I make a color-coded scatter plot to visualize the datapoints and save it in the current dir.
Lastly the dataframes are saved in the given path in Block 2.
Since this is just to provide me with an example to work with, you don't need to worry about this file really.
Just make sure the Data you want to train and test on is divided into a signal and background dataframe and saved
in the corresponding directory.
"""
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Block 0 - Library Imports
import os
import sys
import csv
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.cm as cm
np.set_printoptions(threshold=sys.maxsize)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Block 1.0 - Define And Create Gaussians
np.random.seed(5) # Set a random seed for reproducible
N_events = 100000 # Number of events per gaussian
# Feature List
Feature_List = ['X', 'Y']
# Signal
signal = np.random.multivariate_normal([0, 0], [[0.5, 0],[0, 0.5]], N_events)
SignalEmptydf = pd.DataFrame()
# Backgrounds
bg_1 = np.random.multivariate_normal([0.5, -1.5], [[0.1, 0],[0, 0.1]], N_events)
bg_2 = np.random.multivariate_normal([-2, 1], [[0.5, 0],[0, 1.0]], N_events)
bg_3 = np.random.multivariate_normal([3, 2], [[3, 0],[0, 2]], N_events)
bg_4 = np.random.multivariate_normal([8, 8], [[0.1, 0],[0, 0.1]], N_events)
# Turn into Dataframes
SignalEmptydf = pd.DataFrame()
Bg1Emptydf = pd.DataFrame()
Bg2Emptydf = pd.DataFrame()
Bg3Emptydf = pd.DataFrame()
Bg4Emptydf = pd.DataFrame()
for k, l in enumerate(Feature_List):
SignalEmptydf[Feature_List[k]] = signal[:,k]
Bg1Emptydf[Feature_List[k]] = bg_1[:,k]
Bg2Emptydf[Feature_List[k]] = bg_2[:,k]
Bg3Emptydf[Feature_List[k]] = bg_3[:,k]
Bg4Emptydf[Feature_List[k]] = bg_4[:,k]
# Let's make a quick scatter plot
fig, ax = plt.subplots(figsize=(10, 8))
# Add signal and background events
ax.scatter(signal[:,0], signal[:,1],c='blue', marker='x', s=10, label='Signal', alpha=1.0, edgecolors='none')
ax.scatter(bg_1[:,0], bg_1[:,1],c='red', marker='+', s=8, label='Background 1', alpha=0.1, edgecolors='none')
ax.scatter(bg_2[:,0], bg_2[:,1],c='darkred', marker='+', s=8, label='Background 2', alpha=0.1, edgecolors='none')
ax.scatter(bg_3[:,0], bg_3[:,1],c='black', marker='+', s=8, label='Background 3', alpha=0.1, edgecolors='none')
ax.scatter(bg_4[:,0], bg_4[:,1],c='purple', marker='+', s=8, label='Background 4', alpha=0.1, edgecolors='none')
# Legend, Labels, etc.
ax.legend(loc='best', title="Groups")
ax.axis('equal')
plt.title("Signal and Background Gaussians, " + str(format(N_events, ',d')) + " Events each")
plt.xlabel("X")
plt.ylabel("Y")
# Save Plot
fig_name = "Gaussian_Plot.png"
plt.savefig(fig_name)
plt.close()
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Block 2.0 - Write CSV File
# Paths
data_path = "Data\\"
signal_path = data_path + "Signal\\"
background_path = data_path + "Background\\"
if not os.path.exists(data_path):
os.makedirs(data_path)
if not os.path.exists(signal_path):
os.makedirs(signal_path)
if not os.path.exists(background_path):
os.makedirs(background_path)
# Signal CSV File
SignalEmptydf.to_csv("Gaussian_Signal.csv", sep=",", encoding='utf-8', index=False)
# Background CSV Files
Bg1Emptydf.to_csv(signal_path+"\Gaussian_Bg_1.csv", sep=",", encoding='utf-8', index=False)
Bg2Emptydf.to_csv(background_path+"\Gaussian_Bg_2.csv", sep=",", encoding='utf-8', index=False)
Bg3Emptydf.to_csv(background_path+"\Gaussian_Bg_3.csv", sep=",", encoding='utf-8', index=False)
Bg4Emptydf.to_csv(background_path+"\Gaussian_Bg_4.csv", sep=",", encoding='utf-8', index=False)
| [
"os.path.exists",
"matplotlib.pyplot.savefig",
"os.makedirs",
"matplotlib.pyplot.ylabel",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.random.seed",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"numpy.set_printoptions"
] | [((1435, 1477), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (1454, 1477), True, 'import numpy as np\n'), ((1708, 1725), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (1722, 1725), True, 'import numpy as np\n'), ((1874, 1943), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[0, 0]', '[[0.5, 0], [0, 0.5]]', 'N_events'], {}), '([0, 0], [[0.5, 0], [0, 0.5]], N_events)\n', (1903, 1943), True, 'import numpy as np\n'), ((1959, 1973), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1971, 1973), True, 'import pandas as pd\n'), ((1995, 2069), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[0.5, -1.5]', '[[0.1, 0], [0, 0.1]]', 'N_events'], {}), '([0.5, -1.5], [[0.1, 0], [0, 0.1]], N_events)\n', (2024, 2069), True, 'import numpy as np\n'), ((2076, 2146), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[-2, 1]', '[[0.5, 0], [0, 1.0]]', 'N_events'], {}), '([-2, 1], [[0.5, 0], [0, 1.0]], N_events)\n', (2105, 2146), True, 'import numpy as np\n'), ((2153, 2218), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[3, 2]', '[[3, 0], [0, 2]]', 'N_events'], {}), '([3, 2], [[3, 0], [0, 2]], N_events)\n', (2182, 2218), True, 'import numpy as np\n'), ((2225, 2294), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[8, 8]', '[[0.1, 0], [0, 0.1]]', 'N_events'], {}), '([8, 8], [[0.1, 0], [0, 0.1]], N_events)\n', (2254, 2294), True, 'import numpy as np\n'), ((2333, 2347), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2345, 2347), True, 'import pandas as pd\n'), ((2361, 2375), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2373, 2375), True, 'import pandas as pd\n'), ((2389, 2403), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2401, 2403), True, 'import pandas as pd\n'), ((2417, 2431), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2429, 2431), True, 
'import pandas as pd\n'), ((2445, 2459), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2457, 2459), True, 'import pandas as pd\n'), ((2752, 2781), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (2764, 2781), True, 'from matplotlib import pyplot as plt\n'), ((3548, 3563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (3558, 3563), True, 'from matplotlib import pyplot as plt\n'), ((3564, 3579), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (3574, 3579), True, 'from matplotlib import pyplot as plt\n'), ((3623, 3644), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_name'], {}), '(fig_name)\n', (3634, 3644), True, 'from matplotlib import pyplot as plt\n'), ((3645, 3656), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3654, 3656), True, 'from matplotlib import pyplot as plt\n'), ((3993, 4018), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (4007, 4018), False, 'import os\n'), ((4021, 4043), 'os.makedirs', 'os.makedirs', (['data_path'], {}), '(data_path)\n', (4032, 4043), False, 'import os\n'), ((4051, 4078), 'os.path.exists', 'os.path.exists', (['signal_path'], {}), '(signal_path)\n', (4065, 4078), False, 'import os\n'), ((4081, 4105), 'os.makedirs', 'os.makedirs', (['signal_path'], {}), '(signal_path)\n', (4092, 4105), False, 'import os\n'), ((4113, 4144), 'os.path.exists', 'os.path.exists', (['background_path'], {}), '(background_path)\n', (4127, 4144), False, 'import os\n'), ((4147, 4175), 'os.makedirs', 'os.makedirs', (['background_path'], {}), '(background_path)\n', (4158, 4175), False, 'import os\n')] |
from typing import Tuple
import numpy as np
from keras.callbacks import ReduceLROnPlateau
from sklearn.utils import class_weight
from vivid.estimators.base import MetaBlock
from vivid.sklearn_extend.neural_network import ScikitKerasClassifier, SKerasRegressor, ROCAucCallback
class BaseSkerasBlock(MetaBlock):
initial_params = {
'input_scaling': True,
'epochs': 30,
'batch_size': 128,
'workers': -1
}
def get_keras_callbacks(self, training_set, validation_set):
return [
ReduceLROnPlateau(patience=5, verbose=1)
]
def get_fit_params_on_each_fold(self,
model_params: dict,
training_set: Tuple[np.ndarray, np.ndarray],
validation_set: Tuple[np.ndarray, np.ndarray],
indexes_set: Tuple[np.ndarray, np.ndarray],
experiment) -> dict:
params = super(BaseSkerasBlock, self).get_fit_params_on_each_fold(
model_params=model_params,
training_set=training_set,
validation_set=validation_set,
indexes_set=indexes_set,
experiment=experiment)
add_params = {
'callbacks': self.get_keras_callbacks(training_set, validation_set),
'validation_data': validation_set,
}
params.update(add_params)
return params
class KerasClassifierBlock(BaseSkerasBlock):
model_class = ScikitKerasClassifier
def get_keras_callbacks(self, training_set, validation_set):
return [
*super(KerasClassifierBlock, self).get_keras_callbacks(training_set, validation_set),
ROCAucCallback(training_data=training_set, validation_data=validation_set),
]
def get_fit_params_on_each_fold(self,
model_params: dict,
training_set: Tuple[np.ndarray, np.ndarray],
validation_set: Tuple[np.ndarray, np.ndarray],
indexes_set: Tuple[np.ndarray, np.ndarray],
experiment) -> dict:
params = super(KerasClassifierBlock, self) \
.get_fit_params_on_each_fold(model_params, training_set, validation_set, indexes_set, experiment)
y = training_set[1]
weight = class_weight.compute_class_weight('balanced', np.unique(y), y)
params['class_weight'] = weight
return params
class KerasRegressorBlock(BaseSkerasBlock):
model_class = SKerasRegressor
| [
"vivid.sklearn_extend.neural_network.ROCAucCallback",
"keras.callbacks.ReduceLROnPlateau",
"numpy.unique"
] | [((540, 580), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'patience': '(5)', 'verbose': '(1)'}), '(patience=5, verbose=1)\n', (557, 580), False, 'from keras.callbacks import ReduceLROnPlateau\n'), ((1758, 1832), 'vivid.sklearn_extend.neural_network.ROCAucCallback', 'ROCAucCallback', ([], {'training_data': 'training_set', 'validation_data': 'validation_set'}), '(training_data=training_set, validation_data=validation_set)\n', (1772, 1832), False, 'from vivid.sklearn_extend.neural_network import ScikitKerasClassifier, SKerasRegressor, ROCAucCallback\n'), ((2499, 2511), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2508, 2511), True, 'import numpy as np\n')] |
import numpy as np
def euclidean_distances(X, Y=None, Y_norm_squared=None, X_norm_squared=None):
'''
将数据的每行看做样本,计算两矩阵样本之间的欧氏距离
:param X: matrix one
:param Y: matrix two
:param Y_norm_squared:
:param X_norm_squared:
:return: pairwise距离矩阵
'''
X = np.array(X)
Y = np.array(Y) if Y else X # 若未指定Y则令其为X
dist_mat = np.dot(X, Y.T)
X_squared = np.sum(np.square(X), axis=1).reshape((dist_mat.shape[0], -1))
Y_squared = np.sum(np.square(Y), axis=1).reshape((-1, dist_mat.shape[1]))
squared_dist = X_squared - 2 * dist_mat + Y_squared
squared_dist[squared_dist < 0] = 0 # 在某些数据下可能出现负数,需要做截断处理
return np.sqrt(squared_dist)
if __name__ == '__main__':
X = [[0, 1], [1, 1]]
Y = [[0, 0]]
print(euclidean_distances(X))
print(euclidean_distances(X, Y))
| [
"numpy.array",
"numpy.dot",
"numpy.sqrt",
"numpy.square"
] | [((283, 294), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (291, 294), True, 'import numpy as np\n'), ((357, 371), 'numpy.dot', 'np.dot', (['X', 'Y.T'], {}), '(X, Y.T)\n', (363, 371), True, 'import numpy as np\n'), ((660, 681), 'numpy.sqrt', 'np.sqrt', (['squared_dist'], {}), '(squared_dist)\n', (667, 681), True, 'import numpy as np\n'), ((303, 314), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (311, 314), True, 'import numpy as np\n'), ((396, 408), 'numpy.square', 'np.square', (['X'], {}), '(X)\n', (405, 408), True, 'import numpy as np\n'), ((474, 486), 'numpy.square', 'np.square', (['Y'], {}), '(Y)\n', (483, 486), True, 'import numpy as np\n')] |
"""
TODO:
Translation, Derivatives, and Subdevatives
Translating polynumbers and the Derivative | Arithmetic and Geometry Math Foundations 69 | https://www.youtube.com/watch?v=vyRFz8J4Y_M&list=PL5A714C94D40392AB&index=72
L_beta(p) = p(beta + alpha) = p(alpha + beta)
"""
import numpy as np
import fractions
import numbers
class Rational(fractions.Fraction):
"""
Extension of the Fraction class, mostly to make printing nicer
>>> 3 * -(Rational(3) / 2)
"""
def __str__(self):
if self.denominator == 1:
return str(self.numerator)
else:
return '({}/{})'.format(self.numerator, self.denominator)
def __repr__(self):
return str(self)
def __neg__(self):
return Rational(super().__neg__())
def __add__(self, other):
return Rational(super().__add__(other))
def __radd__(self, other):
return Rational(super().__radd__(other))
def __sub__(self, other):
return Rational(super().__sub__(other))
def __mul__(self, other):
return Rational(super().__mul__(other))
def __rmul__(self, other):
return Rational(super().__rmul__(other))
def __truediv__(self, other):
return Rational(super().__truediv__(other))
def __floordiv__(self, other):
return Rational(super().__floordiv__(other))
def rationalize(data):
"""
Takes an ndarray and ensures its members are rational
Example:
>>> data = ((np.random.rand(3, 5)) * 100)
>>> rationalize(data)
"""
if isinstance(data, np.ndarray):
data = np.vectorize(Rational)(data)
elif isinstance(data, numbers.Number):
data = Rational(data)
else:
raise TypeError(type(data))
return data
class PolyNumberV1:
"""
A PolyNumberV1 as defined by <NAME>
Coefficients are stored in ascending order of degree, i.e.
``c = self.coeff[2]`` is the term for ``c * alpha ** 2``
References:
https://www.youtube.com/channel/UCXl0Zbk8_rvjyLwAR-Xh9pQ
"""
def __init__(self, coeff):
self.coeff = np.asarray(coeff)
def __str__(self):
return 'PolyNumberV1({})'.format(str(self.coeff))
def __repr__(self):
return repr(self.coeff).replace('array', 'PolyNumberV1')
@classmethod
def coerce(cls, data):
return cls(np.atleast_1d(np.asarray(data)))
@classmethod
def from_degree(cls, degree):
""" construct the "unary?" polynomial of a certain degree """
# not sure what to call this
if degree < 0:
return cls([0])
else:
return cls(([0] * (degree) + [1]))
def __lshift__(self, places):
return PolyNumberV1(self.coeff[places:])
def __rshift__(self, places):
"""
import timerit
ti = timerit.Timerit()
ti.reset('pad').call(lambda: np.pad(self.coeff, (n, 0))).print()
ti.reset('stack').call(lambda: np.hstack([np.zeros_like(self.coeff, shape=(n)), self.coeff])).print()
self = PolyNumberV1([1])
places = 3
self >> 5
self << places
"""
return self.lower_pad(places)
def lower_pad(self, places):
left_pad = np.zeros_like(self.coeff, shape=places)
new_coeff = np.hstack([left_pad, self.coeff])
return PolyNumberV1(new_coeff)
def upper_pad(self, places):
right_pad = np.zeros_like(self.coeff, shape=places)
new_coeff = np.hstack([self.coeff, right_pad])
return PolyNumberV1(new_coeff)
@classmethod
def random(cls, num_coeff=3, min_coeff=0, max_coeff=21):
coeff = np.random.randint(min_coeff, max_coeff, size=num_coeff)
self = cls(coeff)
return self
def as_rational(self):
return PolyNumberV1(np.array(list(map(Rational, self.coeff)), dtype=object))
def drop_lead_zeros(self):
nonzero_idxs = np.nonzero(self.coeff)[0]
if len(nonzero_idxs) > 0:
d = nonzero_idxs.max() + 1
else:
d = 0
return PolyNumberV1(self.coeff[0:d])
def copy(self):
return PolyNumberV1(self.coeff.copy())
def lead(self):
"""
Return leading (highest power) coefficient
"""
if len(self.coeff) > 0:
return self.coeff[-1]
else:
return 0
def __eq__(self, other):
p = self.coeff
q = other.coeff
n = min(len(p), len(q))
return np.all(p[0:n] == q[0:n]) and np.all(p[n:] == 0) and np.all(q[n:] == 0)
def degree(self):
"""
Number of coefficients
References:
https://en.wikipedia.org/wiki/Degree_of_a_polynomial#Degree_of_the_zero_polynomial
"""
if len(self.coeff) == 0:
return -float('inf')
elif self.coeff[-1] != 0:
return len(self.coeff) - 1
else:
return len(self.drop_lead_zeros().coeff) - 1
def __neg__(self):
return PolyNumberV1(-self.coeff)
def __add__(self, other):
p = self.coeff
q = other.coeff
if len(p) > len(q):
p, q = q, p
dtype = np.result_type(p, q)
r = q.copy().astype(dtype)
r[0:len(p)] += p
return PolyNumberV1(r)
def __sub__(self, other):
return self + (-other)
def as_polynomial(self):
"""
Returns the numpy polynomial representation
"""
return np.polynomial.Polynomial(self.coeff)
def __mul__(self, other):
"""
Example:
self = PolyNumberV1([2, 7, 2, -3]).as_rational()
other = PolyNumberV1([1, 3]).as_rational()
result = self * other
print('result = {!r}'.format(result))
p1 = self.as_polynomial()
p2 = other.as_polynomial()
p3 = p1 * p2
print('p3 = {!r}'.format(p3))
divmod(p1, p2)
divmod(self, other)
"""
if 0:
# More efficient
return PolyNumberV1(np.polymul(self.coeff, other.coeff))
else:
# Reasonably efficient
p = self.coeff
q = other.coeff
len_p = len(p)
len_q = len(q)
p_basis_idxs = np.arange(len_p)[:, None]
q_basis_idxs = np.arange(len_q)[None, :]
r_idxs = (q_basis_idxs + p_basis_idxs).ravel()
raveled_r_idxs = np.arange(len_p * len_q)
p_idxs, q_idxs = np.unravel_index(raveled_r_idxs, (len_p, len_q))
terms = p[p_idxs] * q[q_idxs]
len_r = (len_p + len_q) - 1
r = np.zeros(len_r, dtype=terms.dtype)
np.add.at(r, r_idxs, terms)
result = PolyNumberV1(r)
return result
def __divmod__(self, other):
"""
Not efficient
"""
n = self
d = other
# https://en.wikipedia.org/wiki/Polynomial_greatest_common_divisor#Euclidean_division
# https://en.wikipedia.org/wiki/Polynomial_long_division
zero = PolyNumberV1.coerce(0)
r = n # init remainder
q = zero.copy() # init quotient (div result)
shift = r.degree() - d.degree()
while r != zero and shift >= 0:
t = PolyNumberV1([(r.lead() / d.lead())]).lower_pad(shift)
q = q + t
r = (r - (d * t)).drop_lead_zeros()
shift = r.degree() - d.degree()
return (q, r)
def __truediv__(self, other):
return divmod(self, other)[0]
def __mod__(self, other):
return divmod(self, other)[1]
class PolyNumberNd(PolyNumberV1):
"""
Generalization of PolyNumbers, BiPolyNumbers, TriPolyNumbers, etc...
"""
def __init__(self, coeff):
"""
Args:
coeff (ndarray): each dimension corresponds to a different poly
number "variable", i.e. alpha, beta, etc...
Example:
>>> import sys, ubelt
>>> sys.path.append(ubelt.expandpath('~/misc/learn'))
>>> from polynumber import * # NOQA
>>> coeff = rationalize(np.array([
>>> [1, 7, 10],
>>> [7, 20, 0],
>>> [10, 0, 0],
>>> ]))
>>> self = PolyNumberNd(coeff)
"""
self.coeff = coeff
def demo():
p = PolyNumberV1([2, 7, 2, -3]).as_rational()
q = PolyNumberV1([1, 3]).as_rational()
p = PolyNumberV1.random(23).as_rational()
q = PolyNumberV1.random(20).as_rational()
self, other = p, q # NOQA
r_sum = p + q
r_sub = p - q
r_mul = p * q
r_div, r_rem = divmod(p, q)
print('r_sub = {!r}'.format(r_sub))
print('r_sum = {!r}'.format(r_sum))
print('r_mul = {!r}'.format(r_mul))
print('r_div = {!r}'.format(r_div))
print('r_rem = {!r}'.format(r_rem))
assert (r_div * q + r_rem) == p
def symcheck():
import sympy as sym
x = sym.symbols('x')
a_coeff = sym.symbols(', '.join(['a' + str(i) for i in range(4)]))
b_coeff = sym.symbols(', '.join(['b' + str(i) for i in range(4)]))
poly_a = sum(a_i * (x ** i) for i, a_i in enumerate(a_coeff))
poly_b = sum(b_i * (x ** i) for i, b_i in enumerate(b_coeff))
poly_c = (poly_a * poly_b).expand().collect(x)
z = sym.Poly(poly_a) * sym.Poly(poly_b)
terms = poly_c.as_ordered_terms()
print(sum(sorted(terms, key=lambda term: term.as_powers_dict().get(x, 0))))
class PolyNumber(np.polynomial.Polynomial):
"""
Inherit capabilities from numpy Polynomial
"""
# def __init__(self, coeff):
# super().__init__(coeff)
# self.coeff = np.asarray(coeff)
def __str__(self):
return 'PolyNumber({})'.format(str(self.coef))
def __repr__(self):
return repr(self.coef).replace('array', 'PolyNumber')
@classmethod
def coerce(cls, data):
return cls(np.atleast_1d(np.asarray(data)))
@classmethod
def random(cls, num_coeff=3, min_coeff=0, max_coeff=21):
coef = np.random.randint(min_coeff, max_coeff, size=num_coeff)
self = cls(coef)
return self
def as_rational(self):
return PolyNumber(np.array(list(map(Rational, self.coef)), dtype=object))
def numpy_polynomials():
# Numpy does not seem to have a polynomial class for N dimensions
poly_a = PolyNumber([2, 7, 2, -3]).as_rational()
poly_b = PolyNumber([1, 3]).as_rational()
prod = poly_a * poly_b
div, rem = divmod(poly_a, poly_b)
bipoly_b = PolyNumber(np.array([[1, 0, 1], [0, 0, 0], [1, 0, 0]]))
| [
"numpy.polymul",
"sympy.Poly",
"numpy.hstack",
"numpy.result_type",
"numpy.asarray",
"sympy.symbols",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"numpy.unravel_index",
"numpy.polynomial.Polynomial",
"numpy.nonzero",
"numpy.add.at",
"numpy.all",
"numpy.zeros_like",
"numpy.ara... | [((8956, 8972), 'sympy.symbols', 'sym.symbols', (['"""x"""'], {}), "('x')\n", (8967, 8972), True, 'import sympy as sym\n'), ((2104, 2121), 'numpy.asarray', 'np.asarray', (['coeff'], {}), '(coeff)\n', (2114, 2121), True, 'import numpy as np\n'), ((3227, 3266), 'numpy.zeros_like', 'np.zeros_like', (['self.coeff'], {'shape': 'places'}), '(self.coeff, shape=places)\n', (3240, 3266), True, 'import numpy as np\n'), ((3287, 3320), 'numpy.hstack', 'np.hstack', (['[left_pad, self.coeff]'], {}), '([left_pad, self.coeff])\n', (3296, 3320), True, 'import numpy as np\n'), ((3414, 3453), 'numpy.zeros_like', 'np.zeros_like', (['self.coeff'], {'shape': 'places'}), '(self.coeff, shape=places)\n', (3427, 3453), True, 'import numpy as np\n'), ((3474, 3508), 'numpy.hstack', 'np.hstack', (['[self.coeff, right_pad]'], {}), '([self.coeff, right_pad])\n', (3483, 3508), True, 'import numpy as np\n'), ((3643, 3698), 'numpy.random.randint', 'np.random.randint', (['min_coeff', 'max_coeff'], {'size': 'num_coeff'}), '(min_coeff, max_coeff, size=num_coeff)\n', (3660, 3698), True, 'import numpy as np\n'), ((5164, 5184), 'numpy.result_type', 'np.result_type', (['p', 'q'], {}), '(p, q)\n', (5178, 5184), True, 'import numpy as np\n'), ((5459, 5495), 'numpy.polynomial.Polynomial', 'np.polynomial.Polynomial', (['self.coeff'], {}), '(self.coeff)\n', (5483, 5495), True, 'import numpy as np\n'), ((9310, 9326), 'sympy.Poly', 'sym.Poly', (['poly_a'], {}), '(poly_a)\n', (9318, 9326), True, 'import sympy as sym\n'), ((9329, 9345), 'sympy.Poly', 'sym.Poly', (['poly_b'], {}), '(poly_b)\n', (9337, 9345), True, 'import sympy as sym\n'), ((10040, 10095), 'numpy.random.randint', 'np.random.randint', (['min_coeff', 'max_coeff'], {'size': 'num_coeff'}), '(min_coeff, max_coeff, size=num_coeff)\n', (10057, 10095), True, 'import numpy as np\n'), ((10541, 10584), 'numpy.array', 'np.array', (['[[1, 0, 1], [0, 0, 0], [1, 0, 0]]'], {}), '([[1, 0, 1], [0, 0, 0], [1, 0, 0]])\n', (10549, 10584), True, 'import 
numpy as np\n'), ((1603, 1625), 'numpy.vectorize', 'np.vectorize', (['Rational'], {}), '(Rational)\n', (1615, 1625), True, 'import numpy as np\n'), ((3913, 3935), 'numpy.nonzero', 'np.nonzero', (['self.coeff'], {}), '(self.coeff)\n', (3923, 3935), True, 'import numpy as np\n'), ((4478, 4502), 'numpy.all', 'np.all', (['(p[0:n] == q[0:n])'], {}), '(p[0:n] == q[0:n])\n', (4484, 4502), True, 'import numpy as np\n'), ((4507, 4525), 'numpy.all', 'np.all', (['(p[n:] == 0)'], {}), '(p[n:] == 0)\n', (4513, 4525), True, 'import numpy as np\n'), ((4530, 4548), 'numpy.all', 'np.all', (['(q[n:] == 0)'], {}), '(q[n:] == 0)\n', (4536, 4548), True, 'import numpy as np\n'), ((6440, 6464), 'numpy.arange', 'np.arange', (['(len_p * len_q)'], {}), '(len_p * len_q)\n', (6449, 6464), True, 'import numpy as np\n'), ((6494, 6542), 'numpy.unravel_index', 'np.unravel_index', (['raveled_r_idxs', '(len_p, len_q)'], {}), '(raveled_r_idxs, (len_p, len_q))\n', (6510, 6542), True, 'import numpy as np\n'), ((6643, 6677), 'numpy.zeros', 'np.zeros', (['len_r'], {'dtype': 'terms.dtype'}), '(len_r, dtype=terms.dtype)\n', (6651, 6677), True, 'import numpy as np\n'), ((6690, 6717), 'numpy.add.at', 'np.add.at', (['r', 'r_idxs', 'terms'], {}), '(r, r_idxs, terms)\n', (6699, 6717), True, 'import numpy as np\n'), ((2372, 2388), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2382, 2388), True, 'import numpy as np\n'), ((6048, 6083), 'numpy.polymul', 'np.polymul', (['self.coeff', 'other.coeff'], {}), '(self.coeff, other.coeff)\n', (6058, 6083), True, 'import numpy as np\n'), ((6272, 6288), 'numpy.arange', 'np.arange', (['len_p'], {}), '(len_p)\n', (6281, 6288), True, 'import numpy as np\n'), ((6325, 6341), 'numpy.arange', 'np.arange', (['len_q'], {}), '(len_q)\n', (6334, 6341), True, 'import numpy as np\n'), ((9927, 9943), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (9937, 9943), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 18 14:19:48 2022
@author: <NAME>
"""
import warnings
import pdb
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import numpy as np
import tensorflow as tf
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization
# from keras.optimizers import Adam
from keras.optimizers import adam_v2
from keras import backend as K
K.set_image_data_format('channels_last')
np.random.seed(10101)
img_rows = 256
img_cols = 256
smooth = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def iou_coef(y_true, y_pred, smooth=1):
intersection = K.sum(K.abs(y_true * y_pred), axis=[1,2,3])
union = K.sum(y_true,[1,2,3])+K.sum(y_pred,[1,2,3])-intersection
iou = K.mean((intersection + smooth) / (union + smooth), axis=0)
return iou
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
def focal_loss(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
BCE = K.binary_crossentropy(y_true_f, y_pred_f)
BCE_EXP = K.exp(-BCE)
focal_loss = K.mean(0.8 * K.pow((1-BCE_EXP), 2.) * BCE)
return focal_loss
def loss(y_true, y_pred):
return -(0.4*dice_coef(y_true, y_pred)+0.6*iou_coef(y_true, y_pred))
def aspp_block(x, num_filters, rate_scale=1):
x1 = Conv2D(num_filters, (3, 3), dilation_rate=(6 * rate_scale, 6 * rate_scale), padding="same")(x)
x1 = BatchNormalization()(x1)
x2 = Conv2D(num_filters, (3, 3), dilation_rate=(12 * rate_scale, 12 * rate_scale), padding="same")(x)
x2 = BatchNormalization()(x2)
x3 = Conv2D(num_filters, (3, 3), dilation_rate=(18 * rate_scale, 18 * rate_scale), padding="same")(x)
x3 = BatchNormalization()(x3)
x4 = Conv2D(num_filters, (3, 3), padding="same")(x)
x4 = BatchNormalization()(x4)
y = Add()([x1, x2, x3, x4])
y = Conv2D(num_filters, (1, 1), padding="same")(y)
return y
def squeeze_excite_block(inputs, ratio=8):
init = inputs
channel_axis = -1
filters = init.shape[channel_axis]
se_shape = (1, 1, filters)
se = GlobalAveragePooling2D()(init)
se = Reshape(se_shape)(se)
se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
x = Multiply()([init, se])
return x
def resnet_block(x, n_filter, strides=1):
x_init = x
## Conv 1
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(n_filter, (3, 3), padding="same", strides=strides)(x)
## Conv 2
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(n_filter, (3, 3), padding="same", strides=1)(x)
## Shortcut
s = Conv2D(n_filter, (1, 1), padding="same", strides=strides)(x_init)
s = BatchNormalization()(s)
## Add
x = Add()([x, s])
x = squeeze_excite_block(x)
return x
def get_rwnet():
inputs = Input((img_rows, img_cols, 3))
conv1 = resnet_block(inputs,32 , strides=1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = resnet_block(pool1,64 , strides=1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = resnet_block(pool2, 128, strides=1)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = resnet_block(pool3, 256, strides=1)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = aspp_block(pool4, 512)
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = resnet_block(up6, 256, strides=1)
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = resnet_block(up7, 128, strides=1)
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = resnet_block(up8, 64, strides=1)
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = resnet_block(up9, 32, strides=1)
down10 = concatenate([Conv2D(32, (3, 3), activation='relu', padding='same')(conv9), conv9], axis=3)
conv10 = resnet_block(down10, 32, strides=1)
pool10 = MaxPooling2D(pool_size=(2, 2))(conv10)
down11 = concatenate([Conv2D(64, (3, 3), activation='relu', padding='same')(pool10), conv8], axis=3)
conv11 = resnet_block(down11, 64, strides=1)
pool11 = MaxPooling2D(pool_size=(2, 2))(conv11)
down12 = concatenate([Conv2D(128, (3, 3), activation='relu', padding='same')(pool11), conv7], axis=3)
conv12 = resnet_block(down12, 128, strides=1)
pool12 = MaxPooling2D(pool_size=(2, 2))(conv12)
down13 = concatenate([Conv2D(256, (3, 3), activation='relu', padding='same')(pool12), conv6], axis=3)
conv13 = resnet_block(down13, 256, strides=1)
pool13 = MaxPooling2D(pool_size=(2, 2))(conv13)
conv14 = aspp_block(pool13, 512)
up15 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv14), conv13], axis=3)
conv15 = resnet_block(up15, 256, strides=1)
up16 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv15), conv12], axis=3)
conv16 = resnet_block(up16, 128, strides=1)
up17 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv16), conv11], axis=3)
conv17 = resnet_block(up17, 64, strides=1)
up18 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv17), conv10], axis=3)
conv18 = resnet_block(up18, 32, strides=1)
conv18 = aspp_block(conv18, 32)
conv19 = Conv2D(1, (1, 1), activation='sigmoid')(conv18)
model = Model(inputs=[inputs], outputs=[conv19])
# model.compile(optimizer=adam_v2(lr=1e-4), loss=[loss], metrics=[dice_coef, iou_coef])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss=[loss], metrics=[dice_coef, iou_coef])
return model
def segment(model_path, img):
model = get_rwnet()
model.load_weights(model_path)
img_mask = model.predict(img, verbose=0)[0]
img_mask = (img_mask[:, :, 0] * 255.).astype(np.uint8)
return img_mask | [
"keras.layers.Conv2D",
"keras.backend.sum",
"keras.backend.flatten",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.backend.pow",
"numpy.random.seed",
"keras.models.Model",
"keras.layers.GlobalAveragePooling2D",
"keras.backend.abs",
"keras.backend.exp",
"keras.layers.Add",
"keras.la... | [((561, 601), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (584, 601), True, 'from keras import backend as K\n'), ((604, 625), 'numpy.random.seed', 'np.random.seed', (['(10101)'], {}), '(10101)\n', (618, 625), True, 'import numpy as np\n'), ((118, 143), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (141, 143), False, 'import warnings\n'), ((149, 211), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (172, 211), False, 'import warnings\n'), ((716, 733), 'keras.backend.flatten', 'K.flatten', (['y_true'], {}), '(y_true)\n', (725, 733), True, 'from keras import backend as K\n'), ((749, 766), 'keras.backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (758, 766), True, 'from keras import backend as K\n'), ((786, 812), 'keras.backend.sum', 'K.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (791, 812), True, 'from keras import backend as K\n'), ((1077, 1135), 'keras.backend.mean', 'K.mean', (['((intersection + smooth) / (union + smooth))'], {'axis': '(0)'}), '((intersection + smooth) / (union + smooth), axis=0)\n', (1083, 1135), True, 'from keras import backend as K\n'), ((1272, 1289), 'keras.backend.flatten', 'K.flatten', (['y_true'], {}), '(y_true)\n', (1281, 1289), True, 'from keras import backend as K\n'), ((1305, 1322), 'keras.backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (1314, 1322), True, 'from keras import backend as K\n'), ((1333, 1374), 'keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['y_true_f', 'y_pred_f'], {}), '(y_true_f, y_pred_f)\n', (1354, 1374), True, 'from keras import backend as K\n'), ((1389, 1400), 'keras.backend.exp', 'K.exp', (['(-BCE)'], {}), '(-BCE)\n', (1394, 1400), True, 'from keras import backend as K\n'), ((3298, 3328), 'keras.layers.Input', 'Input', (['(img_rows, 
img_cols, 3)'], {}), '((img_rows, img_cols, 3))\n', (3303, 3328), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((6040, 6080), 'keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv19]'}), '(inputs=[inputs], outputs=[conv19])\n', (6045, 6080), False, 'from keras.models import Model\n'), ((964, 986), 'keras.backend.abs', 'K.abs', (['(y_true * y_pred)'], {}), '(y_true * y_pred)\n', (969, 986), True, 'from keras import backend as K\n'), ((1639, 1734), 'keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3, 3)'], {'dilation_rate': '(6 * rate_scale, 6 * rate_scale)', 'padding': '"""same"""'}), "(num_filters, (3, 3), dilation_rate=(6 * rate_scale, 6 * rate_scale),\n padding='same')\n", (1645, 1734), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((1743, 1763), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1761, 1763), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((1778, 1876), 'keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3, 3)'], {'dilation_rate': '(12 * rate_scale, 12 * rate_scale)', 'padding': '"""same"""'}), "(num_filters, (3, 3), dilation_rate=(12 * rate_scale, 12 * rate_scale\n ), padding='same')\n", (1784, 1876), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((1884, 1904), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1902, 1904), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, 
GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((1919, 2017), 'keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3, 3)'], {'dilation_rate': '(18 * rate_scale, 18 * rate_scale)', 'padding': '"""same"""'}), "(num_filters, (3, 3), dilation_rate=(18 * rate_scale, 18 * rate_scale\n ), padding='same')\n", (1925, 2017), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2025, 2045), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2043, 2045), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2060, 2103), 'keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3, 3)'], {'padding': '"""same"""'}), "(num_filters, (3, 3), padding='same')\n", (2066, 2103), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2116, 2136), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2134, 2136), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2150, 2155), 'keras.layers.Add', 'Add', ([], {}), '()\n', (2153, 2155), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2182, 2225), 'keras.layers.Conv2D', 'Conv2D', (['num_filters', '(1, 1)'], {'padding': '"""same"""'}), "(num_filters, (1, 1), padding='same')\n", (2188, 2225), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, 
Multiply, Conv2DTranspose, BatchNormalization\n'), ((2407, 2431), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (2429, 2431), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2447, 2464), 'keras.layers.Reshape', 'Reshape', (['se_shape'], {}), '(se_shape)\n', (2454, 2464), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2478, 2572), 'keras.layers.Dense', 'Dense', (['(filters // ratio)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'use_bias': '(False)'}), "(filters // ratio, activation='relu', kernel_initializer='he_normal',\n use_bias=False)\n", (2483, 2572), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2582, 2670), 'keras.layers.Dense', 'Dense', (['filters'], {'activation': '"""sigmoid"""', 'kernel_initializer': '"""he_normal"""', 'use_bias': '(False)'}), "(filters, activation='sigmoid', kernel_initializer='he_normal',\n use_bias=False)\n", (2587, 2670), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2680, 2690), 'keras.layers.Multiply', 'Multiply', ([], {}), '()\n', (2688, 2690), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2797, 2817), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2815, 2817), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, 
Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2829, 2847), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2839, 2847), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2859, 2916), 'keras.layers.Conv2D', 'Conv2D', (['n_filter', '(3, 3)'], {'padding': '"""same"""', 'strides': 'strides'}), "(n_filter, (3, 3), padding='same', strides=strides)\n", (2865, 2916), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2942, 2962), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2960, 2962), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((2974, 2992), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2984, 2992), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3004, 3055), 'keras.layers.Conv2D', 'Conv2D', (['n_filter', '(3, 3)'], {'padding': '"""same"""', 'strides': '(1)'}), "(n_filter, (3, 3), padding='same', strides=1)\n", (3010, 3055), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3085, 3142), 'keras.layers.Conv2D', 'Conv2D', (['n_filter', '(1, 1)'], {'padding': '"""same"""', 'strides': 'strides'}), "(n_filter, (1, 1), padding='same', strides=strides)\n", (3091, 3142), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, 
GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3159, 3179), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3177, 3179), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3203, 3208), 'keras.layers.Add', 'Add', ([], {}), '()\n', (3206, 3208), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3389, 3419), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3401, 3419), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3487, 3517), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3499, 3517), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3586, 3616), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3598, 3616), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3685, 3715), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3697, 3715), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4554, 4584), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4566, 4584), 
False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4761, 4791), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4773, 4791), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4974, 5004), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4986, 5004), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((5183, 5213), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5195, 5213), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((5980, 6019), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'activation': '"""sigmoid"""'}), "(1, (1, 1), activation='sigmoid')\n", (5986, 6019), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((1012, 1036), 'keras.backend.sum', 'K.sum', (['y_true', '[1, 2, 3]'], {}), '(y_true, [1, 2, 3])\n', (1017, 1036), True, 'from keras import backend as K\n'), ((1034, 1058), 'keras.backend.sum', 'K.sum', (['y_pred', '[1, 2, 3]'], {}), '(y_pred, [1, 2, 3])\n', (1039, 1058), True, 'from keras import backend as K\n'), ((6201, 6247), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (6225, 6247), True, 'import tensorflow as tf\n'), ((856, 871), 
'keras.backend.sum', 'K.sum', (['y_true_f'], {}), '(y_true_f)\n', (861, 871), True, 'from keras import backend as K\n'), ((874, 889), 'keras.backend.sum', 'K.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (879, 889), True, 'from keras import backend as K\n'), ((1431, 1454), 'keras.backend.pow', 'K.pow', (['(1 - BCE_EXP)', '(2.0)'], {}), '(1 - BCE_EXP, 2.0)\n', (1436, 1454), True, 'from keras import backend as K\n'), ((3782, 3842), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(256, (2, 2), strides=(2, 2), padding='same')\n", (3797, 3842), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((3941, 4001), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(128, (2, 2), strides=(2, 2), padding='same')\n", (3956, 4001), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4096, 4155), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (2, 2), strides=(2, 2), padding='same')\n", (4111, 4155), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4253, 4312), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(32)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(32, (2, 2), strides=(2, 2), padding='same')\n", (4268, 4312), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4408, 4461), 'keras.layers.Conv2D', 'Conv2D', 
(['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (4414, 4461), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4620, 4673), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (4626, 4673), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((4831, 4885), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128, (3, 3), activation='relu', padding='same')\n", (4837, 4885), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((5040, 5094), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256, (3, 3), activation='relu', padding='same')\n", (5046, 5094), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((5288, 5348), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(256, (2, 2), strides=(2, 2), padding='same')\n", (5303, 5348), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((5453, 5513), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(128, (2, 2), 
strides=(2, 2), padding='same')\n", (5468, 5513), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((5619, 5678), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (2, 2), strides=(2, 2), padding='same')\n", (5634, 5678), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n'), ((5784, 5843), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(32)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(32, (2, 2), strides=(2, 2), padding='same')\n", (5799, 5843), False, 'from keras.layers import Input, concatenate, Conv2D, Add, MaxPooling2D, Activation, Dense, Reshape, GlobalAveragePooling2D, Multiply, Conv2DTranspose, BatchNormalization\n')] |
from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout
from PyQt5.QtGui import QColor, QPainter, QBrush, QPalette
from PyQt5.QtCore import Qt
import sys
import copy
import numpy as np
from test import Predict
import cv2
__appname__ = 'MnistRec'
class PaintCanvas(QWidget):
    """A fixed 28x28 drawing surface that records mouse strokes.

    Strokes are kept as lists of (x, y) points: ``tempPoints`` holds the
    stroke currently being drawn, ``pointsList`` holds all finished strokes.
    The parent window's ``labelCoordinates`` label is updated with the
    cursor position on every tracked mouse event.
    """
    def __init__(self, *args, **kwargs):
        super(PaintCanvas, self).__init__(*args, **kwargs)
        # Receive mouseMoveEvent even when no button is pressed.
        self.setMouseTracking(True)
        self.drawingColor = QColor(0, 0, 0)  # pen color: black
        self.pointsList = []   # list of finished strokes, each a list of (x, y)
        # NOTE(review): QPainter is normally constructed inside paintEvent;
        # keeping one long-lived instance works here only because begin()/end()
        # are called on every paint — confirm against Qt docs before changing.
        self.painter = QPainter(self)
        self.tempPoints = []   # points of the stroke currently in progress
        # 28x28 matches the MNIST input size expected by the recognizer.
        self.setFixedSize(28, 28)
        self.bDrawing = False  # True while the left button is held down
        pal = QPalette(self.palette())
        pal.setColor(QPalette.Background, Qt.white)
        self.setAutoFillBackground(True);
        self.setPalette(pal)
    def paintForEvent(self, ev):
        """Record the event position and report it to the main window."""
        pos = ev.pos()
        window = self.parent().window()
        if window is not None:
            window.labelCoordinates.setText('x: %d, y: %d'%(pos.x(), pos.y()))
        # Append only while a stroke is active (button held or press event).
        if self.bDrawing or ev.button() == Qt.LeftButton:
            self.tempPoints.append((pos.x(), pos.y()))
            self.update()  # schedule a repaint
    def mouseMoveEvent(self, ev):
        self.paintForEvent(ev)
    def mousePressEvent(self, ev):
        # Start a new stroke.
        self.paintForEvent(ev)
        self.bDrawing = True
    def mouseReleaseEvent(self, ev):
        # Finish the stroke: archive a copy and reset the in-progress buffer.
        self.paintForEvent(ev)
        self.pointsList.append(copy.deepcopy(self.tempPoints))
        self.tempPoints.clear()
        self.bDrawing = False
    def paintEvent(self, ev):
        """Redraw all finished strokes plus the stroke in progress."""
        p = self.painter
        p.begin(self)
        p.setPen(self.drawingColor)
        brush = QBrush(Qt.BDiagPattern)
        p.setBrush(brush)
        # Connect consecutive points of every finished stroke with lines.
        for points in self.pointsList:
            for i in range(len(points)-1):
                p.drawLine(points[i][0], points[i][1], points[i+1][0], points[i+1][1])
        # Draw the stroke currently being dragged.
        for tPidx in range(len(self.tempPoints)-1):
            p. drawLine(self.tempPoints[tPidx][0], self.tempPoints[tPidx][1], self.tempPoints[tPidx+1][0], self.tempPoints[tPidx+1][1])
        p.end()
class MnistRecQtMain(QMainWindow):
    """Main window: a 28x28 paint canvas, Recognize/Clean buttons, and a
    label that shows the digit predicted by the model in ``test.Predict``.
    """
    def __init__(self):
        super(MnistRecQtMain, self).__init__()
        # self.resize(800, 600)
        self.setWindowTitle(__appname__)
        self.paintCanvas = PaintCanvas(parent=self)
        self.recButton = QPushButton('Recognize')
        self.recButton.clicked.connect(self.recognize)
        self.cleanButton = QPushButton('Clean')
        self.cleanButton.clicked.connect(self.cleanCanvas)
        # Shows the recognized digit after a prediction.
        self.digitLabel = QLabel('')
        # Layout: [canvas | (Recognize / Clean) | result label]
        layout = QHBoxLayout()
        layout.addWidget(self.paintCanvas)
        buttonLayout = QVBoxLayout()
        buttonLayout.addWidget(self.recButton)
        buttonLayout.addWidget(self.cleanButton)
        layout.addLayout(buttonLayout)
        layout.addWidget(self.digitLabel)
        centralWidget = QWidget()
        centralWidget.setLayout(layout)
        self.setCentralWidget(centralWidget)
        self.statusBar().showMessage('%s started.'%__appname__)
        self.statusBar().show()
        # Permanent status-bar widget updated by PaintCanvas.paintForEvent.
        self.labelCoordinates = QLabel('')
        self.statusBar().addPermanentWidget(self.labelCoordinates)
        # Predict presumably wraps a trained MNIST model — confirm in test.py.
        self.predict = Predict()
    def recognize(self):
        """Rasterize the recorded strokes to a 28x28 image and classify it."""
        # White background (1.0); strokes are drawn in black (0).
        img = np.ones((28, 28))
        for points in self.paintCanvas.pointsList:
            # Alternative: draw 1-pixel-wide strokes from individual points.
            # for point in points:
            #     if point[0] < 28 and point[1] < 28:
            #         img[point[0], point[1]] = 0
            # Render 3-pixel-wide strokes along the recorded mouse path.
            # First drop points that fall outside the 28x28 image.
            points = [point for point in points if point[0] < 28 and point[1] < 28]
            for i in range(len(points)-1):
                img = cv2.line(img, points[i], points[i+1], (0, 0, 0), 3)
        # Add the channel axis expected by the model: (28, 28, 1).
        img = np.reshape(img, (28, 28, 1))
        self.digitLabel.setText(str(self.predict.predict_img(img)))
    def cleanCanvas(self):
        """Discard all strokes and repaint a blank canvas."""
        self.paintCanvas.pointsList.clear()
        self.paintCanvas.update()
def get_main_app(argv=None):
    """Create the QApplication and the main window.

    Args:
        argv: command-line argument list forwarded to QApplication.
            Defaults to an empty list when omitted. (Changed from the
            mutable default ``argv=[]``, which is shared between calls
            and can be mutated by Qt; ``None`` sentinel is the safe,
            backward-compatible idiom.)

    Returns:
        tuple: ``(app, win)`` — the QApplication and the shown main window.
    """
    if argv is None:
        argv = []
    app = QApplication(argv)
    app.setApplicationName(__appname__)
    win = MnistRecQtMain()
    win.show()
    return app, win
# Build the application and main window, then enter the Qt event loop below.
app, _win = get_main_app(sys.argv)
sys.exit(app.exec_()) | [
"PyQt5.QtWidgets.QWidget",
"numpy.reshape",
"PyQt5.QtGui.QPainter",
"numpy.ones",
"PyQt5.QtGui.QColor",
"cv2.line",
"PyQt5.QtGui.QBrush",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QLabel",
"test.Predict",
"PyQt5.QtWidgets.QApplication",
"copy.deepcopy",
"PyQt5.QtWidgets.QVBoxLayout",
... | [((3535, 3553), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['argv'], {}), '(argv)\n', (3547, 3553), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((473, 488), 'PyQt5.QtGui.QColor', 'QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (479, 488), False, 'from PyQt5.QtGui import QColor, QPainter, QBrush, QPalette\n'), ((529, 543), 'PyQt5.QtGui.QPainter', 'QPainter', (['self'], {}), '(self)\n', (537, 543), False, 'from PyQt5.QtGui import QColor, QPainter, QBrush, QPalette\n'), ((1480, 1503), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.BDiagPattern'], {}), '(Qt.BDiagPattern)\n', (1486, 1503), False, 'from PyQt5.QtGui import QColor, QPainter, QBrush, QPalette\n'), ((2128, 2152), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Recognize"""'], {}), "('Recognize')\n", (2139, 2152), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((2223, 2243), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Clean"""'], {}), "('Clean')\n", (2234, 2243), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((2317, 2327), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['""""""'], {}), "('')\n", (2323, 2327), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((2339, 2352), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (2350, 2352), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((2407, 2420), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (2418, 2420), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((2592, 2601), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (2599, 2601), False, 'from PyQt5.QtWidgets 
import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((2785, 2795), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['""""""'], {}), "('')\n", (2791, 2795), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QHBoxLayout, QApplication, QVBoxLayout\n'), ((2874, 2883), 'test.Predict', 'Predict', ([], {}), '()\n', (2881, 2883), False, 'from test import Predict\n'), ((2915, 2932), 'numpy.ones', 'np.ones', (['(28, 28)'], {}), '((28, 28))\n', (2922, 2932), True, 'import numpy as np\n'), ((3318, 3346), 'numpy.reshape', 'np.reshape', (['img', '(28, 28, 1)'], {}), '(img, (28, 28, 1))\n', (3328, 3346), True, 'import numpy as np\n'), ((1295, 1325), 'copy.deepcopy', 'copy.deepcopy', (['self.tempPoints'], {}), '(self.tempPoints)\n', (1308, 1325), False, 'import copy\n'), ((3258, 3311), 'cv2.line', 'cv2.line', (['img', 'points[i]', 'points[i + 1]', '(0, 0, 0)', '(3)'], {}), '(img, points[i], points[i + 1], (0, 0, 0), 3)\n', (3266, 3311), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# ## **Heart Failure Prediction!**
#
# In this notebook we will see how to apply KNN and how to use the H2O.ai AutoML library for a classification task. If you find this notebook useful, please upvote!
# %% id="-eFeHGM7wjXi"
#importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %% id="BeFbH5mmwjXj" outputId="1d28b870-a332-42d9-db22-6bf5979bd77b"
# Load the Kaggle heart-failure dataset (one row per patient).
df = pd.read_csv('../input/heart-failure-prediction/heart.csv')
df.head()
# %% id="58V7HLIKwjXk" outputId="16830d86-2484-458f-bf53-30e988f99219"
df.shape
# %% id="EKqElv-D1s5Z" outputId="65eba788-4183-4354-f74a-ad7c4a1e6317"
df.info()
# %% id="A82b8jbLwjXk" outputId="a916ec4d-6c0d-4bb0-841e-5d675676cc4e"
df.describe()
# %% id="vCpUngBKVac2" outputId="68162c6f-6bd5-4794-c332-e15701dabfe5"
df.isnull().sum()
# No missing values in this dataset, so no imputation is needed.
# %% [markdown] id="o2qmlr1DdLX9"
# ## Data Exploration
# %% [markdown] id="dBRqUEwuy1KY"
#
# Now we can plot the distribution of data wrt dependent variable i.e HeartDisease
# %% id="JsDv6UI4wjXn" outputId="6b218281-de0c-472c-b871-6595a7d069d2"
# Pairwise scatter plots colored by the target class.
sns.pairplot(df, hue='HeartDisease')
# %% [markdown] id="ahSOpNk1zEta"
# 5. Which are most useful variable in classification? Prove using correlation.
# %% id="uXcHuo7pzCLZ" outputId="6c4cdfd1-7b22-4967-89e8-ffb3e9e2a152"
# Pearson correlation matrix, rendered as a heat-styled table.
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
# %% id="OG7hK1UJ0Rja" outputId="85b4bd55-da22-45b9-cce1-4cfea00ab94f"
sns.set_theme(style="whitegrid")
sns.boxplot(x="Age", data=df, palette="Set3")
plt.title("Age Distribution")
# %% id="wMVgYoGw0oIl" outputId="2c985389-575f-4635-a0bf-cf2f56bb137e"
fig = plt.figure(figsize=(15, 20))
ax = fig.gca()
df.hist(ax=ax)
# %% id="kzgu_ezi03y8" outputId="76bbae80-a830-45c2-b0e6-e6c787ecd193"
df.HeartDisease.value_counts().plot(kind='bar')
plt.xlabel("Heart Diseases or Not")
plt.ylabel("Count")
plt.title("Heart Diseases")
# The two classes are roughly balanced, so no resampling is applied.
# %% [markdown] id="-7E3IpLKdRV1"
# ## Data Preprocessing
# %% id="zaHcUNcbWjkZ"
# Categorical columns to integer-encode before modelling.
cat = ['Sex', 'ChestPainType', 'RestingECG', 'ExerciseAngina', 'ST_Slope']
# %% id="tVKS4fLEWBZc"
from sklearn.preprocessing import LabelEncoder
lb = LabelEncoder()
# NOTE: .apply() calls fit_transform once per column, so the single
# encoder instance is re-fit for each categorical column in turn.
df[cat] = df[cat].apply(lb.fit_transform)
# %% id="4OPXop0HwjXo" outputId="997a4068-3bfd-4298-8a60-584126552665"
# Feature matrix: everything except the target column.
X = df.drop('HeartDisease', axis=1)
X.head()
# %% id="KLJJYpttwjXo" outputId="00502435-bf1a-42e9-d631-c767be3f82bc"
y = df['HeartDisease']
y.head()
# %% id="0T5IVw1awjXp"
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)
# %% id="Cw17uwfRwjXp" outputId="e218a7c7-4a0e-42b3-c151-3b0b3d4e5205"
X_train.shape
# %% id="p3L49Xf8wjXq" outputId="09340038-570a-453a-9cf8-ed9afee82303"
from sklearn.preprocessing import QuantileTransformer
# Scale features to a uniform distribution; fit on train only to avoid
# leaking test-set statistics into the transform.
scaler = QuantileTransformer()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# %% [markdown] id="o2xpOahkwjXt"
# ## Using KNN
#
# K-nearest neighbors (KNN) algorithm is a type of supervised ML algorithm which can be used for both classification as well as regression predictive problems. However, it is mainly used for classification predictive problems in industry. The following two properties would define KNN well −
#
# * Lazy learning algorithm − KNN is a lazy learning algorithm because it does not have a specialized training phase and uses all the data for training while classification.
#
# * Non-parametric learning algorithm − KNN is also a non-parametric learning algorithm because it doesn’t assume anything about the underlying data.
# %% id="WB5aOGmiwjXv"
from sklearn.neighbors import KNeighborsClassifier
# %% id="ruOf41A6wjXw" outputId="d39b639a-1acf-484a-dbea-0684c9945b3b"
knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean', p=2)
knn.fit(X_train, y_train)
# %% id="To358l1ewjXw" outputId="67755f95-06f1-427d-aa9b-49660f3c2c51"
y_pred = knn.predict(X_test)
y_pred
# %% id="T0iEKtMKwjXx" outputId="175787b0-d6b1-4839-cfa5-d5420308b378"
knn.score(X_test, y_test)
# %% id="KGBIL8jwwjXx"
from sklearn.metrics import accuracy_score
from sklearn import metrics
# %% id="7piDOhe1wjXx" outputId="a66fe438-6dfb-4b08-e5fc-80c758664db2"
metrics.accuracy_score(y_test, y_pred)
# %% id="oE_JuTbAwjXy" outputId="3c59caf9-3488-429c-f2be-bc2102738ee3"
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(y_test, y_pred)
mat
# %% id="vefokjVQ2loi" outputId="73ce3707-11c8-40c1-c6b4-b5f4b9eefb18"
from sklearn.metrics import classification_report
target_names = ['Heart Diseases', 'Normal']
print(classification_report(y_test, y_pred, target_names=target_names))
# %% [markdown] id="mIL4fznQ3A4P"
# To select optimize k value we will use elbow method
# %% id="OrZlJRGMwjXy"
# Elbow method: fit a KNN for each k in [1, 39] and record the test-set
# misclassification rate so the best k can be read off the plot.
error_rate = []
# Will take some time
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    # Fraction of test samples predicted incorrectly at this k.
    error_rate.append(np.mean(pred_i != y_test))
# %% id="UdPt0WK3wjXy" outputId="852d55a4-9a39-4298-b145-94bc7f23eb7a"
import matplotlib.pyplot as plt
# Plot error rate vs. k; the "elbow" / minimum indicates a good k.
plt.figure(figsize=(10, 6))
plt.plot(range(1, 40),
         error_rate,
         color='red',
         linestyle='dashed',
         marker='o',
         markerfacecolor='green',
         markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
# %% id="AgeM11Jv3F9X" outputId="d58693b2-1908-4924-e1a6-2b16792815d9"
#From graph we can see that optimize k value is 16,17,18
# Now we will train our KNN classifier with this k values
# NOTE(review): the code actually uses n_neighbors=3, which contradicts
# the comment above (k=16-18). Kept as-is to preserve the reported
# results — confirm the intended k against the elbow plot.
knn = KNeighborsClassifier(n_neighbors=3, metric='euclidean', p=2)
knn.fit(X_train, y_train)
# %% id="dCLyRoU13X9n" outputId="f7ae117c-2e18-496c-925d-a78728eefabe"
y_pred = knn.predict(X_test)
y_pred
# %% id="MTDLrkM33upo" outputId="49eb41dd-bf1b-4398-ad41-f606dbd5f0fe"
knn.score(X_test, y_test)
# %% id="yXXP59Ff3r5Q" outputId="dfb33483-6385-48e4-8988-f565adbaafe1"
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(mat, annot=True)
# %% id="Z7NJY1xg3a2v" outputId="203352f9-cb86-433d-c406-68e139682617"
from sklearn.metrics import classification_report
# Fixed: previous value ['Diabetes', 'Normal'] was copy-pasted from a
# different dataset and also inverted. classification_report assigns
# names positionally to the sorted labels [0, 1]: 0 = no heart disease.
target_names = ['Normal', 'Heart Disease']
print(classification_report(y_test, y_pred, target_names=target_names))
# %% [markdown] id="IkXajhdm4Pur"
# 6. Quantify goodness of your model and discuss steps taken for improvement.
#
# For this dataset KNN had archive 87% accuracy. We can further improve accuracy by using bagging and boosting techniques.
#
# 7. Can we use KNN for regression also? Why / Why not?
#
# KNN algorithm can be used for both classification and regression problems. The KNN algorithm uses ‘feature similarity’ to predict the values of any new data points. This means that the new point is assigned a value based on how closely it resembles the points in the training set.
#
# 8. Discuss drawbacks of algorithms such as KNN
#
# -> It does not work well with large dataset and high dimensional dataset.
#
# -> KNN is sensitive to noisy data, so it requires careful feature engineering such as outlier removal and missing-value handling.
#
# -> Require high memory – need to store all of the training data
#
# -> Given that it stores all of the training, it can be computationally expensive
#
# %% [markdown] id="OQM4NIy8daNG"
# ## Using H2o.ai AutoML
# %% id="CuEIHC-zYRfi" outputId="e04d6864-4028-483a-df10-249e6275fc47"
# %% id="bXVffHNX4JkT" outputId="360dcee6-ea22-4555-9cfe-0e117b36fe41"
import h2o
# We will be using default parameter Here with H2O init method
h2o.init()
# %% id="q9PVDyTmYQcL" outputId="b1f4d3ef-b531-4cba-a7bc-0d9c71b92c04"
# Convert to h2o dataframe
hf = h2o.H2OFrame(df)
# %% id="DlXM7bxrY8Fn" outputId="28a1baa7-be21-4f52-8b36-05d90ba90c7e"
# Data Transform - Split train : test datasets
train, valid = hf.split_frame(ratios=[.80], seed=1234)
print("Training Dataset", train.shape)
print("Validation Dataset", valid.shape)
# %% id="hzGc-1jGZAO0" outputId="1b638585-7444-4c59-8a33-25885d3e94fe"
train.head(5)
# %% id="SchOpTAhaN71" outputId="2690d099-f8f8-4031-93df-ceaf8a44206a"
valid.head()
# %% id="hWcWh0jXZCSV"
# Identify predictors and response
featureColumns = train.columns
targetColumn = "HeartDisease"
featureColumns.remove(targetColumn)
# %% id="ITk8vdwOZLVe" outputId="4b37ab65-dc09-4b26-e081-aeaa1cc8d99e"
import time
from h2o.automl import H2OAutoML
# Run AutoML for YY base models (limited to 1 hour max runtime by default)
aml = H2OAutoML(max_models=12, seed=1234, balance_classes=True)
aml.train(x=featureColumns,
y=targetColumn,
training_frame=train,
validation_frame=valid)
# %% id="ql70026xZfdI" outputId="95e3404d-ad3b-46fe-a432-ab34593de3bc"
lb = aml.leaderboard
print(lb.head(rows=lb.nrows))
# Explain an AutoML object i.e. explain all models
exa = aml.explain(valid)
# %% id="pLsL0vEdZp1r"
# Evaluate the best model with testing data.
model = aml.leader
# %% id="osb0CpR5Z5IF" outputId="f4e8a259-dcfc-4018-b784-80b123e4d4fd"
# %% id="mihl7WKqbPow" outputId="86853480-ae0c-42c1-b831-9f6df1bc442b"
# For Classification
import scikitplot as skplt
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import cohen_kappa_score, confusion_matrix
# Predict with the best model.
predicted_y = model.predict(valid[featureColumns])
predicted_data = predicted_y.as_data_frame()
valid_dataset = valid.as_data_frame()
# Evaluate the skill of the Trained model
acc = accuracy_score(valid_dataset[targetColumn],
np.round(abs(predicted_data['predict'])))
classReport = classification_report(valid_dataset[targetColumn],
np.round(abs(predicted_data['predict'])))
confMatrix = confusion_matrix(valid_dataset[targetColumn],
np.round(abs(predicted_data['predict'])))
print()
print('Testing Results of the trained model: ')
print()
print('Accuracy : ', acc)
print()
print('Confusion Matrix :\n', confMatrix)
print()
print('Classification Report :\n', classReport)
# Confusion matrix
skplt.metrics.plot_confusion_matrix(valid_dataset[targetColumn],
np.round(abs(predicted_data['predict'])),
figsize=(7, 7))
plt.show()
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.preprocessing.QuantileTransformer",
"h2o.H2OFrame",
"seaborn.pairplot",
"numpy.mean",
"h2o.automl.H2OAutoML",
"matplotl... | [((738, 796), 'pandas.read_csv', 'pd.read_csv', (['"""../input/heart-failure-prediction/heart.csv"""'], {}), "('../input/heart-failure-prediction/heart.csv')\n", (749, 796), True, 'import pandas as pd\n'), ((1421, 1457), 'seaborn.pairplot', 'sns.pairplot', (['df'], {'hue': '"""HeartDisease"""'}), "(df, hue='HeartDisease')\n", (1433, 1457), True, 'import seaborn as sns\n'), ((1784, 1816), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (1797, 1816), True, 'import seaborn as sns\n'), ((1817, 1862), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Age"""', 'data': 'df', 'palette': '"""Set3"""'}), "(x='Age', data=df, palette='Set3')\n", (1828, 1862), True, 'import seaborn as sns\n'), ((1863, 1892), 'matplotlib.pyplot.title', 'plt.title', (['"""Age Distribution"""'], {}), "('Age Distribution')\n", (1872, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1971, 1999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 20)'}), '(figsize=(15, 20))\n', (1981, 1999), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2185), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Heart Diseases or Not"""'], {}), "('Heart Diseases or Not')\n", (2160, 2185), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2205), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (2196, 2205), True, 'import matplotlib.pyplot as plt\n'), ((2206, 2233), 'matplotlib.pyplot.title', 'plt.title', (['"""Heart Diseases"""'], {}), "('Heart Diseases')\n", (2215, 2233), True, 'import matplotlib.pyplot as plt\n'), ((2554, 2568), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2566, 2568), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2945, 2998), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, y, test_size=0.2, random_state=0)\n', (2961, 2998), False, 'from sklearn.model_selection 
import train_test_split\n'), ((3377, 3398), 'sklearn.preprocessing.QuantileTransformer', 'QuantileTransformer', ([], {}), '()\n', (3396, 3398), False, 'from sklearn.preprocessing import QuantileTransformer\n'), ((4298, 4358), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)', 'metric': '"""euclidean"""', 'p': '(2)'}), "(n_neighbors=5, metric='euclidean', p=2)\n", (4318, 4358), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4758, 4796), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4780, 4796), False, 'from sklearn import metrics\n'), ((4921, 4953), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4937, 4953), False, 'from sklearn.metrics import cohen_kappa_score, confusion_matrix\n'), ((5659, 5686), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5669, 5686), True, 'import matplotlib.pyplot as plt\n'), ((5861, 5896), 'matplotlib.pyplot.title', 'plt.title', (['"""Error Rate vs. K Value"""'], {}), "('Error Rate vs. 
K Value')\n", (5870, 5896), True, 'import matplotlib.pyplot as plt\n'), ((5897, 5912), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""K"""'], {}), "('K')\n", (5907, 5912), True, 'import matplotlib.pyplot as plt\n'), ((5913, 5937), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error Rate"""'], {}), "('Error Rate')\n", (5923, 5937), True, 'import matplotlib.pyplot as plt\n'), ((6132, 6192), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(3)', 'metric': '"""euclidean"""', 'p': '(2)'}), "(n_neighbors=3, metric='euclidean', p=2)\n", (6152, 6192), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((6549, 6581), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6565, 6581), False, 'from sklearn.metrics import cohen_kappa_score, confusion_matrix\n'), ((6582, 6609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (6592, 6609), True, 'import matplotlib.pyplot as plt\n'), ((6610, 6638), 'seaborn.heatmap', 'sns.heatmap', (['mat'], {'annot': '(True)'}), '(mat, annot=True)\n', (6621, 6638), True, 'import seaborn as sns\n'), ((8162, 8172), 'h2o.init', 'h2o.init', ([], {}), '()\n', (8170, 8172), False, 'import h2o\n'), ((8277, 8293), 'h2o.H2OFrame', 'h2o.H2OFrame', (['df'], {}), '(df)\n', (8289, 8293), False, 'import h2o\n'), ((9074, 9131), 'h2o.automl.H2OAutoML', 'H2OAutoML', ([], {'max_models': '(12)', 'seed': '(1234)', 'balance_classes': '(True)'}), '(max_models=12, seed=1234, balance_classes=True)\n', (9083, 9131), False, 'from h2o.automl import H2OAutoML\n'), ((10871, 10881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10879, 10881), True, 'import matplotlib.pyplot as plt\n'), ((5131, 5195), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {'target_names': 'target_names'}), '(y_test, y_pred, target_names=target_names)\n', (5152, 5195), False, 'from sklearn.metrics 
import accuracy_score, classification_report\n'), ((5406, 5441), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'i'}), '(n_neighbors=i)\n', (5426, 5441), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((6806, 6870), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {'target_names': 'target_names'}), '(y_test, y_pred, target_names=target_names)\n', (6827, 6870), False, 'from sklearn.metrics import accuracy_score, classification_report\n'), ((5527, 5552), 'numpy.mean', 'np.mean', (['(pred_i != y_test)'], {}), '(pred_i != y_test)\n', (5534, 5552), True, 'import numpy as np\n')] |
"""
"""
from cddm.map import k_indexmap, rfft2_grid
from cddm.fft import rfft2_crop
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
#diffusion constant
from examples.paper.one_component.conf import D, DATA_PATH, KIMAX, KJMAX
from examples.paper.one_component.conf import *
from examples.paper.form_factor import g1
import os.path as path
# Plot configuration: marker/label per acquisition scheme, loaded from conf.
SAVEFIG = True
# default matplotlib colour cycle C0..C9
colors = ["C{}".format(i) for i in range(10)]
MARKERS = {"full": "1", "standard" : "2", "fast" : "3", "dual" : "4","random" : "+"}
LABELS = {"full": r"DDM ($N={}$ @ $\delta t = {}$)".format(NFRAMES_FULL,DT_FULL),
          "standard": r"DDM ($N={}$ @ $\delta t = {}$)".format(NFRAMES_STANDARD,DT_STANDARD),
          "fast": r"DDM ($N={}$ @ $\delta t = {}$)".format(NFRAMES_FAST,DT_FAST),
          "dual": r"C-DDM ($N={}$ @ $p = {}$)".format(NFRAMES_DUAL,PERIOD),
          "random": r"R-DDM ($N={}$ @ $p = {}$)".format(NFRAMES_RANDOM,PERIOD_RANDOM)}
# which normalization variant of the correlation data to load
NORM = 6
# theoretical amplitude (form factor at zero lag), cropped to the k-range used
amp = rfft2_crop(g1(0),KIMAX,KJMAX)
def _g1(x,f,a,b):
"""g1: exponential decay"""
return a * np.exp(-f*x) + b
def _fit_k(x, ys, p0):
    """Fit the ``_g1`` decay model to every row of ``ys``.

    Yields ``(popt, variances)`` per row; rows whose fit fails yield NaN
    triplets instead of raising, so a few bad k-values do not abort the scan.
    """
    for y in ys:
        try:
            popt, pcov = curve_fit(_g1, x, y, p0=p0)
            yield popt, np.diag(pcov)
        except (RuntimeError, ValueError, TypeError):
            # curve_fit signals non-convergence / invalid data with these;
            # the original bare ``except`` also swallowed KeyboardInterrupt.
            yield (np.nan,) * 3, (np.nan,) * 3
def _fit_data(x, data, imap):
    """performs fitting and plotting of cross correlation data"""
    guess = [0.01, 1, 0]
    out_shape = (data.shape[0], data.shape[1], 3)
    # start with every entry invalid (NaN); only fitted rings get filled in
    popt = np.full(out_shape, np.nan, dtype=float)
    pcov = np.full(out_shape, np.nan, dtype=float)
    # fit ring by ring, skipping the lowest wavevectors (k < 3)
    for ring in range(3, KIMAX):
        selection = imap == ring
        fitted = np.array(list(_fit_k(x, data[selection, :], guess)))
        params = fitted[:, 0]
        covs = fitted[:, 1]
        # reuse the mean of this ring's parameters as the next ring's guess
        guess = np.nanmean(params, axis=0)
        popt[selection] = params
        pcov[selection] = covs
    return popt, pcov
def _lin(x,k):
return k*x
def fit(x, y, label="data"):
    """Fit every wavevector of ``y`` and return ``(valid_mask, |k|, popt, pcov)``."""
    ring_map = k_indexmap(y.shape[0], y.shape[1], angle=0, sector=180, kstep=1.0)
    popt, pcov = _fit_data(x, y, ring_map)
    ki, kj = rfft2_grid(y.shape[0], y.shape[1])
    k = (ki**2 + kj**2)**0.5
    # a point is valid only when all three fitted parameters are finite
    mask = np.logical_not(np.isnan(popt)).all(axis=-1)
    return mask, k, popt, pcov
# Figure 1: decay rate vs q^2 (left) and its relative error (right).
fig1 = plt.figure()
ax1,ax1a = fig1.subplots(1,2)
#ax1 = ax1a.twinx()
# Figure 2: fitted amplitude vs q (left) and its relative error (right).
fig2 = plt.figure()
ax2,ax2a = fig2.subplots(1,2)
#ax2 = ax2a.twinx()
#for i,label in enumerate(("standard", "random", "dual")):
for i,label in enumerate(("full","standard","fast","dual","random")):
    x = np.load(path.join(DATA_PATH, "corr_{}_t.npy".format(label)))
    y = np.load(path.join(DATA_PATH, "corr_{}_data_norm{}.npy".format(label, NORM)))
    #time mask for valid data. For a given time, all data at any k value must be valid
    mask = np.isnan(y)
    mask = np.logical_not(np.all(mask, axis = tuple(range(mask.ndim-1))))
    x,y = x[mask], y[...,mask]
    if label == "dual":
        m,ks,p,c = fit(x,y, label = label)
    else:
        #skip the first element (zero time)
        m,ks,p,c = fit(x[1:],y[...,1:], label = label)
    # keep only successfully fitted points: rate f, amplitude a, |k|
    f = p[m,0]
    a = p[m,1]
    k = ks[m]
    a_true = amp[m]
    # one-sigma errors from the diagonal of the covariance
    fe= (c[m,0])**0.5
    ae= (c[m,1])**0.5
    from operator import itemgetter
    # sort all quantities jointly by increasing |k|
    s = np.array(sorted(((k,f,fe,a,ae,at) for (k,f,fe,a,ae,at) in zip(k,f,fe,a,ae,a_true)),key=itemgetter(0)))
    k = s[:,0]
    f = s[:,1]
    fe = s[:,2]
    a = s[:,3]
    ae = s[:,4]
    a_true = s[:,5]
    f_err = fe / f
    k_err = k
    a_err = ae / a
    f_true = _lin(k**2,D)
    # moving-average smoothing of the relative errors over 30 neighbouring k
    KERNEL_SIZE = 30
    kernel = (1/KERNEL_SIZE,)*KERNEL_SIZE
    f_err = (np.convolve(((f-f_true)/f_true)**2, kernel ,mode = "valid") ** 0.5)
    k_err = np.convolve(k, kernel ,mode = "valid")
    a_err = (np.convolve(((a-a_true)/a_true)**2, kernel ,mode = "valid") ** 0.5)
    ax1.plot((k**2)[::KERNEL_SIZE],f[::KERNEL_SIZE],MARKERS[label], color = colors[i],fillstyle='none', label = LABELS[label])
    ax2.plot(k[::KERNEL_SIZE],a[::KERNEL_SIZE],MARKERS[label], color = colors[i],fillstyle='none', label = LABELS[label])
    ax1a.semilogy((k_err**2)[::KERNEL_SIZE],f_err[::KERNEL_SIZE],"-",color = colors[i],fillstyle='none', label = LABELS[label])
    ax2a.semilogy(k_err[::KERNEL_SIZE],a_err[::KERNEL_SIZE],"-",color = colors[i],fillstyle='none', label = LABELS[label])
    # weighted linear fit of rate vs q^2 gives the measured diffusion constant
    x = k**2
    popt,pcov = curve_fit(_lin, x, f, sigma = fe)
    #ax1.plot(x,_lin(x,*popt), "--", color = colors[i], label = "fit {}".format(label))
    err = np.sqrt(np.diag(pcov))/popt
    print("Measured D ({}): {:.3e} (1 +- {:.4f})".format(label, popt[0], err[0]))
# NOTE: relies on ``x`` leaking out of the loop (last dataset's q^2 values)
ax1.plot(x,_lin(x,D), "k--", label = "expected value")
def ampmean(amp):
    """Yield the mean amplitude over each integer-|k| ring, for k = 0..KIMAX-1."""
    ring_map = k_indexmap(amp.shape[0], amp.shape[1], angle=0, sector=180, kstep=1.0)
    for ring in range(KIMAX):
        ring_mask = ring_map == ring
        yield amp[ring_mask].mean()
# Overlay the theoretical ring-averaged amplitude on the amplitude plot.
y = list(ampmean(amp))
ax2.plot(y, "k--", label = "expected value")
print("True D: {:.3e}".format(D))
# Axis labels, limits, legends and titles for both figures.
ax1.set_xlabel("$q^2$")
ax1.set_ylabel(r"$1/\tau_0$")
ax1.set_ylim(0,0.15)
ax1a.set_ylabel("$err$")
ax2a.set_ylabel("$err$")
# ax2.set_ylim(0.01,10)
ax1.legend(prop={'size': 8})
ax2.legend(prop={'size': 8})
ax1.set_title(r"$1/\tau_0(q)$")
ax1a.set_title(r"err$(q)$")
# ax1a.set_xlabel("$q$")
ax2.set_ylabel(r"$a$")
ax2.set_ylim(0.6,1.1)
# ax2a.set_ylabel("$\sigma$")
ax2a.set_xlabel("$q$")
ax1a.set_xlabel("$q^2$")
# ax2a.set_ylim(0.001,100)
ax1a.legend(prop={'size': 8})
ax2a.legend(prop={'size': 8})
ax2.set_title(r"$a(q)$")
ax2a.set_title(r"err$(q)$")
# ax2a.set_title(r"$\sigma(q)$")
fig1.tight_layout()
fig2.tight_layout()
# Optionally save the figures, one PDF per normalization variant.
if SAVEFIG:
    fig1.savefig("fit_rate_norm{}.pdf".format(NORM))
    fig2.savefig("fit_amplitude_norm{}.pdf".format(NORM))
plt.show()
| [
"scipy.optimize.curve_fit",
"cddm.map.rfft2_grid",
"numpy.convolve",
"examples.paper.form_factor.g1",
"cddm.map.k_indexmap",
"numpy.diag",
"numpy.exp",
"numpy.nanmean",
"matplotlib.pyplot.figure",
"numpy.empty",
"numpy.isnan",
"operator.itemgetter",
"matplotlib.pyplot.show"
] | [((2338, 2350), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2348, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2424), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2422, 2424), True, 'import matplotlib.pyplot as plt\n'), ((5842, 5852), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5850, 5852), True, 'import matplotlib.pyplot as plt\n'), ((984, 989), 'examples.paper.form_factor.g1', 'g1', (['(0)'], {}), '(0)\n', (986, 989), False, 'from examples.paper.form_factor import g1\n'), ((1443, 1493), 'numpy.empty', 'np.empty', (['(data.shape[0], data.shape[1], 3)', 'float'], {}), '((data.shape[0], data.shape[1], 3), float)\n', (1451, 1493), True, 'import numpy as np\n'), ((1503, 1553), 'numpy.empty', 'np.empty', (['(data.shape[0], data.shape[1], 3)', 'float'], {}), '((data.shape[0], data.shape[1], 3), float)\n', (1511, 1553), True, 'import numpy as np\n'), ((1991, 2057), 'cddm.map.k_indexmap', 'k_indexmap', (['y.shape[0]', 'y.shape[1]'], {'angle': '(0)', 'sector': '(180)', 'kstep': '(1.0)'}), '(y.shape[0], y.shape[1], angle=0, sector=180, kstep=1.0)\n', (2001, 2057), False, 'from cddm.map import k_indexmap, rfft2_grid\n'), ((2118, 2152), 'cddm.map.rfft2_grid', 'rfft2_grid', (['y.shape[0]', 'y.shape[1]'], {}), '(y.shape[0], y.shape[1])\n', (2128, 2152), False, 'from cddm.map import k_indexmap, rfft2_grid\n'), ((2866, 2877), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (2874, 2877), True, 'import numpy as np\n'), ((3804, 3840), 'numpy.convolve', 'np.convolve', (['k', 'kernel'], {'mode': '"""valid"""'}), "(k, kernel, mode='valid')\n", (3815, 3840), True, 'import numpy as np\n'), ((4494, 4525), 'scipy.optimize.curve_fit', 'curve_fit', (['_lin', 'x', 'f'], {'sigma': 'fe'}), '(_lin, x, f, sigma=fe)\n', (4503, 4525), False, 'from scipy.optimize import curve_fit\n'), ((4824, 4894), 'cddm.map.k_indexmap', 'k_indexmap', (['amp.shape[0]', 'amp.shape[1]'], {'angle': '(0)', 'sector': '(180)', 'kstep': '(1.0)'}), 
'(amp.shape[0], amp.shape[1], angle=0, sector=180, kstep=1.0)\n', (4834, 4894), False, 'from cddm.map import k_indexmap, rfft2_grid\n'), ((1825, 1846), 'numpy.nanmean', 'np.nanmean', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (1835, 1846), True, 'import numpy as np\n'), ((3723, 3786), 'numpy.convolve', 'np.convolve', (['(((f - f_true) / f_true) ** 2)', 'kernel'], {'mode': '"""valid"""'}), "(((f - f_true) / f_true) ** 2, kernel, mode='valid')\n", (3734, 3786), True, 'import numpy as np\n'), ((3861, 3924), 'numpy.convolve', 'np.convolve', (['(((a - a_true) / a_true) ** 2)', 'kernel'], {'mode': '"""valid"""'}), "(((a - a_true) / a_true) ** 2, kernel, mode='valid')\n", (3872, 3924), True, 'import numpy as np\n'), ((1069, 1083), 'numpy.exp', 'np.exp', (['(-f * x)'], {}), '(-f * x)\n', (1075, 1083), True, 'import numpy as np\n'), ((1175, 1202), 'scipy.optimize.curve_fit', 'curve_fit', (['_g1', 'x', 'y'], {'p0': 'p0'}), '(_g1, x, y, p0=p0)\n', (1184, 1202), False, 'from scipy.optimize import curve_fit\n'), ((2266, 2280), 'numpy.isnan', 'np.isnan', (['popt'], {}), '(popt)\n', (2274, 2280), True, 'import numpy as np\n'), ((4634, 4647), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (4641, 4647), True, 'import numpy as np\n'), ((3425, 3438), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (3435, 3438), False, 'from operator import itemgetter\n'), ((1230, 1243), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (1237, 1243), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
import os
import argparse
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt
from colorama import init, Fore
from PIL import Image
import yaml
init(autoreset=True)  # colorama: reset terminal colour after every print
rc('font', **{'serif': ['Cardo'], 'size': 25})  # matplotlib global font setup
rc('text', usetex=True)  # render figure text with LaTeX
def _xyToRGB(u, v):
ang = int(np.rad2deg(np.arctan2(u, v)))
if ang == 0:
return [255, 0, 0]
elif ang == 120:
return [0, 255, 0]
elif ang == -120:
return [0, 0, 255]
if ang > 0 and ang < 120:
k = ang / 120.0
return [int((1-k) * 255), int(k*255), 0]
if ang < 0 and ang > -120:
k = (0-ang) / 120.0
return [int((1-k) * 255), 0, int(k*255)]
if ang > 120 and ang <= 180:
k = (ang - 120.0) / 120.0
return [0, int((1-k) * 255), int(k * 255)]
if ang < -120:
k = (- 120.0 - ang) / 120.0
return [0, int(k * 255), int((1-k) * 255)]
def _readTwcsSingle(Twcs_fn):
Twcs_flat = np.loadtxt(Twcs_fn)
assert Twcs_flat.shape[1] == 16 + 1
return [np.array(v.ravel().tolist()[1:]).reshape((4, 4)) for v in Twcs_flat]
if __name__ == "__main__":
    # --- command line interface ---
    parser = argparse.ArgumentParser()
    parser.add_argument("--points3d", required=True)
    parser.add_argument("--views", required=True)
    parser.add_argument('--plot_cfg', type=str, default=None)
    parser.add_argument("--Twcs", type=str, default=None)
    parser.add_argument('--top_res_dir', required=True)
    parser.add_argument('--out_dir', type=str, default=None)
    parser.add_argument('--max_z', default=5, type=float)
    parser.add_argument('--view_color_code', action='store_true', dest='view_color_code')
    parser.set_defaults(view_color_code=False)
    args = parser.parse_args()
    print(args.__dict__)
    # either a YAML config of several trajectories or a single Twcs file
    assert args.plot_cfg is not None or args.Twcs is not None
    out_dir = args.out_dir if args.out_dir else args.top_res_dir
    # --- load landmarks and their view directions ---
    raw_pts3d = np.loadtxt(args.points3d)
    assert raw_pts3d.shape[1] == 3
    n_raw_pts = raw_pts3d.shape[0]
    raw_views = np.loadtxt(args.views)
    assert raw_views.shape[0] == n_raw_pts
    print(Fore.YELLOW + "Read {} landmarks.".format(n_raw_pts))
    all_Twcs = []
    all_colors = []
    all_labels = []
    if args.plot_cfg:
        # multiple trajectories: each YAML entry maps result dir -> settings
        assert os.path.exists(args.plot_cfg)
        print(Fore.YELLOW + "Read multiple Twcs from {}".format(args.plot_cfg))
        with open(args.plot_cfg, 'r') as f:
            cfg = yaml.load(f, Loader=yaml.FullLoader)
        assert type(cfg) == list
        for cfg_i in cfg:
            assert len(cfg_i) == 1
            for res_i, settings_i in cfg_i.items():
                print("- read {} with settings {}".format(res_i, settings_i))
                all_Twcs.append(_readTwcsSingle(
                    os.path.join(args.top_res_dir, res_i, 'stamped_Twc.txt')))
                all_colors.append(settings_i['color'])
                label_i = settings_i['label']
                if label_i == 'None':
                    all_labels.append(None)
                else:
                    all_labels.append(label_i)
    else:
        # single trajectory with fixed colour/label
        print(Fore.YELLOW + "Read single Twcs from {}".format(args.Twcs))
        all_Twcs.append(_readTwcsSingle(args.Twcs))
        all_colors.append('seagreen')
        all_labels.append('rrt')
    # filter and preprocess (swap x y, and invert the swapped y)
    print("Preprocess landmarks...")
    pts3d = []
    views = []
    for idx in range(n_raw_pts):
        pt_i = raw_pts3d[idx]
        view_i = raw_views[idx]
        if pt_i[2] > args.max_z:
            continue
        pts3d.append([pt_i[1], -pt_i[0], pt_i[2]])
        views.append([view_i[1], -view_i[0], view_i[2]])
    pts3d = np.array(pts3d)
    views = np.array(views)
    print("Preprocess camera views...")
    # project each camera's optical axis into the same 2D top-view frame
    all_cam_views_2d = []
    all_cam_pos_2d = []
    all_start_pos = []
    for Twcs in all_Twcs:
        cam_views_2d = []
        cam_pos_2d = []
        for Twc in Twcs:
            cam_view_i = np.dot(Twc[0:3, 0:3], [0.0, 0.0, 1.0])
            cam_view_2d_i = np.array([cam_view_i[1], -cam_view_i[0]])
            # normalize the direction arrow to a fixed plot length
            cam_view_2d_i = 0.2 * cam_view_2d_i / np.linalg.norm(cam_view_2d_i)
            cam_views_2d.append(cam_view_2d_i.ravel().tolist())
            cam_pos_i = Twc[0:3, 3]
            cam_pos_2d.append([cam_pos_i[1], -cam_pos_i[0]])
        all_start_pos.append(cam_pos_2d[0])
        all_cam_views_2d.append(np.array(cam_views_2d))
        all_cam_pos_2d.append(np.array(cam_pos_2d))
    aver_start = np.mean(np.array(all_start_pos), axis=0)
    # colormap
    # build a circular hue-wheel legend image via _xyToRGB (RGBA, transparent
    # outside the circle)
    dim = 500
    center = 250
    rad = 250
    img = np.zeros((500, 500, 4), 'uint8')
    for ri in range(dim):
        for ci in range(dim):
            x = ci - center
            y = -(ri - center)
            if np.sqrt(x*x + y*y) >= rad:
                img[ri, ci, :] = [0, 0, 0, 0]
                continue
            img[ri, ci, :] = _xyToRGB(x, y) + [255]
    colormap = Image.fromarray(img)
    colormap = colormap.convert('RGBA')
    savecolormap = Image.new('RGBA', (dim, dim), (0, 0, 0, 0))
    savecolormap.paste(colormap, (0, 0))
    colormap_fn = os.path.join(out_dir, 'colormap.png')
    savecolormap.save(colormap_fn)
    colormap_plt = plt.imread(colormap_fn)
    # plot
    print("Plotting...")
    n_pts = pts3d.shape[0]
    pts_colors = []
    for idx in range(n_pts):
        # colour landmarks by view direction, or plain blue
        if args.view_color_code:
            c = _xyToRGB(views[idx][0], views[idx][1])
        else:
            c = [0, 0, 255]
        c_str = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
        pts_colors.append(c_str)
    #
    fig = plt.figure(figsize=(16, 9))
    ax = fig.add_subplot(111)
    ax.scatter(pts3d[:, 0], pts3d[:, 1], c=pts_colors)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')
    # insert colormap for points
    if args.view_color_code:
        cm_ax = fig.add_axes([0.01, 0.81, 0.15, 0.15], anchor='SW')
        cm_ax.imshow(colormap_plt)
        cm_ax.axis('off')
    # draw each trajectory: direction arrows plus a dashed path
    for cam_views_2d, cam_pos_2d, color, label in zip(
            all_cam_views_2d, all_cam_pos_2d, all_colors, all_labels):
        ax.quiver(cam_pos_2d[:, 0], cam_pos_2d[:, 1], cam_views_2d[:, 0], cam_views_2d[:, 1],
                  width=0.003, headwidth=3, headlength=5, headaxislength=4, scale=6,
                  scale_units='width', color=color, label=label)
        ax.plot(cam_pos_2d[:, 0], cam_pos_2d[:, 1],
                color=color, linestyle='--')
    # ax.text(aver_start[0], aver_start[1] - 1.5, 'start', color='darkgreen',
    #         backgroundcolor='lightgray')
    ax.legend(ncol=len(cam_views_2d), loc='upper center')
    plt.axis('equal')
    plt.tight_layout()
    fig.savefig(os.path.join(out_dir, '2d_top_view.png'),
                bbox_inches='tight')
| [
"numpy.sqrt",
"PIL.Image.new",
"yaml.load",
"numpy.array",
"numpy.arctan2",
"matplotlib.rc",
"numpy.linalg.norm",
"colorama.init",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.dot",
"matplotlib.pyplot.axis",
"PIL.Image.fromarray",
"matplotlib.pyplot.imread",
"os.path.join",
"num... | [((195, 215), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (199, 215), False, 'from colorama import init, Fore\n'), ((217, 263), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'serif': ['Cardo'], 'size': 25})\n", (219, 263), False, 'from matplotlib import rc\n'), ((264, 287), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (266, 287), False, 'from matplotlib import rc\n'), ((981, 1000), 'numpy.loadtxt', 'np.loadtxt', (['Twcs_fn'], {}), '(Twcs_fn)\n', (991, 1000), True, 'import numpy as np\n'), ((1164, 1189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1187, 1189), False, 'import argparse\n'), ((1929, 1954), 'numpy.loadtxt', 'np.loadtxt', (['args.points3d'], {}), '(args.points3d)\n', (1939, 1954), True, 'import numpy as np\n'), ((2041, 2063), 'numpy.loadtxt', 'np.loadtxt', (['args.views'], {}), '(args.views)\n', (2051, 2063), True, 'import numpy as np\n'), ((3689, 3704), 'numpy.array', 'np.array', (['pts3d'], {}), '(pts3d)\n', (3697, 3704), True, 'import numpy as np\n'), ((3717, 3732), 'numpy.array', 'np.array', (['views'], {}), '(views)\n', (3725, 3732), True, 'import numpy as np\n'), ((4605, 4637), 'numpy.zeros', 'np.zeros', (['(500, 500, 4)', '"""uint8"""'], {}), "((500, 500, 4), 'uint8')\n", (4613, 4637), True, 'import numpy as np\n'), ((4933, 4953), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (4948, 4953), False, 'from PIL import Image\n'), ((5013, 5056), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(dim, dim)', '(0, 0, 0, 0)'], {}), "('RGBA', (dim, dim), (0, 0, 0, 0))\n", (5022, 5056), False, 'from PIL import Image\n'), ((5116, 5153), 'os.path.join', 'os.path.join', (['out_dir', '"""colormap.png"""'], {}), "(out_dir, 'colormap.png')\n", (5128, 5153), False, 'import os\n'), ((5208, 5231), 'matplotlib.pyplot.imread', 'plt.imread', (['colormap_fn'], {}), '(colormap_fn)\n', (5218, 5231), True, 'import 
matplotlib.pyplot as plt\n'), ((5588, 5615), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (5598, 5615), True, 'import matplotlib.pyplot as plt\n'), ((6609, 6626), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (6617, 6626), True, 'import matplotlib.pyplot as plt\n'), ((6631, 6649), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6647, 6649), True, 'import matplotlib.pyplot as plt\n'), ((2267, 2296), 'os.path.exists', 'os.path.exists', (['args.plot_cfg'], {}), '(args.plot_cfg)\n', (2281, 2296), False, 'import os\n'), ((4500, 4523), 'numpy.array', 'np.array', (['all_start_pos'], {}), '(all_start_pos)\n', (4508, 4523), True, 'import numpy as np\n'), ((6666, 6706), 'os.path.join', 'os.path.join', (['out_dir', '"""2d_top_view.png"""'], {}), "(out_dir, '2d_top_view.png')\n", (6678, 6706), False, 'import os\n'), ((335, 351), 'numpy.arctan2', 'np.arctan2', (['u', 'v'], {}), '(u, v)\n', (345, 351), True, 'import numpy as np\n'), ((2439, 2475), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (2448, 2475), False, 'import yaml\n'), ((3973, 4011), 'numpy.dot', 'np.dot', (['Twc[0:3, 0:3]', '[0.0, 0.0, 1.0]'], {}), '(Twc[0:3, 0:3], [0.0, 0.0, 1.0])\n', (3979, 4011), True, 'import numpy as np\n'), ((4040, 4081), 'numpy.array', 'np.array', (['[cam_view_i[1], -cam_view_i[0]]'], {}), '([cam_view_i[1], -cam_view_i[0]])\n', (4048, 4081), True, 'import numpy as np\n'), ((4399, 4421), 'numpy.array', 'np.array', (['cam_views_2d'], {}), '(cam_views_2d)\n', (4407, 4421), True, 'import numpy as np\n'), ((4453, 4473), 'numpy.array', 'np.array', (['cam_pos_2d'], {}), '(cam_pos_2d)\n', (4461, 4473), True, 'import numpy as np\n'), ((4132, 4161), 'numpy.linalg.norm', 'np.linalg.norm', (['cam_view_2d_i'], {}), '(cam_view_2d_i)\n', (4146, 4161), True, 'import numpy as np\n'), ((4768, 4790), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y)'], {}), '(x 
* x + y * y)\n', (4775, 4790), True, 'import numpy as np\n'), ((2769, 2825), 'os.path.join', 'os.path.join', (['args.top_res_dir', 'res_i', '"""stamped_Twc.txt"""'], {}), "(args.top_res_dir, res_i, 'stamped_Twc.txt')\n", (2781, 2825), False, 'import os\n')] |
import numpy as np
import pytest
from chainer_chemistry.dataset.preprocessors import wle as WLE # NOQA
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
@pytest.fixture
def small_datasets():
    """Build three identical tiny (train/val/test) NumpyTupleDatasets.

    Each dataset contains two molecules: a fully connected 3-node graph and a
    5-node graph with two connected components, plus dummy supervised labels.
    """
    N_1 = 3
    N_2 = 5
    # one-hot atom labels: 1 tp N
    atom_array_1 = np.arange(N_1)
    atom_array_2 = np.arange(N_2)
    # adj-array, manually
    # all connectes. expanded labels is a permutaion of 0,1,2
    adj_array_1 = np.array([[1, 1, 1],
                            [1, 1, 1],
                            [1, 1, 1]]).astype(np.int32)
    # node 0 --> 0-1.2
    # node 1 --> 1-0.2
    # node 2 --> 2-0.1
    adj_array_2 = np.array([[1, 1, 0, 0, 1],
                            [1, 1, 0, 0, 1],
                            [0, 0, 1, 1, 0],
                            [0, 0, 1, 1, 0],
                            [1, 1, 0, 0, 1]]).astype(np.float32)
    # node 0 --> 0-1.4
    # node 1 --> 1-0.4
    # node 2 --> 2-3
    # node 3 --> 3-2
    # node 4 --> 4-0.1
    # supervised labels, dummy
    # ``np.int`` (a plain alias of ``int``) was removed in NumPy 1.24;
    # use the builtin directly — behaviour is identical.
    teach_signal_1 = np.array(1).astype(int)
    teach_signal_2 = np.array(0).astype(int)
    # concat in a one numpy array!
    # The molecules have different sizes, so request an object array
    # explicitly; implicit ragged-array creation raises in modern NumPy.
    atom_arrays = np.array([atom_array_1, atom_array_2], dtype=object)
    adj_arrays = np.array([adj_array_1, adj_array_2], dtype=object)
    teach_signals = np.array([teach_signal_1, teach_signal_2])
    # train/val/test dataset, respectively
    datasets = [NumpyTupleDataset(atom_arrays, adj_arrays, teach_signals),
                NumpyTupleDataset(atom_arrays, adj_arrays, teach_signals),
                NumpyTupleDataset(atom_arrays, adj_arrays, teach_signals)]
    return datasets
def _get_elements(datasets, idx):
return [[mol[1] for mol in d] for d in datasets]
def _get_atom_arrays(datasets):
    """Extract the atom arrays (tuple column 0) from each dataset."""
    atom_column = 0
    return _get_elements(datasets, atom_column)
def _get_adj_arrays(datasets):
    """Extract the adjacency arrays (tuple column 1) from each dataset."""
    adj_column = 1
    return _get_elements(datasets, adj_column)
def _get_wle_arrays(datasets):
    """Extract the WLE label arrays (tuple column 2) from each dataset."""
    wle_column = 2
    return _get_elements(datasets, wle_column)
def _get_teach_signals(datasets, is_cwle=False):
    """Extract the supervised signals: column 2 for CWLE datasets, 3 otherwise."""
    signal_column = 2 if is_cwle else 3
    return _get_elements(datasets, signal_column)
def _check_np_array(actuals, expects):
assert len(actuals) == len(expects) == 3 # train/test/val
for actual_adjs, expect_adjs in zip(actuals, expects):
assert len(actual_adjs) == len(expect_adjs)
[np.testing.assert_array_equal(a, e)
for a, e in zip(actual_adjs, expect_adjs)]
def test_wle(small_datasets):
    """1-hop WLE must relabel atoms while leaving everything else untouched."""
    actual_datasets, actual_labels, actual_frequency = \
        WLE.apply_wle_for_datasets(small_datasets, 0)
    expected_frequency = {
        '0-1.2': 3,
        '1-0.2': 3,
        '2-0.1': 3,
        '0-1.4': 3,
        '1-0.4': 3,
        '2-3': 3,
        '3-2': 3,
        '4-0.1': 3,
    }
    assert expected_frequency == actual_frequency
    assert set(expected_frequency.keys()) == set(actual_labels)
    # adjacency matrices and supervised signals pass through unchanged
    _check_np_array(_get_adj_arrays(actual_datasets),
                    _get_adj_arrays(small_datasets))
    _check_np_array(_get_teach_signals(actual_datasets),
                    _get_teach_signals(small_datasets))
    # Check atom_arrays of train/val/test datasets are identical.
    # 2 is the number of samples in each (train/val/test) dataset.
    atom_arrays = _get_atom_arrays(actual_datasets)
    for sample_idx in range(2):
        mols = [split[sample_idx] for split in atom_arrays]
        assert len(mols) == 3
        np.testing.assert_array_equal(mols[0], mols[1])
        np.testing.assert_array_equal(mols[1], mols[2])
def test_2_hop_wle(small_datasets):
    """2-hop WLE: check relabelling frequencies and data pass-through."""
    k = 2
    ret_value = WLE.apply_wle_for_datasets(small_datasets, 0, k)
    actual_datasets, actual_labels, actual_frequency = ret_value
    # expected extended labels after two WLE iterations (3 splits each)
    expected_frequency = {'0-1.2': 3,
                          '1-0.2': 3,
                          '2-0.1': 3,
                          '3-4.7': 3,
                          '4-3.7': 3,
                          '5-6': 3,
                          '6-5': 3,
                          '7-3.4': 3}
    # <NAME> (<EMAIL>)
    # The following assertion checks too strong condition.
    # Specifically it assumes that the WLE algorithm assigns
    # the extended atom labels appeared in the first iteration
    # in a certain order and runs the second iteration.
    # Strictly speaking, this is not required in the algorithm.
    assert expected_frequency == actual_frequency
    expected_labels = set(expected_frequency.keys())
    assert expected_labels == set(actual_labels)
    # adjacency matrices must be unchanged by relabelling
    actual_adj_arrays = _get_adj_arrays(actual_datasets)
    expect_adj_arrays = _get_adj_arrays(small_datasets)
    _check_np_array(actual_adj_arrays, expect_adj_arrays)
    # supervised signals must be unchanged as well
    actual_signal_arrays = _get_teach_signals(actual_datasets)
    expect_signal_arrays = _get_teach_signals(small_datasets)
    _check_np_array(actual_signal_arrays, expect_signal_arrays)
    # Check atom_arrays of train/val/test datasets are identical.
    # 2 is the number of samples in each (train/val/test) dataset.
    atom_arrays = _get_atom_arrays(actual_datasets)
    first_mols = [d[0] for d in atom_arrays]
    second_mols = [d[1] for d in atom_arrays]
    for mols in (first_mols, second_mols):
        assert len(mols) == 3
        np.testing.assert_array_equal(mols[0], mols[1])
        np.testing.assert_array_equal(mols[1], mols[2])
def test_cwle(small_datasets):
    """CWLE must produce the expected label frequencies and new wle arrays."""
    actual_datasets, actual_labels, actual_frequency = \
        WLE.apply_cwle_for_datasets(small_datasets)
    expected_frequency = {'1.2': 3, '0.2': 3, '0.1': 6, '1.4': 3,
                          '0.4': 3, '3': 3, '2': 3}
    assert actual_frequency == expected_frequency
    assert set(actual_labels) == set(expected_frequency)
    # Adjacency matrices and teacher signals must be untouched by CWLE.
    _check_np_array(_get_adj_arrays(actual_datasets),
                    _get_adj_arrays(small_datasets))
    _check_np_array(_get_teach_signals(actual_datasets, True),
                    _get_teach_signals(small_datasets))
    # Both the atom arrays and the attached wle arrays must agree across
    # the train/val/test splits (two sample molecules per dataset).
    for getter in (_get_atom_arrays, _get_wle_arrays):
        arrays = getter(actual_datasets)
        for sample_idx in (0, 1):
            mols = [d[sample_idx] for d in arrays]
            assert len(mols) == 3
            np.testing.assert_array_equal(mols[0], mols[1])
            np.testing.assert_array_equal(mols[1], mols[2])
def test_findmaxidx_atom_label(small_datasets):
    """findmaxidx over atom labels of the small datasets should report 5."""
    assert WLE.findmaxidx(small_datasets, 'atom_label') == 5
@pytest.fixture
def cwle_datasets():
    """Build three (train/val/test) dummy datasets that carry wle arrays.

    Atom labels and teacher signals are filled with a huge constant so that
    ``findmaxidx`` pointed at the wle arrays cannot accidentally pick up an
    atom label or a signal value instead.
    """
    B = 10        # number of molecules per dataset
    D_atom = 5    # atom-array feature dimension
    D_wle = 50    # wle-array feature dimension
    K_large = 10000
    atom_arrays = [np.full((B, D_atom), K_large) for _ in range(3)]
    adj_arrays = [np.eye(B, dtype=np.int32) for _ in range(3)]
    wle_arrays = [np.arange(B * D_wle, dtype=np.int32).reshape(B, -1)
                  for _ in range(3)]
    signal_arrays = [np.full(B, K_large) for _ in range(3)]
    # (removed a leftover debug print of wle_arrays[0].shape)
    datasets = [NumpyTupleDataset(atom_arrays[i],
                                  adj_arrays[i],
                                  wle_arrays[i],
                                  signal_arrays[i])
                for i in range(3)]
    return datasets
def test_findmaxidx_wle(cwle_datasets):
    """findmaxidx over the wle labels should see B * D_wle = 10 * 50."""
    assert WLE.findmaxidx(cwle_datasets, 'wle_label') == 10 * 50
| [
"numpy.eye",
"numpy.full",
"numpy.testing.assert_array_equal",
"numpy.array",
"chainer_chemistry.dataset.preprocessors.wle.apply_wle_for_datasets",
"chainer_chemistry.dataset.preprocessors.wle.findmaxidx",
"chainer_chemistry.dataset.preprocessors.wle.apply_cwle_for_datasets",
"chainer_chemistry.datase... | [((313, 327), 'numpy.arange', 'np.arange', (['N_1'], {}), '(N_1)\n', (322, 327), True, 'import numpy as np\n'), ((348, 362), 'numpy.arange', 'np.arange', (['N_2'], {}), '(N_2)\n', (357, 362), True, 'import numpy as np\n'), ((1222, 1260), 'numpy.array', 'np.array', (['[atom_array_1, atom_array_2]'], {}), '([atom_array_1, atom_array_2])\n', (1230, 1260), True, 'import numpy as np\n'), ((1279, 1315), 'numpy.array', 'np.array', (['[adj_array_1, adj_array_2]'], {}), '([adj_array_1, adj_array_2])\n', (1287, 1315), True, 'import numpy as np\n'), ((1337, 1379), 'numpy.array', 'np.array', (['[teach_signal_1, teach_signal_2]'], {}), '([teach_signal_1, teach_signal_2])\n', (1345, 1379), True, 'import numpy as np\n'), ((2537, 2582), 'chainer_chemistry.dataset.preprocessors.wle.apply_wle_for_datasets', 'WLE.apply_wle_for_datasets', (['small_datasets', '(0)'], {}), '(small_datasets, 0)\n', (2563, 2582), True, 'from chainer_chemistry.dataset.preprocessors import wle as WLE\n'), ((4027, 4075), 'chainer_chemistry.dataset.preprocessors.wle.apply_wle_for_datasets', 'WLE.apply_wle_for_datasets', (['small_datasets', '(0)', 'k'], {}), '(small_datasets, 0, k)\n', (4053, 4075), True, 'from chainer_chemistry.dataset.preprocessors import wle as WLE\n'), ((5836, 5879), 'chainer_chemistry.dataset.preprocessors.wle.apply_cwle_for_datasets', 'WLE.apply_cwle_for_datasets', (['small_datasets'], {}), '(small_datasets)\n', (5863, 5879), True, 'from chainer_chemistry.dataset.preprocessors import wle as WLE\n'), ((7606, 7650), 'chainer_chemistry.dataset.preprocessors.wle.findmaxidx', 'WLE.findmaxidx', (['small_datasets', '"""atom_label"""'], {}), "(small_datasets, 'atom_label')\n", (7620, 7650), True, 'from chainer_chemistry.dataset.preprocessors import wle as WLE\n'), ((8465, 8507), 'chainer_chemistry.dataset.preprocessors.wle.findmaxidx', 'WLE.findmaxidx', (['cwle_datasets', '"""wle_label"""'], {}), "(cwle_datasets, 'wle_label')\n", (8479, 8507), True, 'from 
chainer_chemistry.dataset.preprocessors import wle as WLE\n'), ((1443, 1500), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset', 'NumpyTupleDataset', (['atom_arrays', 'adj_arrays', 'teach_signals'], {}), '(atom_arrays, adj_arrays, teach_signals)\n', (1460, 1500), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((1519, 1576), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset', 'NumpyTupleDataset', (['atom_arrays', 'adj_arrays', 'teach_signals'], {}), '(atom_arrays, adj_arrays, teach_signals)\n', (1536, 1576), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((1595, 1652), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset', 'NumpyTupleDataset', (['atom_arrays', 'adj_arrays', 'teach_signals'], {}), '(atom_arrays, adj_arrays, teach_signals)\n', (1612, 1652), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((3853, 3900), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[0]', 'mols[1]'], {}), '(mols[0], mols[1])\n', (3882, 3900), True, 'import numpy as np\n'), ((3910, 3957), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[1]', 'mols[2]'], {}), '(mols[1], mols[2])\n', (3939, 3957), True, 'import numpy as np\n'), ((5678, 5725), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[0]', 'mols[1]'], {}), '(mols[0], mols[1])\n', (5707, 5725), True, 'import numpy as np\n'), ((5735, 5782), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[1]', 'mols[2]'], {}), '(mols[1], mols[2])\n', (5764, 5782), True, 'import numpy as np\n'), ((7035, 7082), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[0]', 'mols[1]'], {}), '(mols[0], mols[1])\n', (7064, 7082), True, 'import numpy as np\n'), ((7092, 7139), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[1]', 
'mols[2]'], {}), '(mols[1], mols[2])\n', (7121, 7139), True, 'import numpy as np\n'), ((7434, 7481), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[0]', 'mols[1]'], {}), '(mols[0], mols[1])\n', (7463, 7481), True, 'import numpy as np\n'), ((7491, 7538), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mols[1]', 'mols[2]'], {}), '(mols[1], mols[2])\n', (7520, 7538), True, 'import numpy as np\n'), ((7826, 7855), 'numpy.full', 'np.full', (['(B, D_atom)', 'K_large'], {}), '((B, D_atom), K_large)\n', (7833, 7855), True, 'import numpy as np\n'), ((7894, 7919), 'numpy.eye', 'np.eye', (['B'], {'dtype': 'np.int32'}), '(B, dtype=np.int32)\n', (7900, 7919), True, 'import numpy as np\n'), ((8070, 8089), 'numpy.full', 'np.full', (['B', 'K_large'], {}), '(B, K_large)\n', (8077, 8089), True, 'import numpy as np\n'), ((8162, 8247), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset', 'NumpyTupleDataset', (['atom_arrays[i]', 'adj_arrays[i]', 'wle_arrays[i]', 'signal_arrays[i]'], {}), '(atom_arrays[i], adj_arrays[i], wle_arrays[i],\n signal_arrays[i])\n', (8179, 8247), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((474, 517), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n', (482, 517), True, 'import numpy as np\n'), ((686, 786), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 1], [1, 1, 0, 0, 1], [0, 0, 1, 1, 0], [0, 0, 1, 1, 0], [1, 1,\n 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 1], [1, 1, 0, 0, 1], [0, 0, 1, 1, 0], [0, 0, 1, 1, 0\n ], [1, 1, 0, 0, 1]])\n', (694, 786), True, 'import numpy as np\n'), ((1089, 1100), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1097, 1100), True, 'import numpy as np\n'), ((1138, 1149), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (1146, 1149), True, 'import numpy as np\n'), ((2393, 2428), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['a', 'e'], {}), 
'(a, e)\n', (2422, 2428), True, 'import numpy as np\n'), ((7958, 7994), 'numpy.arange', 'np.arange', (['(B * D_wle)'], {'dtype': 'np.int32'}), '(B * D_wle, dtype=np.int32)\n', (7967, 7994), True, 'import numpy as np\n')] |
import time
import numpy as np
from numpy import newaxis as nax
from numpy.linalg import inv, cholesky
from taps.models import Model
from taps.ml.kernels import Kernel
from taps.ml.means import Mean
from taps.ml.regressions import Regression
class Gaussian(Model):
    """Gaussian-process approximation of a potential energy surface.

    Using the data gathered so far, estimate the potential (and optionally
    gradients, hessian and the predictive covariance) via GP regression.

    Parameters
    ----------
    'real_model': Model class
        Actual model that this Gaussian PES is supposed to approximate;
        defaults to self.
    'kernel': Kernel class
        kernel (covariance) function for the Gaussian process
    'mean': Mean class
        User-defined Mean function used in the GP prior
    'regression': Regression class
        hyperparameter-fitting routine, re-run whenever the data change
    """
    # Properties this model can provide through `calculate`.
    implemented_properties = {'covariance', 'potential', 'gradients',
                              'hessian'}

    def __init__(self, real_model=None, kernel=None, mean=None,
                 regression=None, **kwargs):
        """Store the GP components, falling back to default instances."""
        self.real_model = real_model or self
        self.kernel = kernel or Kernel()
        self.mean = mean or Mean()
        self.regression = regression or Regression()
        super().__init__(**kwargs)

    def calculate(self, paths, coords, properties=None, **kwargs):
        """Compute the requested GP predictions and store them in self.results.

        Notation (shapes are assumptions inferred from usage -- TODO confirm):
        P : int | length of image to consider / or number of image to calculate
        D : int | Dimension
        N : int | Number of Atom in one image
        Xm :
            Data (training) coords
        Xn :
            points where predictions are wanted
        Y :
            training targets; return : Dim x N x P - 2 array
        """
        data = paths.get_image_data(prj=self.prj)
        orig_shape = coords.shape[:-1]
        # M (number of data points) is computed but never used below.
        D, M, N = np.prod(coords.D), len(data['potential']), coords.N
        Xm = data['kernel']['X']
        Xn = coords.coords.reshape(D, N)
        Y = data['kernel']['Y']
        k, m = self.kernel, self.mean
        # Re-fit hyperparameters and cache the inverted (noisy) kernel
        # matrix whenever the underlying data set has changed.
        if data['data_ids'] != self._cache.get('data_ids'):
            self.regression(mean=m, kernel=k, data=data)
            self._cache['K_y_inv'] = inv(k(Xm, Xm, noise=True))
            self._cache['data_ids'] = data['data_ids'].copy()
        K_y_inv = self._cache['K_y_inv']
        if 'potential_and_gradient' in properties:
            # NOTE(review): this branch tests for 'potential_and_gradient'
            # but stores under 'potential_and_forces'; it also uses m(Xn)
            # and (Y - m(Xn)) where every other branch uses (Y - m(Xm)),
            # whose shapes only agree when Xn coincides with Xm. Looks
            # suspicious -- verify before relying on this branch.
            N = len(Xn[..., :])  # rebinds N to the number of prediction points
            K_s = k(Xm, Xn)   # (D+1)N x (D+1)M x P
            mu = m(Xn) + K_s.T @ K_y_inv @ (Y - m(Xn))
            E = mu[: N]
            F = -mu[N:].reshape(*orig_shape, N)
            self.results['potential_and_forces'] = E, F
        if 'potential' in properties:
            # Posterior mean of the potential only.
            K_s = k(Xm, Xn, potential_only=True)   # (D+1)N x M
            potential = m.V(Xn) + K_s.T @ K_y_inv @ (Y - m(Xm))
            self.results['potential'] = potential
        if 'gradients' in properties:
            # Posterior mean of the gradient, reshaped to the input layout.
            dK_s = k(Xm, Xn, gradient_only=True)   # (D+1)N x (D+1)M x P
            mu_f = m.dV(Xn) + dK_s.T @ K_y_inv @ (Y - m(Xm))
            self.results['gradients'] = mu_f.reshape(*orig_shape, N)
        if 'hessian' in properties:
            K_s = k(Xm, Xn, hessian_only=True)  # (D+1)N x DDM
            H = m.H(Xn) + K_s.T @ K_y_inv @ (Y - m(Xm))  # DDM
            self.results['hessian'] = H.reshape(D, D, N)
        if 'covariance' in properties:
            # Predictive covariance: prior covariance minus the part
            # explained by the training data.
            K = k(Xn, Xn, orig=True)
            K_s = k(Xm, Xn)
            K_s_T = k(Xn, Xm)
            self.results['covariance'] = K - (K_s_T @ K_y_inv @ K_s)[:N, :N]

    def get_covariance(self, paths, **kwargs):
        """Return the half-width of a 95% confidence band (1.96 * sigma / 2).

        Negative variances (numerical noise) are clipped to zero before
        taking the square root.
        """
        cov_coords = self.get_properties(paths, properties='covariance',
                                         **kwargs)
        _ = np.diag(cov_coords)
        cov_coords = _.copy()
        cov_coords[_ < 0] = 0
        return 1.96 * np.sqrt(cov_coords) / 2

    def get_hyperparameters(self, hyperparameters_list=None):
        """Delegate hyperparameter retrieval to the kernel."""
        return self.kernel.get_hyperparameters(hyperparameters_list)

    def get_state_info(self):
        """Return a printable, aligned summary of the kernel hyperparameters."""
        info = []
        for k, v in self.kernel.hyperparameters.items():
            info.append(f"{k: <11}" + ": " + str(v))
        return '\n'.join(info)
| [
"numpy.prod",
"numpy.sqrt",
"numpy.diag",
"taps.ml.regressions.Regression",
"taps.ml.means.Mean",
"taps.ml.kernels.Kernel"
] | [((3607, 3626), 'numpy.diag', 'np.diag', (['cov_coords'], {}), '(cov_coords)\n', (3614, 3626), True, 'import numpy as np\n'), ((1044, 1052), 'taps.ml.kernels.Kernel', 'Kernel', ([], {}), '()\n', (1050, 1052), False, 'from taps.ml.kernels import Kernel\n'), ((1081, 1087), 'taps.ml.means.Mean', 'Mean', ([], {}), '()\n', (1085, 1087), False, 'from taps.ml.means import Mean\n'), ((1128, 1140), 'taps.ml.regressions.Regression', 'Regression', ([], {}), '()\n', (1138, 1140), False, 'from taps.ml.regressions import Regression\n'), ((1662, 1679), 'numpy.prod', 'np.prod', (['coords.D'], {}), '(coords.D)\n', (1669, 1679), True, 'import numpy as np\n'), ((3709, 3728), 'numpy.sqrt', 'np.sqrt', (['cov_coords'], {}), '(cov_coords)\n', (3716, 3728), True, 'import numpy as np\n')] |
from pybulletgym.envs.roboschool.robots.robot_bases import MJCFBasedRobot
import numpy as np
class Reacher(MJCFBasedRobot):
    """Two-joint planar reacher robot loaded from reacher.xml."""
    TARG_LIMIT = 0.27

    def __init__(self):
        MJCFBasedRobot.__init__(self, 'reacher.xml', 'body0', action_dim=2, obs_dim=9)

    def robot_specific_reset(self, bullet_client):
        """Randomize the target position and both joint angles."""
        for axis in ("target_x", "target_y"):
            self.jdict[axis].reset_current_position(
                self.np_random.uniform(low=-self.TARG_LIMIT, high=self.TARG_LIMIT), 0)
        self.fingertip = self.parts["fingertip"]
        self.target = self.parts["target"]
        self.central_joint = self.jdict["joint0"]
        self.elbow_joint = self.jdict["joint1"]
        for joint in (self.central_joint, self.elbow_joint):
            joint.reset_current_position(self.np_random.uniform(low=-3.14, high=3.14), 0)

    def apply_action(self, a):
        """Apply clipped, scaled torques to both joints."""
        assert np.isfinite(a).all()
        for joint, torque in ((self.central_joint, a[0]), (self.elbow_joint, a[1])):
            joint.set_motor_torque(0.05 * float(np.clip(torque, -1, +1)))

    def calc_state(self):
        """Return the 9-dim observation; also caches joint state and to-target vector."""
        theta, self.theta_dot = self.central_joint.current_relative_position()
        self.gamma, self.gamma_dot = self.elbow_joint.current_relative_position()
        target_x, _ = self.jdict["target_x"].current_position()
        target_y, _ = self.jdict["target_y"].current_position()
        self.to_target_vec = np.array(self.fingertip.pose().xyz()) - np.array(self.target.pose().xyz())
        observation = [
            target_x,
            target_y,
            self.to_target_vec[0],
            self.to_target_vec[1],
            np.cos(theta),
            np.sin(theta),
            self.theta_dot,
            self.gamma,
            self.gamma_dot,
        ]
        return np.array(observation)

    def calc_potential(self):
        """Potential is the negative scaled distance to the target."""
        return -100 * np.linalg.norm(self.to_target_vec)
| [
"numpy.clip",
"pybulletgym.envs.roboschool.robots.robot_bases.MJCFBasedRobot.__init__",
"numpy.isfinite",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin"
] | [((181, 259), 'pybulletgym.envs.roboschool.robots.robot_bases.MJCFBasedRobot.__init__', 'MJCFBasedRobot.__init__', (['self', '"""reacher.xml"""', '"""body0"""'], {'action_dim': '(2)', 'obs_dim': '(9)'}), "(self, 'reacher.xml', 'body0', action_dim=2, obs_dim=9)\n", (204, 259), False, 'from pybulletgym.envs.roboschool.robots.robot_bases import MJCFBasedRobot\n'), ((1962, 1996), 'numpy.linalg.norm', 'np.linalg.norm', (['self.to_target_vec'], {}), '(self.to_target_vec)\n', (1976, 1996), True, 'import numpy as np\n'), ((1022, 1036), 'numpy.isfinite', 'np.isfinite', (['a'], {}), '(a)\n', (1033, 1036), True, 'import numpy as np\n'), ((1776, 1789), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1782, 1789), True, 'import numpy as np\n'), ((1803, 1816), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1809, 1816), True, 'import numpy as np\n'), ((1101, 1122), 'numpy.clip', 'np.clip', (['a[0]', '(-1)', '(+1)'], {}), '(a[0], -1, +1)\n', (1108, 1122), True, 'import numpy as np\n'), ((1180, 1201), 'numpy.clip', 'np.clip', (['a[1]', '(-1)', '(+1)'], {}), '(a[1], -1, +1)\n', (1187, 1201), True, 'import numpy as np\n')] |
from QuantumGameTheory import Game, QuantumGameCircuit
from qiskit import Aer, execute
from qiskit.quantum_info import Operator
from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate
import numpy as np
from protocols import Protocol
class Backend():
    """Runs quantum games on a qasm simulator, mapping move names to gates."""

    def __init__(self, type):
        """Build the gate lookup table, the game of the given type and the simulator.

        Parameters
        ----------
        type : game-type identifier forwarded to ``Game``.
        """
        self.lookup_table = self._gen_lookup_table()
        self.game = Game(type)
        self.backend = Aer.get_backend("qasm_simulator")
        self.img_path = "assets/circuit.png"

    def _gen_lookup_table(self):
        """Map player move names to qiskit gate objects.

        Bug fix: removed the dead locals ``op1``/``op2``/``op3`` that
        duplicated the "Rz1"/"Rz2"/"Ry1" entries but were never used.
        """
        result = {"X": XGate(), "Y": YGate(), "S": SGate(), "Z": ZGate(), "H": HGate(), "T": TGate(), "W": self._gen_w_gate(),
                  "Rz1": RZGate(-3 * np.pi / 8), "Rz2": RZGate(np.pi/2), "Ry1": RYGate(np.pi/2)}
        return result

    def _gen_w_gate(self):
        """Build the 'W' gate operator from the Pauli matrices."""
        I = np.matrix('1 0; 0 1')
        X = np.matrix('0 1; 1 0')
        Y = np.matrix('0 -1j; 1j 0')
        Z = np.matrix('1 0; 0 -1')
        a = (1 / np.sqrt(2)) * np.cos(np.pi / 16) * (I + 1j * X) - \
            (1j / np.sqrt(2)) * np.sin(np.pi / 16) * (Y + Z)
        return Operator(a)

    def _simulation(self, qgc):
        """Run the circuit once; return (counts dict, measured bits as ints)."""
        job_sim = execute(qgc.circ, self.backend, shots=1)
        res_sim = job_sim.result()
        counts = res_sim.get_counts(qgc.circ)
        return counts, [int(s) for s in list(counts.keys())[0]]

    def play(self, player_gates, protocol: str):
        """Translate player move names to gates, run the circuit, score the game.

        Parameters
        ----------
        player_gates : per-player lists of move names (keys of the lookup table).
        protocol : name of a ``Protocol`` enum member.

        Returns
        -------
        (counts, result) : measurement counts and the reversed game result.
        """
        protocol = Protocol[protocol]
        player_gate_objects = []
        for i in range(len(player_gates)):
            player_gate_objects.append([])
            for j in player_gates[i]:
                player_gate_objects[i].append(self.lookup_table[j])
        qgc = QuantumGameCircuit(player_gate_objects, protocol)
        qgc.draw_circuit(self.img_path)
        counts, choices = self._simulation(qgc)
        game_result = self.game.get_result(choices)
        return counts, game_result[::-1]
| [
"qiskit.extensions.TGate",
"qiskit.extensions.RYGate",
"qiskit.extensions.YGate",
"qiskit.execute",
"qiskit.extensions.ZGate",
"numpy.sqrt",
"QuantumGameTheory.QuantumGameCircuit",
"qiskit.extensions.XGate",
"qiskit.quantum_info.Operator",
"qiskit.extensions.SGate",
"QuantumGameTheory.Game",
"... | [((387, 397), 'QuantumGameTheory.Game', 'Game', (['type'], {}), '(type)\n', (391, 397), False, 'from QuantumGameTheory import Game, QuantumGameCircuit\n'), ((421, 454), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""qasm_simulator"""'], {}), "('qasm_simulator')\n", (436, 454), False, 'from qiskit import Aer, execute\n'), ((549, 571), 'qiskit.extensions.RZGate', 'RZGate', (['(-3 * np.pi / 8)'], {}), '(-3 * np.pi / 8)\n', (555, 571), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((586, 603), 'qiskit.extensions.RYGate', 'RYGate', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (592, 603), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((618, 635), 'qiskit.extensions.RZGate', 'RZGate', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (624, 635), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((922, 943), 'numpy.matrix', 'np.matrix', (['"""1 0; 0 1"""'], {}), "('1 0; 0 1')\n", (931, 943), True, 'import numpy as np\n'), ((956, 977), 'numpy.matrix', 'np.matrix', (['"""0 1; 1 0"""'], {}), "('0 1; 1 0')\n", (965, 977), True, 'import numpy as np\n'), ((990, 1014), 'numpy.matrix', 'np.matrix', (['"""0 -1j; 1j 0"""'], {}), "('0 -1j; 1j 0')\n", (999, 1014), True, 'import numpy as np\n'), ((1027, 1049), 'numpy.matrix', 'np.matrix', (['"""1 0; 0 -1"""'], {}), "('1 0; 0 -1')\n", (1036, 1049), True, 'import numpy as np\n'), ((1196, 1207), 'qiskit.quantum_info.Operator', 'Operator', (['a'], {}), '(a)\n', (1204, 1207), False, 'from qiskit.quantum_info import Operator\n'), ((1259, 1299), 'qiskit.execute', 'execute', (['qgc.circ', 'self.backend'], {'shots': '(1)'}), '(qgc.circ, self.backend, shots=1)\n', (1266, 1299), False, 'from qiskit import Aer, execute\n'), ((1773, 1822), 'QuantumGameTheory.QuantumGameCircuit', 'QuantumGameCircuit', (['player_gate_objects', 'protocol'], {}), '(player_gate_objects, protocol)\n', (1791, 1822), 
False, 'from QuantumGameTheory import Game, QuantumGameCircuit\n'), ((659, 666), 'qiskit.extensions.XGate', 'XGate', ([], {}), '()\n', (664, 666), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((673, 680), 'qiskit.extensions.YGate', 'YGate', ([], {}), '()\n', (678, 680), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((687, 694), 'qiskit.extensions.SGate', 'SGate', ([], {}), '()\n', (692, 694), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((701, 708), 'qiskit.extensions.ZGate', 'ZGate', ([], {}), '()\n', (706, 708), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((715, 722), 'qiskit.extensions.HGate', 'HGate', ([], {}), '()\n', (720, 722), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((729, 736), 'qiskit.extensions.TGate', 'TGate', ([], {}), '()\n', (734, 736), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((788, 810), 'qiskit.extensions.RZGate', 'RZGate', (['(-3 * np.pi / 8)'], {}), '(-3 * np.pi / 8)\n', (794, 810), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((819, 836), 'qiskit.extensions.RZGate', 'RZGate', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (825, 836), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((843, 860), 'qiskit.extensions.RYGate', 'RYGate', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (849, 860), False, 'from qiskit.extensions import XGate, YGate, SGate, ZGate, HGate, TGate, RZGate, RYGate\n'), ((1082, 1100), 'numpy.cos', 'np.cos', (['(np.pi / 16)'], {}), '(np.pi / 16)\n', (1088, 1100), True, 'import numpy as np\n'), ((1152, 1170), 'numpy.sin', 'np.sin', (['(np.pi / 16)'], {}), '(np.pi / 16)\n', (1158, 1170), True, 'import numpy 
as np\n'), ((1068, 1078), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1075, 1078), True, 'import numpy as np\n'), ((1138, 1148), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1145, 1148), True, 'import numpy as np\n')] |
from __future__ import print_function
import cv2
import os
import numpy as np
class ImagePreprocessor(object):
    """Image preprocessor with methods to read a single image or a directory."""

    def __init__(self):
        pass

    def read_image(self,path,shape=None):
        """Reads image with given path. If shape is given the output image will be
        numpy ndarray of that shape. Otherwise the shape of output image will not be changed.

        Arguments:
            path {string} -- Absolute or relative path to image

        Keyword Arguments:
            shape {tuple} -- Shape of output image (default: {None})

        Returns:
            numpy.ndarray -- Image with given path.
        """
        assert os.path.exists(path), "Path '"+str(path)+"' does not exist"
        assert shape is None or len(shape)==2 or len(shape)==3,"Shape value should be none or list of len 2 or len 3"
        image = cv2.imread(path)
        # A 2-D target shape, or an explicit single channel, means grayscale.
        if not(shape is None) and (len(shape)==2 or shape[2]==1):
            image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        if not (shape is None):
            # NOTE(review): cv2.resize expects dsize as (width, height);
            # this passes (shape[0], shape[1]) -- confirm shape ordering.
            image = cv2.resize(image,(shape[0],shape[1]))
            image = image.reshape(shape)
        return image

    def __get_all_image_files(self,path,ext=[".jpg",".png",".bmp"]):
        """Gets all image files which have extension given by `ext` argument.

        Arguments:
            path {str} -- Path to folder containing images

        Keyword Arguments:
            ext {str or list} -- Extension(s) of images (default: {[".jpg",".png",".bmp"]})

        Returns:
            list -- file names of all matching images inside `path`
        """
        assert os.path.exists(path), "Path '"+str(path)+"' does not exist"
        # Bug fix: the original check ``type(str) or len(ext) == 0`` was
        # always true; validate that ext is a str or a non-empty list.
        assert isinstance(ext, str) or len(ext) > 0, "ext should contain at least one element"
        if type(ext)==str:
            ext = [ext]
        output = []
        for file_name in os.listdir(path):
            _, e = os.path.splitext(file_name)
            if e in ext:
                output += [file_name]
        return output

    def read_all_images(self,directory,image_shape,sorted=False,ext =[".jpg",".png",".bmp"],output_length=None):
        """Reads all images inside a directory into one numpy array.

        Arguments:
            directory {str} -- Path to directory
            image_shape {tuple} -- output image shape (len 2 or 3)
            sorted {bool} -- sort file names first (e.g. for sequences)
            output_length {int} -- fixed length of the output array; if None,
                the number of images found in the directory is used.

        Keyword Arguments:
            ext {str or list} -- Extension(s) of images to read (default: {[".jpg",".png",".bmp"]})

        Returns:
            numpy.ndarray -- array of all images read from the directory
        """
        assert os.path.exists(directory), "Path '"+str(directory)+"' does not exist"
        # Bug fix: same always-true validation as in __get_all_image_files.
        assert isinstance(ext, str) or len(ext) > 0, "ext should contain at least one element"
        assert len(image_shape)==2 or len(image_shape)==3,"Image Shape should be list of len 2 or len 3"
        image_files = self.__get_all_image_files(directory,ext)
        if not(output_length is None):
            length = output_length
        else:
            length = len(image_files)
        if sorted:
            image_files.sort()
        if len(image_shape) == 3:
            output = np.zeros((length,image_shape[0],image_shape[1],image_shape[2]))
        else:
            output = np.zeros((length,image_shape[0],image_shape[1]))
        for i in range(length):
            image = self.read_image(os.path.join(directory,image_files[i]),image_shape)
            output[i] = image
        return output
| [
"os.path.exists",
"os.listdir",
"os.path.splitext",
"os.path.join",
"numpy.zeros",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread"
] | [((759, 779), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (773, 779), False, 'import os\n'), ((953, 969), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (963, 969), False, 'import cv2\n'), ((1784, 1804), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1798, 1804), False, 'import os\n'), ((2023, 2039), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2033, 2039), False, 'import os\n'), ((3014, 3039), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (3028, 3039), False, 'import os\n'), ((1057, 1096), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1069, 1096), False, 'import cv2\n'), ((1148, 1187), 'cv2.resize', 'cv2.resize', (['image', '(shape[0], shape[1])'], {}), '(image, (shape[0], shape[1]))\n', (1158, 1187), False, 'import cv2\n'), ((2059, 2086), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (2075, 2086), False, 'import os\n'), ((3576, 3642), 'numpy.zeros', 'np.zeros', (['(length, image_shape[0], image_shape[1], image_shape[2])'], {}), '((length, image_shape[0], image_shape[1], image_shape[2]))\n', (3584, 3642), True, 'import numpy as np\n'), ((3675, 3725), 'numpy.zeros', 'np.zeros', (['(length, image_shape[0], image_shape[1])'], {}), '((length, image_shape[0], image_shape[1]))\n', (3683, 3725), True, 'import numpy as np\n'), ((3792, 3831), 'os.path.join', 'os.path.join', (['directory', 'image_files[i]'], {}), '(directory, image_files[i])\n', (3804, 3831), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from copy import deepcopy
try:
# py >= 3.3
from unittest.mock import patch
except ImportError:
# py < 3.3
from mock import patch
import numpy as np
import pytest
from pygam import *
from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth
# TODO check dtypes works as expected
# TODO checkX, checky, check XY expand as needed, call out bad domain
@pytest.fixture
def wage_gam(wage_X_y):
    """Linear GAM fitted on the wage data: two splines plus one factor term."""
    X, y = wage_X_y
    terms = s(0) + s(1) + f(2)
    return LinearGAM(terms).fit(X, y)
@pytest.fixture
def default_gam(default_X_y):
    """Logistic GAM fitted on the default data with default terms."""
    X, y = default_X_y
    return LogisticGAM().fit(X, y)
def test_check_X_categorical_prediction_exceeds_training(wage_X_y, wage_gam):
    """
    if our categorical variable is outside the training range
    we should get an error
    """
    X, y = wage_X_y  # last feature is categorical
    gam = wage_gam
    # get edge knots for the last (categorical) feature
    eks = gam.edge_knots_[-1]
    # overwrite the categorical column with max-training-value + 1, so every
    # row now lies outside the range seen during fitting (the old comment
    # said "add 1 to all Xs", but the code replaces the whole column)
    X[:,-1] = eks[-1] + 1
    # prediction on out-of-range categories must fail loudly
    with pytest.raises(ValueError):
        gam.predict(X)
def test_check_y_not_int_not_float(wage_X_y, wage_gam):
    """check_y rejects targets that are neither int nor float."""
    _, y = wage_X_y
    bad_y = ['hi'] * len(y)
    with pytest.raises(ValueError):
        check_y(bad_y, wage_gam.link, wage_gam.distribution)
def test_check_y_casts_to_numerical(wage_X_y, wage_gam):
    """check_y coerces object-dtype targets back to float."""
    _, y = wage_X_y
    y_obj = y.astype('object')
    checked = check_y(y_obj, wage_gam.link, wage_gam.distribution)
    assert checked.dtype == 'float'
def test_check_y_not_min_samples(wage_X_y, wage_gam):
    """check_y raises when fewer samples than min_samples are supplied."""
    _, y = wage_X_y
    too_many = len(y) + 1
    with pytest.raises(ValueError):
        check_y(y, wage_gam.link, wage_gam.distribution,
                min_samples=too_many, verbose=False)
def test_check_y_not_in_domain_link(default_X_y, default_gam):
    """Labels outside the link function's domain make check_y raise."""
    _, y = default_X_y
    with pytest.raises(ValueError):
        check_y(y + .1, default_gam.link, default_gam.distribution, verbose=False)
def test_check_X_not_int_not_float():
    """check_X rejects non-numeric entries."""
    with pytest.raises(ValueError):
        check_X(['hi'], verbose=False)

def test_check_X_too_many_dims():
    """check_X accepts at most 2-D input."""
    with pytest.raises(ValueError):
        check_X(np.ones((5, 4, 3)))

def test_check_X_not_min_samples():
    """check_X enforces a minimum sample count."""
    with pytest.raises(ValueError):
        check_X(np.ones(5), min_samples=6, verbose=False)

def test_check_X_y_different_lengths():
    """check_X_y requires X and y to have the same length."""
    with pytest.raises(ValueError):
        check_X_y(np.ones(5), np.ones(4))
def test_input_data_after_fitting(mcycle_X_y):
    """
    our check_X and check_y functions should be invoked
    any time external data is input to the model
    """
    X, y = mcycle_X_y
    weights = np.ones_like(y)

    X_nan = deepcopy(X)
    X_nan[0] = X_nan[0] * np.nan
    y_nan = deepcopy(y.values)
    y_nan[0] = y_nan[0] * np.nan
    weights_nan = deepcopy(weights)
    weights_nan[0] = weights_nan[0] * np.nan

    gam = LinearGAM()

    # fitting must reject NaNs in X, in y, or in the weights
    for fit_args in ((X_nan, y, weights), (X, y_nan, weights), (X, y, weights_nan)):
        with pytest.raises(ValueError):
            gam.fit(*fit_args)

    gam = gam.fit(X, y)

    # NaNs in X
    for method in (gam.predict, gam.predict_mu, gam.confidence_intervals,
                   gam.prediction_intervals, gam.partial_dependence):
        with pytest.raises(ValueError):
            method(X_nan)
    for method in (gam.deviance_residuals, gam.loglikelihood, gam.gridsearch):
        with pytest.raises(ValueError):
            method(X_nan, y, weights)
    with pytest.raises(ValueError):
        gam.sample(X_nan, y)

    # NaNs in y
    for method in (gam.deviance_residuals, gam.loglikelihood, gam.gridsearch):
        with pytest.raises(ValueError):
            method(X, y_nan, weights)
    with pytest.raises(ValueError):
        gam.sample(X, y_nan, weights=weights, n_bootstraps=2)

    # NaNs in the weights
    for method in (gam.deviance_residuals, gam.loglikelihood, gam.gridsearch):
        with pytest.raises(ValueError):
            method(X, y, weights_nan)
    with pytest.raises(ValueError):
        gam.sample(X, y, weights=weights_nan, n_bootstraps=2)
def test_catch_chol_pos_def_error(default_X_y):
    """
    regression test

    a gridsearch with a poorly conditioned penalty matrix should not crash
    """
    X, y = default_X_y
    lams = np.logspace(10, 12, 3)
    LogisticGAM().gridsearch(X, y, lam=lams)
def test_pvalue_sig_codes():
    """sig_code maps p-values onto the expected significance markers."""
    with pytest.raises(AssertionError):
        sig_code(-1)
    expected = [(0, '***'), (0.00101, '**'), (0.0101, '*'),
                (0.0501, '.'), (0.101, ' ')]
    for p_value, code in expected:
        assert sig_code(p_value) == code
def test_b_spline_basis_extrapolates(mcycle_X_y):
    """The fitted spline basis should extrapolate with an unchanged slope."""
    X, y = mcycle_X_y
    gam = LinearGAM().fit(X, y)

    slopes = []
    X = gam.generate_X_grid(term=0, n=50000)
    y = gam.predict(X)
    slopes.append((y[1] - y[0]) / (X[1] - X[0]))

    # widen the grid by 10% around its mean so it extends past the data
    mean = X.mean()
    X = (X - mean) * 1.1 + mean
    y = gam.predict(X)
    slopes.append((y[1] - y[0]) / (X[1] - X[0]))

    assert np.allclose(slopes[0], slopes[1], atol=1e-4)
def test_iterable_depth():
    """check_iterable_depth reports nesting depth and honors max_depth."""
    nested = [[[3]]]
    assert check_iterable_depth(nested) == 3
    assert check_iterable_depth(nested, max_depth=2) == 2
def test_no_SKSPIMPORT(mcycle_X_y):
    """make sure our module work with and without scikit-sparse
    """
    from pygam.utils import SKSPIMPORT
    if SKSPIMPORT:
        # Only meaningful when scikit-sparse is actually installed: force the
        # flag to False so the pure-scipy fallback path gets exercised.
        with patch('pygam.utils.SKSPIMPORT', new=False) as SKSPIMPORT_patch:
            # Re-importing fetches the patched module attribute (now False).
            from pygam.utils import SKSPIMPORT
            assert SKSPIMPORT == False
            # Fitting must still succeed without the sparse Cholesky backend.
            X, y = mcycle_X_y
            assert LinearGAM().fit(X, y)._is_fitted
| [
"numpy.ones_like",
"numpy.allclose",
"mock.patch",
"pygam.utils.check_iterable_depth",
"numpy.ones",
"pygam.utils.check_X",
"pytest.raises",
"copy.deepcopy",
"pygam.utils.check_y",
"numpy.logspace",
"pygam.utils.sig_code"
] | [((1575, 1623), 'pygam.utils.check_y', 'check_y', (['y', 'wage_gam.link', 'wage_gam.distribution'], {}), '(y, wage_gam.link, wage_gam.distribution)\n', (1582, 1623), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((2987, 3002), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (2999, 3002), True, 'import numpy as np\n'), ((3016, 3027), 'copy.deepcopy', 'deepcopy', (['X'], {}), '(X)\n', (3024, 3027), False, 'from copy import deepcopy\n'), ((3074, 3092), 'copy.deepcopy', 'deepcopy', (['y.values'], {}), '(y.values)\n', (3082, 3092), False, 'from copy import deepcopy\n'), ((3145, 3162), 'copy.deepcopy', 'deepcopy', (['weights'], {}), '(weights)\n', (3153, 3162), False, 'from copy import deepcopy\n'), ((5846, 5892), 'numpy.allclose', 'np.allclose', (['slopes[0]', 'slopes[1]'], {'atol': '(0.0001)'}), '(slopes[0], slopes[1], atol=0.0001)\n', (5857, 5892), True, 'import numpy as np\n'), ((1084, 1109), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1097, 1109), False, 'import pytest\n'), ((1314, 1339), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1327, 1339), False, 'import pytest\n'), ((1349, 1401), 'pygam.utils.check_y', 'check_y', (['y_str', 'wage_gam.link', 'wage_gam.distribution'], {}), '(y_str, wage_gam.link, wage_gam.distribution)\n', (1356, 1401), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((1794, 1819), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1807, 1819), False, 'import pytest\n'), ((2119, 2144), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2132, 2144), False, 'import pytest\n'), ((2154, 2229), 'pygam.utils.check_y', 'check_y', (['(y + 0.1)', 'default_gam.link', 'default_gam.distribution'], {'verbose': '(False)'}), '(y + 0.1, default_gam.link, default_gam.distribution, verbose=False)\n', (2161, 2229), False, 'from pygam.utils 
import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((2315, 2340), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2328, 2340), False, 'import pytest\n'), ((2350, 2380), 'pygam.utils.check_X', 'check_X', (["['hi']"], {'verbose': '(False)'}), "(['hi'], verbose=False)\n", (2357, 2380), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((2469, 2494), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2482, 2494), False, 'import pytest\n'), ((2576, 2601), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2589, 2601), False, 'import pytest\n'), ((2713, 2738), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2726, 2738), False, 'import pytest\n'), ((3241, 3266), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3254, 3266), False, 'import pytest\n'), ((3312, 3337), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3325, 3337), False, 'import pytest\n'), ((3383, 3408), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3396, 3408), False, 'import pytest\n'), ((3499, 3524), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3512, 3524), False, 'import pytest\n'), ((3562, 3587), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3575, 3587), False, 'import pytest\n'), ((3628, 3653), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3641, 3653), False, 'import pytest\n'), ((3704, 3729), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3717, 3729), False, 'import pytest\n'), ((3780, 3805), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3793, 3805), False, 'import pytest\n'), ((3854, 3879), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3867, 3879), False, 'import pytest\n'), ((3940, 
3965), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3953, 3965), False, 'import pytest\n'), ((4021, 4046), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4034, 4046), False, 'import pytest\n'), ((4099, 4124), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4112, 4124), False, 'import pytest\n'), ((4185, 4210), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4198, 4210), False, 'import pytest\n'), ((4271, 4296), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4284, 4296), False, 'import pytest\n'), ((4352, 4377), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4365, 4377), False, 'import pytest\n'), ((4430, 4455), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4443, 4455), False, 'import pytest\n'), ((4555, 4580), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4568, 4580), False, 'import pytest\n'), ((4641, 4666), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4654, 4666), False, 'import pytest\n'), ((4722, 4747), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4735, 4747), False, 'import pytest\n'), ((4800, 4825), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4813, 4825), False, 'import pytest\n'), ((5233, 5262), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5246, 5262), False, 'import pytest\n'), ((5272, 5284), 'pygam.utils.sig_code', 'sig_code', (['(-1)'], {}), '(-1)\n', (5280, 5284), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((5297, 5308), 'pygam.utils.sig_code', 'sig_code', (['(0)'], {}), '(0)\n', (5305, 5308), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((5329, 5346), 'pygam.utils.sig_code', 'sig_code', 
(['(0.00101)'], {}), '(0.00101)\n', (5337, 5346), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((5366, 5382), 'pygam.utils.sig_code', 'sig_code', (['(0.0101)'], {}), '(0.0101)\n', (5374, 5382), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((5401, 5417), 'pygam.utils.sig_code', 'sig_code', (['(0.0501)'], {}), '(0.0501)\n', (5409, 5417), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((5436, 5451), 'pygam.utils.sig_code', 'sig_code', (['(0.101)'], {}), '(0.101)\n', (5444, 5451), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((5947, 5971), 'pygam.utils.check_iterable_depth', 'check_iterable_depth', (['it'], {}), '(it)\n', (5967, 5971), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((5988, 6025), 'pygam.utils.check_iterable_depth', 'check_iterable_depth', (['it'], {'max_depth': '(2)'}), '(it, max_depth=2)\n', (6008, 6025), False, 'from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth\n'), ((2512, 2530), 'numpy.ones', 'np.ones', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (2519, 2530), True, 'import numpy as np\n'), ((2619, 2629), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2626, 2629), True, 'import numpy as np\n'), ((2758, 2768), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2765, 2768), True, 'import numpy as np\n'), ((2770, 2780), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2777, 2780), True, 'import numpy as np\n'), ((5123, 5145), 'numpy.logspace', 'np.logspace', (['(10)', '(12)', '(3)'], {}), '(10, 12, 3)\n', (5134, 5145), True, 'import numpy as np\n'), ((6211, 6253), 'mock.patch', 'patch', (['"""pygam.utils.SKSPIMPORT"""'], {'new': '(False)'}), "('pygam.utils.SKSPIMPORT', new=False)\n", (6216, 6253), False, 'from mock import patch\n')] |
from data.import_data import tokenize, import_data
from gensim.models import Word2Vec
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
class PaddedSentenceTransformer(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer: tokenize, pad, and word2vec-encode sentences.

    Each raw sentence is tokenized, padded/truncated to exactly
    ``sentence_length`` tokens, and every token is replaced by its
    ``encoder_size``-dimensional word2vec embedding learned during ``fit``.
    """

    def __init__(self, sentence_length=50, encoder_size=100, padding_token='\0', unknown_token='$unknown$'):
        """
        sentence_length is in tokens
        """
        self.sentence_length = sentence_length
        self.encoder_size = encoder_size
        self.padding_token = padding_token
        self.unknown_token = unknown_token

    def pad_sentence_(self, sentence):
        """Pad or truncate a tokenized sentence to ``self.sentence_length``.

        Truncated sentences keep a trailing '.' token; short ones are
        right-padded with ``self.padding_token``.
        """
        if len(sentence) > self.sentence_length:
            sentence = sentence[:self.sentence_length - 1]
            sentence.append('.')
        elif len(sentence) < self.sentence_length:
            for _ in range(self.sentence_length - len(sentence)):
                sentence.append(self.padding_token)
        assert(len(sentence) == self.sentence_length)
        return sentence

    def encode_sentence_(self, tokenized_sentence):
        """Map each token to its embedding; unseen tokens fall back to the
        unknown-token vector (guaranteed present in the vocabulary by fit_)."""
        out = []
        for token in tokenized_sentence:
            try:
                out.append(self.encoder[token])
            except KeyError:
                out.append(self.encoder[self.unknown_token])
        # BUGFIX: removed leftover debug prints that compared the sentence
        # length against a hard-coded 50 instead of self.sentence_length,
        # spamming stdout whenever a non-default length was configured.
        return out

    def process_data_(self, X):
        """Tokenize and pad every raw sentence in X."""
        tokenized_sentences = [tokenize(s) for s in X]
        return [self.pad_sentence_(s) for s in tokenized_sentences]

    def fit_(self, padded_sentences):
        """Train the word2vec encoder on the padded corpus."""
        self.encoder = Word2Vec(
            # make sure the unknown token ends up in our vocabulary
            padded_sentences + [[self.unknown_token]],
            workers=8,
            min_count=0,
            max_vocab_size=None,
            size=self.encoder_size)

    def fit(self, X, y=None):
        """Fit the encoder on X; returns self (sklearn convention)."""
        padded_sentences = self.process_data_(X)
        self.fit_(padded_sentences)
        return self

    def transform_(self, padded_sentences):
        """Encode already-padded sentences into one numpy array."""
        return np.array([self.encode_sentence_(s)
                         for s in padded_sentences])

    def fit_transform(self, X, y=None):
        """Fit the encoder and transform X in one pass (avoids re-tokenizing)."""
        padded_sentences = self.process_data_(X)
        self.fit_(padded_sentences)
        return self.transform_(padded_sentences)

    def transform(self, X, y=None):
        """Tokenize, pad, and encode X with the already-fitted encoder."""
        padded_sentences = self.process_data_(X)
        return self.transform_(padded_sentences)
if __name__ == '__main__':
    # Smoke test: every encoded sentence must have the configured length.
    train, _test = import_data()
    n_samples = len(train.text)
    sentences = np.array(train.text[:n_samples])
    transformer = PaddedSentenceTransformer()
    encoded = transformer.fit_transform(sentences)
    for sentence, vectors in zip(sentences, encoded):
        assert len(vectors) == transformer.sentence_length
| [
"data.import_data.tokenize",
"numpy.array",
"data.import_data.import_data",
"gensim.models.Word2Vec"
] | [((2631, 2644), 'data.import_data.import_data', 'import_data', ([], {}), '()\n', (2642, 2644), False, 'from data.import_data import tokenize, import_data\n'), ((2682, 2707), 'numpy.array', 'np.array', (['tr.text[:count]'], {}), '(tr.text[:count])\n', (2690, 2707), True, 'import numpy as np\n'), ((1745, 1869), 'gensim.models.Word2Vec', 'Word2Vec', (['(padded_sentences + [[self.unknown_token]])'], {'workers': '(8)', 'min_count': '(0)', 'max_vocab_size': 'None', 'size': 'self.encoder_size'}), '(padded_sentences + [[self.unknown_token]], workers=8, min_count=0,\n max_vocab_size=None, size=self.encoder_size)\n', (1753, 1869), False, 'from gensim.models import Word2Vec\n'), ((1591, 1602), 'data.import_data.tokenize', 'tokenize', (['s'], {}), '(s)\n', (1599, 1602), False, 'from data.import_data import tokenize, import_data\n')] |
import numpy
from sklearn.datasets import load_iris
# BUGFIX: `tree` was used below without being imported, causing a NameError.
from sklearn import tree

# Load the iris data set (features + class labels).
iris = load_iris()
print(iris.feature_names)
print(iris.target_names)
# Feature matrix.
print(iris.data)
# Class labels.
print(iris.target)

# First 50 rows belong to the setosa class.
setosa = iris.data[0:50]
print(setosa)
s_data = iris.target[0:50]
print(s_data)

# Hold out one sample per class (indices 0, 50, 100) for testing.
x = [0, 50, 100]
# Training labels / features with the held-out rows removed.
only_target_training = numpy.delete(iris.target, x, axis=0)
only_data_training = numpy.delete(iris.data, x, axis=0)
print(only_target_training)
print(only_data_training)
print(only_target_training.size)

# The three held-out samples form the test set.
test_target = iris.target[x]
test_data = iris.data[x]
print(test_target)
print(test_data)

# Train a decision tree and predict the held-out samples.
clf = tree.DecisionTreeClassifier()
trained = clf.fit(only_data_training, only_target_training)
output = trained.predict(test_data)
print(output)
| [
"sklearn.datasets.load_iris",
"numpy.delete"
] | [((80, 91), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (89, 91), False, 'from sklearn.datasets import load_iris\n'), ((378, 414), 'numpy.delete', 'numpy.delete', (['iris.target', 'x'], {'axis': '(0)'}), '(iris.target, x, axis=0)\n', (390, 414), False, 'import numpy\n'), ((432, 466), 'numpy.delete', 'numpy.delete', (['iris.data', 'x'], {'axis': '(0)'}), '(iris.data, x, axis=0)\n', (444, 466), False, 'import numpy\n')] |
"""
make_bmap.py
Creates an image that can be used as a bump mapping texture.
<NAME>
shader.in
"""
import numpy as np
from PIL import Image
from math import sqrt
def main():
    """Generate a 256x256 normal-map texture with four spherical bumps
    and save it as bmap.png."""
    width, height = 256, 256
    radius = 32.0
    radius_sq = radius * radius
    bump_centers = [(64, 64), (192, 64), (64, 192), (192, 192)]

    normals = np.zeros([width, height, 3], np.float32)
    for i in range(width):
        for j in range(height):
            hit = False
            for cx, cy in bump_centers:
                dx = i - cx
                dy = j - cy
                dist_sq = dx * dx + dy * dy
                if dist_sq < radius_sq:
                    # Inside a bump: sphere surface normal.
                    normals[i][j][0] = dx / radius
                    normals[i][j][1] = dy / radius
                    normals[i][j][2] = sqrt(radius_sq - dist_sq) / radius
                    hit = True
            if not hit:
                # Flat background: normal points straight up.
                normals[i][j][0] = 0.0
                normals[i][j][1] = 0.0
                normals[i][j][2] = 1.0

    # Remap components from [-1, 1] into the [0, 255] byte range.
    normals = 255.0 * 0.5 * (normals + 1.0)
    Image.fromarray(np.array(normals, np.uint8)).save("bmap.png")


# call main
if __name__ == '__main__':
    main()
| [
"numpy.array",
"numpy.zeros",
"math.sqrt",
"PIL.Image.fromarray"
] | [((253, 286), 'numpy.zeros', 'np.zeros', (['[NX, NY, 3]', 'np.float32'], {}), '([NX, NY, 3], np.float32)\n', (261, 286), True, 'import numpy as np\n'), ((1001, 1025), 'numpy.array', 'np.array', (['nmap', 'np.uint8'], {}), '(nmap, np.uint8)\n', (1009, 1025), True, 'import numpy as np\n'), ((1037, 1057), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1052, 1057), False, 'from PIL import Image\n'), ((719, 746), 'math.sqrt', 'sqrt', (['(rsq - (x * x + y * y))'], {}), '(rsq - (x * x + y * y))\n', (723, 746), False, 'from math import sqrt\n')] |
import sys
import numpy as np
def coadd_cameras(flux_cam, wave_cam, ivar_cam, mask_cam=None):
    """Adds spectra from the three cameras as long as they have the same number of wavelength bins.

    This is not a replacement for desispec.coaddition.coadd_cameras,
    but a simpler (versatile and faster) implementation which uses only numpy.
    This also assumes the input spectra grid are already aligned
    (i.e. same wavelength grid in the overlapping regions),
    This is likely the case if the spectra are from the official data releases.

    Parameters
    ----------
    flux_cam : dict
        Dictionary containing the flux values from the three cameras
    wave_cam : dict
        Dictionary containing the wavelength values from the three cameras
    ivar_cam : dict
        Dictionary containing the inverse variance values from the three cameras
    mask_cam : dict, optional
        Dictionary containing the mask values from the three cameras

    Returns
    -------
    Tuple
        returns the combined flux, wavelength and inverse variance grids
        (plus the combined boolean mask when ``mask_cam`` is given).
    """
    sbands = np.array(["b", "r", "z"])  # bands sorted by inc. wavelength

    # create wavelength array
    wave = None
    tolerance = 0.0001  # A , tolerance
    shifts = {}
    for b in sbands:
        wave_camera = np.atleast_2d(wave_cam[b].copy())
        if wave is None:
            wave = wave_camera
        else:
            # Offset (in bins) of this camera's first wavelength in the
            # combined grid; overlapping bins are assumed aligned.
            shifts[b] = np.sum(
                np.all((wave + tolerance) < wave_camera[:, 0][:, None], axis=0)
            )
            wave = np.append(
                wave,
                wave_camera[
                    :, np.all(wave_camera > (wave[:, -1][:, None] + tolerance), axis=0)
                ],
                axis=1,
            )
    nwave = wave.shape[1]
    blue = sbands[0]
    ntarget = len(flux_cam[blue])
    flux = None
    ivar = None
    mask = None
    for b in sbands:
        flux_camera = np.atleast_2d(flux_cam[b].copy())
        ivar_camera = np.atleast_2d(ivar_cam[b].copy())
        ivar_camera[ivar_camera <= 0] = 0
        if mask_cam is not None:
            mask_camera = np.atleast_2d(mask_cam[b].astype(bool))
            ivar_camera[mask_camera] = 0
        if flux is None:
            # First (blue) camera initializes the combined arrays.
            flux = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
            flux[:, : flux_camera.shape[1]] += flux_camera * ivar_camera
            ivar = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
            ivar[:, : ivar_camera.shape[1]] += ivar_camera
            # BUGFIX: previously tested `mask is not None`, which is always
            # False here (mask starts as None), so the combined mask was
            # never initialized and None was returned even when mask_cam
            # was provided.
            if mask_cam is not None:
                mask = np.ones((ntarget, nwave), dtype=mask_cam[blue].dtype)
                mask[:, : mask_camera.shape[1]] &= mask_camera
        else:
            flux[:, shifts[b] : (shifts[b] + flux_camera.shape[1])] += (
                flux_camera * ivar_camera
            )
            ivar[:, shifts[b] : (shifts[b] + ivar_camera.shape[1])] += ivar_camera
            if mask is not None:
                mask[:, shifts[b] : (shifts[b] + mask_camera.shape[1])] &= mask_camera

    # Inverse-variance weighted mean; fully masked bins yield 0/0 which is
    # zeroed out below, so silence the expected numpy warnings.
    with np.errstate(divide="ignore", invalid="ignore"):
        flux = flux / ivar
    flux[~np.isfinite(flux)] = 0
    ivar[~np.isfinite(ivar)] = 0

    if wave_cam[blue].ndim == 1:
        wave = np.squeeze(wave)

    if mask_cam is not None:
        return flux, wave, ivar, mask
    else:
        return flux, wave, ivar
"numpy.ones",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.isfinite",
"numpy.all"
] | [((1095, 1120), 'numpy.array', 'np.array', (["['b', 'r', 'z']"], {}), "(['b', 'r', 'z'])\n", (1103, 1120), True, 'import numpy as np\n'), ((3165, 3181), 'numpy.squeeze', 'np.squeeze', (['wave'], {}), '(wave)\n', (3175, 3181), True, 'import numpy as np\n'), ((2246, 2300), 'numpy.zeros', 'np.zeros', (['(ntarget, nwave)'], {'dtype': 'flux_cam[blue].dtype'}), '((ntarget, nwave), dtype=flux_cam[blue].dtype)\n', (2254, 2300), True, 'import numpy as np\n'), ((2393, 2447), 'numpy.zeros', 'np.zeros', (['(ntarget, nwave)'], {'dtype': 'flux_cam[blue].dtype'}), '((ntarget, nwave), dtype=flux_cam[blue].dtype)\n', (2401, 2447), True, 'import numpy as np\n'), ((3060, 3077), 'numpy.isfinite', 'np.isfinite', (['flux'], {}), '(flux)\n', (3071, 3077), True, 'import numpy as np\n'), ((3093, 3110), 'numpy.isfinite', 'np.isfinite', (['ivar'], {}), '(ivar)\n', (3104, 3110), True, 'import numpy as np\n'), ((1454, 1515), 'numpy.all', 'np.all', (['(wave + tolerance < wave_camera[:, 0][:, None])'], {'axis': '(0)'}), '(wave + tolerance < wave_camera[:, 0][:, None], axis=0)\n', (1460, 1515), True, 'import numpy as np\n'), ((2563, 2616), 'numpy.ones', 'np.ones', (['(ntarget, nwave)'], {'dtype': 'mask_cam[blue].dtype'}), '((ntarget, nwave), dtype=mask_cam[blue].dtype)\n', (2570, 2616), True, 'import numpy as np\n'), ((1636, 1698), 'numpy.all', 'np.all', (['(wave_camera > wave[:, -1][:, None] + tolerance)'], {'axis': '(0)'}), '(wave_camera > wave[:, -1][:, None] + tolerance, axis=0)\n', (1642, 1698), True, 'import numpy as np\n')] |
import glob
import os
import h5py
import keras.layers as layers
import numpy as np
import tensorflow as tf
from keras import backend, optimizers, regularizers
from keras.models import Model
import joblib
import optuna
from optuna.integration import KerasPruningCallback
from optuna.visualization import *
from utils import format_data, slicer, split
from utils_keras import loss_norm_error
# Model name
PREFIX = "model_pred-d_{}-"
SUFFIX = "{}.h5"
def objective(trial):
    """Optuna objective: build, train, and save a dense prediction model.

    Samples the layer count, widths, activations, dropout/batch-norm flags,
    learning rate and batch size from `trial`, trains for 100 epochs with
    median pruning, saves the model to disk, and returns the final
    validation MSE (minimized by the study).

    NOTE(review): reads module globals DT_FL, DT_DST, WD, N_TRAIN, N_VALID,
    PREFIX, SUFFIX, RUN_VERSION — these are set in the __main__ block, so
    this function only works when the file is run as a script.
    """
    # Open data file
    f = h5py.File(DT_FL, "r")
    dt = f[DT_DST]
    # Format data for LSTM training
    x_data, y_data = format_data(dt, wd=WD, get_y=True)
    x_data = np.squeeze(x_data)
    # Split data and get slices
    idxs = split(x_data.shape[0], N_TRAIN, N_VALID)
    slc_trn, slc_vld, slc_tst = slicer(x_data.shape, idxs)
    # Get data; the model predicts the *delta* to the next sample,
    # hence the subtraction of the inputs from the targets.
    x_train = x_data[slc_trn[0]]
    y_train = y_data[slc_trn[0]] - x_train
    x_val = x_data[slc_vld[0]]
    y_val = y_data[slc_vld[0]] - x_val
    # Limits and options
    # Filters
    # n_lstm = [[4, 128], [4, 128], [4, 128]]
    n_lstm = [[4, 196], [4, 196], [4, 196]]
    # Regularizer
    l2_lm = [1e-7, 1e-3]
    # Activation functions
    act_opts = ["relu", "elu", "tanh", "linear"]
    # Latent space cfg (currently unused in this search)
    lt_sz = [5, 150]
    lt_dv = [0.3, 0.7]
    # Learning rate
    lm_lr = [1e-5, 1]
    # Clear tensorflow session
    tf.keras.backend.clear_session()
    # Input
    inputs = layers.Input(shape=x_train.shape[1:])
    p = inputs
    # Dense layers
    # n_lyr_dense = trial.suggest_int("n_lyr_dense", 0, 2)
    n_lyr_dense = trial.suggest_int("n_lyr_dense", 1, 3)
    for i in range(n_lyr_dense):
        # For the current layer
        # Get number of filters
        l = trial.suggest_int("n{}_dense".format(i), n_lstm[i][0], n_lstm[i][1])
        # Get the activation function
        act = trial.suggest_categorical("d{}_activation".format(i), act_opts)
        # Regularization value
        # NOTE(review): l2_reg is built but not applied — the
        # kernel_regularizer argument below is commented out.
        l2 = trial.suggest_loguniform("d{}_l2".format(i), l2_lm[0], l2_lm[1])
        l2_reg = regularizers.l2(l=l2)
        # Set layer
        p = layers.Dense(
            l,
            activation=act,
            # kernel_regularizer=l2_reg,
            name="{}_dense".format(i + 1),
        )(p)
        # Dropout
        dp = trial.suggest_uniform("d{}_dropout".format(i), 0, 1)
        p = layers.Dropout(dp, name="{}_dropout_dense".format(i + 1))(p)
        # Optional batch normalization after dropout.
        bn = trial.suggest_categorical("d{}_batchnorm".format(i), [0, 1])
        if bn == 1:
            p = layers.BatchNormalization(name="{}_bnorm_dense".format(i + 1))(p)
    # Linear output head matching the target width.
    out = layers.Dense(y_data.shape[1], activation="linear")(p)
    pred = Model(inputs, out, name="auto_encoder_add")
    # opt_opts = ["adam", "nadam", "adamax", "RMSprop"]
    # opt = trial.suggest_categorical("optimizer", opt_opts)
    opt = "adam"
    if opt == "adam":
        k_optf = optimizers.Adam
    elif opt == "nadam":
        k_optf = optimizers.Nadam
    elif opt == "adamax":
        k_optf = optimizers.Adamax
    elif opt == "RMSprop":
        k_optf = optimizers.RMSprop
    lr = trial.suggest_loguniform("lr", lm_lr[0], lm_lr[1])
    if lr > 0:
        k_opt = k_optf(learning_rate=lr)
    else:
        k_opt = k_optf()
    pred.compile(optimizer=k_opt, loss="mse", metrics=["mse", loss_norm_error])
    batch_size = int(trial.suggest_uniform("batch_sz", 2, 32))
    pred.summary()
    hist = pred.fit(
        x_train,
        y_train,
        epochs=100,
        batch_size=batch_size,
        shuffle=True,
        validation_data=(x_val, y_val),
        # Median pruner aborts unpromising trials mid-training.
        callbacks=[KerasPruningCallback(trial, "val_mse")],
        verbose=1,
    )
    # Save each trial's model; clean_models() keeps only the best one.
    txt = PREFIX + SUFFIX
    pred.save(txt.format(RUN_VERSION, trial.number))
    return hist.history["val_mse"][-1]
def clean_models(study):
    """Keep only the best trial's model file and delete the rest.

    The best model is renamed to the prefix-only name (no trial number);
    every remaining file matching this run's prefix is then removed.
    """
    best_number = study.best_trial.number
    template = PREFIX + SUFFIX
    # Drop the trailing '-' from the prefix for the final file name.
    final_name = PREFIX.format(RUN_VERSION)[:-1] + ".h5"
    os.rename(template.format(RUN_VERSION, best_number), final_name)
    for leftover in glob.glob(PREFIX.format(RUN_VERSION) + "*"):
        os.remove(leftover)
def main():
    """Drive the hyperparameter search end to end."""
    # Median pruning stops unpromising trials early.
    pruner = optuna.pruners.MedianPruner()
    study = optuna.create_study(direction="minimize", pruner=pruner)
    # Search budget: at most 100 trials or 1600 seconds.
    study.optimize(objective, n_trials=100, timeout=1600)
    # Keep only the best model on disk.
    clean_models(study)
    # Persist the study for later inspection.
    joblib.dump(study, study_nm.format(RUN_VERSION))
if __name__ == "__main__":
    # NOTE(review): these names are read as module globals by objective(),
    # so the search only works when this file is run as a script.
    # Study naming
    study_nm = "study_d_v{}.pkl"
    # File to be used
    DT_FL = "data_compact.h5"
    # Dataset to be used
    DT_DST = "model_ae-smp_4_scaled"
    # Split train test and validation datasets
    N_TRAIN = 0.8
    N_VALID = 0.1
    # Window size to be used to predict the next sample
    WD = 2
    # Current search run
    RUN_VERSION = 1
    main()
| [
"numpy.squeeze",
"h5py.File",
"utils.format_data",
"utils.slicer",
"keras.layers.Input",
"keras.models.Model",
"optuna.pruners.MedianPruner",
"keras.regularizers.l2",
"keras.layers.Dense",
"optuna.integration.KerasPruningCallback",
"tensorflow.keras.backend.clear_session",
"utils.split",
"os... | [((505, 526), 'h5py.File', 'h5py.File', (['DT_FL', '"""r"""'], {}), "(DT_FL, 'r')\n", (514, 526), False, 'import h5py\n'), ((604, 638), 'utils.format_data', 'format_data', (['dt'], {'wd': 'WD', 'get_y': '(True)'}), '(dt, wd=WD, get_y=True)\n', (615, 638), False, 'from utils import format_data, slicer, split\n'), ((653, 671), 'numpy.squeeze', 'np.squeeze', (['x_data'], {}), '(x_data)\n', (663, 671), True, 'import numpy as np\n'), ((715, 755), 'utils.split', 'split', (['x_data.shape[0]', 'N_TRAIN', 'N_VALID'], {}), '(x_data.shape[0], N_TRAIN, N_VALID)\n', (720, 755), False, 'from utils import format_data, slicer, split\n'), ((788, 814), 'utils.slicer', 'slicer', (['x_data.shape', 'idxs'], {}), '(x_data.shape, idxs)\n', (794, 814), False, 'from utils import format_data, slicer, split\n'), ((1371, 1403), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (1401, 1403), True, 'import tensorflow as tf\n'), ((1429, 1466), 'keras.layers.Input', 'layers.Input', ([], {'shape': 'x_train.shape[1:]'}), '(shape=x_train.shape[1:])\n', (1441, 1466), True, 'import keras.layers as layers\n'), ((2655, 2698), 'keras.models.Model', 'Model', (['inputs', 'out'], {'name': '"""auto_encoder_add"""'}), "(inputs, out, name='auto_encoder_add')\n", (2660, 2698), False, 'from keras.models import Model\n'), ((2037, 2058), 'keras.regularizers.l2', 'regularizers.l2', ([], {'l': 'l2'}), '(l=l2)\n', (2052, 2058), False, 'from keras import backend, optimizers, regularizers\n'), ((2589, 2639), 'keras.layers.Dense', 'layers.Dense', (['y_data.shape[1]'], {'activation': '"""linear"""'}), "(y_data.shape[1], activation='linear')\n", (2601, 2639), True, 'import keras.layers as layers\n'), ((4119, 4133), 'os.remove', 'os.remove', (['mdl'], {}), '(mdl)\n', (4128, 4133), False, 'import os\n'), ((4286, 4315), 'optuna.pruners.MedianPruner', 'optuna.pruners.MedianPruner', ([], {}), '()\n', (4313, 4315), False, 'import optuna\n'), ((3575, 3613), 
'optuna.integration.KerasPruningCallback', 'KerasPruningCallback', (['trial', '"""val_mse"""'], {}), "(trial, 'val_mse')\n", (3595, 3613), False, 'from optuna.integration import KerasPruningCallback\n')] |
import numpy as np
from scipy.spatial import distance_matrix
def epsilon_greedy_policy(epsilon, state, q_values, options):
    """Pick an action from `options`: random with probability `epsilon`,
    otherwise uniformly among the best-valued valid actions."""
    candidates = np.array(options)
    explore = np.random.random() <= epsilon
    if explore:
        return np.random.choice(candidates)
    # Exploit: break ties among maximal Q-values at random.
    row = q_values[state]
    best_value = np.max(row[candidates])
    best_actions = np.intersect1d(candidates, np.where(row == best_value))
    return np.random.choice(best_actions)
def update_qvalues(q_values, distances, state, action, alpha, gamma):
    """One Q-learning update for (state, action), applied in place.

    The reward is the negated travel distance. When not at the depot
    (state 0), the `state -> 0` entry is also nudged with a 100x smaller
    learning rate so returning to the start remains learnable.
    Returns the (mutated) Q-table and the immediate reward.
    """
    bootstrap = q_values[action].max()
    reward = -distances[state, action]
    q_values[state, action] = (1 - alpha) * q_values[state, action] + alpha * (reward + gamma * bootstrap)
    if state != 0:
        home_bootstrap = q_values[0].max()
        home_reward = -distances[state, 0]
        small_alpha = alpha / 100
        q_values[state, 0] = (1 - small_alpha) * q_values[state, 0] + small_alpha * (home_reward + gamma * home_bootstrap)
    return q_values, reward
class QModel:
    """Tabular Q-learning solver for a TSP-like tour over `points`."""

    def __init__(self, points):
        # Fixed seed so runs are reproducible.
        np.random.seed(42)
        self.points = points
        self.n = len(points)
        # Pairwise Euclidean distances between all points.
        self.distances = distance_matrix(self.points, self.points)

    def learn(self, distances, epochs=100, epsilon0=1.0, alpha0=0.1, gamma=0.97, decay=0.0):
        """Train the Q-table by repeatedly rolling out tours from point 0.

        Epsilon and alpha both decay as 1/(1 + epoch*decay). Returns the
        learned Q-table.
        """
        rewards = []
        q_values = np.zeros([self.n, self.n])
        # Forbid self-transitions by making the diagonal -inf.
        q_values[range(self.n), range(self.n)] = -np.inf
        for i in range(epochs):
            total_reward = 0
            state = 0
            path = [state]
            options = list(range(self.n))
            alpha = alpha0 / (1 + i * decay)
            epsilon = epsilon0 / (1 + i * decay)
            # Visit every remaining point once (rewards are negative distances).
            while len(options) > 1:
                options.remove(state)
                action = epsilon_greedy_policy(epsilon, state, q_values, options)
                q_values, reward = update_qvalues(
                    q_values, distances, state, action, alpha, gamma
                )
                total_reward += reward
                path.append(action)
                state = action
            # back to start
            action = 0
            q_values, reward = update_qvalues(
                q_values, distances, state, action, alpha, gamma
            )
            total_reward += reward
            path.append(action)
            rewards.append(total_reward)
            if i % 200 == 0:
                # NOTE(review): this prints only the last single-step reward,
                # not total_reward — presumably total_reward was intended.
                print("reward", reward)
        return q_values

    def solve(self):
        """Train, then greedily extract the tour and its total length."""
        q_values = self.learn(self.distances, epochs=400, epsilon0=1, gamma=-1, alpha0=1, decay=0.05)
        state = 0
        path = [state]
        options = list(range(self.n))
        distance = 0
        # Greedy rollout (epsilon=0) over the learned table.
        while len(options) > 1:
            options.remove(state)
            action = epsilon_greedy_policy(0, state, q_values, options)
            distance += self.distances[state, action]
            path.append(action)
            state = action
        # Close the tour back at the start.
        path.append(0)
        distance += self.distances[state, 0]
        return {
            "ordered_points": np.array(self.points)[path].tolist(),
            "distance": distance,
        }
| [
"numpy.random.random",
"numpy.random.choice",
"numpy.where",
"scipy.spatial.distance_matrix",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.random.seed"
] | [((137, 154), 'numpy.array', 'np.array', (['options'], {}), '(options)\n', (145, 154), True, 'import numpy as np\n'), ((163, 181), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (179, 181), True, 'import numpy as np\n'), ((213, 243), 'numpy.max', 'np.max', (['q_values[state][valid]'], {}), '(q_values[state][valid])\n', (219, 243), True, 'import numpy as np\n'), ((345, 374), 'numpy.random.choice', 'np.random.choice', (['max_actions'], {}), '(max_actions)\n', (361, 374), True, 'import numpy as np\n'), ((402, 425), 'numpy.random.choice', 'np.random.choice', (['valid'], {}), '(valid)\n', (418, 425), True, 'import numpy as np\n'), ((1015, 1033), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1029, 1033), True, 'import numpy as np\n'), ((1117, 1158), 'scipy.spatial.distance_matrix', 'distance_matrix', (['self.points', 'self.points'], {}), '(self.points, self.points)\n', (1132, 1158), False, 'from scipy.spatial import distance_matrix\n'), ((1293, 1319), 'numpy.zeros', 'np.zeros', (['[self.n, self.n]'], {}), '([self.n, self.n])\n', (1301, 1319), True, 'import numpy as np\n'), ((288, 326), 'numpy.where', 'np.where', (['(q_values[state] == max_value)'], {}), '(q_values[state] == max_value)\n', (296, 326), True, 'import numpy as np\n'), ((3004, 3025), 'numpy.array', 'np.array', (['self.points'], {}), '(self.points)\n', (3012, 3025), True, 'import numpy as np\n')] |
""" Higher-level transformation functions """
import pandas as pd
import scipy.stats
import numpy as np
from pathlib import Path
from luna.pathology.spatial.stats import *
def generate_k_function_statistics(cell_paths, method_data, main_index=None):
    """
    Compute K-function spatial statistics on given cell-data

    Args:
        cell_paths (str or list[str]): paths to a single or multiple FOV regions
        method_data (dict): Configuration:
            "index": (str, optional) Column containting the patient/desired ID, if available (overrides main_index)
            "phenotype1" : {
                'name' : (str) Column name to query
                'value' : (str) Phenotype string to match (e.g. CD68)
            },
            "phenotype2" : {
                'name' : (str) Column name to query
                'value' : (str) Phenotype string to match (e.g. panCK)
            },
            "count" : (bool) Flag to compute counting stats.
            "radius" : (float) Radius cutoff
            "intensity" : (str, optional) Column containing intensity information
            "distance" : (bool) Flag to compute intensity-distance stats.

    Returns:
        pd.DataFrame: spatial statistics aggregated over FOVs
    """
    if isinstance(cell_paths, str):
        cell_paths = [cell_paths]

    print (cell_paths)

    agg_k_data = {}

    pheno1_col = method_data["phenotype1"]["name"]
    pheno1_val = method_data["phenotype1"]["value"]
    pheno2_col = method_data["phenotype2"]["name"]
    pheno2_val = method_data["phenotype2"]["value"]
    index_col = method_data.get("index", None)
    radius = method_data["radius"]
    count = method_data["count"]
    distance = method_data["distance"]
    intensity_col = method_data.get("intensity", None)

    indices = set()

    for cell_path in cell_paths:
        if Path(cell_path).suffix == ".parquet":
            df = pd.read_parquet( cell_path )
        elif Path(cell_path).suffix == ".csv":
            df = pd.read_csv( cell_path )
        else:
            raise RuntimeError(f"Invalid input data type {cell_path}")

        # Look up the index for this slice
        if index_col:
            index = df[method_data['index']].iloc[0]
            indices.add(index)

        # Create the data arrays
        pheno1 = df[df[pheno1_col] == pheno1_val]
        pheno2 = df[df[pheno2_col] == pheno2_val]

        p1XY = np.array(pheno1[["Centroid X µm","Centroid Y µm"]])
        p2XY = np.array(pheno2[["Centroid X µm","Centroid Y µm"]])

        if intensity_col:
            I = np.array(pheno2[intensity_col])
        else:
            I = []
            if distance:
                raise RuntimeError("Can't compute intensity-distance function without intensity information")

        if p1XY.size == 0:
            print(f"WARNING: List of phenotype 1 cells ({pheno1_val}) is empty for {index}")
        if p2XY.size == 0:
            print(f"WARNING: List of phenotype 2 cells ({pheno2_val}) is empty for {index}")

        # Compute the K function
        print (f"Running... {cell_path}")
        fov_k_data = Kfunction(p1XY, p2XY, radius, ls=True, count=count, intensity=I, distance=distance)

        for key in fov_k_data:
            if key in agg_k_data:
                # BUGFIX: np.append returns a NEW array; the result was
                # previously discarded, so only the first FOV ever
                # contributed to the aggregate statistics.
                agg_k_data[key] = np.append(agg_k_data[key], fov_k_data[key])
            else:
                agg_k_data[key] = fov_k_data[key]

    data_out = {}
    for kfunct in agg_k_data.keys():
        arr = agg_k_data[kfunct]
        # Guard the moment computations against an empty aggregate.
        if len(arr)==0: arr=[0]
        data_out.update(
            {
                f"For_{pheno1_val}_Find_{pheno2_val}_at{radius}_{kfunct}_{intensity_col}_mean": np.mean(arr),
                f"For_{pheno1_val}_Find_{pheno2_val}_at{radius}_{kfunct}_{intensity_col}_variance": np.var(arr),
                f"For_{pheno1_val}_Find_{pheno2_val}_at{radius}_{kfunct}_{intensity_col}_skew": scipy.stats.skew(arr),
                f"For_{pheno1_val}_Find_{pheno2_val}_at{radius}_{kfunct}_{intensity_col}_kurtosis": scipy.stats.kurtosis(arr)
            }
        )

    df_slice_out = pd.DataFrame(data_out, index=[0]).astype(np.float64)

    if main_index is None:
        if not len(indices)==1:
            raise RuntimeError (f"Multiple cell maps with different indices! Found: {indices}")
        main_index = indices.pop()

    df_slice_out['main_index'] = main_index
    df_slice_out = df_slice_out.set_index('main_index')

    print (df_slice_out)

    return df_slice_out
return df_slice_out | [
"numpy.mean",
"pandas.read_parquet",
"pandas.read_csv",
"pathlib.Path",
"numpy.append",
"numpy.array",
"pandas.DataFrame",
"numpy.var"
] | [((2513, 2565), 'numpy.array', 'np.array', (["pheno1[['Centroid X µm', 'Centroid Y µm']]"], {}), "(pheno1[['Centroid X µm', 'Centroid Y µm']])\n", (2521, 2565), True, 'import numpy as np\n'), ((2580, 2632), 'numpy.array', 'np.array', (["pheno2[['Centroid X µm', 'Centroid Y µm']]"], {}), "(pheno2[['Centroid X µm', 'Centroid Y µm']])\n", (2588, 2632), True, 'import numpy as np\n'), ((2009, 2035), 'pandas.read_parquet', 'pd.read_parquet', (['cell_path'], {}), '(cell_path)\n', (2024, 2035), True, 'import pandas as pd\n'), ((2675, 2706), 'numpy.array', 'np.array', (['pheno2[intensity_col]'], {}), '(pheno2[intensity_col])\n', (2683, 2706), True, 'import numpy as np\n'), ((4167, 4200), 'pandas.DataFrame', 'pd.DataFrame', (['data_out'], {'index': '[0]'}), '(data_out, index=[0])\n', (4179, 4200), True, 'import pandas as pd\n'), ((1954, 1969), 'pathlib.Path', 'Path', (['cell_path'], {}), '(cell_path)\n', (1958, 1969), False, 'from pathlib import Path\n'), ((2103, 2125), 'pandas.read_csv', 'pd.read_csv', (['cell_path'], {}), '(cell_path)\n', (2114, 2125), True, 'import pandas as pd\n'), ((3380, 3423), 'numpy.append', 'np.append', (['agg_k_data[key]', 'fov_k_data[key]'], {}), '(agg_k_data[key], fov_k_data[key])\n', (3389, 3423), True, 'import numpy as np\n'), ((3749, 3761), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (3756, 3761), True, 'import numpy as np\n'), ((3863, 3874), 'numpy.var', 'np.var', (['arr'], {}), '(arr)\n', (3869, 3874), True, 'import numpy as np\n'), ((2052, 2067), 'pathlib.Path', 'Path', (['cell_path'], {}), '(cell_path)\n', (2056, 2067), False, 'from pathlib import Path\n')] |
# ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: <NAME> (<EMAIL>)
# All rights reserved.
# ==============================================================================
""" """
import os, sys
from collections import namedtuple
import tensorflow as tf
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
from pyspark import RDD, StorageLevel
from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature
from preprocess.cleaners import basic_cleaners
from preprocess.text import text_to_sequence
from extensions.flite import Flite
def write_preprocessed_target_data(_id: int, key: str, codes: np.ndarray, codes_length: int, lang, filename: str):
    """Serialize one target code matrix as a TFRecord example.

    :param _id: numeric record id
    :param key: utterance key (file stem)
    :param codes: 2-D code matrix; its raw bytes plus width/length are stored
    :param codes_length: number of code frames (rows of ``codes``)
    :param lang: language tag, stored UTF-8 encoded
    :param filename: output tfrecord path
    """
    # tobytes() replaces ndarray.tostring(), which is deprecated and
    # removed in recent NumPy releases (identical byte output).
    raw_codes = codes.tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        'id': int64_feature([_id]),
        'key': bytes_feature([key.encode('utf-8')]),
        'lang': bytes_feature([lang.encode('utf-8')]),
        'codes': bytes_feature([raw_codes]),
        'codes_length': int64_feature([codes_length]),
        'codes_width': int64_feature([codes.shape[1]]),
    }))
    write_tfrecord(example, filename)
def write_preprocessed_source_data(_id: int, key: str, source: np.ndarray, text, phones: np.ndarray, phone_txt, speaker_id, age, gender, lang, filename: str):
    """Serialize source-side data (text/phoneme id sequences plus speaker
    metadata) as a single TFRecord example.

    :param _id: numeric record id
    :param key: utterance key (file stem)
    :param source: text id sequence (int array)
    :param text: cleaned transcript string
    :param phones: phoneme id sequence (int array)
    :param phone_txt: phoneme transcript string
    :param speaker_id: numeric speaker id
    :param age: speaker age
    :param gender: gender code (0/1)
    :param lang: language tag, stored UTF-8 encoded
    :param filename: output tfrecord path
    """
    # tobytes() replaces the deprecated (and since removed) tostring().
    raw_source = source.tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        'id': int64_feature([_id]),
        'key': bytes_feature([key.encode('utf-8')]),
        'source': bytes_feature([raw_source]),
        'source_length': int64_feature([len(source)]),
        'text': bytes_feature([text.encode('utf-8')]),
        'phone': bytes_feature([phones.tobytes()]),
        'phone_length': int64_feature([len(phones)]),
        'phone_txt': bytes_feature([phone_txt.encode('utf-8')]),
        'speaker_id': int64_feature([speaker_id]),
        'age': int64_feature([age]),
        'gender': int64_feature([gender]),
        'lang': bytes_feature([lang.encode('utf-8')]),
    }))
    write_tfrecord(example, filename)
    # write_phones(phone_txt, filename.split(".")[0]+".txt")
class SpeakerInfo(namedtuple("SpeakerInfo", ["id", "age", "gender"])):
    """Immutable speaker record: numeric id, age and gender code (0=F, 1=M)."""
class TxtCodeRecord(namedtuple("TxtCodeRecord", ["id", "key", "txt_path", "code_path", "speaker_info"])):
    """Immutable pairing of one transcript file with one code file for a speaker."""
#class MelStatistics(namedtuple("MelStatistics", ["id", "key", "max", "min", "sum", "length", "moment2"])):
# pass
class TargetRDD:
    """Wrapper around an RDD of per-utterance statistics records, exposing
    corpus-level aggregates (keys, max, min, mean, second moment)."""

    def __init__(self, rdd: RDD):
        self.rdd = rdd

    def keys(self):
        """All record keys, collected to the driver."""
        return self.rdd.map(lambda rec: rec.key).collect()

    def max(self):
        """Element-wise maximum over every record's ``max`` field."""
        return self.rdd.map(lambda rec: rec.max).reduce(lambda x, y: np.maximum(x, y))

    def min(self):
        """Element-wise minimum over every record's ``min`` field."""
        return self.rdd.map(lambda rec: rec.min).reduce(lambda x, y: np.minimum(x, y))

    def average(self):
        """Global mean: total ``sum`` divided by total ``length``."""
        value_total = self.rdd.map(lambda rec: rec.sum).reduce(lambda x, y: x + y)
        length_total = self.rdd.map(lambda rec: rec.length).reduce(lambda x, y: x + y)
        return value_total / length_total

    def moment2(self):
        """Global second moment: total ``moment2`` divided by total ``length``."""
        value_total = self.rdd.map(lambda rec: rec.moment2).reduce(lambda x, y: x + y)
        length_total = self.rdd.map(lambda rec: rec.length).reduce(lambda x, y: x + y)
        return value_total / length_total
class CODES:
    """Preprocessing front-end that pairs per-speaker transcript (.txt) files
    with code (.txt) files from ``in_dir`` and writes them out as
    source/target TFRecord files in ``out_dir``.
    """

    def __init__(self, in_dir, out_dir, version, num_codes, hparams, speaker_info_filename='speaker-info.txt'):
        self.in_dir = in_dir
        self.out_dir = out_dir
        self.speaker_info_filename = speaker_info_filename
        # Optional grapheme-to-phoneme front-end; only built when configured.
        self.g2p = Flite(hparams.flite_binary_path, hparams.phoneset_path) if hparams.phoneme == 'flite' else None
        self.version = int(version)
        self.num_codes = int(num_codes)

    def list_files(self):
        """Enumerate (text, code) file pairs per speaker as TxtCodeRecords
        with sequential ids."""
        def code_files(speaker_info: SpeakerInfo):
            code_dir = self.in_dir
            spk = "p" + str(speaker_info.id)
            return [os.path.join(code_dir, code_file) for code_file in sorted(os.listdir(code_dir)) if (code_file.endswith('.txt') and code_file.startswith(spk))]

        def text_files(speaker_info: SpeakerInfo):
            txt_dir = self.in_dir
            spk = "p" + str(speaker_info.id)
            return [os.path.join(txt_dir, txt_file) for txt_file in sorted(os.listdir(txt_dir)) if (txt_file.endswith('.txt') and txt_file.startswith(spk))]

        def text_and_code_records(file_pairs, speaker_info):
            def create_record(txt_f, code_f, speaker_info):
                # BUGFIX: str.strip('.txt') strips *characters* from both
                # ends, so keys ending in 't' or 'x' were mangled.
                # splitext removes exactly the extension.
                key1 = os.path.splitext(os.path.basename(code_f))[0]
                key2 = os.path.splitext(os.path.basename(txt_f))[0]
                assert key1 == key2
                return TxtCodeRecord(0, key1, txt_f, code_f, speaker_info)
            return [create_record(txt_f, code_f, speaker_info) for txt_f, code_f in file_pairs]

        records = sum(
            [text_and_code_records(zip(text_files(si), code_files(si)), si) for si in self._load_speaker_info()], [])
        return [TxtCodeRecord(i, r.key, r.txt_path, r.code_path, r.speaker_info) for i, r in enumerate(records)]

    def process_sources(self, rdd: RDD):
        """Lazily convert every record's transcript into a source tfrecord."""
        return map(self._process_txt, rdd)

    def process_targets(self, rdd: RDD):
        """Lazily convert every record's code file into a target tfrecord."""
        return map(self._process_code, rdd)

    def _load_speaker_info(self):
        """Yield SpeakerInfo rows parsed from the speaker-info file
        (first header line is skipped)."""
        with open(self.speaker_info_filename, mode='r', encoding='utf8') as f:
            for l in f.readlines()[1:]:
                si = l.split()
                gender = 0 if si[2] == 'F' else 1
                if str(si[0]) != "315":  # FixMe: Why 315 is missing?
                    yield SpeakerInfo(int(si[0]), int(si[1]), gender)

    def _process_code(self, record: TxtCodeRecord):
        """Read a code file, one-hot encode it and write the target tfrecord.

        Returns the record key.
        """
        with open(os.path.join(self.in_dir, record.code_path), mode='r', encoding='utf8') as f:
            txt = f.readline().rstrip("\n")
            # Some files are "<key>\t<codes>"; keep only the code column.
            if len(txt.split("\t")) == 2:
                txt = txt.split("\t")[1]
            codelist = txt.split(" ")
            codeints = [int(c) for c in codelist if c != ""]
            start = self.version - 1
            if start >= 0:
                # For version >= 1, keep every second code starting at
                # offset version-1.
                codeints = codeints[start::2]
            a = np.array(codeints)
            # One-hot encode: row t has a 1 at column codeints[t].
            codes = np.zeros((a.size, self.num_codes))
            codes[np.arange(a.size), a] = 1
            codes = np.array(codes, np.float32)
            codes_length = a.size
            file_path = os.path.join(self.out_dir, f"{record.key}.target.tfrecord")
            write_preprocessed_target_data(record.id, record.key, codes, codes_length, "EN", file_path)
            return record.key

    def _process_txt(self, record: TxtCodeRecord):
        """Read a transcript, convert it to text/phoneme id sequences and
        write the source tfrecord.  Returns the record key.
        """
        with open(os.path.join(self.in_dir, record.txt_path), mode='r', encoding='utf8') as f:
            # Keep only the first tab-separated column of the first line.
            txt = f.readline().rstrip("\n").split("\t")[0]
            sequence, clean_text = text_to_sequence(txt, basic_cleaners)
            phone_ids, phone_txt = self.g2p.convert_to_phoneme(clean_text) if self.g2p is not None else (None, None)
            source = np.array(sequence, dtype=np.int64)
            phone_ids = np.array(phone_ids, dtype=np.int64) if phone_ids is not None else None
            file_path = os.path.join(self.out_dir, f"{record.key}.source.tfrecord")
            write_preprocessed_source_data(record.id, record.key, source, clean_text, phone_ids, phone_txt, record.speaker_info.id, record.speaker_info.age, record.speaker_info.gender, "EN", file_path)
            return record.key
| [
"collections.namedtuple",
"utils.tfrecord.write_tfrecord",
"numpy.minimum",
"os.listdir",
"os.path.join",
"utils.tfrecord.bytes_feature",
"numpy.array",
"extensions.flite.Flite",
"numpy.zeros",
"os.path.basename",
"preprocess.text.text_to_sequence",
"utils.tfrecord.int64_feature",
"numpy.max... | [((393, 435), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (412, 435), True, 'import numpy as np\n'), ((2250, 2300), 'collections.namedtuple', 'namedtuple', (['"""SpeakerInfo"""', "['id', 'age', 'gender']"], {}), "('SpeakerInfo', ['id', 'age', 'gender'])\n", (2260, 2300), False, 'from collections import namedtuple\n'), ((2334, 2421), 'collections.namedtuple', 'namedtuple', (['"""TxtCodeRecord"""', "['id', 'key', 'txt_path', 'code_path', 'speaker_info']"], {}), "('TxtCodeRecord', ['id', 'key', 'txt_path', 'code_path',\n 'speaker_info'])\n", (2344, 2421), False, 'from collections import namedtuple\n'), ((1217, 1250), 'utils.tfrecord.write_tfrecord', 'write_tfrecord', (['example', 'filename'], {}), '(example, filename)\n', (1231, 1250), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((2131, 2164), 'utils.tfrecord.write_tfrecord', 'write_tfrecord', (['example', 'filename'], {}), '(example, filename)\n', (2145, 2164), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((3633, 3688), 'extensions.flite.Flite', 'Flite', (['hparams.flite_binary_path', 'hparams.phoneset_path'], {}), '(hparams.flite_binary_path, hparams.phoneset_path)\n', (3638, 3688), False, 'from extensions.flite import Flite\n'), ((7152, 7189), 'preprocess.text.text_to_sequence', 'text_to_sequence', (['txt', 'basic_cleaners'], {}), '(txt, basic_cleaners)\n', (7168, 7189), False, 'from preprocess.text import text_to_sequence\n'), ((7328, 7362), 'numpy.array', 'np.array', (['sequence'], {'dtype': 'np.int64'}), '(sequence, dtype=np.int64)\n', (7336, 7362), True, 'import numpy as np\n'), ((7482, 7541), 'os.path.join', 'os.path.join', (['self.out_dir', 'f"""{record.key}.source.tfrecord"""'], {}), "(self.out_dir, f'{record.key}.source.tfrecord')\n", (7494, 7541), False, 'import os, sys\n'), ((2786, 2802), 'numpy.maximum', 
'np.maximum', (['a', 'b'], {}), '(a, b)\n', (2796, 2802), True, 'import numpy as np\n'), ((2889, 2905), 'numpy.minimum', 'np.minimum', (['a', 'b'], {}), '(a, b)\n', (2899, 2905), True, 'import numpy as np\n'), ((4013, 4046), 'os.path.join', 'os.path.join', (['code_dir', 'code_file'], {}), '(code_dir, code_file)\n', (4025, 4046), False, 'import os, sys\n'), ((4329, 4360), 'os.path.join', 'os.path.join', (['txt_dir', 'txt_file'], {}), '(txt_dir, txt_file)\n', (4341, 4360), False, 'import os, sys\n'), ((5795, 5838), 'os.path.join', 'os.path.join', (['self.in_dir', 'record.code_path'], {}), '(self.in_dir, record.code_path)\n', (5807, 5838), False, 'import os, sys\n'), ((6402, 6420), 'numpy.array', 'np.array', (['codeints'], {}), '(codeints)\n', (6410, 6420), True, 'import numpy as np\n'), ((6445, 6479), 'numpy.zeros', 'np.zeros', (['(a.size, self.num_codes)'], {}), '((a.size, self.num_codes))\n', (6453, 6479), True, 'import numpy as np\n'), ((6551, 6578), 'numpy.array', 'np.array', (['codes', 'np.float32'], {}), '(codes, np.float32)\n', (6559, 6578), True, 'import numpy as np\n'), ((6709, 6768), 'os.path.join', 'os.path.join', (['self.out_dir', 'f"""{record.key}.target.tfrecord"""'], {}), "(self.out_dir, f'{record.key}.target.tfrecord')\n", (6721, 6768), False, 'import os, sys\n'), ((6981, 7023), 'os.path.join', 'os.path.join', (['self.in_dir', 'record.txt_path'], {}), '(self.in_dir, record.txt_path)\n', (6993, 7023), False, 'import os, sys\n'), ((7387, 7422), 'numpy.array', 'np.array', (['phone_ids'], {'dtype': 'np.int64'}), '(phone_ids, dtype=np.int64)\n', (7395, 7422), True, 'import numpy as np\n'), ((919, 939), 'utils.tfrecord.int64_feature', 'int64_feature', (['[_id]'], {}), '([_id])\n', (932, 939), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((1066, 1092), 'utils.tfrecord.bytes_feature', 'bytes_feature', (['[raw_codes]'], {}), '([raw_codes])\n', (1079, 1092), False, 'from utils.tfrecord import write_tfrecord, 
write_phones, int64_feature, bytes_feature\n'), ((1118, 1147), 'utils.tfrecord.int64_feature', 'int64_feature', (['[codes_length]'], {}), '([codes_length])\n', (1131, 1147), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((1172, 1203), 'utils.tfrecord.int64_feature', 'int64_feature', (['[codes.shape[1]]'], {}), '([codes.shape[1]])\n', (1185, 1203), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((1529, 1549), 'utils.tfrecord.int64_feature', 'int64_feature', (['[_id]'], {}), '([_id])\n', (1542, 1549), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((1622, 1649), 'utils.tfrecord.bytes_feature', 'bytes_feature', (['[raw_source]'], {}), '([raw_source])\n', (1635, 1649), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((1955, 1982), 'utils.tfrecord.int64_feature', 'int64_feature', (['[speaker_id]'], {}), '([speaker_id])\n', (1968, 1982), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((1999, 2019), 'utils.tfrecord.int64_feature', 'int64_feature', (['[age]'], {}), '([age])\n', (2012, 2019), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((2039, 2062), 'utils.tfrecord.int64_feature', 'int64_feature', (['[gender]'], {}), '([gender])\n', (2052, 2062), False, 'from utils.tfrecord import write_tfrecord, write_phones, int64_feature, bytes_feature\n'), ((4071, 4091), 'os.listdir', 'os.listdir', (['code_dir'], {}), '(code_dir)\n', (4081, 4091), False, 'import os, sys\n'), ((4384, 4403), 'os.listdir', 'os.listdir', (['txt_dir'], {}), '(txt_dir)\n', (4394, 4403), False, 'import os, sys\n'), ((4611, 4635), 'os.path.basename', 'os.path.basename', (['code_f'], {}), '(code_f)\n', (4627, 4635), False, 'import os, sys\n'), ((4673, 4696), 'os.path.basename', 'os.path.basename', 
(['txt_f'], {}), '(txt_f)\n', (4689, 4696), False, 'import os, sys\n'), ((6502, 6519), 'numpy.arange', 'np.arange', (['a.size'], {}), '(a.size)\n', (6511, 6519), True, 'import numpy as np\n')] |
#!/bin/python
import math
import random
import numpy
'''
A simple random vector generator. It is initialized with n, q, and seed. To generate vectors call generate with count being the number of vectors that are wanted. If generate is not specified a count it assumes 1 and gives 1 vector as output.
'''
class RVG(object):
    """Simple random vector generator.

    Initialized with n (dimensions), q (exclusive upper bound on entries)
    and a seed.  ``generate(count)`` yields ``count`` integer vectors with
    entries uniform in [0, q-1]; without a count it yields one vector.
    """
    def __init__(self, n, q, seed):
        self.__seed__ = seed
        self.__n__ = n  # number of dimensions
        self.__q__ = q  # entries drawn from [0, q-1]
        # NOTE(review): SystemRandom accepts a seed argument but ignores it
        # (it reads OS entropy), so output is not reproducible from the seed.
        self.__g__ = random.SystemRandom(self.__seed__)

    def generate(self, count=1):
        """Yield ``count`` random integer vectors of length n."""
        for _ in range(count):
            # np.empty with a concrete dtype replaces the raw ndarray
            # constructor and the abstract numpy.integer dtype, which is
            # rejected by modern NumPy.
            v = numpy.empty(self.__n__, dtype=numpy.int64)
            for j in range(self.__n__):
                v[j] = self.__g__.randint(0, self.__q__ - 1)
            yield v
| [
"numpy.ndarray",
"random.SystemRandom"
] | [((499, 533), 'random.SystemRandom', 'random.SystemRandom', (['self.__seed__'], {}), '(self.__seed__)\n', (518, 533), False, 'import random\n'), ((618, 664), 'numpy.ndarray', 'numpy.ndarray', (['self.__n__'], {'dtype': 'numpy.integer'}), '(self.__n__, dtype=numpy.integer)\n', (631, 664), False, 'import numpy\n')] |
import numpy as np
import logging
class ReactionNode:
    """A reaction node in a tree search for synthesis routes.

    Its parent is the molecule node being expanded; its children are the
    molecule nodes produced by applying ``template``.  Q_value is a running
    average updated during back-propagation.
    """

    def __init__(self, parent, pro, template, init_value):
        self.parent = parent
        self.depth = self.parent.depth + 1
        self.id = -1
        # visit count used for the running Q average
        self.count = 1
        # one_step_model probability of this reaction
        self.pro = pro
        # one_step_model reaction template
        self.template = template
        self.valid = True
        # child molecule nodes (children append themselves to this list)
        self.children = []
        # initial value given by the experience guidance network
        self.value = init_value
        # running Q value; the first update equals init_value
        self.Q_value = init_value
        # True once a valid synthesis route has been found below this node
        self.succ = False
        parent.children.append(self)

    def v_self(self):
        """Current Q value of this reaction."""
        return self.Q_value

    def v_pro(self):
        """Model probability of this reaction."""
        return self.pro

    def v_visit_time(self):
        """Number of visits (Q updates) so far."""
        return self.count

    def set_invaild(self):
        """Mark the reaction invalid and fold a -10 penalty into Q.

        (Misspelled name kept for API compatibility with existing callers.)
        """
        self.valid = False
        newQ = -10.0
        self.Q_value = (self.Q_value * self.count + newQ) / (self.count + 1)
        self.count += 1

    def select_child(self):
        """Select a child molecule node during the selection phase.

        Prefers the first child that is still open (unexpanded) and not yet
        solved; if none exists, picks uniformly at random among the
        not-yet-solved children.
        """
        # 1 marks a child that is still open (unexpanded)
        check_expansion = [1 if child.open else 0 for child in self.children]
        # 1 marks a child that has not succeeded yet
        check_success = [0 if child.succ else 1 for child in self.children]
        # 1 marks a child that is both open and unsolved
        check_array = [check_expansion[i] & check_success[i] for i in range(len(check_expansion))]
        if np.max(check_array) == 0:
            # No open-and-unsolved child: sample among the unsolved ones.
            # BUGFIX: the previous code took len() of the np.where() result
            # tuple (always 1), so randint(0, 1) was always 0 and the first
            # unsolved child was always returned instead of a random one.
            candidates = np.where(np.array(check_success) == 1)[0]
            pick = np.random.randint(0, len(candidates))
            return self.children[candidates[pick]]
        else:
            # Return the first open-and-unsolved child.
            return self.children[np.argmax(check_array)]

    def set_success(self):
        """Mark this reaction as having found a valid route; pin Q at 10."""
        self.succ = True
        self.Q_value = 10.0

    # update phase
    def update_Q(self):
        """Back-propagation step: refresh Q from the children.

        If every child has succeeded, the node succeeds and a +10 reward is
        folded into the running average; otherwise the new sample is the
        mean value of the *expanded* children only.
        """
        all_succeeded = True
        total_value = 0
        unexpanded = 0
        for child in self.children:
            all_succeeded = all_succeeded & child.succ
            if child.open:
                unexpanded += 1
            else:
                total_value = total_value + child.value
        if all_succeeded:
            self.succ = True
            newQ = 10.0
            self.Q_value = (self.Q_value * self.count + newQ) / (self.count + 1)
            self.count += 1
            return
        else:
            # Average only over the expanded children.
            newQ = total_value / (len(self.children) - unexpanded)
            self.Q_value = (self.Q_value * self.count + newQ) / (self.count + 1)
            self.count += 1
            return

    def serialize(self):
        """Return the node id formatted as a string."""
        return '%d' % (self.id)
"numpy.array",
"numpy.argmax",
"numpy.max"
] | [((1673, 1692), 'numpy.max', 'np.max', (['check_array'], {}), '(check_array)\n', (1679, 1692), True, 'import numpy as np\n'), ((2123, 2145), 'numpy.argmax', 'np.argmax', (['check_array'], {}), '(check_array)\n', (2132, 2145), True, 'import numpy as np\n'), ((1869, 1892), 'numpy.array', 'np.array', (['check_success'], {}), '(check_success)\n', (1877, 1892), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''
Scan HF/DFT PES.

Sweeps the H-F bond length b from 0.7 to 4.0 Angstrom with RHF (forward)
and again with B3LYP (backward), then tabulates and plots both curves.
The two sweep directions differ in how the SCF initial guess is reused,
which can expose hysteresis in the converged solutions.
'''
import numpy
from pyscf import gto
from pyscf import scf, dft
#
# A scanner can take the initial guess from previous calculation
# automatically.
#
mol = gto.Mole()
mf_scanner = scf.RHF(mol).as_scanner()
ehf1 = []
# Forward sweep: each RHF solve starts from the previous geometry's result.
for b in numpy.arange(0.7, 4.01, 0.1):
    mol = gto.M(verbose = 5,
                output = 'out_hf-%2.1f' % b,
                atom = [["F", (0., 0., 0.)],
                       ["H", (0., 0., b)],],
                basis = 'cc-pvdz')
    ehf1.append(mf_scanner(mol))
#
# Create a new scanner, the results of last calculation will not be used as
# initial guess.
#
mf_scanner = dft.RKS(mol).set(xc='b3lyp').as_scanner()
ehf2 = []
# Backward sweep (4.0 -> 0.7) with the freshly created B3LYP scanner.
for b in reversed(numpy.arange(0.7, 4.01, 0.1)):
    mol = gto.M(verbose = 5,
                output = 'out_b3lyp-%2.1f' % b,
                atom = [["F", (0., 0., 0.)],
                       ["H", (0., 0., b)],],
                basis = 'cc-pvdz')
    ehf2.append(mf_scanner(mol))

x = numpy.arange(0.7, 4.01, .1)
# Reverse ehf2 so both energy lists run in the same (increasing-b) order.
ehf2.reverse()
with open('hf-scan.txt', 'w') as fout:
    fout.write('     HF 0.7->4.0     B3LYP 4.0->0.7\n')
    for i, xi in enumerate(x):
        fout.write('%2.1f  %14.8f  %14.8f\n'
                   % (xi, ehf1[i], ehf2[i]))
import matplotlib.pyplot as plt
plt.plot(x, ehf1, label='HF,0.7->4.0')
plt.plot(x, ehf2, label='HF,4.0->0.7')
plt.legend()
plt.show()
| [
"pyscf.gto.Mole",
"pyscf.gto.M",
"numpy.arange",
"matplotlib.pyplot.plot",
"pyscf.scf.RHF",
"pyscf.dft.RKS",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((204, 214), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (212, 214), False, 'from pyscf import gto\n'), ((273, 301), 'numpy.arange', 'numpy.arange', (['(0.7)', '(4.01)', '(0.1)'], {}), '(0.7, 4.01, 0.1)\n', (285, 301), False, 'import numpy\n'), ((989, 1017), 'numpy.arange', 'numpy.arange', (['(0.7)', '(4.01)', '(0.1)'], {}), '(0.7, 4.01, 0.1)\n', (1001, 1017), False, 'import numpy\n'), ((1282, 1320), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ehf1'], {'label': '"""HF,0.7->4.0"""'}), "(x, ehf1, label='HF,0.7->4.0')\n", (1290, 1320), True, 'import matplotlib.pyplot as plt\n'), ((1321, 1359), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ehf2'], {'label': '"""HF,4.0->0.7"""'}), "(x, ehf2, label='HF,4.0->0.7')\n", (1329, 1359), True, 'import matplotlib.pyplot as plt\n'), ((1360, 1372), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1370, 1372), True, 'import matplotlib.pyplot as plt\n'), ((1373, 1383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1381, 1383), True, 'import matplotlib.pyplot as plt\n'), ((313, 431), 'pyscf.gto.M', 'gto.M', ([], {'verbose': '(5)', 'output': "('out_hf-%2.1f' % b)", 'atom': "[['F', (0.0, 0.0, 0.0)], ['H', (0.0, 0.0, b)]]", 'basis': '"""cc-pvdz"""'}), "(verbose=5, output='out_hf-%2.1f' % b, atom=[['F', (0.0, 0.0, 0.0)], [\n 'H', (0.0, 0.0, b)]], basis='cc-pvdz')\n", (318, 431), False, 'from pyscf import gto\n'), ((717, 745), 'numpy.arange', 'numpy.arange', (['(0.7)', '(4.01)', '(0.1)'], {}), '(0.7, 4.01, 0.1)\n', (729, 745), False, 'import numpy\n'), ((758, 878), 'pyscf.gto.M', 'gto.M', ([], {'verbose': '(5)', 'output': "('out_b3lyp-%2.1f' % b)", 'atom': "[['F', (0.0, 0.0, 0.0)], ['H', (0.0, 0.0, b)]]", 'basis': '"""cc-pvdz"""'}), "(verbose=5, output='out_b3lyp-%2.1f' % b, atom=[['F', (0.0, 0.0, 0.0)],\n ['H', (0.0, 0.0, b)]], basis='cc-pvdz')\n", (763, 878), False, 'from pyscf import gto\n'), ((228, 240), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (235, 240), False, 'from pyscf import scf, 
dft\n'), ((647, 659), 'pyscf.dft.RKS', 'dft.RKS', (['mol'], {}), '(mol)\n', (654, 659), False, 'from pyscf import scf, dft\n')] |
import numpy as np
import scipy.stats as stats
import networkx as nx
import distance_inner as dm
def checkdist(dist):
    """Return True iff every triple (i, j, k) in the square matrix ``dist``
    satisfies the triangle inequality."""
    size = dist.shape[0]
    for i in range(size):
        for j in range(i):
            for k in range(j):
                # Equivalent to checking all three inequalities: the only
                # one that can fail is "smallest two sides sum < largest".
                lo, mid, hi = sorted((dist[i, j], dist[j, k], dist[k, i]))
                if lo + mid < hi:
                    return False
    return True
def compute_distance(rholist, tlabel):
    """
    Build a symmetric distance matrix between all states in ``rholist``
    using the metric named by ``tlabel`` ('trace', 'bures' or 'angle').
    On an unknown label, prints a message and returns the (partial) matrix.
    """
    n = len(rholist)
    dist = np.zeros((n, n))
    # Fill the upper triangle with pairwise distances.
    for i in range(n):
        for j in range(i+1, n):
            ri, rj = rholist[i], rholist[j]
            if tlabel == 'trace':
                d = dm.trace_distance(ri, rj)
            elif tlabel == 'bures':
                d = dm.bures_distance(ri, rj)
            elif tlabel == 'angle':
                d = dm.bures_angle(ri, rj)
            else:
                print('Not found type of ditance {}'.format(tlabel))
                return dist
            dist[i, j] = d
    # Mirror the upper triangle into the lower one.
    for i in range(n):
        for j in range(i):
            dist[i, j] = dist[j, i]
    # Check distance condition
    print('Check dist', checkdist(dist))
    return dist
def shortest(matrix, inv=True):
    """
    Turn a weighted matrix into a shortest-path distance matrix.

    Weights are mapped to 1 - |w| / max(|w|) (so strong links become short
    edges) and Floyd-Warshall is run on the resulting graph.
    Note: the ``inv`` parameter is currently unused.
    """
    weights = np.abs(matrix)
    peak = np.max(weights)
    if peak > 0:
        weights = 1.0 - weights / peak
    graph = nx.from_numpy_matrix(weights)
    dist = nx.algorithms.shortest_paths.dense.floyd_warshall_numpy(graph)
    print(dist.shape)
    return np.array(dist)
    #return matrix
def pearson(matrix):
    """
    Compute pairwise Pearson correlation coefficients between the rows of
    ``matrix``, together with the 2-tailed p-values
    (see scipy.stats.pearsonr).

    Argument: 2d numpy array (e.g. mutual information matrix)
    Return: tuple (R, P) of symmetric matrices; the diagonals are left 0.
    """
    size = matrix.shape[0]
    coeffs = np.zeros((size, size))
    pvals = np.zeros((size, size))
    # Fill both triangles at once; Pearson correlation is symmetric.
    for row in range(size):
        for col in range(row + 1, size):
            r, p = stats.pearsonr(matrix[row, :], matrix[col, :])
            coeffs[row][col] = r
            coeffs[col][row] = r
            pvals[row][col] = p
            pvals[col][row] = p
    return (coeffs, pvals)
| [
"numpy.abs",
"distance_inner.trace_distance",
"distance_inner.bures_distance",
"networkx.algorithms.shortest_paths.dense.floyd_warshall_numpy",
"numpy.max",
"numpy.array",
"numpy.zeros",
"distance_inner.bures_angle",
"scipy.stats.pearsonr",
"networkx.from_numpy_matrix"
] | [((526, 542), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (534, 542), True, 'import numpy as np\n'), ((1369, 1383), 'numpy.abs', 'np.abs', (['matrix'], {}), '(matrix)\n', (1375, 1383), True, 'import numpy as np\n'), ((1392, 1406), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (1398, 1406), True, 'import numpy as np\n'), ((1462, 1490), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['matrix'], {}), '(matrix)\n', (1482, 1490), True, 'import networkx as nx\n'), ((1502, 1560), 'networkx.algorithms.shortest_paths.dense.floyd_warshall_numpy', 'nx.algorithms.shortest_paths.dense.floyd_warshall_numpy', (['G'], {}), '(G)\n', (1557, 1560), True, 'import networkx as nx\n'), ((1594, 1608), 'numpy.array', 'np.array', (['dist'], {}), '(dist)\n', (1602, 1608), True, 'import numpy as np\n'), ((2002, 2020), 'numpy.zeros', 'np.zeros', (['(ll, ll)'], {}), '((ll, ll))\n', (2010, 2020), True, 'import numpy as np\n'), ((2035, 2053), 'numpy.zeros', 'np.zeros', (['(ll, ll)'], {}), '((ll, ll))\n', (2043, 2053), True, 'import numpy as np\n'), ((2181, 2225), 'scipy.stats.pearsonr', 'stats.pearsonr', (['matrix[ii, :]', 'matrix[jj, :]'], {}), '(matrix[ii, :], matrix[jj, :])\n', (2195, 2225), True, 'import scipy.stats as stats\n'), ((720, 745), 'distance_inner.trace_distance', 'dm.trace_distance', (['ri', 'rj'], {}), '(ri, rj)\n', (737, 745), True, 'import distance_inner as dm\n'), ((804, 829), 'distance_inner.bures_distance', 'dm.bures_distance', (['ri', 'rj'], {}), '(ri, rj)\n', (821, 829), True, 'import distance_inner as dm\n'), ((888, 910), 'distance_inner.bures_angle', 'dm.bures_angle', (['ri', 'rj'], {}), '(ri, rj)\n', (902, 910), True, 'import distance_inner as dm\n')] |
'''
<NAME>
Author: <NAME>
This contains code to read data from the SQL database, and put it in figures
'''
#import pandas as pd
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
#import scipy
#import sklearn
import time
#import sys
#import re
import sqlite3 as lite
#Note: SQL has the following tables
'''sim_types(id integer primary key,
name text,
p0_name text,
p1_name text,
p2_name text,
p3_name text,
p4_name text,
card_types integer,
unique(name))
sim_results(id integer primary key,
sim_type_id integer,
sim_version integer,
p0 real,
p1 real,
p2 real,
p3 real,
p4 real,
stdev real,
mean real,
reliability real,
deleted bit)'''
#I'm only making a few figures, so these are pretty much ad hoc functions
def lab_sim_fig():
    """Plot the 'Laboratory Engine' figure from sim.db.

    Pulls (mean, stdev, reliability, p1) rows for sim_type_id 5 with
    p0 = 2000, combines the raw mean with the 2000-point outcome weighted
    by reliability, and plots expected payoff below L = 0.5 and
    reliability above it on a twin y-axis.
    """
    #get data from database
    con = lite.connect('sim.db')
    with con:
        cur = con.cursor()
        cmd = '''select mean,stdev,reliability,p1
            from sim_results
            where sim_type_id = 5
            and deleted = 0
            and p0 = 2000'''
        cur.execute(cmd)
        results = cur.fetchall()
    con.close()
    #Process the results
    num_results = len(results)
    L = np.zeros(num_results)
    mean = np.zeros(num_results)
    mean_dud = np.zeros(num_results)
    stdev = np.zeros(num_results)
    reliability = np.zeros(num_results)
    for i in range(num_results):
        L[i] = results[i][3]
        reliability[i] = results[i][2]
        # Blend the raw ("dud") mean with the p0 = 2000 outcome, weighted
        # by reliability; stdev is recomputed for the blended distribution.
        mean[i] = results[i][0]*(1-reliability[i]) + 2000*reliability[i]
        mean_dud[i] = results[i][0]
        stdev[i] = ((results[i][1]**2 + results[i][0]**2)*(1-reliability[i]) + 2000**2*reliability[i] - mean[i]**2 )**0.5
        #stdev[i] = results[i][1]
    # Sort every series by L so the line plots are monotone in x.
    sorter = np.argsort(L)
    L = L[sorter]
    mean = mean[sorter]
    mean_dud = mean_dud[sorter]
    stdev = stdev[sorter]
    reliability = reliability[sorter]
    # Split at L = 0.5: payoff is plotted below, reliability above.
    sub_i = L < 0.5
    super_i = L >= 0.5
    x_sub = L[sub_i]
    y_sub = mean[sub_i]
    x_super = L[super_i]
    reliability = reliability[super_i]
    #Not plotting stdev or mean_dud, although I could
    #stdev_sub = stdev[sub_i]
    #plot mean and standard deviation in subcritical phase
    plt.plot(x_sub,y_sub,color='#bb8844',lw=2)
    #plt.fill_between(x_sub,y_sub+stdev_sub,y_sub-stdev_sub,color='#ddaa44',alpha=0.25)
    plt.axis([0, 1, 0, 40])
    plt.xlabel('Fraction Labs',fontsize=16)
    plt.tick_params(labelsize=16)
    plt.ylabel('Expected Payoff',color='#bb8844',fontsize=16)
    plt.tick_params(labelsize=16)
    #draw line at phase transition
    l = plt.axvline(x=0.5, color='#000000',lw=2,linestyle='dashed')
    #plot reliability in supercritical phase
    ax2 = plt.gca().twinx()
    ax2.plot(x_super, reliability, 'b',lw=2)
    plt.axis([0, 1, 0, 1])
    plt.ylabel('Reliability', color='b',fontsize=16)
    plt.tick_params(labelsize=16)
    plt.title('Laboratory Engine',fontsize=24)
def lab_fin_fig():
    """Plot the 'Lab deck with 15 Copper' figure from sim.db.

    Pulls (mean, stdev, p0, p1) rows for sim_type_id 6 where
    p0 = 15 + p1, plots mean payoff with a +/- stdev band against the
    fraction of labs (p1/p0), and marks the transition at L = 0.4.
    """
    #get data from database
    con = lite.connect('sim.db')
    with con:
        cur = con.cursor()
        cmd = '''select mean,stdev,p0,p1
            from sim_results
            where sim_type_id = 6
            and deleted = 0
            and p0 = 15 + p1'''
        cur.execute(cmd)
        results = cur.fetchall()
    con.close()
    #Process the results
    num_results = len(results)
    L = np.zeros(num_results)
    mean = np.zeros(num_results)
    stdev = np.zeros(num_results)
    for i in range(num_results):
        # Fraction of labs in the deck: p1 / p0.
        L[i] = results[i][3]/(results[i][2])
        mean[i] = results[i][0]
        stdev[i] = results[i][1]
    # Sort every series by L so the line plot is monotone in x.
    sorter = np.argsort(L)
    L = L[sorter]
    mean = mean[sorter]
    stdev = stdev[sorter]
    #plot mean and standard deviation in subcritical phase
    plt.plot(L,mean,color='#bb8844',lw=2)
    plt.fill_between(L,mean+stdev,mean-stdev,color='#ddaa44',alpha=0.25)
    plt.axis([0, .8, 0, 20])
    plt.xlabel('Fraction Labs',fontsize=16)
    plt.tick_params(labelsize=16)
    plt.ylabel('Expected Payoff',color='#bb8844',fontsize=16)
    plt.tick_params(labelsize=16)
    #draw line at phase transition
    l = plt.axvline(x=0.4, color='#000000',lw=2,linestyle='dashed')
    plt.title('Lab deck with 15 Copper',fontsize=24)
def vsm_sim_fig():
    """Plot the 'Village/Smithy Engine' heat-map figure from sim.db.

    Averages sim_type_id 7 results over a density x density grid of
    (p2 = smithy fraction, p3 = village fraction) bins, then overlays two
    images: expected payoff in the subcritical region and reliability in
    the supercritical region, separated by a dashed critical line.
    """
    #Initialize arrays
    density = 100
    mean = np.zeros((density,density))
    mean_dud = np.zeros((density,density))
    stdev = np.zeros((density,density))
    reliability = np.zeros((density,density))
    # NaN marks grid cells that have no simulation data.
    mean[:] = np.nan
    mean_dud[:] = np.nan
    stdev[:] = np.nan
    reliability[:] = np.nan
    i,j = np.indices((density,density))
    V_coord = i/density
    S_coord = j/density
    #get data from database
    con = lite.connect('sim.db')
    with con:
        cur = con.cursor()
        # One aggregate query per grid cell; values are numeric loop
        # indices (not user input), so string formatting is safe here.
        for i,j in np.ndindex((density,density)):
            cmd = '''select avg(mean),avg(reliability),avg(stdev),avg(p0)
                from sim_results
                where sim_type_id = 7
                and deleted = 0
                and p0 >= 1000
                and p2 >= %.3f and p2 < %.3f
                and p3 >= %.3f and p3 < %.3f''' % (i/density,(i+1)/density,j/density,(j+1)/density)
            cur.execute(cmd)
            results = cur.fetchone()
            if results[0] is not None:
                reliability[i,j] = results[1]
                # Blend the raw mean with the p0 payoff, weighted by
                # reliability.
                mean[i,j] = results[0]*(1-results[1]) + results[3]*results[1]
                mean_dud[i,j] = results[0]
                stdev[i,j] = results[2]
    con.close()
    #Separate out the subcritical and supercritical parts
    # NOTE: sub_mean aliases mean, so the masking below also blanks
    # the supercritical cells of mean itself.
    sub_mean = mean
    sub_mean[np.logical_and(V_coord > .25,V_coord > 1-S_coord*3)] = np.nan
    reliability[np.logical_or(V_coord < .25, V_coord < 1-S_coord*3)] = np.nan
    # Vertices of the dashed critical boundary in (S, V) coordinates.
    critical_S = np.array([0,.25,.75])
    critical_V = np.array([1,.25,.25])
    fig, ax = plt.subplots(figsize=(20,10))
    ax.plot(critical_S,critical_V,lw=3,linestyle="dashed",color='w')
    # NaN cells are transparent, so the two images tile the square.
    c1 = ax.imshow(sub_mean,extent=(0,1,0,1),origin='lower',interpolation='none',cmap=plt.cm.plasma_r)
    c2 = ax.imshow(reliability,extent=(0,1,0,1),origin='lower',interpolation='none',cmap=plt.cm.winter_r)
    #c1 = plt.contourf(S_coord,V_coord,sub_mean,np.arange(0,10,.2),cmap=plt.cm.plasma_r,extend="both")
    #c2 = plt.contourf(S_coord,V_coord,reliability,10,cmap=plt.cm.GnBu_r)
    ax.set_xlabel('Fraction Smithies',fontsize=24)
    ax.set_ylabel('Fraction Villages',fontsize=24)
    ax.tick_params(labelsize=16)
    #c1.cmap.set_under('#EFF821')
    #c1.cmap.set_over('#0C0786')
    c1.set_clim(4, 10)
    # Two separate colorbars, one per overlay.
    cax1 = fig.add_axes([0.75, 0.13, 0.03, 0.35])
    cbar1 = fig.colorbar(c1,cax = cax1)
    cbar1.ax.set_ylabel('Expected Payoff',fontsize=24)
    cbar1.ax.tick_params(labelsize=16)
    cax2 = fig.add_axes([0.75, 0.53, 0.03, 0.35])
    cbar2 = fig.colorbar(c2,cax = cax2)
    cbar2.ax.set_ylabel('Reliability',fontsize=24)
    cbar2.ax.tick_params(labelsize=16)
    ax.set_title('Village/Smithy Engine',fontsize=36)
| [
"matplotlib.pyplot.title",
"sqlite3.connect",
"numpy.logical_and",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"numpy.ndindex",
"numpy.indices",
"matplotlib.pyplot.fill_between",
"numpy.argsort",
... | [((1024, 1046), 'sqlite3.connect', 'lite.connect', (['"""sim.db"""'], {}), "('sim.db')\n", (1036, 1046), True, 'import sqlite3 as lite\n'), ((1418, 1439), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (1426, 1439), True, 'import numpy as np\n'), ((1451, 1472), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (1459, 1472), True, 'import numpy as np\n'), ((1488, 1509), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (1496, 1509), True, 'import numpy as np\n'), ((1522, 1543), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (1530, 1543), True, 'import numpy as np\n'), ((1562, 1583), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (1570, 1583), True, 'import numpy as np\n'), ((1973, 1986), 'numpy.argsort', 'np.argsort', (['L'], {}), '(L)\n', (1983, 1986), True, 'import numpy as np\n'), ((2435, 2480), 'matplotlib.pyplot.plot', 'plt.plot', (['x_sub', 'y_sub'], {'color': '"""#bb8844"""', 'lw': '(2)'}), "(x_sub, y_sub, color='#bb8844', lw=2)\n", (2443, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2570, 2593), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, 0, 40]'], {}), '([0, 1, 0, 40])\n', (2578, 2593), True, 'import matplotlib.pyplot as plt\n'), ((2598, 2638), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction Labs"""'], {'fontsize': '(16)'}), "('Fraction Labs', fontsize=16)\n", (2608, 2638), True, 'import matplotlib.pyplot as plt\n'), ((2642, 2671), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(16)'}), '(labelsize=16)\n', (2657, 2671), True, 'import matplotlib.pyplot as plt\n'), ((2676, 2735), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Payoff"""'], {'color': '"""#bb8844"""', 'fontsize': '(16)'}), "('Expected Payoff', color='#bb8844', fontsize=16)\n", (2686, 2735), True, 'import matplotlib.pyplot as plt\n'), ((2738, 2767), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(16)'}), 
'(labelsize=16)\n', (2753, 2767), True, 'import matplotlib.pyplot as plt\n'), ((2812, 2873), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0.5)', 'color': '"""#000000"""', 'lw': '(2)', 'linestyle': '"""dashed"""'}), "(x=0.5, color='#000000', lw=2, linestyle='dashed')\n", (2823, 2873), True, 'import matplotlib.pyplot as plt\n'), ((2995, 3017), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (3003, 3017), True, 'import matplotlib.pyplot as plt\n'), ((3022, 3071), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reliability"""'], {'color': '"""b"""', 'fontsize': '(16)'}), "('Reliability', color='b', fontsize=16)\n", (3032, 3071), True, 'import matplotlib.pyplot as plt\n'), ((3075, 3104), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(16)'}), '(labelsize=16)\n', (3090, 3104), True, 'import matplotlib.pyplot as plt\n'), ((3109, 3152), 'matplotlib.pyplot.title', 'plt.title', (['"""Laboratory Engine"""'], {'fontsize': '(24)'}), "('Laboratory Engine', fontsize=24)\n", (3118, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3214, 3236), 'sqlite3.connect', 'lite.connect', (['"""sim.db"""'], {}), "('sim.db')\n", (3226, 3236), True, 'import sqlite3 as lite\n'), ((3602, 3623), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (3610, 3623), True, 'import numpy as np\n'), ((3635, 3656), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (3643, 3656), True, 'import numpy as np\n'), ((3669, 3690), 'numpy.zeros', 'np.zeros', (['num_results'], {}), '(num_results)\n', (3677, 3690), True, 'import numpy as np\n'), ((3857, 3870), 'numpy.argsort', 'np.argsort', (['L'], {}), '(L)\n', (3867, 3870), True, 'import numpy as np\n'), ((4003, 4043), 'matplotlib.pyplot.plot', 'plt.plot', (['L', 'mean'], {'color': '"""#bb8844"""', 'lw': '(2)'}), "(L, mean, color='#bb8844', lw=2)\n", (4011, 4043), True, 'import matplotlib.pyplot as plt\n'), ((4045, 4121), 'matplotlib.pyplot.fill_between', 
'plt.fill_between', (['L', '(mean + stdev)', '(mean - stdev)'], {'color': '"""#ddaa44"""', 'alpha': '(0.25)'}), "(L, mean + stdev, mean - stdev, color='#ddaa44', alpha=0.25)\n", (4061, 4121), True, 'import matplotlib.pyplot as plt\n'), ((4118, 4143), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 0.8, 0, 20]'], {}), '([0, 0.8, 0, 20])\n', (4126, 4143), True, 'import matplotlib.pyplot as plt\n'), ((4147, 4187), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction Labs"""'], {'fontsize': '(16)'}), "('Fraction Labs', fontsize=16)\n", (4157, 4187), True, 'import matplotlib.pyplot as plt\n'), ((4191, 4220), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(16)'}), '(labelsize=16)\n', (4206, 4220), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4284), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Payoff"""'], {'color': '"""#bb8844"""', 'fontsize': '(16)'}), "('Expected Payoff', color='#bb8844', fontsize=16)\n", (4235, 4284), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4316), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(16)'}), '(labelsize=16)\n', (4302, 4316), True, 'import matplotlib.pyplot as plt\n'), ((4361, 4422), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0.4)', 'color': '"""#000000"""', 'lw': '(2)', 'linestyle': '"""dashed"""'}), "(x=0.4, color='#000000', lw=2, linestyle='dashed')\n", (4372, 4422), True, 'import matplotlib.pyplot as plt\n'), ((4430, 4479), 'matplotlib.pyplot.title', 'plt.title', (['"""Lab deck with 15 Copper"""'], {'fontsize': '(24)'}), "('Lab deck with 15 Copper', fontsize=24)\n", (4439, 4479), True, 'import matplotlib.pyplot as plt\n'), ((4555, 4583), 'numpy.zeros', 'np.zeros', (['(density, density)'], {}), '((density, density))\n', (4563, 4583), True, 'import numpy as np\n'), ((4598, 4626), 'numpy.zeros', 'np.zeros', (['(density, density)'], {}), '((density, density))\n', (4606, 4626), True, 'import numpy as np\n'), ((4638, 4666), 'numpy.zeros', 
'np.zeros', (['(density, density)'], {}), '((density, density))\n', (4646, 4666), True, 'import numpy as np\n'), ((4684, 4712), 'numpy.zeros', 'np.zeros', (['(density, density)'], {}), '((density, density))\n', (4692, 4712), True, 'import numpy as np\n'), ((4823, 4853), 'numpy.indices', 'np.indices', (['(density, density)'], {}), '((density, density))\n', (4833, 4853), True, 'import numpy as np\n'), ((4944, 4966), 'sqlite3.connect', 'lite.connect', (['"""sim.db"""'], {}), "('sim.db')\n", (4956, 4966), True, 'import sqlite3 as lite\n'), ((6036, 6061), 'numpy.array', 'np.array', (['[0, 0.25, 0.75]'], {}), '([0, 0.25, 0.75])\n', (6044, 6061), True, 'import numpy as np\n'), ((6075, 6100), 'numpy.array', 'np.array', (['[1, 0.25, 0.25]'], {}), '([1, 0.25, 0.25])\n', (6083, 6100), True, 'import numpy as np\n'), ((6111, 6141), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (6123, 6141), True, 'import matplotlib.pyplot as plt\n'), ((5027, 5057), 'numpy.ndindex', 'np.ndindex', (['(density, density)'], {}), '((density, density))\n', (5037, 5057), True, 'import numpy as np\n'), ((5874, 5931), 'numpy.logical_and', 'np.logical_and', (['(V_coord > 0.25)', '(V_coord > 1 - S_coord * 3)'], {}), '(V_coord > 0.25, V_coord > 1 - S_coord * 3)\n', (5888, 5931), True, 'import numpy as np\n'), ((5952, 6008), 'numpy.logical_or', 'np.logical_or', (['(V_coord < 0.25)', '(V_coord < 1 - S_coord * 3)'], {}), '(V_coord < 0.25, V_coord < 1 - S_coord * 3)\n', (5965, 6008), True, 'import numpy as np\n'), ((2928, 2937), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2935, 2937), True, 'import matplotlib.pyplot as plt\n')] |
"""
A quick script to write to a psrdada buffer in order to test a psrdada reader.
"""
import os
import subprocess
from time import sleep
import numpy as np
from psrdada import Writer
# Shared-memory key for the psrdada ring buffer: once as the hex string passed
# to the dada_db command line, once as the matching integer for the Writer.
KEY_STRING = 'adad'
KEY = 0xadad
NANT = 64  # number of antennas (previously 16)
NCHAN = 384  # number of frequency channels (previously 1536)
NPOL = 2  # number of polarizations
NBLS = NANT*(NANT+1)//2  # number of baselines, autocorrelations included
def main():
    """Write test visibilities into a psrdada ring buffer.

    Creates a psrdada buffer with ``dada_db``, launches the fringestopping
    reader as a subprocess logging to a file, then writes 48 pages of ramping
    float32 visibilities before tearing the buffer down again.
    """
    vis_temp = np.arange(NBLS*NCHAN*NPOL*2, dtype=np.float32)
    # Define the data rate, including the buffer size and the header size.
    samples_per_frame = 1
    header_size = 4096
    # 4 bytes per float32; the trailing *2 mirrors the *2 in vis_temp's length.
    buffer_size = int(4*NBLS*NPOL*NCHAN*samples_per_frame*2)
    assert buffer_size == vis_temp.nbytes, ("Sample data size and buffer "
                                            "size do not match.")
    # Create the buffer.
    os.system('dada_db -a {0} -b {1} -k {2}'.format(header_size, buffer_size,
                                                    KEY_STRING))
    print('Buffer created')
    # Start the reader. The log file handle can be closed in the parent right
    # after spawning (previously it was leaked): the child process inherits
    # its own copy of the file descriptor and keeps writing to it.
    read = 'python ./meridian_fringestop.py /home/ubuntu/data/ /home/ubuntu/proj/dsa110-shell/dsa110-meridian-fs/dsamfs/data/test_parameters.yaml /home/ubuntu/proj/dsa110-shell/dsa110-meridian-fs/dsamfs/data/test_header.txt'
    with open('/home/ubuntu/data/tmp/write.log', 'w') as read_log:
        _read_proc = subprocess.Popen(read, shell=True, stdout=read_log,
                                      stderr=read_log)
    print('Reader started')
    sleep(0.1)
    # Write to the buffer.
    writer = Writer(KEY)
    print('Writer created')
    for i in range(48):
        page = writer.getNextPage()
        data = np.asarray(page)
        data[...] = vis_temp.view(np.int8)
        if i < 9:
            writer.markFilled()
        else:
            # NOTE(review): pages 9-47 are all flagged end-of-data; presumably
            # intentional for this test — confirm against the reader's behavior.
            writer.markEndOfData()
        vis_temp += 1
        # Wait to allow the reader to clear pages.
        sleep(1)
    writer.disconnect()
    os.system('dada_db -d -k {0}'.format(KEY_STRING))
# Run the buffer-write test when executed as a script.
if __name__ == '__main__':
    main()
| [
"psrdada.Writer",
"subprocess.Popen",
"numpy.asarray",
"time.sleep",
"numpy.arange"
] | [((359, 411), 'numpy.arange', 'np.arange', (['(NBLS * NCHAN * NPOL * 2)'], {'dtype': 'np.float32'}), '(NBLS * NCHAN * NPOL * 2, dtype=np.float32)\n', (368, 411), True, 'import numpy as np\n'), ((1360, 1428), 'subprocess.Popen', 'subprocess.Popen', (['read'], {'shell': '(True)', 'stdout': 'read_log', 'stderr': 'read_log'}), '(read, shell=True, stdout=read_log, stderr=read_log)\n', (1376, 1428), False, 'import subprocess\n'), ((1495, 1505), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (1500, 1505), False, 'from time import sleep\n'), ((1546, 1557), 'psrdada.Writer', 'Writer', (['KEY'], {}), '(KEY)\n', (1552, 1557), False, 'from psrdada import Writer\n'), ((1661, 1677), 'numpy.asarray', 'np.asarray', (['page'], {}), '(page)\n', (1671, 1677), True, 'import numpy as np\n'), ((1896, 1904), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1901, 1904), False, 'from time import sleep\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Cours :
GTI770 — Systèmes intelligents et apprentissage machine
Projet :
Laboratoire 1 — Extraction de primitives
Étudiants :
Noms — Code permanent
Groupe :
GTI770-H18-0X
"""
import csv
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.utils import shuffle
from commons.exceptions.fileNotFoundException import FileNotFoundException
from commons.exceptions.unableToLoadDatasetException import UnableToLoadDatasetException
from commons.exceptions.validationSizeException import ValidationSizeException
from commons.helpers.dataset.dataset import DataSet
class SpamDataSetFeatureStrategy:
    """
    A class for handling spam data set files.

    Loads spam feature vectors and class labels from a ground-truth CSV file
    and splits them into training and validation DataSet objects.
    """

    def _is_positive(self, number):
        """ Validate that a number is positive or zero.

        Args:
            number: a floating point number.

        Returns:
            The number itself when it is greater than or equal to 0.0.

        Raises:
            ValidationSizeException: if the number is negative or cannot be
                compared to a float.
        """
        try:
            if number < 0.0:
                raise ValidationSizeException(
                    "Validation size must be a positive floating point number or equals to 0.0.")
            return number
        except (TypeError, AttributeError):
            # Python 3 raises TypeError (not AttributeError) when comparing
            # a non-numeric value to a float; catch both to be safe.
            raise ValidationSizeException("Validation size is not a valid floating point number.")

    def _is_type(self, number, type=np.float32):
        """ Verify that a numpy scalar has the expected dtype.

        Args:
            number: a numpy scalar (e.g. numpy.float32 value).
            type: the expected numpy dtype (default: numpy.float32).

        Returns:
            True if the dtype of ``number`` matches ``type``, False otherwise.

        Raises:
            ValidationSizeException: if ``number`` has no dtype attribute
                (i.e. is not a numpy scalar/array).
        """
        try:
            return number.dtype.num == np.dtype(type).num
        except AttributeError:
            raise ValidationSizeException("Validation size is not a valid floating point number.")

    def _create_datasets(self, spam_features, labels, validation_size):
        """ Split features/labels into training and validation DataSet objects.

        Args:
            spam_features: numpy array of feature vectors.
            labels: numpy array of (encoded) labels aligned with spam_features.
            validation_size: fraction of samples held out for validation.

        Returns:
            An object with ``train`` and ``valid`` DataSet attributes.
        """
        # Creates inner DataSets class.
        class DataSets(object):
            pass

        # Create an instance of a DataSets object.
        data_sets = DataSets()

        # Check that the parameter is a non-negative numpy float.
        self._is_type(validation_size)
        self._is_positive(validation_size)

        # Calculates the training set size; the validation set is the remainder.
        train_size = int(np.round((1 - validation_size) * spam_features.shape[0]))

        # Split at a single cut point so the two sets never overlap.
        # The previous code sliced the validation set with [-validation_size:],
        # which returned the WHOLE array when validation_size rounded to 0 and
        # could overlap the training set when the rounded sizes did not sum
        # to the number of samples.
        train_spam_features = spam_features[:train_size]
        train_labels = labels[:train_size]
        validation_spam_features = spam_features[train_size:]
        validation_labels = labels[train_size:]

        # Create the data sets.
        data_sets.train = DataSet().withFeatures(train_spam_features).withLabels(train_labels)
        data_sets.valid = DataSet().withFeatures(validation_spam_features).withLabels(validation_labels)

        return data_sets

    def _load_feature_vector(self, csv_file, one_hot):
        """ Read the feature vectors and labels of the spam samples.

        Args:
            csv_file (str): path to the CSV file containing the ground truth.
            one_hot: if True, one-hot encode the class labels.

        Returns:
            A tuple (features, encoded_labels), shuffled in unison.

        Raises:
            FileNotFoundException: if the CSV file does not exist.
        """
        # Lists holding the spam feature rows and their class labels.
        spam_vectors = list()
        labels = list()
        try:
            # Open the ground truth file.
            with open(csv_file, mode="r") as ground_truth_csv:
                reader = csv.reader(ground_truth_csv, delimiter=",", quoting=csv.QUOTE_NONNUMERIC)
                # Each row holds 57 features followed by the class label.
                for row in reader:
                    spam_vectors.append(row[0:57])
                    labels.append(row[57])
        except FileNotFoundError:
            raise FileNotFoundException("CSV file not found. Please enter in parameter a valid CSV file.")

        # Transforms the feature list into a numpy array.
        spam_vectors = np.array(spam_vectors)

        # Reshape vertically the label vector and transform it in a numpy array.
        labels = np.array(labels).reshape(-1, 1)

        # Fit label encoder and return the classes encoded as integers.
        label_encoder = LabelEncoder()
        encoded_labels = label_encoder.fit_transform(labels)

        if one_hot:
            # One-hot encode the integer labels (dense output).
            one_hot_encoder = OneHotEncoder(sparse=False)
            encoded_labels = encoded_labels.reshape(-1, 1)
            encoded_labels = one_hot_encoder.fit_transform(encoded_labels)

        # Shuffle features and labels in unison.
        features, encoded_labels = shuffle(spam_vectors, encoded_labels)

        return features, encoded_labels

    def load_dataset(self, csv_file, one_hot, validation_size):
        """ Load a data set.

        Args:
            csv_file: a CSV file containing ground truth and file names.
            one_hot: if True, one-hot encode the class labels.
            validation_size: fraction of samples held out for validation.

        Returns:
            An object with ``train`` and ``valid`` DataSet attributes.

        Raises:
            UnableToLoadDatasetException: wrapping any failure during loading.
        """
        try:
            feature_vectors, labels = self._load_feature_vector(csv_file, one_hot)
            return self._create_datasets(feature_vectors, labels, validation_size)
        except Exception as e:
            raise UnableToLoadDatasetException("Unable to load spam data set with cause: " + str(e))
| [
"numpy.dtype",
"sklearn.preprocessing.LabelEncoder",
"commons.exceptions.validationSizeException.ValidationSizeException",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.utils.shuffle",
"commons.helpers.dataset.dataset.DataSet",
"numpy.array",
"commons.exceptions.fileNotFoundException.FileNotFoundExc... | [((4587, 4609), 'numpy.array', 'np.array', (['spam_vectors'], {}), '(spam_vectors)\n', (4595, 4609), True, 'import numpy as np\n'), ((4819, 4833), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4831, 4833), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((5420, 5457), 'sklearn.utils.shuffle', 'shuffle', (['spam_vectors', 'encoded_labels'], {}), '(spam_vectors, encoded_labels)\n', (5427, 5457), False, 'from sklearn.utils import shuffle\n'), ((2560, 2616), 'numpy.round', 'np.round', (['((1 - validation_size) * spam_features.shape[0])'], {}), '((1 - validation_size) * spam_features.shape[0])\n', (2568, 2616), True, 'import numpy as np\n'), ((2648, 2698), 'numpy.round', 'np.round', (['(validation_size * spam_features.shape[0])'], {}), '(validation_size * spam_features.shape[0])\n', (2656, 2698), True, 'import numpy as np\n'), ((5101, 5128), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (5114, 5128), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((1193, 1304), 'commons.exceptions.validationSizeException.ValidationSizeException', 'ValidationSizeException', (['"""Validation size must be a positive floating point number or equals to 0.0."""'], {}), "(\n 'Validation size must be a positive floating point number or equals to 0.0.'\n )\n", (1216, 1304), False, 'from commons.exceptions.validationSizeException import ValidationSizeException\n'), ((1426, 1511), 'commons.exceptions.validationSizeException.ValidationSizeException', 'ValidationSizeException', (['"""Validation size is not a valid floating point number."""'], {}), "('Validation size is not a valid floating point number.'\n )\n", (1449, 1511), False, 'from commons.exceptions.validationSizeException import ValidationSizeException\n'), ((2003, 2088), 'commons.exceptions.validationSizeException.ValidationSizeException', 
'ValidationSizeException', (['"""Validation size is not a valid floating point number."""'], {}), "('Validation size is not a valid floating point number.'\n )\n", (2026, 2088), False, 'from commons.exceptions.validationSizeException import ValidationSizeException\n'), ((4071, 4144), 'csv.reader', 'csv.reader', (['ground_truth_csv'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_NONNUMERIC'}), "(ground_truth_csv, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)\n", (4081, 4144), False, 'import csv\n'), ((4416, 4509), 'commons.exceptions.fileNotFoundException.FileNotFoundException', 'FileNotFoundException', (['"""CSV file not found. Please enter in parameter a valid CSV file."""'], {}), "(\n 'CSV file not found. Please enter in parameter a valid CSV file.')\n", (4437, 4509), False, 'from commons.exceptions.fileNotFoundException import FileNotFoundException\n'), ((4709, 4725), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4717, 4725), True, 'import numpy as np\n'), ((1934, 1948), 'numpy.dtype', 'np.dtype', (['type'], {}), '(type)\n', (1942, 1948), True, 'import numpy as np\n'), ((3052, 3061), 'commons.helpers.dataset.dataset.DataSet', 'DataSet', ([], {}), '()\n', (3059, 3061), False, 'from commons.helpers.dataset.dataset import DataSet\n'), ((3147, 3156), 'commons.helpers.dataset.dataset.DataSet', 'DataSet', ([], {}), '()\n', (3154, 3156), False, 'from commons.helpers.dataset.dataset import DataSet\n')] |
# openCV import
import cv2
import mediapipe as mp
import numpy as np
max_num_hands = 1
# Mapping from class index to gesture name used by the training data.
gesture = {
    0:'fist', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five',
    ## class 11 is the middle-finger ('fy') gesture being collected here
    6:'six', 7:'rock', 8:'spiderman', 9:'yeah', 10:'ok', 11:'fy'
}
# MediaPipe hands model
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
hands = mp_hands.Hands(
    max_num_hands=max_num_hands,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)
# Gesture recognition data: existing training rows (angle features + label).
file = np.genfromtxt('Chapter02\data\gesture_train.csv', delimiter=',')
print(file.shape)
# Capture from the default webcam.
cap = cv2.VideoCapture(0)
# The dataset only grows on mouse clicks: rotate the middle-finger gesture
# in front of the camera and click ~10 times to collect the samples,
# one data row added per click.
def click(event, x, y, flags, param):
    """Mouse callback: on left click, append the latest angle sample to the dataset.

    Args:
        event: OpenCV mouse event code.
        x, y: click coordinates (unused).
        flags, param: additional OpenCV callback arguments (unused).
    """
    global data, file
    if event == cv2.EVENT_LBUTTONDOWN:
        # Stack the most recent (angles + label) row onto the collected data.
        file = np.vstack((file, data))
        print(file.shape)
cv2.namedWindow('Dataset')
cv2.setMouseCallback('Dataset', click)
# Main capture loop: detect hand landmarks, derive joint angles, and keep the
# latest sample in the module-level `data` so the click callback can record it.
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        continue
    # Mirror the frame and convert to RGB for MediaPipe.
    img = cv2.flip(img, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    result = hands.process(img)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    if result.multi_hand_landmarks is not None:
        for res in result.multi_hand_landmarks:
            # 21 hand landmarks, (x, y, z) each.
            joint = np.zeros((21, 3))
            for j, lm in enumerate(res.landmark):
                joint[j] = [lm.x, lm.y, lm.z]
            # Compute angles between joints
            v1 = joint[[0,1,2,3,0,5,6,7,0,9,10,11,0,13,14,15,0,17,18,19],:] # Parent joint
            v2 = joint[[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],:] # Child joint
            v = v2 - v1 # [20,3]
            # Normalize v
            v = v / np.linalg.norm(v, axis=1)[:, np.newaxis]
            # Get angle using arcos of dot product
            angle = np.arccos(np.einsum('nt,nt->n',
                v[[0,1,2,4,5,6,8,9,10,12,13,14,16,17,18],:], 
                v[[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19],:])) # [15,]
            angle = np.degrees(angle) # Convert radian to degree
            data = np.array([angle], dtype=np.float32)
            ## Append the ground-truth label (11) at the end of the angle data.
            data = np.append(data, 11)
            mp_drawing.draw_landmarks(img, res, mp_hands.HAND_CONNECTIONS)
    cv2.imshow('Dataset', img)
    if cv2.waitKey(1) == ord('q'):
        break
# Save the collected data.
# NOTE(review): adjust this output path to your own directory layout before running.
np.savetxt('Chapter02\data\gesture_train_fy.csv', file, delimiter=',')
| [
"cv2.setMouseCallback",
"cv2.flip",
"numpy.linalg.norm",
"cv2.imshow",
"numpy.append",
"numpy.array",
"cv2.waitKey",
"numpy.zeros",
"numpy.einsum",
"cv2.VideoCapture",
"numpy.savetxt",
"cv2.cvtColor",
"numpy.vstack",
"numpy.degrees",
"numpy.genfromtxt",
"cv2.namedWindow"
] | [((499, 565), 'numpy.genfromtxt', 'np.genfromtxt', (['"""Chapter02\\\\data\\\\gesture_train.csv"""'], {'delimiter': '""","""'}), "('Chapter02\\\\data\\\\gesture_train.csv', delimiter=',')\n", (512, 565), True, 'import numpy as np\n'), ((590, 609), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (606, 609), False, 'import cv2\n'), ((1097, 1123), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Dataset"""'], {}), "('Dataset')\n", (1112, 1123), False, 'import cv2\n'), ((1124, 1162), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Dataset"""', 'click'], {}), "('Dataset', click)\n", (1144, 1162), False, 'import cv2\n'), ((2651, 2723), 'numpy.savetxt', 'np.savetxt', (['"""Chapter02\\\\data\\\\gesture_train_fy.csv"""', 'file'], {'delimiter': '""","""'}), "('Chapter02\\\\data\\\\gesture_train_fy.csv', file, delimiter=',')\n", (2661, 2723), True, 'import numpy as np\n'), ((1256, 1272), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (1264, 1272), False, 'import cv2\n'), ((1283, 1319), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1295, 1319), False, 'import cv2\n'), ((1364, 1400), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (1376, 1400), False, 'import cv2\n'), ((2506, 2532), 'cv2.imshow', 'cv2.imshow', (['"""Dataset"""', 'img'], {}), "('Dataset', img)\n", (2516, 2532), False, 'import cv2\n'), ((824, 847), 'numpy.vstack', 'np.vstack', (['(file, data)'], {}), '((file, data))\n', (833, 847), True, 'import numpy as np\n'), ((2540, 2554), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2551, 2554), False, 'import cv2\n'), ((1518, 1535), 'numpy.zeros', 'np.zeros', (['(21, 3)'], {}), '((21, 3))\n', (1526, 1535), True, 'import numpy as np\n'), ((2239, 2256), 'numpy.degrees', 'np.degrees', (['angle'], {}), '(angle)\n', (2249, 2256), True, 'import numpy as np\n'), ((2304, 2339), 'numpy.array', 'np.array', (['[angle]'], {'dtype': 
'np.float32'}), '([angle], dtype=np.float32)\n', (2312, 2339), True, 'import numpy as np\n'), ((2405, 2424), 'numpy.append', 'np.append', (['data', '(11)'], {}), '(data, 11)\n', (2414, 2424), True, 'import numpy as np\n'), ((2063, 2210), 'numpy.einsum', 'np.einsum', (['"""nt,nt->n"""', 'v[[0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18], :]', 'v[[1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19], :]'], {}), "('nt,nt->n', v[[0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18\n ], :], v[[1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19], :])\n", (2072, 2210), True, 'import numpy as np\n'), ((1940, 1965), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (1954, 1965), True, 'import numpy as np\n')] |
import numpy as np
import copy
from scipy.interpolate import interp1d
from lenstronomy.LightModel.light_model import LightModel
__all__ = ['LightProfile']
class LightProfile(object):
    """
    class to deal with the light distribution for GalKin

    In particular, this class allows for:
     - (faster) interpolated calculation for a given profile (for a range that the Jeans equation is computed)
     - drawing 3d and 2d distributions from a given (spherical) profile
       (within bounds where the Jeans equation is expected to be accurate)
     - 2d projected profiles within the 3d integration range (truncated)
    """
    def __init__(self, profile_list, interpol_grid_num=2000, max_interpolate=1000, min_interpolate=0.001,
                 max_draw=None):
        """
        :param profile_list: list of light profiles for LightModel module (must support light_3d() functionalities)
        :param interpol_grid_num: int; number of interpolation steps (logarithmically between min and max value)
        :param max_interpolate: float; maximum interpolation of 3d light profile
        :param min_interpolate: float; minimum interpolate (and also drawing of light profile)
        :param max_draw: float; (optional) if set, draws up to this radius, else uses max_interpolate value
        """
        self.light_model = LightModel(light_model_list=profile_list)
        self._interp_grid_num = interpol_grid_num
        self._max_interpolate = max_interpolate
        self._min_interpolate = min_interpolate
        # the drawing range defaults to the interpolation range
        if max_draw is None:
            max_draw = max_interpolate
        self._max_draw = max_draw

    def light_3d(self, r, kwargs_list):
        """
        three-dimensional light profile

        :param r: 3d radius
        :param kwargs_list: list of keyword arguments of light profiles (see LightModule)
        :return: flux per 3d volume at radius r
        """
        light_3d = self.light_model.light_3d(r, kwargs_list)
        return light_3d

    def light_3d_interp(self, r, kwargs_list, new_compute=False):
        """
        interpolated three-dimensional light profile within bounds [min_interpolate, max_interpolate]
        in logarithmic units with interpol_grid_num numbers of interpolation steps

        :param r: 3d radius
        :param kwargs_list: list of keyword arguments of light profiles (see LightModule)
        :param new_compute: boolean, if True, re-computes the interpolation
         (becomes valid with updated kwargs_list argument)
        :return: flux per 3d volume at radius r
        """
        if not hasattr(self, '_f_light_3d') or new_compute is True:
            r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_interpolate), self._interp_grid_num)
            light_3d_array = self.light_model.light_3d(r_array, kwargs_list)
            # Floor the profile at a tiny but representable positive value.
            # The previous floor of 10**(-1000) underflows to 0.0 in double
            # precision, so np.log() returned -inf and corrupted the
            # interpolation nodes wherever the profile was non-positive.
            flux_floor = 1e-300
            light_3d_array[light_3d_array < flux_floor] = flux_floor
            # Interpolate log-flux vs log-radius; below range use the innermost
            # value, above range extrapolate with (log-)zero flux.
            f = interp1d(np.log(r_array), np.log(light_3d_array), fill_value=(np.log(light_3d_array[0]), -1000),
                         bounds_error=False)  # "extrapolate"
            self._f_light_3d = f
        return np.exp(self._f_light_3d(np.log(r)))

    def light_2d(self, R, kwargs_list):
        """
        projected light profile (integrated to infinity in the projected axis)

        :param R: projected 2d radius
        :param kwargs_list: list of keyword arguments of light profiles (see LightModule)
        :return: projected surface brightness
        """
        kwargs_light_circularized = self._circularize_kwargs(kwargs_list)
        return self.light_model.surface_brightness(R, 0, kwargs_light_circularized)

    def _circularize_kwargs(self, kwargs_list):
        """
        :param kwargs_list: list of keyword arguments of light profiles (see LightModule)
        :return: circularized arguments
        """
        # TODO make sure averaging is done azimuthally
        if not hasattr(self, '_kwargs_light_circularized'):
            kwargs_list_copy = copy.deepcopy(kwargs_list)
            kwargs_list_new = []
            for kwargs in kwargs_list_copy:
                # zero out the ellipticities and drop the centroid offsets
                if 'e1' in kwargs:
                    kwargs['e1'] = 0
                if 'e2' in kwargs:
                    kwargs['e2'] = 0
                kwargs_list_new.append({k: v for k, v in kwargs.items() if k not in ['center_x', 'center_y']})

            self._kwargs_light_circularized = kwargs_list_new
        return self._kwargs_light_circularized

    def _light_2d_finite_single(self, R, kwargs_list):
        """
        projected light profile (integrated to FINITE 3d boundaries from the max_interpolate)
        for a single float number of R

        :param R: projected 2d radius (between min_interpolate and max_interpolate)
        :param kwargs_list: list of keyword arguments of light profiles (see LightModule)
        :return: projected surface brightness
        """
        # logarithmic integral along the line of sight up to the 3d boundary
        stop = np.log10(np.maximum(np.sqrt(self._max_interpolate**2 - R**2), self._min_interpolate + 0.00001))
        x = np.logspace(start=np.log10(self._min_interpolate), stop=stop, num=self._interp_grid_num)
        r_array = np.sqrt(x**2 + R**2)
        flux_r = self.light_3d(r_array, kwargs_list)
        # constant step in log-space, converted to natural log
        dlog_r = (np.log10(x[2]) - np.log10(x[1])) * np.log(10)
        flux_r *= dlog_r * x
        flux_R = np.sum(flux_r)
        return flux_R * 2  # integral in both directions along the line of sight

    def light_2d_finite(self, R, kwargs_list):
        """
        projected light profile (integrated to FINITE 3d boundaries from the max_interpolate)

        :param R: projected 2d radius (between min_interpolate and max_interpolate
        :param kwargs_list: list of keyword arguments of light profiles (see LightModule)
        :return: projected surface brightness
        """
        kwargs_circ = self._circularize_kwargs(kwargs_list)
        n = len(np.atleast_1d(R))
        if n <= 1:
            return self._light_2d_finite_single(R, kwargs_circ)
        else:
            light_2d = np.zeros(n)
            for i, R_i in enumerate(R):
                light_2d[i] = self._light_2d_finite_single(R_i, kwargs_circ)
            return light_2d

    def draw_light_2d_linear(self, kwargs_list, n=1, new_compute=False):
        """
        constructs the CDF and draws from it random realizations of projected radii R
        The interpolation of the CDF is done in linear projected radius space

        :param kwargs_list: list of keyword arguments of light profiles (see LightModule)
        :param n: int; number of draws
        :param new_compute: boolean, if True, re-computes the interpolation
         (becomes valid with updated kwargs_list argument)
        :return: draw of projected radius for the given light profile distribution
        """
        if not hasattr(self, '_light_cdf') or new_compute is True:
            r_array = np.linspace(self._min_interpolate, self._max_draw, self._interp_grid_num)
            cum_sum = np.zeros_like(r_array)
            sum_light = 0
            # cumulative integral of 2*pi*R*I(R) (constants cancel on normalization)
            for i, r in enumerate(r_array):
                if i == 0:
                    cum_sum[i] = 0
                else:
                    sum_light += self.light_2d(r, kwargs_list) * r
                    cum_sum[i] = copy.deepcopy(sum_light)
            cum_sum_norm = cum_sum/cum_sum[-1]
            # invert the CDF: uniform deviate -> radius
            f = interp1d(cum_sum_norm, r_array)
            self._light_cdf = f
        cdf_draw = np.random.uniform(0., 1, n)
        r_draw = self._light_cdf(cdf_draw)
        return r_draw

    def draw_light_2d(self, kwargs_list, n=1, new_compute=False):
        """
        constructs the CDF and draws from it random realizations of projected radii R
        CDF is constructed in logarithmic projected radius spacing

        :param kwargs_list: light model keyword argument list
        :param n: int, number of draws per function call
        :param new_compute: re-computes the interpolated CDF
        :return: realization of projected radius following the distribution of the light model
        """
        if not hasattr(self, '_light_cdf_log') or new_compute is True:
            r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_draw), self._interp_grid_num)
            cum_sum = np.zeros_like(r_array)
            sum_light = 0
            # extra factor r accounts for the logarithmic bin spacing
            for i, r in enumerate(r_array):
                if i == 0:
                    cum_sum[i] = 0
                else:
                    sum_light += self.light_2d(r, kwargs_list) * r * r
                    cum_sum[i] = copy.deepcopy(sum_light)
            cum_sum_norm = cum_sum/cum_sum[-1]
            # invert the CDF in log-radius space
            f = interp1d(cum_sum_norm, np.log(r_array))
            self._light_cdf_log = f
        cdf_draw = np.random.uniform(0., 1, n)
        r_log_draw = self._light_cdf_log(cdf_draw)
        return np.exp(r_log_draw)

    def draw_light_3d(self, kwargs_list, n=1, new_compute=False):
        """
        constructs the CDF and draws from it random realizations of 3D radii r

        :param kwargs_list: light model keyword argument list
        :param n: int, number of draws per function call
        :param new_compute: re-computes the interpolated CDF
        :return: realization of projected radius following the distribution of the light model
        """
        if not hasattr(self, '_light_3d_cdf_log') or new_compute is True:
            r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_draw), self._interp_grid_num)
            dlog_r = np.log10(r_array[1]) - np.log10(r_array[0])
            # evaluate the integrand at mid-points of the logarithmic bins
            r_array_int = np.logspace(np.log10(self._min_interpolate) + dlog_r / 2, np.log10(self._max_draw) + dlog_r / 2, self._interp_grid_num)
            cum_sum = np.zeros_like(r_array)
            sum_light = 0
            # cumulative integral of 4*pi*r^2*rho(r) (constants cancel on normalization)
            for i, r in enumerate(r_array_int[:-1]):
                sum_light += self.light_3d(r, kwargs_list) * r**2 * (r_array[i+1] - r_array[i])
                cum_sum[i+1] = copy.deepcopy(sum_light)
            cum_sum_norm = cum_sum/cum_sum[-1]
            # invert the CDF in log-radius space
            f = interp1d(cum_sum_norm, np.log(r_array))
            self._light_3d_cdf_log = f
        cdf_draw = np.random.uniform(0., 1, n)
        r_log_draw = self._light_3d_cdf_log(cdf_draw)
        return np.exp(r_log_draw)

    def delete_cache(self):
        """
        deletes cached interpolation function of the CDF for a specific light profile

        :return: None
        """
        if hasattr(self, '_light_cdf_log'):
            del self._light_cdf_log
        if hasattr(self, '_light_cdf'):
            del self._light_cdf
        if hasattr(self, '_f_light_3d'):
            del self._f_light_3d
        if hasattr(self, '_kwargs_light_circularized'):
            del self._kwargs_light_circularized
| [
"numpy.log10",
"numpy.sqrt",
"copy.deepcopy",
"lenstronomy.LightModel.light_model.LightModel",
"numpy.log",
"scipy.interpolate.interp1d",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"numpy.random.uniform",
"numpy.zeros_like",
"numpy.atleast_1d"
] | [((1339, 1380), 'lenstronomy.LightModel.light_model.LightModel', 'LightModel', ([], {'light_model_list': 'profile_list'}), '(light_model_list=profile_list)\n', (1349, 1380), False, 'from lenstronomy.LightModel.light_model import LightModel\n'), ((5175, 5199), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + R ** 2)'], {}), '(x ** 2 + R ** 2)\n', (5182, 5199), True, 'import numpy as np\n'), ((5731, 5745), 'numpy.sum', 'np.sum', (['flux_r'], {}), '(flux_r)\n', (5737, 5745), True, 'import numpy as np\n'), ((8091, 8119), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1)', 'n'], {}), '(0.0, 1, n)\n', (8108, 8119), True, 'import numpy as np\n'), ((9377, 9405), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1)', 'n'], {}), '(0.0, 1, n)\n', (9394, 9405), True, 'import numpy as np\n'), ((9471, 9489), 'numpy.exp', 'np.exp', (['r_log_draw'], {}), '(r_log_draw)\n', (9477, 9489), True, 'import numpy as np\n'), ((10870, 10898), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1)', 'n'], {}), '(0.0, 1, n)\n', (10887, 10898), True, 'import numpy as np\n'), ((10967, 10985), 'numpy.exp', 'np.exp', (['r_log_draw'], {}), '(r_log_draw)\n', (10973, 10985), True, 'import numpy as np\n'), ((3993, 4019), 'copy.deepcopy', 'copy.deepcopy', (['kwargs_list'], {}), '(kwargs_list)\n', (4006, 4019), False, 'import copy\n'), ((5302, 5312), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (5308, 5312), True, 'import numpy as np\n'), ((6553, 6569), 'numpy.atleast_1d', 'np.atleast_1d', (['R'], {}), '(R)\n', (6566, 6569), True, 'import numpy as np\n'), ((6691, 6702), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (6699, 6702), True, 'import numpy as np\n'), ((7547, 7620), 'numpy.linspace', 'np.linspace', (['self._min_interpolate', 'self._max_draw', 'self._interp_grid_num'], {}), '(self._min_interpolate, self._max_draw, self._interp_grid_num)\n', (7558, 7620), True, 'import numpy as np\n'), ((7643, 7665), 'numpy.zeros_like', 'np.zeros_like', (['r_array'], {}), '(r_array)\n', 
(7656, 7665), True, 'import numpy as np\n'), ((8008, 8039), 'scipy.interpolate.interp1d', 'interp1d', (['cum_sum_norm', 'r_array'], {}), '(cum_sum_norm, r_array)\n', (8016, 8039), False, 'from scipy.interpolate import interp1d\n'), ((8913, 8935), 'numpy.zeros_like', 'np.zeros_like', (['r_array'], {}), '(r_array)\n', (8926, 8935), True, 'import numpy as np\n'), ((10359, 10381), 'numpy.zeros_like', 'np.zeros_like', (['r_array'], {}), '(r_array)\n', (10372, 10381), True, 'import numpy as np\n'), ((2666, 2697), 'numpy.log10', 'np.log10', (['self._min_interpolate'], {}), '(self._min_interpolate)\n', (2674, 2697), True, 'import numpy as np\n'), ((2699, 2730), 'numpy.log10', 'np.log10', (['self._max_interpolate'], {}), '(self._max_interpolate)\n', (2707, 2730), True, 'import numpy as np\n'), ((2932, 2947), 'numpy.log', 'np.log', (['r_array'], {}), '(r_array)\n', (2938, 2947), True, 'import numpy as np\n'), ((2949, 2971), 'numpy.log', 'np.log', (['light_3d_array'], {}), '(light_3d_array)\n', (2955, 2971), True, 'import numpy as np\n'), ((3154, 3163), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (3160, 3163), True, 'import numpy as np\n'), ((4980, 5024), 'numpy.sqrt', 'np.sqrt', (['(self._max_interpolate ** 2 - R ** 2)'], {}), '(self._max_interpolate ** 2 - R ** 2)\n', (4987, 5024), True, 'import numpy as np\n'), ((5086, 5117), 'numpy.log10', 'np.log10', (['self._min_interpolate'], {}), '(self._min_interpolate)\n', (5094, 5117), True, 'import numpy as np\n'), ((5267, 5281), 'numpy.log10', 'np.log10', (['x[2]'], {}), '(x[2])\n', (5275, 5281), True, 'import numpy as np\n'), ((5284, 5298), 'numpy.log10', 'np.log10', (['x[1]'], {}), '(x[1])\n', (5292, 5298), True, 'import numpy as np\n'), ((8809, 8840), 'numpy.log10', 'np.log10', (['self._min_interpolate'], {}), '(self._min_interpolate)\n', (8817, 8840), True, 'import numpy as np\n'), ((8842, 8866), 'numpy.log10', 'np.log10', (['self._max_draw'], {}), '(self._max_draw)\n', (8850, 8866), True, 'import numpy as np\n'), ((9305, 
9320), 'numpy.log', 'np.log', (['r_array'], {}), '(r_array)\n', (9311, 9320), True, 'import numpy as np\n'), ((10044, 10075), 'numpy.log10', 'np.log10', (['self._min_interpolate'], {}), '(self._min_interpolate)\n', (10052, 10075), True, 'import numpy as np\n'), ((10077, 10101), 'numpy.log10', 'np.log10', (['self._max_draw'], {}), '(self._max_draw)\n', (10085, 10101), True, 'import numpy as np\n'), ((10147, 10167), 'numpy.log10', 'np.log10', (['r_array[1]'], {}), '(r_array[1])\n', (10155, 10167), True, 'import numpy as np\n'), ((10170, 10190), 'numpy.log10', 'np.log10', (['r_array[0]'], {}), '(r_array[0])\n', (10178, 10190), True, 'import numpy as np\n'), ((10684, 10708), 'copy.deepcopy', 'copy.deepcopy', (['sum_light'], {}), '(sum_light)\n', (10697, 10708), False, 'import copy\n'), ((10795, 10810), 'numpy.log', 'np.log', (['r_array'], {}), '(r_array)\n', (10801, 10810), True, 'import numpy as np\n'), ((7920, 7944), 'copy.deepcopy', 'copy.deepcopy', (['sum_light'], {}), '(sum_light)\n', (7933, 7944), False, 'import copy\n'), ((9194, 9218), 'copy.deepcopy', 'copy.deepcopy', (['sum_light'], {}), '(sum_light)\n', (9207, 9218), False, 'import copy\n'), ((10229, 10260), 'numpy.log10', 'np.log10', (['self._min_interpolate'], {}), '(self._min_interpolate)\n', (10237, 10260), True, 'import numpy as np\n'), ((10275, 10299), 'numpy.log10', 'np.log10', (['self._max_draw'], {}), '(self._max_draw)\n', (10283, 10299), True, 'import numpy as np\n'), ((2985, 3010), 'numpy.log', 'np.log', (['light_3d_array[0]'], {}), '(light_3d_array[0])\n', (2991, 3010), True, 'import numpy as np\n')] |
from numpy.core.arrayprint import format_float_positional
from old.model import ChannelInterfaceData
from re import S
import numpy as np
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import (
QComboBox,
QGridLayout,
QLabel,
QPushButton,
QVBoxLayout,
QWidget,
)
from config import CALIBRATION_VOLTAGE, CHANNEL_NAMES_IN, CHANNEL_NAMES_OUT, DEBUG_MODE
"""
Make new window
(assumes DAQ output is correct)
Step 1:
Hook up DAQ output into DAQ input directly
Step 2:
Run calibration
- go through each channel, see what it reads when output is known (0V, 1V, 3V)
- reset voltages on writer to saved params
- save the offsets
Step 3:
Hook up back onto coils
-Save and finish
"""
# TODO: if previous session has a sin wave, calibration will end up using that sin wave instead of
# the calibration voltage
class CalibrationWindow(QtGui.QMainWindow):
    """
    Calibrates the DAQ reader to ensure the correct offset for all channels
    """
    # data with offsets applied
    corrected_data = QtCore.pyqtSignal(object)
    # emitted once per calibration run with the per-channel offset list
    offsets_received = QtCore.pyqtSignal(object)
    def __init__(
        self, parent, writer, reader, write_channels, read_channels, saved_offsets
    ):
        """Build the calibration dialog.

        :param parent: parent Qt widget
        :param writer: DAQ output controller; must expose output_states,
            voltages, frequencies, pause() and resume()
        :param reader: DAQ input source exposing an ``incoming_data`` signal
        :param write_channels: DAQ analog-output channel numbers (ao*)
        :param read_channels: DAQ analog-input channel numbers (ai*)
        :param saved_offsets: previously stored per-channel offsets, in volts
        """
        super().__init__(parent)
        self.writer = writer
        self.reader = reader
        self.write_channels = write_channels
        self.read_channels = read_channels
        # True until a calibration run has finished; selects the button action
        self.calibration_state = True
        self.calibration_voltage = CALIBRATION_VOLTAGE
        # counts reader callbacks so the first (stale) buffer can be skipped
        self.handler_counter = 0
        self.offsets = saved_offsets
        # value represents the index of the output channel to take voltage readings from
        # (seeded with the ``int`` type as a placeholder; real indices are
        # assigned when init_ui() triggers each combo box's change handler)
        self.assigned_output = [int for x in CHANNEL_NAMES_IN]
        self.init_ui()
        self.calibration_btn.clicked.connect(self.on_calibration_btn_clicked)
    def init_ui(self):
        """Create the channel-mapping grid, labels and the calibration button."""
        self.setWindowTitle("Calibration")
        self.mainbox = QWidget(self)
        self.setCentralWidget(self.mainbox)
        layout = QVBoxLayout(self)
        layout.setSpacing(5)
        self.mainbox.setLayout(layout)
        grid_layout = QGridLayout(self)
        # one output-channel selector per input channel
        self.sel_output_ch_combo = [QComboBox(self) for i in CHANNEL_NAMES_IN]
        grid_layout.addWidget(QLabel("Input Ch."), 0, 0)
        grid_layout.addWidget(QLabel("Output Ch."), 0, 1)
        grid_layout.addWidget(QLabel("Offsets"), 0, 2)
        for i, ch_in in enumerate(CHANNEL_NAMES_IN):
            for j, ch_out in enumerate(CHANNEL_NAMES_OUT):
                item = ch_out + " (ao" + str(self.write_channels[j]) + ")"
                self.sel_output_ch_combo[i].addItem(item, userData=i)
            # set -1 to trigger event handler for all combo boxes on creation
            self.sel_output_ch_combo[i].setCurrentIndex(-1)
            # first argument of handler when connected is index of selection (indicated by _).
            # ignore so the selected combobox qt object can be passed instead
            handler = lambda _, combo=self.sel_output_ch_combo[i]: self.on_output_channel_selected(combo)
            self.sel_output_ch_combo[i].currentIndexChanged.connect(handler)
            self.sel_output_ch_combo[i].setCurrentIndex(0)
            # NOTE(review): this list is rebuilt on every outer-loop iteration,
            # so only the labels of the final rebuild end up in
            # self.offsets_label while earlier rows' on-screen QLabels came
            # from earlier rebuilds -- consider hoisting this out of the loop
            self.offsets_label = [
                QLabel(str(self.offsets[i]) + "V")
                for i, ch in enumerate(CHANNEL_NAMES_IN)
            ]
            grid_layout.addWidget(
                QLabel(ch_in + " (ai" + str(self.read_channels[i]) + ")"), i + 1, 0
            )
            grid_layout.addWidget(self.sel_output_ch_combo[i], i + 1, 1)
            grid_layout.addWidget(self.offsets_label[i], i + 1, 2)
        # make associations between daq input channel and the daq out channel it is receiving voltage from
        self.calibration_voltage_label = QLabel(
            "Calibration Voltage: {}V\n".format(self.calibration_voltage)
        )
        self.calibration_btn = QPushButton("Start Calibration")
        instructions = "\nEnsure the input DAQ channels are connected to the corresponding"
        instructions += "\noutput DAQ channels before starting calibration\n"
        instructions += "\n(Exit to skip calibration)\n"
        layout.addWidget(self.calibration_voltage_label)
        layout.addLayout(grid_layout)
        layout.addWidget(QLabel(instructions))
        layout.addWidget(self.calibration_btn)
    # combo = contains the info for the selected output DAQ we are reading from
    # combo.currentData() = input channel that we are assigning to
    def on_output_channel_selected(self, combo):
        """Record which output channel feeds the input channel of ``combo``."""
        self.assigned_output[combo.currentData()] = combo.currentIndex()
        print(self.assigned_output)
    def on_calibration_btn_clicked(self):
        """Start a calibration run, or close the window once it is finished."""
        if self.calibration_state:
            self.run_calibration()
        else:
            print("Exited Calibration")
            self.close()
    # handler that is called when reader takes in a buffer
    def on_data_collected(self, data):
        """Compute per-channel offsets from a reader buffer and restore the writer.

        The first buffer after connecting is skipped (it may still contain
        pre-calibration samples); the second buffer is averaged per channel.
        """
        self.handler_counter += 1
        # allow the DAQ to clear its buffer before taking in voltage
        if self.handler_counter < 2:
            return
        self.handler_counter = 0
        print("Calibration data received")
        # collect mean of data in buffer, and apply offset to plotter
        for i, ch in enumerate(CHANNEL_NAMES_IN):
            index = self.assigned_output[i]
            # offset = expected calibration voltage minus what was measured
            self.offsets[i] = self.calibration_voltage - np.mean(data[index])
            self.offsets_label[i].setText(str(self.offsets[i]) + "V")
        print("Offset:", self.offsets)
        self.offsets_received.emit(self.offsets)
        self.calibration_btn.setText("Finish Calibration")
        self.calibration_state = False
        # writer.pause will make one more call to this handler once paused
        # disconnect before writer can make another call to this handler
        self.reader.incoming_data.disconnect(self.on_data_collected)
        self.writer.pause() # will end up emitting on_data_collected again
        # restore the writer settings saved in run_calibration()
        self.writer.output_states = self.saved_writer_states
        self.writer.voltages = self.saved_writer_voltages
        self.writer.frequencies = self.saved_writer_frequencies
    def run_calibration(self):
        """Save the writer state and drive every output at the calibration voltage."""
        print("Calibration Started")
        # connect to reader to get input
        self.reader.incoming_data.connect(self.on_data_collected)
        self.saved_writer_states = self.writer.output_states
        self.saved_writer_frequencies = self.writer.frequencies
        self.saved_writer_voltages = self.writer.voltages
        # set calibration voltages
        for i, ch in enumerate(CHANNEL_NAMES_OUT):
            self.writer.output_states[i] = True
            self.writer.voltages[i] = self.calibration_voltage
            self.writer.frequencies[i] = 0
        self.writer.resume()
    # handler takes input from reader and then emits the calibrated data
    # maybe put in another object
    def apply_calibration(self, data):
        """Add the stored offset to each channel's data and emit the result."""
        corrected = [chan_data + self.offsets[i] for i, chan_data in enumerate(data)]
        self.corrected_data.emit(corrected)
| [
"PyQt5.QtWidgets.QWidget",
"numpy.mean",
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QPushButton"
] | [((1025, 1050), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['object'], {}), '(object)\n', (1042, 1050), False, 'from PyQt5 import QtCore, QtGui\n'), ((1074, 1099), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['object'], {}), '(object)\n', (1091, 1099), False, 'from PyQt5 import QtCore, QtGui\n'), ((1906, 1919), 'PyQt5.QtWidgets.QWidget', 'QWidget', (['self'], {}), '(self)\n', (1913, 1919), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((1981, 1998), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self'], {}), '(self)\n', (1992, 1998), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((2090, 2107), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self'], {}), '(self)\n', (2101, 2107), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((3876, 3908), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Start Calibration"""'], {}), "('Start Calibration')\n", (3887, 3908), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((2144, 2159), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (2153, 2159), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((2218, 2237), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Input Ch."""'], {}), "('Input Ch.')\n", (2224, 2237), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((2275, 2295), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Output Ch."""'], {}), "('Output Ch.')\n", (2281, 2295), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((2333, 2350), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Offsets"""'], {}), "('Offsets')\n", (2339, 2350), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, 
QPushButton, QVBoxLayout, QWidget\n'), ((4258, 4278), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['instructions'], {}), '(instructions)\n', (4264, 4278), False, 'from PyQt5.QtWidgets import QComboBox, QGridLayout, QLabel, QPushButton, QVBoxLayout, QWidget\n'), ((5382, 5402), 'numpy.mean', 'np.mean', (['data[index]'], {}), '(data[index])\n', (5389, 5402), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import torch
from torchimage.utils import NdSpec
from torchimage.padding import Padder
from torchimage.pooling import AvgPoolNd
from torchimage.padding.utils import same_padding_width
from torchimage.shapes.conv_like import n_original_elements_1d, n_original_elements_nd
class MyTestCase(unittest.TestCase):
    """Checks n_original_elements_{1d,nd} against brute-force ground truths.

    The 1d ground truth pads a ones-tensor with zeros and counts, per sliding
    window, how many original (non-padding) elements the window covers.
    """

    @staticmethod
    def n_orignal_elements_gt(in_size, pad_width, kernel_size, stride):
        """Brute-force 1d ground truth: per-window count of original elements."""
        # NOTE(review): "orignal" is a typo, kept to avoid changing the API.
        x = torch.ones(in_size, dtype=torch.int32)
        padder = Padder(pad_width=pad_width, mode="constant", constant_values=0)
        x = padder.forward(x, axes=None)
        # each unfolded window sums its 1-entries == count of original elements
        return x.unfold(0, size=kernel_size, step=stride).sum(dim=-1).tolist()

    def test_n_original_elements_1d(self):
        """Random 1d configurations against the brute-force ground truth."""
        for i in range(20):
            in_size = np.random.randint(1, 7)
            pad_width = np.random.randint(0, 7, size=2).tolist()
            kernel_size = np.random.randint(1, 7)
            stride = np.random.randint(1, 7)
            if sum(pad_width) + in_size < kernel_size:
                # Bug fix: this was ``return``, which silently aborted the
                # whole test at the first infeasible configuration;
                # ``continue`` only skips it and keeps sampling.
                continue
            with self.subTest(in_size=in_size, pad_width=pad_width, kernel_size=kernel_size, stride=stride):
                expected = self.n_orignal_elements_gt(in_size=in_size, pad_width=pad_width, kernel_size=kernel_size, stride=stride)
                actual = n_original_elements_1d(in_size=in_size, pad_width=pad_width, kernel_size=kernel_size, stride=stride)
                self.assertEqual(expected, actual)

    def test_n_original_elements_nd(self):
        """Random nd configurations vs. AvgPoolNd with count_include_pad."""
        # average pooling, such that border cases (for instance) has smaller re-normalization weight
        for i in range(10):
            ndim = np.random.randint(1, 6)
            shape = np.random.randint(10, 30, size=ndim).tolist()
            kernel_size = np.random.randint(2, 8, size=ndim)
            stride = np.random.randint(2, 8, size=ndim)
            pad_width = NdSpec.apply(
                same_padding_width,
                NdSpec(kernel_size), NdSpec(stride), NdSpec(shape)
            )
            # pooling ones (padding included) and rescaling by the window
            # volume recovers the count of original elements per window
            old_layer = AvgPoolNd(kernel_size, stride=stride, same_padder=Padder(mode="constant", constant_values=0), count_include_pad=True)
            expected = torch.round(old_layer.forward(torch.ones(tuple(shape)), axes=None) * np.prod(kernel_size)).to(dtype=torch.int32)
            actual = n_original_elements_nd(in_size=shape, pad_width=pad_width,
                                            kernel_size=kernel_size, stride=stride)
            with self.subTest(i=i):
                self.assertTrue(torch.equal(actual, expected))
# Allow the test suite to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.prod",
"torchimage.shapes.conv_like.n_original_elements_nd",
"torch.equal",
"numpy.random.randint",
"torchimage.padding.Padder",
"torchimage.shapes.conv_like.n_original_elements_1d",
"unittest.main",
"torchimage.utils.NdSpec",
"torch.ones"
] | [((2754, 2769), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2767, 2769), False, 'import unittest\n'), ((449, 487), 'torch.ones', 'torch.ones', (['in_size'], {'dtype': 'torch.int32'}), '(in_size, dtype=torch.int32)\n', (459, 487), False, 'import torch\n'), ((505, 568), 'torchimage.padding.Padder', 'Padder', ([], {'pad_width': 'pad_width', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(pad_width=pad_width, mode='constant', constant_values=0)\n", (511, 568), False, 'from torchimage.padding import Padder\n'), ((783, 806), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)'], {}), '(1, 7)\n', (800, 806), True, 'import numpy as np\n'), ((898, 921), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)'], {}), '(1, 7)\n', (915, 921), True, 'import numpy as np\n'), ((943, 966), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)'], {}), '(1, 7)\n', (960, 966), True, 'import numpy as np\n'), ((1815, 1838), 'numpy.random.randint', 'np.random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (1832, 1838), True, 'import numpy as np\n'), ((1931, 1965), 'numpy.random.randint', 'np.random.randint', (['(2)', '(8)'], {'size': 'ndim'}), '(2, 8, size=ndim)\n', (1948, 1965), True, 'import numpy as np\n'), ((1987, 2021), 'numpy.random.randint', 'np.random.randint', (['(2)', '(8)'], {'size': 'ndim'}), '(2, 8, size=ndim)\n', (2004, 2021), True, 'import numpy as np\n'), ((2479, 2582), 'torchimage.shapes.conv_like.n_original_elements_nd', 'n_original_elements_nd', ([], {'in_size': 'shape', 'pad_width': 'pad_width', 'kernel_size': 'kernel_size', 'stride': 'stride'}), '(in_size=shape, pad_width=pad_width, kernel_size=\n kernel_size, stride=stride)\n', (2501, 2582), False, 'from torchimage.shapes.conv_like import n_original_elements_1d, n_original_elements_nd\n'), ((1433, 1538), 'torchimage.shapes.conv_like.n_original_elements_1d', 'n_original_elements_1d', ([], {'in_size': 'in_size', 'pad_width': 'pad_width', 'kernel_size': 'kernel_size', 'stride': 
'stride'}), '(in_size=in_size, pad_width=pad_width, kernel_size=\n kernel_size, stride=stride)\n', (1455, 1538), False, 'from torchimage.shapes.conv_like import n_original_elements_1d, n_original_elements_nd\n'), ((2113, 2132), 'torchimage.utils.NdSpec', 'NdSpec', (['kernel_size'], {}), '(kernel_size)\n', (2119, 2132), False, 'from torchimage.utils import NdSpec\n'), ((2134, 2148), 'torchimage.utils.NdSpec', 'NdSpec', (['stride'], {}), '(stride)\n', (2140, 2148), False, 'from torchimage.utils import NdSpec\n'), ((2150, 2163), 'torchimage.utils.NdSpec', 'NdSpec', (['shape'], {}), '(shape)\n', (2156, 2163), False, 'from torchimage.utils import NdSpec\n'), ((831, 862), 'numpy.random.randint', 'np.random.randint', (['(0)', '(7)'], {'size': '(2)'}), '(0, 7, size=2)\n', (848, 862), True, 'import numpy as np\n'), ((1859, 1895), 'numpy.random.randint', 'np.random.randint', (['(10)', '(30)'], {'size': 'ndim'}), '(10, 30, size=ndim)\n', (1876, 1895), True, 'import numpy as np\n'), ((2253, 2295), 'torchimage.padding.Padder', 'Padder', ([], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(mode='constant', constant_values=0)\n", (2259, 2295), False, 'from torchimage.padding import Padder\n'), ((2690, 2719), 'torch.equal', 'torch.equal', (['actual', 'expected'], {}), '(actual, expected)\n', (2701, 2719), False, 'import torch\n'), ((2414, 2434), 'numpy.prod', 'np.prod', (['kernel_size'], {}), '(kernel_size)\n', (2421, 2434), True, 'import numpy as np\n')] |
"""
This is a module for inverse distance weighting (IDW) Spatial Interpolation
"""
import numpy as np
from ..utils.distance import haversine, euclidean
from ..base import Base
class IDW(Base):
    """Inverse distance weighting (IDW) spatial interpolator.

    Each interpolated value is a weighted average of the known samples with
    weights proportional to 1 / distance**exponent. See
    https://en.wikipedia.org/wiki/Inverse_distance_weighting for background.

    Parameters
    ----------
    exponent : positive float, optional
        Rate at which influence decays away from the source data points;
        the larger the exponent, the faster the fall-off. Defaults to 2.

    Attributes
    ----------
    interpolated_values : {array-like, 2D matrix}, shape(resolution, resolution)
        Interpolated values when interpolating over a grid rather than over
        an explicit set of points.

    X : {array-like, 2D matrix}, shape(n_samples, 2)
        Coordinates of the samples available for interpolation.

    y : array-like, shape(n_samples,)
        Values observed at the coordinates in X.

    result : array_like, shape(n_to_predict, )
        Interpolated values when interpolating over a given set of points.
    """

    def __init__(self, exponent=2, resolution="standard", coordinate_type="Euclidean"):
        super().__init__(resolution, coordinate_type)
        self.exponent = exponent
        self.interpolated_values = None
        self.X = None
        self.y = None
        self.result = None
        # choose the distance metric matching the declared coordinate system
        if self.coordinate_type == 'Euclidean':
            self.distance = euclidean
        elif self.coordinate_type == 'Geographic':
            self.distance = haversine
        else:
            raise NotImplementedError(
                "Only Geographic and Euclidean Coordinates are available")

    def _fit(self, X, y):
        """Remember the training coordinates and values (internal use only)."""
        self.X = X
        self.y = y
        return self

    def _predict_grid(self, x1lim, x2lim):
        """Interpolate over a regular grid spanning the given axis limits.
        Not intended to be called directly.
        """
        x1min, x1max = x1lim
        x2min, x2max = x2lim
        axis1 = np.linspace(x1min, x1max, self.resolution)
        axis2 = np.linspace(x2min, x2max, self.resolution)
        mesh1, mesh2 = np.meshgrid(axis1, axis2)
        query_points = np.array([mesh1.ravel(), mesh2.ravel()]).T
        return self._predict(query_points)

    def _predict(self, X):
        """Interpolate at the query points X. Not intended to be called directly.

        Query points that coincide with a training coordinate are assigned
        the stored ground-truth value instead of the weighted average.
        """
        dist = self.distance(self.X, X)
        weights = 1 / np.power(dist, self.exponent)
        result = (weights * self.y[:, None]).sum(axis=0) / weights.sum(axis=0)
        for idx in range(X.shape[0]):
            match = np.equal(X[idx], self.X).all(axis=1)
            if match.any():
                result[idx] = (self.y * match).sum()
        return result
| [
"numpy.meshgrid",
"numpy.linspace",
"numpy.equal",
"numpy.power"
] | [((2308, 2350), 'numpy.linspace', 'np.linspace', (['x1min', 'x1max', 'self.resolution'], {}), '(x1min, x1max, self.resolution)\n', (2319, 2350), True, 'import numpy as np\n'), ((2364, 2406), 'numpy.linspace', 'np.linspace', (['x2min', 'x2max', 'self.resolution'], {}), '(x2min, x2max, self.resolution)\n', (2375, 2406), True, 'import numpy as np\n'), ((2424, 2443), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (2435, 2443), True, 'import numpy as np\n'), ((2748, 2777), 'numpy.power', 'np.power', (['dist', 'self.exponent'], {}), '(dist, self.exponent)\n', (2756, 2777), True, 'import numpy as np\n'), ((2981, 3003), 'numpy.equal', 'np.equal', (['X[i]', 'self.X'], {}), '(X[i], self.X)\n', (2989, 3003), True, 'import numpy as np\n')] |
# from utils import plotLearning
import os
import gym
import numpy as np
from multiagent.tf_ddpg.ddpg_agent import Agent
if __name__ == '__main__':
    # Pin CUDA device enumeration to PCI bus order and expose only GPU 0.
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    env_name = 'BipedalWalker-v3'
    env = gym.make(env_name)
    # DDPG agent; input_dims=[24] and n_actions=4 match BipedalWalker's
    # observation/action dimensions -- presumably; confirm against the env.
    agent = Agent(alpha=0.0001, beta=0.001, input_dims=[24], tau=0.001, env=env, n_actions=4,
                  chkpt_dir='tmp/ddpg/' + env_name)
    np.random.seed(0)
    # resume from previously saved checkpoints (assumes they exist -- TODO confirm)
    agent.load_models()
    score_history = []
    # train until the agent's episode counter reaches 5000
    while agent.count < 5000:
        agent.count += 1
        obs = env.reset()
        done = False
        score = 0
        while not done:
            act = agent.choose_action(obs)
            new_state, reward, done, info = env.step(act)
            # store the transition (done flag as 0/1), then do one learn step
            agent.remember(obs, act, reward, new_state, int(done))
            agent.learn()
            score += reward
            obs = new_state
            # env.render()
        score_history.append(score)
        saving_step = 200
        # periodic progress report and checkpoint save
        if agent.count % saving_step == 0:
            print('episode ', agent.count, ', mean score %.2f' % np.mean(score_history[-saving_step:]),
                  'training 1000 games avg %.2f' % np.mean(score_history[-1000:]))
            agent.save_models()
| [
"numpy.mean",
"numpy.random.seed",
"gym.make",
"multiagent.tf_ddpg.ddpg_agent.Agent"
] | [((292, 310), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (300, 310), False, 'import gym\n'), ((324, 443), 'multiagent.tf_ddpg.ddpg_agent.Agent', 'Agent', ([], {'alpha': '(0.0001)', 'beta': '(0.001)', 'input_dims': '[24]', 'tau': '(0.001)', 'env': 'env', 'n_actions': '(4)', 'chkpt_dir': "('tmp/ddpg/' + env_name)"}), "(alpha=0.0001, beta=0.001, input_dims=[24], tau=0.001, env=env,\n n_actions=4, chkpt_dir='tmp/ddpg/' + env_name)\n", (329, 443), False, 'from multiagent.tf_ddpg.ddpg_agent import Agent\n'), ((463, 480), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (477, 480), True, 'import numpy as np\n'), ((1123, 1160), 'numpy.mean', 'np.mean', (['score_history[-saving_step:]'], {}), '(score_history[-saving_step:])\n', (1130, 1160), True, 'import numpy as np\n'), ((1213, 1243), 'numpy.mean', 'np.mean', (['score_history[-1000:]'], {}), '(score_history[-1000:])\n', (1220, 1243), True, 'import numpy as np\n')] |
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from collections import defaultdict
import numpy as np
# takes DNA sequence, outputs one-hot-encoded matrix with rows A, T, G, C
def one_hot_encoder(sequence):
    """One-hot encode a DNA string into a (4, len(sequence)) int8 matrix.

    Row order is A, T, G, C; upper- and lower-case bases are both accepted.
    Returns the string "contains_N" instead of a matrix as soon as any
    character outside {A, T, G, C} is encountered.
    """
    row_of = {'a': 0, 't': 1, 'g': 2, 'c': 3}
    encoded = np.zeros((4, len(sequence)), dtype='int8')
    for col, base in enumerate(sequence):
        row = row_of.get(base.lower())
        if row is None:
            return "contains_N"
        encoded[row][col] = 1
    return encoded
# read names and positions from a BED file
def read_bed(filename):
    """Parse a whitespace-delimited BED-like file.

    Each line must hold exactly four fields: name, chromosome, start, stop.
    Returns a defaultdict mapping each peak name to a list of
    (chromosome, start, stop) tuples with integer coordinates.
    """
    positions = defaultdict(list)
    with open(filename) as bed_file:
        for record in bed_file:
            name, chrom, start, stop = record.split()
            interval = (chrom, int(start), int(stop))
            positions[name].append(interval)
    return positions
# parse fasta file and turn into dictionary
def read_fasta(genome_dir, num_chr):
    """Load per-chromosome FASTA files into a single record dictionary.

    :param genome_dir: path prefix containing files named ``chr<N>.fa``;
        must end with a separator since the file name is appended by plain
        string concatenation.
    :param num_chr: one past the last chromosome number to load, i.e.
        chromosomes ``1 .. num_chr - 1`` are read (NOTE(review): upper bound
        is exclusive -- confirm this matches the callers' expectations).
    :return: dict mapping record id to the parsed SeqRecord objects
    """
    chr_dict = dict()
    for chr_num in range(1, num_chr):  # renamed from ``chr`` (shadowed builtin)
        chr_file_path = genome_dir + "chr{}.fa".format(chr_num)
        # use a context manager so the FASTA handle is closed promptly;
        # the previous version leaked the open file object
        with open(chr_file_path) as handle:
            chr_dict.update(SeqIO.to_dict(SeqIO.parse(handle, 'fasta')))
    return chr_dict
#get sequences for peaks from reference genome
def get_sequences(positions, chr_dict, num_chr):
    """Extract peak sequences from the reference genome and one-hot encode them.

    :param positions: dict mapping peak name -> list of (chr, start, stop)
        tuples (as produced by ``read_bed``)
    :param chr_dict: dict mapping chromosome name -> parsed FASTA record
        (as produced by ``read_fasta``); only the record's ``.seq`` is used
    :param num_chr: one past the last chromosome number considered valid
    :return: (stacked one-hot arrays, stacked lower-case sequence strings,
              list of invalid ids, stacked peak names)
    """
    one_hot_seqs = []
    peak_seqs = []
    invalid_ids = []
    peak_names = []
    # only peaks on chromosomes chr1 .. chr{num_chr-1} are kept
    target_chr = ['chr{}'.format(i) for i in range(1, num_chr)]
    for name in positions:
        for (chr, start, stop) in positions[name]:
            if chr in target_chr:
                chr_seq = chr_dict[chr].seq
                # assumes 1-based inclusive coordinates, hence start - 1 -- TODO confirm
                peak_seq = str(chr_seq)[start - 1:stop].lower()
                one_hot_seq = one_hot_encoder(peak_seq)
                if isinstance(one_hot_seq, np.ndarray): # it is valid sequence
                    peak_names.append(name)
                    peak_seqs.append(peak_seq)
                    one_hot_seqs.append(one_hot_seq)
                else:
                    # one_hot_encoder returned "contains_N": record as invalid.
                    # NOTE(review): name[20:] presumably strips a fixed-length
                    # prefix from the peak name -- verify against the inputs
                    invalid_ids.append(name[20:])
            else:
                invalid_ids.append(name[20:])
    one_hot_seqs = np.stack(one_hot_seqs)
    peak_seqs = np.stack(peak_seqs)
    peak_names = np.stack(peak_names)
    return one_hot_seqs, peak_seqs, invalid_ids, peak_names
def format_intensities(intensity_file, invalid_ids):
    """Read a whitespace-delimited intensity table (header row skipped).

    Rows containing a stray SUB control character ('\\x1a') are dropped.
    Note: ``invalid_ids`` is accepted for API compatibility but is not
    used by this function.

    :return: (np.ndarray of per-peak intensity columns as strings,
              np.ndarray of peak names)
    """
    activities = []
    names = []
    with open(intensity_file) as table:
        next(table, None)  # skip the header line
        for line in table:
            columns = line.split()
            if '\x1a' in columns:
                continue
            names.append(columns[0])
            activities.append(columns[1:])
    return np.stack(activities), np.stack(names)
| [
"numpy.stack",
"numpy.zeros",
"collections.defaultdict"
] | [((273, 303), 'numpy.zeros', 'np.zeros', (['(4, l)'], {'dtype': '"""int8"""'}), "((4, l), dtype='int8')\n", (281, 303), True, 'import numpy as np\n'), ((714, 731), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (725, 731), False, 'from collections import defaultdict\n'), ((2127, 2149), 'numpy.stack', 'np.stack', (['one_hot_seqs'], {}), '(one_hot_seqs)\n', (2135, 2149), True, 'import numpy as np\n'), ((2166, 2185), 'numpy.stack', 'np.stack', (['peak_seqs'], {}), '(peak_seqs)\n', (2174, 2185), True, 'import numpy as np\n'), ((2203, 2223), 'numpy.stack', 'np.stack', (['peak_names'], {}), '(peak_names)\n', (2211, 2223), True, 'import numpy as np\n'), ((2754, 2779), 'numpy.stack', 'np.stack', (['cell_type_array'], {}), '(cell_type_array)\n', (2762, 2779), True, 'import numpy as np\n'), ((2797, 2817), 'numpy.stack', 'np.stack', (['peak_names'], {}), '(peak_names)\n', (2805, 2817), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import copy
import itertools
from classTestify import Testify
# this class is to testify whether the input matrices are suitable, i.e., every user can
# get every file from senders (min(user_demands_file) > 0)
from classCapability import Capability
# this class is to output the [K*J] matrix named as 'demands_sender', where
# demands_sender[k][j] == 1 indicates that the demand of user_k can be fulfilled
# by sender_j. (otherwise, demands_sender[k][j] == 0)
from classTable import Table
# this class is to ouput the matrix named as 'capablility_table', where
# capability_table[0] = [[set of capable single_sender],[set of capable double_senders],.....]
# collected the capable senders of different union size for the first delivery task
from class2ndMethod import SecondMethod
# this class is to output the dictionary as 'track', where track['DS_1'] == 34
# indicates that the first delivery task is assigned to the double_sender_union
# ---{sender_3, sender_4}. (assignment_phase by 2nd method)
from class2Randr import SecondRate
# this class is to output the R and r by 2nd method (delivery_phase).
# use xxx.[key-Tab] to find more outputs!
from class1stRandr import FirstRate
# this class is to output the R and r by 1st method. Since no maximum matching
# is used in 1st method, so we combine the assignment_phase and delivery_phase
# together in this class. use xxx.[key-Tab] to find more outputs!
from classFenpei import HopcroftKarp
class GeneralizedCodedCaching(object):
r"""
    In the multi-sender multi-user system, we have
- I files, J senders and K users, the cache size of user is M
INPUT:
- ''demands'' -- [K*I] matrix: which user is asking for which file
- ''distribution'' -- [I*J] matrix: which file is stored by which sender
- ''connection'' -- [J*K] matrix: which sender is connected to which user
"""
    def __init__(self, demands, distribution, connection):
        """Store the three system description matrices.

        :param demands: [K*I] matrix -- which user is asking for which file
        :param distribution: [I*J] matrix -- which file is stored by which sender
        :param connection: [J*K] matrix -- which sender is connected to which user
        """
        # double-underscore (name-mangled) attributes keep the inputs private
        self.__demands = demands
        self.__distribution = distribution
        self.__connection = connection
def calculater(self):
K = self.__demands.shape[0]
I = self.__demands.shape[1]
J = self.__distribution.shape[1]
a = Testify(self.__distribution, self.__connection)
user_demands_file = a.testify_phase()
b = Capability(self.__demands, self.__distribution, self.__connection)
demands_sender = b.capability_matrix().tolist()
self.R_1 = []
self.R_2 = []
self.r_1 = []
self.r_2 = []
for M in range(I+1): # M belongs to [0,1,2,...,I]
t = int(M*K/I)
T = itertools.combinations(range(K), t)
files = [f for f in T]
file_part = len(files)
packet_size = 1/file_part
if M == 0: # we do maximum matching for the assignment if M=0
assignment_M_0 = dict()
for user in range(len(demands_sender)):
sender_recorder = []
for sender in range(len(demands_sender[user])):
if demands_sender[user][sender] == 1:
sender_recorder.append(sender+1)
#print(sender_recorder)
assignment_M_0['DS_'+str(user+1)] = set(sender_recorder)
R = 0
r = 1 # every user get his required file from one of its capable senders,
# i.e., r_max = 1, r_min = 0.
while assignment_M_0 != {}:
assignment_result_M_0 = HopcroftKarp(copy.deepcopy(assignment_M_0)).maximum_matching()
for keys in assignment_result_M_0:
if type(keys) != int:
assignment_M_0.pop(keys)
R = R + 1
self.R_1.append(R)
self.R_2.append(R)
self.r_1.append(r)
self.r_2.append(r)
elif M == I:
R = 0
r = 0
self.R_1.append(R)
self.R_2.append(R)
self.r_1.append(r)
self.r_2.append(r)
else:
# for the 1st method
method_1 = FirstRate(demands_sender, t)
rate_pair_1 = method_1.required_rate() #[R, r] for the first method
self.R_1.append(rate_pair_1[0]*packet_size)
self.r_1.append(rate_pair_1[1]*packet_size)
#*****************************************************
# for the 2nd method
method_2_step_1 = Table(demands_sender, K, J, M)
capability_table = method_2_step_1.table_list() # capablitiy_table is a list [[[{},{},...],...],...]
method_2_step_2 = SecondMethod(capability_table)
track = method_2_step_2.assignment_phase() # track is a list of dict.
method_2_step_3 = SecondRate(demands_sender, track, t)
rate_pair_2 = method_2_step_3.required_rate() # [R, r] for the second method
self.R_2.append(rate_pair_2[0]*packet_size)
self.r_2.append(rate_pair_2[1]*packet_size)
def draw_picture(self):
# At first, set the axis for M
cach_size = []
for m in range(self.__demands.shape[1] + 1): # I+1
cach_size.append(m)
# Then, draw the system performance facing different cache size(M)
plt.subplot(121)
plt.plot(cach_size, self.R_1, "go-", label="$R_1$", linewidth=2)
plt.plot(cach_size, self.R_2, "rv-", label="$R_2$")
plt.axis([0, self.__demands.shape[1]+0.2 , 0, self.__demands.shape[1]+0.2])
plt.legend()
plt.xlabel("Cache size M (F-bits)")
plt.ylabel("Performance Analysis (F-bits)")
plt.title("Maximum required transmission rate of senders")
plt.subplot(122)
plt.plot(cach_size, self.r_1, 'go-', label='$r_1$', linewidth=2)
plt.plot(cach_size, self.r_2, 'rv-', label='$r_2$')
plt.axis([0, self.__demands.shape[1]+0.2 , 0, self.__demands.shape[1]+0.2])
plt.legend()
plt.xlabel("Cache size M (F-bits)")
plt.ylabel("Performance Analysis (F-bits)")
plt.title("Maximum required transmission rate through links")
plt.show()
#********************************************************************
if __name__ == "__main__":
    # Demo run on the network topology of Fig. 4.6 and Fig. 4.7.
    # demands[k][i] == 1       <=>  user_(k+1) requests file_(i+1)
    demands = np.array([[0, 1, 0],
                        [1, 0, 0],
                        [0, 0, 1]])
    # distribution[i][j] == 1  <=>  file_(i+1) is stored at sender_(j+1)
    distribution = np.array([[1, 1, 1],
                             [1, 1, 1],
                             [1, 1, 1]])
    # connection[j][k] == 1    <=>  sender_(j+1) is linked to user_(k+1)
    connection = np.array([[0, 1, 1],
                           [1, 0, 1],
                           [1, 1, 0]])
    # Any other consistent triple of matrices works too; for instance the
    # topology of Fig. 4.2:
    #   demands = np.array([[1, 0, 0, 0],
    #                       [0, 1, 0, 0],
    #                       [0, 0, 1, 0],
    #                       [0, 0, 0, 1]])
    #   distribution = np.array([[1, 0, 0],
    #                            [1, 0, 1],
    #                            [0, 1, 1],
    #                            [0, 1, 0]])
    #   connection = np.array([[1, 0, 0, 1],
    #                          [0, 1, 1, 1],
    #                          [1, 1, 1, 1]])
    # Fig. 4.3 / 4.4: demands = 3x3 identity, distribution and connection
    # both 3x3 all-ones.
    # Fig. 4.10: demands = 6x6 identity, distribution = 6x4 all-ones,
    #   connection = np.array([[1, 1, 1, 0, 0, 0],
    #                          [1, 0, 0, 1, 1, 0],
    #                          [0, 1, 0, 1, 0, 1],
    #                          [0, 0, 1, 0, 1, 1]])
    system = GeneralizedCodedCaching(demands, distribution, connection)
    system.calculater()
    system.draw_picture()
| [
"classTable.Table",
"matplotlib.pyplot.title",
"classTestify.Testify",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"class2Randr.SecondRate",
"numpy.array",
"classCapability.Capability",
"copy.deepcopy",
"class2ndMethod.SecondMethod",
"matplotlib.pyplot.axi... | [((6645, 6688), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 1]])\n', (6653, 6688), True, 'import numpy as np\n'), ((6828, 6871), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n', (6836, 6871), True, 'import numpy as np\n'), ((6996, 7039), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (7004, 7039), True, 'import numpy as np\n'), ((2257, 2304), 'classTestify.Testify', 'Testify', (['self.__distribution', 'self.__connection'], {}), '(self.__distribution, self.__connection)\n', (2264, 2304), False, 'from classTestify import Testify\n'), ((2364, 2430), 'classCapability.Capability', 'Capability', (['self.__demands', 'self.__distribution', 'self.__connection'], {}), '(self.__demands, self.__distribution, self.__connection)\n', (2374, 2430), False, 'from classCapability import Capability\n'), ((5594, 5610), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (5605, 5610), True, 'import matplotlib.pyplot as plt\n'), ((5619, 5683), 'matplotlib.pyplot.plot', 'plt.plot', (['cach_size', 'self.R_1', '"""go-"""'], {'label': '"""$R_1$"""', 'linewidth': '(2)'}), "(cach_size, self.R_1, 'go-', label='$R_1$', linewidth=2)\n", (5627, 5683), True, 'import matplotlib.pyplot as plt\n'), ((5692, 5743), 'matplotlib.pyplot.plot', 'plt.plot', (['cach_size', 'self.R_2', '"""rv-"""'], {'label': '"""$R_2$"""'}), "(cach_size, self.R_2, 'rv-', label='$R_2$')\n", (5700, 5743), True, 'import matplotlib.pyplot as plt\n'), ((5752, 5830), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, self.__demands.shape[1] + 0.2, 0, self.__demands.shape[1] + 0.2]'], {}), '([0, self.__demands.shape[1] + 0.2, 0, self.__demands.shape[1] + 0.2])\n', (5760, 5830), True, 'import matplotlib.pyplot as plt\n'), ((5836, 5848), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5846, 
5848), True, 'import matplotlib.pyplot as plt\n'), ((5857, 5892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cache size M (F-bits)"""'], {}), "('Cache size M (F-bits)')\n", (5867, 5892), True, 'import matplotlib.pyplot as plt\n'), ((5901, 5944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Performance Analysis (F-bits)"""'], {}), "('Performance Analysis (F-bits)')\n", (5911, 5944), True, 'import matplotlib.pyplot as plt\n'), ((5953, 6011), 'matplotlib.pyplot.title', 'plt.title', (['"""Maximum required transmission rate of senders"""'], {}), "('Maximum required transmission rate of senders')\n", (5962, 6011), True, 'import matplotlib.pyplot as plt\n'), ((6021, 6037), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (6032, 6037), True, 'import matplotlib.pyplot as plt\n'), ((6046, 6110), 'matplotlib.pyplot.plot', 'plt.plot', (['cach_size', 'self.r_1', '"""go-"""'], {'label': '"""$r_1$"""', 'linewidth': '(2)'}), "(cach_size, self.r_1, 'go-', label='$r_1$', linewidth=2)\n", (6054, 6110), True, 'import matplotlib.pyplot as plt\n'), ((6119, 6170), 'matplotlib.pyplot.plot', 'plt.plot', (['cach_size', 'self.r_2', '"""rv-"""'], {'label': '"""$r_2$"""'}), "(cach_size, self.r_2, 'rv-', label='$r_2$')\n", (6127, 6170), True, 'import matplotlib.pyplot as plt\n'), ((6179, 6257), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, self.__demands.shape[1] + 0.2, 0, self.__demands.shape[1] + 0.2]'], {}), '([0, self.__demands.shape[1] + 0.2, 0, self.__demands.shape[1] + 0.2])\n', (6187, 6257), True, 'import matplotlib.pyplot as plt\n'), ((6263, 6275), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6273, 6275), True, 'import matplotlib.pyplot as plt\n'), ((6284, 6319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cache size M (F-bits)"""'], {}), "('Cache size M (F-bits)')\n", (6294, 6319), True, 'import matplotlib.pyplot as plt\n'), ((6328, 6371), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Performance Analysis (F-bits)"""'], {}), 
"('Performance Analysis (F-bits)')\n", (6338, 6371), True, 'import matplotlib.pyplot as plt\n'), ((6380, 6441), 'matplotlib.pyplot.title', 'plt.title', (['"""Maximum required transmission rate through links"""'], {}), "('Maximum required transmission rate through links')\n", (6389, 6441), True, 'import matplotlib.pyplot as plt\n'), ((6453, 6463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6461, 6463), True, 'import matplotlib.pyplot as plt\n'), ((4349, 4377), 'class1stRandr.FirstRate', 'FirstRate', (['demands_sender', 't'], {}), '(demands_sender, t)\n', (4358, 4377), False, 'from class1stRandr import FirstRate\n'), ((4727, 4757), 'classTable.Table', 'Table', (['demands_sender', 'K', 'J', 'M'], {}), '(demands_sender, K, J, M)\n', (4732, 4757), False, 'from classTable import Table\n'), ((4911, 4941), 'class2ndMethod.SecondMethod', 'SecondMethod', (['capability_table'], {}), '(capability_table)\n', (4923, 4941), False, 'from class2ndMethod import SecondMethod\n'), ((5063, 5099), 'class2Randr.SecondRate', 'SecondRate', (['demands_sender', 'track', 't'], {}), '(demands_sender, track, t)\n', (5073, 5099), False, 'from class2Randr import SecondRate\n'), ((3638, 3667), 'copy.deepcopy', 'copy.deepcopy', (['assignment_M_0'], {}), '(assignment_M_0)\n', (3651, 3667), False, 'import copy\n')] |
#################################################################################
# Deep Visual-Semantic Quantization for Efficient Image Retrieval #
# Authors: <NAME>, <NAME>, <NAME>, <NAME> #
# Contact: <EMAIL> #
##################################################################################
import os
import random
import shutil
import time
from datetime import datetime
from math import ceil
import numpy as np
import tensorflow as tf
from sklearn.cluster import MiniBatchKMeans
from architecture import img_alexnet_layers
from evaluation import MAPs_CQ
from .util import Dataset
class DVSQ(object):
    """Deep Visual-Semantic Quantization model (TensorFlow 1.x graph API).

    Builds an AlexNet-based image encoder whose output lives in a word-vector
    semantic space (dimension ``output_dim``, asserted to be 300 for the two
    cosine losses), quantized by a compositional codebook ``C`` made of
    ``n_subspace`` subspaces with ``n_subcenter`` centers each. Training
    alternates SGD on the network with ICM updates of the binary codes and
    closed-form updates of the centers.
    """
    def __init__(self, config):
        """Read hyper-parameters from ``config`` (a dict), build the whole
        TF graph (placeholders, codebook, ICM assignment ops, losses, train
        op) and start a session.
        """
        # Initialize setting
        print("initializing")
        np.set_printoptions(precision=4)
        # stage: 0 during training; validation() feeds stage=1 (switches the
        # AlexNet layers to their evaluation path inside img_alexnet_layers).
        self.stage = tf.placeholder_with_default(tf.constant(0), [])
        self.device = config['device']
        self.output_dim = config['output_dim']
        self.n_class = config['label_dim']
        self.subspace_num = config['n_subspace']
        self.subcenter_num = config['n_subcenter']
        self.code_batch_size = config['code_batch_size']
        self.cq_lambda = config['cq_lambda']
        self.max_iter_update_Cb = config['max_iter_update_Cb']
        self.max_iter_update_b = config['max_iter_update_b']
        self.batch_size = config['batch_size']
        self.val_batch_size = config['val_batch_size']
        self.max_iter = config['max_iter']
        self.img_model = config['img_model']
        self.loss_type = config['loss_type']
        self.learning_rate = config['learning_rate']
        self.learning_rate_decay_factor = config['learning_rate_decay_factor']
        self.decay_step = config['decay_step']
        self.finetune_all = config['finetune_all']
        self.wordvec_dict = config['wordvec_dict']
        # Run identifier used for both the saved model and tensorboard logs.
        self.file_name = 'lr_{}_cqlambda_{}_subspace_num_{}_dataset_{}'.format(
            self.learning_rate,
            self.cq_lambda,
            self.subspace_num,
            config['dataset'])
        self.save_dir = os.path.join(
            config['save_dir'], self.file_name + '.npy')
        self.log_dir = config['log_dir']
        # Setup session
        print("launching session")
        configProto = tf.ConfigProto()
        configProto.gpu_options.allow_growth = True
        configProto.allow_soft_placement = True
        self.sess = tf.Session(config=configProto)
        # Create variables and placeholders
        with tf.device(self.device):
            self.img = tf.placeholder(tf.float32, [None, 256, 256, 3])
            self.img_label = tf.placeholder(tf.float32, [None, self.n_class])
            self.model_weights = config['model_weights']
            self.img_last_layer, self.deep_param_img, self.train_layers, self.train_last_layer = self.load_model()
            # TODO
            # Codebook C: (subspace_num * subcenter_num) x output_dim,
            # initialized uniformly in [-1, 1]; rows are stacked per subspace.
            self.C = tf.Variable(tf.random_uniform([self.subspace_num * self.subcenter_num, self.output_dim],
                                                  minval=-1, maxval=1, dtype=tf.float32, name='centers'))
            self.deep_param_img['C'] = self.C
            # Centers shared in different modalities (image & text)
            # Binary codes for different modalities (image & text)
            self.img_output_all = tf.placeholder(
                tf.float32, [None, self.output_dim])
            self.img_b_all = tf.placeholder(
                tf.float32, [None, self.subspace_num * self.subcenter_num])
            self.b_img = tf.placeholder(
                tf.float32, [None, self.subspace_num * self.subcenter_num])
            # ICM sub-graph inputs: the subspace index m being updated, the
            # current one-hot codes of subspace m, all codes, and the batch
            # of network outputs to quantize.
            self.ICM_m = tf.placeholder(tf.int32, [])
            self.ICM_b_m = tf.placeholder(
                tf.float32, [None, self.subcenter_num])
            self.ICM_b_all = tf.placeholder(
                tf.float32, [None, self.subcenter_num * self.subspace_num])
            self.ICM_X = tf.placeholder(
                tf.float32, [self.code_batch_size, self.output_dim])
            # Rows of C belonging to subspace m.
            self.ICM_C_m = tf.slice(
                self.C, [self.ICM_m * self.subcenter_num, 0], [self.subcenter_num, self.output_dim])
            # Residual = X - (full reconstruction) + (subspace m's share),
            # i.e. what subspace m alone must explain.
            self.ICM_X_residual = tf.add(tf.subtract(self.ICM_X, tf.matmul(
                self.ICM_b_all, self.C)), tf.matmul(self.ICM_b_m, self.ICM_C_m))
            ICM_X_expand = tf.expand_dims(self.ICM_X_residual, 1) # N * 1 * D
            ICM_C_m_expand = tf.expand_dims(self.ICM_C_m, 0) # 1 * M * D
            # N*sc*D * D*n
            word_dict = tf.constant(np.loadtxt(
                self.wordvec_dict), dtype=tf.float32)
            # Project each (residual - center) difference onto the word
            # vectors; distances are measured in the semantic space.
            ICM_word_dict = tf.reshape(
                tf.matmul(
                    tf.reshape(
                        ICM_X_expand - ICM_C_m_expand,
                        [self.code_batch_size * self.subcenter_num, self.output_dim]),
                    tf.transpose(word_dict)),
                [self.code_batch_size, self.subcenter_num, self.n_class])
            ICM_sum_squares = tf.reduce_sum(
                tf.square(ICM_word_dict), reduction_indices=2)
            # Pick, per sample, the subcenter with the smallest error.
            ICM_best_centers = tf.argmin(ICM_sum_squares, 1)
            self.ICM_best_centers_one_hot = tf.one_hot(
                ICM_best_centers, self.subcenter_num, dtype=tf.float32)
            self.global_step = tf.Variable(0, trainable=False)
            self.train_op = self.apply_loss_function(self.global_step)
            self.sess.run(tf.global_variables_initializer())
        return
    def load_model(self):
        """Build the CNN encoder; only 'alexnet' is supported.

        Returns the tuple produced by ``img_alexnet_layers``:
        (last layer tensor, param dict, train layers, last-layer vars).
        """
        if self.img_model == 'alexnet':
            img_output = img_alexnet_layers(
                self.img, self.batch_size, self.output_dim,
                self.stage, self.model_weights, val_batch_size=self.val_batch_size)
        else:
            raise Exception('cannot use such CNN model as ' + self.img_model)
        return img_output
    def save_model(self, model_file=None):
        """Evaluate every tensor in ``deep_param_img`` (including C) and
        save the resulting dict to ``model_file`` (default: self.save_dir).
        """
        if model_file is None:
            model_file = self.save_dir
        model = {}
        for layer in self.deep_param_img:
            model[layer] = self.sess.run(self.deep_param_img[layer])
        print("saving model to %s" % model_file)
        folder = os.path.dirname(model_file)
        if os.path.exists(folder) is False:
            os.makedirs(folder)
        np.save(model_file, np.array(model))
        return
    def save_codes(self, database, query, C, model_file=None):
        """Save features, AQD reconstructions (codes @ C) and labels of the
        database and query sets to an .npy file.

        ``database``/``query`` are dataset objects exposing ``output``,
        ``codes`` and ``label`` arrays (see Dataset usage in train_cq).
        """
        if model_file is None:
            model_file = self.model_weights + "_codes.npy"
        model = {
            'db_features': database.output,
            'db_reconstr': np.dot(database.codes, C),
            'db_label': database.label,
            'val_features': query.output,
            'val_reconstr': np.dot(query.codes, C),
            'val_label': query.label,
        }
        print("saving codes to %s" % model_file)
        np.save(model_file, np.array(model))
        return
    def apply_loss_function(self, global_step):
        """Build ``cos_loss`` (fixed- or adaptive-margin cosine loss),
        ``cq_loss`` (quantization error measured in the word-vector space,
        scaled by the schedulable variable ``q_lambda``), their sum
        ``loss``, summaries, and return the momentum-SGD train op.
        """
        # loss function
        if self.loss_type == 'cos_margin_multi_label':
            assert self.output_dim == 300
            word_dict = tf.constant(np.loadtxt(
                self.wordvec_dict), dtype=tf.float32)
            # NOTE(review): self.margin_param is never assigned anywhere in
            # this class, so this branch raises AttributeError as written --
            # confirm where the margin is supposed to come from (config?).
            margin_param = tf.constant(self.margin_param, dtype=tf.float32)
            # N: batchsize, L: label_dim, D: 300
            # img_label: N * L
            # word_dic: L * D
            # v_label: N * L * D
            v_label = tf.multiply(tf.expand_dims(
                self.img_label, 2), tf.expand_dims(word_dict, 0))
            # img_last: N * D
            # ip_1: N * L
            ip_1 = tf.reduce_sum(tf.multiply(
                tf.expand_dims(self.img_last_layer, 1), v_label), 2)
            # mod_1: N * L
            v_label_mod = tf.multiply(tf.expand_dims(
                tf.ones([self.batch_size, self.n_class]), 2), tf.expand_dims(word_dict, 0))
            mod_1 = tf.sqrt(tf.multiply(tf.expand_dims(tf.reduce_sum(tf.square(
                self.img_last_layer), 1), 1), tf.reduce_sum(tf.square(v_label_mod), 2)))
            # cos_1: N * L  (cosine to the labels the image carries)
            cos_1 = tf.div(ip_1, mod_1)
            ip_2 = tf.matmul(self.img_last_layer, word_dict, transpose_b=True)
            # multiply ids to inner product
            def reduce_shaper(t):
                # Row-sum t and keep it as a column vector (shape [N, 1]).
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_2 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(
                self.img_last_layer)), reduce_shaper(tf.square(word_dict)), transpose_b=True))
            # cos_2: N * L  (cosine to every label in the dictionary)
            cos_2 = tf.div(ip_2, mod_2)
            # cos - cos: N * L * L
            cos_cos_1 = tf.subtract(margin_param, tf.subtract(
                tf.expand_dims(cos_1, 2), tf.expand_dims(cos_2, 1)))
            # we need to let the wrong place be 0
            cos_cos = tf.multiply(cos_cos_1, tf.expand_dims(self.img_label, 2))
            # Hinge: only positive margin violations contribute.
            cos_loss = tf.reduce_sum(tf.maximum(
                tf.constant(0, dtype=tf.float32), cos_cos))
            self.cos_loss = tf.div(cos_loss, tf.multiply(tf.constant(
                self.n_class, dtype=tf.float32), tf.reduce_sum(self.img_label)))
        elif self.loss_type == 'cos_softmargin_multi_label':
            assert self.output_dim == 300
            word_dict = tf.constant(np.loadtxt(
                self.wordvec_dict), dtype=tf.float32)
            # N: batchsize, L: label_dim, D: 300
            # img_label: N * L
            # word_dic: L * D
            # v_label: N * L * D
            # img_last: N * D
            ip_2 = tf.matmul(self.img_last_layer, word_dict, transpose_b=True)
            # multiply ids to inner product
            def reduce_shaper(t):
                # Row-sum t and keep it as a column vector (shape [N, 1]).
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_2 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(
                self.img_last_layer)), reduce_shaper(tf.square(word_dict)), transpose_b=True))
            # cos_2: N * L
            cos_2 = tf.div(ip_2, mod_2)
            # word_dic: L * D
            # ip_3: L * L
            # compute soft margin
            ip_3 = tf.matmul(word_dict, word_dict, transpose_b=True)
            # use word_dic to avoid 0 in /
            mod_3 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(word_dict)), reduce_shaper(
                tf.square(word_dict)), transpose_b=True))
            # Adaptive margin: 1 - cosine similarity between label vectors.
            margin_param = tf.subtract(tf.constant(
                1.0, dtype=tf.float32), tf.div(ip_3, mod_3))
            # cos - cos: N * L * L
            cos_cos_1 = tf.subtract(tf.expand_dims(margin_param, 0), tf.subtract(
                tf.expand_dims(cos_2, 2), tf.expand_dims(cos_2, 1)))
            # we need to let the wrong place be 0
            cos_cos = tf.multiply(cos_cos_1, tf.expand_dims(self.img_label, 2))
            cos_loss = tf.reduce_sum(tf.maximum(
                tf.constant(0, dtype=tf.float32), cos_cos))
            self.cos_loss = tf.div(cos_loss, tf.multiply(tf.constant(
                self.n_class, dtype=tf.float32), tf.reduce_sum(self.img_label)))
        # Plain quantization error ||x - bC||^2 (kept for reference/debug).
        self.precq_loss_img = tf.reduce_mean(tf.reduce_sum(
            tf.square(tf.subtract(self.img_last_layer, tf.matmul(self.b_img, self.C))), 1))
        word_dict = tf.constant(np.loadtxt(
            self.wordvec_dict), dtype=tf.float32)
        # Quantization error projected onto the word vectors (the loss
        # actually optimized, matching the ICM assignment criterion).
        self.cq_loss_img = tf.reduce_mean(tf.reduce_sum(tf.square(tf.matmul(tf.subtract(
            self.img_last_layer, tf.matmul(self.b_img, self.C)), tf.transpose(word_dict))), 1))
        # q_lambda is a Variable so train_cq can ramp it from 0 (epoch 0).
        self.q_lambda = tf.Variable(self.cq_lambda, name='cq_lambda')
        self.cq_loss = tf.multiply(self.q_lambda, self.cq_loss_img)
        self.loss = self.cos_loss + self.cq_loss
        # Last layer has a 10 times learning rate
        self.lr = tf.train.exponential_decay(
            self.learning_rate, global_step, self.decay_step, self.learning_rate_decay_factor, staircase=True)
        opt = tf.train.MomentumOptimizer(learning_rate=self.lr, momentum=0.9)
        grads_and_vars = opt.compute_gradients(
            self.loss, self.train_layers + self.train_last_layer)
        fcgrad, _ = grads_and_vars[-2]
        fbgrad, _ = grads_and_vars[-1]
        # for debug
        self.grads_and_vars = grads_and_vars
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('cos_loss', self.cos_loss)
        tf.summary.scalar('cq_loss', self.cq_loss)
        tf.summary.scalar('lr', self.lr)
        self.merged = tf.summary.merge_all()
        # Per-layer LR multipliers: biases x2, last fc weights x10 / bias x20.
        if self.finetune_all:
            return opt.apply_gradients([(grads_and_vars[0][0], self.train_layers[0]),
                                        (grads_and_vars[1][0]*2, self.train_layers[1]),
                                        (grads_and_vars[2][0], self.train_layers[2]),
                                        (grads_and_vars[3][0]*2, self.train_layers[3]),
                                        (grads_and_vars[4][0], self.train_layers[4]),
                                        (grads_and_vars[5][0]*2, self.train_layers[5]),
                                        (grads_and_vars[6][0], self.train_layers[6]),
                                        (grads_and_vars[7][0]*2, self.train_layers[7]),
                                        (grads_and_vars[8][0], self.train_layers[8]),
                                        (grads_and_vars[9][0]*2, self.train_layers[9]),
                                        (grads_and_vars[10][0], self.train_layers[10]),
                                        (grads_and_vars[11][0]*2, self.train_layers[11]),
                                        (grads_and_vars[12][0], self.train_layers[12]),
                                        (grads_and_vars[13][0]*2, self.train_layers[13]),
                                        (fcgrad*10, self.train_last_layer[0]),
                                        (fbgrad*20, self.train_last_layer[1])], global_step=global_step)
        else:
            return opt.apply_gradients([(fcgrad*10, self.train_last_layer[0]),
                                        (fbgrad*20, self.train_last_layer[1])], global_step=global_step)
    def initial_centers(self, img_output):
        """Initialize the codebook by running MiniBatchKMeans independently
        on each subspace's slice of the network outputs.

        Returns a (subspace_num * subcenter_num, output_dim) array whose
        off-subspace columns stay zero (block-diagonal layout).
        """
        C_init = np.zeros(
            [self.subspace_num * self.subcenter_num, self.output_dim])
        print("#DVSQ train# initilizing Centers")
        all_output = img_output
        # Width of one subspace slice of the output vector.
        div = int(self.output_dim / self.subspace_num)
        for i in range(self.subspace_num):
            kmeans = MiniBatchKMeans(n_clusters=self.subcenter_num).fit(
                all_output[:, i * div: (i + 1) * div])
            C_init[i * self.subcenter_num: (i + 1) * self.subcenter_num, i * div: (i + 1) * div] = kmeans.cluster_centers_
            print("step: ", i, " finish")
        return C_init
    def update_centers(self, img_dataset):
        '''
        Closed-form least-squares update of the codebook given fixed codes.

        Optimize:
            self.C = (U * hu^T + V * hv^T) (hu * hu^T + hv * hv^T)^{-1}
            self.C^T = (hu * hu^T + hv * hv^T)^{-1} (hu * U^T + hv * V^T)
            but all the C need to be replace with C^T :
            self.C = (hu * hu^T + hv * hv^T)^{-1} (hu^T * U + hv^T * V)
        '''
        old_C_value = self.sess.run(self.C)
        h = self.img_b_all
        U = self.img_output_all
        # Tikhonov term keeps h^T h invertible even with unused centers.
        smallResidual = tf.constant(
            np.eye(self.subcenter_num * self.subspace_num, dtype=np.float32) * 0.001)
        Uh = tf.matmul(tf.transpose(h), U)
        hh = tf.add(tf.matmul(tf.transpose(h), h), smallResidual)
        compute_centers = tf.matmul(tf.matrix_inverse(hh), Uh)
        update_C = self.C.assign(compute_centers)
        C_value = self.sess.run(update_C, feed_dict={
            self.img_output_all: img_dataset.output,
            self.img_b_all: img_dataset.codes,
        })
        # Centers that collapsed to ~zero get their previous value back.
        C_sums = np.sum(np.square(C_value), axis=1)
        C_zeros_ids = np.where(C_sums < 1e-8)
        C_value[C_zeros_ids, :] = old_C_value[C_zeros_ids, :]
        self.sess.run(self.C.assign(C_value))
    def update_codes_ICM(self, output, code):
        '''
        Iterated Conditional Modes: re-assign the one-hot code of each
        subspace in random order, ``max_iter_update_b`` sweeps.

        Optimize:
            min || output - self.C * codes ||
            min || output - codes * self.C ||
        args:
            output: [n_train, n_output]
            self.C: [n_subspace * n_subcenter, n_output]
                [C_1, C_2, ... C_M]
            codes: [n_train, n_subspace * n_subcenter]
        '''
        # Start from all-zero codes; the incoming values are discarded.
        code = np.zeros(code.shape)
        for iterate in range(self.max_iter_update_b):
            sub_list = [i for i in range(self.subspace_num)]
            random.shuffle(sub_list)
            for m in sub_list:
                best_centers_one_hot_val = self.sess.run(self.ICM_best_centers_one_hot, feed_dict={
                    self.ICM_b_m: code[:, m * self.subcenter_num: (m + 1) * self.subcenter_num],
                    self.ICM_b_all: code,
                    self.ICM_m: m,
                    self.ICM_X: output,
                })
                code[:, m * self.subcenter_num: (m + 1) *
                     self.subcenter_num] = best_centers_one_hot_val
        return code
    def update_codes_batch(self, dataset, batch_size):
        '''
        Run the ICM code update over the whole dataset in batches of
        ``batch_size`` and write the new codes back into the dataset.
        '''
        total_batch = int(ceil(dataset.n_samples / (batch_size)))
        dataset.finish_epoch()
        for i in range(total_batch):
            output_val, code_val = dataset.next_batch_output_codes(batch_size)
            codes_val = self.update_codes_ICM(output_val, code_val)
            dataset.feed_batch_codes(batch_size, codes_val)
    def train_cq(self, img_dataset):
        """Main training loop: SGD steps interleaved (every 2 epochs) with
        codebook initialization/updates and ICM code updates. Saves the
        model and closes the session when done.
        """
        print("%s #train# start training" % datetime.now())
        epoch = 0
        epoch_iter = int(ceil(img_dataset.n_samples / self.batch_size))
        # tensorboard
        tflog_path = os.path.join(self.log_dir, self.file_name)
        if os.path.exists(tflog_path):
            shutil.rmtree(tflog_path)
        train_writer = tf.summary.FileWriter(tflog_path, self.sess.graph)
        for train_iter in range(self.max_iter):
            images, labels, codes = img_dataset.next_batch(self.batch_size)
            start_time = time.time()
            # for epoch 0, q_lambda = 0, for epoch > 0, q_lambda = self.cq_lambda
            if epoch <= 1:
                assign_lambda = self.q_lambda.assign(epoch * self.cq_lambda)
                self.sess.run([assign_lambda])
            _, loss, output, summary = self.sess.run([self.train_op, self.loss, self.img_last_layer, self.merged],
                                                     feed_dict={self.img: images,
                                                                self.img_label: labels,
                                                                self.b_img: codes})
            train_writer.add_summary(summary, train_iter)
            img_dataset.feed_batch_output(self.batch_size, output)
            duration = time.time() - start_time
            # every epoch: update codes and centers
            if train_iter % (2 * epoch_iter) == 0 and train_iter != 0:
                if epoch == 0:
                    # First time only: k-means initialization of C.
                    with tf.device(self.device):
                        for i in range(self.max_iter_update_Cb):
                            self.sess.run(self.C.assign(
                                self.initial_centers(img_dataset.output)))
                epoch = epoch + 1
                for i in range(self.max_iter_update_Cb):
                    self.update_codes_batch(img_dataset, self.code_batch_size)
                    self.update_centers(img_dataset)
            if train_iter < 100 or train_iter % 50 == 0:
                print("%s #train# step %4d, loss = %.4f, %.1f sec/batch"
                      % (datetime.now(), train_iter + 1, loss, duration))
        print("%s #traing# finish training" % datetime.now())
        self.save_model()
        print("model saved")
        self.sess.close()
    def validation(self, img_query, img_database, R=100):
        """Encode query and database sets, quantize them, save the codes,
        and return a dict of mAP@R scores (uncompressed features, AQD and
        SQD distances). Closes the session afterwards.
        """
        print("%s #validation# start validation" % (datetime.now()))
        query_batch = int(ceil(img_query.n_samples / (self.val_batch_size)))
        print("%s #validation# totally %d query in %d batches" % (datetime.now(), img_query.n_samples, query_batch))
        for i in range(query_batch):
            images, labels, codes = img_query.next_batch(self.val_batch_size)
            output, loss = self.sess.run([self.img_last_layer, self.cos_loss],
                                         feed_dict={self.img: images, self.img_label: labels, self.stage: 1})
            img_query.feed_batch_output(self.val_batch_size, output)
            print('Cosine Loss: %s' % loss)
        database_batch = int(ceil(img_database.n_samples / (self.val_batch_size)))
        print("%s #validation# totally %d database in %d batches" %
              (datetime.now(), img_database.n_samples, database_batch))
        for i in range(database_batch):
            images, labels, codes = img_database.next_batch(self.val_batch_size)
            output, loss = self.sess.run([self.img_last_layer, self.cos_loss],
                                         feed_dict={self.img: images, self.img_label: labels, self.stage: 1})
            img_database.feed_batch_output(self.val_batch_size, output)
            # print output[:10, :10]
            if i % 100 == 0:
                print('Cosine Loss[%d/%d]: %s' % (i, database_batch, loss))
        self.update_codes_batch(img_query, self.code_batch_size)
        self.update_codes_batch(img_database, self.code_batch_size)
        C_tmp = self.sess.run(self.C)
        # save features and codes
        self.save_codes(img_database, img_query, C_tmp)
        print("%s #validation# calculating MAP@%d" % (datetime.now(), R))
        mAPs = MAPs_CQ(C_tmp, self.subspace_num, self.subcenter_num, R)
        self.sess.close()
        return {
            'i2i_nocq': mAPs.get_mAPs_by_feature(img_database, img_query),
            'i2i_AQD': mAPs.get_mAPs_AQD(img_database, img_query),
            'i2i_SQD': mAPs.get_mAPs_SQD(img_database, img_query)
        }
def train(train_img, config):
    """Train a DVSQ model on ``train_img`` with the given ``config`` and
    return the path where the trained parameters were saved.
    """
    code_length = config['n_subspace'] * config['n_subcenter']
    dvsq = DVSQ(config)
    dataset = Dataset(train_img, config['output_dim'], code_length)
    dvsq.train_cq(dataset)
    return dvsq.save_dir
def validation(database_img, query_img, config):
    """Evaluate a DVSQ model: encode ``query_img`` against ``database_img``
    and return the mAP@R dictionary produced by ``DVSQ.validation``.
    """
    code_length = config['n_subspace'] * config['n_subcenter']
    dvsq = DVSQ(config)
    database = Dataset(database_img, config['output_dim'], code_length)
    queries = Dataset(query_img, config['output_dim'], code_length)
    return dvsq.validation(queries, database, config['R'])
| [
"tensorflow.div",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.multiply",
"numpy.array",
"os.path.exists",
"tensorflow.slice",
"numpy.where",
"tensorflow.Session",
"tensorflow.placeholder",
"numpy.dot",
"architecture.img_alexnet_layers",
"tensorflow.matm... | [((797, 829), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (816, 829), True, 'import numpy as np\n'), ((2101, 2158), 'os.path.join', 'os.path.join', (["config['save_dir']", "(self.file_name + '.npy')"], {}), "(config['save_dir'], self.file_name + '.npy')\n", (2113, 2158), False, 'import os\n'), ((2295, 2311), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2309, 2311), True, 'import tensorflow as tf\n'), ((2432, 2462), 'tensorflow.Session', 'tf.Session', ([], {'config': 'configProto'}), '(config=configProto)\n', (2442, 2462), True, 'import tensorflow as tf\n'), ((6145, 6172), 'os.path.dirname', 'os.path.dirname', (['model_file'], {}), '(model_file)\n', (6160, 6172), False, 'import os\n'), ((11415, 11460), 'tensorflow.Variable', 'tf.Variable', (['self.cq_lambda'], {'name': '"""cq_lambda"""'}), "(self.cq_lambda, name='cq_lambda')\n", (11426, 11460), True, 'import tensorflow as tf\n'), ((11484, 11528), 'tensorflow.multiply', 'tf.multiply', (['self.q_lambda', 'self.cq_loss_img'], {}), '(self.q_lambda, self.cq_loss_img)\n', (11495, 11528), True, 'import tensorflow as tf\n'), ((11647, 11776), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.learning_rate', 'global_step', 'self.decay_step', 'self.learning_rate_decay_factor'], {'staircase': '(True)'}), '(self.learning_rate, global_step, self.decay_step,\n self.learning_rate_decay_factor, staircase=True)\n', (11673, 11776), True, 'import tensorflow as tf\n'), ((11800, 11863), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'self.lr', 'momentum': '(0.9)'}), '(learning_rate=self.lr, momentum=0.9)\n', (11826, 11863), True, 'import tensorflow as tf\n'), ((12130, 12166), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (12147, 12166), True, 'import tensorflow as tf\n'), ((12175, 12219), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cos_loss"""', 'self.cos_loss'], {}), "('cos_loss', self.cos_loss)\n", (12192, 12219), True, 'import tensorflow as tf\n'), ((12228, 12270), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cq_loss"""', 'self.cq_loss'], {}), "('cq_loss', self.cq_loss)\n", (12245, 12270), True, 'import tensorflow as tf\n'), ((12279, 12311), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""lr"""', 'self.lr'], {}), "('lr', self.lr)\n", (12296, 12311), True, 'import tensorflow as tf\n'), ((12334, 12356), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (12354, 12356), True, 'import tensorflow as tf\n'), ((14057, 14124), 'numpy.zeros', 'np.zeros', (['[self.subspace_num * self.subcenter_num, self.output_dim]'], {}), '([self.subspace_num * self.subcenter_num, self.output_dim])\n', (14065, 14124), True, 'import numpy as np\n'), ((15683, 15707), 'numpy.where', 'np.where', (['(C_sums < 1e-08)'], {}), '(C_sums < 1e-08)\n', (15691, 15707), True, 'import numpy as np\n'), ((16214, 16234), 'numpy.zeros', 'np.zeros', (['code.shape'], {}), '(code.shape)\n', (16222, 16234), True, 'import numpy as np\n'), ((17589, 17631), 'os.path.join', 'os.path.join', (['self.log_dir', 'self.file_name'], {}), '(self.log_dir, self.file_name)\n', (17601, 17631), False, 'import os\n'), ((17643, 17669), 'os.path.exists', 'os.path.exists', (['tflog_path'], {}), '(tflog_path)\n', (17657, 17669), False, 'import os\n'), ((17732, 17782), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['tflog_path', 'self.sess.graph'], {}), '(tflog_path, self.sess.graph)\n', (17753, 17782), True, 'import tensorflow as tf\n'), ((21539, 21595), 'evaluation.MAPs_CQ', 'MAPs_CQ', (['C_tmp', 'self.subspace_num', 'self.subcenter_num', 'R'], {}), '(C_tmp, self.subspace_num, self.subcenter_num, R)\n', (21546, 21595), False, 'from evaluation import MAPs_CQ\n'), ((879, 893), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (890, 893), 
True, 'import tensorflow as tf\n'), ((2522, 2544), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (2531, 2544), True, 'import tensorflow as tf\n'), ((2569, 2616), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 256, 256, 3]'], {}), '(tf.float32, [None, 256, 256, 3])\n', (2583, 2616), True, 'import tensorflow as tf\n'), ((2646, 2694), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_class]'], {}), '(tf.float32, [None, self.n_class])\n', (2660, 2694), True, 'import tensorflow as tf\n'), ((3321, 3372), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.output_dim]'], {}), '(tf.float32, [None, self.output_dim])\n', (3335, 3372), True, 'import tensorflow as tf\n'), ((3419, 3493), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.subspace_num * self.subcenter_num]'], {}), '(tf.float32, [None, self.subspace_num * self.subcenter_num])\n', (3433, 3493), True, 'import tensorflow as tf\n'), ((3537, 3611), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.subspace_num * self.subcenter_num]'], {}), '(tf.float32, [None, self.subspace_num * self.subcenter_num])\n', (3551, 3611), True, 'import tensorflow as tf\n'), ((3654, 3682), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[]'], {}), '(tf.int32, [])\n', (3668, 3682), True, 'import tensorflow as tf\n'), ((3710, 3764), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.subcenter_num]'], {}), '(tf.float32, [None, self.subcenter_num])\n', (3724, 3764), True, 'import tensorflow as tf\n'), ((3811, 3885), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.subcenter_num * self.subspace_num]'], {}), '(tf.float32, [None, self.subcenter_num * self.subspace_num])\n', (3825, 3885), True, 'import tensorflow as tf\n'), ((3928, 3995), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.code_batch_size, self.output_dim]'], {}), 
'(tf.float32, [self.code_batch_size, self.output_dim])\n', (3942, 3995), True, 'import tensorflow as tf\n'), ((4040, 4137), 'tensorflow.slice', 'tf.slice', (['self.C', '[self.ICM_m * self.subcenter_num, 0]', '[self.subcenter_num, self.output_dim]'], {}), '(self.C, [self.ICM_m * self.subcenter_num, 0], [self.subcenter_num,\n self.output_dim])\n', (4048, 4137), True, 'import tensorflow as tf\n'), ((4335, 4373), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.ICM_X_residual', '(1)'], {}), '(self.ICM_X_residual, 1)\n', (4349, 4373), True, 'import tensorflow as tf\n'), ((4416, 4447), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.ICM_C_m', '(0)'], {}), '(self.ICM_C_m, 0)\n', (4430, 4447), True, 'import tensorflow as tf\n'), ((5092, 5121), 'tensorflow.argmin', 'tf.argmin', (['ICM_sum_squares', '(1)'], {}), '(ICM_sum_squares, 1)\n', (5101, 5121), True, 'import tensorflow as tf\n'), ((5166, 5232), 'tensorflow.one_hot', 'tf.one_hot', (['ICM_best_centers', 'self.subcenter_num'], {'dtype': 'tf.float32'}), '(ICM_best_centers, self.subcenter_num, dtype=tf.float32)\n', (5176, 5232), True, 'import tensorflow as tf\n'), ((5282, 5313), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (5293, 5313), True, 'import tensorflow as tf\n'), ((5553, 5687), 'architecture.img_alexnet_layers', 'img_alexnet_layers', (['self.img', 'self.batch_size', 'self.output_dim', 'self.stage', 'self.model_weights'], {'val_batch_size': 'self.val_batch_size'}), '(self.img, self.batch_size, self.output_dim, self.stage,\n self.model_weights, val_batch_size=self.val_batch_size)\n', (5571, 5687), False, 'from architecture import img_alexnet_layers\n'), ((6184, 6206), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (6198, 6206), False, 'import os\n'), ((6229, 6248), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (6240, 6248), False, 'import os\n'), ((6277, 6292), 'numpy.array', 'np.array', (['model'], {}), '(model)\n', 
(6285, 6292), True, 'import numpy as np\n'), ((6552, 6577), 'numpy.dot', 'np.dot', (['database.codes', 'C'], {}), '(database.codes, C)\n', (6558, 6577), True, 'import numpy as np\n'), ((6689, 6711), 'numpy.dot', 'np.dot', (['query.codes', 'C'], {}), '(query.codes, C)\n', (6695, 6711), True, 'import numpy as np\n'), ((6838, 6853), 'numpy.array', 'np.array', (['model'], {}), '(model)\n', (6846, 6853), True, 'import numpy as np\n'), ((7169, 7217), 'tensorflow.constant', 'tf.constant', (['self.margin_param'], {'dtype': 'tf.float32'}), '(self.margin_param, dtype=tf.float32)\n', (7180, 7217), True, 'import tensorflow as tf\n'), ((8038, 8057), 'tensorflow.div', 'tf.div', (['ip_1', 'mod_1'], {}), '(ip_1, mod_1)\n', (8044, 8057), True, 'import tensorflow as tf\n'), ((8078, 8137), 'tensorflow.matmul', 'tf.matmul', (['self.img_last_layer', 'word_dict'], {'transpose_b': '(True)'}), '(self.img_last_layer, word_dict, transpose_b=True)\n', (8087, 8137), True, 'import tensorflow as tf\n'), ((8498, 8517), 'tensorflow.div', 'tf.div', (['ip_2', 'mod_2'], {}), '(ip_2, mod_2)\n', (8504, 8517), True, 'import tensorflow as tf\n'), ((11144, 11173), 'numpy.loadtxt', 'np.loadtxt', (['self.wordvec_dict'], {}), '(self.wordvec_dict)\n', (11154, 11173), True, 'import numpy as np\n'), ((15243, 15258), 'tensorflow.transpose', 'tf.transpose', (['h'], {}), '(h)\n', (15255, 15258), True, 'import tensorflow as tf\n'), ((15365, 15386), 'tensorflow.matrix_inverse', 'tf.matrix_inverse', (['hh'], {}), '(hh)\n', (15382, 15386), True, 'import tensorflow as tf\n'), ((15633, 15651), 'numpy.square', 'np.square', (['C_value'], {}), '(C_value)\n', (15642, 15651), True, 'import numpy as np\n'), ((16364, 16388), 'random.shuffle', 'random.shuffle', (['sub_list'], {}), '(sub_list)\n', (16378, 16388), False, 'import random\n'), ((17041, 17077), 'math.ceil', 'ceil', (['(dataset.n_samples / batch_size)'], {}), '(dataset.n_samples / batch_size)\n', (17045, 17077), False, 'from math import ceil\n'), ((17498, 17543), 
'math.ceil', 'ceil', (['(img_dataset.n_samples / self.batch_size)'], {}), '(img_dataset.n_samples / self.batch_size)\n', (17502, 17543), False, 'from math import ceil\n'), ((17683, 17708), 'shutil.rmtree', 'shutil.rmtree', (['tflog_path'], {}), '(tflog_path)\n', (17696, 17708), False, 'import shutil\n'), ((17933, 17944), 'time.time', 'time.time', ([], {}), '()\n', (17942, 17944), False, 'import time\n'), ((19852, 19899), 'math.ceil', 'ceil', (['(img_query.n_samples / self.val_batch_size)'], {}), '(img_query.n_samples / self.val_batch_size)\n', (19856, 19899), False, 'from math import ceil\n'), ((20467, 20517), 'math.ceil', 'ceil', (['(img_database.n_samples / self.val_batch_size)'], {}), '(img_database.n_samples / self.val_batch_size)\n', (20471, 20517), False, 'from math import ceil\n'), ((2921, 3056), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.subspace_num * self.subcenter_num, self.output_dim]'], {'minval': '(-1)', 'maxval': '(1)', 'dtype': 'tf.float32', 'name': '"""centers"""'}), "([self.subspace_num * self.subcenter_num, self.output_dim],\n minval=-1, maxval=1, dtype=tf.float32, name='centers')\n", (2938, 3056), True, 'import tensorflow as tf\n'), ((4269, 4306), 'tensorflow.matmul', 'tf.matmul', (['self.ICM_b_m', 'self.ICM_C_m'], {}), '(self.ICM_b_m, self.ICM_C_m)\n', (4278, 4306), True, 'import tensorflow as tf\n'), ((4526, 4555), 'numpy.loadtxt', 'np.loadtxt', (['self.wordvec_dict'], {}), '(self.wordvec_dict)\n', (4536, 4555), True, 'import numpy as np\n'), ((5014, 5038), 'tensorflow.square', 'tf.square', (['ICM_word_dict'], {}), '(ICM_word_dict)\n', (5023, 5038), True, 'import tensorflow as tf\n'), ((5411, 5444), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5442, 5444), True, 'import tensorflow as tf\n'), ((7076, 7105), 'numpy.loadtxt', 'np.loadtxt', (['self.wordvec_dict'], {}), '(self.wordvec_dict)\n', (7086, 7105), True, 'import numpy as np\n'), ((7396, 7429), 'tensorflow.expand_dims', 
'tf.expand_dims', (['self.img_label', '(2)'], {}), '(self.img_label, 2)\n', (7410, 7429), True, 'import tensorflow as tf\n'), ((7448, 7476), 'tensorflow.expand_dims', 'tf.expand_dims', (['word_dict', '(0)'], {}), '(word_dict, 0)\n', (7462, 7476), True, 'import tensorflow as tf\n'), ((7792, 7820), 'tensorflow.expand_dims', 'tf.expand_dims', (['word_dict', '(0)'], {}), '(word_dict, 0)\n', (7806, 7820), True, 'import tensorflow as tf\n'), ((8781, 8814), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.img_label', '(2)'], {}), '(self.img_label, 2)\n', (8795, 8814), True, 'import tensorflow as tf\n'), ((9477, 9536), 'tensorflow.matmul', 'tf.matmul', (['self.img_last_layer', 'word_dict'], {'transpose_b': '(True)'}), '(self.img_last_layer, word_dict, transpose_b=True)\n', (9486, 9536), True, 'import tensorflow as tf\n'), ((9897, 9916), 'tensorflow.div', 'tf.div', (['ip_2', 'mod_2'], {}), '(ip_2, mod_2)\n', (9903, 9916), True, 'import tensorflow as tf\n'), ((10027, 10076), 'tensorflow.matmul', 'tf.matmul', (['word_dict', 'word_dict'], {'transpose_b': '(True)'}), '(word_dict, word_dict, transpose_b=True)\n', (10036, 10076), True, 'import tensorflow as tf\n'), ((15146, 15210), 'numpy.eye', 'np.eye', (['(self.subcenter_num * self.subspace_num)'], {'dtype': 'np.float32'}), '(self.subcenter_num * self.subspace_num, dtype=np.float32)\n', (15152, 15210), True, 'import numpy as np\n'), ((15293, 15308), 'tensorflow.transpose', 'tf.transpose', (['h'], {}), '(h)\n', (15305, 15308), True, 'import tensorflow as tf\n'), ((17439, 17453), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17451, 17453), False, 'from datetime import datetime\n'), ((18699, 18710), 'time.time', 'time.time', ([], {}), '()\n', (18708, 18710), False, 'import time\n'), ((19600, 19614), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19612, 19614), False, 'from datetime import datetime\n'), ((19809, 19823), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19821, 19823), False, 
'from datetime import datetime\n'), ((4216, 4249), 'tensorflow.matmul', 'tf.matmul', (['self.ICM_b_all', 'self.C'], {}), '(self.ICM_b_all, self.C)\n', (4225, 4249), True, 'import tensorflow as tf\n'), ((4679, 4787), 'tensorflow.reshape', 'tf.reshape', (['(ICM_X_expand - ICM_C_m_expand)', '[self.code_batch_size * self.subcenter_num, self.output_dim]'], {}), '(ICM_X_expand - ICM_C_m_expand, [self.code_batch_size * self.\n subcenter_num, self.output_dim])\n', (4689, 4787), True, 'import tensorflow as tf\n'), ((4853, 4876), 'tensorflow.transpose', 'tf.transpose', (['word_dict'], {}), '(word_dict)\n', (4865, 4876), True, 'import tensorflow as tf\n'), ((7596, 7634), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.img_last_layer', '(1)'], {}), '(self.img_last_layer, 1)\n', (7610, 7634), True, 'import tensorflow as tf\n'), ((7746, 7786), 'tensorflow.ones', 'tf.ones', (['[self.batch_size, self.n_class]'], {}), '([self.batch_size, self.n_class])\n', (7753, 7786), True, 'import tensorflow as tf\n'), ((8251, 8270), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['t', '(1)'], {}), '(t, 1)\n', (8264, 8270), True, 'import tensorflow as tf\n'), ((8633, 8657), 'tensorflow.expand_dims', 'tf.expand_dims', (['cos_1', '(2)'], {}), '(cos_1, 2)\n', (8647, 8657), True, 'import tensorflow as tf\n'), ((8659, 8683), 'tensorflow.expand_dims', 'tf.expand_dims', (['cos_2', '(1)'], {}), '(cos_2, 1)\n', (8673, 8683), True, 'import tensorflow as tf\n'), ((8882, 8914), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (8893, 8914), True, 'import tensorflow as tf\n'), ((8983, 9026), 'tensorflow.constant', 'tf.constant', (['self.n_class'], {'dtype': 'tf.float32'}), '(self.n_class, dtype=tf.float32)\n', (8994, 9026), True, 'import tensorflow as tf\n'), ((9045, 9074), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.img_label'], {}), '(self.img_label)\n', (9058, 9074), True, 'import tensorflow as tf\n'), ((9217, 9246), 'numpy.loadtxt', 'np.loadtxt', 
(['self.wordvec_dict'], {}), '(self.wordvec_dict)\n', (9227, 9246), True, 'import numpy as np\n'), ((10307, 10341), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'dtype': 'tf.float32'}), '(1.0, dtype=tf.float32)\n', (10318, 10341), True, 'import tensorflow as tf\n'), ((10360, 10379), 'tensorflow.div', 'tf.div', (['ip_3', 'mod_3'], {}), '(ip_3, mod_3)\n', (10366, 10379), True, 'import tensorflow as tf\n'), ((10453, 10484), 'tensorflow.expand_dims', 'tf.expand_dims', (['margin_param', '(0)'], {}), '(margin_param, 0)\n', (10467, 10484), True, 'import tensorflow as tf\n'), ((10663, 10696), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.img_label', '(2)'], {}), '(self.img_label, 2)\n', (10677, 10696), True, 'import tensorflow as tf\n'), ((14339, 14385), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'self.subcenter_num'}), '(n_clusters=self.subcenter_num)\n', (14354, 14385), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((19969, 19983), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19981, 19983), False, 'from datetime import datetime\n'), ((20604, 20618), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20616, 20618), False, 'from datetime import datetime\n'), ((21504, 21518), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21516, 21518), False, 'from datetime import datetime\n'), ((7962, 7984), 'tensorflow.square', 'tf.square', (['v_label_mod'], {}), '(v_label_mod)\n', (7971, 7984), True, 'import tensorflow as tf\n'), ((8345, 8375), 'tensorflow.square', 'tf.square', (['self.img_last_layer'], {}), '(self.img_last_layer)\n', (8354, 8375), True, 'import tensorflow as tf\n'), ((8409, 8429), 'tensorflow.square', 'tf.square', (['word_dict'], {}), '(word_dict)\n', (8418, 8429), True, 'import tensorflow as tf\n'), ((9650, 9669), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['t', '(1)'], {}), '(t, 1)\n', (9663, 9669), True, 'import tensorflow as tf\n'), ((10515, 10539), 'tensorflow.expand_dims', 
'tf.expand_dims', (['cos_2', '(2)'], {}), '(cos_2, 2)\n', (10529, 10539), True, 'import tensorflow as tf\n'), ((10541, 10565), 'tensorflow.expand_dims', 'tf.expand_dims', (['cos_2', '(1)'], {}), '(cos_2, 1)\n', (10555, 10565), True, 'import tensorflow as tf\n'), ((10764, 10796), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (10775, 10796), True, 'import tensorflow as tf\n'), ((10865, 10908), 'tensorflow.constant', 'tf.constant', (['self.n_class'], {'dtype': 'tf.float32'}), '(self.n_class, dtype=tf.float32)\n', (10876, 10908), True, 'import tensorflow as tf\n'), ((10927, 10956), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.img_label'], {}), '(self.img_label)\n', (10940, 10956), True, 'import tensorflow as tf\n'), ((11075, 11104), 'tensorflow.matmul', 'tf.matmul', (['self.b_img', 'self.C'], {}), '(self.b_img, self.C)\n', (11084, 11104), True, 'import tensorflow as tf\n'), ((11360, 11383), 'tensorflow.transpose', 'tf.transpose', (['word_dict'], {}), '(word_dict)\n', (11372, 11383), True, 'import tensorflow as tf\n'), ((18904, 18926), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (18913, 18926), True, 'import tensorflow as tf\n'), ((7891, 7921), 'tensorflow.square', 'tf.square', (['self.img_last_layer'], {}), '(self.img_last_layer)\n', (7900, 7921), True, 'import tensorflow as tf\n'), ((8273, 8284), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (8281, 8284), True, 'import tensorflow as tf\n'), ((9744, 9774), 'tensorflow.square', 'tf.square', (['self.img_last_layer'], {}), '(self.img_last_layer)\n', (9753, 9774), True, 'import tensorflow as tf\n'), ((9808, 9828), 'tensorflow.square', 'tf.square', (['word_dict'], {}), '(word_dict)\n', (9817, 9828), True, 'import tensorflow as tf\n'), ((10172, 10192), 'tensorflow.square', 'tf.square', (['word_dict'], {}), '(word_dict)\n', (10181, 10192), True, 'import tensorflow as tf\n'), ((10226, 10246), 'tensorflow.square', 'tf.square', 
(['word_dict'], {}), '(word_dict)\n', (10235, 10246), True, 'import tensorflow as tf\n'), ((11328, 11357), 'tensorflow.matmul', 'tf.matmul', (['self.b_img', 'self.C'], {}), '(self.b_img, self.C)\n', (11337, 11357), True, 'import tensorflow as tf\n'), ((19504, 19518), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19516, 19518), False, 'from datetime import datetime\n'), ((9672, 9683), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (9680, 9683), True, 'import tensorflow as tf\n')] |
from typing import Tuple
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LightSource
from moviepy.video.io.bindings import mplfig_to_npimage
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from stl.mesh import Mesh
class RandomProjection(object):
    """
    Starting from a 3D STL-versioned file, it creates a random 2D projection of the figure from an arbitrary PoV.
    It also randomizes illumination parameters related to the azimuth and altitude values.
    And it does the same for the bright and dark surface vectors (RGBA vectors).
    STLMesh -> NPArray
    """

    def __call__(self, mesh: "Mesh") -> np.ndarray:
        """Rotate *mesh* in place about a random axis/angle, then render it to an image array."""
        # Random rotation axis with components uniformly in [-1, 1).
        random_rotation_vectors = 2 * (np.random.rand(3) - 0.5)
        random_rotation_angle = float(np.radians(360 * np.random.rand()))
        mesh.rotate(random_rotation_vectors, random_rotation_angle)
        poly_mesh = RandomProjection.__create_illumination(mesh)
        array_img = RandomProjection.__plot_to_array_data(mesh, poly_mesh)
        return array_img

    @staticmethod
    def __create_illumination(mesh: "Mesh") -> "Poly3DCollection":
        """Build a Poly3DCollection for *mesh* with randomized light source and surface colors."""
        lt, dk = RandomProjection.__generate_random_brightness_parameters()
        azimuth = float(np.random.rand())
        altitude = float(np.random.rand())
        poly_mesh = mplot3d.art3d.Poly3DCollection(mesh.vectors)
        ls = LightSource(azimuth, altitude)
        sns = ls.shade_normals(mesh.get_unit_normals(), fraction=1.0)
        # Interpolate each face color between the dark and bright RGBA vectors
        # according to its shading intensity s in [0, 1].
        rgba = np.array([(lt - dk) * s + dk for s in sns])
        poly_mesh.set_facecolor(rgba)
        return poly_mesh

    @staticmethod
    def __plot_to_array_data(mesh: "Mesh", poly_mesh: "Poly3DCollection") -> np.ndarray:
        """Render *poly_mesh* on equal-scaled, axis-free 3D axes and return the frame as an array."""
        figure = plt.figure()
        axes = mplot3d.Axes3D(figure)
        axes.add_collection3d(poly_mesh)
        points = mesh.points.reshape(-1, 3)
        # Half of the largest coordinate extent: used as a common half-width
        # so all three axes share the same scale.
        points_top = max(np.ptp(points, 0)) / 2
        controls = [(min(points[:, i]) + max(points[:, i])) / 2 for i in range(3)]
        limits = [[controls[i] - points_top, controls[i] + points_top] for i in range(3)]
        axes.auto_scale_xyz(*limits)
        axes.axis('off')
        np_img = mplfig_to_npimage(figure)
        # Close the figure to avoid leaking matplotlib state across calls.
        plt.close(figure)
        return np_img

    @staticmethod
    def __generate_random_brightness_parameters() -> Tuple[np.ndarray, np.ndarray]:
        """
        Draw a random (bright, dark) pair of RGBA surface colors.

        The dark vector's RGB components are sampled from [0, 0.5) and the
        bright vector's from [0.5, 1.0), so the bright color dominates
        component-wise; alpha is fixed at 1.0 for both.

        Returns:
            (bright, dark): two float RGBA arrays of shape (4,).
        """
        # Previously unimplemented (pass), which made __create_illumination
        # fail when unpacking the returned None.
        dark = np.append(np.random.uniform(0.0, 0.5, 3), 1.0)
        bright = np.append(np.random.uniform(0.5, 1.0, 3), 1.0)
        return bright, dark
| [
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"numpy.ptp",
"moviepy.video.io.bindings.mplfig_to_npimage",
"numpy.random.rand",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.colors.LightSource",
"matplotlib.pyplot.figure",
"mpl_toolkits.mplot3d.Axes3D"
] | [((1362, 1406), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mplot3d.art3d.Poly3DCollection', (['mesh.vectors'], {}), '(mesh.vectors)\n', (1392, 1406), False, 'from mpl_toolkits import mplot3d\n'), ((1421, 1451), 'matplotlib.colors.LightSource', 'LightSource', (['azimuth', 'altitude'], {}), '(azimuth, altitude)\n', (1432, 1451), False, 'from matplotlib.colors import LightSource\n'), ((1537, 1582), 'numpy.array', 'np.array', (['[((lt - dk) * s + dk) for s in sns]'], {}), '([((lt - dk) * s + dk) for s in sns])\n', (1545, 1582), True, 'import numpy as np\n'), ((1767, 1779), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1777, 1779), True, 'from matplotlib import pyplot as plt\n'), ((1795, 1817), 'mpl_toolkits.mplot3d.Axes3D', 'mplot3d.Axes3D', (['figure'], {}), '(figure)\n', (1809, 1817), False, 'from mpl_toolkits import mplot3d\n'), ((2206, 2231), 'moviepy.video.io.bindings.mplfig_to_npimage', 'mplfig_to_npimage', (['figure'], {}), '(figure)\n', (2223, 2231), False, 'from moviepy.video.io.bindings import mplfig_to_npimage\n'), ((2240, 2257), 'matplotlib.pyplot.close', 'plt.close', (['figure'], {}), '(figure)\n', (2249, 2257), True, 'from matplotlib import pyplot as plt\n'), ((1280, 1296), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1294, 1296), True, 'import numpy as np\n'), ((1323, 1339), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1337, 1339), True, 'import numpy as np\n'), ((764, 781), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (778, 781), True, 'import numpy as np\n'), ((1929, 1946), 'numpy.ptp', 'np.ptp', (['points', '(0)'], {}), '(points, 0)\n', (1935, 1946), True, 'import numpy as np\n'), ((844, 860), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (858, 860), True, 'import numpy as np\n')] |
import scanpy as sc
import numpy as np
import matplotlib.pyplot as plt
from .utils import metagene_loadings
import pandas as pd
import seaborn as sns
def ordered_matrixplot(d, n_genes=5, groups=None, **kwargs):
    """
    Matrix plot of ranked marker genes with columns sorted by signed score
    instead of abs(score), which separates up- from down-regulated genes
    and yields visually cleaner plots.

    :param d: annotated data object with 'rank_genes_groups' in .uns
    :param n_genes: number of top genes to show per group
    :param groups: optional subset of group names to display
    :param kwargs: forwarded to sc.pl.matrixplot
    """
    ranking = d.uns['rank_genes_groups']
    names = np.stack([np.array(list(row)) for row in ranking['names']][:n_genes])
    scores = np.stack([np.array(list(row)) for row in ranking['scores']][:n_genes])
    # Sort each group's column by descending signed score, not absolute value.
    order = np.argsort(-1 * scores, axis=0)
    sorted_genes = np.take_along_axis(names, order, axis=0)
    grouping_key = ranking['params']['groupby']
    group_names = list(ranking['names'].dtype.fields.keys())
    mapping = {name: sorted_genes[:, col] for col, name in enumerate(group_names)}
    if groups is not None:
        mapping = {k: v for k, v in mapping.items() if k in groups}
    sc.pl.matrixplot(d, var_names=mapping, groupby=grouping_key, **kwargs)
def plot_metagenes(data, comps=None, key='sca', **kwargs):
    """
    Draw one bar plot of metagene loadings per component, stacked vertically.

    :param data: dict with a 'loadings' matrix, or an annotated data object
        exposing the loadings via .varm[key + '_loadings']
    :param comps: component indices to plot; defaults to all components
    :param key: prefix for the .varm loadings entry
    :param kwargs: forwarded to metagene_loadings
    :return: the matplotlib figure
    """
    if comps is None:
        if type(data) is dict:
            n_comps = data['loadings'].shape[1]
        else:
            n_comps = data.varm[key + '_loadings'].shape[1]
        comps = list(range(n_comps))
    fig, axs = plt.subplots(len(comps), 1)
    loadings = metagene_loadings(data, key=key, **kwargs)
    for i, comp in enumerate(comps):
        ax = axs.flatten()[i]
        df = pd.DataFrame(loadings[comp])
        sns.barplot(data=df, x='genes', y='scores', ax=ax)
        ax.set_title('component {}'.format(i + 1))
        for tick in ax.get_xticklabels():
            tick.set_rotation(45)
    # NOTE: figure width is derived from `df` of the final loop iteration.
    fig.set_size_inches(0.5 * df.shape[0], 5 * len(comps))
    plt.subplots_adjust(hspace=0.5)
    return fig
| [
"numpy.argsort",
"pandas.DataFrame",
"matplotlib.pyplot.subplots_adjust",
"scanpy.pl.matrixplot"
] | [((1277, 1364), 'scanpy.pl.matrixplot', 'sc.pl.matrixplot', (['d'], {'var_names': 'ordered_top_mapping', 'groupby': 'grouping_key'}), '(d, var_names=ordered_top_mapping, groupby=grouping_key, **\n kwargs)\n', (1293, 1364), True, 'import scanpy as sc\n'), ((2093, 2124), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (2112, 2124), True, 'import matplotlib.pyplot as plt\n'), ((788, 823), 'numpy.argsort', 'np.argsort', (['(-1 * top_scores)'], {'axis': '(0)'}), '(-1 * top_scores, axis=0)\n', (798, 823), True, 'import numpy as np\n'), ((1771, 1799), 'pandas.DataFrame', 'pd.DataFrame', (['loadings[comp]'], {}), '(loadings[comp])\n', (1783, 1799), True, 'import pandas as pd\n')] |
import numpy as np
from .radon import radon_matrix as calc_radon_matrix
def ramp_filter(axis_omega):
    """Ramp filter (|frequency|) in the frequency domain, evaluated on the
    centers of ``axis_omega.Hz()`` for coordinates given in rad / s.
    """
    freq_centers = axis_omega.Hz().centers
    return np.abs(freq_centers)
class BackProjector():
    """Back-projection operator built on a Radon transform matrix.

    Wraps the (flattened) Radon matrix for the reconstruction grid *grid*
    and projection grid *grid_y*, exposing the scaled adjoint via ``@`` and
    per-angle column blocks via indexing.
    """
    def __init__(self, grid, grid_y, radon_matrix=None, **kwds):
        self.grid = grid
        self.grid_y = grid_y
        if radon_matrix is None:
            # Build the matrix on demand; extra kwargs go to the constructor.
            self.radon_matrix = calc_radon_matrix(grid, grid_y, **kwds)
        else:
            expected_shape = (np.prod(grid_y.shape), np.prod(grid.shape))
            assert radon_matrix.shape == expected_shape
            self.radon_matrix = radon_matrix
        # Scaling factor combining the projection/reconstruction cell sizes.
        self.alpha = grid_y.axis_y.T / (grid.axis_x.T * grid.axis_y.T)

    def __getitem__(self, index):
        """Return the scaled back-projection columns for angle *index*."""
        n_theta = self.grid_y.axis_x.N
        assert 0 <= index < n_theta
        return self.radon_matrix.T[:, index::n_theta] * self.alpha

    def __matmul__(self, other):
        """Apply the scaled adjoint Radon matrix to *other*."""
        return (self.radon_matrix.T @ other) * self.alpha
def fbp(grid, grid_y, sinogram, radon_matrix=None, **kwds):
    """Given the construction domain specification *grid*, the projection
    domain specification *grid_y*, and *sinogram* (x axis is theta and
    y axis is t), calculate and return the tomographic reconstruction
    by filtered back projection.

    If *radon_matrix* is given (shape (prod(grid_y.shape), prod(grid.shape)),
    enforced by BackProjector), back projection uses the matrix adjoint and
    **kwds are forwarded to BackProjector; otherwise back projection is done
    by linear interpolation in the projection domain.
    """
    assert grid_y.shape == sinogram.shape
    if radon_matrix is not None:
        backprojector = BackProjector(grid, grid_y, radon_matrix=radon_matrix, **kwds)
    axis_theta = grid_y.axis_x  # projection angles, in degrees (see np.radians below)
    axis_t = grid_y.axis_y      # projection offset coordinate t
    # Transform the sinogram to the frequency domain along the t axis
    # (project-specific grid API; assumed real FFT semantics — verify).
    grid_y_omega0, FT0_sinogram = grid_y.spectrum(sinogram, real=True, axis=0)
    axis_omega_t = grid_y_omega0.axis_y
    W = ramp_filter(axis_omega_t)
    # apply ramp filter to each column of the sinogram
    _, sinogram_ramp = grid_y_omega0.ispectrum(np.atleast_2d(W).T * FT0_sinogram, axis=0)
    S = np.zeros(grid.shape)
    if radon_matrix is None:
        X, Y = np.meshgrid(grid.axis_x.centers, grid.axis_y.centers)
    # process each projection for each angle
    for k, theta_k in enumerate(np.radians(axis_theta.centers)):
        # compute backprojection of the ramp filtered projection
        if radon_matrix is None:
            # calculate the backprojection by linear interpolation in the projection domain
            t_theta_k = X * np.cos(theta_k) + Y * np.sin(theta_k)
            # NaN outside the measured t range flags uncovered pixels
            S_k = np.interp(t_theta_k.flat, axis_t.centers, sinogram_ramp[:, k], left=np.nan, right=np.nan)
        else:
            # use Radon transform matrix if provided
            S_k = backprojector[k] @ sinogram_ramp[:, k]
        S_k.shape = S.shape
        # accumulate the result
        S += S_k
    # Scale by np.radians(axis_theta.T) — presumably the angular step of the
    # Riemann sum over theta; confirm against the grid API.
    S *= np.radians(axis_theta.T)
    return S
| [
"numpy.radians",
"numpy.atleast_2d",
"numpy.prod",
"numpy.zeros",
"numpy.cos",
"numpy.interp",
"numpy.sin",
"numpy.meshgrid"
] | [((1938, 1958), 'numpy.zeros', 'np.zeros', (['grid.shape'], {}), '(grid.shape)\n', (1946, 1958), True, 'import numpy as np\n'), ((2741, 2765), 'numpy.radians', 'np.radians', (['axis_theta.T'], {}), '(axis_theta.T)\n', (2751, 2765), True, 'import numpy as np\n'), ((2003, 2056), 'numpy.meshgrid', 'np.meshgrid', (['grid.axis_x.centers', 'grid.axis_y.centers'], {}), '(grid.axis_x.centers, grid.axis_y.centers)\n', (2014, 2056), True, 'import numpy as np\n'), ((2134, 2164), 'numpy.radians', 'np.radians', (['axis_theta.centers'], {}), '(axis_theta.centers)\n', (2144, 2164), True, 'import numpy as np\n'), ((2441, 2534), 'numpy.interp', 'np.interp', (['t_theta_k.flat', 'axis_t.centers', 'sinogram_ramp[:, k]'], {'left': 'np.nan', 'right': 'np.nan'}), '(t_theta_k.flat, axis_t.centers, sinogram_ramp[:, k], left=np.nan,\n right=np.nan)\n', (2450, 2534), True, 'import numpy as np\n'), ((1887, 1903), 'numpy.atleast_2d', 'np.atleast_2d', (['W'], {}), '(W)\n', (1900, 1903), True, 'import numpy as np\n'), ((609, 630), 'numpy.prod', 'np.prod', (['grid_y.shape'], {}), '(grid_y.shape)\n', (616, 630), True, 'import numpy as np\n'), ((632, 651), 'numpy.prod', 'np.prod', (['grid.shape'], {}), '(grid.shape)\n', (639, 651), True, 'import numpy as np\n'), ((2385, 2400), 'numpy.cos', 'np.cos', (['theta_k'], {}), '(theta_k)\n', (2391, 2400), True, 'import numpy as np\n'), ((2407, 2422), 'numpy.sin', 'np.sin', (['theta_k'], {}), '(theta_k)\n', (2413, 2422), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from os import path as osp
from tqdm import tqdm
import shutil
import sys
def imwrite(path, img):
    """Save an RGB float image in [0, 1] to *path* as an 8-bit BGR file via OpenCV."""
    bgr = img[:, :, [2, 1, 0]]
    cv2.imwrite(path, (bgr * 255.0).round().astype(np.uint8))
def imread(path):
    """Load an image from *path* with OpenCV and return it in RGB channel order."""
    bgr = cv2.imread(path)
    return bgr[:, :, [2, 1, 0]]
def tmap(x):
    '''
    Tone mapping curve x -> x / (x + 0.25).
    Its range is referred to as the simple tone-mapped domain.
    '''
    shifted = x + 0.25
    return x / shifted
def ccm_info_get(ccm_txt):
    """
    Parse a color-correction-matrix text file whose lines look like
    "name: v1 v2 ... v9" and return the nine values of the last line
    as a float32 3x3 numpy array.
    """
    with open(ccm_txt) as fh:
        for row in fh:
            key, values = row.split(':')
            entries = np.array([np.float32(tok) for tok in values.split()])
            ccm = entries.reshape((3, 3))
    return ccm
def ccmProcess_rgb(img, ccm):
    '''
    Apply a 3x3 color correction matrix to an RGB image:
    out[..., c] = ccm[c, 0]*R + ccm[c, 1]*G + ccm[c, 2]*B.
    '''
    result = img.copy()
    for c in range(3):
        result[:, :, c] = (ccm[c, 0] * img[:, :, 0]
                           + ccm[c, 1] * img[:, :, 1]
                           + ccm[c, 2] * img[:, :, 2])
    return result
def cc_img(img, ccm):
    '''
    Color correct a linear-domain image with the given 3x3 matrix.
    Values are clipped to [0, 16] both before and after the transform
    to fit the ZTE sensor range.
    '''
    clipped = np.clip(img, 0, 16.0)
    corrected = ccmProcess_rgb(clipped, ccm)
    return np.clip(corrected, 0, 16)
def WB(img, ref):
    '''
    Simple white balance: rescale each channel of *img* so its total
    matches the corresponding channel of *ref*.
    Both images are assumed to range [0, 1]; output is clipped to [0, 1].
    '''
    balanced = np.zeros_like(img, dtype=np.float32)
    for ch in range(3):
        balanced[:, :, ch] = img[:, :, ch] / img[:, :, ch].sum() * ref[:, :, ch].sum()
    return np.clip(balanced, 0, 1)
def simple_to_linear(img, linear_max=500):
    '''
    Map values from the simple tone-mapped domain back to the linear domain.
    Input is clipped to tmap(linear_max) first so the inverse stays finite.
    '''
    ceiling = tmap(linear_max)
    clipped = np.clip(img, 0, ceiling)
    return clipped / (4 * (1 - clipped))
def linear_to_gamma(img, linear_max=12):
    """Apply a gamma (1/2.8) curve normalized so *linear_max* maps to 1.0;
    input is clipped to [0, linear_max] first.
    """
    scale = 1 / linear_max ** (1 / 2.8)
    clipped = np.clip(img, 0, linear_max)
    return scale * clipped ** (1 / 2.8)
def contrast(img, limit=1.0):
    '''
    CLAHE contrast enhancement applied to the L channel in LAB space.
    *img* is RGB in [0, 1]; tune "limit" to adjust the clip strength.
    '''
    img8 = (img[:, :, [2, 1, 0]] * 255.0).round().astype(np.uint8)
    clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=(8, 8))
    lab = cv2.cvtColor(img8, cv2.COLOR_BGR2LAB)   # BGR -> LAB
    l_ch, a_ch, b_ch = cv2.split(lab)
    lab = cv2.merge((clahe.apply(l_ch), a_ch, b_ch))  # CLAHE on L only
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)    # LAB -> BGR
    return bgr[:, :, [2, 1, 0]] / 255.0
| [
"numpy.clip",
"cv2.imwrite",
"cv2.merge",
"cv2.createCLAHE",
"numpy.array",
"cv2.cvtColor",
"cv2.split",
"numpy.zeros_like",
"numpy.float32",
"cv2.imread"
] | [((199, 221), 'cv2.imwrite', 'cv2.imwrite', (['path', 'img'], {}), '(path, img)\n', (210, 221), False, 'import cv2\n'), ((252, 268), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (262, 268), False, 'import cv2\n'), ((1375, 1396), 'numpy.clip', 'np.clip', (['img', '(0)', '(16.0)'], {}), '(img, 0, 16.0)\n', (1382, 1396), True, 'import numpy as np\n'), ((1448, 1470), 'numpy.clip', 'np.clip', (['img_cc', '(0)', '(16)'], {}), '(img_cc, 0, 16)\n', (1455, 1470), True, 'import numpy as np\n'), ((1653, 1689), 'numpy.zeros_like', 'np.zeros_like', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (1666, 1689), True, 'import numpy as np\n'), ((1819, 1846), 'numpy.clip', 'np.clip', (['balanced_img', '(0)', '(1)'], {}), '(balanced_img, 0, 1)\n', (1826, 1846), True, 'import numpy as np\n'), ((2160, 2187), 'numpy.clip', 'np.clip', (['img', '(0)', 'linear_max'], {}), '(img, 0, linear_max)\n', (2167, 2187), True, 'import numpy as np\n'), ((2424, 2477), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': 'limit', 'tileGridSize': '(8, 8)'}), '(clipLimit=limit, tileGridSize=(8, 8))\n', (2439, 2477), False, 'import cv2\n'), ((2488, 2524), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2LAB'], {}), '(img, cv2.COLOR_BGR2LAB)\n', (2500, 2524), False, 'import cv2\n'), ((2578, 2592), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (2587, 2592), False, 'import cv2\n'), ((2694, 2715), 'cv2.merge', 'cv2.merge', (['(l2, a, b)'], {}), '((l2, a, b))\n', (2703, 2715), False, 'import cv2\n'), ((2743, 2779), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_LAB2BGR'], {}), '(lab, cv2.COLOR_LAB2BGR)\n', (2755, 2779), False, 'import cv2\n'), ((656, 669), 'numpy.array', 'np.array', (['ccm'], {}), '(ccm)\n', (664, 669), True, 'import numpy as np\n'), ((605, 618), 'numpy.float32', 'np.float32', (['v'], {}), '(v)\n', (615, 618), True, 'import numpy as np\n')] |
"""
=====================================================================================
aevmod version 1.0
Copyright (2021) NTESS
https://github.com/sandialabs/aevmod
Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains
certain rights in this software.
This file is part of aevmod. aevmod is open-source software: you can redistribute it
and/or modify it under the terms of BSD 2-Clause License
(https://opensource.org/licenses/BSD-2-Clause). A copy of the license is also
provided under the main directory
Questions? Contact <NAME> at <<EMAIL>>
Sandia National Laboratories, Livermore, CA, USA
=====================================================================================
"""
import sys
import numpy as np
import math
import aevmod
import pytest
import utils
# ====================================================================================================
def test_aev():
    """Regression test: the aev of C2H5 must match stored reference values."""
    # aev object for C/H chemistry with the standard parameter set
    types = ['C', 'H']
    myaev = aevmod.aev(types, 8, 4, 4)
    # load the geometry and wrap it in a configuration
    symb, vxyz = utils.read_xyz("tests/xyz_c2h5.txt")
    cnf = aevmod.config(symb)
    myaev.build_index_sets(cnf)
    npt = cnf.add_structures(vxyz)
    # evaluate the aev of every structure
    got_aev = myaev.eval(cnf)
    # reference aev data previously computed for this system
    nptt, n_atom, dout, tru_aev = utils.read_aev("tests/aev_c2h5_8_4_4.txt")
    try:
        # both files must describe the same number of structures ...
        assert npt == nptt
        # ... and the evaluated aev must match the reference essentially exactly
        assert np.allclose(got_aev, tru_aev, rtol=1e-15, atol=1e-15)
    except AssertionError:
        print("got_aev disagrees with tru_aev")
        errmx = max((abs(got_aev[p][a][r] - tru_aev[p][a][r])
                     for p in range(npt)
                     for a in range(n_atom)
                     for r in range(dout)), default=0.0)
        print("errmx:", errmx)
        sys.exit(1)
# ====================================================================================================
| [
"utils.read_xyz",
"numpy.allclose",
"aevmod.aev",
"sys.exit",
"utils.read_aev",
"aevmod.config"
] | [((1085, 1111), 'aevmod.aev', 'aevmod.aev', (['types', '(8)', '(4)', '(4)'], {}), '(types, 8, 4, 4)\n', (1095, 1111), False, 'import aevmod\n'), ((1150, 1186), 'utils.read_xyz', 'utils.read_xyz', (['"""tests/xyz_c2h5.txt"""'], {}), "('tests/xyz_c2h5.txt')\n", (1164, 1186), False, 'import utils\n'), ((1222, 1241), 'aevmod.config', 'aevmod.config', (['symb'], {}), '(symb)\n', (1235, 1241), False, 'import aevmod\n'), ((1497, 1539), 'utils.read_aev', 'utils.read_aev', (['"""tests/aev_c2h5_8_4_4.txt"""'], {}), "('tests/aev_c2h5_8_4_4.txt')\n", (1511, 1539), False, 'import utils\n'), ((1730, 1783), 'numpy.allclose', 'np.allclose', (['got_aev', 'tru_aev'], {'rtol': '(1e-15)', 'atol': '(1e-15)'}), '(got_aev, tru_aev, rtol=1e-15, atol=1e-15)\n', (1741, 1783), True, 'import numpy as np\n'), ((2070, 2081), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2078, 2081), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 11:21:20 2019
@author: <EMAIL>
"""
import ephem
import numpy as np
from datetime import datetime
import concurrent.futures
import matplotlib.pyplot as plt
def get_sza(glon, glat):
    """Solar zenith angle (degrees) at (glon, glat) for the module-level
    observation time ``t``, corrected by the module-level ``horizon`` dip."""
    global t, horizon
    observer = ephem.Observer()
    observer.lat = np.deg2rad(glat)
    observer.lon = np.deg2rad(glon)
    observer.date = ephem.Date(t)
    sun = ephem.Sun()
    sun.compute(observer)
    elevation = np.degrees(sun.alt)
    return 90 - elevation + horizon
# Global longitude/latitude grid: 2 deg steps in lon, 1 deg steps in lat.
glon, glat = np.meshgrid(np.arange(-180,181,2), np.arange(-90,91,1))
galt = 0
re = 6371
# Horizon dip (degrees) for an observer at altitude galt km above a sphere
# of radius re km; zero at the surface.
horizon = -np.degrees(np.arccos(re/(re + galt)))
t = datetime(2017,9,7,23,30)
# Compute the solar zenith angle at every grid node concurrently.
with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:
    sza_worker = np.asarray([ex.submit(get_sza, glon.ravel()[i], glat.ravel()[i]) for i in range(glon.size)])
sza = np.nan*np.ones(glon.ravel().size)
for i in range(sza_worker.size):
    sza[i] = sza_worker[i].result()
sza = sza.reshape(glon.shape)
# Night side: zenith angle of 90 degrees or more (sun at/below the horizon).
night_ravel = (sza.ravel() >= 90)
night = night_ravel.reshape(glon.shape)
sza_day = sza
sza_day[night] = np.nan
# Day/night terminator: a thin band (+-0.2 deg) around sza == 90.
terminator = (sza > 90-.2) & (sza < 90+.2)
terminator_mask = np.zeros(glon.shape)
terminator_mask[terminator] = 1
plt.figure()
plt.pcolormesh(glon, glat, sza_day)
plt.pcolormesh(glon, glat, terminator_mask)
#sun = ephem.Sun()
#obs = ephem.Observer()
#obs.lat = np.deg2rad(glat)
#obs.lon = np.deg2rad(glon)
#obs.date = ephem.Date(t)
#sun.compute(obs)
#sr = sun.radius
#sun_azm = np.degrees(sun.ra)
#sun_elv = np.degrees(sun.alt) - horizon
#
#print (sun_azm, sun_elv)
"datetime.datetime",
"ephem.Observer",
"numpy.arccos",
"ephem.Sun",
"matplotlib.pyplot.pcolormesh",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.deg2rad",
"numpy.degrees",
"ephem.Date",
"numpy.arange"
] | [((661, 689), 'datetime.datetime', 'datetime', (['(2017)', '(9)', '(7)', '(23)', '(30)'], {}), '(2017, 9, 7, 23, 30)\n', (669, 689), False, 'from datetime import datetime\n'), ((1178, 1198), 'numpy.zeros', 'np.zeros', (['glon.shape'], {}), '(glon.shape)\n', (1186, 1198), True, 'import numpy as np\n'), ((1231, 1243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1241, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1279), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['glon', 'glat', 'sza_day'], {}), '(glon, glat, sza_day)\n', (1258, 1279), True, 'import matplotlib.pyplot as plt\n'), ((1280, 1323), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['glon', 'glat', 'terminator_mask'], {}), '(glon, glat, terminator_mask)\n', (1294, 1323), True, 'import matplotlib.pyplot as plt\n'), ((264, 280), 'ephem.Observer', 'ephem.Observer', ([], {}), '()\n', (278, 280), False, 'import ephem\n'), ((295, 311), 'numpy.deg2rad', 'np.deg2rad', (['glat'], {}), '(glat)\n', (305, 311), True, 'import numpy as np\n'), ((326, 342), 'numpy.deg2rad', 'np.deg2rad', (['glon'], {}), '(glon)\n', (336, 342), True, 'import numpy as np\n'), ((358, 371), 'ephem.Date', 'ephem.Date', (['t'], {}), '(t)\n', (368, 371), False, 'import ephem\n'), ((387, 398), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (396, 398), False, 'import ephem\n'), ((545, 568), 'numpy.arange', 'np.arange', (['(-180)', '(181)', '(2)'], {}), '(-180, 181, 2)\n', (554, 568), True, 'import numpy as np\n'), ((568, 589), 'numpy.arange', 'np.arange', (['(-90)', '(91)', '(1)'], {}), '(-90, 91, 1)\n', (577, 589), True, 'import numpy as np\n'), ((630, 657), 'numpy.arccos', 'np.arccos', (['(re / (re + galt))'], {}), '(re / (re + galt))\n', (639, 657), True, 'import numpy as np\n'), ((469, 488), 'numpy.degrees', 'np.degrees', (['sun.alt'], {}), '(sun.alt)\n', (479, 488), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import numpy.testing as npt
import pytest
import pytest_check as check
from astropop.image_processing.imarith import imarith
from astropop.framedata import FrameData
# TODO: Test with None FrameData
# TODO: Test with None scalar values
# TODO: '%' and '**' functions
# Parameter matrix for test_imarith_ops_frames: each entry is
# (operator, {f1, f2, r}) where f1/f2 describe the operand frames' pixel
# value 'v' and uncertainty 'u' (None = no uncertainty attached) and r is
# the expected result frame.
pars = pytest.mark.parametrize('op,vs', [('+', {'f1': {'v': 30, 'u': None},
                                                'f2': {'v': 10, 'u': None},
                                                'r': {'v': 40, 'u': None}}),
                                         ('+', {'f1': {'v': 30, 'u': 3},
                                                'f2': {'v': 10, 'u': 4},
                                                'r': {'v': 40, 'u': 5}}),
                                         ('-', {'f1': {'v': 30, 'u': None},
                                                'f2': {'v': 10, 'u': None},
                                                'r': {'v': 20, 'u': None}}),
                                         ('-', {'f1': {'v': 30, 'u': 3},
                                                'f2': {'v': 10, 'u': 4},
                                                'r': {'v': 20, 'u': 5}}),
                                         ('*', {'f1': {'v': 5, 'u': None},
                                                'f2': {'v': 6, 'u': None},
                                                'r': {'v': 30, 'u': None}}),
                                         ('*', {'f1': {'v': 5, 'u': 0.3},
                                                'f2': {'v': 6, 'u': 0.4},
                                                'r': {'v': 30,
                                                      'u': 2.022375}}),
                                         ('/', {'f1': {'v': 10, 'u': None},
                                                'f2': {'v': 3, 'u': None},
                                                'r': {'v': 3.33333333,
                                                      'u': None}}),
                                         ('/', {'f1': {'v': 10, 'u': 1},
                                                'f2': {'v': 3, 'u': 0.3},
                                                'r': {'v': 3.33333333,
                                                      'u': 0.47140452}}),
                                         ('//', {'f1': {'v': 10, 'u': None},
                                                 'f2': {'v': 3, 'u': None},
                                                 'r': {'v': 3.000000,
                                                       'u': None}}),
                                         ('//', {'f1': {'v': 10, 'u': 1},
                                                 'f2': {'v': 3, 'u': 0.3},
                                                 'r': {'v': 3.000000,
                                                       'u': 0.424264}})])
@pytest.mark.parametrize('handle_mask', [True, False])
@pytest.mark.parametrize('inplace', [True, False])
@pars
def test_imarith_ops_frames(op, vs, inplace, handle_mask):
    """imarith must combine frame data, propagate uncertainties when present,
    OR-merge masks when requested, and honor the inplace flag."""
    # Wrapped in a list so the gen_frame closure can flip it (closure write).
    propag_errors = [False]  # use list to gen_frame works
    def gen_frame(v):
        # Gen frames with {'v', 'u'} dict
        shape = (10, 10)
        if v['u'] is None:
            frame = FrameData(np.ones(shape, dtype='f8'), unit='adu')
        else:
            frame = FrameData(np.ones(shape, dtype='f8'), unit='adu',
                              uncertainty=v['u'])
            # Any frame carrying an uncertainty enables error propagation
            # for this whole test case.
            propag_errors[0] = True  # noqa
        frame.data[:] = v['v']
        return frame
    frame1 = gen_frame(vs['f1'])
    frame2 = gen_frame(vs['f2'])
    exp_res = gen_frame(vs['r'])
    if handle_mask:
        # Distinct masked pixels on each operand; result mask must be the OR.
        mask1 = np.zeros((10, 10))
        mask2 = np.zeros((10, 10))
        mask1[5, 5] = 1
        mask2[3, 3] = 1
        exp_mask = np.zeros((10, 10))
        exp_mask[5, 5] = 1
        exp_mask[3, 3] = 1
        frame1.mask = mask1
        frame2.mask = mask2
        exp_res.mask = exp_mask
    propag_errors = propag_errors[0]
    res = imarith(frame1, frame2, op, inplace=inplace,
                  propagate_errors=propag_errors,
                  handle_mask=handle_mask)
    npt.assert_array_almost_equal(res.data, exp_res.data)
    if propag_errors:
        npt.assert_array_almost_equal(res.uncertainty,
                                      exp_res.uncertainty)
    if handle_mask:
        npt.assert_array_equal(res.mask, exp_res.mask)
    # inplace=True must return the very same object as the first operand.
    if inplace:
        check.is_true(res is frame1)
    else:
        check.is_false(res is frame1)
def test_invalid_op():
    """An unknown operator string must raise ValueError naming the problem."""
    left = FrameData(np.zeros((10, 10)), unit='')
    right = FrameData(np.zeros((10, 10)), unit='')
    with pytest.raises(ValueError) as exc:
        imarith(left, right, 'not an op')
    check.is_in('not supported', str(exc.value))
def test_invalid_shapes():
    """Operands whose array shapes differ must raise ValueError."""
    big = FrameData(np.zeros((10, 10)), unit='')
    small = FrameData(np.zeros((5, 5)), unit='')
    with pytest.raises(ValueError):
        imarith(big, small, '+')
| [
"numpy.testing.assert_array_almost_equal",
"numpy.ones",
"pytest.mark.parametrize",
"numpy.zeros",
"pytest_check.is_false",
"pytest.raises",
"pytest_check.is_true",
"numpy.testing.assert_array_equal",
"astropop.image_processing.imarith.imarith"
] | [((363, 1372), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""op,vs"""', "[('+', {'f1': {'v': 30, 'u': None}, 'f2': {'v': 10, 'u': None}, 'r': {'v': \n 40, 'u': None}}), ('+', {'f1': {'v': 30, 'u': 3}, 'f2': {'v': 10, 'u': \n 4}, 'r': {'v': 40, 'u': 5}}), ('-', {'f1': {'v': 30, 'u': None}, 'f2':\n {'v': 10, 'u': None}, 'r': {'v': 20, 'u': None}}), ('-', {'f1': {'v': \n 30, 'u': 3}, 'f2': {'v': 10, 'u': 4}, 'r': {'v': 20, 'u': 5}}), ('*', {\n 'f1': {'v': 5, 'u': None}, 'f2': {'v': 6, 'u': None}, 'r': {'v': 30,\n 'u': None}}), ('*', {'f1': {'v': 5, 'u': 0.3}, 'f2': {'v': 6, 'u': 0.4},\n 'r': {'v': 30, 'u': 2.022375}}), ('/', {'f1': {'v': 10, 'u': None},\n 'f2': {'v': 3, 'u': None}, 'r': {'v': 3.33333333, 'u': None}}), ('/', {\n 'f1': {'v': 10, 'u': 1}, 'f2': {'v': 3, 'u': 0.3}, 'r': {'v': \n 3.33333333, 'u': 0.47140452}}), ('//', {'f1': {'v': 10, 'u': None},\n 'f2': {'v': 3, 'u': None}, 'r': {'v': 3.0, 'u': None}}), ('//', {'f1':\n {'v': 10, 'u': 1}, 'f2': {'v': 3, 'u': 0.3}, 'r': {'v': 3.0, 'u': \n 0.424264}})]"], {}), "('op,vs', [('+', {'f1': {'v': 30, 'u': None}, 'f2':\n {'v': 10, 'u': None}, 'r': {'v': 40, 'u': None}}), ('+', {'f1': {'v': \n 30, 'u': 3}, 'f2': {'v': 10, 'u': 4}, 'r': {'v': 40, 'u': 5}}), ('-', {\n 'f1': {'v': 30, 'u': None}, 'f2': {'v': 10, 'u': None}, 'r': {'v': 20,\n 'u': None}}), ('-', {'f1': {'v': 30, 'u': 3}, 'f2': {'v': 10, 'u': 4},\n 'r': {'v': 20, 'u': 5}}), ('*', {'f1': {'v': 5, 'u': None}, 'f2': {'v':\n 6, 'u': None}, 'r': {'v': 30, 'u': None}}), ('*', {'f1': {'v': 5, 'u': \n 0.3}, 'f2': {'v': 6, 'u': 0.4}, 'r': {'v': 30, 'u': 2.022375}}), ('/',\n {'f1': {'v': 10, 'u': None}, 'f2': {'v': 3, 'u': None}, 'r': {'v': \n 3.33333333, 'u': None}}), ('/', {'f1': {'v': 10, 'u': 1}, 'f2': {'v': 3,\n 'u': 0.3}, 'r': {'v': 3.33333333, 'u': 0.47140452}}), ('//', {'f1': {\n 'v': 10, 'u': None}, 'f2': {'v': 3, 'u': None}, 'r': {'v': 3.0, 'u':\n None}}), ('//', {'f1': {'v': 10, 'u': 1}, 'f2': {'v': 3, 'u': 0.3}, 'r':\n {'v': 3.0, 'u': 
0.424264}})])\n", (386, 1372), False, 'import pytest\n'), ((2934, 2987), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""handle_mask"""', '[True, False]'], {}), "('handle_mask', [True, False])\n", (2957, 2987), False, 'import pytest\n'), ((2989, 3038), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inplace"""', '[True, False]'], {}), "('inplace', [True, False])\n", (3012, 3038), False, 'import pytest\n'), ((4046, 4151), 'astropop.image_processing.imarith.imarith', 'imarith', (['frame1', 'frame2', 'op'], {'inplace': 'inplace', 'propagate_errors': 'propag_errors', 'handle_mask': 'handle_mask'}), '(frame1, frame2, op, inplace=inplace, propagate_errors=propag_errors,\n handle_mask=handle_mask)\n', (4053, 4151), False, 'from astropop.image_processing.imarith import imarith\n'), ((4189, 4242), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['res.data', 'exp_res.data'], {}), '(res.data, exp_res.data)\n', (4218, 4242), True, 'import numpy.testing as npt\n'), ((3716, 3734), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3724, 3734), True, 'import numpy as np\n'), ((3751, 3769), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3759, 3769), True, 'import numpy as np\n'), ((3837, 3855), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3845, 3855), True, 'import numpy as np\n'), ((4273, 4340), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['res.uncertainty', 'exp_res.uncertainty'], {}), '(res.uncertainty, exp_res.uncertainty)\n', (4302, 4340), True, 'import numpy.testing as npt\n'), ((4407, 4453), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['res.mask', 'exp_res.mask'], {}), '(res.mask, exp_res.mask)\n', (4429, 4453), True, 'import numpy.testing as npt\n'), ((4479, 4507), 'pytest_check.is_true', 'check.is_true', (['(res is frame1)'], {}), '(res is frame1)\n', (4492, 4507), True, 'import pytest_check as check\n'), ((4526, 4555), 
'pytest_check.is_false', 'check.is_false', (['(res is frame1)'], {}), '(res is frame1)\n', (4540, 4555), True, 'import pytest_check as check\n'), ((4604, 4622), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4612, 4622), True, 'import numpy as np\n'), ((4656, 4674), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4664, 4674), True, 'import numpy as np\n'), ((4694, 4719), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4707, 4719), False, 'import pytest\n'), ((4736, 4772), 'astropop.image_processing.imarith.imarith', 'imarith', (['frame1', 'frame2', '"""not an op"""'], {}), "(frame1, frame2, 'not an op')\n", (4743, 4772), False, 'from astropop.image_processing.imarith import imarith\n'), ((4878, 4896), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4886, 4896), True, 'import numpy as np\n'), ((4930, 4946), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (4938, 4946), True, 'import numpy as np\n'), ((4966, 4991), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4979, 4991), False, 'import pytest\n'), ((5001, 5029), 'astropop.image_processing.imarith.imarith', 'imarith', (['frame1', 'frame2', '"""+"""'], {}), "(frame1, frame2, '+')\n", (5008, 5029), False, 'from astropop.image_processing.imarith import imarith\n'), ((3310, 3336), 'numpy.ones', 'np.ones', (['shape'], {'dtype': '"""f8"""'}), "(shape, dtype='f8')\n", (3317, 3336), True, 'import numpy as np\n'), ((3394, 3420), 'numpy.ones', 'np.ones', (['shape'], {'dtype': '"""f8"""'}), "(shape, dtype='f8')\n", (3401, 3420), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import sys
import warnings
warnings.filterwarnings("ignore")
import argparse
import h5py
import pandas as pd
import numpy as np
def decompress_seq(x, length=16, bits=64):
    """Decode a 2-bit-packed integer into an ACGT string of ``length`` bases.

    Bases are packed two bits each, last base in the lowest bits.  The most
    significant bit of the ``bits``-wide word flags an all-N (undetermined)
    sequence.
    """
    alphabet = np.array(list(b"ACGT"))
    x = np.uint64(x)
    assert length <= (bits / 2 - 1)
    if x & (1 << (bits - 1)):
        return "N" * length
    decoded = bytearray(length)
    # Peel off two bits at a time, filling from the rightmost base leftwards.
    for pos in range(length - 1, -1, -1):
        decoded[pos] = alphabet[x & np.uint64(0b11)]
        x = x >> np.uint64(2)
    return decoded.decode("ascii")
def compute_legacy(mole_file, filt_mat_file):
    """Per-barcode sequencing saturation for legacy (Cell Ranger v1/v2)
    molecule_info.h5 files, reindexed to the filtered barcode list.

    Returns a DataFrame with columns umis, reads, saturation indexed by
    "<SEQ>-<gem group>" barcode strings.
    """
    with h5py.File(mole_file) as f:
        print(list(f))
        genome = f["/genome_ids"][0].decode("ascii")
        gem_group = np.array(f["/gem_group"]).astype(str)
        barcodes = np.array(f["/barcode"]).astype(str)
        reads = np.array(f["/reads"])  # .astype(np.int64)
    # Any molecule with at least one read counts as one UMI.
    umis = reads.astype(bool).astype(np.int8)
    gems = np.char.add("-", gem_group)
    # Rebuild "<SEQ>-<gem group>" strings; /barcode holds 2-bit-packed ints
    # that decompress_seq decodes.
    bcs = np.char.add(np.array([decompress_seq(bc) for bc in barcodes]), gems)
    uniq_bcs, uniq_coords, inverse_coords = np.unique(
        bcs, return_index=True, return_inverse=True
    )
    # Sum reads and UMIs per unique barcode via weighted bincount.
    rcs = np.bincount(inverse_coords, reads)
    ucs = np.bincount(inverse_coords, umis)
    seqsat = pd.DataFrame({"umis": ucs, "reads": rcs}, index=bcs[uniq_coords])
    seqsat["saturation"] = (seqsat.reads - seqsat.umis) * 100 / seqsat.reads
    with h5py.File(filt_mat_file) as fin:
        filt_bcs = pd.Index(fin[f"/{genome}/barcodes"]).astype(str)
    return seqsat.reindex(filt_bcs)
def compute_modern(mole_file, filt_mat_file, *args):
    """Per-barcode sequencing saturation for modern (Cell Ranger v3+)
    molecule_info.h5 files, reindexed to the filtered barcode list."""
    with h5py.File(mole_file) as handle:
        gem_group = np.array(handle["/gem_group"])
        barcode_idx = np.array(handle["/barcode_idx"]).astype(np.int64)
        barcodes = np.array(handle["/barcodes"]).astype(str)
        reads = np.array(handle["/count"])  # .astype(np.int64)
    # A molecule with at least one read contributes one UMI.
    umis = reads.astype(bool).astype(np.int8)
    uniq_idxs, uniq_coords = np.unique(barcode_idx, return_index=True)
    suffixes = np.char.add("-", gem_group[uniq_coords].astype(str))
    bcs = np.char.add(barcodes[uniq_idxs], suffixes)
    # Per-barcode totals via weighted bincount, restricted to seen barcodes.
    read_counts = np.bincount(barcode_idx, reads)[uniq_idxs]
    umi_counts = np.bincount(barcode_idx, umis)[uniq_idxs]
    seqsat = pd.DataFrame({"umis": umi_counts, "reads": read_counts}, index=bcs)
    seqsat["saturation"] = (seqsat.reads - seqsat.umis) * 100 / seqsat.reads
    with h5py.File(filt_mat_file) as fin:
        filt_bcs = pd.Index(fin["/matrix/barcodes"]).astype(str)
    return seqsat.reindex(filt_bcs)
def parse_args():
    """Define the command-line interface and parse sys.argv.

    Positional: molecule_info_path, filtered_matrix_path, software,
    software_version.  Optional: --outfile (default sequencing_saturation.csv).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("molecule_info_path", help="Path to 'molecule_info.h5' file")
    cli.add_argument(
        "filtered_matrix_path", help="Path to 'filtered_*_bc_matrix*.h5' file"
    )
    cli.add_argument("software", help="10X software version")
    cli.add_argument("software_version", help="10X software version")
    cli.add_argument(
        "--outfile",
        default="sequencing_saturation.csv",
        help="Output csv file for saturation metrics.",
    )
    return cli.parse_args()
def main():
    """Entry point: pick the parser matching the 10X software version and
    write the per-barcode saturation table to CSV."""
    args = parse_args()
    # Cell Ranger 1.x/2.x wrote the legacy molecule_info layout.
    is_legacy = args.software == "cellranger" and args.software_version[0] in "12"
    if is_legacy:
        seqsat = compute_legacy(args.molecule_info_path, args.filtered_matrix_path)
    else:
        seqsat = compute_modern(args.molecule_info_path, args.filtered_matrix_path)
    seqsat.to_csv(args.outfile)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"numpy.unique",
"argparse.ArgumentParser",
"h5py.File",
"pandas.Index",
"numpy.array",
"numpy.uint64",
"numpy.char.add",
"pandas.DataFrame",
"numpy.bincount",
"warnings.filterwarnings"
] | [((61, 94), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (84, 94), False, 'import warnings\n'), ((260, 272), 'numpy.uint64', 'np.uint64', (['x'], {}), '(x)\n', (269, 272), True, 'import numpy as np\n'), ((2614, 2639), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2637, 2639), False, 'import argparse\n'), ((615, 635), 'h5py.File', 'h5py.File', (['mole_file'], {}), '(mole_file)\n', (624, 635), False, 'import h5py\n'), ((848, 869), 'numpy.array', 'np.array', (["f['/reads']"], {}), "(f['/reads'])\n", (856, 869), True, 'import numpy as np\n'), ((957, 984), 'numpy.char.add', 'np.char.add', (['"""-"""', 'gem_group'], {}), "('-', gem_group)\n", (968, 984), True, 'import numpy as np\n'), ((1116, 1170), 'numpy.unique', 'np.unique', (['bcs'], {'return_index': '(True)', 'return_inverse': '(True)'}), '(bcs, return_index=True, return_inverse=True)\n', (1125, 1170), True, 'import numpy as np\n'), ((1207, 1241), 'numpy.bincount', 'np.bincount', (['inverse_coords', 'reads'], {}), '(inverse_coords, reads)\n', (1218, 1241), True, 'import numpy as np\n'), ((1256, 1289), 'numpy.bincount', 'np.bincount', (['inverse_coords', 'umis'], {}), '(inverse_coords, umis)\n', (1267, 1289), True, 'import numpy as np\n'), ((1308, 1373), 'pandas.DataFrame', 'pd.DataFrame', (["{'umis': ucs, 'reads': rcs}"], {'index': 'bcs[uniq_coords]'}), "({'umis': ucs, 'reads': rcs}, index=bcs[uniq_coords])\n", (1320, 1373), True, 'import pandas as pd\n'), ((1465, 1489), 'h5py.File', 'h5py.File', (['filt_mat_file'], {}), '(filt_mat_file)\n', (1474, 1489), False, 'import h5py\n'), ((1667, 1687), 'h5py.File', 'h5py.File', (['mole_file'], {}), '(mole_file)\n', (1676, 1687), False, 'import h5py\n'), ((1715, 1740), 'numpy.array', 'np.array', (["f['/gem_group']"], {}), "(f['/gem_group'])\n", (1723, 1740), True, 'import numpy as np\n'), ((1880, 1901), 'numpy.array', 'np.array', (["f['/count']"], {}), "(f['/count'])\n", (1888, 1901), True, 
'import numpy as np\n'), ((2007, 2048), 'numpy.unique', 'np.unique', (['barcode_idx'], {'return_index': '(True)'}), '(barcode_idx, return_index=True)\n', (2016, 2048), True, 'import numpy as np\n'), ((2131, 2169), 'numpy.char.add', 'np.char.add', (['barcodes[uniq_idxs]', 'gems'], {}), '(barcodes[uniq_idxs], gems)\n', (2142, 2169), True, 'import numpy as np\n'), ((2302, 2354), 'pandas.DataFrame', 'pd.DataFrame', (["{'umis': ucs, 'reads': rcs}"], {'index': 'bcs'}), "({'umis': ucs, 'reads': rcs}, index=bcs)\n", (2314, 2354), True, 'import pandas as pd\n'), ((2446, 2470), 'h5py.File', 'h5py.File', (['filt_mat_file'], {}), '(filt_mat_file)\n', (2455, 2470), False, 'import h5py\n'), ((511, 523), 'numpy.uint64', 'np.uint64', (['(2)'], {}), '(2)\n', (520, 523), True, 'import numpy as np\n'), ((2185, 2216), 'numpy.bincount', 'np.bincount', (['barcode_idx', 'reads'], {}), '(barcode_idx, reads)\n', (2196, 2216), True, 'import numpy as np\n'), ((2242, 2272), 'numpy.bincount', 'np.bincount', (['barcode_idx', 'umis'], {}), '(barcode_idx, umis)\n', (2253, 2272), True, 'import numpy as np\n'), ((477, 489), 'numpy.uint64', 'np.uint64', (['(3)'], {}), '(3)\n', (486, 489), True, 'import numpy as np\n'), ((739, 764), 'numpy.array', 'np.array', (["f['/gem_group']"], {}), "(f['/gem_group'])\n", (747, 764), True, 'import numpy as np\n'), ((796, 819), 'numpy.array', 'np.array', (["f['/barcode']"], {}), "(f['/barcode'])\n", (804, 819), True, 'import numpy as np\n'), ((1517, 1553), 'pandas.Index', 'pd.Index', (["fin[f'/{genome}/barcodes']"], {}), "(fin[f'/{genome}/barcodes'])\n", (1525, 1553), True, 'import pandas as pd\n'), ((1763, 1790), 'numpy.array', 'np.array', (["f['/barcode_idx']"], {}), "(f['/barcode_idx'])\n", (1771, 1790), True, 'import numpy as np\n'), ((1827, 1851), 'numpy.array', 'np.array', (["f['/barcodes']"], {}), "(f['/barcodes'])\n", (1835, 1851), True, 'import numpy as np\n'), ((2498, 2531), 'pandas.Index', 'pd.Index', (["fin['/matrix/barcodes']"], {}), 
"(fin['/matrix/barcodes'])\n", (2506, 2531), True, 'import pandas as pd\n')] |
import os
# Silence TensorFlow's C++ logging before the TF imports below.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import json
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras_preprocessing.text import tokenizer_from_json
import nltk
import re
from nltk.corpus import stopwords
# Fetch the stopword corpus at import time so create_test_input can use it.
nltk.download('stopwords')
from flask import Flask, request, render_template
app = Flask(__name__, template_folder='templates', static_folder='statics')
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/predict', methods=['POST', 'GET'])
def predict():
    """Classify the submitted headline on POST; GET yields no response body."""
    if request.method != 'POST':
        return None
    headline = request.form.get('link')
    return create_test_input(headline)
def create_test_input(message):
    """Classify a news headline and return a human-readable verdict string.

    Inputs failing validation (too few words, purely numeric) get an
    explanatory message instead of a classification.
    """
    temp = ("".join(message.split(' ')))
    # Reject short alphanumeric headlines and all-numeric input up front.
    if len(message.split(' ')) < 4 and temp.isalnum() and not temp.isdigit():
        return "Enter a Headline with more words"
    elif temp.isnumeric():
        return "Invalid headline"
    # NOTE(review): the model and tokenizer are re-loaded from disk on every
    # call/request — consider caching them at module level.
    model = load_model('models/latest_model.h5')
    max_length = 31
    f = open('tokenizer/latest_tokenizer.json')
    data = json.load(f)
    tokenizer = tokenizer_from_json(data)
    f.close()
    corpus = []
    # Keep letters only, then drop English stopwords before tokenizing.
    review = re.sub('[^a-zA-Z]', ' ', message)
    review = review.split()
    # review = [word for word in review]
    review = [word for word in review if not word in stopwords.words('english')]
    corpus.append(review)
    sequences = tokenizer.texts_to_sequences(corpus)
    if len(sequences[0]) < 3:
        return "Enter a Headline with more words"
    data = pad_sequences(sequences, maxlen=max_length)
    X_final = np.array(data)
    # Sigmoid output thresholded at 0.7: above -> genuine, below -> fake.
    prediction = (model.predict(X_final) > 0.7).astype("int32")
    if prediction[0][0] == 1:
        return "Genuine news"
    else:
        return "Fake news"
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=False)
| [
"flask.render_template",
"nltk.corpus.stopwords.words",
"nltk.download",
"flask.Flask",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"keras_preprocessing.text.tokenizer_from_json",
"flask.request.form.get",
"numpy.array",
"tensorflow.keras.models.load_model",
"json.load",
"re.sub"
] | [((320, 346), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (333, 346), False, 'import nltk\n'), ((411, 480), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""templates"""', 'static_folder': '"""statics"""'}), "(__name__, template_folder='templates', static_folder='statics')\n", (416, 480), False, 'from flask import Flask, request, render_template\n'), ((526, 555), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (541, 555), False, 'from flask import Flask, request, render_template\n'), ((1058, 1094), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/latest_model.h5"""'], {}), "('models/latest_model.h5')\n", (1068, 1094), False, 'from tensorflow.keras.models import load_model\n'), ((1179, 1191), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1188, 1191), False, 'import json\n'), ((1209, 1234), 'keras_preprocessing.text.tokenizer_from_json', 'tokenizer_from_json', (['data'], {}), '(data)\n', (1228, 1234), False, 'from keras_preprocessing.text import tokenizer_from_json\n'), ((1281, 1314), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'message'], {}), "('[^a-zA-Z]', ' ', message)\n", (1287, 1314), False, 'import re\n'), ((1656, 1699), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_length'}), '(sequences, maxlen=max_length)\n', (1669, 1699), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1715, 1729), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1723, 1729), True, 'import numpy as np\n'), ((673, 697), 'flask.request.form.get', 'request.form.get', (['"""link"""'], {}), "('link')\n", (689, 697), False, 'from flask import Flask, request, render_template\n'), ((1445, 1471), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1460, 1471), False, 'from nltk.corpus import stopwords\n')] |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import acl
import os
import cv2
import numpy as np
import sys
import time
from model_processors.BaseProcessor import BaseProcessor
sys.path.append("../../../../src/lib")
from atlas_utils.resource_list import resource_list
class ModelProcessor(BaseProcessor):
    """YOLOv3-based detector pipeline: letterbox preprocessing, model
    execution, and box decoding/drawing for camera frames."""
    def __init__(self, params):
        """params must provide camera_height/width and model_height/width."""
        super().__init__(params)
        # parameters for preprocessing
        self.ih, self.iw = (params['camera_height'], params['camera_width'])
        self.h, self.w = params['model_height'], params['model_width']
        # Letterbox scale: fit the camera frame inside the model input.
        self.scale = min(self.w / self.iw, self.h / self.ih)
        self.nw = int(self.iw * self.scale)
        self.nh = int(self.ih * self.scale)
        # parameters for postprocessing
        self.image_shape = [params['camera_height'], params['camera_width']]
        self.model_shape = [self.h, self.w]
        self.num_classes = 1
        self.anchors = self.get_anchors()
    def release_acl(self):
        """Tear down ACL resources: registered resources, stream, context,
        then the device itself (order matters for the ACL runtime)."""
        print("acl resource release all resource")
        resource_list.destroy()
        if self._acl_resource.stream:
            print("acl resource release stream")
            acl.rt.destroy_stream(self._acl_resource.stream)
        if self._acl_resource.context:
            print("acl resource release context")
            acl.rt.destroy_context(self._acl_resource.context)
        print("Reset acl device ", self._acl_resource.device_id)
        acl.rt.reset_device(self._acl_resource.device_id)
        acl.finalize()
        print("Release acl resource success")
    def predict(self, frame):
        """Run preprocess -> model -> postprocess on one frame and return the
        postprocess result (annotated frame, yolo_eval duration)."""
        preprocessed = self.preprocess(frame)
        outputs = self.model.execute([preprocessed])
        postprocess_start = time.process_time()
        result = self.postprocess(frame, outputs)
        print(f"@predict.postprocess = {time.process_time() - postprocess_start}")
        return result
    def preprocess(self, frame):
        """preprocess frame from drone"""
        # preprocessing: resize and paste input image to a new image with size 416*416
        img = np.array(frame, dtype='float32')
        img_resize = cv2.resize(img, (self.nw, self.nh), interpolation=cv2.INTER_CUBIC)
        # Mid-grey (128) padding fills the letterbox borders.
        img_new = np.ones((416, 416, 3), np.float32) * 128
        img_new[(self.h - self.nh) // 2: ((self.h - self.nh) // 2 + self.nh),
               (self.w - self.nw) // 2: (self.w - self.nw) // 2 + self.nw, :] = img_resize[:, :, :]
        # Scale pixel values to [0, 1].
        img_new = img_new / 255.
        return img_new
    def postprocess(self, frame, outputs):
        """Decode model outputs, draw detected boxes on the frame, and return
        (annotated frame, yolo_eval duration)."""
        yolo_eval_start = time.process_time()
        box_axis, box_score = yolo_eval(
            outputs, self.anchors, self.num_classes, self.image_shape)
        yolo_eval_end = time.process_time() - yolo_eval_start
        nparryList, boxList = get_box_img(frame, box_axis)
        if len(nparryList) > 0:
            for box in boxList:
                cv2.rectangle(frame, (box[0], box[2]), (box[1], box[3]), (255, 0, 0), 4)
        print(f"\n####################################################################")
        print(f"@postprocess.yolo_eval process duration = {round(yolo_eval_end, 3)}")
        # print(f"@postprocess:getbox process duration = {round(getbox_end, 3)}")
        # print(f"@postprocess:forloop process duration = {round(forloop_end, 3)}")
        return frame, yolo_eval_end
    def get_anchors(self):
        """return anchors
        Returns:
            [ndarray]: anchors array
        """
        anchors = np.array([[10.,13.], [16.,30.], [33.,23.], [30.,61.], [62.,45.], [59.,119.], [116.,90.], [156.,198.], [373.,326.]])
        return anchors
def sigmoid(x):
    """Elementwise logistic function 1/(1+e^-x) on a float32 copy of x."""
    return 1.0 / (1.0 + np.exp(-x.astype("float32")))
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw layer output, shape (N, gh, gw, num_anchors*(num_classes+5)).
        anchors: (num_anchors, 2) anchor sizes in model-input pixels.
        num_classes: number of object classes.
        input_shape: (h, w) of the model input.
        calc_loss: if True, also return the cell grid and reshaped feats.

    Returns:
        dict with "box_xy" and "box_wh" (normalized to the model input), plus
        either ("grid", "feats") when calc_loss, or
        ("box_confidence", "box_class_probs") otherwise.
    """
    feats = feats.astype("float32")
    num_anchors = len(anchors)
    anchors_tensor = np.reshape(anchors, [1, 1, 1, num_anchors, 2])
    grid_shape = np.shape(feats)[1:3]  # (grid_h, grid_w)
    # Build a (gh, gw, 1, 2) grid of per-cell (x, y) offsets.
    grid_y = np.tile(np.reshape(
        range(0, grid_shape[0]), [-1, 1, 1, 1]), [1, grid_shape[1], 1, 1])
    grid_x = np.tile(np.reshape(
        range(0, grid_shape[1]), [1, -1, 1, 1]), [grid_shape[0], 1, 1, 1])
    grid = np.concatenate([grid_x, grid_y], axis=-1)
    grid = grid.astype("float32")
    feats = np.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    # BUG FIX: np.cast[dtype](x) was removed in NumPy 2.0;
    # np.asarray(x, dtype=dtype) is the documented replacement.
    box_xy = (sigmoid(feats[..., :2]) + grid) / \
        np.asarray(grid_shape[::-1], dtype=feats.dtype)
    box_wh = np.exp(feats[..., 2:4]) * anchors_tensor / \
        np.asarray(input_shape[::-1], dtype=feats.dtype)
    box_confidence = sigmoid(feats[..., 4:5])
    box_class_probs = sigmoid(feats[..., 5:])
    box_wh = box_wh.astype("float32")
    ret = {
        "box_xy": box_xy,
        "box_wh": box_wh,
    }
    if calc_loss:
        ret["grid"] = grid
        ret["feats"] = feats
    else:
        ret["box_confidence"] = box_confidence
        ret["box_class_probs"] = box_class_probs
    return ret
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    """Map normalized letterboxed box centers/sizes back to original-image
    pixel coordinates.

    Args:
        box_xy: (..., 2) box centers normalized to the model input.
        box_wh: (..., 2) box sizes normalized to the model input.
        input_shape: (h, w) of the model input.
        image_shape: (h, w) of the original image.

    Returns:
        (..., 4) array of [y_min, x_min, y_max, x_max] in image pixels.
    """
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    # BUG FIX: np.cast[dtype](x) was removed in NumPy 2.0;
    # np.asarray(x, dtype=dtype) is the documented replacement.
    input_shape = np.asarray(input_shape, dtype=box_yx.dtype)
    image_shape = np.asarray(image_shape, dtype=box_yx.dtype)
    # Undo the letterbox: the image was scaled to fit input_shape while
    # preserving aspect ratio, then padded symmetrically.
    new_shape = np.round(image_shape * np.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale
    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = np.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ], axis=-1)
    # Scale boxes back to original image shape.
    boxes *= np.concatenate([image_shape, image_shape], axis=-1)
    return boxes
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
    """Flatten one YOLO output layer into (boxes, per-class scores)."""
    head = yolo_head(feats, anchors, num_classes, input_shape)
    corrected = yolo_correct_boxes(
        head['box_xy'], head['box_wh'], input_shape, image_shape)
    flat_boxes = np.reshape(corrected, [-1, 4])
    # Score = objectness confidence times class probability.
    flat_scores = np.reshape(
        head['box_confidence'] * head['box_class_probs'], [-1, num_classes])
    return (flat_boxes, flat_scores)
def nms(bounding_boxes, confidence_score, threshold):
    """Non-maximum suppression: keep the highest-scoring boxes, dropping any
    box whose IoU with an already-kept box exceeds ``threshold``.

    Args:
        bounding_boxes: list of [x1, y1, x2, y2] boxes.
        confidence_score: per-box confidence values.
        threshold: IoU above which a lower-scoring box is suppressed.

    Returns:
        (picked_boxes, picked_scores) in descending score order.
    """
    print(f"\n@predict.postprocess.yolo_eval.nms analysis")
    # If no bounding boxes, return empty list
    if len(bounding_boxes) == 0:
        print("\t@nms: returns empty list")
        return [], []
    def_var_start = time.process_time()
    # Bounding boxes
    boxes = np.array(bounding_boxes)
    # coordinates of bounding boxes
    start_x = boxes[:, 0]
    start_y = boxes[:, 1]
    end_x = boxes[:, 2]
    end_y = boxes[:, 3]
    # Confidence scores of bounding boxes
    score = np.array(confidence_score)
    # Areas use the inclusive-pixel (+1) convention throughout.
    areas = (end_x - start_x + 1) * (end_y - start_y + 1)
    # Process boxes in descending score order.
    order = np.argsort(score)[::-1]
    keep = []
    def_var_end = time.process_time() - def_var_start
    print(f"\t@nms: define variables (bbox, area, etc duration: {def_var_end}")
    while_loop_start = time.process_time()
    # Iterate bounding boxes
    while order.size > 0:
        # The index of largest confidence score
        index = order[0]
        keep.append(index)
        # Intersection of the current best box with each REMAINING box.
        # BUG FIX: the remaining boxes are order[1:], not order[:-1]; the
        # original indexing compared coordinates against the wrong boxes
        # (including the kept box itself) while areas used order[1:].
        x1 = np.maximum(start_x[index], start_x[order[1:]])
        x2 = np.minimum(end_x[index], end_x[order[1:]])
        y1 = np.maximum(start_y[index], start_y[order[1:]])
        y2 = np.minimum(end_y[index], end_y[order[1:]])
        # Compute areas of intersection-over-union
        w = np.maximum(0.0, x2 - x1 + 1)
        h = np.maximum(0.0, y2 - y1 + 1)
        intersection = w * h
        # IoU of the kept box against each remaining box.
        ratio = intersection / (areas[index] + areas[order[1:]] - intersection)
        inds = np.where(ratio <= threshold)[0]
        order = order[inds + 1]
    while_loop_end = time.process_time() - while_loop_start
    print(f"\t@nms: whileloop bbox iteration: {while_loop_end}")
    picked_boxes = [bounding_boxes[i] for i in keep]
    # A 0-d score array cannot be indexed; return it as a single-element list.
    if not score.shape:
        picked_score = [score]
    else:
        picked_score = [score[i] for i in keep]
    return picked_boxes, picked_score
def yolo_eval(yolo_outputs, anchors, num_classes, image_shape, score_threshold=.5, iou_threshold=.45):
    """
    Obtain predicted boxes axis and corresponding scores
    Args:
        yolo_outputs: output (3 feature maps) of YOLO V3 model, sizes are 1*13*13*18; 1*26*26*18; 1*52*52*18 seperately
        anchors: anchors pre-calculated
        num_classes: only 1 class here, which is "head"
        image_shape: original image input
        score_threshold: minimum box score to keep a detection (default 0.5)
        iou_threshold: IOU threshold forwarded to nms (default 0.45)
    Returns:
        predicted boxes axis and corresponding scores
    """
    print("\n@postprocess:yolo_eval analysis:")
    num_layers = len(yolo_outputs)
    # Anchor indices assigned to each output scale.
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    yolo_output_0 = yolo_outputs[0]
    # The first feature map is downscaled 32x relative to the network input.
    input_shape = [yolo_output_0.shape[1] * 32, yolo_output_0.shape[2] * 32]
    input_shape = np.array(input_shape)
    boxes = []
    box_scores = []
    # forloop start: decode boxes/scores for every output layer
    num_layer_start = time.process_time()
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
                                                    anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    num_layer_end = time.process_time() - num_layer_start
    print(f"\t@yolo_eval:forloop process duration: {num_layer_end}")
    boxes = np.concatenate(boxes, axis=0)
    box_scores = np.concatenate(box_scores, axis=0)
    mask = box_scores >= score_threshold
    # Hoisted: the same row selection was previously computed twice.
    selected = np.nonzero(box_scores * mask)[0]
    class_boxes = boxes[selected, :]
    class_box_scores = box_scores[selected, :]
    class_box_scores = np.squeeze(class_box_scores)
    # nms
    nms_start = time.process_time()
    box, score = nms(class_boxes, class_box_scores, iou_threshold)
    # BUG FIX: the elapsed time was measured against num_layer_start, so the
    # reported NMS duration also included the per-layer decoding loop.
    nms_end = time.process_time() - nms_start
    print(f"\t@yolo_eval:nms process duration: {nms_end}")
    return box, score
def get_box_img(image, box_axis):
    """
    Pack detected head area and corresponding location in the source image for WHENet
    Args:
        image: source image read from camera
        box_axis: location of boxes detected in YOLOV3
    Returns:
        nparryList: head area crops
        boxList: location of each crop in the source image
    """
    nparryList = []
    boxList = []
    for top, left, bottom, right in box_axis:
        # Grow the box by 10% vertically and 20% horizontally before cropping.
        v_margin = abs(top - bottom) / 10
        h_margin = abs(left - right) / 5
        top_grown = np.round(top - v_margin).astype('int32')
        bottom_grown = np.round(bottom + v_margin).astype('int32')
        left_grown = np.round(left - h_margin).astype('int32')
        right_grown = np.round(right + h_margin).astype('int32')
        # Clamp the grown box to the image bounds.
        top_clamped = max(0, top_grown)
        left_clamped = max(0, left_grown)
        bottom_clamped = min(image.shape[0], bottom_grown)
        right_clamped = min(image.shape[1], right_grown)
        boxList.append([left_clamped, right_clamped,
                        top_clamped, bottom_clamped])
        nparryList.append(
            image[top_clamped:bottom_clamped, left_clamped:right_clamped])
    return nparryList, boxList
| [
"cv2.rectangle",
"numpy.argsort",
"numpy.array",
"time.process_time",
"sys.path.append",
"numpy.reshape",
"numpy.where",
"acl.rt.destroy_context",
"numpy.exp",
"acl.rt.destroy_stream",
"numpy.concatenate",
"numpy.min",
"numpy.maximum",
"numpy.round",
"numpy.ones",
"atlas_utils.resource... | [((664, 702), 'sys.path.append', 'sys.path.append', (['"""../../../../src/lib"""'], {}), "('../../../../src/lib')\n", (679, 702), False, 'import sys\n'), ((4479, 4525), 'numpy.reshape', 'np.reshape', (['anchors', '[1, 1, 1, num_anchors, 2]'], {}), '(anchors, [1, 1, 1, num_anchors, 2])\n', (4489, 4525), True, 'import numpy as np\n'), ((4794, 4835), 'numpy.concatenate', 'np.concatenate', (['[grid_x, grid_y]'], {'axis': '(-1)'}), '([grid_x, grid_y], axis=-1)\n', (4808, 4835), True, 'import numpy as np\n'), ((4883, 4971), 'numpy.reshape', 'np.reshape', (['feats', '[-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]'], {}), '(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, \n num_classes + 5])\n', (4893, 4971), True, 'import numpy as np\n'), ((6172, 6283), 'numpy.concatenate', 'np.concatenate', (['[box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1], box_maxes[...,\n 1:2]]'], {'axis': '(-1)'}), '([box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1],\n box_maxes[..., 1:2]], axis=-1)\n', (6186, 6283), True, 'import numpy as np\n'), ((6416, 6467), 'numpy.concatenate', 'np.concatenate', (['[image_shape, image_shape]'], {'axis': '(-1)'}), '([image_shape, image_shape], axis=-1)\n', (6430, 6467), True, 'import numpy as np\n'), ((6773, 6799), 'numpy.reshape', 'np.reshape', (['boxes', '[-1, 4]'], {}), '(boxes, [-1, 4])\n', (6783, 6799), True, 'import numpy as np\n'), ((6885, 6926), 'numpy.reshape', 'np.reshape', (['box_scores', '[-1, num_classes]'], {}), '(box_scores, [-1, num_classes])\n', (6895, 6926), True, 'import numpy as np\n'), ((7293, 7312), 'time.process_time', 'time.process_time', ([], {}), '()\n', (7310, 7312), False, 'import time\n'), ((7351, 7375), 'numpy.array', 'np.array', (['bounding_boxes'], {}), '(bounding_boxes)\n', (7359, 7375), True, 'import numpy as np\n'), ((7568, 7594), 'numpy.array', 'np.array', (['confidence_score'], {}), '(confidence_score)\n', (7576, 7594), True, 'import numpy as np\n'), 
((7951, 7970), 'time.process_time', 'time.process_time', ([], {}), '()\n', (7968, 7970), False, 'import time\n'), ((9919, 9940), 'numpy.array', 'np.array', (['input_shape'], {}), '(input_shape)\n', (9927, 9940), True, 'import numpy as np\n'), ((10019, 10038), 'time.process_time', 'time.process_time', ([], {}), '()\n', (10036, 10038), False, 'import time\n'), ((10464, 10493), 'numpy.concatenate', 'np.concatenate', (['boxes'], {'axis': '(0)'}), '(boxes, axis=0)\n', (10478, 10493), True, 'import numpy as np\n'), ((10511, 10545), 'numpy.concatenate', 'np.concatenate', (['box_scores'], {'axis': '(0)'}), '(box_scores, axis=0)\n', (10525, 10545), True, 'import numpy as np\n'), ((10742, 10770), 'numpy.squeeze', 'np.squeeze', (['class_box_scores'], {}), '(class_box_scores)\n', (10752, 10770), True, 'import numpy as np\n'), ((10798, 10817), 'time.process_time', 'time.process_time', ([], {}), '()\n', (10815, 10817), False, 'import time\n'), ((1532, 1555), 'atlas_utils.resource_list.resource_list.destroy', 'resource_list.destroy', ([], {}), '()\n', (1553, 1555), False, 'from atlas_utils.resource_list import resource_list\n'), ((1931, 1980), 'acl.rt.reset_device', 'acl.rt.reset_device', (['self._acl_resource.device_id'], {}), '(self._acl_resource.device_id)\n', (1950, 1980), False, 'import acl\n'), ((1989, 2003), 'acl.finalize', 'acl.finalize', ([], {}), '()\n', (2001, 2003), False, 'import acl\n'), ((2217, 2236), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2234, 2236), False, 'import time\n'), ((2569, 2601), 'numpy.array', 'np.array', (['frame'], {'dtype': '"""float32"""'}), "(frame, dtype='float32')\n", (2577, 2601), True, 'import numpy as np\n'), ((2623, 2689), 'cv2.resize', 'cv2.resize', (['img', '(self.nw, self.nh)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (self.nw, self.nh), interpolation=cv2.INTER_CUBIC)\n', (2633, 2689), False, 'import cv2\n'), ((3062, 3081), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3079, 3081), False, 'import 
time\n'), ((3993, 4140), 'numpy.array', 'np.array', (['[[10.0, 13.0], [16.0, 30.0], [33.0, 23.0], [30.0, 61.0], [62.0, 45.0], [\n 59.0, 119.0], [116.0, 90.0], [156.0, 198.0], [373.0, 326.0]]'], {}), '([[10.0, 13.0], [16.0, 30.0], [33.0, 23.0], [30.0, 61.0], [62.0, \n 45.0], [59.0, 119.0], [116.0, 90.0], [156.0, 198.0], [373.0, 326.0]])\n', (4001, 4140), True, 'import numpy as np\n'), ((4544, 4559), 'numpy.shape', 'np.shape', (['feats'], {}), '(feats)\n', (4552, 4559), True, 'import numpy as np\n'), ((7754, 7771), 'numpy.argsort', 'np.argsort', (['score'], {}), '(score)\n', (7764, 7771), True, 'import numpy as np\n'), ((7811, 7830), 'time.process_time', 'time.process_time', ([], {}), '()\n', (7828, 7830), False, 'import time\n'), ((8200, 8247), 'numpy.maximum', 'np.maximum', (['start_x[index]', 'start_x[order[:-1]]'], {}), '(start_x[index], start_x[order[:-1]])\n', (8210, 8247), True, 'import numpy as np\n'), ((8261, 8304), 'numpy.minimum', 'np.minimum', (['end_x[index]', 'end_x[order[:-1]]'], {}), '(end_x[index], end_x[order[:-1]])\n', (8271, 8304), True, 'import numpy as np\n'), ((8318, 8365), 'numpy.maximum', 'np.maximum', (['start_y[index]', 'start_y[order[:-1]]'], {}), '(start_y[index], start_y[order[:-1]])\n', (8328, 8365), True, 'import numpy as np\n'), ((8379, 8422), 'numpy.minimum', 'np.minimum', (['end_y[index]', 'end_y[order[:-1]]'], {}), '(end_y[index], end_y[order[:-1]])\n', (8389, 8422), True, 'import numpy as np\n'), ((8487, 8515), 'numpy.maximum', 'np.maximum', (['(0.0)', '(x2 - x1 + 1)'], {}), '(0.0, x2 - x1 + 1)\n', (8497, 8515), True, 'import numpy as np\n'), ((8528, 8556), 'numpy.maximum', 'np.maximum', (['(0.0)', '(y2 - y1 + 1)'], {}), '(0.0, y2 - y1 + 1)\n', (8538, 8556), True, 'import numpy as np\n'), ((8827, 8846), 'time.process_time', 'time.process_time', ([], {}), '()\n', (8844, 8846), False, 'import time\n'), ((10344, 10363), 'time.process_time', 'time.process_time', ([], {}), '()\n', (10361, 10363), False, 'import time\n'), ((10899, 
10918), 'time.process_time', 'time.process_time', ([], {}), '()\n', (10916, 10918), False, 'import time\n'), ((1655, 1703), 'acl.rt.destroy_stream', 'acl.rt.destroy_stream', (['self._acl_resource.stream'], {}), '(self._acl_resource.stream)\n', (1676, 1703), False, 'import acl\n'), ((1806, 1856), 'acl.rt.destroy_context', 'acl.rt.destroy_context', (['self._acl_resource.context'], {}), '(self._acl_resource.context)\n', (1828, 1856), False, 'import acl\n'), ((2708, 2742), 'numpy.ones', 'np.ones', (['(416, 416, 3)', 'np.float32'], {}), '((416, 416, 3), np.float32)\n', (2715, 2742), True, 'import numpy as np\n'), ((3218, 3237), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3235, 3237), False, 'import time\n'), ((4230, 4240), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (4236, 4240), True, 'import numpy as np\n'), ((5086, 5109), 'numpy.exp', 'np.exp', (['feats[..., 2:4]'], {}), '(feats[..., 2:4])\n', (5092, 5109), True, 'import numpy as np\n'), ((5891, 5924), 'numpy.min', 'np.min', (['(input_shape / image_shape)'], {}), '(input_shape / image_shape)\n', (5897, 5924), True, 'import numpy as np\n'), ((8741, 8769), 'numpy.where', 'np.where', (['(ratio <= threshold)'], {}), '(ratio <= threshold)\n', (8749, 8769), True, 'import numpy as np\n'), ((3396, 3468), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(box[0], box[2])', '(box[1], box[3])', '(255, 0, 0)', '(4)'], {}), '(frame, (box[0], box[2]), (box[1], box[3]), (255, 0, 0), 4)\n', (3409, 3468), False, 'import cv2\n'), ((10611, 10640), 'numpy.nonzero', 'np.nonzero', (['(box_scores * mask)'], {}), '(box_scores * mask)\n', (10621, 10640), True, 'import numpy as np\n'), ((10682, 10711), 'numpy.nonzero', 'np.nonzero', (['(box_scores * mask)'], {}), '(box_scores * mask)\n', (10692, 10711), True, 'import numpy as np\n'), ((11784, 11806), 'numpy.round', 'np.round', (['top_modified'], {}), '(top_modified)\n', (11792, 11806), True, 'import numpy as np\n'), ((11855, 11878), 'numpy.round', 'np.round', 
(['left_modified'], {}), '(left_modified)\n', (11863, 11878), True, 'import numpy as np\n'), ((11942, 11967), 'numpy.round', 'np.round', (['bottom_modified'], {}), '(bottom_modified)\n', (11950, 11967), True, 'import numpy as np\n'), ((12043, 12067), 'numpy.round', 'np.round', (['right_modified'], {}), '(right_modified)\n', (12051, 12067), True, 'import numpy as np\n'), ((2327, 2346), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2344, 2346), False, 'import time\n')] |
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import pickle
import numpy as np
import toast
import toast.tod as tt
from toast_planck import shdet
from toast_planck.preproc_modules import Transf1, GainCorrector
from toast_planck.utilities import bolo_to_pnt, read_gains
from toast_planck.imo import IMO
from toast_planck.shdet import SHDet
class OpSimSHDET(toast.Operator):
    """
    Operator which takes parameters and a timestream of optical power and
    pass this information into SHDET to simulate detector response.
    Args:
        dets (iterable): detector names to simulate.
        imofile (str): path handed to the IMO database constructor.
        adc_table (dict): per-detector path to an ADC nonlinearity table;
            'DETECTOR' in the path is replaced with the detector name.
        params (dicts): SHDET parameter dictionary (per detector).
        margin (int): number of extra samples on each side of the local
            data span (required by preproc).
        calfile (str): if not None, gain file read with read_gains.
        tffile (dict): per-detector path to a stored transfer function;
            if None the transfer function is measured from SHDet itself.
        read_no_signal (bool): if True, zero the optical signal (debugging).
        offset_file (str): per-detector pickled offset tables; 'DETECTOR'
            in the path is replaced with the detector name.
    """
    def __init__(self, dets = None, imofile=None, adc_table=None, params=None,
                 margin=0, calfile=None, tffile=None, read_no_signal=False,
                 offset_file=None):
        self._imo = IMO(imofile)
        self._margin = margin
        self._calfile = calfile
        self._read_no_signal = read_no_signal
        # these are dictionaries
        self._adc_table = adc_table
        self._tffile = tffile
        self._params = params
        self._offset_file = offset_file
        # Per-detector SHDet instances and their static properties.
        self._shdet = {}
        self._n = {}
        self._nadc = {}
        self._nparam = {}
        self._sentinel = {}
        self._nparam2 = {}
        if self._offset_file is not None:
            self._offsets = {}
        else:
            self._offsets = None
        self._base_seed = {}
        for det in dets:
            self._shdet[det] = SHDet(parameters = params[det])
            self._n[det] = self._shdet[det].get_n()
            self._nadc[det] = self._shdet[det].get_nadc()
            self._nparam[det] = self._shdet[det].get_nparam()
            self._sentinel[det] = self._shdet[det].get_sentinel()
            self._nparam2[det] = self._shdet[det].get_nparam2()
            if 'seed' in params[det].keys():
                self._base_seed[det] = params[det]['seed']
            else:
                self._base_seed[det] = None
            if self._offset_file is not None:
                # BUG FIX: the file handle passed to pickle.load was never
                # closed; use a context manager so it is released promptly.
                with open(self._offset_file.replace('DETECTOR', det),
                          'rb') as offset_handle:
                    self._offsets[det] = pickle.load(offset_handle)
        # The margin is required in preproc. SHDET outputs should have the
        # margin allocated and filled to be able to replace reading flight
        # data off disk.
        # placeholders for the SHDet transfer function
        self._tf_freq = {}
        self._TF = {}
        super().__init__()
    def get_TF(self):
        """Return the per-detector (frequency, transfer function) dicts."""
        return self._tf_freq, self._TF
    def exec(self, data):
        """Simulate detector response with SHDet for every local detector.

        For each detector the transfer function is either measured from
        SHDet or loaded from ``tffile``; then every ring of the cached
        optical signal is pushed through SHDet and written back into the
        TOD cache.
        """
        # the two-level pytoast communicator
        comm = data.comm
        # the global communicator
        cworld = comm.comm_world
        # Measure SHDet transfer function to set up tau deconvolver
        for det in self._shdet.keys():
            if self._tffile is None:
                tf_freq, tf_real, tf_imag \
                    = self._shdet[det].measure_transfer_function(comm=comm)
                TF = tf_real + 1j * tf_imag
            else:
                # load TF from file (columns: frequency, real, imaginary)
                input_tf = np.genfromtxt(self._tffile[det]).T
                tf_freq = input_tf[0]
                TF = input_tf[1] + 1j * input_tf[2]
            # remove the mean (normalize by the low-frequency response)
            TF /= np.mean(TF[:5])
            # store the transfer function until later
            self._tf_freq[det] = tf_freq
            self._TF[det] = TF
        for obs in data.obs:
            tod = obs['tod']
            nsamp = tod.local_samples[1]
            intervals = tod.local_intervals(obs['intervals'])
            local_starts = [ival.first for ival in intervals]
            local_stops = [ival.last+1 for ival in intervals]
            ring_offset = tod.globalfirst_ring
            # Count rings that end before our local span starts so that
            # ring_number is globally consistent.
            for interval in obs['intervals']:
                if interval.last < tod.local_samples[0]:
                    ring_offset += 1
            timestamps = tod.local_timestamps(margin=self._margin)
            for det in tod.local_dets:
                #print(det) # DEBUG
                bolo_id = bolo_to_pnt(det)
                # BUG FIX: np.int was removed in NumPy 1.24; it was only an
                # alias for the builtin int.
                bc = int(bolo_id[:2]) # belt code
                transf1 = Transf1()
                gaincorrector = GainCorrector(self._imo, bolo_id, linear=True)
                if self._calfile is not None:
                    gains = read_gains(self._calfile, det, timestamps[0],
                                       timestamps[-1], cworld)
                else:
                    gains = None
                # get the optical power in Kelvin
                signal = tod.local_signal(det)
                if len(signal) != nsamp + 2*self._margin:
                    # RuntimeError instead of bare Exception; callers
                    # catching Exception still catch this.
                    raise RuntimeError(
                        'Cached signal does not include margins.')
                # DEBUG: filter the input signal to remove noise
                #from scipy.signal import fftconvolve
                #kernel = np.ones(101)/101.
                #signal = fftconvolve( signal, kernel, mode='same' )
                # DEBUG end
                # DEBUG: just zero the signal
                if self._read_no_signal:
                    signal = np.zeros(np.shape(signal))
                if self._adc_table is not None:
                    # Load the ADC nonlinearity table. If the path to the
                    # table contains the string "DETECTOR", it will be
                    # replaced with the correct detector name
                    path = self._adc_table[det]
                    if 'DETECTOR' in path:
                        path = path.replace('DETECTOR', det)
                    try:
                        adc_table = np.genfromtxt(path)
                    except Exception as err:
                        # Narrowed from a bare except (which also swallowed
                        # KeyboardInterrupt/SystemExit) and chained so the
                        # original failure is preserved.
                        raise RuntimeError(
                            'Warning: cannot read ADC table from {}'
                            ''.format(path)) from err
                    #adc_table = np.arange(self._nadc[det], dtype=np.float)
                else:
                    # Linear ADC table
                    # BUG FIX: np.float was removed in NumPy 1.24; it was an
                    # alias for the builtin float (float64 here).
                    adc_table = np.arange(self._nadc[det], dtype=float)
                ring_number = ring_offset - 1
                # Loop over rings, call shdet for every ring separately
                for ring_start, ring_stop in zip(local_starts, local_stops):
                    ring_number += 1
                    # This slice does not have the margins so that every
                    # shdet call is disjoint
                    ind = slice(ring_start+self._margin, ring_stop+self._margin)
                    sig = signal[ind] # a memory view to one ring of data.
                    tme = timestamps[ind]
                    if self._offsets is not None:
                        offsets = self._offsets[det]
                        # Use the offset entry closest in time to the ring start.
                        ioffset = np.argmin(np.abs(offsets[0] - tme[0]))
                        optical_offset = offsets[4][ioffset] # K_CMB
                        raw_offset = offsets[1][ioffset] # digitized units
                    else:
                        optical_offset = None
                        raw_offset = None
                    # This is the time-dependent scaling between
                    # integer-valued 180Hz data and KCMB. One value per
                    # TOI sample.
                    dsp2cmb = np.ones(len(sig), dtype=np.float64)
                    dsp2cmb = transf1.convert(dsp2cmb, tme, det)
                    dsp2cmb = gaincorrector.correct(dsp2cmb, np.isnan(dsp2cmb))
                    dsp2cmb = tt.calibrate(tme, dsp2cmb, *gains)
                    # update the random number seed for noise generation
                    if self._base_seed[det] is not None:
                        noise_seed = 30000*bc + ring_number \
                                     + self._base_seed[det]*3000000
                    else:
                        noise_seed = None
                    # There are no quality flags: every sample is assumed
                    # to have a reasonable optical power value. This will
                    # be guaranteed at the pipeline level.
                    sig_shdet = self._shdet[det].simulate(
                        sig, noise_seed=noise_seed,
                        optical_offset=optical_offset, raw_offset=raw_offset,
                        adc_table=adc_table)
                    signal[ind] = sig_shdet[:len(sig)]
                # Return the simulated timeline
                # FIXME: This is where the margins need to be communicated
                # between processes
                tod.local_signal(det)[:] = signal
        return
| [
"numpy.mean",
"numpy.abs",
"numpy.genfromtxt",
"toast_planck.imo.IMO",
"toast.tod.calibrate",
"toast_planck.preproc_modules.GainCorrector",
"toast_planck.utilities.read_gains",
"toast_planck.shdet.SHDet",
"toast_planck.utilities.bolo_to_pnt",
"numpy.isnan",
"numpy.shape",
"numpy.int",
"toast... | [((1186, 1198), 'toast_planck.imo.IMO', 'IMO', (['imofile'], {}), '(imofile)\n', (1189, 1198), False, 'from toast_planck.imo import IMO\n'), ((1837, 1866), 'toast_planck.shdet.SHDet', 'SHDet', ([], {'parameters': 'params[det]'}), '(parameters=params[det])\n', (1842, 1866), False, 'from toast_planck.shdet import SHDet\n'), ((3610, 3625), 'numpy.mean', 'np.mean', (['TF[:5]'], {}), '(TF[:5])\n', (3617, 3625), True, 'import numpy as np\n'), ((4400, 4416), 'toast_planck.utilities.bolo_to_pnt', 'bolo_to_pnt', (['det'], {}), '(det)\n', (4411, 4416), False, 'from toast_planck.utilities import bolo_to_pnt, read_gains\n'), ((4438, 4457), 'numpy.int', 'np.int', (['bolo_id[:2]'], {}), '(bolo_id[:2])\n', (4444, 4457), True, 'import numpy as np\n'), ((4497, 4506), 'toast_planck.preproc_modules.Transf1', 'Transf1', ([], {}), '()\n', (4504, 4506), False, 'from toast_planck.preproc_modules import Transf1, GainCorrector\n'), ((4539, 4585), 'toast_planck.preproc_modules.GainCorrector', 'GainCorrector', (['self._imo', 'bolo_id'], {'linear': '(True)'}), '(self._imo, bolo_id, linear=True)\n', (4552, 4585), False, 'from toast_planck.preproc_modules import Transf1, GainCorrector\n'), ((3436, 3468), 'numpy.genfromtxt', 'np.genfromtxt', (['self._tffile[det]'], {}), '(self._tffile[det])\n', (3449, 3468), True, 'import numpy as np\n'), ((4661, 4730), 'toast_planck.utilities.read_gains', 'read_gains', (['self._calfile', 'det', 'timestamps[0]', 'timestamps[-1]', 'cworld'], {}), '(self._calfile, det, timestamps[0], timestamps[-1], cworld)\n', (4671, 4730), False, 'from toast_planck.utilities import bolo_to_pnt, read_gains\n'), ((6318, 6360), 'numpy.arange', 'np.arange', (['self._nadc[det]'], {'dtype': 'np.float'}), '(self._nadc[det], dtype=np.float)\n', (6327, 6360), True, 'import numpy as np\n'), ((7763, 7797), 'toast.tod.calibrate', 'tt.calibrate', (['tme', 'dsp2cmb', '*gains'], {}), '(tme, dsp2cmb, *gains)\n', (7775, 7797), True, 'import toast.tod as tt\n'), ((5448, 5464), 
'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (5456, 5464), True, 'import numpy as np\n'), ((5939, 5958), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {}), '(path)\n', (5952, 5958), True, 'import numpy as np\n'), ((7714, 7731), 'numpy.isnan', 'np.isnan', (['dsp2cmb'], {}), '(dsp2cmb)\n', (7722, 7731), True, 'import numpy as np\n'), ((7062, 7089), 'numpy.abs', 'np.abs', (['(offsets[0] - tme[0])'], {}), '(offsets[0] - tme[0])\n', (7068, 7089), True, 'import numpy as np\n')] |
import argparse
from PIL import Image, ImageOps
import numpy as np
def parse_args():
    """Parse command-line options; require a -f/--file argument."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f', '--file')
    parsed = arg_parser.parse_args()
    # argparse treats --file as optional, so enforce presence ourselves.
    if parsed.file is None:
        raise ValueError("Missing File Arguments")
    return parsed
def scale_depth(image, max_depth=3.0, depth_scale=10000):
    """Scale a Z16 depth image into a blue-to-red colorized image.

    Arguments:
        image {PIL.Image} -- Pillow image holding 16-bit raw depth values

    Keyword Arguments:
        max_depth {float} -- maximum depth mapped to full intensity
            (default: {3.0})
        depth_scale {int} -- raw sensor units per 10 meters of depth --
            presumably the RealSense depth scale; confirm against the
            capture pipeline (default: {10000})

    Returns:
        PIL.Image -- colorized 8-bit image ('blue' near, 'red' far)
    """
    data = np.asarray(image)
    # BUG FIX: depth_scale was accepted but ignored (10000 was hard-coded);
    # the default preserves the previous behavior exactly.
    scale_factor = (max_depth / 10.0) * depth_scale
    data1 = ((data / scale_factor) * 255).astype(np.uint8)
    scaled_image = Image.fromarray(data1, mode='L')
    color_image = ImageOps.colorize(scaled_image, 'blue', 'red')
    return color_image
def show_image(fpath):
    """Load a raw 848x480 16-bit depth frame from *fpath* and display it colorized."""
    with open(fpath, 'rb') as raw_file:
        image_bytes = raw_file.read()
    # Frame geometry is fixed by the capture format: 848x480, 16-bit.
    frame = Image.frombytes("I;16", (848, 480), image_bytes, 'raw')
    scale_depth(frame).show()
def main():
    """Entry point: parse CLI arguments and display the depth file."""
    cli_args = parse_args()
    print(cli_args)
    show_image(cli_args.file)
if __name__ == "__main__":
    main()
| [
"PIL.Image.fromarray",
"argparse.ArgumentParser",
"PIL.ImageOps.colorize",
"numpy.asarray",
"PIL.Image.frombytes"
] | [((100, 125), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (123, 125), False, 'import argparse\n'), ((555, 572), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (565, 572), True, 'import numpy as np\n'), ((697, 729), 'PIL.Image.fromarray', 'Image.fromarray', (['data1'], {'mode': '"""L"""'}), "(data1, mode='L')\n", (712, 729), False, 'from PIL import Image, ImageOps\n'), ((748, 794), 'PIL.ImageOps.colorize', 'ImageOps.colorize', (['scaled_image', '"""blue"""', '"""red"""'], {}), "(scaled_image, 'blue', 'red')\n", (765, 794), False, 'from PIL import Image, ImageOps\n'), ((919, 974), 'PIL.Image.frombytes', 'Image.frombytes', (['"""I;16"""', '(848, 480)', 'image_bytes', '"""raw"""'], {}), "('I;16', (848, 480), image_bytes, 'raw')\n", (934, 974), False, 'from PIL import Image, ImageOps\n')] |
import numpy as np
# Define a class to receive the characteristics of each line detection
class Line():
    """Holds the per-frame characteristics of one detected lane line and
    provides moving-average (low-pass) views of its history."""
    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = []
        # average x values of the fitted line over the last n iterations
        self.bestx = None
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        # polynomial coefficients for the most recent fit
        self.current_fit = [] #[np.array([False])]
        # radius of curvature of the line in some units
        self.radius_of_curvature = []
        # distance in meters of vehicle center from the line
        self.line_base_pos = []
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        # x values for detected line pixels
        self.allx = None
        # y values for detected line pixels
        self.ally = None
    def low_pass_filter(self, window_size = 11):
        """Average the last *window_size* fits (append to current_fit first);
        stores the result in best_fit and returns it."""
        window = self.current_fit[-window_size:]
        # NOTE(review): this sentinel check compares a 1-element slice to a
        # 3-element list, so it appears it can never match -- confirm intent.
        if window[-1:] == [10, 20, 0]:
            window = np.delete(window, -1)
        averaged = np.mean(window, axis = 0)
        self.best_fit = averaged
        return averaged
    def get_curvature_LPF(self, window_size = 15):
        """Average of the last *window_size* curvature values
        (append to radius_of_curvature first)."""
        return np.mean(self.radius_of_curvature[-window_size:], axis = 0)
    def get_relative_position_LPF(self, window_size = 30):
        """Average of the last *window_size* base positions
        (append to line_base_pos first)."""
        return np.mean(self.line_base_pos[-window_size:], axis = 0)
return relative_position | [
"numpy.array",
"numpy.mean",
"numpy.delete"
] | [((886, 920), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': '"""float"""'}), "([0, 0, 0], dtype='float')\n", (894, 920), True, 'import numpy as np\n'), ((1389, 1414), 'numpy.mean', 'np.mean', (['snapshot'], {'axis': '(0)'}), '(snapshot, axis=0)\n', (1396, 1414), True, 'import numpy as np\n'), ((1684, 1709), 'numpy.mean', 'np.mean', (['snapshot'], {'axis': '(0)'}), '(snapshot, axis=0)\n', (1691, 1709), True, 'import numpy as np\n'), ((1942, 1967), 'numpy.mean', 'np.mean', (['snapshot'], {'axis': '(0)'}), '(snapshot, axis=0)\n', (1949, 1967), True, 'import numpy as np\n'), ((1333, 1356), 'numpy.delete', 'np.delete', (['snapshot', '(-1)'], {}), '(snapshot, -1)\n', (1342, 1356), True, 'import numpy as np\n')] |
import random, threading, time
import numpy as np
from utils import policy
from utils.curiosity import CuriosityPrio
from utils.learning_rate import LinearAutoSchedule as LinearSchedule
def critic_launch(cfg, bot, objective_id, task_factory, update_goal, ping, sync, loss_gate, share_gate, stats):
    """Process entry point: build a Critic and run its training loop until
    the experience feed closes."""
    worker = Critic(cfg, bot, objective_id, task_factory, update_goal)
    worker.training_loop(ping, sync, share_gate, loss_gate, stats)
    print("CRITIC OVER")
class Critic:
def __init__(self, cfg, bot, objective_id, task_factory, update_goal):
assert not cfg['gae'] or cfg['n_step'] == 1, "gae is currently enabled only with one step lookahead!"
self.cfg = cfg
self.objective_id = objective_id
self.bot = bot
self.update_goal = update_goal
self.stop = False
self.debug_out_ex = "y" * 10
self.n_step = self.cfg['n_step']
self.discount = self.cfg['discount_rate']
self.n_discount = 1. if self.cfg['gae'] else (self.discount ** self.n_step)
self.batch_size = self.cfg['batch_size']
self.counter = 0
self.tau = LinearSchedule(cfg['tau_replay_counter'],
initial_p=self.cfg['tau_base'],
final_p=cfg['tau_final'])
self.replay = task_factory.make_replay_buffer(cfg)
self.full_episode = []
self.last_train_cap = self.cfg['critic_learn_delta']
# here imho configurable choise : use curiosity, td errors, random, or another method
self.curiosity = CuriosityPrio(
task_factory.state_size, task_factory.action_size,
task_factory.action_range, task_factory.wrap_action, cfg['device'], cfg)
def training_loop(self, ping, sync, share_gate, loss_gate, stats):
while True:
exp = share_gate.get()
if None == exp:
break
full, action, exp = exp
if not full:
self._inject(action, exp)
else:
self._train(loss_gate, ping, stats, action, exp)
if not self.cfg['critic_learn_delta']:
continue
if len(self.full_episode) < self.last_train_cap:
continue
self.last_train_cap += self.cfg['critic_learn_delta']
# print("\n%s\nDO FAST TRAIN : %i\n%s\n"%('*' * 60, len(self.full_episode), '*' * 60))
ping.put(True) # old style scoping ... python nicer way out there ?
for batch in self._do_sampling():
self._eval(loss_gate, stats, batch)
ping.get()
self._dtor(ping, sync, stats)
def _dtor(self, ping, sync, stats):
self.stop = True
while not ping.empty():
time.sleep(.1)
while not stats.empty():
stats.get()
sync.put(True)
def _inject(self, action, exp):
goals, states, features, actions, probs, rewards, n_goals, n_states, n_features, good = exp
if not len(states):
return
n_rewards = policy.td_lambda(rewards, self.n_step, self.discount) if not self.cfg['gae'] else policy.gae(
rewards, self.bot.qa_future(
self.objective_id,
np.vstack([goals, [goals[-1]]]).reshape(len(goals) + 1, -1),
np.vstack([states, n_states[-1]]),
np.vstack([features, [n_features[-1]]]),
np.vstack([actions, [action]])),
self.discount, self.cfg['gae_tau'], stochastic=False)
full_episode = np.vstack(zip(*[goals, states, features, actions, probs, rewards, n_goals, n_states, n_features, n_rewards, good]))
if not len(self.full_episode):
self.full_episode = full_episode
else:
self.full_episode = np.vstack([self.full_episode, full_episode])
def _train(self, loss_gate, ping, stats, action, exp):
self._inject(action, exp)
self._update_memory()
self._self_play(loss_gate, ping, stats)
# abandoned reinforce clip, as i think that is no go for AGI...
# print("\n%s\nFULL EPISODE LENGTH : %i\n%s\n"%('*' * 60, len(self.full_episode), '*' * 60))
self.full_episode = []
self.last_train_cap = self.cfg['critic_learn_delta']
def _self_play(self, loss_gate, ping, stats):
ping.put(True)
for _ in range(self.cfg['full_replay_count']):
samples = self._select()
if None == samples:
continue
self._eval(loss_gate, stats, samples.T)
ping.get()
def _update_memory(self):
goals, states, features, actions, probs, rewards, n_goals, n_states, n_features, n_rewards, good = self.full_episode.T
goals, states, n_goals, n_states, actions = np.vstack(goals), np.vstack(states), np.vstack(n_goals), np.vstack(n_states), np.vstack(actions)
prios = self.curiosity.weight(states, n_states, actions)
self.replay.add(
map(lambda i: (
goals[i], states[i], features[i], actions[i], probs[i], rewards[i],
n_goals[i], n_states[i], n_features[i], n_rewards[i]
), filter(
lambda i: bool(sum(good[i:i+self.cfg['good_reach']])),
range(len(states)))),
prios, hash(states.tostring()))
self.curiosity.update(states, n_states, actions)
def _eval(self, loss_gate, stats, args):
if self.stop:
return
goals, states, features, actions, probs, n_goals, n_states, n_features, n_rewards = args
assert len(n_features) == len(features), "features missmatch"
if len(n_features) != len(features):
return
goals, states, features, actions = np.vstack(goals), np.vstack(states), np.vstack(features), np.vstack(actions)
n_goals, n_states, n_features, n_rewards = np.vstack(n_goals), np.vstack(n_states), np.vstack(n_features), np.vstack(n_rewards)
# func approximators; self play
n_qa = self.bot.q_future(self.objective_id, n_goals, n_states, n_features)
# n_step target + bellman equation
td_targets = n_rewards + self.n_discount * n_qa
# learn !!
self.counter += 1
self.bot.learn_critic(self.objective_id, goals, states, features, actions, td_targets,
self.tau.value() * (0 == self.counter % self.cfg['critic_update_delay']))
# propagate back to simulation ~ debug purposes
if None != stats and self.cfg['dbgout'] and not self.stop:
stats.put("[ TARGET:{:2f} replay::{} ]<----".format(
td_targets[-1].item(), len(self.replay)))
# propagate back to main process
loss_gate.put([ goals, states, features, actions, probs, td_targets ])
# WARNING : EXPERIMENT ~~> here we on purpose provide same features as for n-state
# basically we are leaking future of that trajectory, what our agent will do ?
# bellman will be probably not proud of me at this point :)
#loss_gate.put([ goals, states, n_features, actions, probs, td_targets ])
def _population(self, batch):
return random.sample(range(len(batch)), random.randint(
1, min(2 * self.cfg['batch_size'], len(batch) - 1)))
def _do_sampling(self):
if self.stop:
return
batch = self._fast_exp()
if None == batch:
return
# first_order_experience_focus = '''
for _ in range(self.cfg['fast_exp_epochs']):
samples = self._select()
mini_batch = batch if None == samples else np.vstack([batch, samples])
population = self._population(mini_batch)
yield mini_batch[population].T
replay_focused = '''
for _ in range(self.cfg['fast_exp_epochs']):
population = self._population(batch)
samples = self._select()
if None != samples:
yield np.vstack([batch[population], samples]).T
else:
yield batch[population].T
# '''
population = self._population(batch)
yield batch[population].T # push towards latest experience
def _fast_exp(self):
if max(len(self.replay), len(self.full_episode)) < self.batch_size:
return None
goals, states, features, actions, probs, _, n_goals, n_states, n_features, n_rewards, _ = self.full_episode.T
return np.vstack(zip(goals, states, features, actions, probs, n_goals, n_states, n_features, n_rewards))
def _select(self):
if len(self.replay) < self.batch_size:
return None
data = self.replay.sample(self.batch_size, self)
if None == data:
return None
goals, states, features, actions, probs, _, n_goals, n_states, n_features, n_rewards = data
if not len(actions):
return None
self._update_replay_prios(states, n_states, actions)
return np.vstack(zip(goals, states, features, actions, probs, n_goals, n_states, n_features, n_rewards))
    def _update_replay_prios(self, states, n_states, actions):
        """Recompute curiosity-based priorities for the sampled transitions
        and push them back into the replay buffer (no-op unless
        cfg['replay_cleaning'] is enabled)."""
        if not self.cfg['replay_cleaning']:
            return
        states, n_states, actions = np.vstack(states), np.vstack(n_states), np.vstack(actions)
        prios = self.curiosity.weight(states, n_states, actions)
        # seems we are bit too far for PG ( PPO ) to do something good, replay buffer should abandon those
        # NOTE(review): `probs` is not defined in this scope -- it is neither
        # a parameter nor an attribute read from self, so this line raises
        # NameError whenever cfg['replay_cleaning'] is enabled. Most likely
        # the `probs` column from _select was meant to be passed in.
        # TODO: confirm intent and fix the signature/call site together.
        prios[self.cfg['prob_treshold'] < np.abs(np.vstack(probs).mean(-1))] = 0
        self.replay.update(prios)
    # main bottleneck of whole solution, but we experimenting so .. :)
    # also i think can be played with, when enough hardware/resources
    # -> properly scale, and do thinks on background in paralell..
    # + if main concern is speed i would not do it in python in first place ..
    def reanalyze_experience(self, episode, indices, recalc):
        """Re-evaluate a stored episode and yield the requested transitions.

        Performs HER-style goal relabelling via self.update_goal, optionally
        re-runs the current policy over the episode (recalc=True refreshes
        features/probs and, when cfg['gae'] is set, the n-step returns via GAE).

        episode : list of stored transitions; each e[0] is the raw tuple
        indices : indices of transitions to yield
        recalc  : when True, recompute features/probs with the current bot
        """
        # imho i iterate too much trough episode ... better to implement it in one sweep ... TODO
        goals, states, f, a, p = zip(*[
            [e[0][0], e[0][1], e[0][2], e[0][3], e[0][4]] for e in episode ])
        goals, states = np.asarray(goals), np.asarray(states)
        if recalc:
            f, p = self.bot.reevaluate(self.objective_id, goals, states, a)
        r, g, s, n_g, n_s = zip(*self.update_goal(
            *zip(*[( # magic *
                e[0][5], # rewards .. just so he can forward it to us back
                e[0][0], # goals ..
                e[0][1], # states ..
                e[0][6], # n_goals ..
                e[0][7], # n_states ..
                # e[0][2], # action .. well for now no need, however some emulator may need them
                bool(random.randint(0, self.cfg['her_max_ratio'])), # update or not
                ) for e in episode])))
        # reuse the stored n-step returns unless a full GAE recompute is both
        # requested (recalc) and enabled (cfg['gae'])
        n = [ e[0][9] for e in episode ] if not recalc or not self.cfg['gae'] else policy.gae(
                r,
                self.bot.qa_future(self.objective_id, goals, states, np.asarray(f), np.asarray(a)),
                self.discount, self.cfg['gae_tau'])
        for i in indices:
            yield ( g[i], s[i], f[i], a[i], p[i], r[i],
                    n_g[i], n_s[i], f[(i + self.n_step) if i+self.n_step < len(f) else -1], n[i] )
| [
"utils.policy.td_lambda",
"random.randint",
"numpy.asarray",
"time.sleep",
"utils.curiosity.CuriosityPrio",
"numpy.vstack",
"utils.learning_rate.LinearAutoSchedule"
] | [((1126, 1229), 'utils.learning_rate.LinearAutoSchedule', 'LinearSchedule', (["cfg['tau_replay_counter']"], {'initial_p': "self.cfg['tau_base']", 'final_p': "cfg['tau_final']"}), "(cfg['tau_replay_counter'], initial_p=self.cfg['tau_base'],\n final_p=cfg['tau_final'])\n", (1140, 1229), True, 'from utils.learning_rate import LinearAutoSchedule as LinearSchedule\n'), ((1529, 1670), 'utils.curiosity.CuriosityPrio', 'CuriosityPrio', (['task_factory.state_size', 'task_factory.action_size', 'task_factory.action_range', 'task_factory.wrap_action', "cfg['device']", 'cfg'], {}), "(task_factory.state_size, task_factory.action_size,\n task_factory.action_range, task_factory.wrap_action, cfg['device'], cfg)\n", (1542, 1670), False, 'from utils.curiosity import CuriosityPrio\n'), ((2743, 2758), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2753, 2758), False, 'import random, threading, time\n'), ((3043, 3096), 'utils.policy.td_lambda', 'policy.td_lambda', (['rewards', 'self.n_step', 'self.discount'], {}), '(rewards, self.n_step, self.discount)\n', (3059, 3096), False, 'from utils import policy\n'), ((3812, 3856), 'numpy.vstack', 'np.vstack', (['[self.full_episode, full_episode]'], {}), '([self.full_episode, full_episode])\n', (3821, 3856), True, 'import numpy as np\n'), ((4800, 4816), 'numpy.vstack', 'np.vstack', (['goals'], {}), '(goals)\n', (4809, 4816), True, 'import numpy as np\n'), ((4818, 4835), 'numpy.vstack', 'np.vstack', (['states'], {}), '(states)\n', (4827, 4835), True, 'import numpy as np\n'), ((4837, 4855), 'numpy.vstack', 'np.vstack', (['n_goals'], {}), '(n_goals)\n', (4846, 4855), True, 'import numpy as np\n'), ((4857, 4876), 'numpy.vstack', 'np.vstack', (['n_states'], {}), '(n_states)\n', (4866, 4876), True, 'import numpy as np\n'), ((4878, 4896), 'numpy.vstack', 'np.vstack', (['actions'], {}), '(actions)\n', (4887, 4896), True, 'import numpy as np\n'), ((5780, 5796), 'numpy.vstack', 'np.vstack', (['goals'], {}), '(goals)\n', (5789, 5796), True, 
'import numpy as np\n'), ((5798, 5815), 'numpy.vstack', 'np.vstack', (['states'], {}), '(states)\n', (5807, 5815), True, 'import numpy as np\n'), ((5817, 5836), 'numpy.vstack', 'np.vstack', (['features'], {}), '(features)\n', (5826, 5836), True, 'import numpy as np\n'), ((5838, 5856), 'numpy.vstack', 'np.vstack', (['actions'], {}), '(actions)\n', (5847, 5856), True, 'import numpy as np\n'), ((5908, 5926), 'numpy.vstack', 'np.vstack', (['n_goals'], {}), '(n_goals)\n', (5917, 5926), True, 'import numpy as np\n'), ((5928, 5947), 'numpy.vstack', 'np.vstack', (['n_states'], {}), '(n_states)\n', (5937, 5947), True, 'import numpy as np\n'), ((5949, 5970), 'numpy.vstack', 'np.vstack', (['n_features'], {}), '(n_features)\n', (5958, 5970), True, 'import numpy as np\n'), ((5972, 5992), 'numpy.vstack', 'np.vstack', (['n_rewards'], {}), '(n_rewards)\n', (5981, 5992), True, 'import numpy as np\n'), ((9269, 9286), 'numpy.vstack', 'np.vstack', (['states'], {}), '(states)\n', (9278, 9286), True, 'import numpy as np\n'), ((9288, 9307), 'numpy.vstack', 'np.vstack', (['n_states'], {}), '(n_states)\n', (9297, 9307), True, 'import numpy as np\n'), ((9309, 9327), 'numpy.vstack', 'np.vstack', (['actions'], {}), '(actions)\n', (9318, 9327), True, 'import numpy as np\n'), ((10187, 10204), 'numpy.asarray', 'np.asarray', (['goals'], {}), '(goals)\n', (10197, 10204), True, 'import numpy as np\n'), ((10206, 10224), 'numpy.asarray', 'np.asarray', (['states'], {}), '(states)\n', (10216, 10224), True, 'import numpy as np\n'), ((7641, 7668), 'numpy.vstack', 'np.vstack', (['[batch, samples]'], {}), '([batch, samples])\n', (7650, 7668), True, 'import numpy as np\n'), ((3322, 3355), 'numpy.vstack', 'np.vstack', (['[states, n_states[-1]]'], {}), '([states, n_states[-1]])\n', (3331, 3355), True, 'import numpy as np\n'), ((3377, 3416), 'numpy.vstack', 'np.vstack', (['[features, [n_features[-1]]]'], {}), '([features, [n_features[-1]]])\n', (3386, 3416), True, 'import numpy as np\n'), ((3438, 3468), 
'numpy.vstack', 'np.vstack', (['[actions, [action]]'], {}), '([actions, [action]])\n', (3447, 3468), True, 'import numpy as np\n'), ((11032, 11045), 'numpy.asarray', 'np.asarray', (['f'], {}), '(f)\n', (11042, 11045), True, 'import numpy as np\n'), ((11047, 11060), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (11057, 11060), True, 'import numpy as np\n'), ((3241, 3272), 'numpy.vstack', 'np.vstack', (['[goals, [goals[-1]]]'], {}), '([goals, [goals[-1]]])\n', (3250, 3272), True, 'import numpy as np\n'), ((9541, 9557), 'numpy.vstack', 'np.vstack', (['probs'], {}), '(probs)\n', (9550, 9557), True, 'import numpy as np\n'), ((10746, 10790), 'random.randint', 'random.randint', (['(0)', "self.cfg['her_max_ratio']"], {}), "(0, self.cfg['her_max_ratio'])\n", (10760, 10790), False, 'import random, threading, time\n')] |
import unittest
import os
import numpy as np
from rastervision.core.data import RasterStats, StatsTransformerConfig
from rastervision.pipeline import rv_config
class TestRasterTransformer(unittest.TestCase):
    """Round-trip test: RasterStats saved to disk, rebuilt into a
    StatsTransformer, and applied to a chip."""

    def test_stats_transformer(self):
        # Stats with mean 1 and std 2 for each of the four bands.
        stats = RasterStats()
        stats.means = list(np.ones((4, )))
        stats.stds = list(np.ones((4, )) * 2)
        with rv_config.get_tmp_dir() as tmp_dir:
            uri = os.path.join(tmp_dir, 'stats.json')
            stats.save(uri)
            transformer = StatsTransformerConfig(stats_uri=uri).build()
            # Every input value 3 has z-score 1, which maps to uint8 170.
            in_chip = 3 * np.ones((2, 2, 4))
            out_chip = transformer.transform(in_chip)
            np.testing.assert_equal(out_chip, 170 * np.ones((2, 2, 4)))
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.ones",
"rastervision.core.data.RasterStats",
"numpy.testing.assert_equal",
"os.path.join",
"rastervision.core.data.StatsTransformerConfig",
"rastervision.pipeline.rv_config.get_tmp_dir",
"unittest.main"
] | [((966, 981), 'unittest.main', 'unittest.main', ([], {}), '()\n', (979, 981), False, 'import unittest\n'), ((273, 286), 'rastervision.core.data.RasterStats', 'RasterStats', ([], {}), '()\n', (284, 286), False, 'from rastervision.core.data import RasterStats, StatsTransformerConfig\n'), ((321, 334), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (328, 334), True, 'import numpy as np\n'), ((404, 427), 'rastervision.pipeline.rv_config.get_tmp_dir', 'rv_config.get_tmp_dir', ([], {}), '()\n', (425, 427), False, 'from rastervision.pipeline import rv_config\n'), ((464, 499), 'os.path.join', 'os.path.join', (['tmp_dir', '"""stats.json"""'], {}), "(tmp_dir, 'stats.json')\n", (476, 499), False, 'import os\n'), ((880, 932), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out_chip', 'expected_out_chip'], {}), '(out_chip, expected_out_chip)\n', (903, 932), True, 'import numpy as np\n'), ((370, 383), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (377, 383), True, 'import numpy as np\n'), ((737, 755), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {}), '((2, 2, 4))\n', (744, 755), True, 'import numpy as np\n'), ((843, 861), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {}), '((2, 2, 4))\n', (850, 861), True, 'import numpy as np\n'), ((666, 709), 'rastervision.core.data.StatsTransformerConfig', 'StatsTransformerConfig', ([], {'stats_uri': 'stats_uri'}), '(stats_uri=stats_uri)\n', (688, 709), False, 'from rastervision.core.data import RasterStats, StatsTransformerConfig\n')] |
import numpy as np
import os
from utils.text_featurizers import TextFeaturizer
from utils.speech_featurizers import SpeechFeaturizer
import logging
class AM():
    """Acoustic-model wrapper.

    Builds the configured network (Conformer / DeepSpeech2 family, streaming
    or multi-task variants), restores the latest checkpoint, and exposes
    file-level prediction and pb-export helpers.

    `config` is the parsed configuration dict containing 'speech_config',
    'model_config', 'decoder*_config' and 'learning_config' sections.
    """
    def __init__(self,config):
        self.config = config
        self.update_model_type()
        self.speech_config= self.config['speech_config']
        # MultiTask models read their text vocabulary from the third decoder
        if self.model_type!='MultiTask':
            self.text_config=self.config['decoder_config']
        else:
            self.text_config = self.config['decoder3_config']
        self.model_config=self.config['model_config']
        self.text_feature=TextFeaturizer(self.text_config,True)
        self.speech_feature=SpeechFeaturizer(self.speech_config)
        # training step restored from the checkpoint file name (see load_checkpoint)
        self.init_steps=None
    def update_model_type(self):
        """Derive self.model_type from the configured model name and stamp
        the matching 'model_type' into the decoder config(s)."""
        # streaming models require the streaming feature pipeline, and vice versa
        if 'Streaming' in self.config['model_config']['name']:
            assert self.config['speech_config']['streaming'] is True
        else:
            assert self.config['speech_config']['streaming'] is False
        if 'CTC' in self.config['model_config']['name'] and 'Multi' not in self.config['model_config']['name'] :
            self.config['decoder_config'].update({'model_type': 'CTC'})
            self.model_type='CTC'
        elif 'Multi' in self.config['model_config']['name']:
            self.config['decoder1_config'].update({'model_type': 'CTC'})
            self.config['decoder2_config'].update({'model_type': 'CTC'})
            self.config['decoder3_config'].update({'model_type': 'CTC'})
            self.config['decoder_config'].update({'model_type': 'CTC'})
            self.model_type = 'MultiTask'
        elif 'LAS' in self.config['model_config']['name']:
            self.config['decoder_config'].update({'model_type': 'LAS'})
            self.model_type = 'LAS'
        else:
            self.config['decoder_config'].update({'model_type': 'Transducer'})
            self.model_type = 'Transducer'
    def conformer_model(self, training):
        """Instantiate one of the Conformer-family models into self.model."""
        from AMmodel.streaming_conformer import StreamingConformerCTC, StreamingConformerTransducer
        from AMmodel.conformer import ConformerCTC, ConformerLAS, ConformerTransducer
        self.model_config.update({'vocabulary_size': self.text_feature.num_classes})
        if self.model_config['name'] == 'ConformerTransducer':
            self.model_config.pop('LAS_decoder')
            self.model_config.pop('enable_tflite_convertible')
            self.model_config.update({'speech_config': self.speech_config})
            self.model = ConformerTransducer(**self.model_config)
        elif self.model_config['name'] == 'ConformerCTC':
            self.model_config.update({'speech_config': self.speech_config})
            self.model = ConformerCTC(**self.model_config)
        elif self.model_config['name'] == 'ConformerLAS':
            self.config['model_config']['LAS_decoder'].update({'n_classes': self.text_feature.num_classes})
            self.config['model_config']['LAS_decoder'].update({'startid': self.text_feature.start})
            self.model = ConformerLAS(self.config['model_config'], training=training,
                                      enable_tflite_convertible=self.config['model_config'][
                                          'enable_tflite_convertible'],
                                      speech_config=self.speech_config)
        elif self.model_config['name'] == 'StreamingConformerCTC':
            self.model_config.update({'speech_config': self.speech_config})
            self.model = StreamingConformerCTC(**self.model_config)
        elif self.model_config['name'] == 'StreamingConformerTransducer':
            self.model_config.pop('enable_tflite_convertible')
            self.model_config.update({'speech_config': self.speech_config})
            self.model = StreamingConformerTransducer(**self.model_config)
        else:
            # FIX: was `raise ('not in supported model list')` -- raising a
            # plain string is itself a TypeError; raise a real exception.
            raise ValueError('not in supported model list')
    def ds2_model(self,training):
        """Instantiate one of the DeepSpeech2-family models into self.model."""
        from AMmodel.deepspeech2 import DeepSpeech2CTC,DeepSpeech2LAS,DeepSpeech2Transducer
        self.model_config['Transducer_decoder']['vocabulary_size']= self.text_feature.num_classes
        f,c=self.speech_feature.compute_feature_dim()
        input_shape=[None,f,c]
        self.model_config.update({'input_shape':input_shape})
        self.model_config.update({'dmodel':self.model_config['rnn_conf']['rnn_units']})
        if self.model_config['name'] == 'DeepSpeech2Transducer':
            self.model_config.pop('LAS_decoder')
            self.model_config.pop('enable_tflite_convertible')
            self.model = DeepSpeech2Transducer(input_shape,self.model_config,speech_config=self.speech_config)
        elif self.model_config['name'] == 'DeepSpeech2CTC':
            self.model = DeepSpeech2CTC(input_shape,self.model_config,self.text_feature.num_classes,speech_config=self.speech_config)
        elif self.model_config['name'] == 'DeepSpeech2LAS':
            self.model_config['LAS_decoder'].update({'n_classes': self.text_feature.num_classes})
            self.model_config['LAS_decoder'].update({'startid': self.text_feature.start})
            self.model = DeepSpeech2LAS(self.model_config,input_shape, training=training,
                                        enable_tflite_convertible=self.model_config[
                                            'enable_tflite_convertible'],speech_config=self.speech_config)
        else:
            # FIX: was `raise ('not in supported model list')` (see conformer_model)
            raise ValueError('not in supported model list')
    def multi_task_model(self,training):
        """Instantiate the multi-task Conformer CTC model into self.model."""
        from AMmodel.MultiConformer import ConformerMultiTaskCTC
        token1_feature = TextFeaturizer(self.config['decoder1_config'])
        token2_feature = TextFeaturizer(self.config['decoder2_config'])
        token3_feature = TextFeaturizer(self.config['decoder3_config'])
        self.model_config.update({
            'classes1':token1_feature.num_classes,
            'classes2':token2_feature.num_classes,
            'classes3':token3_feature.num_classes,
        })
        self.model = ConformerMultiTaskCTC(self.model_config, training=training,
                                           speech_config=self.speech_config)
    def load_model(self,training=True):
        """Build the configured model, attach the text featurizer, trace the
        inference function, and restore the latest checkpoint."""
        if 'Multi' in self.model_config['name']:
            self.multi_task_model(training)
        elif 'Conformer' in self.model_config['name']:
            self.conformer_model(training)
        else:
            self.ds2_model(training)
        self.model.add_featurizers(self.text_feature)
        f,c=self.speech_feature.compute_feature_dim()
        # models with an internal mel layer take raw waveform input; others
        # take precomputed feature tensors of shape [batch, time, f, c]
        if not training:
            if self.text_config['model_type'] != 'LAS':
                if self.model.mel_layer is not None:
                    self.model._build([3,16000,1])
                    self.model.return_pb_function([None,None,1])
                else:
                    self.model._build([3, 80, f, c])
                    self.model.return_pb_function([None,None, f, c])
            else:
                if self.model.mel_layer is not None:
                    self.model._build([3,16000,1], training)
                    self.model.return_pb_function([None,None,1])
                else:
                    self.model._build([2, 80, f, c], training)
                    self.model.return_pb_function([None,None, f, c])
        self.load_checkpoint(self.config)
    def convert_to_pb(self,export_path):
        """Export the traced inference function as a TensorFlow SavedModel."""
        import tensorflow as tf
        concrete_func = self.model.recognize_pb.get_concrete_function()
        tf.saved_model.save(self.model,export_path,signatures=concrete_func)
    def decode_result(self,word):
        """Map a sequence of token indices back to tokens, stopping at the
        stop symbol."""
        de=[]
        for i in word:
            if i!=self.text_feature.stop:
                de.append(self.text_feature.index_to_token[int(i)])
            else:
                break
        return de
    def predict(self,fp):
        """Run recognition on an audio file (.pcm is raw int16; anything else
        is loaded via the speech featurizer) and return the raw model output."""
        if '.pcm' in fp:
            data=np.fromfile(fp,'int16')
            data=np.array(data,'float32')
            data/=32768
        else:
            data = self.speech_feature.load_wav(fp)
        if self.model.mel_layer is None:
            mel=self.speech_feature.extract(data)
            mel=np.expand_dims(mel,0)
            input_length=np.array([[mel.shape[1]//self.model.time_reduction_factor]],'int32')
        else:
            mel=data.reshape([1,-1,1])
            input_length = np.array([[mel.shape[1] // self.model.time_reduction_factor//(self.speech_config['sample_rate']*
                                                                          self.speech_config['stride_ms']/1000)]], 'int32')
        if self.speech_config['streaming']:
            # pad the waveform up to a whole number of chunks, then decode chunk by chunk
            chuck_size=self.model.chuck_size
            if mel.shape[1]%chuck_size!=0:
                T=mel.shape[1]//chuck_size*chuck_size+chuck_size
                pad_T=T-mel.shape[1]
                mel=np.hstack((mel,np.zeros([1,pad_T,1])))
            mel=mel.reshape([1,-1,chuck_size,1])
            input_length = np.array(
                [[mel.shape[2] // self.model.time_reduction_factor // (self.speech_config['sample_rate'] *
                                                                       self.speech_config['stride_ms'] / 1000)]],
                'int32')
            mel=mel.astype('float32')
            if 'CTC' in self.model_type:
                states= self.model.initial_states(mel)
                result=[]
                for i in range(mel.shape[1]):
                    result_, states = self.model.recognize_pb(mel[:, i], input_length, states)
                    result_=result_.numpy()[0]
                    for n in result_:
                        if n!=-1:
                            result.append(n)
                result = np.array(result)
                result=np.expand_dims(result,0)
            else:
                states,result=self.model.initial_states(mel)
                for i in range(mel.shape[1]):
                    result,states=self.model.recognize_pb(mel[:,i],result,states)
                result=result.numpy()
                result=np.hstack((result,np.ones([1])))
        else:
            result=self.model.recognize_pb(mel,input_length)[0]
        return result
    def load_checkpoint(self,config):
        """Load the newest checkpoint (by step number embedded in the file
        name) and remember its step count in self.init_steps."""
        self.checkpoint_dir = os.path.join(config['learning_config']['running_config']["outdir"], "checkpoints")
        files = os.listdir(self.checkpoint_dir)
        files.sort(key=lambda x: int(x.split('_')[-1].replace('.h5', '')))
        self.model.load_weights(os.path.join(self.checkpoint_dir, files[-1]))
        self.init_steps= int(files[-1].split('_')[-1].replace('.h5', ''))
| [
"utils.text_featurizers.TextFeaturizer",
"numpy.fromfile",
"AMmodel.conformer.ConformerTransducer",
"numpy.array",
"AMmodel.streaming_conformer.StreamingConformerTransducer",
"AMmodel.deepspeech2.DeepSpeech2Transducer",
"os.listdir",
"numpy.ones",
"AMmodel.MultiConformer.ConformerMultiTaskCTC",
"t... | [((587, 625), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', (['self.text_config', '(True)'], {}), '(self.text_config, True)\n', (601, 625), False, 'from utils.text_featurizers import TextFeaturizer\n'), ((654, 690), 'utils.speech_featurizers.SpeechFeaturizer', 'SpeechFeaturizer', (['self.speech_config'], {}), '(self.speech_config)\n', (670, 690), False, 'from utils.speech_featurizers import SpeechFeaturizer\n'), ((5598, 5644), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', (["self.config['decoder1_config']"], {}), "(self.config['decoder1_config'])\n", (5612, 5644), False, 'from utils.text_featurizers import TextFeaturizer\n'), ((5671, 5717), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', (["self.config['decoder2_config']"], {}), "(self.config['decoder2_config'])\n", (5685, 5717), False, 'from utils.text_featurizers import TextFeaturizer\n'), ((5744, 5790), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', (["self.config['decoder3_config']"], {}), "(self.config['decoder3_config'])\n", (5758, 5790), False, 'from utils.text_featurizers import TextFeaturizer\n'), ((6023, 6121), 'AMmodel.MultiConformer.ConformerMultiTaskCTC', 'ConformerMultiTaskCTC', (['self.model_config'], {'training': 'training', 'speech_config': 'self.speech_config'}), '(self.model_config, training=training, speech_config=\n self.speech_config)\n', (6044, 6121), False, 'from AMmodel.MultiConformer import ConformerMultiTaskCTC\n'), ((7544, 7614), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['self.model', 'export_path'], {'signatures': 'concrete_func'}), '(self.model, export_path, signatures=concrete_func)\n', (7563, 7614), True, 'import tensorflow as tf\n'), ((10379, 10465), 'os.path.join', 'os.path.join', (["config['learning_config']['running_config']['outdir']", '"""checkpoints"""'], {}), "(config['learning_config']['running_config']['outdir'],\n 'checkpoints')\n", (10391, 10465), False, 'import os\n'), ((10479, 10510), 'os.listdir', 
'os.listdir', (['self.checkpoint_dir'], {}), '(self.checkpoint_dir)\n', (10489, 10510), False, 'import os\n'), ((2507, 2547), 'AMmodel.conformer.ConformerTransducer', 'ConformerTransducer', ([], {}), '(**self.model_config)\n', (2526, 2547), False, 'from AMmodel.conformer import ConformerCTC, ConformerLAS, ConformerTransducer\n'), ((4584, 4676), 'AMmodel.deepspeech2.DeepSpeech2Transducer', 'DeepSpeech2Transducer', (['input_shape', 'self.model_config'], {'speech_config': 'self.speech_config'}), '(input_shape, self.model_config, speech_config=self.\n speech_config)\n', (4605, 4676), False, 'from AMmodel.deepspeech2 import DeepSpeech2CTC, DeepSpeech2LAS, DeepSpeech2Transducer\n'), ((7933, 7957), 'numpy.fromfile', 'np.fromfile', (['fp', '"""int16"""'], {}), "(fp, 'int16')\n", (7944, 7957), True, 'import numpy as np\n'), ((7975, 8000), 'numpy.array', 'np.array', (['data', '"""float32"""'], {}), "(data, 'float32')\n", (7983, 8000), True, 'import numpy as np\n'), ((8203, 8225), 'numpy.expand_dims', 'np.expand_dims', (['mel', '(0)'], {}), '(mel, 0)\n', (8217, 8225), True, 'import numpy as np\n'), ((8253, 8324), 'numpy.array', 'np.array', (['[[mel.shape[1] // self.model.time_reduction_factor]]', '"""int32"""'], {}), "([[mel.shape[1] // self.model.time_reduction_factor]], 'int32')\n", (8261, 8324), True, 'import numpy as np\n'), ((8405, 8566), 'numpy.array', 'np.array', (["[[mel.shape[1] // self.model.time_reduction_factor // (self.speech_config[\n 'sample_rate'] * self.speech_config['stride_ms'] / 1000)]]", '"""int32"""'], {}), "([[mel.shape[1] // self.model.time_reduction_factor // (self.\n speech_config['sample_rate'] * self.speech_config['stride_ms'] / 1000)]\n ], 'int32')\n", (8413, 8566), True, 'import numpy as np\n'), ((9021, 9182), 'numpy.array', 'np.array', (["[[mel.shape[2] // self.model.time_reduction_factor // (self.speech_config[\n 'sample_rate'] * self.speech_config['stride_ms'] / 1000)]]", '"""int32"""'], {}), "([[mel.shape[2] // 
self.model.time_reduction_factor // (self.\n speech_config['sample_rate'] * self.speech_config['stride_ms'] / 1000)]\n ], 'int32')\n", (9029, 9182), True, 'import numpy as np\n'), ((10620, 10664), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', 'files[-1]'], {}), '(self.checkpoint_dir, files[-1])\n', (10632, 10664), False, 'import os\n'), ((2710, 2743), 'AMmodel.conformer.ConformerCTC', 'ConformerCTC', ([], {}), '(**self.model_config)\n', (2722, 2743), False, 'from AMmodel.conformer import ConformerCTC, ConformerLAS, ConformerTransducer\n'), ((4757, 4873), 'AMmodel.deepspeech2.DeepSpeech2CTC', 'DeepSpeech2CTC', (['input_shape', 'self.model_config', 'self.text_feature.num_classes'], {'speech_config': 'self.speech_config'}), '(input_shape, self.model_config, self.text_feature.\n num_classes, speech_config=self.speech_config)\n', (4771, 4873), False, 'from AMmodel.deepspeech2 import DeepSpeech2CTC, DeepSpeech2LAS, DeepSpeech2Transducer\n'), ((9785, 9801), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (9793, 9801), True, 'import numpy as np\n'), ((9826, 9851), 'numpy.expand_dims', 'np.expand_dims', (['result', '(0)'], {}), '(result, 0)\n', (9840, 9851), True, 'import numpy as np\n'), ((3039, 3226), 'AMmodel.conformer.ConformerLAS', 'ConformerLAS', (["self.config['model_config']"], {'training': 'training', 'enable_tflite_convertible': "self.config['model_config']['enable_tflite_convertible']", 'speech_config': 'self.speech_config'}), "(self.config['model_config'], training=training,\n enable_tflite_convertible=self.config['model_config'][\n 'enable_tflite_convertible'], speech_config=self.speech_config)\n", (3051, 3226), False, 'from AMmodel.conformer import ConformerCTC, ConformerLAS, ConformerTransducer\n'), ((5143, 5325), 'AMmodel.deepspeech2.DeepSpeech2LAS', 'DeepSpeech2LAS', (['self.model_config', 'input_shape'], {'training': 'training', 'enable_tflite_convertible': "self.model_config['enable_tflite_convertible']", 'speech_config': 
'self.speech_config'}), "(self.model_config, input_shape, training=training,\n enable_tflite_convertible=self.model_config['enable_tflite_convertible'\n ], speech_config=self.speech_config)\n", (5157, 5325), False, 'from AMmodel.deepspeech2 import DeepSpeech2CTC, DeepSpeech2LAS, DeepSpeech2Transducer\n'), ((3511, 3553), 'AMmodel.streaming_conformer.StreamingConformerCTC', 'StreamingConformerCTC', ([], {}), '(**self.model_config)\n', (3532, 3553), False, 'from AMmodel.streaming_conformer import StreamingConformerCTC, StreamingConformerTransducer\n'), ((8919, 8942), 'numpy.zeros', 'np.zeros', (['[1, pad_T, 1]'], {}), '([1, pad_T, 1])\n', (8927, 8942), True, 'import numpy as np\n'), ((10147, 10159), 'numpy.ones', 'np.ones', (['[1]'], {}), '([1])\n', (10154, 10159), True, 'import numpy as np\n'), ((3796, 3845), 'AMmodel.streaming_conformer.StreamingConformerTransducer', 'StreamingConformerTransducer', ([], {}), '(**self.model_config)\n', (3824, 3845), False, 'from AMmodel.streaming_conformer import StreamingConformerCTC, StreamingConformerTransducer\n')] |
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.interpolate import interp1d
# global font size for both panels
plt.rcParams.update({'font.size': 9})
# set up plot: two stacked panels (luminosity on top, radius below)
fig = plt.figure(figsize=(3.5, 3.7))
gs = gridspec.GridSpec(2, 1)
axl = fig.add_subplot(gs[0,0])
axr = fig.add_subplot(gs[1,0])
# set up axes, labels (mass in M_Jup on a log x-axis shared by both panels)
Mlims = [0.005, 20]
Llims = [-9, -2.5]
Rlims = [0, 2.8]
axl.set_xlim(Mlims)
axl.set_xscale('log')
axl.set_ylim(Llims)
axl.set_xticklabels([])
axl.set_ylabel('log ($L_p / L_\odot$)', labelpad=5)
axr.set_xlim(Mlims)
axr.set_xscale('log')
axr.set_xticks([0.01, 0.1, 1, 10])
axr.set_xticklabels(['0.01', '0.1', '1', '10'])
axr.set_xlabel('$M_p$ (M$_{\\rm Jup}$)')
axr.set_ylim(Rlims)
axr.set_ylabel('$R_p$ (R$_{\\rm Jup}$)')
# constants (cgs, except Ljup which is in solar luminosities)
Ljup = 8.710e-10
Lsun = 3.828e33
Mear = 5.9722e27
Mjup = 1.898e30
Rjup = 6.9911e9
sig_ = 5.67e-5
# load Linder et al. 2019 model grids
# cloudy
mfile = 'Linder19/BEX_evol_mags_-2_MH_0.00_fsed_1.00.dat'
mLAGE, mMPL, mRPL, mLPL = np.loadtxt(mfile, usecols=(0, 1, 2, 3), skiprows=4).T
# keep only the 1 Myr (log age = 6.0) isochrone
yo = (mLAGE == 6.0)
M_L19c = mMPL[yo] * Mear / Mjup
L_L19c = np.log10(mLPL[yo] * Ljup)
R_L19c = mRPL[yo]
axl.plot(M_L19c, L_L19c, 'oC1', markersize=4, fillstyle='none')
axr.plot(M_L19c, R_L19c, 'oC1', markersize=4, fillstyle='none')
# clear
mfile = 'Linder19/BEX_evol_mags_-2_MH_0.00.dat'
mLAGE, mMPL, mRPL, mLPL = np.loadtxt(mfile, usecols=(0, 1, 2, 3), skiprows=4).T
wo = (mLAGE == 6.0)
M_L19 = mMPL[wo] * Mear / Mjup
L_L19 = np.log10(mLPL[wo] * Ljup)
R_L19 = mRPL[wo]
axl.plot(M_L19, L_L19, 'oC1', markersize=3)
axr.plot(M_L19, R_L19, 'oC1', markersize=3)
# Spiegel & Burrows 2012 (hot and cold) at 1 Myr
M_SB12 = np.array([1., 2., 5., 10.])
R_SB12hot1 = np.array([1.73, 1.69, 1.85, 2.30])
R_SB12cold1 = np.array([1.41, 1.32, 1.24, 1.14])
T_SB12hot1 = np.array([830., 1200., 1800., 2400.])
T_SB12cold1 = np.array([550., 620., 690., 690.])
# luminosities from Stefan-Boltzmann, L = 4 pi R^2 sigma T^4, in Lsun
L_SB12hot1 = 4*np.pi * sig_ * (R_SB12hot1 * Rjup)**2 * T_SB12hot1**4 / Lsun
L_SB12cold1 = 4*np.pi * sig_ * (R_SB12cold1 * Rjup)**2 * T_SB12cold1**4 / Lsun
axl.plot(M_SB12, np.log10(L_SB12cold1), 'P', color='m', markersize=5)
axl.plot(M_SB12, np.log10(L_SB12hot1), 'X', color='r', markersize=5)
axr.plot(M_SB12, R_SB12cold1, 'P', color='m', markersize=5)
axr.plot(M_SB12, R_SB12hot1, 'X', color='r', markersize=5)
# model means: hand-assembled mean L(M) and R(M) relations combining the
# L19 grids (low mass) and the SB12 hot/cold averages (high mass)
mean_M = np.concatenate((M_L19[:-1], np.array([1., 2., 5., 10., 20.])))
mean_L, mean_R = np.zeros_like(mean_M), np.zeros_like(mean_M)
mean_L[0] = L_L19[0]
mean_L[1:6] = 0.5*(L_L19[1:6] + L_L19c[0:5])
mean_L[6] = (L_L19[6] + np.log10(L_SB12cold1[0]) + np.log10(L_SB12hot1[0])) / 3.
mean_L[7:10] = 0.5*(np.log10(L_SB12cold1[1:]) + np.log10(L_SB12hot1[1:]))
mean_L[10] = -4
mean_R[0] = R_L19[0]
mean_R[1:6] = 0.5*(R_L19[1:6] + R_L19c[0:5])
mean_R[6] = (R_L19[6] + R_SB12cold1[0] + R_SB12hot1[0]) / 3.
mean_R[7:10] = 0.5*(R_SB12cold1[1:] + R_SB12hot1[1:])
mean_R[10] = 1.9
# smooth quadratic interpolation of the adopted relations (extrapolated at the ends)
Lint = interp1d(mean_M, mean_L, kind='quadratic', fill_value='extrapolate')
Rint = interp1d(mean_M, mean_R, kind='quadratic', fill_value='extrapolate')
Mgrid = np.logspace(-2, np.log10(20), 128)
axl.plot(Mgrid, Lint(Mgrid), ':C0')
axr.plot(Mgrid, Rint(Mgrid), ':C0')
# save the adopted grids for downstream use
np.savez('planetevol.npz', Mgrid=Mgrid, Lgrid=Lint(Mgrid), Rgrid=Rint(Mgrid))
# labeling (manual legend drawn in axes coordinates)
axl.plot([0.07], [0.90], 'X', color='r', markersize=5, transform=axl.transAxes)
axl.text(0.10, 0.90, 'SB12 - hot', ha='left', va='center',
         color='r', transform=axl.transAxes, fontsize=7)
axl.plot([0.07], [0.83], 'P', color='m', markersize=5, transform=axl.transAxes)
axl.text(0.10, 0.83, 'SB12 - cold', ha='left', va='center',
         color='m', transform=axl.transAxes, fontsize=7)
axl.plot([0.07], [0.76], 'oC1', markersize=3, transform=axl.transAxes)
axl.text(0.10, 0.76, 'L19 (solar)', ha='left', va='center',
         color='C1', transform=axl.transAxes, fontsize=7)
axl.plot([0.07], [0.69], 'oC1', markersize=4, fillstyle='none',
         transform=axl.transAxes)
axl.text(0.10, 0.69, 'L19 (solar, clouds)', ha='left', va='center',
         color='C1', transform=axl.transAxes, fontsize=7)
axl.plot([0.02, 0.09], [0.62, 0.62], ':C0', transform=axl.transAxes)
axl.text(0.10, 0.62, 'adopted', ha='left', va='center', color='C0',
         transform=axl.transAxes, fontsize=7)
fig.subplots_adjust(left=0.13, right=0.87, bottom=0.10, top=0.99, hspace=0.04)
fig.savefig('../figs/planet_evol.pdf')
fig.clf()
| [
"numpy.log10",
"scipy.interpolate.interp1d",
"numpy.array",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.loadtxt",
"numpy.zeros_like"
] | [((145, 182), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 9}"], {}), "({'font.size': 9})\n", (164, 182), True, 'import matplotlib.pyplot as plt\n'), ((205, 235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3.5, 3.7)'}), '(figsize=(3.5, 3.7))\n', (215, 235), True, 'import matplotlib.pyplot as plt\n'), ((241, 264), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {}), '(2, 1)\n', (258, 264), True, 'import matplotlib.gridspec as gridspec\n'), ((1131, 1156), 'numpy.log10', 'np.log10', (['(mLPL[yo] * Ljup)'], {}), '(mLPL[yo] * Ljup)\n', (1139, 1156), True, 'import numpy as np\n'), ((1499, 1524), 'numpy.log10', 'np.log10', (['(mLPL[wo] * Ljup)'], {}), '(mLPL[wo] * Ljup)\n', (1507, 1524), True, 'import numpy as np\n'), ((1691, 1722), 'numpy.array', 'np.array', (['[1.0, 2.0, 5.0, 10.0]'], {}), '([1.0, 2.0, 5.0, 10.0])\n', (1699, 1722), True, 'import numpy as np\n'), ((1733, 1766), 'numpy.array', 'np.array', (['[1.73, 1.69, 1.85, 2.3]'], {}), '([1.73, 1.69, 1.85, 2.3])\n', (1741, 1766), True, 'import numpy as np\n'), ((1782, 1816), 'numpy.array', 'np.array', (['[1.41, 1.32, 1.24, 1.14]'], {}), '([1.41, 1.32, 1.24, 1.14])\n', (1790, 1816), True, 'import numpy as np\n'), ((1831, 1872), 'numpy.array', 'np.array', (['[830.0, 1200.0, 1800.0, 2400.0]'], {}), '([830.0, 1200.0, 1800.0, 2400.0])\n', (1839, 1872), True, 'import numpy as np\n'), ((1883, 1921), 'numpy.array', 'np.array', (['[550.0, 620.0, 690.0, 690.0]'], {}), '([550.0, 620.0, 690.0, 690.0])\n', (1891, 1921), True, 'import numpy as np\n'), ((2926, 2994), 'scipy.interpolate.interp1d', 'interp1d', (['mean_M', 'mean_L'], {'kind': '"""quadratic"""', 'fill_value': '"""extrapolate"""'}), "(mean_M, mean_L, kind='quadratic', fill_value='extrapolate')\n", (2934, 2994), False, 'from scipy.interpolate import interp1d\n'), ((3002, 3070), 'scipy.interpolate.interp1d', 'interp1d', (['mean_M', 'mean_R'], {'kind': '"""quadratic"""', 'fill_value': 
'"""extrapolate"""'}), "(mean_M, mean_R, kind='quadratic', fill_value='extrapolate')\n", (3010, 3070), False, 'from scipy.interpolate import interp1d\n'), ((1016, 1067), 'numpy.loadtxt', 'np.loadtxt', (['mfile'], {'usecols': '(0, 1, 2, 3)', 'skiprows': '(4)'}), '(mfile, usecols=(0, 1, 2, 3), skiprows=4)\n', (1026, 1067), True, 'import numpy as np\n'), ((1386, 1437), 'numpy.loadtxt', 'np.loadtxt', (['mfile'], {'usecols': '(0, 1, 2, 3)', 'skiprows': '(4)'}), '(mfile, usecols=(0, 1, 2, 3), skiprows=4)\n', (1396, 1437), True, 'import numpy as np\n'), ((2092, 2113), 'numpy.log10', 'np.log10', (['L_SB12cold1'], {}), '(L_SB12cold1)\n', (2100, 2113), True, 'import numpy as np\n'), ((2162, 2182), 'numpy.log10', 'np.log10', (['L_SB12hot1'], {}), '(L_SB12hot1)\n', (2170, 2182), True, 'import numpy as np\n'), ((2438, 2459), 'numpy.zeros_like', 'np.zeros_like', (['mean_M'], {}), '(mean_M)\n', (2451, 2459), True, 'import numpy as np\n'), ((2461, 2482), 'numpy.zeros_like', 'np.zeros_like', (['mean_M'], {}), '(mean_M)\n', (2474, 2482), True, 'import numpy as np\n'), ((3096, 3108), 'numpy.log10', 'np.log10', (['(20)'], {}), '(20)\n', (3104, 3108), True, 'import numpy as np\n'), ((2386, 2423), 'numpy.array', 'np.array', (['[1.0, 2.0, 5.0, 10.0, 20.0]'], {}), '([1.0, 2.0, 5.0, 10.0, 20.0])\n', (2394, 2423), True, 'import numpy as np\n'), ((2600, 2623), 'numpy.log10', 'np.log10', (['L_SB12hot1[0]'], {}), '(L_SB12hot1[0])\n', (2608, 2623), True, 'import numpy as np\n'), ((2650, 2675), 'numpy.log10', 'np.log10', (['L_SB12cold1[1:]'], {}), '(L_SB12cold1[1:])\n', (2658, 2675), True, 'import numpy as np\n'), ((2678, 2702), 'numpy.log10', 'np.log10', (['L_SB12hot1[1:]'], {}), '(L_SB12hot1[1:])\n', (2686, 2702), True, 'import numpy as np\n'), ((2573, 2597), 'numpy.log10', 'np.log10', (['L_SB12cold1[0]'], {}), '(L_SB12cold1[0])\n', (2581, 2597), True, 'import numpy as np\n')] |
from typing import Tuple, Callable, Union, Any
import numpy as np
import torch
from torch.nn import Module, Parameter
from netlens.image_proc import IMAGENET_MEAN, IMAGENET_STD
class RawParam(Module):
    """A raw 'parameterized image' that wraps a plain tensor as a Parameter.

    Must be the first layer in the network: it wraps the input tensor and makes
    it differentiable.
    """
    def __init__(self, input: torch.Tensor, cloned: bool = True):
        super().__init__()
        if cloned:
            source = input.clone().detach().requires_grad_()
        else:
            source = input
        self.param = Parameter(source)

    def forward(self):
        # The "layer" simply emits the learnable tensor itself.
        return self.param

    def __repr__(self):
        return f'{self.__class__.__name__}: {self.param.shape}'
# Decorrelation code ported from Lucid: https://github.com/tensorflow/lucid
# Square root of the empirical (ImageNet) RGB colour-correlation matrix; used by
# _linear_decorrelate_color to map decorrelated colours back to normal RGB.
color_correlation_svd_sqrt = np.asarray([[0.26, 0.09, 0.02],
                                        [0.27, 0.00, -0.05],
                                        [0.27, -0.09, 0.03]]).astype("float32")
# Largest column norm of the matrix above, used to normalize it before applying.
max_norm_svd_sqrt = np.max(np.linalg.norm(color_correlation_svd_sqrt, axis=0))
def _get_default_device():
return 'cuda' if torch.cuda.is_available() else 'cpu'
def _linear_decorrelate_color(t: torch.Tensor) -> torch.Tensor:
    """Multiply input by sqrt of empirical (ImageNet) color correlation matrix.

    If you interpret t's innermost dimension as describing colors in a
    decorrelated version of the color space (a very natural way to describe
    colors -- see the Feature Visualization article), multiplying by the square
    root of the color-correlation matrix maps back to normal colors.
    """
    assert t.shape[0] == 1  # must be (N,C,W,H)
    original_shape = t.shape
    flat = t.squeeze(0).view((3, -1))
    correlation = torch.tensor(color_correlation_svd_sqrt / max_norm_svd_sqrt, device=t.device)
    recolored = correlation @ flat
    return recolored.view(original_shape)
def rfft2d_freqs(h: int, w: int) -> np.ndarray:
    """Computes 2D spectrum frequencies."""
    freqs_y = np.fft.fftfreq(h)[:, None]
    freqs_x = np.fft.fftfreq(w)
    # Radial frequency magnitude on the (h, w) grid.
    return np.sqrt(freqs_x * freqs_x + freqs_y * freqs_y)
def _assert_image_param_inputs(im_initial: torch.Tensor, size: Tuple[int, int]):
assert (im_initial is not None) ^ (size is not None), "Exactly one of 'im_initial' or 'size' has to be specified."
if im_initial is not None:
assert im_initial.dim() == 4 and im_initial.shape[:2] == (1, 3), "The image must be of shape (1,3,H,W)"
size = im_initial.shape[2:]
device = im_initial.device
else:
device = _get_default_device()
return size, device
def fourier_image(im_initial: torch.Tensor = None, size: Tuple[int, int] = None, spectrum_scale: float = 0.01, decay_power: float = 1.0) \
        -> Tuple[torch.Tensor, Callable]:
    """
    Image initialized in the Fourier domain.

    Returns (spectrum, decode): `spectrum` is the optimizable tensor of shape
    (3, H, W, 2) (real/imaginary stacked in the last dim) and `decode` maps a
    spectrum back to an image of shape (1, 3, H, W).

    Bug fix: the legacy `torch.rfft`/`torch.irfft` API was removed in
    PyTorch 1.8; this now uses the equivalent `torch.fft.fft2`/`ifft2` with
    `view_as_real`/`view_as_complex`, keeping the same spectrum layout.
    """
    size, device = _assert_image_param_inputs(im_initial, size)

    # this is needed to compute only once
    freqs = rfft2d_freqs(*size)
    scale = 1.0 / np.maximum(freqs, 1.0 / max(*size)) ** decay_power
    scale *= np.sqrt(size[0] * size[1])
    scale = torch.tensor(scale, dtype=torch.float32, device=device)

    def _get_spectrum(_im):
        # Full (two-sided) 2D FFT stored as a real tensor (..., 2), matching
        # the old torch.rfft(..., signal_ndim=2, onesided=False) layout.
        scaled_spectrum_t = torch.view_as_real(torch.fft.fft2(_im.squeeze(0)))
        return scaled_spectrum_t / scale[None, ..., None]

    def _get_image(_spectrum):
        scaled_spectrum_t = scale[None, ..., None] * _spectrum
        # Inverse FFT; the image is real-valued, so keep only the real part
        # (equivalent to the old torch.irfft(..., onesided=False)).
        complex_spectrum = torch.view_as_complex(scaled_spectrum_t.contiguous())
        return torch.fft.ifft2(complex_spectrum).real.unsqueeze(0)

    if im_initial is not None:
        spectrum = _get_spectrum(im_initial.clone().detach()).detach()
    else:
        spectrum = (spectrum_scale * torch.randn((3, *freqs.shape, 2))).to(device)  # dimensions: (C,W,H,Re/Im)
    return spectrum, _get_image
def random_image(im_initial: torch.Tensor = None, size: Tuple[int, int] = None, sd: float = 0.5) -> Tuple[torch.Tensor, Callable]:
    """
    Create a random 'image' from a normal distribution
    """
    size, device = _assert_image_param_inputs(im_initial, size)

    def _identity(image):
        # Pixel parameterization: the parameter already *is* the image.
        return image

    if im_initial is None:
        im = sd * torch.randn(1, 3, *size, device=device)
    else:
        im = im_initial.clone().detach()
    return im, _identity
class ImageParam(Module):
    """Class to create a parameterized image.

    Parameters:
        im_initial: optional starting image of shape (1,3,H,W).
        size: size of image, a tuple (H, W) or an integer. If it's an integer,
            the image will be square.
        fft (bool): parameterize the image in the Fourier domain.
        decorrelate (bool): decorrelate the colours of the image.
        sigmoid (bool): apply sigmoid after decorrelation to ensure values are in range(0,1)
        norm_stats: (mean, std) channel statistics used by normalize/denormalize;
            None disables normalization.
        kwargs: passed on to the image function fourier_image or random_image.
    """
    def __init__(self, im_initial: torch.Tensor = None, size: Union[int, Tuple[int, int]] = None, fft: bool = True, decorrelate: bool = True,
                 sigmoid: bool = True, norm_stats: Tuple[Any, Any] = (IMAGENET_MEAN, IMAGENET_STD), **kwargs):
        super().__init__()
        self.fft = fft
        self.decorrelate = decorrelate
        self.sigmoid = sigmoid
        self.norm_stats = norm_stats

        im_func = fourier_image if fft else random_image
        size = (size, size) if isinstance(size, int) else size
        self.param, self.get_image = im_func(im_initial, size, **kwargs)
        self.param = Parameter(self.param)
        # Bug fix: __repr__ reads self.size, which was never stored and made
        # repr() raise AttributeError. When size is None, im_initial was given
        # (the image functions assert exactly one of the two), so resolve from it.
        self.size = tuple(im_initial.shape[2:]) if size is None else size

    def forward(self):
        """Decode the parameter into a (1,3,H,W) image, decorrelated/squashed/normalized as configured."""
        im = self.get_image(self.param)
        if self.decorrelate:
            im = _linear_decorrelate_color(im)
        # Sigmoid guarantees values in (0,1); otherwise clamp into [0,1].
        im = torch.sigmoid(im) if self.sigmoid else im.clamp(min=0.0, max=1.0)
        return self.normalize(im)

    def normalize(self, im):
        """Apply channel-wise (im - mean) / std; no-op when norm_stats is None."""
        if self.norm_stats is None:
            return im
        mean = torch.as_tensor(self.norm_stats[0], dtype=im.dtype, device=im.device)
        std = torch.as_tensor(self.norm_stats[1], dtype=im.dtype, device=im.device)
        return im.sub(mean[:, None, None]).div(std[:, None, None])

    def denormalize(self, im):
        """Invert normalize() and clamp the result into [0,1]."""
        if self.norm_stats is None:
            return im
        mean = torch.as_tensor(self.norm_stats[0], dtype=im.dtype, device=im.device)
        std = torch.as_tensor(self.norm_stats[1], dtype=im.dtype, device=im.device)
        return im.mul(std[:, None, None]).add(mean[:, None, None]).clamp(min=0.0, max=1.0)

    def __repr__(self):
        return f"{self.__class__.__name__}: {self.size}px, fft={self.fft}, decorrelate={self.decorrelate}"
| [
"torch.as_tensor",
"numpy.sqrt",
"numpy.fft.fftfreq",
"torch.sigmoid",
"numpy.asarray",
"torch.tensor",
"torch.irfft",
"torch.cuda.is_available",
"torch.nn.Parameter",
"numpy.linalg.norm",
"torch.randn"
] | [((1015, 1065), 'numpy.linalg.norm', 'np.linalg.norm', (['color_correlation_svd_sqrt'], {'axis': '(0)'}), '(color_correlation_svd_sqrt, axis=0)\n', (1029, 1065), True, 'import numpy as np\n'), ((1752, 1829), 'torch.tensor', 'torch.tensor', (['(color_correlation_svd_sqrt / max_norm_svd_sqrt)'], {'device': 't.device'}), '(color_correlation_svd_sqrt / max_norm_svd_sqrt, device=t.device)\n', (1764, 1829), False, 'import torch\n'), ((2062, 2079), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['w'], {}), '(w)\n', (2076, 2079), True, 'import numpy as np\n'), ((2091, 2117), 'numpy.sqrt', 'np.sqrt', (['(fx * fx + fy * fy)'], {}), '(fx * fx + fy * fy)\n', (2098, 2117), True, 'import numpy as np\n'), ((3071, 3097), 'numpy.sqrt', 'np.sqrt', (['(size[0] * size[1])'], {}), '(size[0] * size[1])\n', (3078, 3097), True, 'import numpy as np\n'), ((3110, 3165), 'torch.tensor', 'torch.tensor', (['scale'], {'dtype': 'torch.float32', 'device': 'device'}), '(scale, dtype=torch.float32, device=device)\n', (3122, 3165), False, 'import torch\n'), ((812, 885), 'numpy.asarray', 'np.asarray', (['[[0.26, 0.09, 0.02], [0.27, 0.0, -0.05], [0.27, -0.09, 0.03]]'], {}), '([[0.26, 0.09, 0.02], [0.27, 0.0, -0.05], [0.27, -0.09, 0.03]])\n', (822, 885), True, 'import numpy as np\n'), ((1117, 1142), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1140, 1142), False, 'import torch\n'), ((2026, 2043), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['h'], {}), '(h)\n', (2040, 2043), True, 'import numpy as np\n'), ((5347, 5368), 'torch.nn.Parameter', 'Parameter', (['self.param'], {}), '(self.param)\n', (5356, 5368), False, 'from torch.nn import Module, Parameter\n'), ((5727, 5796), 'torch.as_tensor', 'torch.as_tensor', (['self.norm_stats[0]'], {'dtype': 'im.dtype', 'device': 'im.device'}), '(self.norm_stats[0], dtype=im.dtype, device=im.device)\n', (5742, 5796), False, 'import torch\n'), ((5811, 5880), 'torch.as_tensor', 'torch.as_tensor', (['self.norm_stats[1]'], {'dtype': 'im.dtype', 
'device': 'im.device'}), '(self.norm_stats[1], dtype=im.dtype, device=im.device)\n', (5826, 5880), False, 'import torch\n'), ((6053, 6122), 'torch.as_tensor', 'torch.as_tensor', (['self.norm_stats[0]'], {'dtype': 'im.dtype', 'device': 'im.device'}), '(self.norm_stats[0], dtype=im.dtype, device=im.device)\n', (6068, 6122), False, 'import torch\n'), ((6137, 6206), 'torch.as_tensor', 'torch.as_tensor', (['self.norm_stats[1]'], {'dtype': 'im.dtype', 'device': 'im.device'}), '(self.norm_stats[1], dtype=im.dtype, device=im.device)\n', (6152, 6206), False, 'import torch\n'), ((4165, 4204), 'torch.randn', 'torch.randn', (['(1)', '(3)', '*size'], {'device': 'device'}), '(1, 3, *size, device=device)\n', (4176, 4204), False, 'import torch\n'), ((5524, 5541), 'torch.sigmoid', 'torch.sigmoid', (['im'], {}), '(im)\n', (5537, 5541), False, 'import torch\n'), ((3449, 3534), 'torch.irfft', 'torch.irfft', (['scaled_spectrum_t'], {'signal_ndim': '(2)', 'onesided': '(False)', 'signal_sizes': 'size'}), '(scaled_spectrum_t, signal_ndim=2, onesided=False, signal_sizes=size\n )\n', (3460, 3534), False, 'import torch\n'), ((3693, 3726), 'torch.randn', 'torch.randn', (['(3, *freqs.shape, 2)'], {}), '((3, *freqs.shape, 2))\n', (3704, 3726), False, 'import torch\n')] |
from typing import Callable, Tuple
import numpy as np
import model
# types
# A 7-tuple of simulation parameters (k, m, v, r, t, tend, dt) — see the
# _k.._dt accessors below for the index mapping.
SIM_PARAMETERS = Tuple[float, float, float, float, float, float, float]
# A per-step result row (s, c, dcds, v) — see the _s.._v accessors below.
RESULT = Tuple[float, float, float, float]
# Accessor signatures over the two tuple shapes.
HF1 = Callable[[SIM_PARAMETERS], float]
HF2 = Callable[[RESULT], float]
def set_parameters(
    k: float, m: float, v: float, r: float, tend: int, dt: int
) -> SIM_PARAMETERS:
    """Formats simulation parameters: rates become fractions, times become years, t starts at 0."""
    fractions = [_percentage(x) for x in (m, v, r)]
    years = [_per_year(x) for x in (tend, dt)]
    return (k, *fractions, 0, *years)
def start(initial_price: float, ps: SIM_PARAMETERS) -> np.ndarray:
    """Starts simulation and returns the result rows rounded to 2 decimals."""
    price_stream = _gen_stonk_gbm(initial_price, ps)
    rows = _run(price_stream, ps, np.array([]))
    return np.round(rows, 2)
def _gen_stonk_gbm(s, ps):
    """Yield an endless geometric-Brownian-motion price path starting at `s`.

    Bug fix: the original recursed via `yield from`, stacking one generator
    frame per yielded price and eventually hitting the recursion limit on long
    simulations; this iterative form yields the identical stream.
    """
    args = _m(ps), _sig(ps), _dt(ps)
    while True:
        yield s
        s = s + model.ds(s, *args)
def _run(gs, ps, rs):
    """Advance the simulation until t exceeds tend, appending one result row per step.

    Bug fix: the original was tail-recursive, adding one Python stack frame per
    time step, which overflows for small dt; this loop performs the identical
    sequence of _compute/_append/_increment_time calls iteratively.
    """
    while _t(ps) <= _tend(ps):
        r = _compute(next(gs), ps, rs)
        rs = _append(rs, r)
        ps = _increment_time(ps)
    return rs
def _compute(s, ps, rs):
    """Evaluate the BSM model at price `s` and roll the cash balance forward from the last row."""
    return _cash_balance(evaluate_bsm(s, ps), _tail(rs), ps)
def _cash_balance(rn, rnm, ps):
    """Update the hedge cash balance: delta-change cost plus the previous balance grown at rate r."""
    price, call, delta = rn
    _, _, prev_delta, prev_value = rnm
    value = (delta - prev_delta) * price + prev_value * np.e ** (_r(ps) * _dt(ps))
    return price, call, delta, value
def evaluate_bsm(s, ps):
    """Evaluates Black-Scholes model for a European call option."""
    bsm_args = (s, _k(ps), _sig(ps), _r(ps), _t(ps), _tend(ps))
    return s, model.c(*bsm_args), model.delta(*bsm_args)
_k: HF1 = lambda ps: ps[0]
_m: HF1 = lambda ps: ps[1]
_sig: HF1 = lambda ps: ps[2]
_r: HF1 = lambda ps: ps[3]
_t: HF1 = lambda ps: ps[4]
_tend: HF1 = lambda ps: ps[5]
_dt: HF1 = lambda ps: ps[6]
_s: HF2 = lambda ps: ps[0]
_c: HF2 = lambda ps: ps[1]
_dcds: HF2 = lambda ps: ps[2]
_v: HF2 = lambda ps: ps[3]
_per_year: Callable[[int], float] = lambda x: round(x/365, 5)
_percentage: Callable[[float], float] = lambda x: x/100
def _increment_time(ps):
    """Return `ps` with the simulation clock advanced one step (t -> t + dt)."""
    stepped_t = _t(ps) + _dt(ps)
    return _k(ps), _m(ps), _sig(ps), _r(ps), stepped_t, _tend(ps), _dt(ps)
def _append(xs, ys):
if np.size(xs) == 0:
return np.array([ys])
else:
return np.vstack([xs, ys])
def _tail(rs):
try:
return rs[-1]
except IndexError:
return np.array([0, 0, 0, 0])
| [
"model.ds",
"numpy.size",
"model.delta",
"numpy.array",
"numpy.vstack",
"model.c",
"numpy.round"
] | [((693, 712), 'numpy.round', 'np.round', (['result', '(2)'], {}), '(result, 2)\n', (701, 712), True, 'import numpy as np\n'), ((668, 680), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (676, 680), True, 'import numpy as np\n'), ((1466, 1480), 'model.c', 'model.c', (['*args'], {}), '(*args)\n', (1473, 1480), False, 'import model\n'), ((1482, 1500), 'model.delta', 'model.delta', (['*args'], {}), '(*args)\n', (1493, 1500), False, 'import model\n'), ((2090, 2101), 'numpy.size', 'np.size', (['xs'], {}), '(xs)\n', (2097, 2101), True, 'import numpy as np\n'), ((2123, 2137), 'numpy.array', 'np.array', (['[ys]'], {}), '([ys])\n', (2131, 2137), True, 'import numpy as np\n'), ((2163, 2182), 'numpy.vstack', 'np.vstack', (['[xs, ys]'], {}), '([xs, ys])\n', (2172, 2182), True, 'import numpy as np\n'), ((2269, 2291), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (2277, 2291), True, 'import numpy as np\n'), ((825, 843), 'model.ds', 'model.ds', (['s', '*args'], {}), '(s, *args)\n', (833, 843), False, 'import model\n')] |
import os
# import config
from config.edict_config import config
import mxnet as mx
from mxnet.io import DataBatch, DataIter
import numpy as np
class SyntheticDataIter(DataIter):
    """DataIter serving one fixed random batch, for benchmarking without an input pipeline.

    The same (data, label) pair, held in pinned CPU memory, is returned up to
    `max_iter` times per epoch.
    """
    def __init__(self, num_classes, data_shape, max_iter, dtype):
        self.batch_size = data_shape[0]
        self.cur_iter = 0
        self.max_iter = max_iter
        self.dtype = dtype
        labels = np.random.randint(0, num_classes, [self.batch_size,])
        images = np.random.uniform(-1, 1, data_shape)
        self.data = mx.nd.array(images, dtype=self.dtype, ctx=mx.Context('cpu_pinned', 0))
        self.label = mx.nd.array(labels, dtype=self.dtype, ctx=mx.Context('cpu_pinned', 0))

    def __iter__(self):
        return self

    @property
    def provide_data(self):
        return [mx.io.DataDesc('data', self.data.shape, self.dtype)]

    @property
    def provide_label(self):
        return [mx.io.DataDesc('softmax_label', (self.batch_size,), self.dtype)]

    def next(self):
        self.cur_iter += 1
        if self.cur_iter > self.max_iter:
            raise StopIteration
        return DataBatch(data=(self.data,),
                         label=(self.label,),
                         pad=0,
                         index=None,
                         provide_data=self.provide_data,
                         provide_label=self.provide_label)

    def __next__(self):
        return self.next()

    def reset(self):
        self.cur_iter = 0
class MultipleDataIter(DataIter):
    """Reads one ImageRecord file through `num_parts` parallel sub-iterators and
    concatenates their sub-batches into a single batch of `batch_size`.

    Bug fixes: `xrange` is Python 2 only and raised NameError under Python 3
    (the file already uses Python 3 `//` division); `mergeIters` was a verbatim
    duplicate of `next` and now delegates to it.
    """
    def __init__(self, rec_path, batch_size, num_parts, image_shape, data_nthread):
        self.batch_size = batch_size
        self.num_parts = num_parts
        self.image_shape = image_shape
        self.iters = self.getMultipleIter(rec_path, batch_size, num_parts, image_shape, data_nthread)

    def getMultipleIter(self, rec_path, batch_size, num_parts, image_shape, data_nthreads):
        """Build one ImageRecordIter per partition of the record file."""
        train_iters = []
        for i in range(num_parts):
            train_iters.append(mx.io.ImageRecordIter(
                path_imgrec=rec_path,
                label_width=1,
                data_name='data',
                label_name='softmax_label',
                data_shape=image_shape,
                batch_size=batch_size//num_parts,
                pad=0,
                fill_value=127,
                random_resized_crop=True,
                max_random_area=1.0,
                min_random_area=0.08,
                max_aspect_ratio=4.0 / 3.0,
                min_aspect_ratio=3.0 / 4.0,
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                mean_r=123.68,
                mean_g=116.28,
                mean_b=103.53,
                std_r=58.395,
                std_g=57.12,
                std_b=57.375,
                pca_noise=0.1,
                scale=1,
                inter_method=2,
                rand_mirror=True,
                shuffle=True,
                shuffle_chunk_size=4096,
                preprocess_threads=data_nthreads,
                prefetch_buffer=16,
                num_parts=num_parts,
                part_index=i))
        return train_iters

    def mergeIters(self):
        """Deprecated alias kept for backward compatibility; identical to next()."""
        return self.next()

    def __iter__(self):
        return self

    @property
    def provide_data(self):
        return [mx.io.DataDesc('data', (self.batch_size,) + self.image_shape, np.float32)]

    @property
    def provide_label(self):
        return [mx.io.DataDesc('softmax_label', (self.batch_size,), np.float32)]

    def next(self):
        """Pull one sub-batch from each partition and concatenate along the batch axis."""
        dataBatchs = [next(iter_elem) for iter_elem in self.iters]
        total_data = dataBatchs[0].data[0]
        total_label = dataBatchs[0].label[0]
        for extra in dataBatchs[1:]:
            total_data = mx.nd.concat(total_data, extra.data[0], dim=0)
            total_label = mx.nd.concat(total_label, extra.label[0], dim=0)
        return DataBatch(data=(total_data,),
                         label=(total_label,),
                         pad=0,
                         index=None,
                         provide_data=self.provide_data,
                         provide_label=self.provide_label)

    def __next__(self):
        return self.next()

    def reset(self):
        for iter_elem in self.iters:
            iter_elem.reset()
def imagenet_iterator(data_dir, batch_size, kv, image_shape):
    """Create ImageNet train/val record iterators, or synthetic data in benchmark mode.

    Args:
        data_dir: directory containing train.rec / val.rec.
        batch_size: per-iterator batch size.
        kv: kvstore; its num_workers/rank shard the records across workers.
        image_shape: (C, H, W) shape of the decoded images.

    Returns:
        (train, val, num_examples); val is None when config.benchmark > 0.
    """
    num_examples = 1281167  # size of the ImageNet-1k training set
    # Benchmark mode: skip the real data pipeline entirely.
    if config.benchmark is not None and config.benchmark > 0:
        data_shape = (batch_size,) + image_shape
        train = SyntheticDataIter(config.num_classes, data_shape, 5005, np.float32)
        return (train, None, num_examples)
    # Training pipeline: random-resized crop plus colour/PCA augmentation.
    train = mx.io.ImageRecordIter(
        path_imgrec         = os.path.join(data_dir, "train.rec"),
        label_width         = 1,
        data_name           = 'data',
        label_name          = 'softmax_label',
        resize              = 256,
        data_shape          = image_shape,
        batch_size          = batch_size,
        pad                 = 0,
        fill_value          = 127,
        random_resized_crop = True,
        max_random_area     = 1.0,
        min_random_area     = 0.08,
        max_aspect_ratio    = 4.0 / 3.0,
        min_aspect_ratio    = 3.0 / 4.0,
        brightness          = 0.4,
        contrast            = 0.4,
        saturation          = 0.4,
        mean_r              = 123.68,
        mean_g              = 116.28,
        mean_b              = 103.53,
        std_r               = 58.395,
        std_g               = 57.12,
        std_b               = 57.375,
        pca_noise           = 0.1,
        scale               = 1,
        inter_method        = 2,
        rand_mirror         = True,
        shuffle             = True,
        shuffle_chunk_size  = 4096,
        preprocess_threads  = config.data_nthreads,
        prefetch_buffer     = 16,
        num_parts           = kv.num_workers,
        part_index          = kv.rank)
    # Validation pipeline: deterministic resize, no augmentation.
    val = mx.io.ImageRecordIter(
        path_imgrec         = os.path.join(data_dir, "val.rec"),
        label_width         = 1,
        data_name           = 'data',
        label_name          = 'softmax_label',
        resize              = 256,
        batch_size          = batch_size,
        data_shape          = image_shape,
        mean_r              = 123.68,
        mean_g              = 116.28,
        mean_b              = 103.53,
        std_r               = 58.395,
        std_g               = 57.12,
        std_b               = 57.375,
        scale               = 1,
        inter_method        = 2,
        rand_crop           = False,
        rand_mirror         = False,
        preprocess_threads  = 8,
        num_parts           = kv.num_workers,
        part_index          = kv.rank)
    return train, val, num_examples
def multiple_imagenet_iterator(data_dir, batch_size, num_parts, image_shape, data_nthread):
    """Create ImageNet iterators using MultipleDataIter for training.

    Training data is read through `num_parts` parallel record iterators and
    merged per batch; validation uses a single, unsharded ImageRecordIter.

    Returns:
        (train, val, num_examples).
    """
    num_examples = 1281167  # size of the ImageNet-1k training set
    train = MultipleDataIter(os.path.join(data_dir, "train.rec"), batch_size, num_parts, image_shape, data_nthread)
    # Validation pipeline: deterministic resize, no augmentation, no sharding.
    val = mx.io.ImageRecordIter(
        path_imgrec         = os.path.join(data_dir, "val.rec"),
        label_width         = 1,
        data_name           = 'data',
        label_name          = 'softmax_label',
        resize              = 256,
        batch_size          = batch_size,
        data_shape          = image_shape,
        mean_r              = 123.68,
        mean_g              = 116.28,
        mean_b              = 103.53,
        std_r               = 58.395,
        std_g               = 57.12,
        std_b               = 57.375,
        scale               = 1,
        inter_method        = 2,
        rand_crop           = False,
        rand_mirror         = False,
        preprocess_threads  = 8,
        num_parts           = 1,
        part_index          = 0)
    return train, val, num_examples
| [
"mxnet.Context",
"mxnet.io.DataDesc",
"os.path.join",
"numpy.random.randint",
"numpy.random.uniform",
"mxnet.io.DataBatch",
"mxnet.io.ImageRecordIter",
"mxnet.nd.concat"
] | [((389, 441), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_classes', '[self.batch_size]'], {}), '(0, num_classes, [self.batch_size])\n', (406, 441), True, 'import numpy as np\n'), ((458, 494), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'data_shape'], {}), '(-1, 1, data_shape)\n', (475, 494), True, 'import numpy as np\n'), ((3677, 3817), 'mxnet.io.DataBatch', 'DataBatch', ([], {'data': '(total_data,)', 'label': '(total_label,)', 'pad': '(0)', 'index': 'None', 'provide_data': 'self.provide_data', 'provide_label': 'self.provide_label'}), '(data=(total_data,), label=(total_label,), pad=0, index=None,\n provide_data=self.provide_data, provide_label=self.provide_label)\n', (3686, 3817), False, 'from mxnet.io import DataBatch, DataIter\n'), ((4832, 4972), 'mxnet.io.DataBatch', 'DataBatch', ([], {'data': '(total_data,)', 'label': '(total_label,)', 'pad': '(0)', 'index': 'None', 'provide_data': 'self.provide_data', 'provide_label': 'self.provide_label'}), '(data=(total_data,), label=(total_label,), pad=0, index=None,\n provide_data=self.provide_data, provide_label=self.provide_label)\n', (4841, 4972), False, 'from mxnet.io import DataBatch, DataIter\n'), ((8192, 8227), 'os.path.join', 'os.path.join', (['data_dir', '"""train.rec"""'], {}), "(data_dir, 'train.rec')\n", (8204, 8227), False, 'import os\n'), ((777, 828), 'mxnet.io.DataDesc', 'mx.io.DataDesc', (['"""data"""', 'self.data.shape', 'self.dtype'], {}), "('data', self.data.shape, self.dtype)\n", (791, 828), True, 'import mxnet as mx\n'), ((889, 952), 'mxnet.io.DataDesc', 'mx.io.DataDesc', (['"""softmax_label"""', '(self.batch_size,)', 'self.dtype'], {}), "('softmax_label', (self.batch_size,), self.dtype)\n", (903, 952), True, 'import mxnet as mx\n'), ((1063, 1201), 'mxnet.io.DataBatch', 'DataBatch', ([], {'data': '(self.data,)', 'label': '(self.label,)', 'pad': '(0)', 'index': 'None', 'provide_data': 'self.provide_data', 'provide_label': 'self.provide_label'}), '(data=(self.data,), 
label=(self.label,), pad=0, index=None,\n provide_data=self.provide_data, provide_label=self.provide_label)\n', (1072, 1201), False, 'from mxnet.io import DataBatch, DataIter\n'), ((4136, 4209), 'mxnet.io.DataDesc', 'mx.io.DataDesc', (['"""data"""', '((self.batch_size,) + self.image_shape)', 'np.float32'], {}), "('data', (self.batch_size,) + self.image_shape, np.float32)\n", (4150, 4209), True, 'import mxnet as mx\n'), ((4271, 4334), 'mxnet.io.DataDesc', 'mx.io.DataDesc', (['"""softmax_label"""', '(self.batch_size,)', 'np.float32'], {}), "('softmax_label', (self.batch_size,), np.float32)\n", (4285, 4334), True, 'import mxnet as mx\n'), ((5727, 5762), 'os.path.join', 'os.path.join', (['data_dir', '"""train.rec"""'], {}), "(data_dir, 'train.rec')\n", (5739, 5762), False, 'import os\n'), ((7171, 7204), 'os.path.join', 'os.path.join', (['data_dir', '"""val.rec"""'], {}), "(data_dir, 'val.rec')\n", (7183, 7204), False, 'import os\n'), ((8346, 8379), 'os.path.join', 'os.path.join', (['data_dir', '"""val.rec"""'], {}), "(data_dir, 'val.rec')\n", (8358, 8379), False, 'import os\n'), ((555, 582), 'mxnet.Context', 'mx.Context', (['"""cpu_pinned"""', '(0)'], {}), "('cpu_pinned', 0)\n", (565, 582), True, 'import mxnet as mx\n'), ((646, 673), 'mxnet.Context', 'mx.Context', (['"""cpu_pinned"""', '(0)'], {}), "('cpu_pinned', 0)\n", (656, 673), True, 'import mxnet as mx\n'), ((2005, 2676), 'mxnet.io.ImageRecordIter', 'mx.io.ImageRecordIter', ([], {'path_imgrec': 'rec_path', 'label_width': '(1)', 'data_name': '"""data"""', 'label_name': '"""softmax_label"""', 'data_shape': 'image_shape', 'batch_size': '(batch_size // num_parts)', 'pad': '(0)', 'fill_value': '(127)', 'random_resized_crop': '(True)', 'max_random_area': '(1.0)', 'min_random_area': '(0.08)', 'max_aspect_ratio': '(4.0 / 3.0)', 'min_aspect_ratio': '(3.0 / 4.0)', 'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)', 'mean_r': '(123.68)', 'mean_g': '(116.28)', 'mean_b': '(103.53)', 'std_r': '(58.395)', 
'std_g': '(57.12)', 'std_b': '(57.375)', 'pca_noise': '(0.1)', 'scale': '(1)', 'inter_method': '(2)', 'rand_mirror': '(True)', 'shuffle': '(True)', 'shuffle_chunk_size': '(4096)', 'preprocess_threads': 'data_nthreads', 'prefetch_buffer': '(16)', 'num_parts': 'num_parts', 'part_index': 'i'}), "(path_imgrec=rec_path, label_width=1, data_name='data',\n label_name='softmax_label', data_shape=image_shape, batch_size=\n batch_size // num_parts, pad=0, fill_value=127, random_resized_crop=\n True, max_random_area=1.0, min_random_area=0.08, max_aspect_ratio=4.0 /\n 3.0, min_aspect_ratio=3.0 / 4.0, brightness=0.4, contrast=0.4,\n saturation=0.4, mean_r=123.68, mean_g=116.28, mean_b=103.53, std_r=\n 58.395, std_g=57.12, std_b=57.375, pca_noise=0.1, scale=1, inter_method\n =2, rand_mirror=True, shuffle=True, shuffle_chunk_size=4096,\n preprocess_threads=data_nthreads, prefetch_buffer=16, num_parts=\n num_parts, part_index=i)\n", (2026, 2676), True, 'import mxnet as mx\n'), ((3508, 3562), 'mxnet.nd.concat', 'mx.nd.concat', (['total_data', 'dataBatchs[i].data[0]'], {'dim': '(0)'}), '(total_data, dataBatchs[i].data[0], dim=0)\n', (3520, 3562), True, 'import mxnet as mx\n'), ((3593, 3649), 'mxnet.nd.concat', 'mx.nd.concat', (['total_label', 'dataBatchs[i].label[0]'], {'dim': '(0)'}), '(total_label, dataBatchs[i].label[0], dim=0)\n', (3605, 3649), True, 'import mxnet as mx\n'), ((4663, 4717), 'mxnet.nd.concat', 'mx.nd.concat', (['total_data', 'dataBatchs[i].data[0]'], {'dim': '(0)'}), '(total_data, dataBatchs[i].data[0], dim=0)\n', (4675, 4717), True, 'import mxnet as mx\n'), ((4748, 4804), 'mxnet.nd.concat', 'mx.nd.concat', (['total_label', 'dataBatchs[i].label[0]'], {'dim': '(0)'}), '(total_label, dataBatchs[i].label[0], dim=0)\n', (4760, 4804), True, 'import mxnet as mx\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>, University of Bristol, <EMAIL>
This programme will take an input array of peaks in 1D I vs q data (such as those returned from the finder programme),
and returns a dictionary of possible phases that the data can take on, along with the miller plane index and the peaks
used for that possible phase assignment. There are separate (but almost identical) methods for distinguishing cubic phases and
Lamellar/Inverse Hexagonal ones. It is recommended that having used the peak finding programme, the phase is attempted to be assigned
by using the number of peaks found in the data. In general from the author's experience, the La and HII phases produce fewer Bragg peaks,
such that if a condition were used along the lines of if len(peaks)<3: La_HII_possible_phases(peaks, etc) else: Q_possible_phases(peaks etc)
then there should be a good chance of assigning the correct phase. Otherwise there is a risk of simultaneously assigning the HII along
with a cubic one. Worst comes to worst... The old fashioned hand method won't fail...
The information passed to the dictionary at the end should be enough to plot I vs q data with information about which peak has been
indexed as which, along with information about the lattice parameter and phase. See the optional plot in the finder.py programme for
more of an idea about the kind of way that matplotlib can plot something like this, using a combination of plt.axvline and plt.text.
At the bottom of this programme there is an example set of data in a comment that can be run through to see what result to expect at the end.
"""
import numpy as np
"""
La_HII_possible_phases works similarly to Q_possible_phases, in that it uses a statistical methodology to work out which peaks can
be assigned to which phase. However, as fewer peaks are expected to be passed to this module, it simply determines the phase by finding
a consistent lattice parameter, and taking the longest assignment from La or HII given to it.
La_HII_possible_phases will return a dictionary keyed by phase name, with values of lattice parameter, hkl plane factors, and the peaks
correspondingly assigned.
pass the following parameters to this function:
peaks - an array of peaks that have previously been found elsewhere
"""
def La_HII_possible_phases(peaks):
    """Assign a lamellar (La) or inverse-hexagonal (HII) phase to a set of peaks.

    Every peak is indexed as every allowed reflection of both phases, producing
    a grid of candidate lattice parameters; the candidates are histogrammed and
    the phase contributing most members to the fullest bin wins.

    Returns a dict keyed by phase name ('La' or 'HII') whose value is
    (lattice parameter, reflection factors, assigned peaks); empty on a tie.
    """
    lam_ratios = np.array([1, 2, 3])[:, np.newaxis]
    hex_ratios = np.sqrt(np.array([1, 3, 4])[:, np.newaxis])

    # Candidate lattice parameters for every (reflection, peak) pairing.
    lam_grid = 2 * np.pi * (1 / peaks) * lam_ratios
    hex_grid = (2 / np.sqrt(3)) * 2 * np.pi * (1 / peaks) * hex_ratios
    lam_flat = lam_grid.flatten()
    hex_flat = hex_grid.flatten()
    values = np.concatenate((lam_flat, hex_flat))

    # Locate the histogram bin holding the most coincident candidates.
    hist, bin_edges = np.histogram(values, bins=2 * values.size)
    inds = np.digitize(values, bin_edges) - 1
    fullest = np.argmax(hist)
    members = np.where(inds == fullest)[0]
    lam_members = members[members < lam_flat.size]
    hex_members = members[members >= lam_flat.size]

    # Trace each member back to its (reflection row, peak column) origin.
    n_peaks = lam_grid.shape[1]
    lam_peaks = np.zeros(0)
    lam_factors = np.zeros(0)
    hex_peaks = np.zeros(0)
    hex_factors = np.zeros(0)
    for idx in lam_members:
        row, col = divmod(idx % lam_grid.size, n_peaks)
        lam_peaks = np.append(lam_peaks, peaks[col])
        lam_factors = np.append(lam_factors, lam_ratios[row, 0])
    for idx in hex_members:
        row, col = divmod(idx % hex_grid.size, n_peaks)
        hex_peaks = np.append(hex_peaks, peaks[col])
        hex_factors = np.append(hex_factors, hex_ratios[row, 0])

    # Report whichever phase explains more of the coincident candidates.
    phase_dict = {}
    if len(lam_peaks) > len(hex_peaks):
        phase_dict['La'] = np.mean(values[inds == fullest]), lam_factors, lam_peaks
    elif len(hex_peaks) > len(lam_peaks):
        phase_dict['HII'] = np.mean(values[inds == fullest]), hex_factors, hex_peaks
    return phase_dict
"""
Q_possible_phases works by creating matrices of lattice parameter values that can arise having declared that any peak that
has been found can be indexed as any miller index for any phase. These values are then collapsed into a single 1D array,
which is investigated as a histogram. The number of bins in the histogram is arbitrarily taken as twice the number of values,
so care should be taken. Peaks in the histogram will arise at the points where there are matching values
resulting from peaks being correctly indexed in the correct phase. The possible_phases takes a threshold number, such that
bins with more values in it than the threshold are considered to be possible phase values. This is due to the fact
that because of symmetry degeneracies, 'correct' phase values may arise from more than a single phase matrix. The values
in the bins which exceed threshold population are then investigated for their origins: which peak and index were
responsible for bringing them about?
The Q_possible_phases will return a dictionary, keyed through lattice parameters, with associated values of the phase (D=0, P=1, G=2),
the peaks that have been indexed, and the indicies assigned to the peak.
pass the following parameters to this function:
peaks - an array of peaks that have previously been found elsewhere
"""
def Q_possible_phases(peaks):
#define the characteristic peak ratios
QIID=np.array([2,3,4,6,8,9,10,11])[:,np.newaxis]
QIIP=np.array([2,4,6,8,10,12,14])[:,np.newaxis]
QIIG=np.array([6,8,14,16,20,22,24])[:,np.newaxis]
QIID_ratios=np.sqrt(QIID)
QIIP_ratios=np.sqrt(QIIP)
QIIG_ratios=np.sqrt(QIIG)
'''
1) create matrices of all possible lattice parameter values
2) flatten each matrix to one dimension
3) combine the matricies into one
'''
D_init = 2*np.pi*(1/peaks)*QIID_ratios
P_init = 2*np.pi*(1/peaks)*QIIP_ratios
G_init = 2*np.pi*(1/peaks)*QIIG_ratios
'''
n_D, n_P, n_G are arrays of integers running from 0 to the size of the respective initial arrays. They will be used later
on to determine the source of where matching lattice parameter values have arisen from.
'''
n_D=np.reshape(np.arange(0,np.size(D_init)),np.shape(D_init))
n_P=np.reshape(np.arange(0,np.size(P_init)),np.shape(P_init))
n_G=np.reshape(np.arange(0,np.size(G_init)),np.shape(G_init))
n=np.reshape(np.arange(0,np.size(np.ndarray.flatten(np.concatenate((n_D,n_G,n_P))))),np.shape(np.concatenate((n_D,n_G,n_P))))
D=np.ndarray.flatten(D_init)
P=np.ndarray.flatten(P_init)
G=np.ndarray.flatten(G_init)
values=np.concatenate((D,P,G))
#histogram the data so that we have some bins. bin number increase is arbitrary.
hist, bin_edges=np.histogram(values,bins=np.int(2*np.size(values)))
#digitise the data (see numpy docs for explanations)
inds=np.digitize(values,bin_edges)
#will return the possible phases, their lattice parameters, and the peaks and hkl index from which they arise as a dictionary.
phase_dict={}
for i in range(0, np.size(values)):
try:
#find the values from the values array which are actually present in each bin and put them in the values array
binned_values=values[np.where(inds==i)]
#this size filtering is completely arbitrary.
if np.size(binned_values)>5:
#trace where the values in the bin originated from in the arrays.
positions_array=np.zeros(0)
for k in range(0, np.size(binned_values)):
positions_array=np.append(positions_array,np.where(binned_values[k]==values)[0])
#look at the distribution of the origin of the arrays - they should be group dependent on the phase.
#D_sourced, P_sourced, G_sourced are the positions in the values array where the matching peaks have come from
final_pos_array=np.unique(positions_array)
#split the positions up into which cubic phase calculation they have come from.
D_factors=np.where(final_pos_array<np.size(D))[0][0:]
P_factors=(np.where(final_pos_array<=(np.size(P)+np.size(D))-1)[0][0:])[np.size(D_factors):]
G_factors=np.where(final_pos_array> (np.size(P)+np.size(D))-1)[0][0:]
#correspond the positions in the factors arrays to where they come from in the final positions array
D_sourced=final_pos_array[D_factors].astype(int)
P_sourced=final_pos_array[P_factors].astype(int)
G_sourced=final_pos_array[G_factors].astype(int)
'''
want to find where the matching phases have come from in the array to see which one is the real one.
e.g. np.mod(o_sourced[a],n) corrects the position in the o array for running the same length as the sourced array
then find where the value is the same to identify the row
then find from which ratio factor the peak originated from.
'''
D_sourced_factors=np.zeros(0,dtype=np.int)
P_sourced_factors=np.zeros(0,dtype=np.int)
G_sourced_factors=np.zeros(0,dtype=np.int)
D_sourced_peaks=np.zeros(0)
P_sourced_peaks=np.zeros(0)
G_sourced_peaks=np.zeros(0)
for a in range(0,len(D_sourced)):
D_array_position=D_sourced[a]
D_array_comparison_pos=np.mod(D_array_position,np.size(D))
D_position=np.where(D_array_comparison_pos==n)
D_hkl=QIID[D_position[0][0]][0]
D_peak_hkl=peaks[D_position[1][0]]
D_sourced_factors=np.append(D_sourced_factors,np.int(D_hkl))
D_sourced_peaks=np.append(D_sourced_peaks,D_peak_hkl)
for b in range(0,len(P_sourced)):
P_array_position=P_sourced[b]
P_array_comparison_pos=P_array_position-np.size(D)
P_position=np.where(P_array_comparison_pos==n)
P_hkl=QIIP[P_position[0][0]][0]
P_peak_hkl=peaks[P_position[1][0]]
P_sourced_factors=np.append(P_sourced_factors,np.int(P_hkl))
P_sourced_peaks=np.append(P_sourced_peaks,P_peak_hkl)
for c in range(0,len(G_sourced)):
G_array_position=G_sourced[c]
G_array_comparison_pos=G_array_position-np.size(P)-np.size(D)
G_position=np.where(G_array_comparison_pos==n)
G_hkl=QIIG[G_position[0][0]][0]
G_peak_hkl=peaks[G_position[1][0]]
G_sourced_factors=np.append(G_sourced_factors,np.int(G_hkl))
G_sourced_peaks=np.append(G_sourced_peaks,G_peak_hkl)
'''
Only save the phase (as keyed number: D=0, P=1,G=2), and related data to the returned dictionary if
there are more than 3 peaks in there.
As the coincidence of factors between the QIID and QIIP is high, attempt to clarify which phase
is actually present if the same factors have been assigned to the same peaks.
'''
if len(D_sourced_factors) >3 and len(P_sourced_factors) >3:
lp=np.mean((np.mean(values[D_sourced]),np.mean(values[P_sourced])))
#find which set of values is longer and which is shorter
if len(D_sourced_factors)>len(P_sourced_factors):
shorter_factors=P_sourced_factors
shorter_peaks=P_sourced_peaks
longer_factors=D_sourced_factors
longer_peaks=D_sourced_peaks
switch=0
else:
shorter_factors=D_sourced_factors
shorter_peaks=D_sourced_peaks
longer_factors=P_sourced_factors
longer_peaks=P_sourced_peaks
switch=1
#find which pairs of peaks and factors have been assigned.
matching_factors=np.intersect1d(shorter_factors,longer_factors)
matching_peaks=np.intersect1d(shorter_peaks,longer_peaks)
'''
if the shorter set of factors is completely incidental into the longer set, then
the phase can be assigned as being the longer set of factors.
'''
if (len(matching_factors)==len(shorter_factors)) and (len(matching_peaks)==len(shorter_peaks)):
phase_dict[switch]=lp,longer_factors,longer_peaks
elif len(D_sourced_factors) >3 and len(P_sourced_factors) <4:
phase_dict[0] = np.mean(values[D_sourced]), D_sourced_factors, D_sourced_peaks
elif len(D_sourced_factors) <4 and len(P_sourced_factors) >3:
phase_dict[1] = np.mean(values[P_sourced]), P_sourced_factors, P_sourced_peaks
if len(G_sourced_factors) >3:
phase_dict[2] = np.mean(values[G_sourced]), G_sourced_factors, G_sourced_peaks
except IndexError:
pass
return phase_dict
"""
projection_testing is the final clarification stage of identifying which of the possible identified phases are 'real'.
The phases are checked against a fundamental 'mode' that the lattice parameter and phase identified. From this fundamental
value, the peaks in q which should exist can be calculated. These proposed peaks are subsequently checked against the peaks
which actually exist in the data. This is done through constructing a difference matrix, populated by the differences between
the peaks in the projected and physical arrays. The matrix is then searched for where the value is very small - ie. the proposed
peak is present in the physical data. If all or all but one or two of the proposed peaks are present in the physical data,
then it is said that that phase proposed is real, and not a feature of degenerate symmetry in the data. NB! you might want to
change the number of peaks that are acceptably omissible depending on how successful you are. Alternatively: change the
number of peak indicies used for calculations throughout the code.
pass the following parameters to this function:
phase_array - the integer spacing ratios of the proposed phase that needs to be tested.
fundamental - the ratio of a peak value of a phase to the square root of its index. Defined in the main below as the average
of these values across a set of peaks in a proposed phase.
peak_array - the full set of peaks that have actually been physically found in the data, to test against a set of peaks
which should exist given the peaks present.
lo_q - the same low limit in q that was used to define the width in which peaks are to be found
"""
def Q_projection_testing(phase_array, fundamental, peak_array,lo_q):
    """Check whether a proposed phase assignment is physically real.

    Projects the expected peak positions q = sqrt(ratio) * fundamental for
    every spacing ratio of the proposed phase and counts how many of them are
    actually present in the measured peak list.

    Parameters
    ----------
    phase_array : array of int
        Integer spacing ratios of the proposed phase.
    fundamental : float
        Mean ratio of a measured peak position to sqrt(its assigned index).
    peak_array : array of float
        All peaks physically found in the data.
    lo_q : float
        Low-q limit that was used when finding peaks.

    Returns
    -------
    int
        1 if enough projected peaks are present in the data, else 0.  (The
        previous implementation fell through and implicitly returned None
        when the first projected peak was in range but too few matches were
        found; this now returns 0 explicitly so callers always get an int.)
    """
    # column vector so the subtraction below broadcasts into a
    # (projected x measured) difference matrix
    projected_values=(np.sqrt(phase_array)*fundamental)[:,np.newaxis]
    # the lowest projected peak must fall inside the q window that was searched
    if projected_values[0] <= lo_q:
        return 0
    # a near-zero entry in the difference matrix means a projected peak is
    # present in the data; the 0.001 tolerance and the >3 threshold are
    # empirical (all peaks present, or only one or two missing)
    matches=np.where(np.abs(np.subtract(projected_values,peak_array))<0.001)[0]
    return 1 if len(matches) > 3 else 0
"""
the main module runs the above modules, passing the required data from one to the other.
pass the following parameters to this function:
peaks - an array of peaks that have previously been found elsewhere
lo_q - the same low limit in q that was used to define the width in which peaks are to be found
"""
def Q_main(peaks,lo_q):
    """Identify which cubic phases (QIID / QIIP / QIIG) are genuinely present.

    Runs Q_possible_phases over the measured peaks, then confirms each
    candidate with Q_projection_testing.  Returns a dict keyed 'D'/'P'/'G'
    holding (lattice parameter, assigned ratios, assigned peaks).
    """
    # characteristic spacing ratios per cubic phase, keyed by the integer
    # phase codes produced by Q_possible_phases (0=D, 1=P, 2=G)
    ratio_table = {
        0: ('D', np.array([2,3,4,6,8,9,10,11])),
        1: ('P', np.array([2,4,6,8,10,12,14])),
        2: ('G', np.array([6,8,14,16,20,22,24])),
    }
    candidates = Q_possible_phases(peaks)
    confirmed = {}
    for key, data in candidates.items():
        label, ratios = ratio_table[key]
        # average q / sqrt(index) over the peaks assigned to this candidate
        fundamental = np.mean(data[2] / np.sqrt(data[1]))
        if Q_projection_testing(ratios, fundamental, peaks, lo_q) == 1:
            confirmed[label] = data[0], data[1], data[2]
    return confirmed
'''
start from the main: pass the low_q condition as the same value from finder.py, this will then perform the phase
assignment routines based on how many peaks were found. (see comment at top.)
'''
def main(peaks,lo_q):
    """Assign every measured peak to a phase, iterating until all (or all
    but one) peaks are accounted for.

    Fewer than four unassigned peaks are tested against the lamellar /
    inverse-hexagonal candidates, otherwise against the cubic phases.  Any
    peaks still unassigned after at most six passes are returned under the
    key 'unassigned_peaks'.
    """
    all_peaks = peaks
    ID = {}
    # at most six passes; a single leftover peak is tolerated
    for _ in range(6):
        if len(peaks) <= 1:
            break
        # choose what to test for based on how many peaks remain
        if len(peaks) < 4:
            ID.update(La_HII_possible_phases(peaks))
        else:
            ID.update(Q_main(peaks, lo_q))
        # collect every peak assigned so far, then retry on the remainder
        assigned = np.zeros(0)
        for key in ID:
            assigned = np.append(assigned, ID[key][2])
        peaks = np.setxor1d(assigned, all_peaks)
    # report whatever could not be assigned
    if len(peaks) > 0:
        ID['unassigned_peaks'] = peaks
    return ID
'''
#here is some example fake data which can be used to test the programme to see the expected output.
#there is a Bonnet ratio linked QIIP and QIID phase, demonstrating that the phases can be *both* correctly identified
#from a set of peaks passed to the main function in this programme
fundamental=0.06
QIIP=np.sqrt(np.array([2,4,6,8,10,12,14]))
QIIP_peaks=np.random.normal(QIIP*fundamental,0.0001)
QIID=np.sqrt(np.array([2,3,4,6,8,9,10]))
QIID_peaks=np.random.normal(QIID*fundamental*1.28,0.0001)
coexisting_Q_peaks=np.sort(np.concatenate((QIIP_peaks,QIID_peaks)))
#print('P peaks, exact and slightly randomised: ',QIIP*fundamental,QIIP_peaks)
#print('D peaks, exact and slightly randomised', QIID*fundamental*1.28, QIID_peaks)
#print('coexisting (randomised) D, P peaks: ', coexisting_Q_peaks)
La_test=np.array([0.09, 0.27])
test_La_Q_coex=np.sort(np.append(QIID_peaks,La_test))
Q_test=main(test_La_Q_coex,0.06)
print('\ndas ende', Q_test)
'''
| [
"numpy.intersect1d",
"numpy.mean",
"numpy.sqrt",
"numpy.unique",
"numpy.digitize",
"numpy.where",
"numpy.size",
"numpy.argmax",
"numpy.subtract",
"numpy.append",
"numpy.ndarray.flatten",
"numpy.zeros",
"numpy.array",
"numpy.concatenate",
"numpy.setxor1d",
"numpy.shape",
"numpy.int"
] | [((2601, 2628), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['La_init'], {}), '(La_init)\n', (2619, 2628), True, 'import numpy as np\n'), ((2638, 2666), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['HII_init'], {}), '(HII_init)\n', (2656, 2666), True, 'import numpy as np\n'), ((2685, 2710), 'numpy.concatenate', 'np.concatenate', (['(La, HII)'], {}), '((La, HII))\n', (2699, 2710), True, 'import numpy as np\n'), ((3135, 3146), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3143, 3146), True, 'import numpy as np\n'), ((3163, 3174), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3171, 3174), True, 'import numpy as np\n'), ((3190, 3201), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3198, 3201), True, 'import numpy as np\n'), ((3219, 3230), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3227, 3230), True, 'import numpy as np\n'), ((5829, 5842), 'numpy.sqrt', 'np.sqrt', (['QIID'], {}), '(QIID)\n', (5836, 5842), True, 'import numpy as np\n'), ((5860, 5873), 'numpy.sqrt', 'np.sqrt', (['QIIP'], {}), '(QIIP)\n', (5867, 5873), True, 'import numpy as np\n'), ((5891, 5904), 'numpy.sqrt', 'np.sqrt', (['QIIG'], {}), '(QIIG)\n', (5898, 5904), True, 'import numpy as np\n'), ((6797, 6823), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['D_init'], {}), '(D_init)\n', (6815, 6823), True, 'import numpy as np\n'), ((6831, 6857), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['P_init'], {}), '(P_init)\n', (6849, 6857), True, 'import numpy as np\n'), ((6865, 6891), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['G_init'], {}), '(G_init)\n', (6883, 6891), True, 'import numpy as np\n'), ((6910, 6935), 'numpy.concatenate', 'np.concatenate', (['(D, P, G)'], {}), '((D, P, G))\n', (6924, 6935), True, 'import numpy as np\n'), ((7173, 7203), 'numpy.digitize', 'np.digitize', (['values', 'bin_edges'], {}), '(values, bin_edges)\n', (7184, 7203), True, 'import numpy as np\n'), ((17305, 17341), 'numpy.array', 'np.array', (['[2, 3, 4, 6, 8, 9, 10, 11]'], {}), '([2, 
3, 4, 6, 8, 9, 10, 11])\n', (17313, 17341), True, 'import numpy as np\n'), ((17352, 17386), 'numpy.array', 'np.array', (['[2, 4, 6, 8, 10, 12, 14]'], {}), '([2, 4, 6, 8, 10, 12, 14])\n', (17360, 17386), True, 'import numpy as np\n'), ((17398, 17434), 'numpy.array', 'np.array', (['[6, 8, 14, 16, 20, 22, 24]'], {}), '([6, 8, 14, 16, 20, 22, 24])\n', (17406, 17434), True, 'import numpy as np\n'), ((2393, 2412), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2401, 2412), True, 'import numpy as np\n'), ((2796, 2826), 'numpy.digitize', 'np.digitize', (['values', 'bin_edges'], {}), '(values, bin_edges)\n', (2807, 2826), True, 'import numpy as np\n'), ((3096, 3113), 'numpy.shape', 'np.shape', (['La_init'], {}), '(La_init)\n', (3104, 3113), True, 'import numpy as np\n'), ((3471, 3499), 'numpy.append', 'np.append', (['La_peaks', 'La_peak'], {}), '(La_peaks, La_peak)\n', (3480, 3499), True, 'import numpy as np\n'), ((3519, 3548), 'numpy.append', 'np.append', (['La_factors', 'La_hkl'], {}), '(La_factors, La_hkl)\n', (3528, 3548), True, 'import numpy as np\n'), ((3799, 3829), 'numpy.append', 'np.append', (['HII_peaks', 'HII_peak'], {}), '(HII_peaks, HII_peak)\n', (3808, 3829), True, 'import numpy as np\n'), ((3850, 3881), 'numpy.append', 'np.append', (['HII_factors', 'HII_hkl'], {}), '(HII_factors, HII_hkl)\n', (3859, 3881), True, 'import numpy as np\n'), ((5654, 5690), 'numpy.array', 'np.array', (['[2, 3, 4, 6, 8, 9, 10, 11]'], {}), '([2, 3, 4, 6, 8, 9, 10, 11])\n', (5662, 5690), True, 'import numpy as np\n'), ((5708, 5742), 'numpy.array', 'np.array', (['[2, 4, 6, 8, 10, 12, 14]'], {}), '([2, 4, 6, 8, 10, 12, 14])\n', (5716, 5742), True, 'import numpy as np\n'), ((5761, 5797), 'numpy.array', 'np.array', (['[6, 8, 14, 16, 20, 22, 24]'], {}), '([6, 8, 14, 16, 20, 22, 24])\n', (5769, 5797), True, 'import numpy as np\n'), ((6491, 6507), 'numpy.shape', 'np.shape', (['D_init'], {}), '(D_init)\n', (6499, 6507), True, 'import numpy as np\n'), ((6558, 6574), 
'numpy.shape', 'np.shape', (['P_init'], {}), '(P_init)\n', (6566, 6574), True, 'import numpy as np\n'), ((6625, 6641), 'numpy.shape', 'np.shape', (['G_init'], {}), '(G_init)\n', (6633, 6641), True, 'import numpy as np\n'), ((7385, 7400), 'numpy.size', 'np.size', (['values'], {}), '(values)\n', (7392, 7400), True, 'import numpy as np\n'), ((19032, 19043), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (19040, 19043), True, 'import numpy as np\n'), ((19176, 19214), 'numpy.setxor1d', 'np.setxor1d', (['assigned_peaks', 'all_peaks'], {}), '(assigned_peaks, all_peaks)\n', (19187, 19214), True, 'import numpy as np\n'), ((2449, 2468), 'numpy.array', 'np.array', (['[1, 3, 4]'], {}), '([1, 3, 4])\n', (2457, 2468), True, 'import numpy as np\n'), ((3078, 3094), 'numpy.size', 'np.size', (['La_init'], {}), '(La_init)\n', (3085, 3094), True, 'import numpy as np\n'), ((6474, 6489), 'numpy.size', 'np.size', (['D_init'], {}), '(D_init)\n', (6481, 6489), True, 'import numpy as np\n'), ((6541, 6556), 'numpy.size', 'np.size', (['P_init'], {}), '(P_init)\n', (6548, 6556), True, 'import numpy as np\n'), ((6608, 6623), 'numpy.size', 'np.size', (['G_init'], {}), '(G_init)\n', (6615, 6623), True, 'import numpy as np\n'), ((6748, 6779), 'numpy.concatenate', 'np.concatenate', (['(n_D, n_G, n_P)'], {}), '((n_D, n_G, n_P))\n', (6762, 6779), True, 'import numpy as np\n'), ((16135, 16155), 'numpy.sqrt', 'np.sqrt', (['phase_array'], {}), '(phase_array)\n', (16142, 16155), True, 'import numpy as np\n'), ((19103, 19140), 'numpy.append', 'np.append', (['assigned_peaks', 'ID[key][2]'], {}), '(assigned_peaks, ID[key][2])\n', (19112, 19140), True, 'import numpy as np\n'), ((2763, 2778), 'numpy.size', 'np.size', (['values'], {}), '(values)\n', (2770, 2778), True, 'import numpy as np\n'), ((2872, 2887), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (2881, 2887), True, 'import numpy as np\n'), ((7575, 7594), 'numpy.where', 'np.where', (['(inds == i)'], {}), '(inds == i)\n', (7583, 7594), 
True, 'import numpy as np\n'), ((7670, 7692), 'numpy.size', 'np.size', (['binned_values'], {}), '(binned_values)\n', (7677, 7692), True, 'import numpy as np\n'), ((7825, 7836), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (7833, 7836), True, 'import numpy as np\n'), ((8296, 8322), 'numpy.unique', 'np.unique', (['positions_array'], {}), '(positions_array)\n', (8305, 8322), True, 'import numpy as np\n'), ((9566, 9591), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.int'}), '(0, dtype=np.int)\n', (9574, 9591), True, 'import numpy as np\n'), ((9626, 9651), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.int'}), '(0, dtype=np.int)\n', (9634, 9651), True, 'import numpy as np\n'), ((9686, 9711), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.int'}), '(0, dtype=np.int)\n', (9694, 9711), True, 'import numpy as np\n'), ((9762, 9773), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (9770, 9773), True, 'import numpy as np\n'), ((9807, 9818), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (9815, 9818), True, 'import numpy as np\n'), ((9852, 9863), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (9860, 9863), True, 'import numpy as np\n'), ((17570, 17593), 'numpy.sqrt', 'np.sqrt', (['phases[key][1]'], {}), '(phases[key][1])\n', (17577, 17593), True, 'import numpy as np\n'), ((6706, 6737), 'numpy.concatenate', 'np.concatenate', (['(n_D, n_G, n_P)'], {}), '((n_D, n_G, n_P))\n', (6720, 6737), True, 'import numpy as np\n'), ((7081, 7096), 'numpy.size', 'np.size', (['values'], {}), '(values)\n', (7088, 7096), True, 'import numpy as np\n'), ((7872, 7894), 'numpy.size', 'np.size', (['binned_values'], {}), '(binned_values)\n', (7879, 7894), True, 'import numpy as np\n'), ((10096, 10133), 'numpy.where', 'np.where', (['(D_array_comparison_pos == n)'], {}), '(D_array_comparison_pos == n)\n', (10104, 10133), True, 'import numpy as np\n'), ((10378, 10416), 'numpy.append', 'np.append', (['D_sourced_peaks', 'D_peak_hkl'], {}), '(D_sourced_peaks, D_peak_hkl)\n', 
(10387, 10416), True, 'import numpy as np\n'), ((10660, 10697), 'numpy.where', 'np.where', (['(P_array_comparison_pos == n)'], {}), '(P_array_comparison_pos == n)\n', (10668, 10697), True, 'import numpy as np\n'), ((10948, 10986), 'numpy.append', 'np.append', (['P_sourced_peaks', 'P_peak_hkl'], {}), '(P_sourced_peaks, P_peak_hkl)\n', (10957, 10986), True, 'import numpy as np\n'), ((11221, 11258), 'numpy.where', 'np.where', (['(G_array_comparison_pos == n)'], {}), '(G_array_comparison_pos == n)\n', (11229, 11258), True, 'import numpy as np\n'), ((11509, 11547), 'numpy.append', 'np.append', (['G_sourced_peaks', 'G_peak_hkl'], {}), '(G_sourced_peaks, G_peak_hkl)\n', (11518, 11547), True, 'import numpy as np\n'), ((13008, 13055), 'numpy.intersect1d', 'np.intersect1d', (['shorter_factors', 'longer_factors'], {}), '(shorter_factors, longer_factors)\n', (13022, 13055), True, 'import numpy as np\n'), ((13091, 13134), 'numpy.intersect1d', 'np.intersect1d', (['shorter_peaks', 'longer_peaks'], {}), '(shorter_peaks, longer_peaks)\n', (13105, 13134), True, 'import numpy as np\n'), ((2550, 2560), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2557, 2560), True, 'import numpy as np\n'), ((8607, 8625), 'numpy.size', 'np.size', (['D_factors'], {}), '(D_factors)\n', (8614, 8625), True, 'import numpy as np\n'), ((10052, 10062), 'numpy.size', 'np.size', (['D'], {}), '(D)\n', (10059, 10062), True, 'import numpy as np\n'), ((10326, 10339), 'numpy.int', 'np.int', (['D_hkl'], {}), '(D_hkl)\n', (10332, 10339), True, 'import numpy as np\n'), ((10617, 10627), 'numpy.size', 'np.size', (['D'], {}), '(D)\n', (10624, 10627), True, 'import numpy as np\n'), ((10896, 10909), 'numpy.int', 'np.int', (['P_hkl'], {}), '(P_hkl)\n', (10902, 10909), True, 'import numpy as np\n'), ((11178, 11188), 'numpy.size', 'np.size', (['D'], {}), '(D)\n', (11185, 11188), True, 'import numpy as np\n'), ((11457, 11470), 'numpy.int', 'np.int', (['G_hkl'], {}), '(G_hkl)\n', (11463, 11470), True, 'import numpy as 
np\n'), ((14073, 14099), 'numpy.mean', 'np.mean', (['values[G_sourced]'], {}), '(values[G_sourced])\n', (14080, 14099), True, 'import numpy as np\n'), ((16720, 16761), 'numpy.subtract', 'np.subtract', (['projected_values', 'peak_array'], {}), '(projected_values, peak_array)\n', (16731, 16761), True, 'import numpy as np\n'), ((3996, 4011), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (4005, 4011), True, 'import numpy as np\n'), ((7960, 7996), 'numpy.where', 'np.where', (['(binned_values[k] == values)'], {}), '(binned_values[k] == values)\n', (7968, 7996), True, 'import numpy as np\n'), ((11167, 11177), 'numpy.size', 'np.size', (['P'], {}), '(P)\n', (11174, 11177), True, 'import numpy as np\n'), ((12138, 12164), 'numpy.mean', 'np.mean', (['values[D_sourced]'], {}), '(values[D_sourced])\n', (12145, 12164), True, 'import numpy as np\n'), ((12165, 12191), 'numpy.mean', 'np.mean', (['values[P_sourced]'], {}), '(values[P_sourced])\n', (12172, 12191), True, 'import numpy as np\n'), ((13679, 13705), 'numpy.mean', 'np.mean', (['values[D_sourced]'], {}), '(values[D_sourced])\n', (13686, 13705), True, 'import numpy as np\n'), ((3420, 3430), 'numpy.size', 'np.size', (['n'], {}), '(n)\n', (3427, 3430), True, 'import numpy as np\n'), ((3747, 3757), 'numpy.size', 'np.size', (['n'], {}), '(n)\n', (3754, 3757), True, 'import numpy as np\n'), ((4138, 4153), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (4147, 4153), True, 'import numpy as np\n'), ((8499, 8509), 'numpy.size', 'np.size', (['D'], {}), '(D)\n', (8506, 8509), True, 'import numpy as np\n'), ((13892, 13918), 'numpy.mean', 'np.mean', (['values[P_sourced]'], {}), '(values[P_sourced])\n', (13899, 13918), True, 'import numpy as np\n'), ((3341, 3351), 'numpy.size', 'np.size', (['n'], {}), '(n)\n', (3348, 3351), True, 'import numpy as np\n'), ((3666, 3676), 'numpy.size', 'np.size', (['n'], {}), '(n)\n', (3673, 3676), True, 'import numpy as np\n'), ((8682, 8692), 'numpy.size', 'np.size', (['P'], {}), 
'(P)\n', (8689, 8692), True, 'import numpy as np\n'), ((8693, 8703), 'numpy.size', 'np.size', (['D'], {}), '(D)\n', (8700, 8703), True, 'import numpy as np\n'), ((8573, 8583), 'numpy.size', 'np.size', (['P'], {}), '(P)\n', (8580, 8583), True, 'import numpy as np\n'), ((8584, 8594), 'numpy.size', 'np.size', (['D'], {}), '(D)\n', (8591, 8594), True, 'import numpy as np\n')] |
import os
import numpy
def convert_to_numpy(arr, backend, device="cpu"):
    """Convert *arr* (or a nested list/tuple of arrays) to plain np.ndarray."""
    if isinstance(arr, (list, tuple)):
        # recurse element-wise through collections
        return [convert_to_numpy(item, backend, device) for item in arr]
    # deliberately stricter than isinstance: ndarray subclasses must not be
    # passed through untouched
    if type(arr) is numpy.ndarray:
        return arr
    if backend == "cupy":
        return arr.get()
    if backend in ("jax", "tensorflow", "aesara"):
        return numpy.asarray(arr)
    if backend == "pytorch":
        source = arr.cpu() if device == "gpu" else arr
        return numpy.asarray(source)
    raise RuntimeError(
        f"Got unexpected array / backend combination: {type(arr)} / {backend}"
    )
class BackendNotSupported(Exception):
    """Raised when a backend's setup generator fails to start (e.g. the
    backend package cannot be imported); see SetupContext.__enter__."""
    pass
class BackendConflict(Exception):
    """Raised when mutually exclusive backends are requested together,
    e.g. more than one GPU backend in GPU mode."""
    pass
def check_backend_conflicts(backends, device):
    """Raise BackendConflict if several GPU-capable backends are requested
    at once in GPU mode; a no-op on CPU."""
    if device != "gpu":
        return
    # numba / numpy / aesara never run on the GPU, everything else does
    gpu_backends = {b for b in backends if b not in ("numba", "numpy", "aesara")}
    if len(gpu_backends) > 1:
        raise BackendConflict(
            f"Can only use one GPU backend at the same time (got: {gpu_backends})"
        )
class SetupContext:
    """Context manager wrapping a backend setup generator.

    The wrapped generator performs setup (environment variables, imports),
    yields the imported module, and may run teardown code after its yield.
    Environment variables changed inside the context are restored on exit.
    """

    def __init__(self, f):
        self._f = f
        self._f_args = ((), {})

    def __call__(self, *args, **kwargs):
        # stash the call arguments; the generator only starts in __enter__
        self._f_args = (args, kwargs)
        return self

    def __enter__(self):
        # snapshot the environment so __exit__ can restore it
        self._env = dict(os.environ)
        args, kwargs = self._f_args
        self._f_iter = iter(self._f(*args, **kwargs))
        try:
            module = next(self._f_iter)
        except Exception as e:
            raise BackendNotSupported(str(e)) from None
        return module

    def __exit__(self, *args, **kwargs):
        # let the generator run any code placed after its yield
        try:
            next(self._f_iter)
        except StopIteration:
            pass
        # restore the environment *in place*: the previous implementation
        # rebound os.environ to the plain-dict snapshot, which silently
        # disabled os.putenv synchronization for all later code
        os.environ.clear()
        os.environ.update(self._env)
# decorator alias: wrapping a setup generator in SetupContext turns it into
# a context manager that yields the imported backend module
setup_function = SetupContext
# setup function definitions
@setup_function
def setup_numpy(device="cpu"):
    """Yield the numpy module, pinned to a single OpenMP thread."""
    import numpy

    os.environ["OMP_NUM_THREADS"] = "1"
    yield numpy
@setup_function
def setup_aesara(device="cpu"):
    """Yield the aesara module (CPU only; GPU runs go through JAX)."""
    os.environ["OMP_NUM_THREADS"] = "1"
    if device == "gpu":
        raise RuntimeError("aesara uses JAX on GPU")
    import aesara

    # clang needs this, aesara#127
    aesara.config.gcc__cxxflags = "-Wno-c++11-narrowing"
    yield aesara
@setup_function
def setup_numba(device="cpu"):
    """Yield the numba module, pinned to a single OpenMP thread."""
    os.environ["OMP_NUM_THREADS"] = "1"
    import numba

    yield numba
@setup_function
def setup_cupy(device="cpu"):
    """Yield the cupy module; only meaningful on a GPU."""
    if device == "gpu":
        import cupy

        yield cupy
    else:
        raise RuntimeError("cupy requires GPU mode")
@setup_function
def setup_jax(device="cpu"):
    """Yield the jax module configured for *device* ('cpu', 'gpu' or 'tpu')."""
    # limit XLA's CPU threading so runs are single-threaded
    os.environ.update(
        XLA_FLAGS=(
            "--xla_cpu_multi_thread_eigen=false "
            "intra_op_parallelism_threads=1 "
            "inter_op_parallelism_threads=1 "
        ),
    )
    # JAX picks its platform from this variable; must be set before import
    if device in ("cpu", "gpu"):
        os.environ.update(JAX_PLATFORM_NAME=device)
    import jax
    # NOTE(review): `from jax.config import config` was removed in newer JAX
    # releases -- pin the JAX version or use `jax.config` directly; confirm
    # against the environment this runs in.
    from jax.config import config
    if device == "tpu":
        config.update("jax_xla_backend", "tpu_driver")
        config.update("jax_backend_target", os.environ.get("JAX_BACKEND_TARGET"))
    if device != "tpu":
        # use 64 bit floats (not supported on TPU)
        config.update("jax_enable_x64", True)
    if device == "gpu":
        # fail fast if no GPU device is actually visible
        assert len(jax.devices()) > 0
    yield jax
@setup_function
def setup_pytorch(device="cpu"):
    """Yield torch, pinned to one OpenMP thread; verify CUDA when on GPU."""
    os.environ["OMP_NUM_THREADS"] = "1"
    import torch

    if device == "gpu":
        assert torch.cuda.is_available()
        assert torch.cuda.device_count() > 0
    yield torch
@setup_function
def setup_tensorflow(device="cpu"):
    """Yield tensorflow, limited to single-threaded op execution."""
    os.environ["OMP_NUM_THREADS"] = "1"
    import tensorflow as tf

    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(1)
    if device == "gpu":
        # fail fast if no physical GPU is visible
        assert tf.config.experimental.list_physical_devices("GPU")
    else:
        # hide all GPUs so CPU runs stay on the CPU
        tf.config.experimental.set_visible_devices([], "GPU")
    yield tf
# registry of supported backends: name -> SetupContext-wrapped setup generator
__backends__ = {
    "numpy": setup_numpy,
    "cupy": setup_cupy,
    "jax": setup_jax,
    "aesara": setup_aesara,
    "numba": setup_numba,
    "pytorch": setup_pytorch,
    "tensorflow": setup_tensorflow,
}
| [
"tensorflow.config.threading.set_intra_op_parallelism_threads",
"numpy.asarray",
"os.environ.get",
"os.environ.copy",
"torch.cuda.device_count",
"jax.devices",
"tensorflow.config.experimental.set_visible_devices",
"os.environ.update",
"torch.cuda.is_available",
"tensorflow.config.threading.set_int... | [((2151, 2189), 'os.environ.update', 'os.environ.update', ([], {'OMP_NUM_THREADS': '"""1"""'}), "(OMP_NUM_THREADS='1')\n", (2168, 2189), False, 'import os\n'), ((2275, 2313), 'os.environ.update', 'os.environ.update', ([], {'OMP_NUM_THREADS': '"""1"""'}), "(OMP_NUM_THREADS='1')\n", (2292, 2313), False, 'import os\n'), ((2588, 2626), 'os.environ.update', 'os.environ.update', ([], {'OMP_NUM_THREADS': '"""1"""'}), "(OMP_NUM_THREADS='1')\n", (2605, 2626), False, 'import os\n'), ((2884, 3022), 'os.environ.update', 'os.environ.update', ([], {'XLA_FLAGS': '"""--xla_cpu_multi_thread_eigen=false intra_op_parallelism_threads=1 inter_op_parallelism_threads=1 """'}), "(XLA_FLAGS=\n '--xla_cpu_multi_thread_eigen=false intra_op_parallelism_threads=1 inter_op_parallelism_threads=1 '\n )\n", (2901, 3022), False, 'import os\n'), ((3635, 3673), 'os.environ.update', 'os.environ.update', ([], {'OMP_NUM_THREADS': '"""1"""'}), "(OMP_NUM_THREADS='1')\n", (3652, 3673), False, 'import os\n'), ((3892, 3930), 'os.environ.update', 'os.environ.update', ([], {'OMP_NUM_THREADS': '"""1"""'}), "(OMP_NUM_THREADS='1')\n", (3909, 3930), False, 'import os\n'), ((3979, 4034), 'tensorflow.config.threading.set_inter_op_parallelism_threads', 'tf.config.threading.set_inter_op_parallelism_threads', (['(1)'], {}), '(1)\n', (4031, 4034), True, 'import tensorflow as tf\n'), ((4039, 4094), 'tensorflow.config.threading.set_intra_op_parallelism_threads', 'tf.config.threading.set_intra_op_parallelism_threads', (['(1)'], {}), '(1)\n', (4091, 4094), True, 'import tensorflow as tf\n'), ((507, 525), 'numpy.asarray', 'numpy.asarray', (['arr'], {}), '(arr)\n', (520, 525), False, 'import numpy\n'), ((728, 746), 'numpy.asarray', 'numpy.asarray', (['arr'], {}), '(arr)\n', (741, 746), False, 'import numpy\n'), ((791, 809), 'numpy.asarray', 'numpy.asarray', (['arr'], {}), '(arr)\n', (804, 809), False, 'import numpy\n'), ((1581, 1598), 'os.environ.copy', 'os.environ.copy', ([], {}), 
'()\n', (1596, 1598), False, 'import os\n'), ((3124, 3167), 'os.environ.update', 'os.environ.update', ([], {'JAX_PLATFORM_NAME': 'device'}), '(JAX_PLATFORM_NAME=device)\n', (3141, 3167), False, 'import os\n'), ((3251, 3297), 'jax.config.config.update', 'config.update', (['"""jax_xla_backend"""', '"""tpu_driver"""'], {}), "('jax_xla_backend', 'tpu_driver')\n", (3264, 3297), False, 'from jax.config import config\n'), ((3464, 3501), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (3477, 3501), False, 'from jax.config import config\n'), ((3746, 3771), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3769, 3771), False, 'import torch\n'), ((4135, 4186), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (4179, 4186), True, 'import tensorflow as tf\n'), ((4225, 4278), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['[]', '"""GPU"""'], {}), "([], 'GPU')\n", (4267, 4278), True, 'import tensorflow as tf\n'), ((661, 679), 'numpy.asarray', 'numpy.asarray', (['arr'], {}), '(arr)\n', (674, 679), False, 'import numpy\n'), ((3342, 3378), 'os.environ.get', 'os.environ.get', (['"""JAX_BACKEND_TARGET"""'], {}), "('JAX_BACKEND_TARGET')\n", (3356, 3378), False, 'import os\n'), ((3787, 3812), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3810, 3812), False, 'import torch\n'), ((3546, 3559), 'jax.devices', 'jax.devices', ([], {}), '()\n', (3557, 3559), False, 'import jax\n')] |
import numpy as np
from PIL import Image
def load_torch_tensor(filename, idx = 0, lineno = 0):
    """Load one whitespace-separated float vector from a torch text dump.

    Parameters
    ----------
    filename : str
        Path to the exported text file.
    idx : int
        Tensor index; when > 0, overrides *lineno* with 2 + 21*idx
        (tensors appear to be exported 21 lines apart -- inferred from
        this formula, TODO confirm against the exporter).
    lineno : int
        Zero-based line number to parse when idx == 0.

    Returns
    -------
    numpy.ndarray of float, or None when the line index is past the end
    of the file.
    """
    if idx > 0:
        lineno = 2 + 21 * idx
    with open(filename) as fp:
        for i, line in enumerate(fp):
            if i == lineno:
                # np.fromstring(text, sep=...) is deprecated; split the
                # line and parse the tokens instead (same float64 result)
                return np.array(line.split(), dtype=float)
    return None
def load_torch_network_parameter(filename, model, idx = 0, lineno = 0):
    """Read a flat parameter vector from a torch text dump and reshape it
    into the weight arrays of *model* (a Keras-style model exposing
    get_weights()).

    The model's weight arrays are overwritten in place and the list is
    returned.  4-D conv kernels and 2-D dense matrices are reshaped in the
    reversed (torch) axis order and permuted back; everything else
    (biases etc.) is copied flat.
    """
    params = load_torch_tensor(filename, idx, lineno)
    #print('PARAMCOUNT', params.shape)
    w = model.get_weights()
    # walk the flat vector, consuming a.size entries per weight array
    idx = 0
    for a in w:
        c = a.size
        #print(a.shape)
        if len(a.shape)==4:
            # conv kernel: reshape using the reversed shape, then permute
            # back -- assumes torch's layout relative to Keras; the [2,3,1,0]
            # axis order is inferred from usage, TODO confirm
            a[:] = np.transpose(params[idx:(idx+c)].reshape(a.shape[::-1]), [2,3,1,0])
            #print(a[2,1,1,0])
        elif len(a.shape)==2:
            # dense matrix: stored as the transpose of the Keras kernel
            a[:] = np.transpose(params[idx:(idx+c)].reshape(a.shape[::-1]), [1,0])
            #print(a[1,0])
        else:
            # 1-D arrays are copied straight through
            a[:] = params[idx:(idx+c)]
            #print(a[1])
        idx += c
    return w
def load_torch_model(filename, model):
    """Load torch-exported network parameters from *filename* into *model*."""
    model.set_weights(load_torch_network_parameter(filename, model, 1))
def load_transitions(filename):
    """Read one transition batch from a torch text dump.

    Returns (actions, rewards, terminals); rewards are truncated to the
    first 32 entries (the batch size used by load_step).
    """
    actions = load_torch_tensor(filename, 1)
    terminals = load_torch_tensor(filename, 2)
    rewards = load_torch_tensor(filename, 3)[:32]
    return actions, rewards, terminals
def load_step(loadBase, ACTION_COUNT, model, model_eval):
    """Load one recorded torch training step for cross-framework comparison.

    Reads the exported network (and optional target-network) weights into
    *model* / *model_eval*, 32 state and next-state samples of 4 stacked PNG
    frames each (scaled to [0,1]), the transition batch, and the exported
    gradient / delta / target tensors.  Everything is published as module
    globals and also returned as a tuple.
    """
    load_torch_model(loadBase + 'tokeras.network.params.t7', model)
    if not model_eval is None:
        load_torch_model(loadBase + 'tokeras.target.params.t7', model_eval)
    global ts, ts2, ta, tr, tterm, results, dw, delta, deltas, g, g2, targets, tmp, prediction, prediction_layer
    # current states: 32 samples, each a stack of 4 frames
    ts = []
    for I in range(32):
        im1 = Image.open(loadBase + 'image-s-'+str(I+1)+'-0.png')
        im2 = Image.open(loadBase + 'image-s-'+str(I+1)+'-1.png')
        im3 = Image.open(loadBase + 'image-s-'+str(I+1)+'-2.png')
        im4 = Image.open(loadBase + 'image-s-'+str(I+1)+'-3.png')
        ts.append([np.array(im1), np.array(im2), np.array(im3), np.array(im4)])
    #print(ts)
    # scale 8-bit pixel values to [0,1]
    ts = np.array(ts, dtype='f')/255.0
    # next states, same layout as ts
    ts2 = []
    for I in range(32):
        im1 = Image.open(loadBase + 'image-s2-'+str(I+1)+'-0.png')
        im2 = Image.open(loadBase + 'image-s2-'+str(I+1)+'-1.png')
        im3 = Image.open(loadBase + 'image-s2-'+str(I+1)+'-2.png')
        im4 = Image.open(loadBase + 'image-s2-'+str(I+1)+'-3.png')
        ts2.append([np.array(im1), np.array(im2), np.array(im3), np.array(im4)])
    #print(ts)
    ts2 = np.array(ts2, dtype='f')/255.0
    ta, tr, tterm = load_transitions(loadBase + 'tokeras.trans.t7')
    # torch actions are 1-based; shift to 0-based indices
    ta = ta.astype('int') - 1
    tterm = tterm.astype('int')
    results = load_torch_tensor(loadBase + 'tokeras.results.t7', lineno=17)
    #dw = load_torch_tensor(loadBase + 'tokeras.dw.t7', lineno=17)
    dw = load_torch_network_parameter(loadBase + 'tokeras.dw.t7', model, lineno=17)
    delta = load_torch_tensor(loadBase + 'tokeras.delta.t7', lineno=17)
    deltas = load_torch_network_parameter(loadBase + 'tokeras.deltas.t7', model, lineno=17)
    g = load_torch_network_parameter(loadBase + 'tokeras.g.t7', model, lineno=17)
    g2 = load_torch_network_parameter(loadBase + 'tokeras.g2.t7', model, lineno=17)
    targets = load_torch_tensor(loadBase + 'tokeras.targets.t7', lineno=17)
    tmp = load_torch_network_parameter(loadBase + 'tokeras.tmp.t7', model, lineno=17)
    # reshape the flat Q-value vectors into (batch, actions)
    results = np.resize(results, (32,ACTION_COUNT))
    targets = np.resize(targets, (32,ACTION_COUNT))
    #if not model_eval is None:
    #    prediction = model_eval.predict_on_batch(ts2)
    #    prediction_layer = model_layer.predict_on_batch(ts2)
    #    #print('Prediction difference: ',prediction-results)
    print('Shapes: ', results.shape, len(dw), len(deltas), delta.shape, targets.shape, len(tmp))
    return ts, ts2, ta, tr, tterm, results, dw, delta, deltas, g, g2, targets, tmp #, prediction, prediction_layer | [
"numpy.array",
"numpy.resize",
"numpy.fromstring"
] | [((3054, 3092), 'numpy.resize', 'np.resize', (['results', '(32, ACTION_COUNT)'], {}), '(results, (32, ACTION_COUNT))\n', (3063, 3092), True, 'import numpy as np\n'), ((3103, 3141), 'numpy.resize', 'np.resize', (['targets', '(32, ACTION_COUNT)'], {}), '(targets, (32, ACTION_COUNT))\n', (3112, 3141), True, 'import numpy as np\n'), ((1798, 1821), 'numpy.array', 'np.array', (['ts'], {'dtype': '"""f"""'}), "(ts, dtype='f')\n", (1806, 1821), True, 'import numpy as np\n'), ((2197, 2221), 'numpy.array', 'np.array', (['ts2'], {'dtype': '"""f"""'}), "(ts2, dtype='f')\n", (2205, 2221), True, 'import numpy as np\n'), ((214, 257), 'numpy.fromstring', 'np.fromstring', (['line'], {'dtype': '"""float"""', 'sep': '""" """'}), "(line, dtype='float', sep=' ')\n", (227, 257), True, 'import numpy as np\n'), ((1719, 1732), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (1727, 1732), True, 'import numpy as np\n'), ((1734, 1747), 'numpy.array', 'np.array', (['im2'], {}), '(im2)\n', (1742, 1747), True, 'import numpy as np\n'), ((1749, 1762), 'numpy.array', 'np.array', (['im3'], {}), '(im3)\n', (1757, 1762), True, 'import numpy as np\n'), ((1764, 1777), 'numpy.array', 'np.array', (['im4'], {}), '(im4)\n', (1772, 1777), True, 'import numpy as np\n'), ((2117, 2130), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (2125, 2130), True, 'import numpy as np\n'), ((2132, 2145), 'numpy.array', 'np.array', (['im2'], {}), '(im2)\n', (2140, 2145), True, 'import numpy as np\n'), ((2147, 2160), 'numpy.array', 'np.array', (['im3'], {}), '(im3)\n', (2155, 2160), True, 'import numpy as np\n'), ((2162, 2175), 'numpy.array', 'np.array', (['im4'], {}), '(im4)\n', (2170, 2175), True, 'import numpy as np\n')] |
import numpy as np
def phi(eingabe):
    """Build a polynomial from the digits of *eingabe*, print and return its roots.

    The digit at position ``check - 2`` (where ``check`` is the number of
    digits before the decimal point) is decremented, the last digit is
    incremented, and for single-digit integer parts a leading -1 coefficient
    is inserted; the resulting digit list is used as polynomial coefficients.

    Improvements over the original: the redundant ``str(res)`` conversion is
    gone (``res`` is already a string), negative indexing replaces
    ``liste[len(liste) - 1]``, and the roots are now *returned* as well as
    printed (backward compatible -- the old version returned None which no
    caller could use).
    """
    eingabe = str(eingabe)
    # Number of digits before the decimal point.
    check = len(eingabe.partition('.')[0])
    res = ''.join(filter(str.isdigit, eingabe))
    liste = [int(d) for d in res]
    liste[check - 2] -= 1
    if check == 1:
        liste.insert(0, -1)
    # NOTE(review): original comment asked why the last element changes --
    # this increments the constant coefficient by one.
    liste[-1] += 1
    p = np.poly1d(liste)
    roots = p.roots
    print(roots)
    return roots
phi(333) # enter the value to analyze here
| [
"numpy.poly1d"
] | [((399, 415), 'numpy.poly1d', 'np.poly1d', (['liste'], {}), '(liste)\n', (408, 415), True, 'import numpy as np\n')] |
#from styx_msgs.msg import TrafficLight
from keras.preprocessing import image
import numpy as np
from darkflow.net.build import TFNet
from styx_msgs.msg import TrafficLight
import cv2
class TLClassifier(object):
    """Traffic-light color classifier: tiny-YOLO detection + BGR color masks."""

    def __init__(self):
        # darkflow tiny-YOLO detector; 0.8 = GPU memory fraction.
        yolo_options = {"model": "cfg/tiny-yolo.cfg", "load": "tiny-yolo.weights", "threshold": 0.05, "gpu": 0.8}
        self.tfnet = TFNet(yolo_options)

    def get_classification(self, image):
        """Determine the color of the traffic light in *image*.

        Args:
            image (cv::Mat): BGR image possibly containing a traffic light.

        Returns:
            int: ID of the traffic light color (see styx_msgs/TrafficLight).
        """
        detections = self.tfnet.return_predict(image)
        # (lower BGR bound, upper BGR bound, label) for each light color.
        boundaries = [
            ([0,200,0], [255,255,100], "green"),
            ([0,0,200], [50,50,255], "red"),
            ([0,210,210], [80,255,255], "yellow")
        ]
        best_fraction = 0.0
        best_color = ""
        found_light = False
        for detection in detections:
            if detection["confidence"] > 0.3 and detection["label"] == "traffic light":
                found_light = True
                x0 = detection['topleft']['x']
                x1 = detection['bottomright']['x']
                y0 = detection['topleft']['y']
                y1 = detection['bottomright']['y']
                patch = image[y0:y1, x0:x1]
                pixel_count = patch.shape[0] * patch.shape[1]
                for (lower, upper, color) in boundaries:
                    mask = cv2.inRange(patch,
                                       np.array(lower, dtype="uint8"),
                                       np.array(upper, dtype="uint8"))
                    # Fraction of the crop that falls inside this color band.
                    fraction = (np.sum(mask) // 255) / pixel_count
                    if fraction > best_fraction and fraction >= 0.01:
                        best_fraction = fraction
                        best_color = color
        print("------------------------", best_color)
        if found_light:
            if best_color == "red":
                return TrafficLight.RED
            if best_color == "green":
                return TrafficLight.GREEN
            if best_color == "yellow":
                return TrafficLight.YELLOW
        return TrafficLight.UNKNOWN
| [
"cv2.inRange",
"numpy.array",
"darkflow.net.build.TFNet",
"numpy.sum"
] | [((399, 413), 'darkflow.net.build.TFNet', 'TFNet', (['options'], {}), '(options)\n', (404, 413), False, 'from darkflow.net.build import TFNet\n'), ((1962, 1992), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (1970, 1992), True, 'import numpy as np\n'), ((2023, 2053), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (2031, 2053), True, 'import numpy as np\n'), ((2083, 2118), 'cv2.inRange', 'cv2.inRange', (['crop_img', 'lower', 'upper'], {}), '(crop_img, lower, upper)\n', (2094, 2118), False, 'import cv2\n'), ((2143, 2155), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (2149, 2155), True, 'import numpy as np\n')] |
import numpy as np
from scipy import signal
from .base import VHRMethod
class LGI(VHRMethod):
    """Local Group Invariance (LGI) rPPG method.

    Projects the channel-by-time signal onto the orthogonal complement of
    its dominant left-singular vector (the illumination direction) and
    returns one row of the projected signal as the BVP estimate.
    """
    methodName = 'LGI'

    def __init__(self, **kwargs):
        super(LGI, self).__init__(**kwargs)

    def apply(self, X):
        """Return the BVP signal extracted from *X* (rows = color channels).

        Generalized: the projector is sized from X.shape[0] instead of the
        hard-coded 3, so inputs with any channel count work; behavior for
        the usual 3-channel RGB input is unchanged.
        """
        U, _, _ = np.linalg.svd(X)
        S = U[:, 0].reshape(1, -1)  # dominant direction, shape (1, n_channels)
        # Orthogonal projector onto the complement of S.
        P = np.identity(X.shape[0]) - np.matmul(S.T, S)
        Y = np.dot(P, X)
        bvp = Y[1, :]
        return bvp
"numpy.linalg.svd",
"numpy.dot",
"numpy.identity",
"numpy.matmul"
] | [((365, 381), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {}), '(X)\n', (378, 381), True, 'import numpy as np\n'), ((498, 510), 'numpy.dot', 'np.dot', (['P', 'X'], {}), '(P, X)\n', (504, 510), True, 'import numpy as np\n'), ((451, 465), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (462, 465), True, 'import numpy as np\n'), ((468, 485), 'numpy.matmul', 'np.matmul', (['S.T', 'S'], {}), '(S.T, S)\n', (477, 485), True, 'import numpy as np\n')] |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Draw 10 samples from a logistic distribution centred at loc=1 with scale=2.
arr = np.random.logistic(loc=1, scale=2, size=10)
print(arr)

# Larger sample using the default parameters (loc=0, scale=1).
arr = np.random.logistic(size=1000)
# seaborn.distplot is deprecated (removed in seaborn 0.14);
# distplot(arr, hist=False) is equivalent to a kernel-density plot.
sns.kdeplot(arr)
plt.show()
| [
"numpy.random.logistic",
"matplotlib.pyplot.show",
"seaborn.distplot"
] | [((81, 124), 'numpy.random.logistic', 'np.random.logistic', ([], {'loc': '(1)', 'scale': '(2)', 'size': '(10)'}), '(loc=1, scale=2, size=10)\n', (99, 124), True, 'import numpy as np\n'), ((144, 173), 'numpy.random.logistic', 'np.random.logistic', ([], {'size': '(1000)'}), '(size=1000)\n', (162, 173), True, 'import numpy as np\n'), ((200, 229), 'seaborn.distplot', 'sns.distplot', (['arr'], {'hist': '(False)'}), '(arr, hist=False)\n', (212, 229), True, 'import seaborn as sns\n'), ((230, 240), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (238, 240), True, 'import matplotlib.pyplot as plt\n')] |
import time
import math
import cv2
import numpy as np
def GetLocation(move_type, current_frame):
    """Locate the duck in *current_frame* via SIFT template matching.

    Matches SIFT features of the template 'DuckAll.png' against the frame,
    keeps ratio-test-filtered matches, drops outliers more than two standard
    deviations from the match centroid, and returns the mean (row, col)
    position; [-1, -1] is returned when nothing usable is found.

    Returns:
        list: one dict, {'coordinate': [row, col] or mean position,
        'move_type': move_type}.
    """
    #time.sleep(1) #artificial one second processing time
    #Use relative coordinates to the current position of the "Gun", defined as an integer below
    if move_type == "relative":
        # NOTE(review): action_space is not defined in this function --
        # presumably a module/environment global; confirm before relying on it.
        coordinate = action_space.sample()
    else:
        duck = cv2.cvtColor(cv2.imread('DuckAll.png'),cv2.COLOR_RGB2GRAY)
        sift = cv2.SIFT_create()
        frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
        #frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
        kp1, des1 = sift.detectAndCompute(duck,None)
        kp2, des2 = sift.detectAndCompute(frame,None)
        FLAN_INDEX_KDTREE = 0
        index_params = dict (algorithm = FLAN_INDEX_KDTREE, trees=5)
        search_params = dict (checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        # k=2 nearest neighbours so the ratio test below can be applied.
        matches = flann.knnMatch (des1, des2,k=2)
        positions = []
        cv2.imwrite("frame.png", frame)  # debug artifact: dumps every processed frame
        for m1, m2 in matches:
            if m1.distance < 0.58 * m2.distance:
                # Convert keypoint (x, y) to rounded (row, col).
                pos = np.array(kp2[m1.trainIdx].pt)
                pos1 = np.array(kp2[m1.trainIdx].pt)
                pos1[0] = round(pos[1])
                pos1[1] = round(pos[0])
                positions.append(pos1)
        #print("Number of Matches: ", len(positions))
        #coordinate = np.mean(positions, axis =0)
        #possy = np.expand_dims(np.array(positions),axis=0)
        if(len(positions) == 0):
            coordinate = [-1,-1]
        else:
            coordinate = np.mean(positions, axis =0)
        # NOTE(review): when positions is empty the outlier filtering below
        # still runs and np.mean([]) produces NaN with a RuntimeWarning.
        keypoints = []
        keypoints = positions.copy()
        # Find centroid
        centroid = np.mean(keypoints, axis = 0)
        # Compute the Euclidean distance of all points to the centroid
        EuclideanDistance = []
        for i in range(0, len(keypoints)):
            point = keypoints[i]
            a = point[0] - centroid[0]
            b = point[1] - centroid[1]
            distance = math.sqrt((a**2) + (b**2))
            EuclideanDistance.append(distance)
        EuclideanDistance = np.array(EuclideanDistance)
        mean, std = cv2.meanStdDev(EuclideanDistance)
        mean = mean[0][0]
        std = std[0][0]
        new_keypoints = []
        # Filter original keypoints: keep those within 2 std of the mean distance
        for ip in range(0, len(positions)):
            if EuclideanDistance[ip] <= mean + 2*std:
                new_keypoints.append(keypoints[ip])
        if(len(new_keypoints) == 0):
            coordinate = [-1,-1]
        else:
            coordinate = np.mean(new_keypoints, axis =0)
        #print(coordinate)
    return[{'coordinate' : coordinate, 'move_type' : move_type}]
| [
"cv2.meanStdDev",
"cv2.imwrite",
"numpy.mean",
"math.sqrt",
"numpy.array",
"cv2.SIFT_create",
"cv2.FlannBasedMatcher",
"cv2.cvtColor",
"cv2.imread"
] | [((436, 453), 'cv2.SIFT_create', 'cv2.SIFT_create', ([], {}), '()\n', (451, 453), False, 'import cv2\n'), ((471, 518), 'cv2.cvtColor', 'cv2.cvtColor', (['current_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(current_frame, cv2.COLOR_BGR2GRAY)\n', (483, 518), False, 'import cv2\n'), ((857, 907), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (878, 907), False, 'import cv2\n'), ((990, 1021), 'cv2.imwrite', 'cv2.imwrite', (['"""frame.png"""', 'frame'], {}), "('frame.png', frame)\n", (1001, 1021), False, 'import cv2\n'), ((366, 391), 'cv2.imread', 'cv2.imread', (['"""DuckAll.png"""'], {}), "('DuckAll.png')\n", (376, 391), False, 'import cv2\n'), ((1610, 1636), 'numpy.mean', 'np.mean', (['positions'], {'axis': '(0)'}), '(positions, axis=0)\n', (1617, 1636), True, 'import numpy as np\n'), ((1757, 1783), 'numpy.mean', 'np.mean', (['keypoints'], {'axis': '(0)'}), '(keypoints, axis=0)\n', (1764, 1783), True, 'import numpy as np\n'), ((2193, 2220), 'numpy.array', 'np.array', (['EuclideanDistance'], {}), '(EuclideanDistance)\n', (2201, 2220), True, 'import numpy as np\n'), ((2245, 2278), 'cv2.meanStdDev', 'cv2.meanStdDev', (['EuclideanDistance'], {}), '(EuclideanDistance)\n', (2259, 2278), False, 'import cv2\n'), ((1125, 1154), 'numpy.array', 'np.array', (['kp2[m1.trainIdx].pt'], {}), '(kp2[m1.trainIdx].pt)\n', (1133, 1154), True, 'import numpy as np\n'), ((1178, 1207), 'numpy.array', 'np.array', (['kp2[m1.trainIdx].pt'], {}), '(kp2[m1.trainIdx].pt)\n', (1186, 1207), True, 'import numpy as np\n'), ((2085, 2111), 'math.sqrt', 'math.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (2094, 2111), False, 'import math\n'), ((2698, 2728), 'numpy.mean', 'np.mean', (['new_keypoints'], {'axis': '(0)'}), '(new_keypoints, axis=0)\n', (2705, 2728), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from basic_class.basic_class import basic_class
import nibabel as nib
import numpy as np
class basic_operator(basic_class):
    """Base class for parameter-sweeping data operators.

    Subclasses implement process(); excute() applies it once per parameter
    in para_hub and concatenates the results.
    """

    def __init__(self, para_hub):
        super().__init__()
        self.para_hub = para_hub

    def process(self, input_data, para=None, others=None):
        # Hook: subclasses implement the actual transform here.
        pass

    def data_reshape(self, input_data):
        # Hook: identity by default; subclasses may reshape the input first.
        return input_data

    def excute(self, input_data, others=None):
        # (sic: "excute" is kept as-is for interface compatibility)
        reshaped = self.data_reshape(input_data)
        collected = []
        for para in self.para_hub:
            collected.extend(self.process(reshaped, para=para, others=others))
        return collected
class nib_smooth(basic_operator):
    """Gaussian-smooth volumes with nibabel; *para* is the FWHM in mm."""

    def __init__(self, para_hub):
        super().__init__(para_hub=para_hub)

    def process(self, input_data, para=None, others=None):
        """Smooth *input_data* (3D volume, or 4D stack of volumes) with FWHM *para*.

        *others* must provide the 'affine' and 'header' used to wrap the raw
        array so smoothing happens in world (mm) coordinates.
        """
        # "import nibabel" alone does not load the optional processing
        # submodule (it depends on scipy), so import it explicitly here.
        import nibabel.processing

        def _smooth(volume):
            # Wrap the raw array with the caller-supplied affine/header,
            # smooth, and return the data array.
            nii = nib.Nifti1Image(volume, others["affine"], others["header"])
            return nib.processing.smooth_image(nii, fwhm=para, mode='nearest').get_fdata()

        # special usage for [6, 256, 256, 80]-like data: smooth each volume.
        if len(input_data.shape) == 4:
            return [_smooth(input_data[idx, :, :, :]) for idx in range(input_data.shape[0])]
        return _smooth(input_data)
class gaussian_noise(basic_operator):
    """Additive white Gaussian noise; *para* scales the input's standard deviation."""

    def __init__(self, para_hub):
        super().__init__(para_hub=para_hub)

    def process(self, input_data, para=None, others=None):
        sigma = np.std(input_data) * para
        perturbation = np.random.normal(loc=0, scale=sigma, size=input_data.shape)
        return input_data + perturbation
class poisson_noise(basic_operator):
    """Additive Poisson noise; the rate is the mean of input_data * para."""

    def __init__(self, para_hub):
        super().__init__(para_hub=para_hub)

    def process(self, input_data, para=None, others=None):
        rate = np.mean(input_data * para)
        return input_data + np.random.poisson(size=input_data.shape, lam=rate)
class operation_interpreter(basic_class):
    """Instantiate the operator classes named in *op_conf* and run them on data.

    Operator classes are looked up by name in this module's globals(); each
    entry of *others_hub* supplies the per-operator auxiliary data.
    """

    def __init__(self, op_conf, others_hub):
        super().__init__()
        self.op_conf = op_conf
        self.others_hub = others_hub

    def apply(self, input_data):
        conf = self.op_conf
        collected = []
        if conf["n_method"] == 1:
            # Single operator: run it directly.
            spec = conf["method"][0]
            op = globals()[spec["name"]](spec["para"])
            collected = op.excute(input_data, others=self.others_hub[0])
        else:
            ops = [globals()[m["name"]](m["para"]) for m in conf["method"]]
            if conf["method_relation"] == "parallel":
                for pos, op in enumerate(ops):
                    collected.extend(op.excute(input_data, others=self.others_hub[pos]))
            # NOTE: any other method_relation falls through with an empty result.
        return np.asarray(collected)
| [
"numpy.mean",
"nibabel.processing.smooth_image",
"numpy.asarray",
"nibabel.Nifti1Image",
"numpy.std"
] | [((3056, 3079), 'numpy.asarray', 'np.asarray', (['return_data'], {}), '(return_data)\n', (3066, 3079), True, 'import numpy as np\n'), ((1316, 1379), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['input_data', "others['affine']", "others['header']"], {}), "(input_data, others['affine'], others['header'])\n", (1331, 1379), True, 'import nibabel as nib\n'), ((1102, 1164), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['temp_data', "others['affine']", "others['header']"], {}), "(temp_data, others['affine'], others['header'])\n", (1117, 1164), True, 'import nibabel as nib\n'), ((2051, 2077), 'numpy.mean', 'np.mean', (['(input_data * para)'], {}), '(input_data * para)\n', (2058, 2077), True, 'import numpy as np\n'), ((1406, 1470), 'nibabel.processing.smooth_image', 'nib.processing.smooth_image', (['temp_nii'], {'fwhm': 'para', 'mode': '"""nearest"""'}), "(temp_nii, fwhm=para, mode='nearest')\n", (1433, 1470), True, 'import nibabel as nib\n'), ((1734, 1752), 'numpy.std', 'np.std', (['input_data'], {}), '(input_data)\n', (1740, 1752), True, 'import numpy as np\n'), ((1200, 1264), 'nibabel.processing.smooth_image', 'nib.processing.smooth_image', (['temp_nii'], {'fwhm': 'para', 'mode': '"""nearest"""'}), "(temp_nii, fwhm=para, mode='nearest')\n", (1227, 1264), True, 'import nibabel as nib\n')] |
"""Computation of the dissimilarity representation of a set of objects
(streamlines) from a set of prototypes (streamlines) given a distance
function. Some prototype selection algorithms are available.
See <NAME>., <NAME>., <NAME>., The Approximation of
the Dissimilarity Projection, http://dx.doi.org/10.1109/PRNI.2012.13
Copyright 2017 <NAME>
MIT License
"""
from __future__ import division
import numpy as np
from dipy.tracking.distances import bundles_distances_mam
try:
from joblib import Parallel, delayed, cpu_count
joblib_available = True
except:
joblib_available = False
def furthest_first_traversal(tracks, k, distance, permutation=True):
    """This is the farthest first traversal (fft) algorithm which
    selects k streamlines out of a set of streamlines (tracks). This
    algorithm is known to be a good sub-optimal solution to the
    k-center problem, i.e. the k streamlines are sequentially selected
    in order to be far away from each other.

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    k : int
        the number of streamlines to select.
    distance : function
        a distance function between groups of streamlines, like
        dipy.tracking.distances.bundles_distances_mam
    permutation : bool
        True if you want to shuffle the streamlines first. No
        side-effect.

    Return
    ------
    idx : array of int
        an array of k indices of the k selected streamlines.

    Notes
    -----
    - Hochbaum and Shmoys, A Best Possible Heuristic for the k-Center
      Problem, Mathematics of Operations Research, 1985.
    - http://en.wikipedia.org/wiki/Metric_k-center

    See Also
    --------
    subset_furthest_first
    """
    if permutation:
        idx = np.random.permutation(len(tracks))
        tracks = tracks[idx]
    else:
        # Fix: np.int was removed in NumPy 1.24; the builtin int is equivalent.
        idx = np.arange(len(tracks), dtype=int)
    # Greedily grow the selection, always adding the streamline farthest
    # from the ones already chosen.
    T = [0]
    while len(T) < k:
        z = distance(tracks, tracks[T]).min(1).argmax()
        T.append(z)
    return idx[T]
def subset_furthest_first(tracks, k, distance, permutation=True, c=2.0):
    """The subset furthest first (sff) algorithm is a stochastic
    version of the furthest first traversal (fft) algorithm. Sff
    scales well on large sets of objects (streamlines) because it
    does not depend on len(tracks).

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    k : int
        the number of streamlines to select.
    distance : function
        a distance function between groups of streamlines, like
        dipy.tracking.distances.bundles_distances_mam
    permutation : bool
        True if you want to shuffle the streamlines first. No
        side-effect.
    c : float
        Parameter to tune the probability that the random subset of
        streamlines is sufficiently representative of tracks. Typically
        2.0-3.0.

    Return
    ------
    idx : array of int
        an array of k indices of the k selected streamlines.

    See Also
    --------
    furthest_first_traversal

    Notes
    -----
    See: Olivetti, Nguyen, Garyfallidis, The Approximation of the
    Dissimilarity Projection, PRNI 2012, doi:10.1109/PRNI.2012.13
    """
    # Subset size grows like c * k * log(k); at least one element.
    size = int(max(1, np.ceil(c * k * np.log(k))))
    if permutation:
        idx = np.random.permutation(len(tracks))[:size]
    else:
        # Fix: range(size) cannot be fancy-indexed by the integer array that
        # furthest_first_traversal returns below; use a NumPy index array.
        idx = np.arange(size)
    return idx[furthest_first_traversal(tracks[idx],
                                        k, distance,
                                        permutation=False)]
def dissimilarity(tracks, prototypes, distance, n_jobs=-1, verbose=False):
    """Compute the dissimilarity (distance) matrix between tracks and
    given prototypes. This function supports parallel (multicore)
    computation.

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    prototypes : iterable of objects
        The prototypes.
    distance : function
        Distance function between groups of streamlines.
    n_jobs : int
        If joblib is available, split the dissimilarity computation
        in n_jobs. If n_jobs is -1, then all available cpus/cores
        are used. The default value is -1.
    verbose : bool
        If true prints some messages. Default is False.

    Return
    ------
    dissimilarity_matrix : array (N, num_prototypes)

    See Also
    --------
    furthest_first_traversal, subset_furthest_first
    """
    if verbose:
        print("Computing the dissimilarity matrix.")

    # Check the cheap n_jobs condition first: single-job use then never
    # touches joblib at all.
    if n_jobs != 1 and joblib_available:
        if n_jobs is None or n_jobs == -1:
            n_jobs = cpu_count()

        if verbose:
            print("Parallel computation of the dissimilarity matrix: %s cpus." % n_jobs)

        if n_jobs > 1:
            # Fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
            tmp = np.linspace(0, len(tracks), n_jobs + 1).astype(int)
        else:  # corner case: joblib detected 1 cpu only.
            tmp = (0, len(tracks))

        chunks = zip(tmp[:-1], tmp[1:])
        dissimilarity_matrix = np.vstack(Parallel(n_jobs=n_jobs)(delayed(distance)(tracks[start:stop], prototypes) for start, stop in chunks))
    else:
        dissimilarity_matrix = distance(tracks, prototypes)

    if verbose:
        print("Done.")

    return dissimilarity_matrix
def compute_dissimilarity(tracks, num_prototypes=40,
                          distance=bundles_distances_mam,
                          prototype_policy='sff',
                          n_jobs=-1,
                          verbose=False):
    """Compute the dissimilarity (distance) matrix between tracks and
    prototypes, where prototypes are selected among the tracks with a
    given policy.

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    num_prototypes : int
        The number of prototypes. In most cases 40 is enough, which
        is the default value.
    distance : function
        Distance function between groups of streamlines. The
        default is bundles_distances_mam
    prototype_policy : string
        Shortname for the prototype selection policy: 'random',
        'fft' or 'sff' (default).
    n_jobs : int
        If joblib is available, split the dissimilarity computation
        in n_jobs. If n_jobs is -1, then all available cpus/cores
        are used. The default value is -1.
    verbose : bool
        If true prints some messages. Default is False.

    Raises
    ------
    ValueError
        If *prototype_policy* is not one of the supported policies.

    Return
    ------
    dissimilarity_matrix : array (N, num_prototypes)
    prototype_idx : array of int

    See Also
    --------
    furthest_first_traversal, subset_furthest_first
    """
    if verbose:
        print("Generating %s prototypes with policy %s." % (num_prototypes, prototype_policy))

    if prototype_policy == 'random':
        prototype_idx = np.random.permutation(len(tracks))[:num_prototypes]
    elif prototype_policy == 'fft':
        prototype_idx = furthest_first_traversal(tracks,
                                                 num_prototypes, distance)
    elif prototype_policy == 'sff':
        prototype_idx = subset_furthest_first(tracks, num_prototypes, distance)
    else:
        # Fix: was a bare "raise Exception" after an optional print; raise a
        # specific exception that always carries the offending policy name.
        raise ValueError("Prototype selection policy not supported: %s" % prototype_policy)

    prototypes = [tracks[i] for i in prototype_idx]
    dissimilarity_matrix = dissimilarity(tracks, prototypes, distance,
                                         n_jobs=n_jobs, verbose=verbose)
    return dissimilarity_matrix, prototype_idx
| [
"joblib.Parallel",
"joblib.delayed",
"numpy.log",
"joblib.cpu_count"
] | [((4970, 4981), 'joblib.cpu_count', 'cpu_count', ([], {}), '()\n', (4979, 4981), False, 'from joblib import Parallel, delayed, cpu_count\n'), ((5364, 5387), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (5372, 5387), False, 'from joblib import Parallel, delayed, cpu_count\n'), ((3426, 3435), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (3432, 3435), True, 'import numpy as np\n'), ((5388, 5405), 'joblib.delayed', 'delayed', (['distance'], {}), '(distance)\n', (5395, 5405), False, 'from joblib import Parallel, delayed, cpu_count\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 21:53:00 2018
@author: RomanGutin
"""
import numpy as np
import pandas as pd
# Sweep results per tuned variable: {name: DataFrame([sweep values, CV scores])},
# filled in by AM_W() and AM() below.
plot_data={}
#####AM_Tuning With Wavelet
def AM_W(x, first, last, steps):
    """Greedily tune the AM score of each amino acid (wavelet-transformed variant).

    For every amino acid in count_df.index, sweep candidate scores over
    [first, last] with step size *steps*, keep the candidate with the best
    cross-validation score (mutating the global ltw_AM in place), and store
    the sweep curve in plot_data.
    """
    # Fix: np.linspace requires an integer sample count (a float num raises
    # a TypeError on NumPy >= 1.18).
    sweep = list(np.linspace(first, last, int((last - first) / steps)))
    for acid in count_df.index:
        CrossValidation_Scores = []
        for score in sweep:
            A = x.copy()
            ltw_AM[acid] = score
            A.replace(ltw_AM, inplace=True)
            MHat_Transformed = pd.DataFrame(W(A), index=just_let.index)
            # Fix: the pMeas assignment was duplicated verbatim; one copy removed.
            MHat_Transformed['pMeas'] = nine_pep['pMeas']
            # NOTE(review): MHat_Transformed is built but CrossValidation runs
            # on A -- confirm whether the wavelet-transformed frame was intended.
            CrossValidation_Scores.append(CrossValidation(A, 10))
        ltw_AM[acid] = sweep[CrossValidation_Scores.index(max(CrossValidation_Scores))]
        plt.plot(sweep, CrossValidation_Scores)
        plt.title(str(acid))
        plt.show()
        plot_data[acid] = pd.DataFrame([sweep, CrossValidation_Scores])
#AM Tuned Scores Pre FM#
# The .npy files appear to hold pickled Python score dicts (loaded via
# .item()); allow_pickle=True is required since NumPy 1.16.3, where
# np.load switched to allow_pickle=False by default for security.
ltw_AM_w = np.load('AM Scores of Wavelet Transformed Pre FM.npy', allow_pickle=True).item()
ltw_AM_n = np.load('AM Scores Not Wavelet Transformed.npy', allow_pickle=True).item()
####AM Not Wavelet Transformed
def AM(Dataframe, dict_scores, first, last, steps):
    """Greedily tune per-variable scores (non-wavelet variant).

    For each variable in *dict_scores*, sweep candidate scores over
    [first, last] with step size *steps*, keep the candidate with the best
    cross-validation score (mutating *dict_scores* in place), and store the
    sweep curve in plot_data.
    """
    # Fix: np.linspace requires an integer sample count (a float num raises
    # a TypeError on NumPy >= 1.18).
    sweep = list(np.linspace(first, last, int((last - first) / steps)))
    for var in dict_scores.keys():
        print('Variable: '+ var)
        CrossValidation_Scores= []
        for score in sweep:
            A = Dataframe.copy()
            dict_scores[var]= score
            A.replace(dict_scores,inplace=True)
            A['pMeas']= nine_pep['pMeas']
            CrossValidation_Scores.append(CrossValidation(A,10))
            print(str(score) + ' ' + 'CrossValidation: ' + str(CrossValidation_Scores[-1]))
        dict_scores[var] = sweep[CrossValidation_Scores.index(max(CrossValidation_Scores))]
        plt.plot(sweep,CrossValidation_Scores)
        plt.title(str(var))
        plt.show()
        plot_data[var]= pd.DataFrame([sweep,CrossValidation_Scores])
"pandas.DataFrame",
"numpy.linspace",
"numpy.load"
] | [((942, 987), 'pandas.DataFrame', 'pd.DataFrame', (['[sweep, CrossValidation_Scores]'], {}), '([sweep, CrossValidation_Scores])\n', (954, 987), True, 'import pandas as pd\n'), ((216, 264), 'numpy.linspace', 'np.linspace', (['first', 'last', '((last - first) / steps)'], {}), '(first, last, (last - first) / steps)\n', (227, 264), True, 'import numpy as np\n'), ((1027, 1081), 'numpy.load', 'np.load', (['"""AM Scores of Wavelet Transformed Pre FM.npy"""'], {}), "('AM Scores of Wavelet Transformed Pre FM.npy')\n", (1034, 1081), True, 'import numpy as np\n'), ((1100, 1148), 'numpy.load', 'np.load', (['"""AM Scores Not Wavelet Transformed.npy"""'], {}), "('AM Scores Not Wavelet Transformed.npy')\n", (1107, 1148), True, 'import numpy as np\n'), ((1254, 1302), 'numpy.linspace', 'np.linspace', (['first', 'last', '((last - first) / steps)'], {}), '(first, last, (last - first) / steps)\n', (1265, 1302), True, 'import numpy as np\n'), ((2009, 2054), 'pandas.DataFrame', 'pd.DataFrame', (['[sweep, CrossValidation_Scores]'], {}), '([sweep, CrossValidation_Scores])\n', (2021, 2054), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 21:18:49 2019
@author: Yoshin
"""
from SignalBuilder.functions import *
import numpy as np
class Node:
    """A time marker in the piecewise-signal chain, linked to the Piece on
    each side (left = piece ending here, right = piece starting here)."""

    _time = None
    _left = None
    _right = None
    _nType = 'normal'

    def __init__(self, time=None, left_piece=None, right_piece=None, nType='normal'):
        # Fix: the original called self.setLeft()/self.setRight(), which are
        # not defined anywhere on Node (AttributeError); route through the
        # left/right properties instead.
        if left_piece is not None:
            self.left = left_piece
        else:
            self._left = None
        if right_piece is not None:
            self.right = right_piece
        else:
            self._right = None
        self.nType = nType
        self.time = time

    @property
    def time(self):
        return self._time

    @time.setter
    def time(self, time):
        self._time = time

    @property
    def nType(self):
        # Node kind: 'start', 'end' or 'normal'.
        return self._nType

    @nType.setter
    def nType(self, nType):
        self._nType = nType

    @property
    def left(self):
        return self._left

    @left.setter
    def left(self, left_piece):
        self._left = left_piece
        if self._left is not None:
            assert type(left_piece) is Piece, "Invalid Type. Must be object of type 'Piece'"
            # NOTE(review): this writes Piece._end, but Piece.end reads
            # Piece._endNode -- confirm which attribute is canonical.
            self._left._end = self

    @property
    def right(self):
        return self._right

    @right.setter
    def right(self, right_piece):
        self._right = right_piece
        if self._right is not None:
            assert type(right_piece) is Piece, "Invalid Type. Must be object of type 'Piece'"
            # NOTE(review): same mismatch as above (_start vs _startNode).
            self._right._start = self
class Piece:
    """One segment of a piecewise signal, bounded by a start and an end Node."""

    _fType = 'constant'
    _start = None
    _end = None

    def __init__(self, start_node=None, end_node=None, fType='constant'):
        # Registry of available segment shapes; extendable via addFunc().
        self._funcs = {
            'constant': Constant(),
            'ramp': Ramp(),
            'sinusoid': Sinusoid(),
            'square': Square()
        }
        self.fType = fType
        self.start = start_node
        self.end = end_node

    def addFunc(self, key, func):
        """Register an additional segment shape under *key*."""
        self._funcs[key] = func

    def getFunc(self, fType=None):
        """Return the function for *fType*, or for this piece's own type."""
        chosen = self._fType if fType is None else fType
        return self._funcs[chosen]

    @property
    def func(self):
        # The function currently selected for this piece.
        return self._funcs[self._fType]

    @property
    def fType(self):
        return self._fType

    @fType.setter
    def fType(self, fType):
        self._fType = fType

    @property
    def start(self):
        return self._startNode

    @start.setter
    def start(self, start_node):
        self._startNode = start_node
        if start_node is None:
            return
        assert type(start_node) is Node, "Invalid Type. Must be object of type 'Node'"
        # Back-link: this piece sits to the right of its start node.
        start_node._right = self

    @property
    def end(self):
        return self._endNode

    @end.setter
    def end(self, end_node):
        self._endNode = end_node
        if end_node is None:
            return
        assert type(end_node) is Node, "Invalid Type. Must be object of type 'Node'"
        # Back-link: this piece sits to the left of its end node.
        end_node._left = self

    def valid(self, x):
        """Elementwise mask: True where x lies within this piece's time span."""
        return (x >= self._startNode.time) & (x <= self._endNode.time)
class SignalBuilder:
    """Builds a piecewise signal as an alternating chain of Nodes and Pieces.

    The chain always begins with a 'start' Node and finishes with an 'end'
    Node; every adjacent pair of nodes is joined by exactly one Piece.

    Fixes relative to the original:
    - the node/piece state was held in *class* attributes, so every
      SignalBuilder instance shared one chain; it is now per-instance;
    - string comparisons used ``is`` (identity) instead of ``==``;
    - chainConfig() called ``obj.right()`` (a property, not a method) and a
      nonexistent ``obj.getEnd()``; it now mirrors trace();
    - genPiecew() passes an integer sample count to np.linspace.
    """

    def __init__(self):
        # Per-instance chain state (previously class-level and shared).
        self._startNode = Node(nType='start')
        self._endNode = Node(nType='end')
        self._nodes = [self._startNode, self._endNode]
        self._pieces = [Piece(self._startNode, self._endNode)]
        self._sampleFrequency = None

    @property
    def sampleFrequency(self):
        return self._sampleFrequency

    @sampleFrequency.setter
    def sampleFrequency(self, frequency):
        self._sampleFrequency = frequency

    @property
    def signalStart(self):
        return self._startNode.time

    @signalStart.setter
    def signalStart(self, t):
        self.setNodeTime(0, t)

    @property
    def signalEnd(self):
        return self._endNode.time

    @signalEnd.setter
    def signalEnd(self, t):
        self.setNodeTime(len(self._nodes) - 1, t)

    def setNodeTime(self, index, t):
        """Set the time of node *index*, asserting it stays strictly between
        its timed neighbours."""
        myNode = self._nodes[index]
        left = None
        right = None
        # NOTE(review): these loops keep overwriting, so they end up with the
        # farthest timed neighbour, not the nearest -- confirm the intent.
        for node in self._nodes[:index][::-1]:
            if node.time is not None:
                left = node
        for node in self._nodes[index + 1:]:
            if node.time is not None:
                right = node
        if left is not None:
            assert t > left.time, "Invalid Time"
        if right is not None:
            assert t < right.time, "Invalid Time"
        myNode.time = t

    @property
    def nodes(self):
        return self._nodes

    @property
    def pieces(self):
        return self._pieces

    def insertNode(self, index, t=None):
        """Insert a new Node at *index*, with a fresh Piece to its right."""
        assert 0 < index < len(self._nodes), "Invalid Node Index"
        newNode = Node(time=t)
        newPiece = Piece()
        newPiece.start = newNode
        self._nodes.insert(index, newNode)
        self._pieces.insert(index, newPiece)
        self._pieces[index - 1].end = newNode
        newPiece.end = self._nodes[index + 1]

    def deleteNode(self, index, right=True):
        """Remove node *index* along with its right (or left) Piece and relink."""
        assert index != 0 and index != len(self._nodes) - 1, "Cannot Delete Start or End Node"
        delNode = self._nodes[index]
        if right:
            delPiece = delNode.right
            oldPiece = delNode.left
            oldNode = delPiece.end
            # Link together remaining unlinked node and piece.
            oldPiece.end = oldNode
        else:
            delPiece = delNode.left
            oldPiece = delNode.right
            oldNode = delPiece.start
            # Link together remaining unlinked node and piece.
            oldPiece.start = oldNode
        self._nodes.remove(delNode)
        self._pieces.remove(delPiece)

    def trace(self, report=False):
        """Walk the chain from the start node; return nodes and pieces in order."""
        obj = self._startNode
        trace = []
        while 1:
            if type(obj) is Node:
                trace.append(obj)
                if obj.nType == 'end':  # was "is 'end'": identity test on str literals
                    break
                obj = obj.right
            if type(obj) is Piece:
                trace.append(obj)
                obj = obj.end
        return trace

    def checkNodeTimes(self, verbose=False):
        """Return the nodes whose time has not been set yet."""
        invalid = []
        for item in self.trace():
            if type(item) is Node:
                if item.time is None:
                    invalid.append(item)
        return invalid

    def report(self):
        """Print a human-readable dump of the node/piece chain."""
        for item in self.trace():
            if type(item) is Node:
                if item.nType == 'start':
                    print("Start Node:")
                elif item.nType == 'end':
                    print("End Node:")
                else:
                    print("Node:")
                print("  {}".format(item))
                print("  time: {}".format(item.time))
                print("----\n")
            if type(item) is Piece:
                print("Piece:")
                print("  {}".format(item))
                print("  Type: {}".format(item.fType))
                print("----\n")

    def genPiecew(self):
        """Sample the signal; returns (t, values, node_times)."""
        # np.linspace requires an integer sample count.
        num_samples = int((self._endNode.time - self._startNode.time) * self._sampleFrequency)
        t = np.linspace(self._startNode.time, self._endNode.time, num=num_samples)
        condlist = [piece.valid(t) for piece in self._pieces]
        funclist = [piece.getFunc().exec_ for piece in self._pieces]
        node_locations = [node.time for node in self._nodes]
        return t, np.piecewise(t, condlist, funclist), node_locations

    def chainConfig(self, start_node):
        """Adopt an externally built chain that begins at *start_node*."""
        obj = start_node
        nodes = []
        pieces = []
        while 1:
            if type(obj) is Node:
                nodes.append(obj)
                if obj.nType == 'end':
                    break
                obj = obj.right  # was obj.right(): right is a property
            if type(obj) is Piece:
                pieces.append(obj)
                obj = obj.end    # was obj.getEnd(), which does not exist
        self._nodes = nodes
        self._pieces = pieces

    def listConfig(self, nodes, pieces):
        pass
"numpy.piecewise",
"numpy.linspace"
] | [((7185, 7255), 'numpy.linspace', 'np.linspace', (['self._startNode.time', 'self._endNode.time'], {'num': 'num_samples'}), '(self._startNode.time, self._endNode.time, num=num_samples)\n', (7196, 7255), True, 'import numpy as np\n'), ((7469, 7504), 'numpy.piecewise', 'np.piecewise', (['t', 'condlist', 'funclist'], {}), '(t, condlist, funclist)\n', (7481, 7504), True, 'import numpy as np\n')] |
import numpy as np
from torch.utils import data
from PIL import Image
from PIL import ImageFile
import torch.backends.cudnn as cudnn
from torchvision import transforms
import os
def InfiniteSampler(n):
# i = 0
i = n - 1
order = np.random.permutation(n)
while True:
yield order[i]
i += 1
if i >= n:
np.random.seed()
order = np.random.permutation(n)
i = 0
class InfiniteSamplerWrapper(data.sampler.Sampler):
def __init__(self, data_source):
self.num_samples = len(data_source)
def __iter__(self):
return iter(InfiniteSampler(self.num_samples))
def __len__(self):
return 2 ** 31
cudnn.benchmark = True
Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError
ImageFile.LOAD_TRUNCATED_IMAGES = True # Disable OSError: image file is truncated
def train_transform():
transform_list = [
transforms.Resize(size=(512, 512)),
transforms.RandomCrop(256),
transforms.ToTensor()
]
return transforms.Compose(transform_list)
def train_transform2():
transform_list = [
transforms.Resize(size=(256, 256)),
transforms.ToTensor()
]
return transforms.Compose(transform_list)
class FlatFolderDataset(data.Dataset):
def __init__(self, root, transform):
super(FlatFolderDataset, self).__init__()
self.root = root
self.paths = os.listdir(self.root)
self.transform = transform
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(os.path.join(self.root, path)).convert('RGB')
img = self.transform(img)
return img
def __len__(self):
return len(self.paths)
def name(self):
return 'FlatFolderDataset' | [
"os.listdir",
"os.path.join",
"torchvision.transforms.RandomCrop",
"numpy.random.seed",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Compose",
"numpy.random.permutation"
] | [((241, 265), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (262, 265), True, 'import numpy as np\n'), ((1039, 1073), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (1057, 1073), False, 'from torchvision import transforms\n'), ((1213, 1247), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (1231, 1247), False, 'from torchvision import transforms\n'), ((920, 954), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(512, 512)'}), '(size=(512, 512))\n', (937, 954), False, 'from torchvision import transforms\n'), ((964, 990), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(256)'], {}), '(256)\n', (985, 990), False, 'from torchvision import transforms\n'), ((1000, 1021), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1019, 1021), False, 'from torchvision import transforms\n'), ((1130, 1164), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(256, 256)'}), '(size=(256, 256))\n', (1147, 1164), False, 'from torchvision import transforms\n'), ((1174, 1195), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1193, 1195), False, 'from torchvision import transforms\n'), ((1425, 1446), 'os.listdir', 'os.listdir', (['self.root'], {}), '(self.root)\n', (1435, 1446), False, 'import os\n'), ((351, 367), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (365, 367), True, 'import numpy as np\n'), ((388, 412), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (409, 412), True, 'import numpy as np\n'), ((1575, 1604), 'os.path.join', 'os.path.join', (['self.root', 'path'], {}), '(self.root, path)\n', (1587, 1604), False, 'import os\n')] |
#!/usr/bin/env python
import cv2
import numpy as np
import yaml
import rospy
import rospkg
from camera_driver.srv import SetBlobInfo
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Point
from std_msgs.msg import Header
from distutils.version import LooseVersion
global joe_location_publisher
global image_publisher
global mask_publisher
global contours_publisher
global bridge
global seq
seq = 0
def name():
return "blob_detector"
# Color Mask window
#mask_window = 'Color Mask'
#cv2.namedWindow(mask_window, cv2.WINDOW_NORMAL)
#Picker
control_window = "Picker"
global window
##################
# Color filtering
#################
# Low cut off
global hue_low
def set_hue_low(new_value):
global hue_low
hue_low = new_value
global hue_high
def set_hue_high(new_value):
global hue_high
hue_high = new_value
global saturation_low
def set_saturation_low(new_value):
global saturation_low
saturation_low = new_value
global saturation_high
def set_saturation_high(new_value):
global saturation_high
saturation_high = new_value
global value_low
def set_value_low(new_value):
global value_low
value_low = new_value
global value_high
def set_value_high(new_value):
global value_high
value_high = new_value
def set_blob_info(msg):
rospy.loginfo("set_blob_info")
package_path = rospkg.RosPack().get_path('camera_driver')
full_path = "%s/calibrations/%s.yaml" % (package_path, name())
data = dict(
hue_low = hue_low,
hue_high = hue_high,
saturation_low = saturation_low,
saturation_high = saturation_high,
value_low = value_low,
value_high = value_high
)
with open(full_path, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
rospy.loginfo("set_blob_info: writing settings to %s" % full_path)
return True
def get_blob_info():
rospy.loginfo("get_blob_info")
global hue_low
global hue_high
global saturation_low
global saturation_high
global value_low
global value_high
package_path = rospkg.RosPack().get_path('camera_driver')
full_path = "%s/calibrations/%s.yaml" % (package_path, name())
rospy.loginfo("get_blob_info: loading settings from %s" % full_path)
yaml_file = open(full_path, "r")
blob_params = yaml.load(yaml_file)
hue_low = blob_params['hue_low']
hue_high = blob_params['hue_high']
saturation_low = blob_params['saturation_low']
saturation_high = blob_params['saturation_high']
value_low = blob_params['value_low']
value_high = blob_params['value_high']
rospy.loginfo("get_blob_info: setting hue_low to %s" % hue_low)
def process_image(image):
global seq
rospy.loginfo("process_image")
# Convert to opencv image
cv_image = bridge.imgmsg_to_cv2(image, desired_encoding="bgr8")
# process image and show
hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (hue_low, saturation_low, value_low), (hue_high, saturation_high, value_high))
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
if LooseVersion(cv2.__version__).version[0] == 2:
contours, hierarchy = cv2.findContours(np.copy(mask), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
_, contours, hierarchy = cv2.findContours(np.copy(mask), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# publish binary image
mask_message = bridge.cv2_to_imgmsg(mask, encoding="mono8")
mask_publisher.publish(mask_message)
# publish binary image with contours
rospy.loginfo("Countours: %s", len(contours))
contours_image = np.copy(cv_image)
cv2.drawContours(contours_image, contours, -1, (0,255,0), 3)
contours_message = bridge.cv2_to_imgmsg(contours_image, encoding="bgr8")
contours_publisher.publish(contours_message)
if len(contours) > 0:
c = max(contours, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
# Annotate image with circle for blob location
cv2.circle(cv_image, (int(x), int(y)), int(radius), (0, 255, 255), 3)
# Publish image coordinates of detected blob
seq += 1
header = Header(frame_id="nikon", seq=image.header.seq, stamp=image.header.stamp)
point = Point(x=x, y=y, z=1.)
pointStamped = PointStamped(point=point, header=header)
joe_location_publisher.publish(pointStamped)
image_message = bridge.cv2_to_imgmsg(cv_image, encoding="bgr8")
image_publisher.publish(image_message)
def detect_blobs():
global joe_location_publisher
global image_publisher
global mask_publisher
global contours_publisher
global bridge
global window
# Reload blob detector parameters
get_blob_info()
# Hoookup ROS stuff
rospy.init_node(name())
joe_location_publisher = rospy.Publisher("joe_location", PointStamped, queue_size = 2)
image_publisher = rospy.Publisher("image_blob", Image, queue_size = 2)
mask_publisher = rospy.Publisher("image_mask", Image, queue_size = 2)
contours_publisher = rospy.Publisher("image_contours", Image, queue_size = 2)
rospy.Subscriber("image", Image, process_image)
service = rospy.Service('set_blob_info', SetBlobInfo, set_blob_info)
# ROS to OpenCV
bridge = CvBridge()
# Bring up UI
show_picker = rospy.get_param("~show_picker", True)
rospy.loginfo("Show picker: %s" % show_picker)
if show_picker:
window = cv2.namedWindow(control_window)
cv2.createTrackbar('Hue_Low', control_window, hue_low, 179, set_hue_low)
cv2.createTrackbar('Hue_High', control_window, hue_high, 179, set_hue_high)
cv2.createTrackbar('Saturation_Low', control_window, saturation_low, 255, set_saturation_low)
cv2.createTrackbar('Saturation_High', control_window, saturation_high, 255, set_saturation_high)
cv2.createTrackbar('Value_Low', control_window, value_low, 255, set_value_low)
cv2.createTrackbar('Value_High', control_window, value_high, 255, set_value_high)
rospy.loginfo("Ready... 'spinning'")
r = rospy.Rate(10)
while not rospy.is_shutdown():
if show_picker:
cv2.waitKey(1)
r.sleep()
if __name__ == "__main__":
detect_blobs()
| [
"yaml.load",
"rospy.Rate",
"cv2.erode",
"rospy.Service",
"cv_bridge.CvBridge",
"rospy.Subscriber",
"cv2.waitKey",
"cv2.drawContours",
"yaml.dump",
"rospy.get_param",
"cv2.minEnclosingCircle",
"geometry_msgs.msg.Point",
"rospkg.RosPack",
"cv2.cvtColor",
"rospy.Publisher",
"cv2.createTra... | [((1403, 1433), 'rospy.loginfo', 'rospy.loginfo', (['"""set_blob_info"""'], {}), "('set_blob_info')\n", (1416, 1433), False, 'import rospy\n'), ((1894, 1960), 'rospy.loginfo', 'rospy.loginfo', (["('set_blob_info: writing settings to %s' % full_path)"], {}), "('set_blob_info: writing settings to %s' % full_path)\n", (1907, 1960), False, 'import rospy\n'), ((2003, 2033), 'rospy.loginfo', 'rospy.loginfo', (['"""get_blob_info"""'], {}), "('get_blob_info')\n", (2016, 2033), False, 'import rospy\n'), ((2304, 2372), 'rospy.loginfo', 'rospy.loginfo', (["('get_blob_info: loading settings from %s' % full_path)"], {}), "('get_blob_info: loading settings from %s' % full_path)\n", (2317, 2372), False, 'import rospy\n'), ((2428, 2448), 'yaml.load', 'yaml.load', (['yaml_file'], {}), '(yaml_file)\n', (2437, 2448), False, 'import yaml\n'), ((2719, 2782), 'rospy.loginfo', 'rospy.loginfo', (["('get_blob_info: setting hue_low to %s' % hue_low)"], {}), "('get_blob_info: setting hue_low to %s' % hue_low)\n", (2732, 2782), False, 'import rospy\n'), ((2831, 2861), 'rospy.loginfo', 'rospy.loginfo', (['"""process_image"""'], {}), "('process_image')\n", (2844, 2861), False, 'import rospy\n'), ((3005, 3046), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_image', 'cv2.COLOR_BGR2HSV'], {}), '(cv_image, cv2.COLOR_BGR2HSV)\n', (3017, 3046), False, 'import cv2\n'), ((3058, 3157), 'cv2.inRange', 'cv2.inRange', (['hsv', '(hue_low, saturation_low, value_low)', '(hue_high, saturation_high, value_high)'], {}), '(hsv, (hue_low, saturation_low, value_low), (hue_high,\n saturation_high, value_high))\n', (3069, 3157), False, 'import cv2\n'), ((3165, 3200), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (3174, 3200), False, 'import cv2\n'), ((3212, 3248), 'cv2.dilate', 'cv2.dilate', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (3222, 3248), False, 'import cv2\n'), ((3771, 3788), 'numpy.copy', 'np.copy', 
(['cv_image'], {}), '(cv_image)\n', (3778, 3788), True, 'import numpy as np\n'), ((3793, 3855), 'cv2.drawContours', 'cv2.drawContours', (['contours_image', 'contours', '(-1)', '(0, 255, 0)', '(3)'], {}), '(contours_image, contours, -1, (0, 255, 0), 3)\n', (3809, 3855), False, 'import cv2\n'), ((4984, 5043), 'rospy.Publisher', 'rospy.Publisher', (['"""joe_location"""', 'PointStamped'], {'queue_size': '(2)'}), "('joe_location', PointStamped, queue_size=2)\n", (4999, 5043), False, 'import rospy\n'), ((5068, 5118), 'rospy.Publisher', 'rospy.Publisher', (['"""image_blob"""', 'Image'], {'queue_size': '(2)'}), "('image_blob', Image, queue_size=2)\n", (5083, 5118), False, 'import rospy\n'), ((5142, 5192), 'rospy.Publisher', 'rospy.Publisher', (['"""image_mask"""', 'Image'], {'queue_size': '(2)'}), "('image_mask', Image, queue_size=2)\n", (5157, 5192), False, 'import rospy\n'), ((5220, 5274), 'rospy.Publisher', 'rospy.Publisher', (['"""image_contours"""', 'Image'], {'queue_size': '(2)'}), "('image_contours', Image, queue_size=2)\n", (5235, 5274), False, 'import rospy\n'), ((5281, 5328), 'rospy.Subscriber', 'rospy.Subscriber', (['"""image"""', 'Image', 'process_image'], {}), "('image', Image, process_image)\n", (5297, 5328), False, 'import rospy\n'), ((5343, 5401), 'rospy.Service', 'rospy.Service', (['"""set_blob_info"""', 'SetBlobInfo', 'set_blob_info'], {}), "('set_blob_info', SetBlobInfo, set_blob_info)\n", (5356, 5401), False, 'import rospy\n'), ((5440, 5450), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (5448, 5450), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((5488, 5525), 'rospy.get_param', 'rospy.get_param', (['"""~show_picker"""', '(True)'], {}), "('~show_picker', True)\n", (5503, 5525), False, 'import rospy\n'), ((5530, 5576), 'rospy.loginfo', 'rospy.loginfo', (["('Show picker: %s' % show_picker)"], {}), "('Show picker: %s' % show_picker)\n", (5543, 5576), False, 'import rospy\n'), ((6205, 6241), 'rospy.loginfo', 'rospy.loginfo', 
(['"""Ready... \'spinning\'"""'], {}), '("Ready... \'spinning\'")\n', (6218, 6241), False, 'import rospy\n'), ((6250, 6264), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (6260, 6264), False, 'import rospy\n'), ((1839, 1889), 'yaml.dump', 'yaml.dump', (['data', 'outfile'], {'default_flow_style': '(False)'}), '(data, outfile, default_flow_style=False)\n', (1848, 1889), False, 'import yaml\n'), ((4081, 4106), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (4103, 4106), False, 'import cv2\n'), ((4327, 4399), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""nikon"""', 'seq': 'image.header.seq', 'stamp': 'image.header.stamp'}), "(frame_id='nikon', seq=image.header.seq, stamp=image.header.stamp)\n", (4333, 4399), False, 'from std_msgs.msg import Header\n'), ((4416, 4438), 'geometry_msgs.msg.Point', 'Point', ([], {'x': 'x', 'y': 'y', 'z': '(1.0)'}), '(x=x, y=y, z=1.0)\n', (4421, 4438), False, 'from geometry_msgs.msg import Point\n'), ((4461, 4501), 'geometry_msgs.msg.PointStamped', 'PointStamped', ([], {'point': 'point', 'header': 'header'}), '(point=point, header=header)\n', (4473, 4501), False, 'from geometry_msgs.msg import PointStamped\n'), ((5619, 5650), 'cv2.namedWindow', 'cv2.namedWindow', (['control_window'], {}), '(control_window)\n', (5634, 5650), False, 'import cv2\n'), ((5659, 5731), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Hue_Low"""', 'control_window', 'hue_low', '(179)', 'set_hue_low'], {}), "('Hue_Low', control_window, hue_low, 179, set_hue_low)\n", (5677, 5731), False, 'import cv2\n'), ((5740, 5815), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Hue_High"""', 'control_window', 'hue_high', '(179)', 'set_hue_high'], {}), "('Hue_High', control_window, hue_high, 179, set_hue_high)\n", (5758, 5815), False, 'import cv2\n'), ((5824, 5921), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Saturation_Low"""', 'control_window', 'saturation_low', '(255)', 'set_saturation_low'], {}), "('Saturation_Low', 
control_window, saturation_low, 255,\n set_saturation_low)\n", (5842, 5921), False, 'import cv2\n'), ((5926, 6026), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Saturation_High"""', 'control_window', 'saturation_high', '(255)', 'set_saturation_high'], {}), "('Saturation_High', control_window, saturation_high, 255,\n set_saturation_high)\n", (5944, 6026), False, 'import cv2\n'), ((6031, 6109), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Value_Low"""', 'control_window', 'value_low', '(255)', 'set_value_low'], {}), "('Value_Low', control_window, value_low, 255, set_value_low)\n", (6049, 6109), False, 'import cv2\n'), ((6118, 6203), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Value_High"""', 'control_window', 'value_high', '(255)', 'set_value_high'], {}), "('Value_High', control_window, value_high, 255,\n set_value_high)\n", (6136, 6203), False, 'import cv2\n'), ((6279, 6298), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (6296, 6298), False, 'import rospy\n'), ((1453, 1469), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (1467, 1469), False, 'import rospkg\n'), ((2190, 2206), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (2204, 2206), False, 'import rospkg\n'), ((3355, 3368), 'numpy.copy', 'np.copy', (['mask'], {}), '(mask)\n', (3362, 3368), True, 'import numpy as np\n'), ((3470, 3483), 'numpy.copy', 'np.copy', (['mask'], {}), '(mask)\n', (3477, 3483), True, 'import numpy as np\n'), ((6336, 6350), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6347, 6350), False, 'import cv2\n'), ((3261, 3290), 'distutils.version.LooseVersion', 'LooseVersion', (['cv2.__version__'], {}), '(cv2.__version__)\n', (3273, 3290), False, 'from distutils.version import LooseVersion\n')] |
import numpy as np
# Accessing 1-D
arr = np.array([1, 2, 3, 4])
print(arr[0])
print(arr[2]+arr[3])
# Accessing 2-D
arr2 = np.array([[1,2,3,4,5], [6,7,8,9,10]])
print(arr2[0,1])
print(arr2[1, 3])
#Accessing 3_D
arr3 = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(arr3[0, 1, 0])
print(arr3[1, 0, 2]) | [
"numpy.array"
] | [((42, 64), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (50, 64), True, 'import numpy as np\n'), ((124, 169), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (132, 169), True, 'import numpy as np\n'), ((220, 281), 'numpy.array', 'np.array', (['[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n', (228, 281), True, 'import numpy as np\n')] |
"""Example use of Popsearch.
Tunes hyper-parameters of a feed-forward network to to predict
:math:`y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4`.
Hyper-parameters that we tune:
- Param init scale
- Number of layers
- Hidden layer size
- Activation function
- Learning rate
To run this example, you need numpy and the autograd package
(https://github.com/HIPS/autograd/), a lightweight autodiff
library for numpy. Run the example with
>>> python main.py
"""
import numpy as np
import autograd.numpy as auto_np
from autograd import grad
import autograd.numpy.random as npr
from autograd.misc.optimizers import adam, sgd
from popsearch import run, Parameter, Config
#### Toy data
def build_data(seed):
"""Build toy data set"""
rs = np.random.RandomState(seed)
def y(x):
""" y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4 """
x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4
xtrain = rs.rand(10000, 4)
xtest = rs.rand(1000, 4)
ytrain = y(xtrain) + rs.rand(10000) / 10
ytest = y(xtest) + rs.rand(1000) / 10
return xtrain, xtest, ytrain, ytest
#### Model
# from https://github.com/HIPS/autograd/blob/master/examples/neural_net_regression.py
def sigmoid(x):
return 1 / (1 + auto_np.exp(-x))
def relu(x):
return (x > 0) * x
def init_random_params(scale, layer_sizes, seed=0):
"""Build a list of (weights, biases) tuples, one for each layer."""
rs = npr.RandomState(seed)
return [(rs.randn(insize, outsize) * scale, # weight matrix
rs.randn(outsize) * scale) # bias vector
for insize, outsize in zip(layer_sizes[:-1], layer_sizes[1:])]
def nn_predict(params, inputs, nonlinearity=auto_np.tanh):
"""Forward pass of network"""
for W, b in params:
outputs = auto_np.dot(inputs, W) + b
inputs = nonlinearity(outputs)
return outputs
def mse(weights, inputs, targets, nonlinearity=auto_np.tanh):
"""Negative log-likelihood objective"""
predictions = nn_predict(weights, inputs, nonlinearity)
return auto_np.mean((targets - predictions) ** 2)
def pop_train(state):
"""The popsearch objective"""
ival = state.parameters['ival']
N = state.config.n_step * ival
bsz = state.parameters['bsz']
seed = state.parameters['seed']
rs = np.random.RandomState(seed)
nhid = state.parameters['nhid']
nlayers = state.parameters['nlayers']
init_size = state.parameters['init_size']
lr = state.parameters['lr']
optim = {'sgd': sgd, 'adam': adam}[state.parameters['optim']]
activation = {'sig': sigmoid, 'relu': relu, 'tanh': auto_np.tanh}[
state.parameters['activation']]
sizes = [4] + [nhid] * (nlayers - 1) + [1]
params = init_random_params(init_size, sizes, seed)
xtrain, xtest, ytrain, ytest = build_data(seed)
def objective(weights, t):
idx = rs.permutation(xtrain.shape[0])[:bsz]
batch_in = xtrain[idx]
batch_ta = xtrain[idx]
return mse(weights, batch_in, batch_ta, nonlinearity=activation)
def callback(weights, i, grad):
if i % ival == 0:
state.eval(mse(weights, xtest, ytest, nonlinearity=activation))
return
optim(grad(objective), params, step_size=lr, num_iters=N, callback=callback)
return
if __name__ == '__main__':
import os
logs = os.listdir('./log')
for log in logs:
if log.endswith('.log'):
log = './log/' + log
os.remove(log)
else:
fig = log
figs = os.listdir('./log/' + fig)
for fig in figs:
fig = './log/figs/' + fig
os.remove(fig)
params = [
Parameter('bsz', int, frozen=20),
Parameter('seed', int, frozen=0),
Parameter('nhid', int, support=list(range(2, 100))),
Parameter('nlayers', int, support=list(range(1, 3))),
Parameter('lr', float, minmax=(0.0001, 0.01)),
Parameter('init_size', float, minmax=(0.1, 1)),
Parameter('activation', str, support=['sig', 'tanh', 'relu']),
Parameter('optim', str, support=['sgd', 'adam']),
Parameter('ival', int, frozen=1),
]
config = Config(
callable=pop_train,
path='./log',
n_step=100,
n_pop=10,
n_job=4,
buffer=2,
max_val=1,
sleep=0.1,
p_force=0,
perturb=(0.1, 1.0, 0.95, 0.01),
eval_rule=('double', None, ((1.,)), 2),
perturb_rule=('sample', 'pareto', ((1.5,))),
plot=True,
plot_config={'save': False, 'semilogy': False},
seed=133,
)
run(config, params)
| [
"os.listdir",
"popsearch.run",
"autograd.grad",
"autograd.numpy.exp",
"popsearch.Config",
"autograd.numpy.dot",
"autograd.numpy.mean",
"numpy.random.RandomState",
"popsearch.Parameter",
"autograd.numpy.random.RandomState",
"os.remove"
] | [((786, 813), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (807, 813), True, 'import numpy as np\n'), ((1556, 1577), 'autograd.numpy.random.RandomState', 'npr.RandomState', (['seed'], {}), '(seed)\n', (1571, 1577), True, 'import autograd.numpy.random as npr\n'), ((2184, 2226), 'autograd.numpy.mean', 'auto_np.mean', (['((targets - predictions) ** 2)'], {}), '((targets - predictions) ** 2)\n', (2196, 2226), True, 'import autograd.numpy as auto_np\n'), ((2435, 2462), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2456, 2462), True, 'import numpy as np\n'), ((3473, 3492), 'os.listdir', 'os.listdir', (['"""./log"""'], {}), "('./log')\n", (3483, 3492), False, 'import os\n'), ((4316, 4631), 'popsearch.Config', 'Config', ([], {'callable': 'pop_train', 'path': '"""./log"""', 'n_step': '(100)', 'n_pop': '(10)', 'n_job': '(4)', 'buffer': '(2)', 'max_val': '(1)', 'sleep': '(0.1)', 'p_force': '(0)', 'perturb': '(0.1, 1.0, 0.95, 0.01)', 'eval_rule': "('double', None, (1.0,), 2)", 'perturb_rule': "('sample', 'pareto', (1.5,))", 'plot': '(True)', 'plot_config': "{'save': False, 'semilogy': False}", 'seed': '(133)'}), "(callable=pop_train, path='./log', n_step=100, n_pop=10, n_job=4,\n buffer=2, max_val=1, sleep=0.1, p_force=0, perturb=(0.1, 1.0, 0.95, \n 0.01), eval_rule=('double', None, (1.0,), 2), perturb_rule=('sample',\n 'pareto', (1.5,)), plot=True, plot_config={'save': False, 'semilogy': \n False}, seed=133)\n", (4322, 4631), False, 'from popsearch import run, Parameter, Config\n'), ((4749, 4768), 'popsearch.run', 'run', (['config', 'params'], {}), '(config, params)\n', (4752, 4768), False, 'from popsearch import run, Parameter, Config\n'), ((3337, 3352), 'autograd.grad', 'grad', (['objective'], {}), '(objective)\n', (3341, 3352), False, 'from autograd import grad\n'), ((3815, 3847), 'popsearch.Parameter', 'Parameter', (['"""bsz"""', 'int'], {'frozen': '(20)'}), "('bsz', int, frozen=20)\n", (3824, 
3847), False, 'from popsearch import run, Parameter, Config\n'), ((3857, 3889), 'popsearch.Parameter', 'Parameter', (['"""seed"""', 'int'], {'frozen': '(0)'}), "('seed', int, frozen=0)\n", (3866, 3889), False, 'from popsearch import run, Parameter, Config\n'), ((4022, 4067), 'popsearch.Parameter', 'Parameter', (['"""lr"""', 'float'], {'minmax': '(0.0001, 0.01)'}), "('lr', float, minmax=(0.0001, 0.01))\n", (4031, 4067), False, 'from popsearch import run, Parameter, Config\n'), ((4077, 4123), 'popsearch.Parameter', 'Parameter', (['"""init_size"""', 'float'], {'minmax': '(0.1, 1)'}), "('init_size', float, minmax=(0.1, 1))\n", (4086, 4123), False, 'from popsearch import run, Parameter, Config\n'), ((4133, 4194), 'popsearch.Parameter', 'Parameter', (['"""activation"""', 'str'], {'support': "['sig', 'tanh', 'relu']"}), "('activation', str, support=['sig', 'tanh', 'relu'])\n", (4142, 4194), False, 'from popsearch import run, Parameter, Config\n'), ((4204, 4252), 'popsearch.Parameter', 'Parameter', (['"""optim"""', 'str'], {'support': "['sgd', 'adam']"}), "('optim', str, support=['sgd', 'adam'])\n", (4213, 4252), False, 'from popsearch import run, Parameter, Config\n'), ((4262, 4294), 'popsearch.Parameter', 'Parameter', (['"""ival"""', 'int'], {'frozen': '(1)'}), "('ival', int, frozen=1)\n", (4271, 4294), False, 'from popsearch import run, Parameter, Config\n'), ((1366, 1381), 'autograd.numpy.exp', 'auto_np.exp', (['(-x)'], {}), '(-x)\n', (1377, 1381), True, 'import autograd.numpy as auto_np\n'), ((1920, 1942), 'autograd.numpy.dot', 'auto_np.dot', (['inputs', 'W'], {}), '(inputs, W)\n', (1931, 1942), True, 'import autograd.numpy as auto_np\n'), ((3592, 3606), 'os.remove', 'os.remove', (['log'], {}), '(log)\n', (3601, 3606), False, 'import os\n'), ((3662, 3688), 'os.listdir', 'os.listdir', (["('./log/' + fig)"], {}), "('./log/' + fig)\n", (3672, 3688), False, 'import os\n'), ((3776, 3790), 'os.remove', 'os.remove', (['fig'], {}), '(fig)\n', (3785, 3790), False, 'import 
os\n')] |
#!/usr/bin/python3
# Copyright © 2019 <NAME>
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
import numpy as np
import resamp
import wavio
# Combine a sample with a copy shifted up a third and a copy
# shifted down two octaves for a harmonizing effect.
# Play it for 5 seconds.
# Get some samples.
samples = wavio.readwav("loop.wav")
nsamples = len(samples)
# Minimum and maximum expected fundamental frequency of
# samples in Hz.
f_min = 110
f_max = 1720
# Minimum and maximum periods in samples.
s_max = 48000 // f_min
s_min = 48000 // f_max
# Do an FFT to try to find the period of the signal.
nfft = 2**14
nwin = 4 * s_max
windowed = np.hamming(nwin) * np.array(samples[:nwin])
spectrum = np.abs(np.fft.rfft(windowed, n=nfft))
imax = np.argmax(spectrum)
dc = np.abs(spectrum[0])
amax = np.abs(spectrum[imax])
fmax = np.fft.rfftfreq(nfft, d=1/48000)[imax]
pmax = int(48000 / fmax)
print(dc, amax, fmax, pmax)
# Maximum search for autocorrelator.
ac_samples = 2 * pmax
# Sample length for autocorrelator.
ac_length = ac_samples
# Do an autocorrelation to try to find a good place to
# end the samples so they loop.
cmax = None
umax = None
for t in range(ac_samples):
u = nsamples - ac_length - t
st = samples[:ac_length]
su = samples[u:u+ac_length]
corr = np.dot(st, su)
if cmax == None or corr > cmax:
cmax = corr
umax = u
print(cmax, nsamples - umax)
samples = samples[:umax + ac_length]
nsamples = len(samples)
# Size of lap window from beginning to end of samples.
lap_samples = 0
# Lap the samples.
for i in range(lap_samples):
c = i / (lap_samples - 1)
samples[i] *= 1 - c
samples[i] += c * samples[nsamples + i - lap_samples - 1]
# Use an interpolation window this size around each sample.
# Window should be odd.
window = 9
# Replicate the samples for 5 seconds.
nreplica = 5 * 48000
# We will skip through the samples ratio faster.
def make_harmony(ratio):
ratio *= 440 / fmax
cutoff = 20000 * min(1, ratio)
harmony = np.array([0] * nreplica, dtype=np.float)
for i in range(nreplica):
x = (i * ratio) % nsamples
harmony[i] = resamp.resamp(x, samples, cutoff, 48000, window)
return harmony
# Make a slightly truncated copy of the root.
root = make_harmony(1)
# A third is four semitones up from the root.
third = make_harmony(2**(4 / 12))
# Two octaves is 1/4 rate.
octaves_down = make_harmony(0.25)
# Mix the notes.
harmony = (root + third + octaves_down) / 3
wavio.writewav("harmony.wav", harmony)
| [
"numpy.fft.rfftfreq",
"numpy.abs",
"wavio.writewav",
"numpy.argmax",
"numpy.hamming",
"numpy.fft.rfft",
"wavio.readwav",
"numpy.array",
"numpy.dot",
"resamp.resamp"
] | [((412, 437), 'wavio.readwav', 'wavio.readwav', (['"""loop.wav"""'], {}), "('loop.wav')\n", (425, 437), False, 'import wavio\n'), ((845, 864), 'numpy.argmax', 'np.argmax', (['spectrum'], {}), '(spectrum)\n', (854, 864), True, 'import numpy as np\n'), ((870, 889), 'numpy.abs', 'np.abs', (['spectrum[0]'], {}), '(spectrum[0])\n', (876, 889), True, 'import numpy as np\n'), ((897, 919), 'numpy.abs', 'np.abs', (['spectrum[imax]'], {}), '(spectrum[imax])\n', (903, 919), True, 'import numpy as np\n'), ((2575, 2613), 'wavio.writewav', 'wavio.writewav', (['"""harmony.wav"""', 'harmony'], {}), "('harmony.wav', harmony)\n", (2589, 2613), False, 'import wavio\n'), ((745, 761), 'numpy.hamming', 'np.hamming', (['nwin'], {}), '(nwin)\n', (755, 761), True, 'import numpy as np\n'), ((764, 788), 'numpy.array', 'np.array', (['samples[:nwin]'], {}), '(samples[:nwin])\n', (772, 788), True, 'import numpy as np\n'), ((807, 836), 'numpy.fft.rfft', 'np.fft.rfft', (['windowed'], {'n': 'nfft'}), '(windowed, n=nfft)\n', (818, 836), True, 'import numpy as np\n'), ((927, 961), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['nfft'], {'d': '(1 / 48000)'}), '(nfft, d=1 / 48000)\n', (942, 961), True, 'import numpy as np\n'), ((1384, 1398), 'numpy.dot', 'np.dot', (['st', 'su'], {}), '(st, su)\n', (1390, 1398), True, 'import numpy as np\n'), ((2104, 2144), 'numpy.array', 'np.array', (['([0] * nreplica)'], {'dtype': 'np.float'}), '([0] * nreplica, dtype=np.float)\n', (2112, 2144), True, 'import numpy as np\n'), ((2231, 2279), 'resamp.resamp', 'resamp.resamp', (['x', 'samples', 'cutoff', '(48000)', 'window'], {}), '(x, samples, cutoff, 48000, window)\n', (2244, 2279), False, 'import resamp\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo that runs object detection on camera frames using OpenCV.
TEST_DATA=../all_models
Run face detection model:
python3 detect.py \
--model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
Run coco model:
python3 detect.py \
--model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
--labels ${TEST_DATA}/coco_labels.txt
"""
import argparse
import collections
import common
import cv2
import numpy as np
import os
import csv
import glob
import time
from PIL import Image
import re
import tflite_runtime.interpreter as tflite
Object = collections.namedtuple('Object', ['id', 'score', 'bbox'])
def load_labels(path):
    """Parse a label file into an ``{id: name}`` dict.

    Each line is expected to look like ``<id> <label text>``; whitespace
    around the label is stripped.
    """
    pattern = re.compile(r'\s*(\d+)(.+)')
    labels = {}
    with open(path, 'r', encoding='utf-8') as label_file:
        for line in label_file:
            num, text = pattern.match(line).groups()
            labels[int(num)] = text.strip()
    return labels
class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])):
    """Bounding box.
    Represents a rectangle which sides are either vertical or horizontal, parallel
    to the x or y axis.
    Fields are floats; as produced by get_output below they are normalized to
    [0, 1] relative to the image size (clamped there).
    """
    # No per-instance __dict__: keeps the many short-lived boxes lightweight.
    __slots__ = ()
def get_output(interpreter, score_threshold, top_k, class_list, image_scale=1.0):
    """Return the list of detected Objects above ``score_threshold``.

    Reads the four SSD-postprocess output tensors (boxes, class ids, scores,
    valid-detection count) and keeps at most ``top_k`` detections whose class
    id is in ``class_list``. Box coordinates are clamped to [0, 1].
    ``image_scale`` is accepted for API compatibility but is unused.
    """
    boxes = common.output_tensor(interpreter, 0)
    class_ids = common.output_tensor(interpreter, 1)
    scores = common.output_tensor(interpreter, 2)
    count = int(common.output_tensor(interpreter, 3))

    def make(i):
        # The model emits boxes as (ymin, xmin, ymax, xmax), normalized.
        ymin, xmin, ymax, xmax = boxes[i]
        return Object(
            id=int(class_ids[i]),
            score=scores[i],
            bbox=BBox(xmin=np.maximum(0.0, xmin),
                      ymin=np.maximum(0.0, ymin),
                      xmax=np.minimum(1.0, xmax),
                      ymax=np.minimum(1.0, ymax)))

    # BUG FIX: only the first `count` entries of the output tensors are valid
    # detections; the original iterated over range(top_k) unconditionally and
    # could read stale entries when top_k > count.
    return [make(i) for i in range(min(top_k, count))
            if scores[i] >= score_threshold and class_ids[i] in class_list]
def main():
    """Run SSD object detection over a folder of images and write detections to a CSV."""
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir,default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=10,
                        help='number of categories with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.3,
                        help='classifier score threshold')
    parser.add_argument('--class_ids', nargs='*', type=int, default=0,
                        help='Array of class id')
    parser.add_argument('--input_files', default='/home/mendel/dataset/*.jpg',
                        help='Input files')
    parser.add_argument('--csv_out', default='detect_output.csv',
                        help='csv output file')
    args = parser.parse_args()
    # The argparse default is the scalar 0 (not a list); normalize it so the
    # `class_ids[i] in class_list` membership test in get_output works.
    if args.class_ids == 0:
        args.class_ids = [0]
    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)
    # csv writer
    f = open(args.csv_out, 'w')
    with f:
        fnames = ['timestamp', 'idx', 'label', 'width', 'height', 'xmin', 'ymin', 'xmax', 'ymax', 'score']
        writer = csv.DictWriter(f, fieldnames=fnames)
        writer.writeheader()
        # read frames
        inference_time = []
        for image_path in sorted(glob.glob(args.input_files)):
            # The file stem is used as the 'timestamp' CSV column.
            image_name = os.path.splitext(os.path.basename(image_path))[0]
            #print(image_name)
            pil_im = Image.open(image_path)
            # inference (timed per frame)
            start = time.time()
            common.set_input(interpreter, pil_im)
            interpreter.invoke()
            objs = get_output(interpreter, score_threshold=args.threshold, top_k=args.top_k, class_list=args.class_ids)
            inference_time.append(time.time() - start)
            # return results
            (width, height) = pil_im.size
            idx = -1
            for obj in objs:
                # bbox is normalized to [0, 1]; scale back to pixel coordinates.
                x0, y0, x1, y1 = list(obj.bbox)
                x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
                score = obj.score
                label = labels.get(obj.id, obj.id)
                idx += 1
                writer.writerow({'timestamp' : image_name, 'idx': idx, 'label': label, 'width': width, 'height': height, 'xmin': x0, 'ymin': y0, 'xmax': x1, 'ymax': y1, 'score': score})
    # NOTE: raises ZeroDivisionError if the input glob matched no files.
    print("Inference time : {:.3f} ms".format(sum(inference_time)*1000/len(inference_time)))
    print("Frames per second : {:.2f} fps".format(len(inference_time)/sum(inference_time)))
if __name__ == '__main__':
    main()
| [
"csv.DictWriter",
"collections.namedtuple",
"PIL.Image.open",
"numpy.minimum",
"argparse.ArgumentParser",
"re.compile",
"os.path.join",
"common.output_tensor",
"os.path.basename",
"common.make_interpreter",
"common.set_input",
"numpy.maximum",
"time.time",
"glob.glob"
] | [((1166, 1223), 'collections.namedtuple', 'collections.namedtuple', (['"""Object"""', "['id', 'score', 'bbox']"], {}), "('Object', ['id', 'score', 'bbox'])\n", (1188, 1223), False, 'import collections\n'), ((1473, 1537), 'collections.namedtuple', 'collections.namedtuple', (['"""BBox"""', "['xmin', 'ymin', 'xmax', 'ymax']"], {}), "('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])\n", (1495, 1537), False, 'import collections\n'), ((1256, 1284), 're.compile', 're.compile', (['"""\\\\s*(\\\\d+)(.+)"""'], {}), "('\\\\s*(\\\\d+)(.+)')\n", (1266, 1284), False, 'import re\n'), ((1834, 1870), 'common.output_tensor', 'common.output_tensor', (['interpreter', '(0)'], {}), '(interpreter, 0)\n', (1854, 1870), False, 'import common\n'), ((1887, 1923), 'common.output_tensor', 'common.output_tensor', (['interpreter', '(1)'], {}), '(interpreter, 1)\n', (1907, 1923), False, 'import common\n'), ((1937, 1973), 'common.output_tensor', 'common.output_tensor', (['interpreter', '(2)'], {}), '(interpreter, 2)\n', (1957, 1973), False, 'import common\n'), ((2664, 2689), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2687, 2689), False, 'import argparse\n'), ((3772, 3807), 'common.make_interpreter', 'common.make_interpreter', (['args.model'], {}), '(args.model)\n', (3795, 3807), False, 'import common\n'), ((1990, 2026), 'common.output_tensor', 'common.output_tensor', (['interpreter', '(3)'], {}), '(interpreter, 3)\n', (2010, 2026), False, 'import common\n'), ((4067, 4103), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'fnames'}), '(f, fieldnames=fnames)\n', (4081, 4103), False, 'import csv\n'), ((2784, 2830), 'os.path.join', 'os.path.join', (['default_model_dir', 'default_model'], {}), '(default_model_dir, default_model)\n', (2796, 2830), False, 'import os\n'), ((2923, 2970), 'os.path.join', 'os.path.join', (['default_model_dir', 'default_labels'], {}), '(default_model_dir, default_labels)\n', (2935, 2970), False, 'import os\n'), ((4217, 4244), 'glob.glob', 
'glob.glob', (['args.input_files'], {}), '(args.input_files)\n', (4226, 4244), False, 'import glob\n'), ((4374, 4396), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4384, 4396), False, 'from PIL import Image\n'), ((4442, 4453), 'time.time', 'time.time', ([], {}), '()\n', (4451, 4453), False, 'import time\n'), ((4466, 4503), 'common.set_input', 'common.set_input', (['interpreter', 'pil_im'], {}), '(interpreter, pil_im)\n', (4482, 4503), False, 'import common\n'), ((4289, 4317), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (4305, 4317), False, 'import os\n'), ((4691, 4702), 'time.time', 'time.time', ([], {}), '()\n', (4700, 4702), False, 'import time\n'), ((2201, 2222), 'numpy.maximum', 'np.maximum', (['(0.0)', 'xmin'], {}), '(0.0, xmin)\n', (2211, 2222), True, 'import numpy as np\n'), ((2251, 2272), 'numpy.maximum', 'np.maximum', (['(0.0)', 'ymin'], {}), '(0.0, ymin)\n', (2261, 2272), True, 'import numpy as np\n'), ((2301, 2322), 'numpy.minimum', 'np.minimum', (['(1.0)', 'xmax'], {}), '(1.0, xmax)\n', (2311, 2322), True, 'import numpy as np\n'), ((2351, 2372), 'numpy.minimum', 'np.minimum', (['(1.0)', 'ymax'], {}), '(1.0, ymax)\n', (2361, 2372), True, 'import numpy as np\n')] |
"""CheXpert LaTex exporter.
Export CheXpert statistics and graphs to be imported in LaTex documents.
The goal is to automate the generation of all tables stastical tables used in papers, so that they
are accurate and can be regenerated quickly if the dataset is upgraded.
"""
import os
import re
from typing import List
import pandas as pd
import numpy as np
import chexpert_dataset as cxd
import chexpert_statistics as cxs
# Destination directories, with path separator at the end to simplify the code
# IMPORTANT: assumes a specific path - adjust for your environment
DIR_TABLES = os.path.join('..', 'chexpert-datasheet', 'tables') + os.sep
IMAGES = 'Images'
PATIENTS = 'Patients'
FLOAT_FORMAT = '{:0,.1f}'.format
INT_FORMAT = '{:,}'.format
SHORT_OBSERVATION_NAMES = [('Enlarged Cardiomediastinum', 'Enlarged Card.')]
SEP_OBSERVATIONS = ['Consolidation', 'Lung Opacity']
SEP_TRAIN_VALIDATION = ['Validation']
def format_table(table: str, source_df: pd.DataFrame, file: str,
                 short_observation_name: bool = False, text_width: str = None,
                 vertical_columns_names: bool = False, horizontal_separators: List[str] = None,
                 font_size: str = None):
    """Format a LaTeX table and save it to a .tex file.

    Args:
        table (str): The LaTeX table to be formatted.
        source_df (pd.DataFrame): The DataFrame used to generate the table.
        file (str): The base file name to save the table to. The directory and .tex
            extension are added in this function.
        short_observation_name (bool, optional): Shorten some of the observations
            names. Defaults to False.
        text_width (str, optional): LaTeX width (e.g. ``0.9\\textwidth``) to resize
            the table to, switching it to a ``table*`` float (for multi-column
            templates); None keeps the default width. Defaults to None.
        vertical_columns_names (bool, optional): Rotate the columns names by 90
            degrees. Defaults to False.
        horizontal_separators (List[str], optional): Add a horizontal separator
            before lines that start with these texts. Defaults to None (none added).
        font_size (str, optional): LaTeX font size command (e.g. ``small``), or the
            default size if ``None`` is specified. Defaults to None.
    """
    if text_width is not None:
        adjustbox = '\\begin{adjustbox}{width = ' + text_width + '}\n\\begin{tabular}'
        table = table.replace('\\begin{tabular}', adjustbox)
        table = table.replace('\\end{tabular}', '\\end{tabular}\n\\end{adjustbox}')
        table = table.replace('{table}', '{table*}')
    if vertical_columns_names:
        # Assume columns names match the ones in the DataFrame
        rotated = ' & ' + (' & ').join(['\\\\rotatebox{{90}}{{{}}}'.format(x)
                                        for x in source_df.columns.tolist()])
        table = re.sub(' & {}.* & {}'.format(source_df.columns[0], source_df.columns[-1]),
                       rotated, table, count=1)
    # BUG FIX: the documented default (None) previously raised TypeError because
    # the list was iterated unconditionally.
    if horizontal_separators is not None:
        for sep in horizontal_separators:
            table = re.sub(r'^{}'.format(sep), r'\\midrule[0.2pt]\n{}'.format(sep),
                           table, count=1, flags=re.MULTILINE)
    if font_size is not None:
        table = table.replace('\\centering', '\\{}\n\\centering'.format(font_size))
    if short_observation_name:
        # Not very memory efficient, but simple and sufficient for the text sizes we deal with
        for replacement in SHORT_OBSERVATION_NAMES:
            table = table.replace(*replacement)
    with open(DIR_TABLES + file + '.tex', 'w') as f:
        print(table, file=f)
chexpert = cxd.CheXpertDataset()
chexpert.fix_dataset()
# Make code a bit simpler
df = chexpert.df
# Count of patients and images in the training and validation datasets
NAME = 'patient-studies-images-train-validate'
CAPTION = 'Number of patients, studies, and images'
stats = cxs.patient_study_image_count(df)
stats = stats.unstack().droplevel(0, axis='columns')
stats.to_latex(buf=DIR_TABLES+NAME+'.tex',
formatters=[INT_FORMAT] * stats.shape[1],
float_format=FLOAT_FORMAT, index_names=False,
caption=CAPTION, label='tab:'+NAME, position='h!')
# Summary statistic of images per patient
# This sounded like a good idea, but the binned image count table is a better representation
# Will disable the code, instad of removing it, in case there is a good reason to reinstate it
patient_summary_stat = False
if patient_summary_stat:
NAME = 'patient-images-stats-summary'
CAPTION = 'Summary statistics for images per patient'
summary = cxs.images_summary_stats(df)
summary.to_latex(buf=DIR_TABLES+NAME+'.tex',
float_format=FLOAT_FORMAT, index_names=False,
caption=CAPTION, label='tab:'+NAME, position='h!')
# Binned number of images per patient (continuing from above, where the number of images was added)
NAME = 'patient-images-stats-distribution'
CAPTION = 'Distribution of number of images per patient'
stats = cxs.images_per_patient_binned(df)
# Simplify the table to make it look better
# index_names=False should be even better, but it has a bug: https://github.com/pandas-dev/pandas/issues/18326 # noqa
stats.index.names = [''] * stats.index.nlevels
table = stats.to_latex(formatters=[INT_FORMAT, FLOAT_FORMAT, FLOAT_FORMAT] * 2,
float_format=FLOAT_FORMAT, index_names=True,
caption=CAPTION, label='tab:'+NAME, position='h!', multicolumn=True)
format_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION,
font_size='small', text_width='0.75\\textwidth')
# Frequency of labels in the training and validation sets
def generate_image_frequency_table(df: pd.DataFrame, name: str, caption: str,
                                   pos_neg_only: bool = False) -> str:
    """Create and save the LaTeX table for label frequency per image."""
    freq = cxs.label_image_frequency(df)
    width = '0.9\\textwidth'
    if pos_neg_only:
        # Positive/negative count and % are the first four columns.
        freq = freq.iloc[:, :4]
        width = None  # fits in a single column, no resizing needed
    size = 'small' if pos_neg_only else 'scriptsize'
    latex = freq.to_latex(column_format='l' + 'r' * freq.shape[1],
                          formatters=[INT_FORMAT, '{:.1%}'.format] * (freq.shape[1] // 2),
                          float_format=FLOAT_FORMAT, index_names=True,
                          caption=caption, label='tab:' + name, position='h!')
    format_table(latex, freq, name, short_observation_name=True, text_width=width,
                 horizontal_separators=SEP_OBSERVATIONS, font_size=size)
NAME = 'label-frequency-training'
CAPTION = 'Frequency of labels in the training set images'
generate_image_frequency_table(df[df[cxd.COL_TRAIN_VALIDATION] == cxd.TRAINING], NAME, CAPTION)
NAME = 'label-frequency-validation'
CAPTION = 'Frequency of labels in the validation set images'
generate_image_frequency_table(df[df[cxd.COL_TRAIN_VALIDATION] == cxd.VALIDATION], NAME, CAPTION,
pos_neg_only=True)
NAME = 'observation-coincidence'
CAPTION = 'Coincidence of positive observations in the training set images'
stats = cxs.observation_image_coincidence(df[df[cxd.COL_TRAIN_VALIDATION] == cxd.TRAINING])
# Remove upper triangle (same as bottom triangle) to make it easier to follow
stats.values[np.triu_indices_from(stats, 0)] = ''
# Remove first row and last column (they are now empty)
stats.drop(labels=cxd.OBSERVATION_NO_FINDING, axis='rows', inplace=True)
stats.drop(labels=cxd.OBSERVATION_PATHOLOGY[-1], axis='columns', inplace=True)
table = stats.to_latex(column_format='r' * (stats.shape[1]+1), # +1 for index
float_format=FLOAT_FORMAT, index_names=True,
caption=CAPTION, label='tab:'+NAME, position='h!')
format_table(table, stats, NAME, text_width='1\\textwidth', short_observation_name=True,
vertical_columns_names=True, horizontal_separators=SEP_OBSERVATIONS)
NAME = 'demographic-by-set-sex'
CAPTION = 'Patients and images by sex'
stats = cxs.images_per_patient_sex(df)
# Simplify the table to make it look better
stats.index.names = ['', cxd.COL_SEX]
table = stats.to_latex(formatters=[INT_FORMAT, FLOAT_FORMAT] * (stats.shape[1]//2),
float_format=FLOAT_FORMAT, index_names=True,
caption=CAPTION, label='tab:'+NAME, position='h!')
format_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION, font_size='small')
NAME = 'demographic-by-set-age-group'
CAPTION = 'Patients and images by age group'
stats = cxs.patients_images_by_age_group(df)
# Simplify the table to make it look better
stats.index.names = ['', cxd.COL_AGE_GROUP]
table = stats.to_latex(formatters=[INT_FORMAT] * stats.shape[1],
float_format=FLOAT_FORMAT, index_names=True,
caption=CAPTION, label='tab:'+NAME, position='h!')
format_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION, font_size='small')
NAME = 'demographic-by-set-sex-age-group'
CAPTION = 'Patients, studies, and images by sex and age group'
stats = cxs.patients_studies_images_by_sex_age_group_subtotal(df)
# Simplify the table to make it look better
stats.index.names = ['', cxd.COL_AGE_GROUP]
table = stats.to_latex(formatters=[INT_FORMAT] * stats.shape[1],
float_format=FLOAT_FORMAT, index_names=True,
caption=CAPTION, label='tab:'+NAME, position='h!')
# WARNING: manual formatting is also added to this table
# Review the changes, add the formatting again before committing
format_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION, font_size='small',
text_width='0.9\\textwidth')
| [
"chexpert_statistics.images_per_patient_binned",
"chexpert_statistics.patients_images_by_age_group",
"chexpert_statistics.observation_image_coincidence",
"chexpert_statistics.images_summary_stats",
"chexpert_dataset.CheXpertDataset",
"os.path.join",
"chexpert_statistics.images_per_patient_sex",
"numpy... | [((3511, 3532), 'chexpert_dataset.CheXpertDataset', 'cxd.CheXpertDataset', ([], {}), '()\n', (3530, 3532), True, 'import chexpert_dataset as cxd\n'), ((3778, 3811), 'chexpert_statistics.patient_study_image_count', 'cxs.patient_study_image_count', (['df'], {}), '(df)\n', (3807, 3811), True, 'import chexpert_statistics as cxs\n'), ((4917, 4950), 'chexpert_statistics.images_per_patient_binned', 'cxs.images_per_patient_binned', (['df'], {}), '(df)\n', (4946, 4950), True, 'import chexpert_statistics as cxs\n'), ((7172, 7260), 'chexpert_statistics.observation_image_coincidence', 'cxs.observation_image_coincidence', (['df[df[cxd.COL_TRAIN_VALIDATION] == cxd.TRAINING]'], {}), '(df[df[cxd.COL_TRAIN_VALIDATION] == cxd.\n TRAINING])\n', (7205, 7260), True, 'import chexpert_statistics as cxs\n'), ((8066, 8096), 'chexpert_statistics.images_per_patient_sex', 'cxs.images_per_patient_sex', (['df'], {}), '(df)\n', (8092, 8096), True, 'import chexpert_statistics as cxs\n'), ((8593, 8629), 'chexpert_statistics.patients_images_by_age_group', 'cxs.patients_images_by_age_group', (['df'], {}), '(df)\n', (8625, 8629), True, 'import chexpert_statistics as cxs\n'), ((9135, 9192), 'chexpert_statistics.patients_studies_images_by_sex_age_group_subtotal', 'cxs.patients_studies_images_by_sex_age_group_subtotal', (['df'], {}), '(df)\n', (9188, 9192), True, 'import chexpert_statistics as cxs\n'), ((587, 637), 'os.path.join', 'os.path.join', (['""".."""', '"""chexpert-datasheet"""', '"""tables"""'], {}), "('..', 'chexpert-datasheet', 'tables')\n", (599, 637), False, 'import os\n'), ((4491, 4519), 'chexpert_statistics.images_summary_stats', 'cxs.images_summary_stats', (['df'], {}), '(df)\n', (4515, 4519), True, 'import chexpert_statistics as cxs\n'), ((5826, 5855), 'chexpert_statistics.label_image_frequency', 'cxs.label_image_frequency', (['df'], {}), '(df)\n', (5851, 5855), True, 'import chexpert_statistics as cxs\n'), ((7347, 7377), 'numpy.triu_indices_from', 'np.triu_indices_from', 
(['stats', '(0)'], {}), '(stats, 0)\n', (7367, 7377), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
import os
import argparse
import numpy as np
import PIL.Image as pil
import os
import sys
sys.path.append(os.getcwd())
from datasets.utils.data_reader import generate_depth_map
def export_gt_depths_kitti():
    """Export KITTI eigen-split ground-truth depth maps to a gt_depths.npz archive."""
    parser = argparse.ArgumentParser(description='export_gt_depth')
    parser.add_argument('--data_path', type=str, required=True,
                        help='path to the root of the KITTI data')
    options = parser.parse_args()
    split_dir = os.path.join(os.getcwd(), "data_splits", "kitti")
    with open(os.path.join(split_dir, "test_list.txt"), "r") as split_file:
        test_lines = split_file.readlines()
    print("Exporting ground truth depths for {}".format("eigen"))
    depth_maps = []
    for entry in test_lines:
        folder, frame_id, _ = entry.split()
        frame_id = int(frame_id)
        # Calibration lives next to the drive folder's date directory.
        calib_dir = os.path.join(options.data_path, folder.split("/")[0])
        velo_filename = os.path.join(options.data_path, folder,
                                     "velodyne_points/data", "{:010d}.bin".format(frame_id))
        depth = generate_depth_map(calib_dir, velo_filename, 2, True)
        depth_maps.append(depth.astype(np.float32))
    output_path = os.path.join(options.data_path, "gt_depths.npz")
    print("Saving to {}".format("eigen"))
    np.savez_compressed(output_path, data=np.array(depth_maps))
if __name__ == "__main__":
export_gt_depths_kitti() | [
"argparse.ArgumentParser",
"os.path.join",
"datasets.utils.data_reader.generate_depth_map",
"os.getcwd",
"numpy.array"
] | [((174, 185), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (183, 185), False, 'import os\n'), ((291, 345), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""export_gt_depth"""'}), "(description='export_gt_depth')\n", (314, 345), False, 'import argparse\n'), ((1296, 1340), 'os.path.join', 'os.path.join', (['opt.data_path', '"""gt_depths.npz"""'], {}), "(opt.data_path, 'gt_depths.npz')\n", (1308, 1340), False, 'import os\n'), ((589, 600), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (598, 600), False, 'import os\n'), ((1168, 1221), 'datasets.utils.data_reader.generate_depth_map', 'generate_depth_map', (['calib_dir', 'velo_filename', '(2)', '(True)'], {}), '(calib_dir, velo_filename, 2, True)\n', (1186, 1221), False, 'from datasets.utils.data_reader import generate_depth_map\n'), ((641, 684), 'os.path.join', 'os.path.join', (['split_folder', '"""test_list.txt"""'], {}), "(split_folder, 'test_list.txt')\n", (653, 684), False, 'import os\n'), ((1427, 1446), 'numpy.array', 'np.array', (['gt_depths'], {}), '(gt_depths)\n', (1435, 1446), True, 'import numpy as np\n')] |
# coding=utf-8
import tensorflow as tf
from colorama import Fore
import numpy as np
import logging
from collections import OrderedDict
import Putil.DenseNet.model_base as dmb
from tensorflow.contrib import layers
import Putil.np.util as npu
import Putil.tf.util as tfu
def get_image_summary(img, idx=0):
    """
    Make an image summary for 4d tensor image with index idx.

    Channel `idx` is min-max normalized to [0, 255] and reshaped back to a
    batch-of-one NHWC tensor suitable for tf.summary.image.
    """
    channel = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    channel -= tf.reduce_min(channel)
    channel /= tf.reduce_max(channel)
    channel *= 255
    width = tf.shape(img)[1]
    height = tf.shape(img)[2]
    channel = tf.reshape(channel, tf.stack((width, height, 1)))
    channel = tf.transpose(channel, (2, 0, 1))
    return tf.reshape(channel, tf.stack((-1, width, height, 1)))
def weight_variable(shape, stddev=0.1, name="weight"):
    """Create a trainable weight tensor initialized from a truncated normal."""
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)
def weight_variable_devonc(shape, stddev=0.1, name="weight_devonc"):
    """Trainable weight tensor for deconvolution layers (truncated-normal init)."""
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial, name=name)
def bias_variable(shape, name="bias"):
    """Trainable bias tensor initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)
def conv2d(x, W, b, keep_prob_):
    """VALID 2-D convolution plus bias, followed by dropout."""
    with tf.name_scope("conv2d"):
        convolved = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
        biased = tf.nn.bias_add(convolved, b)
        return tf.nn.dropout(biased, keep_prob_)
def deconv2d(x, W, stride):
    """Transposed convolution that doubles the spatial dims and halves the channels."""
    with tf.name_scope("deconv2d"):
        in_shape = tf.shape(x)
        out_shape = tf.stack([in_shape[0], in_shape[1] * 2, in_shape[2] * 2, in_shape[3] // 2])
        return tf.nn.conv2d_transpose(x, W, out_shape, strides=[1, stride, stride, 1],
                                      padding='VALID', name="conv2d_transpose")
def max_pool(x, n):
    """Non-overlapping n-by-n max pooling (VALID padding)."""
    window = [1, n, n, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='VALID')
def crop_and_concat(x1, x2):
    """Center-crop x1 to x2's spatial size, then concatenate along channels."""
    with tf.name_scope("crop_and_concat"):
        shape1 = tf.shape(x1)
        shape2 = tf.shape(x2)
        # offsets for the top left corner of the centered crop
        row_off = (shape1[1] - shape2[1]) // 2
        col_off = (shape1[2] - shape2[2]) // 2
        cropped = tf.slice(x1, [0, row_off, col_off, 0], [-1, shape2[1], shape2[2], -1])
        return tf.concat([cropped, x2], 3)
def pixel_wise_softmax(output_map):
    """Numerically stable softmax over the channel axis (axis 3)."""
    with tf.name_scope("pixel_wise_softmax"):
        # Subtracting the per-pixel max keeps exp() from overflowing.
        channel_max = tf.reduce_max(output_map, axis=3, keepdims=True, name='calc_max')
        exps = tf.exp(tf.subtract(output_map, channel_max, 'sub_for_avoid_overflow'), 'exp')
        denom = tf.reduce_sum(exps, axis=3, keepdims=True, name='exp_sum')
        return tf.div(exps, denom, name='normalize')
def cross_entropy(y_, output_map):
    """Mean cross-entropy; probabilities are clipped to avoid log(0)."""
    clipped = tf.clip_by_value(output_map, 1e-10, 1.0)
    return -tf.reduce_mean(y_ * tf.log(clipped), name="cross_entropy")
def create_conv_net(x, keep_prob, channels, n_class, layers=3, features_root=16, filter_size=3, pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.
    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    :return: (output_map, variables, size_offset) where size_offset is the total
        number of border pixels lost to VALID convolutions (int(in_size - size)).
    """
    logging.info(
        Fore.GREEN + "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: "
                     "{pool_size}x{pool_size}".format(
            layers=layers,
            features=features_root,
            filter_size=filter_size,
            pool_size=pool_size))
    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]
    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()  # encoder (down-path) activations, keyed by layer
    up_h_convs = OrderedDict()  # decoder (up-path) activations
    # `size` tracks spatial shrinkage of a nominal 1000-px input through the
    # VALID convs/pools; only the difference (in_size - size) is returned.
    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2 ** layer * features_root
            # He-style init: stddev scaled by fan-in of the conv kernel.
            stddev = np.sqrt(2 / (filter_size ** 2 * features))
            if layer == 0:
                w1 = weight_variable([filter_size, filter_size, channels, features], stddev, name="w1")
            else:
                w1 = weight_variable([filter_size, filter_size, features // 2, features], stddev, name="w1")
            w2 = weight_variable([filter_size, filter_size, features, features], stddev, name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")
            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2)
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))
            # Two VALID 3x3-ish convs lose filter_size-1 pixels each side pair.
            size -= 4
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2
    in_node = dw_h_convs[layers - 1]
    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2 ** (layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size ** 2 * features))
            wd = weight_variable_devonc([pool_size, pool_size, features // 2, features], stddev, name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            # U-Net skip connection: crop the encoder map and concat channels.
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat
            w1 = weight_variable([filter_size, filter_size, features, features // 2], stddev, name="w1")
            w2 = weight_variable([filter_size, filter_size, features // 2, features // 2], stddev, name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")
            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(conv2)
            up_h_convs[layer] = in_node
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))
            size *= 2
            size -= 4
    # Output Map
    with tf.name_scope("output_map"):
        # NOTE(review): `stddev` here carries over from the last loop iteration;
        # a dedicated stddev for the 1x1 layer may have been intended.
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.relu(conv)
        up_h_convs["out"] = output_map
    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))
            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k, get_image_summary(pools[k]))
            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k, get_image_summary(deconv[k]))
            for k in dw_h_convs.keys():
                tf.summary.histogram("dw_convolution_%02d" % k + '/activations', dw_h_convs[k])
            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations', up_h_convs[k])
    # Flatten the (w1, w2)/(b1, b2) pairs into a single trainable-variable list.
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)
    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)
    return output_map, variables, int(in_size - size)
def __reducor_for_DenseUNet(
        output_map,
        training,
        params
):
    """Bottleneck block between the DenseUNet encoder and decoder.

    Applies one DenseNet block (named 'reducor') configured from `params`:
    'param_dtype' (numeric code resolved via tfu.tf_type), 'regularize_weight',
    'grows', 'kernels' and 'layer_param' (which gets `training` injected).
    """
    param_dtype = tfu.tf_type(params.get('param_dtype')).Type
    regularize_weight = params.get('regularize_weight')
    grow = params.get('grows')
    kernel = params.get('kernels')
    layer_param = params.get('layer_param')
    # NOTE: mutates the caller-supplied dict by adding the 'training' key.
    layer_param['training'] = training
    output_map = dmb.DenseNetBlockLayers(
        output_map,
        param_dtype,
        grow,
        'reducor',
        regularize_weight,
        kernel,
        layer_param
    )
    return output_map
    pass  # unreachable (after return)
def __base_feature(
        output_map,
        params
):
    """Stem convolution producing the initial feature map for the encoder.

    `params` keys: 'feature_amount' (output channels), 'kernel', 'stride',
    'param_dtype' (numeric code resolved via tfu.tf_type), 'regularize_weight'.
    SAME padding, ReLU activation, L2-regularized weights and biases.
    """
    filter = params.get('feature_amount')  # NOTE: shadows the builtin `filter`
    kernel = params.get('kernel')
    stride = params.get('stride')
    param_dtype = tfu.tf_type(params.get('param_dtype')).Type
    regularize_weight = params.get('regularize_weight')
    output_map = tf.layers.conv2d(
        output_map,
        filter,
        kernel,
        stride,
        "same",
        activation=tf.nn.relu,
        kernel_initializer=tf.variance_scaling_initializer(mode='fan_avg', dtype=param_dtype),
        kernel_regularizer=layers.l2_regularizer(regularize_weight),
        bias_initializer=tf.zeros_initializer(dtype=param_dtype),
        bias_regularizer=layers.l2_regularizer(regularize_weight),
        name='base'
    )
    return output_map
    pass  # unreachable (after return)
def __DenseUNet(
        output_map,
        training,
        DenseUNetConfig
):
    """Build a U-Net-style encoder/decoder out of DenseNet blocks.

    :param output_map: input feature tensor (NHWC assumed -- TODO confirm).
    :param training: training flag forwarded into batch-norm/dropout layer params.
    :param DenseUNetConfig: dict with the keys:
        'BaseFeature'  -- stem conv params for __base_feature
                          (feature_amount, kernel, stride, param_dtype,
                          regularize_weight);
        'DenseNet'     -- list of encoder block configs consumed by
                          dmb.DenseNetFromParamDict (param_dtype, grows, kernels,
                          regularize_weight, pool_kernel/stride/type,
                          layer_param, transition_param);
        'DeDenseNet'   -- list of decoder block configs, one per encoder block
                          (param_dtype, grows, kernels, regularize_weight,
                          t_kernel, t_stride, compress_rate, layer_param,
                          transition_param);
        'BlockReducor' -- bottleneck params for __reducor_for_DenseUNet.
    Each decoder block upsamples (DeDenseNetBlockTransition), concatenates the
    matching encoder block-layer output (skip connection), then applies
    DeDenseNetBlockLayers.
    :return: the decoded feature map.
    """
    BaseFeature = DenseUNetConfig.get('BaseFeature')
    DenseNetConfig = DenseUNetConfig.get('DenseNet')
    DeDenseNetConfig = DenseUNetConfig.get('DeDenseNet')
    BlockReducor = DenseUNetConfig.get('BlockReducor')
    output_map = __base_feature(output_map, BaseFeature)
    cl = dmb.DenseNetProvide()
    cld = dmb.DeDenseNetProvide()
    output_map = dmb.DenseNetFromParamDict(
        output_map,
        training,
        DenseNetConfig,
        dense_net_provide=cl,
        block_name_flag='encode-')
    block_layer_want = cl.BlockLayer[-1][-1]  # NOTE(review): appears unused
    # Reverse the recorded encoder outputs so skips pair deepest-first.
    cl.BlockLayer.reverse()
    output_map = __reducor_for_DenseUNet(output_map, training, BlockReducor)
    de_block_name = 0
    for encode_block_layer in zip(cl.BlockLayer, DeDenseNetConfig):
        DeDenseNetBlockConfig = encode_block_layer[1]
        param_dtype = tfu.tf_type(DeDenseNetBlockConfig.get('param_dtype')).Type
        grows = DeDenseNetBlockConfig.get('grows')
        regularize_weight = DeDenseNetBlockConfig.get('regularize_weight')
        kernels = DeDenseNetBlockConfig.get('kernels')
        t_kernel = DeDenseNetBlockConfig.get('t_kernel')
        t_stride = DeDenseNetBlockConfig.get('t_stride')
        compress_rate = DeDenseNetBlockConfig.get('compress_rate')
        layer_param = DeDenseNetBlockConfig.get('layer_param')
        # NOTE: mutates the config dicts in place by injecting 'training'.
        layer_param['training'] = training
        transition_param = DeDenseNetBlockConfig.get('transition_param')
        transition_param['training'] = training
        # Skip connection source: last layer output of the paired encoder block.
        to_concat = encode_block_layer[0][-1]
        cld.push_block()
        output_map = dmb.DeDenseNetBlockTransition(
            output_map,
            param_dtype,
            'decode_{0}_{1}'.format(de_block_name, 'transition'),
            regularize_weight,
            t_kernel,
            t_stride,
            compress_rate,
            **transition_param
        )
        output_map = tf.concat(
            [to_concat, output_map],
            axis=-1,
            name='decode_{0}_{1}'.format(de_block_name, 'concat'))
        cld.push_transition(output_map)
        output_map = dmb.DeDenseNetBlockLayers(
            output_map,
            param_dtype,
            grows,
            'decode_{0}_{1}'.format(de_block_name, 'block_layer'),
            regularize_weight,
            kernels,
            layer_param,
        )
        cld.push_block_layer(output_map)
        de_block_name += 1
        pass
    return output_map
    pass  # unreachable (after return)
def DenseUNetPro(
        output_map,
        training,
        class_amount,
        param_dtype,
        regularizer_weight,
        DenseUNetConfig,
):
    """DenseUNet backbone followed by a pixel-wise classification head.

    :param output_map: input feature-map tensor
    :param training: training-phase flag forwarded to the backbone
    :param class_amount: number of per-pixel output classes
    :param param_dtype: parameter dtype for the classification head
    :param regularizer_weight: L2 weight for the head's regularizers
    :param DenseUNetConfig: configuration dict for the backbone
    :return: per-pixel class score tensor
    """
    features = __DenseUNet(output_map, training, DenseUNetConfig)
    scores = __conv_pixel_wise_class_pro(
        features,
        class_amount,
        "fcn",
        param_dtype,
        regularizer_weight
    )
    return scores
# todo: calc the miou
def fcn_calc_miou(
        logit,
        gt
):
    """Compute the mean IoU of *logit* against ground truth *gt*.

    Not implemented yet -- this is a placeholder (see the TODO above).
    """
    pass
def __conv_pixel_wise_class_pro(output_map, class_amount, name, param_dtype, regularize_weight, **options):
    """Pixel-wise classification head: 1x1 conv to *class_amount* channels + PReLU.

    :param output_map: input feature map
    :param class_amount: number of output channels (one per class)
    :param name: prefix used for the enclosing variable scope
    :param param_dtype: dtype for the weight/bias initializers
    :param regularize_weight: L2 regularization weight for weights and biases
    :param options: unused extra keyword arguments
    :return: activated per-pixel class score map
    """
    # Initializers/regularizers are stateless config objects, so one
    # instance can safely be shared between kernel and bias.
    initializer = tf.variance_scaling_initializer(mode='fan_avg', dtype=param_dtype)
    regularizer = layers.l2_regularizer(regularize_weight)
    with tf.variable_scope('{0}_pixel_wise_class_pro'.format(name)):
        scores = tf.layers.conv2d(
            output_map,
            filters=class_amount,
            kernel_size=[1, 1],
            strides=[1, 1],
            kernel_initializer=initializer,
            bias_initializer=initializer,
            kernel_regularizer=regularizer,
            bias_regularizer=regularizer,
            use_bias=True,
            name='conv')
        with tf.variable_scope('ac'):
            # PReLU: leaky-relu whose slope is a single learnable scalar.
            alpha = tf.Variable(0.1, trainable=True)
            scores = tf.nn.leaky_relu(scores, alpha, name='PReLU')
    return scores
def fcn_acc(logits, label):
    """Pixel-wise accuracy of *logits* against one-hot *label*.

    Both tensors are assumed one-hot over the last axis, with layout
    roughly [batch, H, W, classes] -- TODO confirm against callers.
    """
    with tf.name_scope("fcn_acc"):
        # NOTE(review): tf.arg_max is a deprecated alias of tf.argmax.
        pro = tf.arg_max(logits, -1)
        shape = tf.shape(pro)
        # All dimensions after the batch axis, e.g. [H, W].
        sub_shape = tf.slice(shape, [1], [-1])
        # Identity: for sub_shape == [H, W],
        # 0.5*((H+W)^2 - (H^2+W^2)) == H*W, i.e. pixels per sample.
        # Only holds for exactly two trailing dims -- TODO confirm 4-D input.
        pixel_count = 0.5 * tf.cast((tf.square(tf.reduce_sum(sub_shape)) - tf.reduce_sum(sub_shape * sub_shape)), tf.float32)
        l = tf.arg_max(label, -1)
        # Number of mismatching pixels along the last remaining axis.
        no_zeros_count = tf.cast(tf.count_nonzero(pro - l, axis=-1), tf.float32)
        # 1 - (error fraction) per sample, then averaged over the batch.
        one_batch_sum = 1 - tf.reduce_sum(no_zeros_count, axis=-1) / pixel_count
        acc = tf.reduce_mean(one_batch_sum, axis=0)
        return acc
    pass
def fcn_loss(logits, label, cost_name, param_dtype, **options):
    """Build the scalar training loss for the pixel-wise classifier.

    :param logits: raw per-pixel class scores
    :param label: ground-truth map, one-hot over the last axis
    :param cost_name: "cross_entropy" or "dice_coefficient"
    :param param_dtype: dtype used for the optional class-weight constant
    :param options: may carry "class_weights" (cross-entropy cost only)
    :raises ValueError: if *cost_name* names an unknown cost function
    :return: scalar loss tensor
    """
    with tf.name_scope("fcn_loss"):
        flat_logits = logits
        flat_labels = label
        if cost_name == "cross_entropy":
            class_weights = options.pop("class_weights", None)
            if class_weights is not None:
                class_weights = tf.constant(np.array(class_weights, dtype=npu.np_type(param_dtype).Type))
                # NOTE(review): the weight map is built but never applied --
                # the weighted loss below is disabled, so class weighting
                # currently has no effect on the loss value.
                weight_map = tf.multiply(flat_labels, class_weights)
                # Cross entropy computed across the last (class) dimension.
                loss_map = tf.nn.softmax_cross_entropy_with_logits_v2(logits=flat_logits,
                                                                      labels=flat_labels)
                weighted_loss = loss_map
                loss = tf.reduce_mean(tf.reduce_mean(weighted_loss, axis=0))
            else:
                loss = tf.reduce_sum(
                    tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=flat_logits,
                        labels=flat_labels),
                        axis=0
                    ))
        elif cost_name == "dice_coefficient":
            eps = 1e-5
            # Softmax first so the dice overlap is computed on probabilities.
            prediction = pixel_wise_softmax(logits)
            intersection = tf.reduce_sum(prediction * label)
            union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(label)
            loss = -(2 * intersection / union)
        else:
            # Bug fix: the original message had no %s placeholder, so this
            # line raised a TypeError instead of the intended ValueError.
            raise ValueError("Unknown cost function: %s" % cost_name)
        return loss
| [
"tensorflow.div",
"tensorflow.shape",
"numpy.sqrt",
"tensorflow.transpose",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.reduce_sum",
"Putil.DenseNet.model_base.DenseNetBlockLayers",
"tensorflow.multiply",
"Putil.np.util.np_type",
"tensorflow.nn.dropout",
"Putil.DenseNet.model_base.De... | [((392, 437), 'tensorflow.slice', 'tf.slice', (['img', '(0, 0, 0, idx)', '(1, -1, -1, 1)'], {}), '(img, (0, 0, 0, idx), (1, -1, -1, 1))\n', (400, 437), True, 'import tensorflow as tf\n'), ((447, 463), 'tensorflow.reduce_min', 'tf.reduce_min', (['V'], {}), '(V)\n', (460, 463), True, 'import tensorflow as tf\n'), ((473, 489), 'tensorflow.reduce_max', 'tf.reduce_max', (['V'], {}), '(V)\n', (486, 489), True, 'import tensorflow as tf\n'), ((621, 647), 'tensorflow.transpose', 'tf.transpose', (['V', '(2, 0, 1)'], {}), '(V, (2, 0, 1))\n', (633, 647), True, 'import tensorflow as tf\n'), ((787, 828), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': 'stddev'}), '(shape, stddev=stddev)\n', (806, 828), True, 'import tensorflow as tf\n'), ((840, 871), 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': 'name'}), '(initial, name=name)\n', (851, 871), True, 'import tensorflow as tf\n'), ((1075, 1104), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (1086, 1104), True, 'import tensorflow as tf\n'), ((1116, 1147), 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': 'name'}), '(initial, name=name)\n', (1127, 1147), True, 'import tensorflow as tf\n'), ((1740, 1816), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, n, n, 1]', 'strides': '[1, n, n, 1]', 'padding': '"""VALID"""'}), "(x, ksize=[1, n, n, 1], strides=[1, n, n, 1], padding='VALID')\n", (1754, 1816), True, 'import tensorflow as tf\n'), ((4230, 4243), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4241, 4243), False, 'from collections import OrderedDict\n'), ((4257, 4270), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4268, 4270), False, 'from collections import OrderedDict\n'), ((4288, 4301), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4299, 4301), False, 'from collections import OrderedDict\n'), ((4319, 4332), 'collections.OrderedDict', 
'OrderedDict', ([], {}), '()\n', (4330, 4332), False, 'from collections import OrderedDict\n'), ((8699, 8808), 'Putil.DenseNet.model_base.DenseNetBlockLayers', 'dmb.DenseNetBlockLayers', (['output_map', 'param_dtype', 'grow', '"""reducor"""', 'regularize_weight', 'kernel', 'layer_param'], {}), "(output_map, param_dtype, grow, 'reducor',\n regularize_weight, kernel, layer_param)\n", (8722, 8808), True, 'import Putil.DenseNet.model_base as dmb\n'), ((15223, 15244), 'Putil.DenseNet.model_base.DenseNetProvide', 'dmb.DenseNetProvide', ([], {}), '()\n', (15242, 15244), True, 'import Putil.DenseNet.model_base as dmb\n'), ((15255, 15278), 'Putil.DenseNet.model_base.DeDenseNetProvide', 'dmb.DeDenseNetProvide', ([], {}), '()\n', (15276, 15278), True, 'import Putil.DenseNet.model_base as dmb\n'), ((15296, 15412), 'Putil.DenseNet.model_base.DenseNetFromParamDict', 'dmb.DenseNetFromParamDict', (['output_map', 'training', 'DenseNetConfig'], {'dense_net_provide': 'cl', 'block_name_flag': '"""encode-"""'}), "(output_map, training, DenseNetConfig,\n dense_net_provide=cl, block_name_flag='encode-')\n", (15321, 15412), True, 'import Putil.DenseNet.model_base as dmb\n'), ((516, 529), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (524, 529), True, 'import tensorflow as tf\n'), ((545, 558), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (553, 558), True, 'import tensorflow as tf\n'), ((584, 611), 'tensorflow.stack', 'tf.stack', (['(img_w, img_h, 1)'], {}), '((img_w, img_h, 1))\n', (592, 611), True, 'import tensorflow as tf\n'), ((670, 701), 'tensorflow.stack', 'tf.stack', (['(-1, img_w, img_h, 1)'], {}), '((-1, img_w, img_h, 1))\n', (678, 701), True, 'import tensorflow as tf\n'), ((966, 1007), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': 'stddev'}), '(shape, stddev=stddev)\n', (985, 1007), True, 'import tensorflow as tf\n'), ((1192, 1215), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv2d"""'], {}), "('conv2d')\n", (1205, 
1215), True, 'import tensorflow as tf\n'), ((1235, 1292), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(x, W, strides=[1, 1, 1, 1], padding='VALID')\n", (1247, 1292), True, 'import tensorflow as tf\n'), ((1313, 1339), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv_2d', 'b'], {}), '(conv_2d, b)\n', (1327, 1339), True, 'import tensorflow as tf\n'), ((1355, 1391), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['conv_2d_b', 'keep_prob_'], {}), '(conv_2d_b, keep_prob_)\n', (1368, 1391), True, 'import tensorflow as tf\n'), ((1430, 1455), 'tensorflow.name_scope', 'tf.name_scope', (['"""deconv2d"""'], {}), "('deconv2d')\n", (1443, 1455), True, 'import tensorflow as tf\n'), ((1475, 1486), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1483, 1486), True, 'import tensorflow as tf\n'), ((1510, 1581), 'tensorflow.stack', 'tf.stack', (['[x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] // 2]'], {}), '([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] // 2])\n', (1518, 1581), True, 'import tensorflow as tf\n'), ((1591, 1711), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'W', 'output_shape'], {'strides': '[1, stride, stride, 1]', 'padding': '"""VALID"""', 'name': '"""conv2d_transpose"""'}), "(x, W, output_shape, strides=[1, stride, stride, 1],\n padding='VALID', name='conv2d_transpose')\n", (1613, 1711), True, 'import tensorflow as tf\n'), ((1857, 1889), 'tensorflow.name_scope', 'tf.name_scope', (['"""crop_and_concat"""'], {}), "('crop_and_concat')\n", (1870, 1889), True, 'import tensorflow as tf\n'), ((1910, 1922), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (1918, 1922), True, 'import tensorflow as tf\n'), ((1942, 1954), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (1950, 1954), True, 'import tensorflow as tf\n'), ((2170, 2197), 'tensorflow.slice', 'tf.slice', (['x1', 'offsets', 'size'], {}), '(x1, offsets, size)\n', (2178, 2197), True, 'import 
tensorflow as tf\n'), ((2213, 2240), 'tensorflow.concat', 'tf.concat', (['[x1_crop, x2]', '(3)'], {}), '([x1_crop, x2], 3)\n', (2222, 2240), True, 'import tensorflow as tf\n'), ((2288, 2323), 'tensorflow.name_scope', 'tf.name_scope', (['"""pixel_wise_softmax"""'], {}), "('pixel_wise_softmax')\n", (2301, 2323), True, 'import tensorflow as tf\n'), ((2394, 2459), 'tensorflow.reduce_max', 'tf.reduce_max', (['output_map'], {'axis': '(3)', 'keepdims': '(True)', 'name': '"""calc_max"""'}), "(output_map, axis=3, keepdims=True, name='calc_max')\n", (2407, 2459), True, 'import tensorflow as tf\n'), ((2581, 2650), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['exponential_map'], {'axis': '(3)', 'keepdims': '(True)', 'name': '"""exp_sum"""'}), "(exponential_map, axis=3, keepdims=True, name='exp_sum')\n", (2594, 2650), True, 'import tensorflow as tf\n'), ((2666, 2718), 'tensorflow.div', 'tf.div', (['exponential_map', 'normalize'], {'name': '"""normalize"""'}), "(exponential_map, normalize, name='normalize')\n", (2672, 2718), True, 'import tensorflow as tf\n'), ((3947, 3977), 'tensorflow.name_scope', 'tf.name_scope', (['"""preprocessing"""'], {}), "('preprocessing')\n", (3960, 3977), True, 'import tensorflow as tf\n'), ((6982, 7009), 'tensorflow.name_scope', 'tf.name_scope', (['"""output_map"""'], {}), "('output_map')\n", (6995, 7009), True, 'import tensorflow as tf\n'), ((7221, 7237), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv'], {}), '(conv)\n', (7231, 7237), True, 'import tensorflow as tf\n'), ((18992, 19016), 'tensorflow.name_scope', 'tf.name_scope', (['"""fcn_acc"""'], {}), "('fcn_acc')\n", (19005, 19016), True, 'import tensorflow as tf\n'), ((19032, 19054), 'tensorflow.arg_max', 'tf.arg_max', (['logits', '(-1)'], {}), '(logits, -1)\n', (19042, 19054), True, 'import tensorflow as tf\n'), ((19071, 19084), 'tensorflow.shape', 'tf.shape', (['pro'], {}), '(pro)\n', (19079, 19084), True, 'import tensorflow as tf\n'), ((19105, 19131), 'tensorflow.slice', 'tf.slice', (['shape', 
'[1]', '[-1]'], {}), '(shape, [1], [-1])\n', (19113, 19131), True, 'import tensorflow as tf\n'), ((19270, 19291), 'tensorflow.arg_max', 'tf.arg_max', (['label', '(-1)'], {}), '(label, -1)\n', (19280, 19291), True, 'import tensorflow as tf\n'), ((19468, 19505), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['one_batch_sum'], {'axis': '(0)'}), '(one_batch_sum, axis=0)\n', (19482, 19505), True, 'import tensorflow as tf\n'), ((19794, 19819), 'tensorflow.name_scope', 'tf.name_scope', (['"""fcn_loss"""'], {}), "('fcn_loss')\n", (19807, 19819), True, 'import tensorflow as tf\n'), ((2493, 2552), 'tensorflow.subtract', 'tf.subtract', (['output_map', 'max_axis', '"""sub_for_avoid_overflow"""'], {}), "(output_map, max_axis, 'sub_for_avoid_overflow')\n", (2504, 2552), True, 'import tensorflow as tf\n'), ((3992, 4003), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4000, 4003), True, 'import tensorflow as tf\n'), ((4020, 4031), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4028, 4031), True, 'import tensorflow as tf\n'), ((4067, 4099), 'tensorflow.stack', 'tf.stack', (['[-1, nx, ny, channels]'], {}), '([-1, nx, ny, channels])\n', (4075, 4099), True, 'import tensorflow as tf\n'), ((4148, 4165), 'tensorflow.shape', 'tf.shape', (['x_image'], {}), '(x_image)\n', (4156, 4165), True, 'import tensorflow as tf\n'), ((4559, 4601), 'numpy.sqrt', 'np.sqrt', (['(2 / (filter_size ** 2 * features))'], {}), '(2 / (filter_size ** 2 * features))\n', (4566, 4601), True, 'import numpy as np\n'), ((5150, 5167), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv1'], {}), '(conv1)\n', (5160, 5167), True, 'import tensorflow as tf\n'), ((5258, 5275), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv2'], {}), '(conv2)\n', (5268, 5275), True, 'import tensorflow as tf\n'), ((5821, 5863), 'numpy.sqrt', 'np.sqrt', (['(2 / (filter_size ** 2 * features))'], {}), '(2 / (filter_size ** 2 * features))\n', (5828, 5863), True, 'import numpy as np\n'), ((6643, 6660), 'tensorflow.nn.relu', 'tf.nn.relu', 
(['conv1'], {}), '(conv1)\n', (6653, 6660), True, 'import tensorflow as tf\n'), ((6737, 6754), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv2'], {}), '(conv2)\n', (6747, 6754), True, 'import tensorflow as tf\n'), ((7182, 7198), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), '(1.0)\n', (7193, 7198), True, 'import tensorflow as tf\n'), ((7309, 7335), 'tensorflow.name_scope', 'tf.name_scope', (['"""summaries"""'], {}), "('summaries')\n", (7322, 7335), True, 'import tensorflow as tf\n'), ((9364, 9430), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {'mode': '"""fan_avg"""', 'dtype': 'param_dtype'}), "(mode='fan_avg', dtype=param_dtype)\n", (9395, 9430), True, 'import tensorflow as tf\n'), ((9459, 9499), 'tensorflow.contrib.layers.l2_regularizer', 'layers.l2_regularizer', (['regularize_weight'], {}), '(regularize_weight)\n', (9480, 9499), False, 'from tensorflow.contrib import layers\n'), ((9526, 9565), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {'dtype': 'param_dtype'}), '(dtype=param_dtype)\n', (9546, 9565), True, 'import tensorflow as tf\n'), ((9592, 9632), 'tensorflow.contrib.layers.l2_regularizer', 'layers.l2_regularizer', (['regularize_weight'], {}), '(regularize_weight)\n', (9613, 9632), False, 'from tensorflow.contrib import layers\n'), ((18739, 18762), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ac"""'], {}), "('ac')\n", (18756, 18762), True, 'import tensorflow as tf\n'), ((18784, 18816), 'tensorflow.Variable', 'tf.Variable', (['(0.1)'], {'trainable': '(True)'}), '(0.1, trainable=True)\n', (18795, 18816), True, 'import tensorflow as tf\n'), ((18842, 18891), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['output_map', 'alpha'], {'name': '"""PReLU"""'}), "(output_map, alpha, name='PReLU')\n", (18858, 18891), True, 'import tensorflow as tf\n'), ((19325, 19359), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['(pro - l)'], {'axis': '(-1)'}), '(pro - l, axis=-1)\n', (19341, 19359), True, 
'import tensorflow as tf\n'), ((7868, 7947), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('dw_convolution_%02d' % k + '/activations')", 'dw_h_convs[k]'], {}), "('dw_convolution_%02d' % k + '/activations', dw_h_convs[k])\n", (7888, 7947), True, 'import tensorflow as tf\n'), ((8005, 8082), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('up_convolution_%s' % k + '/activations')", 'up_h_convs[k]'], {}), "('up_convolution_%s' % k + '/activations', up_h_convs[k])\n", (8025, 8082), True, 'import tensorflow as tf\n'), ((18356, 18422), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {'mode': '"""fan_avg"""', 'dtype': 'param_dtype'}), "(mode='fan_avg', dtype=param_dtype)\n", (18387, 18422), True, 'import tensorflow as tf\n'), ((18453, 18519), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {'mode': '"""fan_avg"""', 'dtype': 'param_dtype'}), "(mode='fan_avg', dtype=param_dtype)\n", (18484, 18519), True, 'import tensorflow as tf\n'), ((18552, 18592), 'tensorflow.contrib.layers.l2_regularizer', 'layers.l2_regularizer', (['regularize_weight'], {}), '(regularize_weight)\n', (18573, 18592), False, 'from tensorflow.contrib import layers\n'), ((18623, 18663), 'tensorflow.contrib.layers.l2_regularizer', 'layers.l2_regularizer', (['regularize_weight'], {}), '(regularize_weight)\n', (18644, 18663), False, 'from tensorflow.contrib import layers\n'), ((19401, 19439), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['no_zeros_count'], {'axis': '(-1)'}), '(no_zeros_count, axis=-1)\n', (19414, 19439), True, 'import tensorflow as tf\n'), ((20382, 20421), 'tensorflow.multiply', 'tf.multiply', (['flat_labels', 'class_weights'], {}), '(flat_labels, class_weights)\n', (20393, 20421), True, 'import tensorflow as tf\n'), ((20571, 20658), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'flat_logits', 'labels': 'flat_labels'}), 
'(logits=flat_logits, labels=\n flat_labels)\n', (20613, 20658), True, 'import tensorflow as tf\n'), ((21459, 21492), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prediction * label)'], {}), '(prediction * label)\n', (21472, 21492), True, 'import tensorflow as tf\n'), ((2795, 2835), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['output_map', '(1e-10)', '(1.0)'], {}), '(output_map, 1e-10, 1.0)\n', (2811, 2835), True, 'import tensorflow as tf\n'), ((19207, 19243), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(sub_shape * sub_shape)'], {}), '(sub_shape * sub_shape)\n', (19220, 19243), True, 'import tensorflow as tf\n'), ((20961, 20998), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['weighted_loss'], {'axis': '(0)'}), '(weighted_loss, axis=0)\n', (20975, 20998), True, 'import tensorflow as tf\n'), ((21547, 21567), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['label'], {}), '(label)\n', (21560, 21567), True, 'import tensorflow as tf\n'), ((19179, 19203), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sub_shape'], {}), '(sub_shape)\n', (19192, 19203), True, 'import tensorflow as tf\n'), ((21091, 21178), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'flat_logits', 'labels': 'flat_labels'}), '(logits=flat_logits, labels=\n flat_labels)\n', (21133, 21178), True, 'import tensorflow as tf\n'), ((21519, 21544), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prediction'], {}), '(prediction)\n', (21532, 21544), True, 'import tensorflow as tf\n'), ((20224, 20248), 'Putil.np.util.np_type', 'npu.np_type', (['param_dtype'], {}), '(param_dtype)\n', (20235, 20248), True, 'import Putil.np.util as npu\n')] |
from gridworld import GridWorldMDP
from qlearn import QLearner
import numpy as np
import matplotlib.pyplot as plt
def plot_convergence(utility_grids, policy_grids):
    """Plot per-iteration convergence of utilities and of the greedy policy.

    Left axis: sum of squared differences between consecutive utility
    grids.  Right axis: number of states whose best action changed.
    """
    fig, utility_axis = plt.subplots()
    policy_axis = utility_axis.twinx()

    utility_deltas = np.diff(utility_grids)
    utility_ssd = np.sum(utility_deltas ** 2, axis=(0, 1))
    utility_axis.plot(utility_ssd, 'b.-')
    utility_axis.set_ylabel('Change in Utility', color='b')

    policy_deltas = np.diff(policy_grids)
    policy_changes = np.count_nonzero(policy_deltas, axis=(0, 1))
    policy_axis.plot(policy_changes, 'r.-')
    policy_axis.set_ylabel('Change in Best Policy', color='r')
if __name__ == '__main__':
    shape = (6, 8)
    goal = (5, -1)
    trap1 = (1, -1)
    trap2 = (4, 1)
    trap3 = (4, 2)
    trap4 = (4, 3)
    trap5 = (4, 4)
    trap6 = (4, 5)
    trap7 = (4, 6)
    obstacle1 = (1, 1)
    obstacle2 = (0, 5)
    obstacle3 = (2, 3)
    start = (2, 0)
    obstacle4 = (3, 5)
    default_reward = -0.1
    goal_reward = 1
    trap_reward = -1

    # Reward layout: small step penalty everywhere, +1 at the goal,
    # -1 in traps, 0 on obstacle cells.
    reward_grid = np.zeros(shape) + default_reward
    reward_grid[goal] = goal_reward
    reward_grid[trap1] = trap_reward
    reward_grid[trap2] = trap_reward
    reward_grid[trap3] = trap_reward
    reward_grid[trap4] = trap_reward
    reward_grid[trap5] = trap_reward
    reward_grid[trap6] = trap_reward
    reward_grid[trap7] = trap_reward
    reward_grid[obstacle1] = 0
    reward_grid[obstacle2] = 0
    reward_grid[obstacle3] = 0
    reward_grid[obstacle4] = 0

    # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    terminal_mask = np.zeros_like(reward_grid, dtype=bool)
    terminal_mask[goal] = True
    terminal_mask[trap1] = True
    terminal_mask[trap2] = True
    terminal_mask[trap3] = True
    terminal_mask[trap4] = True
    terminal_mask[trap5] = True
    terminal_mask[trap6] = True
    terminal_mask[trap7] = True

    obstacle_mask = np.zeros_like(reward_grid, dtype=bool)
    # Consistency: reuse the named obstacle coordinates instead of
    # repeating the same literals (they duplicated obstacle1..4 above).
    obstacle_mask[obstacle1] = True
    obstacle_mask[obstacle2] = True
    obstacle_mask[obstacle3] = True
    obstacle_mask[obstacle4] = True

    # Stochastic transitions: intended action with prob 0.8, slip one
    # action to either side with prob 0.1 each.
    gw = GridWorldMDP(reward_grid=reward_grid,
                      obstacle_mask=obstacle_mask,
                      terminal_mask=terminal_mask,
                      action_probabilities=[
                          (-1, 0.1),
                          (0, 0.8),
                          (1, 0.1),
                      ],
                      no_action_probability=0.0)

    mdp_solvers = {'Value Iteration': gw.run_value_iterations,
                   'Policy Iteration': gw.run_policy_iterations}

    for solver_name, solver_fn in mdp_solvers.items():
        print('Final result of {}:'.format(solver_name))
        policy_grids, utility_grids = solver_fn(iterations=25, discount=0.5)
        print(policy_grids[:, :, -1])
        print(utility_grids[:, :, -1])
        plt.figure()
        gw.plot_policy(utility_grids[:, :, -1])
        plot_convergence(utility_grids, policy_grids)
        plt.show()

    ql = QLearner(num_states=(shape[0] * shape[1]),
                  num_actions=4,
                  learning_rate=0.05,
                  discount_rate=0.5,
                  random_action_prob=0.8,
                  random_action_decay_rate=1,
                  dyna_iterations=0)

    start_state = gw.grid_coordinates_to_indices(start)

    iterations = 1000
    flat_policies, flat_utilities = ql.learn(start_state,
                                             gw.generate_experience,
                                             iterations=iterations)

    new_shape = (gw.shape[0], gw.shape[1], iterations)
    ql_utility_grids = flat_utilities.reshape(new_shape)
    ql_policy_grids = flat_policies.reshape(new_shape)
    print('Final result of QLearning:')
    print(ql_policy_grids[:, :, -1])
    print(ql_utility_grids[:, :, -1])

    # Regret proxy: per-iteration count of states whose greedy action
    # still differs from the final learned policy.
    M, N, num = ql_policy_grids.shape
    fig, ax1 = plt.subplots()
    regret = np.count_nonzero(ql_policy_grids - ql_policy_grids[:, :, -1].reshape(M, N, 1), axis=(0, 1))
    ax1.plot(regret)
    plt.show()

    plt.figure()
    gw.plot_policy(ql_utility_grids[:, :, -1], ql_policy_grids[:, :, -1])
    plot_convergence(ql_utility_grids, ql_policy_grids)
    plt.show()
| [
"numpy.diff",
"matplotlib.pyplot.figure",
"numpy.zeros",
"gridworld.GridWorldMDP",
"qlearn.QLearner",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((180, 194), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (192, 194), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1338), 'numpy.zeros_like', 'np.zeros_like', (['reward_grid'], {'dtype': 'np.bool'}), '(reward_grid, dtype=np.bool)\n', (1310, 1338), True, 'import numpy as np\n'), ((1588, 1629), 'numpy.zeros_like', 'np.zeros_like', (['reward_grid'], {'dtype': 'np.bool'}), '(reward_grid, dtype=np.bool)\n', (1601, 1629), True, 'import numpy as np\n'), ((1749, 1933), 'gridworld.GridWorldMDP', 'GridWorldMDP', ([], {'reward_grid': 'reward_grid', 'obstacle_mask': 'obstacle_mask', 'terminal_mask': 'terminal_mask', 'action_probabilities': '[(-1, 0.1), (0, 0.8), (1, 0.1)]', 'no_action_probability': '(0.0)'}), '(reward_grid=reward_grid, obstacle_mask=obstacle_mask,\n terminal_mask=terminal_mask, action_probabilities=[(-1, 0.1), (0, 0.8),\n (1, 0.1)], no_action_probability=0.0)\n', (1761, 1933), False, 'from gridworld import GridWorldMDP\n'), ((2467, 2640), 'qlearn.QLearner', 'QLearner', ([], {'num_states': '(shape[0] * shape[1])', 'num_actions': '(4)', 'learning_rate': '(0.05)', 'discount_rate': '(0.5)', 'random_action_prob': '(0.8)', 'random_action_decay_rate': '(1)', 'dyna_iterations': '(0)'}), '(num_states=shape[0] * shape[1], num_actions=4, learning_rate=0.05,\n discount_rate=0.5, random_action_prob=0.8, random_action_decay_rate=1,\n dyna_iterations=0)\n', (2475, 2640), False, 'from qlearn import QLearner\n'), ((3192, 3206), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3204, 3206), True, 'import matplotlib.pyplot as plt\n'), ((3326, 3336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3334, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3340, 3352), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3350, 3352), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3486, 3488), True, 'import matplotlib.pyplot as plt\n'), ((398, 419), 
'numpy.diff', 'np.diff', (['policy_grids'], {}), '(policy_grids)\n', (405, 419), True, 'import numpy as np\n'), ((863, 878), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (871, 878), True, 'import numpy as np\n'), ((2344, 2356), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2354, 2356), True, 'import matplotlib.pyplot as plt\n'), ((2449, 2459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2457, 2459), True, 'import matplotlib.pyplot as plt\n'), ((246, 268), 'numpy.diff', 'np.diff', (['utility_grids'], {}), '(utility_grids)\n', (253, 268), True, 'import numpy as np\n')] |
from collections import namedtuple
from multiprocessing import pool
from pathlib import Path
from types import prepare_class
import pandas as pd
from tqdm import tqdm
import random
import torch.nn as nn
import torch
from sklearn.neighbors import KernelDensity
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pickle
from scipy.spatial import distance
import json
import time
import multiprocessing as mp
# Fix the sampling order so repeated runs compare the same instances.
random.seed(42)
# Cosine similarity along dim 0, used to compare 1-D feature vectors.
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
root = ""
c = 0
# Class catalogue (150 entries -- presumably the ADE20K label set; verify).
# ";" in class names is replaced with "-" so they are safe in file paths.
ade = pd.read_csv("data/features_150.csv")
ade_classes = {row["Idx"]:row["Name"].replace(";", "-") for idx, row in ade.iterrows()}
def calc_distribution(same, diff):
    """Fit Gaussian KDEs to the same-class and diff-class similarity samples.

    :param same: similarity scores against same-class instances
    :param diff: similarity scores against different-class instances
    :return: (kde_same, kde_diff) fitted KernelDensity estimators
    """
    def _fit(samples):
        # KernelDensity expects a 2-D (n_samples, n_features) array.
        column = np.array(samples).reshape(-1, 1)
        return KernelDensity(kernel="gaussian", bandwidth=0.75).fit(column)

    return _fit(same), _fit(diff)
def make_plot(path, same, diff, title, feature=None):
    """Save a same-vs-diff similarity density plot under *path*.

    :param path: output directory (created if missing); the figure is
        written as "<path>same-diff-dist.png", so *path* should end in "/"
    :param same: similarity scores to same-class instances
    :param diff: similarity scores to different-class instances
    :param title: figure title
    :param feature: unused (kept for interface compatibility)
    """
    Path(path).mkdir(parents=True, exist_ok=True)
    fig, ax = plt.subplots()
    for label, scores in (("Same", same), ("Diff", diff)):
        sns.distplot(scores, ax=ax, kde=True, hist=False, rug=False, label=label)
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 12.5])
    fig.add_axes(ax)
    plt.legend()
    fig.suptitle(title, fontsize=10)
    plt.savefig(path + "same-diff-dist.png")
    plt.close('all')
def load_pckl(path):
    """Deserialize and return the pickled object stored at *path*."""
    with open(path, "rb") as handle:
        return pickle.load(handle)
def closest(current_image, current_image_index, images, current_feature, diff):
    """Return the highest cosine similarity between *current_feature* and the
    per-instance features of every other image in *images*.

    :param current_image: path of the image the feature came from; rows with
        this path are skipped so an instance never matches its own image
    :param current_image_index: instance index within its image (unused here)
    :param images: DataFrame with "Path" and "Idx" columns
    :param current_feature: feature tensor compared via the module-level ``cos``
    :param diff: unused flag (kept for interface compatibility)
    :return: best similarity found (0 if none); returns early on an exact 1.0
    """
    # Renamed the accumulator: the original local `closest` shadowed this
    # function's own name.
    best = 0
    for _, img in images.iterrows():
        if current_image == img["Path"]:
            continue
        try:
            diff_feature = torch.load(img["Path"] + '/indiv_features.pt')[img["Idx"]]
        except Exception:
            # Best-effort: skip images whose feature file is missing or
            # unreadable.  Bug fix: the bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit.
            continue
        dist = float(cos(current_feature, diff_feature))
        if dist == 1.0:
            # Cosine similarity cannot exceed 1.0, so stop early.
            return dist
        best = max(best, dist)
    return best
def worker(csv, cl, mode):
    """Build and save same-vs-diff similarity distributions for class *cl*.

    Samples up to 1000 instances of the class, finds each one's closest
    same-class and closest different-class neighbour (cosine similarity),
    plots both distributions and pickles fitted KDEs next to the plot.

    :param csv: DataFrame with "Idx", "Class" and "Path" columns
    :param cl: class id to process
    :param mode: dataset split name, used only in the output path
    """
    same_class = csv.loc[csv["Class"] == cl].drop_duplicates()
    diff_class = csv.loc[csv["Class"] != cl].drop_duplicates()
    same_class.reset_index(drop=True, inplace=True)
    # Nothing to compare against for an empty class.
    if len(same_class) == 0: return
    diff_class.reset_index(drop=True, inplace=True)
    closest_same_class = []
    closest_diff_class = []
    # Cap the per-class work at 1000 sampled instances.
    sample_length = min( 1000, len(same_class) )
    for idx, d in tqdm(same_class.sample(sample_length).iterrows(), total=sample_length, desc="Closest"):
        # Best-effort: skip instances whose feature file is missing.
        try: current_feature = torch.load(d["Path"] + '/indiv_features.pt')[d["Idx"]]
        except: continue
        # Closest same-class neighbour, then the closest neighbour drawn
        # from a 1.5x larger sample of the other classes.
        best_same = closest(d["Path"], d["Idx"],same_class.sample(sample_length), current_feature, False)
        best_diff = closest(d["Path"], d["Idx"], diff_class.sample(int(sample_length*1.5)), current_feature, True)
        # Drop pairs where either side found no (non-zero) neighbour.
        if not best_same or not best_diff:
            continue
        closest_same_class.append( best_same )
        closest_diff_class.append( best_diff )
    if not closest_same_class or not closest_diff_class: return
    path = "data/figures/{}/closest_same_closest_diff/{}/".format(mode,ade_classes[cl])
    title = "{} - Same-Diff Class".format(ade_classes[cl])
    make_plot(path, closest_same_class, closest_diff_class, title)
    # Persist the fitted KDEs alongside the figure for later reuse.
    kde_same, kde_diff = calc_distribution(closest_same_class, closest_diff_class)
    with open(path + "distribution.pckl", "wb") as f:
        pickle.dump([kde_same, kde_diff], f)
def main(mode):
    """Run the distribution worker over the first half of all classes.

    :param mode: dataset split name; features are read from
        data/<mode>/features.csv
    """
    features = pd.read_csv("data/{}/features.csv".format(mode), names=["Idx", "Class", "Path"])
    features["Instance"] = [row.split("/")[3] for row in features["Path"]]
    classes = features["Class"].unique()
    # Only the first half of the classes is processed by this script.
    first_half = classes[:len(classes) // 2]
    for cl in tqdm(first_half, desc="Total"):
        worker(features, cl, mode)
if __name__ == "__main__":
    # Default split processed when invoked as a script.
    main("train_non_torch")
"torch.nn.CosineSimilarity",
"matplotlib.pyplot.savefig",
"pickle.dump",
"pandas.read_csv",
"seaborn.distplot",
"pathlib.Path",
"torch.load",
"pickle.load",
"sklearn.neighbors.KernelDensity",
"random.seed",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib... | [((439, 454), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (450, 454), False, 'import random\n'), ((461, 498), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (480, 498), True, 'import torch.nn as nn\n'), ((520, 556), 'pandas.read_csv', 'pd.read_csv', (['"""data/features_150.csv"""'], {}), "('data/features_150.csv')\n", (531, 556), True, 'import pandas as pd\n'), ((1036, 1050), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1048, 1050), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1299), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1297, 1299), True, 'import matplotlib.pyplot as plt\n'), ((1341, 1381), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + 'same-diff-dist.png')"], {}), "(path + 'same-diff-dist.png')\n", (1352, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1389, 1405), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1398, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1204), 'seaborn.distplot', 'sns.distplot', (['a'], {'ax': 'ax', 'kde': '(True)', 'hist': '(False)', 'rug': '(False)', 'label': 'names[idx]'}), '(a, ax=ax, kde=True, hist=False, rug=False, label=names[idx])\n', (1143, 1204), True, 'import seaborn as sns\n'), ((1466, 1480), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1477, 1480), False, 'import pickle\n'), ((3786, 3822), 'pickle.dump', 'pickle.dump', (['[kde_same, kde_diff]', 'f'], {}), '([kde_same, kde_diff], f)\n', (3797, 3822), False, 'import pickle\n'), ((696, 744), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.75)'}), "(kernel='gaussian', bandwidth=0.75)\n", (709, 744), False, 'from sklearn.neighbors import KernelDensity\n'), ((794, 842), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.75)'}), "(kernel='gaussian', bandwidth=0.75)\n", 
(807, 842), False, 'from sklearn.neighbors import KernelDensity\n'), ((965, 975), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (969, 975), False, 'from pathlib import Path\n'), ((748, 762), 'numpy.array', 'np.array', (['same'], {}), '(same)\n', (756, 762), True, 'import numpy as np\n'), ((846, 860), 'numpy.array', 'np.array', (['diff'], {}), '(diff)\n', (854, 860), True, 'import numpy as np\n'), ((2012, 2058), 'torch.load', 'torch.load', (["(img['Path'] + '/indiv_features.pt')"], {}), "(img['Path'] + '/indiv_features.pt')\n", (2022, 2058), False, 'import torch\n'), ((2889, 2933), 'torch.load', 'torch.load', (["(d['Path'] + '/indiv_features.pt')"], {}), "(d['Path'] + '/indiv_features.pt')\n", (2899, 2933), False, 'import torch\n')] |
# coding=utf-8
# Author: <NAME> Cruz <<EMAIL>>
#
# License: BSD 3 clause
"""
====================================================================
Dynamic selection with linear classifiers: XOR example
====================================================================
This example shows that DS can deal with non-linear problem (XOR) using
a combination of a few linear base classifiers.
- 10 dynamic selection methods (5 DES and 5 DCS) are evaluated with
a pool composed of Decision stumps.
- Since we use Bagging to generate the base classifiers, we also
included its performance as a baseline comparison.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from deslib.dcs import LCA
from deslib.dcs import MLA
from deslib.dcs import OLA
from deslib.dcs import MCB
from deslib.dcs import Rank
from deslib.des import DESKNN
from deslib.des import KNORAE
from deslib.des import KNORAU
from deslib.des import KNOP
from deslib.des import METADES
from deslib.util.datasets import make_xor
###############################################################################
# Defining helper functions to facilitate plotting the decision boundaries:
def plot_classifier_decision(ax, clf, X, mode='line', **params):
xx, yy = make_grid(X[:, 0], X[:, 1])
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
if mode == 'line':
ax.contour(xx, yy, Z, **params)
else:
ax.contourf(xx, yy, Z, **params)
ax.set_xlim((np.min(X[:, 0]), np.max(X[:, 0])))
ax.set_ylim((np.min(X[:, 1]), np.max(X[:, 0])))
def plot_dataset(X, y, ax=None, title=None, **params):
if ax is None:
ax = plt.gca()
ax.scatter(X[:, 0], X[:, 1], marker='o', c=y, s=25,
edgecolor='k', **params)
ax.set_xlabel('Feature 1')
ax.set_ylabel('Feature 2')
if title is not None:
ax.set_title(title)
return ax
def make_grid(x, y, h=.02):
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
# Prepare the DS techniques. Changing k value to 5.
def initialize_ds(pool_classifiers, X, y, k=5):
knorau = KNORAU(pool_classifiers, k=k)
kne = KNORAE(pool_classifiers, k=k)
desknn = DESKNN(pool_classifiers, k=k)
ola = OLA(pool_classifiers, k=k)
lca = LCA(pool_classifiers, k=k)
mla = MLA(pool_classifiers, k=k)
mcb = MCB(pool_classifiers, k=k)
rank = Rank(pool_classifiers, k=k)
knop = KNOP(pool_classifiers, k=k)
meta = METADES(pool_classifiers, k=k)
list_ds = [knorau, kne, ola, lca, mla, desknn, mcb, rank, knop, meta]
names = ['KNORA-U', 'KNORA-E', 'OLA', 'LCA', 'MLA', 'DESKNN', 'MCB',
'RANK', 'KNOP', 'META-DES']
# fit the ds techniques
for ds in list_ds:
ds.fit(X, y)
return list_ds, names
###############################################################################
# Generating the dataset and training the pool of classifiers.
#
rng = np.random.RandomState(1234)
X, y = make_xor(1000, random_state=rng)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=rng)
X_DSEL, X_test, y_DSEL, y_test = train_test_split(X_train, y_train,
test_size=0.5,
random_state=rng)
pool_classifiers = BaggingClassifier(DecisionTreeClassifier(max_depth=1),
n_estimators=10,
random_state=rng)
pool_classifiers.fit(X_train, y_train)
###############################################################################
# Merging training and validation data to compose DSEL
# -----------------------------------------------------
# In this example merge the training data with the validation, to create a
# DSEL having more examples for the competence estimation. Using the training
# data for dynamic selection can be beneficial when dealing with small sample
# size datasets. However, in this case we need to have a pool composed of weak
# classifier so that the base classifiers are not able to memorize the
# training data (overfit).
X_DSEL = np.vstack((X_DSEL, X_train))
y_DSEL = np.hstack((y_DSEL, y_train))
list_ds, names = initialize_ds(pool_classifiers, X_DSEL, y_DSEL, k=7)
fig, sub = plt.subplots(4, 3, figsize=(13, 10))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
ax_data = sub.flatten()[0]
ax_bagging = sub.flatten()[1]
plot_dataset(X_train, y_train, ax=ax_data, title="Training data")
plot_dataset(X_train, y_train, ax=ax_bagging)
plot_classifier_decision(ax_bagging, pool_classifiers,
X_train, mode='filled', alpha=0.4)
ax_bagging.set_title("Bagging")
# Plotting the decision border of the DS methods
for ds, name, ax in zip(list_ds, names, sub.flatten()[2:]):
plot_dataset(X_train, y_train, ax=ax)
plot_classifier_decision(ax, ds, X_train, mode='filled', alpha=0.4)
ax.set_xlim((np.min(X_train[:, 0]) - 0.1, np.max(X_train[:, 0] + 0.1)))
ax.set_ylim((np.min(X_train[:, 1]) - 0.1, np.max(X_train[:, 1] + 0.1)))
ax.set_title(name)
plt.show()
plt.tight_layout()
###############################################################################
# Evaluation on the test set
# --------------------------
#
# Finally, let's evaluate the classification accuracy of DS techniques and
# Bagging on the test set:
for ds, name in zip(list_ds, names):
print('Accuracy ' + name + ': ' + str(ds.score(X_test, y_test)))
print('Accuracy Bagging: ' + str(pool_classifiers.score(X_test, y_test)))
| [
"deslib.des.KNOP",
"numpy.hstack",
"deslib.dcs.OLA",
"deslib.des.KNORAU",
"deslib.util.datasets.make_xor",
"numpy.random.RandomState",
"numpy.arange",
"deslib.dcs.MCB",
"deslib.des.KNORAE",
"sklearn.tree.DecisionTreeClassifier",
"deslib.dcs.LCA",
"numpy.max",
"numpy.vstack",
"numpy.min",
... | [((3222, 3249), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (3243, 3249), True, 'import numpy as np\n'), ((3257, 3289), 'deslib.util.datasets.make_xor', 'make_xor', (['(1000)'], {'random_state': 'rng'}), '(1000, random_state=rng)\n', (3265, 3289), False, 'from deslib.util.datasets import make_xor\n'), ((3325, 3380), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.5)', 'random_state': 'rng'}), '(X, y, test_size=0.5, random_state=rng)\n', (3341, 3380), False, 'from sklearn.model_selection import train_test_split\n'), ((3466, 3533), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.5)', 'random_state': 'rng'}), '(X_train, y_train, test_size=0.5, random_state=rng)\n', (3482, 3533), False, 'from sklearn.model_selection import train_test_split\n'), ((4468, 4496), 'numpy.vstack', 'np.vstack', (['(X_DSEL, X_train)'], {}), '((X_DSEL, X_train))\n', (4477, 4496), True, 'import numpy as np\n'), ((4506, 4534), 'numpy.hstack', 'np.hstack', (['(y_DSEL, y_train)'], {}), '((y_DSEL, y_train))\n', (4515, 4534), True, 'import numpy as np\n'), ((4617, 4653), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(3)'], {'figsize': '(13, 10)'}), '(4, 3, figsize=(13, 10))\n', (4629, 4653), True, 'import matplotlib.pyplot as plt\n'), ((4654, 4697), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.4)', 'hspace': '(0.4)'}), '(wspace=0.4, hspace=0.4)\n', (4673, 4697), True, 'import matplotlib.pyplot as plt\n'), ((5415, 5425), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5423, 5425), True, 'import matplotlib.pyplot as plt\n'), ((5426, 5444), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5442, 5444), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2429), 'deslib.des.KNORAU', 'KNORAU', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2406, 2429), False, 
'from deslib.des import KNORAU\n'), ((2440, 2469), 'deslib.des.KNORAE', 'KNORAE', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2446, 2469), False, 'from deslib.des import KNORAE\n'), ((2483, 2512), 'deslib.des.DESKNN', 'DESKNN', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2489, 2512), False, 'from deslib.des import DESKNN\n'), ((2523, 2549), 'deslib.dcs.OLA', 'OLA', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2526, 2549), False, 'from deslib.dcs import OLA\n'), ((2560, 2586), 'deslib.dcs.LCA', 'LCA', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2563, 2586), False, 'from deslib.dcs import LCA\n'), ((2597, 2623), 'deslib.dcs.MLA', 'MLA', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2600, 2623), False, 'from deslib.dcs import MLA\n'), ((2634, 2660), 'deslib.dcs.MCB', 'MCB', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2637, 2660), False, 'from deslib.dcs import MCB\n'), ((2672, 2699), 'deslib.dcs.Rank', 'Rank', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2676, 2699), False, 'from deslib.dcs import Rank\n'), ((2711, 2738), 'deslib.des.KNOP', 'KNOP', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2715, 2738), False, 'from deslib.des import KNOP\n'), ((2750, 2780), 'deslib.des.METADES', 'METADES', (['pool_classifiers'], {'k': 'k'}), '(pool_classifiers, k=k)\n', (2757, 2780), False, 'from deslib.des import METADES\n'), ((3672, 3707), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(1)'}), '(max_depth=1)\n', (3694, 3707), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1806, 1815), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1813, 1815), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2212), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (2195, 2212), True, 'import numpy as np\n'), ((2239, 2265), 
'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (2248, 2265), True, 'import numpy as np\n'), ((1629, 1644), 'numpy.min', 'np.min', (['X[:, 0]'], {}), '(X[:, 0])\n', (1635, 1644), True, 'import numpy as np\n'), ((1646, 1661), 'numpy.max', 'np.max', (['X[:, 0]'], {}), '(X[:, 0])\n', (1652, 1661), True, 'import numpy as np\n'), ((1681, 1696), 'numpy.min', 'np.min', (['X[:, 1]'], {}), '(X[:, 1])\n', (1687, 1696), True, 'import numpy as np\n'), ((1698, 1713), 'numpy.max', 'np.max', (['X[:, 0]'], {}), '(X[:, 0])\n', (1704, 1713), True, 'import numpy as np\n'), ((5286, 5313), 'numpy.max', 'np.max', (['(X_train[:, 0] + 0.1)'], {}), '(X_train[:, 0] + 0.1)\n', (5292, 5313), True, 'import numpy as np\n'), ((5362, 5389), 'numpy.max', 'np.max', (['(X_train[:, 1] + 0.1)'], {}), '(X_train[:, 1] + 0.1)\n', (5368, 5389), True, 'import numpy as np\n'), ((5257, 5278), 'numpy.min', 'np.min', (['X_train[:, 0]'], {}), '(X_train[:, 0])\n', (5263, 5278), True, 'import numpy as np\n'), ((5333, 5354), 'numpy.min', 'np.min', (['X_train[:, 1]'], {}), '(X_train[:, 1])\n', (5339, 5354), True, 'import numpy as np\n')] |
import numpy as np
import numpy.lib.format as nplf
"""
Helper functions to work with NumPy dtypes.
"""
def dtype_to_descr(dtype: np.dtype): # list
return nplf.dtype_to_descr(dtype)
def descr_to_dtype(descr) -> np.dtype:
# This function is taken verbatim from the source code of NumPy v1.21 since it is not available
# in some older versions.
# Source: https://github.com/numpy/numpy/blob/v1.21.0/numpy/lib/format.py#L283-L337
if isinstance(descr, str):
# No padding removal needed
return np.dtype(descr)
elif isinstance(descr, tuple):
# subtype, will always have a shape descr[1]
dt = descr_to_dtype(descr[0])
return np.dtype((dt, descr[1]))
titles = []
names = []
formats = []
offsets = []
offset = 0
for field in descr:
if len(field) == 2:
name, descr_str = field
dt = descr_to_dtype(descr_str)
else:
name, descr_str, shape = field
dt = np.dtype((descr_to_dtype(descr_str), shape))
# Ignore padding bytes, which will be void bytes with '' as name
# Once support for blank names is removed, only "if name == ''" needed)
is_pad = name == "" and dt.type is np.void and dt.names is None
if not is_pad:
title, name = name if isinstance(name, tuple) else (None, name)
titles.append(title)
names.append(name)
formats.append(dt)
offsets.append(offset)
offset += dt.itemsize
return np.dtype(
{
"names": names,
"formats": formats,
"titles": titles,
"offsets": offsets,
"itemsize": offset,
}
)
| [
"numpy.dtype",
"numpy.lib.format.dtype_to_descr"
] | [((162, 188), 'numpy.lib.format.dtype_to_descr', 'nplf.dtype_to_descr', (['dtype'], {}), '(dtype)\n', (181, 188), True, 'import numpy.lib.format as nplf\n'), ((1540, 1648), 'numpy.dtype', 'np.dtype', (["{'names': names, 'formats': formats, 'titles': titles, 'offsets': offsets,\n 'itemsize': offset}"], {}), "({'names': names, 'formats': formats, 'titles': titles, 'offsets':\n offsets, 'itemsize': offset})\n", (1548, 1648), True, 'import numpy as np\n'), ((530, 545), 'numpy.dtype', 'np.dtype', (['descr'], {}), '(descr)\n', (538, 545), True, 'import numpy as np\n'), ((687, 711), 'numpy.dtype', 'np.dtype', (['(dt, descr[1])'], {}), '((dt, descr[1]))\n', (695, 711), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
from pysliceplorer import hyperslice_core
from pysliceplorer import sliceplorer_core
np.set_printoptions(threshold=np.nan)
np.seterr(divide='ignore', invalid='ignore')
def f(x, y, z):
return z*((1 - np.sign(-x - .9 + abs(y * 2))) / 3 * (np.sign(.9 - x) + 1) / 3) * (np.sign(x + .65) + 1) / 2 - ((1 - np.sign(-x - .39 + abs(y * 2))) / 3 * (np.sign(.9 - x) + 1) / 3) + ((1 - np.sign(-x - .39 + abs(y * 2))) / 3 * (np.sign(.6 - x) + 1) / 3) * (np.sign(x - .35) + 1) / 2
def g(x, y):
return np.sin(math.pi*x) / (math.pi*x) * np.sin(math.pi*y) / (math.pi*y)
dim = 3
c = (0, 0, 0.2)
test_var = hyperslice_core(f, -1, 1, dim, c, n_seg=100)
# plotting things so we can see
fig, axs = plt.subplots(dim, dim)
for i in range(0, dim):
for j in range(0, dim):
if i == 0:
x_string = 'x' + str(j + 1)
axs[i, j].set(xlabel=x_string)
axs[i, j].xaxis.set_label_position('top')
if j == 0:
y_string = 'x' + str(dim - i)
axs[i, j].set(ylabel=y_string)
axs[i, j].pcolormesh(test_var.x_grid, test_var.y_grid,
test_var.data(j, dim - i - 1), cmap='pink')
test_var2 = sliceplorer_core(g, mn=-5, mx=5, dim=2, n_fpoint=160)
fig2, axs2 = plt.subplots(2)
# plot the 2nd figure
for i in range(0, test_var2.size):
for j in range(0, test_var2.dim):
axs2[j].plot(test_var2.x_grid, test_var2.data(i)['data'][j], color='k', alpha=0.1)
plt.show()
| [
"pysliceplorer.hyperslice_core",
"matplotlib.pyplot.show",
"pysliceplorer.sliceplorer_core",
"numpy.sign",
"numpy.sin",
"numpy.seterr",
"matplotlib.pyplot.subplots",
"numpy.set_printoptions"
] | [((150, 187), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (169, 187), True, 'import numpy as np\n'), ((188, 232), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (197, 232), True, 'import numpy as np\n'), ((666, 710), 'pysliceplorer.hyperslice_core', 'hyperslice_core', (['f', '(-1)', '(1)', 'dim', 'c'], {'n_seg': '(100)'}), '(f, -1, 1, dim, c, n_seg=100)\n', (681, 710), False, 'from pysliceplorer import hyperslice_core\n'), ((755, 777), 'matplotlib.pyplot.subplots', 'plt.subplots', (['dim', 'dim'], {}), '(dim, dim)\n', (767, 777), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1296), 'pysliceplorer.sliceplorer_core', 'sliceplorer_core', (['g'], {'mn': '(-5)', 'mx': '(5)', 'dim': '(2)', 'n_fpoint': '(160)'}), '(g, mn=-5, mx=5, dim=2, n_fpoint=160)\n', (1259, 1296), False, 'from pysliceplorer import sliceplorer_core\n'), ((1310, 1325), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1322, 1325), True, 'import matplotlib.pyplot as plt\n'), ((1515, 1525), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1523, 1525), True, 'import matplotlib.pyplot as plt\n'), ((597, 616), 'numpy.sin', 'np.sin', (['(math.pi * y)'], {}), '(math.pi * y)\n', (603, 616), True, 'import numpy as np\n'), ((563, 582), 'numpy.sin', 'np.sin', (['(math.pi * x)'], {}), '(math.pi * x)\n', (569, 582), True, 'import numpy as np\n'), ((511, 528), 'numpy.sign', 'np.sign', (['(x - 0.35)'], {}), '(x - 0.35)\n', (518, 528), True, 'import numpy as np\n'), ((336, 353), 'numpy.sign', 'np.sign', (['(x + 0.65)'], {}), '(x + 0.65)\n', (343, 353), True, 'import numpy as np\n'), ((409, 425), 'numpy.sign', 'np.sign', (['(0.9 - x)'], {}), '(0.9 - x)\n', (416, 425), True, 'import numpy as np\n'), ((482, 498), 'numpy.sign', 'np.sign', (['(0.6 - x)'], {}), '(0.6 - x)\n', (489, 498), True, 'import numpy as np\n'), ((307, 323), 'numpy.sign', 
'np.sign', (['(0.9 - x)'], {}), '(0.9 - x)\n', (314, 323), True, 'import numpy as np\n')] |
import time
import numpy as np
from utils import stopwatch
def print_batch_loss(epoch_id, phase, epoch_loss_accumulator, time_epoch, batch_id, nb_batches):
UP_AND_CLEAR = '\033[F\033[K' # up one line, clear until end of line
if(batch_id == 0):
UP_AND_CLEAR = '' # The first log of an epoch does not overwrite the console line above
print('{}[Epoch: {},\t {}] Batch: {}/{} ({}%)\t Loss: {:.6f}\t (Stopwatch: {})'.format(
UP_AND_CLEAR,
epoch_id,
phase,
batch_id,
nb_batches,
np.rint(100 * batch_id / nb_batches).astype(np.int),
epoch_loss_accumulator.get_and_reset_sub_loss(),
stopwatch.stopwatch(time.time(), time_epoch)))
| [
"time.time",
"numpy.rint"
] | [((640, 651), 'time.time', 'time.time', ([], {}), '()\n', (649, 651), False, 'import time\n'), ((510, 546), 'numpy.rint', 'np.rint', (['(100 * batch_id / nb_batches)'], {}), '(100 * batch_id / nb_batches)\n', (517, 546), True, 'import numpy as np\n')] |
from abc import ABCMeta
import numpy as np
from time import time
class DotProduct(metaclass = ABCMeta):
def naive_dotproduct(self, matrix, kernel):
"""
A naive approach which uses brute force loops. Very slow.
:param matrix: a 3d numpy array of size [width][height][channel]
:param kernel: a 1d numpy array of size [length]
:return: a convoluted 3d numpy array of size [width][height][channel]
"""
t0 = time()
width = matrix.shape[0]
height = matrix.shape[1]
mod_matrix = np.zeros((width, height, 3))
for row in range(width):
for col in range(height):
r = matrix[row, col, 0]
g = matrix[row, col, 1]
b = matrix[row, col, 2]
mod_matrix[row, col, 0] = r*kernel[0,0] + g*kernel[0,1] + b*kernel[0,2]
mod_matrix[row, col, 1] = r*kernel[1,0] + g*kernel[1,1] + b*kernel[1,2]
mod_matrix[row, col, 2] = r*kernel[2,0] + g*kernel[2,1] + b*kernel[2,2]
t1 = time()
print(t1-t0)
mod_matrix = np.clip(mod_matrix.astype(int), 0, 255)
return mod_matrix
def fast_dotproduct(self, matrix, kernel):
"""
A fast dot product which uses Numpy's optimized dot product module.
:param matrix: a 3d numpy array of size [width][height][channel]
:param kernel: a 1d numpy array of size [length]
:return: a convoluted 3d numpy array of size [width][height][channel]
"""
t0 = time()
width = matrix.shape[0]
height = matrix.shape[1]
mod_matrix = np.zeros((width, height, 3))
mod_matrix[:,:,0] = np.dot(matrix, kernel[0])
mod_matrix[:,:,1] = np.dot(matrix, kernel[1])
mod_matrix[:,:,2] = np.dot(matrix, kernel[2])
mod_matrix = np.clip(mod_matrix.astype(int), 0, 255)
t1 = time()
print(t1-t0)
return mod_matrix | [
"numpy.dot",
"numpy.zeros",
"time.time"
] | [((465, 471), 'time.time', 'time', ([], {}), '()\n', (469, 471), False, 'from time import time\n'), ((558, 586), 'numpy.zeros', 'np.zeros', (['(width, height, 3)'], {}), '((width, height, 3))\n', (566, 586), True, 'import numpy as np\n'), ((1057, 1063), 'time.time', 'time', ([], {}), '()\n', (1061, 1063), False, 'from time import time\n'), ((1541, 1547), 'time.time', 'time', ([], {}), '()\n', (1545, 1547), False, 'from time import time\n'), ((1634, 1662), 'numpy.zeros', 'np.zeros', (['(width, height, 3)'], {}), '((width, height, 3))\n', (1642, 1662), True, 'import numpy as np\n'), ((1700, 1725), 'numpy.dot', 'np.dot', (['matrix', 'kernel[0]'], {}), '(matrix, kernel[0])\n', (1706, 1725), True, 'import numpy as np\n'), ((1754, 1779), 'numpy.dot', 'np.dot', (['matrix', 'kernel[1]'], {}), '(matrix, kernel[1])\n', (1760, 1779), True, 'import numpy as np\n'), ((1808, 1833), 'numpy.dot', 'np.dot', (['matrix', 'kernel[2]'], {}), '(matrix, kernel[2])\n', (1814, 1833), True, 'import numpy as np\n'), ((1917, 1923), 'time.time', 'time', ([], {}), '()\n', (1921, 1923), False, 'from time import time\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc ###计算roc和auc
y_label = []
y_score = []
y_label = np.load("data.npz",allow_pickle=True)["arr_1"].tolist()[-1]
y_score = np.load("data.npz",allow_pickle=True)["arr_2"].tolist()[-1]
fpr, tpr, thersholds = roc_curve(y_label, y_score, pos_label=1)
print('----------------------')
print('假阳率\t真阳率\t阈值')
for i, value in enumerate(thersholds):
print("%f %f %f" % (fpr[i], tpr[i], value))
print('----------------------')
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, 'k--', label='ROC (area = {0:.2f})'.format(roc_auc), lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend(loc="lower right")
plt.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"matplotlib.pyplot.xlabel",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.load",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((332, 372), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_label', 'y_score'], {'pos_label': '(1)'}), '(y_label, y_score, pos_label=1)\n', (341, 372), False, 'from sklearn.metrics import roc_curve, auc\n'), ((557, 570), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (560, 570), False, 'from sklearn.metrics import roc_curve, auc\n'), ((650, 673), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (658, 673), True, 'import matplotlib.pyplot as plt\n'), ((676, 699), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (684, 699), True, 'import matplotlib.pyplot as plt\n'), ((700, 733), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (710, 733), True, 'import matplotlib.pyplot as plt\n'), ((734, 766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (744, 766), True, 'import matplotlib.pyplot as plt\n'), ((769, 791), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (778, 791), True, 'import matplotlib.pyplot as plt\n'), ((792, 821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (802, 821), True, 'import matplotlib.pyplot as plt\n'), ((822, 832), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (830, 832), True, 'import matplotlib.pyplot as plt\n'), ((179, 217), 'numpy.load', 'np.load', (['"""data.npz"""'], {'allow_pickle': '(True)'}), "('data.npz', allow_pickle=True)\n", (186, 217), True, 'import numpy as np\n'), ((249, 287), 'numpy.load', 'np.load', (['"""data.npz"""'], {'allow_pickle': '(True)'}), "('data.npz', allow_pickle=True)\n", (256, 287), True, 'import numpy as np\n')] |
import numpy as np
input = np.loadtxt('day5_input.txt', dtype='i', delimiter='\n')
current_index = 0
steps = 0
while current_index >= 0 and current_index < len(input):
previous_index = current_index
current_index += input[current_index]
if input[previous_index] >= 3:
input[previous_index] -= 1
else:
input[previous_index] += 1
steps += 1
print(steps)
| [
"numpy.loadtxt"
] | [((28, 83), 'numpy.loadtxt', 'np.loadtxt', (['"""day5_input.txt"""'], {'dtype': '"""i"""', 'delimiter': '"""\n"""'}), "('day5_input.txt', dtype='i', delimiter='\\n')\n", (38, 83), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.