code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.base.Network import MYNET as Net
import numpy as np
class MYNET(Net):
def __init__(self, args, mode=None):
super().__init__(args,mode)
hdim=self.num_features
self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
def forward(self, input):
if self.mode == 'encoder':
input = self.encode(input)
return input
else:
support_idx, query_idx = input
logits = self._forward(support_idx, query_idx)
return logits
def _forward(self, support,query):
emb_dim = support.size(-1)
# get mean of the support
proto = support.mean(dim=1)
num_batch = proto.shape[0]
num_proto = proto.shape[1]
num_query = query.shape[1]*query.shape[2]#num of query*way
# query: (num_batch, num_query, num_proto, num_emb)
# proto: (num_batch, num_proto, num_emb)
query = query.view(-1, emb_dim).unsqueeze(1)
proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
proto = proto.view(num_batch*num_query, num_proto, emb_dim)
combined = torch.cat([proto, query], 1) # Nk x (N + 1) x d, batch_size = NK
combined = self.slf_attn(combined, combined, combined)
# compute distance for all batches
proto, query = combined.split(num_proto, 1)
logits=F.cosine_similarity(query,proto,dim=-1)
logits=logits*self.args.temperature
return logits
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
output, attn, log_attn = self.attention(q, k, v)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output
| [
"torch.nn.Dropout",
"torch.bmm",
"numpy.power",
"torch.nn.init.xavier_normal_",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.cosine_similarity",
"numpy.sqrt"
] | [((1241, 1269), 'torch.cat', 'torch.cat', (['[proto, query]', '(1)'], {}), '([proto, query], 1)\n', (1250, 1269), False, 'import torch\n'), ((1480, 1521), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['query', 'proto'], {'dim': '(-1)'}), '(query, proto, dim=-1)\n', (1499, 1521), True, 'import torch.nn.functional as F\n'), ((1819, 1843), 'torch.nn.Dropout', 'nn.Dropout', (['attn_dropout'], {}), '(attn_dropout)\n', (1829, 1843), True, 'import torch.nn as nn\n'), ((1867, 1884), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (1877, 1884), True, 'import torch.nn as nn\n'), ((2023, 2045), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['attn', '(2)'], {}), '(attn, 2)\n', (2036, 2045), True, 'import torch.nn.functional as F\n'), ((2131, 2149), 'torch.bmm', 'torch.bmm', (['attn', 'v'], {}), '(attn, v)\n', (2140, 2149), False, 'import torch\n'), ((2455, 2499), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {'bias': '(False)'}), '(d_model, n_head * d_k, bias=False)\n', (2464, 2499), True, 'import torch.nn as nn\n'), ((2520, 2564), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {'bias': '(False)'}), '(d_model, n_head * d_k, bias=False)\n', (2529, 2564), True, 'import torch.nn as nn\n'), ((2585, 2629), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_v)'], {'bias': '(False)'}), '(d_model, n_head * d_v, bias=False)\n', (2594, 2629), True, 'import torch.nn as nn\n'), ((2998, 3019), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (3010, 3019), True, 'import torch.nn as nn\n'), ((3039, 3071), 'torch.nn.Linear', 'nn.Linear', (['(n_head * d_v)', 'd_model'], {}), '(n_head * d_v, d_model)\n', (3048, 3071), True, 'import torch.nn as nn\n'), ((3080, 3118), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.fc.weight'], {}), '(self.fc.weight)\n', (3102, 3118), True, 'import torch.nn as nn\n'), ((3142, 3161), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3152, 3161), True, 'import torch.nn as nn\n'), ((2684, 2714), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_model + d_k))'], {}), '(2.0 / (d_model + d_k))\n', (2691, 2714), True, 'import numpy as np\n'), ((2770, 2800), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_model + d_k))'], {}), '(2.0 / (d_model + d_k))\n', (2777, 2800), True, 'import numpy as np\n'), ((2856, 2886), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_model + d_v))'], {}), '(2.0 / (d_model + d_v))\n', (2863, 2886), True, 'import numpy as np\n'), ((2952, 2970), 'numpy.power', 'np.power', (['d_k', '(0.5)'], {}), '(d_k, 0.5)\n', (2960, 2970), True, 'import numpy as np\n')] |
# Copyright: (c) 2021, <NAME>
import numpy as np
def hexToBitStr(hexStr):
"""
provide the hex string in format 'ab ab ab....
Returns the bits in string format, as list of bytes
"""
b = [int(hexStr[i*3:i*3+2],16) for i in range(int((len(hexStr)+1)/3))]
bytesequence = ['{0:08b}'.format(g) for g in b]
return bytesequence
def hexToBytes(hexStr):
"""
Provide hex sting in format 'ab ab ab...'
Returns the byte values
"""
bInt = [int(hexStr[i*3:i*3+2],16) for i in range(int((len(hexStr)+1)/3))]
return bInt
def hexToBits(hexStr):
bInt = [int(hexStr[i*3:i*3+2],16) for i in range(int((len(hexStr)+1)/3))]
byteSequence = np.array([[int(x) for x in bin(b)[2:].zfill(8)] for b in bInt])
byteSequence = np.fliplr(byteSequence)
return byteSequence.reshape(np.prod(byteSequence.shape))
def bitsToBytes(bitArray):
if len(bitArray) % 8 != 0:
raise IndexError('Input array length must be a multiple of 8')
numBytes = int(len(bitArray)/8)
byteData = np.dot(bitArray.reshape(numBytes,8),2**np.arange(0,8,1)).astype(np.uint8)
return byteData
def bitsToHex(bits):
if len(bits) % 8 != 0:
raise IndexError('Dimension mismatch','Dimension needs to be multiple of 8')
Decvals = np.dot(bits.reshape(int(len(bits)/8),8),2**np.arange(0,8,1)).astype(np.uint8)
hexVals = ' '.join(['{:02X}'.format(i) for i in Decvals])
return hexVals
def bytesToHex(byteArr):
hexVals = ' '.join(['{:02X}'.format(i) for i in byteArr])
return hexVals
| [
"numpy.fliplr",
"numpy.arange",
"numpy.prod"
] | [((771, 794), 'numpy.fliplr', 'np.fliplr', (['byteSequence'], {}), '(byteSequence)\n', (780, 794), True, 'import numpy as np\n'), ((827, 854), 'numpy.prod', 'np.prod', (['byteSequence.shape'], {}), '(byteSequence.shape)\n', (834, 854), True, 'import numpy as np\n'), ((1078, 1096), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(1)'], {}), '(0, 8, 1)\n', (1087, 1096), True, 'import numpy as np\n'), ((1337, 1355), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(1)'], {}), '(0, 8, 1)\n', (1346, 1355), True, 'import numpy as np\n')] |
import os
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from config import *
from dataset import get_div2k_loader
from models import Discriminator, Generator_SRGAN, Generator_ESRGAN
from losses import PerceptualLoss, TVLoss
from inference import inference
from utils import make_dirs, get_lr_scheduler, set_requires_grad, sample_images
# Reproducibility #
cudnn.deterministic = True
cudnn.benchmark = False
# Device Configuration #
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def train():
# Fix Seed for Reproducibility #
torch.manual_seed(9)
if torch.cuda.is_available():
torch.cuda.manual_seed(9)
# Samples, Weights and Results Path #
paths = [config.samples_path, config.weights_path]
paths = [make_dirs(path) for path in paths]
# Prepare Data Loader #
train_div2k_loader = get_div2k_loader(sort='train',
batch_size=config.batch_size,
image_size=config.image_size,
upscale_factor=config.upscale_factor,
crop_size=config.crop_size)
val_div2k_loader = get_div2k_loader(sort='val',
batch_size=config.val_batch_size,
image_size=config.image_size,
upscale_factor=config.upscale_factor,
crop_size=config.crop_size)
total_batch = len(train_div2k_loader)
# Prepare Networks #
D = Discriminator()
if config.sort == 'SRGAN':
G = Generator_SRGAN()
elif config.sort == 'ESRGAN':
G = Generator_ESRGAN()
else:
raise NotImplementedError
networks = [D, G]
for network in networks:
network.to(device)
# Loss Function #
criterion_Perceptual = PerceptualLoss(sort=config.sort).to(device)
# For SRGAN #
criterion_MSE = nn.MSELoss()
criterion_TV = TVLoss()
# For ESRGAN #
criterion_BCE = nn.BCEWithLogitsLoss()
criterion_Content = nn.L1Loss()
# Optimizers #
D_optim = torch.optim.Adam(D.parameters(), lr=config.lr, betas=(0.9, 0.999))
G_optim = torch.optim.Adam(G.parameters(), lr=config.lr, betas=(0.9, 0.999))
D_optim_scheduler = get_lr_scheduler(D_optim)
G_optim_scheduler = get_lr_scheduler(G_optim)
# Lists #
D_losses, G_losses = [], []
# Train #
print("Training {} started with total epoch of {}.".format(config.sort, config.num_epochs))
for epoch in range(config.num_epochs):
for i, (high, low) in enumerate(train_div2k_loader):
D.train()
if config.sort == "SRGAN":
G.train()
# Data Preparation #
high = high.to(device)
low = low.to(device)
# Initialize Optimizers #
D_optim.zero_grad()
G_optim.zero_grad()
#######################
# Train Discriminator #
#######################
set_requires_grad(D, requires_grad=True)
# Generate Fake HR Images #
fake_high = G(low)
if config.sort == 'SRGAN':
# Forward Data #
prob_real = D(high)
prob_fake = D(fake_high.detach())
# Calculate Total Discriminator Loss #
D_loss = 1 - prob_real.mean() + prob_fake.mean()
elif config.sort == 'ESRGAN':
# Forward Data #
prob_real = D(high)
prob_fake = D(fake_high.detach())
# Relativistic Discriminator #
diff_r2f = prob_real - prob_fake.mean()
diff_f2r = prob_fake - prob_real.mean()
# Labels #
real_labels = torch.ones(diff_r2f.size()).to(device)
fake_labels = torch.zeros(diff_f2r.size()).to(device)
# Adversarial Loss #
D_loss_real = criterion_BCE(diff_r2f, real_labels)
D_loss_fake = criterion_BCE(diff_f2r, fake_labels)
# Calculate Total Discriminator Loss #
D_loss = (D_loss_real + D_loss_fake).mean()
# Back Propagation and Update #
D_loss.backward()
D_optim.step()
###################
# Train Generator #
###################
set_requires_grad(D, requires_grad=False)
if config.sort == 'SRGAN':
# Adversarial Loss #
prob_fake = D(fake_high).mean()
G_loss_adversarial = torch.mean(1 - prob_fake)
G_loss_mse = criterion_MSE(fake_high, high)
# Perceptual Loss #
lambda_perceptual = 6e-3
G_loss_perceptual = criterion_Perceptual(fake_high, high)
# Total Variation Loss #
G_loss_tv = criterion_TV(fake_high)
# Calculate Total Generator Loss #
G_loss = config.lambda_adversarial * G_loss_adversarial + G_loss_mse + lambda_perceptual * G_loss_perceptual + config.lambda_tv * G_loss_tv
elif config.sort == 'ESRGAN':
# Forward Data #
prob_real = D(high)
prob_fake = D(fake_high)
# Relativistic Discriminator #
diff_r2f = prob_real - prob_fake.mean()
diff_f2r = prob_fake - prob_real.mean()
# Labels #
real_labels = torch.ones(diff_r2f.size()).to(device)
fake_labels = torch.zeros(diff_f2r.size()).to(device)
# Adversarial Loss #
G_loss_bce_real = criterion_BCE(diff_f2r, real_labels)
G_loss_bce_fake = criterion_BCE(diff_r2f, fake_labels)
G_loss_bce = (G_loss_bce_real + G_loss_bce_fake).mean()
# Perceptual Loss #
lambda_perceptual = 1e-2
G_loss_perceptual = criterion_Perceptual(fake_high, high)
# Content Loss #
G_loss_content = criterion_Content(fake_high, high)
# Calculate Total Generator Loss #
G_loss = config.lambda_bce * G_loss_bce + lambda_perceptual * G_loss_perceptual + config.lambda_content * G_loss_content
# Back Propagation and Update #
G_loss.backward()
G_optim.step()
# Add items to Lists #
D_losses.append(D_loss.item())
G_losses.append(G_loss.item())
####################
# Print Statistics #
####################
if (i+1) % config.print_every == 0:
print("{} | Epoch [{}/{}] | Iterations [{}/{}] | D Loss {:.4f} | G Loss {:.4f}"
.format(config.sort, epoch + 1, config.num_epochs, i + 1, total_batch, np.average(D_losses), np.average(G_losses)))
# Save Sample Images #
sample_images(val_div2k_loader, G, epoch, config.samples_path)
# Adjust Learning Rate #
D_optim_scheduler.step()
G_optim_scheduler.step()
# Save Model Weights and Inference #
if (epoch+1) % config.save_every == 0:
torch.save(G.state_dict(), os.path.join(config.weights_path, '{}_Generator_Epoch_{}.pkl'.format(config.sort, epoch + 1)))
inference(G, val_div2k_loader, epoch, config.inference_path)
print("Training Finished.")
if __name__ == "__main__":
torch.cuda.empty_cache()
train() | [
"models.Discriminator",
"utils.get_lr_scheduler",
"torch.nn.MSELoss",
"losses.TVLoss",
"utils.make_dirs",
"losses.PerceptualLoss",
"dataset.get_div2k_loader",
"torch.mean",
"torch.nn.BCEWithLogitsLoss",
"numpy.average",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.is_available"... | [((45, 78), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (68, 78), False, 'import warnings\n'), ((550, 575), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (573, 575), False, 'import torch\n'), ((644, 664), 'torch.manual_seed', 'torch.manual_seed', (['(9)'], {}), '(9)\n', (661, 664), False, 'import torch\n'), ((672, 697), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (695, 697), False, 'import torch\n'), ((933, 1099), 'dataset.get_div2k_loader', 'get_div2k_loader', ([], {'sort': '"""train"""', 'batch_size': 'config.batch_size', 'image_size': 'config.image_size', 'upscale_factor': 'config.upscale_factor', 'crop_size': 'config.crop_size'}), "(sort='train', batch_size=config.batch_size, image_size=\n config.image_size, upscale_factor=config.upscale_factor, crop_size=\n config.crop_size)\n", (949, 1099), False, 'from dataset import get_div2k_loader\n'), ((1281, 1449), 'dataset.get_div2k_loader', 'get_div2k_loader', ([], {'sort': '"""val"""', 'batch_size': 'config.val_batch_size', 'image_size': 'config.image_size', 'upscale_factor': 'config.upscale_factor', 'crop_size': 'config.crop_size'}), "(sort='val', batch_size=config.val_batch_size, image_size=\n config.image_size, upscale_factor=config.upscale_factor, crop_size=\n config.crop_size)\n", (1297, 1449), False, 'from dataset import get_div2k_loader\n'), ((1676, 1691), 'models.Discriminator', 'Discriminator', ([], {}), '()\n', (1689, 1691), False, 'from models import Discriminator, Generator_SRGAN, Generator_ESRGAN\n'), ((2075, 2087), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2085, 2087), True, 'import torch.nn as nn\n'), ((2107, 2115), 'losses.TVLoss', 'TVLoss', ([], {}), '()\n', (2113, 2115), False, 'from losses import PerceptualLoss, TVLoss\n'), ((2156, 2178), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2176, 2178), True, 'import torch.nn as nn\n'), ((2203, 2214), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2212, 2214), True, 'import torch.nn as nn\n'), ((2422, 2447), 'utils.get_lr_scheduler', 'get_lr_scheduler', (['D_optim'], {}), '(D_optim)\n', (2438, 2447), False, 'from utils import make_dirs, get_lr_scheduler, set_requires_grad, sample_images\n'), ((2472, 2497), 'utils.get_lr_scheduler', 'get_lr_scheduler', (['G_optim'], {}), '(G_optim)\n', (2488, 2497), False, 'from utils import make_dirs, get_lr_scheduler, set_requires_grad, sample_images\n'), ((7674, 7698), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7696, 7698), False, 'import torch\n'), ((707, 732), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(9)'], {}), '(9)\n', (729, 732), False, 'import torch\n'), ((844, 859), 'utils.make_dirs', 'make_dirs', (['path'], {}), '(path)\n', (853, 859), False, 'from utils import make_dirs, get_lr_scheduler, set_requires_grad, sample_images\n'), ((1736, 1753), 'models.Generator_SRGAN', 'Generator_SRGAN', ([], {}), '()\n', (1751, 1753), False, 'from models import Discriminator, Generator_SRGAN, Generator_ESRGAN\n'), ((1800, 1818), 'models.Generator_ESRGAN', 'Generator_ESRGAN', ([], {}), '()\n', (1816, 1818), False, 'from models import Discriminator, Generator_SRGAN, Generator_ESRGAN\n'), ((1992, 2024), 'losses.PerceptualLoss', 'PerceptualLoss', ([], {'sort': 'config.sort'}), '(sort=config.sort)\n', (2006, 2024), False, 'from losses import PerceptualLoss, TVLoss\n'), ((3176, 3216), 'utils.set_requires_grad', 'set_requires_grad', (['D'], {'requires_grad': '(True)'}), '(D, requires_grad=True)\n', (3193, 3216), False, 'from utils import make_dirs, get_lr_scheduler, set_requires_grad, sample_images\n'), ((4560, 4601), 'utils.set_requires_grad', 'set_requires_grad', (['D'], {'requires_grad': '(False)'}), '(D, requires_grad=False)\n', (4577, 4601), False, 'from utils import make_dirs, get_lr_scheduler, set_requires_grad, sample_images\n'), ((7547, 7607), 'inference.inference', 'inference', (['G', 'val_div2k_loader', 'epoch', 'config.inference_path'], {}), '(G, val_div2k_loader, epoch, config.inference_path)\n', (7556, 7607), False, 'from inference import inference\n'), ((4765, 4790), 'torch.mean', 'torch.mean', (['(1 - prob_fake)'], {}), '(1 - prob_fake)\n', (4775, 4790), False, 'import torch\n'), ((7145, 7207), 'utils.sample_images', 'sample_images', (['val_div2k_loader', 'G', 'epoch', 'config.samples_path'], {}), '(val_div2k_loader, G, epoch, config.samples_path)\n', (7158, 7207), False, 'from utils import make_dirs, get_lr_scheduler, set_requires_grad, sample_images\n'), ((7044, 7064), 'numpy.average', 'np.average', (['D_losses'], {}), '(D_losses)\n', (7054, 7064), True, 'import numpy as np\n'), ((7066, 7086), 'numpy.average', 'np.average', (['G_losses'], {}), '(G_losses)\n', (7076, 7086), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import time
import os
params_appliance = {
'kettle': {
'windowlength': 599,
'on_power_threshold': 2000,
'max_on_power': 3998,
'mean': 700,
'std': 1000,
's2s_length': 128,
'houses': [1, 2],
'channels': [10, 8],
},
'microwave': {
'windowlength': 599,
'on_power_threshold': 200,
'max_on_power': 3969,
'mean': 500,
'std': 800,
's2s_length': 128,
'houses': [1, 2],
'channels': [13, 15],
},
'fridge': {
'windowlength': 599,
'on_power_threshold': 50,
'max_on_power': 3323,
'mean': 200,
'std': 400,
's2s_length': 512,
'houses': [1, 2],
'channels': [12, 14],
},
'dishwasher': {
'windowlength': 599,
'on_power_threshold': 10,
'max_on_power': 3964,
'mean': 700,
'std': 1000,
's2s_length': 1536,
'houses': [1, 2],
'channels': [6, 13],
},
'washingmachine': {
'windowlength': 599,
'on_power_threshold': 20,
'max_on_power': 3999,
'mean': 400,
'std': 700,
's2s_length': 2000,
'houses': [1, 2],
'channels': [5, 12],
}
}
#appliance_name = 'kettle'
#building = '1'
absolute_path = '/media/michele/Dati/ukdale/mingjun/'
buildings = [1, 2]
appliances = ['kettle', 'microwave', 'fridge', 'dishwasher', 'washingmachine']
aggregate_mean = 522
aggregate_std = 814
save_path = '/media/michele/Dati/myUKDALE/'
save_path = '/home/michele/Desktop/'
print("\nNormalization parameters: ")
print("Mean and standard deviation values USED for AGGREGATE are:")
print(" Mean = {:d}, STD = {:d}".format(aggregate_mean, aggregate_std))
start_time = time.time()
for building in buildings:
print('---------------------------Building: {} --------------------------'.format(building))
for appliance_name in appliances:
print('------------appliance: {} ------------'.format(appliance_name))
path = absolute_path + appliance_name + '/'
for idx, filename in enumerate(os.listdir(path)):
if (appliance_name + '_building' + str(building) + '_train_mains') in filename and 'probnet' not in filename:
name = filename
aggregate = np.load(path + name).flatten()
print(" loading files:")
print(' ' + path + name)
elif (appliance_name + '_building' + str(building) + '_train_target') in filename and 'probnet' not in filename:
name = filename
gt = np.load(path + name).flatten()
print(" loading files:")
print(' ' + path + name)
assert aggregate.shape == gt.shape
#aggregate[aggregate == -params_appliance[appliance_name]['mean']/params_appliance[appliance_name]['std']] = np.nan
data = {
'aggregate': aggregate,
'{}'.format(appliance_name): gt,
}
df = pd.DataFrame(data)
# de-Normalization and re-normalization
df['aggregate'] = (df['aggregate']*params_appliance[appliance_name]['std']) + params_appliance[appliance_name]['mean']
df['aggregate'] = (df['aggregate'] - aggregate_mean) / aggregate_std
# Re-sampling
ind = pd.date_range(0, periods=df.shape[0], freq='6S')
df.set_index(ind, inplace=True, drop=True)
resample = df.resample('8S')
df = resample.mean()
# Save
df.to_csv(save_path + appliance_name + '_test_' + 'uk-dale_' + 'H' + str(building) + '.csv', index=False)
print("Size of test set is {:.3f} M rows (House {:d})."
.format(df.shape[0] / 10 ** 6, int(building)))
print('Mean and standard deviation values USED for ' + appliance_name + ' are:')
print(" Mean = {:d}, STD = {:d}"
.format(params_appliance[appliance_name]['mean'], params_appliance[appliance_name]['std']))
print("\nPlease find test set files in: " + save_path)
tot = int(int(time.time() - start_time) / 60)
print("\nTotal elapsed time: " + str(tot) + ' min') | [
"pandas.DataFrame",
"numpy.load",
"pandas.date_range",
"time.time",
"os.listdir"
] | [((1818, 1829), 'time.time', 'time.time', ([], {}), '()\n', (1827, 1829), False, 'import time\n'), ((3080, 3098), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3092, 3098), True, 'import pandas as pd\n'), ((3389, 3437), 'pandas.date_range', 'pd.date_range', (['(0)'], {'periods': 'df.shape[0]', 'freq': '"""6S"""'}), "(0, periods=df.shape[0], freq='6S')\n", (3402, 3437), True, 'import pandas as pd\n'), ((2163, 2179), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2173, 2179), False, 'import os\n'), ((4121, 4132), 'time.time', 'time.time', ([], {}), '()\n', (4130, 4132), False, 'import time\n'), ((2364, 2384), 'numpy.load', 'np.load', (['(path + name)'], {}), '(path + name)\n', (2371, 2384), True, 'import numpy as np\n'), ((2665, 2685), 'numpy.load', 'np.load', (['(path + name)'], {}), '(path + name)\n', (2672, 2685), True, 'import numpy as np\n')] |
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
def mnist():
# exchange with the corrupted mnist dataset
#train = torch.randn(50000, 784)
#test = torch.randn(10000, 784)
#train
train0 = np.load("train_0.npz")
train1 = np.load("train_1.npz")
train2 = np.load("train_2.npz")
train3 = np.load("train_3.npz")
train4 = np.load("train_4.npz")
train_images = torch.cat((torch.from_numpy(train0.f.images),torch.from_numpy(train1.f.images),torch.from_numpy(train2.f.images),torch.from_numpy(train3.f.images),torch.from_numpy(train4.f.images),), 0)
train_labels = torch.cat((torch.from_numpy(train0.f.labels),torch.from_numpy(train1.f.labels),torch.from_numpy(train2.f.labels),torch.from_numpy(train3.f.labels),torch.from_numpy(train4.f.labels),), 0)
train = Dataset(train_images, train_labels)
#test
test0 = np.load("test.npz")
test_images = torch.from_numpy(test0.f.images)
test_labels = torch.from_numpy(test0.f.labels)
test = Dataset(test_images, test_labels)
#train = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
return train, test
class Dataset(torch.utils.data.Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, images, labels):
'Initialization'
self.labels = labels
self.images = images
def __len__(self):
'Denotes the total number of samples'
return len(self.images)
def __getitem__(self, index):
'Generates one sample of data'
# Select sample
X = self.images[index]
# Load data and get label
#X = torch.load('data/' + ID + '.pt')
y = self.labels[index]
return X, y | [
"torch.utils.data.Dataset",
"numpy.load",
"torch.from_numpy"
] | [((253, 275), 'numpy.load', 'np.load', (['"""train_0.npz"""'], {}), "('train_0.npz')\n", (260, 275), True, 'import numpy as np\n'), ((291, 313), 'numpy.load', 'np.load', (['"""train_1.npz"""'], {}), "('train_1.npz')\n", (298, 313), True, 'import numpy as np\n'), ((329, 351), 'numpy.load', 'np.load', (['"""train_2.npz"""'], {}), "('train_2.npz')\n", (336, 351), True, 'import numpy as np\n'), ((367, 389), 'numpy.load', 'np.load', (['"""train_3.npz"""'], {}), "('train_3.npz')\n", (374, 389), True, 'import numpy as np\n'), ((405, 427), 'numpy.load', 'np.load', (['"""train_4.npz"""'], {}), "('train_4.npz')\n", (412, 427), True, 'import numpy as np\n'), ((858, 893), 'torch.utils.data.Dataset', 'Dataset', (['train_images', 'train_labels'], {}), '(train_images, train_labels)\n', (865, 893), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((921, 940), 'numpy.load', 'np.load', (['"""test.npz"""'], {}), "('test.npz')\n", (928, 940), True, 'import numpy as np\n'), ((961, 993), 'torch.from_numpy', 'torch.from_numpy', (['test0.f.images'], {}), '(test0.f.images)\n', (977, 993), False, 'import torch\n'), ((1014, 1046), 'torch.from_numpy', 'torch.from_numpy', (['test0.f.labels'], {}), '(test0.f.labels)\n', (1030, 1046), False, 'import torch\n'), ((1060, 1093), 'torch.utils.data.Dataset', 'Dataset', (['test_images', 'test_labels'], {}), '(test_images, test_labels)\n', (1067, 1093), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((460, 493), 'torch.from_numpy', 'torch.from_numpy', (['train0.f.images'], {}), '(train0.f.images)\n', (476, 493), False, 'import torch\n'), ((494, 527), 'torch.from_numpy', 'torch.from_numpy', (['train1.f.images'], {}), '(train1.f.images)\n', (510, 527), False, 'import torch\n'), ((528, 561), 'torch.from_numpy', 'torch.from_numpy', (['train2.f.images'], {}), '(train2.f.images)\n', (544, 561), False, 'import torch\n'), ((562, 595), 'torch.from_numpy', 'torch.from_numpy', (['train3.f.images'], {}), '(train3.f.images)\n', (578, 595), False, 'import torch\n'), ((596, 629), 'torch.from_numpy', 'torch.from_numpy', (['train4.f.images'], {}), '(train4.f.images)\n', (612, 629), False, 'import torch\n'), ((668, 701), 'torch.from_numpy', 'torch.from_numpy', (['train0.f.labels'], {}), '(train0.f.labels)\n', (684, 701), False, 'import torch\n'), ((702, 735), 'torch.from_numpy', 'torch.from_numpy', (['train1.f.labels'], {}), '(train1.f.labels)\n', (718, 735), False, 'import torch\n'), ((736, 769), 'torch.from_numpy', 'torch.from_numpy', (['train2.f.labels'], {}), '(train2.f.labels)\n', (752, 769), False, 'import torch\n'), ((770, 803), 'torch.from_numpy', 'torch.from_numpy', (['train3.f.labels'], {}), '(train3.f.labels)\n', (786, 803), False, 'import torch\n'), ((804, 837), 'torch.from_numpy', 'torch.from_numpy', (['train4.f.labels'], {}), '(train4.f.labels)\n', (820, 837), False, 'import torch\n')] |
# Import the necessary modules.
import numpy as np
import skimage.io
import glob
import scipy.ndimage
import skimage.morphology
import skimage.segmentation
import matplotlib.pyplot as plt
# Load the images.
im_glob = glob.glob('data/optical_tweezer/bead*.tif')
x = 'data/optical_tweezer/trapped_bead_5.2x_4_MMStack_Pos0.ome.tif'
im = skimage.io.ImageCollection(im_glob)
disk_radius = 3 # though choose what you like
selem = skimage.morphology.disk(disk_radius)
def center_of_mass(im, selem):
# Get peak i and j (if multiple maxima, just take first)
i, j = np.where(im == im.max())
i = i[0]
j = j[0]
# Get the i and j extent (radii) of the structuring element
r = np.array(selem.shape) // 2
r_i, r_j = r
# Get indices of non-zero entries in structuring element for convenience
ii, jj = np.nonzero(selem)
# Define indices such that index zero is in center of selem
i_pos = ii - r_i
j_pos = jj - r_j
# Width of structuring element
w = 2 * r + 1
# Make subimage that has selem
sub_im = im[i - r_i:i + r_i + 1, j - r_j:j + r_j + 1]
# Compute center of mass
eps_i, eps_j = np.array([np.dot(i_pos, sub_im[ii,jj]), np.dot(j_pos, sub_im[ii,jj])]) / sub_im[ii,jj].sum()
# Return center of mass of bead
return i + eps_i, j + eps_j
# find the center of mass
x_pos, y_pos = [], []
for i in range(100):
im_blur = skimage.filters.gaussian(im[i], 3)
m = skimage.measure.moments(im[i])
x = m[0, 1] / m[0, 0]
y = m[1, 0] / m[0, 0]
x_pos.append(x)
y_pos.append(y)
plt.figure()
plt.plot(x_pos, y_pos, 'o')
plt.show()
ip_dist = 0.042 # Physical distance in units of microns per pixel
centroid_x_micron = np.array(x_pos) * ip_dist
centroid_y_micron = np.array(y_pos) * ip_dist
# Compute the means and msd.
mean_x = np.mean(centroid_x_micron)
mean_y = np.mean(centroid_y_micron)
msd_x = np.mean((centroid_x_micron - mean_x)**2)
msd_y = np.mean((centroid_y_micron - mean_y)**2)
# Compute the trap force.
kT = 4.1E-3 # In units of pN * micron
k_x = kT / msd_x
k_y = kT / msd_y
print('Trap force in x dimension is ' + str(k_x) + ' pN micron')
print('Trap force in y dimension is ' + str(k_y) + ' pN micron')
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.nonzero",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"glob.glob",
"numpy.dot"
] | [((218, 261), 'glob.glob', 'glob.glob', (['"""data/optical_tweezer/bead*.tif"""'], {}), "('data/optical_tweezer/bead*.tif')\n", (227, 261), False, 'import glob\n'), ((1564, 1576), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1574, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1604), 'matplotlib.pyplot.plot', 'plt.plot', (['x_pos', 'y_pos', '"""o"""'], {}), "(x_pos, y_pos, 'o')\n", (1585, 1604), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1615), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1613, 1615), True, 'import matplotlib.pyplot as plt\n'), ((1816, 1842), 'numpy.mean', 'np.mean', (['centroid_x_micron'], {}), '(centroid_x_micron)\n', (1823, 1842), True, 'import numpy as np\n'), ((1852, 1878), 'numpy.mean', 'np.mean', (['centroid_y_micron'], {}), '(centroid_y_micron)\n', (1859, 1878), True, 'import numpy as np\n'), ((1887, 1929), 'numpy.mean', 'np.mean', (['((centroid_x_micron - mean_x) ** 2)'], {}), '((centroid_x_micron - mean_x) ** 2)\n', (1894, 1929), True, 'import numpy as np\n'), ((1936, 1978), 'numpy.mean', 'np.mean', (['((centroid_y_micron - mean_y) ** 2)'], {}), '((centroid_y_micron - mean_y) ** 2)\n', (1943, 1978), True, 'import numpy as np\n'), ((828, 845), 'numpy.nonzero', 'np.nonzero', (['selem'], {}), '(selem)\n', (838, 845), True, 'import numpy as np\n'), ((1705, 1720), 'numpy.array', 'np.array', (['x_pos'], {}), '(x_pos)\n', (1713, 1720), True, 'import numpy as np\n'), ((1751, 1766), 'numpy.array', 'np.array', (['y_pos'], {}), '(y_pos)\n', (1759, 1766), True, 'import numpy as np\n'), ((693, 714), 'numpy.array', 'np.array', (['selem.shape'], {}), '(selem.shape)\n', (701, 714), True, 'import numpy as np\n'), ((1160, 1189), 'numpy.dot', 'np.dot', (['i_pos', 'sub_im[ii, jj]'], {}), '(i_pos, sub_im[ii, jj])\n', (1166, 1189), True, 'import numpy as np\n'), ((1190, 1219), 'numpy.dot', 'np.dot', (['j_pos', 'sub_im[ii, jj]'], {}), '(j_pos, sub_im[ii, jj])\n', (1196, 1219), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 6 10:10:55 2021
@author: jiaweiguo
"""
import os
from pathlib import Path
from ase.io import write, read
import copy
import shutil
from glob import glob
from ase.constraints import FixAtoms
from collections import defaultdict
from ase.neighborlist import natural_cutoffs, NeighborList, mic
from ase import Atoms
import numpy as np
from ase.visualize import view
import ase.db
import pickle
import matplotlib.pyplot as plt
import math
import re
zeolite, TM_type = 'MFI', 'Co'
tol = 1.5 * 2
folder = '/Users/jiaweiguo/Box/P1_pair_site/%s_1Al_%s' %(zeolite, TM_type)
filepaths = [dirs for dirs in os.listdir(folder) if 'T' in dirs]
output_dir0 = '/Users/jiaweiguo/Box/P1_pair_site/%s_1Al_%sOH' %(zeolite, TM_type)
Path(output_dir0).mkdir(parents=True, exist_ok=True)
for file in filepaths:
try:
atoms = read(os.path.join(folder, file) + '/opt_400/opt_from_vasp.traj', '0')
except:
atoms = read(os.path.join(folder, file) + '/opt_400/vasprun.xml', '-1')
output_dir1 = os.path.join(output_dir0, file)
Path(output_dir1).mkdir(parents=True, exist_ok=True)
TM_index = [atom.index for atom in atoms if atom.symbol == TM_type]
TM_pos = atoms.get_positions()[TM_index]
vec_translate = np.matmul([0.5, 0.5, 0.5], atoms.get_cell()) - TM_pos
atoms.translate(vec_translate)
atoms.wrap()
Al_index = [atom.index for atom in atoms if atom.symbol == 'Al']
TM_index = [atom.index for atom in atoms if atom.symbol == TM_type]
TM_pos = atoms.get_positions()[TM_index]
vec = np.random.normal(size=(3,))
vec = vec / np.linalg.norm(vec)
oh_pos = [TM_pos[0] + vec * 2, TM_pos[0] + vec * 3]
oh_atoms = Atoms('OH', positions=oh_pos)
oh_cop = np.sum(oh_atoms.positions, 0)/len(oh_atoms)
distances = mic(oh_cop - oh_atoms.positions, atoms.cell)
distances = np.linalg.norm(distances, axis=1)
while min(distances) < tol:
vec = np.random.normal(size=(3,))
vec = vec / np.linalg.norm(vec)
oh_pos = [TM_pos[0] + vec * 2, TM_pos[0] + vec * 3]
oh_cop = np.sum(oh_atoms.positions, 0)/len(oh_atoms)
oh_atoms.translate(oh_pos - oh_cop)
oh_cop = np.sum(oh_atoms.positions, 0)/len(oh_atoms)
distances = mic(oh_cop - oh_atoms.positions, atoms.cell)
distances = np.linalg.norm(distances, axis=1)
new_atoms = atoms + Atoms('OH', positions=oh_pos)
new_atoms.translate(-1 * vec_translate)
new_atoms.wrap()
#write(output_dir1 + '/starting.traj', atoms)
break
| [
"numpy.sum",
"ase.neighborlist.mic",
"pathlib.Path",
"numpy.linalg.norm",
"numpy.random.normal",
"os.path.join",
"os.listdir",
"ase.Atoms"
] | [((1080, 1111), 'os.path.join', 'os.path.join', (['output_dir0', 'file'], {}), '(output_dir0, file)\n', (1092, 1111), False, 'import os\n'), ((1632, 1659), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3,)'}), '(size=(3,))\n', (1648, 1659), True, 'import numpy as np\n'), ((1768, 1797), 'ase.Atoms', 'Atoms', (['"""OH"""'], {'positions': 'oh_pos'}), "('OH', positions=oh_pos)\n", (1773, 1797), False, 'from ase import Atoms\n'), ((1876, 1920), 'ase.neighborlist.mic', 'mic', (['(oh_cop - oh_atoms.positions)', 'atoms.cell'], {}), '(oh_cop - oh_atoms.positions, atoms.cell)\n', (1879, 1920), False, 'from ase.neighborlist import natural_cutoffs, NeighborList, mic\n'), ((1937, 1970), 'numpy.linalg.norm', 'np.linalg.norm', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (1951, 1970), True, 'import numpy as np\n'), ((669, 687), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (679, 687), False, 'import os\n'), ((786, 803), 'pathlib.Path', 'Path', (['output_dir0'], {}), '(output_dir0)\n', (790, 803), False, 'from pathlib import Path\n'), ((1676, 1695), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (1690, 1695), True, 'import numpy as np\n'), ((1816, 1845), 'numpy.sum', 'np.sum', (['oh_atoms.positions', '(0)'], {}), '(oh_atoms.positions, 0)\n', (1822, 1845), True, 'import numpy as np\n'), ((2022, 2049), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3,)'}), '(size=(3,))\n', (2038, 2049), True, 'import numpy as np\n'), ((2345, 2389), 'ase.neighborlist.mic', 'mic', (['(oh_cop - oh_atoms.positions)', 'atoms.cell'], {}), '(oh_cop - oh_atoms.positions, atoms.cell)\n', (2348, 2389), False, 'from ase.neighborlist import natural_cutoffs, NeighborList, mic\n'), ((2410, 2443), 'numpy.linalg.norm', 'np.linalg.norm', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (2424, 2443), True, 'import numpy as np\n'), ((2469, 2498), 'ase.Atoms', 'Atoms', (['"""OH"""'], {'positions': 'oh_pos'}), "('OH', positions=oh_pos)\n", (2474, 2498), False, 'from ase import Atoms\n'), ((1116, 1133), 'pathlib.Path', 'Path', (['output_dir1'], {}), '(output_dir1)\n', (1120, 1133), False, 'from pathlib import Path\n'), ((2070, 2089), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (2084, 2089), True, 'import numpy as np\n'), ((2167, 2196), 'numpy.sum', 'np.sum', (['oh_atoms.positions', '(0)'], {}), '(oh_atoms.positions, 0)\n', (2173, 2196), True, 'import numpy as np\n'), ((2280, 2309), 'numpy.sum', 'np.sum', (['oh_atoms.positions', '(0)'], {}), '(oh_atoms.positions, 0)\n', (2286, 2309), True, 'import numpy as np\n'), ((893, 919), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (905, 919), False, 'import os\n'), ((992, 1018), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (1004, 1018), False, 'import os\n')] |
from util import get_raw_data
import pandas as pd
import numpy as np
def create_sma(price_array, window):
if len(price_array) < window:
return None
sma = np.zeros(len(price_array))
for i in range(window, len(price_array)):
sma[i] = np.sum(price_array[i-window:i])/float(window)
return sma
def create_ema(price_array, sma, window):
if len(price_array) < window:
return None
c = 2./float(window + 1)
ema = np.zeros(len(price_array))
for i in range(window, len(price_array)):
if i == window:
ema[i] = sma[i]
else:
ema[i] = c*(price_array[i] - ema[i-1]) + ema[i-1]
return ema
def create_mom(price_array, window):
mom = np.zeros(len(price_array))
for i in range(window, len(price_array)):
mom[i] = price_array[i] - price_array[i-window]
return mom
def create_macd(price_array, window = [12, 26]):
sma_12 = create_sma(price_array, window[0])
sma_26 = create_sma(price_array, window[1])
ema_12 = create_ema(price_array, sma_12, window[0])
ema_26 = create_ema(price_array, sma_26, window[1])
diff_ema = ema_12 - ema_26
sma_9 = create_sma(diff_ema, window = 9)
v = create_ema(diff_ema, sma_9, window = 9)
return diff_ema - v
def create_return(price_array, window):
output = np.zeros(len(price_array))
for i in range(window, len(price_array)):
output[i] = float(price_array[i+1] - price_array[i+1-window])/float(price_array[i+1-window])
if i+2 == len(price_array): break
return output
def create_up_down(price_array, window):
pastUD = np.zeros(len(price_array))
for i in range(window+1, len(price_array)):
pastUD[i] = window - 2*np.sum(price_array[i-window:i] < price_array[i-window-1:i-1])
return pastUD
def create_day_since_cross(cross_array):
day_since_cross = np.zeros(len(cross_array))
num = 0
for i in range(len(cross_array)):
if cross_array[i] == 0:
num += 1
else:
num = 0
day_since_cross[i] = num
return day_since_cross
def create_macd_cross(macd):
macd_cross = np.zeros(len(macd))
for i in range(1, len(macd)):
if macd[i-1] < 0 and macd[i] > 0:
macd_cross[i] = 1
elif macd[i-1] > 0 and macd[i] < 0:
macd_cross[i] = -1
else:
macd_cross[i] = 0
return macd_cross
def create_ma_cross(ma, price_array):
ma_cross = np.zeros(len(ma))
for i in range(1, len(ma)):
if ma[i-1] < price_array[i-1] and ma[i] > price_array[i]:
ma_cross[i] = 1
elif ma[i-1] > price_array[i-1] and ma[i] < price_array[i]:
ma_cross[i] = -1
else:
ma_cross[i] = 0
return ma_cross
def create_class(price_array):
output = np.zeros(len(price_array))
for i in range(len(price_array)):
if price_array[i+1] > price_array[i]:
output[i] = 1
if i+2 == len(price_array): break
return output
def main():
#df = data[['Date','Settle', 'Volume']]
data = get_raw_data()
df = data
window_sma = [5, 10, 15, 20, 50, 100, 200]
window_ema = [10, 12, 20, 26, 50, 100, 200]
price_val = np.array(df['average'])
time_val = np.array(df['date'])
daily_return = create_class(price_val)
sma_map = {}
ema_map = {}
mom_map = {}
sma_cross_map = {}
ema_cross_map = {}
up_down_map = {}
for k, l in zip(window_sma, window_ema):
sma_map["SMA" + str(k)] = create_sma(price_val, k)
sma_map["SMA" + str(l)] = create_sma(price_val, l)
ema_map["EMA" + str(l)] = create_ema(price_val, sma_map["SMA" + str(l)], l)
mom_map["MOM" + str(k)] = create_mom(price_val, k)
sma_cross_map["SMA_CROSS" + str(k)] = create_ma_cross(sma_map["SMA" + str(k)], price_val)
ema_cross_map["EMA_CROSS" + str(l)] = create_ma_cross(ema_map["EMA" + str(l)], price_val)
up_down_map["Up-Down" + str(k)] = create_up_down(price_val, l)
macd_val = create_macd(price_val)
macd_cross = create_macd_cross(macd_val)
day_since_cross_map = {}
for m,l in zip(sma_cross_map.keys(),ema_cross_map.keys()):
day_since_cross_map["Day_Since_" + str(m)] = create_day_since_cross(sma_cross_map[m])
day_since_cross_map["Day_Since_" + str(l)] = create_day_since_cross(ema_cross_map[l])
raw_data = {'Date':time_val, 'Price': price_val, 'Minute':np.array(df['minute']),
'Class': daily_return, 'Volume': np.array(df['volume']),'SMA5' : sma_map["SMA5"],
'SMA10' : sma_map["SMA10"], 'SMA15' : sma_map["SMA15"], 'SMA20' : sma_map["SMA20"],
'SMA50' : sma_map["SMA50"], 'SMA100' : sma_map["SMA100"], 'SMA200' : sma_map["SMA200"],
'EMA10' : ema_map["EMA10"], 'EMA12' : ema_map["EMA12"], 'EMA20' : ema_map["EMA20"],
'EMA26' : ema_map["EMA26"], 'EMA50' : ema_map["EMA50"], 'EMA100' : ema_map["EMA100"],
'EMA200' : ema_map["EMA200"], 'MACD' : macd_val, 'MACD_Cross' : macd_cross,
'SMA5Cross' : sma_cross_map["SMA_CROSS5"], 'SMA10Cross' : sma_cross_map["SMA_CROSS10"],
'SMA15Cross' : sma_cross_map["SMA_CROSS15"], 'SMA20Cross' : sma_cross_map["SMA_CROSS20"],
'SMA50Cross' : sma_cross_map["SMA_CROSS50"], 'SMA100Cross' : sma_cross_map["SMA_CROSS100"],
'EMA12Cross' : ema_cross_map["EMA_CROSS12"], 'EMA10Cross' : ema_cross_map["EMA_CROSS10"],
'EMA20Cross' : ema_cross_map["EMA_CROSS20"], 'EMA26Cross' : ema_cross_map["EMA_CROSS26"],
'EMA50Cross' : ema_cross_map["EMA_CROSS50"], 'EMA100Cross' : ema_cross_map["EMA_CROSS100"],
'SMA200Cross' : sma_cross_map["SMA_CROSS200"], 'EMA200Cross' : ema_cross_map["EMA_CROSS200"],
'Up-Down5' : up_down_map["Up-Down5"],'Up-Down10' : up_down_map["Up-Down10"], 'Up-Down15' : up_down_map["Up-Down15"],
'Up-Down20' : up_down_map["Up-Down20"],'Up-Down50' : up_down_map["Up-Down50"], 'Up-Down100' : up_down_map["Up-Down100"],
'Day_Since_SMA5Cross' : day_since_cross_map["Day_Since_SMA_CROSS5"], 'Day_Since_SMA10Cross' : day_since_cross_map["Day_Since_SMA_CROSS10"],
'Day_Since_SMA15Cross' : day_since_cross_map["Day_Since_SMA_CROSS15"], 'Day_Since_SMA20Cross' : day_since_cross_map["Day_Since_SMA_CROSS20"],
'Day_Since_SMA50Cross' : day_since_cross_map["Day_Since_SMA_CROSS50"], 'Day_Since_SMA100Cross' : day_since_cross_map["Day_Since_SMA_CROSS100"],
'Day_Since_EMA12Cross' : day_since_cross_map["Day_Since_EMA_CROSS12"], 'Day_Since_EMA10Cross' : day_since_cross_map["Day_Since_EMA_CROSS10"],
'Day_Since_EMA20Cross' : day_since_cross_map["Day_Since_EMA_CROSS20"], 'Day_Since_EMA26Cross' : day_since_cross_map["Day_Since_EMA_CROSS26"],
'Day_Since_EMA50Cross' : day_since_cross_map["Day_Since_EMA_CROSS50"], 'Day_Since_EMA100Cross' : day_since_cross_map["Day_Since_EMA_CROSS100"]
}
data = pd.DataFrame(raw_data)
data[200:len(price_val)].to_csv("spy1min.csv")
if __name__ == "__main__":
main()
| [
"pandas.DataFrame",
"numpy.array",
"numpy.sum",
"util.get_raw_data"
] | [((3073, 3087), 'util.get_raw_data', 'get_raw_data', ([], {}), '()\n', (3085, 3087), False, 'from util import get_raw_data\n'), ((3216, 3239), 'numpy.array', 'np.array', (["df['average']"], {}), "(df['average'])\n", (3224, 3239), True, 'import numpy as np\n'), ((3255, 3275), 'numpy.array', 'np.array', (["df['date']"], {}), "(df['date'])\n", (3263, 3275), True, 'import numpy as np\n'), ((6792, 6814), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data'], {}), '(raw_data)\n', (6804, 6814), True, 'import pandas as pd\n'), ((4439, 4461), 'numpy.array', 'np.array', (["df['minute']"], {}), "(df['minute'])\n", (4447, 4461), True, 'import numpy as np\n'), ((4500, 4522), 'numpy.array', 'np.array', (["df['volume']"], {}), "(df['volume'])\n", (4508, 4522), True, 'import numpy as np\n'), ((261, 294), 'numpy.sum', 'np.sum', (['price_array[i - window:i]'], {}), '(price_array[i - window:i])\n', (267, 294), True, 'import numpy as np\n'), ((1722, 1791), 'numpy.sum', 'np.sum', (['(price_array[i - window:i] < price_array[i - window - 1:i - 1])'], {}), '(price_array[i - window:i] < price_array[i - window - 1:i - 1])\n', (1728, 1791), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(precision=2)
import sklearn.metrics as skm
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from summ_evaluator import SummaryEvaluator
class ClassificationEvaluator(SummaryEvaluator):
def __init__(self, ds_name, model_name, num_classes):
super().__init__(ds_name, model_name, num_classes)
#metrics
self.report_dict = None
self.precision = None
self.recall = None
self.f1_score = None
self.support = None
self.true_negatives = None
self.false_positives = None
self.false_negatives = None
self.true_positives = None
def get_metrics(self, random=False, threshold=None, print_output=False, colorbar=False):
y = self.get_y(random, threshold)
self.report_dict = skm.classification_report(self.y_true, y, output_dict=True)
self.precision = self.report_dict['1']['precision']
self.recall = self.report_dict['1']['recall']
self.f1_score = self.report_dict['1']['f1-score']
self.support = self.report_dict['1']['support']
if print_output:
print(self.report_dict)
tn, fp, fn, tp = skm.confusion_matrix(self.y_true, y).ravel()
self.true_negatives = tn
self.false_positives = fp
self.false_negatives = fn
self.true_positives = tp
if print_output:
print('tn: {}, fp: {}, fn: {}, tp: {}'.format(tn, fp, fn, tp))
return skm.f1_score(self.y_true, y)
def plot(self, normalize=False, title=None, cmap=plt.cm.Blues, random=False, threshold=None, colorbar=False):
y = self.get_y(random, threshold)
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, {}'.format(self.ds_name)
# Compute confusion matrix
cm = confusion_matrix(self.y_true, y)
# Only use the labels that appear in the data
self.class_names = ['0', '1']
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, {}'.format(self.ds_name))
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
if colorbar:
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=self.class_names, yticklabels=self.class_names,
ylabel='True label',
xlabel='Predicted label')
ax.yaxis.label.set_size('xx-large')
ax.xaxis.label.set_size('xx-large')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), ha="center") # , rotation=45, rotation_mode="anchor")
for i, t in enumerate(ax.get_xticklabels()):
t.set_fontsize('xx-large')
#plt.setp(ax.get_yticklabels(), ha="center", rotation=90, rotation_mode="anchor")
for i, t in enumerate(ax.get_yticklabels()):
t.set_fontsize('xx-large')
t.set_rotation('vertical')
if i == 0:
ha = 'right'
t.set_ha(ha)
else:
ha = 'right'
t.set_ha(ha)
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
if i == 0:
va = 'top'
y = i + 0.2
else:
va = 'bottom'
y = i - 0.2
for j in range(cm.shape[1]):
ax.text(j, y, format(cm[i, j], fmt),
ha="center", va=va, size='xx-large',
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig('/home/emma/summary_evaluation/results/confusion_{}.pdf'.format(self.ds_name))
if __name__ == '__main__':
class_eval = ClassificationEvaluator('run_1_two', num_classes=2)
class_eval.get_metrics()
# Plot non-normalized confusion matrix
class_eval.plot(title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
class_eval.plot(normalize=True, title='Normalized confusion matrix')
plt.show() | [
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"sklearn.metrics.classification_report",
"sklearn.metrics.f1_score",
"numpy.arange",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.subplots"
] | [((51, 83), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (70, 83), True, 'import numpy as np\n'), ((4752, 4762), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4760, 4762), True, 'import matplotlib.pyplot as plt\n'), ((893, 952), 'sklearn.metrics.classification_report', 'skm.classification_report', (['self.y_true', 'y'], {'output_dict': '(True)'}), '(self.y_true, y, output_dict=True)\n', (918, 952), True, 'import sklearn.metrics as skm\n'), ((1563, 1591), 'sklearn.metrics.f1_score', 'skm.f1_score', (['self.y_true', 'y'], {}), '(self.y_true, y)\n', (1575, 1591), True, 'import sklearn.metrics as skm\n'), ((2138, 2170), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['self.y_true', 'y'], {}), '(self.y_true, y)\n', (2154, 2170), False, 'from sklearn.metrics import confusion_matrix\n'), ((2499, 2513), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2511, 2513), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1304), 'sklearn.metrics.confusion_matrix', 'skm.confusion_matrix', (['self.y_true', 'y'], {}), '(self.y_true, y)\n', (1288, 1304), True, 'import sklearn.metrics as skm\n'), ((2701, 2723), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (2710, 2723), True, 'import numpy as np\n'), ((2747, 2769), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (2756, 2769), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
print("Running k-neighborhood algorithm")
import pandas as pd
import numpy as np
import termplotlib as tpl
import seaborn as sns
import os
import sys
#add path to import to_bib module
sys.path.append("../to_bib")
from to_bib import *
try:
new_path=sys.argv[1]
except:
raise ValueError("Please, add the target path")
os.chdir(new_path)
df = pd.read_csv('human_classified.csv')
df.rename({"tipo":"target"}, axis=1,inplace=True)
rep_target = {k:index for index,k in enumerate(df.target.unique())}
df.target.replace(rep_target, inplace=True)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df.drop(["target","obra"], axis=1))
#std the data (knn don't work well when we have high heterogeneity of magnitudes among features)
scaled_features = scaler.transform(df.drop(["target","obra"],axis=1))
from sklearn.model_selection import train_test_split
X = scaled_features
y = df["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=101)
from sklearn.neighbors import KNeighborsClassifier
number_nb=28
knn = KNeighborsClassifier(n_neighbors=number_nb)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
target_dict = {k: v for v, k in rep_target.items()}
from sklearn.metrics import classification_report
lista_pred=[k for k in np.unique(pred)]
lista_ytest=[k for k in y_test.unique()]
class_values=list(set(lista_pred+lista_ytest))
t_names=[target_dict[k] for k in class_values]
print(classification_report(y_test, pred, target_names=t_names))
def get_kind(entry):
length_elements = len(entry.split(". "))
length_string = len(entry)
n_digit = n_char(entry, digit=True)
f_slah = n_char(entry, char="/")
parenthesis = n_char(entry, char="(")
tab = pd.DataFrame({"length_elements": length_elements, "length_string": length_string,
"n_digit":n_digit, "f_slash":f_slah, "parenthesis":parenthesis}, index=[0])
scaled_features = scaler.transform(tab)
result = knn.predict(scaled_features)
return target_dict[int(result)]
error_rate = []
for i in range(1,40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
print("I'm using {} neighborhoods".format(str(number_nb)))
fig = tpl.figure()
fig.plot(range(1,40), error_rate)
fig.show()
print("Done")
| [
"sys.path.append",
"pandas.DataFrame",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"termplotlib.figure",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.mean",
"os.chdir",
"numpy.unique"
] | [((222, 250), 'sys.path.append', 'sys.path.append', (['"""../to_bib"""'], {}), "('../to_bib')\n", (237, 250), False, 'import sys\n'), ((365, 383), 'os.chdir', 'os.chdir', (['new_path'], {}), '(new_path)\n', (373, 383), False, 'import os\n'), ((390, 425), 'pandas.read_csv', 'pd.read_csv', (['"""human_classified.csv"""'], {}), "('human_classified.csv')\n", (401, 425), True, 'import pandas as pd\n'), ((648, 664), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (662, 664), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1006, 1062), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.15)', 'random_state': '(101)'}), '(X, y, test_size=0.15, random_state=101)\n', (1022, 1062), False, 'from sklearn.model_selection import train_test_split\n'), ((1134, 1177), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'number_nb'}), '(n_neighbors=number_nb)\n', (1154, 1177), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2372, 2384), 'termplotlib.figure', 'tpl.figure', ([], {}), '()\n', (2382, 2384), True, 'import termplotlib as tpl\n'), ((1518, 1575), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'pred'], {'target_names': 't_names'}), '(y_test, pred, target_names=t_names)\n', (1539, 1575), False, 'from sklearn.metrics import classification_report\n'), ((1804, 1972), 'pandas.DataFrame', 'pd.DataFrame', (["{'length_elements': length_elements, 'length_string': length_string,\n 'n_digit': n_digit, 'f_slash': f_slah, 'parenthesis': parenthesis}"], {'index': '[0]'}), "({'length_elements': length_elements, 'length_string':\n length_string, 'n_digit': n_digit, 'f_slash': f_slah, 'parenthesis':\n parenthesis}, index=[0])\n", (1816, 1972), True, 'import pandas as pd\n'), ((2158, 2193), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'i'}), '(n_neighbors=i)\n', (2178, 2193), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1359, 1374), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (1368, 1374), True, 'import numpy as np\n'), ((2279, 2304), 'numpy.mean', 'np.mean', (['(pred_i != y_test)'], {}), '(pred_i != y_test)\n', (2286, 2304), True, 'import numpy as np\n')] |
"""
This module implements all the functions to read a video or a picture
using ffmpeg. It is quite ugly, as there are many pitfalls to avoid
Modified version, copied from
https://github.com/Zulko/moviepy/blob/master/moviepy/video/io/ffmpeg_reader.py
and
https://github.com/Zulko/moviepy/blob/master/moviepy/audio/io/readers.py
MIT license
"""
from __future__ import division
import subprocess as sp
from subprocess import DEVNULL
import re
import numpy as np
import warnings
import os
import datetime
import time
class FFMPEG_VideoReader:
def __init__(self, filename, infos, bufsize=None, pix_fmt="rgb24", target_resolution=None,
resize_algo='bicubic', read1=False):
self.filename = filename
self.proc = None
self.fps = infos['video_fps']
self.size = infos['video_size']
self.rotation = infos['video_rotation']
if target_resolution:
# revert the order, as ffmpeg used (width, height)
target_resolution = target_resolution[1], target_resolution[0]
if None in target_resolution:
ratio = 1
for idx, target in enumerate(target_resolution):
if target:
ratio = target / self.size[idx]
self.size = (int(self.size[0] * ratio), int(self.size[1] * ratio))
else:
self.size = target_resolution
self.resize_algo = resize_algo
self.duration = infos['video_duration']
self.ffmpeg_duration = infos['duration']
self.nframes = infos['video_nframes']
self.infos = infos
self.pix_fmt = pix_fmt
if 'rgba' or 'bgra' in pix_fmt:
self.depth = 4
else:
self.depth = 3
if bufsize is None:
w, h = self.size
bufsize = self.depth * w * h + 100
self.bufsize = bufsize
self.initialize()
self.lastread = None
self.pos = 0
if read1:
self.read_frame()
def initialize(self, starttime=0):
"""Opens the file, creates the pipe. """
self.close() # if any
if starttime != 0:
offset = min(1, starttime)
i_arg = ['-ss', "%.06f" % (starttime - offset), '-hwaccel', 'vdpau', '-i', self.filename, '-ss',
"%.06f" % offset]
else:
i_arg = ['-i', self.filename]
# i_arg.extend('-hwaccel vdpau'.split())
cmd = (['ffmpeg'] + i_arg + ['-loglevel', 'error', '-f', 'image2pipe', '-vf', 'scale=%d:%d' % tuple(self.size),
'-sws_flags', self.resize_algo, "-pix_fmt", self.pix_fmt, '-vcodec', 'rawvideo',
'-'])
popen_params = {"bufsize": self.bufsize, "stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": DEVNULL}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
self.proc = sp.Popen(cmd, **popen_params)
def skip_frames(self, n=1):
"""Reads and throws away n frames """
w, h = self.size
self.proc.stdout.read(self.depth * w * h * n)
self.proc.stdout.flush()
self.pos += n
def read_frame(self):
w, h = self.size
nbytes = self.depth * w * h
s = self.proc.stdout.read(nbytes)
if len(s) != nbytes:
warnings.warn("Warning: in file %s, " % self.filename + "%d bytes wanted but %d bytes read," % (
nbytes, len(s)) + "at frame %d/%d, at time %.02f/%.02f sec. " % (
self.pos, self.nframes, 1.0 * self.pos / self.fps,
self.duration) + "Using the last valid frame instead.", UserWarning)
if not hasattr(self, 'lastread'):
raise IOError(("failed to read the first frame of "
"video file %s. That might mean that the file is "
"corrupted. That may also mean that you are using "
"a deprecated version of FFMPEG. On Ubuntu/Debian "
"for instance the version in the repos is deprecated. "
"Please update to a recent version from the website." + '\n' + str(
self.proc.stderr.read().decode())) % self.filename)
result = self.lastread
else:
self.pos += 1
result = np.fromstring(s, dtype='uint8')
# result.shape = (h, w, len(s) // (w * h))
self.lastread = result
return result
def get_frame(self, t):
""" Read a file video frame at time t.
Note for coders: getting an arbitrary frame in the video with
ffmpeg can be painfully slow if some decoding has to be done.
This function tries to avoid fetching arbitrary frames
whenever possible, by moving between adjacent frames.
"""
# these definitely need to be rechecked sometime. Seems to work.
# I use that horrible '+0.00001' hack because sometimes due to numerical
# imprecisions a 3.0 can become a 2.99999999... which makes the int()
# go to the previous integer. This makes the fetching more robust in the
# case where you get the nth frame by writing get_frame(n/fps).
pos = int(self.fps * t + 0.00001) + 1
# Initialize proc if it is not open
if not self.proc:
self.initialize(t)
self.pos = pos
self.lastread = self.read_frame()
if pos == self.pos:
return self.lastread
else:
if (pos < self.pos) or (pos > self.pos + 100):
self.initialize(t)
self.pos = pos
else:
self.skip_frames(pos - self.pos - 1)
result = self.read_frame()
self.pos = pos
return result
def close(self):
if self.proc:
self.proc.terminate()
self.proc.stdout.close()
self.proc.stderr.close()
self.proc.wait()
self.proc = None
if hasattr(self, 'lastread'):
del self.lastread
def ffmpeg_parse_infos(filename, print_infos=False, check_duration=True, fps_source='tbr'):
"""Get file infos using ffmpeg.
Returns a dictionnary with the fields:
"video_found", "video_fps", "duration", "video_nframes",
"video_duration", "audio_found", "audio_fps"
"video_duration" is slightly smaller than "duration" to avoid
fetching the uncomplete frames at the end, which raises an error.
"""
# open the file in a pipe, provoke an error, read output
cmd = ['ffmpeg', "-i", filename]
popen_params = {"bufsize": 10 ** 5, "stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": DEVNULL}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
proc = sp.Popen(cmd, **popen_params)
proc.stdout.readline()
proc.terminate()
infos = proc.stderr.read().decode('utf8')
del proc
if print_infos:
# print the whole info text returned by FFMPEG
print(infos)
lines = infos.splitlines()
if "No such file or directory" in lines[-1]:
raise IOError(("the file %s could not be found!\n"
"Please check that you entered the correct "
"path.") % filename)
result = dict()
# get duration (in seconds)
result['duration'] = None
if check_duration:
try:
keyword = 'Duration: '
index = 0
line = [l for l in lines if keyword in l][index]
match = re.findall("([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])", line)[0]
result['duration'] = convertTime(match)
except Exception as Ex:
raise IOError((str(Ex) + " failed to read the duration of file %s.\n"
"Here are the file infos returned by ffmpeg:\n\n%s") % (filename, infos))
# get the output line that speaks about video
lines_video = [l for l in lines if ' Video: ' in l and re.search('\d+x\d+', l)]
result['video_found'] = (lines_video != [])
if result['video_found']:
try:
line = lines_video[0]
# get the size, of the form 460x320 (w x h)
match = re.search(" [0-9]*x[0-9]*([, ])", line)
s = list(map(int, line[match.start():match.end() - 1].split('x')))
result['video_size'] = s
except Exception:
raise IOError(("failed to read video dimensions in file %s.\n"
"Here are the file infos returned by ffmpeg:\n\n%s") % (filename, infos))
# Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
# tbc, and sometimes tbc/2...
# Current policy: Trust tbr first, then fps unless fps_source is
# specified as 'fps' in which case try fps then tbr
# If result is near from x*1000/1001 where x is 23,24,25,50,
# replace by x*1000/1001 (very common case for the fps).
def get_tbr():
match = re.search("( [0-9]*.| )[0-9]* tbr", line)
# Sometimes comes as e.g. 12k. We need to replace that with 12000.
s_tbr = line[match.start():match.end()].split(' ')[1]
if "k" in s_tbr:
tbr = float(s_tbr.replace("k", "")) * 1000
else:
tbr = float(s_tbr)
return tbr
def get_fps():
match = re.search("( [0-9]*.| )[0-9]* fps", line)
fps = float(line[match.start():match.end()].split(' ')[1])
return fps
if fps_source == 'tbr':
try:
result['video_fps'] = get_tbr()
except Exception:
result['video_fps'] = get_fps()
elif fps_source == 'fps':
try:
result['video_fps'] = get_fps()
except Exception:
result['video_fps'] = get_tbr()
# It is known that a fps of 24 is often written as 24000/1001
# but then ffmpeg nicely rounds it to 23.98, which we hate.
coef = 1000.0 / 1001.0
fps = result['video_fps']
for x in [23, 24, 25, 30, 50]:
if (fps != x) and abs(fps - x * coef) < .01:
result['video_fps'] = x * coef
if check_duration:
result['video_nframes'] = int(result['duration'] * result['video_fps']) + 1
result['video_duration'] = result['duration']
else:
result['video_nframes'] = 1
result['video_duration'] = None
# We could have also recomputed the duration from the number
# of frames, as follows:
# >>> result['video_duration'] = result['video_nframes'] / result['video_fps']
# get the video rotation info.
try:
rotation_lines = [l for l in lines if 'rotate :' in l and re.search('\d+$', l)]
if len(rotation_lines):
rotation_line = rotation_lines[0]
match = re.search('\d+$', rotation_line)
result['video_rotation'] = int(rotation_line[match.start(): match.end()])
else:
result['video_rotation'] = 0
except Exception:
raise IOError(("failed to read video rotation in file %s.\n"
"Here are the file infos returned by ffmpeg:\n\n%s") % (filename, infos))
lines_audio = [l for l in lines if ' Audio: ' in l]
result['audio_found'] = lines_audio != []
if result['audio_found']:
line = lines_audio[0]
try:
match = re.search(" [0-9]* Hz", line)
result['audio_fps'] = int(line[match.start() + 1:match.end()])
except Exception:
result['audio_fps'] = 'unknown'
return result
def convertTime(timeStr):
# https://stackoverflow.com/a/10663851
main, remain = timeStr.split('.')
x = time.strptime(main, '%H:%M:%S')
res = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds()
return res + float('.' + remain)
| [
"subprocess.Popen",
"re.findall",
"datetime.timedelta",
"re.search",
"time.strptime",
"numpy.fromstring"
] | [((6934, 6963), 'subprocess.Popen', 'sp.Popen', (['cmd'], {}), '(cmd, **popen_params)\n', (6942, 6963), True, 'import subprocess as sp\n'), ((12006, 12037), 'time.strptime', 'time.strptime', (['main', '"""%H:%M:%S"""'], {}), "(main, '%H:%M:%S')\n", (12019, 12037), False, 'import time\n'), ((2964, 2993), 'subprocess.Popen', 'sp.Popen', (['cmd'], {}), '(cmd, **popen_params)\n', (2972, 2993), True, 'import subprocess as sp\n'), ((4470, 4501), 'numpy.fromstring', 'np.fromstring', (['s'], {'dtype': '"""uint8"""'}), "(s, dtype='uint8')\n", (4483, 4501), True, 'import numpy as np\n'), ((8365, 8404), 're.search', 're.search', (['""" [0-9]*x[0-9]*([, ])"""', 'line'], {}), "(' [0-9]*x[0-9]*([, ])', line)\n", (8374, 8404), False, 'import re\n'), ((9153, 9194), 're.search', 're.search', (['"""( [0-9]*.| )[0-9]* tbr"""', 'line'], {}), "('( [0-9]*.| )[0-9]* tbr', line)\n", (9162, 9194), False, 'import re\n'), ((9549, 9590), 're.search', 're.search', (['"""( [0-9]*.| )[0-9]* fps"""', 'line'], {}), "('( [0-9]*.| )[0-9]* fps', line)\n", (9558, 9590), False, 'import re\n'), ((11695, 11724), 're.search', 're.search', (['""" [0-9]* Hz"""', 'line'], {}), "(' [0-9]* Hz', line)\n", (11704, 11724), False, 'import re\n'), ((12048, 12119), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'x.tm_hour', 'minutes': 'x.tm_min', 'seconds': 'x.tm_sec'}), '(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec)\n', (12066, 12119), False, 'import datetime\n'), ((7680, 7745), 're.findall', 're.findall', (['"""([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])"""', 'line'], {}), "('([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])', line)\n", (7690, 7745), False, 'import re\n'), ((8136, 8161), 're.search', 're.search', (['"""\\\\d+x\\\\d+"""', 'l'], {}), "('\\\\d+x\\\\d+', l)\n", (8145, 8161), False, 'import re\n'), ((11111, 11144), 're.search', 're.search', (['"""\\\\d+$"""', 'rotation_line'], {}), "('\\\\d+$', rotation_line)\n", (11120, 11144), False, 'import re\n'), ((10979, 11000), 're.search', 're.search', (['"""\\\\d+$"""', 'l'], {}), "('\\\\d+$', l)\n", (10988, 11000), False, 'import re\n')] |
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from time import time
from six import iteritems
from ....tunnel import TableTunnel
from .metrics_base import MetricNode
from ...nodes.exporters import get_input_table_name, get_input_partitions
from ...core.dag import DagEndpointType
class ConfusionMatrixNode(MetricNode):
def __init__(self, col_true, col_pred, mat_sink, col_sink):
super(ConfusionMatrixNode, self).__init__("confusionmatrix")
def gen_cm_output_table_name(context):
self._data_table_name = 'tmp_p_cm_%d' % int(time())
return self._data_table_name
self.marshal({
"parameters": {
"labelColName": col_true,
"predictionColName": col_pred
},
"inputs": [(1, "input", DagEndpointType.DATA)]
})
self.add_exporter("inputTableName", lambda context: get_input_table_name(context, self, "input"))
self.add_exporter("inputTablePartitions", lambda context: get_input_partitions(context, self, "input"))
self.add_exporter("outputTableName", lambda context: gen_cm_output_table_name(context))
self._mat_sink = mat_sink
self._col_sink = col_sink
def calc_metrics(self, context):
tunnel = TableTunnel(context._odps)
down_session = tunnel.create_download_session(self._data_table_name)
# skip the first row
reader = down_session.open_record_reader(0, 100)
col_data = json.loads(reader.read().values[0])
col_data = map(lambda p: p[1], sorted(iteritems(col_data), key=lambda a: int(a[0][1:])))
self._col_sink.put(col_data)
import numpy as np
mat = np.matrix([rec[2:] for rec in reader.reads()])
self._mat_sink.put(mat)
super(ConfusionMatrixNode, self).calc_metrics(context)
class ROCCurveNode(MetricNode):
def __init__(self, col_true, col_pred, col_score, pos_label, fpr_sink, tpr_sink, threshold_sink):
super(ROCCurveNode, self).__init__("roc")
def gen_roc_output_table_name(context):
self._data_table_name = 'tmp_p_roc_%d' % int(time())
return self._data_table_name
self.marshal({
"parameters": {
"labelColName": col_true,
"predictionColName": col_pred,
"predictionScoreName": col_score,
"goodValue": pos_label
},
"inputs": [(1, "input", DagEndpointType.DATA)]
})
self.add_exporter("inputTableName", lambda context: get_input_table_name(context, self, "input"))
self.add_exporter("inputTablePartitions", lambda context: get_input_partitions(context, self, "input"))
self.add_exporter("outputTableName", lambda context: gen_roc_output_table_name(context))
self._fpr_sink = fpr_sink
self._tpr_sink = tpr_sink
self._thresh_sink = threshold_sink
def calc_metrics(self, context):
tunnel = TableTunnel(context._odps)
down_session = tunnel.create_download_session(self._data_table_name)
reader = down_session.open_record_reader(0, 1000)
import numpy as np
mat = np.matrix([rec.values for rec in reader.reads()])
thresh = mat[:, 0]
tp, fn, tn, fp = mat[:, 1], mat[:, 2], mat[:, 3], mat[:, 4]
self._tpr_sink.put(np.squeeze(np.asarray(tp * 1.0 / (tp + fn))))
self._fpr_sink.put(np.squeeze(np.asarray(fp * 1.0 / (fp + tn))))
self._thresh_sink.put(np.squeeze(np.asarray(thresh)))
super(ROCCurveNode, self).calc_metrics(context)
| [
"numpy.asarray",
"six.iteritems",
"time.time"
] | [((2335, 2354), 'six.iteritems', 'iteritems', (['col_data'], {}), '(col_data)\n', (2344, 2354), False, 'from six import iteritems\n'), ((4135, 4167), 'numpy.asarray', 'np.asarray', (['(tp * 1.0 / (tp + fn))'], {}), '(tp * 1.0 / (tp + fn))\n', (4145, 4167), True, 'import numpy as np\n'), ((4208, 4240), 'numpy.asarray', 'np.asarray', (['(fp * 1.0 / (fp + tn))'], {}), '(fp * 1.0 / (fp + tn))\n', (4218, 4240), True, 'import numpy as np\n'), ((4284, 4302), 'numpy.asarray', 'np.asarray', (['thresh'], {}), '(thresh)\n', (4294, 4302), True, 'import numpy as np\n'), ((1331, 1337), 'time.time', 'time', ([], {}), '()\n', (1335, 1337), False, 'from time import time\n'), ((2900, 2906), 'time.time', 'time', ([], {}), '()\n', (2904, 2906), False, 'from time import time\n')] |
import numpy as np
from . import Kernel
class Model(Kernel.Kernel):
""" A subclass that represents the Young & <NAME> uncoupled model
of single-vertical wavenumber near-inertial waves and STEADY
barotropic quasigeostrophic flow.
It defines the quasigeostrophic inversion relation and the diagnostics
specific to this subclass.
Reference
----------
<NAME>. & <NAME>. 1997 "Propagation of near-inertial
oscillations through a geostrophic flow." J. Mar. Res. 55 (4), 735–766.
"""
def __init__(
self,
**kwargs
):
self.model = " YBJ Model (Steady QG flow)"
super(Model, self).__init__(**kwargs)
def _allocate_variables(self):
""" Allocate variables so that variable addresses are close in memory.
"""
self.dtype_real = np.dtype('float64')
self.dtype_cplx = np.dtype('complex128')
self.shape_real = (self.ny, self.nx)
self.shape_cplx = (self.ny, self.nx)
# vorticity
self.q = np.zeros(self.shape_real, self.dtype_real)
self.qh = np.zeros(self.shape_cplx, self.dtype_cplx)
# stream function
self.p = np.zeros(self.shape_real, self.dtype_real)
self.ph = np.zeros(self.shape_cplx, self.dtype_cplx)
# wave amplitude
self.phi = np.zeros(self.shape_real, self.dtype_cplx)
self.phih = np.zeros(self.shape_cplx, self.dtype_cplx)
def _step_etdrk4(self):
""" Compute the advective term–––the Jacobian between psi and phi.
Returns
-------
complex array of floats
The Fourier transform of Jacobian(psi,phi)
"""
# phi-equation
self.phih0 = self.phih.copy()
self._calc_grad_phi()
Fn0w = -self.jacobian_psi_phi() - 0.5j*self.fft(self.phi*self.q_psi)
self.phih = (self.expch_hw*self.phih0 + Fn0w*self.Qhw)*self.filtr
self.phih1 = self.phih.copy()
# phi-equation
self._calc_grad_phi()
Fnaw = -self.jacobian_psi_phi() - 0.5j*self.fft(self.phi*self.q_psi)
self.phih = (self.expch_hw*self.phih0 + Fnaw*self.Qhw)*self.filtr
# phi-equation
self._calc_grad_phi()
Fnbw = -self.jacobian_psi_phi() - 0.5j*self.fft(self.phi*self.q_psi)
self.phih = (self.expch_hw*self.phih1 + ( 2.*Fnbw - Fn0w )*self.Qhw)*self.filtr
# phi-equation
self._calc_rel_vorticity()
self._calc_grad_phi()
Fncw = -self.jacobian_psi_phi() - 0.5j*self.fft(self.phi*self.q_psi)
self.phih = (self.expchw*self.phih0 + Fn0w*self.f0w + 2.*(Fnaw+Fnbw)*self.fabw\
+ Fncw*self.fcw)*self.filtr
# physical space
self.phi = self.ifft(self.phih)
def _initialize_etdrk4(self):
""" Compute coefficients of the exponential time-dfferencing method
with a Runge-Kutta 4 scheme.
Rereferences
------------
See Cox and Matthews, J. Comp. Physics., 176(2):430-455, 2002.
Kassam and Trefethen, IAM J. Sci. Comput., 26(4):1214-233, 2005.
"""
M = 32. # number of points for line integral in the complex plane
rho = 1. # radius for complex integration
r = rho*np.exp(2j*np.pi*((np.arange(1.,M+1))/M)) # roots for integral
# the exponent for the linear part
self.c = np.zeros((self.nl,self.nk),self.dtype_cplx) -1j*self.k*self.U
self.c += -self.nu4w*self.wv4 - 0.5j*self.f*(self.wv2/self.kappa2)\
- self.nuw*self.wv2 - self.muw
ch = self.c*self.dt
self.expchw = np.exp(ch)
self.expch_hw = np.exp(ch/2.)
self.expch2w = np.exp(2.*ch)
LR = ch[...,np.newaxis] + r[np.newaxis,np.newaxis,...]
LR2 = LR*LR
LR3 = LR2*LR
self.Qhw = self.dt*(((np.exp(LR/2.)-1.)/LR).mean(axis=-1))
self.f0w = self.dt*( ( ( -4. - LR + ( np.exp(LR)*( 4. - 3.*LR + LR2 ) ) )/ LR3 ).mean(axis=-1) )
self.fabw = self.dt*( ( ( 2. + LR + np.exp(LR)*( -2. + LR ) )/ LR3 ).mean(axis=-1) )
self.fcw = self.dt*( ( ( -4. -3.*LR - LR2 + np.exp(LR)*(4.-LR) )/ LR3 ).mean(axis=-1) )
def jacobian_psi_phi(self):
""" Compute the advective term–––the Jacobian between psi and phi.
Returns
-------
complex array of floats
The Fourier transform of Jacobian(psi,phi)
"""
return self.fft( (self.u*self.phix + self.v*self.phiy) )
def _calc_grad_phi(self):
""" Compute gradient of wave velocity. """
self.phix, self.phiy = self.ifft(self.ik*self.phih), self.ifft(self.il*self.phih)
def _invert(self):
""" Calculate the streamfunction given the potential vorticity.
"""
self.ph = -self.wv2i*self.qh
def _initialize_class_diagnostics(self):
""" Compute subclass-specific derived fields.
"""
pass
def _calc_class_derived_fields(self):
""" Compute the geostrophic relative vorticity–––the Laplacian of the
streamfuctions.
"""
pass
| [
"numpy.arange",
"numpy.dtype",
"numpy.zeros",
"numpy.exp"
] | [((869, 888), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (877, 888), True, 'import numpy as np\n'), ((915, 937), 'numpy.dtype', 'np.dtype', (['"""complex128"""'], {}), "('complex128')\n", (923, 937), True, 'import numpy as np\n'), ((1067, 1109), 'numpy.zeros', 'np.zeros', (['self.shape_real', 'self.dtype_real'], {}), '(self.shape_real, self.dtype_real)\n', (1075, 1109), True, 'import numpy as np\n'), ((1129, 1171), 'numpy.zeros', 'np.zeros', (['self.shape_cplx', 'self.dtype_cplx'], {}), '(self.shape_cplx, self.dtype_cplx)\n', (1137, 1171), True, 'import numpy as np\n'), ((1218, 1260), 'numpy.zeros', 'np.zeros', (['self.shape_real', 'self.dtype_real'], {}), '(self.shape_real, self.dtype_real)\n', (1226, 1260), True, 'import numpy as np\n'), ((1280, 1322), 'numpy.zeros', 'np.zeros', (['self.shape_cplx', 'self.dtype_cplx'], {}), '(self.shape_cplx, self.dtype_cplx)\n', (1288, 1322), True, 'import numpy as np\n'), ((1369, 1411), 'numpy.zeros', 'np.zeros', (['self.shape_real', 'self.dtype_cplx'], {}), '(self.shape_real, self.dtype_cplx)\n', (1377, 1411), True, 'import numpy as np\n'), ((1433, 1475), 'numpy.zeros', 'np.zeros', (['self.shape_cplx', 'self.dtype_cplx'], {}), '(self.shape_cplx, self.dtype_cplx)\n', (1441, 1475), True, 'import numpy as np\n'), ((3667, 3677), 'numpy.exp', 'np.exp', (['ch'], {}), '(ch)\n', (3673, 3677), True, 'import numpy as np\n'), ((3702, 3718), 'numpy.exp', 'np.exp', (['(ch / 2.0)'], {}), '(ch / 2.0)\n', (3708, 3718), True, 'import numpy as np\n'), ((3739, 3755), 'numpy.exp', 'np.exp', (['(2.0 * ch)'], {}), '(2.0 * ch)\n', (3745, 3755), True, 'import numpy as np\n'), ((3423, 3468), 'numpy.zeros', 'np.zeros', (['(self.nl, self.nk)', 'self.dtype_cplx'], {}), '((self.nl, self.nk), self.dtype_cplx)\n', (3431, 3468), True, 'import numpy as np\n'), ((3318, 3339), 'numpy.arange', 'np.arange', (['(1.0)', '(M + 1)'], {}), '(1.0, M + 1)\n', (3327, 3339), True, 'import numpy as np\n'), ((3891, 3907), 'numpy.exp', 'np.exp', (['(LR / 2.0)'], {}), '(LR / 2.0)\n', (3897, 3907), True, 'import numpy as np\n'), ((3976, 3986), 'numpy.exp', 'np.exp', (['LR'], {}), '(LR)\n', (3982, 3986), True, 'import numpy as np\n'), ((4080, 4090), 'numpy.exp', 'np.exp', (['LR'], {}), '(LR)\n', (4086, 4090), True, 'import numpy as np\n'), ((4183, 4193), 'numpy.exp', 'np.exp', (['LR'], {}), '(LR)\n', (4189, 4193), True, 'import numpy as np\n')] |
import numpy as np
class Function:
def __init__(self, function, functionDerivate):
self.function = function
self.functionDerivate = functionDerivate
def __call__(self, x):
return self.function(x)
def derivate(self, x):
return self.functionDerivate(x)
def sigmoidFunction(x):
return 1.0/(1.0 + np.exp(x))
def sigmoidFunctionDerivate(x):
return sigmoid(x)*sigmoid(1-x)
sigmoid = Function(sigmoidFunction, sigmoidFunctionDerivate)
def reluFunction(x):
x[x<=0] = 0
return x
def reluFunctionDerivate(x):
x[x<=0] = 0
x[x>0] = 1
return x
relu = Function(reluFunction, reluFunctionDerivate)
def softmax(x):
return (np.exp(x-max(x))) / sum(np.exp(x-max(x)))
def softmaxDeviate(x):
pass
softmax = Function(softmax, softmaxDeviate)
class lossFunction:
def __init__(self, function, functionDerivate):
self.function = function
self.functionDerivate = functionDerivate
def __call__(self, prediction, labels):
return self.function(prediction, labels)
def derivate(self, prediction, labels):
return self.functionDerivate(prediction, labels)
def l2error(prediction, labels):
retunr (labels-prediction)**2
def l2errorDerivate(prediction, labels):
return 2*(prediction-labels)
sse = lossFunction(l2error, l2errorDerivate) | [
"numpy.exp"
] | [((352, 361), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (358, 361), True, 'import numpy as np\n')] |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from arch.api.utils import log_utils
from federatedml.util import fate_operator
LOGGER = log_utils.getLogger()
class HeteroFederatedAggregator(object):
@staticmethod
def aggregate_add(table_a, table_b):
"""
Compute a + b
Parameters
----------
table_a: DTable, input data a
table_b: DTable, input data b
Returns
----------
DTable
sum of each element in table_a and table_b
"""
table_add = table_a.join(table_b, lambda a, b: a + b)
return table_add
# do res = (a + b)^2
@staticmethod
def aggregate_add_square(table_a, table_b, table_a_square, table_b_square):
"""
Compute (a + b)^2
Parameters
----------
table_a: DTable, input data a
table_b: DTable, input data b
table_a_square: DTable, a^2
table_b_square: DTable, b^2
Returns
----------
DTable
return (a + b)^2
"""
table_a_mul_b = table_a.join(table_b, lambda a, b: 2 * a * b)
table_a_square_add_b_square = HeteroFederatedAggregator.aggregate_add(table_a_square, table_b_square)
table_add_square = HeteroFederatedAggregator.aggregate_add(table_a_mul_b, table_a_square_add_b_square)
return table_add_square
@staticmethod
def separate(value, size_list):
"""
Separate value in order to several set according size_list
Parameters
----------
value: list or ndarray, input data
size_list: list, each set size
Returns
----------
list
set after separate
"""
separate_res = []
cur = 0
for size in size_list:
separate_res.append(value[cur:cur + size])
cur += size
return separate_res
@staticmethod
def aggregate_mean(table):
"""
Compute the mean of values in table
Parameters
----------
table: DTable, input data
Returns
----------
float or ndarray
the mean of values in table
"""
count = table.count()
reduce_res = table.reduce(fate_operator.reduce_add)
if isinstance(reduce_res, list):
reduce_res = np.array(reduce_res)
reduce_res = reduce_res / count
return reduce_res
| [
"arch.api.utils.log_utils.getLogger",
"numpy.array"
] | [((726, 747), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (745, 747), False, 'from arch.api.utils import log_utils\n'), ((2938, 2958), 'numpy.array', 'np.array', (['reduce_res'], {}), '(reduce_res)\n', (2946, 2958), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
#
import sys, os
sys.path.append('./utils/')
import tools
import datatools as dtools
from time import time
os.environ["CUDA_VISIBLE_DEVICES"]="1"
#
import tensorflow as tf
import tensorflow_hub as hub
#############################
seed_in = 3
from numpy.random import seed
seed(seed_in)
from tensorflow import set_random_seed
set_random_seed(seed_in)
bs = 400
nc, ncf = 128, 512
step, stepf = 10, 40
path = '../data/z00/'
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
ftypefpm = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
numd = 1e-3
num = int(numd*bs**3)
R1 = 3
R2 = 3*1.2
kny = np.pi*nc/bs
kk = tools.fftk((nc, nc, nc), bs)
#############################
pad = int(0)
masktype = 'constant'
dependence = None
suff = 'pad%d-cic-allnn-cmask-pois4normmix-monp'%pad
savepath = '../models/n10/%s/module/'%suff
ftname = ['cic']
tgname = ['pnn', 'mnnnomean']
nchannels = len(ftname)
ntargets = len(tgname)
def get_meshes(seed, galaxies=False):
mesh = {}
mesh['s'] = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'mesh/s/')
mesh['cic'] = np.load(path + ftypefpm%(bs, nc, seed, step) + 'mesh/d.npy')
# partp = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'dynamic/1/Position/')
# mesh['cic'] = tools.paintcic(partp, bs, nc)
# mesh['logcic'] = np.log(1 + mesh['cic'])
# mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
# mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
# mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
# mesh['GD'] = mesh['R1'] - mesh['R2']
#
hmesh = {}
hposall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/PeakPosition/')[1:]
massall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/Mass/')[1:].reshape(-1)*1e10
hposd = hposall[:num].copy()
massd = massall[:num].copy()
hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
hmesh['mnnnomean'] = (hmesh['mnn'])/hmesh['mnn'].mean()
#hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
#hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
#hmesh['mcicnomean'] = (hmesh['mcic'])/hmesh['mcic'].mean()
#hmesh['mcicovd'] = (hmesh['mcic'] - hmesh['mcic'].mean())/hmesh['mcic'].mean()
#hmesh['mcicovdR3'] = tools.fingauss(hmesh['mcicovd'], kk, R1, kny)
#hmesh['pcicovd'] = (hmesh['pcic'] - hmesh['pcic'].mean())/hmesh['pcic'].mean()
#hmesh['pcicovdR3'] = tools.fingauss(hmesh['pcicovd'], kk, R1, kny)
#hmesh['lmnn'] = np.log(logoffset + hmesh['mnn'])
return mesh, hmesh
#####
#
tf.reset_default_graph()
files = os.listdir(savepath)
paths = [os.path.join(savepath, basename) for basename in files]
modpath = max(paths, key=os.path.getctime)
print(modpath)
module = hub.Module(modpath+'/likelihood/')
xx = tf.placeholder(tf.float32, shape=[None, None, None, None, len(ftname)], name='input')
yy = tf.placeholder(tf.float32, shape=[None, None, None, None, len(tgname)], name='labels')
locpos = module(dict(features=xx, labels=yy), as_dict=True)['locpos']
logitspos = module(dict(features=xx, labels=yy), as_dict=True)['logitspos']
scalepos = module(dict(features=xx, labels=yy), as_dict=True)['scalepos']
rawsamplepos = module(dict(features=xx, labels=yy), as_dict=True)['rawsamplepos']
rawsamples = module(dict(features=xx, labels=yy), as_dict=True)['rawsample']
samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']
pred_mask = module(dict(features=xx, labels=yy), as_dict=True)['pred_mask']
vmeshes = {}
shape = [nc,nc,nc]
kk = tools.fftk(shape, bs)
kmesh = sum(i**2 for i in kk)**0.5
with tf.Session() as sess:
sess.run(tf.initializers.global_variables())
for seed in [100]:
pass
seed = 100
batch = 1
vmeshes[seed] = get_meshes(seed)
xxm = np.stack([np.pad(vmeshes[seed][0][i], pad, 'wrap') for i in ftname], axis=-1)
#yym = np.stack([np.pad(vmeshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(vmeshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
yym = np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)
print('xxm, yym shape = ', xxm.shape, yym.shape)
#logits = np.squeeze(sess.run(logitspos, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)}))
#loc = np.squeeze(sess.run(locpos, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)}))
#scale = np.squeeze(sess.run(scalepos, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)}))
predmask = np.squeeze(sess.run(pred_mask, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)}))
rawpredspos = np.squeeze(sess.run(rawsamplepos, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)}))
rawpreds = np.squeeze(sess.run(rawsamples, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)}))
preds = np.squeeze(sess.run(samples, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)}))
# print(rawpredspos.shape)
# print(rawpredspos)
# print(rawpredspos.min(), rawpredspos.max())
# print(predmask.min(), predmask.max())
# print(rawpreds.shape)
# print(rawpreds[0].min(), rawpreds[0].max())
#
# print(preds.shape)
# print(preds[0].min(), preds[0].max())
#
vmeshes[seed][0]['predict'] = preds
vmeshes[seed][0]['rawpredict'] = rawpreds
print('Truth : ', np.unique(vmeshes[seed][1]['pnn'], return_counts=True))
print('RawSamplePos : ', np.unique(rawpredspos, return_counts=True))
print('Sample : ', np.unique(vmeshes[seed][0]['predict'][0], return_counts=True))
print('RawSample : ', np.unique(vmeshes[seed][0]['rawpredict'][0], return_counts=True)) #Not sure why this is not the same as rawsamplepos
##############################
##Power spectrum
yy = ['pos', 'mass']
for iy in range(2):
fig, axar = plt.subplots(2, 2, figsize = (8, 8))
ax = axar[0]
predict, hpmeshd = vmeshes[seed][0]['predict'][...,iy] , np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)[...,iy]
print(predict.shape, hpmeshd.shape)
k, pkpred = tools.power(predict/predict.mean(), boxsize=bs, k=kmesh)
k, pkhd = tools.power(hpmeshd/hpmeshd.mean(), boxsize=bs, k=kmesh)
k, pkhx = tools.power(hpmeshd/hpmeshd.mean(), predict/predict.mean(), boxsize=bs, k=kmesh)
##
ax[0].semilogx(k[1:], pkpred[1:]/pkhd[1:], label=seed)
ax[1].semilogx(k[1:], pkhx[1:]/(pkpred[1:]*pkhd[1:])**0.5)
for axis in ax.flatten():
axis.legend(fontsize=14)
axis.set_yticks(np.arange(0, 1.2, 0.1))
axis.grid(which='both')
axis.set_ylim(0.,1.1)
ax[0].set_ylabel('Transfer function', fontsize=14)
ax[1].set_ylabel('Cross correlation', fontsize=14)
#
#
ax = axar[1]
vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
ax[0].set_title('Prediction', fontsize=15)
ax[1].set_title('Truth', fontsize=15)
plt.savefig('./vpredict-%s.png'%( yy[iy]))
plt.show()
plt.figure()
plt.hist(hpmeshd.flatten(), range=(-1, 20), bins=100, label='target', alpha=0.8)
plt.hist(predict.flatten(), range=(-1, 20), bins=100, label='prediict', alpha=0.5)
plt.legend()
plt.yscale('log')
plt.savefig('./hist-%s.png'%( yy[iy]))
plt.show()
##
| [
"numpy.load",
"matplotlib.pyplot.yscale",
"numpy.random.seed",
"tensorflow_hub.Module",
"tensorflow.reset_default_graph",
"tools.readbigfile",
"matplotlib.pyplot.figure",
"numpy.arange",
"os.path.join",
"numpy.unique",
"sys.path.append",
"numpy.pad",
"tensorflow.set_random_seed",
"tools.pa... | [((106, 133), 'sys.path.append', 'sys.path.append', (['"""./utils/"""'], {}), "('./utils/')\n", (121, 133), False, 'import sys, os\n'), ((367, 380), 'numpy.random.seed', 'seed', (['seed_in'], {}), '(seed_in)\n', (371, 380), False, 'from numpy.random import seed\n'), ((420, 444), 'tensorflow.set_random_seed', 'set_random_seed', (['seed_in'], {}), '(seed_in)\n', (435, 444), False, 'from tensorflow import set_random_seed\n'), ((677, 705), 'tools.fftk', 'tools.fftk', (['(nc, nc, nc)', 'bs'], {}), '((nc, nc, nc), bs)\n', (687, 705), False, 'import tools\n'), ((2647, 2671), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2669, 2671), True, 'import tensorflow as tf\n'), ((2681, 2701), 'os.listdir', 'os.listdir', (['savepath'], {}), '(savepath)\n', (2691, 2701), False, 'import sys, os\n'), ((2836, 2872), 'tensorflow_hub.Module', 'hub.Module', (["(modpath + '/likelihood/')"], {}), "(modpath + '/likelihood/')\n", (2846, 2872), True, 'import tensorflow_hub as hub\n'), ((3700, 3721), 'tools.fftk', 'tools.fftk', (['shape', 'bs'], {}), '(shape, bs)\n', (3710, 3721), False, 'import tools\n'), ((1052, 1121), 'tools.readbigfile', 'tools.readbigfile', (["(path + ftypefpm % (bs, nc, seed, step) + 'mesh/s/')"], {}), "(path + ftypefpm % (bs, nc, seed, step) + 'mesh/s/')\n", (1069, 1121), False, 'import tools\n'), ((1138, 1200), 'numpy.load', 'np.load', (["(path + ftypefpm % (bs, nc, seed, step) + 'mesh/d.npy')"], {}), "(path + ftypefpm % (bs, nc, seed, step) + 'mesh/d.npy')\n", (1145, 1200), True, 'import numpy as np\n'), ((1907, 1935), 'tools.paintnn', 'tools.paintnn', (['hposd', 'bs', 'nc'], {}), '(hposd, bs, nc)\n', (1920, 1935), False, 'import tools\n'), ((1955, 1990), 'tools.paintnn', 'tools.paintnn', (['hposd', 'bs', 'nc', 'massd'], {}), '(hposd, bs, nc, massd)\n', (1968, 1990), False, 'import tools\n'), ((2711, 2743), 'os.path.join', 'os.path.join', (['savepath', 'basename'], {}), '(savepath, basename)\n', (2723, 2743), False, 'import sys, os\n'), ((3763, 3775), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3773, 3775), True, 'import tensorflow as tf\n'), ((4173, 4229), 'numpy.stack', 'np.stack', (['[vmeshes[seed][1][i] for i in tgname]'], {'axis': '(-1)'}), '([vmeshes[seed][1][i] for i in tgname], axis=-1)\n', (4181, 4229), True, 'import numpy as np\n'), ((1633, 1711), 'tools.readbigfile', 'tools.readbigfile', (["(path + ftype % (bs, ncf, seed, stepf) + 'FOF/PeakPosition/')"], {}), "(path + ftype % (bs, ncf, seed, stepf) + 'FOF/PeakPosition/')\n", (1650, 1711), False, 'import tools\n'), ((3798, 3832), 'tensorflow.initializers.global_variables', 'tf.initializers.global_variables', ([], {}), '()\n', (3830, 3832), True, 'import tensorflow as tf\n'), ((5471, 5525), 'numpy.unique', 'np.unique', (["vmeshes[seed][1]['pnn']"], {'return_counts': '(True)'}), "(vmeshes[seed][1]['pnn'], return_counts=True)\n", (5480, 5525), True, 'import numpy as np\n'), ((5556, 5598), 'numpy.unique', 'np.unique', (['rawpredspos'], {'return_counts': '(True)'}), '(rawpredspos, return_counts=True)\n', (5565, 5598), True, 'import numpy as np\n'), ((5624, 5685), 'numpy.unique', 'np.unique', (["vmeshes[seed][0]['predict'][0]"], {'return_counts': '(True)'}), "(vmeshes[seed][0]['predict'][0], return_counts=True)\n", (5633, 5685), True, 'import numpy as np\n'), ((5714, 5778), 'numpy.unique', 'np.unique', (["vmeshes[seed][0]['rawpredict'][0]"], {'return_counts': '(True)'}), "(vmeshes[seed][0]['rawpredict'][0], return_counts=True)\n", (5723, 5778), True, 'import numpy as np\n'), ((5964, 5998), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(8, 8)'}), '(2, 2, figsize=(8, 8))\n', (5976, 5998), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7308), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./vpredict-%s.png' % yy[iy])"], {}), "('./vpredict-%s.png' % yy[iy])\n", (7278, 7308), True, 'import matplotlib.pyplot as plt\n'), ((7318, 7328), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7326, 7328), True, 'import matplotlib.pyplot as plt\n'), ((7338, 7350), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7348, 7350), True, 'import matplotlib.pyplot as plt\n'), ((7540, 7552), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7550, 7552), True, 'import matplotlib.pyplot as plt\n'), ((7561, 7578), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (7571, 7578), True, 'import matplotlib.pyplot as plt\n'), ((7587, 7624), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./hist-%s.png' % yy[iy])"], {}), "('./hist-%s.png' % yy[iy])\n", (7598, 7624), True, 'import matplotlib.pyplot as plt\n'), ((7634, 7644), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7642, 7644), True, 'import matplotlib.pyplot as plt\n'), ((3966, 4006), 'numpy.pad', 'np.pad', (['vmeshes[seed][0][i]', 'pad', '"""wrap"""'], {}), "(vmeshes[seed][0][i], pad, 'wrap')\n", (3972, 4006), True, 'import numpy as np\n'), ((6087, 6143), 'numpy.stack', 'np.stack', (['[vmeshes[seed][1][i] for i in tgname]'], {'axis': '(-1)'}), '([vmeshes[seed][1][i] for i in tgname], axis=-1)\n', (6095, 6143), True, 'import numpy as np\n'), ((6692, 6714), 'numpy.arange', 'np.arange', (['(0)', '(1.2)', '(0.1)'], {}), '(0, 1.2, 0.1)\n', (6701, 6714), True, 'import numpy as np\n'), ((1732, 1802), 'tools.readbigfile', 'tools.readbigfile', (["(path + ftype % (bs, ncf, seed, stepf) + 'FOF/Mass/')"], {}), "(path + ftype % (bs, ncf, seed, stepf) + 'FOF/Mass/')\n", (1749, 1802), False, 'import tools\n'), ((4673, 4695), 'numpy.expand_dims', 'np.expand_dims', (['xxm', '(0)'], {}), '(xxm, 0)\n', (4687, 4695), True, 'import numpy as np\n'), ((4700, 4722), 'numpy.expand_dims', 'np.expand_dims', (['yym', '(0)'], {}), '(yym, 0)\n', (4714, 4722), True, 'import numpy as np\n'), ((4792, 4814), 'numpy.expand_dims', 'np.expand_dims', (['xxm', '(0)'], {}), '(xxm, 0)\n', (4806, 4814), True, 'import numpy as np\n'), ((4819, 4841), 'numpy.expand_dims', 'np.expand_dims', (['yym', '(0)'], {}), '(yym, 0)\n', (4833, 4841), True, 'import numpy as np\n'), ((4906, 4928), 'numpy.expand_dims', 'np.expand_dims', (['xxm', '(0)'], {}), '(xxm, 0)\n', (4920, 4928), True, 'import numpy as np\n'), ((4933, 4955), 'numpy.expand_dims', 'np.expand_dims', (['yym', '(0)'], {}), '(yym, 0)\n', (4947, 4955), True, 'import numpy as np\n'), ((5014, 5036), 'numpy.expand_dims', 'np.expand_dims', (['xxm', '(0)'], {}), '(xxm, 0)\n', (5028, 5036), True, 'import numpy as np\n'), ((5041, 5063), 'numpy.expand_dims', 'np.expand_dims', (['yym', '(0)'], {}), '(yym, 0)\n', (5055, 5063), True, 'import numpy as np\n')] |
from argparse import ArgumentParser
import keras
from keras import backend as K
import sys
import tensorflow as tf
import os
from pdb import set_trace
from sklearn.model_selection import train_test_split
parser = ArgumentParser()
parser.add_argument('outputDir')
parser.add_argument(
'method', choices = [
'domain_adaptation_two_samples',
'MC_training',
'data_training',
'domain_adaptation_one_sample',
'domain_adaptation_one_sample_lambdap5',
'domain_adaptation_two_samples_w50_l.25',
'domain_adaptation_two_samples_w50_l.04',
'domain_adaptation_two_samples_w25_l.5',
'domain_adaptation_two_samples_w05_l1',
'domain_adaptation_two_samples_lr0.0005_w300_l0.04',
]
)
parser.add_argument("-i", help="input directory", default='/data/ml/mverzett/pheno_domAda/smearing_x2/', dest='indir')
parser.add_argument("--addsv", action='store_true')
parser.add_argument("--gpu", help="select specific GPU", type=int, metavar="OPT", default=-1)
parser.add_argument("--nopred", help="do not compute and store predictions", action='store_true')
parser.add_argument("--lr", help="learning rate", type=float, default=0.001)
parser.add_argument("--weight", help="domain adaptation weight", type=float, default=50)
parser.add_argument("--lmb", help="domain adaptation lambda", type=float, default=0.04)
parser.add_argument("--gpufraction", help="select memory fraction for GPU", type=float, metavar="OPT", default=-1)
args = parser.parse_args()
loss_weigth = 50
lambda_reversal = .1
if args.method.startswith('domain_adaptation_two_samples_'):
cfg = args.method[len('domain_adaptation_two_samples_'):]
if len(cfg.split('_')) == 2:
winfo, linfo = tuple(cfg.split('_'))
elif len(cfg.split('_')) == 3:
lrinfo, winfo, linfo = tuple(cfg.split('_'))
args.lr = float(lrinfo[2:])
else:
raise ValueError('to be implemented')
loss_weigth = float(winfo[1:])
lambda_reversal = float(linfo[1:])
args.method = 'domain_adaptation_two_samples'
else:
loss_weigth = args.weight
lambda_reversal = args.lmb
if args.gpu<0:
import imp
try:
imp.find_module('setGPU')
import setGPU
except ImportError:
found = False
else:
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
print('running on GPU '+str(args.gpu))
if args.gpufraction>0 and args.gpufraction<1:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpufraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
K.set_session(sess)
print('using gpu memory fraction: '+str(args.gpufraction))
from keras.engine import Layer
from Layers import *
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from make_samples import make_sample
#from DL4Jets.DeepJet.modules.Losses import weighted_loss
from keras.layers import Dense, Concatenate ,Dropout
from keras.layers import Input
from keras.models import Model
import pandas as pd
from keras.optimizers import Adam
def schedule(x):
lr=0.001
if x>75: lr=0.0001
if x>125: lr=0.00001
return lr
#learning_rate = keras.callbacks.LearningRateScheduler(schedule)
def save(df, fname):
dname = os.path.dirname(fname)
if not os.path.isdir(dname):
os.makedirs(dname)
records = df.to_records(index=False)
records.dtype.names = [str(i) for i in records.dtype.names]
np.save(fname, records)
def modelIverseGrad(Inputs, rev_grad=.1):
X = Dense(20, activation='relu',input_shape=(21,)) (Inputs)
#X = Dropout(0.25)(X)
X = Dense(10, activation='relu')(X)
#X = Dropout(0.25)(X)
X = Dense(10, activation='relu')(X)
#X = Dropout(0.25)(X)
X = Dense(10, activation='relu')(X)
#X = Dropout(0.25)(X)
Xa = Dense(10, activation='relu')(X)
X = Dense(10, activation='relu')(Xa)
X = Dense(1, activation='sigmoid', name = 'mc')(X)
X1= Dense(1, activation='linear',use_bias=False, trainable=False,kernel_initializer='Ones', name = 'data') (X)
Ad = GradientReversal(hp_lambda=rev_grad)(Xa)
Ad = Dense(10, activation='relu')(Ad)
Ad = Dense(10, activation='relu')(Ad)
Ad = Dense(10, activation='relu')(Ad)
Ad = Dense(10, activation='relu')(Ad)
Ad = Dense(1, activation='sigmoid', name = 'Add' )(Ad)
Ad1 = GradientReversal(hp_lambda=rev_grad)(Xa)
Ad1 = Dense(10, activation='relu')(Ad1)
Ad1 = Dense(10, activation='relu')(Ad1)
Ad1 = Dense(10, activation='relu')(Ad1)
Ad1 = Dense(10, activation='relu')(Ad1)
Ad1 = Dense(1, activation='sigmoid', name = 'Add_1' )(Ad1)
predictions = [X,X1,Ad,Ad1]
model = Model(inputs=Inputs, outputs=predictions)
return model
from keras.models import load_model
def run_model(outdir, Grad=1, known = 1,AdversOn=1,diffOn = 1):
Inputs = Input((21,))
global_loss_list={}
global_loss_list['GradientReversal']=GradientReversal()
X_traintest, isB_traintest , isMC_traintest = make_sample(args.indir, args.addsv)
X_all, X_test, isB_all, isB_test, isMC_all, isMC_test = train_test_split(X_traintest, isB_traintest , isMC_traintest, test_size=0.1, random_state=42)
advers_weight = 25.
if AdversOn==0:
advers_weight = 0.
model = modelIverseGrad(Inputs)
# gradiant loss
if(Grad == 'domain_adaptation_two_samples'):
model = modelIverseGrad(Inputs,rev_grad=lambda_reversal)
model.compile(
loss = ['binary_crossentropy']*4,
optimizer=Adam(lr=args.lr),
loss_weights=[1., 0., loss_weigth, loss_weigth]
)
history = model.fit(
X_all,
[isB_all, isB_all, isMC_all, isMC_all],
batch_size=5000, epochs=75, verbose=1, validation_split=0.2,
sample_weight = [
isMC_all.ravel(),
1-isMC_all.ravel(),
1-isB_all.ravel()*0.75,
1+isB_all.ravel()*0.75]
)
elif(Grad == 'MC_training'):
model.compile(
loss = ['binary_crossentropy']*4,
optimizer=Adam(lr=args.lr),
loss_weights=[1.,0.,0.,0.]
)
history = model.fit(
X_all,
[isB_all, isB_all, isMC_all, isMC_all],
batch_size=5000, epochs=75, verbose=1, validation_split=0.2,
sample_weight = [
isMC_all.ravel(),
1-isMC_all.ravel(),
1+0.5*isB_all.ravel(),
1-0.5*isB_all.ravel()],
)
elif(Grad == 'data_training'):
model.compile(
loss=['binary_crossentropy']*4,
optimizer=Adam(lr=args.lr),
loss_weights=[0.,1.,0.,0.]
)
history = model.fit(
X_all,
[isB_all, isB_all, isMC_all, isMC_all],
batch_size=5000, epochs=75, verbose=1, validation_split=0.2,
sample_weight = [
isMC_all.ravel(),
1-isMC_all.ravel(),
1+0.5*isB_all.ravel(),
1-0.5*isB_all.ravel()],
)
elif(Grad == 'domain_adaptation_one_sample'):
model = modelIverseGrad(Inputs,rev_grad=.25)
model.compile(
loss = ['binary_crossentropy']*4,
optimizer=Adam(lr=args.lr),
loss_weights=[1.,0.,50.,50.]
)
history = model.fit(
X_all,
[isB_all, isB_all, isMC_all, isMC_all],
batch_size=5000, epochs=75, verbose=1, validation_split=0.2,
sample_weight = [
isMC_all.ravel(),
1-isMC_all.ravel(),
np.ones(isB_all.ravel().shape[0]),
np.ones(isB_all.ravel().shape[0])],
)
elif(Grad == 'domain_adaptation_one_sample_lambdap5'):
model = modelIverseGrad(Inputs,rev_grad=.5)
model.compile(
loss = ['binary_crossentropy']*4,
optimizer = Adam(lr=args.lr),
loss_weights = [1.,0.,50.,50.]
)
history = model.fit(
X_all,
[isB_all, isB_all, isMC_all, isMC_all],
batch_size=5000, epochs=75, verbose=1, validation_split=0.2,
sample_weight = [
isMC_all.ravel(),
1-isMC_all.ravel(),
np.ones(isB_all.ravel().shape[0]),
np.ones(isB_all.ravel().shape[0])],
)
else:
raise ValueError('%s is an unknown run option' % Grad)
history = pd.DataFrame(history.history)
save(history, '%s/history.npy' %outdir)
if args.nopred:
return history
predictions = model.predict(X_test)
preds = pd.DataFrame()
preds['prediction'] = predictions[0].ravel()
preds['isB'] = isB_test
preds['isMC'] = isMC_test
save(preds, '%s/predictions.npy' %outdir)
return history
#print history.history.keys()
run_model(
args.outputDir,
Grad=args.method,
known = 1,
AdversOn=1,
diffOn = 1
)
### #print ('damain adaptation with tw sources')
### history2 = run_model(Grad=2, known = 1,AdversOn=1,diffOn = 1)
### #print ('train on sources')
### history3 = run_model(Grad=3, known = 1,AdversOn=1,diffOn = 1)
### #history4 = run_model(Grad=4, known = 1,AdversOn=1,diffOn = 1)
### #history5 = run_model(Grad=5, known = 1,AdversOn=1,diffOn = 1)
###
###
### fig = plt.figure()
### plt.plot(history1.history['val_data_loss'],label='data DA 0.25')
### plt.plot(history1.history['val_mc_loss'],label='mc DA 0.25')
### plt.plot(history2.history['val_data_loss'],label='data mc')
### plt.plot(history2.history['val_mc_loss'],label='mc mc')
### plt.plot(history3.history['val_data_loss'],label='data data')
### plt.plot(history3.history['val_mc_loss'],label='mc data')
### #plt.plot(history4.history['val_data_loss'],label='data DA 0.1')
### #plt.plot(history4.history['val_mc_loss'],label='mc DA 0.1')
### #plt.plot(history5.history['val_data_loss'],label='data DA 0.5')
### #plt.plot(history5.history['val_mc_loss'],label='mc DA 0.5')
###
###
### plt.ylabel('loss')
### plt.xlabel('epochs')
### plt.legend()
### fig.savefig('myPlot')
### #plt.figure(2)
###
### #plt.plot(history.history['val_dense_8_loss'],label='data')
### # plt.plot(history.history['val_dense_7_loss'],label='mc')
### # plt.legend()
### #plt.plot(history.history['val_dense_12_loss'])
### #plt.figure(3)
### #plt.plot(history.history['val_loss'],label='full loss')
### #plt.plot(history.history['val_dense_12_loss'])
### #plt.legend()
### #plt.show()
| [
"pandas.DataFrame",
"numpy.save",
"argparse.ArgumentParser",
"make_samples.make_sample",
"imp.find_module",
"os.path.isdir",
"sklearn.model_selection.train_test_split",
"os.path.dirname",
"keras.backend.set_session",
"os.makedirs",
"keras.optimizers.Adam",
"keras.models.Model",
"tensorflow.C... | [((214, 230), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (228, 230), False, 'from argparse import ArgumentParser\n'), ((2338, 2401), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'args.gpufraction'}), '(per_process_gpu_memory_fraction=args.gpufraction)\n', (2351, 2401), True, 'import tensorflow as tf\n'), ((2470, 2489), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (2483, 2489), True, 'from keras import backend as K\n'), ((3117, 3139), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (3132, 3139), False, 'import os\n'), ((3291, 3314), 'numpy.save', 'np.save', (['fname', 'records'], {}), '(fname, records)\n', (3298, 3314), True, 'import numpy as np\n'), ((4431, 4472), 'keras.models.Model', 'Model', ([], {'inputs': 'Inputs', 'outputs': 'predictions'}), '(inputs=Inputs, outputs=predictions)\n', (4436, 4472), False, 'from keras.models import Model\n'), ((4602, 4614), 'keras.layers.Input', 'Input', (['(21,)'], {}), '((21,))\n', (4607, 4614), False, 'from keras.layers import Input\n'), ((4740, 4775), 'make_samples.make_sample', 'make_sample', (['args.indir', 'args.addsv'], {}), '(args.indir, args.addsv)\n', (4751, 4775), False, 'from make_samples import make_sample\n'), ((4833, 4929), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_traintest', 'isB_traintest', 'isMC_traintest'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(X_traintest, isB_traintest, isMC_traintest, test_size=0.1,\n random_state=42)\n', (4849, 4929), False, 'from sklearn.model_selection import train_test_split\n'), ((7511, 7540), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (7523, 7540), True, 'import pandas as pd\n'), ((7663, 7677), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7675, 7677), True, 'import pandas as pd\n'), ((2051, 2076), 'imp.find_module', 'imp.find_module', (['"""setGPU"""'], {}), "('setGPU')\n", (2066, 2076), False, 'import imp\n'), ((3148, 3168), 'os.path.isdir', 'os.path.isdir', (['dname'], {}), '(dname)\n', (3161, 3168), False, 'import os\n'), ((3172, 3190), 'os.makedirs', 'os.makedirs', (['dname'], {}), '(dname)\n', (3183, 3190), False, 'import os\n'), ((3363, 3410), 'keras.layers.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""', 'input_shape': '(21,)'}), "(20, activation='relu', input_shape=(21,))\n", (3368, 3410), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3447, 3475), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3452, 3475), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3507, 3535), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3512, 3535), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3567, 3595), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3572, 3595), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3628, 3656), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3633, 3656), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3665, 3693), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3670, 3693), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3703, 3744), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""mc"""'}), "(1, activation='sigmoid', name='mc')\n", (3708, 3744), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3755, 3861), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'use_bias': '(False)', 'trainable': '(False)', 'kernel_initializer': '"""Ones"""', 'name': '"""data"""'}), "(1, activation='linear', use_bias=False, trainable=False,\n kernel_initializer='Ones', name='data')\n", (3760, 3861), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3915, 3943), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3920, 3943), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3954, 3982), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3959, 3982), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((3993, 4021), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3998, 4021), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((4032, 4060), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (4037, 4060), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((4071, 4113), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""Add"""'}), "(1, activation='sigmoid', name='Add')\n", (4076, 4113), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((4176, 4204), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (4181, 4204), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((4217, 4245), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (4222, 4245), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((4258, 4286), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (4263, 4286), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((4299, 4327), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (4304, 4327), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((4340, 4384), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""Add_1"""'}), "(1, activation='sigmoid', name='Add_1')\n", (4345, 4384), False, 'from keras.layers import Dense, Concatenate, Dropout\n'), ((2428, 2467), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (2442, 2467), True, 'import tensorflow as tf\n'), ((5210, 5226), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.lr'}), '(lr=args.lr)\n', (5214, 5226), False, 'from keras.optimizers import Adam\n'), ((5654, 5670), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.lr'}), '(lr=args.lr)\n', (5658, 5670), False, 'from keras.optimizers import Adam\n'), ((6073, 6089), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.lr'}), '(lr=args.lr)\n', (6077, 6089), False, 'from keras.optimizers import Adam\n'), ((6556, 6572), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.lr'}), '(lr=args.lr)\n', (6560, 6572), False, 'from keras.optimizers import Adam\n'), ((7080, 7096), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.lr'}), '(lr=args.lr)\n', (7084, 7096), False, 'from keras.optimizers import Adam\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===============================================================
# Copyright (C) 2018 HuangYk.
# Licensed under The MIT Lincese.
#
# Filename : TorchSoa.py
# Author : HuangYK
# Last Modified: 2019-03-21 14:19
# Description :
#
# ===============================================================
from __future__ import print_function, division, absolute_import
import os
import copy
import shutil
import math
import torch
import torchnet as tnt
from torchnet.engine import Engine
from torchnet.logger import VisdomPlotLogger, VisdomLogger
import time
import numpy as np
import pandas as pd
from tqdm import tqdm # progress bar using in python shell
from pandas import DataFrame
from collections import defaultdict
class TorchSoaEngine(object):
'''A architecture of training process
Inherit TorchSoaEngine to build a neural network training processor for
specific dataset, and override abstract method get_iterator to provide a
batch sample iterator from dataset.
Attribute:
----------
meters: Caculate loss, class accuracy, class confusion performance of
neural networks
model: Neural networks model at gpu device
parameters: Total number of parameters in model
Example:
--------
>> kw={'model':neural_network_instance,
'optimizer':optimizer_instance,
'loss_func':loss_function
'maxepoch':max_epoch, 'batch_size':batch_size,
'num_workers':num_workers}
>> net_engine = TorchSoaEngine(**kw)
>> net_engine.meters = ClassifyMeter(num_classes)
>> net_engine.train()
'''
def __init__(self, model, optimizer, loss_func, maxepoch, batch_size,
num_workers, net_name, snap_epoch, model_dir=None, logs_dir=None,
gpu_id=None, loss_scheduler=None, step_scheduler=None, sgdr=False,
init_lr=None, T_max=10, reset_lr_params=None, dataset=None, resume=False
):
'''Init with training parameters, add hooks in torchnet
Training hooks function sequence is:
--> hook['on_start']
--> maxepoch iteration(
--> hook['on_start_epoch']
--> batch data iteration(
--> state['sample'] --> hook['on_sample']
--> state['optimizer'].zero
--> forward: state['network'](state['sample'])
--> state['output'], state['loss']
--> hook['on_forward'] with state['output'] and state['loss']
--> state['output'].zero, state['loss'].zero
--> backprop: state['optimizer'] with loss
--> hook['on_upadte']
--> state['t'].add
) # one epoch
--> state['epoch'].add
--> hook['on_end_epoch']
) # one training
--> hook['on_end']
Args:
-----
model: torch.nn.Module A nerual networks inherit nn.Module
optimizer: torch.optim Optim method for training
loss_func: torch.nn.functional, Loss function for nerual networks
max_epoch: int, Epoch number for training process
batch_size: int, Sample batch in a iteration
num_workers: int, Number of processors for get sample
net_name: str,
Return:
-------
A normalized torch net training architecture
'''
self._model = model
self._optimizer = optimizer
self._max_epoch = maxepoch
self._loss_func = loss_func
self._batch_size = batch_size
self._num_workers = num_workers
self._net_name = net_name
self._snap_epoch = snap_epoch
self._model_dir = model_dir if model_dir is not None else './epochs'
self._logs_dir = logs_dir if logs_dir is not None else './logs'
self._gpu_id = gpu_id
self._dataset = dataset
self._loss_scheduler = loss_scheduler
self._step_scheduler = step_scheduler
self._init_lr = init_lr
self._use_sgdr = sgdr
self._T_max = T_max
self._iteration_len = None
self._best_accuracy = 0
self._best_epoch = 0
self._reset_epoch = 0
self._reset_lr_params = reset_lr_params
self._epoch_meters = None
self._epoch_recorder = None
self._batch_logger = None
self._epoch_logger = None
self._resume = resume
self._engine = None
@property
def engine(self):
return self._engine
@engine.setter
def engine(self, engine):
self._engine = engine
self._init_engine
@property
def epoch_meters(self):
return self._epoch_meters
@epoch_meters.setter
def epoch_meters(self, meters):
self._epoch_meters = meters
@property
def epoch_rec(self):
return self._epoch_recorder
@epoch_rec.setter
def epoch_rec(self, epoch_rec):
self._epoch_recorder = epoch_rec
@property
def model(self):
return self._model
@property
def parameters(self):
return sum(param.numel() for param in self._model.parameters())
def get_loggers(self, stage):
if stage == 'epoch':
logger = self._epoch_logger
elif stage == 'batch':
logger = self._batch_logger
return logger
def set_loggers(self, stage, logger):
if stage == 'epoch':
self._epoch_logger = logger
elif stage == 'batch':
self._batch_logger = logger
def init_module(self, engine, epcoh_meters, epoch_recorder, epcoh_logger, batch_logger):
self.engine = engine
self.epoch_meters = epcoh_meters
self.epoch_rec = epoch_recorder
self.set_loggers('epoch', epcoh_logger)
self.set_loggers('batch', batch_logger)
self._init_engine()._init_recorder()
def _init_engine(self):
self._engine.hooks['on_start'] = self._on_start
self._engine.hooks['on_start_epoch'] = self._on_start_epoch
self._engine.hooks['on_sample'] = self._on_sample
self._engine.hooks['on_forward'] = self._on_forward
self._engine.hooks['on_end_epoch'] = self._on_end_epoch
self._engine.hooks['on_end'] = self._on_end
self._engine.hooks['on_update'] = self._on_update_batch
return self
def _init_recorder(self):
self.epoch_rec.add_item(
kind='confusion',
num_classes=self.epoch_meters.num_classes
)
return self
def _on_start(self, state):
if state['train']:
self._iteration_len = len(state['iterator'])
print("**Iteration Numbers: {}".format(self._iteration_len))
if self._reset_lr_params:
print("**Restart Epochs: {}".format(self._reset_lr_params['epoch']))
if self._resume:
state['epoch']=self._best_epoch
print('***Resume lr: {}'.format(
self._optimizer.param_groups[0]['lr'])
)
def _on_sample(self, state):
'''Attach train(True) or test(False) label to samples
Args:
-----
state: dict, a state dict in torchnet, state['sample'] will provide
a list contain data, target
'''
state['sample'].append(state['train'])
# if state['train'] and self._use_sgdr:
# batch_lr = self._init_lr*sgdr(
# self._T_max*self._iteration_len, state['t']
# )
# set_optimizer_lr(self._optimizer, batch_lr)
# current_lr = self._optimizer.param_groups[0]['lr']
# self._batch_logger.log_lr(state, current_lr)
def _on_start_epoch(self, state):
print('Epoch {} start'.format(state['epoch']+1))
self._epoch_meters.reset_meters()
#state['t'] = 0
if self._step_scheduler and state['epoch']>0:
if not self._resume:
self._step_scheduler.step()
self._resume = False
print("**Step schedule")
if self._use_sgdr:
print("**SGDR schedule")
gamma = pow(0.1, state['epoch'] // self._T_max)
epoch_init = self._init_lr*gamma
epoch_lr = epoch_init*sgdr(self._T_max, state['epoch'])
set_optimizer_lr(self._optimizer, epoch_lr)
# reset lr strategy
if self._reset_lr_params:
if state['epoch'] >= min(self._reset_lr_params['epoch']):
reset_lr = self._reset_lr_params['lr']
self._reset_epoch += 1
# reset lr
if state['epoch'] in self._reset_lr_params['epoch']:
self._reset_epoch = 0
set_optimizer_lr(self._optimizer, reset_lr*np.power(
self._reset_lr_params['gamma'],
self._reset_epoch//self._reset_lr_params['step']
)
)
print('***current lr: {}'.format(self._optimizer.param_groups[0]['lr']))
state['iterator'] = tqdm(state['iterator'])
def _on_forward(self, state):
'''Process forward output, loss before reset
Args:
-----
state: dict, provide output tensor and loss in state['output'],
state['loss']
'''
self._epoch_meters.add_meters(state)
def _on_update_batch(self, state):
self._batch_logger(state)
def _on_end_epoch(self, state):
stage = 'train'
epoch_idx = state['epoch']
print('[Epoch {}] {} end'.format(epoch_idx, stage))
loss = self._epoch_meters.loss
accuracy = self._epoch_meters.accuracy
confusion = self._epoch_meters.confusion
print_meters(epoch=epoch_idx, loss=loss, accuracy= accuracy, train=True)
self._epoch_logger(
epoch_idx=epoch_idx, loss=loss, accuracy=accuracy,
confusion=confusion, train=True
)
self._epoch_recorder.record(
index=epoch_idx, train=True,
loss=loss, accuracy=accuracy,
diag=self._epoch_meters.get_confusion_diag()[0],
num=self._epoch_meters.get_confusion_diag()[1]
)
self._epoch_meters.reset_meters()
# release gpu memory
torch.cuda.empty_cache()
self.test()
stage='test'
print('[Epoch {}] {} end'.format(epoch_idx, stage))
loss = self._epoch_meters.loss
accuracy = self._epoch_meters.accuracy
confusion = self._epoch_meters.confusion
print_meters(epoch=epoch_idx, loss=loss, accuracy= accuracy, train=False)
self._epoch_logger(
epoch_idx=epoch_idx, loss=loss, accuracy=accuracy,
confusion=confusion, train=False
)
self._epoch_recorder.record(
index=epoch_idx, train=False,
loss=loss, accuracy=accuracy,
diag=self._epoch_meters.get_confusion_diag()[0],
num=self._epoch_meters.get_confusion_diag()[1],
conf=self._epoch_meters.get_confusion_matrix()
)
if self._loss_scheduler is not None:
self._loss_scheduler.step(loss)
print("**Loss schedule")
if not os.path.exists(self._model_dir):
os.makedirs(self._model_dir)
torch.save(
{'epoch': epoch_idx, 'arch': self._model.__class__.__name__,
'optim_state_dict': self._optimizer.state_dict(),
'model_state_dict': self._model.state_dict(),
'best_acc':accuracy
}, '{:s}/{:s}_checkpoint.pth.tar'.format(
self._model_dir, self._net_name)
)
if accuracy>self._best_accuracy:
# save static params
torch.save(
self._model.state_dict(),
'{:s}/{:s}_best_acc_state_dict.pth'.format(
self._model_dir, self._net_name
)
)
self._best_accuracy = accuracy
self._best_epoch = state['epoch']
shutil.copy(
'{:s}/{:s}_checkpoint.pth.tar'.format(
self._model_dir, self._net_name),
'{:s}/{:s}_model_best.pth.tar'.format(
self._model_dir, self._net_name),
)
accuracy_baseline = {'hmdb51':59.2,'ucf101':87.3}.get(self._dataset, None)
if accuracy>(self._best_accuracy-0.3) and accuracy_baseline:
# save static params
if accuracy > accuracy_baseline:
print("save more best state_dict")
shutil.copy(
'{:s}/{:s}_checkpoint.pth.tar'.format(
self._model_dir, self._net_name),
'{:s}/{:s}_model_{}.pth.tar'.format(
self._model_dir, self._net_name, accuracy),
)
print("[Best] Epoch {:02d} (Accuracy: {:.2f})".format(
self._best_epoch, self._best_accuracy))
if (epoch_idx) % self._snap_epoch == 0:
if not os.path.exists(self._model_dir):
os.makedirs(self._model_dir)
torch.save(
self._model.state_dict(),
'{:s}/{:s}_epoch_{:02d}_state_dict.pth'.format(
self._model_dir, self._net_name, state['epoch']
)
)
# self._epoch_logger.upate_cache()
csv_folder = self._logs_dir
csv_file = '_'.join([self._net_name, 'epoch', '{:02d}'.format(epoch_idx)])
csv_file = os.path.join(csv_folder, csv_file)
self._epoch_recorder.save_csv(csv_file, state['train'])
# release gpu memory
torch.cuda.empty_cache()
def _on_end(self, state):
'''Save training record
'''
csv_folder = self._logs_dir
if not os.path.exists(csv_folder):
os.makedirs(csv_folder)
if state['train']:
csv_file = '_'.join(
[self._net_name, 'max_epoch', str(self._max_epoch)]
)
torch.save(
self._model.state_dict(),
'{:s}/{:s}_max_epoch_{:02d}_state_dict.pth'.format(
self._model_dir, self._net_name, self._max_epoch
)
)
else:
csv_file = '_'.join([self._net_name, 'epoch', 'test', 'tmp'])
csv_file = os.path.join(csv_folder, csv_file)
self._epoch_recorder.save_csv(csv_file, state['train'])
def _network_processor(self, sample):
data, target, train = sample
data, target = data.cuda(self._gpu_id), target.cuda(self._gpu_id)
if train:
self._model.train()
else:
self._model.eval()
output = self._model(data)
loss = self._loss_func(output, target)
return loss, output
def get_iterator(self, train):
raise NotImplementedError(
'get_iterator not implemented for TorchSoaEngine, which is an \
abstract class')
def train(self):
assert self._engine is not None, 'Need to set engine'
assert self._epoch_meters is not None, 'Need to set epoch_meters'
assert self._epoch_recorder is not None, 'Need to set epoch_recorder'
assert self._batch_logger is not None, 'Need to set batch_logger'
assert self._epoch_logger is not None, 'Need to set epoch_logger '
raise NotImplementedError(
'get_iterator not implemented for TorchSoaEngine, which is an \
abstract class')
self._engine.train(
self._network_processor, self.get_iterator(True),
maxepoch=self._max_epoch, optimizer=self._optimizer,
)
def test(self):
raise NotImplementedError(
'get_iterator not implemented for TorchSoaEngine, which is an \
abstract class')
self._engine.test(self._network_processor, self.get_iterator(False))
class EpochMeter(object):
'''Classify task performance evaluation with loss curve, accuracy curve,
confusion matrix
This class provides loss, accuracy, confusion
Attribute:
----------
vis: ClassifyVisdom instance for plot loss, accuracy, confusion in
visdom server in real time during training
loss: float, average loss
accuracy: float, average accuracy of total samples
confusion: [k x k] np.array, class confusion matrix
'''
def __init__(self, num_classes):
self.num_classes = num_classes
self.loss_meter = tnt.meter.AverageValueMeter()
self.acc_meter = tnt.meter.ClassErrorMeter(accuracy=True)
self.confusion_meter = tnt.meter.ConfusionMeter(
num_classes, normalized=True)
self._meters = [self.loss_meter, self.acc_meter, self.confusion_meter]
@property
def loss(self):
'''
Return average loss
'''
return self.loss_meter.value()[0]
@property
def accuracy(self):
'''
Return average class accuracy
'''
return self.acc_meter.value()[0]
@property
def confusion(self):
'''
Return confusion matrix of [num_classes x num_classes]
'''
self.confusion_meter.normalized = True
return self.confusion_meter.value()
def get_confusion_diag(self):
confusion = self.confusion_meter.conf
return np.diag(confusion), confusion.sum(1).clip(min=1e-12)
def get_confusion_matrix(self):
return self.confusion_meter.conf
def reset_meters(self):
for meter in self._meters:
meter.reset()
def add_loss(self, loss):
self.loss_meter.add(loss.data.item())
def add_accuracy(self, output, target):
try:
self.acc_meter.add(output.data, target)
except IndexError as e:
print(e)
print(target.shape)
print(output.data.shape)
def add_confusion(self, output, target):
self.confusion_meter.add(output.data, target)
def add_meters(self, state):
'''Add output, target to meters(loss, acc, confusion) per batch iter
Args:
-----
state: dict, provide loss, output, target
'''
self.add_loss(state['loss'])
self.add_accuracy(state['output'], state['sample'][-2])
self.add_confusion(state['output'], state['sample'][-2])
def print_meters(epoch, loss, accuracy, train):
process = 'Training' if train else 'Test'
print('[{:s}][Epoch {:02d}] {:s} Loss: {:.4f} (Accuracy: {:.2f}%)'.
format(get_time(), epoch, process, loss, accuracy))
def get_time():
return time.strftime("%Y-%m-%d %H:%M:%S")
class EpochLogger(object):
'''Visdom logger for classify task, contain loss curve, accuracy curve and
confusion matrix, plot in visdom server
'''
def __init__(self, num_classes, title='TBD'):
self._loss_logger = LossVisdom(title=title)
self._acc_logger = AccuracyVisdom(title=title)
self._confusion_logger = ConfusionVisdom(num_classes=num_classes, title=title)
self._loss_cache = defaultdict(lambda: None)
self._acc_cache = defaultdict(lambda: None)
def __call__(self, epoch_idx, loss, accuracy, confusion, train=None):
if self._loss_cache[epoch_idx] == None:
self._loss_cache[epoch_idx] = []
self._loss_cache[epoch_idx].append((loss, train))
if self._acc_cache[epoch_idx] == None:
self._acc_cache[epoch_idx] = []
self._acc_cache[epoch_idx].append((accuracy, train))
self._loss_logger.log(epoch_idx, loss, train)
self._acc_logger.log(epoch_idx, accuracy, train)
self._confusion_logger.log(confusion, train)
def upate_cache(self):
for epoch_idx in range(1, len(self._loss_cache.items())+1):
for loss, train in self._loss_cache[epoch_idx]:
self._loss_logger.log(epoch_idx, loss, train)
for epoch_idx in range(1, len(self._acc_cache.items())+1):
for acc, train in self._acc_cache[epoch_idx]:
self._acc_logger.log(epoch_idx, acc, train)
class BatchLogger(object):
'''Visdom logger for classify task, contain loss curve, accuracy curve and
confusion matrix, plot in visdom server
'''
def __init__(self, title='TBD'):
self._train_iter_idx = 0
self._test_iter_idx = 0
self._train_loss_logger = LossVisdom(title=title+' Train')
self._test_loss_logger = LossVisdom(title=title+' Test')
self._lr_logger = BatchLRVisdom(title=title+' Test')
def __call__(self, state):
if state['train']:
loss = state['accumulate_loss'].data.item()
iteration = self._train_iter_idx
self._train_iter_idx += 1
self._train_loss_logger.log(iteration, loss, state['train'])
else:
loss = state['loss'].data.item()
iteration = self._test_iter_idx
self._test_iter_idx += 1
self._test_loss_logger.log(iteration, loss, state['train'])
def log_lr(self, state, lr):
if state['train']:
iteration = self._train_iter_idx
self._lr_logger.log(iteration, lr, state['train'])
else:
pass
class LossVisdom(object):
'''Plot train and test loss curve together in a VisdomPlotLogger
'''
def __init__(self, title='TBD'):
self._loss = VisdomPlotLogger('line', opts={
'title': '{:s} Loss Curve'.format(title)
})
check_visdom_server(self._loss.viz)
def log(self, epoch, loss, train=None):
assert train is not None,\
'train should be True or False, not {}'.format(train)
name = 'train' if train else 'test'
try:
self._loss.log(epoch, loss, name=name)
except BaseException as e:
check_visdom_server(self._loss.viz)
print(e)
print("***Retry LossVisdom")
self.log(epoch, loss, train)
class AccuracyVisdom(object):
'''Plot train and test accuracy curve together in a VisdomPlotLogger
'''
def __init__(self, title='TBD'):
self._acc = VisdomPlotLogger('line', opts={
'title': '{:s} Accuracy Curve'.format(title)
})
check_visdom_server(self._acc.viz)
def log(self, epoch, accuracy, train=None):
assert train is not None,\
'train should be True or False, not {}'.format(train)
name = 'train' if train else 'test'
try:
self._acc.log(epoch, accuracy, name=name)
except BaseException as e:
check_visdom_server(self._acc.viz)
print(e)
print("***Retry AccuracyVisdom")
self.log(epoch, accuracy, train)
class ConfusionVisdom(object):
'''Plot test confusion matrix in a VisdomLogger
'''
def __init__(self, num_classes, title='TBD'):
self._confusion = VisdomLogger('heatmap', opts={
'title': '{:s} Confusion Matrix'.format(title),
'columnnames': list(range(num_classes)),
'rownames': list(range(num_classes))
})
check_visdom_server(self._confusion.viz)
def log(self, confusion, train=None):
assert train is not None,\
'train should be True or False, not {}'.format(train)
if train:
pass
else:
try:
self._confusion.log(confusion)
except BaseException as e:
check_visdom_server(self._confusion.viz)
print(e)
print("***Retry ConfusionVisdom")
self.log(confusion, train)
class BatchLRVisdom(object):
def __init__(self, title='TBD'):
self._lr = VisdomPlotLogger('line', opts={
'title': '{:s} lr Curve'.format(title)
})
check_visdom_server(self._lr.viz)
def log(self, idx, lr, train=None):
assert train is not None,\
'train should be True or False, not {}'.format(train)
name = 'train' if train else 'test'
try:
self._lr.log(idx, lr, name=name)
except BaseException as e:
check_visdom_server(self._lr.viz)
print(e)
print("***Retry LossVisdom")
self.log(idx, lr, train)
class EpochRecorder(object):
'''Record loss and accuracy of a training process as csv
'''
items = ['loss-acc']
def __init__(self, record_step='epoch', root_dir='./logs'):
assert self.check_default_save_folder(), 'Save folder created failed'
self.record_step = record_step
self._recs = defaultdict(lambda: 'N/A')
self._recs['loss-acc'] = LossAccRecorder(record_step, root_dir)
self._root_dir = root_dir
def check_default_save_folder(self, path='./logs'):
if os.path.exists(path):
return True
else:
os.makedirs(path)
self.check_default_save_folder(path)
def add_item(self, kind, num_classes):
assert kind in ['confusion'], 'Record type not support'
if kind == 'confusion':
self.items.append(kind)
self._recs[kind] = ConfusionRecorder(
self.record_step, num_classes, root_dir=self._root_dir
)
def get_record(self):
'''
Return: A dict of DataFrame, which index in items
'''
return self._recs
def record(self, index, train, loss=np.nan, accuracy=np.nan,
diag=np.nan, num=np.nan, conf=None):
'''Add loss, accuracy to DataFrame
Args:
-----
index: int, epoch or batch iteration number
loss: float, loss of net forward process in this index
accuracy: float, average accuracy among classes in this index
train: boolean, if this index is a training process
'''
kws = {'index': index, 'train': train, 'loss': loss, 'conf': conf,
'accuracy': accuracy, 'diag': diag, 'num': num}
for kind in self.items:
self._recs[kind].record(**kws)
def save_csv(self, path, train=None):
for item in self.items:
if not self._recs[item] == 'N/A':
self._recs[item].save_csv(path, train=None)
else:
print('{} not used'.format(item))
class LossAccRecorder(object):
'''
'''
def __init__(self, record_step, root_dir):
self.record_step = record_step
self._df = DataFrame(
columns=[['loss', 'loss', 'accuracy', 'accuracy'],
['train', 'test', 'train', 'test']]
)
self._df.index.name = record_step
self._root_dir = root_dir
def record(self, index, train, loss, accuracy, **kws):
c_level1 = 'train' if train else 'test'
self._df.loc[index, ('loss', (c_level1))] = loss
self._df.loc[index, ('accuracy', (c_level1))] = accuracy
def save_csv(self, path, train=None):
self._df.to_csv('{0:s}_loss-acc.csv'.format(path))
class ConfusionRecorder(object):
'''
'''
items = ['diag_train', 'diag_test', 'num_train', 'num_test']
def __init__(self, record_step, num_classes, root_dir):
self.record_step = record_step
self._dfs = defaultdict(lambda: 'N/A')
self._confs = []
self._confs_keys = []
self._conf_df = None
self._root_dir = root_dir
for k in self.items:
self._dfs[k] = DataFrame(columns=np.arange(num_classes))
def record(self, index, train, diag, num, conf=None, **kws):
diag_key = 'diag_train' if train else 'diag_test'
num_key = 'num_train' if train else 'num_test'
self._dfs[diag_key].loc[index] = diag
self._dfs[num_key].loc[index] = num
if conf is not None and not train:
self._conf_df = DataFrame(conf)
self._conf_df .to_csv(
'./{2:s}/{0:s}_{1:d}_test_confusion.csv'.format(
self.record_step, index, self._root_dir)
)
self._confs.append(copy.deepcopy(self._conf_df))
self._confs_keys.append('epoch_{:d}'.format(index))
def save_csv(self, path, train=None):
df = pd.concat(
[self._dfs['diag_train'], self._dfs['diag_test'],
self._dfs['num_train'], self._dfs['num_test']],
axis=1, keys=self.items
)
df.index.name = self.record_step
df.to_csv('{:s}_diag.csv'.format(path))
if len(self._confs) > 0 and not train:
conf_concat_df = pd.concat(
self._confs, axis=1, keys=self._confs_keys
)
conf_concat_df.index.name = 'Target'
conf_concat_df.to_csv('{:s}_confusion.csv'.format(path))
def set_optimizer_lr(optimizer, lr):
# callback to set the learning rate in an optimizer, without rebuilding
# the whole optimizer
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
def sgdr(period, batch_idx):
# returns normalised anytime sgdr schedule given period and batch_idx
# best performing settings reported in paper are T_0 = 10, T_mult=2
# so always use T_mult=2
batch_idx = float(batch_idx)
restart_period = period
while batch_idx/restart_period > 1.:
batch_idx = batch_idx - restart_period
restart_period = restart_period * 2.
radians = math.pi*(batch_idx/restart_period)
return 0.5*(1.0 + math.cos(radians))
def check_visdom_server(vis):
'''check if visdom server start up
Args:
-----
vis: visdom.Visdom isinstance
Return:
-------
Throw a assert exception if visdom server not work,
return none if visdom server is running
'''
startup_sec = 1
while not vis.check_connection() and startup_sec > 0:
time.sleep(0.1)
startup_sec -= 0.1
assert vis.check_connection(), 'No visdom server found, \
use python -m visdom.server to start a visdom server'
| [
"pandas.DataFrame",
"tqdm.tqdm",
"copy.deepcopy",
"os.makedirs",
"numpy.power",
"torchnet.meter.AverageValueMeter",
"time.strftime",
"os.path.exists",
"time.sleep",
"collections.defaultdict",
"torchnet.meter.ConfusionMeter",
"numpy.arange",
"math.cos",
"torch.cuda.empty_cache",
"torchnet... | [((18738, 18772), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (18751, 18772), False, 'import time\n'), ((9120, 9143), 'tqdm.tqdm', 'tqdm', (["state['iterator']"], {}), "(state['iterator'])\n", (9124, 9143), False, 'from tqdm import tqdm\n'), ((10344, 10368), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (10366, 10368), False, 'import torch\n'), ((13629, 13663), 'os.path.join', 'os.path.join', (['csv_folder', 'csv_file'], {}), '(csv_folder, csv_file)\n', (13641, 13663), False, 'import os\n'), ((13766, 13790), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13788, 13790), False, 'import torch\n'), ((14467, 14501), 'os.path.join', 'os.path.join', (['csv_folder', 'csv_file'], {}), '(csv_folder, csv_file)\n', (14479, 14501), False, 'import os\n'), ((16619, 16648), 'torchnet.meter.AverageValueMeter', 'tnt.meter.AverageValueMeter', ([], {}), '()\n', (16646, 16648), True, 'import torchnet as tnt\n'), ((16674, 16714), 'torchnet.meter.ClassErrorMeter', 'tnt.meter.ClassErrorMeter', ([], {'accuracy': '(True)'}), '(accuracy=True)\n', (16699, 16714), True, 'import torchnet as tnt\n'), ((16746, 16800), 'torchnet.meter.ConfusionMeter', 'tnt.meter.ConfusionMeter', (['num_classes'], {'normalized': '(True)'}), '(num_classes, normalized=True)\n', (16770, 16800), True, 'import torchnet as tnt\n'), ((19204, 19230), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (19215, 19230), False, 'from collections import defaultdict\n'), ((19256, 19282), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (19267, 19282), False, 'from collections import defaultdict\n'), ((24747, 24774), 'collections.defaultdict', 'defaultdict', (["(lambda : 'N/A')"], {}), "(lambda : 'N/A')\n", (24758, 24774), False, 'from collections import defaultdict\n'), ((24948, 24968), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (24962, 24968), False, 'import os\n'), ((26596, 26697), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "[['loss', 'loss', 'accuracy', 'accuracy'], ['train', 'test', 'train', 'test']]"}), "(columns=[['loss', 'loss', 'accuracy', 'accuracy'], ['train',\n 'test', 'train', 'test']])\n", (26605, 26697), False, 'from pandas import DataFrame\n'), ((27386, 27413), 'collections.defaultdict', 'defaultdict', (["(lambda : 'N/A')"], {}), "(lambda : 'N/A')\n", (27397, 27413), False, 'from collections import defaultdict\n'), ((28342, 28479), 'pandas.concat', 'pd.concat', (["[self._dfs['diag_train'], self._dfs['diag_test'], self._dfs['num_train'],\n self._dfs['num_test']]"], {'axis': '(1)', 'keys': 'self.items'}), "([self._dfs['diag_train'], self._dfs['diag_test'], self._dfs[\n 'num_train'], self._dfs['num_test']], axis=1, keys=self.items)\n", (28351, 28479), True, 'import pandas as pd\n'), ((29965, 29980), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (29975, 29980), False, 'import time\n'), ((11292, 11323), 'os.path.exists', 'os.path.exists', (['self._model_dir'], {}), '(self._model_dir)\n', (11306, 11323), False, 'import os\n'), ((11337, 11365), 'os.makedirs', 'os.makedirs', (['self._model_dir'], {}), '(self._model_dir)\n', (11348, 11365), False, 'import os\n'), ((13917, 13943), 'os.path.exists', 'os.path.exists', (['csv_folder'], {}), '(csv_folder)\n', (13931, 13943), False, 'import os\n'), ((13957, 13980), 'os.makedirs', 'os.makedirs', (['csv_folder'], {}), '(csv_folder)\n', (13968, 13980), False, 'import os\n'), ((17479, 17497), 'numpy.diag', 'np.diag', (['confusion'], {}), '(confusion)\n', (17486, 17497), True, 'import numpy as np\n'), ((25020, 25037), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (25031, 25037), False, 'import os\n'), ((27970, 27985), 'pandas.DataFrame', 'DataFrame', (['conf'], {}), '(conf)\n', (27979, 27985), False, 'from pandas import DataFrame\n'), ((28687, 28740), 'pandas.concat', 'pd.concat', (['self._confs'], {'axis': '(1)', 'keys': 'self._confs_keys'}), '(self._confs, axis=1, keys=self._confs_keys)\n', (28696, 28740), True, 'import pandas as pd\n'), ((29601, 29618), 'math.cos', 'math.cos', (['radians'], {}), '(radians)\n', (29609, 29618), False, 'import math\n'), ((13135, 13166), 'os.path.exists', 'os.path.exists', (['self._model_dir'], {}), '(self._model_dir)\n', (13149, 13166), False, 'import os\n'), ((13184, 13212), 'os.makedirs', 'os.makedirs', (['self._model_dir'], {}), '(self._model_dir)\n', (13195, 13212), False, 'import os\n'), ((28192, 28220), 'copy.deepcopy', 'copy.deepcopy', (['self._conf_df'], {}), '(self._conf_df)\n', (28205, 28220), False, 'import copy\n'), ((27605, 27627), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (27614, 27627), True, 'import numpy as np\n'), ((8838, 8935), 'numpy.power', 'np.power', (["self._reset_lr_params['gamma']", "(self._reset_epoch // self._reset_lr_params['step'])"], {}), "(self._reset_lr_params['gamma'], self._reset_epoch // self.\n _reset_lr_params['step'])\n", (8846, 8935), True, 'import numpy as np\n')] |
"""Probabilistic models for classification."""
from dataclasses import dataclass
import numpy as np
from scipy.special import gammaln, softmax # pylint: disable=no-name-in-module
from scipy.linalg import solve_triangular, solve, eigh, eig
class Model:
"""Generic model interface."""
def __init__(self):
"""Inititialize classes as empty."""
self.classes = None
def fit(self, _data, labels):
"""Train the model on data and labels.
Parameters
----------
_data: ndarray
training data
labels: ndarray
training labels as a integer array (not one-hot encoding); the
labels don't have to be contiguous
"""
self.classes = np.array(sorted(list(set(labels))))
def _predict_proba(self, data):
"""Return log-likelihood; private template method."""
def predict_proba(self, data, llh=True):
"""Predicts per-class probabilities.
Parameters
----------
data: ndarray
the input data for which the predictions will be computed
llh: bool
return the log-likelihood (default), otherwise return normalized
probabilities
"""
return softmax(self._predict_proba(data), axis=-1) if not llh else \
self._predict_proba(data)
def predict(self, data):
"""Predict classes."""
return self.classes[np.argmax(self.predict_proba(data), axis=-1)]
def _pca(mat, perc):
"""Return the transformation capturing 'perc' variance of the sample."""
eiv, eiw = eigh(mat, check_finite=False)
nd_ = next(iter(np.nonzero(np.cumsum(eiv[::-1] / np.sum(eiv)) > perc)[0]),
None)
if nd_ is None:
return -eiw
return -eiw[:, -nd_ - 2:]
def _lda(mat, perc):
"""Return optimal separation hyperplane given the covariance."""
eiv, eiw = eig(mat, check_finite=False)
eiv, eiw = np.real(eiv), np.real(eiw)
nd_ = next(iter(np.nonzero(np.cumsum(eiv / np.sum(eiv)) > perc)[0]), None)
if nd_ is None:
return eiw
return eiw[:, :nd_ + 1]
def _get_balanced_stats(data, labels, ids, classes=None):
"""Get per-class mean / std averaged per image."""
if classes is None:
classes = np.unique(labels)
dim, n_k = data.shape[1], len(classes)
mu_0, s_0 = np.zeros((n_k, dim)), np.zeros((n_k, dim, dim))
for i, kls in enumerate(classes):
data_k = data[labels == kls, :]
id_k = ids[labels == kls].squeeze()
uid_k = np.unique(id_k)
for id_ in uid_k:
data_i = data_k[id_k == id_, :]
mu_0[i, :] += np.mean(data_i, axis=0)
s_0[i, :, :] += np.cov(data_i.T)
s_0[i, :, :] /= len(uid_k)
mu_0[i, :] /= len(uid_k)
return mu_0, s_0
#
# 2-level Gaussian Mixture Model
#
@dataclass
class HBMPriorData:
"""Prior parameters for the 2-layer GMM model."""
mu_0: np.ndarray
s_0: np.ndarray
scale: float
k_0: float
k_1: float
wdof: float
class HBMPrior: # pylint: disable=too-few-public-methods
"""Generates a prior based on the given parameters."""
def __init__(self, mode=None, perc=0.99):
"""Initialize the prior parameters.
Parameters
----------
mode: None|'pca'|'lda'
optional dimensionality reduction using PCA or LDA
perc: float
variance captured by dimensionality reduction (only if mode is not
None)
"""
self.mode, self.perc, self.mean_cov = mode, perc, None
def _get_default_prior(self, data, labels, ids, classes):
"""Compute prior parameters from data statistics."""
dim, n_k = data.shape[1], len(classes)
mu_0, s_0 = _get_balanced_stats(data, labels, ids, classes)
if self.mode == 'lda':
self.mean_cov = np.cov(mu_0.T)
mu_0, s_0 = np.mean(mu_0, axis=0), np.mean(s_0, axis=0)
_ones = np.ones((n_k,))
return HBMPriorData(mu_0=mu_0, scale=1 * _ones, k_0=1 * _ones,
s_0=s_0, k_1=100 * _ones, wdof=(dim + 2) * _ones)
def get_prior(self, data, labels, ids, classes):
"""Return the prior and the (optional) data transformation.
Parameters
----------
data: ndarray
the training data to compute the prior values
labels: ndarray
the training labels to compute per-class statistics
ids: ndarray
image ids to compute per-image statistics
classes: ndarray
list of classes for which the prior will be computed
Returns
-------
prior: HBMPriorData
dataclass storing the prior parameters
vtr: ndarray
matrix encoding the dimensionality reduction transformation
"""
prior, vtr = self._get_default_prior(data, labels, ids, classes), None
if self.mode == 'pca':
vtr = _pca(prior.s_0, self.perc)
elif self.mode == 'lda':
vtr = _lda(solve(prior.s_0, self.mean_cov, assume_a='pos'),
self.perc)
if vtr is not None:
prior.s_0, prior.mu_0 = vtr.T @ prior.s_0 @ vtr, prior.mu_0 @ vtr
return prior, vtr
class HBM(Model): # pylint: disable=too-many-instance-attributes
r"""Hierarchical Bayesian Model (HBM).
The mineral distribution is modeled by a Gaussian distribution with a local
(per image) prior, in turn derived from a global prior. The distribution is
given by:
.. math::
\text{Data model:} & \quad \boldsymbol{x}_{ijk} \sim \mathcal{N}(
\boldsymbol{\mu}_{jk}, \Sigma_k) \\
\text{Local prior:} & \quad \boldsymbol{\mu}_{jk} \sim \mathcal{N}(
\boldsymbol{\mu}_k, \Sigma_{k}\kappa_1^{-1}) \\
\text{Global prior:} & \quad \boldsymbol{\mu}_k \sim \mathcal{N}(
\boldsymbol{\mu}_0,\Sigma_k\kappa_0^{-1}) \quad \Sigma_{k} \sim
\mathcal{IW} (\Sigma_0,m)
where *k*, *j* and *i* indicate the class, instance (image) and pixel
respectively; :math:`\mathcal{IW}` is the `Inverse Wishart`_ distribution.
We compute the *posterior predictive distribution (PPD)* given the data,
the labels and the image instances, which can be computed in closed form,
as a multi-dimensional *t*-student T, and we compute the class
log-likelihood of new samples from it.
The PPD is given by:
.. math::
P(\boldsymbol{x}|\mathcal{D}) = T(\boldsymbol{x}_{ji}|\boldsymbol{
\bar{\mu}}_k,\bar{\Sigma}_s,\bar{\nu}_s)
where:
.. math::
\boldsymbol{\bar{\mu}}_k &= \frac{\kappa_k\boldsymbol{\bar{x}}_{jk}+
\kappa_0\boldsymbol{\mu}_0}{\kappa_k+\kappa_{0}} \quad \text{where}
\quad \kappa_k = \sum_{j=1}^{n_k}\frac{n_{jk}\kappa_{1}}
{(n_{jk}+\kappa_{1})} \\
\bar{\Sigma}_s &= \frac{\bar{S_s}(\bar{\kappa}_s+1)}{
\bar{\kappa}_s\nu_s} \quad \text{where} \quad \bar{S_{s}} =
\Sigma_{0}+\sum_{j=1}^{n_k}S_{jk} \\
\bar{\kappa}_{s} &= \frac{\kappa_s\kappa_1}{\kappa_s+\kappa_{1}} \quad
\text{where} \quad \kappa_s = \sum_{j=1}^{n_k}\frac{n_{jk}
\kappa_1}{(n_{jk}+\kappa_1)}+\kappa_0 \\
\bar{\nu}_s &= m+\sum_{j=1}^{n_k}(n_{jk}-1)-d+1
and where :math:`n_{jk}` is the number of pixels for class *k* and instance
*j*, :math:`S_{jk}` is the sample convariance, and
:math:`\boldsymbol{\bar{x}}_{jk}` is the sample mean. The hyperparameters
are :math:`\boldsymbol{\mu}_0`, :math:`\Sigma_{0}` set to the average of
the mean of the per-class and per-instance statistics; :math:`\kappa_0=1`
and :math:`\kappa_1=100`; and :math:`m=d+2` where *d* is the dimension of
the data.
If 'only_class' is False, the local prior defines an additional "outlier"
class to detect out-of-distribution samples: if the likelihood of the prior
is larger than any of the other classes after computing the posterior,
the sample is considered an outlier.
.. _Inverse Wishart: https://en.wikipedia.org/wiki/Inverse-Wishart_\
distribution
"""
def __init__(self, only_class=False, prior=HBMPrior()):
"""Initialize the HBM model.
Parameters
----------
only_class: bool
do not compute outlier likelihood (classification only); default:
False
prior: HBMPrior
custom prior; by default, no dimensionality reduction is used
"""
super().__init__()
self.prior, self.only_class, self.vtr = prior, only_class, None
self.v_s = self.kap_s = self.mu_s = self.sig_s = self.sum_skl = None
def fit(self, data, labels, ids=None): # pylint: disable=arguments-differ
"""Train the HBM on data.
Parameters
----------
ids: ndarray
image ids; if None, a dummy image id is created (not reccommended)
"""
# pylint: disable=too-many-locals
super().fit(data, labels)
if ids is None:
ids = np.zeros_like(labels)
prior, self.vtr = self.prior.get_prior(
data, labels, ids, self.classes)
old_dim = data.shape[1] # only for psi
if self.vtr is not None:
data = data @ self.vtr
dim = data.shape[1]
n_kls = len(self.classes)
n_kls_o = n_kls if self.only_class else (n_kls + 1) # with outliers
psi_dofs = dim if self.only_class else old_dim
self.kap_s = np.zeros((n_kls,))
self.v_s = np.zeros((n_kls_o,), dtype=np.int32)
self.mu_s = np.zeros((n_kls_o, dim))
self.sig_s = np.zeros((dim, dim, n_kls_o))
self.sum_skl = np.zeros((dim, dim, n_kls))
for i, kls in enumerate(self.classes):
in_ = labels == kls
data_k, id_k = data[in_], ids[in_]
uid_k = np.unique(id_k)
n_k = len(uid_k)
n_kl, kap = np.zeros((n_k,)), np.zeros((n_k,))
x_kl, s_kl = np.zeros((n_k, dim)), np.zeros((dim, dim, n_k))
for j, id_ in enumerate(uid_k):
in_id = (id_k == id_).squeeze()
n_kl[j] = np.sum(in_id)
kap[j] = n_kl[j] * prior.k_1[i] / (n_kl[j] + prior.k_1[i])
data_ki = data_k[in_id, :]
x_kl[j, :] = np.mean(data_ki, axis=0)
s_kl[:, :, j] = (n_kl[j] - 1) * np.cov(data_ki.T)
sumkap = np.sum(kap) + prior.k_0[i]
kaps = sumkap * prior.k_1[i] / (sumkap + prior.k_1[i])
self.sum_skl[:, :, i] = np.sum(s_kl, axis=2)
psi = prior.s_0 * (prior.wdof[i] - psi_dofs - 1) / prior.scale[i]
self.v_s[i] = np.sum(n_kl) - n_k + prior.wdof[i] - dim + 1
self.sig_s[:, :, i] = (psi + self.sum_skl[:, :, i]) / (
(kaps * self.v_s[i]) / (kaps + 1))
self.mu_s[i, :] = (np.sum(x_kl * kap[:, np.newaxis], axis=0) +
prior.k_0[i] * prior.mu_0) / sumkap
self.kap_s[i] = sumkap
if not self.only_class: # compute also outlier likelihood
kaps = (prior.k_0[-1] * prior.k_1[-1]) / (
prior.k_0[-1] + prior.k_1[-1])
self.v_s[-1] = prior.wdof[-1] - dim + 1
self.sig_s[:, :, -1] = psi / ((kaps * self.v_s[-1]) / (kaps + 1))
self.mu_s[-1, :] = prior.mu_0
self.classes = np.append(self.classes, -1) # outliers
def _predict_proba(self, data):
"""Predicts the log-likelihood of the samples."""
*shape, dim = data.shape
data = data.reshape(-1, dim) # to work on nd inputs
if self.vtr is not None:
data = data @ self.vtr
dat_f = data.T.copy('F') # Fortran order to speed up solve_triangular
piconst = 0.5 * dim * np.log(np.pi)
gl_pc = gammaln(np.arange(0.5, np.max(self.v_s) + dim + 0.5, 0.5))
llh = np.zeros((data.shape[0], len(self.classes)))
for i, _ in enumerate(self.classes):
ch_sig = np.linalg.cholesky(self.sig_s[:, :, i])
diff = solve_triangular(ch_sig, dat_f - self.mu_s[i:i+1].T,
overwrite_b=True, check_finite=False,
lower=True).T
t_par = gl_pc[self.v_s[i] + dim - 1] - gl_pc[self.v_s[i] - 1] - \
0.5 * dim * np.log(self.v_s[i]) - piconst - \
np.sum(np.log(ch_sig.diagonal()))
norm2 = np.einsum('ij,ij->i', diff, diff) # faster than sum(x**2)
llh[:, i] = t_par - 0.5 * (self.v_s[i] + dim) * np.log1p(
norm2 / self.v_s[i])
return llh.reshape(*shape, -1)
if __name__ == '__main__':
pass
| [
"scipy.linalg.solve",
"numpy.zeros_like",
"numpy.sum",
"numpy.log",
"scipy.linalg.solve_triangular",
"numpy.zeros",
"numpy.ones",
"scipy.linalg.eig",
"numpy.einsum",
"numpy.append",
"numpy.max",
"numpy.mean",
"scipy.linalg.eigh",
"numpy.real",
"numpy.cov",
"numpy.log1p",
"numpy.uniqu... | [((1595, 1624), 'scipy.linalg.eigh', 'eigh', (['mat'], {'check_finite': '(False)'}), '(mat, check_finite=False)\n', (1599, 1624), False, 'from scipy.linalg import solve_triangular, solve, eigh, eig\n'), ((1903, 1931), 'scipy.linalg.eig', 'eig', (['mat'], {'check_finite': '(False)'}), '(mat, check_finite=False)\n', (1906, 1931), False, 'from scipy.linalg import solve_triangular, solve, eigh, eig\n'), ((1947, 1959), 'numpy.real', 'np.real', (['eiv'], {}), '(eiv)\n', (1954, 1959), True, 'import numpy as np\n'), ((1961, 1973), 'numpy.real', 'np.real', (['eiw'], {}), '(eiw)\n', (1968, 1973), True, 'import numpy as np\n'), ((2278, 2295), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2287, 2295), True, 'import numpy as np\n'), ((2356, 2376), 'numpy.zeros', 'np.zeros', (['(n_k, dim)'], {}), '((n_k, dim))\n', (2364, 2376), True, 'import numpy as np\n'), ((2378, 2403), 'numpy.zeros', 'np.zeros', (['(n_k, dim, dim)'], {}), '((n_k, dim, dim))\n', (2386, 2403), True, 'import numpy as np\n'), ((2543, 2558), 'numpy.unique', 'np.unique', (['id_k'], {}), '(id_k)\n', (2552, 2558), True, 'import numpy as np\n'), ((3975, 3990), 'numpy.ones', 'np.ones', (['(n_k,)'], {}), '((n_k,))\n', (3982, 3990), True, 'import numpy as np\n'), ((9566, 9584), 'numpy.zeros', 'np.zeros', (['(n_kls,)'], {}), '((n_kls,))\n', (9574, 9584), True, 'import numpy as np\n'), ((9604, 9640), 'numpy.zeros', 'np.zeros', (['(n_kls_o,)'], {'dtype': 'np.int32'}), '((n_kls_o,), dtype=np.int32)\n', (9612, 9640), True, 'import numpy as np\n'), ((9661, 9685), 'numpy.zeros', 'np.zeros', (['(n_kls_o, dim)'], {}), '((n_kls_o, dim))\n', (9669, 9685), True, 'import numpy as np\n'), ((9707, 9736), 'numpy.zeros', 'np.zeros', (['(dim, dim, n_kls_o)'], {}), '((dim, dim, n_kls_o))\n', (9715, 9736), True, 'import numpy as np\n'), ((9760, 9787), 'numpy.zeros', 'np.zeros', (['(dim, dim, n_kls)'], {}), '((dim, dim, n_kls))\n', (9768, 9787), True, 'import numpy as np\n'), ((2656, 2679), 'numpy.mean', 'np.mean', (['data_i'], {'axis': '(0)'}), '(data_i, axis=0)\n', (2663, 2679), True, 'import numpy as np\n'), ((2708, 2724), 'numpy.cov', 'np.cov', (['data_i.T'], {}), '(data_i.T)\n', (2714, 2724), True, 'import numpy as np\n'), ((3879, 3893), 'numpy.cov', 'np.cov', (['mu_0.T'], {}), '(mu_0.T)\n', (3885, 3893), True, 'import numpy as np\n'), ((3914, 3935), 'numpy.mean', 'np.mean', (['mu_0'], {'axis': '(0)'}), '(mu_0, axis=0)\n', (3921, 3935), True, 'import numpy as np\n'), ((3937, 3957), 'numpy.mean', 'np.mean', (['s_0'], {'axis': '(0)'}), '(s_0, axis=0)\n', (3944, 3957), True, 'import numpy as np\n'), ((9116, 9137), 'numpy.zeros_like', 'np.zeros_like', (['labels'], {}), '(labels)\n', (9129, 9137), True, 'import numpy as np\n'), ((9935, 9950), 'numpy.unique', 'np.unique', (['id_k'], {}), '(id_k)\n', (9944, 9950), True, 'import numpy as np\n'), ((10636, 10656), 'numpy.sum', 'np.sum', (['s_kl'], {'axis': '(2)'}), '(s_kl, axis=2)\n', (10642, 10656), True, 'import numpy as np\n'), ((11473, 11500), 'numpy.append', 'np.append', (['self.classes', '(-1)'], {}), '(self.classes, -1)\n', (11482, 11500), True, 'import numpy as np\n'), ((11880, 11893), 'numpy.log', 'np.log', (['np.pi'], {}), '(np.pi)\n', (11886, 11893), True, 'import numpy as np\n'), ((12095, 12134), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['self.sig_s[:, :, i]'], {}), '(self.sig_s[:, :, i])\n', (12113, 12134), True, 'import numpy as np\n'), ((12542, 12575), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'diff', 'diff'], {}), "('ij,ij->i', diff, diff)\n", (12551, 12575), True, 'import numpy as np\n'), ((10004, 10020), 'numpy.zeros', 'np.zeros', (['(n_k,)'], {}), '((n_k,))\n', (10012, 10020), True, 'import numpy as np\n'), ((10022, 10038), 'numpy.zeros', 'np.zeros', (['(n_k,)'], {}), '((n_k,))\n', (10030, 10038), True, 'import numpy as np\n'), ((10064, 10084), 'numpy.zeros', 'np.zeros', (['(n_k, dim)'], {}), '((n_k, dim))\n', (10072, 10084), True, 'import numpy as np\n'), ((10086, 10111), 'numpy.zeros', 'np.zeros', (['(dim, dim, n_k)'], {}), '((dim, dim, n_k))\n', (10094, 10111), True, 'import numpy as np\n'), ((10231, 10244), 'numpy.sum', 'np.sum', (['in_id'], {}), '(in_id)\n', (10237, 10244), True, 'import numpy as np\n'), ((10393, 10417), 'numpy.mean', 'np.mean', (['data_ki'], {'axis': '(0)'}), '(data_ki, axis=0)\n', (10400, 10417), True, 'import numpy as np\n'), ((10506, 10517), 'numpy.sum', 'np.sum', (['kap'], {}), '(kap)\n', (10512, 10517), True, 'import numpy as np\n'), ((12154, 12262), 'scipy.linalg.solve_triangular', 'solve_triangular', (['ch_sig', '(dat_f - self.mu_s[i:i + 1].T)'], {'overwrite_b': '(True)', 'check_finite': '(False)', 'lower': '(True)'}), '(ch_sig, dat_f - self.mu_s[i:i + 1].T, overwrite_b=True,\n check_finite=False, lower=True)\n', (12170, 12262), False, 'from scipy.linalg import solve_triangular, solve, eigh, eig\n'), ((5063, 5110), 'scipy.linalg.solve', 'solve', (['prior.s_0', 'self.mean_cov'], {'assume_a': '"""pos"""'}), "(prior.s_0, self.mean_cov, assume_a='pos')\n", (5068, 5110), False, 'from scipy.linalg import solve_triangular, solve, eigh, eig\n'), ((10466, 10483), 'numpy.cov', 'np.cov', (['data_ki.T'], {}), '(data_ki.T)\n', (10472, 10483), True, 'import numpy as np\n'), ((10957, 10998), 'numpy.sum', 'np.sum', (['(x_kl * kap[:, np.newaxis])'], {'axis': '(0)'}), '(x_kl * kap[:, np.newaxis], axis=0)\n', (10963, 10998), True, 'import numpy as np\n'), ((12661, 12690), 'numpy.log1p', 'np.log1p', (['(norm2 / self.v_s[i])'], {}), '(norm2 / self.v_s[i])\n', (12669, 12690), True, 'import numpy as np\n'), ((11933, 11949), 'numpy.max', 'np.max', (['self.v_s'], {}), '(self.v_s)\n', (11939, 11949), True, 'import numpy as np\n'), ((10762, 10774), 'numpy.sum', 'np.sum', (['n_kl'], {}), '(n_kl)\n', (10768, 10774), True, 'import numpy as np\n'), ((12438, 12457), 'numpy.log', 'np.log', (['self.v_s[i]'], {}), '(self.v_s[i])\n', (12444, 12457), True, 'import numpy as np\n'), ((1679, 1690), 'numpy.sum', 'np.sum', (['eiv'], {}), '(eiv)\n', (1685, 1690), True, 'import numpy as np\n'), ((2022, 2033), 'numpy.sum', 'np.sum', (['eiv'], {}), '(eiv)\n', (2028, 2033), True, 'import numpy as np\n')] |
import numpy as np
from scipy import constants
#defining the function to be integrated with
def f(x):
return (x**3)/((np.exp(x)) - 1.)
#defining the simpsons rule calculation
def simpsonsRule(f,a,b,N):
h = (b-a)/N
oddSum = 0
for k in range(1, N, 2):
oddSum += f(a+k*h)
evenSum = 0
for k in range(2, N, 2):
evenSum += f(a+k*h)
integral = (h/3)*(f(a)+f(b)+4*oddSum+2*evenSum)
return integral
#constants for simpsons rule where N is the number of slices which must be even
# a is the lower bound which we picked a really small number to approximate 0 to
# avoid reaching division by zero
# b is the upper bound which we picked a big number to approximate infinity
# since we are going to the e^700 which is reaching python's limit and we do not
# want to have overflow issues
N = 10000
a = 0.000001
b = 700
#checking integral with wolfram alpha's result
integral = simpsonsRule(f, a, b, N)
print('integral:', integral)
#let temperature to be 100 Kelvin for our calculations
T = 100
#The constant that we got from part a
C = (2 * constants.pi * (constants.k**4) * (T**4))/((constants.h**3)*(constants.c**2))
W = C * integral
#comparing our results and checking the accuracy
print('constant from integration', W/(T**4))
print('scipy constant', constants.sigma)
print('Accuracy', (1 - ((W/(T**4)) - constants.sigma)/constants.sigma) * 100, '%')
| [
"numpy.exp"
] | [((128, 137), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (134, 137), True, 'import numpy as np\n')] |
'''
Classify single images (for fun)
Input:
Image of building in Dataset
Output: label
'''
import argparse
import numpy as np
from cv2 import imread
from sklearn import svm, preprocessing
from sklearn import cross_validation
from sklearn.multiclass import OneVsRestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import data_prep as dp
if __name__ == '__main__':
file_name = "sheffield_test.jpg"
kernel_size = 5
theta_list = [0,45,90,135,180,225,270,315]
class_labels = np.load("class_labels.npy")
dataset = np.load("dataset.npy")
dataset_256 = np.load("dataset_256.npy")
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="path to image to classify")
args = parser.parse_args()
if args.image:
file_name = args.image
img = imread(file_name,0)
feature_maps = []
for theta in theta_list:
feature_maps.append(dp.steerableGaussian(img,theta,kernel_size))
feature_maps.append(dp.steerableHilbert(img,theta,kernel_size))
lda_input = np.array([], dtype=np.uint8)
for feature_map in feature_maps:
pooled = dp.imageMaxPool(feature_map)
lda_input = np.append(lda_input,pooled)
lda_input = np.resize(lda_input,(1,256))
lda = LinearDiscriminantAnalysis(n_components=39)
reduced = lda.fit(dataset_256, class_labels).transform(lda_input)
scaler = preprocessing.StandardScaler().fit(dataset)
scaler.transform(reduced)
clf = OneVsRestClassifier(svm.LinearSVC(random_state=0)).fit(dataset, class_labels)
print(clf.predict(reduced))
| [
"data_prep.steerableGaussian",
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"argparse.ArgumentParser",
"numpy.resize",
"data_prep.steerableHilbert",
"cv2.imread",
"data_prep.imageMaxPool",
"numpy.append",
"numpy.array",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"skle... | [((524, 551), 'numpy.load', 'np.load', (['"""class_labels.npy"""'], {}), "('class_labels.npy')\n", (531, 551), True, 'import numpy as np\n'), ((566, 588), 'numpy.load', 'np.load', (['"""dataset.npy"""'], {}), "('dataset.npy')\n", (573, 588), True, 'import numpy as np\n'), ((607, 633), 'numpy.load', 'np.load', (['"""dataset_256.npy"""'], {}), "('dataset_256.npy')\n", (614, 633), True, 'import numpy as np\n'), ((648, 673), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (671, 673), False, 'import argparse\n'), ((836, 856), 'cv2.imread', 'imread', (['file_name', '(0)'], {}), '(file_name, 0)\n', (842, 856), False, 'from cv2 import imread\n'), ((1069, 1097), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.uint8'}), '([], dtype=np.uint8)\n', (1077, 1097), True, 'import numpy as np\n'), ((1246, 1276), 'numpy.resize', 'np.resize', (['lda_input', '(1, 256)'], {}), '(lda_input, (1, 256))\n', (1255, 1276), True, 'import numpy as np\n'), ((1286, 1329), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {'n_components': '(39)'}), '(n_components=39)\n', (1312, 1329), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((1152, 1180), 'data_prep.imageMaxPool', 'dp.imageMaxPool', (['feature_map'], {}), '(feature_map)\n', (1167, 1180), True, 'import data_prep as dp\n'), ((1201, 1229), 'numpy.append', 'np.append', (['lda_input', 'pooled'], {}), '(lda_input, pooled)\n', (1210, 1229), True, 'import numpy as np\n'), ((935, 980), 'data_prep.steerableGaussian', 'dp.steerableGaussian', (['img', 'theta', 'kernel_size'], {}), '(img, theta, kernel_size)\n', (955, 980), True, 'import data_prep as dp\n'), ((1008, 1052), 'data_prep.steerableHilbert', 'dp.steerableHilbert', (['img', 'theta', 'kernel_size'], {}), '(img, theta, kernel_size)\n', (1027, 1052), True, 'import data_prep as dp\n'), ((1414, 1444), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (1442, 1444), False, 'from sklearn import svm, preprocessing\n'), ((1519, 1548), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1532, 1548), False, 'from sklearn import svm, preprocessing\n')] |
import keras
import pickle
import cv2
import numpy as np
import sys
'''This is to supress the tensorflow warnings. If something odd happens, remove them and try to debug form the warnings'''
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#import tensorflow as tf
'''This is to supress the tensorflow warnings. If something odd happens, remove them and try to debug form the warnings'''
#face_detection_path= "Face_rec/face_detection_model/res10_300x300_ssd_iter_140000.caffemodel"
#proto_path = "Face_rec/face_detection_model/deploy.prototxt"
#model_path = 'Face_rec/pickle/holly_MobileNet_3(50_class).h5'
#label_path = 'Face_rec/pickle/holly_50_classes_lableencoder.pickle'
class FaceIndentity:
dir_path=__file__[:-12]
face_detection_path= dir_path+"caffemodel/res10_300x300_ssd_iter_140000.caffemodel"
proto_path = dir_path+"face_detection_model/deploy.prototxt"
model_path = dir_path+'h5/holly_MobileNet_3(50_class).h5'
label_path = dir_path+'pickle/holly_50_classes_lableencoder.pickle'
def __init__(self):
self.detector = cv2.dnn.readNetFromCaffe(self.proto_path, self.face_detection_path)
self.model = keras.models.load_model(self.model_path)
self.labelencoder = pickle.load(open(self.label_path,'rb'))
def predict_image(self, image):
image_np = np.asarray(image)
self.getFace_CV2DNN(image)
def getFace_CV2DNN(self, image):
facelist = []
(h,w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300,300)),1.0, (300,300),(104.0, 177.0, 123.0), swapRB= False, crop = False)
self.detector.setInput(blob)
detections = self.detector.forward()
fH = 0
fW = 0
for i in range(0,detections.shape[2]):
confidence = detections[0,0,i,2];
if confidence < 0.7:
continue
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
cv2.rectangle(image, (startX, startY), (endX, endY), (0,0,255), 2)
fH = endX - startX
fW = endY - startY
if fH < 20 or fW < 20:
continue
facelist.append((startX,startY,endX, endY))
self.setLabel(facelist, image)
def setLabel(self, facelist,image):
for (x1,y1,x2,y2) in facelist:
face = image[y1:y2, x1:x2]
if(face.shape == (0,0,3)):
return
try :
im = cv2.resize(face, (224, 224)).astype(np.float32) / 255.0
im = im.reshape(1,224,224,3)
out = self.model.predict(im)
label = np.argmax(out)
name = self.labelencoder.get(label)[5:]
print('Person Found is :',name)
cv2.putText(img= image,
text=name,
org=(x1,y1),
fontFace = cv2.FONT_HERSHEY_COMPLEX,
fontScale= 0.5,
color=(255,100,50),
thickness= 1,
lineType=cv2.LINE_AA)
except Exception as e:
print("Some Error in image: ", e)
#reg = FaceIndentity(face_detection_path,proto_path,model_path,label_path)
#path='Face_rec/image/12.jpg'
#image = cv2.imread(sys.argv[1])
#image=cv2.imread(path)
#reg.predict_image(image)
| [
"keras.models.load_model",
"cv2.dnn.readNetFromCaffe",
"cv2.putText",
"numpy.argmax",
"numpy.asarray",
"numpy.array",
"cv2.rectangle",
"cv2.resize"
] | [((1086, 1153), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['self.proto_path', 'self.face_detection_path'], {}), '(self.proto_path, self.face_detection_path)\n', (1110, 1153), False, 'import cv2\n'), ((1176, 1216), 'keras.models.load_model', 'keras.models.load_model', (['self.model_path'], {}), '(self.model_path)\n', (1199, 1216), False, 'import keras\n'), ((1344, 1361), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1354, 1361), True, 'import numpy as np\n'), ((1527, 1556), 'cv2.resize', 'cv2.resize', (['image', '(300, 300)'], {}), '(image, (300, 300))\n', (1537, 1556), False, 'import cv2\n'), ((2027, 2095), 'cv2.rectangle', 'cv2.rectangle', (['image', '(startX, startY)', '(endX, endY)', '(0, 0, 255)', '(2)'], {}), '(image, (startX, startY), (endX, endY), (0, 0, 255), 2)\n', (2040, 2095), False, 'import cv2\n'), ((1931, 1953), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1939, 1953), True, 'import numpy as np\n'), ((2704, 2718), 'numpy.argmax', 'np.argmax', (['out'], {}), '(out)\n', (2713, 2718), True, 'import numpy as np\n'), ((2840, 3003), 'cv2.putText', 'cv2.putText', ([], {'img': 'image', 'text': 'name', 'org': '(x1, y1)', 'fontFace': 'cv2.FONT_HERSHEY_COMPLEX', 'fontScale': '(0.5)', 'color': '(255, 100, 50)', 'thickness': '(1)', 'lineType': 'cv2.LINE_AA'}), '(img=image, text=name, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_COMPLEX, fontScale=0.5, color=(255, 100, 50), thickness=1,\n lineType=cv2.LINE_AA)\n', (2851, 3003), False, 'import cv2\n'), ((2533, 2561), 'cv2.resize', 'cv2.resize', (['face', '(224, 224)'], {}), '(face, (224, 224))\n', (2543, 2561), False, 'import cv2\n')] |
import os
import numpy as np
import periodictable
class GaussianInput:
def __init__(self, link0, routes,
atom_symbols, atom_coords,
charge=0, multiplicity=1,
title="Title Card Required", extras=None):
self.link0 = link0
self.routes = routes
self.charge = charge
self.multiplicity = multiplicity
self.atom_symbols = atom_symbols
self.atom_coords = atom_coords
self.title = title
self.extras = extras or []
@classmethod
def read(cls, fname):
with open(fname, 'r') as f:
sections = f.read().split("\n\n")
# Link 0 and Routes
link0, routes = {}, {}
routes_started = False
for line in sections[0].split("\n"):
if line[0] == '%':
segments = line[1:].split("=")
if segments[0] not in link0:
link0[segments[0]] = '='.join(segments[1:])
if line[0] == '#' or routes_started:
routes_started = True
if line[0] == '#':
line = line[1:]
route_str = line.split()
for s in route_str:
segments = s.split("=")
if len(segments) > 1:
routes[segments[0]] = "=".join(segments[1:])
else:
routes[segments[0]] = None
title = sections[1]
charge, multiplicity = map(int, re.match(r"(\d+)\s+(\d+)", sections[2]).groups())
symbols, coords = [], []
for line in sections[2].split('\n')[1:]:
symbol, *coord = line.split()
symbols.append(symbol)
coords.append(np.array(coord, dtype=np.float))
symbols, coords = np.array(symbols), np.array(coords)
extras = [section for section in sections[3:] if section != '']
return cls(link0, routes, symbols, coords, charge, multiplicity, title, extras)
@classmethod
def read_log(cls, logfname, link0, routes, opt_step=-1, **kwargs):
import cclib
parser = cclib.parser.Gaussian(logfname)
data = parser.parse()
symbols = []
for atom_no in data.atomnos:
symbols.append(periodictable.elements[atom_no].symbol)
coords = data.atomcoords[opt_step]
charge = data.charge
multiplicity = data.mult
return cls(link0, routes, symbols, coords, charge, multiplicity, **kwargs)
def save(self, fname, verbose=True, skip_if_exists=False):
if skip_if_exists:
if os.path.exists(fname):
return
lines = []
for header, value in self.link0.items():
if value is None:
lines.append('%{}'.format(header))
else:
lines.append('%{}={}'.format(header, value))
route_str = "# "
for keyword, value in self.routes.items():
if value is None:
route_str += keyword + " "
else:
route_str += "{}={} ".format(keyword, value)
lines.append(route_str)
lines.append('')
lines.append(self.title + "\n")
lines.append('{} {}'.format(self.charge, self.multiplicity))
for atom_symbol, atom_coord in zip(self.atom_symbols, self.atom_coords):
lines.append(
' {} {:11.8f} {:11.8f} {:11.8f}'.format(atom_symbol, *list(atom_coord))
)
lines.append('')
for extra in self.extras:
lines.append(extra)
lines.append('')
lines.append('')
if "." not in fname:
fname += ".com"
with open(fname, 'wb') as f:
f.write('\n'.join(lines).encode())
if verbose:
print("Successfully saved Gaussian input file {}".format(os.path.basename(fname)))
@staticmethod
def update_kw(base, *args):
d = base.copy()
for arg in args:
if isinstance(arg, dict):
d.update(arg)
elif isinstance(arg, list):
for elm in arg:
d[elm] = None
elif isinstance(arg, str):
d[arg] = None
return d
| [
"os.path.basename",
"numpy.array",
"os.path.exists",
"cclib.parser.Gaussian"
] | [((2136, 2167), 'cclib.parser.Gaussian', 'cclib.parser.Gaussian', (['logfname'], {}), '(logfname)\n', (2157, 2167), False, 'import cclib\n'), ((1811, 1828), 'numpy.array', 'np.array', (['symbols'], {}), '(symbols)\n', (1819, 1828), True, 'import numpy as np\n'), ((1830, 1846), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (1838, 1846), True, 'import numpy as np\n'), ((2619, 2640), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (2633, 2640), False, 'import os\n'), ((1752, 1783), 'numpy.array', 'np.array', (['coord'], {'dtype': 'np.float'}), '(coord, dtype=np.float)\n', (1760, 1783), True, 'import numpy as np\n'), ((3904, 3927), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (3920, 3927), False, 'import os\n')] |
import torch
import numpy as np
from config import Config
class Tensor():
def __init__(self, tensor):
self.tensor = tensor
def torch_tensor_for_device(self):
tensor = self.tensor
if isinstance(tensor, torch.Tensor):
return tensor
tensor = np.asarray(tensor, dtype=np.float32)
tensor = torch.from_numpy(tensor).to(Config.DEVICE)
return tensor
def to_np(self):
return self.tensor.cpu().detach().numpy()
| [
"numpy.asarray",
"torch.from_numpy"
] | [((294, 330), 'numpy.asarray', 'np.asarray', (['tensor'], {'dtype': 'np.float32'}), '(tensor, dtype=np.float32)\n', (304, 330), True, 'import numpy as np\n'), ((348, 372), 'torch.from_numpy', 'torch.from_numpy', (['tensor'], {}), '(tensor)\n', (364, 372), False, 'import torch\n')] |
"""
tanh
~~~~
Plots a graph of the tanh function."""
import numpy as np
import matplotlib.pyplot as plt
z = np.arange(-5, 5, .1)
t = np.tanh(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, t)
ax.set_ylim([-1.0, 1.0])
ax.set_xlim([-5,5])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('tanh function')
plt.show()
| [
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.tanh",
"matplotlib.pyplot.show"
] | [((111, 132), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.1)'], {}), '(-5, 5, 0.1)\n', (120, 132), True, 'import numpy as np\n'), ((136, 146), 'numpy.tanh', 'np.tanh', (['z'], {}), '(z)\n', (143, 146), True, 'import numpy as np\n'), ((154, 166), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (164, 166), True, 'import matplotlib.pyplot as plt\n'), ((316, 326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (324, 326), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from typing import List
from framework.DataObjects import MetaData, PkmFullTeam, GameStateView
from framework.DataConstants import DEFAULT_PARTY_SIZE, TYPE_CHART_MULTIPLIER, DEFAULT_PKM_N_MOVES
from framework.DataTypes import PkmStat
from framework.behaviour import BattlePolicy
from framework.behaviour.BattlePolicies import estimate_damage
from framework.behaviour.DataAggregators import NullDataAggregator
from framework.competition.CompetitionObjects import Competitor
class DQNBattlePolicy(BattlePolicy):
def requires_encode(self) -> bool:
return False
def close(self):
pass
def get_action(self, g: GameStateView) -> int:
# check weather condition
weather = g.weather_condition
# get my team
my_team = g.get_team_view(0)
my_active = my_team.active_pkm_view
my_active_type = my_active.type
my_party = [my_team.get_party_pkm_view(i) for i in range(DEFAULT_PARTY_SIZE)]
my_active_moves = [my_active.get_move_view(i) for i in range(DEFAULT_PKM_N_MOVES)]
my_attack_stage = my_team.get_stage(PkmStat.ATTACK)
# get opp team
opp_team = g.get_team_view(1)
opp_active = opp_team.active_pkm_view
opp_active_type = opp_active.type
opp_active_hp = opp_active.hp
opp_defense_stage = opp_team.get_stage(PkmStat.DEFENSE)
# get best move
damage: List[float] = []
for move in my_active_moves:
damage.append(estimate_damage(move.type, my_active_type, move.power, opp_active_type, my_attack_stage, opp_defense_stage, weather))
move_id = int(np.argmax(damage))
# switch decision
best_pkm = 0
if opp_active_hp > damage[move_id]:
effectiveness_to_stay = TYPE_CHART_MULTIPLIER[my_active_type][opp_active_type]
for i, pkm in enumerate(my_party):
effectiveness_party = TYPE_CHART_MULTIPLIER[pkm.type][opp_active_type]
if effectiveness_party > effectiveness_to_stay and pkm.hp != 0.0:
effectiveness_to_stay = effectiveness_party
best_pkm = i
if best_pkm > 0:
move_id = DEFAULT_PKM_N_MOVES + best_pkm
return move_id
class DQN(Competitor):
def __init__(self, name: str = "DQN", team: PkmFullTeam = None):
self._name = name
self._battle_policy = DQNBattlePolicy()
self._team = team
@property
def name(self):
return self._name
def reset(self):
pass
@property
def battle_policy(self) -> BattlePolicy:
return self._battle_policy
@property
def meta_data(self) -> MetaData:
return NullDataAggregator.null_metadata
def want_to_change_team(self):
return True
@property
def team(self) -> PkmFullTeam:
return self._team
@team.setter
def team(self, team: PkmFullTeam):
self._team = team
| [
"framework.behaviour.BattlePolicies.estimate_damage",
"numpy.argmax"
] | [((1648, 1665), 'numpy.argmax', 'np.argmax', (['damage'], {}), '(damage)\n', (1657, 1665), True, 'import numpy as np\n'), ((1508, 1628), 'framework.behaviour.BattlePolicies.estimate_damage', 'estimate_damage', (['move.type', 'my_active_type', 'move.power', 'opp_active_type', 'my_attack_stage', 'opp_defense_stage', 'weather'], {}), '(move.type, my_active_type, move.power, opp_active_type,\n my_attack_stage, opp_defense_stage, weather)\n', (1523, 1628), False, 'from framework.behaviour.BattlePolicies import estimate_damage\n')] |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# MAKE GREENWICH CENTERED AOI_MASK FOR CRU 10min RUNS THAT WE USE
# WHEN CORRECTING PRECIP VALUES THAT ARE WAAAY TOO HIGH.
#
# <NAME> (April 2018)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def shiftgrid( lon0, datain, lonsin, start=True, cyclic=360.0 ):
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0 # [ML] THIS COULD BE THE LINE GETTING US!!!
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
def rotate( dat, lons, to_pacific=False ):
'''rotate longitudes in WGS84 Global Extent'''
if to_pacific == True:
# to 0 - 360
dat, lons = shiftgrid( 0., dat, lons )
elif to_pacific == False:
# to -180.0 - 180.0
dat, lons = shiftgrid( 180., dat, lons, start=False )
else:
raise AttributeError( 'to_pacific must be boolean True:False' )
return dat, lons
def transform_from_latlon( lat, lon ):
''' simple way to make an affine transform from lats and lons coords '''
from affine import Affine
lat = np.asarray( lat )
lon = np.asarray( lon )
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
def rasterize( shapes, coords, latitude='lat', longitude='lon', fill=None, **kwargs ):
'''
Rasterize a list of (geometry, fill_value) tuples onto the given
xarray coordinates. This only works for 1d latitude and longitude
arrays.
ARGUMENTS:
----------
shapes = [list] of tuples of (shapely.geom, fill_value)
coords = [dict] of named 1d latitude and longitude arrays.
latitude = [str] name of latitude key. default:'latitude'
longitude = [str] name of longitude key. default:'longitude'
fill = fill_value
RETURNS:
--------
xarray.DataArray
'''
from rasterio import features
import xarray as xr
if fill == None:
fill = np.nan
transform = transform_from_latlon( coords[ latitude ], coords[ longitude ] )
out_shape = ( len( coords[ latitude ] ), len( coords[ longitude ] ) )
raster = features.rasterize( shapes, out_shape=out_shape,
fill=fill, transform=transform,
dtype=float, **kwargs )
# spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]}
# return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude))
return raster
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
if __name__ == '__main__':
import os
import xarray as xr
import geopandas as gpd
import numpy as np
import rasterio
from shapely.geometry import Polygon
# get raw cru ts40
fn = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS40/cru_ts4.00.1901.2015.pre.dat.nc.gz'
shp_fn = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/masks/pcll_template_10min_extent_with_nwt.shp'
# open it
ds = xr.open_dataset(fn)
shp = gpd.read_file( shp_fn )
# rotate it to pcll
dat = np.flipud(ds.pre[0,...].data)
lons = np.array(ds.lon)
dat_pc, lons_pc = rotate( dat, lons, to_pacific=True )
coords = {'lon':lons_pc, 'lat':np.flipud(np.array(ds.lat))}
# rasterize using the pcll file with
shapes = [ (i,1) for i in shp.geometry ]
rst = rasterize( shapes, coords, latitude='lat', longitude='lon', fill=0 )
# rotate back to GCLL
dat_gc, lons_gc = rotate( rst, lons_pc, to_pacific=False )
height,width = dat_gc.shape
meta = {
'driver':'GTiff',
'height':height,
'width':width,
'count':1,
'crs':{'init':'epsg:4326'},
'dtype':'float32',
'transform':transform_from_latlon( coords[ 'lat' ], lons_gc ),
'compress':'lzw'
}
with rasterio.open( '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/TEST_GCLL_CRU.tif', 'w', **meta ) as tmp:
tmp.write( dat_gc.astype(np.float32), 1 )
# polygonize and make a polygon from the bounds...
new_ext,val = [ i for i in rasterio.features.shapes( dat_gc.astype(np.float32), mask=dat_gc==1, transform=meta['transform']) ][0]
pol = Polygon( bounds_to_extent( Polygon(new_ext['coordinates'][0]).bounds ) )
# make a geodataframe and dump out as a shapefile
new_df = gpd.GeoDataFrame({'id':[1],'geometry':[pol]}, crs={'init':'epsg:4326'}, geometry='geometry' )
new_df.to_file( shp_fn.replace('pcll','gcll') )
| [
"rasterio.open",
"shapely.geometry.Polygon",
"numpy.asarray",
"affine.Affine.translation",
"xarray.open_dataset",
"affine.Affine.scale",
"numpy.flipud",
"numpy.zeros",
"geopandas.GeoDataFrame",
"numpy.fabs",
"numpy.array",
"numpy.ma.zeros",
"numpy.ma.isMA",
"rasterio.features.rasterize",
... | [((1913, 1931), 'numpy.ma.isMA', 'np.ma.isMA', (['datain'], {}), '(datain)\n', (1923, 1931), True, 'import numpy as np\n'), ((2045, 2063), 'numpy.ma.isMA', 'np.ma.isMA', (['lonsin'], {}), '(lonsin)\n', (2055, 2063), True, 'import numpy as np\n'), ((3045, 3060), 'numpy.asarray', 'np.asarray', (['lat'], {}), '(lat)\n', (3055, 3060), True, 'import numpy as np\n'), ((3070, 3085), 'numpy.asarray', 'np.asarray', (['lon'], {}), '(lon)\n', (3080, 3085), True, 'import numpy as np\n'), ((3097, 3131), 'affine.Affine.translation', 'Affine.translation', (['lon[0]', 'lat[0]'], {}), '(lon[0], lat[0])\n', (3115, 3131), False, 'from affine import Affine\n'), ((3141, 3187), 'affine.Affine.scale', 'Affine.scale', (['(lon[1] - lon[0])', '(lat[1] - lat[0])'], {}), '(lon[1] - lon[0], lat[1] - lat[0])\n', (3153, 3187), False, 'from affine import Affine\n'), ((4017, 4124), 'rasterio.features.rasterize', 'features.rasterize', (['shapes'], {'out_shape': 'out_shape', 'fill': 'fill', 'transform': 'transform', 'dtype': 'float'}), '(shapes, out_shape=out_shape, fill=fill, transform=\n transform, dtype=float, **kwargs)\n', (4035, 4124), False, 'from rasterio import features\n'), ((4887, 4906), 'xarray.open_dataset', 'xr.open_dataset', (['fn'], {}), '(fn)\n', (4902, 4906), True, 'import xarray as xr\n'), ((4914, 4935), 'geopandas.read_file', 'gpd.read_file', (['shp_fn'], {}), '(shp_fn)\n', (4927, 4935), True, 'import geopandas as gpd\n'), ((4967, 4997), 'numpy.flipud', 'np.flipud', (['ds.pre[0, ...].data'], {}), '(ds.pre[0, ...].data)\n', (4976, 4997), True, 'import numpy as np\n'), ((5005, 5021), 'numpy.array', 'np.array', (['ds.lon'], {}), '(ds.lon)\n', (5013, 5021), True, 'import numpy as np\n'), ((6127, 6227), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'id': [1], 'geometry': [pol]}"], {'crs': "{'init': 'epsg:4326'}", 'geometry': '"""geometry"""'}), "({'id': [1], 'geometry': [pol]}, crs={'init': 'epsg:4326'},\n geometry='geometry')\n", (6143, 6227), True, 'import geopandas as gpd\n'), ((1502, 1542), 'numpy.fabs', 'np.fabs', (['(lonsin[-1] - lonsin[0] - cyclic)'], {}), '(lonsin[-1] - lonsin[0] - cyclic)\n', (1509, 1542), True, 'import numpy as np\n'), ((1816, 1838), 'numpy.fabs', 'np.fabs', (['(lonsin - lon0)'], {}), '(lonsin - lon0)\n', (1823, 1838), True, 'import numpy as np\n'), ((1946, 1985), 'numpy.ma.zeros', 'np.ma.zeros', (['datain.shape', 'datain.dtype'], {}), '(datain.shape, datain.dtype)\n', (1957, 1985), True, 'import numpy as np\n'), ((2005, 2041), 'numpy.zeros', 'np.zeros', (['datain.shape', 'datain.dtype'], {}), '(datain.shape, datain.dtype)\n', (2013, 2041), True, 'import numpy as np\n'), ((2077, 2116), 'numpy.ma.zeros', 'np.ma.zeros', (['lonsin.shape', 'lonsin.dtype'], {}), '(lonsin.shape, lonsin.dtype)\n', (2088, 2116), True, 'import numpy as np\n'), ((2135, 2171), 'numpy.zeros', 'np.zeros', (['lonsin.shape', 'lonsin.dtype'], {}), '(lonsin.shape, lonsin.dtype)\n', (2143, 2171), True, 'import numpy as np\n'), ((5637, 5756), 'rasterio.open', 'rasterio.open', (['"""/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/TEST_GCLL_CRU.tif"""', '"""w"""'], {}), "(\n '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/TEST_GCLL_CRU.tif'\n , 'w', **meta)\n", (5650, 5756), False, 'import rasterio\n'), ((5123, 5139), 'numpy.array', 'np.array', (['ds.lat'], {}), '(ds.lat)\n', (5131, 5139), True, 'import numpy as np\n'), ((6019, 6053), 'shapely.geometry.Polygon', 'Polygon', (["new_ext['coordinates'][0]"], {}), "(new_ext['coordinates'][0])\n", (6026, 6053), False, 'from shapely.geometry import Polygon\n')] |
#!/usr/bin/env python
"""
Verify DWT perfect reconstruction.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, run_module_suite
import pywt
def test_perfect_reconstruction():
families = ('db', 'sym', 'coif', 'bior', 'rbio')
wavelets = sum([pywt.wavelist(name) for name in families], [])
# list of mode names in pywt and matlab
modes = [('zero', 'zpd'),
('constant', 'sp0'),
('symmetric', 'sym'),
('periodic', 'ppd'),
('smooth', 'sp1'),
('periodization', 'per')]
dtypes = (np.float32, np.float64)
for wavelet in wavelets:
for pmode, mmode in modes:
for dt in dtypes:
yield check_reconstruction, pmode, mmode, wavelet, dt
def check_reconstruction(pmode, mmode, wavelet, dtype):
data_size = list(range(2, 40)) + [100, 200, 500, 1000, 2000, 10000,
50000, 100000]
np.random.seed(12345)
# TODO: smoke testing - more failures for different seeds
if dtype == np.float32:
# was 3e-7 has to be lowered as db21, db29, db33, db35, coif14, coif16 were failing
epsilon = 6e-7
else:
epsilon = 5e-11
for N in data_size:
data = np.asarray(np.random.random(N), dtype)
# compute dwt coefficients
pa, pd = pywt.dwt(data, wavelet, pmode)
# compute reconstruction
rec = pywt.idwt(pa, pd, wavelet, pmode)
if len(data) % 2:
rec = rec[:len(data)]
rms_rec = np.sqrt(np.mean((data-rec)**2))
msg = ('[RMS_REC > EPSILON] for Mode: %s, Wavelet: %s, '
'Length: %d, rms=%.3g' % (pmode, wavelet, len(data), rms_rec))
assert_(rms_rec < epsilon, msg=msg)
if __name__ == '__main__':
run_module_suite()
| [
"numpy.random.seed",
"numpy.testing.run_module_suite",
"pywt.idwt",
"pywt.dwt",
"numpy.testing.assert_",
"numpy.random.random",
"numpy.mean",
"pywt.wavelist"
] | [((1013, 1034), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (1027, 1034), True, 'import numpy as np\n'), ((1852, 1870), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (1868, 1870), False, 'from numpy.testing import assert_, run_module_suite\n'), ((1407, 1437), 'pywt.dwt', 'pywt.dwt', (['data', 'wavelet', 'pmode'], {}), '(data, wavelet, pmode)\n', (1415, 1437), False, 'import pywt\n'), ((1486, 1519), 'pywt.idwt', 'pywt.idwt', (['pa', 'pd', 'wavelet', 'pmode'], {}), '(pa, pd, wavelet, pmode)\n', (1495, 1519), False, 'import pywt\n'), ((1783, 1818), 'numpy.testing.assert_', 'assert_', (['(rms_rec < epsilon)'], {'msg': 'msg'}), '(rms_rec < epsilon, msg=msg)\n', (1790, 1818), False, 'from numpy.testing import assert_, run_module_suite\n'), ((327, 346), 'pywt.wavelist', 'pywt.wavelist', (['name'], {}), '(name)\n', (340, 346), False, 'import pywt\n'), ((1326, 1345), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (1342, 1345), True, 'import numpy as np\n'), ((1608, 1634), 'numpy.mean', 'np.mean', (['((data - rec) ** 2)'], {}), '((data - rec) ** 2)\n', (1615, 1634), True, 'import numpy as np\n')] |
import copy
import cv2
import numpy as np
import matplotlib.pyplot as plt
class BoundingBox():
def __init__(self, xmin, ymin, xmax, ymax, score=0.):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.score = score
self.asList = [self.xmin, self.ymin, self.xmax, self.ymax, self.score]
def getList(self):
return self.asList
def getBox(self):
return np.asarray(self.asList)
class TrackerArgs(object):
def __init__(self):
self.track_thresh = 0.4
self.track_buffer = 5
self.match_thresh = 0.9
self.min_box_area = 10
self.mot20 = False
def alignImages(im1, im2, max_features=500, good_match_percent=0.15, norm_bit=True, turnGray=False,
warpMethod=cv2.RANSAC):
""" Aligns image 1 to image 2, based off https://www.learnopencv.com/image-alignment-feature-based-using-opencv-c-python/
Inputs:
im1 - misaligned image
im2 - Image to align im1 too
max_features - max number of features for alignment
good_match_percent - percent of good features to choose
norm_bit - boolean to convert images to 8 bit and normalize
turnGray - boolean to convert images to grayscale
warpMethod - method for transformation via opencv (best on SW data is cv2.RHO)
Returns:
im1Reg - Registered image 1
h - calculated homography matrix
"""
# Convert images to grayscale
if turnGray:
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
else:
im1Gray = copy.deepcopy(im1)
im2Gray = copy.deepcopy(im2)
if norm_bit:
im1Gray = cv2.normalize(im1Gray, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
im2Gray = cv2.normalize(im2Gray, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
# Detect Sift features and compute descriptors.
sift = cv2.SIFT_create(max_features)
keypoints1, descriptors1 = sift.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = sift.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_L1)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * good_match_percent)
increment = 0.1
while numGoodMatches < 4 and good_match_percent < 1.0:
good_match_percent += increment
numGoodMatches = int(len(matches) * good_match_percent)
assert numGoodMatches >= 4, "Couldn't register images!"
matches = matches[:numGoodMatches]
# Draw top matches
# imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, warpMethod)
if h is None:
return np.zeros_like(im1), None
# Use homography
height, width = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height), borderMode=cv2.BORDER_CONSTANT,
borderValue=np.asscalar(im1.min()))
return im1Reg, h
def plotOpticalFlow(flow, title=""):
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv = np.zeros((flow.shape[0], flow.shape[1], 3))
hsv[..., 1] = 255
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
hsv = hsv.astype(np.uint8)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
plt.imshow(bgr)
plt.title(title)
plt.show()
return
def plotBoxes(data, boxes, thresh=0., title="", show=False):
# middle = data[..., data.shape[-1] // 2]
middle = data[..., -1]
middle = np.dstack([middle, middle, middle])
middle = cv2.normalize(middle, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
for box in boxes:
if box.score > thresh:
middle = cv2.rectangle(middle, (box.xmin, box.ymin), (box.xmax, box.ymax), (0, 255, 0), 2)
if show:
plt.imshow(middle)
plt.title(title)
plt.show()
return middle
def plotArxResponse(arx, title=""):
plt.imshow(arx, cmap='gray', extent=[0, 1, 0, 1])
plt.title(title)
plt.show()
return
def fastPdet2(x, nDets, windowH=20, windowW=40):
h, w = x.shape
r, c, score = [], [], []
vmin = np.min(x)
halfW = windowW // 2
halfH = windowH // 2
for i in range(nDets):
currScore = np.max(x)
idx = np.where(x == currScore)
row, col = idx[0][0].item(), idx[1][0].item()
r.append(row)
c.append(col)
score.append(currScore)
r1 = max(row - halfH, 0)
r2 = min(r1 + windowH, h)
r1 = r2 - windowH
c1 = max(col - halfW, 0)
c2 = min(c1 + windowW, w)
c1 = c2 - windowW
x[r1:r2, c1:c2] = np.ones((windowH, windowW)) * vmin
return r, c, np.asarray(score)
def makeVideo(frames, name):
h, w, c = frames[0].shape
outVideo = cv2.VideoWriter(name,
cv2.VideoWriter_fourcc(*'DIVX'), 1, (w, h))
for frame in frames:
outVideo.write(frame)
outVideo.release()
return
# Malisiewicz et al.
def non_max_suppression_fast(bboxes, overlapThresh):
boxes = [box.getBox() for box in bboxes]
boxes = np.asarray(boxes)
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked using the
# integer data type
return [bboxes[i] for i in pick]
| [
"matplotlib.pyplot.title",
"numpy.maximum",
"cv2.VideoWriter_fourcc",
"numpy.ones",
"numpy.argsort",
"cv2.DescriptorMatcher_create",
"cv2.SIFT_create",
"cv2.normalize",
"cv2.rectangle",
"numpy.zeros_like",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"numpy.max",
"numpy.dstack",
"copy.dee... | [((2061, 2090), 'cv2.SIFT_create', 'cv2.SIFT_create', (['max_features'], {}), '(max_features)\n', (2076, 2090), False, 'import cv2\n'), ((2264, 2330), 'cv2.DescriptorMatcher_create', 'cv2.DescriptorMatcher_create', (['cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_L1'], {}), '(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_L1)\n', (2292, 2330), False, 'import cv2\n'), ((3311, 3359), 'cv2.findHomography', 'cv2.findHomography', (['points1', 'points2', 'warpMethod'], {}), '(points1, points2, warpMethod)\n', (3329, 3359), False, 'import cv2\n'), ((3705, 3748), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (3720, 3748), False, 'import cv2\n'), ((3759, 3802), 'numpy.zeros', 'np.zeros', (['(flow.shape[0], flow.shape[1], 3)'], {}), '((flow.shape[0], flow.shape[1], 3))\n', (3767, 3802), True, 'import numpy as np\n'), ((3883, 3949), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {'dtype': 'cv2.CV_8U'}), '(mag, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n', (3896, 3949), False, 'import cv2\n'), ((3991, 4027), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (4003, 4027), False, 'import cv2\n'), ((4032, 4047), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bgr'], {}), '(bgr)\n', (4042, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4052, 4068), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4061, 4068), True, 'import matplotlib.pyplot as plt\n'), ((4073, 4083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4081, 4083), True, 'import matplotlib.pyplot as plt\n'), ((4244, 4279), 'numpy.dstack', 'np.dstack', (['[middle, middle, middle]'], {}), '([middle, middle, middle])\n', (4253, 4279), True, 'import numpy as np\n'), ((4293, 4387), 'cv2.normalize', 'cv2.normalize', (['middle', 'None'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8U'}), '(middle, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_8U)\n', (4306, 4387), False, 'import cv2\n'), ((4685, 4734), 'matplotlib.pyplot.imshow', 'plt.imshow', (['arx'], {'cmap': '"""gray"""', 'extent': '[0, 1, 0, 1]'}), "(arx, cmap='gray', extent=[0, 1, 0, 1])\n", (4695, 4734), True, 'import matplotlib.pyplot as plt\n'), ((4739, 4755), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4748, 4755), True, 'import matplotlib.pyplot as plt\n'), ((4760, 4770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4768, 4770), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4901), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (4898, 4901), True, 'import numpy as np\n'), ((5855, 5872), 'numpy.asarray', 'np.asarray', (['boxes'], {}), '(boxes)\n', (5865, 5872), True, 'import numpy as np\n'), ((6540, 6554), 'numpy.argsort', 'np.argsort', (['y2'], {}), '(y2)\n', (6550, 6554), True, 'import numpy as np\n'), ((449, 472), 'numpy.asarray', 'np.asarray', (['self.asList'], {}), '(self.asList)\n', (459, 472), True, 'import numpy as np\n'), ((1573, 1610), 'cv2.cvtColor', 'cv2.cvtColor', (['im1', 'cv2.COLOR_BGR2GRAY'], {}), '(im1, cv2.COLOR_BGR2GRAY)\n', (1585, 1610), False, 'import cv2\n'), ((1629, 1666), 'cv2.cvtColor', 'cv2.cvtColor', (['im2', 'cv2.COLOR_BGR2GRAY'], {}), '(im2, cv2.COLOR_BGR2GRAY)\n', (1641, 1666), False, 'import cv2\n'), ((1695, 1713), 'copy.deepcopy', 'copy.deepcopy', (['im1'], {}), '(im1)\n', (1708, 1713), False, 'import copy\n'), ((1732, 1750), 'copy.deepcopy', 'copy.deepcopy', (['im2'], {}), '(im2)\n', (1745, 1750), False, 'import copy\n'), ((1787, 1887), 'cv2.normalize', 'cv2.normalize', (['im1Gray'], {'dst': 'None', 'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8U'}), '(im1Gray, dst=None, alpha=0, beta=255, norm_type=cv2.\n NORM_MINMAX, dtype=cv2.CV_8U)\n', (1800, 1887), False, 'import cv2\n'), ((1901, 2001), 'cv2.normalize', 'cv2.normalize', (['im2Gray'], {'dst': 'None', 'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8U'}), '(im2Gray, dst=None, alpha=0, beta=255, norm_type=cv2.\n NORM_MINMAX, dtype=cv2.CV_8U)\n', (1914, 2001), False, 'import cv2\n'), ((4562, 4580), 'matplotlib.pyplot.imshow', 'plt.imshow', (['middle'], {}), '(middle)\n', (4572, 4580), True, 'import matplotlib.pyplot as plt\n'), ((4589, 4605), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4598, 4605), True, 'import matplotlib.pyplot as plt\n'), ((4614, 4624), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4622, 4624), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5008), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (5005, 5008), True, 'import numpy as np\n'), ((5023, 5047), 'numpy.where', 'np.where', (['(x == currScore)'], {}), '(x == currScore)\n', (5031, 5047), True, 'import numpy as np\n'), ((5442, 5459), 'numpy.asarray', 'np.asarray', (['score'], {}), '(score)\n', (5452, 5459), True, 'import numpy as np\n'), ((5589, 5620), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (5611, 5620), False, 'import cv2\n'), ((7028, 7062), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[idxs[:last]]'], {}), '(x1[i], x1[idxs[:last]])\n', (7038, 7062), True, 'import numpy as np\n'), ((7077, 7111), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[idxs[:last]]'], {}), '(y1[i], y1[idxs[:last]])\n', (7087, 7111), True, 'import numpy as np\n'), ((7126, 7160), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[idxs[:last]]'], {}), '(x2[i], x2[idxs[:last]])\n', (7136, 7160), True, 'import numpy as np\n'), ((7175, 7209), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[idxs[:last]]'], {}), '(y2[i], y2[idxs[:last]])\n', (7185, 7209), True, 'import numpy as np\n'), ((7281, 7309), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1 + 1)'], {}), '(0, xx2 - xx1 + 1)\n', (7291, 7309), True, 'import numpy as np\n'), ((7322, 7350), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1 + 1)'], {}), '(0, yy2 - yy1 + 1)\n', (7332, 7350), True, 'import numpy as np\n'), ((3393, 3411), 'numpy.zeros_like', 'np.zeros_like', (['im1'], {}), '(im1)\n', (3406, 3411), True, 'import numpy as np\n'), ((4458, 4544), 'cv2.rectangle', 'cv2.rectangle', (['middle', '(box.xmin, box.ymin)', '(box.xmax, box.ymax)', '(0, 255, 0)', '(2)'], {}), '(middle, (box.xmin, box.ymin), (box.xmax, box.ymax), (0, 255, \n 0), 2)\n', (4471, 4544), False, 'import cv2\n'), ((5390, 5417), 'numpy.ones', 'np.ones', (['(windowH, windowW)'], {}), '((windowH, windowW))\n', (5397, 5417), True, 'import numpy as np\n'), ((7597, 7630), 'numpy.where', 'np.where', (['(overlap > overlapThresh)'], {}), '(overlap > overlapThresh)\n', (7605, 7630), True, 'import numpy as np\n')] |
"""test cost"""
import numpy as np
from neuralink.cost import Cost
from .utils import single_test
def test_compute_cost1():
np.random.seed(1)
Y = np.random.randn(1, 5) > 0
A2 = np.array([[0.5002307, 0.49985831, 0.50023963, 0.25, 0.7]])
expected_output = 0.5447066599017815
output = Cost().compute(A2, Y)
assert type(output) == float, "Wrong type. Float expected"
assert np.isclose(
output, expected_output
), f"Wrong value. Expected: {expected_output} got: {output}"
def test_compute_cost2():
Y = np.asarray([[1, 1, 0]])
AL = np.array([[0.8, 0.9, 0.4]])
expected_output = np.array(0.27977656)
test_cases = [
{
"name": "equation_output_check",
"input": [AL, Y],
"expected": expected_output,
"error": "Wrong output",
}
]
single_test(test_cases, Cost().compute)
def test_compute_cost_with_regularization():
np.random.seed(1)
Y = np.array([[1, 1, 0, 1, 0]])
W1 = np.random.randn(2, 3)
b1 = np.random.randn(2, 1)
W2 = np.random.randn(3, 2)
b2 = np.random.randn(3, 1)
W3 = np.random.randn(1, 3)
b3 = np.random.randn(1, 1)
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3}
A3 = np.array([[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])
lambd = 0.1
expected_output = np.float64(1.7864859451590758)
test_cases = [
{
"name": "shape_check",
"input": [A3, Y, parameters, lambd],
"expected": expected_output,
"error": "Wrong shape",
},
{
"name": "equation_output_check",
"input": [A3, Y, parameters, lambd],
"expected": expected_output,
"error": "Wrong output",
},
]
single_test(test_cases, Cost().compute)
| [
"numpy.random.seed",
"numpy.random.randn",
"numpy.asarray",
"neuralink.cost.Cost",
"numpy.isclose",
"numpy.array",
"numpy.float64"
] | [((132, 149), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (146, 149), True, 'import numpy as np\n'), ((193, 251), 'numpy.array', 'np.array', (['[[0.5002307, 0.49985831, 0.50023963, 0.25, 0.7]]'], {}), '([[0.5002307, 0.49985831, 0.50023963, 0.25, 0.7]])\n', (201, 251), True, 'import numpy as np\n'), ((404, 439), 'numpy.isclose', 'np.isclose', (['output', 'expected_output'], {}), '(output, expected_output)\n', (414, 439), True, 'import numpy as np\n'), ((549, 572), 'numpy.asarray', 'np.asarray', (['[[1, 1, 0]]'], {}), '([[1, 1, 0]])\n', (559, 572), True, 'import numpy as np\n'), ((582, 609), 'numpy.array', 'np.array', (['[[0.8, 0.9, 0.4]]'], {}), '([[0.8, 0.9, 0.4]])\n', (590, 609), True, 'import numpy as np\n'), ((632, 652), 'numpy.array', 'np.array', (['(0.27977656)'], {}), '(0.27977656)\n', (640, 652), True, 'import numpy as np\n'), ((948, 965), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (962, 965), True, 'import numpy as np\n'), ((974, 1001), 'numpy.array', 'np.array', (['[[1, 1, 0, 1, 0]]'], {}), '([[1, 1, 0, 1, 0]])\n', (982, 1001), True, 'import numpy as np\n'), ((1011, 1032), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1026, 1032), True, 'import numpy as np\n'), ((1042, 1063), 'numpy.random.randn', 'np.random.randn', (['(2)', '(1)'], {}), '(2, 1)\n', (1057, 1063), True, 'import numpy as np\n'), ((1073, 1094), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)'], {}), '(3, 2)\n', (1088, 1094), True, 'import numpy as np\n'), ((1104, 1125), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (1119, 1125), True, 'import numpy as np\n'), ((1135, 1156), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (1150, 1156), True, 'import numpy as np\n'), ((1166, 1187), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (1181, 1187), True, 'import numpy as np\n'), ((1275, 1347), 'numpy.array', 'np.array', (['[[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]]'], {}), '([[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])\n', (1283, 1347), True, 'import numpy as np\n'), ((1386, 1416), 'numpy.float64', 'np.float64', (['(1.7864859451590758)'], {}), '(1.7864859451590758)\n', (1396, 1416), True, 'import numpy as np\n'), ((158, 179), 'numpy.random.randn', 'np.random.randn', (['(1)', '(5)'], {}), '(1, 5)\n', (173, 179), True, 'import numpy as np\n'), ((307, 313), 'neuralink.cost.Cost', 'Cost', ([], {}), '()\n', (311, 313), False, 'from neuralink.cost import Cost\n'), ((881, 887), 'neuralink.cost.Cost', 'Cost', ([], {}), '()\n', (885, 887), False, 'from neuralink.cost import Cost\n'), ((1846, 1852), 'neuralink.cost.Cost', 'Cost', ([], {}), '()\n', (1850, 1852), False, 'from neuralink.cost import Cost\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from functools import partial
from itertools import product
import pymc3 as pm
from src.globs import beta_std
df = pd.read_json('data/processed/processed_data_online.json')
traces = {}
for i, (touch, d) in enumerate(df.groupby('occluded_contact')):
with pm.Model() as logistic_model:
a = pm.Normal('a', 0, 10)
b = pm.Normal('b', 0, 10)
x = d['partial_mse'] - d['partial_mse'].mean()
x = x / x.max()
p = 1 / (1 + np.exp(-(a + b * x)))
s = pm.Bernoulli('s', p=p, observed=d['result'])
trace = pm.sample(1000, tune=1000, init='adapt_diag')
traces[touch] = trace
group = 'test_part'
c = ['blue', 'green']
legend = {
True: 'Contact during Occlusion',
False: 'No Contact during Occlusion'
}
data = df
fig, ax = plt.subplots()
for i, (touch, d) in enumerate(data.groupby('occluded_contact')):
gb = d.groupby('model')
x = np.array(gb.X.mean())
xerr = np.array(gb.X.sem())
y = np.array(gb.result.mean())
yerr = np.array(gb.result.agg(beta_std))
ax.errorbar(x,
y,
xerr=xerr,
yerr=yerr,
fmt='.',
label=legend[touch],
alpha=0.3,
color=c[i])
trace = traces[touch]
xpred = np.linspace(-0.04, 0.11)
for _ in range(20):
j = np.random.choice(range(2000))
a, b = trace['a'][j], trace['b'][j]
ypred = 1 / (1 + np.exp(-(a + b * xpred)))
ax.plot(xpred, ypred, alpha=0.1, color=c[i])
plt.title('Effect of Contact on Predictability')
plt.xlabel('Centered MSE')
plt.ylabel("Confusion Rate")
plt.legend()
plt.tight_layout()
plt.savefig('reports/figures/fig6.pdf')
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"pymc3.sample",
"pymc3.Model",
"pymc3.Bernoulli",
"matplotlib.pyplot.legend",
"pymc3.Normal",
"pandas.read_json",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
... | [((187, 244), 'pandas.read_json', 'pd.read_json', (['"""data/processed/processed_data_online.json"""'], {}), "('data/processed/processed_data_online.json')\n", (199, 244), True, 'import pandas as pd\n'), ((851, 865), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (863, 865), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1636), 'matplotlib.pyplot.title', 'plt.title', (['"""Effect of Contact on Predictability"""'], {}), "('Effect of Contact on Predictability')\n", (1597, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1663), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Centered MSE"""'], {}), "('Centered MSE')\n", (1647, 1663), True, 'import matplotlib.pyplot as plt\n'), ((1664, 1692), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Confusion Rate"""'], {}), "('Confusion Rate')\n", (1674, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1705), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1703, 1705), True, 'import matplotlib.pyplot as plt\n'), ((1706, 1724), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1722, 1724), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1764), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""reports/figures/fig6.pdf"""'], {}), "('reports/figures/fig6.pdf')\n", (1736, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1373), 'numpy.linspace', 'np.linspace', (['(-0.04)', '(0.11)'], {}), '(-0.04, 0.11)\n', (1360, 1373), True, 'import numpy as np\n'), ((331, 341), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (339, 341), True, 'import pymc3 as pm\n'), ((373, 394), 'pymc3.Normal', 'pm.Normal', (['"""a"""', '(0)', '(10)'], {}), "('a', 0, 10)\n", (382, 394), True, 'import pymc3 as pm\n'), ((407, 428), 'pymc3.Normal', 'pm.Normal', (['"""b"""', '(0)', '(10)'], {}), "('b', 0, 10)\n", (416, 428), True, 'import pymc3 as pm\n'), ((563, 607), 'pymc3.Bernoulli', 'pm.Bernoulli', (['"""s"""'], {'p': 'p', 'observed': "d['result']"}), "('s', p=p, observed=d['result'])\n", (575, 607), True, 'import pymc3 as pm\n'), ((624, 669), 'pymc3.sample', 'pm.sample', (['(1000)'], {'tune': '(1000)', 'init': '"""adapt_diag"""'}), "(1000, tune=1000, init='adapt_diag')\n", (633, 669), True, 'import pymc3 as pm\n'), ((529, 549), 'numpy.exp', 'np.exp', (['(-(a + b * x))'], {}), '(-(a + b * x))\n', (535, 549), True, 'import numpy as np\n'), ((1509, 1533), 'numpy.exp', 'np.exp', (['(-(a + b * xpred))'], {}), '(-(a + b * xpred))\n', (1515, 1533), True, 'import numpy as np\n')] |
import lzma
from collections import OrderedDict
from importlib.resources import open_binary
from io import StringIO
from token import NAME
from tokenize import TokenError, generate_tokens
from types import SimpleNamespace
import msgpack
import numpy as np
from ..token_map import DirtyMap, PrefixTokenMap, TokenMap
from ..utility import p, to_key_value_columns
NOT_APPLICABLE = -99999
MAX = 99999
UNIT_SCALING_FACTOR = 10000
EPSILON = .001
MAX_SCAN_LINES = 20
_EMPTY = tuple()
def _load_token_statistics(file_name):
with open_binary('akimous.resources', file_name) as f1:
with lzma.open(f1, 'rb') as f2:
return msgpack.unpack(f2, use_list=False, raw=False, strict_map_key=False)
class FeatureDefinition:
preprocessors = []
context_features = OrderedDict()
completion_features = OrderedDict()
completion_feature_indices_require_normalization = []
context_names_required_by_preprocessors = OrderedDict()
token_frequency = _load_token_statistics('token.xz')
bigram_frequency = _load_token_statistics('bigram.xz')
trigram_frequency = _load_token_statistics('trigram.xz')
@staticmethod
def register_feature_generator(feature_name,
is_context_feature=False,
normalized=False):
def inner(f):
if is_context_feature:
FeatureDefinition.context_features[feature_name] = f
if normalized:
raise NotImplementedError
else:
FeatureDefinition.completion_features[feature_name] = f
if normalized:
FeatureDefinition.completion_feature_indices_require_normalization.append(
len(FeatureDefinition.completion_features) - 1)
return f
return inner
@staticmethod
def register_context_preprocessor_for_token_features(**context_names):
def inner(f):
FeatureDefinition.preprocessors.append(f)
FeatureDefinition.context_names_required_by_preprocessors = OrderedDict(
**FeatureDefinition.context_names_required_by_preprocessors,
**context_names)
return f
return inner
def __init__(self):
self.context = SimpleNamespace()
self.n_context_features = len(FeatureDefinition.context_features)
self.n_token_features = len(FeatureDefinition.completion_features)
self.n_features = self.n_context_features + self.n_token_features
self.normalized_features = np.array(
FeatureDefinition.completion_feature_indices_require_normalization
) + self.n_context_features
self.current_completion_start_index = 0
self.n_samples = 0
self.name_to_feature_index = OrderedDict()
for i, k in enumerate(FeatureDefinition.completion_features.keys()):
self.name_to_feature_index[k] = i
for i, k in enumerate(FeatureDefinition.context_features.keys()):
self.name_to_feature_index[k] = i + self.n_token_features
p(
to_key_value_columns(self.name_to_feature_index.keys(),
self.name_to_feature_index.values()))
for k, v in FeatureDefinition.context_names_required_by_preprocessors.items(
):
setattr(self.context, k, v())
# def get_stack_context_info(self, completion):
# '''
# Example:
# Code: ```def aaa(): pass
# def func(bbb='ccc'):
# ddd = aaa(bbb)
# eee = func(bbb=
# ```
# Stack:
# [<Name: eee@1,0>, # top_name
# <Operator: =>,
# <Name: func@1,6>, # func_name
# <Operator: (>,
# <Name: bbb@1,11>, # bottom_name
# <Operator: =>]
# '''
#
# result = {
# 'top_name': None,
# 'func_name': None,
# 'bottom_name': None,
# 'is_bottom_equal_sign': False,
# # 'top==func': True,
# # 'func==bottom': True
# }
# if not completion or not completion._stack:
# return result
# completion._stack
# stack = list(completion._stack.get_nodes())
# if not stack:
# return result
#
# for node in stack:
# with suppress(AttributeError):
# if node.type == 'name':
# result['top_name'] = node.value
# break
#
# for i in range(len(stack) - 1):
# with suppress(AttributeError):
# if stack[i + 1].value == '(' and stack[i].type == 'name':
# result['func_name'] = stack[i].value
# break
#
# for node in reversed(stack):
# with suppress(AttributeError):
# if node.type == 'name':
# result['bottom_name'] = node.value
# break
#
# with suppress(AttributeError):
# if stack[-1].value == '=':
# result['is_bottom_equal_sign'] = True
#
# return result
def normalize_feature(self):
if len(self.normalized_features) == 0:
return
data = self.X[self.current_completion_start_index:self.n_samples, self
.normalized_features]
for i in range(data.shape[1]):
column = data[:, i]
minimum = column.min()
maximum = column.max()
if maximum - minimum < EPSILON:
continue
data[:, i] = UNIT_SCALING_FACTOR * (column - minimum) / (maximum -
minimum)
self.X[self.current_completion_start_index:self.n_samples, self
.normalized_features] = data
# ch: 0-based
# line: 0-based
@FeatureDefinition.register_context_preprocessor_for_token_features(
casefolded_doc_lines=dict)
def f(doc, line, context, **_):
context.casefolded_doc_lines = {}
for l in range(0, min(line, MAX_SCAN_LINES)):
context.casefolded_doc_lines[line - l] = doc[line - l].casefold()
def tokenize(string):
result = []
try:
for token in generate_tokens(StringIO(string).readline):
if token.start == token.end:
continue
result.append(token)
except (StopIteration, TokenError):
pass
return result
@FeatureDefinition.register_context_preprocessor_for_token_features(
line_to_tokens=dict,
dirty_map=DirtyMap,
t0map=PrefixTokenMap,
t1map=TokenMap,
t2map=TokenMap,
t3map=TokenMap,
trigram_map=TokenMap,
)
def f(doc, context, line, ch, **_):
dirty_map = context.dirty_map
t0map = context.t0map
t1map = context.t1map
t2map = context.t2map
t3map = context.t3map
trigram_map = context.trigram_map
line_to_tokens = context.line_to_tokens
# tokenize dirty lines
dirty_lines = dirty_map.get_dirty_lines(doc)
for line_number in dirty_lines:
line_to_tokens[line_number] = tokenize(doc[line_number])
for line_number in dirty_lines:
line_content = doc[line_number]
for i in (t0map, t1map, t2map, t3map, trigram_map):
i.remove_line(line_number)
dirty_map.set_clear(line_number, line_content)
tokens0 = line_to_tokens.get(line_number, _EMPTY)
tokens1 = line_to_tokens.get(line_number - 1, _EMPTY)
t1, t2, t3 = '', '', ''
if tokens1:
t2 = tokens1[-1].string.strip()
if len(tokens1) > 1:
t3 = tokens1[-2].string.strip()
# leave t1 alone to indicate a line break
for token in tokens0:
t0 = token.string.strip()
if token.type == NAME and len(t0) > 3:
t0map.add(line_number, t0)
t1map.add(line_number, (t1, t0))
t2map.add(line_number, (t2, t0))
t3map.add(line_number, (t3, t0))
trigram_map.add(line_number, (t2, t1, t0))
t3, t2, t1 = t2, t1, t0
# get t1, t2
tokens0 = line_to_tokens.get(line, _EMPTY)
tokens1 = line_to_tokens.get(line - 1, _EMPTY)
context.t1 = ''
context.t2 = ''
context.t3 = ''
current_token_index = 0
# t1
if tokens0:
for current_token_index, token in enumerate(tokens0):
if token.end[1] >= ch + 1:
break
if current_token_index > 0:
context.t1 = tokens0[current_token_index - 1].string
# t2
if current_token_index >= 2:
context.t2 = tokens0[current_token_index - 2].string
elif tokens1:
context.t2 = tokens1[-1].string
# t3
if current_token_index >= 3:
context.t3 = tokens0[current_token_index - 3].string
elif len(tokens1) > 1:
context.t3 = tokens1[-2].string
| [
"lzma.open",
"io.StringIO",
"msgpack.unpack",
"numpy.array",
"collections.OrderedDict",
"types.SimpleNamespace",
"importlib.resources.open_binary"
] | [((781, 794), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (792, 794), False, 'from collections import OrderedDict\n'), ((821, 834), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (832, 834), False, 'from collections import OrderedDict\n'), ((939, 952), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (950, 952), False, 'from collections import OrderedDict\n'), ((530, 573), 'importlib.resources.open_binary', 'open_binary', (['"""akimous.resources"""', 'file_name'], {}), "('akimous.resources', file_name)\n", (541, 573), False, 'from importlib.resources import open_binary\n'), ((2304, 2321), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '()\n', (2319, 2321), False, 'from types import SimpleNamespace\n'), ((2818, 2831), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2829, 2831), False, 'from collections import OrderedDict\n'), ((594, 613), 'lzma.open', 'lzma.open', (['f1', '"""rb"""'], {}), "(f1, 'rb')\n", (603, 613), False, 'import lzma\n'), ((640, 707), 'msgpack.unpack', 'msgpack.unpack', (['f2'], {'use_list': '(False)', 'raw': '(False)', 'strict_map_key': '(False)'}), '(f2, use_list=False, raw=False, strict_map_key=False)\n', (654, 707), False, 'import msgpack\n'), ((2090, 2184), 'collections.OrderedDict', 'OrderedDict', ([], {}), '(**FeatureDefinition.context_names_required_by_preprocessors, **\n context_names)\n', (2101, 2184), False, 'from collections import OrderedDict\n'), ((2580, 2656), 'numpy.array', 'np.array', (['FeatureDefinition.completion_feature_indices_require_normalization'], {}), '(FeatureDefinition.completion_feature_indices_require_normalization)\n', (2588, 2656), True, 'import numpy as np\n'), ((6290, 6306), 'io.StringIO', 'StringIO', (['string'], {}), '(string)\n', (6298, 6306), False, 'from io import StringIO\n')] |
import argparse
import cv2
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
import albumentations as Aug
from model import FaceModel
from wider_face_dataset import img_size
parser = argparse.ArgumentParser(description='add batch size')
parser.add_argument('model_path', type=str, help='the path of your model')
parser.add_argument('image_path', type=str, help='the path of the image that u want to test')
args = parser.parse_args()
def nms(dets, thresh):
if dets.shape[0] == 0:
return []
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def post_process(face_locations):
grid_size = face_locations.shape[1]
size = img_size / grid_size
face_locations = face_locations.reshape(-1, 5)
output0 = []
for i, out in enumerate(face_locations):
if out[4] >= 0.02:
colm = i % grid_size
row = int(i / grid_size)
x = (out[0] + colm) * img_size / grid_size
y = (out[1] + row) * img_size / grid_size
w = out[2] * img_size
h = out[3] * img_size
k = [x-w/2, y-h/2, x+w/2, y+h/2]
k.append(out[4])
output0.append(k)
return output0
def draw(frame, face_location):
for k in face_location:
if k[4] >= 0.3:
k = list(map(int, k))
cv2.rectangle(frame, (k[0], k[1]), (k[2], k[3]), (256, 0, 0), 2)
cv2.imshow('image', frame)
cv2.waitKey(0)
model = FaceModel('resnet18').cuda()
model.load_state_dict(torch.load(args.model_path))
transforms = Aug.Compose([
Aug.Resize(img_size, img_size),
Aug.Normalize(),
ToTensorV2()])
image = cv2.imread(args.image_path)
orig_size = (image.shape[0], image.shape[1])
image2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = transforms(image=image2)
x = transformed['image']
x = x.unsqueeze(0).cuda()
output = model(x)
with torch.no_grad():
dets1 = post_process(torch.squeeze(output[0].cpu()))
dets2 = post_process(torch.squeeze(output[1].cpu()))
dets3 = post_process(torch.squeeze(output[2].cpu()))
dets = np.array(dets1 + dets2 + dets3)
keep = nms(dets, 0.25)
dets = dets[keep]
dets[..., 0] = dets[..., 0] * orig_size[1] / img_size
dets[..., 1] = dets[..., 1] * orig_size[0] / img_size
dets[..., 2] = dets[..., 2] * orig_size[1] / img_size
dets[..., 3] = dets[..., 3] * orig_size[0] / img_size
draw(image, dets)
| [
"albumentations.pytorch.ToTensorV2",
"model.FaceModel",
"numpy.minimum",
"numpy.maximum",
"argparse.ArgumentParser",
"albumentations.Resize",
"cv2.cvtColor",
"cv2.waitKey",
"torch.load",
"cv2.imread",
"numpy.where",
"numpy.array",
"cv2.rectangle",
"albumentations.Normalize",
"cv2.imshow"... | [((212, 265), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""add batch size"""'}), "(description='add batch size')\n", (235, 265), False, 'import argparse\n'), ((2316, 2343), 'cv2.imread', 'cv2.imread', (['args.image_path'], {}), '(args.image_path)\n', (2326, 2343), False, 'import cv2\n'), ((2398, 2436), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2410, 2436), False, 'import cv2\n'), ((2069, 2095), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'frame'], {}), "('image', frame)\n", (2079, 2095), False, 'import cv2\n'), ((2100, 2114), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2111, 2114), False, 'import cv2\n'), ((2175, 2202), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (2185, 2202), False, 'import torch\n'), ((2601, 2616), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2614, 2616), False, 'import torch\n'), ((2800, 2831), 'numpy.array', 'np.array', (['(dets1 + dets2 + dets3)'], {}), '(dets1 + dets2 + dets3)\n', (2808, 2831), True, 'import numpy as np\n'), ((813, 845), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (823, 845), True, 'import numpy as np\n'), ((860, 892), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (870, 892), True, 'import numpy as np\n'), ((907, 939), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (917, 939), True, 'import numpy as np\n'), ((954, 986), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (964, 986), True, 'import numpy as np\n'), ((1000, 1030), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (1010, 1030), True, 'import numpy as np\n'), ((1043, 1073), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (1053, 1073), True, 'import numpy as np\n'), ((2124, 2145), 'model.FaceModel', 'FaceModel', (['"""resnet18"""'], {}), "('resnet18')\n", (2133, 2145), False, 'from model import FaceModel\n'), ((2236, 2266), 'albumentations.Resize', 'Aug.Resize', (['img_size', 'img_size'], {}), '(img_size, img_size)\n', (2246, 2266), True, 'import albumentations as Aug\n'), ((2272, 2287), 'albumentations.Normalize', 'Aug.Normalize', ([], {}), '()\n', (2285, 2287), True, 'import albumentations as Aug\n'), ((2293, 2305), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (2303, 2305), False, 'from albumentations.pytorch import ToTensorV2\n'), ((1172, 1195), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1180, 1195), True, 'import numpy as np\n'), ((2000, 2064), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(k[0], k[1])', '(k[2], k[3])', '(256, 0, 0)', '(2)'], {}), '(frame, (k[0], k[1]), (k[2], k[3]), (256, 0, 0), 2)\n', (2013, 2064), False, 'import cv2\n')] |
import cv2,os,glob
import SimpleITK as sitk
import numpy as np
segs='/mnt/data9/independent_segs/lungs'
raw='/mnt/data9/independent_data'
out_path='/mnt/data9/independent_crop'
for type in os.listdir(segs):
for volume in os.listdir(os.path.join(segs,type)):
person=volume.split('_')[1]
stage = volume.split('_')[2]
R=sitk.ReadImage(os.path.join(raw,type,person+'_'+stage))
R=sitk.GetArrayFromImage(R)
M=sitk.ReadImage(os.path.join(segs,type,volume))
M=sitk.GetArrayFromImage(M)
for i in range(M.shape[0]):
m = M[i,:,:]
I = R[i, :, :]
I=(I+1400)/1500*255
IMG=np.stack([I,I,m*255],-1).astype(np.uint8)
#yy,xx=np.where(I>0)
#try:
# I=I[yy.min():yy.max(),xx.min():xx.max()]
#except:
# a=1
name=os.path.join(out_path,volume.split('.')[0]+'_'+str(i)+'.jpg')
cv2.imwrite(name,IMG)
a=1 | [
"numpy.stack",
"cv2.imwrite",
"SimpleITK.GetArrayFromImage",
"os.path.join",
"os.listdir"
] | [((189, 205), 'os.listdir', 'os.listdir', (['segs'], {}), '(segs)\n', (199, 205), False, 'import cv2, os, glob\n'), ((236, 260), 'os.path.join', 'os.path.join', (['segs', 'type'], {}), '(segs, type)\n', (248, 260), False, 'import cv2, os, glob\n'), ((411, 436), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['R'], {}), '(R)\n', (433, 436), True, 'import SimpleITK as sitk\n'), ((504, 529), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['M'], {}), '(M)\n', (526, 529), True, 'import SimpleITK as sitk\n'), ((360, 405), 'os.path.join', 'os.path.join', (['raw', 'type', "(person + '_' + stage)"], {}), "(raw, type, person + '_' + stage)\n", (372, 405), False, 'import cv2, os, glob\n'), ((462, 494), 'os.path.join', 'os.path.join', (['segs', 'type', 'volume'], {}), '(segs, type, volume)\n', (474, 494), False, 'import cv2, os, glob\n'), ((950, 972), 'cv2.imwrite', 'cv2.imwrite', (['name', 'IMG'], {}), '(name, IMG)\n', (961, 972), False, 'import cv2, os, glob\n'), ((666, 695), 'numpy.stack', 'np.stack', (['[I, I, m * 255]', '(-1)'], {}), '([I, I, m * 255], -1)\n', (674, 695), True, 'import numpy as np\n')] |
import os
import sys
import argparse
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
import random
import operator
import pickle as pickle
import matplotlib.pyplot as plt
np.random.seed(1)
def load_data(vector_filename, ion_type):
# Read file
if vector_filename.split(".")[-1] == "pkl":
vectors = pd.read_pickle(vector_filename)
elif vector_filename.split(".")[-1] == "h5":
# vectors = pd.read_hdf(vector_filename, key='table', stop=1000)
vectors = pd.read_hdf(vector_filename, key="table")
else:
print("Unsuported feature vector format")
exit(1)
# Extract targets for given ion type
target_names = list(vectors.columns[vectors.columns.str.contains("targets")])
if not "targets{}".format(ion_type) in target_names:
print("Targets for {} could not be found in vector file.".format(ion_type))
print("Vector file only contains these targets: {}".format(target_names))
exit(1)
targets = vectors.pop("targets{}".format(ion_type))
target_names.remove("targets{}".format(ion_type))
for n in target_names:
vectors.pop(n)
# Get psmids
psmids = vectors.pop("psmid")
return (vectors, targets, psmids)
fragtype = "y"
nul_cpu = 24
print("loading train data")
vectors, targets, psmids = load_data(sys.argv[1], fragtype)
print("Splitting up into train and test set...")
upeps = psmids.unique()
np.random.shuffle(upeps)
test_psms = upeps[: int(len(upeps) * 0.3)]
train_vectors = vectors[~psmids.isin(test_psms)]
train_targets = targets[~psmids.isin(test_psms)]
train_psmids = psmids[~psmids.isin(test_psms)]
test_vectors = vectors[psmids.isin(test_psms)]
test_targets = targets[psmids.isin(test_psms)]
test_psmids = psmids[psmids.isin(test_psms)]
print("Creating LightGBM datastructures...")
data = lgb.Dataset(train_vectors, label=train_targets)
datatest = lgb.Dataset(test_vectors, label=test_targets)
valid_sets = [datatest]
vector_sets = [test_vectors]
target_sets = [test_targets]
psmid_sets = [test_psmids]
print("loading evaluation data")
for fn in sys.argv[2:]:
vectors, targets, psmids = load_data(fn, fragtype)
tmp = lgb.Dataset(vectors, label=targets)
valid_sets.append(tmp)
psmid_sets.append(psmids)
vector_sets.append(vectors)
target_sets.append(targets)
sys.stderr.write("loading data done\n")
tmp2 = pd.DataFrame()
tmp3 = pd.DataFrame()
tmp3["psmid"] = test_psmids[test_vectors["charge"] == 3]
tmp3["target"] = test_targets[test_vectors["charge"] == 3]
tmp4 = pd.DataFrame()
tmp4["psmid"] = test_psmids[test_vectors["charge"] == 4]
tmp4["target"] = test_targets[test_vectors["charge"] == 4]
for max_depth in [7, 9, 11]:
for num_leaves in [50, 100, 200]:
params = {}
params["objective"] = "regression"
params["metric"] = "l1"
params["learning_rate"] = 0.8
# params['sub_feature'] = 1
params["num_leaves"] = num_leaves
# params['min_data'] = 50
params["max_depth"] = max_depth
num_round = 100
# lgb.cv(param, data, num_round, nfold=5)
bst = lgb.train(params, data, num_round, valid_sets=valid_sets)
for c in [2, 3, 4]:
for i in range(len(valid_sets)):
tmp = pd.DataFrame()
tmp["psmid"] = psmid_sets[i][vector_sets[i]["charge"] == c]
tmp["target"] = target_sets[i][vector_sets[i]["charge"] == c]
tmp["prediction"] = bst.predict(
vector_sets[i][vector_sets[i]["charge"] == c]
)
tmpp = (
tmp.groupby("psmid")[["target", "prediction"]].corr().iloc[0::2, -1]
)
print(
">>%i %i %i %i %s"
% (
c,
i,
max_depth,
num_leaves,
" ".join(
[
str(x)
for x in np.nanpercentile(
tmpp.values, [10, 30, 50, 70, 90]
)
]
),
)
)
exit()
# bst.save_model('model.txt')
print(bst.feature_importance())
model_json = bst.dump_model()
print(model_json["tree_info"])
def parseOneTree(root, index, array_type="double", return_type="double"):
def ifElse(node):
if "leaf_index" in node:
return "return " + str(node["leaf_value"]) + ";"
else:
condition = "arr[" + str(node["split_feature"]) + "]"
if node["decision_type"] == "no_greater":
condition += " <= " + str(node["threshold"])
else:
condition += " == " + str(node["threshold"])
left = ifElse(node["left_child"])
right = ifElse(node["right_child"])
return "if ( " + condition + " ) { " + left + " } else { " + right + " }"
return (
return_type
+ " predictTree"
+ str(index)
+ "("
+ array_type
+ "[] arr) { "
+ ifElse(root)
+ " }"
)
def parseAllTrees(trees, array_type="double", return_type="double"):
return (
"\n\n".join(
[
parseOneTree(tree["tree_structure"], idx, array_type, return_type)
for idx, tree in enumerate(trees)
]
)
+ "\n\n"
+ return_type
+ " predict("
+ array_type
+ "[] arr) { "
+ "return "
+ " + ".join(["predictTree" + str(i) + "(arr)" for i in range(len(trees))])
+ ";"
+ "}"
)
with open("if.else", "w+") as f:
f.write(parseAllTrees(model_json["tree_info"]))
| [
"pandas.DataFrame",
"numpy.nanpercentile",
"numpy.random.seed",
"lightgbm.train",
"pandas.read_hdf",
"lightgbm.Dataset",
"pandas.read_pickle",
"sys.stderr.write",
"numpy.random.shuffle"
] | [((289, 306), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (303, 306), True, 'import numpy as np\n'), ((1532, 1556), 'numpy.random.shuffle', 'np.random.shuffle', (['upeps'], {}), '(upeps)\n', (1549, 1556), True, 'import numpy as np\n'), ((1939, 1986), 'lightgbm.Dataset', 'lgb.Dataset', (['train_vectors'], {'label': 'train_targets'}), '(train_vectors, label=train_targets)\n', (1950, 1986), True, 'import lightgbm as lgb\n'), ((1998, 2043), 'lightgbm.Dataset', 'lgb.Dataset', (['test_vectors'], {'label': 'test_targets'}), '(test_vectors, label=test_targets)\n', (2009, 2043), True, 'import lightgbm as lgb\n'), ((2435, 2474), 'sys.stderr.write', 'sys.stderr.write', (['"""loading data done\n"""'], {}), "('loading data done\\n')\n", (2451, 2474), False, 'import sys\n'), ((2483, 2497), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2495, 2497), True, 'import pandas as pd\n'), ((2505, 2519), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2517, 2519), True, 'import pandas as pd\n'), ((2643, 2657), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2655, 2657), True, 'import pandas as pd\n'), ((2277, 2312), 'lightgbm.Dataset', 'lgb.Dataset', (['vectors'], {'label': 'targets'}), '(vectors, label=targets)\n', (2288, 2312), True, 'import lightgbm as lgb\n'), ((433, 464), 'pandas.read_pickle', 'pd.read_pickle', (['vector_filename'], {}), '(vector_filename)\n', (447, 464), True, 'import pandas as pd\n'), ((3215, 3272), 'lightgbm.train', 'lgb.train', (['params', 'data', 'num_round'], {'valid_sets': 'valid_sets'}), '(params, data, num_round, valid_sets=valid_sets)\n', (3224, 3272), True, 'import lightgbm as lgb\n'), ((605, 646), 'pandas.read_hdf', 'pd.read_hdf', (['vector_filename'], {'key': '"""table"""'}), "(vector_filename, key='table')\n", (616, 646), True, 'import pandas as pd\n'), ((3369, 3383), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3381, 3383), True, 'import pandas as pd\n'), ((4158, 4209), 'numpy.nanpercentile', 'np.nanpercentile', (['tmpp.values', '[10, 30, 50, 70, 90]'], {}), '(tmpp.values, [10, 30, 50, 70, 90])\n', (4174, 4209), True, 'import numpy as np\n')] |
from pytorch_lightning.loggers import LoggerCollection
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pytorch_lightning as pl
import torch as tr
import torch.nn.functional as F
from torch import Tensor, nn
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard.writer import SummaryWriter
from detection_utils.boxes import generate_targets
from detection_utils.demo.plot import draw_detections, plot_confusion_matrix, plot_img
from ..pytorch import softmax_focal_loss
from .boxes import DEFAULT_NMS_THRESHOLD, compute_batch_stats
def loss(
class_predictions: Tensor,
regression_predictions: Tensor,
class_targets: Tensor,
regression_targets: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Computes the classification and regression
smooth L1 (Huber) loss for regression (only on foreground anchor boxes)
softmax focal loss for classification (excluding ignore anchor boxes)
Parameters
----------
class_predictions : Tensor, shape-(N, K, num-class)
regression_predictions : Tensor, shape-(N, K, 4)
class_targets : Tensor, shape-(N, K)
regression_targets : Tensor, shape-(N, K, 4)
Returns
-------
classification_loss, regression_loss: Tuple[Tensor, Tensor], shape-() shape-()
The mean classification loss and regression loss, respectively.
Notes
-----
`N` is the batch size. `K` is the number of anchor boxes associated with
each image.
"""
# shape-(N*K,)
class_targets = class_targets.reshape(-1)
# shape-(N*K, 4)
regression_targets = regression_targets.reshape(-1, 4)
# shape-(N*K, num-class)
class_predictions = class_predictions.reshape(-1, class_predictions.shape[-1])
# shape-(N*K, 4)
regression_predictions = regression_predictions.reshape(-1, 4)
is_true_foreground = tr.squeeze(class_targets > 0)
num_foreground = is_true_foreground.sum().item()
if is_true_foreground.numel() > 0:
regression_loss = F.smooth_l1_loss(
regression_predictions[is_true_foreground],
regression_targets[is_true_foreground],
)
else:
regression_loss = tr.tensor(0).float()
is_not_ignore = tr.squeeze(class_targets > -1)
# the sum of focal loss terms is normalized by the number
# of anchors assigned to a ground-truth box
classification_loss = (
softmax_focal_loss(
class_predictions[is_not_ignore],
class_targets[is_not_ignore],
alpha=0.25,
gamma=2,
reduction="sum",
)
/ num_foreground
)
return classification_loss, regression_loss
class ShapeDetectionModel(pl.LightningModule):
def __init__(self, data_experiment_path: Optional[Union[str, Path]] = None):
super().__init__()
self.confusion_matrices: List[np.ndarray] = []
# stores info for plotting boxes/labels for val-image 0
# at subsequent epoch states of model
# [(boxes-epoch0, labels-epoch0, scores-epoch0), ...]
self.boxes_labels_scores: List[Tuple[np.ndarray, np.ndarray, np.ndarray]] = []
self.data_path = (
Path(data_experiment_path) if data_experiment_path is not None else None
)
self.conv1 = nn.Conv2d(3, 10, 3, padding=1)
self.conv2 = nn.Conv2d(10, 20, 3, padding=1)
self.conv3 = nn.Conv2d(20, 30, 3, padding=1)
self.conv4 = nn.Conv2d(30, 40, 3, padding=1)
self.bn1 = nn.BatchNorm2d(10)
self.bn2 = nn.BatchNorm2d(20)
self.bn3 = nn.BatchNorm2d(30)
self.bn4 = nn.BatchNorm2d(40)
# background / rectangle / triangle / circle
self.classification = nn.Conv2d(40, 4, 1)
self.regression = nn.Conv2d(40, 4, 1)
for layer in (
self.conv1,
self.conv2,
self.conv3,
self.conv4,
self.classification,
self.regression,
):
nn.init.xavier_normal_(layer.weight, np.sqrt(2))
nn.init.constant_(layer.bias, 0)
nn.init.constant_(
self.classification.bias[0], -4.6
) # roughly -log((1-π)/π) for π = 0.01
def forward(self, imgs: Tensor) -> Tuple[Tensor, Tensor]:
""""
Computes the classification scores and bounding box regression associated
with each anchor box of each image.
Parameters
----------
imgs : Tensor, shape-(N, 3, H, W)
A batch of N images.
Returns
-------
classifications, regressions : Tuple[Tensor, Tensor]
shape-(N, K, N_class), shape-(N, K, 4)
For each of N images in the batch, returns the classification scores
and bbox regressions associated with each of the K anchor boxes associated
with that image.
Notes
-----
The anchor boxes are flattened in row-major order"""
imgs = F.max_pool2d(F.relu(self.bn1(self.conv1(imgs))), 2)
imgs = F.max_pool2d(F.relu(self.bn2(self.conv2(imgs))), 2)
imgs = F.max_pool2d(F.relu(self.bn3(self.conv3(imgs))), 2)
imgs = F.max_pool2d(F.relu(self.bn4(self.conv4(imgs))), 2)
# (N, num-classes, R, C) -> (N, R, C, num-classes)
classifications = self.classification(imgs).permute(0, 2, 3, 1)
# (N, R, C, num-classes) -> (N, R*C, num-classes)
classifications = classifications.reshape(
imgs.shape[0], -1, classifications.shape[-1]
)
# (N, 4, R, C) -> (N, R, C, 4)
regressions = self.regression(imgs).permute(0, 2, 3, 1)
# (N, R, C, 4) -> (N, R*C, 4)
regressions = regressions.reshape(imgs.shape[0], -1, 4)
return classifications, regressions
def training_step(self, batch: Tuple[Tensor, ...], batch_idx: int) -> Tensor:
imgs, class_targets, bbox_targets = batch
class_predictions, regression_predictions = self(imgs)
total_cls_loss, total_reg_loss = loss(
class_predictions, regression_predictions, class_targets, bbox_targets,
)
tot_loss = total_cls_loss + total_reg_loss
self.log("train_loss", total_cls_loss + total_reg_loss)
return tot_loss
def validation_step(self, batch: Tuple[Tensor, ...], batch_idx: int) -> Tensor:
imgs, class_targets, bbox_targets = batch
class_predictions, regression_predictions = self(imgs)
total_cls_loss, total_reg_loss = loss(
class_predictions, regression_predictions, class_targets, bbox_targets,
)
self.log("val_loss", total_cls_loss + total_reg_loss, prog_bar=True)
start = len(imgs) * (batch_idx)
stop = len(imgs) * (batch_idx + 1)
confusion_matrix, precision, recall = compute_batch_stats(
class_predictions=class_predictions,
regression_predictions=regression_predictions,
boxes=self.val_boxes[start:stop],
labels=self.val_labels[start:stop],
feature_map_width=imgs.shape[2] // 16, # backbone downsamples by factor 16
)
ap = precision.mean()
ar = recall.mean()
self.log("val_precision", ap)
self.log("val_recall", ar)
self.log("ap+ar", ap + ar)
return confusion_matrix
def validation_epoch_end(self, outputs: List[Any]) -> None:
"""Plots confusion matrix and example validation image with detections"""
total_confusion_matrix = sum(outputs)
normed_conf_matrix = total_confusion_matrix / tr.sum(
total_confusion_matrix, dim=0, keepdim=True
)
normed_conf_matrix = np.nan_to_num(normed_conf_matrix.numpy())
self.confusion_matrices.append(normed_conf_matrix)
# note: exclude negatives from classification accuracy
val_acc = np.einsum("ii", normed_conf_matrix[1:, 1:]) / (
len(normed_conf_matrix) - 1
)
self.log("val_acc", val_acc)
boxes, labels, scores = zip(
*self.get_detections(self.val_images[:1].to(self.device))
)
self.boxes_labels_scores.append((boxes[0], labels[0], scores[0]))
tensorboard: Optional[SummaryWriter] = self._get_tensorboard_logger()
if tensorboard is not None:
fig, ax = plot_confusion_matrix(
normed_conf_matrix, font_size=15, figsize=(8, 8)
)
tensorboard.add_figure(
"confusion-matrix", fig, global_step=self.current_epoch
)
img_id = 0
fig, ax = plot_img(self.val_images[img_id], figsize=(8, 8))
draw_detections(ax, boxes=boxes[img_id], labels=labels[img_id])
tensorboard.add_figure("example-image", fig, global_step=self.current_epoch)
def configure_optimizers(self):
return Adam(self.parameters(), lr=5e-4)
def setup(self, stage: str) -> None:
from .boxes import make_anchor_boxes
from .data import load_data
assert self.data_path is not None
images, self.train_boxes, self.train_labels = load_data(
self.data_path / "train"
)
H, W = images.shape[1:3]
val_images, self.val_boxes, self.val_labels = load_data(self.data_path / "val")
self.train_images = tr.tensor(images.transpose((0, 3, 1, 2)))
self.val_images = tr.tensor(val_images.transpose((0, 3, 1, 2)))
self.anchor_boxes = make_anchor_boxes(image_height=H, image_width=W)
def train_dataloader(self) -> DataLoader:
train_cls_targs, train_reg_targs = zip(
*(
generate_targets(self.anchor_boxes, bxs, lbls, 0.2, 0.1)
for bxs, lbls in zip(self.train_boxes, self.train_labels)
)
)
train_reg_targs = tr.tensor(train_reg_targs).float()
train_cls_targs = tr.tensor(train_cls_targs).long()
return DataLoader(
TensorDataset(self.train_images, train_cls_targs, train_reg_targs),
batch_size=16,
pin_memory=True,
num_workers=4,
shuffle=True,
drop_last=True,
)
def val_dataloader(self) -> DataLoader:
val_cls_targs, val_reg_targs = zip(
*(
generate_targets(self.anchor_boxes, bxs, lbls, 0.2, 0.1)
for bxs, lbls in zip(self.val_boxes, self.val_labels)
)
)
val_reg_targs = tr.tensor(val_reg_targs).float()
val_cls_targs = tr.tensor(val_cls_targs).long()
return DataLoader(
TensorDataset(self.val_images, val_cls_targs, val_reg_targs),
batch_size=16,
pin_memory=True,
num_workers=4,
shuffle=False,
drop_last=True,
)
def get_detections(
self,
imgs: Tensor,
score_threshold: float = None,
nms_threshold: float = DEFAULT_NMS_THRESHOLD,
) -> List[Tuple[np.ndarray, np.ndarray, np.ndarray]]:
""""
Computes the best bounding boxes and classification scores.
Parameters
----------
imgs : Tensor, shape-(N, 3, H, W)
A batch of N images.
score_threshold: Optional[float]
If specified, detections with foreground scores below this
threshold are ignored
nms_threshold: float, optional
The IoU threshold to use for NMS, above which one of two box will be suppressed.
Returns
-------
List[Tuple[np.ndarray, np.ndarray, np.ndarray]]
The (boxes, labels, and confidence scores) for each of the N images
- boxes: ndarray shape=(N_det, 4)
- labels : ndarray shape=(N_det, 1)
- scores : ndarray shape=(N_det,)]
Notes
-----
The anchor boxes are flattened in row-major order.
Boxes are reported as (xlo, ylo, xhi, yhi).
"""
from detection_utils.demo.boxes import compute_detections
was_training = self.training
self.eval()
try:
with tr.no_grad():
class_predictions, regression_predictions = self.forward(imgs)
finally:
if was_training:
self.train(mode=True)
return [
compute_detections(
classifications=cls,
regressions=regr,
feature_map_width=imgs.shape[-1] // 16,
nms_threshold=nms_threshold,
score_threshold=score_threshold,
)
for cls, regr in zip(class_predictions, regression_predictions)
]
def _get_tensorboard_logger(self) -> Optional[SummaryWriter]:
if isinstance(self.logger.experiment, SummaryWriter):
return self.logger.experiment
elif isinstance(self.logger, LoggerCollection):
for logger in self.logger.experiment:
if isinstance(logger, SummaryWriter):
return logger
return None
| [
"torch.sum",
"detection_utils.demo.plot.plot_confusion_matrix",
"detection_utils.boxes.generate_targets",
"torch.nn.Conv2d",
"numpy.einsum",
"detection_utils.demo.plot.draw_detections",
"torch.squeeze",
"torch.nn.BatchNorm2d",
"torch.nn.init.constant_",
"pathlib.Path",
"torch.utils.data.TensorDa... | [((1932, 1961), 'torch.squeeze', 'tr.squeeze', (['(class_targets > 0)'], {}), '(class_targets > 0)\n', (1942, 1961), True, 'import torch as tr\n'), ((2294, 2324), 'torch.squeeze', 'tr.squeeze', (['(class_targets > -1)'], {}), '(class_targets > -1)\n', (2304, 2324), True, 'import torch as tr\n'), ((2080, 2184), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['regression_predictions[is_true_foreground]', 'regression_targets[is_true_foreground]'], {}), '(regression_predictions[is_true_foreground],\n regression_targets[is_true_foreground])\n', (2096, 2184), True, 'import torch.nn.functional as F\n'), ((3361, 3391), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(10)', '(3)'], {'padding': '(1)'}), '(3, 10, 3, padding=1)\n', (3370, 3391), False, 'from torch import Tensor, nn\n'), ((3413, 3444), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)', '(3)'], {'padding': '(1)'}), '(10, 20, 3, padding=1)\n', (3422, 3444), False, 'from torch import Tensor, nn\n'), ((3466, 3497), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(30)', '(3)'], {'padding': '(1)'}), '(20, 30, 3, padding=1)\n', (3475, 3497), False, 'from torch import Tensor, nn\n'), ((3519, 3550), 'torch.nn.Conv2d', 'nn.Conv2d', (['(30)', '(40)', '(3)'], {'padding': '(1)'}), '(30, 40, 3, padding=1)\n', (3528, 3550), False, 'from torch import Tensor, nn\n'), ((3570, 3588), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(10)'], {}), '(10)\n', (3584, 3588), False, 'from torch import Tensor, nn\n'), ((3608, 3626), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(20)'], {}), '(20)\n', (3622, 3626), False, 'from torch import Tensor, nn\n'), ((3646, 3664), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(30)'], {}), '(30)\n', (3660, 3664), False, 'from torch import Tensor, nn\n'), ((3684, 3702), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(40)'], {}), '(40)\n', (3698, 3702), False, 'from torch import Tensor, nn\n'), ((3787, 3806), 'torch.nn.Conv2d', 'nn.Conv2d', (['(40)', '(4)', '(1)'], {}), '(40, 4, 1)\n', (3796, 3806), False, 'from torch import Tensor, nn\n'), ((3833, 3852), 'torch.nn.Conv2d', 'nn.Conv2d', (['(40)', '(4)', '(1)'], {}), '(40, 4, 1)\n', (3842, 3852), False, 'from torch import Tensor, nn\n'), ((4161, 4213), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.classification.bias[0]', '(-4.6)'], {}), '(self.classification.bias[0], -4.6)\n', (4178, 4213), False, 'from torch import Tensor, nn\n'), ((3256, 3282), 'pathlib.Path', 'Path', (['data_experiment_path'], {}), '(data_experiment_path)\n', (3260, 3282), False, 'from pathlib import Path\n'), ((4119, 4151), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias', '(0)'], {}), '(layer.bias, 0)\n', (4136, 4151), False, 'from torch import Tensor, nn\n'), ((7639, 7690), 'torch.sum', 'tr.sum', (['total_confusion_matrix'], {'dim': '(0)', 'keepdim': '(True)'}), '(total_confusion_matrix, dim=0, keepdim=True)\n', (7645, 7690), True, 'import torch as tr\n'), ((7927, 7970), 'numpy.einsum', 'np.einsum', (['"""ii"""', 'normed_conf_matrix[1:, 1:]'], {}), "('ii', normed_conf_matrix[1:, 1:])\n", (7936, 7970), True, 'import numpy as np\n'), ((8391, 8462), 'detection_utils.demo.plot.plot_confusion_matrix', 'plot_confusion_matrix', (['normed_conf_matrix'], {'font_size': '(15)', 'figsize': '(8, 8)'}), '(normed_conf_matrix, font_size=15, figsize=(8, 8))\n', (8412, 8462), False, 'from detection_utils.demo.plot import draw_detections, plot_confusion_matrix, plot_img\n'), ((8661, 8710), 'detection_utils.demo.plot.plot_img', 'plot_img', (['self.val_images[img_id]'], {'figsize': '(8, 8)'}), '(self.val_images[img_id], figsize=(8, 8))\n', (8669, 8710), False, 'from detection_utils.demo.plot import draw_detections, plot_confusion_matrix, plot_img\n'), ((8723, 8786), 'detection_utils.demo.plot.draw_detections', 'draw_detections', (['ax'], {'boxes': 'boxes[img_id]', 'labels': 'labels[img_id]'}), '(ax, boxes=boxes[img_id], labels=labels[img_id])\n', (8738, 8786), False, 'from detection_utils.demo.plot import draw_detections, plot_confusion_matrix, plot_img\n'), ((10024, 10090), 'torch.utils.data.TensorDataset', 'TensorDataset', (['self.train_images', 'train_cls_targs', 'train_reg_targs'], {}), '(self.train_images, train_cls_targs, train_reg_targs)\n', (10037, 10090), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((10664, 10724), 'torch.utils.data.TensorDataset', 'TensorDataset', (['self.val_images', 'val_cls_targs', 'val_reg_targs'], {}), '(self.val_images, val_cls_targs, val_reg_targs)\n', (10677, 10724), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((12386, 12555), 'detection_utils.demo.boxes.compute_detections', 'compute_detections', ([], {'classifications': 'cls', 'regressions': 'regr', 'feature_map_width': '(imgs.shape[-1] // 16)', 'nms_threshold': 'nms_threshold', 'score_threshold': 'score_threshold'}), '(classifications=cls, regressions=regr, feature_map_width\n =imgs.shape[-1] // 16, nms_threshold=nms_threshold, score_threshold=\n score_threshold)\n', (12404, 12555), False, 'from detection_utils.demo.boxes import compute_detections\n'), ((2252, 2264), 'torch.tensor', 'tr.tensor', (['(0)'], {}), '(0)\n', (2261, 2264), True, 'import torch as tr\n'), ((4095, 4105), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4102, 4105), True, 'import numpy as np\n'), ((9890, 9916), 'torch.tensor', 'tr.tensor', (['train_reg_targs'], {}), '(train_reg_targs)\n', (9899, 9916), True, 'import torch as tr\n'), ((9951, 9977), 'torch.tensor', 'tr.tensor', (['train_cls_targs'], {}), '(train_cls_targs)\n', (9960, 9977), True, 'import torch as tr\n'), ((10536, 10560), 'torch.tensor', 'tr.tensor', (['val_reg_targs'], {}), '(val_reg_targs)\n', (10545, 10560), True, 'import torch as tr\n'), ((10593, 10617), 'torch.tensor', 'tr.tensor', (['val_cls_targs'], {}), '(val_cls_targs)\n', (10602, 10617), True, 'import torch as tr\n'), ((12179, 12191), 'torch.no_grad', 'tr.no_grad', ([], {}), '()\n', (12189, 12191), True, 'import torch as tr\n'), ((9708, 9764), 'detection_utils.boxes.generate_targets', 'generate_targets', (['self.anchor_boxes', 'bxs', 'lbls', '(0.2)', '(0.1)'], {}), '(self.anchor_boxes, bxs, lbls, 0.2, 0.1)\n', (9724, 9764), False, 'from detection_utils.boxes import generate_targets\n'), ((10360, 10416), 'detection_utils.boxes.generate_targets', 'generate_targets', (['self.anchor_boxes', 'bxs', 'lbls', '(0.2)', '(0.1)'], {}), '(self.anchor_boxes, bxs, lbls, 0.2, 0.1)\n', (10376, 10416), False, 'from detection_utils.boxes import generate_targets\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/22 10:41
# @Author : ganliang
# @File : __init__.py.py
# @Desc : pandas测试
import numpy as np
import pandas as pd
from src.config import logger
def pd_serise():
series = pd.Series([1, 3, 5, np.nan, 6, 8])
logger.info("Series:\n{0}".format(series))
def pd_date_range():
date_ranges = pd.date_range("2019-10-01", periods=6, freq="D")
logger.info("date_ranges:\n{0}".format(date_ranges))
def pd_dataframe():
pd_frame = pd.DataFrame(np.random.rand(6, 4), index=["R1", "R2", "R3", "R4", "R5", "R6"],
columns=["A", "B", "C", "D"])
logger.info("pd_frame:\n{0}".format(pd_frame))
logger.info("pd_frame.head(1):\n{0}".format(pd_frame.head(1)))
logger.info("pd_frame.tail(1):\n{0}".format(pd_frame.tail(1)))
logger.info("pd_frame.dtypes:\n{0}".format(pd_frame.dtypes))
logger.info("pd_frame.shape:\n{0}".format(pd_frame.shape))
logger.info("pd_frame.to_numpy():\n{0}".format(pd_frame.to_numpy()))
logger.info("pd_frame.describe():\n{0}".format(pd_frame.describe()))
logger.info("pd_frame.T:\n{0}".format(pd_frame.T))
logger.info(
"pd_frame.sort_index(axis=1, ascending=False):\n{0}".format(pd_frame.sort_index(axis=1, ascending=False)))
logger.info("pd_frame.sort_values(by='B'):\n{0}".format(pd_frame.sort_values(by='B')))
logger.info("pd_frame['A']:\n{0}".format(pd_frame["A"]))
logger.info("pd_frame[0:1]:\n{0}".format(pd_frame[0:1]))
if __name__ == "__main__":
# pd_serise()
# pd_date_range()
pd_dataframe()
| [
"numpy.random.rand",
"pandas.date_range",
"pandas.Series"
] | [((249, 283), 'pandas.Series', 'pd.Series', (['[1, 3, 5, np.nan, 6, 8]'], {}), '([1, 3, 5, np.nan, 6, 8])\n', (258, 283), True, 'import pandas as pd\n'), ((372, 420), 'pandas.date_range', 'pd.date_range', (['"""2019-10-01"""'], {'periods': '(6)', 'freq': '"""D"""'}), "('2019-10-01', periods=6, freq='D')\n", (385, 420), True, 'import pandas as pd\n'), ((528, 548), 'numpy.random.rand', 'np.random.rand', (['(6)', '(4)'], {}), '(6, 4)\n', (542, 548), True, 'import numpy as np\n')] |
import numpy as np
import pickle
import os
import argparse
def cut_line(sentence):
sent = ''
delimiter = ['。', ';', '?', '!']
for i, c in enumerate(sentence):
sent += c
if ((c in delimiter) and (sentence[min(len(sentence)-1, i + 1)] not in ['」', '”', '’'])) or i == len(sentence)-1:
yield sent
sent = ''
def cut_line2(sentence):
sent = ''
for i, c in enumerate(sentence):
sent += c
if c == ',':
flag = True
for j in range(i+1, min(len(sentence)-1, i+6)):
if sentence[j] == ',' or j == len(sentence)-1:
flag = False
if (flag and len(sent) > 20) or i == len(sentence)-1:
yield sent[:-1] + '。'
sent = ''
def make_docs(wrong, correct):
w_res = []
if ('。' in wrong[:-1]) or (';' in wrong[:-1]) or ('?' in wrong[:-1]) or ('!' in wrong[:-1]):
for w_sent in cut_line(wrong):
w_res.append(w_sent + '\n')
# wrong_file.write(w_sent + '\n')
elif len(wrong) > 100:
for w_sent in cut_line2(wrong):
w_res.append(w_sent + '\n')
# wrong_file.write(w_sent + '\n')
else:
w_res.append(wrong + '\n')
# wrong_file.write(wrong + '\n')
# wrong_file.write('\n')
c_res = []
if ('。' in correct[:-1]) or (';' in correct[:-1]) or ('?' in correct[:-1]) or ('!' in correct[:-1]):
for c_sent in cut_line(correct):
c_res.append(c_sent + '\n')
# correct_file.write(c_sent + '\n')
elif len(wrong) > 100:
for c_sent in cut_line2(correct):
c_res.append(c_sent + '\n')
# correct_file.write(c_sent + '\n')
else:
c_res.append(correct + '\n')
# correct_file.write(correct + '\n')
if len(w_res) != len(c_res):
w_res = [wrong + '\n']
c_res = [correct + '\n']
for w_r, c_r in zip(w_res, c_res):
if not len(w_r.strip()) == len(c_r.strip()):
print(w_r)
print(len(w_r.strip()))
print(c_r)
print(len(c_r.strip()))
exit()
for l in w_res:
wrong_file.write(l)
wrong_file.write('\n')
for l in c_res:
correct_file.write(l)
correct_file.write('\n')
def main(fname, output_dir):
confusions = {}
for line in open(fname, 'r', encoding='utf-8'):
num, wrong, correct = line.strip().split('\t')
wrong = wrong.strip()
correct = correct.strip()
for w, c in zip(wrong, correct):
if w!=c:
if w + c not in confusions:
confusions[w + c] = 0
confusions[w + c] += 1
# if len(wrong) != len(correct):
# print(wrong)
# print(correct)
# exit()
assert len(wrong) == len(correct)
num = int(num)
make_docs(wrong, correct)
if wrong != correct:
make_docs(correct, correct)
poses = [pos for pos, (w, c) in enumerate(zip(wrong, correct)) if w != c]
num = len(poses)
if num >= 2:
if len(poses) != num:
print(wrong)
print(correct)
exit()
assert len(poses) == num
for i in range(1, num):
selected_poses = [poses[k] for k in np.random.choice(num, i, replace=False)]
fake_wrong = list(wrong)
for p in selected_poses:
fake_wrong[p] = correct[p]
fake_wrong = ''.join(fake_wrong)
assert len(fake_wrong) == len(correct)
assert fake_wrong != correct
make_docs(fake_wrong, correct)
# take the top frequency of confusions about the each character.
top_confusions = {}
for k in confusions:
if k[0] not in top_confusions:
top_confusions[k[0]] = confusions[k]
elif top_confusions[k[0]] < confusions[k]:
top_confusions[k[0]] = confusions[k]
confusions_top = sorted(list(top_confusions.keys()), key=lambda x: top_confusions[x], reverse=True)
correct_count = {}
for line_c, line_w in zip(open(os.path.join(args.output, 'correct.txt'), 'r', encoding='utf-8'), open(os.path.join(args.output, 'wrong.txt'), 'r', encoding='utf-8')):
if line_c.strip():
wrong, correct = line_w.strip(), line_c.strip()
wrong = wrong.strip()
correct = correct.strip()
for w, c in zip(wrong, correct):
if w==c and w in top_confusions:
if w not in correct_count:
correct_count[w] = 0
correct_count[w] += 1
proportions = {}
for k in correct_count:
assert correct_count[k] != 0
proportions[k] = min(top_confusions[k] / correct_count[k], 1.0)
print('confusion statistics:')
for i in range(min(len(confusions_top), 20)):
if confusions_top[i] in correct_count:
correct_occurs = correct_count[confusions_top[i]]
proportions_num = proportions[confusions_top[i]]
else:
correct_occurs = 0
proportions_num = 'NaN'
print(f'most frequent confusion pair for {confusions_top[i]} occurs {top_confusions[confusions_top[i]]} times,'
f' correct ones occur {correct_occurs} times, mask probability should be {proportions_num}')
pickle.dump(proportions, open(os.path.join(args.output, 'mask_probability.sav'), 'wb'))
# print('top confusions:')
# for i in range(20):
# print(f'{top_confusions[i]} occurs {confusions[confusions_top[i]]} times')
# main()
def parse_args():
usage = '\n1. create wrong.txt, correct.txt and mask_probability.sav by:\n' \
'python create_data.py -f /path/to/train.txt\n' \
'\n' \
'\n2. specify output dir by:\n' \
'python create_data.py -f /path/to/train.txt -o /path/to/dir/\n' \
'\n'
parser = argparse.ArgumentParser(
description='A module for FASPell - Fast, Adaptable, Simple, Powerful Chinese Spell Checker', usage=usage)
parser.add_argument('--file', '-f', type=str, default=None,
help='original training data.')
parser.add_argument('--output', '-o', type=str, default='',
help='output a file a dir; default is current directory.')
# parser.add_argument('--verbose', '-v', action="store_true", default=False,
# help='to show details of spell checking sentences under mode s')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
correct_file = open(os.path.join(args.output,'correct.txt'), 'w', encoding='utf-8')
wrong_file = open(os.path.join(args.output,'wrong.txt'), 'w', encoding='utf-8')
main(args.file, args.output)
| [
"os.path.join",
"argparse.ArgumentParser",
"numpy.random.choice"
] | [((6041, 6181), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A module for FASPell - Fast, Adaptable, Simple, Powerful Chinese Spell Checker"""', 'usage': 'usage'}), "(description=\n 'A module for FASPell - Fast, Adaptable, Simple, Powerful Chinese Spell Checker'\n , usage=usage)\n", (6064, 6181), False, 'import argparse\n'), ((6745, 6785), 'os.path.join', 'os.path.join', (['args.output', '"""correct.txt"""'], {}), "(args.output, 'correct.txt')\n", (6757, 6785), False, 'import os\n'), ((6831, 6869), 'os.path.join', 'os.path.join', (['args.output', '"""wrong.txt"""'], {}), "(args.output, 'wrong.txt')\n", (6843, 6869), False, 'import os\n'), ((4213, 4253), 'os.path.join', 'os.path.join', (['args.output', '"""correct.txt"""'], {}), "(args.output, 'correct.txt')\n", (4225, 4253), False, 'import os\n'), ((4284, 4322), 'os.path.join', 'os.path.join', (['args.output', '"""wrong.txt"""'], {}), "(args.output, 'wrong.txt')\n", (4296, 4322), False, 'import os\n'), ((5494, 5543), 'os.path.join', 'os.path.join', (['args.output', '"""mask_probability.sav"""'], {}), "(args.output, 'mask_probability.sav')\n", (5506, 5543), False, 'import os\n'), ((3375, 3414), 'numpy.random.choice', 'np.random.choice', (['num', 'i'], {'replace': '(False)'}), '(num, i, replace=False)\n', (3391, 3414), True, 'import numpy as np\n')] |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for labmaze.RandomMaze."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import absltest
import labmaze
import numpy as np
from six.moves import range
class RandomMazeTest(absltest.TestCase):
"""Tests for labmaze.RandomMaze.
Tests whose name contain the word 'golden' are brittle since the output
depends on the specific implementation of various random algorithms in the C++
standard library. Each test case contains two sets of golden output data,
generated by libstdc++ and libc++.
"""
def testGolden7x9Maze(self):
maze = labmaze.RandomMaze(height=7, width=9, random_seed=12345)
expected_mazes = {('*********\n'
'*********\n'
'*********\n'
'*** ***\n'
'*** ***\n'
'*** ***\n'
'*********\n'), # libstdc++
('*********\n'
'*********\n'
'*********\n'
'* ***\n'
'* ***\n'
'* ***\n'
'*********\n'), # libc++
}
actual_maze = str(maze.entity_layer)
self.assertIn(actual_maze, expected_mazes)
np.testing.assert_array_equal(maze.entity_layer,
labmaze.TextGrid(actual_maze))
expected_variations = actual_maze.replace('*', '.').replace(' ', 'A')
self.assertEqual(str(maze.variations_layer), expected_variations)
np.testing.assert_array_equal(maze.variations_layer,
labmaze.TextGrid(expected_variations))
def testGoldenTwoRoom9x11Maze(self):
maze = labmaze.RandomMaze(height=9, width=11,
max_rooms=2, room_min_size=4, room_max_size=6,
random_seed=12345)
expected_mazes = {('***********\n'
'*** ***\n'
'*** * ***\n'
'*** * *\n'
'*** * *** *\n'
'*** * * *\n'
'*** * * *\n'
'*** *\n'
'***********\n'), # libc++
('***********\n'
'* *****\n'
'* * *****\n'
'* *\n'
'* * *\n'
'* * *\n'
'* *** *****\n'
'* *****\n'
'***********\n'), # libstdc++
('***********\n'
'* ***\n'
'* * ***\n'
'* * ***\n'
'*** * * ***\n'
'*** *\n'
'***** *\n'
'***** *\n'
'***********\n'), # MSVC14
}
self.assertIn(str(maze.entity_layer), expected_mazes)
expected_variations = {('...........\n'
'.....AAA...\n'
'.....AAA...\n'
'.....AAA...\n'
'...........\n'
'.....BBB...\n'
'.....BBB...\n'
'.....BBB...\n'
'...........\n'), # libstdc++
('...........\n'
'.AAA.......\n'
'.AAA.......\n'
'.AAA.BBBBB.\n'
'.AAA.BBBBB.\n'
'.AAA.BBBBB.\n'
'...........\n'
'...........\n'
'...........\n'), # libc++
('...........\n'
'.AAAAA.....\n'
'.AAAAA.....\n'
'.AAAAA.....\n'
'...........\n'
'.....BBBBB.\n'
'.....BBBBB.\n'
'.....BBBBB.\n'
'...........\n'), # MSVC14
}
self.assertIn(str(maze.variations_layer), expected_variations)
def testRegenerate(self):
maze = labmaze.RandomMaze(height=51, width=31,
max_rooms=5, room_min_size=10, room_max_size=20,
random_seed=12345)
old_maze, old_variations = None, None
for _ in range(5):
maze.regenerate()
if old_maze is not None:
self.assertNotEqual(str(maze.entity_layer), str(old_maze))
self.assertTrue(np.any(maze.entity_layer != old_maze))
self.assertNotEqual(str(maze.variations_layer), str(old_variations))
self.assertTrue(np.any(maze.variations_layer != old_variations))
old_maze = copy.deepcopy(maze.entity_layer)
old_variations = copy.deepcopy(maze.variations_layer)
def testGoldenMazeRegeneration(self):
# This test makes sure that regeneration logic is not operating on an
# old, dirty maze object.
maze = labmaze.RandomMaze(height=17, width=17,
max_rooms=9, room_min_size=3, room_max_size=3,
random_seed=12345)
expected_mazes = {('*****************\n'
'* ***** ***\n'
'* ***** *** ***\n'
'* *** *\n'
'*** *** ***** *\n'
'* * * *\n'
'* * * * *** *\n'
'* * *\n'
'* * * * * * * ***\n'
'* * *\n'
'* * * * * *\n'
'* * * * *\n'
'* ***** *** * * *\n'
'* * * *\n'
'***** * * * *\n'
'***** * *\n'
'*****************\n'), # libstdc++
('*****************\n'
'*** *\n'
'*** *********** *\n'
'*** * * *\n'
'*** * * * * *\n'
'* * * * *\n'
'* ***** ***** *\n'
'* *** *\n'
'*** *** * ***** *\n'
'* * * * * *\n'
'* * * * * *\n'
'* * * *\n'
'*** *** * * * * *\n'
'* *** * *\n'
'* *** * * *\n'
'* * *\n'
'*****************\n'), # libc++
('*****************\n'
'********* *\n'
'********* ***** *\n'
'* ***** * *\n'
'* ***** * * *\n'
'* * * *\n'
'* ************* *\n'
'* * *\n'
'* * *** * ***\n'
'* * *\n'
'* ***** * *** *\n'
'* *** *\n'
'* * ***** *** *\n'
'* * *\n'
'* *** * * *\n'
'* * *\n'
'*****************\n'), # MSVC14
}
self.assertIn(str(maze.entity_layer), expected_mazes)
maze.regenerate()
expected_mazes_2 = {('*****************\n'
'*** * *\n'
'*** * * * *\n'
'* * * *\n'
'* * *** ******* *\n'
'* * * *** *\n'
'* * * *** *** *\n'
'* * * * *\n'
'* * * * * * * *\n'
'* * * * *\n'
'* * *** ***** *\n'
'* * *\n'
'*** * * ***** *\n'
'* * * *\n'
'* ***** * ***\n'
'* * ***\n'
'*****************\n'), # libstdc++
('*****************\n'
'********* *****\n'
'********* *****\n'
'* * *\n'
'* * * *** *** *\n'
'* * * * *\n'
'* ***** * * *\n'
'* * *\n'
'* *** ***** *** *\n'
'* * *\n'
'* * * * *\n'
'* * * *\n'
'* * * * * *** ***\n'
'* ***\n'
'*** ***********\n'
'*** ***********\n'
'*****************\n'), # libc++
('*****************\n'
'* *****\n'
'* * *** *****\n'
'* *\n'
'*** * * ***** *\n'
'* * * *\n'
'* ***** *** * *\n'
'* *\n'
'* * * *** * * *\n'
'* * * * *\n'
'* * * * * * *\n'
'* * * *\n'
'* * * * * *** *\n'
'* * * *\n'
'* * *** * *****\n'
'* * *****\n'
'*****************\n'), # MSVC14
}
self.assertIn(str(maze.entity_layer), expected_mazes_2)
def testInvalidArguments(self):
with self.assertRaisesRegexp(ValueError, 'height.*integer'):
labmaze.RandomMaze(height=2.5)
with self.assertRaisesRegexp(ValueError, 'height.*positive'):
labmaze.RandomMaze(height=-3)
with self.assertRaisesRegexp(ValueError, 'height.*odd'):
labmaze.RandomMaze(height=4)
with self.assertRaisesRegexp(ValueError, 'width.*integer'):
labmaze.RandomMaze(width=1.25)
with self.assertRaisesRegexp(ValueError, 'width.*positive'):
labmaze.RandomMaze(width=-5)
with self.assertRaisesRegexp(ValueError, 'width.*odd'):
labmaze.RandomMaze(width=2)
with self.assertRaisesRegexp(ValueError, 'room_min_size.*integer'):
labmaze.RandomMaze(room_min_size=3.3)
with self.assertRaisesRegexp(ValueError, 'room_min_size.*positive'):
labmaze.RandomMaze(room_min_size=-1)
with self.assertRaisesRegexp(ValueError, 'room_max_size.*integer'):
labmaze.RandomMaze(room_max_size=4.4)
with self.assertRaisesRegexp(ValueError, 'room_max_size.*positive'):
labmaze.RandomMaze(room_max_size=-2)
with self.assertRaisesRegexp(
ValueError, 'room_min_size.*less than or equal to.*room_max_size'):
labmaze.RandomMaze(room_min_size=4, room_max_size=3)
with self.assertRaisesRegexp(ValueError, 'retry_count.*integer'):
labmaze.RandomMaze(retry_count=5.4)
with self.assertRaisesRegexp(ValueError, 'retry_count.*positive'):
labmaze.RandomMaze(retry_count=-7)
with self.assertRaisesRegexp(
ValueError, 'extra_connection_probability.*between 0.0 and 1.0'):
labmaze.RandomMaze(extra_connection_probability=1.1)
with self.assertRaisesRegexp(ValueError, 'max_variations.*integer'):
labmaze.RandomMaze(max_variations=6.7)
with self.assertRaisesRegexp(
ValueError, 'max_variations.*between 0 and 26'):
labmaze.RandomMaze(max_variations=27)
with self.assertRaisesRegexp(ValueError, 'spawn_token.*single character'):
labmaze.RandomMaze(spawn_token='foo')
with self.assertRaisesRegexp(ValueError, 'object_token.*single character'):
labmaze.RandomMaze(object_token='bar')
if __name__ == '__main__':
absltest.main()
| [
"absl.testing.absltest.main",
"copy.deepcopy",
"six.moves.range",
"numpy.any",
"labmaze.TextGrid",
"labmaze.RandomMaze"
] | [((13332, 13347), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (13345, 13347), False, 'from absl.testing import absltest\n'), ((1325, 1381), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'height': '(7)', 'width': '(9)', 'random_seed': '(12345)'}), '(height=7, width=9, random_seed=12345)\n', (1343, 1381), False, 'import labmaze\n'), ((2483, 2591), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'height': '(9)', 'width': '(11)', 'max_rooms': '(2)', 'room_min_size': '(4)', 'room_max_size': '(6)', 'random_seed': '(12345)'}), '(height=9, width=11, max_rooms=2, room_min_size=4,\n room_max_size=6, random_seed=12345)\n', (2501, 2591), False, 'import labmaze\n'), ((5186, 5297), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'height': '(51)', 'width': '(31)', 'max_rooms': '(5)', 'room_min_size': '(10)', 'room_max_size': '(20)', 'random_seed': '(12345)'}), '(height=51, width=31, max_rooms=5, room_min_size=10,\n room_max_size=20, random_seed=12345)\n', (5204, 5297), False, 'import labmaze\n'), ((5409, 5417), 'six.moves.range', 'range', (['(5)'], {}), '(5)\n', (5414, 5417), False, 'from six.moves import range\n'), ((6020, 6129), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'height': '(17)', 'width': '(17)', 'max_rooms': '(9)', 'room_min_size': '(3)', 'room_max_size': '(3)', 'random_seed': '(12345)'}), '(height=17, width=17, max_rooms=9, room_min_size=3,\n room_max_size=3, random_seed=12345)\n', (6038, 6129), False, 'import labmaze\n'), ((2126, 2155), 'labmaze.TextGrid', 'labmaze.TextGrid', (['actual_maze'], {}), '(actual_maze)\n', (2142, 2155), False, 'import labmaze\n'), ((2393, 2430), 'labmaze.TextGrid', 'labmaze.TextGrid', (['expected_variations'], {}), '(expected_variations)\n', (2409, 2430), False, 'import labmaze\n'), ((5771, 5803), 'copy.deepcopy', 'copy.deepcopy', (['maze.entity_layer'], {}), '(maze.entity_layer)\n', (5784, 5803), False, 'import copy\n'), ((5827, 5863), 'copy.deepcopy', 'copy.deepcopy', (['maze.variations_layer'], {}), '(maze.variations_layer)\n', (5840, 5863), False, 'import copy\n'), ((11253, 11283), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'height': '(2.5)'}), '(height=2.5)\n', (11271, 11283), False, 'import labmaze\n'), ((11356, 11385), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'height': '(-3)'}), '(height=-3)\n', (11374, 11385), False, 'import labmaze\n'), ((11453, 11481), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'height': '(4)'}), '(height=4)\n', (11471, 11481), False, 'import labmaze\n'), ((11552, 11582), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'width': '(1.25)'}), '(width=1.25)\n', (11570, 11582), False, 'import labmaze\n'), ((11654, 11682), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'width': '(-5)'}), '(width=-5)\n', (11672, 11682), False, 'import labmaze\n'), ((11749, 11776), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'width': '(2)'}), '(width=2)\n', (11767, 11776), False, 'import labmaze\n'), ((11855, 11892), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'room_min_size': '(3.3)'}), '(room_min_size=3.3)\n', (11873, 11892), False, 'import labmaze\n'), ((11972, 12008), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'room_min_size': '(-1)'}), '(room_min_size=-1)\n', (11990, 12008), False, 'import labmaze\n'), ((12087, 12124), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'room_max_size': '(4.4)'}), '(room_max_size=4.4)\n', (12105, 12124), False, 'import labmaze\n'), ((12204, 12240), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'room_max_size': '(-2)'}), '(room_max_size=-2)\n', (12222, 12240), False, 'import labmaze\n'), ((12357, 12409), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'room_min_size': '(4)', 'room_max_size': '(3)'}), '(room_min_size=4, room_max_size=3)\n', (12375, 12409), False, 'import labmaze\n'), ((12486, 12521), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'retry_count': '(5.4)'}), '(retry_count=5.4)\n', (12504, 12521), False, 'import labmaze\n'), ((12599, 12633), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'retry_count': '(-7)'}), '(retry_count=-7)\n', (12617, 12633), False, 'import labmaze\n'), ((12748, 12800), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'extra_connection_probability': '(1.1)'}), '(extra_connection_probability=1.1)\n', (12766, 12800), False, 'import labmaze\n'), ((12880, 12918), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'max_variations': '(6.7)'}), '(max_variations=6.7)\n', (12898, 12918), False, 'import labmaze\n'), ((13016, 13053), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'max_variations': '(27)'}), '(max_variations=27)\n', (13034, 13053), False, 'import labmaze\n'), ((13139, 13176), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'spawn_token': '"""foo"""'}), "(spawn_token='foo')\n", (13157, 13176), False, 'import labmaze\n'), ((13263, 13301), 'labmaze.RandomMaze', 'labmaze.RandomMaze', ([], {'object_token': '"""bar"""'}), "(object_token='bar')\n", (13281, 13301), False, 'import labmaze\n'), ((5565, 5602), 'numpy.any', 'np.any', (['(maze.entity_layer != old_maze)'], {}), '(maze.entity_layer != old_maze)\n', (5571, 5602), True, 'import numpy as np\n'), ((5705, 5752), 'numpy.any', 'np.any', (['(maze.variations_layer != old_variations)'], {}), '(maze.variations_layer != old_variations)\n', (5711, 5752), True, 'import numpy as np\n')] |
'''
Multiprocessor Spatial Microbial GA
<NAME>
July, 2018
'''
import random
import time
import numpy as np
from pathos.multiprocessing import ProcessPool
_pool = None
class MicrobialSearch():
def __init__(self, evol_params): #generations, pop_size, genotype_size, recomb_prob, mutation_variance, num_processes):
'''
Initialize evolutionary search
ARGS:
evol_params: dict
required keys -
pop_size: int - population size,
genotype_size: int - genotype_size,
fitness_function: function - a user-defined function that takes a genotype as arg and returns a float fitness value
mutation_variance: float - variance of the gaussian distribution used for mutation noise
recomb_prob: float between [0,1] -- proportion of genotype transfected from winner to loser
num_processes: int - pool size for multiprocessing.pool.Pool - defaults to os.cpu_count()
'''
# check for required keys
required_keys = ['pop_size','genotype_size','fitness_function','recomb_prob','mutation_variance','num_processes']
for key in required_keys:
if key not in evol_params.keys():
raise Exception('Argument evol_params does not contain the following required key: '+key)
# checked for all required keys
self.pop_size = evol_params['pop_size']
self.genotype_size = evol_params['genotype_size']
self.fitness_function = evol_params['fitness_function']
self.mutation_variance = evol_params['mutation_variance']
self.recomb_prob = evol_params['recomb_prob']
self.num_processes = evol_params['num_processes']
# validating fitness function
assert self.fitness_function,"Invalid fitness_function"
rand_genotype = np.random.rand(self.genotype_size)
rand_genotype_fitness = self.fitness_function(rand_genotype)
assert type(rand_genotype_fitness) == type(0.) or type(rand_genotype_fitness) in np.sctypes['float'],\
"Invalid return type for fitness_function. Should be float or np.dtype('np.float*')"
# Search parameters
self.group_size = int(self.pop_size/3)
# Keep track of individuals to be mutated
self.mutlist = np.zeros((self.group_size), dtype=int)
self.mutfit = np.zeros((self.group_size))
# Creating the global process pool to be used across all generations
global _pool
_pool = ProcessPool(self.num_processes)
time.sleep(0.5)
# check for fitness function kwargs
if 'fitness_args' in evol_params.keys():
optional_args = evol_params['fitness_args']
assert len(optional_args) == 1 or len(optional_args) == pop_size,\
"fitness args should be length 1 or pop_size."
self.optional_args = optional_args
else:
self.optional_args = None
# Create population and evaluate everyone once
self.pop = np.random.random((self.pop_size,self.genotype_size))
self.fitness = np.asarray(_pool.map(self.evaluate_fitness,np.arange(self.pop_size)))
def evaluate_fitness(self,individual_index):
'''
Call user defined fitness function and pass genotype
'''
if self.optional_args:
if len(self.optional_args) == 1:
return self.fitness_function(self.pop[individual_index,:], self.optional_args[0])
else:
return self.fitness_function(self.pop[individual_index,:], self.optional_args[individual_index])
else:
return self.fitness_function(self.pop[individual_index,:])
def step_generation(self):
'''
evaluate fitness and step on generation
'''
global _pool
# Perform tournament for every individual in population
for j in range(3):
k = 0
for a in range(j,self.pop_size-2,3):
# Step 1: Pick 2nd individual as left or right hand side neighbor of first
b = (a+random.choice([-1,1]))%self.pop_size
# Step 2: Compare their fitness
if (self.fitness[a] > self.fitness[b]):
winner = a
loser = b
else:
winner = b
loser = a
# Step 3: Transfect loser with winner
for l in range(self.genotype_size):
if (random.random() < self.recomb_prob):
self.pop[loser][l] = self.pop[winner][l]
# Step 4: Mutate loser
m = np.random.normal(0.0, self.mutation_variance, self.genotype_size)
self.pop[loser] = np.clip(np.add(self.pop[loser],m),0.0,1.0)
# Step 5: Add to mutated list (which will be re-evaluated)
self.mutlist[k]=loser
k+=1
# Step 6: Recalculate fitness of list of mutated losers
self.mutfit = list(_pool.map(self.evaluate_fitness, self.mutlist))
for k in range(self.group_size):
self.fitness[self.mutlist[k]] = self.mutfit[k]
def execute_search(self, num_gens):
'''
runs the evolutionary algorithm for given number of generations, num_gens
'''
for _ in range(num_gens):
self.step_generation()
def get_fitnesses(self):
'''
simply return all fitness values of current population
'''
return self.fitness
def get_best_individual(self):
'''
returns 1D array of the genotype that has max fitness
'''
return self.pop[np.argmax(self.fitness),:]
def get_best_individual_fitness(self):
'''
return the fitness value of the best individual
'''
return np.max(self.fitness)
def get_mean_fitness(self):
'''
returns the mean fitness of the population
'''
return np.mean(self.fitness)
def get_fitness_variance(self):
'''
returns variance of the population's fitness
'''
return np.std(self.fitness)**2
| [
"pathos.multiprocessing.ProcessPool",
"numpy.argmax",
"numpy.std",
"numpy.zeros",
"random.choice",
"time.sleep",
"random.random",
"numpy.max",
"numpy.random.random",
"numpy.mean",
"numpy.arange",
"numpy.random.normal",
"numpy.random.rand",
"numpy.add"
] | [((1861, 1895), 'numpy.random.rand', 'np.random.rand', (['self.genotype_size'], {}), '(self.genotype_size)\n', (1875, 1895), True, 'import numpy as np\n'), ((2328, 2364), 'numpy.zeros', 'np.zeros', (['self.group_size'], {'dtype': 'int'}), '(self.group_size, dtype=int)\n', (2336, 2364), True, 'import numpy as np\n'), ((2389, 2414), 'numpy.zeros', 'np.zeros', (['self.group_size'], {}), '(self.group_size)\n', (2397, 2414), True, 'import numpy as np\n'), ((2532, 2563), 'pathos.multiprocessing.ProcessPool', 'ProcessPool', (['self.num_processes'], {}), '(self.num_processes)\n', (2543, 2563), False, 'from pathos.multiprocessing import ProcessPool\n'), ((2572, 2587), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2582, 2587), False, 'import time\n'), ((3058, 3111), 'numpy.random.random', 'np.random.random', (['(self.pop_size, self.genotype_size)'], {}), '((self.pop_size, self.genotype_size))\n', (3074, 3111), True, 'import numpy as np\n'), ((5907, 5927), 'numpy.max', 'np.max', (['self.fitness'], {}), '(self.fitness)\n', (5913, 5927), True, 'import numpy as np\n'), ((6051, 6072), 'numpy.mean', 'np.mean', (['self.fitness'], {}), '(self.fitness)\n', (6058, 6072), True, 'import numpy as np\n'), ((6202, 6222), 'numpy.std', 'np.std', (['self.fitness'], {}), '(self.fitness)\n', (6208, 6222), True, 'import numpy as np\n'), ((3177, 3201), 'numpy.arange', 'np.arange', (['self.pop_size'], {}), '(self.pop_size)\n', (3186, 3201), True, 'import numpy as np\n'), ((4702, 4767), 'numpy.random.normal', 'np.random.normal', (['(0.0)', 'self.mutation_variance', 'self.genotype_size'], {}), '(0.0, self.mutation_variance, self.genotype_size)\n', (4718, 4767), True, 'import numpy as np\n'), ((5741, 5764), 'numpy.argmax', 'np.argmax', (['self.fitness'], {}), '(self.fitness)\n', (5750, 5764), True, 'import numpy as np\n'), ((4810, 4836), 'numpy.add', 'np.add', (['self.pop[loser]', 'm'], {}), '(self.pop[loser], m)\n', (4816, 4836), True, 'import numpy as np\n'), ((4126, 4148), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (4139, 4148), False, 'import random\n'), ((4541, 4556), 'random.random', 'random.random', ([], {}), '()\n', (4554, 4556), False, 'import random\n')] |
import numpy as np
import cv2
def get_var(img1, img2, opt_flow):
img1 = img1[0].permute(1,2,0).cpu().numpy()
img2 = img2[0].permute(1,2,0).cpu().numpy()
img1 = np.float32(img1)
img2 = np.float32(img2)
fx = cv2.Sobel(img1,ddepth=cv2.CV_32F,dx=1,dy=0,ksize=-1)
fy = cv2.Sobel(img1,ddepth=cv2.CV_32F,dx=0,dy=1,ksize=-1)
fx = cv2.convertScaleAbs(fx)
fy = cv2.convertScaleAbs(fy)
kernel_1 = np.array([[1,1],[1,1]])
kernel_2 = np.array([[-1,-1],[-1,-1]])
f1 = cv2.filter2D(img1,ddepth=-1,kernel=kernel_2)
f2 = cv2.filter2D(img2,ddepth=-1,kernel=kernel_1)
ft = f1 + f2
flow = [opt_flow[:,:,0], opt_flow[:,:,1]]
I = [fx, fy, ft]
return flow, I | [
"cv2.filter2D",
"numpy.float32",
"numpy.array",
"cv2.convertScaleAbs",
"cv2.Sobel"
] | [((174, 190), 'numpy.float32', 'np.float32', (['img1'], {}), '(img1)\n', (184, 190), True, 'import numpy as np\n'), ((202, 218), 'numpy.float32', 'np.float32', (['img2'], {}), '(img2)\n', (212, 218), True, 'import numpy as np\n'), ((228, 284), 'cv2.Sobel', 'cv2.Sobel', (['img1'], {'ddepth': 'cv2.CV_32F', 'dx': '(1)', 'dy': '(0)', 'ksize': '(-1)'}), '(img1, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\n', (237, 284), False, 'import cv2\n'), ((290, 346), 'cv2.Sobel', 'cv2.Sobel', (['img1'], {'ddepth': 'cv2.CV_32F', 'dx': '(0)', 'dy': '(1)', 'ksize': '(-1)'}), '(img1, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)\n', (299, 346), False, 'import cv2\n'), ((352, 375), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['fx'], {}), '(fx)\n', (371, 375), False, 'import cv2\n'), ((385, 408), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['fy'], {}), '(fy)\n', (404, 408), False, 'import cv2\n'), ((424, 450), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (432, 450), True, 'import numpy as np\n'), ((463, 493), 'numpy.array', 'np.array', (['[[-1, -1], [-1, -1]]'], {}), '([[-1, -1], [-1, -1]])\n', (471, 493), True, 'import numpy as np\n'), ((500, 546), 'cv2.filter2D', 'cv2.filter2D', (['img1'], {'ddepth': '(-1)', 'kernel': 'kernel_2'}), '(img1, ddepth=-1, kernel=kernel_2)\n', (512, 546), False, 'import cv2\n'), ((554, 600), 'cv2.filter2D', 'cv2.filter2D', (['img2'], {'ddepth': '(-1)', 'kernel': 'kernel_1'}), '(img2, ddepth=-1, kernel=kernel_1)\n', (566, 600), False, 'import cv2\n')] |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
import pytest
from .. import TikaExtractor
def input_bytes():
with open('cats_are_awesome.pdf', 'rb') as pdf:
i_bytes = pdf.read()
return i_bytes
@pytest.mark.parametrize('uri, buffer', [
(np.stack(['cats_are_awesome.pdf', 'cats_are_awesome.pdf']), [None, None]),
([None, None], np.stack([input_bytes(), input_bytes()]))
])
def test_extraction(uri, buffer):
tika_extractor = TikaExtractor()
crafted_docs = tika_extractor.craft(uri, buffer)
assert len(crafted_docs) == 2
for crafted_doc in crafted_docs:
assert len(crafted_doc['text']) > 20
@pytest.mark.parametrize('uri, buffer', [
('cats_are_awesome.pdf', None),
(None, input_bytes())
])
def test_extraction_single(uri, buffer):
tika_extractor = TikaExtractor()
crafted_doc = tika_extractor.craft(uri, buffer)
assert len(crafted_doc['text']) > 20
@pytest.mark.parametrize('uri, buffer', [
('cats_are_awesome.pdf', None),
(None, input_bytes())
])
def test_extraction_single(uri, buffer):
tika_extractor = TikaExtractor()
crafted_doc = tika_extractor.craft(uri=uri, buffer=buffer)
assert len(crafted_doc['text']) > 20
| [
"numpy.stack"
] | [((336, 394), 'numpy.stack', 'np.stack', (["['cats_are_awesome.pdf', 'cats_are_awesome.pdf']"], {}), "(['cats_are_awesome.pdf', 'cats_are_awesome.pdf'])\n", (344, 394), True, 'import numpy as np\n')] |
"""Test for kaccum.py."""
import numpy as np
from phono3py.cui.kaccum import KappaDOS, _get_mfp
from phono3py.phonon.grid import get_ir_grid_points
kappados_si = [
-0.0000002,
0.0000000,
0.0000000,
1.6966400,
2.1977566,
5.1814323,
3.3932803,
25.8022392,
15.5096766,
5.0899206,
56.6994259,
19.4995156,
6.7865608,
68.7759426,
3.2465477,
8.4832011,
72.8398965,
1.6583881,
10.1798413,
74.8143686,
0.7945952,
11.8764816,
77.2489625,
5.4385183,
13.5731219,
80.9162245,
0.5998735,
15.2697621,
81.4303646,
0.0000000,
]
mfpdos_si = [
0.0000000,
0.0000000,
0.0000000,
806.8089241,
33.7703552,
0.0225548,
1613.6178483,
45.0137786,
0.0103479,
2420.4267724,
53.3456168,
0.0106724,
3227.2356966,
62.4915811,
0.0107850,
4034.0446207,
69.8839011,
0.0075919,
4840.8535449,
74.8662085,
0.0049228,
5647.6624690,
78.2273252,
0.0035758,
6454.4713932,
80.5493065,
0.0020836,
7261.2803173,
81.4303646,
0.0000000,
]
gammados_si = [
-0.0000002,
0.0000000,
0.0000000,
1.6966400,
0.0000063,
0.0000149,
3.3932803,
0.0004133,
0.0012312,
5.0899206,
0.0071709,
0.0057356,
6.7865608,
0.0099381,
0.0006492,
8.4832011,
0.0133390,
0.0049604,
10.1798413,
0.0394030,
0.0198106,
11.8764816,
0.0495160,
0.0044113,
13.5731219,
0.0560223,
0.0050103,
15.2697621,
0.1300596,
0.0000000,
]
kappados_nacl = [
-0.0000002,
0.0000000,
0.0000000,
0.8051732,
0.0366488,
0.1820668,
1.6103466,
0.7748514,
1.5172957,
2.4155199,
2.0165794,
2.0077744,
3.2206933,
4.6670801,
2.8357892,
4.0258667,
6.6123781,
32.8560281,
4.8310401,
7.7105916,
0.6136893,
5.6362134,
7.9112790,
0.2391300,
6.4413868,
8.0272187,
0.0604842,
7.2465602,
8.0430831,
0.0000000,
]
mfpdos_nacl = [
0.0000000,
0.0000000,
0.0000000,
117.4892903,
3.1983595,
0.0266514,
234.9785806,
5.7974129,
0.0153383,
352.4678709,
7.2012603,
0.0075057,
469.9571612,
7.5964440,
0.0017477,
587.4464515,
7.7823291,
0.0013915,
704.9357418,
7.9195460,
0.0009363,
822.4250321,
8.0024702,
0.0004844,
939.9143223,
8.0375053,
0.0001382,
1057.4036126,
8.0430831,
0.0000000,
]
gammados_nacl = [
-0.0000002,
0.0000000,
0.0000000,
0.8051732,
0.0000822,
0.0004081,
1.6103466,
0.0018975,
0.0053389,
2.4155199,
0.0114668,
0.0182495,
3.2206933,
0.0353621,
0.0329440,
4.0258667,
0.0604996,
0.1138884,
4.8310401,
0.1038315,
0.0716216,
5.6362134,
0.1481243,
0.0468421,
6.4413868,
0.1982823,
0.0662494,
7.2465602,
0.2429551,
0.0000000,
]
def test_kappados_si(si_pbesol):
"""Test KappaDOS class with Si.
* 3x3 tensor vs frequency
* scalar vs frequency
* kappa vs mean free path
"""
ph3 = si_pbesol
ph3.mesh_numbers = [7, 7, 7]
ph3.init_phph_interaction()
ph3.run_thermal_conductivity(
temperatures=[
300,
]
)
tc = ph3.thermal_conductivity
freq_points_in = np.array(kappados_si).reshape(-1, 3)[:, 0]
freq_points, kdos = _calculate_kappados(
ph3, tc.mode_kappa[0], freq_points=freq_points_in
)
for f, (jval, ival) in zip(freq_points, kdos):
print("%.7f, %.7f, %.7f," % (f, jval, ival))
np.testing.assert_allclose(
kappados_si, np.vstack((freq_points, kdos.T)).T.ravel(), rtol=0, atol=0.5
)
freq_points, kdos = _calculate_kappados(
ph3, tc.gamma[0, :, :, :, None], freq_points=freq_points_in
)
np.testing.assert_allclose(
gammados_si, np.vstack((freq_points, kdos.T)).T.ravel(), rtol=0, atol=0.5
)
mfp_points_in = np.array(mfpdos_si).reshape(-1, 3)[:, 0]
mfp_points, mfpdos = _calculate_mfpdos(ph3, mfp_points_in)
# for f, (jval, ival) in zip(freq_points, mfpdos):
# print("%.7f, %.7f, %.7f," % (f, jval, ival))
np.testing.assert_allclose(
mfpdos_si, np.vstack((mfp_points, mfpdos.T)).T.ravel(), rtol=0, atol=0.5
)
def test_kappados_nacl(nacl_pbe):
"""Test KappaDOS class with NaCl.
* 3x3 tensor vs frequency
* scalar vs frequency
* kappa vs mean free path
"""
ph3 = nacl_pbe
ph3.mesh_numbers = [7, 7, 7]
ph3.init_phph_interaction()
ph3.run_thermal_conductivity(
temperatures=[
300,
]
)
tc = ph3.thermal_conductivity
freq_points_in = np.array(kappados_nacl).reshape(-1, 3)[:, 0]
freq_points, kdos = _calculate_kappados(
ph3, tc.mode_kappa[0], freq_points=freq_points_in
)
# for f, (jval, ival) in zip(freq_points, kdos):
# print("%.7f, %.7f, %.7f," % (f, jval, ival))
np.testing.assert_allclose(
kappados_nacl, np.vstack((freq_points, kdos.T)).T.ravel(), rtol=0, atol=0.5
)
freq_points, kdos = _calculate_kappados(
ph3, tc.gamma[0, :, :, :, None], freq_points=freq_points_in
)
np.testing.assert_allclose(
gammados_nacl, np.vstack((freq_points, kdos.T)).T.ravel(), rtol=0, atol=0.5
)
mfp_points_in = np.array(mfpdos_nacl).reshape(-1, 3)[:, 0]
mfp_points, mfpdos = _calculate_mfpdos(ph3, mfp_points_in)
# for f, (jval, ival) in zip(freq_points, mfpdos):
# print("%.7f, %.7f, %.7f," % (f, jval, ival))
np.testing.assert_allclose(
mfpdos_nacl, np.vstack((mfp_points, mfpdos.T)).T.ravel(), rtol=0, atol=0.5
)
def _calculate_kappados(ph3, mode_prop, freq_points=None):
tc = ph3.thermal_conductivity
bz_grid = ph3.grid
frequencies, _, _ = ph3.get_phonon_data()
kappados = KappaDOS(
mode_prop, frequencies, bz_grid, tc.grid_points, frequency_points=freq_points
)
freq_points, kdos = kappados.get_kdos()
ir_grid_points, _, ir_grid_map = get_ir_grid_points(bz_grid)
kappados = KappaDOS(
mode_prop,
tc.frequencies,
bz_grid,
tc.grid_points,
ir_grid_map=ir_grid_map,
frequency_points=freq_points,
)
ir_freq_points, ir_kdos = kappados.get_kdos()
np.testing.assert_equal(bz_grid.bzg2grg[tc.grid_points], ir_grid_points)
np.testing.assert_allclose(ir_freq_points, freq_points, rtol=0, atol=1e-5)
np.testing.assert_allclose(ir_kdos, kdos, rtol=0, atol=1e-5)
return freq_points, kdos[0, :, :, 0]
def _calculate_mfpdos(ph3, mfp_points=None):
tc = ph3.thermal_conductivity
bz_grid = ph3.grid
mean_freepath = _get_mfp(tc.gamma[0], tc.group_velocities)
_, _, ir_grid_map = get_ir_grid_points(bz_grid)
mfpdos = KappaDOS(
tc.mode_kappa[0],
mean_freepath[0],
bz_grid,
tc.grid_points,
ir_grid_map=ir_grid_map,
frequency_points=mfp_points,
num_sampling_points=10,
)
freq_points, kdos = mfpdos.get_kdos()
return freq_points, kdos[0, :, :, 0]
| [
"phono3py.phonon.grid.get_ir_grid_points",
"phono3py.cui.kaccum._get_mfp",
"phono3py.cui.kaccum.KappaDOS",
"numpy.array",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"numpy.vstack"
] | [((5966, 6058), 'phono3py.cui.kaccum.KappaDOS', 'KappaDOS', (['mode_prop', 'frequencies', 'bz_grid', 'tc.grid_points'], {'frequency_points': 'freq_points'}), '(mode_prop, frequencies, bz_grid, tc.grid_points, frequency_points=\n freq_points)\n', (5974, 6058), False, 'from phono3py.cui.kaccum import KappaDOS, _get_mfp\n'), ((6150, 6177), 'phono3py.phonon.grid.get_ir_grid_points', 'get_ir_grid_points', (['bz_grid'], {}), '(bz_grid)\n', (6168, 6177), False, 'from phono3py.phonon.grid import get_ir_grid_points\n'), ((6193, 6313), 'phono3py.cui.kaccum.KappaDOS', 'KappaDOS', (['mode_prop', 'tc.frequencies', 'bz_grid', 'tc.grid_points'], {'ir_grid_map': 'ir_grid_map', 'frequency_points': 'freq_points'}), '(mode_prop, tc.frequencies, bz_grid, tc.grid_points, ir_grid_map=\n ir_grid_map, frequency_points=freq_points)\n', (6201, 6313), False, 'from phono3py.cui.kaccum import KappaDOS, _get_mfp\n'), ((6418, 6490), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['bz_grid.bzg2grg[tc.grid_points]', 'ir_grid_points'], {}), '(bz_grid.bzg2grg[tc.grid_points], ir_grid_points)\n', (6441, 6490), True, 'import numpy as np\n'), ((6495, 6570), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ir_freq_points', 'freq_points'], {'rtol': '(0)', 'atol': '(1e-05)'}), '(ir_freq_points, freq_points, rtol=0, atol=1e-05)\n', (6521, 6570), True, 'import numpy as np\n'), ((6574, 6635), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ir_kdos', 'kdos'], {'rtol': '(0)', 'atol': '(1e-05)'}), '(ir_kdos, kdos, rtol=0, atol=1e-05)\n', (6600, 6635), True, 'import numpy as np\n'), ((6800, 6842), 'phono3py.cui.kaccum._get_mfp', '_get_mfp', (['tc.gamma[0]', 'tc.group_velocities'], {}), '(tc.gamma[0], tc.group_velocities)\n', (6808, 6842), False, 'from phono3py.cui.kaccum import KappaDOS, _get_mfp\n'), ((6867, 6894), 'phono3py.phonon.grid.get_ir_grid_points', 'get_ir_grid_points', (['bz_grid'], {}), '(bz_grid)\n', (6885, 6894), False, 'from phono3py.phonon.grid import get_ir_grid_points\n'), ((6908, 7063), 'phono3py.cui.kaccum.KappaDOS', 'KappaDOS', (['tc.mode_kappa[0]', 'mean_freepath[0]', 'bz_grid', 'tc.grid_points'], {'ir_grid_map': 'ir_grid_map', 'frequency_points': 'mfp_points', 'num_sampling_points': '(10)'}), '(tc.mode_kappa[0], mean_freepath[0], bz_grid, tc.grid_points,\n ir_grid_map=ir_grid_map, frequency_points=mfp_points,\n num_sampling_points=10)\n', (6916, 7063), False, 'from phono3py.cui.kaccum import KappaDOS, _get_mfp\n'), ((3434, 3455), 'numpy.array', 'np.array', (['kappados_si'], {}), '(kappados_si)\n', (3442, 3455), True, 'import numpy as np\n'), ((4071, 4090), 'numpy.array', 'np.array', (['mfpdos_si'], {}), '(mfpdos_si)\n', (4079, 4090), True, 'import numpy as np\n'), ((4803, 4826), 'numpy.array', 'np.array', (['kappados_nacl'], {}), '(kappados_nacl)\n', (4811, 4826), True, 'import numpy as np\n'), ((5450, 5471), 'numpy.array', 'np.array', (['mfpdos_nacl'], {}), '(mfpdos_nacl)\n', (5458, 5471), True, 'import numpy as np\n'), ((3743, 3775), 'numpy.vstack', 'np.vstack', (['(freq_points, kdos.T)'], {}), '((freq_points, kdos.T))\n', (3752, 3775), True, 'import numpy as np\n'), ((3983, 4015), 'numpy.vstack', 'np.vstack', (['(freq_points, kdos.T)'], {}), '((freq_points, kdos.T))\n', (3992, 4015), True, 'import numpy as np\n'), ((4336, 4369), 'numpy.vstack', 'np.vstack', (['(mfp_points, mfpdos.T)'], {}), '((mfp_points, mfpdos.T))\n', (4345, 4369), True, 'import numpy as np\n'), ((5120, 5152), 'numpy.vstack', 'np.vstack', (['(freq_points, kdos.T)'], {}), '((freq_points, kdos.T))\n', (5129, 5152), True, 'import numpy as np\n'), ((5362, 5394), 'numpy.vstack', 'np.vstack', (['(freq_points, kdos.T)'], {}), '((freq_points, kdos.T))\n', (5371, 5394), True, 'import numpy as np\n'), ((5719, 5752), 'numpy.vstack', 'np.vstack', (['(mfp_points, mfpdos.T)'], {}), '((mfp_points, mfpdos.T))\n', (5728, 5752), True, 'import numpy as np\n')] |
"""
Utilities for downloading and unpacking the SVHN dataset
"""
import os
import sys
import tarfile
from six.moves import urllib
import numpy as np
import scipy.io
URL = 'http://ufldl.stanford.edu/housenumbers/{}_32x32.mat'
def maybe_download_and_extract(data_dir, subset):
url = URL.format(subset)
filename = url.split('/')[-1]
filepath = os.path.join(data_dir, filename)
if not os.path.exists(filepath):
if not os.path.exists(data_dir):
os.makedirs(data_dir)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
def unpickle(file):
fo = open(file, 'rb')
if (sys.version_info >= (3, 0)):
import pickle
d = pickle.load(fo, encoding='latin1')
else:
import cPickle
d = cPickle.load(fo)
fo.close()
return {'x': d['data'].reshape((10000,3,32,32)), 'y': np.array(d['labels']).astype(np.uint8)}
def load(data_dir, subset='train'):
maybe_download_and_extract(data_dir, subset)
if subset=='train':
train_data = scipy.io.loadmat(os.path.join(data_dir, 'train_32x32.mat'))
trainx = train_data['X'].transpose((3, 2, 0, 1))
trainy = train_data['y'].squeeze(1)
return trainx, trainy
elif subset=='test':
test_data = scipy.io.loadmat(os.path.join(data_dir, 'test_32x32.mat'))
testx = test_data['X'].transpose((3, 2, 0, 1))
testy = test_data['y'].squeeze(1)
return testx, testy
else:
raise NotImplementedError('subset should be either train or test')
class DataLoader(object):
""" an object that generates batches of SVHN data for training """
def __init__(self, data_dir, subset, batch_size, rng=None, shuffle=False, return_labels=False):
"""
- data_dir is location where to store files
- subset is train|test
- batch_size is int, of #examples to load at once
- rng is np.random.RandomState object for reproducibility
"""
self.data_dir = data_dir
self.batch_size = batch_size
self.shuffle = shuffle
self.return_labels = return_labels
# create temporary storage for the data, if not yet created
if not os.path.exists(data_dir):
print('creating folder', data_dir)
os.makedirs(data_dir)
# load SVHN training data to RAM
self.data, self.labels = load(data_dir, subset=subset)
self.data = np.transpose(self.data, (0,2,3,1)) # (N,3,32,32) -> (N,32,32,3)
self.p = 0 # pointer to where we are in iteration
self.rng = np.random.RandomState(1) if rng is None else rng
def get_observation_size(self):
return self.data.shape[1:]
def get_num_labels(self):
return np.amax(self.labels) + 1
def reset(self):
self.p = 0
def __iter__(self):
return self
def __next__(self, n=None):
""" n is the number of examples to fetch """
if n is None: n = self.batch_size
# on first iteration lazily permute all data
if self.p == 0 and self.shuffle:
inds = self.rng.permutation(self.data.shape[0])
self.data = self.data[inds]
self.labels = self.labels[inds]
# on last iteration reset the counter and raise StopIteration
if self.p + n > self.data.shape[0]:
self.reset() # reset for next time we get called
raise StopIteration
# on intermediate iterations fetch the next batch
x = self.data[self.p : self.p + n]
y = self.labels[self.p : self.p + n]
self.p += self.batch_size
if self.return_labels:
return x,y
else:
return x
next = __next__ # Python 2 compatibility (https://stackoverflow.com/questions/29578469/how-to-make-an-object-both-a-python2-and-python3-iterator)
| [
"os.makedirs",
"os.stat",
"os.path.exists",
"cPickle.load",
"numpy.transpose",
"numpy.random.RandomState",
"numpy.amax",
"pickle.load",
"sys.stdout.flush",
"six.moves.urllib.request.urlretrieve",
"numpy.array",
"os.path.join"
] | [((372, 404), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (384, 404), False, 'import os\n'), ((417, 441), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (431, 441), False, 'import os\n'), ((775, 827), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'filepath', '_progress'], {}), '(url, filepath, _progress)\n', (801, 827), False, 'from six.moves import urllib\n'), ((865, 882), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (872, 882), False, 'import os\n'), ((1087, 1121), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""latin1"""'}), "(fo, encoding='latin1')\n", (1098, 1121), False, 'import pickle\n'), ((1170, 1186), 'cPickle.load', 'cPickle.load', (['fo'], {}), '(fo)\n', (1182, 1186), False, 'import cPickle\n'), ((2871, 2908), 'numpy.transpose', 'np.transpose', (['self.data', '(0, 2, 3, 1)'], {}), '(self.data, (0, 2, 3, 1))\n', (2883, 2908), True, 'import numpy as np\n'), ((459, 483), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (473, 483), False, 'import os\n'), ((498, 519), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (509, 519), False, 'import os\n'), ((733, 751), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (749, 751), False, 'import sys\n'), ((1455, 1496), 'os.path.join', 'os.path.join', (['data_dir', '"""train_32x32.mat"""'], {}), "(data_dir, 'train_32x32.mat')\n", (1467, 1496), False, 'import os\n'), ((2633, 2657), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2647, 2657), False, 'import os\n'), ((2720, 2741), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (2731, 2741), False, 'import os\n'), ((3024, 3048), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (3045, 3048), True, 'import numpy as np\n'), ((3197, 3217), 'numpy.amax', 'np.amax', (['self.labels'], {}), '(self.labels)\n', (3204, 3217), True, 'import numpy as np\n'), ((1262, 1283), 'numpy.array', 'np.array', (["d['labels']"], {}), "(d['labels'])\n", (1270, 1283), True, 'import numpy as np\n'), ((1696, 1736), 'os.path.join', 'os.path.join', (['data_dir', '"""test_32x32.mat"""'], {}), "(data_dir, 'test_32x32.mat')\n", (1708, 1736), False, 'import os\n')] |
"""
World module.
"""
from . import polity, terrain, period, default_parameters
from .community import Community, DIRECTIONS, LittoralNeighbour
from numpy import sqrt
from numpy.random import random, permutation
import yaml
_START_YEAR = -1500
_YEARS_PER_STEP = 2
class World(object):
"""
World class, a container for all polities, communities and methods relating
to them.
Args:
xdim (int): The x dimension of the world in communities.
ydim (int): The y dimension of the world in communities.
communities (list[Community]): The list of communities in the world.
This is a one dimensional list. Communities are arranged by their
coordinates in the list in a column-major fashion, _i.e._ [(0,0),
(0,1), (0,2)].
params (Parameters, default=guard.default_paramters): The simulation
parameter set to use.
Attributes:
xdim (int): The x dimension of the world in communities.
ydim (int): The y dimension of the world in communities.
params (Parameters): The simulation parameter set.
step_number (int): The current step number.
tiles (list[Community]): A list of communities in the world.
polities (list[Polity]): A list of polities in the world.
"""
def __init__(self, xdim, ydim, communities, params=default_parameters):
self.params = params
self.xdim = xdim
self.ydim = ydim
self.total_tiles = xdim*ydim
self.tiles = communities
# Initialise neighbours and littoral neighbours
self.set_neighbours()
if params.sea_attacks:
self.set_littoral_tiles()
self.set_littoral_neighbours()
# Each agricultural tile is its own polity, set step number to zero
self.reset()
def __str__(self):
string = 'World:\n'
string += '\t- Tiles: {0}\n'.format(self.total_tiles)
string += '\t- Dimensions: {0}x{1}\n'.format(self.xdim, self.ydim)
string += '\t- Number of polities: {0}'.format(
self.number_of_polities())
return string
def number_of_polities(self):
"""
Calculate the number of polities in the world.
Returns:
(int): The number of polities.
"""
return len(self.polities)
def index(self, x, y):
"""
Return the tile at coordinates (x,y).
Returns:
(Community): The community at coordinate (x,y).
(None): If there is no such tile.
"""
if any([x < 0, x >= self.xdim, y < 0, y >= self.ydim]):
return None
return self.tiles[self._index(x, y)]
def _index(self, x, y):
"""
Return the position in the tiles list of the tile at coordinates
(x,y).
"""
return x + y*self.xdim
def year(self):
"""
Return the current year.
Returns:
(int): The current year. Years BC are negative.
"""
return self.step_number*_YEARS_PER_STEP + _START_YEAR
def sea_attack_distance(self):
"""
Determine maximum sea attack distance at current step.
Returns:
(float): The maximum sea attack distance.
"""
return (self.params.base_sea_attack_distance
+ self.step_number * self.params.sea_attack_increment)
def set_neighbours(self):
"""
Assign tiles their neighbours.
"""
for x in range(self.xdim):
for y in range(self.ydim):
tile = self.index(x, y)
tile.position = (x, y)
tile.neighbours['left'] = self.index(x-1, y)
tile.neighbours['right'] = self.index(x+1, y)
tile.neighbours['up'] = self.index(x, y+1)
tile.neighbours['down'] = self.index(x, y-1)
def set_littoral_tiles(self):
"""
Assign littoral tiles the littoral flag.
"""
for tile in self.tiles:
# Don't set littoral status for sea or desert tiles
if not tile.terrain.polity_forming:
continue
for direction in DIRECTIONS:
neighbour = tile.neighbours[direction]
# Ensure there is a neighour
if neighbour is None:
continue
# Check if neighbour is a sea tile
if neighbour.terrain is terrain.sea:
tile.littoral = True
# Break here as only one neighbour needs to be sea for tile
# to be littoral
break
def set_littoral_neighbours(self):
"""
Assign littoral tiles their lists of littoral neighbours.
"""
littoral_tiles = [tile for tile in self.tiles if tile.littoral is True]
n_littoral = len(littoral_tiles)
for tile in littoral_tiles:
# Add self as a littoral neighbour with 0 distance, this is
# important in order to reproduce Turchin's results
tile.littoral_neighbours.append(LittoralNeighbour(tile, 0))
for i in range(n_littoral-1):
itile = littoral_tiles[i]
for j in range(i+1, n_littoral):
jtile = littoral_tiles[j]
# Calculate euclidean distance between tiles in tile dimension
# units
distance = sqrt((itile.position[0]-jtile.position[0])**2 +
(itile.position[1]-jtile.position[1])**2)
# Add neighbour and the symmetric entry
itile.littoral_neighbours.append(
LittoralNeighbour(jtile, distance))
jtile.littoral_neighbours.append(
LittoralNeighbour(itile, distance))
@classmethod
def from_file(cls, yaml_file, params=default_parameters):
"""
Read a world from a YAML file.
Args:
yaml_file (str): Path to the file containing a YAML definition of
the world.
params (Parameters, default=guard.default_paramters): The
simulation parameter set.
Returns:
(World): The world object specified by the YAML file
Raises:
(MissingYamlKey): Raised if a required key is not present in the
YAML file.
"""
# Parse YAML file
with open(yaml_file, 'r') as infile:
world_data = yaml.load(infile, Loader=yaml.FullLoader)
try:
xdim = world_data['xdim']
except KeyError:
raise MissingYamlKey('xdim', yaml_file)
try:
ydim = world_data['ydim']
except KeyError:
raise MissingYamlKey('ydim', yaml_file)
# Determine total number of tiles and assign list
total_communities = xdim*ydim
communities = [None]*total_communities
# Enter world data into tiles list
try:
community_data = world_data['communities']
except KeyError:
raise MissingYamlKey('communities', yaml_file)
for community in community_data:
x, y = community['x'], community['y']
assert community['terrain'] in ['agriculture', 'steppe',
'desert', 'sea']
if community['terrain'] == 'agriculture':
landscape = terrain.agriculture
elif community['terrain'] == 'steppe':
landscape = terrain.steppe
elif community['terrain'] == 'desert':
landscape = terrain.desert
elif community['terrain'] == 'sea':
landscape = terrain.sea
if landscape.polity_forming:
elevation = community['elevation'] / 1000.
agricultural_period = community['activeFrom']
if agricultural_period == 'agri1':
active_from = period.agri1
elif agricultural_period == 'agri2':
active_from = period.agri2
elif agricultural_period == 'agri3':
active_from = period.agri3
communities[x + y*xdim] = Community(params, landscape,
elevation, active_from)
else:
communities[x + y*xdim] = Community(params, landscape)
return cls(xdim, ydim, communities, params)
def reset(self):
"""
Reset the world by returning all polities to single communities and
setting the step number to 0.
"""
self.step_number = 0
self.polities = [polity.Polity([tile])
for tile in self.tiles if tile.terrain.polity_forming]
def cultural_shift(self):
"""
Attempt cultural shift in all communities.
"""
for tile in self.tiles:
if tile.terrain.polity_forming:
tile.cultural_shift(self.params)
def disintegration(self):
"""
Attempt disintegration of all polities
"""
new_states = []
for state in self.polities:
# Skip single community polities
if state.size() == 1:
continue
if state.disintegrate_probability(self.params) > random():
# Create a new set of polities, one for each of the communities
new_states += state.disintegrate()
# Delete the now empy polities
self.prune_empty_polities()
# Append new polities from disintegrated old polities to list
self.polities += new_states
def attack(self, callback=None):
"""
Attempt an attack from all communities.
Args:
callback (function, default=None): A callback function invoked if
an attack is successful. Used to record attack events.
"""
# Generate a random order for communities to attempt attacks in
attack_order = permutation(self.total_tiles)
for tile_no in attack_order:
tile = self.tiles[tile_no]
if tile.can_attack(self.step_number):
tile.attempt_attack(self.params, self.step_number,
self.sea_attack_distance(), callback)
self.prune_empty_polities()
def prune_empty_polities(self):
"""
Prune polities with zero communities.
"""
self.polities = [state for state in self.polities
if state.size() != 0]
def step(self, attack_callback=None):
"""
Conduct a simulation step
Args:
attack_callback (function, default=None): A callback function
invoked if an attack is successful. Used to record attack
events.
"""
# Attacks
self.attack(attack_callback)
# Cultural shift
self.cultural_shift()
# Disintegration
self.disintegration()
# Increment step counter
self.step_number += 1
class MissingYamlKey(Exception):
"""
Exception raised when a necessary key is missing from the world YAML file.
"""
def __init__(self, key, filename):
super().__init__(
'Required key "{}" missing from the world definition'
' file "{}".'.format(key, filename)
)
| [
"numpy.random.permutation",
"yaml.load",
"numpy.random.random",
"numpy.sqrt"
] | [((10083, 10112), 'numpy.random.permutation', 'permutation', (['self.total_tiles'], {}), '(self.total_tiles)\n', (10094, 10112), False, 'from numpy.random import random, permutation\n'), ((6526, 6567), 'yaml.load', 'yaml.load', (['infile'], {'Loader': 'yaml.FullLoader'}), '(infile, Loader=yaml.FullLoader)\n', (6535, 6567), False, 'import yaml\n'), ((5460, 5561), 'numpy.sqrt', 'sqrt', (['((itile.position[0] - jtile.position[0]) ** 2 + (itile.position[1] - jtile.\n position[1]) ** 2)'], {}), '((itile.position[0] - jtile.position[0]) ** 2 + (itile.position[1] -\n jtile.position[1]) ** 2)\n', (5464, 5561), False, 'from numpy import sqrt\n'), ((9390, 9398), 'numpy.random.random', 'random', ([], {}), '()\n', (9396, 9398), False, 'from numpy.random import random, permutation\n')] |
'''
Calculates the next time step for the state of the nodes
'''
import numpy as np
from sys import exit
'''
The state attribute in every node defines the last state of the node. At the dictionary activityTimeLine we have the
evolution of the values for the state over time.
'''
def states_update(g, t, delta = 0.2):
combination_functions_list = ['id', 'sum', 'ssum', 'norsum', 'adnorsum', 'slogistic', 'alogistic'] # , 'adalogistic'
g_new = g.copy()
# Updating the state of each node in the graph
for vrtx in g.nodes():
aggimpact = 0
sum_weights = 0
# Calculate the agregated impact from each neighbor's weight.
for pred in g.predecessors(vrtx):
#connect = g.get_edge_data(neigh, node)['weight']
# weight_in = g[pred][vrtx][0]['weight'][t]
weight_in = g.get_edge_data(pred, vrtx)[0]['weight']
#connect = g.get_edge_data(neigh, node).values()[0]['weight']
sum_weights += weight_in
try:
# aggimpact = aggimpact + g.node[pred]['activityTimeLine'][t-1]*weight_in
aggimpact = aggimpact + g.node[pred]['state']*weight_in
except:
print(t, pred)
exit
#extract vertex attributes from nx graph
speed_factor = g.node[vrtx]['speed_factor']
# Defining aggimpact ['id', 'sum', 'ssum', 'norsum', 'adnorsum', 'slogistic', 'alogistic', 'adalogistic']
if 'id' in g.node[vrtx] or 'sum' in g.node[vrtx]:
aggimpact = aggimpact
elif 'ssum' in g.node[vrtx]:
# Use scaling_factor
scaling_factor = g.node[vrtx]['scaling_factor']
if scaling_factor == None:
print('Error! Give scaling factor as an input to this function!')
else:
try:
scaling_f = scaling_factor
aggimpact = aggimpact/scaling_f
except:
print('Scaling factor has to be a dictionary!')
print(scaling_factor)
exit(0)
elif 'norsum' in g.node[vrtx]:
# Use normalization_factor
normalizing_factor = g.node[vrtx]['normalizing_factor']
if normalizing_factor == None:
print('Error! Give normalization factor as an input to this function!')
else:
try:
normalizing_f = normalizing_factor
aggimpact = aggimpact / normalizing_f
except:
print('Normalization factor has to be a dictionary!')
print(normalizing_factor)
exit(0)
elif 'adnorsum' in g.node[vrtx]:
aggimpact = aggimpact / sum_weights
elif 'slogistic' in g.node[vrtx]:
steepness = g.node[vrtx]['steepness']
threshold = g.node[vrtx]['threshold']
if steepness == None or threshold == None:
print('Steepness and threshold should be passed to the function for slogistic!')
exit(0)
try:
steep = steepness[vrtx]
thres = threshold[vrtx]
except:
print('Dictionary is not built with the right keys!')
exit(0)
aggimpact = 1 / (1 + np.exp(-steep * (aggimpact - thres)))
elif 'alogistic' in g.node[vrtx]:
steepness = g.node[vrtx]['steepness']
threshold = g.node[vrtx]['threshold']
if steepness == None or threshold == None:
print('Steepness and threshold should be passed to the function for alogistic!')
exit(0)
try:
steep = steepness
thres = threshold
except:
print('Dictionary is not built with the right keys (alogistic)!')
exit(0)
aggimpact = ((1 / (1 + np.exp(-steep * (aggimpact - thres)))) - (1 / (1 + np.exp(steep * thres)))) * (1 + np.exp(-steep * thres))
else:
print('Your combination function is not in the possible list of functions:', combination_functions_list)
exit(0)
if aggimpact > 0:
# new_state = store_states(i, step-1) + update_s * (aggimpact - store_states(i, step-1)); %calculate the new state value
old_activity = g.node[vrtx]['state']
new_activity = old_activity + speed_factor * (aggimpact - old_activity) * delta
try:
new_activity = np.asscalar(new_activity) # for multiple predecessors
except:
new_activity= new_activity
g_new.node[vrtx]['activityTimeLine'].update({t: new_activity}) #works
g_new.node[vrtx]['state'] = new_activity # works #ROUND ADDED
else:
try:
current_state = np.asscalar(g.node[vrtx]['state'])
except:
current_state = g.node[vrtx]['state']
g_new.node[vrtx]['activityTimeLine'].update({t: current_state})
return g_new
'''please note: ad(vanced)a(dapative)logistic function is not supported in the present version of states_update.py .
In order to use the adalogistic formula, modify the vertices attributes accordingly (or manually
fill in the parametres when calling the function) and add the following code @ l. 100 (after other elif's)'''
# elif combination_function == 'adalogistic':
# if steepness == None or threshold == None:
# print('Steepness and threshold should be passed to the function for adalogistic!')
# exit(0)
# try:
# steep = steepness[vrtx]
# thres = threshold[vrtx]
# except:
# print('Dictionary is not built with the right keys (adalogistic)!')
# exit(0)
# aggimpact = ((1 / (1 + np.exp(-steep * (aggimpact - thres * sum_weights)))) - (1 / (1 + np.exp(steep * thres)))) * (1 + np.exp(-steep * thres)) | [
"numpy.asscalar",
"numpy.exp",
"sys.exit"
] | [((4624, 4649), 'numpy.asscalar', 'np.asscalar', (['new_activity'], {}), '(new_activity)\n', (4635, 4649), True, 'import numpy as np\n'), ((4990, 5024), 'numpy.asscalar', 'np.asscalar', (["g.node[vrtx]['state']"], {}), "(g.node[vrtx]['state'])\n", (5001, 5024), True, 'import numpy as np\n'), ((2125, 2132), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2129, 2132), False, 'from sys import exit\n'), ((2727, 2734), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2731, 2734), False, 'from sys import exit\n'), ((3136, 3143), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (3140, 3143), False, 'from sys import exit\n'), ((4254, 4261), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (4258, 4261), False, 'from sys import exit\n'), ((3347, 3354), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (3351, 3354), False, 'from sys import exit\n'), ((3388, 3424), 'numpy.exp', 'np.exp', (['(-steep * (aggimpact - thres))'], {}), '(-steep * (aggimpact - thres))\n', (3394, 3424), True, 'import numpy as np\n'), ((3737, 3744), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (3741, 3744), False, 'from sys import exit\n'), ((3948, 3955), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (3952, 3955), False, 'from sys import exit\n'), ((4074, 4096), 'numpy.exp', 'np.exp', (['(-steep * thres)'], {}), '(-steep * thres)\n', (4080, 4096), True, 'import numpy as np\n'), ((3991, 4027), 'numpy.exp', 'np.exp', (['(-steep * (aggimpact - thres))'], {}), '(-steep * (aggimpact - thres))\n', (3997, 4027), True, 'import numpy as np\n'), ((4042, 4063), 'numpy.exp', 'np.exp', (['(steep * thres)'], {}), '(steep * thres)\n', (4048, 4063), True, 'import numpy as np\n')] |
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS
# file. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
from toast_planck.reproc_modules.destriping import Destriper
from toast.mpi import MPI
from toast.tests.mpi import MPITestCase
import numpy as np
class DestriperTest(MPITestCase):
def setUp(self):
self.disable = True
if self.disable:
return
self.npix = 100
self.destriper = Destriper(self.npix, MPI.COMM_WORLD, cglimit=1e-10,
itermax=100)
self.verbose = False
def test_destripe(self):
if self.disable:
return
ninterval = 10
leninterval = 1000
pixels = []
toi = []
flags = []
for i in range(ninterval):
pixels.append(np.arange(leninterval, dtype=np.int32) % self.npix)
toi.append(np.ones(leninterval, dtype=np.float64) * i
+ np.random.randn(leninterval))
flags.append(np.arange(leninterval, dtype=np.int) % 10 < 5)
print('RMS before destriping = {}'.format(np.std(np.hstack(toi))))
self.destriper.destripe(toi, flags, pixels, verbose=self.verbose,
in_place=False)
print('RMS after destriping 1/2 = {}'.format(np.std(np.hstack(toi))))
self.destriper.destripe(toi, flags, pixels, verbose=False,
in_place=True)
print('RMS after destriping 2/2 = {}'.format(np.std(np.hstack(toi))))
return
def test_destripe_with_templates(self):
if self.disable:
return
ninterval = 10
leninterval = 1000
pixels = []
toi = []
flags = []
templates = []
for i in range(ninterval):
pixels.append(np.arange(leninterval, dtype=np.int32) % self.npix)
toi.append(np.ones(leninterval, dtype=np.float64) * i
+ np.random.randn(leninterval))
atemplate = np.random.randn(leninterval)
btemplate = np.random.randn(leninterval)
toi[-1] += atemplate + btemplate
templates.append([atemplate, btemplate])
flags.append(np.arange(leninterval, dtype=np.int) % 10 < 5)
print('RMS before destriping = {}'.format(np.std(np.hstack(toi))))
self.destriper.destripe(toi, flags, pixels, verbose=self.verbose,
in_place=False, templates=templates)
print('RMS after destriping 1/2 = {}'.format(np.std(np.hstack(toi))))
self.destriper.destripe(toi, flags, pixels, verbose=self.verbose,
in_place=True, templates=templates)
print('RMS after destriping 2/2 = {}'.format(np.std(np.hstack(toi))))
return
| [
"toast_planck.reproc_modules.destriping.Destriper",
"numpy.random.randn",
"numpy.ones",
"numpy.hstack",
"numpy.arange"
] | [((522, 586), 'toast_planck.reproc_modules.destriping.Destriper', 'Destriper', (['self.npix', 'MPI.COMM_WORLD'], {'cglimit': '(1e-10)', 'itermax': '(100)'}), '(self.npix, MPI.COMM_WORLD, cglimit=1e-10, itermax=100)\n', (531, 586), False, 'from toast_planck.reproc_modules.destriping import Destriper\n'), ((2103, 2131), 'numpy.random.randn', 'np.random.randn', (['leninterval'], {}), '(leninterval)\n', (2118, 2131), True, 'import numpy as np\n'), ((2156, 2184), 'numpy.random.randn', 'np.random.randn', (['leninterval'], {}), '(leninterval)\n', (2171, 2184), True, 'import numpy as np\n'), ((893, 931), 'numpy.arange', 'np.arange', (['leninterval'], {'dtype': 'np.int32'}), '(leninterval, dtype=np.int32)\n', (902, 931), True, 'import numpy as np\n'), ((1036, 1064), 'numpy.random.randn', 'np.random.randn', (['leninterval'], {}), '(leninterval)\n', (1051, 1064), True, 'import numpy as np\n'), ((1196, 1210), 'numpy.hstack', 'np.hstack', (['toi'], {}), '(toi)\n', (1205, 1210), True, 'import numpy as np\n'), ((1398, 1412), 'numpy.hstack', 'np.hstack', (['toi'], {}), '(toi)\n', (1407, 1412), True, 'import numpy as np\n'), ((1592, 1606), 'numpy.hstack', 'np.hstack', (['toi'], {}), '(toi)\n', (1601, 1606), True, 'import numpy as np\n'), ((1906, 1944), 'numpy.arange', 'np.arange', (['leninterval'], {'dtype': 'np.int32'}), '(leninterval, dtype=np.int32)\n', (1915, 1944), True, 'import numpy as np\n'), ((2049, 2077), 'numpy.random.randn', 'np.random.randn', (['leninterval'], {}), '(leninterval)\n', (2064, 2077), True, 'import numpy as np\n'), ((2413, 2427), 'numpy.hstack', 'np.hstack', (['toi'], {}), '(toi)\n', (2422, 2427), True, 'import numpy as np\n'), ((2636, 2650), 'numpy.hstack', 'np.hstack', (['toi'], {}), '(toi)\n', (2645, 2650), True, 'import numpy as np\n'), ((2858, 2872), 'numpy.hstack', 'np.hstack', (['toi'], {}), '(toi)\n', (2867, 2872), True, 'import numpy as np\n'), ((968, 1006), 'numpy.ones', 'np.ones', (['leninterval'], {'dtype': 'np.float64'}), '(leninterval, dtype=np.float64)\n', (975, 1006), True, 'import numpy as np\n'), ((1091, 1127), 'numpy.arange', 'np.arange', (['leninterval'], {'dtype': 'np.int'}), '(leninterval, dtype=np.int)\n', (1100, 1127), True, 'import numpy as np\n'), ((1981, 2019), 'numpy.ones', 'np.ones', (['leninterval'], {'dtype': 'np.float64'}), '(leninterval, dtype=np.float64)\n', (1988, 2019), True, 'import numpy as np\n'), ((2308, 2344), 'numpy.arange', 'np.arange', (['leninterval'], {'dtype': 'np.int'}), '(leninterval, dtype=np.int)\n', (2317, 2344), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for trainer.py.
"""
import shutil
import tempfile
from absl import flags
from absl.testing import absltest
from flax import nn
from flax import optim as optimizers
from init2winit import utils
from jax import test_util as jtu
import numpy as np
FLAGS = flags.FLAGS
class TrainingMetricsTest(jtu.JaxTestCase):
"""Tests the logged statistics from training_metrics_grabber."""
def setUp(self):
super(TrainingMetricsTest, self).setUp()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
super(TrainingMetricsTest, self).tearDown()
def test_grad_var(self):
model_size = 10
example_grads = [{
'layer1': np.ones(model_size),
'layer2': 3 * np.ones(model_size)
}, {
'layer1': 2 * np.ones(model_size),
'layer2': np.ones(model_size)
}]
eval_config = {'ema_beta': 0.5}
training_metrics_grabber = utils.TrainingMetricsGrabber.create(
example_grads[0], eval_config)
# For the purposes of this test, we create fake optimizers to satisfy
# metrics grabber API.
fake_model = nn.Model(None, example_grads[0])
new_optimizer = optimizers.GradientDescent(
learning_rate=None).create(fake_model)
old_optimizer = optimizers.GradientDescent(
learning_rate=None).create(fake_model)
for grad in example_grads:
training_metrics_grabber = training_metrics_grabber.update(
grad, old_optimizer, new_optimizer)
for layer in ['layer1', 'layer2']:
expected_grad_ema = 1 / 4 * np.zeros(model_size) + 1 / 4 * example_grads[
0][layer] + 1 / 2 * example_grads[1][layer]
self.assertArraysAllClose(expected_grad_ema,
training_metrics_grabber.state[layer].grad_ema)
if __name__ == '__main__':
absltest.main()
| [
"absl.testing.absltest.main",
"init2winit.utils.TrainingMetricsGrabber.create",
"flax.optim.GradientDescent",
"numpy.ones",
"numpy.zeros",
"tempfile.mkdtemp",
"shutil.rmtree",
"flax.nn.Model"
] | [((2421, 2436), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2434, 2436), False, 'from absl.testing import absltest\n'), ((1084, 1102), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1100, 1102), False, 'import tempfile\n'), ((1130, 1158), 'shutil.rmtree', 'shutil.rmtree', (['self.test_dir'], {}), '(self.test_dir)\n', (1143, 1158), False, 'import shutil\n'), ((1523, 1589), 'init2winit.utils.TrainingMetricsGrabber.create', 'utils.TrainingMetricsGrabber.create', (['example_grads[0]', 'eval_config'], {}), '(example_grads[0], eval_config)\n', (1558, 1589), False, 'from init2winit import utils\n'), ((1718, 1750), 'flax.nn.Model', 'nn.Model', (['None', 'example_grads[0]'], {}), '(None, example_grads[0])\n', (1726, 1750), False, 'from flax import nn\n'), ((1296, 1315), 'numpy.ones', 'np.ones', (['model_size'], {}), '(model_size)\n', (1303, 1315), True, 'import numpy as np\n'), ((1429, 1448), 'numpy.ones', 'np.ones', (['model_size'], {}), '(model_size)\n', (1436, 1448), True, 'import numpy as np\n'), ((1771, 1817), 'flax.optim.GradientDescent', 'optimizers.GradientDescent', ([], {'learning_rate': 'None'}), '(learning_rate=None)\n', (1797, 1817), True, 'from flax import optim as optimizers\n'), ((1866, 1912), 'flax.optim.GradientDescent', 'optimizers.GradientDescent', ([], {'learning_rate': 'None'}), '(learning_rate=None)\n', (1892, 1912), True, 'from flax import optim as optimizers\n'), ((1339, 1358), 'numpy.ones', 'np.ones', (['model_size'], {}), '(model_size)\n', (1346, 1358), True, 'import numpy as np\n'), ((1390, 1409), 'numpy.ones', 'np.ones', (['model_size'], {}), '(model_size)\n', (1397, 1409), True, 'import numpy as np\n'), ((2159, 2179), 'numpy.zeros', 'np.zeros', (['model_size'], {}), '(model_size)\n', (2167, 2179), True, 'import numpy as np\n')] |
import pytest
import tensorflow as tf
from edflow.nn import tf_nn as nn
def test_int_shape():
tf.enable_eager_execution()
a = tf.ones((1, 2, 3, 4))
a_shape = nn.int_shape(a)
assert type(a_shape) is list
def test_partwise_conv2d():
from matplotlib import pyplot as plt
from skimage import data
import numpy as np
im = data.astronaut()
im = im.astype(np.float32) / 255
H, W, D = im.shape
b = 1
parts = 5
out_features = 1
features = tf.reshape(im, (b, H, W, 1, D))
features = tf.concat([features] * parts, axis=3)
out = nn.partwise_conv2d(
features, out_features, init=False, part_wise=True, initdist="debug"
)
# fig, ax = plt.subplots(1, 1, figsize=(20, 20))
# a = np.hstack([np.squeeze(out[..., p, :]) for p in range(parts)])
# fig.tight_layout()
# ax.imshow(a, cmap=plt.cm.gray) # this should render the astronaut in 5 different shades of gray
coefficients = np.linspace(0, 1, parts)
assert np.allclose(out[..., 0, :], np.zeros_like(out[..., 0, :]))
assert np.allclose(out[..., -1, :] * coefficients[-2], out[..., -2, :])
def test_conv2d():
from skimage import data
import numpy as np
im = data.astronaut()
im = im.astype(np.float32) / 255
H, W, D = im.shape
b = 1
x = tf.reshape(im, (b, H, W, D))
out = nn.conv2d(x, 128)
assert out.shape == (1, 512, 512, 128)
def test_dense():
x = tf.ones((1, 100), dtype=tf.float32)
out = nn.dense(x, 512)
assert out.shape == (1, 512)
def test_upsample():
from skimage import data
import numpy as np
im = data.astronaut()
im = im.astype(np.float32) / 255
H, W, D = im.shape
b = 1
x = tf.reshape(im, (b, H, W, D))
out = nn.upsample(x, 3, method="conv_transposed")
assert out.shape == (1, 1024, 1024, 3)
out = nn.upsample(x, 3, method="subpixel")
assert out.shape == (1, 1024, 1024, 3)
out = nn.upsample(x, 3, method="nearest_neighbor")
assert out.shape == (1, 1024, 1024, 3)
out = nn.upsample(x, 3, method="linear")
assert out.shape == (1, 1024, 1024, 3)
def test_mask2rgb():
import numpy as np
m = tf.random_normal((1, 512, 512, 10))
mask = tf.nn.softmax(m, axis=-1)
rgb_mask = nn.mask2rgb(mask)
assert rgb_mask.shape == (1, 512, 512, 3)
mask_np = np.array(mask)
rgb_mask_numpy = nn.np_mask2rgb(mask_np)
assert np.allclose(rgb_mask, rgb_mask_numpy)
def test_blobs():
from matplotlib import pyplot as plt
tf.enable_eager_execution()
import numpy as np
import tensorflow.contrib.distributions as tfd
_means = [-0.5, 0, 0.5]
means = tf.ones((3, 1, 2), dtype=tf.float32) * np.array(_means).reshape((3, 1, 1))
means = tf.concat([means, means, means[::-1, ...]], axis=1)
means = tf.reshape(means, (-1, 2))
var_ = 0.1
rho = 0.5
cov = [[var_, rho * var_], [rho * var_, var_]]
scale = tf.cholesky(cov)
scale = tf.stack([scale] * 3, axis=0)
scale = tf.stack([scale] * 3, axis=0)
scale = tf.reshape(scale, (-1, 2, 2))
mvn = tfd.MultivariateNormalTriL(loc=means, scale_tril=scale)
h = 100
w = 100
y_t = tf.tile(tf.reshape(tf.linspace(-1.0, 1.0, h), [h, 1]), [1, w])
x_t = tf.tile(tf.reshape(tf.linspace(-1.0, 1.0, w), [1, w]), [h, 1])
y_t = tf.expand_dims(y_t, axis=-1)
x_t = tf.expand_dims(x_t, axis=-1)
meshgrid = tf.concat([y_t, x_t], axis=-1)
meshgrid = tf.expand_dims(meshgrid, 0)
meshgrid = tf.expand_dims(meshgrid, 3) # 1, h, w, 1, 2
blob = mvn.prob(meshgrid)
blob = tf.reshape(blob, (100, 100, 3, 3))
blob = tf.transpose(blob, perm=[2, 0, 1, 3])
norm_const = np.sum(blob, axis=(1, 2), keepdims=True)
mu, L = nn.probs_to_mu_L(blob / norm_const, 1, inv=False)
bn, h, w, nk = blob.get_shape().as_list()
estimated_blob = nn.tf_hm(h, w, mu, L)
fig, ax = plt.subplots(2, 3, figsize=(9, 6))
for b in range(len(_means)):
ax[0, b].imshow(np.squeeze(blob[b, ...]))
ax[0, b].set_title("target_blobs")
ax[0, b].set_axis_off()
for b in range(len(_means)):
ax[1, b].imshow(np.squeeze(estimated_blob[b, ...]))
ax[1, b].set_title("estimated_blobs")
ax[1, b].set_axis_off()
def test_probs_to_mu_sigma():
# TODO: right now, the test is only visual debugging
# We would need a numeric criterion to test if the calculation is correct
from matplotlib import pyplot as plt
tf.enable_eager_execution()
import numpy as np
import tensorflow.contrib.distributions as tfd
_means = [-0.5, 0, 0.5]
means = tf.ones((3, 1, 2), dtype=tf.float32) * np.array(_means).reshape((3, 1, 1))
means = tf.concat([means, means, means[::-1, ...]], axis=1)
means = tf.reshape(means, (-1, 2))
var_ = 0.1
rho = 0.0
cov = [[var_, rho * var_], [rho * var_, var_]]
scale = tf.cholesky(cov)
scale = tf.stack([scale] * 3, axis=0)
scale = tf.stack([scale] * 3, axis=0)
scale = tf.reshape(scale, (-1, 2, 2))
mvn = tfd.MultivariateNormalTriL(loc=means, scale_tril=scale)
h = 100
w = 100
y_t = tf.tile(tf.reshape(tf.linspace(-1.0, 1.0, h), [h, 1]), [1, w])
x_t = tf.tile(tf.reshape(tf.linspace(-1.0, 1.0, w), [1, w]), [h, 1])
y_t = tf.expand_dims(y_t, axis=-1)
x_t = tf.expand_dims(x_t, axis=-1)
meshgrid = tf.concat([y_t, x_t], axis=-1)
meshgrid = tf.expand_dims(meshgrid, 0)
meshgrid = tf.expand_dims(meshgrid, 3) # 1, h, w, 1, 2
blob = mvn.prob(meshgrid)
blob = tf.reshape(blob, (100, 100, 3, 3))
blob = tf.transpose(blob, perm=[2, 0, 1, 3])
norm_const = np.sum(blob, axis=(1, 2), keepdims=True)
mu, sigma = nn.probs_to_mu_sigma(blob / norm_const)
# norm_const2 = np.sum(blob, axis=(1, 2), keepdims=False)
# mu2, sigma2 = nn.probs_to_mu_sigma(blob, 1 / norm_const2)
#
# assert np.allclose(mu, mu2, rtol=1e-4, atol=1e-4)
# assert np.allclose(sigma, sigma2, rtol=1e-4, atol=1e-4)
L = tf.cholesky(sigma)
bn, h, w, nk = blob.get_shape().as_list()
estimated_blob = nn.tf_hm(h, w, mu, L)
fig, ax = plt.subplots(2, 3, figsize=(9, 6))
for b in range(len(_means)):
ax[0, b].imshow(np.squeeze(blob[b, ...]))
ax[0, b].set_title("target_blobs")
ax[0, b].set_axis_off()
for b in range(len(_means)):
ax[1, b].imshow(np.squeeze(estimated_blob[b, ...]))
ax[1, b].set_title("estimated_blobs")
ax[1, b].set_axis_off()
plt.show()
| [
"numpy.sum",
"edflow.nn.tf_nn.probs_to_mu_sigma",
"skimage.data.astronaut",
"tensorflow.reshape",
"numpy.allclose",
"edflow.nn.tf_nn.conv2d",
"tensorflow.contrib.distributions.MultivariateNormalTriL",
"tensorflow.cholesky",
"tensorflow.nn.softmax",
"numpy.zeros_like",
"edflow.nn.tf_nn.np_mask2rg... | [((101, 128), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (126, 128), True, 'import tensorflow as tf\n'), ((137, 158), 'tensorflow.ones', 'tf.ones', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (144, 158), True, 'import tensorflow as tf\n'), ((173, 188), 'edflow.nn.tf_nn.int_shape', 'nn.int_shape', (['a'], {}), '(a)\n', (185, 188), True, 'from edflow.nn import tf_nn as nn\n'), ((355, 371), 'skimage.data.astronaut', 'data.astronaut', ([], {}), '()\n', (369, 371), False, 'from skimage import data\n'), ((493, 524), 'tensorflow.reshape', 'tf.reshape', (['im', '(b, H, W, 1, D)'], {}), '(im, (b, H, W, 1, D))\n', (503, 524), True, 'import tensorflow as tf\n'), ((540, 577), 'tensorflow.concat', 'tf.concat', (['([features] * parts)'], {'axis': '(3)'}), '([features] * parts, axis=3)\n', (549, 577), True, 'import tensorflow as tf\n'), ((589, 681), 'edflow.nn.tf_nn.partwise_conv2d', 'nn.partwise_conv2d', (['features', 'out_features'], {'init': '(False)', 'part_wise': '(True)', 'initdist': '"""debug"""'}), "(features, out_features, init=False, part_wise=True,\n initdist='debug')\n", (607, 681), True, 'from edflow.nn import tf_nn as nn\n'), ((964, 988), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'parts'], {}), '(0, 1, parts)\n', (975, 988), True, 'import numpy as np\n'), ((1070, 1134), 'numpy.allclose', 'np.allclose', (['(out[..., -1, :] * coefficients[-2])', 'out[..., -2, :]'], {}), '(out[..., -1, :] * coefficients[-2], out[..., -2, :])\n', (1081, 1134), True, 'import numpy as np\n'), ((1218, 1234), 'skimage.data.astronaut', 'data.astronaut', ([], {}), '()\n', (1232, 1234), False, 'from skimage import data\n'), ((1314, 1342), 'tensorflow.reshape', 'tf.reshape', (['im', '(b, H, W, D)'], {}), '(im, (b, H, W, D))\n', (1324, 1342), True, 'import tensorflow as tf\n'), ((1354, 1371), 'edflow.nn.tf_nn.conv2d', 'nn.conv2d', (['x', '(128)'], {}), '(x, 128)\n', (1363, 1371), True, 'from edflow.nn import tf_nn as nn\n'), ((1444, 1479), 'tensorflow.ones', 'tf.ones', (['(1, 100)'], {'dtype': 'tf.float32'}), '((1, 100), dtype=tf.float32)\n', (1451, 1479), True, 'import tensorflow as tf\n'), ((1490, 1506), 'edflow.nn.tf_nn.dense', 'nn.dense', (['x', '(512)'], {}), '(x, 512)\n', (1498, 1506), True, 'from edflow.nn import tf_nn as nn\n'), ((1625, 1641), 'skimage.data.astronaut', 'data.astronaut', ([], {}), '()\n', (1639, 1641), False, 'from skimage import data\n'), ((1721, 1749), 'tensorflow.reshape', 'tf.reshape', (['im', '(b, H, W, D)'], {}), '(im, (b, H, W, D))\n', (1731, 1749), True, 'import tensorflow as tf\n'), ((1761, 1804), 'edflow.nn.tf_nn.upsample', 'nn.upsample', (['x', '(3)'], {'method': '"""conv_transposed"""'}), "(x, 3, method='conv_transposed')\n", (1772, 1804), True, 'from edflow.nn import tf_nn as nn\n'), ((1858, 1894), 'edflow.nn.tf_nn.upsample', 'nn.upsample', (['x', '(3)'], {'method': '"""subpixel"""'}), "(x, 3, method='subpixel')\n", (1869, 1894), True, 'from edflow.nn import tf_nn as nn\n'), ((1948, 1992), 'edflow.nn.tf_nn.upsample', 'nn.upsample', (['x', '(3)'], {'method': '"""nearest_neighbor"""'}), "(x, 3, method='nearest_neighbor')\n", (1959, 1992), True, 'from edflow.nn import tf_nn as nn\n'), ((2046, 2080), 'edflow.nn.tf_nn.upsample', 'nn.upsample', (['x', '(3)'], {'method': '"""linear"""'}), "(x, 3, method='linear')\n", (2057, 2080), True, 'from edflow.nn import tf_nn as nn\n'), ((2179, 2214), 'tensorflow.random_normal', 'tf.random_normal', (['(1, 512, 512, 10)'], {}), '((1, 512, 512, 10))\n', (2195, 2214), True, 'import tensorflow as tf\n'), ((2226, 2251), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['m'], {'axis': '(-1)'}), '(m, axis=-1)\n', (2239, 2251), True, 'import tensorflow as tf\n'), ((2268, 2285), 'edflow.nn.tf_nn.mask2rgb', 'nn.mask2rgb', (['mask'], {}), '(mask)\n', (2279, 2285), True, 'from edflow.nn import tf_nn as nn\n'), ((2347, 2361), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (2355, 2361), True, 'import numpy as np\n'), ((2383, 2406), 'edflow.nn.tf_nn.np_mask2rgb', 'nn.np_mask2rgb', (['mask_np'], {}), '(mask_np)\n', (2397, 2406), True, 'from edflow.nn import tf_nn as nn\n'), ((2418, 2455), 'numpy.allclose', 'np.allclose', (['rgb_mask', 'rgb_mask_numpy'], {}), '(rgb_mask, rgb_mask_numpy)\n', (2429, 2455), True, 'import numpy as np\n'), ((2522, 2549), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (2547, 2549), True, 'import tensorflow as tf\n'), ((2752, 2803), 'tensorflow.concat', 'tf.concat', (['[means, means, means[::-1, ...]]'], {'axis': '(1)'}), '([means, means, means[::-1, ...]], axis=1)\n', (2761, 2803), True, 'import tensorflow as tf\n'), ((2816, 2842), 'tensorflow.reshape', 'tf.reshape', (['means', '(-1, 2)'], {}), '(means, (-1, 2))\n', (2826, 2842), True, 'import tensorflow as tf\n'), ((2936, 2952), 'tensorflow.cholesky', 'tf.cholesky', (['cov'], {}), '(cov)\n', (2947, 2952), True, 'import tensorflow as tf\n'), ((2965, 2994), 'tensorflow.stack', 'tf.stack', (['([scale] * 3)'], {'axis': '(0)'}), '([scale] * 3, axis=0)\n', (2973, 2994), True, 'import tensorflow as tf\n'), ((3007, 3036), 'tensorflow.stack', 'tf.stack', (['([scale] * 3)'], {'axis': '(0)'}), '([scale] * 3, axis=0)\n', (3015, 3036), True, 'import tensorflow as tf\n'), ((3049, 3078), 'tensorflow.reshape', 'tf.reshape', (['scale', '(-1, 2, 2)'], {}), '(scale, (-1, 2, 2))\n', (3059, 3078), True, 'import tensorflow as tf\n'), ((3090, 3145), 'tensorflow.contrib.distributions.MultivariateNormalTriL', 'tfd.MultivariateNormalTriL', ([], {'loc': 'means', 'scale_tril': 'scale'}), '(loc=means, scale_tril=scale)\n', (3116, 3145), True, 'import tensorflow.contrib.distributions as tfd\n'), ((3327, 3355), 'tensorflow.expand_dims', 'tf.expand_dims', (['y_t'], {'axis': '(-1)'}), '(y_t, axis=-1)\n', (3341, 3355), True, 'import tensorflow as tf\n'), ((3366, 3394), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_t'], {'axis': '(-1)'}), '(x_t, axis=-1)\n', (3380, 3394), True, 'import tensorflow as tf\n'), ((3410, 3440), 'tensorflow.concat', 'tf.concat', (['[y_t, x_t]'], {'axis': '(-1)'}), '([y_t, x_t], axis=-1)\n', (3419, 3440), True, 'import tensorflow as tf\n'), ((3456, 3483), 'tensorflow.expand_dims', 'tf.expand_dims', (['meshgrid', '(0)'], {}), '(meshgrid, 0)\n', (3470, 3483), True, 'import tensorflow as tf\n'), ((3499, 3526), 'tensorflow.expand_dims', 'tf.expand_dims', (['meshgrid', '(3)'], {}), '(meshgrid, 3)\n', (3513, 3526), True, 'import tensorflow as tf\n'), ((3586, 3620), 'tensorflow.reshape', 'tf.reshape', (['blob', '(100, 100, 3, 3)'], {}), '(blob, (100, 100, 3, 3))\n', (3596, 3620), True, 'import tensorflow as tf\n'), ((3632, 3669), 'tensorflow.transpose', 'tf.transpose', (['blob'], {'perm': '[2, 0, 1, 3]'}), '(blob, perm=[2, 0, 1, 3])\n', (3644, 3669), True, 'import tensorflow as tf\n'), ((3688, 3728), 'numpy.sum', 'np.sum', (['blob'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(blob, axis=(1, 2), keepdims=True)\n', (3694, 3728), True, 'import numpy as np\n'), ((3741, 3790), 'edflow.nn.tf_nn.probs_to_mu_L', 'nn.probs_to_mu_L', (['(blob / norm_const)', '(1)'], {'inv': '(False)'}), '(blob / norm_const, 1, inv=False)\n', (3757, 3790), True, 'from edflow.nn import tf_nn as nn\n'), ((3859, 3880), 'edflow.nn.tf_nn.tf_hm', 'nn.tf_hm', (['h', 'w', 'mu', 'L'], {}), '(h, w, mu, L)\n', (3867, 3880), True, 'from edflow.nn import tf_nn as nn\n'), ((3896, 3930), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(9, 6)'}), '(2, 3, figsize=(9, 6))\n', (3908, 3930), True, 'from matplotlib import pyplot as plt\n'), ((4474, 4501), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (4499, 4501), True, 'import tensorflow as tf\n'), ((4704, 4755), 'tensorflow.concat', 'tf.concat', (['[means, means, means[::-1, ...]]'], {'axis': '(1)'}), '([means, means, means[::-1, ...]], axis=1)\n', (4713, 4755), True, 'import tensorflow as tf\n'), ((4768, 4794), 'tensorflow.reshape', 'tf.reshape', (['means', '(-1, 2)'], {}), '(means, (-1, 2))\n', (4778, 4794), True, 'import tensorflow as tf\n'), ((4888, 4904), 'tensorflow.cholesky', 'tf.cholesky', (['cov'], {}), '(cov)\n', (4899, 4904), True, 'import tensorflow as tf\n'), ((4917, 4946), 'tensorflow.stack', 'tf.stack', (['([scale] * 3)'], {'axis': '(0)'}), '([scale] * 3, axis=0)\n', (4925, 4946), True, 'import tensorflow as tf\n'), ((4959, 4988), 'tensorflow.stack', 'tf.stack', (['([scale] * 3)'], {'axis': '(0)'}), '([scale] * 3, axis=0)\n', (4967, 4988), True, 'import tensorflow as tf\n'), ((5001, 5030), 'tensorflow.reshape', 'tf.reshape', (['scale', '(-1, 2, 2)'], {}), '(scale, (-1, 2, 2))\n', (5011, 5030), True, 'import tensorflow as tf\n'), ((5042, 5097), 'tensorflow.contrib.distributions.MultivariateNormalTriL', 'tfd.MultivariateNormalTriL', ([], {'loc': 'means', 'scale_tril': 'scale'}), '(loc=means, scale_tril=scale)\n', (5068, 5097), True, 'import tensorflow.contrib.distributions as tfd\n'), ((5279, 5307), 'tensorflow.expand_dims', 'tf.expand_dims', (['y_t'], {'axis': '(-1)'}), '(y_t, axis=-1)\n', (5293, 5307), True, 'import tensorflow as tf\n'), ((5318, 5346), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_t'], {'axis': '(-1)'}), '(x_t, axis=-1)\n', (5332, 5346), True, 'import tensorflow as tf\n'), ((5362, 5392), 'tensorflow.concat', 'tf.concat', (['[y_t, x_t]'], {'axis': '(-1)'}), '([y_t, x_t], axis=-1)\n', (5371, 5392), True, 'import tensorflow as tf\n'), ((5408, 5435), 'tensorflow.expand_dims', 'tf.expand_dims', (['meshgrid', '(0)'], {}), '(meshgrid, 0)\n', (5422, 5435), True, 'import tensorflow as tf\n'), ((5451, 5478), 'tensorflow.expand_dims', 'tf.expand_dims', (['meshgrid', '(3)'], {}), '(meshgrid, 3)\n', (5465, 5478), True, 'import tensorflow as tf\n'), ((5538, 5572), 'tensorflow.reshape', 'tf.reshape', (['blob', '(100, 100, 3, 3)'], {}), '(blob, (100, 100, 3, 3))\n', (5548, 5572), True, 'import tensorflow as tf\n'), ((5584, 5621), 'tensorflow.transpose', 'tf.transpose', (['blob'], {'perm': '[2, 0, 1, 3]'}), '(blob, perm=[2, 0, 1, 3])\n', (5596, 5621), True, 'import tensorflow as tf\n'), ((5640, 5680), 'numpy.sum', 'np.sum', (['blob'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(blob, axis=(1, 2), keepdims=True)\n', (5646, 5680), True, 'import numpy as np\n'), ((5697, 5736), 'edflow.nn.tf_nn.probs_to_mu_sigma', 'nn.probs_to_mu_sigma', (['(blob / norm_const)'], {}), '(blob / norm_const)\n', (5717, 5736), True, 'from edflow.nn import tf_nn as nn\n'), ((5996, 6014), 'tensorflow.cholesky', 'tf.cholesky', (['sigma'], {}), '(sigma)\n', (6007, 6014), True, 'import tensorflow as tf\n'), ((6082, 6103), 'edflow.nn.tf_nn.tf_hm', 'nn.tf_hm', (['h', 'w', 'mu', 'L'], {}), '(h, w, mu, L)\n', (6090, 6103), True, 'from edflow.nn import tf_nn as nn\n'), ((6119, 6153), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(9, 6)'}), '(2, 3, figsize=(9, 6))\n', (6131, 6153), True, 'from matplotlib import pyplot as plt\n'), ((6488, 6498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6496, 6498), True, 'from matplotlib import pyplot as plt\n'), ((1028, 1057), 'numpy.zeros_like', 'np.zeros_like', (['out[..., 0, :]'], {}), '(out[..., 0, :])\n', (1041, 1057), True, 'import numpy as np\n'), ((2665, 2701), 'tensorflow.ones', 'tf.ones', (['(3, 1, 2)'], {'dtype': 'tf.float32'}), '((3, 1, 2), dtype=tf.float32)\n', (2672, 2701), True, 'import tensorflow as tf\n'), ((4617, 4653), 'tensorflow.ones', 'tf.ones', (['(3, 1, 2)'], {'dtype': 'tf.float32'}), '((3, 1, 2), dtype=tf.float32)\n', (4624, 4653), True, 'import tensorflow as tf\n'), ((3200, 3225), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.0)', 'h'], {}), '(-1.0, 1.0, h)\n', (3211, 3225), True, 'import tensorflow as tf\n'), ((3273, 3298), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.0)', 'w'], {}), '(-1.0, 1.0, w)\n', (3284, 3298), True, 'import tensorflow as tf\n'), ((3988, 4012), 'numpy.squeeze', 'np.squeeze', (['blob[b, ...]'], {}), '(blob[b, ...])\n', (3998, 4012), True, 'import numpy as np\n'), ((4147, 4181), 'numpy.squeeze', 'np.squeeze', (['estimated_blob[b, ...]'], {}), '(estimated_blob[b, ...])\n', (4157, 4181), True, 'import numpy as np\n'), ((5152, 5177), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.0)', 'h'], {}), '(-1.0, 1.0, h)\n', (5163, 5177), True, 'import tensorflow as tf\n'), ((5225, 5250), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.0)', 'w'], {}), '(-1.0, 1.0, w)\n', (5236, 5250), True, 'import tensorflow as tf\n'), ((6211, 6235), 'numpy.squeeze', 'np.squeeze', (['blob[b, ...]'], {}), '(blob[b, ...])\n', (6221, 6235), True, 'import numpy as np\n'), ((6370, 6404), 'numpy.squeeze', 'np.squeeze', (['estimated_blob[b, ...]'], {}), '(estimated_blob[b, ...])\n', (6380, 6404), True, 'import numpy as np\n'), ((2704, 2720), 'numpy.array', 'np.array', (['_means'], {}), '(_means)\n', (2712, 2720), True, 'import numpy as np\n'), ((4656, 4672), 'numpy.array', 'np.array', (['_means'], {}), '(_means)\n', (4664, 4672), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import numpy as np
from pathlib import Path
from utils_cv.common.image import (
im_width,
im_height,
im_width_height,
im2base64,
ims2strlist,
)
def test_im_width(tiny_ic_data_path):
im_path = Path(tiny_ic_data_path) / "can" / "1.jpg"
assert (
im_width(im_path) == 499
), "Expected image width of 499, but got {}".format(im_width(im_path))
im = np.zeros((100, 50))
assert im_width(im) == 50, "Expected image width of 50, but got ".format(
im_width(im)
)
def test_im_height(tiny_ic_data_path):
im_path = Path(tiny_ic_data_path) / "can" / "1.jpg"
assert (
im_height(im_path) == 665
), "Expected image height of 665, but got ".format(im_width(60))
im = np.zeros((100, 50))
assert (
im_height(im) == 100
), "Expected image height of 100, but got ".format(im_width(im))
def test_im_width_height(tiny_ic_data_path):
im_path = Path(tiny_ic_data_path) / "can" / "1.jpg"
w, h = im_width_height(im_path)
assert w == 499 and h == 665
im = np.zeros((100, 50))
w, h = im_width_height(im)
assert w == 50 and h == 100
def test_ims2strlist(tiny_ic_data_path):
""" Tests extraction of image content and conversion into string"""
im_list = [
os.path.join(tiny_ic_data_path, "can", "1.jpg"),
os.path.join(tiny_ic_data_path, "carton", "34.jpg"),
]
im_string_list = ims2strlist(im_list)
assert isinstance(im_string_list, list)
assert len(im_string_list) == len(im_list)
for im_str in im_string_list:
assert isinstance(im_str, str)
def test_im2base64(tiny_ic_data_path):
""" Tests extraction of image content and conversion into bytes"""
im_name = os.path.join(tiny_ic_data_path, "can", "1.jpg")
im_content = im2base64(im_name)
assert isinstance(im_content, bytes)
| [
"utils_cv.common.image.im_width",
"numpy.zeros",
"utils_cv.common.image.im2base64",
"pathlib.Path",
"utils_cv.common.image.im_width_height",
"utils_cv.common.image.im_height",
"utils_cv.common.image.ims2strlist",
"os.path.join"
] | [((495, 514), 'numpy.zeros', 'np.zeros', (['(100, 50)'], {}), '((100, 50))\n', (503, 514), True, 'import numpy as np\n'), ((842, 861), 'numpy.zeros', 'np.zeros', (['(100, 50)'], {}), '((100, 50))\n', (850, 861), True, 'import numpy as np\n'), ((1087, 1111), 'utils_cv.common.image.im_width_height', 'im_width_height', (['im_path'], {}), '(im_path)\n', (1102, 1111), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((1154, 1173), 'numpy.zeros', 'np.zeros', (['(100, 50)'], {}), '((100, 50))\n', (1162, 1173), True, 'import numpy as np\n'), ((1185, 1204), 'utils_cv.common.image.im_width_height', 'im_width_height', (['im'], {}), '(im)\n', (1200, 1204), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((1513, 1533), 'utils_cv.common.image.ims2strlist', 'ims2strlist', (['im_list'], {}), '(im_list)\n', (1524, 1533), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((1824, 1871), 'os.path.join', 'os.path.join', (['tiny_ic_data_path', '"""can"""', '"""1.jpg"""'], {}), "(tiny_ic_data_path, 'can', '1.jpg')\n", (1836, 1871), False, 'import os\n'), ((1889, 1907), 'utils_cv.common.image.im2base64', 'im2base64', (['im_name'], {}), '(im_name)\n', (1898, 1907), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((386, 403), 'utils_cv.common.image.im_width', 'im_width', (['im_path'], {}), '(im_path)\n', (394, 403), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((467, 484), 'utils_cv.common.image.im_width', 'im_width', (['im_path'], {}), '(im_path)\n', (475, 484), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((526, 538), 'utils_cv.common.image.im_width', 'im_width', (['im'], {}), '(im)\n', (534, 538), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((601, 613), 'utils_cv.common.image.im_width', 'im_width', (['im'], {}), '(im)\n', (609, 613), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((738, 756), 'utils_cv.common.image.im_height', 'im_height', (['im_path'], {}), '(im_path)\n', (747, 756), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((819, 831), 'utils_cv.common.image.im_width', 'im_width', (['(60)'], {}), '(60)\n', (827, 831), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((883, 896), 'utils_cv.common.image.im_height', 'im_height', (['im'], {}), '(im)\n', (892, 896), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((959, 971), 'utils_cv.common.image.im_width', 'im_width', (['im'], {}), '(im)\n', (967, 971), False, 'from utils_cv.common.image import im_width, im_height, im_width_height, im2base64, ims2strlist\n'), ((1376, 1423), 'os.path.join', 'os.path.join', (['tiny_ic_data_path', '"""can"""', '"""1.jpg"""'], {}), "(tiny_ic_data_path, 'can', '1.jpg')\n", (1388, 1423), False, 'import os\n'), ((1433, 1484), 'os.path.join', 'os.path.join', (['tiny_ic_data_path', '"""carton"""', '"""34.jpg"""'], {}), "(tiny_ic_data_path, 'carton', '34.jpg')\n", (1445, 1484), False, 'import os\n'), ((323, 346), 'pathlib.Path', 'Path', (['tiny_ic_data_path'], {}), '(tiny_ic_data_path)\n', (327, 346), False, 'from pathlib import Path\n'), ((675, 698), 'pathlib.Path', 'Path', (['tiny_ic_data_path'], {}), '(tiny_ic_data_path)\n', (679, 698), False, 'from pathlib import Path\n'), ((1034, 1057), 'pathlib.Path', 'Path', (['tiny_ic_data_path'], {}), '(tiny_ic_data_path)\n', (1038, 1057), False, 'from pathlib import Path\n')] |
import tkinter as tk
from tkinter import filedialog, simpledialog
import re
import numpy as np
from astropy.io import fits
from PIL import Image, ImageTk
from dataanalyzer import DataAnalyzer
from datasample import DataSample
import util
class App:
def __init__(self, **kwargs):
self.args = kwargs
self.root = tk.Tk()
self.root.title("DriftScanner")
self.analyse_window = DataAnalyzer(self)
self._init_vars()
self._init_menu()
self._init_display()
# Event Handling
self.root.bind("<Motion>", self.motion)
self.root.bind("<Configure>", self.on_resize)
self.root.bind("<Button-1>", self.on_left_click)
self.root.bind("<Shift_L>", self._shift_down)
self.root.bind("<KeyRelease-Shift_L>", self._shift_up)
self.root.mainloop()
def _debug(self):
util.detect_stars(self.working_data, threshold_abs=500)
def _init_vars(self):
self.working_file = None # active .fits file
self.working_data = None # 2d numpy array of .fits data
self.declination = 0
self.time_per_pix = 0
self.active_image = None # active PhotoImage() object
self.image_zoom = 1 # zoom level
self.image_mode = "log" # brightness curve mode
# Graphics and display
self.clicks = [] # stores most recent clicks, used in some measure modes
self.graphics_temp = [] # graphics that get removed when a new mode is entered
self.image_clearable = [] # graphic coordinates (x, y)
self.graphics_clearable = [] # graphics ids that stay until manually removed
self.image_label = {} # label coordinates + text (x, y, "text")
self.custom_label_count = 0
self.operation = "idle" # tool mode
# Aperture settings, self-explanatory
self.data_aperture_length = 100
self.data_aperture_diameter = 15
self.back_aperture_diameter_lower = 10
self.back_aperture_offset_lower = 10
self.back_aperture_diameter_upper = 10
self.back_aperture_offset_upper = 10
self.back_aperture_enabled_lower = True
self.back_aperture_enabled_upper = True
# Key statuses
self.shift_pressed = False
#
self.data_samples = []
# store all used apertures as (datx, daty, data_aperture_length, data_aperture_diameter, back_aperture_enabled_lower, back_aperture_offset_lower,
self.apertures = [] # back_aperture_diameter_lower, back_aperture_enabled_upper, back_aperture_offset_upper, back_aperture_diameter_upper)
def _init_display(self):
# widgets
self.label_info = tk.Label(self.root)
self.label_info.grid(row=0, column=0, sticky="nw")
self.label_info_text = tk.StringVar()
self.label_info.config(textvariable=self.label_info_text)
self.label_tool = tk.Label(self.root)
self.label_tool.grid(row=1, column=0, sticky="nw")
self.label_tool_text = tk.StringVar()
self.label_tool.config(textvariable=self.label_tool_text)
self.frame = tk.Frame(self.root, width=800, height=800)
self.canvas = tk.Canvas(self.frame, width=800, height=800)
self.scrollbar_x = tk.Scrollbar(self.frame)
self.scrollbar_x.grid(row=1, column=0, sticky="nw,ne")
self.scrollbar_x.config(command=self.canvas.xview, orient="horizontal")
self.scrollbar_y = tk.Scrollbar(self.frame)
self.scrollbar_y.grid(row=0, column=1, sticky="nw,sw")
self.scrollbar_y.config(command=self.canvas.yview, orient="vertical")
self.frame.grid(row=2, column=0)
self.canvas.grid(row=0, column=0)
self.canvas.configure(yscrollcommand=self.scrollbar_y.set, xscrollcommand=self.scrollbar_x.set)
def _init_menu(self): # lots of boring stuff, configure the top menubar and submenus
self.menubar = tk.Menu(self.root) # main menubaar
# -------
self.filemenu = tk.Menu(self.menubar, tearoff=0) # Menu to open a file or close the program
self.filemenu_transform = tk.Menu(self.filemenu, tearoff=0) # submenu for transformations
self.filemenu_transform.add_command(label="Rotate Clockwise", command=self._transform_r_clockwise)
self.filemenu_transform.add_command(label="Rotate Counterclockwise", command=self._transform_r_cclockwise)
self.filemenu_transform.add_command(label="Mirror on Y", command=self._transform_m_y)
self.filemenu_transform.add_command(label="Mirror on X", command=self._transform_m_x)
self.filemenu.add_command(label="Open File", command=self.open_image)
self.filemenu.add_cascade(label="Transform", menu=self.filemenu_transform)
self.filemenu.add_command(label="Test Me", command=self._debug) # debug command, TODO: remove when finalizing
self.filemenu.add_separator()
self.filemenu.add_command(label="Exit", command=self.root.quit) # kills program
# ------
self.viewmenu = tk.Menu(self.menubar, tearoff=0) # Menu to change view and brighness curve
self.viewmenu_brightness = tk.Menu(self.viewmenu, tearoff=0)
self.viewmenu_brightness.add_command(label="Linear", command=self._view_linear)
self.viewmenu_brightness.add_command(label="Squareroot", command=self._view_sqrt)
self.viewmenu_brightness.add_command(label="log", command=self._view_log)
self.viewmenu_clear = tk.Menu(self.viewmenu, tearoff=0)
self.viewmenu_clear.add_command(label="Clear last", command=self.graphics_clear_last)
self.viewmenu_clear.add_command(label="Clear all", command=self.graphics_clear_all)
self.viewmenu.add_cascade(label="Brightness", menu=self.viewmenu_brightness)
self.viewmenu.add_cascade(label="Clear Graphics", menu=self.viewmenu_clear)
self.viewmenu.add_command(label="Label", command=self.graphics_create_label)
self.viewmenu.add_command(label="Clear labels", command=self.graphics_clear_labels)
# -------
self.measuremenu = tk.Menu(self.menubar, tearoff=0) # Menu to measure basics
self.measuremenu.add_command(label="Open Apertures", command=self.open_apertures)
self.measuremenu.add_command(label="Save Apertures", command=self.save_apertures)
self.measuremenu_aperture_size = tk.Menu(self.measuremenu, tearoff=0) # set aperture length and diameter using popup prompts
self.measuremenu_aperture_size.add_command(label="Set Scan Length", command=self.set_scan_length)
self.measuremenu_aperture_size.add_command(label="Set Scan Diameter", command=self.set_scan_diameter)
self.measuremenu.add_command(label="Measure Distance", command=self.measure_distance) # distance between two points
self.measuremenu.add_cascade(label="Aperture Settings", menu=self.measuremenu_aperture_size)
self.measuremenu.add_command(label="Place Aperture", command=self.set_aperture) # place apertures and measure data
self.measuremenu.add_command(label="Auto Measure", command=self.auto_measure)
# -------
self.menubar.add_cascade(label="File", menu=self.filemenu)
self.menubar.add_cascade(label="View", menu=self.viewmenu)
self.menubar.add_cascade(label="Measure", menu=self.measuremenu)
self.root.config(menu=self.menubar)
# ------------------------------------------------------------------------------------------------------------------------------
# File operations
def open_image(self, path=None, keep_labels=False):
"""open_image(path=None)\n
Load .fits file using astropy.io\n
Prompts user for file if no path specified"""
if not path: # ask user to open file, unless otherwise specified
if "directory" in self.args:
initial_dir = self.args["directory"]
else:
initial_dir = r"/"
while not path:
path = filedialog.askopenfilename(parent=self.root, initialdir=initial_dir, title="Select file")
self.working_path = path
self.working_file = fits.open(path)
self.working_data = self.working_file[0].data # .fits files are a list of data sets, each having a header and data. the first is the one usually containing the image.
self.image_zoom = 1 # TODO: if needed, set option to open different dataset
if not keep_labels:
self.graphics_clear_labels()
self.graphics_clear_all()
try:
rdec = self.working_file[0].header["OBJCTDEC"]
deg, min, sec = map(float, rdec.split(" "))
self.declination = deg + min / 60 + sec / 3600
except KeyError:
self.root.withdraw()
dec = ""
while not (m := re.match(r"^(-?[0-9]{2})°(?:([0-5][0-9])'(?:([0-5][0-9](?:[.,][0-9]+)?)(?:''|\"))?)?$", dec)): # this regex matches every possible variation of declination in
dec = simpledialog.askstring(title="", prompt="Declination of Image (XX°XX'XX,XX\")") # min/sec form and gives groups of °, ' and "
self.root.deiconify()
self.declination = float(m.group(1)) + (float(m.group(2)) / 60 if m.group(2) else 0) + (float(m.group(3).replace(",", ".")) / 3600 if m.group(3) else 0)
finally:
print("The declination is: ", self.declination)
self.root.withdraw()
arcsec_per_pix = 0
while not arcsec_per_pix:
try:
arcsec_per_pix = float(simpledialog.askstring(title="", prompt="Arcsec per pixel"))
except TypeError:
pass
self.root.deiconify()
print(1 / (24 / 360.9856 / np.cos(np.deg2rad(self.declination))))
self.time_per_pix = 24 / 360.9856 / np.cos(np.deg2rad(self.declination)) * arcsec_per_pix
print(f"Time per pix is {self.time_per_pix}")
self.display_image()
def open_apertures(self, path=None):
if not path: # ask user to open file, unless otherwise specified
if "directory" in self.args:
initial_dir = self.args["directory"]
else:
initial_dir = r"/"
path = filedialog.askopenfilename(parent=self.root, initialdir=initial_dir, title="Select aperture file")
ap = np.genfromtxt(path, delimiter=",")
for a in ap:
x, y, l, w, lio, lof, lw, uio, uof, uw = list(map(int, a))
self.data_aperture_length = l
self.data_aperture_diameter = w
self.back_aperture_enabled_lower = bool(lio)
self.back_aperture_offset_lower = lof
self.back_aperture_diameter_lower = lw
self.back_aperture_enabled_upper = bool(uio)
self.back_aperture_offset_upper = uof
self.back_aperture_diameter_upper = uw
self.click_set_aperture(x, y, x, y)
def save_apertures(self, path=None):
if not path:
if "directory" in self.args:
initial_dir = self.args["directory"]
else:
initial_dir = r"/"
path = filedialog.asksaveasfilename(parent=self.root, initialdir=initial_dir, title="Save aperture file", defaultextension=".csv")
np.savetxt(path, *self.apertures, delimiter=",")
# ------------------------------------------------------------------------------------------------------------------------------
# Display
def display_image(self, mode=None, zoom=None):
"""display_image(self, file, mode="linear")\n
Diplays image to main canvas.\n
Parameters:\n
file: .fits object to be displayed\n
mode: brightness display mode: linear, sqrt, log"""
if not mode:
if not self.image_mode:
mode = "log"
mode = self.image_mode
if not zoom:
if not self.image_zoom:
zoom = 1
zoom = self.image_zoom
data = self.working_data
if mode == "sqrt": # reduce sharpness of brightness curve: squareroot or log10 on array to reduce span of values
data = np.sqrt(np.abs(data))
elif mode == "log":
data = np.log10(np.abs(data))
data = np.uint8(data / np.max(data) * 255) # map values between (0, 255)
self.img = ImageTk.PhotoImage(Image.fromarray(data, "L").resize((len(data), len(data[0]))))
self.canvas.configure(scrollregion=(0, 0, *data.shape)) # set scrollable canvas size to data image size
self.active_image = self.canvas.create_image(0, 0, image=self.img, anchor="nw") # and print image to canvas
self.graphics_clear_labels() # kill all labels
self.graphics_clear_all() # kill all graphics
for i in self.image_label:
lx, ly, ltxt = self.image_label[i][1]
self.image_label[i] = self.canvas.create_text(lx * self.image_zoom, ly * self.image_zoom, text=ltxt, fill="red", anchor="nw"), self.image_label[i][1]
for i in self.image_clearable:
x, y = i
x, y = x * self.image_zoom, y * self.image_zoom
self.graphics_clearable.append(self.canvas.create_rectangle(*self._get_ap_main(x, y), outline="blue"))
def graphics_clear_last(self):
if len(self.graphics_clearable):
self.canvas.delete(self.graphics_clearable.pop(-1))
def graphics_clear_all(self):
[self.canvas.delete(g) for g in self.graphics_clearable]
self.graphics_clearable = []
[self.canvas.delete(g) for g in self.graphics_temp]
self.graphics_temp = []
self.image_clearable = []
[self.canvas.delete(self.image_label[key][0]) for key in self.image_label if not key.startswith("Custom")]
delete = [key for key in self.image_label if not key.startswith("Custom")]
[self.image_label.pop(key) for key in delete]
def graphics_create_label(self):
self.label_tool_text.set("Click to place a label. Shift-Click to place multiple.")
self.operation = "label"
def graphics_clear_labels(self):
[self.canvas.delete(self.image_label[g][0]) for g in self.image_label if g.startswith("Custom")]
def graphics_clear_label(self, key):
self.canvas.delete(self.image_label[key][0])
# ------------------------------------------------------------------------------------------------------------------------------
# Event Handler
def motion(self, event): # Event handler: tracks mouse position on canvas and prints to label_info
c = event.widget
if self.working_data is not None and type(c) == type(tk.Canvas()): # check if movement is on canvas and data is loaded
c = event.widget
x, y = c.canvasx(event.x), c.canvasy(event.y)
x, y = int(x / self.image_zoom), int(y / self.image_zoom)
hix, hiy = self.working_data.shape
if (0 <= x and x < hix and 0 <= y and y < hiy):
self.label_info_text.set(f"X: {x} Y: {y} B: {self.working_data[y, x]}") # give infos in top label: X, Y, Brightness
if self.operation == "set_aperture": # follow cursor with an aperture sized rectangle
x, y = c.canvasx(event.x), c.canvasy(event.y)
[self.canvas.delete(g) for g in self.graphics_temp]
self.graphics_temp = []
self.graphics_temp.append(self.canvas.create_rectangle(*self._get_ap_main(x, y), outline="blue"))
self.graphics_temp.append(self.canvas.create_rectangle(*self._get_ap_lower(x, y), outline="blue", dash=(5, 5)))
self.graphics_temp.append(self.canvas.create_rectangle(*self._get_ap_upper(x, y), outline="blue", dash=(5, 5)))
self.graphics_temp.append(
self.canvas.create_line(x + self.data_aperture_length // 2 * self.image_zoom, y, x + (self.data_aperture_length // 2 + 20) * self.image_zoom, y, arrow="last", dash=(5, 5),
fill="blue"))
def on_resize(self, event): # Event handler: resize canvas to window size
if event.widget == self.root:
w, h = event.width - 21, event.height - 63 # weird thing, without -21 and -63 window spazms out of control
self.canvas.configure(width=w, height=h)
def on_left_click(self, event): # Event handler: Everything click related
c = event.widget
if self.working_data is not None and type(c) == type(tk.Canvas()):
c = event.widget
x, y = int(c.canvasx(event.x)), int(c.canvasy(event.y))
datx, daty = x // self.image_zoom, y // self.image_zoom
self.clicks.append((x, y)) # log clicks
# Different tool modes from here on
if self.operation == "distance":
if len(self.clicks) > 2:
self.clicks = []
[self.canvas.delete(i) for i in self.graphics_temp]
self.graphics_temp = []
self.label_tool_text.set("Click twice to measure distance")
elif len(self.clicks) == 1:
self.graphics_temp.append(self.canvas.create_oval(x - 10, y - 10, x + 10, y + 10, width=1, outline="red"))
elif len(self.clicks) == 2:
self.graphics_temp.append(self.canvas.create_oval(x - 10, y - 10, x + 10, y + 10, width=1, outline="red"))
c1, c2 = self.clicks[0], self.clicks[1]
self.graphics_temp.append(self.canvas.create_line(*c1, *c2, fill="red", dash=(5, 5))) # dashed line
d = np.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2) / self.image_zoom # pythagoras to calculate distance, divide by zoom level
self.label_tool_text.set(f"Distance: {d:.2f}")
if self.operation == "label":
self.click_label(datx, daty)
if self.operation == "set_aperture":
self.click_set_aperture(x, y, datx, daty)
if self.operation == "idle":
self.label_tool_text.set("")
def click_label(self, datx, daty):
if not self.shift_pressed:
self.operation = "idle"
self.custom_label_count += 1
title = f"Custom{self.custom_label_count}"
self.image_label[title] = (datx, daty, simpledialog.askstring("", "Label: "))
lx, ly, ltxt = self.image_label[f"Custom{self.custom_label_count}"]
self.image_label[title] = self.canvas.create_text(lx * self.image_zoom, ly * self.image_zoom, text=ltxt, fill="red", anchor="nw"), self.image_label[title]
def click_set_aperture(self, x, y, datx, daty):
if not self.shift_pressed:
self.operation = "idle"
self.apertures.append((datx, daty, self.data_aperture_length, self.data_aperture_diameter, self.back_aperture_enabled_lower, self.back_aperture_offset_lower, self.back_aperture_diameter_lower,
self.back_aperture_enabled_upper, self.back_aperture_offset_upper, self.back_aperture_diameter_upper))
self.image_clearable.append((datx, daty))
if not self.graphics_temp:
self.graphics_temp.append(self.canvas.create_rectangle(*self._get_ap_main(x, y), outline="blue"))
self.graphics_clearable.append(self.graphics_temp.pop(0))
[self.canvas.delete(g) for g in self.graphics_temp]
self.graphics_temp = []
x1, y1, x2, y2 = self._get_ap_main(datx, daty)
data = self.working_data[y1:y2, x1:x2]
x1, y1, x2, y2 = self._get_ap_lower(datx, daty)
back1 = self.working_data[y1:y2, x1:x2]
x1, y1, x2, y2 = self._get_ap_upper(datx, daty)
back2 = self.working_data[y1:y2, x1:x2]
meta_info = {"altitude": re.search(r"[\d.]+deg", self.working_path).group(),
"declination": self.declination,
"exposure": self.working_file[0].header["EXPOSURE"],
"time_per_pix": self.time_per_pix}
s = DataSample(data, self.time_per_pix, back1, back2, meta_info=meta_info)
self.analyse_window.add_sample(s)
self.analyse_window.window.deiconify()
title = s.title
self.image_label[title] = (datx, daty, title)
lx, ly, ltxt = self.image_label[title]
self.image_label[title] = self.canvas.create_text(lx * self.image_zoom, ly * self.image_zoom, text=ltxt, fill="red", anchor="nw"), self.image_label[title]
# ------------------------------------------------------------------------------------------------------------------------------
# Basic Measure
def measure_distance(self):
self.operation = "distance"
self.clicks = []
self.label_tool_text.set("Click twice to measure distance")
def set_scan_length(self):
self.data_aperture_length = simpledialog.askinteger("", "Length: ")
def set_scan_diameter(self):
self.data_aperture_diameter = simpledialog.askinteger("", "Diameter: ")
def set_aperture(self):
self.operation = "set_aperture"
self.label_tool_text.set("Click to place aperture. Shift-Click to place multiple.")
[self.canvas.delete(g) for g in self.graphics_temp]
self.graphics_temp = []
# Advanced Measure
def auto_measure(self):
threshold = tk.simpledialog.askfloat("Auto Measure", "Set Threshold for automatic star detection")
stars, should_flip, should_rotate = util.detect_stars(self.working_data, threshold, min_separation=20)
if should_flip:
self._transform_m_y()
if should_rotate:
self._transform_r_clockwise()
stars, should_flip, should_rotate = util.detect_stars(self.working_data, threshold, min_separation=20)
boxes = []
for star in stars:
y, x = star
boxes.append(self._get_ap_main(x, y))
for star in stars:
y, x = star
if not self._check_all_intersections(x, y, boxes):
offset = self.data_aperture_diameter // 2
if self._check_is_in_image(self._get_ap_main(x + offset, y)):
self.click_set_aperture(x + offset, y, x + offset, y)
# ------------------------------------------------------------------------------------------------------------------------------
# Util functions and workarounds
def _view_linear(self):
self.image_mode = "linear"
self.display_image()
def _view_sqrt(self):
self.image_mode = "sqrt"
self.display_image()
def _view_log(self):
self.image_mode = "log"
self.display_image()
def _transform_m_x(self):
self.working_data = np.flipud(self.working_data)
self.display_image()
def _transform_m_y(self):
self.working_data = np.fliplr(self.working_data)
self.display_image()
def _transform_r_cclockwise(self):
self.working_data = np.rot90(self.working_data)
self.display_image()
def _transform_r_clockwise(self):
self.working_data = np.rot90(self.working_data, 3)
self.display_image()
def _shift_down(self, event):
self.shift_pressed = True
def _shift_up(self, event):
self.shift_pressed = False
def _get_ap_main(self, x, y): # return coords for main aperture based on mouse coordinates
x1 = x
y1 = y - int(np.floor(self.data_aperture_diameter * self.image_zoom / 2))
x2 = x + self.data_aperture_length * self.image_zoom
y2 = y + int(np.ceil(self.data_aperture_diameter * self.image_zoom / 2))
return (x1, y1, x2, y2)
def _get_ap_lower(self, x, y): # same for lower background aperture
x1 = x
y2 = y + int((np.ceil(self.data_aperture_diameter / 2)) + self.back_aperture_diameter_upper + self.back_aperture_offset_upper) * self.image_zoom
x2 = x + self.data_aperture_length * self.image_zoom
y1 = y + int((np.ceil(self.data_aperture_diameter / 2)) + self.back_aperture_offset_upper) * self.image_zoom
return (x1, y1, x2, y2)
def _get_ap_upper(self, x, y): # upper background aperture
x1 = x
y2 = y - int((np.floor(self.data_aperture_diameter / 2)) + self.back_aperture_offset_lower) * self.image_zoom
x2 = x + self.data_aperture_length * self.image_zoom
y1 = y - int((np.floor(self.data_aperture_diameter / 2)) + self.back_aperture_diameter_lower + self.back_aperture_offset_lower) * self.image_zoom
return (x1, y1, x2, y2)
@staticmethod
def _check_intersection(x, y, box):
x1, y1, x2, y2 = box
if x1 <= x <= x2:
if y1 <= y <= y2:
return True
return False
@staticmethod
def _check_all_intersections(x, y, boxes):
return sum([App._check_intersection(x, y, box) for box in boxes]) > 1
def _check_is_in_image(self, box):
x1, y1, x2, y2 = box
if x1 < 0 or len(self.working_data[0]) <= x2 or y1 < 0 or len(self.working_data) <= y2:
return False
return True
if __name__ == "__main__":
app = App(directory=r"C:\Users\ole\OneDrive\Desktop\Jufo\Daten")
| [
"tkinter.StringVar",
"tkinter.filedialog.asksaveasfilename",
"numpy.abs",
"numpy.floor",
"numpy.rot90",
"tkinter.Frame",
"tkinter.Label",
"numpy.savetxt",
"numpy.genfromtxt",
"tkinter.filedialog.askopenfilename",
"tkinter.simpledialog.askfloat",
"numpy.max",
"re.search",
"tkinter.Tk",
"u... | [((335, 342), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (340, 342), True, 'import tkinter as tk\n'), ((414, 432), 'dataanalyzer.DataAnalyzer', 'DataAnalyzer', (['self'], {}), '(self)\n', (426, 432), False, 'from dataanalyzer import DataAnalyzer\n'), ((879, 934), 'util.detect_stars', 'util.detect_stars', (['self.working_data'], {'threshold_abs': '(500)'}), '(self.working_data, threshold_abs=500)\n', (896, 934), False, 'import util\n'), ((2725, 2744), 'tkinter.Label', 'tk.Label', (['self.root'], {}), '(self.root)\n', (2733, 2744), True, 'import tkinter as tk\n'), ((2835, 2849), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2847, 2849), True, 'import tkinter as tk\n'), ((2943, 2962), 'tkinter.Label', 'tk.Label', (['self.root'], {}), '(self.root)\n', (2951, 2962), True, 'import tkinter as tk\n'), ((3053, 3067), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3065, 3067), True, 'import tkinter as tk\n'), ((3156, 3198), 'tkinter.Frame', 'tk.Frame', (['self.root'], {'width': '(800)', 'height': '(800)'}), '(self.root, width=800, height=800)\n', (3164, 3198), True, 'import tkinter as tk\n'), ((3222, 3266), 'tkinter.Canvas', 'tk.Canvas', (['self.frame'], {'width': '(800)', 'height': '(800)'}), '(self.frame, width=800, height=800)\n', (3231, 3266), True, 'import tkinter as tk\n'), ((3295, 3319), 'tkinter.Scrollbar', 'tk.Scrollbar', (['self.frame'], {}), '(self.frame)\n', (3307, 3319), True, 'import tkinter as tk\n'), ((3491, 3515), 'tkinter.Scrollbar', 'tk.Scrollbar', (['self.frame'], {}), '(self.frame)\n', (3503, 3515), True, 'import tkinter as tk\n'), ((3960, 3978), 'tkinter.Menu', 'tk.Menu', (['self.root'], {}), '(self.root)\n', (3967, 3978), True, 'import tkinter as tk\n'), ((4038, 4070), 'tkinter.Menu', 'tk.Menu', (['self.menubar'], {'tearoff': '(0)'}), '(self.menubar, tearoff=0)\n', (4045, 4070), True, 'import tkinter as tk\n'), ((4150, 4183), 'tkinter.Menu', 'tk.Menu', (['self.filemenu'], {'tearoff': '(0)'}), '(self.filemenu, tearoff=0)\n', (4157, 4183), True, 'import tkinter as tk\n'), ((5074, 5106), 'tkinter.Menu', 'tk.Menu', (['self.menubar'], {'tearoff': '(0)'}), '(self.menubar, tearoff=0)\n', (5081, 5106), True, 'import tkinter as tk\n'), ((5186, 5219), 'tkinter.Menu', 'tk.Menu', (['self.viewmenu'], {'tearoff': '(0)'}), '(self.viewmenu, tearoff=0)\n', (5193, 5219), True, 'import tkinter as tk\n'), ((5511, 5544), 'tkinter.Menu', 'tk.Menu', (['self.viewmenu'], {'tearoff': '(0)'}), '(self.viewmenu, tearoff=0)\n', (5518, 5544), True, 'import tkinter as tk\n'), ((6123, 6155), 'tkinter.Menu', 'tk.Menu', (['self.menubar'], {'tearoff': '(0)'}), '(self.menubar, tearoff=0)\n', (6130, 6155), True, 'import tkinter as tk\n'), ((6405, 6441), 'tkinter.Menu', 'tk.Menu', (['self.measuremenu'], {'tearoff': '(0)'}), '(self.measuremenu, tearoff=0)\n', (6412, 6441), True, 'import tkinter as tk\n'), ((8191, 8206), 'astropy.io.fits.open', 'fits.open', (['path'], {}), '(path)\n', (8200, 8206), False, 'from astropy.io import fits\n'), ((10492, 10526), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""'}), "(path, delimiter=',')\n", (10505, 10526), True, 'import numpy as np\n'), ((11433, 11481), 'numpy.savetxt', 'np.savetxt', (['path', '*self.apertures'], {'delimiter': '""","""'}), "(path, *self.apertures, delimiter=',')\n", (11443, 11481), True, 'import numpy as np\n'), ((20248, 20318), 'datasample.DataSample', 'DataSample', (['data', 'self.time_per_pix', 'back1', 'back2'], {'meta_info': 'meta_info'}), '(data, self.time_per_pix, back1, back2, meta_info=meta_info)\n', (20258, 20318), False, 'from datasample import DataSample\n'), ((21084, 21123), 'tkinter.simpledialog.askinteger', 'simpledialog.askinteger', (['""""""', '"""Length: """'], {}), "('', 'Length: ')\n", (21107, 21123), False, 'from tkinter import filedialog, simpledialog\n'), ((21196, 21237), 'tkinter.simpledialog.askinteger', 'simpledialog.askinteger', (['""""""', '"""Diameter: """'], {}), "('', 'Diameter: ')\n", (21219, 21237), False, 'from tkinter import filedialog, simpledialog\n'), ((21564, 21654), 'tkinter.simpledialog.askfloat', 'tk.simpledialog.askfloat', (['"""Auto Measure"""', '"""Set Threshold for automatic star detection"""'], {}), "('Auto Measure',\n 'Set Threshold for automatic star detection')\n", (21588, 21654), True, 'import tkinter as tk\n'), ((21695, 21761), 'util.detect_stars', 'util.detect_stars', (['self.working_data', 'threshold'], {'min_separation': '(20)'}), '(self.working_data, threshold, min_separation=20)\n', (21712, 21761), False, 'import util\n'), ((21934, 22000), 'util.detect_stars', 'util.detect_stars', (['self.working_data', 'threshold'], {'min_separation': '(20)'}), '(self.working_data, threshold, min_separation=20)\n', (21951, 22000), False, 'import util\n'), ((22947, 22975), 'numpy.flipud', 'np.flipud', (['self.working_data'], {}), '(self.working_data)\n', (22956, 22975), True, 'import numpy as np\n'), ((23064, 23092), 'numpy.fliplr', 'np.fliplr', (['self.working_data'], {}), '(self.working_data)\n', (23073, 23092), True, 'import numpy as np\n'), ((23190, 23217), 'numpy.rot90', 'np.rot90', (['self.working_data'], {}), '(self.working_data)\n', (23198, 23217), True, 'import numpy as np\n'), ((23314, 23344), 'numpy.rot90', 'np.rot90', (['self.working_data', '(3)'], {}), '(self.working_data, 3)\n', (23322, 23344), True, 'import numpy as np\n'), ((10379, 10482), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'parent': 'self.root', 'initialdir': 'initial_dir', 'title': '"""Select aperture file"""'}), "(parent=self.root, initialdir=initial_dir, title=\n 'Select aperture file')\n", (10405, 10482), False, 'from tkinter import filedialog, simpledialog\n'), ((11300, 11427), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'parent': 'self.root', 'initialdir': 'initial_dir', 'title': '"""Save aperture file"""', 'defaultextension': '""".csv"""'}), "(parent=self.root, initialdir=initial_dir,\n title='Save aperture file', defaultextension='.csv')\n", (11328, 11427), False, 'from tkinter import filedialog, simpledialog\n'), ((18560, 18597), 'tkinter.simpledialog.askstring', 'simpledialog.askstring', (['""""""', '"""Label: """'], {}), "('', 'Label: ')\n", (18582, 18597), False, 'from tkinter import filedialog, simpledialog\n'), ((8039, 8133), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'parent': 'self.root', 'initialdir': 'initial_dir', 'title': '"""Select file"""'}), "(parent=self.root, initialdir=initial_dir, title=\n 'Select file')\n", (8065, 8133), False, 'from tkinter import filedialog, simpledialog\n'), ((12330, 12342), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (12336, 12342), True, 'import numpy as np\n'), ((23644, 23703), 'numpy.floor', 'np.floor', (['(self.data_aperture_diameter * self.image_zoom / 2)'], {}), '(self.data_aperture_diameter * self.image_zoom / 2)\n', (23652, 23703), True, 'import numpy as np\n'), ((23787, 23845), 'numpy.ceil', 'np.ceil', (['(self.data_aperture_diameter * self.image_zoom / 2)'], {}), '(self.data_aperture_diameter * self.image_zoom / 2)\n', (23794, 23845), True, 'import numpy as np\n'), ((9079, 9158), 'tkinter.simpledialog.askstring', 'simpledialog.askstring', ([], {'title': '""""""', 'prompt': '"""Declination of Image (XX°XX\'XX,XX")"""'}), '(title=\'\', prompt=\'Declination of Image (XX°XX\\\'XX,XX")\')\n', (9101, 9158), False, 'from tkinter import filedialog, simpledialog\n'), ((12400, 12412), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (12406, 12412), True, 'import numpy as np\n'), ((12446, 12458), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (12452, 12458), True, 'import numpy as np\n'), ((12536, 12562), 'PIL.Image.fromarray', 'Image.fromarray', (['data', '"""L"""'], {}), "(data, 'L')\n", (12551, 12562), False, 'from PIL import Image, ImageTk\n'), ((14830, 14841), 'tkinter.Canvas', 'tk.Canvas', ([], {}), '()\n', (14839, 14841), True, 'import tkinter as tk\n'), ((16688, 16699), 'tkinter.Canvas', 'tk.Canvas', ([], {}), '()\n', (16697, 16699), True, 'import tkinter as tk\n'), ((19999, 20041), 're.search', 're.search', (['"""[\\\\d.]+deg"""', 'self.working_path'], {}), "('[\\\\d.]+deg', self.working_path)\n", (20008, 20041), False, 'import re\n'), ((8894, 8999), 're.match', 're.match', (['"""^(-?[0-9]{2})°(?:([0-5][0-9])\'(?:([0-5][0-9](?:[.,][0-9]+)?)(?:\'\'|\\\\"))?)?$"""', 'dec'], {}), '(\n \'^(-?[0-9]{2})°(?:([0-5][0-9])\\\'(?:([0-5][0-9](?:[.,][0-9]+)?)(?:\\\'\\\'|\\\\"))?)?$\'\n , dec)\n', (8902, 8999), False, 'import re\n'), ((9674, 9733), 'tkinter.simpledialog.askstring', 'simpledialog.askstring', ([], {'title': '""""""', 'prompt': '"""Arcsec per pixel"""'}), "(title='', prompt='Arcsec per pixel')\n", (9696, 9733), False, 'from tkinter import filedialog, simpledialog\n'), ((9962, 9990), 'numpy.deg2rad', 'np.deg2rad', (['self.declination'], {}), '(self.declination)\n', (9972, 9990), True, 'import numpy as np\n'), ((24204, 24244), 'numpy.ceil', 'np.ceil', (['(self.data_aperture_diameter / 2)'], {}), '(self.data_aperture_diameter / 2)\n', (24211, 24244), True, 'import numpy as np\n'), ((24433, 24474), 'numpy.floor', 'np.floor', (['(self.data_aperture_diameter / 2)'], {}), '(self.data_aperture_diameter / 2)\n', (24441, 24474), True, 'import numpy as np\n'), ((9875, 9903), 'numpy.deg2rad', 'np.deg2rad', (['self.declination'], {}), '(self.declination)\n', (9885, 9903), True, 'import numpy as np\n'), ((23990, 24030), 'numpy.ceil', 'np.ceil', (['(self.data_aperture_diameter / 2)'], {}), '(self.data_aperture_diameter / 2)\n', (23997, 24030), True, 'import numpy as np\n'), ((24612, 24653), 'numpy.floor', 'np.floor', (['(self.data_aperture_diameter / 2)'], {}), '(self.data_aperture_diameter / 2)\n', (24620, 24653), True, 'import numpy as np\n'), ((17841, 17893), 'numpy.sqrt', 'np.sqrt', (['((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2)'], {}), '((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2)\n', (17848, 17893), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Orbit SkyOffset Frames.
Like `~astropy.coordinates.Galactocentric` needs ``galcen_coord`` and other
information to convert to/from heliocentric or geocentric coordinate systems,
the `OrbitPseudoFrame` needs information about the orbit (phase-space position
and integration time) and the potential in which the orbit was integrated.
The information name, and frame attribute data descriptor are listed:
- origin : `~astropy.coordinates.attributes.CoordinateAttribute`
- potential: `PotentialAttribute`
- afn_bounds : `~astropy.coordinates.attributes.QuantityAttribute`
Notes
-----
This coordinate system should be used for visualization, probably not science
for two reasons: it is under active development, and its very difficult
to interpret what kinematics mean when the coordinate system itself is a
function of an affine parameter.
.. todo::
- Look at FunctionTransformWithFiniteDifference for the kinematics
"""
__author__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__all__ = [
"OrbitSkyOffsetFrame",
]
##############################################################################
# IMPORTS
import typing as T
import astropy.units as u
import numpy as np
from astropy.coordinates import SkyCoord # TODO not need
from astropy.coordinates import (
concatenate,
frame_transform_graph,
match_coordinates_sky,
)
from astropy.coordinates.attributes import (
CoordinateAttribute,
QuantityAttribute,
)
from astropy.coordinates.baseframe import BaseCoordinateFrame
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.transformations import FunctionTransform
from scipy import interpolate
from typing_extensions import Literal
from ..attributes import PotentialAttribute
from ..representations import (
OrbitSkyOffsetRepresentation,
OrbitSkyOffsetUnitRepresentation,
)
from ..transformations import FunctionWithKwargTransform
from .utils import (
catalog_match_sky_inverse_track as catalog_match_inverse_track,
)
from .utils import catalog_match_sky_track as catalog_match_track
# from astropy.coordinates.attributes import Attribute
# from astropy.coordinates.baseframe import RepresentationMapping
##############################################################################
# PARAMETERS
_orbitskyoffset_cache = {}
"""The cache in which OrbitSkyOffsets are stored."""
##############################################################################
# CODE
##############################################################################
def make_orbitskyoffset_cls(
framecls,
track_fn,
inverse_track_fn=None,
track_fn_kw={},
inverse_track_fn_kw={},
):
"""Make Sky-Offset Class from an Orbit in a potential.
Create a new class that is the orbit sky offset frame for a specific class
of origin frame. If such a class has already been created for this frame,
the same class will be returned.
The new class will always have component names "afn", "sep", "distance".
Parameters
----------
framecls : subclass of `~astropy.coordinates.BaseCoordinateFrame`
coordinate frame class. The class to create the SkyOffsetFrame.
potential : Any
The potential in which the track was integrated
track_fn : Callable[[affine param array], coordinate array]
Function mapping affine parameter to the coordinate array.
inverse_track_fn : Callable[[coordinate array], affine param array]
Function mapping coordinate array to affine parameter.
track_fn_kw : dict
keyword arguments into `track_fn`
inverse_track_fn_kw : dict
keyword arguments into `inverse_track_fn`
Returns
-------
orbitskyoffsetframecls : class
The class for the new orbit-skyoffset frame.
Notes
-----
This function is necessary because Astropy's frame transformations depend
on connection between specific frame *classes*. So each type of frame
needs its own distinct orbit-skyoffset frame class. This function
generates just that class, as well as ensuring that only one example of
such a class actually gets created in any given python session.
For implementation details, see
:mod:`~astropy/coordinates/builtin_frames/skyoffset`
.. todo::
- fix cache. it erroneously returns same class for everything
see SkyOffsetFrame for reference. maybe need to set cache key
as a hash of the framecls, origin, potential, and trackfunc?
"""
# if framecls in _orbitskyoffset_cache: # FIXME, all are the same
# return _orbitskyoffset_cache[framecls]
# the class of a class object is the metaclass
framemeta = framecls.__class__
# -----------------------------------------------------
class OrbitSkyOffsetMeta(framemeta):
"""Metaclass for Orbit Sky-Offsets.
This metaclass renames the class to be "SkyOffset<framecls>" and also
adjusts the frame specific representation info so that spherical names
are always "lon" and "lat" (instead of e.g. "ra" and "dec").
"""
def __new__(cls, name, bases, members):
# Only 'origin' is needed here, to set the origin frame properly.
members["origin"] = CoordinateAttribute(
frame=framecls, default=None
)
# This has to be done because FrameMeta will set these attributes
# to the defaults from BaseCoordinateFrame when it creates the base
# OrbitSkyOffsetFrame class initially.
members["_default_representation"] = OrbitSkyOffsetRepresentation
members[
"_default_differential"
] = framecls._default_differential # TODO replace
newname = name[:-5] if name.endswith("Frame") else name
newname += framecls.__name__
return super().__new__(cls, newname, bases, members)
# /def
# /class
# We need this to handle the intermediate metaclass correctly, otherwise
# we could just subclass OrbitSkyOffsetFrame.
_OrbitSkyOffsetFramecls = OrbitSkyOffsetMeta(
"OrbitSkyOffsetFrame",
(OrbitSkyOffsetFrame, framecls),
{"__doc__": OrbitSkyOffsetFrame.__doc__},
)
# -----------------------------------------------------
# register frame transform graph to/from reference from/to OrbitSkyOffset
@frame_transform_graph.transform(
FunctionWithKwargTransform,
framecls,
_OrbitSkyOffsetFramecls,
func_kwargs=track_fn_kw, # the **kwargs
)
def reference_to_orbitskyoffset(
reference_coord, orbitskyoffset_frame, **kwargs
):
"""Compute the transformation from reference to orbit frame.
Notes
-----
unlike matrix transforms, the FunctionTransform method requires
manually passing the frame attributes. This can be generalized using
the ``frame_attribute`` property.
"""
afn_name = kwargs.pop("afn_name", None)
afn, sep2d, distance, _PA = track_fn(reference_coord, **kwargs)
# now need to decide how to treat distances, if they are / not present
# this might be a temporary solution, but rather than directly
# implementing a _OrbitSkyOffsetFramecls, first create a representation
# normal or unit (if no distances) and then create the frame from the
# representation, specifying which representation type was used.
rep = OrbitSkyOffsetRepresentation(
afn=afn, sep=sep2d, distance=distance, _PA=_PA, afn_name=afn_name,
)
representation_type = OrbitSkyOffsetRepresentation
if all(reference_coord.distance.to_value() == 1) and (
reference_coord.distance.unit == u.dimensionless_unscaled
):
rep = rep.represent_as(OrbitSkyOffsetUnitRepresentation)
representation_type = OrbitSkyOffsetUnitRepresentation
# /if
return _OrbitSkyOffsetFramecls(
rep,
representation_type=representation_type,
# **orbitskyoffset_frame.frame_attributes # TODO
origin=orbitskyoffset_frame.origin,
potential=orbitskyoffset_frame.potential,
afn_bound_tail=orbitskyoffset_frame.afn_bound_tail, # FIXME, shape prob
afn_bound_lead=orbitskyoffset_frame.afn_bound_lead,
)
# /def
@frame_transform_graph.transform(
FunctionWithKwargTransform,
_OrbitSkyOffsetFramecls,
framecls,
func_kwargs=inverse_track_fn_kw, # the **kwargs
)
def skyoffset_to_reference(
orbitskyoffset_coord, reference_frame, **kwargs
):
"""Convert an sky offset frame coordinate to the reference frame."""
lon, lat, distance = inverse_track_fn(orbitskyoffset_coord, **kwargs)
rep = SphericalRepresentation(lon=lon, lat=lat, distance=distance)
if not hasattr(orbitskyoffset_coord, "distance"):
rep = rep.represent_as(UnitSphericalRepresentation)
return framecls(
rep, representation_type=reference_frame.representation_type
)
# /def
# -----------------------------------------------------
# register between OrbitSkyOffset frame transforms
# TODO
@frame_transform_graph.transform(
FunctionTransform, _OrbitSkyOffsetFramecls, _OrbitSkyOffsetFramecls
)
def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame):
"""Transform between two orbit-skyoffset frames.
Parameters
----------
from_skyoffset_coord
to_skyoffset_frame
Returns
-------
to_skyoffset_coord
"""
# This transform goes through the parent frames on each side.
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame
intermediate_from = from_skyoffset_coord.transform_to(
from_skyoffset_coord.origin
)
intermediate_to = intermediate_from.transform_to(
to_skyoffset_frame.origin
)
return intermediate_to.transform_to(to_skyoffset_frame)
# /def
# -----------------------------------------------------
_orbitskyoffset_cache[framecls] = _OrbitSkyOffsetFramecls
return _OrbitSkyOffsetFramecls
# /def
# -------------------------------------------------------------------
class OrbitSkyOffsetFrame(BaseCoordinateFrame):
"""Sky Offset Frame from an Orbit in a Potential.
A frame which is relative to some specific position and oriented to match
its frame.
SkyOffsetFrames always have component names for spherical coordinates
of ``lon``/``lat``, *not* the component names for the frame of ``origin``.
This is useful for calculating offsets and dithers in the frame of the sky
relative to an arbitrary position. Coordinates in this frame are both
centered on the position specified by the ``origin`` coordinate, *and*
they are oriented in the same manner as the ``origin`` frame. E.g., if
``origin`` is `~astropy.coordinates.ICRS`, this object's ``lat`` will be
pointed in the direction of Dec, while ``lon`` will point in the direction
of RA.
For more on skyoffset frames, see :ref:`astropy-skyoffset-frames`.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data,
or use the other keywords
origin : `~astropy.coordinates.SkyCoord` or low-level coordinate object.
The coordinate which specifies the origin of this frame. Note that this
origin is used purely for on-sky location. It can have a
``distance`` but it will not be used by this ``OrbitSkyOffsetFrame``.
potential : `~galpy.potential.Potential` or list thereof.
Notes
-----
``OrbitSkyOffsetFrame`` is a factory class. That is, the objects that it
yields are *not* actually objects of class ``OrbitSkyOffsetFrame``.
Instead, distinct classes are created on-the-fly for whatever the frame
class is of ``origin``.
"""
origin = CoordinateAttribute(default=None, frame=None)
potential = PotentialAttribute()
afn_bound_tail = QuantityAttribute(default=-np.inf * u.Myr, unit=u.Myr)
afn_bound_lead = QuantityAttribute(default=np.inf * u.Myr, unit=u.Myr)
@property
def afn_bounds(self):
"""Affine bounds (tail, lead)."""
return u.Quantity([self.afn_bound_tail, self.afn_bound_lead])
# /def
def __new__(cls, *args, **kwargs):
"""OrbitSkyOffsetFrame."""
# We don't want to call this method if we've already set up
# an orbitskyoffset frame for this class.
if not (
issubclass(cls, OrbitSkyOffsetFrame)
and cls is not OrbitSkyOffsetFrame
):
# We get the origin argument, and handle it here.
try:
origin_frame = kwargs["origin"]
except KeyError:
raise TypeError(
"Can't initialize an OrbitSkyOffsetFrame "
"without origin= keyword."
)
try:
track_fn = kwargs.pop("track_fn")
except KeyError:
raise TypeError(
"Can't initialize an OrbitSkyOffsetFrame "
"without origin= keyword."
)
inverse_track_fn = kwargs.pop("inverse_track_fn", None)
track_fn_kw = kwargs.pop("track_fn_kw", {})
inverse_track_fn_kw = kwargs.pop("inverse_track_fn_kw", {})
# and the potential argument
try:
kwargs["potential"]
except KeyError:
raise TypeError(
"Can't initialize an OrbitSkyOffsetFrame "
"without potential= keyword."
)
# and the afn arguments
try:
afn_bounds = kwargs["afn_bounds"]
except KeyError:
raise TypeError(
"Can't initialize an OrbitSkyOffsetFrame "
"without afn_bounds= keyword."
)
else:
if len(afn_bounds) != 2:
raise ValueError("`afn_bounds` must be len= 2.")
if hasattr(origin_frame, "frame"):
origin_frame = origin_frame.frame
newcls = make_orbitskyoffset_cls(
origin_frame.__class__,
track_fn=track_fn,
track_fn_kw=track_fn_kw,
inverse_track_fn=inverse_track_fn,
inverse_track_fn_kw=inverse_track_fn_kw,
)
return newcls.__new__(newcls, *args, **kwargs)
# /if
# http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases
# See above for why this is necessary. Basically, because some child
# may override __new__, we must override it here to never pass
# arguments to the object.__new__ method.
if super().__new__ is object.__new__:
return super().__new__(cls)
return super().__new__(cls, *args, **kwargs)
# /def
@classmethod
def from_galpy_orbit(
cls,
*args,
orbit,
orbit_bkw=None,
frame="galactocentric",
method: T.Union[
Literal["closest"],
Literal["linear"],
Literal["cubic"],
T.Callable[[T.Sequence], T.Sequence],
] = "closest",
time_unit=None,
**kwargs,
):
"""Create an Orbit Sky-Offset Frame from a galpy orbit.
Parameters
----------
orbit : `~galpy.orbit.Orbit`
An integrated single orbit., keyword only
the initial 4-vector and conjugate momenta are taken as the origin.
orbit_bkw : `~galpy.orbit.Orbit`, optional, keyword only
An integrated single orbit in the opposite time direction.
This allows for a leading and trailing orbit from the origin point.
Must have same origin as `orbit`, but be integrated in reverse.
frame : str or BaseCoordinateFrame, optional, keyword only
frame in which to represent the orbit.
calls ``transform_to(frame)`` on `orbit`'s ICRS SkyCoord output,
so be careful about things like Galactocentric defaults
method : {"closest", "linear", "cubic"} or Callable, optional, keyword
how to construct the affine parameter function mapping time to
coordinate The orbit integration is precomputed at discrete time
intervals and the path frame needs to be able to match coordinates
to the closest point on the orbit. This can be done by treating
the orbit as an immutable catalog (option "closest", default), a
linearly interpolatable set of points (option "linear") with
:class:`~scipy.interpolate.interp1d`, a cubic interpolation
(option "cubic") with :class:`~scipy.interpolate.CubicSpline`,
or any user-provided univariate function like those of "linear"
and "cubic".
.. todo::
minimization set by "tol" parameter for closest point on curve
time_unit : Quantity or Nont, optional, keyword only
preferred time unit. None means no modification.
Galpy defaults to Gyr.
Raises
------
ValueError
if `orbit` is not integrated
if `orbit_bkw` is not integrated (and not None)
if the potential in `orbit` and `orbit_bkw` do not match.
Notes
-----
Make sure that the orbit does not wrap and get close to itself.
There are currently no checks that this is happening and this
can lead to some very strange coordinate projections.
.. todo::
- allow affine parameter to be arc length
- break out into a function that calls OrbitSkyOffsetFrame
so not a classmethod. Keeps this class general.
"""
# ----------------------------------------------------
# Checks
# if orbit_bkw is not None, check has potential and matches orbit
# check times go in opposite directions
try: # check orbit has potential
orbit._pot
except AttributeError: # it does not
raise ValueError("`orbit` must be integrated.")
else: # it does
# grab the potential, integration time, and origin
potential = orbit._pot
t_fwd = orbit.time(use_physical=True)
origin = orbit.SkyCoord().transform_to(frame)
if orbit_bkw is not None:
try:
orbit._pot
except AttributeError:
raise ValueError("`orbit_bkw` must be integrated.")
if orbit._pot != potential:
raise ValueError(
("potential in `orbit` and `orbit_bkw` do not match.")
)
# check time "directions" are opposite
# they "fwd" direction can be back in time. That is permitted
# just not allowed to have the "bkw" direction be the same.
time_bkw_sgn = np.sign(orbit_bkw.t[1] - orbit_bkw.t[0])
t_fwd_sgn = np.sign(orbit.t[1] - orbit.t[0])
if time_bkw_sgn == t_fwd_sgn:
raise ValueError(
(
"`orbit` and `orbit_bkw` must be integrated"
"in opposite time directions"
)
)
# get back time, converting to correct units
t_bkw = orbit_bkw.time(use_physical=True)[::-1] << t_fwd.unit
# concatenate fwd and bkw orbits into single orbit catalog
orbit_catalog = concatenate(
[
orbit_bkw.SkyCoord(t_bkw).transform_to(frame).frame,
orbit.SkyCoord(t_fwd).transform_to(frame).frame,
]
)
orbit_time = np.concatenate((t_bkw, t_fwd))
# create time bounds
_u = t_fwd.unit if time_unit is None else time_unit
if t_fwd[-1] > t_bkw[-1]:
t_bnds = [t_bkw[0], t_fwd[-1]] << _u
else:
t_bnds = [t_fwd[0], t_bkw[-1]] << _u
else:
# create orbit catalog
orbit_catalog = orbit.SkyCoord(t_fwd).transform_to(frame)
orbit_time = t_fwd
# create time bounds
_u = t_fwd.unit if time_unit is None else time_unit
if t_fwd[-1] > t_fwd[0]:
t_bnds = [t_fwd[0], t_fwd[-1]] << _u
else:
t_bnds = [t_fwd[-1], t_fwd[0]] << _u
# convert orbit time to `time_unit`, if specified
if time_unit is not None:
orbit_time <<= time_unit # (in-place modification)
else: # time unit is not None
time_unit = orbit_time.unit
# ----------------------------------------------------
# construct affine function
track_fn_kw = kwargs.pop("track_fn_kw", {"afn_name": "time"})
inverse_track_fn_kw = kwargs.pop("inverse_track_fn_kw", {})
if isinstance(method, str):
# now need to check it's one of the supported strings
if method.lower() == "closest":
# does a catalog match between the coordinates and the
# points on the orbit from the orbit integration
# track_fn = ("closest", orbit_catalog, orbit_time)
track_fn = catalog_match_track
inverse_track_fn = catalog_match_inverse_track
track_fn_kw = {
"catalog": orbit_catalog,
"affine_param": orbit_time,
"adj_sep_sgn": True,
"afn_name": "time",
}
inverse_track_fn_kw = {
"catalog": orbit_catalog,
"affine_param": orbit_time,
}
_interpolation_flag = False
else:
# need to handle interpolated functions separately,
# because requires a closest point "optimization"
if method.lower() == "linear":
method = interpolate.interp1d
elif method.lower() == "cubic":
method = interpolate.CubicSpline
else:
raise ValueError(f"method {method} not known.")
_interpolation_flag = True
elif callable(method):
_interpolation_flag = True
else:
raise ValueError(f"method {method} not known.")
# /if
if _interpolation_flag:
# get affine parameter and data interpolation ready
affine_param = orbit_time.to_value(time_unit)
_data = orbit_catalog.data._values
_data = _data.view(np.float64).reshape(_data.shape + (-1,))
# construct interpolation
_track_array_fn = method(affine_param, _data.T)
# astropy coordinate object reconstruction information
_cmpt = [
(c, orbit_catalog.data._units[c],) # in right order, but dict
for c in orbit_catalog.data.components # tuple = order
]
_frame = orbit_catalog.frame.realize_frame(None)
_rep = orbit_catalog.frame.get_representation_cls()
# interpolation function as astropy, not numpy
def _track_fn(affine_param):
"""_track_array_fn converted back into a coordinate object."""
_oc = _track_array_fn(affine_param) # evaluate interpolation
rep = _rep(**{c: _oc[i] * u for i, (c, u) in enumerate(_cmpt)})
catalog = SkyCoord( # make catalog (TODO not SkyCoord)
_frame.realize_frame(rep)
)
return catalog
# make actual track and inverse functions
def track_fn(
coords, tol=None, init_sampler: T.Union[float, int] = 1e4
):
"""Map coordinates to catalog projection.
.. todo::
change defualt `tol` to something else
Parameters
----------
coords: SkyCoord
tol : float or None, optional
If None (default), does catalog match but no further
minimization. The catalog is the evaluation of the "method"
function with affine parameter linearly sampled with
`init_sampler` points between "afn_bounds"
init_sampler : int or float, optional
the number of points in ``np.linspace`` for an inital
sampling of the affine parameter.
"""
_aff = np.linspace( # affine parameter
*t_bnds, num=int(init_sampler)
)[1:-2]
catalog = _track_fn(_aff)
if tol is None:
return catalog_match_track(
coords,
catalog=catalog,
affine_param=_aff,
adj_sep_sgn=True,
)
else: # TODO actual minimization
# initial guess
idx, sep2d, _, = match_coordinates_sky(coords, catalog)
raise ValueError("Not yet implemented")
return idx
# /def
def inverse_track_fn(coords, **kw):
"""Map catalog projection to coordinates.
.. todo::
this is a very generic function. put somewhere else.
Parameters
----------
coords: SkyCoord
in orbit projection frame
"""
orbit_pos = _track_fn(coords.afn)
# need to know offset direction. Most should have _PA
pa = coords.data._PA
# Now offset by `sep` in direction `pa`
out = orbit_pos.directional_offset_by(
pa, np.abs(coords.sep) # need abs() b/c `adj_sep_sgn`
).represent_as("spherical")
return out.lon, out.lat, coords.distance
# /def
# /if
# TODO correct construction with init to get the Attributes
self = cls(
*args,
origin=origin,
potential=potential,
track_fn=track_fn,
inverse_track_fn=inverse_track_fn,
track_fn_kw=track_fn_kw,
inverse_track_fn_kw=inverse_track_fn_kw,
afn_bounds=t_bnds,
**kwargs,
)
return self
# /def
# ---------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""Initialize OrbitSkyOffsetFrame."""
# remove arguments into __new__ that are not supported in __init__.
kwargs.pop("track_fn", None)
kwargs.pop("inverse_track_fn", None)
kwargs.pop("track_fn_kw", None)
kwargs.pop("inverse_track_fn_kw", None)
afn_bounds = kwargs.pop("afn_bounds", None)
if afn_bounds is not None:
kwargs["afn_bound_tail"], kwargs["afn_bound_lead"] = afn_bounds
# initialize
super().__init__(*args, **kwargs)
if self.origin is not None and not self.origin.has_data:
raise ValueError(
"The origin supplied to OrbitSkyOffsetFrame has no data."
)
# /def
# /class
##############################################################################
# END
| [
"astropy.coordinates.attributes.QuantityAttribute",
"astropy.coordinates.representation.SphericalRepresentation",
"astropy.coordinates.match_coordinates_sky",
"astropy.units.Quantity",
"numpy.abs",
"astropy.coordinates.attributes.CoordinateAttribute",
"numpy.sign",
"astropy.coordinates.frame_transform... | [((6490, 6613), 'astropy.coordinates.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionWithKwargTransform', 'framecls', '_OrbitSkyOffsetFramecls'], {'func_kwargs': 'track_fn_kw'}), '(FunctionWithKwargTransform, framecls,\n _OrbitSkyOffsetFramecls, func_kwargs=track_fn_kw)\n', (6521, 6613), False, 'from astropy.coordinates import concatenate, frame_transform_graph, match_coordinates_sky\n'), ((8509, 8640), 'astropy.coordinates.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionWithKwargTransform', '_OrbitSkyOffsetFramecls', 'framecls'], {'func_kwargs': 'inverse_track_fn_kw'}), '(FunctionWithKwargTransform,\n _OrbitSkyOffsetFramecls, framecls, func_kwargs=inverse_track_fn_kw)\n', (8540, 8640), False, 'from astropy.coordinates import concatenate, frame_transform_graph, match_coordinates_sky\n'), ((9394, 9498), 'astropy.coordinates.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransform', '_OrbitSkyOffsetFramecls', '_OrbitSkyOffsetFramecls'], {}), '(FunctionTransform, _OrbitSkyOffsetFramecls,\n _OrbitSkyOffsetFramecls)\n', (9425, 9498), False, 'from astropy.coordinates import concatenate, frame_transform_graph, match_coordinates_sky\n'), ((12238, 12283), 'astropy.coordinates.attributes.CoordinateAttribute', 'CoordinateAttribute', ([], {'default': 'None', 'frame': 'None'}), '(default=None, frame=None)\n', (12257, 12283), False, 'from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute\n'), ((12342, 12396), 'astropy.coordinates.attributes.QuantityAttribute', 'QuantityAttribute', ([], {'default': '(-np.inf * u.Myr)', 'unit': 'u.Myr'}), '(default=-np.inf * u.Myr, unit=u.Myr)\n', (12359, 12396), False, 'from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute\n'), ((12418, 12471), 'astropy.coordinates.attributes.QuantityAttribute', 'QuantityAttribute', ([], {'default': '(np.inf * u.Myr)', 'unit': 'u.Myr'}), '(default=np.inf * u.Myr, unit=u.Myr)\n', (12435, 12471), False, 'from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute\n'), ((8956, 9016), 'astropy.coordinates.representation.SphericalRepresentation', 'SphericalRepresentation', ([], {'lon': 'lon', 'lat': 'lat', 'distance': 'distance'}), '(lon=lon, lat=lat, distance=distance)\n', (8979, 9016), False, 'from astropy.coordinates.representation import SphericalRepresentation, UnitSphericalRepresentation\n'), ((12570, 12624), 'astropy.units.Quantity', 'u.Quantity', (['[self.afn_bound_tail, self.afn_bound_lead]'], {}), '([self.afn_bound_tail, self.afn_bound_lead])\n', (12580, 12624), True, 'import astropy.units as u\n'), ((5341, 5390), 'astropy.coordinates.attributes.CoordinateAttribute', 'CoordinateAttribute', ([], {'frame': 'framecls', 'default': 'None'}), '(frame=framecls, default=None)\n', (5360, 5390), False, 'from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute\n'), ((19472, 19512), 'numpy.sign', 'np.sign', (['(orbit_bkw.t[1] - orbit_bkw.t[0])'], {}), '(orbit_bkw.t[1] - orbit_bkw.t[0])\n', (19479, 19512), True, 'import numpy as np\n'), ((19537, 19569), 'numpy.sign', 'np.sign', (['(orbit.t[1] - orbit.t[0])'], {}), '(orbit.t[1] - orbit.t[0])\n', (19544, 19569), True, 'import numpy as np\n'), ((20294, 20324), 'numpy.concatenate', 'np.concatenate', (['(t_bkw, t_fwd)'], {}), '((t_bkw, t_fwd))\n', (20308, 20324), True, 'import numpy as np\n'), ((25756, 25794), 'astropy.coordinates.match_coordinates_sky', 'match_coordinates_sky', (['coords', 'catalog'], {}), '(coords, catalog)\n', (25777, 25794), False, 'from astropy.coordinates import concatenate, frame_transform_graph, match_coordinates_sky\n'), ((26559, 26577), 'numpy.abs', 'np.abs', (['coords.sep'], {}), '(coords.sep)\n', (26565, 26577), True, 'import numpy as np\n')] |
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from cape_document_qa.cape_docqa_machine_reader import get_production_model_config,CapeDocQAMachineReaderModel
from cape_machine_reader.cape_machine_reader_model import CapeMachineReaderModelInterface
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
import hashlib
from pytest import fixture
class RandomMachineReaderModel(CapeMachineReaderModelInterface):
def __init__(self, _):
self.tokenizer = NltkAndPunctTokenizer()
def tokenize(self, text):
tokens = self.tokenizer.tokenize_paragraph_flat(text)
spans = self.tokenizer.convert_to_spans(text, [tokens])[0]
return tokens, spans
def get_document_embedding(self, text):
np.random.seed(int(hashlib.sha1(text.encode()).hexdigest(), 16) % 10 ** 8)
document_tokens, _ = self.tokenize(text)
return np.random.random((len(document_tokens), 240))
def get_logits(self, question, document_embedding):
question_tokens, _ = self.tokenize(question)
n_words = document_embedding.shape[0]
qseed = int(hashlib.sha1(question.encode()).hexdigest(), 16) % 10 ** 8
dseed = int(np.sum(document_embedding) * 10 ** 6) % 10 ** 8
np.random.seed(dseed + qseed)
start_logits = np.random.random(n_words)
off = np.random.randint(1, 5)
end_logits = np.concatenate([np.zeros(off) + np.min(start_logits), start_logits[off:]])
return start_logits[:n_words], end_logits[:n_words]
@fixture
def context():
return '''"Super Bowl 50 was an American football game to determine the champion of the National Football League (NFL) for the 2015 season. The American Football Conference (AFC) champion <NAME> defeated the National Football Conference (NFC) champion Carolina Panthers 24\u201310 to earn their third Super Bowl title. The game was played on February 7, 2016, at Levi's Stadium in the San Francisco Bay Area at Santa Clara, California. As this was the 50th Super Bowl, the league emphasized the \"golden anniversary\" with various gold-themed initiatives, as well as temporarily suspending the tradition of naming each Super Bowl game with Roman numerals (under which the game would have been known as \"Super Bowl L\"), so that the logo could prominently feature the Arabic numerals 50."'''
@fixture
def question():
return "Which NFL team represented the AFC at Super Bowl 50?"
@fixture
def answer():
return '<NAME>'
def test_machine_reader_e2e(question, context, answer):
conf = get_production_model_config()
machine_reader = CapeDocQAMachineReaderModel(conf)
doc_embedding = machine_reader.get_document_embedding(context)
start_logits, end_logits = machine_reader.get_logits(question, doc_embedding)
toks, offs = machine_reader.tokenize(context)
st, en = offs[np.argmax(start_logits)], offs[np.argmax(end_logits)]
mr_answer = context[st[0]: en[1]]
assert answer == mr_answer
| [
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"docqa.data_processing.text_utils.NltkAndPunctTokenizer",
"numpy.zeros",
"cape_document_qa.cape_docqa_machine_reader.get_production_model_config",
"numpy.min",
"numpy.random.random",
"numpy.random.randint",
"cape_document_qa.cape_docqa_machine_read... | [((3102, 3131), 'cape_document_qa.cape_docqa_machine_reader.get_production_model_config', 'get_production_model_config', ([], {}), '()\n', (3129, 3131), False, 'from cape_document_qa.cape_docqa_machine_reader import get_production_model_config, CapeDocQAMachineReaderModel\n'), ((3153, 3186), 'cape_document_qa.cape_docqa_machine_reader.CapeDocQAMachineReaderModel', 'CapeDocQAMachineReaderModel', (['conf'], {}), '(conf)\n', (3180, 3186), False, 'from cape_document_qa.cape_docqa_machine_reader import get_production_model_config, CapeDocQAMachineReaderModel\n'), ((1038, 1061), 'docqa.data_processing.text_utils.NltkAndPunctTokenizer', 'NltkAndPunctTokenizer', ([], {}), '()\n', (1059, 1061), False, 'from docqa.data_processing.text_utils import NltkAndPunctTokenizer\n'), ((1800, 1829), 'numpy.random.seed', 'np.random.seed', (['(dseed + qseed)'], {}), '(dseed + qseed)\n', (1814, 1829), True, 'import numpy as np\n'), ((1853, 1878), 'numpy.random.random', 'np.random.random', (['n_words'], {}), '(n_words)\n', (1869, 1878), True, 'import numpy as np\n'), ((1893, 1916), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (1910, 1916), True, 'import numpy as np\n'), ((3404, 3427), 'numpy.argmax', 'np.argmax', (['start_logits'], {}), '(start_logits)\n', (3413, 3427), True, 'import numpy as np\n'), ((3435, 3456), 'numpy.argmax', 'np.argmax', (['end_logits'], {}), '(end_logits)\n', (3444, 3456), True, 'import numpy as np\n'), ((1744, 1770), 'numpy.sum', 'np.sum', (['document_embedding'], {}), '(document_embedding)\n', (1750, 1770), True, 'import numpy as np\n'), ((1954, 1967), 'numpy.zeros', 'np.zeros', (['off'], {}), '(off)\n', (1962, 1967), True, 'import numpy as np\n'), ((1970, 1990), 'numpy.min', 'np.min', (['start_logits'], {}), '(start_logits)\n', (1976, 1990), True, 'import numpy as np\n')] |
# (C) British Crown Copyright 2013 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the Transverse Mercator projection, including OSGB and OSNI.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import cartopy.crs as ccrs
class TestTransverseMercator(object):
def setup_class(self):
self.point_a = (-3.474083, 50.727301)
self.point_b = (0.5, 50.5)
self.src_crs = ccrs.PlateCarree()
def test_default(self):
proj = ccrs.TransverseMercator()
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (-245269.53180633,
5627508.74354959))
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (35474.63566645,
5596583.41949901))
def test_osgb_vals(self):
proj = ccrs.TransverseMercator(central_longitude=-2,
central_latitude=49,
scale_factor=0.9996012717,
false_easting=400000,
false_northing=-100000,
globe=ccrs.Globe(datum='OSGB36',
ellipse='airy'))
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (295971.28667707,
93064.27666368))
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (577274.98380140,
69740.49227181))
def test_nan(self):
proj = ccrs.TransverseMercator()
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
class TestOSGB(object):
def setup_class(self):
self.point_a = (-3.474083, 50.727301)
self.point_b = (0.5, 50.5)
self.src_crs = ccrs.PlateCarree()
self.nan = float('nan')
def test_default(self):
proj = ccrs.OSGB()
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (295971.28667707,
93064.27666368))
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (577274.98380140,
69740.49227181))
def test_nan(self):
proj = ccrs.OSGB()
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
class TestOSNI(object):
def setup_class(self):
self.point_a = (-6.826286, 54.725116)
self.src_crs = ccrs.PlateCarree()
self.nan = float('nan')
def test_default(self):
proj = ccrs.OSNI()
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(
res, (275614.26762651594, 386984.206429612),
decimal=0 if ccrs.PROJ4_VERSION < (5, 0, 0) else 6)
def test_nan(self):
proj = ccrs.OSNI()
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
| [
"cartopy.crs.OSNI",
"cartopy.crs.TransverseMercator",
"numpy.isnan",
"cartopy.crs.OSGB",
"cartopy.crs.Globe",
"cartopy.crs.PlateCarree",
"numpy.testing.assert_array_almost_equal"
] | [((1098, 1116), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1114, 1116), True, 'import cartopy.crs as ccrs\n'), ((1161, 1186), 'cartopy.crs.TransverseMercator', 'ccrs.TransverseMercator', ([], {}), '()\n', (1184, 1186), True, 'import cartopy.crs as ccrs\n'), ((1267, 1346), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['res', '(-245269.53180633, 5627508.74354959)'], {}), '(res, (-245269.53180633, 5627508.74354959))\n', (1303, 1346), True, 'import numpy as np\n'), ((1478, 1555), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['res', '(35474.63566645, 5596583.41949901)'], {}), '(res, (35474.63566645, 5596583.41949901))\n', (1514, 1555), True, 'import numpy as np\n'), ((2174, 2250), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['res', '(295971.28667707, 93064.27666368)'], {}), '(res, (295971.28667707, 93064.27666368))\n', (2210, 2250), True, 'import numpy as np\n'), ((2382, 2457), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['res', '(577274.9838014, 69740.49227181)'], {}), '(res, (577274.9838014, 69740.49227181))\n', (2418, 2457), True, 'import numpy as np\n'), ((2550, 2575), 'cartopy.crs.TransverseMercator', 'ccrs.TransverseMercator', ([], {}), '()\n', (2573, 2575), True, 'import cartopy.crs as ccrs\n'), ((2959, 2977), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2975, 2977), True, 'import cartopy.crs as ccrs\n'), ((3054, 3065), 'cartopy.crs.OSGB', 'ccrs.OSGB', ([], {}), '()\n', (3063, 3065), True, 'import cartopy.crs as ccrs\n'), ((3146, 3222), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['res', '(295971.28667707, 93064.27666368)'], {}), '(res, (295971.28667707, 93064.27666368))\n', (3182, 3222), True, 'import numpy as np\n'), ((3354, 3429), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['res', '(577274.9838014, 69740.49227181)'], {}), '(res, (577274.9838014, 69740.49227181))\n', (3390, 3429), True, 'import numpy as np\n'), ((3522, 3533), 'cartopy.crs.OSGB', 'ccrs.OSGB', ([], {}), '()\n', (3531, 3533), True, 'import cartopy.crs as ccrs\n'), ((3882, 3900), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3898, 3900), True, 'import cartopy.crs as ccrs\n'), ((3977, 3988), 'cartopy.crs.OSNI', 'ccrs.OSNI', ([], {}), '()\n', (3986, 3988), True, 'import cartopy.crs as ccrs\n'), ((4069, 4207), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['res', '(275614.26762651594, 386984.206429612)'], {'decimal': '(0 if ccrs.PROJ4_VERSION < (5, 0, 0) else 6)'}), '(res, (275614.26762651594, \n 386984.206429612), decimal=0 if ccrs.PROJ4_VERSION < (5, 0, 0) else 6)\n', (4105, 4207), True, 'import numpy as np\n'), ((4268, 4279), 'cartopy.crs.OSNI', 'ccrs.OSNI', ([], {}), '()\n', (4277, 4279), True, 'import cartopy.crs as ccrs\n'), ((2674, 2687), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (2682, 2687), True, 'import numpy as np\n'), ((2787, 2800), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (2795, 2800), True, 'import numpy as np\n'), ((3632, 3645), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (3640, 3645), True, 'import numpy as np\n'), ((3745, 3758), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (3753, 3758), True, 'import numpy as np\n'), ((4378, 4391), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (4386, 4391), True, 'import numpy as np\n'), ((4491, 4504), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (4499, 4504), True, 'import numpy as np\n'), ((1994, 2036), 'cartopy.crs.Globe', 'ccrs.Globe', ([], {'datum': '"""OSGB36"""', 'ellipse': '"""airy"""'}), "(datum='OSGB36', ellipse='airy')\n", (2004, 2036), True, 'import cartopy.crs as ccrs\n')] |
import numpy as np
import matplotlib
matplotlib.use('Agg')
import pylab as pl
fig = pl.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200, endpoint=True)
Y = np.sin(2*np.pi*X)
pl.plot(X, Y, lw=2)
pl.ylim(-1.1, 1.1)
pl.grid()
pl.show()
| [
"pylab.show",
"pylab.grid",
"numpy.sin",
"matplotlib.use",
"pylab.figure",
"numpy.linspace",
"pylab.ylim",
"pylab.plot"
] | [((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (51, 58), False, 'import matplotlib\n'), ((85, 118), 'pylab.figure', 'pl.figure', ([], {'figsize': '(5, 4)', 'dpi': '(72)'}), '(figsize=(5, 4), dpi=72)\n', (94, 118), True, 'import pylab as pl\n'), ((168, 205), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(200)'], {'endpoint': '(True)'}), '(0, 2, 200, endpoint=True)\n', (179, 205), True, 'import numpy as np\n'), ((210, 231), 'numpy.sin', 'np.sin', (['(2 * np.pi * X)'], {}), '(2 * np.pi * X)\n', (216, 231), True, 'import numpy as np\n'), ((228, 247), 'pylab.plot', 'pl.plot', (['X', 'Y'], {'lw': '(2)'}), '(X, Y, lw=2)\n', (235, 247), True, 'import pylab as pl\n'), ((248, 266), 'pylab.ylim', 'pl.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (255, 266), True, 'import pylab as pl\n'), ((267, 276), 'pylab.grid', 'pl.grid', ([], {}), '()\n', (274, 276), True, 'import pylab as pl\n'), ((278, 287), 'pylab.show', 'pl.show', ([], {}), '()\n', (285, 287), True, 'import pylab as pl\n')] |
def test_peakfind():
import bardensr
import numpy as np
import numpy.random as npr
A=np.zeros((5,6,7,3))
A[1,2,3,0]=1
A[2,3,4,1]=1.5
A[3,4,5,2]=.001
df=bardensr.spot_calling.find_peaks(A,.5)
assert len(df)==2
assert np.sum(df['j']==2)==0
def test_singleshot():
import bardensr
import numpy as np
import numpy.random as npr
import pandas as pd
import scipy as sp
import tensorflow as tf
import scipy.ndimage
import matplotlib.pyplot as plt
n_codes=5
n_frames=15
npr.seed(0)
# make codebook
codebook=(npr.randn(n_frames,n_codes)>0)*1.0
codebook=codebook/np.sqrt(np.sum(codebook**2,axis=0,keepdims=True))
# make density
density=np.zeros((50,50,n_codes))
spots=[]
for i in range(5):
m0=npr.randint(0,50)
m1=npr.randint(0,50)
j=npr.randint(0,n_codes)
density[m0,m1,j]=npr.rand()+1
spots.append([m0,0,m1,j])
spots=pd.DataFrame(data=spots,columns=['m0','m1','m2','j'])
# make image
image=np.einsum('xyj,nj->nxy',density,codebook)
image=sp.ndimage.gaussian_filter(image,(0,1,1))
# find spots
V=bardensr.spot_calling.estimate_density_singleshot(image[:,:,None],codebook,noisefloor=.01)
df=bardensr.spot_calling.find_peaks(V,.8)
match=bardensr.benchmarks.match_colored_pointclouds(spots,df,5)
assert match.fn==0
assert match.fp==0 | [
"pandas.DataFrame",
"numpy.random.seed",
"numpy.sum",
"numpy.random.randn",
"scipy.ndimage.gaussian_filter",
"numpy.zeros",
"numpy.einsum",
"bardensr.spot_calling.find_peaks",
"numpy.random.randint",
"numpy.random.rand",
"bardensr.spot_calling.estimate_density_singleshot",
"bardensr.benchmarks... | [((103, 125), 'numpy.zeros', 'np.zeros', (['(5, 6, 7, 3)'], {}), '((5, 6, 7, 3))\n', (111, 125), True, 'import numpy as np\n'), ((188, 228), 'bardensr.spot_calling.find_peaks', 'bardensr.spot_calling.find_peaks', (['A', '(0.5)'], {}), '(A, 0.5)\n', (220, 228), False, 'import bardensr\n'), ((552, 563), 'numpy.random.seed', 'npr.seed', (['(0)'], {}), '(0)\n', (560, 563), True, 'import numpy.random as npr\n'), ((738, 765), 'numpy.zeros', 'np.zeros', (['(50, 50, n_codes)'], {}), '((50, 50, n_codes))\n', (746, 765), True, 'import numpy as np\n'), ((973, 1030), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'spots', 'columns': "['m0', 'm1', 'm2', 'j']"}), "(data=spots, columns=['m0', 'm1', 'm2', 'j'])\n", (985, 1030), True, 'import pandas as pd\n'), ((1055, 1098), 'numpy.einsum', 'np.einsum', (['"""xyj,nj->nxy"""', 'density', 'codebook'], {}), "('xyj,nj->nxy', density, codebook)\n", (1064, 1098), True, 'import numpy as np\n'), ((1107, 1151), 'scipy.ndimage.gaussian_filter', 'sp.ndimage.gaussian_filter', (['image', '(0, 1, 1)'], {}), '(image, (0, 1, 1))\n', (1133, 1151), True, 'import scipy as sp\n'), ((1173, 1272), 'bardensr.spot_calling.estimate_density_singleshot', 'bardensr.spot_calling.estimate_density_singleshot', (['image[:, :, None]', 'codebook'], {'noisefloor': '(0.01)'}), '(image[:, :, None],\n codebook, noisefloor=0.01)\n', (1222, 1272), False, 'import bardensr\n'), ((1271, 1311), 'bardensr.spot_calling.find_peaks', 'bardensr.spot_calling.find_peaks', (['V', '(0.8)'], {}), '(V, 0.8)\n', (1303, 1311), False, 'import bardensr\n'), ((1321, 1380), 'bardensr.benchmarks.match_colored_pointclouds', 'bardensr.benchmarks.match_colored_pointclouds', (['spots', 'df', '(5)'], {}), '(spots, df, 5)\n', (1366, 1380), False, 'import bardensr\n'), ((260, 280), 'numpy.sum', 'np.sum', (["(df['j'] == 2)"], {}), "(df['j'] == 2)\n", (266, 280), True, 'import numpy as np\n'), ((811, 829), 'numpy.random.randint', 'npr.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (822, 829), True, 'import numpy.random as npr\n'), ((840, 858), 'numpy.random.randint', 'npr.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (851, 858), True, 'import numpy.random as npr\n'), ((868, 891), 'numpy.random.randint', 'npr.randint', (['(0)', 'n_codes'], {}), '(0, n_codes)\n', (879, 891), True, 'import numpy.random as npr\n'), ((599, 627), 'numpy.random.randn', 'npr.randn', (['n_frames', 'n_codes'], {}), '(n_frames, n_codes)\n', (608, 627), True, 'import numpy.random as npr\n'), ((664, 708), 'numpy.sum', 'np.sum', (['(codebook ** 2)'], {'axis': '(0)', 'keepdims': '(True)'}), '(codebook ** 2, axis=0, keepdims=True)\n', (670, 708), True, 'import numpy as np\n'), ((916, 926), 'numpy.random.rand', 'npr.rand', ([], {}), '()\n', (924, 926), True, 'import numpy.random as npr\n')] |
# -*- coding: utf-8 -*-
"""
@license: MIT
@author: t.okuda
"""
import os
import requests
import random
import math
from pathlib import Path
from glob import glob
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.data.experimental import AUTOTUNE
from tfaug import TfrecordConverter, DatasetCreator
DATADIR = 'testdata/tfaug/'
def quick_toy_sample():
# source image and labels
imgpaths = ['testdata/tfaug/Lenna.png'] * 10
labels = np.random.randint(0, 255, 10)
# configure and create dataset
dataset = DatasetCreator(shuffle_buffer=10,
batch_size=2,
repeat=True,
standardize=True, # add augmentation params here
training=True
).dataset_from_path(imgpaths,labels)
# define and compile the model
mbnet = tf.keras.applications.MobileNetV2(include_top=True, weights=None)
mbnet.compile(optimizer="adam", loss="mse", metrics=["mae"])
# learn the model
mbnet.fit(dataset, epochs=10, steps_per_epoch=10)
def toy_example():
# prepare inputs and labels
batch_size = 2
shuffle_buffer = 10
filepaths = [DATADIR+'Lenna.png'] * 10
class_labels = np.random.randint(0, 10, 10)
# define tfrecord path
path_record = DATADIR + 'multi_input.tfrecord'
# generate tfrecords in a one-line
TfrecordConverter().tfrecord_from_path_label(filepaths,
class_labels,
path_record)
# define augmentation parameters
aug_parms = {'random_rotation': 5,
'random_flip_left_right': True,
'random_shear': [5, 5],
'random_brightness': 0.2,
'random_crop': None,
'random_blur': [0.5, 1.5]}
# set augmentation and learning parameters to dataset
dc = DatasetCreator(shuffle_buffer, batch_size, **aug_parms, repeat=True, training=True)
# define dataset and number of dataset
ds, imgcnt = dc.dataset_from_tfrecords(path_record)
# define the handling of multiple inputs => just resize and concat
# multiple inputs were named {'image_in0', 'image_in1' , ...} in inputs dictionary
def concat_inputs(inputs, label):
resized = tf.image.resize(inputs['image_in1'], (512, 512))
concated = tf.concat([inputs['image_in0'], resized], axis=-1)
# resized = tf.image.resize(concated, (224, 224))
return concated, label
ds = ds.map(concat_inputs)
# define the model
mbnet = tf.keras.applications.MobileNetV2(input_shape = [512,512,6],
include_top=True,
weights=None)
mbnet.compile(optimizer="adam", loss="mse", metrics=["mae"])
# learn the model
mbnet.fit(ds,
epochs=10,
steps_per_epoch=imgcnt//batch_size,)
# evaluate the model
mbnet.fit(ds,
epochs=10,
steps_per_epoch=imgcnt//batch_size,)
def lean_mnist():
"""
tfaug application for classification
Returns
-------
None.
"""
os.makedirs(DATADIR+'mnist', exist_ok=True)
# load mnist dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# save as tfrecord
TfrecordConverter().tfrecord_from_ary_label(
x_train, y_train, DATADIR+'mnist/train.tfrecord')
TfrecordConverter().tfrecord_from_ary_label(
x_test, y_test, DATADIR+'mnist/test.tfrecord')
batch_size, shuffle_buffer = 25, 25
# create training and validation dataset using tfaug:
ds_train, train_cnt = (DatasetCreator(shuffle_buffer=shuffle_buffer,
batch_size=batch_size,
repeat=True,
random_zoom=[0.1, 0.1],
random_rotation=20,
random_shear=[10, 10],
random_blur=10,
training=True)
.dataset_from_tfrecords([DATADIR+'mnist/train.tfrecord']))
ds_valid, valid_cnt = (DatasetCreator(shuffle_buffer=shuffle_buffer,
batch_size=batch_size,
repeat=True,
training=False)
.dataset_from_tfrecords([DATADIR+'mnist/test.tfrecord']))
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)])
model.compile(optimizer=tf.keras.optimizers.Adam(0.002),
loss=tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True),
metrics=['sparse_categorical_accuracy'])
# learn model
model.fit(ds_train,
epochs=10,
validation_data=ds_valid,
steps_per_epoch=train_cnt//batch_size,
validation_steps=valid_cnt//batch_size)
# evaluation result
model.evaluate(ds_valid,
steps=valid_cnt//batch_size,
verbose=2)
def learn_ade20k():
crop_size = [256, 256] # cropped input image size
# original input image size
batch_size = 5
# donwload
overlap_buffer = 256 // 4
download_and_convert_ADE20k(crop_size, overlap_buffer)
# define training and validation dataset using tfaug:
tfrecords_train = glob(
DATADIR+'ADE20k/ADEChallengeData2016/tfrecord/training_*.tfrecords')
ds_train, train_cnt = (DatasetCreator(shuffle_buffer=batch_size,
batch_size=batch_size,
repeat=True,
standardize=True,
random_zoom=[0.1, 0.1],
random_rotation=10,
random_shear=[10, 10],
random_crop=crop_size,
dtype=tf.float16,
training=True)
.dataset_from_tfrecords(tfrecords_train))
tfrecords_valid = glob(
DATADIR+'ADE20k/ADEChallengeData2016/tfrecord/validation_*.tfrecords')
ds_valid, valid_cnt = (DatasetCreator(shuffle_buffer=batch_size,
batch_size=batch_size,
repeat=True,
standardize=True,
random_crop=crop_size,
dtype=tf.float16,
training=False)
.dataset_from_tfrecords(tfrecords_valid))
# define model
model = def_unet(tuple(crop_size+[3]), 151) # 150class + padding area
model.compile(optimizer=tf.keras.optimizers.Adam(0.002),
loss=tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True),
metrics=['sparse_categorical_accuracy'])
model.fit(ds_train,
epochs=10,
validation_data=ds_valid,
steps_per_epoch=train_cnt//batch_size,
validation_steps=valid_cnt//batch_size)
model.evaluate(ds_valid,
steps=valid_cnt//batch_size,
verbose=2)
def test_parse_tfrecord():
tfexample_format = {"image": tf.io.FixedLenFeature([], dtype=tf.string),
"label": tf.io.FixedLenFeature([], dtype=tf.string)}
def decoder(tfexamples):
return [tf.map_fn(tf.image.decode_png, tfexamples[key], dtype=tf.uint8)
if value.dtype == tf.string else tfexamples[key]
for key, value in tfexample_format.items()]
tfrecords_train = glob(
DATADIR+'ADE20k/ADEChallengeData2016/tfrecord/training_*.tfrecords')
for tfrecord in tfrecords_train:
# define dataset
ds = tf.data.TFRecordDataset(
tfrecord, num_parallel_reads=len(tfrecords_train))
ds_train = (ds.batch(4)
.apply(tf.data.experimental.parse_example_dataset(tfexample_format))
.map(decoder)
.prefetch(AUTOTUNE))
for piyo in tqdm(ds_train, total=1000//4):
img, lbl = piyo
def def_unet(input_size, output_filters):
# define downstack model
mbnet2 = tf.keras.applications.MobileNetV2(input_size,
include_top=False,
weights='imagenet')
# Use the activations of these layers
layer_names = [
'block_16_project', # 8x8
'block_13_expand_relu', # 16x16
'block_6_expand_relu', # 32x32
'block_3_expand_relu', # 64x64
'block_1_expand_relu', # 128x128
]
mbnet2_outputs = [mbnet2.get_layer(name).output for name in layer_names]
# Create the feature extraction model
down_stack = tf.keras.Model(inputs=mbnet2.input, outputs=mbnet2_outputs)
down_stack.trainable = False
# define upstack
upstack = [upsample(2**i) for i in range(7, 3, -1)]
# define input
inputs = tf.keras.layers.Input(input_size)
# calc down stack
skips = down_stack(inputs)
x, skips = skips[0], skips[1:]
# calc up stack
for up, skip in zip(upstack, skips):
x = up(x)
x = tf.keras.layers.Concatenate()([x, skip])
# output dimension
x = upsample(output_filters)(x)
# define output of the model
return tf.keras.Model(inputs=inputs, outputs=x)
def upsample(filters):
upsample = tf.keras.Sequential()
upsample.add(tf.keras.layers.UpSampling2D())
upsample.add(tf.keras.layers.Conv2D(filters, 3, padding='same'))
upsample.add(tf.keras.layers.BatchNormalization())
return upsample
def download_and_convert_ADE20k(input_size, overlap_buffer):
"""
Donload and Converts the ADE20k dataset into tfrecord format.
"""
link = r'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
dstdir = DATADIR+'ADE20k/'
os.makedirs(dstdir, exist_ok=True)
if not os.path.isfile(dstdir+'ADEChallengeData2016.zip'):
print('start donloading ADE20k...', flush=True)
with requests.get(link, stream=True) as response:
total_size_in_bytes = int(
response.headers.get('content-length', 0))
block_size = 1024 # 1 Kilobyte
progress_bar = tqdm(total=total_size_in_bytes,
unit='iB', unit_scale=True)
with open(dstdir+'ADEChallengeData2016.zip', 'wb') as f:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
f.write(data)
progress_bar.close()
assert total_size_in_bytes != 0 and progress_bar.n == total_size_in_bytes,\
"download ADE20k failed"
if len(glob(dstdir+'ADEChallengeData2016/images/validation/ADE_*.jpg')) != 2000:
print('unzipping ADE20k...')
from zipfile import ZipFile
with ZipFile(dstdir+'ADEChallengeData2016.zip', 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(dstdir)
# plot random label sample
check_ADE20k_label()
dstdir += 'ADEChallengeData2016/'
converter = TfrecordConverter()
patchdir = dstdir+'patch/'
if len(glob(patchdir+'images/*/ADE_*_no*.jpg')) != 64563: # 99209?+
print('splitting imgs to patch...', flush=True)
# split images into patch
overlap_buffer = [overlap_buffer, overlap_buffer]
for dirname in ['training', 'validation']:
print('convert', dirname, 'into patch')
os.makedirs(f'{patchdir}images/{dirname}', exist_ok=True)
os.makedirs(f'{patchdir}annotations/{dirname}', exist_ok=True)
srcimgs = glob(f'{dstdir}/images/{dirname}/ADE_*.jpg')
for path in tqdm(srcimgs):
im = np.array(Image.open(path))
lb = np.array(Image.open(os.sep.join(
Path(path).parts[:-3] + ('annotations', dirname, Path(path).stem+'.png'))))
img_patches = converter.split_to_patch(
im, input_size, overlap_buffer, dtype=np.uint8)
lbl_pathces = converter.split_to_patch(
lb, input_size, overlap_buffer, dtype=np.uint8)
basename = Path(path).stem
for no, (img_patch, lbl_patch) in enumerate(zip(img_patches, lbl_pathces)):
Image.fromarray(img_patch).save(
f'{patchdir}images/{dirname}/{basename}_no{no}.jpg')
Image.fromarray(lbl_patch).save(
f'{patchdir}annotations/{dirname}/{basename}_no{no}.png')
image_per_shards = 1000
if len(glob(dstdir+'tfrecord/*_*.tfrecords')) != 101:
print('convert ADE20k to tfrecord', flush=True)
os.makedirs(dstdir+'tfrecord', exist_ok=True)
for dirname in ['training', 'validation']:
imgs = glob(f'{patchdir}/images/{dirname}/ADE_*.jpg')
# shuffle image order
random.shuffle(imgs)
path_labels = [os.sep.join(
Path(path).parts[:-3] + ('annotations', dirname, Path(path).stem+'.png'))
for path in imgs]
converter.tfrecord_from_path_label(imgs,
path_labels,
dstdir +
f'tfrecord/{dirname}.tfrecords',
image_per_shards)
path_tfrecord = DATADIR+'ADE20k/ADEChallengeData2016/tfrecord/validation_1.tfrecords'
# check converted tfrecord
dc = DatasetCreator(
False, 10, training=True)
ds, datacnt = dc.dataset_from_tfrecords([path_tfrecord])
piyo = next(iter(ds.take(1)))
plt.imshow(piyo[0][5])
def check_ADE20k_label():
path_label = DATADIR + \
'ADE20k/ADEChallengeData2016/annotations/validation/ADE_val_00000001.png'
paths = glob(
DATADIR+'ADE20k/ADEChallengeData2016/annotations/validation/ADE_val_*.png')
# create palette
colnum = math.ceil(math.pow(150, 1/3))
colvals = list(range(255//colnum, 255, 255//colnum))
palette = [[r, g, b] for r in colvals for g in colvals for b in colvals]
palette = sum(palette, [])
fig, axs = plt.subplots(5, 1, figsize=(50, 10))
for ax in axs:
path_label = random.choice(paths)
print(path_label)
npimg = np.array(Image.open(path_label))
pimg = Image.fromarray(npimg, 'P')
pimg.putpalette(palette)
ax.imshow(pimg)
def aug_multi_input():
# toy example for multiple inputs
# prepare inputs and labels
batch_size = 2
shuffle_buffer = 10
filepaths0 = [DATADIR+'Lenna.png'] * 10
filepaths1 = [DATADIR+'Lenna_crop.png'] * 10
labels = np.random.randint(0, 10, 10)
# define tfrecord path
path_record = DATADIR + 'multi_input.tfrecord'
# generate tfrecords in a one-line
TfrecordConverter().tfrecord_from_path_label(list(zip(filepaths0, filepaths1)),
labels,
path_record,
n_imgin=2)
# define augmentation parameters
aug_parms = {'standardize': False,
'random_rotation': 5,
'random_flip_left_right': True,
'random_zoom': [0.2, 0.2],
'random_shear': [5, 5],
'random_brightness': 0.2,
'random_crop': None,
'random_blur': [0.5, 1.5],
'num_transforms': 10}
# define dataset
dc = DatasetCreator(shuffle_buffer, batch_size, **aug_parms, repeat=True, training=True)
ds, imgcnt = dc.dataset_from_tfrecords(path_record)
# define the handling of multiple inputs => just resize and concat
# multiple inputs were named {'image_in0', 'image_in1' , ...} in inputs dictionary
def concat_inputs(inputs, label):
resized = tf.image.resize(inputs['image_in1'], (512, 512))
concated = tf.concat([inputs['image_in0'], resized], axis=-1)
# resized = tf.image.resize(concated, (224, 224))
return concated, label
ds = ds.map(concat_inputs)
# define the model
mbnet = tf.keras.applications.MobileNetV2(input_shape = [512,512,6],
include_top=True,
weights=None)
mbnet.compile(optimizer="adam", loss="mse", metrics=["mae"])
# learn the model
mbnet.fit(ds,
epochs=10,
steps_per_epoch=imgcnt//batch_size,)
if __name__ == '__main__':
pass
lean_mnist()
# learn_ade20k()
# check_ADE20k_label()
# aug_multi_input()
| [
"tensorflow.keras.layers.Dense",
"random.shuffle",
"os.path.isfile",
"numpy.random.randint",
"pathlib.Path",
"tfaug.TfrecordConverter",
"tensorflow.keras.Sequential",
"glob.glob",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers... | [((543, 572), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(10)'], {}), '(0, 255, 10)\n', (560, 572), True, 'import numpy as np\n'), ((982, 1047), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'include_top': '(True)', 'weights': 'None'}), '(include_top=True, weights=None)\n', (1015, 1047), True, 'import tensorflow as tf\n'), ((1365, 1393), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (1382, 1393), True, 'import numpy as np\n'), ((2074, 2161), 'tfaug.DatasetCreator', 'DatasetCreator', (['shuffle_buffer', 'batch_size'], {'repeat': '(True)', 'training': '(True)'}), '(shuffle_buffer, batch_size, **aug_parms, repeat=True,\n training=True)\n', (2088, 2161), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((2755, 2852), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': '[512, 512, 6]', 'include_top': '(True)', 'weights': 'None'}), '(input_shape=[512, 512, 6], include_top=\n True, weights=None)\n', (2788, 2852), True, 'import tensorflow as tf\n'), ((3380, 3425), 'os.makedirs', 'os.makedirs', (["(DATADIR + 'mnist')"], {'exist_ok': '(True)'}), "(DATADIR + 'mnist', exist_ok=True)\n", (3391, 3425), False, 'import os\n'), ((3492, 3527), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (3525, 3527), True, 'import tensorflow as tf\n'), ((5909, 5984), 'glob.glob', 'glob', (["(DATADIR + 'ADE20k/ADEChallengeData2016/tfrecord/training_*.tfrecords')"], {}), "(DATADIR + 'ADE20k/ADEChallengeData2016/tfrecord/training_*.tfrecords')\n", (5913, 5984), False, 'from glob import glob\n'), ((6708, 6785), 'glob.glob', 'glob', (["(DATADIR + 'ADE20k/ADEChallengeData2016/tfrecord/validation_*.tfrecords')"], {}), "(DATADIR + 'ADE20k/ADEChallengeData2016/tfrecord/validation_*.tfrecords')\n", (6712, 6785), False, 'from glob import glob\n'), ((8368, 8443), 'glob.glob', 'glob', (["(DATADIR + 'ADE20k/ADEChallengeData2016/tfrecord/training_*.tfrecords')"], {}), "(DATADIR + 'ADE20k/ADEChallengeData2016/tfrecord/training_*.tfrecords')\n", (8372, 8443), False, 'from glob import glob\n'), ((8979, 9068), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', (['input_size'], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_size, include_top=False, weights=\n 'imagenet')\n", (9012, 9068), True, 'import tensorflow as tf\n'), ((9569, 9628), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'mbnet2.input', 'outputs': 'mbnet2_outputs'}), '(inputs=mbnet2.input, outputs=mbnet2_outputs)\n', (9583, 9628), True, 'import tensorflow as tf\n'), ((9774, 9807), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['input_size'], {}), '(input_size)\n', (9795, 9807), True, 'import tensorflow as tf\n'), ((10135, 10175), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (10149, 10175), True, 'import tensorflow as tf\n'), ((10216, 10237), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (10235, 10237), True, 'import tensorflow as tf\n'), ((10698, 10732), 'os.makedirs', 'os.makedirs', (['dstdir'], {'exist_ok': '(True)'}), '(dstdir, exist_ok=True)\n', (10709, 10732), False, 'import os\n'), ((11995, 12014), 'tfaug.TfrecordConverter', 'TfrecordConverter', ([], {}), '()\n', (12012, 12014), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((14463, 14503), 'tfaug.DatasetCreator', 'DatasetCreator', (['(False)', '(10)'], {'training': '(True)'}), '(False, 10, training=True)\n', (14477, 14503), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((14612, 14634), 'matplotlib.pyplot.imshow', 'plt.imshow', (['piyo[0][5]'], {}), '(piyo[0][5])\n', (14622, 14634), True, 'import matplotlib.pyplot as plt\n'), ((14786, 14872), 'glob.glob', 'glob', (["(DATADIR + 'ADE20k/ADEChallengeData2016/annotations/validation/ADE_val_*.png')"], {}), "(DATADIR +\n 'ADE20k/ADEChallengeData2016/annotations/validation/ADE_val_*.png')\n", (14790, 14872), False, 'from glob import glob\n'), ((15122, 15158), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'figsize': '(50, 10)'}), '(5, 1, figsize=(50, 10))\n', (15134, 15158), True, 'import matplotlib.pyplot as plt\n'), ((15644, 15672), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (15661, 15672), True, 'import numpy as np\n'), ((16516, 16603), 'tfaug.DatasetCreator', 'DatasetCreator', (['shuffle_buffer', 'batch_size'], {'repeat': '(True)', 'training': '(True)'}), '(shuffle_buffer, batch_size, **aug_parms, repeat=True,\n training=True)\n', (16530, 16603), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((17154, 17251), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': '[512, 512, 6]', 'include_top': '(True)', 'weights': 'None'}), '(input_shape=[512, 512, 6], include_top=\n True, weights=None)\n', (17187, 17251), True, 'import tensorflow as tf\n'), ((2476, 2524), 'tensorflow.image.resize', 'tf.image.resize', (["inputs['image_in1']", '(512, 512)'], {}), "(inputs['image_in1'], (512, 512))\n", (2491, 2524), True, 'import tensorflow as tf\n'), ((2544, 2594), 'tensorflow.concat', 'tf.concat', (["[inputs['image_in0'], resized]"], {'axis': '(-1)'}), "([inputs['image_in0'], resized], axis=-1)\n", (2553, 2594), True, 'import tensorflow as tf\n'), ((7989, 8031), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), '([], dtype=tf.string)\n', (8010, 8031), True, 'import tensorflow as tf\n'), ((8066, 8108), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), '([], dtype=tf.string)\n', (8087, 8108), True, 'import tensorflow as tf\n'), ((8833, 8864), 'tqdm.tqdm', 'tqdm', (['ds_train'], {'total': '(1000 // 4)'}), '(ds_train, total=1000 // 4)\n', (8837, 8864), False, 'from tqdm import tqdm\n'), ((10255, 10285), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {}), '()\n', (10283, 10285), True, 'import tensorflow as tf\n'), ((10304, 10354), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters', '(3)'], {'padding': '"""same"""'}), "(filters, 3, padding='same')\n", (10326, 10354), True, 'import tensorflow as tf\n'), ((10373, 10409), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (10407, 10409), True, 'import tensorflow as tf\n'), ((10745, 10796), 'os.path.isfile', 'os.path.isfile', (["(dstdir + 'ADEChallengeData2016.zip')"], {}), "(dstdir + 'ADEChallengeData2016.zip')\n", (10759, 10796), False, 'import os\n'), ((13621, 13668), 'os.makedirs', 'os.makedirs', (["(dstdir + 'tfrecord')"], {'exist_ok': '(True)'}), "(dstdir + 'tfrecord', exist_ok=True)\n", (13632, 13668), False, 'import os\n'), ((14921, 14941), 'math.pow', 'math.pow', (['(150)', '(1 / 3)'], {}), '(150, 1 / 3)\n', (14929, 14941), False, 'import math\n'), ((15199, 15219), 'random.choice', 'random.choice', (['paths'], {}), '(paths)\n', (15212, 15219), False, 'import random\n'), ((15310, 15337), 'PIL.Image.fromarray', 'Image.fromarray', (['npimg', '"""P"""'], {}), "(npimg, 'P')\n", (15325, 15337), False, 'from PIL import Image\n'), ((16875, 16923), 'tensorflow.image.resize', 'tf.image.resize', (["inputs['image_in1']", '(512, 512)'], {}), "(inputs['image_in1'], (512, 512))\n", (16890, 16923), True, 'import tensorflow as tf\n'), ((16943, 16993), 'tensorflow.concat', 'tf.concat', (["[inputs['image_in0'], resized]"], {'axis': '(-1)'}), "([inputs['image_in0'], resized], axis=-1)\n", (16952, 16993), True, 'import tensorflow as tf\n'), ((627, 725), 'tfaug.DatasetCreator', 'DatasetCreator', ([], {'shuffle_buffer': '(10)', 'batch_size': '(2)', 'repeat': '(True)', 'standardize': '(True)', 'training': '(True)'}), '(shuffle_buffer=10, batch_size=2, repeat=True, standardize=\n True, training=True)\n', (641, 725), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((1521, 1540), 'tfaug.TfrecordConverter', 'TfrecordConverter', ([], {}), '()\n', (1538, 1540), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((3556, 3575), 'tfaug.TfrecordConverter', 'TfrecordConverter', ([], {}), '()\n', (3573, 3575), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((3663, 3682), 'tfaug.TfrecordConverter', 'TfrecordConverter', ([], {}), '()\n', (3680, 3682), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((3889, 4078), 'tfaug.DatasetCreator', 'DatasetCreator', ([], {'shuffle_buffer': 'shuffle_buffer', 'batch_size': 'batch_size', 'repeat': '(True)', 'random_zoom': '[0.1, 0.1]', 'random_rotation': '(20)', 'random_shear': '[10, 10]', 'random_blur': '(10)', 'training': '(True)'}), '(shuffle_buffer=shuffle_buffer, batch_size=batch_size, repeat\n =True, random_zoom=[0.1, 0.1], random_rotation=20, random_shear=[10, 10\n ], random_blur=10, training=True)\n', (3903, 4078), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((4476, 4578), 'tfaug.DatasetCreator', 'DatasetCreator', ([], {'shuffle_buffer': 'shuffle_buffer', 'batch_size': 'batch_size', 'repeat': '(True)', 'training': '(False)'}), '(shuffle_buffer=shuffle_buffer, batch_size=batch_size, repeat\n =True, training=False)\n', (4490, 4578), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((4835, 4880), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (4858, 4880), True, 'import tensorflow as tf\n'), ((4890, 4935), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (4911, 4935), True, 'import tensorflow as tf\n'), ((4945, 4973), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (4968, 4973), True, 'import tensorflow as tf\n'), ((4983, 5008), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {}), '(10)\n', (5004, 5008), True, 'import tensorflow as tf\n'), ((5040, 5071), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.002)'], {}), '(0.002)\n', (5064, 5071), True, 'import tensorflow as tf\n'), ((5096, 5159), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (5141, 5159), True, 'import tensorflow as tf\n'), ((6019, 6250), 'tfaug.DatasetCreator', 'DatasetCreator', ([], {'shuffle_buffer': 'batch_size', 'batch_size': 'batch_size', 'repeat': '(True)', 'standardize': '(True)', 'random_zoom': '[0.1, 0.1]', 'random_rotation': '(10)', 'random_shear': '[10, 10]', 'random_crop': 'crop_size', 'dtype': 'tf.float16', 'training': '(True)'}), '(shuffle_buffer=batch_size, batch_size=batch_size, repeat=\n True, standardize=True, random_zoom=[0.1, 0.1], random_rotation=10,\n random_shear=[10, 10], random_crop=crop_size, dtype=tf.float16,\n training=True)\n', (6033, 6250), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((6820, 6981), 'tfaug.DatasetCreator', 'DatasetCreator', ([], {'shuffle_buffer': 'batch_size', 'batch_size': 'batch_size', 'repeat': '(True)', 'standardize': '(True)', 'random_crop': 'crop_size', 'dtype': 'tf.float16', 'training': '(False)'}), '(shuffle_buffer=batch_size, batch_size=batch_size, repeat=\n True, standardize=True, random_crop=crop_size, dtype=tf.float16,\n training=False)\n', (6834, 6981), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((7418, 7449), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.002)'], {}), '(0.002)\n', (7442, 7449), True, 'import tensorflow as tf\n'), ((7474, 7537), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (7519, 7537), True, 'import tensorflow as tf\n'), ((9989, 10018), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (10016, 10018), True, 'import tensorflow as tf\n'), ((10865, 10896), 'requests.get', 'requests.get', (['link'], {'stream': '(True)'}), '(link, stream=True)\n', (10877, 10896), False, 'import requests\n'), ((11079, 11138), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_size_in_bytes', 'unit': '"""iB"""', 'unit_scale': '(True)'}), "(total=total_size_in_bytes, unit='iB', unit_scale=True)\n", (11083, 11138), False, 'from tqdm import tqdm\n'), ((11554, 11619), 'glob.glob', 'glob', (["(dstdir + 'ADEChallengeData2016/images/validation/ADE_*.jpg')"], {}), "(dstdir + 'ADEChallengeData2016/images/validation/ADE_*.jpg')\n", (11558, 11619), False, 'from glob import glob\n'), ((11714, 11763), 'zipfile.ZipFile', 'ZipFile', (["(dstdir + 'ADEChallengeData2016.zip')", '"""r"""'], {}), "(dstdir + 'ADEChallengeData2016.zip', 'r')\n", (11721, 11763), False, 'from zipfile import ZipFile\n'), ((12058, 12099), 'glob.glob', 'glob', (["(patchdir + 'images/*/ADE_*_no*.jpg')"], {}), "(patchdir + 'images/*/ADE_*_no*.jpg')\n", (12062, 12099), False, 'from glob import glob\n'), ((12383, 12440), 'os.makedirs', 'os.makedirs', (['f"""{patchdir}images/{dirname}"""'], {'exist_ok': '(True)'}), "(f'{patchdir}images/{dirname}', exist_ok=True)\n", (12394, 12440), False, 'import os\n'), ((12453, 12515), 'os.makedirs', 'os.makedirs', (['f"""{patchdir}annotations/{dirname}"""'], {'exist_ok': '(True)'}), "(f'{patchdir}annotations/{dirname}', exist_ok=True)\n", (12464, 12515), False, 'import os\n'), ((12538, 12582), 'glob.glob', 'glob', (['f"""{dstdir}/images/{dirname}/ADE_*.jpg"""'], {}), "(f'{dstdir}/images/{dirname}/ADE_*.jpg')\n", (12542, 12582), False, 'from glob import glob\n'), ((12607, 12620), 'tqdm.tqdm', 'tqdm', (['srcimgs'], {}), '(srcimgs)\n', (12611, 12620), False, 'from tqdm import tqdm\n'), ((13510, 13549), 'glob.glob', 'glob', (["(dstdir + 'tfrecord/*_*.tfrecords')"], {}), "(dstdir + 'tfrecord/*_*.tfrecords')\n", (13514, 13549), False, 'from glob import glob\n'), ((13738, 13784), 'glob.glob', 'glob', (['f"""{patchdir}/images/{dirname}/ADE_*.jpg"""'], {}), "(f'{patchdir}/images/{dirname}/ADE_*.jpg')\n", (13742, 13784), False, 'from glob import glob\n'), ((13831, 13851), 'random.shuffle', 'random.shuffle', (['imgs'], {}), '(imgs)\n', (13845, 13851), False, 'import random\n'), ((15271, 15293), 'PIL.Image.open', 'Image.open', (['path_label'], {}), '(path_label)\n', (15281, 15293), False, 'from PIL import Image\n'), ((15800, 15819), 'tfaug.TfrecordConverter', 'TfrecordConverter', ([], {}), '()\n', (15817, 15819), False, 'from tfaug import TfrecordConverter, DatasetCreator\n'), ((8156, 8219), 'tensorflow.map_fn', 'tf.map_fn', (['tf.image.decode_png', 'tfexamples[key]'], {'dtype': 'tf.uint8'}), '(tf.image.decode_png, tfexamples[key], dtype=tf.uint8)\n', (8165, 8219), True, 'import tensorflow as tf\n'), ((12652, 12668), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (12662, 12668), False, 'from PIL import Image\n'), ((13097, 13107), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (13101, 13107), False, 'from pathlib import Path\n'), ((8675, 8735), 'tensorflow.data.experimental.parse_example_dataset', 'tf.data.experimental.parse_example_dataset', (['tfexample_format'], {}), '(tfexample_format)\n', (8717, 8735), True, 'import tensorflow as tf\n'), ((13225, 13251), 'PIL.Image.fromarray', 'Image.fromarray', (['img_patch'], {}), '(img_patch)\n', (13240, 13251), False, 'from PIL import Image\n'), ((13355, 13381), 'PIL.Image.fromarray', 'Image.fromarray', (['lbl_patch'], {}), '(lbl_patch)\n', (13370, 13381), False, 'from PIL import Image\n'), ((13909, 13919), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (13913, 13919), False, 'from pathlib import Path\n'), ((13958, 13968), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (13962, 13968), False, 'from pathlib import Path\n'), ((12744, 12754), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (12748, 12754), False, 'from pathlib import Path\n'), ((12793, 12803), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (12797, 12803), False, 'from pathlib import Path\n')] |
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
import os
from data.loaddata import get_loader
from models.model import Generator, NLayerDiscriminator
from models.MSSSIM import ssim, msssim
from models.perceptual import PNet
from models.inceptionv3 import InceptionV3
from utils import save_singleimages, checkpath, compute_fid_score
cudnn.benchmark = True
def main(args):
# by default we only consider single gpu inference
assert(len(args.gpu) == 1)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# load data
data_loader_val, num_test = get_loader(args, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, training=False)
print('finished data loading')
# Generator
colorguide = True
if args.nocolor:
colorguide = False
netG = Generator(lambdas=None, colorguide=colorguide, input_nc=1, output_nc=1)
netG.load_state_dict(torch.load(args.model_path))
if torch.cuda.is_available():
netG = netG.cuda()
out_path = args.out_path
checkpath(out_path)
predictions_fid_real = []
predictions_fid_fake = []
fid_model = InceptionV3().cuda()
fid_model.eval()
Perceptual = PNet().cuda()
avg_ssim = 0
lpips = 0
# validate on test set, TODO: test with single color guide image
with torch.no_grad():
netG.eval()
for i, (img_real, wf_real, color_real) in enumerate(data_loader_val, 0):
img_real = img_real.cuda()
wf_real = wf_real.cuda()
if colorguide:
color_real = color_real.cuda()
# in case we are in the last interation
batch_size = img_real.size(0)
img_fake, wf_fake, _, _, _, _, _ = netG(trainG=False, img_real=None, wf_real=wf_real, color_real=color_real)
ssim_score = ssim(img_real, img_fake).item() * batch_size
avg_ssim += ssim_score
lpips += Perceptual(img_real, img_fake) * batch_size
# TODO: save generated wireframes
save_singleimages(img_fake, out_path, i*args.batch_size, args.img_size)
pred_fid_real = fid_model(img_real)[0]
pred_fid_fake = fid_model(img_fake)[0]
predictions_fid_real.append(pred_fid_real.data.cpu().numpy().reshape(batch_size, -1))
predictions_fid_fake.append(pred_fid_fake.data.cpu().numpy().reshape(batch_size, -1))
print('SSIM: {:6f}'.format(avg_ssim/num_test))
print('LPIPS: {:6f}'.format(lpips/num_test))
predictions_fid_real = np.concatenate(predictions_fid_real, 0)
predictions_fid_fake = np.concatenate(predictions_fid_fake, 0)
fid = compute_fid_score(predictions_fid_fake, predictions_fid_real)
print('FID: {:6f}'.format(fid))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--nocolor', action='store_true',
help='not using color guided model, needs to be also specified when training the model')
parser.add_argument('--model_path', type=str, default='./results/saved_models/wfrenderer_G/netG_epoch_300.pth',
help='path for saved G model')
parser.add_argument('--root_path', type=str, default='./data',
help='root path for wireframe dataset')
parser.add_argument('--out_path', type=str, default='./results/out_imgs',
help='path for saving rendered images')
parser.add_argument('--img_size', type=int, default=256,
help='default image size for the wireframe renderer')
parser.add_argument('--batch_size', type=int, default=40)
parser.add_argument('--num_workers', type=int, default=16)
parser.add_argument('--gpu', type=str, default='0')
args = parser.parse_args()
print(args)
main(args) | [
"argparse.ArgumentParser",
"utils.checkpath",
"data.loaddata.get_loader",
"torch.load",
"models.inceptionv3.InceptionV3",
"models.MSSSIM.ssim",
"models.perceptual.PNet",
"torch.cuda.is_available",
"utils.compute_fid_score",
"models.model.Generator",
"torch.no_grad",
"utils.save_singleimages",
... | [((642, 752), 'data.loaddata.get_loader', 'get_loader', (['args'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'training': '(False)'}), '(args, batch_size=args.batch_size, shuffle=False, num_workers=\n args.num_workers, training=False)\n', (652, 752), False, 'from data.loaddata import get_loader\n'), ((888, 959), 'models.model.Generator', 'Generator', ([], {'lambdas': 'None', 'colorguide': 'colorguide', 'input_nc': '(1)', 'output_nc': '(1)'}), '(lambdas=None, colorguide=colorguide, input_nc=1, output_nc=1)\n', (897, 959), False, 'from models.model import Generator, NLayerDiscriminator\n'), ((1027, 1052), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1050, 1052), False, 'import torch\n'), ((1119, 1138), 'utils.checkpath', 'checkpath', (['out_path'], {}), '(out_path)\n', (1128, 1138), False, 'from utils import save_singleimages, checkpath, compute_fid_score\n'), ((2945, 2970), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2968, 2970), False, 'import argparse\n'), ((988, 1015), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (998, 1015), False, 'import torch\n'), ((1412, 1427), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1425, 1427), False, 'import torch\n'), ((2669, 2708), 'numpy.concatenate', 'np.concatenate', (['predictions_fid_real', '(0)'], {}), '(predictions_fid_real, 0)\n', (2683, 2708), True, 'import numpy as np\n'), ((2741, 2780), 'numpy.concatenate', 'np.concatenate', (['predictions_fid_fake', '(0)'], {}), '(predictions_fid_fake, 0)\n', (2755, 2780), True, 'import numpy as np\n'), ((2796, 2857), 'utils.compute_fid_score', 'compute_fid_score', (['predictions_fid_fake', 'predictions_fid_real'], {}), '(predictions_fid_fake, predictions_fid_real)\n', (2813, 2857), False, 'from utils import save_singleimages, checkpath, compute_fid_score\n'), ((1220, 1233), 'models.inceptionv3.InceptionV3', 'InceptionV3', ([], {}), '()\n', (1231, 1233), False, 'from models.inceptionv3 import InceptionV3\n'), ((1281, 1287), 'models.perceptual.PNet', 'PNet', ([], {}), '()\n', (1285, 1287), False, 'from models.perceptual import PNet\n'), ((2145, 2218), 'utils.save_singleimages', 'save_singleimages', (['img_fake', 'out_path', '(i * args.batch_size)', 'args.img_size'], {}), '(img_fake, out_path, i * args.batch_size, args.img_size)\n', (2162, 2218), False, 'from utils import save_singleimages, checkpath, compute_fid_score\n'), ((1934, 1958), 'models.MSSSIM.ssim', 'ssim', (['img_real', 'img_fake'], {}), '(img_real, img_fake)\n', (1938, 1958), False, 'from models.MSSSIM import ssim, msssim\n')] |
# -*- coding: utf-8 -*-
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module defines functions for executing STRIDE program and parsing
its output."""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
import os.path
import numpy as np
from prody.atomic import ATOMIC_FIELDS
from prody.atomic import AtomGroup
from prody.utilities import gunzip, which, PLATFORM
from pdbfile import parsePDB
from wwpdbftp import fetchPDB
__all__ = ['execSTRIDE', 'parseSTRIDE', 'performSTRIDE']
pkg = __import__(__package__)
LOGGER = pkg.LOGGER
def execSTRIDE(pdb, outputname=None, outputdir=None):
"""Execute STRIDE program for given *pdb*. *pdb* can be an identifier or
a PDB file path. If *pdb* is a compressed file, it will be decompressed
using Python :mod:`gzip` library. When no *outputname* is given, output
name will be :file:`pdb.stride`. :file:`.stride` extension will be
appended automatically to *outputname*. If :file:`outputdir` is given,
STRIDE output and uncompressed PDB file will be written into this folder.
Upon successful execution of :command:`stride pdb > out` command, output
filename is returned.
For more information on STRIDE see http://webclu.bio.wzw.tum.de/stride/.
If you benefited from STRIDE, please consider citing [DF95]_."""
stride = which('stride')
if stride is None:
raise EnvironmentError('command not found: stride executable is not '
'found in one of system paths')
assert outputname is None or isinstance(outputname, str),\
'outputname must be a string'
assert outputdir is None or isinstance(outputdir, str),\
'outputdir must be a string'
if not os.path.isfile(pdb):
pdb = fetchPDB(pdb, compressed=False)
if pdb is None:
raise ValueError('pdb is not a valid PDB identifier or filename')
if os.path.splitext(pdb)[1] == '.gz':
if outputdir is None:
pdb = gunzip(pdb, os.path.splitext(pdb)[0])
else:
pdb = gunzip(pdb, os.path.join(outputdir,
os.path.split(os.path.splitext(pdb)[0])[1]))
if outputdir is None:
outputdir = '.'
if outputname is None:
out = os.path.join(outputdir,
os.path.splitext(os.path.split(pdb)[1])[0] + '.stride')
else:
out = os.path.join(outputdir, outputname + '.stride')
status = os.system('{0:s} {1:s} > {2:s}'.format(stride, pdb, out))
if status == 0:
return out
def parseSTRIDE(stride, ag):
"""Parse STRIDE output from file *stride* into :class:`~.AtomGroup`
instance *ag*. STRIDE output file must be in the new format used
from July 1995 and onwards. When *stride* file is parsed, following
attributes are added to *ag*:
* *stride_resnum*: STRIDE's sequential residue number, starting at the
first residue actually in the data set.
* *stride_phi*, *stride_psi*: peptide backbone torsion angles phi and psi
* *stride_area*: residue solvent accessible area"""
if not os.path.isfile(stride):
raise IOError('{0:s} is not a valid file path'.format(stride))
if not isinstance(ag, AtomGroup):
raise TypeError('ag argument must be an AtomGroup instance')
stride = open(stride)
n_atoms = ag.numAtoms()
NUMBER = np.zeros(n_atoms, int)
AREA = np.zeros(n_atoms, float)
PHI = np.zeros(n_atoms, float)
PSI = np.zeros(n_atoms, float)
ag.setSecstrs(np.zeros(n_atoms), dtype=ATOMIC_FIELDS['secondary'].dtype)
for line in stride:
if not line.startswith('ASG '):
continue
res = ag[(line[9], int(line[10:15]), line[15].strip())]
if res is None:
continue
indices = res.getIndices()
res.setSecstrs(line[24].strip())
NUMBER[indices] = int(line[16:20])
PHI[indices] = float(line[42:49])
PSI[indices] = float(line[52:59])
AREA[indices] = float(line[64:69])
ag.setData('stride_resnum', NUMBER)
ag.setData('stride_phi', PHI)
ag.setData('stride_psi', PSI)
ag.setData('stride_area', AREA)
return ag
def performSTRIDE(pdb):
"""Perform STRIDE calculations and parse results. STRIDE data is
returned in an :class:`~.AtomGroup` instance. See also
:func:`execSTRIDE` and :func:`parseSTRIDE`."""
pdb = fetchPDB(pdb, compressed=False)
return parseSTRIDE(execSTRIDE(pdb), parsePDB(pdb))
| [
"prody.utilities.which",
"numpy.zeros",
"pdbfile.parsePDB",
"wwpdbftp.fetchPDB"
] | [((2040, 2055), 'prody.utilities.which', 'which', (['"""stride"""'], {}), "('stride')\n", (2045, 2055), False, 'from prody.utilities import gunzip, which, PLATFORM\n'), ((4111, 4133), 'numpy.zeros', 'np.zeros', (['n_atoms', 'int'], {}), '(n_atoms, int)\n', (4119, 4133), True, 'import numpy as np\n'), ((4145, 4169), 'numpy.zeros', 'np.zeros', (['n_atoms', 'float'], {}), '(n_atoms, float)\n', (4153, 4169), True, 'import numpy as np\n'), ((4180, 4204), 'numpy.zeros', 'np.zeros', (['n_atoms', 'float'], {}), '(n_atoms, float)\n', (4188, 4204), True, 'import numpy as np\n'), ((4215, 4239), 'numpy.zeros', 'np.zeros', (['n_atoms', 'float'], {}), '(n_atoms, float)\n', (4223, 4239), True, 'import numpy as np\n'), ((5139, 5170), 'wwpdbftp.fetchPDB', 'fetchPDB', (['pdb'], {'compressed': '(False)'}), '(pdb, compressed=False)\n', (5147, 5170), False, 'from wwpdbftp import fetchPDB\n'), ((2465, 2496), 'wwpdbftp.fetchPDB', 'fetchPDB', (['pdb'], {'compressed': '(False)'}), '(pdb, compressed=False)\n', (2473, 2496), False, 'from wwpdbftp import fetchPDB\n'), ((4259, 4276), 'numpy.zeros', 'np.zeros', (['n_atoms'], {}), '(n_atoms)\n', (4267, 4276), True, 'import numpy as np\n'), ((5211, 5224), 'pdbfile.parsePDB', 'parsePDB', (['pdb'], {}), '(pdb)\n', (5219, 5224), False, 'from pdbfile import parsePDB\n')] |
from numpy import sum
from gwlfe.Input.LandUse.NLU import NLU
from gwlfe.Memoization import memoize
@memoize
def AreaTotal(NRur, NUrb, Area):
result = 0
nlu = NLU(NRur, NUrb)
for l in range(NRur):
result += Area[l]
for l in range(NRur, nlu):
result += Area[l]
return result
@memoize
def AreaTotal_f(Area):
return sum(Area)
| [
"gwlfe.Input.LandUse.NLU.NLU",
"numpy.sum"
] | [((170, 185), 'gwlfe.Input.LandUse.NLU.NLU', 'NLU', (['NRur', 'NUrb'], {}), '(NRur, NUrb)\n', (173, 185), False, 'from gwlfe.Input.LandUse.NLU import NLU\n'), ((358, 367), 'numpy.sum', 'sum', (['Area'], {}), '(Area)\n', (361, 367), False, 'from numpy import sum\n')] |
#!/usr/bin/env python
import numpy as np
import math
from scipy.spatial import KDTree
import rospy
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Int32
from styx_msgs.msg import Lane, Waypoint
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish.
MAX_DECEL = 0.5 # m/s^2 Max Deceleration
HZ_SETTING = 50 # Hz Publishing Rate
STOP_NUM_WP_BACK = 2 # How many waypoints must the car stop before light
class WaypointUpdater(object):
def __init__(self):
#Initialization
rospy.init_node('waypoint_updater')
# Member Variables
self.base_lane = None
self.obstacle_wp_idx = -1
self.pose = None
self.stopline_wp_idx = -1
self.waypoints_2d = None
self.waypoint_tree = None
# Initialize Subscribers and Publishers
self.init_connections()
#Rather than rospy.spin(), manage the publishing frequency manually.
self.loop()
def init_connections(self):
#Initialize Subscribers and Publishers
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
def pose_cb(self, msg):
#Call back function for position
self.pose = msg
def waypoints_cb(self, msg):
#Call back function for waypoints
self.base_lane = msg
# Initialize self.waypoints_2d before subscriber in order to prevent callback before intialized
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
for waypoint in msg.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d) # For O(log(n)) search
def traffic_cb(self, msg):
#Call back function for traffic light index
self.stopline_wp_idx = msg.data # Index of waypoint closest to traffic light
def obstacle_cb(self, msg):
# Callback for obstacle index
self.obstacle_wp_idx = msg.data # Index of waypoint closest to obstacle
def loop(self):
# Enforce a standard Publishing Rate
rate = rospy.Rate(HZ_SETTING)
while not rospy.is_shutdown():
if self.pose and self.base_lane:
self.publish_waypoints()
rate.sleep()
def get_closest_waypoint_index(self):
# Return the Index of the closest waypoint
x = self.pose.pose.position.x
y = self.pose.pose.position.y
# Return 1 item which is closest t [x,y], query returns [item, index] so use [1]
closest_index = self.waypoint_tree.query([x,y],1)[1]
# Check if the waypoint is ahead or behind vehicle.
closest_coord = self.waypoints_2d[closest_index]
prev_coord = self.waypoints_2d[closest_index - 1]
# Eq for hyperplane through closest_coord
close_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x,y])
'''
-> If the angle between A and B are greater than 90 degrees, the dot product will be negative (less than zero)
-> pos_vect can either be between prev_vect and close_vect or in front of close_vect
-> If val < 0, angle is less than 90 so close_vect is infront of pos_vect
-> If val > 0, angle is greater than 90 so pos_vect is ahead of close_vect
'''
val = np.dot(close_vect - prev_vect, pos_vect - close_vect)
# If the waypoint is behind, use next point
if val > 0:
closest_index = (closest_index + 1) % len(self.waypoints_2d)
return closest_index
def publish_waypoints(self):
# Publish next set of waypoints
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
# Generate the next set of way points
lane = Lane()
closest_idx = self.get_closest_waypoint_index()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]
# If there is no traffic light nearby or green light, continue path.
if self.stopline_wp_idx >= farthest_idx:
lane.waypoints = base_waypoints
# If yellow light
elif self.stopline_wp_idx < 0:
stop_idx = min(max(self.stopline_wp_idx - closest_idx - 2, 0), len(base_waypoints)-1)
dist_to_yellow = self.distance(base_waypoints, 0, stop_idx)
# Run yellow if feasible
if dist_to_yellow < 2.5 * base_waypoints[0].twist.twist.linear.x:
lane.waypoints = base_waypoints
# Else stop
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
# Red Light, plan for deceleration
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
def decelerate_waypoints(self, waypoints, closest_idx):
# For each waypoint, slow downt the velocity
decelerated_wp = []
for i, wp in enumerate(waypoints):
# Preserve Position
new_wp = Waypoint()
new_wp.pose = wp.pose
# Stop Index, -2 to stop a little behind the stopline
stop_idx = max(self.stopline_wp_idx - closest_idx - STOP_NUM_WP_BACK, 0)
distance = self.distance(waypoints, i, stop_idx)
# Increasing Deceleration per Iteration (Uniform Deceleration): v^2 = 2*a*x
velocity = math.sqrt(2 * MAX_DECEL * distance)
if velocity < 1.:
velocity = 0.
new_wp.twist.twist.linear. x = min(velocity, wp.twist.twist.linear.x)
decelerated_wp.append(new_wp)
return decelerated_wp
def distance(self, waypoints, wp1, wp2):
#Compute Total Distance between two waypoints using sum of segment lengths
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| [
"rospy.logerr",
"rospy.Subscriber",
"math.sqrt",
"styx_msgs.msg.Lane",
"rospy.Publisher",
"rospy.Rate",
"rospy.is_shutdown",
"numpy.array",
"rospy.init_node",
"scipy.spatial.KDTree",
"numpy.dot",
"styx_msgs.msg.Waypoint"
] | [((528, 563), 'rospy.init_node', 'rospy.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (543, 563), False, 'import rospy\n'), ((1062, 1122), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1078, 1122), False, 'import rospy\n'), ((1131, 1191), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1147, 1191), False, 'import rospy\n'), ((1200, 1261), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1216, 1261), False, 'import rospy\n'), ((1270, 1333), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/obstacle_waypoint"""', 'Int32', 'self.obstacle_cb'], {}), "('/obstacle_waypoint', Int32, self.obstacle_cb)\n", (1286, 1333), False, 'import rospy\n'), ((1369, 1423), 'rospy.Publisher', 'rospy.Publisher', (['"""final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('final_waypoints', Lane, queue_size=1)\n", (1384, 1423), False, 'import rospy\n'), ((2443, 2465), 'rospy.Rate', 'rospy.Rate', (['HZ_SETTING'], {}), '(HZ_SETTING)\n', (2453, 2465), False, 'import rospy\n'), ((3187, 3210), 'numpy.array', 'np.array', (['closest_coord'], {}), '(closest_coord)\n', (3195, 3210), True, 'import numpy as np\n'), ((3231, 3251), 'numpy.array', 'np.array', (['prev_coord'], {}), '(prev_coord)\n', (3239, 3251), True, 'import numpy as np\n'), ((3271, 3287), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (3279, 3287), True, 'import numpy as np\n'), ((3712, 3765), 'numpy.dot', 'np.dot', (['(close_vect - prev_vect)', '(pos_vect - close_vect)'], {}), '(close_vect - prev_vect, pos_vect - close_vect)\n', (3718, 3765), True, 'import numpy as np\n'), ((4226, 4232), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (4230, 4232), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((1962, 1987), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2d'], {}), '(self.waypoints_2d)\n', (1968, 1987), False, 'from scipy.spatial import KDTree\n'), ((2484, 2503), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2501, 2503), False, 'import rospy\n'), ((5538, 5548), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (5546, 5548), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((5920, 5955), 'math.sqrt', 'math.sqrt', (['(2 * MAX_DECEL * distance)'], {}), '(2 * MAX_DECEL * distance)\n', (5929, 5955), False, 'import math\n'), ((6354, 6419), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (6363, 6419), False, 'import math\n'), ((6706, 6760), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (6718, 6760), False, 'import rospy\n')] |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Processes a directory of DICOM files and creates
both the 2D slices with their associated image masks.
usage: process_dicom_to_hdf5.py [-h] [--print_random_image]
[--data_directory DATA_DIRECTORY]
[--output_filename OUTPUT_FILENAME]
optional arguments:
-h, --help show this help message and exit
--print_random_image unit test: print random image and mask
--data_directory DATA_DIRECTORY
base directory for data
--output_filename OUTPUT_FILENAME
Name of the hdf5 to create for data
Unit test:
1. To print a random DICOM image and its associated mask:
`python process_dicom_to_hdf5.py --print_random_image`
"""
import argparse
import shutil
import atexit
import os
from tqdm import trange
from configparser import ConfigParser
import glob
import pandas as pd
import numpy as np
import h5py
import fnmatch # Filter file names
import re # Import regular expressions to extract slice #
from parsing import parse_contour_file, parse_dicom_file, poly_to_mask
#### Read from the configuration file config.ini ####
config = ConfigParser()
config.read("config.ini")
DICOMS_DIR_BASE = config.get("local", "DATA_DIR_BASE") + r"dicoms/" # Top-level directory for dicoms
CONTOURS_DIR_BASE = config.get("local", "DATA_DIR_BASE") + r"contourfiles/" # Top-level directory for contour files
CONTOURS_SUB_DIR = config.get("local", "CONTOURS_SUB_DIR")
LINK_FILE_NAME = config.get("local", "LINK_FILE_NAME")
class readable_dir(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir=values
if not os.path.isdir(prospective_dir):
raise argparse.ArgumentTypeError("{0} is not a valid path".format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
setattr(namespace,self.dest,prospective_dir)
else:
raise argparse.ArgumentTypeError("{0} is not a readable directory".format(prospective_dir))
parser = argparse.ArgumentParser(description="Process the DICOM files and masks")
parser.add_argument("--print_random_image", action="store_true", default=False,
help="unit test: print random image and mask")
parser.add_argument("--data_directory", action=readable_dir,
default=config.get("local", "DATA_DIR_BASE"),
help="base directory for data")
parser.add_argument("--output_filename", default=config.get("local", "HDF5_FILENAME"), help="Name of the hdf5 to create for data")
args = parser.parse_args()
DATA_DIR_BASE = args.data_directory
HDF5_FILENAME = args.output_filename
def getFiles(dfLink, idx):
'''
Get the list of DICOM files and contour files associated with this patient idx
'''
dicomDirname = DICOMS_DIR_BASE + dfLink["patient_id"].iloc[idx] + "/" # DICOM Directory name
contourDirname = CONTOURS_DIR_BASE + dfLink["original_id"].iloc[idx] + CONTOURS_SUB_DIR # Contour Directory name
dicomFiles = glob.glob(dicomDirname + "*.dcm") # Get the DICOM files within this directory
contourFiles = glob.glob(contourDirname + "*.txt") # Get the contour files within this directory
return dicomFiles, contourFiles
def get_matching_slice(contourFilename, dicomFiles):
'''
Associates the DICOM slice with the contour file.
The assumption here is that the last 4 digits in the contour filename are the
slice number from the DICOM. Verified this in the EDA python notebook
by plotting the masks over the DICOM images.
'''
sliceName = os.path.basename(os.path.splitext(contourFilename)[0]) # The mask name
# Use regex to find the pattern xxxx-yyyy in the file name. Extract the yyyy and convert to int.
# This will be the slice number
sliceIdx = int(re.findall(r'\d{4}-\d{4}', sliceName)[0][-4:])
dicomFilename = fnmatch.filter(dicomFiles, "*{}.dcm".format(sliceIdx))[0] # Find associated dicom image for slice
return dicomFilename
def getMask(contourFilename, imgWidth, imgHeight, maskThreshold=0.5):
'''
contourFilename = absolute path to the contour file
imgWidth = desired width
imgHeight = desired height
maskThreshold = [0,1] Sanity check. If mask is larger than this percentage, then contour might be bad.
TODO: Add a Hough ellipse detector to validate one and only one round mask.
'''
# Extract the polygon contour points
polygonPoints = parse_contour_file(contourFilename)
# Fill the polygon
imgMask = poly_to_mask(polygonPoints, imgWidth, imgHeight)
# Sanity check - What if the polygon is malformed? Let's check to make sure the mask isn't
# more than a certain percentage of the entire image
percentMask = imgMask.sum() / float(imgMask.shape[0] * imgMask.shape[1])
if percentMask > maskThreshold:
print("The mask is more than {} of the image. Please check if polygon is correct. {} {}".format(maskThreshold,
dicomFilename, sliceName))
return imgMask
def get_imgs_and_masks(contourFilename, dicomFiles):
'''
Returns the image and mask for a given contour filename.
'''
dicomFilename = get_matching_slice(contourFilename, dicomFiles)
imgDict = parse_dicom_file(dicomFilename)
# Get the original DICOM image
img = imgDict["pixel_data"]
(imgHeight, imgWidth) = img.shape # Get the image shape
# Test: The width and height should be the same that is listed in the DICOM header
if (imgDict["dicom"].Rows!= imgHeight) | (imgDict["dicom"].Columns != imgWidth):
print("Image size does not correspond to header {} {}".format(contourFilename, dicomFilename))
# Get the associated mask for the image
imgMask = getMask(contourFilename, imgWidth, imgHeight, maskThreshold=0.5)
return img, imgMask, imgDict
import matplotlib.pyplot as plt
def plot_imgs_and_masks(img, img_mask, imgDict):
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.imshow(img, cmap="bone");
plt.title("Original MRI of heart\nPatient #{}".format(imgDict["dicom"].PatientID));
plt.subplot(1,2,2)
plt.imshow(img, cmap="bone");
plt.imshow(img_mask, alpha=0.3);
plt.title("With inner diameter mask (yellow)");
print("Pixel dimensions are {:.3f} x {:.3f} mm".format(imgDict["dicom"].PixelSpacing[0],
imgDict["dicom"].PixelSpacing[1]))
print("Slice thickness is {:.3f} mm".format(imgDict["dicom"].SliceThickness))
plt.show()
def main():
dfLink = pd.read_csv(DATA_DIR_BASE + LINK_FILE_NAME)
if args.print_random_image: # Test code by plotting random image and mask
patientIdx = np.random.randint(0, dfLink.shape[0])
dicomFiles, contourFiles = getFiles(dfLink, patientIdx)
contourIdx = np.random.randint(0, np.shape(contourFiles)[0])
img, imgMask, imgDict = get_imgs_and_masks(contourFiles[contourIdx], dicomFiles)
plot_imgs_and_masks(img, imgMask, imgDict)
else: # Run the main code
print("Reading from {} file".format(LINK_FILE_NAME))
print("Base data directory is {}".format(DATA_DIR_BASE))
bFirstTensor = True
# The images and masks will be saved into a single HDF5 file.
# HDF5 can handle unlimited file sizes and only loads
# the data from the file needed. Very useful for a data loader
# when the data is too large for the RAM.
with h5py.File(HDF5_FILENAME, "w") as HDF5:
tProgressBar = trange(dfLink.shape[0], desc='Patient', leave=True)
for patientIdx in tProgressBar:
dicomFiles, contourFiles = getFiles(dfLink, patientIdx)
for contourIdx in trange(np.shape(contourFiles)[0]):
tProgressBar.set_description("Patient {} (mask {})".format(patientIdx+1,
os.path.splitext(os.path.basename(contourFiles[contourIdx]))[0]))
img, imgMask, imgDict = get_imgs_and_masks(contourFiles[contourIdx], dicomFiles)
# We need to flatten the image and mask to put in a HDF5 dataframe
imgTensor = img.ravel().reshape(1,-1)
mskTensor = imgMask.ravel().reshape(1,-1)
# HDF5 expects all of the tensors to be of equal size
# So an error will be thrown if any of the masks or images is different size.
# TODO: Check explicitly for different sized images/masks and handle gracefully.
if bFirstTensor:
bFirstTensor = False
imgSet = HDF5.create_dataset("input", data=imgTensor, maxshape=[None, imgTensor.shape[1]])
mskSet = HDF5.create_dataset("output", data=mskTensor, maxshape=[None, mskTensor.shape[1]])
else:
row = imgSet.shape[0] # Count current dataset rows
imgSet.resize(row+1, axis=0) # Add new row
imgSet[row, :] = imgTensor # Insert data into new row
row = mskSet.shape[0] # Count current dataset rows
mskSet.resize(row+1, axis=0) # Add new row
mskSet[row, :] = mskTensor # Insert data into new row
HDF5["input"].attrs["lshape"] = (img.shape[0], img.shape[1], 1)
HDF5["output"].attrs["lshape"] = (imgMask.shape[0], imgMask.shape[1], 1)
print("\n\nFinished.")
if __name__ == "__main__":
main()
| [
"parsing.parse_dicom_file",
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"glob.glob",
"matplotlib.pyplot.imshow",
"re.findall",
"configparser.ConfigParser",
"os.access",
"h5py.File",
"matplotlib.... | [((1891, 1905), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1903, 1905), False, 'from configparser import ConfigParser\n'), ((2736, 2808), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process the DICOM files and masks"""'}), "(description='Process the DICOM files and masks')\n", (2759, 2808), False, 'import argparse\n'), ((3687, 3720), 'glob.glob', 'glob.glob', (["(dicomDirname + '*.dcm')"], {}), "(dicomDirname + '*.dcm')\n", (3696, 3720), False, 'import glob\n'), ((3783, 3818), 'glob.glob', 'glob.glob', (["(contourDirname + '*.txt')"], {}), "(contourDirname + '*.txt')\n", (3792, 3818), False, 'import glob\n'), ((5056, 5091), 'parsing.parse_contour_file', 'parse_contour_file', (['contourFilename'], {}), '(contourFilename)\n', (5074, 5091), False, 'from parsing import parse_contour_file, parse_dicom_file, poly_to_mask\n'), ((5123, 5171), 'parsing.poly_to_mask', 'poly_to_mask', (['polygonPoints', 'imgWidth', 'imgHeight'], {}), '(polygonPoints, imgWidth, imgHeight)\n', (5135, 5171), False, 'from parsing import parse_contour_file, parse_dicom_file, poly_to_mask\n'), ((5894, 5925), 'parsing.parse_dicom_file', 'parse_dicom_file', (['dicomFilename'], {}), '(dicomFilename)\n', (5910, 5925), False, 'from parsing import parse_contour_file, parse_dicom_file, poly_to_mask\n'), ((6545, 6573), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (6555, 6573), True, 'import matplotlib.pyplot as plt\n'), ((6574, 6594), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (6585, 6594), True, 'import matplotlib.pyplot as plt\n'), ((6594, 6622), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""bone"""'}), "(img, cmap='bone')\n", (6604, 6622), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6731), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (6722, 6731), True, 'import matplotlib.pyplot as plt\n'), ((6731, 6759), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""bone"""'}), "(img, cmap='bone')\n", (6741, 6759), True, 'import matplotlib.pyplot as plt\n'), ((6762, 6793), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_mask'], {'alpha': '(0.3)'}), '(img_mask, alpha=0.3)\n', (6772, 6793), True, 'import matplotlib.pyplot as plt\n'), ((6797, 6843), 'matplotlib.pyplot.title', 'plt.title', (['"""With inner diameter mask (yellow)"""'], {}), "('With inner diameter mask (yellow)')\n", (6806, 6843), True, 'import matplotlib.pyplot as plt\n'), ((7069, 7079), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7077, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7103, 7146), 'pandas.read_csv', 'pd.read_csv', (['(DATA_DIR_BASE + LINK_FILE_NAME)'], {}), '(DATA_DIR_BASE + LINK_FILE_NAME)\n', (7114, 7146), True, 'import pandas as pd\n'), ((2537, 2572), 'os.access', 'os.access', (['prospective_dir', 'os.R_OK'], {}), '(prospective_dir, os.R_OK)\n', (2546, 2572), False, 'import os\n'), ((7239, 7276), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dfLink.shape[0]'], {}), '(0, dfLink.shape[0])\n', (7256, 7276), True, 'import numpy as np\n'), ((2413, 2443), 'os.path.isdir', 'os.path.isdir', (['prospective_dir'], {}), '(prospective_dir)\n', (2426, 2443), False, 'import os\n'), ((4241, 4274), 'os.path.splitext', 'os.path.splitext', (['contourFilename'], {}), '(contourFilename)\n', (4257, 4274), False, 'import os\n'), ((7931, 7960), 'h5py.File', 'h5py.File', (['HDF5_FILENAME', '"""w"""'], {}), "(HDF5_FILENAME, 'w')\n", (7940, 7960), False, 'import h5py\n'), ((7989, 8040), 'tqdm.trange', 'trange', (['dfLink.shape[0]'], {'desc': '"""Patient"""', 'leave': '(True)'}), "(dfLink.shape[0], desc='Patient', leave=True)\n", (7995, 8040), False, 'from tqdm import trange\n'), ((4444, 4482), 're.findall', 're.findall', (['"""\\\\d{4}-\\\\d{4}"""', 'sliceName'], {}), "('\\\\d{4}-\\\\d{4}', sliceName)\n", (4454, 4482), False, 'import re\n'), ((7371, 7393), 'numpy.shape', 'np.shape', (['contourFiles'], {}), '(contourFiles)\n', (7379, 7393), True, 'import numpy as np\n'), ((8166, 8188), 'numpy.shape', 'np.shape', (['contourFiles'], {}), '(contourFiles)\n', (8174, 8188), True, 'import numpy as np\n'), ((8303, 8345), 'os.path.basename', 'os.path.basename', (['contourFiles[contourIdx]'], {}), '(contourFiles[contourIdx])\n', (8319, 8345), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 17 13:23:53 2018
@author: bartosz
"""
import numpy as np
import matplotlib.pyplot as plt
#Radial distribution functions.
# This script assumes correctly generated xyz files
infilename = 'mixture7norm with numMolecules 343 time 100000 fs dt 2 fs box 3.1 nm percEthanol 13.5 rescale 1 targetTemp 300 K rLJcut 8 nm.xyz'
refElement = 'H'
radialElement = 'H'
# non-bonded interaction approximation parameters
nonBondedOnly = True
clearClose = 8 #int
#Read last snapshot
parameters = infilename.split()
f=open(infilename,"r")
n = int(f.readline())
f.seek(0)
tsteps = int(int(parameters[5])/int(parameters[8]))
lines = (n+2)*(tsteps+1)
targetline = lines-n
size =float(parameters[11])*10 #box size in angstroms
#store data
types = ['']*n
q = np.zeros((n,3))
for l, line in enumerate(f):
if l%(5000*(n+2))==1:
print(l, line)
if l>=targetline:
sline = line.split()
#save atom type
types[l-targetline]=sline[0]
#save xyz
for i,elem in enumerate(sline[1:4]):
q[l-targetline,i] = float(elem)
f.close()
# calculate pairwise distances
indicesToDelete = [i for i,x in enumerate(types) if x != radialElement]
qtarget = np.delete(q,indicesToDelete,0)
indicesToDelete = [i for i,x in enumerate(types) if x != refElement]
qref = np.delete(q,indicesToDelete,0)
dr = qtarget - qref[:,np.newaxis]
r = np.linalg.norm(dr,axis=2).flatten()
hist1, bins = np.histogram(r,bins=50,range=(0,12))
hist1[0] =0 #get rid of the self-reference
if nonBondedOnly: #get rid of bonded atoms
hist1[1:clearClose]=[0]*(clearClose-1)
hist1 = hist1.astype(float)
hist1 /= 4*np.pi*np.linspace(0,12)**2 #normalize wrt to sphere
hist1 = np.nan_to_num(hist1)
hist1 /= np.linalg.norm(hist1) #normalize for density
plt.plot(bins[:-1], hist1)
#save bins and hist1
with open('histogram {}% {}-{}.csv'.format(parameters[14],refElement,radialElement), 'w') as outfile:
for x,y in zip(bins,hist1):
outfile.write('{},{}\n'.format(x,y))
| [
"numpy.nan_to_num",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.histogram",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.delete"
] | [((817, 833), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (825, 833), True, 'import numpy as np\n'), ((1272, 1304), 'numpy.delete', 'np.delete', (['q', 'indicesToDelete', '(0)'], {}), '(q, indicesToDelete, 0)\n', (1281, 1304), True, 'import numpy as np\n'), ((1380, 1412), 'numpy.delete', 'np.delete', (['q', 'indicesToDelete', '(0)'], {}), '(q, indicesToDelete, 0)\n', (1389, 1412), True, 'import numpy as np\n'), ((1501, 1540), 'numpy.histogram', 'np.histogram', (['r'], {'bins': '(50)', 'range': '(0, 12)'}), '(r, bins=50, range=(0, 12))\n', (1513, 1540), True, 'import numpy as np\n'), ((1766, 1786), 'numpy.nan_to_num', 'np.nan_to_num', (['hist1'], {}), '(hist1)\n', (1779, 1786), True, 'import numpy as np\n'), ((1796, 1817), 'numpy.linalg.norm', 'np.linalg.norm', (['hist1'], {}), '(hist1)\n', (1810, 1817), True, 'import numpy as np\n'), ((1841, 1867), 'matplotlib.pyplot.plot', 'plt.plot', (['bins[:-1]', 'hist1'], {}), '(bins[:-1], hist1)\n', (1849, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1476), 'numpy.linalg.norm', 'np.linalg.norm', (['dr'], {'axis': '(2)'}), '(dr, axis=2)\n', (1464, 1476), True, 'import numpy as np\n'), ((1712, 1730), 'numpy.linspace', 'np.linspace', (['(0)', '(12)'], {}), '(0, 12)\n', (1723, 1730), True, 'import numpy as np\n')] |
# encoding: utf-8
"""
metrics.py
~~~~~~~~~~
Functionality for computing performance metrics. Typically custom metrics not provided by sklearn.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__created__ = "2018-05-30"
__copyright__ = "Copyright 2018 <NAME>"
__license__ = "MIT https://opensource.org/licenses/MIT"
# standard imports
# third party imports
import numpy as np
# local imports
# globals
def recall_all_score(Y_true, Y_pred):
"""Return the 'recall all' score
'Recall all' is defined as::
score := number of examples with all labels correct / number of examples
:param Y_true: ground truth topic labels (one-hot format)
:param Y_pred: topic predictions (one-hot format)
:return: recall all score
"""
Y_true = Y_true.as_matrix()
matches = np.all(Y_true == Y_pred, axis=1)
return np.sum(matches) / len(matches)
def flpd_score(Y_true, Y_pred):
"""Return 'false labels per document' score
'False labels per document' is defined as::
score := total number of false labels / number of examples
:param Y_true: ground truth topic labels (one-hot format)
:param Y_pred: topic predictions (one-hot format)
:return: false labels per document score
"""
Y_true = Y_true.as_matrix()
return np.sum(Y_pred & ~Y_true) / Y_true.shape[0]
def mlpd_score(Y_true, Y_pred):
"""Missing labels per document score
'Missing labels per document' is defined as::
score := total number of missing labels / number of examples
:param Y_true: ground truth topic labels (one-hot format)
:param Y_pred: topic predictions (one-hot format)
:return: missing labels per document score
"""
Y_true = Y_true.as_matrix()
return np.sum(~Y_pred & Y_true) / Y_true.shape[0]
# EOF | [
"numpy.sum",
"numpy.all"
] | [((801, 833), 'numpy.all', 'np.all', (['(Y_true == Y_pred)'], {'axis': '(1)'}), '(Y_true == Y_pred, axis=1)\n', (807, 833), True, 'import numpy as np\n'), ((845, 860), 'numpy.sum', 'np.sum', (['matches'], {}), '(matches)\n', (851, 860), True, 'import numpy as np\n'), ((1288, 1312), 'numpy.sum', 'np.sum', (['(Y_pred & ~Y_true)'], {}), '(Y_pred & ~Y_true)\n', (1294, 1312), True, 'import numpy as np\n'), ((1742, 1766), 'numpy.sum', 'np.sum', (['(~Y_pred & Y_true)'], {}), '(~Y_pred & Y_true)\n', (1748, 1766), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import norm
S0 = 10
K = 9.3
r = 0.025
s = 0.3
T = 1
def calc_d1(St, K, r, s, T, t=0, q=0):
return (np.log(St / K) + (r - q + 0.5 * s ** 2) * (T - t)) \
/ (s * np.sqrt(T - t))
def calc_d2(d1, s, T, t=0):
return d1 - s * np.sqrt(T - t)
def price_call_option(S0, d1, d2, r, K, T):
return S0 * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)
d1 = calc_d1(S0, K, r, s, T)
d2 = calc_d2(d1, s, T)
C = S0 * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)
print(C)
F = S0 - K * np.exp(-r * T)
P = C - F
print(P)
| [
"scipy.stats.norm.cdf",
"numpy.log",
"numpy.exp",
"numpy.sqrt"
] | [((466, 478), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (474, 478), False, 'from scipy.stats import norm\n'), ((502, 514), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (510, 514), False, 'from scipy.stats import norm\n'), ((538, 552), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (544, 552), True, 'import numpy as np\n'), ((142, 156), 'numpy.log', 'np.log', (['(St / K)'], {}), '(St / K)\n', (148, 156), True, 'import numpy as np\n'), ((213, 227), 'numpy.sqrt', 'np.sqrt', (['(T - t)'], {}), '(T - t)\n', (220, 227), True, 'import numpy as np\n'), ((279, 293), 'numpy.sqrt', 'np.sqrt', (['(T - t)'], {}), '(T - t)\n', (286, 293), True, 'import numpy as np\n'), ((355, 367), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (363, 367), False, 'from scipy.stats import norm\n'), ((391, 403), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (399, 403), False, 'from scipy.stats import norm\n'), ((485, 499), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (491, 499), True, 'import numpy as np\n'), ((374, 388), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (380, 388), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Testing Laplacian discretization
@author <NAME>
"""
import sys
import unittest
from mpi4py import MPI
import numpy
from pnumpy import CubeDecomp
from pnumpy import Laplacian
class TestLaplacian(unittest.TestCase):
def setUp(self):
# number of procs
self.sz = MPI.COMM_WORLD.Get_size()
# MPI rank
self.rk = MPI.COMM_WORLD.Get_rank()
def test1d(self):
n = 8
# global number of cells
ns = (n,)
# domain decomposition
dc = CubeDecomp(self.sz, ns)
if not dc.getDecomp():
print('*** ERROR Invalid domain decomposition -- rerun with different sizes/number of procs')
sys.exit(1)
ndims = dc.getNumDims()
# local start/end grid indices
slab = dc.getSlab(self.rk)
# global domain boundaries
xmins = numpy.array([0.0 for i in range(ndims)])
xmaxs = numpy.array([1.0 for i in range(ndims)])
# local cell centered coordinates
axes = []
hs = []
for i in range(ndims):
ibeg, iend = slab[i].start, slab[i].stop
h = (xmaxs[i] - xmins[i]) / float(ns[i])
ax = xmins[i] + h*(numpy.arange(ibeg, iend) + 0.5)
hs.append(h)
axes.append(ax)
lapl = Laplacian(dc, periodic=(False,))
# set the input function
inp = 0.5 * axes[0]**2
#print('[{0}] inp = {1}'.format(self.rk, str(inp)))
out = lapl.apply(inp) / hs[0]**2
#print('[{0}] out = {1}'.format(self.rk, str(out)))
# check sum
localChkSum = numpy.sum(out.flat)
chksum = numpy.sum(MPI.COMM_WORLD.gather(localChkSum, 0))
if self.rk == 0:
print('test1d check sum = {}'.format(chksum))
self.assertLessEqual(abs(chksum - -28.25), 1.e-10)
def test2d(self):
n = 8
# global number of cells
ns = (n, n)
# domain decomposition
dc = CubeDecomp(self.sz, ns)
if not dc.getDecomp():
print('*** ERROR Invalid domain decomposition -- rerun with different sizes/number of procs')
sys.exit(1)
ndims = dc.getNumDims()
# local start/end grid indices
slab = dc.getSlab(self.rk)
# global domain boundaries
xmins = numpy.array([0.0 for i in range(ndims)])
xmaxs = numpy.array([1.0 for i in range(ndims)])
# local cell centered coordinates
axes = []
hs = []
nsLocal = []
for i in range(ndims):
ibeg, iend = slab[i].start, slab[i].stop
nsLocal.append(iend - ibeg)
h = (xmaxs[i] - xmins[i]) / float(ns[i])
ax = xmins[i] + h*(numpy.arange(ibeg, iend) + 0.5)
hs.append(h)
axes.append(ax)
lapl = Laplacian(dc, periodic=(False, False))
# set the input function
xx = numpy.outer(axes[0], numpy.ones((nsLocal[1],)))
yy = numpy.outer(numpy.ones((nsLocal[0],)), axes[1])
inp = 0.5 * xx * yy ** 2
#print('[{0}] inp = {1}'.format(self.rk, str(inp)))
out = lapl.apply(inp) / hs[0]**2 # NEED TO ADJUST WHEN CELL SIZE IS DIFFERENT IN Y!
#print('[{0}] out = {1}'.format(self.rk, str(out)))
# check sum
localChkSum = numpy.sum(out.flat)
chksum = numpy.sum(MPI.COMM_WORLD.gather(localChkSum, 0))
if self.rk == 0:
print('test2d check sum = {}'.format(chksum))
self.assertLessEqual(abs(chksum - -198.0), 1.e-10)
def test2d_1domain(self):
n = 8
# global number of cells
ns = (n, n)
ndims = len(ns)
# global domain boundaries
xmins = numpy.array([0.0 for i in range(ndims)])
xmaxs = numpy.array([1.0 for i in range(ndims)])
# local cell centered coordinates
axes = []
hs = []
for i in range(ndims):
h = (xmaxs[i] - xmins[i]) / float(ns[i])
ax = xmins[i] + h*(numpy.arange(0, ns[i]) + 0.5)
hs.append(h)
axes.append(ax)
# set the input function
xx = numpy.outer(axes[0], numpy.ones((ns[1],)))
yy = numpy.outer(numpy.ones((ns[0],)), axes[1])
inp = 0.5 * xx * yy ** 2
#print('inp = {}'.format(str(inp)))
out = -4.0 * inp
out[1:, :] += 1.0 * inp[:-1, :]
out[:-1, :] += 1.0 * inp[1:, :]
out[:, 1:] += 1.0 * inp[:, :-1]
out[:, :-1] += 1.0 * inp[:, 1:]
out /= hs[0]**2 # NEED TO ADJUST WHEN CELL SIZE IS DIFFERENT IN Y!
#print('out = {}'.format(str(out)))
# check sum
chksum = numpy.sum(out.flat)
print('test2d_1domain check sum = {}'.format(chksum))
self.assertLessEqual(abs(chksum - -198.0), 1.e-10)
def test3d(self):
n = 8
# global number of cells
ns = (n, n, n)
# domain decomposition
dc = CubeDecomp(self.sz, ns)
if not dc.getDecomp():
print('*** ERROR Invalid domain decomposition -- rerun with different sizes/number of procs')
sys.exit(1)
ndims = dc.getNumDims()
# local start/end grid indices
slab = dc.getSlab(self.rk)
# global domain boundaries
xmins = numpy.array([0.0 for i in range(ndims)])
xmaxs = numpy.array([1.0 for i in range(ndims)])
# local cell centered coordinates
axes = []
hs = []
nsLocal = []
iBegs = []
iEnds = []
for i in range(ndims):
ibeg, iend = slab[i].start, slab[i].stop
iBegs.append(ibeg)
iEnds.append(iend)
nsLocal.append(iend - ibeg)
h = (xmaxs[i] - xmins[i]) / float(ns[i])
ax = xmins[i] + h*(numpy.arange(ibeg, iend) + 0.5)
hs.append(h)
axes.append(ax)
lapl = Laplacian(dc, periodic=(False, False, True))
# set the input function
inp = numpy.zeros((iEnds[0] - iBegs[0], iEnds[1] - iBegs[1], iEnds[2] - iBegs[2]), numpy.float64)
for ig in range(iBegs[0], iEnds[0]):
i = ig - iBegs[0]
x = axes[0][i]
for jg in range(iBegs[1], iEnds[1]):
j = jg - iBegs[1]
y = axes[1][j]
for kg in range(iBegs[2], iEnds[2]):
k = kg - iBegs[2]
z = axes[2][k]
inp[i, j, k] = 0.5 * x * y**2
# check sum of input
localChkSum = numpy.sum(inp.flat)
chksum = numpy.sum(MPI.COMM_WORLD.gather(localChkSum, 0))
if self.rk == 0:
print('test3d check sum of input = {}'.format(chksum))
out = lapl.apply(inp)
# check sum
localChkSum = numpy.sum(out.flat)
chksum = numpy.sum(MPI.COMM_WORLD.gather(localChkSum, 0))
if self.rk == 0:
print('test3d check sum = {}'.format(chksum))
self.assertLessEqual(abs(chksum - -24.75), 1.e-10)
def test3d_1domain(self):
n = 8
# global number of cells
ns = (n, n, n)
ndims = len(ns)
# global domain boundaries
xmins = numpy.array([0.0 for i in range(ndims)])
xmaxs = numpy.array([1.0 for i in range(ndims)])
# local cell centered coordinates
axes = []
hs = []
for i in range(ndims):
h = (xmaxs[i] - xmins[i]) / float(ns[i])
ax = xmins[i] + h*(numpy.arange(0, ns[i]) + 0.5)
hs.append(h)
axes.append(ax)
# set the input function
inp = numpy.zeros((ns[0], ns[1], ns[2]), numpy.float64)
for i in range(ns[0]):
x = axes[0][i]
for j in range(ns[1]):
y = axes[1][j]
for k in range(ns[2]):
z = axes[2][k]
inp[i, j, k] = 0.5 * x * y ** 2
print('check sum input: {0}'.format(numpy.sum(inp.flat)))
stencil = {}
stencil[0, 0, 0] = -6.0
stencil[1, 0, 0] = 1.
stencil[-1, 0, 0] = 1.
stencil[0, 1, 0] = 1.
stencil[0, -1, 0] = 1.
stencil[0, 0, 1] = 1.
stencil[0, 0, -1] = 1.
out = stencil[0, 0, 0] * inp
out[:-1, :, :] += stencil[1, 0, 0] * inp[1:, :, :]
out[1:, :, :] += stencil[-1, 0, 0] * inp[:-1, :, :]
out[:, :-1, :] += stencil[0, 1, 0] * inp[:, 1:, :]
out[:, 1:, :] += stencil[0, -1, 0] * inp[:, :-1, :]
out[:, :, :-1] += stencil[0, 0, 1] * inp[:, :, 1:]
out[:, :, 1:] += stencil[0, 0, -1] * inp[:, :, :-1]
# handle preiodic conditions in the z direction
out[:, :, -1] += stencil[0, 0, 1] * inp[:, :, 0]
out[:, :, 0] += stencil[0, 0, -1] * inp[:, :, -1]
# check sum
chksum = numpy.sum(out.flat)
print('test3d_1domain sum = {}'.format(chksum))
self.assertLessEqual(abs(chksum - -24.75), 1.e-10)
if __name__ == '__main__':
print("") # Spacer
suite = unittest.TestLoader().loadTestsFromTestCase(TestLaplacian)
unittest.TextTestRunner(verbosity = 1).run(suite)
MPI.Finalize()
| [
"mpi4py.MPI.Finalize",
"numpy.sum",
"unittest.TextTestRunner",
"mpi4py.MPI.COMM_WORLD.Get_rank",
"numpy.zeros",
"pnumpy.CubeDecomp",
"numpy.ones",
"mpi4py.MPI.COMM_WORLD.gather",
"pnumpy.Laplacian",
"unittest.TestLoader",
"numpy.arange",
"sys.exit",
"mpi4py.MPI.COMM_WORLD.Get_size"
] | [((8249, 8263), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (8261, 8263), False, 'from mpi4py import MPI\n'), ((299, 324), 'mpi4py.MPI.COMM_WORLD.Get_size', 'MPI.COMM_WORLD.Get_size', ([], {}), '()\n', (322, 324), False, 'from mpi4py import MPI\n'), ((354, 379), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (377, 379), False, 'from mpi4py import MPI\n'), ((493, 516), 'pnumpy.CubeDecomp', 'CubeDecomp', (['self.sz', 'ns'], {}), '(self.sz, ns)\n', (503, 516), False, 'from pnumpy import CubeDecomp\n'), ((1205, 1237), 'pnumpy.Laplacian', 'Laplacian', (['dc'], {'periodic': '(False,)'}), '(dc, periodic=(False,))\n', (1214, 1237), False, 'from pnumpy import Laplacian\n'), ((1480, 1499), 'numpy.sum', 'numpy.sum', (['out.flat'], {}), '(out.flat)\n', (1489, 1499), False, 'import numpy\n'), ((1813, 1836), 'pnumpy.CubeDecomp', 'CubeDecomp', (['self.sz', 'ns'], {}), '(self.sz, ns)\n', (1823, 1836), False, 'from pnumpy import CubeDecomp\n'), ((2578, 2616), 'pnumpy.Laplacian', 'Laplacian', (['dc'], {'periodic': '(False, False)'}), '(dc, periodic=(False, False))\n', (2587, 2616), False, 'from pnumpy import Laplacian\n'), ((3027, 3046), 'numpy.sum', 'numpy.sum', (['out.flat'], {}), '(out.flat)\n', (3036, 3046), False, 'import numpy\n'), ((4246, 4265), 'numpy.sum', 'numpy.sum', (['out.flat'], {}), '(out.flat)\n', (4255, 4265), False, 'import numpy\n'), ((4497, 4520), 'pnumpy.CubeDecomp', 'CubeDecomp', (['self.sz', 'ns'], {}), '(self.sz, ns)\n', (4507, 4520), False, 'from pnumpy import CubeDecomp\n'), ((5346, 5390), 'pnumpy.Laplacian', 'Laplacian', (['dc'], {'periodic': '(False, False, True)'}), '(dc, periodic=(False, False, True))\n', (5355, 5390), False, 'from pnumpy import Laplacian\n'), ((5431, 5526), 'numpy.zeros', 'numpy.zeros', (['(iEnds[0] - iBegs[0], iEnds[1] - iBegs[1], iEnds[2] - iBegs[2])', 'numpy.float64'], {}), '((iEnds[0] - iBegs[0], iEnds[1] - iBegs[1], iEnds[2] - iBegs[2]),\n numpy.float64)\n', (5442, 5526), False, 'import numpy\n'), ((5889, 5908), 'numpy.sum', 'numpy.sum', (['inp.flat'], {}), '(inp.flat)\n', (5898, 5908), False, 'import numpy\n'), ((6119, 6138), 'numpy.sum', 'numpy.sum', (['out.flat'], {}), '(out.flat)\n', (6128, 6138), False, 'import numpy\n'), ((6867, 6916), 'numpy.zeros', 'numpy.zeros', (['(ns[0], ns[1], ns[2])', 'numpy.float64'], {}), '((ns[0], ns[1], ns[2]), numpy.float64)\n', (6878, 6916), False, 'import numpy\n'), ((7947, 7966), 'numpy.sum', 'numpy.sum', (['out.flat'], {}), '(out.flat)\n', (7956, 7966), False, 'import numpy\n'), ((654, 665), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (662, 665), False, 'import sys\n'), ((1523, 1560), 'mpi4py.MPI.COMM_WORLD.gather', 'MPI.COMM_WORLD.gather', (['localChkSum', '(0)'], {}), '(localChkSum, 0)\n', (1544, 1560), False, 'from mpi4py import MPI\n'), ((1974, 1985), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1982, 1985), False, 'import sys\n'), ((2677, 2702), 'numpy.ones', 'numpy.ones', (['(nsLocal[1],)'], {}), '((nsLocal[1],))\n', (2687, 2702), False, 'import numpy\n'), ((2725, 2750), 'numpy.ones', 'numpy.ones', (['(nsLocal[0],)'], {}), '((nsLocal[0],))\n', (2735, 2750), False, 'import numpy\n'), ((3070, 3107), 'mpi4py.MPI.COMM_WORLD.gather', 'MPI.COMM_WORLD.gather', (['localChkSum', '(0)'], {}), '(localChkSum, 0)\n', (3091, 3107), False, 'from mpi4py import MPI\n'), ((3792, 3812), 'numpy.ones', 'numpy.ones', (['(ns[1],)'], {}), '((ns[1],))\n', (3802, 3812), False, 'import numpy\n'), ((3835, 3855), 'numpy.ones', 'numpy.ones', (['(ns[0],)'], {}), '((ns[0],))\n', (3845, 3855), False, 'import numpy\n'), ((4658, 4669), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4666, 4669), False, 'import sys\n'), ((5932, 5969), 'mpi4py.MPI.COMM_WORLD.gather', 'MPI.COMM_WORLD.gather', (['localChkSum', '(0)'], {}), '(localChkSum, 0)\n', (5953, 5969), False, 'from mpi4py import MPI\n'), ((6162, 6199), 'mpi4py.MPI.COMM_WORLD.gather', 'MPI.COMM_WORLD.gather', (['localChkSum', '(0)'], {}), '(localChkSum, 0)\n', (6183, 6199), False, 'from mpi4py import MPI\n'), ((8136, 8157), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (8155, 8157), False, 'import unittest\n'), ((8197, 8233), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(1)'}), '(verbosity=1)\n', (8220, 8233), False, 'import unittest\n'), ((7156, 7175), 'numpy.sum', 'numpy.sum', (['inp.flat'], {}), '(inp.flat)\n', (7165, 7175), False, 'import numpy\n'), ((1116, 1140), 'numpy.arange', 'numpy.arange', (['ibeg', 'iend'], {}), '(ibeg, iend)\n', (1128, 1140), False, 'import numpy\n'), ((2489, 2513), 'numpy.arange', 'numpy.arange', (['ibeg', 'iend'], {}), '(ibeg, iend)\n', (2501, 2513), False, 'import numpy\n'), ((3657, 3679), 'numpy.arange', 'numpy.arange', (['(0)', 'ns[i]'], {}), '(0, ns[i])\n', (3669, 3679), False, 'import numpy\n'), ((5257, 5281), 'numpy.arange', 'numpy.arange', (['ibeg', 'iend'], {}), '(ibeg, iend)\n', (5269, 5281), False, 'import numpy\n'), ((6752, 6774), 'numpy.arange', 'numpy.arange', (['(0)', 'ns[i]'], {}), '(0, ns[i])\n', (6764, 6774), False, 'import numpy\n')] |
import numpy as np
import pandas as pd
import os
import argparse
current_path = os.path.abspath('.')
# For four ours data, run on macbook
# name = '18-64'
# name = '2-5'
# name = '2-8'
name = 'T4857'
# filename = '/Users/wangjue/Documents/results_scGNNsp/16_data_benchmark/'+name+'_benchmark.csv'
filename = '/ocean/projects/ccr180012p/shared/image_segmenation/data/10x/new_4/sparse_meta_out/'+name+'_humanBrain_metaData.csv'
coordsfilename = name+'F_cpm/coords_array.npy'
labelname = name+'F_cpm/label.csv'
df = pd.read_csv(filename)
# filter_col = [col for col in df if col.startswith('ENSG') or col.startswith('barcode')]
# dfEX = df[filter_col]
# dfEX= dfEX.T
# dfEX.to_csv(outfilename,header=False)
llist = ['barcode','Layers']
dfLabel = df[llist]
dfLabel.to_csv(labelname)
# coordinates
coords_list = []
for i in range(df.shape[0]):
coords_list.append((df.iloc[i]['array_row'],df.iloc[i]['array_col']))
np.save(coordsfilename,np.array(coords_list))
| [
"pandas.read_csv",
"os.path.abspath",
"numpy.array"
] | [((80, 100), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (95, 100), False, 'import os\n'), ((516, 537), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (527, 537), True, 'import pandas as pd\n'), ((943, 964), 'numpy.array', 'np.array', (['coords_list'], {}), '(coords_list)\n', (951, 964), True, 'import numpy as np\n')] |
import torch
import numpy as np
""" 1. PyTorch Introduction
PyTorch is developed by Facebook AI Research (FAIR). PyTorch is one of the most widely used open-source machine
learning libraries for deep learning applications. It was first introduced in 2016. Since then, PyTorch has been
gaining popularity among researchers and developers, at the expense of TensorFlow.
Machine learning workflows involve
- preparing training data
- creating models
- optimizing model parameters
- saving the trained models.
In this tutorial you will:
- Learn the key concepts used to build machine learning models
- Learn how to build a Computer Vision model
- Build models with the PyTorch API
"""
""" 1.1 Tensors
Tensors are a specialized data structure that are very similar to arrays and matrices. In PyTorch, we use tensors to
encode the inputs and outputs of a model, as well as the model’s parameters.
Tensors are similar to NumPy’s ndarrays, except that tensors can run on GPUs or other hardware accelerators. In fact,
tensors and NumPy arrays can often share the same underlying memory, eliminating the need to copy data
(see bridge-to-np-label). Tensors are also optimized for automatic differentiation
(we'll see more about that later in the Autograd unit). If you’re familiar with ndarrays, you’ll be right at home
with the Tensor API. If not, follow along!
"""
""" 1.1.1 Tensors creation
Tensors can be initialized in various ways:
- directly from data: The data type is automatically inferred.
- from a numpy array: np array to tensor and vice-versa is completely automatic
- from other tensor: The new tensor retains the properties (shape, data type) of the argument tensor,
unless explicitly overridden.
- from a shape and values:
"""
def tensor_creation():
data = [[1, 2], [3, 4]]
# 1. create tensor from data
tensor_from_data = torch.tensor(data)
print(f"tensor's shape: {tensor_from_data.size()}")
# 2. create tensor from np array
np_array = np.array(data)
tensor_from_np = torch.from_numpy(np_array)
# 3. create tensor from other tensor
# retains the properties of tensor_from_data
int_tensor = torch.ones_like(tensor_from_data)
print(f"Random Tensor has the same shape and type of tensor_from_data: \n {int_tensor} \n")
# retains the shape but overrides the datatype with float
float_tensor = torch.rand_like(tensor_from_np, dtype=torch.float)
print(f"Random Tensor has the same shape of tensor_from_np, but with float type : \n {float_tensor} \n")
# 4. create tensor with shape and values
shape = (2, 3,)
# use the shape, fill with random value
rand_tensor = torch.rand(shape)
print(f"Random Tensor: \n {rand_tensor} \n")
# use the shape, fill with 1
ones_tensor = torch.ones(shape)
print(f"Ones Tensor: \n {ones_tensor} \n")
# use the shape, fill with 0
zeros_tensor = torch.zeros(shape)
print(f"Zeros Tensor: \n {zeros_tensor}")
""" 1.1.2 Tensor attributs
Tensor has three attributes:
- shape
- datatype
- the device on which they are stored
"""
def tensor_attributes():
tensor = torch.rand(3, 4)
print(f"Shape of tensor: {tensor.shape}")
print(f"Datatype of tensor: {tensor.dtype}")
print(f"Device tensor is stored on: {tensor.device}")
"""1.1.3 Tensor Operations
PyTorch provides over 100 tensor operations, including:
- arithmetic
- linear algebra
- matrix manipulation (transposing, indexing, slicing)
- sampling
- Etc.
You can find more details here https://pytorch.org/docs/stable/torch.html
"""
def tensor_operations():
# All operations can be run on the GPU (at typically higher speeds than on a CPU).
# By default, tensors are created on the CPU. We need to explicitly move tensors to the GPU using .to method
# (after checking for GPU availability). Keep in mind that copying large tensors across devices can be expensive
# in terms of time and memory!
# We move our tensor to the GPU if available
tensor = torch.rand(3, 4)
if torch.cuda.is_available():
tensor = tensor.to('cuda')
else:
print("No GPU found")
# Tensor slicing
print("Full tensor content: \n", tensor)
print('First row: ', tensor[0])
print('First column: ', tensor[:, 0])
print('Last column:', tensor[..., -1])
# Change tensor value.
tensor[:, 1] = 0
print("Full tensor content, after value modification on column 2: \n", tensor)
# Joining tensors
# You can use torch.cat to concatenate a sequence of tensors along a given dimension. See also torch.stack,
# another tensor joining op that is subtly different from torch.cat.
concat_tensor = torch.cat([tensor, tensor, tensor], dim=1)
# shape is the field of object tensor, size() is the function that returns this field
print("Concat tensors shape: ", concat_tensor.shape)
print("Concat tensors result: \n", concat_tensor)
# arithmetic operations
# This computes the matrix multiplication between two tensors. y1, y2, y3 will have the same value
y1 = tensor @ tensor.T
y2 = tensor.matmul(tensor.T)
y3 = torch.rand_like(tensor)
torch.matmul(tensor, tensor.T, out=y3)
# single element tensor
# If you have a one-element tensor, for example by aggregating all values of a tensor into one value, you can
# convert it to a Python numerical value using item():
agg_tensor = tensor.sum()
agg_item = agg_tensor.item()
print(f"agg_tensor type: {type(agg_tensor)},agg_tensor value: {agg_tensor}")
print(f"agg_item type: {type(agg_item)},agg_item value: {agg_item}")
# In-place operations
# Operations that store the result into the operand are called in-place. They are denoted by a _ suffix.
# For example: x.copy_(y), x.t_(), will change x
print("Source tensor: \n", tensor, "\n")
# add 5 to all values in tensor
tensor.add_(5)
print("Result tensor after add_(5): \n", tensor)
# This computes the element-wise product. z1, z2, z3 will have the same value
z1 = tensor * tensor
z2 = tensor.mul(tensor)
z3 = torch.rand_like(tensor)
torch.mul(tensor, tensor, out=z3)
""" Bridge with NumPy
Tensors on the CPU and NumPy arrays can share their underlying memory locations, and changing one will change the other.
"""
def tensor_np_conversion():
# create a tensor and convert it to np array
tensor = torch.ones(5)
print(f"source tensor: {tensor}")
np_array = tensor.numpy()
print(f"convert to numpy array: {np_array}")
# change the tensor value reflects in the NumPy array.
tensor.add_(7)
print(f"tensor value after add_(7): {tensor}")
print(f"np array value after add_(7): {np_array}")
# create a np array and convert it to tensor
np_array1 = np.ones(3)
tensor1 = torch.from_numpy(np_array1)
print(f"source numpy array: {np_array1}")
print(f"convert to tensor: {tensor1}")
# change the np array value reflects on tensor
np.add(np_array1, 5, out=np_array1)
print(f"tensor value after np.add(5): {tensor1}")
print(f"np array value after np.add(5): {np_array1}")
def main():
# tensor_creation()
# tensor_attributes()
# tensor_operations()
tensor_np_conversion()
if __name__ == "__main__":
main()
| [
"torch.ones_like",
"torch.ones",
"torch.cat",
"numpy.ones",
"torch.mul",
"torch.rand_like",
"numpy.array",
"torch.cuda.is_available",
"torch.rand",
"torch.zeros",
"numpy.add",
"torch.matmul",
"torch.tensor",
"torch.from_numpy"
] | [((1880, 1898), 'torch.tensor', 'torch.tensor', (['data'], {}), '(data)\n', (1892, 1898), False, 'import torch\n'), ((2008, 2022), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2016, 2022), True, 'import numpy as np\n'), ((2044, 2070), 'torch.from_numpy', 'torch.from_numpy', (['np_array'], {}), '(np_array)\n', (2060, 2070), False, 'import torch\n'), ((2179, 2212), 'torch.ones_like', 'torch.ones_like', (['tensor_from_data'], {}), '(tensor_from_data)\n', (2194, 2212), False, 'import torch\n'), ((2391, 2441), 'torch.rand_like', 'torch.rand_like', (['tensor_from_np'], {'dtype': 'torch.float'}), '(tensor_from_np, dtype=torch.float)\n', (2406, 2441), False, 'import torch\n'), ((2679, 2696), 'torch.rand', 'torch.rand', (['shape'], {}), '(shape)\n', (2689, 2696), False, 'import torch\n'), ((2797, 2814), 'torch.ones', 'torch.ones', (['shape'], {}), '(shape)\n', (2807, 2814), False, 'import torch\n'), ((2914, 2932), 'torch.zeros', 'torch.zeros', (['shape'], {}), '(shape)\n', (2925, 2932), False, 'import torch\n'), ((3138, 3154), 'torch.rand', 'torch.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (3148, 3154), False, 'import torch\n'), ((4018, 4034), 'torch.rand', 'torch.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (4028, 4034), False, 'import torch\n'), ((4042, 4067), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4065, 4067), False, 'import torch\n'), ((4691, 4733), 'torch.cat', 'torch.cat', (['[tensor, tensor, tensor]'], {'dim': '(1)'}), '([tensor, tensor, tensor], dim=1)\n', (4700, 4733), False, 'import torch\n'), ((5136, 5159), 'torch.rand_like', 'torch.rand_like', (['tensor'], {}), '(tensor)\n', (5151, 5159), False, 'import torch\n'), ((5164, 5202), 'torch.matmul', 'torch.matmul', (['tensor', 'tensor.T'], {'out': 'y3'}), '(tensor, tensor.T, out=y3)\n', (5176, 5202), False, 'import torch\n'), ((6109, 6132), 'torch.rand_like', 'torch.rand_like', (['tensor'], {}), '(tensor)\n', (6124, 6132), False, 'import torch\n'), ((6137, 6170), 'torch.mul', 'torch.mul', (['tensor', 'tensor'], {'out': 'z3'}), '(tensor, tensor, out=z3)\n', (6146, 6170), False, 'import torch\n'), ((6412, 6425), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (6422, 6425), False, 'import torch\n'), ((6794, 6804), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (6801, 6804), True, 'import numpy as np\n'), ((6819, 6846), 'torch.from_numpy', 'torch.from_numpy', (['np_array1'], {}), '(np_array1)\n', (6835, 6846), False, 'import torch\n'), ((6992, 7027), 'numpy.add', 'np.add', (['np_array1', '(5)'], {'out': 'np_array1'}), '(np_array1, 5, out=np_array1)\n', (6998, 7027), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
from builtins import (open, str, range,
object)
from bokeh.layouts import row, widgetbox, gridplot
from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource,Whisker
from bokeh.embed import components
from bokeh.plotting import figure
from matplotlib import pylab as plt
import numpy as np
class DataPlot(object):
def __init__(self):
self.fig = plt.figure(figsize=(8, 6))
self.ax = self.fig.subplots(1, 1)
def add_data_plot(self,
x,
y,
dx=None,
dy=None,
label=None,
color=None,
fmt='o',
dataformat=None,
ms=None,
mew=None,
loglog=True,
grid=False):
# get x,y,dx,dy from SEDdata
if dx is None:
dx = np.zeros(len(x))
else:
dx=np.fabs(x-dx)
if dy is None:
dy = np.zeros(len(y))
else:
dy=np.fabs(y-dy)
# set color
#if color is None:
# color = self.counter
ul=None
if dataformat is not None:
ul = dataformat == 'ul'
dy = y * 0.2
line = self.ax.errorbar(x, y, xerr=dx, yerr=dy, fmt=fmt, label=label, ms=ms, mew=mew,uplims=ul)
if loglog is True:
self.ax.set_xscale("log", nonposx='clip')
self.ax.set_yscale("log", nonposy='clip')
if grid is True:
self.ax.grid()
self.ax.legend()
def add_sed(self,sed_table,label=None,color=None):
if label is None:
label=sed_table.meta['Source']
self.add_data_plot(x=sed_table['en'],
y=sed_table['nufnu'],
dx=[sed_table['en_wlo'], sed_table['en_wup']],
dy=[sed_table['nufnu_elo'], sed_table['nufnu_eup']],
dataformat=sed_table['dataformat'],
label=label,
color=color)
self.ax.set_ylabel(sed_table['nufnu'].unit)
self.ax.set_xlabel(sed_table['en'].unit)
#self.ax.set_ylim(5E-14, 1E-9)
#self.ax.grid()
def add_lc(self,lc_table):
self.add_data_plot(x=lc_table['tstart'],
y=lc_table['nufnu'],
dy=[lc_table['nufnu_elo'],lc_table['nufnu_eup']],
label=lc_table.meta['Source'],
loglog=False)
self.ax.set_ylabel(lc_table['nufnu'].unit)
self.ax.set_xlabel(lc_table['tstart'].unit)
#self.ax.grid()
#self.ax.legend()
class ScatterPlot(object):
def __init__(self,w,h,x_label=None,y_label=None,x_range=None,y_range=None,title=None,y_axis_type='linear',x_axis_type='linear'):
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y")])
self.fig = figure(title=title, width=w, height=h,x_range=x_range,y_range=y_range,
y_axis_type=y_axis_type,
x_axis_type=x_axis_type,
tools=[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])
if x_label is not None:
self.fig.xaxis.axis_label = x_label
if y_label is not None:
self.fig.yaxis.axis_label = y_label
self.fig.add_tools(hover)
def add_errorbar(self, x, y, xerr=None, yerr=None,dataformat=None, color='red',
point_kwargs={}, error_kwargs={}):
self.fig.circle(x, y, color=color, **point_kwargs)
#print(xerr.shape)
if xerr is not None:
x_err_x = []
x_err_y = []
for px, py, errm,errp in zip(x, y, xerr[0],xerr[1]):
x_err_x.append((px - errm, px + errp))
x_err_y.append((py, py))
self.fig.multi_line(x_err_x, x_err_y, color=color, **error_kwargs)
if yerr is not None:
ul = None
if dataformat is not None:
ul = dataformat == 'ul'
yerr[0][ul] = y[ul]*0.5
yerr[1][ul] = 0
y_err_x = []
y_err_y = []
#print(yerr)
for px, py, errm,errp in zip(x, y, yerr[0], yerr[1]):
#print(py - errm, py + errp)
y_err_x.append((px, px))
y_err_y.append((py - errm, py + errp))
self.fig.multi_line(y_err_x, y_err_y, color=color, **error_kwargs)
#self.fig.add_layout(Whisker(source=yerr, base="base", upper="upper", lower="lower", line_color='red'))
def add_step_line(self,x,y,legend=None):
#print('a')
self.fig.step(x,y,name=legend, mode="center")
#print('b')
def add_line(self,x,y,legend=None,color=None):
self.fig.line(x,y,legend=legend,line_color=color)
def get_html_draw(self):
self.fig.sizing_mode = 'scale_width'
layout = row(
self.fig
)
layout.sizing_mode = 'scale_width'
return components(layout)
| [
"bokeh.plotting.figure",
"matplotlib.pylab.figure",
"numpy.fabs",
"bokeh.models.HoverTool",
"bokeh.embed.components",
"bokeh.layouts.row"
] | [((506, 532), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (516, 532), True, 'from matplotlib import pylab as plt\n'), ((3073, 3119), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('x', '$x'), ('y', '$y')]"}), "(tooltips=[('x', '$x'), ('y', '$y')])\n", (3082, 3119), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource, Whisker\n'), ((3140, 3344), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'width': 'w', 'height': 'h', 'x_range': 'x_range', 'y_range': 'y_range', 'y_axis_type': 'y_axis_type', 'x_axis_type': 'x_axis_type', 'tools': "[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']"}), "(title=title, width=w, height=h, x_range=x_range, y_range=y_range,\n y_axis_type=y_axis_type, x_axis_type=x_axis_type, tools=[hover,\n 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])\n", (3146, 3344), False, 'from bokeh.plotting import figure\n'), ((5173, 5186), 'bokeh.layouts.row', 'row', (['self.fig'], {}), '(self.fig)\n', (5176, 5186), False, 'from bokeh.layouts import row, widgetbox, gridplot\n'), ((5269, 5287), 'bokeh.embed.components', 'components', (['layout'], {}), '(layout)\n', (5279, 5287), False, 'from bokeh.embed import components\n'), ((1113, 1128), 'numpy.fabs', 'np.fabs', (['(x - dx)'], {}), '(x - dx)\n', (1120, 1128), True, 'import numpy as np\n'), ((1214, 1229), 'numpy.fabs', 'np.fabs', (['(y - dy)'], {}), '(y - dy)\n', (1221, 1229), True, 'import numpy as np\n')] |
import numpy as np
from pytest import approx
from mne_phase.utils import rayleigh_test
def test_basic():
input = np.array([0.02058449, 0.96990985, 0.83244264, 0.21233911])
EXPECTED = 0.02172542636470802
assert rayleigh_test(input) == approx(EXPECTED)
| [
"pytest.approx",
"numpy.array",
"mne_phase.utils.rayleigh_test"
] | [((120, 178), 'numpy.array', 'np.array', (['[0.02058449, 0.96990985, 0.83244264, 0.21233911]'], {}), '([0.02058449, 0.96990985, 0.83244264, 0.21233911])\n', (128, 178), True, 'import numpy as np\n'), ((225, 245), 'mne_phase.utils.rayleigh_test', 'rayleigh_test', (['input'], {}), '(input)\n', (238, 245), False, 'from mne_phase.utils import rayleigh_test\n'), ((249, 265), 'pytest.approx', 'approx', (['EXPECTED'], {}), '(EXPECTED)\n', (255, 265), False, 'from pytest import approx\n')] |
import numpy as np
import astropy.units as au
def evolution_timescale_scattering_rate(rho_s, v_rms, cross_section, rescale=1.):
"""
Evaluates the timescale for the evolution of SIDM profiles using the scattering rate
average proportional to
<sigma(v) v>
given by Equation 4 in this paper https://arxiv.org/pdf/2102.09580.pdf
with an additional factor of 3
:param rho_s: the central density normalization of the collisionless NFW profile of the same mass
:param v_rms: the velocity dispersion of the halo
:param cross_section: an instance of the cross section model
:return: the characteristic timescale for structural evolution in Gyr
"""
scattering_rate_cross_section = cross_section.scattering_rate_cross_section(v_rms)
rho_s *= au.solMass / au.kpc ** 3
scattering_rate_cross_section *= au.cm ** 2 / au.g * au.km / au.s
rate = 3 * rho_s * scattering_rate_cross_section
time = 1 / rate
time_Gyr = time.to(au.Gyr)
return rescale * time_Gyr.value
def evolution_timescale_NFW(rho_s, rs, cross_section_amplitude):
"""
Evaluates the timescale for the evolution of SIDM profiles given after Equation 2
of this paper https://arxiv.org/pdf/1901.00499.pdf
:param rho_s: the central density normalization of the collisionless NFW profile of the same mass
:param rs: the scale radius of the collisionless NFW profile of the same mass
:param momentum_averaged_cross_section: the scattering cross section in cm^2/gram weighted by v^2:
<sigma(v) v^2>
:return: the time in Gyr from the collapse time of a halo until when it should start to core collapse
"""
G = 4.3e-6
a = 4 / np.sqrt(np.pi)
v0 = np.sqrt(4 * np.pi * G * rho_s * rs ** 2)
const = 2.136e-19 # to year^{-1}
t_inverse = a * const * v0 * rho_s * cross_section_amplitude
return 1e-9 / t_inverse
| [
"numpy.sqrt"
] | [((1713, 1753), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi * G * rho_s * rs ** 2)'], {}), '(4 * np.pi * G * rho_s * rs ** 2)\n', (1720, 1753), True, 'import numpy as np\n'), ((1689, 1703), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1696, 1703), True, 'import numpy as np\n')] |
# An annotation is a coordinate in a 4D array. Worldlines are lists of
# annotations.
#
# author: <EMAIL>
import base64
import colorsys
from dataclasses import dataclass, asdict, field
from functools import lru_cache, reduce
import json
from pathlib import Path
import uuid
from typing import Tuple, Optional
import h5py
import numpy as np
import pandas as pd
from .dataclass_helpers import DataclassTableBase
def base64str_from_uint32(x: np.uint32):
return base64.standard_b64encode(x.tobytes())[:6]
def get_random_base64_id():
idu32 = np.uint32(uuid.uuid1().time_low)
return base64str_from_uint32(idu32)
_S4 = np.dtype("S4")
_S7 = np.dtype("S7")
@dataclass
class Annotation:
id: np.uint32 = 0 # 0 is unassigned. Get an id after inserting into table.
t_idx: np.uint32 = 0 # in [0, shape_t)
x: np.float32 = 0.5 # in (0, 1)
y: np.float32 = 0.5 # in (0, 1)
z: np.float32 = 0.5 # in (0, 1)
worldline_id: np.uint32 = 0
parent_id: np.uint32 = 0 # =0 is null/none
provenance: _S4 = b"NULL" # unknown
def __post_init__(self):
self.id = np.uint32(self.id)
self.t_idx = np.uint32(self.t_idx)
self.x = np.float32(self.x)
self.y = np.float32(self.y)
self.z = np.float32(self.z)
self.worldline_id = np.uint32(self.worldline_id)
self.parent_id = np.uint32(self.parent_id)
self.provenance = np.string_(self.provenance)
# if self.id == b"":
# self.id = get_random_base64_id()
def to_tuple(self) -> tuple:
return (self.id, self.t_idx, self.x, self.y, self.z, self.worldline_id,
self.parent_id, self.provenance)
def to_dict(self) -> dict:
return asdict(self)
def to_jsonable_dict(self) -> dict:
jsonable_dict = {}
for k, v in self.to_dict().items():
if type(v) is bytes or type(v) is np.string_:
v = v.decode()
if type(v) is np.uint32:
v = int(v)
if type(v) is np.float32:
v = str(v)
jsonable_dict[k] = v
return jsonable_dict
class AnnotationTable(DataclassTableBase):
row_class = Annotation
def get_t(self, t_idx: int):
return self.filter(lambda x: x["t_idx"] == t_idx)
def to_jsonable_dict(self) -> dict:
jsonable_dict = {}
for annotation in self:
jsonable_dict[str(annotation.id)] = annotation.to_jsonable_dict()
return jsonable_dict
@staticmethod
def from_hdf(filename: Path):
filename = Path(filename)
f = h5py.File(filename, "r")
data = pd.DataFrame()
# Handle 'correctly' shaped data from Pythons H5 library.
if len(f["id"].shape) == 1:
for k in f:
data[k] = f[k]
return AnnotationTable(data)
# Matlab cannot make 1D arrays, and it cannot save char arrays.
# Handle both of these here
else:
for k in f:
vals = np.squeeze(f[k])
if k == "provenance":
vals = np.array(
list(map(lambda x: x.tobytes(), vals)))
data[k] = vals
return AnnotationTable(data)
@dataclass
class Worldline:
id: np.uint32 = 0
name: np.string_ = b"null"
color: _S7 = b"#ffffff"
def __post_init__(self):
self.name = np.string_(self.name)
self.color = np.string_(self.color)
def to_dict(self) -> dict:
return asdict(self)
def to_jsonable_dict(self):
jsonable_dict = {}
for k, v in self.to_dict().items():
if type(v) is bytes or type(v) is np.string_:
v = v.decode()
if type(v) is np.uint32:
v = int(v)
if type(v) is np.float32:
v = str(v)
jsonable_dict[k] = v
return jsonable_dict
class WorldlineTable(DataclassTableBase):
row_class = Worldline
def to_jsonable_dict(self) -> dict:
jsonable_dict = {}
for worldline in self:
jsonable_dict[str(worldline.id)] = worldline.to_jsonable_dict()
return jsonable_dict
@staticmethod
def from_annotations(annotations: AnnotationTable):
worldlines = WorldlineTable()
worldlines._insert_and_preserve_id(Worldline(id=0))
worldline_ids = np.unique(annotations.df["worldline_id"])
for id in worldline_ids:
worldlines.insert(Worldline(name=str(id)))
return worldlines
@lru_cache()
def get_all_neuron_data() -> dict:
file_path = Path(__file__).parent.absolute()
neuron_list_file = file_path / "./neurons_celegans.json"
print(file_path)
with open(neuron_list_file) as fp:
data = json.load(fp)
return data
def get_neuron_data(x: str) -> dict:
return get_all_neuron_data()["neurons"][x]
def cleanup_worldlines(A: AnnotationTable, W: WorldlineTable
) -> Tuple[AnnotationTable, WorldlineTable]:
"""Sequentially renumber all annotations and worldlines, and remake the
worldline table."""
new_ids = range(1, len(A) + 1)
old_ids = A.df.id
update_aid = {old_ids[i]: new_ids[i] for i in range(len(new_ids))}
update_aid[0] = 0
A.df.id = A.df.id.apply(lambda x: update_aid[x])
A.df.parent_id = A.df.parent_id.apply(lambda x: update_aid[x])
used_W = np.unique(A.df.worldline_id)
N = len(used_W)
new_ids = range(1, N + 1)
update_wid = {used_W[i]: new_ids[i] for i in range(N)}
update_wid[0] = 0
A.df.worldline_id = A.df.worldline_id.apply(lambda x: update_wid)
W_new = WorldlineTable()
W_new._insert_and_preserve_id(Worldline())
for i in range(N):
if used_W[i] in W.df.id:
wline = W.get(used_W[i])
wline.id = new_ids[i]
else:
wline = Worldline(id=new_ids[i])
W_new._insert_and_preserve_id(wline)
return (A, W_new)
def color_worldlines(W: WorldlineTable) -> None:
"""Overwrite the color of all worldlines using evenly spaced hues and
random lightness / saturation, both high."""
def _get_N_colors(N):
colors = []
for i in range(N):
h = ((i * 157) % 360) / 360.
l = (60 + 10 * np.random.rand()) / 100.
s = (90 + 10 * np.random.rand()) / 100.
rgb = colorsys.hls_to_rgb(h, l, s)
rgb_256 = map(lambda x: max(0, min(int(x * 255), 255)), rgb)
rgb_code = "#{0:02x}{1:02x}{2:02x}".format(*rgb_256)
colors.append(rgb_code)
return colors
colors = _get_N_colors(len(W))
W.df.color = colors
def load_annotations(dataset: Optional[Path] = None
) -> Tuple[AnnotationTable, WorldlineTable]:
if dataset is None:
dataset = Path(".")
annotation_file = dataset / "annotations.h5"
if annotation_file.exists():
annotations = AnnotationTable.from_hdf(annotation_file)
else:
annotations = AnnotationTable()
worldline_file = dataset / "worldlines.h5"
if worldline_file.exists():
worldlines = WorldlineTable.from_hdf(worldline_file)
else:
worldlines = WorldlineTable.from_annotations(annotations)
return (annotations, worldlines)
def save_annotations(annotations: AnnotationTable,
worldlines: WorldlineTable,
dataset: Path = None) -> None:
if dataset is None:
dataset = Path(".")
annotations.to_hdf(dataset / "annotations.h5")
worldlines.to_hdf(dataset / "worldlines.h5")
def stash_annotations(annotations: AnnotationTable,
worldlines: WorldlineTable,
dataset: Path = None) -> None:
if dataset is None:
dataset = Path(".")
annotations.to_hdf(dataset / "annotations_unsaved.h5")
worldlines.to_hdf(dataset / "worldlines_unsaved.h5") | [
"numpy.uint32",
"pandas.DataFrame",
"h5py.File",
"json.load",
"numpy.random.rand",
"numpy.float32",
"numpy.dtype",
"pathlib.Path",
"uuid.uuid1",
"colorsys.hls_to_rgb",
"numpy.string_",
"numpy.squeeze",
"functools.lru_cache",
"dataclasses.asdict",
"numpy.unique"
] | [((633, 647), 'numpy.dtype', 'np.dtype', (['"""S4"""'], {}), "('S4')\n", (641, 647), True, 'import numpy as np\n'), ((654, 668), 'numpy.dtype', 'np.dtype', (['"""S7"""'], {}), "('S7')\n", (662, 668), True, 'import numpy as np\n'), ((4569, 4580), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (4578, 4580), False, 'from functools import lru_cache, reduce\n'), ((5435, 5463), 'numpy.unique', 'np.unique', (['A.df.worldline_id'], {}), '(A.df.worldline_id)\n', (5444, 5463), True, 'import numpy as np\n'), ((1105, 1123), 'numpy.uint32', 'np.uint32', (['self.id'], {}), '(self.id)\n', (1114, 1123), True, 'import numpy as np\n'), ((1145, 1166), 'numpy.uint32', 'np.uint32', (['self.t_idx'], {}), '(self.t_idx)\n', (1154, 1166), True, 'import numpy as np\n'), ((1184, 1202), 'numpy.float32', 'np.float32', (['self.x'], {}), '(self.x)\n', (1194, 1202), True, 'import numpy as np\n'), ((1220, 1238), 'numpy.float32', 'np.float32', (['self.y'], {}), '(self.y)\n', (1230, 1238), True, 'import numpy as np\n'), ((1256, 1274), 'numpy.float32', 'np.float32', (['self.z'], {}), '(self.z)\n', (1266, 1274), True, 'import numpy as np\n'), ((1303, 1331), 'numpy.uint32', 'np.uint32', (['self.worldline_id'], {}), '(self.worldline_id)\n', (1312, 1331), True, 'import numpy as np\n'), ((1357, 1382), 'numpy.uint32', 'np.uint32', (['self.parent_id'], {}), '(self.parent_id)\n', (1366, 1382), True, 'import numpy as np\n'), ((1409, 1436), 'numpy.string_', 'np.string_', (['self.provenance'], {}), '(self.provenance)\n', (1419, 1436), True, 'import numpy as np\n'), ((1724, 1736), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (1730, 1736), False, 'from dataclasses import dataclass, asdict, field\n'), ((2577, 2591), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2581, 2591), False, 'from pathlib import Path\n'), ((2605, 2629), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2614, 2629), False, 'import h5py\n'), ((2645, 2659), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2657, 2659), True, 'import pandas as pd\n'), ((3422, 3443), 'numpy.string_', 'np.string_', (['self.name'], {}), '(self.name)\n', (3432, 3443), True, 'import numpy as np\n'), ((3465, 3487), 'numpy.string_', 'np.string_', (['self.color'], {}), '(self.color)\n', (3475, 3487), True, 'import numpy as np\n'), ((3535, 3547), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (3541, 3547), False, 'from dataclasses import dataclass, asdict, field\n'), ((4409, 4450), 'numpy.unique', 'np.unique', (["annotations.df['worldline_id']"], {}), "(annotations.df['worldline_id'])\n", (4418, 4450), True, 'import numpy as np\n'), ((4802, 4815), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (4811, 4815), False, 'import json\n'), ((6858, 6867), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (6862, 6867), False, 'from pathlib import Path\n'), ((7518, 7527), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (7522, 7527), False, 'from pathlib import Path\n'), ((7829, 7838), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (7833, 7838), False, 'from pathlib import Path\n'), ((562, 574), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (572, 574), False, 'import uuid\n'), ((6410, 6438), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['h', 'l', 's'], {}), '(h, l, s)\n', (6429, 6438), False, 'import colorsys\n'), ((3031, 3047), 'numpy.squeeze', 'np.squeeze', (['f[k]'], {}), '(f[k])\n', (3041, 3047), True, 'import numpy as np\n'), ((4633, 4647), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4637, 4647), False, 'from pathlib import Path\n'), ((6315, 6331), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6329, 6331), True, 'import numpy as np\n'), ((6367, 6383), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6381, 6383), True, 'import numpy as np\n')] |
from os import name
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation
import numpy as np
from tensorflow.python.keras.backend import shape
from utils.ssd_utils import get_pred_4, get_pred_6
# The SSD model Architecture
def ssd_model():
#input shape is assumed to be 300x300x3
input = Input(shape=[300, 300, 3])
scale = np.linspace(0.2, 0.9, 6)
#Conv1
conv1_1 = Conv2D(64, (3,3), padding='same', name='Conv1_1')(input)
conv1_2 = Conv2D(64, (3,3), padding='same', name='Conv1_2')(conv1_1)
#Pool1
pool_1 = MaxPool2D(strides=(2, 2), padding='same', name='Pool1')(conv1_2)
#Conv2
conv2_1 = Conv2D(128, (3,3), padding='same', name='Conv2_1')(pool_1)
conv2_2 = Conv2D(128, (3,3), padding='same', name='Conv2_2')(conv2_1)
#Pool2
pool_2 = MaxPool2D(strides=(2, 2), padding='same', name='Pool2')(conv2_2)
#Conv3
conv3_1 = Conv2D(256, (3,3), padding='same', name='Conv3_1')(pool_2)
conv3_2 = Conv2D(256, (3,3), padding='same', name='Conv3_2')(conv3_1)
conv3_3 = Conv2D(256, (3,3), padding='same', name='Conv3_3')(conv3_2)
#Pool3
pool_3 = MaxPool2D(strides=(2, 2), padding='same', name='Pool3')(conv3_3)
#Conv4
conv4_1 = Conv2D(512, (3,3), padding='same', name='Conv4_1')(pool_3)
conv4_2 = Conv2D(512, (3,3), padding='same', name='Conv4_2')(conv4_1)
conv4_3 = Conv2D(512, (3,3), padding='same', name='Conv4_3')(conv4_2)
# In SSD paper, conv4_3 is required to be normalised
'''Have a look here'''
# conv4_3_norm = L2Normalization(gamma_init=20, name="Conv4_3_norm")(conv4_3)
conv4_3_norm = conv4_3
#Pool4
pool_4 = MaxPool2D(strides=(2, 2), padding='same', name='Pool4')(conv4_3)
#Conv5
conv5_1 = Conv2D(512, (3,3), padding='same', name='Conv5_1')(pool_4)
conv5_2 = Conv2D(512, (3,3), padding='same', name='Conv5_2')(conv5_1)
conv5_3 = Conv2D(512, (3,3), padding='same', name='Conv5_3')(conv5_2)
''' This was the base network (VGG_16), further now we will add layers to extract various feature maps'''
pool_5 = MaxPool2D(pool_size=(3, 3), strides=(1, 1), padding="same",name="Pool5")(conv5_3)
fc_6 = Conv2D(1024, (3,3), padding='same', name='fc6', dilation_rate=(6,6))(pool_5)
fc_7 = Conv2D(1024, (1,1), padding='same', name='fc7')(fc_6)
conv8_1 = Conv2D(256, (1,1), padding='same', name='Conv8_1')(fc_7)
conv8_2 = Conv2D(512, (3,3),strides=(2, 2), padding='same', name='Conv8_2')(conv8_1)
conv9_1 = Conv2D(128, (1,1), padding='same', name='Conv9_1')(conv8_2)
conv9_2 = Conv2D(256, (3,3),strides=(2, 2), padding='same', name='Conv9_2')(conv9_1)
conv10_1 = Conv2D(128, (1,1), padding='valid', name='Conv10_1')(conv9_2)
conv10_2 = Conv2D(256, (3,3), padding='valid', name='Conv10_2')(conv10_1)
conv11_1 = Conv2D(128, (1,1), padding='valid', name='Conv11_1')(conv10_2)
conv11_2 = Conv2D(256, (3,3), padding='valid', name='Conv11_2')(conv11_1)
# To get the scales for different feature maps(here 6)
# Min scale is 0.2, and max scale is 0.9
conf_layers = []
loc_layers = []
def_layers = []
conv4_3_norm_conf, conv4_3_norm_loc, conv4_3_norm_def = get_pred_4(conv4_3_norm, scale[0], scale[1])
conf_layers.append(conv4_3_norm_conf)
loc_layers.append(conv4_3_norm_loc)
def_layers.append(conv4_3_norm_def)
fc_7_conf, fc_7_loc, fc_7_def = get_pred_6(fc_7, scale[1], scale[2])
conf_layers.append(fc_7_conf)
loc_layers.append(fc_7_loc)
def_layers.append(fc_7_def)
conv8_2_conf, conv8_2_loc, conv8_2_def = get_pred_6(conv8_2, scale[2], scale[3])
conf_layers.append(conv8_2_conf)
loc_layers.append(conv8_2_loc)
def_layers.append(conv8_2_def)
conv9_2_conf, conv9_2_loc, conv9_2_def = get_pred_6(conv9_2, scale[3], scale[4])
conf_layers.append(conv9_2_conf)
loc_layers.append(conv9_2_loc)
def_layers.append(conv9_2_def)
conv10_2_conf, conv10_2_loc, conv10_2_def = get_pred_4(conv10_2, scale[4], scale[5])
conf_layers.append(conv10_2_conf)
loc_layers.append(conv10_2_loc)
def_layers.append(conv10_2_def)
conv11_2_conf, conv11_2_loc, conv11_2_def = get_pred_4(conv11_2, scale[5], 1.0)
conf_layers.append(conv11_2_conf)
loc_layers.append(conv11_2_loc)
def_layers.append(conv11_2_def)
conf = Concatenate(axis=-2)(conf_layers)
conf_act = Activation('softmax')(conf)
loc = Concatenate(axis=-2)(loc_layers)
defau = Concatenate(axis=-2)(def_layers)
predictions = Concatenate(axis=-1, name='Predictions')([conf_act, loc, defau])
return Model(inputs=input, outputs=predictions) | [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.Input",
"numpy.linspace",
"utils.ssd_utils.get_pred_4",
"utils.ssd_utils.get_pred_6"
] | [((431, 457), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '[300, 300, 3]'}), '(shape=[300, 300, 3])\n', (436, 457), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((471, 495), 'numpy.linspace', 'np.linspace', (['(0.2)', '(0.9)', '(6)'], {}), '(0.2, 0.9, 6)\n', (482, 495), True, 'import numpy as np\n'), ((3292, 3336), 'utils.ssd_utils.get_pred_4', 'get_pred_4', (['conv4_3_norm', 'scale[0]', 'scale[1]'], {}), '(conv4_3_norm, scale[0], scale[1])\n', (3302, 3336), False, 'from utils.ssd_utils import get_pred_4, get_pred_6\n'), ((3496, 3532), 'utils.ssd_utils.get_pred_6', 'get_pred_6', (['fc_7', 'scale[1]', 'scale[2]'], {}), '(fc_7, scale[1], scale[2])\n', (3506, 3532), False, 'from utils.ssd_utils import get_pred_4, get_pred_6\n'), ((3677, 3716), 'utils.ssd_utils.get_pred_6', 'get_pred_6', (['conv8_2', 'scale[2]', 'scale[3]'], {}), '(conv8_2, scale[2], scale[3])\n', (3687, 3716), False, 'from utils.ssd_utils import get_pred_4, get_pred_6\n'), ((3870, 3909), 'utils.ssd_utils.get_pred_6', 'get_pred_6', (['conv9_2', 'scale[3]', 'scale[4]'], {}), '(conv9_2, scale[3], scale[4])\n', (3880, 3909), False, 'from utils.ssd_utils import get_pred_4, get_pred_6\n'), ((4066, 4106), 'utils.ssd_utils.get_pred_4', 'get_pred_4', (['conv10_2', 'scale[4]', 'scale[5]'], {}), '(conv10_2, scale[4], scale[5])\n', (4076, 4106), False, 'from utils.ssd_utils import get_pred_4, get_pred_6\n'), ((4266, 4301), 'utils.ssd_utils.get_pred_4', 'get_pred_4', (['conv11_2', 'scale[5]', '(1.0)'], {}), '(conv11_2, scale[5], 1.0)\n', (4276, 4301), False, 'from utils.ssd_utils import get_pred_4, get_pred_6\n'), ((4685, 4725), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'predictions'}), '(inputs=input, outputs=predictions)\n', (4690, 4725), False, 'from tensorflow.keras import Model\n'), ((522, 572), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv1_1"""'}), "(64, (3, 3), padding='same', name='Conv1_1')\n", (528, 572), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((593, 643), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv1_2"""'}), "(64, (3, 3), padding='same', name='Conv1_2')\n", (599, 643), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((677, 732), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""Pool1"""'}), "(strides=(2, 2), padding='same', name='Pool1')\n", (686, 732), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((768, 819), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv2_1"""'}), "(128, (3, 3), padding='same', name='Conv2_1')\n", (774, 819), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((841, 892), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv2_2"""'}), "(128, (3, 3), padding='same', name='Conv2_2')\n", (847, 892), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((926, 981), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""Pool2"""'}), "(strides=(2, 2), padding='same', name='Pool2')\n", (935, 981), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1017, 1068), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv3_1"""'}), "(256, (3, 3), padding='same', name='Conv3_1')\n", (1023, 1068), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1090, 1141), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv3_2"""'}), "(256, (3, 3), padding='same', name='Conv3_2')\n", (1096, 1141), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1164, 1215), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv3_3"""'}), "(256, (3, 3), padding='same', name='Conv3_3')\n", (1170, 1215), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1249, 1304), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""Pool3"""'}), "(strides=(2, 2), padding='same', name='Pool3')\n", (1258, 1304), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1340, 1391), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv4_1"""'}), "(512, (3, 3), padding='same', name='Conv4_1')\n", (1346, 1391), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1413, 1464), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv4_2"""'}), "(512, (3, 3), padding='same', name='Conv4_2')\n", (1419, 1464), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1487, 1538), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv4_3"""'}), "(512, (3, 3), padding='same', name='Conv4_3')\n", (1493, 1538), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1767, 1822), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""Pool4"""'}), "(strides=(2, 2), padding='same', name='Pool4')\n", (1776, 1822), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1858, 1909), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv5_1"""'}), "(512, (3, 3), padding='same', name='Conv5_1')\n", (1864, 1909), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((1931, 1982), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv5_2"""'}), "(512, (3, 3), padding='same', name='Conv5_2')\n", (1937, 1982), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2005, 2056), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""Conv5_3"""'}), "(512, (3, 3), padding='same', name='Conv5_3')\n", (2011, 2056), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2189, 2262), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""Pool5"""'}), "(pool_size=(3, 3), strides=(1, 1), padding='same', name='Pool5')\n", (2198, 2262), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2283, 2353), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""fc6"""', 'dilation_rate': '(6, 6)'}), "(1024, (3, 3), padding='same', name='fc6', dilation_rate=(6, 6))\n", (2289, 2353), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2371, 2419), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(1024)', '(1, 1)'], {'padding': '"""same"""', 'name': '"""fc7"""'}), "(1024, (1, 1), padding='same', name='fc7')\n", (2377, 2419), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2440, 2491), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'name': '"""Conv8_1"""'}), "(256, (1, 1), padding='same', name='Conv8_1')\n", (2446, 2491), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2511, 2578), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""Conv8_2"""'}), "(512, (3, 3), strides=(2, 2), padding='same', name='Conv8_2')\n", (2517, 2578), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2601, 2652), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(1, 1)'], {'padding': '"""same"""', 'name': '"""Conv9_1"""'}), "(128, (1, 1), padding='same', name='Conv9_1')\n", (2607, 2652), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2675, 2742), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""Conv9_2"""'}), "(256, (3, 3), strides=(2, 2), padding='same', name='Conv9_2')\n", (2681, 2742), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2766, 2819), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(1, 1)'], {'padding': '"""valid"""', 'name': '"""Conv10_1"""'}), "(128, (1, 1), padding='valid', name='Conv10_1')\n", (2772, 2819), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2843, 2896), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""valid"""', 'name': '"""Conv10_2"""'}), "(256, (3, 3), padding='valid', name='Conv10_2')\n", (2849, 2896), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((2922, 2975), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(1, 1)'], {'padding': '"""valid"""', 'name': '"""Conv11_1"""'}), "(128, (1, 1), padding='valid', name='Conv11_1')\n", (2928, 2975), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((3000, 3053), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""valid"""', 'name': '"""Conv11_2"""'}), "(256, (3, 3), padding='valid', name='Conv11_2')\n", (3006, 3053), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((4424, 4444), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-2)'}), '(axis=-2)\n', (4435, 4444), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((4473, 4494), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (4483, 4494), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((4511, 4531), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-2)'}), '(axis=-2)\n', (4522, 4531), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((4556, 4576), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-2)'}), '(axis=-2)\n', (4567, 4576), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n'), ((4608, 4648), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)', 'name': '"""Predictions"""'}), "(axis=-1, name='Predictions')\n", (4619, 4648), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Input, Concatenate, Reshape, Activation\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import numpy as np
import pandas as pd
from q2_diversity import procrustes_analysis
class PCoATests(unittest.TestCase):
def setUp(self):
axes = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6']
eigvals = pd.Series(np.array([1.5, 0.75, 0.3, 0.15, 0.15, 0.15]),
index=axes)
samples = np.array([[0, 3, 4, 4, 0, 0],
[1, 2, 1, 4, 3, 3],
[2, 3, 1, 0, 0, 1],
[0, 3, 2, 4, 3, 0]])
proportion_explained = pd.Series([0.50, 0.25, 0.10, 0.05, 0.05, 0.05],
index=axes)
samples_df = pd.DataFrame(samples,
index=['A', 'B', 'C', 'D'],
columns=axes)
self.reference = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals,
samples_df,
proportion_explained=proportion_explained)
samples = np.array([[0.7, 3.7, 4.7, 4.7, 0.7, 0.7],
[1.7, 2.7, 1.7, 4.7, 3.7, 3.7],
[2.7, 3.7, 1.7, 0.7, 0.7, 1.7],
[30, 3.7, 2.7, 4.7, 3.7, 0.7]])
samples_df = pd.DataFrame(samples,
index=['A', 'B', 'C', 'D'],
columns=axes)
self.other = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals.copy(),
samples_df.copy(),
proportion_explained=proportion_explained.copy())
S = [[-0.1358036, 0.0452679, 0.3621430, 0.1810715, -0.2716072],
[0.0452679, -0.1358036, -0.1810715, 0.1810715, 0.2716072],
[0.2263394, 0.0452679, -0.1810715, -0.5432145, -0.2716072],
[-0.1358036, 0.0452679, 0.0000000, 0.1810715, 0.2716072]]
samples_df = pd.DataFrame(np.array(S),
index=['A', 'B', 'C', 'D'],
columns=axes[:5])
self.expected_ref = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals[:5].copy(),
samples_df.copy(),
proportion_explained=proportion_explained[:5].copy())
S = [[0.0482731, -0.0324317, 0.0494312, -0.0316828, -0.1584374],
[0.0803620, -0.0718115, -0.0112234, -0.0171011, -0.1101209],
[0.0527554, -0.0042753, -0.0126739, -0.0969602, -0.0964822],
[-0.1813905, 0.1085184, -0.0255339, 0.1457440, 0.3650405]]
samples_df = pd.DataFrame(np.array(S),
index=['A', 'B', 'C', 'D'],
columns=axes[:5])
self.expected_other = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals[:5].copy(),
samples_df.copy(),
proportion_explained=proportion_explained[:5].copy())
noise = [
[0.04988341, -0.03234447, 0.03177641, -0.03507789, -0.13564394],
[0.09117347, -0.08318546, -0.02249053, -0.01597601, -0.10901541],
[0.05077765, -0.003994, -0.00984688, -0.09356729, -0.09648388],
[-0.19183453, 0.11952393, 0.000561, 0.14462118, 0.34114323]]
samples_df = pd.DataFrame(np.array(noise),
index=['A', 'B', 'C', 'D'],
columns=axes[:5])
self.expected_noise = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals[:5].copy(),
samples_df.copy(),
proportion_explained=proportion_explained[:5].copy())
self.expected_m2 = 0.72240956
self.expected_p = 0.5
def test_procrustes(self):
ref, other, m2_results = procrustes_analysis(self.reference,
self.other)
true_m2 = m2_results['true M^2 value'][0]
true_p_value = m2_results['p-value for true M^2 value'][0]
skbio.util.assert_ordination_results_equal(ref, self.expected_ref)
skbio.util.assert_ordination_results_equal(other, self.expected_other)
self.assertAlmostEqual(true_m2, self.expected_m2)
self.assertNotAlmostEqual(true_p_value, self.expected_p)
def test_non_zero_p(self):
# generated with np.random.seed(3); np.random.randn(4, 6)
noise = np.array(
[[1.78862847, 0.43650985, 0.09649747, -1.8634927, -0.2773882,
-0.35475898],
[-0.08274148, -0.62700068, -0.04381817, -0.47721803, -1.31386475,
0.88462238],
[0.88131804, 1.70957306, 0.05003364, -0.40467741, -0.54535995,
-1.54647732],
[0.98236743, -1.10106763, -1.18504653, -0.2056499, 1.48614836,
0.23671627]])
self.other.samples += noise
ref, other, m2_results = procrustes_analysis(self.reference,
self.other)
true_m2 = m2_results['true M^2 value'][0]
true_p_value = m2_results['p-value for true M^2 value'][0]
skbio.util.assert_ordination_results_equal(ref, self.expected_ref)
skbio.util.assert_ordination_results_equal(other, self.expected_noise)
# the p value shouldn't be zero even in the presence of noise
self.assertAlmostEqual(true_m2, 0.7388121)
self.assertNotAlmostEqual(true_p_value, 0.001)
def test_zero_permutations_nan_pvalue(self):
ref, other, m2_results = procrustes_analysis(self.reference,
self.other,
permutations='disable')
true_m2 = m2_results['true M^2 value'][0]
true_p_value = m2_results['p-value for true M^2 value'][0]
skbio.util.assert_ordination_results_equal(ref, self.expected_ref)
skbio.util.assert_ordination_results_equal(other, self.expected_other)
self.assertAlmostEqual(true_m2, self.expected_m2)
self.assertTrue(np.isnan(true_p_value))
def test_procrustes_bad_dimensions(self):
self.other.samples = self.other.samples.iloc[:, :4]
self.other.eigvals = self.other.eigvals[:4]
self.other.proportion_explained = self.other.proportion_explained[:4]
with self.assertRaisesRegex(ValueError, 'The matrices cannot be '):
procrustes_analysis(self.reference, self.other)
def test_procrustes_over_dimensions(self):
with self.assertRaisesRegex(ValueError, 'Cannot fit fewer dimensions '
'than available'):
procrustes_analysis(self.reference, self.other, 11)
def test_procrustes_id_mismatch(self):
msg = 'The ordinations represent two different sets of samples'
self.other.samples.index = pd.Index([':L', ':D', ':)', ':('])
with self.assertRaisesRegex(ValueError, msg):
procrustes_analysis(self.reference, self.other)
self.other.samples.index = pd.Index([':L', 'B', 'C', 'D'])
with self.assertRaisesRegex(ValueError, msg):
procrustes_analysis(self.reference, self.other)
self.other.samples.index = pd.Index(['a', 'b', 'c', 'd'])
with self.assertRaisesRegex(ValueError, msg):
procrustes_analysis(self.reference, self.other)
| [
"pandas.DataFrame",
"numpy.isnan",
"pandas.Index",
"skbio.util.assert_ordination_results_equal",
"numpy.array",
"pandas.Series",
"skbio.OrdinationResults",
"q2_diversity.procrustes_analysis"
] | [((715, 809), 'numpy.array', 'np.array', (['[[0, 3, 4, 4, 0, 0], [1, 2, 1, 4, 3, 3], [2, 3, 1, 0, 0, 1], [0, 3, 2, 4, 3, 0]\n ]'], {}), '([[0, 3, 4, 4, 0, 0], [1, 2, 1, 4, 3, 3], [2, 3, 1, 0, 0, 1], [0, 3,\n 2, 4, 3, 0]])\n', (723, 809), True, 'import numpy as np\n'), ((922, 979), 'pandas.Series', 'pd.Series', (['[0.5, 0.25, 0.1, 0.05, 0.05, 0.05]'], {'index': 'axes'}), '([0.5, 0.25, 0.1, 0.05, 0.05, 0.05], index=axes)\n', (931, 979), True, 'import pandas as pd\n'), ((1044, 1107), 'pandas.DataFrame', 'pd.DataFrame', (['samples'], {'index': "['A', 'B', 'C', 'D']", 'columns': 'axes'}), "(samples, index=['A', 'B', 'C', 'D'], columns=axes)\n", (1056, 1107), True, 'import pandas as pd\n'), ((1201, 1333), 'skbio.OrdinationResults', 'skbio.OrdinationResults', (['"""PCoA"""', '"""Principal Coordinate Analysis"""', 'eigvals', 'samples_df'], {'proportion_explained': 'proportion_explained'}), "('PCoA', 'Principal Coordinate Analysis', eigvals,\n samples_df, proportion_explained=proportion_explained)\n", (1224, 1333), False, 'import skbio\n'), ((1430, 1572), 'numpy.array', 'np.array', (['[[0.7, 3.7, 4.7, 4.7, 0.7, 0.7], [1.7, 2.7, 1.7, 4.7, 3.7, 3.7], [2.7, 3.7,\n 1.7, 0.7, 0.7, 1.7], [30, 3.7, 2.7, 4.7, 3.7, 0.7]]'], {}), '([[0.7, 3.7, 4.7, 4.7, 0.7, 0.7], [1.7, 2.7, 1.7, 4.7, 3.7, 3.7], [\n 2.7, 3.7, 1.7, 0.7, 0.7, 1.7], [30, 3.7, 2.7, 4.7, 3.7, 0.7]])\n', (1438, 1572), True, 'import numpy as np\n'), ((1673, 1736), 'pandas.DataFrame', 'pd.DataFrame', (['samples'], {'index': "['A', 'B', 'C', 'D']", 'columns': 'axes'}), "(samples, index=['A', 'B', 'C', 'D'], columns=axes)\n", (1685, 1736), True, 'import pandas as pd\n'), ((4388, 4435), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other'], {}), '(self.reference, self.other)\n', (4407, 4435), False, 'from q2_diversity import procrustes_analysis\n'), ((4615, 4681), 'skbio.util.assert_ordination_results_equal', 'skbio.util.assert_ordination_results_equal', (['ref', 'self.expected_ref'], {}), '(ref, self.expected_ref)\n', (4657, 4681), False, 'import skbio\n'), ((4690, 4760), 'skbio.util.assert_ordination_results_equal', 'skbio.util.assert_ordination_results_equal', (['other', 'self.expected_other'], {}), '(other, self.expected_other)\n', (4732, 4760), False, 'import skbio\n'), ((4999, 5336), 'numpy.array', 'np.array', (['[[1.78862847, 0.43650985, 0.09649747, -1.8634927, -0.2773882, -0.35475898],\n [-0.08274148, -0.62700068, -0.04381817, -0.47721803, -1.31386475, \n 0.88462238], [0.88131804, 1.70957306, 0.05003364, -0.40467741, -\n 0.54535995, -1.54647732], [0.98236743, -1.10106763, -1.18504653, -\n 0.2056499, 1.48614836, 0.23671627]]'], {}), '([[1.78862847, 0.43650985, 0.09649747, -1.8634927, -0.2773882, -\n 0.35475898], [-0.08274148, -0.62700068, -0.04381817, -0.47721803, -\n 1.31386475, 0.88462238], [0.88131804, 1.70957306, 0.05003364, -\n 0.40467741, -0.54535995, -1.54647732], [0.98236743, -1.10106763, -\n 1.18504653, -0.2056499, 1.48614836, 0.23671627]])\n', (5007, 5336), True, 'import numpy as np\n'), ((5495, 5542), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other'], {}), '(self.reference, self.other)\n', (5514, 5542), False, 'from q2_diversity import procrustes_analysis\n'), ((5723, 5789), 'skbio.util.assert_ordination_results_equal', 'skbio.util.assert_ordination_results_equal', (['ref', 'self.expected_ref'], {}), '(ref, self.expected_ref)\n', (5765, 5789), False, 'import skbio\n'), ((5798, 5868), 'skbio.util.assert_ordination_results_equal', 'skbio.util.assert_ordination_results_equal', (['other', 'self.expected_noise'], {}), '(other, self.expected_noise)\n', (5840, 5868), False, 'import skbio\n'), ((6129, 6200), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other'], {'permutations': '"""disable"""'}), "(self.reference, self.other, permutations='disable')\n", (6148, 6200), False, 'from q2_diversity import procrustes_analysis\n'), ((6433, 6499), 'skbio.util.assert_ordination_results_equal', 'skbio.util.assert_ordination_results_equal', (['ref', 'self.expected_ref'], {}), '(ref, self.expected_ref)\n', (6475, 6499), False, 'import skbio\n'), ((6508, 6578), 'skbio.util.assert_ordination_results_equal', 'skbio.util.assert_ordination_results_equal', (['other', 'self.expected_other'], {}), '(other, self.expected_other)\n', (6550, 6578), False, 'import skbio\n'), ((7458, 7492), 'pandas.Index', 'pd.Index', (["[':L', ':D', ':)', ':(']"], {}), "([':L', ':D', ':)', ':('])\n", (7466, 7492), True, 'import pandas as pd\n'), ((7643, 7674), 'pandas.Index', 'pd.Index', (["[':L', 'B', 'C', 'D']"], {}), "([':L', 'B', 'C', 'D'])\n", (7651, 7674), True, 'import pandas as pd\n'), ((7825, 7855), 'pandas.Index', 'pd.Index', (["['a', 'b', 'c', 'd']"], {}), "(['a', 'b', 'c', 'd'])\n", (7833, 7855), True, 'import pandas as pd\n'), ((611, 655), 'numpy.array', 'np.array', (['[1.5, 0.75, 0.3, 0.15, 0.15, 0.15]'], {}), '([1.5, 0.75, 0.3, 0.15, 0.15, 0.15])\n', (619, 655), True, 'import numpy as np\n'), ((2380, 2391), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (2388, 2391), True, 'import numpy as np\n'), ((3101, 3112), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (3109, 3112), True, 'import numpy as np\n'), ((3854, 3869), 'numpy.array', 'np.array', (['noise'], {}), '(noise)\n', (3862, 3869), True, 'import numpy as np\n'), ((6662, 6684), 'numpy.isnan', 'np.isnan', (['true_p_value'], {}), '(true_p_value)\n', (6670, 6684), True, 'import numpy as np\n'), ((7013, 7060), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other'], {}), '(self.reference, self.other)\n', (7032, 7060), False, 'from q2_diversity import procrustes_analysis\n'), ((7255, 7306), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other', '(11)'], {}), '(self.reference, self.other, 11)\n', (7274, 7306), False, 'from q2_diversity import procrustes_analysis\n'), ((7559, 7606), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other'], {}), '(self.reference, self.other)\n', (7578, 7606), False, 'from q2_diversity import procrustes_analysis\n'), ((7741, 7788), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other'], {}), '(self.reference, self.other)\n', (7760, 7788), False, 'from q2_diversity import procrustes_analysis\n'), ((7922, 7969), 'q2_diversity.procrustes_analysis', 'procrustes_analysis', (['self.reference', 'self.other'], {}), '(self.reference, self.other)\n', (7941, 7969), False, 'from q2_diversity import procrustes_analysis\n')] |
import numpy as np
from tensortools import ssd, logging
from tensortools import utils
from tensortools import testing
logger = logging.get_logger(__name__)
def test_generate_anchors(random_annotations):
annotations = np.reshape(random_annotations, [-1, 4])
annotations = annotations / ([1600, 640] * 2)
annotations = annotations[:, 2] - annotations[:, 0], annotations[:, 3] - annotations[:, 1]
annotations = np.stack(annotations, axis=1)
fm_sizes = [[10, 10], [5, 5]]
num_clusters = 2
anchors = ssd.generate_anchors(annotations, fm_sizes, num_clusters)
assert len(fm_sizes) == len(anchors)
assert testing.approx(0.35, utils.avg_iou(annotations, anchors[0]), places=1)
assert testing.approx(0.43, utils.avg_iou(annotations, anchors[1]), places=1)
def test_generate_anchors_with_voc_data(voc_annotations):
fm_sizes = [[10, 10], [5, 5]]
num_clusters = 2
anchors = ssd.generate_anchors(voc_annotations, fm_sizes, num_clusters)
assert len(fm_sizes) == len(anchors)
assert num_clusters == len(anchors[0])
assert num_clusters == len(anchors[1])
| [
"numpy.stack",
"tensortools.utils.avg_iou",
"tensortools.ssd.generate_anchors",
"numpy.reshape",
"tensortools.logging.get_logger"
] | [((129, 157), 'tensortools.logging.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (147, 157), False, 'from tensortools import ssd, logging\n'), ((225, 264), 'numpy.reshape', 'np.reshape', (['random_annotations', '[-1, 4]'], {}), '(random_annotations, [-1, 4])\n', (235, 264), True, 'import numpy as np\n'), ((428, 457), 'numpy.stack', 'np.stack', (['annotations'], {'axis': '(1)'}), '(annotations, axis=1)\n', (436, 457), True, 'import numpy as np\n'), ((528, 585), 'tensortools.ssd.generate_anchors', 'ssd.generate_anchors', (['annotations', 'fm_sizes', 'num_clusters'], {}), '(annotations, fm_sizes, num_clusters)\n', (548, 585), False, 'from tensortools import ssd, logging\n'), ((921, 982), 'tensortools.ssd.generate_anchors', 'ssd.generate_anchors', (['voc_annotations', 'fm_sizes', 'num_clusters'], {}), '(voc_annotations, fm_sizes, num_clusters)\n', (941, 982), False, 'from tensortools import ssd, logging\n'), ((660, 698), 'tensortools.utils.avg_iou', 'utils.avg_iou', (['annotations', 'anchors[0]'], {}), '(annotations, anchors[0])\n', (673, 698), False, 'from tensortools import utils\n'), ((742, 780), 'tensortools.utils.avg_iou', 'utils.avg_iou', (['annotations', 'anchors[1]'], {}), '(annotations, anchors[1])\n', (755, 780), False, 'from tensortools import utils\n')] |
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.colors import ListedColormap, BoundaryNorm
def image_grid(image_arrays, ncols=1, nrows=1, figsize=(10,10), cmap='Greys_r', norm=None):
"""
Shows a list of 2D images in a grid like fashion. If no row or columns numbers are given, all images are shown in a single row.
Args:
image_arrays (list): a list of image arrays
n_cols (int): the number of columns of the image_grid
n_rows (int): the number of rows of the grid
figsize (tuple): a tuble (int, int) specifying the image size
cmap (str): a colormap
norm: a matplotplib normalization such as BoundaryNorm or Normalize
Returns:
(pyplot) returns matplotlib plot
"""
if len(image_arrays[0].shape)==2 or image_arrays[0].shape[-1] > 1: #test for grayscale image
def f(x): return x
else:
def f(x): return np.squeeze(x,axis=2)
if len(image_arrays) == 1: #only a single image given
fig, axes = plot.subplots(nrows=1, ncols=1, figsize=figsize)
plot.imshow(f(image_arrays[0]), cmap=cmap, norm=norm)
axes.axis('off')
else:
if(ncols*nrows < len(image_arrays)):
if ncols==1 and nrows==1:#if neither nrows or ncols are specified
fig, axes = plot.subplots(nrows=1, ncols=len(image_arrays), figsize=figsize)
[axes[i].imshow(f(image_arrays[i]), cmap=cmap, norm=norm) for i in range(len(image_arrays))]
[axes[i].axis('off') for i in range(len(image_arrays))]
else:
if ncols==1 and nrows>1: #if nrows is specified
ncols=int(np.ceil(len(image_arrays)/nrows))
fig, axes = plot.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
elif ncols>1 and nrows==1:#if ncols is specified
nrows=int(np.ceil(len(image_arrays)/ncols))
fig, axes = plot.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
[axes[int((i)/ncols), i%ncols].imshow(f(image_arrays[i]), cmap=cmap, norm=norm) for i in range(len(image_arrays))]
[axes[int((i)/ncols), i%ncols].axis('off') for i in range(len(image_arrays))]
else:#if nrows and ncols are specified
fig, axes = plot.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
[axes[int((i)/ncols), i%ncols].imshow(f(image_arrays[i]), cmap=cmap, norm=norm) for i in range(len(image_arrays))]
[axes[int((i)/ncols), i%ncols].axis('off') for i in range(len(image_arrays))]
def colorize(image_arrays, ncols=1, nrows=1, figsize=(10,10), background=(1.,1.,1.,1.)):
"""
Shows a list of 2D label images in a colored, grid like fashion. If no row or columns numbers are given, all images are shown in a single row.
Args:
image_arrays (list): a list of image arrays
n_cols (int): the number of columns of the image_grid
n_rows (int): the number of rows of the grid
figsize (tuple): a tuble (int, int) specifying the image size
background (tuple): a color tuple defining the background color (background is supposed to be < 0)
Returns:
(pyplot) returns matplotlib plot
"""
norm, cmap = _qualitative_cmap(background=background)
image_grid(image_arrays, ncols, nrows, figsize, cmap=cmap, norm=norm)
def _qualitative_cmap(background=(1.,1.,1.,1.),ncolors=1024):
cmap = plot.cm.Paired
cmaplist = [cmap(i%cmap.N) for i in range(ncolors)]
cmaplist[0]=background
norm = BoundaryNorm(boundaries=range(ncolors), ncolors=ncolors)
cmap=ListedColormap(cmaplist, 'qualitative')
return (norm,cmap)
| [
"numpy.squeeze",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.subplots"
] | [((3631, 3670), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['cmaplist', '"""qualitative"""'], {}), "(cmaplist, 'qualitative')\n", (3645, 3670), False, 'from matplotlib.colors import ListedColormap, BoundaryNorm\n'), ((1020, 1068), 'matplotlib.pyplot.subplots', 'plot.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': 'figsize'}), '(nrows=1, ncols=1, figsize=figsize)\n', (1033, 1068), True, 'import matplotlib.pyplot as plot\n'), ((921, 942), 'numpy.squeeze', 'np.squeeze', (['x'], {'axis': '(2)'}), '(x, axis=2)\n', (931, 942), True, 'import numpy as np\n'), ((2313, 2369), 'matplotlib.pyplot.subplots', 'plot.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': 'figsize'}), '(nrows=nrows, ncols=ncols, figsize=figsize)\n', (2326, 2369), True, 'import matplotlib.pyplot as plot\n'), ((1741, 1797), 'matplotlib.pyplot.subplots', 'plot.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': 'figsize'}), '(nrows=nrows, ncols=ncols, figsize=figsize)\n', (1754, 1797), True, 'import matplotlib.pyplot as plot\n'), ((1959, 2015), 'matplotlib.pyplot.subplots', 'plot.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': 'figsize'}), '(nrows=nrows, ncols=ncols, figsize=figsize)\n', (1972, 2015), True, 'import matplotlib.pyplot as plot\n')] |
from abc import abstractmethod, abstractproperty
from typing import List, Tuple, Union
import numpy as np
from quara.objects.qoperation import QOperation
from quara.objects.qoperations import SetQOperations
from quara.protocol.qtomography.qtomography import QTomography
from quara.qcircuit.experiment import Experiment
from quara.utils import matrix_util
class StandardQTomography(QTomography):
def __init__(
self,
experiment: Experiment,
set_qoperations: SetQOperations,
):
"""initialize standard quantum tomography class.
To inherit from this class, set the following instance variables in the constructor of the subclass.
- ``_coeffs_0th``: return value of ``get_coeffs_0th`` function.
- ``_coeffs_1st``: return value of ``get_coeffs_1st`` function.
- ``_map_experiment_to_setqoperations``: a map from indices of Experiment to indices of SetQOperations.
if you map the 0th state to the 1st state, set ``{("state", 0): ("state", 1)}``.
- ``_map_setqoperations_to_experiment``: a map from indices of SetQOperations to indices of Experiment.
Parameters
----------
experiment : Experiment
Experiment class used in quantum tomography.
set_qoperations : SetQOperations
SetQOperations class used in quantum tomography.
"""
super().__init__(experiment, set_qoperations)
self._coeffs_0th = None
self._coeffs_1st = None
def get_coeffs_0th(self, schedule_index: int, x: int) -> np.float64:
"""returns 0th coefficients specified by schedule index and measurement outcome index
Parameters
----------
schedule_index : int
schedule index.
x : int
measurement outcome index.
Returns
-------
np.float64
0th coefficients.
"""
return self._coeffs_0th[(schedule_index, x)]
def get_coeffs_0th_vec(self, schedule_index: int) -> np.ndarray:
"""returns 0th coefficient vector specified by schedule index.
Parameters
----------
schedule_index : int
schedule index.
Returns
-------
np.ndarray( , dtype=np.float64)
0th coefficients vector
"""
l = []
xs = [key[1] for key in self._coeffs_0th.keys() if key[0] == schedule_index]
for x in xs:
coeffs_0th = self.get_coeffs_0th(schedule_index, x)
l.append(coeffs_0th)
return np.array(l, dtype=np.float64)
def get_coeffs_1st(self, schedule_index: int, x: int) -> np.ndarray:
"""returns 1st coefficients specified by schedule index and measurement outcome index
Parameters
----------
schedule_index : int
schedule index.
x : int
measurement outcome index.
Returns
-------
np.ndarray
1st coefficients.
"""
return self._coeffs_1st[(schedule_index, x)]
def get_coeffs_1st_mat(self, schedule_index: int) -> np.ndarray:
"""returns 1st coefficient matrix specified by schedule index.
Parameters
----------
schedule_index : int
schedule index.
Returns
-------
np.ndarray
1st coefficient matrix.
"""
ll = []
xs = [key[1] for key in self._coeffs_0th.keys() if key[0] == schedule_index]
for x in xs:
coeffs_1st = self.get_coeffs_1st(schedule_index, x)
ll.append(coeffs_1st)
return np.stack(ll)
def calc_matA(self) -> np.ndarray:
"""returns the matrix A.
the matrix A is a stack of 1st coefficients.
Returns
-------
np.ndarray
the matrix A.
"""
sorted_coeffs_1st = sorted(self._coeffs_1st.items())
sorted_values = [k[1] for k in sorted_coeffs_1st]
matA = np.vstack(sorted_values)
return matA
def calc_vecB(self) -> np.ndarray:
"""returns the vector B.
the vector B is a stack of 0th coefficients.
Returns
-------
np.ndarray
the vector B.
"""
sorted_coeffs_0th = sorted(self._coeffs_0th.items())
sorted_values = [k[1] for k in sorted_coeffs_0th]
vecB = np.vstack(sorted_values).flatten()
return vecB
def is_fullrank_matA(self) -> bool:
"""returns whether matrix A is full rank.
Returns
-------
bool
True where matrix A is full rank, False otherwise.
"""
matA = self.calc_matA()
rank = np.linalg.matrix_rank(matA)
size = min(matA.shape)
return size == rank
@abstractmethod
def num_outcomes(self, schedule_index: int) -> int:
"""returns the number of outcomes of probability distribution of a schedule index.
Parameters
----------
schedule_index: int
Returns
-------
int
the number of outcomes
"""
raise NotImplementedError()
@abstractmethod
def convert_var_to_qoperation(self, var: np.ndarray) -> QOperation:
"""converts variable to QOperation.
this function must be implemented in the subclass.
Parameters
----------
var : np.ndarray
variables.
Returns
-------
QOperation
converted QOperation.
Raises
------
NotImplementedError
this function does not be implemented in the subclass.
"""
raise NotImplementedError()
@abstractmethod
def generate_empty_estimation_obj_with_setting_info(self) -> QOperation:
"""generates the empty estimation object with setting information.
Returns
-------
QOperation
the empty estimation object(QOperation) with setting information.
Raises
------
NotImplementedError
this function does not be implemented in the subclass.
"""
raise NotImplementedError()
def is_all_same_composite_systems(self, targets: List[QOperation]) -> bool:
"""check all qoperations have same composite systems.
Parameters
----------
targets : List[QOperation]
list of qoperations.
Returns
-------
bool
whether all qoperations have same composite systems.
"""
if len(targets) <= 1:
return True
checks = [
targets[0]._composite_system == target._composite_system
for target in targets[1:]
]
return all(checks)
def calc_prob_dist(self, qope: QOperation, schedule_index: int) -> List[float]:
"""calculates a probability distribution.
see :func:`~quara.protocol.qtomography.qtomography.QTomography.calc_prob_dist`
"""
prob_dists = self.calc_prob_dists(qope)
return prob_dists[schedule_index]
def calc_prob_dists(self, qope: QOperation) -> List[List[float]]:
"""calculates probability distributions.
see :func:`~quara.protocol.qtomography.qtomography.QTomography.calc_prob_dists`
"""
if self._on_para_eq_constraint:
tmp_prob_dists = self.calc_matA() @ qope.to_var() + self.calc_vecB()
else:
tmp_prob_dists = (
self.calc_matA() @ qope.to_stacked_vector() + self.calc_vecB()
)
prob_dists = tmp_prob_dists.reshape((self.num_schedules, -1))
prob_dists = matrix_util.truncate_and_normalize(prob_dists)
return prob_dists
def calc_covariance_mat_single(
self, qope: QOperation, schedule_index: int, data_num: int
) -> np.ndarray:
"""calculates covariance matrix of single probability distribution.
Parameters
----------
qope : QOperation
QOperation to calculate covariance matrix of single probability distribution.
schedule_index : int
schedule index.
data_num : int
number of data.
Returns
-------
np.ndarray
covariance matrix of single probability distribution.
"""
prob_dist = self.calc_prob_dist(qope, schedule_index)
val = matrix_util.calc_covariance_mat(prob_dist, data_num)
return val
def calc_covariance_mat_total(
self, qope: QOperation, data_num_list: List[int]
) -> np.ndarray:
"""calculates covariance matrix of total probability distributions.
Parameters
----------
qope : QOperation
QOperation to calculate covariance matrix of total probability distributions.
data_num_list : List[int]
list of number of data.
Returns
-------
np.ndarray
covariance matrix of total probability distributions.
"""
matrices = []
for schedule_index in range(self.num_schedules):
mat_single = self.calc_covariance_mat_single(
qope, schedule_index, data_num_list[schedule_index]
)
matrices.append(mat_single)
val = matrix_util.calc_direct_sum(matrices)
return val
def calc_covariance_linear_mat_total(
self, qope: QOperation, data_num_list: List[int]
) -> np.ndarray:
"""calculates covariance matrix of linear estimate of probability distributions.
Parameters
----------
qope : QOperation
QOperation to calculate covariance matrix of linear estimate of probability distributions.
data_num_list : List[int]
list of number of data.
Returns
-------
np.ndarray
covariance matrix of linear estimate of probability distributions.
"""
A_inv = matrix_util.calc_left_inv(self.calc_matA())
val = matrix_util.calc_conjugate(
A_inv, self.calc_covariance_mat_total(qope, data_num_list)
)
return val
def _calc_mse_linear_analytical_mode_var(
self, qope: QOperation, data_num_list: List[int]
) -> np.float64:
val = np.trace(self.calc_covariance_linear_mat_total(qope, data_num_list))
return val
def _calc_mse_linear_analytical_mode_qoperation(
self, qope: QOperation, data_num_list: List[int]
) -> np.float64:
return self._calc_mse_linear_analytical_mode_var(qope, data_num_list)
def calc_mse_linear_analytical(
self, qope: QOperation, data_num_list: List[int], mode: str = "qoperation"
) -> np.float64:
"""calculates mean squared error of linear estimate of probability distributions.
Parameters
----------
qope : QOperation
QOperation to calculate mean squared error of linear estimate of probability distributions.
data_num_list : List[int]
list of number of data.
Returns
-------
np.float64
mean squared error of linear estimate of probability distributions.
"""
if mode == "qoperation":
val = self._calc_mse_linear_analytical_mode_qoperation(qope, data_num_list)
elif mode == "var":
val = self._calc_mse_linear_analytical_mode_var(qope, data_num_list)
else:
error_message = "The argument `mode` must be `qoperation` or `var`"
raise ValueError(error_message)
return val
def calc_mse_empi_dists_analytical(
self, qope: QOperation, data_num_list: List[int]
) -> np.float64:
"""calculates analytical solution of mean squared error of empirical distributions.
Parameters
----------
qope : QOperation
QOperation to calculate analytical solution of mean squared error of empirical distributions.
data_num_list : List[int]
list of number of data.
Returns
-------
np.float64
analytical solution of mean squared error of empirical distributions.
"""
mse_total = 0.0
for schedule_index, data_num in enumerate(data_num_list):
mse_total += np.trace(
self.calc_covariance_mat_single(qope, schedule_index, data_num)
)
return mse_total
def calc_fisher_matrix(
self, j: int, var: Union[QOperation, np.ndarray]
) -> np.ndarray:
"""calculates Fisher matrix of one schedule.
Parameters
----------
j : int
schedule_index
var : Union[QOperation, np.ndarray]
variables to calculate Fisher matrix of one schedule.
Returns
-------
np.ndarray
Fisher matrix of one schedule.
"""
if isinstance(var, QOperation):
var = var.to_var()
matA = self.calc_matA()
vecB = self.calc_vecB()
size_prob_dist = int(len(matA) / self.num_schedules)
prob_dist = (
matA[size_prob_dist * j : size_prob_dist * (j + 1)] @ var
+ vecB[size_prob_dist * j : size_prob_dist * (j + 1)]
)
grad_prob_dist = matA[size_prob_dist * j : size_prob_dist * (j + 1)]
fisher_matrix = matrix_util.calc_fisher_matrix(prob_dist, grad_prob_dist)
return fisher_matrix
def calc_fisher_matrix_total(
self, var: Union[QOperation, np.ndarray], weights: List[float]
) -> np.ndarray:
"""calculates Fisher matrix of the total schedule.
Parameters
----------
var : Union[QOperation, np.ndarray]
variables to calculate Fisher matrix of one schedule.
weights : List[float]
weights to calculate Fisher matrix of one schedule.
Returns
-------
np.ndarray
Fisher matrix of the total schedule.
"""
fisher_matrices = []
for schedule_index in range(self.num_schedules):
fisher_matrices.append(
weights[schedule_index] * self.calc_fisher_matrix(schedule_index, var)
)
return sum(fisher_matrices)
def calc_cramer_rao_bound(
self, var: Union[QOperation, np.ndarray], N: int, list_N: List[int]
) -> np.ndarray:
"""calculates Cramer-Rao bound.
Parameters
----------
var : Union[QOperation, np.ndarray]
variables to calculate Cramer-Rao bound.
N : int
representative value of the number of data.
list_N : List[int]
the number of data for each schedule.
Returns
-------
np.ndarray
Cramer-Rao bound.
"""
return self._calc_cramer_rao_bound(var, N, list_N)
def _calc_cramer_rao_bound(
self, var: Union[QOperation, np.ndarray], N: int, list_N: List[int]
) -> np.ndarray:
weights = [tmp_N / N for tmp_N in list_N]
fisher = self.calc_fisher_matrix_total(var, weights)
val = np.trace(np.linalg.inv(fisher)) / N
return val
@abstractmethod
def _get_target_index(self, experiment: Experiment, schedule_index: int) -> int:
raise NotImplementedError()
@abstractproperty
def _estimated_qoperation_type(cls):
raise NotImplementedError()
def generate_prob_dists_sequence(
self, true_object: QOperation
) -> List[List[Tuple[int, np.ndarray]]]:
tmp_experiment = self._experiment.copy()
class_name = self.__class__._estimated_qoperation_type.__name__.lower()
attribute_name = (
class_name + "es" if class_name.endswith("ss") else class_name + "s"
)
for schedule_index in range(len(tmp_experiment.schedules)):
target_index = self._get_target_index(tmp_experiment, schedule_index)
getattr(tmp_experiment, attribute_name)[target_index] = true_object
prob_dists_sequence_tmp = tmp_experiment.calc_prob_dists()
return prob_dists_sequence_tmp
def _validate_schedules_str(self, schedules: str) -> None:
supported_schedule_strs = ["all"]
if schedules not in supported_schedule_strs:
message = f"The string specified in schedules must be one of the following, not '{schedules}': {supported_schedule_strs}"
raise ValueError(message)
| [
"numpy.stack",
"quara.utils.matrix_util.calc_covariance_mat",
"quara.utils.matrix_util.truncate_and_normalize",
"quara.utils.matrix_util.calc_direct_sum",
"numpy.linalg.matrix_rank",
"numpy.array",
"numpy.linalg.inv",
"quara.utils.matrix_util.calc_fisher_matrix",
"numpy.vstack"
] | [((2631, 2660), 'numpy.array', 'np.array', (['l'], {'dtype': 'np.float64'}), '(l, dtype=np.float64)\n', (2639, 2660), True, 'import numpy as np\n'), ((3736, 3748), 'numpy.stack', 'np.stack', (['ll'], {}), '(ll)\n', (3744, 3748), True, 'import numpy as np\n'), ((4114, 4138), 'numpy.vstack', 'np.vstack', (['sorted_values'], {}), '(sorted_values)\n', (4123, 4138), True, 'import numpy as np\n'), ((4851, 4878), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['matA'], {}), '(matA)\n', (4872, 4878), True, 'import numpy as np\n'), ((7909, 7955), 'quara.utils.matrix_util.truncate_and_normalize', 'matrix_util.truncate_and_normalize', (['prob_dists'], {}), '(prob_dists)\n', (7943, 7955), False, 'from quara.utils import matrix_util\n'), ((8677, 8729), 'quara.utils.matrix_util.calc_covariance_mat', 'matrix_util.calc_covariance_mat', (['prob_dist', 'data_num'], {}), '(prob_dist, data_num)\n', (8708, 8729), False, 'from quara.utils import matrix_util\n'), ((9595, 9632), 'quara.utils.matrix_util.calc_direct_sum', 'matrix_util.calc_direct_sum', (['matrices'], {}), '(matrices)\n', (9622, 9632), False, 'from quara.utils import matrix_util\n'), ((13770, 13827), 'quara.utils.matrix_util.calc_fisher_matrix', 'matrix_util.calc_fisher_matrix', (['prob_dist', 'grad_prob_dist'], {}), '(prob_dist, grad_prob_dist)\n', (13800, 13827), False, 'from quara.utils import matrix_util\n'), ((4525, 4549), 'numpy.vstack', 'np.vstack', (['sorted_values'], {}), '(sorted_values)\n', (4534, 4549), True, 'import numpy as np\n'), ((15584, 15605), 'numpy.linalg.inv', 'np.linalg.inv', (['fisher'], {}), '(fisher)\n', (15597, 15605), True, 'import numpy as np\n')] |
import time
import numpy as np
import pytest
from aesara.compile.function import function
from aesara.compile.io import In
from aesara.compile.mode import Mode, get_mode
from aesara.compile.sharedvalue import shared
from aesara.configdefaults import config
from aesara.graph.basic import Apply
from aesara.graph.fg import FunctionGraph
from aesara.graph.op import Op
from aesara.ifelse import ifelse
from aesara.link.c.basic import OpWiseCLinker
from aesara.link.c.exceptions import MissingGXX
from aesara.link.utils import map_storage
from aesara.link.vm import VM, Loop, Stack, VMLinker
from aesara.tensor.math import cosh, tanh
from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors
from aesara.tensor.var import TensorConstant
from tests import unittest_tools as utt
class SomeOp(Op):
def perform(self, node, inputs, outputs):
pass
def make_node(self, x):
return Apply(self, [x], [x.type()])
class TestCallbacks:
# Test the `VMLinker`'s callback argument, which can be useful for debugging.
def setup_method(self):
self.n_callbacks = {}
def callback(self, node, thunk, storage_map, compute_map):
key = node.op.__class__.__name__
self.n_callbacks.setdefault(key, 0)
self.n_callbacks[key] += 1
def test_callback(self):
a, b, c = scalars("abc")
f = function(
[a, b, c],
(a + b) + c,
mode=Mode(optimizer=None, linker=VMLinker(callback=self.callback)),
)
f(1, 2, 3)
assert sum(self.n_callbacks.values()) == len(f.maker.fgraph.toposort())
f(1, 2, 3)
assert sum(self.n_callbacks.values()) == len(f.maker.fgraph.toposort()) * 2
def test_callback_with_ifelse(self):
a, b, c = scalars("abc")
f = function(
[a, b, c],
ifelse(a, 2 * b, 2 * c),
mode=Mode(optimizer=None, linker=VMLinker(callback=self.callback)),
)
f(1, 2, 3)
assert self.n_callbacks["IfElse"] == 2
def test_use_c_thunks():
a_at = scalars("a")
b_at = vectors("b")
a = np.array(0.0).astype(config.floatX)
b = np.array([2.0]).astype(config.floatX)
cases = [False]
if config.cxx:
cases.append(True)
for use_c_thunks in cases:
f = function(
[a_at, b_at],
a_at * b_at,
mode=Mode(
optimizer=None, linker=VMLinker(c_thunks=use_c_thunks, use_cloop=False)
),
)
assert np.array_equal(a * b, f(a, b))
assert any(hasattr(t, "cthunk") for t in f.vm.thunks) == use_c_thunks
@pytest.mark.skipif(
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_speed():
# TODO FIXME: This isn't a real test.
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = z + z
return z
def numpy_version(x, depth):
z = x
for d in range(depth):
z = z + z
return z
def time_numpy():
steps_a = 5
steps_b = 100
x = np.asarray([2.0, 3.0], dtype=config.floatX)
numpy_version(x, steps_a)
t0 = time.time()
# print numpy_version(x, steps_a)
t1 = time.time()
t2 = time.time()
# print numpy_version(x, steps_b)
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print(f"numpy takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
def time_linker(name, linker):
steps_a = 5
steps_b = 100
x = vector()
a = build_graph(x, steps_a)
b = build_graph(x, steps_b)
f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
f_b = function([x], b, mode=Mode(optimizer=None, linker=linker()))
f_a([2.0, 3.0])
t0 = time.time()
f_a([2.0, 3.0])
t1 = time.time()
f_b([2.0, 3.0])
t2 = time.time()
f_b([2.0, 3.0])
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print(f"{name} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
time_linker("c|py", OpWiseCLinker)
time_linker("vmLinker", VMLinker)
time_linker("vmLinker_nogc", lambda: VMLinker(allow_gc=False))
if config.cxx:
time_linker("vmLinker_CLOOP", lambda: VMLinker(allow_gc=False, use_cloop=True))
time_numpy()
@pytest.mark.parametrize(
"linker",
[
VMLinker(),
VMLinker(allow_gc=False),
VMLinker(allow_gc=False, use_cloop=True),
],
)
def test_speed_lazy(linker):
# TODO FIXME: This isn't a real test.
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = ifelse(z[0] > 0, -z, z)
return z
steps_a = 10
steps_b = 100
x = vector()
a = build_graph(x, steps_a)
b = build_graph(x, steps_b)
f_a = function([x], a, mode=Mode(optimizer=None, linker=linker))
f_b = function([x], b, mode=Mode(optimizer=None, linker=linker))
f_a([2.0])
t0 = time.time()
f_a([2.0])
t1 = time.time()
f_b([2.0])
t2 = time.time()
f_b([2.0])
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print(f"{linker} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
@pytest.mark.parametrize(
"linker", [VMLinker(allow_partial_eval=True, use_cloop=False), "cvm"]
)
def test_partial_function(linker):
x = scalar("input")
y = x**2
f = function(
[x], [y + 7, y - 9, y / 14.0], mode=Mode(optimizer=None, linker=linker)
)
if linker == "cvm":
from aesara.link.c.cvm import CVM
assert isinstance(f.vm, CVM)
else:
assert isinstance(f.vm, Stack)
assert f(3, output_subset=[0, 1, 2]) == f(3)
assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]]
utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858]))
@pytest.mark.parametrize(
"linker", [VMLinker(allow_partial_eval=True, use_cloop=False), "cvm"]
)
def test_partial_function_with_output_keys(linker):
x = scalar("input")
y = 3 * x
f = function(
[x], {"a": y * 5, "b": y - 7}, mode=Mode(optimizer=None, linker=linker)
)
assert f(5, output_subset=["a"])["a"] == f(5)["a"]
@pytest.mark.parametrize(
"linker", [VMLinker(allow_partial_eval=True, use_cloop=False), "cvm"]
)
def test_partial_function_with_updates(linker):
x = lscalar("input")
y = shared(np.asarray(1, "int64"), name="global")
mode = Mode(optimizer=None, linker=linker)
f = function(
[x],
[x, x + 34],
updates=[(y, x + 1)],
mode=mode,
)
g = function(
[x],
[x - 6],
updates=[(y, y + 3)],
mode=mode,
)
assert f(3, output_subset=[]) == []
assert y.get_value() == 4
assert g(30, output_subset=[0]) == [24]
assert g(40, output_subset=[]) == []
assert y.get_value() == 10
def test_allow_gc_cvm():
mode = config.mode
if mode in ["DEBUG_MODE", "DebugMode"]:
mode = "FAST_RUN"
v = vector()
f = function([v], v + 1, mode=mode)
f([1])
n = list(f.maker.fgraph.apply_nodes)[0].outputs[0]
assert f.vm.storage_map[n][0] is None
assert f.vm.allow_gc is True
f.vm.allow_gc = False
assert f.vm.allow_gc is False
f([1])
assert f.vm.storage_map[n][0] is not None
f.vm.allow_gc = True
assert f.vm.allow_gc is True
f([1])
assert f.vm.storage_map[n][0] is None
class RunOnce(Op):
__props__ = ("nb_run",)
def __init__(self):
self.nb_run = 0
def make_node(self, x):
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
assert self.nb_run == 0
self.nb_run += 1
outputs[0][0] = inputs[0].copy()
def test_vm_gc():
x = vector()
p = RunOnce()(x)
mode = Mode(linker=VMLinker(lazy=True))
f = function([In(x, mutable=True)], [p + 1, p + 2], mode=mode)
f([1, 2, 3])
p = RunOnce()(x)
pp = p + p
f = function([x], [pp + pp], mode=mode)
f([1, 2, 3])
def test_reallocation():
x = scalar("x")
y = scalar("y")
z = tanh(3 * x + y) + cosh(x + 5 * y)
# The functionality is currently implement for non lazy and non c VM only.
for linker in [
VMLinker(allow_gc=False, lazy=False, use_cloop=False),
VMLinker(allow_gc=True, lazy=False, use_cloop=False),
]:
m = get_mode(Mode(linker=linker))
m = m.excluding("fusion", "inplace")
f = function([x, y], z, name="test_reduce_memory", mode=m)
output = f(1, 2)
assert output
storage_map = f.vm.storage_map
def check_storage(storage_map):
for i in storage_map:
if not isinstance(i, TensorConstant):
keys_copy = list(storage_map.keys())[:]
keys_copy.remove(i)
for o in keys_copy:
if storage_map[i][0] and storage_map[i][0] is storage_map[o][0]:
return [True, storage_map[o][0]]
return [False, None]
assert check_storage(storage_map)[0]
assert len({id(v) for v in storage_map.values()}) < len(storage_map)
@pytest.mark.skipif(
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_no_recycling():
x = vector()
for lnk in [
VMLinker(use_cloop=True),
VMLinker(use_cloop=False, lazy=True),
VMLinker(use_cloop=False, lazy=False, allow_gc=True),
VMLinker(use_cloop=False, lazy=False, allow_gc=False),
]:
mode = Mode(optimizer="fast_compile", linker=lnk)
f = function([x], x + 1, mode=mode)
f2 = function([x], (x + 1) * 2, mode=mode)
m1 = f.vm.thunks[0].thunk.module
m2 = f2.vm.thunks[0].thunk.module
assert m1 is m2
@pytest.mark.skipif(
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_VMLinker_make_vm_cvm():
# We don't want this at module level, since CXX might not be present
from aesara.link.c.cvm import CVM
a = scalar()
linker = VMLinker(allow_gc=False, use_cloop=True)
f = function([a], a, mode=Mode(optimizer=None, linker=linker))
assert isinstance(f.vm, CVM)
def test_VMLinker_make_vm_no_cvm():
from importlib import reload
from unittest.mock import patch
with config.change_flags(cxx=""):
# Make sure that GXX isn't present
with pytest.raises(MissingGXX):
import aesara.link.c.cvm
reload(aesara.link.c.cvm)
# Make sure that `cvm` module is missing
with patch.dict("sys.modules", {"aesara.link.c.cvm": None}):
a = scalar()
linker = VMLinker(allow_gc=False, use_cloop=True)
with pytest.raises(ModuleNotFoundError):
import aesara.link.c.cvm
f = function([a], a, mode=Mode(optimizer=None, linker=linker))
assert isinstance(f.vm, Loop)
def test_VMLinker_exception():
class BadOp(Op):
def perform(self, node, inputs, outputs):
pass
def make_node(self, x):
return Apply(self, [x], [x.type()])
def make_thunk(self, *args, **kwargs):
raise Exception("bad Op")
a = scalar()
linker = VMLinker(allow_gc=False, use_cloop=True)
z = BadOp()(a)
with pytest.raises(Exception, match=".*Apply node that caused the error.*"):
function([a], z, mode=Mode(optimizer=None, linker=linker))
def test_VM_exception():
class SomeVM(VM):
def __call__(self):
pass
a = scalar()
fg = FunctionGraph(outputs=[SomeOp()(a)])
with pytest.raises(ValueError, match="`nodes` and `thunks`.*"):
SomeVM(fg, fg.apply_nodes, [], [])
def test_Loop_exception():
a = scalar()
fg = FunctionGraph(outputs=[SomeOp()(a)])
# Create valid(ish) `VM` arguments
nodes = fg.toposort()
input_storage, output_storage, storage_map = map_storage(
fg, nodes, None, None, None
)
compute_map = {}
for k in storage_map:
compute_map[k] = [k.owner is None]
thunks = [node.op.make_thunk(node, storage_map, compute_map, []) for node in nodes]
with pytest.raises(ValueError, match="`nodes`, `thunks` and `post_thunk_clear`.*"):
Loop(
fg,
fg.apply_nodes,
thunks,
[],
storage_map,
input_storage,
output_storage,
{},
[],
)
def test_Loop_updates():
a = scalar("a")
a_plus_1 = a + 1
fg = FunctionGraph(outputs=[a, a_plus_1], clone=False)
nodes = fg.toposort()
input_storage, output_storage, storage_map = map_storage(
fg, nodes, None, None, None
)
compute_map = {}
for k in storage_map:
compute_map[k] = [k.owner is None]
thunks = [node.op.make_thunk(node, storage_map, compute_map, []) for node in nodes]
assert a in storage_map
update_vars = {a: a_plus_1}
loop_vm = Loop(
fg,
fg.apply_nodes,
thunks,
[],
storage_map,
input_storage,
output_storage,
update_vars,
)
storage_map[a][0] = np.array(1.0, dtype=config.floatX)
res = loop_vm()
assert res == [np.array(1.0), np.array(2.0)]
assert storage_map[a][0] == np.array(2.0)
def test_Stack_updates():
a = scalar("a")
a_plus_1 = a + 1
fg = FunctionGraph(outputs=[a, a_plus_1], clone=False)
nodes = fg.toposort()
input_storage, output_storage, storage_map = map_storage(
fg, nodes, None, None, None
)
compute_map = {}
for k in storage_map:
compute_map[k] = [k.owner is None]
thunks = [node.op.make_thunk(node, storage_map, compute_map, []) for node in nodes]
assert a in storage_map
update_vars = {a: a_plus_1}
stack_vm = Stack(
fg,
fg.apply_nodes,
thunks,
[],
storage_map,
input_storage,
output_storage,
update_vars,
compute_map,
False,
)
storage_map[a][0] = np.array(1.0, dtype=config.floatX)
res = stack_vm()
assert res == [np.array(1.0), np.array(2.0)]
assert storage_map[a][0] == np.array(2.0)
| [
"aesara.tensor.type.vector",
"pytest.mark.skipif",
"aesara.configdefaults.config.change_flags",
"aesara.ifelse.ifelse",
"aesara.tensor.type.scalars",
"aesara.compile.mode.Mode",
"aesara.compile.function.function",
"pytest.raises",
"aesara.tensor.type.lscalar",
"numpy.asarray",
"aesara.tensor.typ... | [((2632, 2730), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not config.cxx)'], {'reason': '"""G++ not available, so we need to skip this test."""'}), "(not config.cxx, reason=\n 'G++ not available, so we need to skip this test.')\n", (2650, 2730), False, 'import pytest\n'), ((9261, 9359), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not config.cxx)'], {'reason': '"""G++ not available, so we need to skip this test."""'}), "(not config.cxx, reason=\n 'G++ not available, so we need to skip this test.')\n", (9279, 9359), False, 'import pytest\n'), ((9896, 9994), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not config.cxx)'], {'reason': '"""G++ not available, so we need to skip this test."""'}), "(not config.cxx, reason=\n 'G++ not available, so we need to skip this test.')\n", (9914, 9994), False, 'import pytest\n'), ((2069, 2081), 'aesara.tensor.type.scalars', 'scalars', (['"""a"""'], {}), "('a')\n", (2076, 2081), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((2093, 2105), 'aesara.tensor.type.vectors', 'vectors', (['"""b"""'], {}), "('b')\n", (2100, 2105), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((4824, 4832), 'aesara.tensor.type.vector', 'vector', ([], {}), '()\n', (4830, 4832), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((5061, 5072), 'time.time', 'time.time', ([], {}), '()\n', (5070, 5072), False, 'import time\n'), ((5097, 5108), 'time.time', 'time.time', ([], {}), '()\n', (5106, 5108), False, 'import time\n'), ((5135, 5146), 'time.time', 'time.time', ([], {}), '()\n', (5144, 5146), False, 'import time\n'), ((5171, 5182), 'time.time', 'time.time', ([], {}), '()\n', (5180, 5182), False, 'import time\n'), ((5449, 5464), 'aesara.tensor.type.scalar', 'scalar', (['"""input"""'], {}), "('input')\n", (5455, 5464), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((6085, 6100), 'aesara.tensor.type.scalar', 'scalar', (['"""input"""'], {}), "('input')\n", (6091, 6100), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((6435, 6451), 'aesara.tensor.type.lscalar', 'lscalar', (['"""input"""'], {}), "('input')\n", (6442, 6451), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((6518, 6553), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (6522, 6553), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((6563, 6622), 'aesara.compile.function.function', 'function', (['[x]', '[x, x + 34]'], {'updates': '[(y, x + 1)]', 'mode': 'mode'}), '([x], [x, x + 34], updates=[(y, x + 1)], mode=mode)\n', (6571, 6622), False, 'from aesara.compile.function import function\n'), ((6670, 6725), 'aesara.compile.function.function', 'function', (['[x]', '[x - 6]'], {'updates': '[(y, y + 3)]', 'mode': 'mode'}), '([x], [x - 6], updates=[(y, y + 3)], mode=mode)\n', (6678, 6725), False, 'from aesara.compile.function import function\n'), ((7081, 7089), 'aesara.tensor.type.vector', 'vector', ([], {}), '()\n', (7087, 7089), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((7098, 7129), 'aesara.compile.function.function', 'function', (['[v]', '(v + 1)'], {'mode': 'mode'}), '([v], v + 1, mode=mode)\n', (7106, 7129), False, 'from aesara.compile.function import function\n'), ((7846, 7854), 'aesara.tensor.type.vector', 'vector', ([], {}), '()\n', (7852, 7854), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((8049, 8084), 'aesara.compile.function.function', 'function', (['[x]', '[pp + pp]'], {'mode': 'mode'}), '([x], [pp + pp], mode=mode)\n', (8057, 8084), False, 'from aesara.compile.function import function\n'), ((8137, 8148), 'aesara.tensor.type.scalar', 'scalar', (['"""x"""'], {}), "('x')\n", (8143, 8148), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((8157, 8168), 'aesara.tensor.type.scalar', 'scalar', (['"""y"""'], {}), "('y')\n", (8163, 8168), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((9394, 9402), 'aesara.tensor.type.vector', 'vector', ([], {}), '()\n', (9400, 9402), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((10149, 10157), 'aesara.tensor.type.scalar', 'scalar', ([], {}), '()\n', (10155, 10157), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((10171, 10211), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)', 'use_cloop': '(True)'}), '(allow_gc=False, use_cloop=True)\n', (10179, 10211), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((11335, 11343), 'aesara.tensor.type.scalar', 'scalar', ([], {}), '()\n', (11341, 11343), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((11357, 11397), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)', 'use_cloop': '(True)'}), '(allow_gc=False, use_cloop=True)\n', (11365, 11397), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((11670, 11678), 'aesara.tensor.type.scalar', 'scalar', ([], {}), '()\n', (11676, 11678), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((11875, 11883), 'aesara.tensor.type.scalar', 'scalar', ([], {}), '()\n', (11881, 11883), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((12045, 12085), 'aesara.link.utils.map_storage', 'map_storage', (['fg', 'nodes', 'None', 'None', 'None'], {}), '(fg, nodes, None, None, None)\n', (12056, 12085), False, 'from aesara.link.utils import map_storage\n'), ((12621, 12632), 'aesara.tensor.type.scalar', 'scalar', (['"""a"""'], {}), "('a')\n", (12627, 12632), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((12663, 12712), 'aesara.graph.fg.FunctionGraph', 'FunctionGraph', ([], {'outputs': '[a, a_plus_1]', 'clone': '(False)'}), '(outputs=[a, a_plus_1], clone=False)\n', (12676, 12712), False, 'from aesara.graph.fg import FunctionGraph\n'), ((12789, 12829), 'aesara.link.utils.map_storage', 'map_storage', (['fg', 'nodes', 'None', 'None', 'None'], {}), '(fg, nodes, None, None, None)\n', (12800, 12829), False, 'from aesara.link.utils import map_storage\n'), ((13101, 13198), 'aesara.link.vm.Loop', 'Loop', (['fg', 'fg.apply_nodes', 'thunks', '[]', 'storage_map', 'input_storage', 'output_storage', 'update_vars'], {}), '(fg, fg.apply_nodes, thunks, [], storage_map, input_storage,\n output_storage, update_vars)\n', (13105, 13198), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((13291, 13325), 'numpy.array', 'np.array', (['(1.0)'], {'dtype': 'config.floatX'}), '(1.0, dtype=config.floatX)\n', (13299, 13325), True, 'import numpy as np\n'), ((13480, 13491), 'aesara.tensor.type.scalar', 'scalar', (['"""a"""'], {}), "('a')\n", (13486, 13491), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((13522, 13571), 'aesara.graph.fg.FunctionGraph', 'FunctionGraph', ([], {'outputs': '[a, a_plus_1]', 'clone': '(False)'}), '(outputs=[a, a_plus_1], clone=False)\n', (13535, 13571), False, 'from aesara.graph.fg import FunctionGraph\n'), ((13648, 13688), 'aesara.link.utils.map_storage', 'map_storage', (['fg', 'nodes', 'None', 'None', 'None'], {}), '(fg, nodes, None, None, None)\n', (13659, 13688), False, 'from aesara.link.utils import map_storage\n'), ((13961, 14079), 'aesara.link.vm.Stack', 'Stack', (['fg', 'fg.apply_nodes', 'thunks', '[]', 'storage_map', 'input_storage', 'output_storage', 'update_vars', 'compute_map', '(False)'], {}), '(fg, fg.apply_nodes, thunks, [], storage_map, input_storage,\n output_storage, update_vars, compute_map, False)\n', (13966, 14079), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((14188, 14222), 'numpy.array', 'np.array', (['(1.0)'], {'dtype': 'config.floatX'}), '(1.0, dtype=config.floatX)\n', (14196, 14222), True, 'import numpy as np\n'), ((1339, 1353), 'aesara.tensor.type.scalars', 'scalars', (['"""abc"""'], {}), "('abc')\n", (1346, 1353), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((1777, 1791), 'aesara.tensor.type.scalars', 'scalars', (['"""abc"""'], {}), "('abc')\n", (1784, 1791), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((3105, 3148), 'numpy.asarray', 'np.asarray', (['[2.0, 3.0]'], {'dtype': 'config.floatX'}), '([2.0, 3.0], dtype=config.floatX)\n', (3115, 3148), True, 'import numpy as np\n'), ((3197, 3208), 'time.time', 'time.time', ([], {}), '()\n', (3206, 3208), False, 'import time\n'), ((3264, 3275), 'time.time', 'time.time', ([], {}), '()\n', (3273, 3275), False, 'import time\n'), ((3289, 3300), 'time.time', 'time.time', ([], {}), '()\n', (3298, 3300), False, 'import time\n'), ((3356, 3367), 'time.time', 'time.time', ([], {}), '()\n', (3365, 3367), False, 'import time\n'), ((3584, 3592), 'aesara.tensor.type.vector', 'vector', ([], {}), '()\n', (3590, 3592), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((3854, 3865), 'time.time', 'time.time', ([], {}), '()\n', (3863, 3865), False, 'import time\n'), ((3903, 3914), 'time.time', 'time.time', ([], {}), '()\n', (3912, 3914), False, 'import time\n'), ((3954, 3965), 'time.time', 'time.time', ([], {}), '()\n', (3963, 3965), False, 'import time\n'), ((4003, 4014), 'time.time', 'time.time', ([], {}), '()\n', (4012, 4014), False, 'import time\n'), ((4468, 4478), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {}), '()\n', (4476, 4478), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((4488, 4512), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)'}), '(allow_gc=False)\n', (4496, 4512), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((4522, 4562), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)', 'use_cloop': '(True)'}), '(allow_gc=False, use_cloop=True)\n', (4530, 4562), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((5877, 5919), 'numpy.array', 'np.array', (['[32.0, 16.0, 1.7857142857142858]'], {}), '([32.0, 16.0, 1.7857142857142858])\n', (5885, 5919), True, 'import numpy as np\n'), ((5344, 5394), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_partial_eval': '(True)', 'use_cloop': '(False)'}), '(allow_partial_eval=True, use_cloop=False)\n', (5352, 5394), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((5964, 6014), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_partial_eval': '(True)', 'use_cloop': '(False)'}), '(allow_partial_eval=True, use_cloop=False)\n', (5972, 6014), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((6467, 6489), 'numpy.asarray', 'np.asarray', (['(1)', '"""int64"""'], {}), "(1, 'int64')\n", (6477, 6489), True, 'import numpy as np\n'), ((6318, 6368), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_partial_eval': '(True)', 'use_cloop': '(False)'}), '(allow_partial_eval=True, use_cloop=False)\n', (6326, 6368), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((8177, 8192), 'aesara.tensor.math.tanh', 'tanh', (['(3 * x + y)'], {}), '(3 * x + y)\n', (8181, 8192), False, 'from aesara.tensor.math import cosh, tanh\n'), ((8195, 8210), 'aesara.tensor.math.cosh', 'cosh', (['(x + 5 * y)'], {}), '(x + 5 * y)\n', (8199, 8210), False, 'from aesara.tensor.math import cosh, tanh\n'), ((8318, 8371), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)', 'lazy': '(False)', 'use_cloop': '(False)'}), '(allow_gc=False, lazy=False, use_cloop=False)\n', (8326, 8371), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((8381, 8433), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(True)', 'lazy': '(False)', 'use_cloop': '(False)'}), '(allow_gc=True, lazy=False, use_cloop=False)\n', (8389, 8433), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((8542, 8596), 'aesara.compile.function.function', 'function', (['[x, y]', 'z'], {'name': '"""test_reduce_memory"""', 'mode': 'm'}), "([x, y], z, name='test_reduce_memory', mode=m)\n", (8550, 8596), False, 'from aesara.compile.function import function\n'), ((9428, 9452), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'use_cloop': '(True)'}), '(use_cloop=True)\n', (9436, 9452), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((9462, 9498), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'use_cloop': '(False)', 'lazy': '(True)'}), '(use_cloop=False, lazy=True)\n', (9470, 9498), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((9508, 9560), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'use_cloop': '(False)', 'lazy': '(False)', 'allow_gc': '(True)'}), '(use_cloop=False, lazy=False, allow_gc=True)\n', (9516, 9560), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((9570, 9623), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'use_cloop': '(False)', 'lazy': '(False)', 'allow_gc': '(False)'}), '(use_cloop=False, lazy=False, allow_gc=False)\n', (9578, 9623), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((9648, 9690), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': '"""fast_compile"""', 'linker': 'lnk'}), "(optimizer='fast_compile', linker=lnk)\n", (9652, 9690), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((9703, 9734), 'aesara.compile.function.function', 'function', (['[x]', '(x + 1)'], {'mode': 'mode'}), '([x], x + 1, mode=mode)\n', (9711, 9734), False, 'from aesara.compile.function import function\n'), ((9748, 9785), 'aesara.compile.function.function', 'function', (['[x]', '((x + 1) * 2)'], {'mode': 'mode'}), '([x], (x + 1) * 2, mode=mode)\n', (9756, 9785), False, 'from aesara.compile.function import function\n'), ((10430, 10457), 'aesara.configdefaults.config.change_flags', 'config.change_flags', ([], {'cxx': '""""""'}), "(cxx='')\n", (10449, 10457), False, 'from aesara.configdefaults import config\n'), ((11428, 11498), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '""".*Apply node that caused the error.*"""'}), "(Exception, match='.*Apply node that caused the error.*')\n", (11441, 11498), False, 'import pytest\n'), ((11735, 11792), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`nodes` and `thunks`.*"""'}), "(ValueError, match='`nodes` and `thunks`.*')\n", (11748, 11792), False, 'import pytest\n'), ((12290, 12367), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`nodes`, `thunks` and `post_thunk_clear`.*"""'}), "(ValueError, match='`nodes`, `thunks` and `post_thunk_clear`.*')\n", (12303, 12367), False, 'import pytest\n'), ((12377, 12469), 'aesara.link.vm.Loop', 'Loop', (['fg', 'fg.apply_nodes', 'thunks', '[]', 'storage_map', 'input_storage', 'output_storage', '{}', '[]'], {}), '(fg, fg.apply_nodes, thunks, [], storage_map, input_storage,\n output_storage, {}, [])\n', (12381, 12469), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((13429, 13442), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (13437, 13442), True, 'import numpy as np\n'), ((14327, 14340), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (14335, 14340), True, 'import numpy as np\n'), ((1849, 1872), 'aesara.ifelse.ifelse', 'ifelse', (['a', '(2 * b)', '(2 * c)'], {}), '(a, 2 * b, 2 * c)\n', (1855, 1872), False, 'from aesara.ifelse import ifelse\n'), ((2115, 2128), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (2123, 2128), True, 'import numpy as np\n'), ((2159, 2174), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (2167, 2174), True, 'import numpy as np\n'), ((4262, 4286), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)'}), '(allow_gc=False)\n', (4270, 4286), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((4739, 4762), 'aesara.ifelse.ifelse', 'ifelse', (['(z[0] > 0)', '(-z)', 'z'], {}), '(z[0] > 0, -z, z)\n', (4745, 4762), False, 'from aesara.ifelse import ifelse\n'), ((4930, 4965), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (4934, 4965), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((4999, 5034), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (5003, 5034), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((5540, 5575), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (5544, 5575), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((6177, 6212), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (6181, 6212), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((7899, 7918), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'lazy': '(True)'}), '(lazy=True)\n', (7907, 7918), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((7938, 7957), 'aesara.compile.io.In', 'In', (['x'], {'mutable': '(True)'}), '(x, mutable=True)\n', (7940, 7957), False, 'from aesara.compile.io import In\n'), ((8463, 8482), 'aesara.compile.mode.Mode', 'Mode', ([], {'linker': 'linker'}), '(linker=linker)\n', (8467, 8482), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((10243, 10278), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (10247, 10278), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((10516, 10541), 'pytest.raises', 'pytest.raises', (['MissingGXX'], {}), '(MissingGXX)\n', (10529, 10541), False, 'import pytest\n'), ((10593, 10618), 'importlib.reload', 'reload', (['aesara.link.c.cvm'], {}), '(aesara.link.c.cvm)\n', (10599, 10618), False, 'from importlib import reload\n'), ((10682, 10736), 'unittest.mock.patch.dict', 'patch.dict', (['"""sys.modules"""', "{'aesara.link.c.cvm': None}"], {}), "('sys.modules', {'aesara.link.c.cvm': None})\n", (10692, 10736), False, 'from unittest.mock import patch\n'), ((10754, 10762), 'aesara.tensor.type.scalar', 'scalar', ([], {}), '()\n', (10760, 10762), False, 'from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors\n'), ((10784, 10824), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)', 'use_cloop': '(True)'}), '(allow_gc=False, use_cloop=True)\n', (10792, 10824), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((13367, 13380), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (13375, 13380), True, 'import numpy as np\n'), ((13382, 13395), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (13390, 13395), True, 'import numpy as np\n'), ((14265, 14278), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (14273, 14278), True, 'import numpy as np\n'), ((14280, 14293), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (14288, 14293), True, 'import numpy as np\n'), ((4353, 4393), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'allow_gc': '(False)', 'use_cloop': '(True)'}), '(allow_gc=False, use_cloop=True)\n', (4361, 4393), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((10843, 10877), 'pytest.raises', 'pytest.raises', (['ModuleNotFoundError'], {}), '(ModuleNotFoundError)\n', (10856, 10877), False, 'import pytest\n'), ((11530, 11565), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (11534, 11565), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((10959, 10994), 'aesara.compile.mode.Mode', 'Mode', ([], {'optimizer': 'None', 'linker': 'linker'}), '(optimizer=None, linker=linker)\n', (10963, 10994), False, 'from aesara.compile.mode import Mode, get_mode\n'), ((1469, 1501), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'callback': 'self.callback'}), '(callback=self.callback)\n', (1477, 1501), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((1919, 1951), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'callback': 'self.callback'}), '(callback=self.callback)\n', (1927, 1951), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n'), ((2431, 2479), 'aesara.link.vm.VMLinker', 'VMLinker', ([], {'c_thunks': 'use_c_thunks', 'use_cloop': '(False)'}), '(c_thunks=use_c_thunks, use_cloop=False)\n', (2439, 2479), False, 'from aesara.link.vm import VM, Loop, Stack, VMLinker\n')] |
# https://github.com/akirbaes/Pixel-Font/blob/master/font_to_data.py
import io
from PIL import Image
from os import path
import json
import numpy as np
""" This file contains function to create font usable by font_manager.py
INPUT: filename of the characters list
there must exist a FILENAME.png image and a FILENAME.txt file containing the characters represented
(pad with spaces if a line doesn't go until the end)
OUTPUT: a FILENAME.json with the character positions """
def generate_data(fontimage_file):
AUTOALIGN_LEFTMOST = True # Otherwise, will align based on where you put the character in the rectangle
if "aligned" in fontimage_file:
AUTOALIGN_LEFTMOST = False
fontimage = Image.open(fontimage_file).convert("RGB")
image_width, image_height = fontimage.size
# INPUT: the characters that appear in the font, in order
filename = fontimage_file[:-4] + ".txt"
with io.open(filename, "r", encoding="utf8") as f:
allchars = f.read()
# print(allchars)
# allchars = "ABCDEFGHIJKLM\nNOPQRSTUVWXYZ"
allchars = allchars.strip("\n")
charlines = allchars.split("\n")
chars_vertical_count = len(charlines)
y_sep = image_height // chars_vertical_count
# determine background color by majority
def majority_color(image):
image_width, image_height = image.size
colors_counter = dict()
for x in range(image_width):
for y in range(image_height):
pixel = image.getpixel((x, y))
colors_counter[pixel] = colors_counter.get(pixel, -1) + 1
maxcount = 0
majority_color = (0, 0, 0)
for color in colors_counter:
if colors_counter[color] > maxcount:
majority_color = color
maxcount = colors_counter[majority_color]
return majority_color
#
background_color = majority_color(fontimage)
print(fontimage_file, background_color)
# background_color = (0,0,0) #if doesn't work
# print("background_color=",background_color)
char_pos_size = dict()
char_pos_size["background"] = background_color
# Determine the boundaries of the character
char_horizontal_count = 0
for line in charlines:
char_horizontal_count = max(char_horizontal_count, len(line))
x_sep = image_width // char_horizontal_count
print("Image of ", image_width, "x", image_height)
print("Characters grid:", char_horizontal_count, "x", chars_vertical_count)
print("Characters box:", x_sep, "x", y_sep)
for j, line in enumerate(charlines):
for i, character in enumerate(line):
charx = i * x_sep
chary = j * y_sep
x0 = x_sep
y0 = y_sep
x1 = 0
y1 = 0
for dy in range(y_sep):
for dx in range(x_sep):
pixel = fontimage.getpixel((charx + dx, chary + dy))
if pixel != background_color:
x0 = min(dx, x0)
y0 = min(dy, y0)
y1 = max(dy, y1)
if ("µ" in fontimage_file) and not (
pixel[1] == 0 and pixel[2] == 0 and pixel[0] > 1
):
dx += 1 # If subpixels, pixels other than red are bigger on the right
x1 = max(dx, x1)
# print(character,charx,chary,x1>x0 and y1>y0)
if x1 >= x0 and y1 >= y0:
char_pos_size[character] = [charx, chary, x0, y0, x1, y1]
# Character fits into this box as [X+X0,Y+Y0,X+X1,Y+Y1]
# charx, chary show the position compared to other characters
font_width = 0
font_height = 0
origin_x = float("inf") # top-left of the box regardless of char size
origin_y = float("inf")
# end_x = 0
# end_y = 0 #Bottom right of the box regardless of char size
for key in char_pos_size:
if len(key) == 1:
cx, cy, x0, y0, x1, y1 = char_pos_size[key]
font_width = max(font_width, x1 - x0 + 1)
font_height = max(font_height, y1 - y0 + 1)
origin_x = min(origin_x, x0)
origin_y = min(origin_y, y0)
# shift everything by origin for neat cut
# hence, x0 and y0 should be 0 most of the time
# except for things like underscore
# so we just include the shift in the size and drop x0,y0
# Because we only really needed one height, and all the widths for our purposes
for key in char_pos_size:
if len(key) == 1:
cx, cy, x0, y0, x1, y1 = char_pos_size[key]
if "mono" in fontimage_file:
# If mono, all characters must have the same width
char_pos_size[key] = (
cx + origin_x,
cy + origin_y,
font_width,
y1 - origin_y + 1,
)
else:
if AUTOALIGN_LEFTMOST:
char_pos_size[key] = (
cx + x0,
cy + origin_y,
x1 - x0 + 1,
y1 - origin_y + 1,
)
else:
char_pos_size[key] = (
cx + origin_x,
cy + origin_y,
x1 - origin_x + 1,
y1 - origin_y + 1,
)
for key in char_pos_size:
if len(key) == 1:
x0, y0, x1, y1 = char_pos_size[key]
font_width = max(font_width, x1 - x0 + 1)
font_height = max(font_height, y1 - y0 + 1)
char_pos_size["width"] = font_width
char_pos_size["height"] = font_height
jsonform = json.dumps(char_pos_size, indent=4, separators=(",", ": "))
# print(repr(jsonform))
with io.open(fontimage_file[:-4] + ".json", "w", encoding="utf8") as f:
f.write(jsonform)
# with io.open(fontimage_file[:-4] + ".json", "r", encoding="utf8") as f:
# newdict = json.load(f)
char_pos_size["background"] = (64, 64, 64)
return char_pos_size
def create_font_template(char_height, char_width, nb_line, nb_col=1):
"""create a grid for a font in the dimensions given"""
color1 = (36, 181, 254)
color2 = (18, 92, 199)
res_array = np.empty((0, char_height * (nb_line + 1), 3), dtype=np.uint8)
for j in range(nb_col):
row_array = np.zeros((char_width, char_height, 3), dtype=np.uint8)
if j % 2 == 0:
row_array[:, :] = color1
else:
row_array[:, :] = color2
for i in range(nb_line):
array_to_add = np.zeros((char_width, char_height, 3), dtype=np.uint8)
if (j + i) % 2 == 0:
array_to_add[:, :] = color2
else:
array_to_add[:, :] = color1
row_array = np.concatenate((row_array, array_to_add), axis=1)
res_array = np.concatenate((res_array, row_array), axis=0)
return Image.fromarray(res_array)
if __name__ == "__main__":
create_font_template(5, 7, 10, 10).save("temp.png")
font_name = "typewriter"
basepath = path.dirname(__file__)
fonts_folder = path.abspath(
path.join(basepath, "..", "..", "..", "resources", "fonts", font_name)
)
font_img_file = path.join(fonts_folder, font_name + ".png")
print(font_img_file)
generate_data(font_img_file)
img = Image.open(font_img_file).convert("RGB")
img.save(font_img_file)
print("done!")
| [
"numpy.empty",
"os.path.dirname",
"numpy.zeros",
"json.dumps",
"PIL.Image.open",
"io.open",
"PIL.Image.fromarray",
"os.path.join",
"numpy.concatenate"
] | [((5786, 5845), 'json.dumps', 'json.dumps', (['char_pos_size'], {'indent': '(4)', 'separators': "(',', ': ')"}), "(char_pos_size, indent=4, separators=(',', ': '))\n", (5796, 5845), False, 'import json\n'), ((6362, 6423), 'numpy.empty', 'np.empty', (['(0, char_height * (nb_line + 1), 3)'], {'dtype': 'np.uint8'}), '((0, char_height * (nb_line + 1), 3), dtype=np.uint8)\n', (6370, 6423), True, 'import numpy as np\n'), ((7045, 7071), 'PIL.Image.fromarray', 'Image.fromarray', (['res_array'], {}), '(res_array)\n', (7060, 7071), False, 'from PIL import Image\n'), ((7203, 7225), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (7215, 7225), False, 'from os import path\n'), ((7364, 7407), 'os.path.join', 'path.join', (['fonts_folder', "(font_name + '.png')"], {}), "(fonts_folder, font_name + '.png')\n", (7373, 7407), False, 'from os import path\n'), ((915, 954), 'io.open', 'io.open', (['filename', '"""r"""'], {'encoding': '"""utf8"""'}), "(filename, 'r', encoding='utf8')\n", (922, 954), False, 'import io\n'), ((5883, 5943), 'io.open', 'io.open', (["(fontimage_file[:-4] + '.json')", '"""w"""'], {'encoding': '"""utf8"""'}), "(fontimage_file[:-4] + '.json', 'w', encoding='utf8')\n", (5890, 5943), False, 'import io\n'), ((6472, 6526), 'numpy.zeros', 'np.zeros', (['(char_width, char_height, 3)'], {'dtype': 'np.uint8'}), '((char_width, char_height, 3), dtype=np.uint8)\n', (6480, 6526), True, 'import numpy as np\n'), ((6987, 7033), 'numpy.concatenate', 'np.concatenate', (['(res_array, row_array)'], {'axis': '(0)'}), '((res_array, row_array), axis=0)\n', (7001, 7033), True, 'import numpy as np\n'), ((7267, 7337), 'os.path.join', 'path.join', (['basepath', '""".."""', '""".."""', '""".."""', '"""resources"""', '"""fonts"""', 'font_name'], {}), "(basepath, '..', '..', '..', 'resources', 'fonts', font_name)\n", (7276, 7337), False, 'from os import path\n'), ((710, 736), 'PIL.Image.open', 'Image.open', (['fontimage_file'], {}), '(fontimage_file)\n', (720, 736), False, 'from PIL import Image\n'), ((6699, 6753), 'numpy.zeros', 'np.zeros', (['(char_width, char_height, 3)'], {'dtype': 'np.uint8'}), '((char_width, char_height, 3), dtype=np.uint8)\n', (6707, 6753), True, 'import numpy as np\n'), ((6917, 6966), 'numpy.concatenate', 'np.concatenate', (['(row_array, array_to_add)'], {'axis': '(1)'}), '((row_array, array_to_add), axis=1)\n', (6931, 6966), True, 'import numpy as np\n'), ((7477, 7502), 'PIL.Image.open', 'Image.open', (['font_img_file'], {}), '(font_img_file)\n', (7487, 7502), False, 'from PIL import Image\n')] |
"""
Generates instances for unit tests.
"""
import numpy as np
from itertools import product
import os.path
from abcvoting import generate
from operator import itemgetter
from abcvoting import abcrules
from abcvoting import fileio
def generate_abc_yaml_testinstances(
batchname,
committeesizes,
num_voters_values,
num_cand_values,
prob_distributions,
av_neq_pav=False,
):
generate.rng = np.random.default_rng(24121838) # seed for numpy RNG
parameter_tuples = []
for committeesize, num_voters, num_cand, prob_distribution in product(
committeesizes, num_voters_values, num_cand_values, prob_distributions
):
if committeesize >= num_cand:
continue
parameter_tuples.append((num_voters, num_cand, prob_distribution, committeesize))
parameter_tuples.sort(key=itemgetter(1))
print(f"Generating {len(parameter_tuples)} instances for batch {batchname}...")
num_instances = 0
for index, (num_voters, num_cand, prob_distribution, committeesize) in enumerate(
parameter_tuples
):
num_instances += 1
# write instance to .abc.yaml file
currdir = os.path.dirname(os.path.abspath(__file__))
filename = currdir + f"/instance{batchname}{index:04d}.abc.yaml"
print(f"generating {filename} ({prob_distribution})")
while True:
profile = generate.random_profile(num_voters, num_cand, prob_distribution)
committees_av = abcrules.compute("av", profile, committeesize, resolute=False)
committees_pav = abcrules.compute("pav", profile, committeesize, resolute=False)
if not av_neq_pav:
break
intersection = set(tuple(sorted(committee)) for committee in committees_pav) & set(
tuple(sorted(committee)) for committee in committees_av
)
if not intersection:
break
rule_instances = []
for rule_id in abcrules.MAIN_RULE_IDS:
rule = abcrules.Rule(rule_id)
# if irresolute (resolute = False) is supported, then "result" should be
# the list of committees returned for resolute=False.
if False in rule.resolute_values:
resolute = False
else:
resolute = True
if rule_id == "rsd":
committees = None # result is random, not sensible for unit tests
elif rule_id == "leximaxphragmen" and (num_cand > 7 or num_voters > 8):
committees = None # too slow
else:
committees = abcrules.compute(rule_id, profile, committeesize, resolute=resolute)
for resolute in rule.resolute_values:
rule_instances.append(
{"rule_id": rule_id, "resolute": resolute, "result": committees}
)
fileio.write_abcvoting_instance_to_yaml_file(
filename,
profile,
committeesize=committeesize,
description=(
f"profile generated via prob_distribution={prob_distribution}, "
f"num_voters={num_voters}, "
f"num_cand={num_cand}"
),
compute_instances=rule_instances,
)
print("Done.")
if __name__ == "__main__":
generate_abc_yaml_testinstances(
batchname="S",
committeesizes=[3, 4],
num_voters_values=[8, 9],
num_cand_values=[6],
prob_distributions=[{"id": "IC", "p": 0.5}],
av_neq_pav=True,
)
generate_abc_yaml_testinstances(
batchname="M",
committeesizes=[3, 4],
num_voters_values=[8, 9, 10, 11],
num_cand_values=[6, 7],
prob_distributions=[
{"id": "IC fixed-size", "setsize": 2},
{"id": "IC fixed-size", "setsize": 3},
],
av_neq_pav=True,
)
generate_abc_yaml_testinstances(
batchname="L",
committeesizes=[3, 4, 5, 6],
num_voters_values=[8, 12, 15],
num_cand_values=[6, 8, 9],
prob_distributions=[
{"id": "IC fixed-size", "setsize": 2},
{"id": "IC fixed-size", "setsize": 3},
{"id": "Truncated Mallows", "setsize": 2, "dispersion": 0.2},
{"id": "Truncated Mallows", "setsize": 3, "dispersion": 0.2},
{"id": "Truncated Mallows", "setsize": 4, "dispersion": 0.2},
{"id": "Truncated Mallows", "setsize": 2, "dispersion": 0.5},
{"id": "Truncated Mallows", "setsize": 3, "dispersion": 0.5},
{"id": "Truncated Mallows", "setsize": 4, "dispersion": 0.5},
{"id": "Truncated Mallows", "setsize": 3, "dispersion": 0.8},
{"id": "Truncated Mallows", "setsize": 4, "dispersion": 0.8},
{"id": "Urn fixed-size", "setsize": 2, "replace": 0.5},
{"id": "Urn fixed-size", "setsize": 3, "replace": 0.5},
],
av_neq_pav=False,
)
generate_abc_yaml_testinstances(
batchname="VL",
committeesizes=[6],
num_voters_values=[24, 25],
num_cand_values=[8, 9],
prob_distributions=[
{"id": "IC", "p": 0.3},
{"id": "IC", "p": 0.4},
{"id": "IC", "p": 0.5},
{"id": "Truncated Mallows", "setsize": 2, "dispersion": 0.5},
{"id": "Truncated Mallows", "setsize": 3, "dispersion": 0.5},
{"id": "Truncated Mallows", "setsize": 5, "dispersion": 0.5},
{"id": "Truncated Mallows", "setsize": 2, "dispersion": 0.8},
{"id": "Truncated Mallows", "setsize": 3, "dispersion": 0.8},
{"id": "Truncated Mallows", "setsize": 5, "dispersion": 0.8},
{"id": "Urn fixed-size", "setsize": 2, "replace": 0.5},
{"id": "Urn fixed-size", "setsize": 3, "replace": 0.5},
{"id": "Urn fixed-size", "setsize": 5, "replace": 0.5},
],
av_neq_pav=False,
)
| [
"abcvoting.abcrules.Rule",
"abcvoting.abcrules.compute",
"numpy.random.default_rng",
"abcvoting.fileio.write_abcvoting_instance_to_yaml_file",
"itertools.product",
"operator.itemgetter",
"abcvoting.generate.random_profile"
] | [((418, 449), 'numpy.random.default_rng', 'np.random.default_rng', (['(24121838)'], {}), '(24121838)\n', (439, 449), True, 'import numpy as np\n'), ((565, 644), 'itertools.product', 'product', (['committeesizes', 'num_voters_values', 'num_cand_values', 'prob_distributions'], {}), '(committeesizes, num_voters_values, num_cand_values, prob_distributions)\n', (572, 644), False, 'from itertools import product\n'), ((2893, 3155), 'abcvoting.fileio.write_abcvoting_instance_to_yaml_file', 'fileio.write_abcvoting_instance_to_yaml_file', (['filename', 'profile'], {'committeesize': 'committeesize', 'description': 'f"""profile generated via prob_distribution={prob_distribution}, num_voters={num_voters}, num_cand={num_cand}"""', 'compute_instances': 'rule_instances'}), "(filename, profile,\n committeesize=committeesize, description=\n f'profile generated via prob_distribution={prob_distribution}, num_voters={num_voters}, num_cand={num_cand}'\n , compute_instances=rule_instances)\n", (2937, 3155), False, 'from abcvoting import fileio\n'), ((839, 852), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (849, 852), False, 'from operator import itemgetter\n'), ((1390, 1454), 'abcvoting.generate.random_profile', 'generate.random_profile', (['num_voters', 'num_cand', 'prob_distribution'], {}), '(num_voters, num_cand, prob_distribution)\n', (1413, 1454), False, 'from abcvoting import generate\n'), ((1483, 1545), 'abcvoting.abcrules.compute', 'abcrules.compute', (['"""av"""', 'profile', 'committeesize'], {'resolute': '(False)'}), "('av', profile, committeesize, resolute=False)\n", (1499, 1545), False, 'from abcvoting import abcrules\n'), ((1575, 1638), 'abcvoting.abcrules.compute', 'abcrules.compute', (['"""pav"""', 'profile', 'committeesize'], {'resolute': '(False)'}), "('pav', profile, committeesize, resolute=False)\n", (1591, 1638), False, 'from abcvoting import abcrules\n'), ((2024, 2046), 'abcvoting.abcrules.Rule', 'abcrules.Rule', (['rule_id'], {}), '(rule_id)\n', (2037, 2046), False, 'from abcvoting import abcrules\n'), ((2622, 2690), 'abcvoting.abcrules.compute', 'abcrules.compute', (['rule_id', 'profile', 'committeesize'], {'resolute': 'resolute'}), '(rule_id, profile, committeesize, resolute=resolute)\n', (2638, 2690), False, 'from abcvoting import abcrules\n')] |
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from .petroequations import vshale_gr, vshale_dn, phi_rho, phie, sw, perm, flow_capacity, phia, sw_pnn
def petrophysics(logs,dfrom,dto,
vshale_gr_kw=None,
vshale_dn_kw=None,
phi_rho_kw=None, #[DenM,DenF,DenColumn,RhoPhiColumn]
phie_kw=None, #[RhophiColumn,NtrColumn,VshColumn,[ListMethod],[listNames]]
sw_kw=None, #[a,m,n,Rw,phicolumn,rtcolumn,Vshcolumn,Rsh=4.0,alpha=0.3,[ListMethod],[listNames]]
perm_kw=None, #[phiecolumn,swcolum,autor,fluid,[ListMethod],[listNames]]
flag_kw=None, #[phicolumn,phicut,vshcol,vshcut,swcol,swcut,kcol,kcut,paycol]
kh_kw=None, #[h,kcol,paycol,khcol]
sw_pnn_kw = None,
return_partial = False
):
"""petrophysics [summary]
Parameters
----------
logs : [type]
[description]
dfrom : [type]
[description]
dto : [type]
[description]
vshale_gr_kw : [type], optional
[description], by default None
vshale_dn_kw : [type], optional
[description], by default None
phi_rho_kw : [type], optional
[description], by default None
return_partial : bool, optional
[description], by default False
Returns
-------
[type]
[description]
"""
logf=logs[(logs.index >= dfrom) & (logs.index<=dto)].copy()
new_cols = []
if sw_pnn_kw is not None:
#name for the new columns
sw_col = sw_pnn_kw.pop('sw_col_name','sw_pnn')
#Name for input columns
phie_name = sw_pnn_kw.pop('phie_name','phie')
vsh_name = sw_pnn_kw.pop('vsh_name','vsh')
sigma_name = sw_pnn_kw.pop('sigma_name','sigma')
sighy = sw_pnn_kw.pop('sighy',20)
sigsh = sw_pnn_kw.pop('sigsh',35)
sigmam = sw_pnn_kw.pop('sigmam',None)
if isinstance(sigmam,str):
_sigmam = logf[sigmam]
elif isinstance(sigmam,(int,float,type(None))):
_sigmam = sigmam
sigw = sw_pnn_kw.pop('sigw',None)
ws = sw_pnn_kw.pop('ws',None)
logf[sw_col] = sw_pnn(logf[phie_name],logf[vsh_name], logf[sigma_name],sighy,sigsh,sigw=sigw, sigmam = _sigmam,ws=ws)
new_cols.append(sw_col)
if vshale_gr_kw is not None:
#name for the new columns
vsh_col_name = vshale_gr_kw.pop('vsh_col_name','vsh_gr')
vsh_type = vshale_gr_kw.pop('type','linear')
new_cols.append(vsh_col_name)
#Name for input columns
gr_col_name = vshale_gr_kw.pop('gr_name',None)
gr_sand = vshale_gr_kw.pop('gr_sand',None)
gr_shale = vshale_gr_kw.pop('gr_shale',None)
logf[vsh_col_name]=vshale_gr(logf[gr_col_name],gr_sand,gr_shale,type=vsh_type)
if vshale_dn_kw is not None:
#name for the new columns
vsh_col_name = vshale_dn_kw.pop('vsh_col_name','vsh_dn')
new_cols.append(vsh_col_name)
#Name for input columns
rho_col_name = vshale_dn_kw.pop('rho_name',None)
ntr_col_name = vshale_dn_kw.pop('ntr_name',None)
logf[vsh_col_name] = vshale_dn(logf[rho_col_name], logf[ntr_col_name], **vshale_dn_kw)
if phi_rho_kw is not None:
#name for the new columns
phi_rho_col_name = phi_rho_kw.pop('phi_rho_name','rho_phi')
new_cols.append(phi_rho_col_name)
#Name for input columns
rho_col_name = phi_rho_kw.pop('rho_name',None)
logf[phi_rho_col_name]=phi_rho(logf[rho_col_name], **phi_rho_kw)
if phie_kw is not None:
#name for the new columns
phie_avg_col_name = phie_kw.pop('phie_avg_col_name','phie_avg')
phie_rho_col_name = phie_kw.pop('phie_rho_col_name','phie_rho')
phie_ntr_col_name = phie_kw.pop('phie_ntr_col_name','phie_ntr')
phi_avg_col_name = phie_kw.pop('phi_avg_col_name','phia')
#Name for input columns
phi_rho_col_name = phie_kw.pop('phi_rho_name',None)
ntr_col_name = phie_kw.pop('ntr_name',None)
vsh_col_name = phie_kw.pop('vsh_name',None)
if (phi_rho_col_name is not None) & (ntr_col_name is not None):
logf[phi_avg_col_name] = phia(logf[phi_rho_col_name],logf[ntr_col_name], **phie_kw)
logf[phie_avg_col_name]=phie(logf[phi_avg_col_name],logf[vsh_col_name])
logf[phie_rho_col_name]=phie(logf[phi_rho_col_name],logf[vsh_col_name])
logf[phie_ntr_col_name]=phie(logf[ntr_col_name],logf[vsh_col_name])
new_cols.extend([phi_avg_col_name,phie_avg_col_name,phie_rho_col_name,phie_ntr_col_name])
elif phi_rho_col_name is not None:
logf[phie_rho_col_name]=phie(logf[phi_rho_col_name],logf[vsh_col_name])
new_cols.append(phie_rho_col_name)
elif ntr_col_name is not None:
logf[phie_ntr_col_name]=phie(logf[ntr_col_name],logf[vsh_col_name])
new_cols.append(phie_ntr_col_name)
if sw_kw is not None:
#Name for input columns
rt_col_name = sw_kw.pop('rt_name',None)
phi_col_name = sw_kw.pop('phi_name',None)
vsh_col_name = sw_kw.pop('vsh_name',None)
sw_kw['vsh_curve'] = logf[vsh_col_name] if vsh_col_name!=None else None
rw = sw_kw.pop('rw', None)
methods = sw_kw.pop('methods',['archie'])
#name for the new columns. List of curve names depending on the method
sw_cols_name = sw_kw.pop('sw_cols_name',methods)
for i,method in enumerate(methods):
logf['sw_' + sw_cols_name[i]] = sw(logf[rt_col_name],logf[phi_col_name],rw,method=method, **sw_kw)
new_cols.append(f'sw_{sw_cols_name[i]}')
if perm_kw is not None:
#Name for input columns
phi_col_name = perm_kw.pop('phi_name',None)
swir = perm_kw.pop('swir', None)
authors = perm_kw.pop('authors',['timur'])
fluid = perm_kw.pop('fluid','oil')
#name for the new columns. List of curve names depending on the method
perm_cols_name = sw_kw.pop('perm_cols_name',authors)
for i,author in enumerate(authors):
logf['k_' + perm_cols_name[i]] = perm(logf[phi_col_name],swir,author=author,fluid=fluid)
new_cols.append(perm_cols_name[i])
if flag_kw is not None:
#name for the new columns.
sand_flag_col_name = flag_kw.pop('sand_flag_name','sand_flag')
reservoir_flag_col_name = flag_kw.pop('reservoir_flag_name','reservoir_flag')
pay_flag_col_name = flag_kw.pop('pay_flag_name','pay_flag')
#Name for input columns
vsh_col_name = flag_kw.pop('vsh_name',None)
phi_col_name = flag_kw.pop('phi_name',None)
sw_col_name = flag_kw.pop('sw_name',None)
#Name for cutoffs
vsh_cutoff = flag_kw.pop('vsh_cutoff',0)
phi_cutoff = flag_kw.pop('phi_cutoff',0)
sw_cutoff = flag_kw.pop('sw_cutoff',0)
#whichs flags
method = flag_kw.pop('which',None)
if method=='pay':
logf[sand_flag_col_name] = (logf[vsh_col_name] <= vsh_cutoff)*1
logf[reservoir_flag_col_name] = (logf[phi_col_name] >= phi_cutoff)*logf[sand_flag_col_name]
logf[pay_flag_col_name] = (logf[sw_col_name] <= sw_cutoff)*logf[reservoir_flag_col_name]
new_cols.extend([sand_flag_col_name,reservoir_flag_col_name,pay_flag_col_name])
elif method=='reservoir':
logf[sand_flag_col_name] = (logf[vsh_col_name] <= vsh_cutoff)*1
logf[reservoir_flag_col_name] = (logf[phi_col_name] >= phi_cutoff)*logf[sand_flag_col_name]
new_cols.extend([sand_flag_col_name,reservoir_flag_col_name])
elif method=='sand':
logf[sand_flag_col_name] = (logf[vsh_col_name] <= vsh_cutoff)*1
new_cols.append(sand_flag_col_name)
if kh_kw is not None:
#name for the new columns.
kh_col_name = kh_kw.pop('kh_name','kh')
kh_norm_col_name = kh_kw.pop('khnorm_name','kh_norm')
#Name for input columns
perm_col_name = kh_kw.pop('perm_name',None)
pay_col_name = kh_kw.pop('pay_name','pay_flag')
#
h=np.mean(np.diff(logf.index))
logf[kh_col_name],logf[kh_norm_col_name]=flow_capacity(h,logf[perm_col_name],logf[pay_col_name])
new_cols.extend([kh_norm_col_name,kh_col_name])
if return_partial:
return logf
else:
log_merged = logs.merge(logf[new_cols], how='left', left_index=True, right_index=True)
return log_merged | [
"numpy.diff"
] | [((8390, 8409), 'numpy.diff', 'np.diff', (['logf.index'], {}), '(logf.index)\n', (8397, 8409), True, 'import numpy as np\n')] |
# Copyright (C) 2021 <NAME>, <NAME>, and Politecnico di Milano. All rights reserved.
# Licensed under the Apache 2.0 License.
from scipy.optimize import root_scalar
import numpy as np
import matplotlib.pyplot as plt
from ..dataset.regression import ActionDist
def estimate_lambda(weights, n, delta, alpha=2, return_info=False):
eta = estimate_lambda2(weights, np.sqrt(n), delta, alpha, return_info)
if return_info:
return eta[0] / n ** .25, eta[1]
return eta / n ** .25
def estimate_lambda2(weights, n, delta, alpha=2, return_info=False):
if alpha != 2:
raise NotImplementedError
rhs = 2 * np.log(1 / delta) / (3 * n)
def fun(x):
return x ** 2 * np.mean(weights ** 2 / (1 - x + x * weights) ** 2) - rhs
def deriv(x):
return 2 * x * np.mean(weights ** 2 / (1 - x + x * weights) ** 3)
def f(x):
if x < 0 or x > 1:
return np.inf, np.inf
#print(x, fun(x))
return fun(x), deriv(x)
guess1 = 1
res1 = root_scalar(f, x0=guess1, fprime=True)
if return_info:
if res1.converged:
return np.clip(res1.root, 0, 1), res1.converged
return 0, res1.converged
else:
if res1.converged:
return np.clip(res1.root, 0, 1)
return 0
def compute_renyi_divergence(action_dist, behav_action_dist, alpha=2):
if alpha != 2:
raise NotImplementedError
if isinstance(action_dist, ActionDist): #We assume it's Gaussian
mean_i = action_dist.preds
mean_j = behav_action_dist.preds
var_i = action_dist.sigma_e ** 2
var_j = behav_action_dist.sigma_e ** 2
var_star = alpha*var_j + (1 - alpha)*var_i
contextual_non_exp_Renyi = np.log(var_j ** .5 / var_i ** .5) + 1 / (2 * (alpha - 1)) * np.log(var_j / var_star) + alpha * (
mean_i - mean_j) ** 2 / (2 * var_star)
non_exp_Renyi = np.mean(contextual_non_exp_Renyi)
exp_Renyi = np.exp(non_exp_Renyi)
elif isinstance(action_dist, np.ndarray): #We assume it's categorical
action_dist2 = np.power(action_dist, 2)
contextual_Renyi = np.sum(action_dist2 / behav_action_dist, axis=1)
exp_Renyi = np.mean(contextual_Renyi)
else:
raise ValueError
return exp_Renyi
def find_minimum_bound(d, n, delta):
t = np.log(1 / delta)
def fun(x):
el = d + x - d * x
den = 6 * n * x ** 2 * np.sqrt(el)
num = 3 * np.sqrt(2 * n * t) * (1 - d) * x ** 2 + \
3 * n * np.sqrt(d - 1) * (1 - d) * x ** 3 - \
4 * t * np.sqrt(el) + 6 * n * x ** 2 * el * np.sqrt(d - 1)
return num / den
def deriv(x):
el = d + x - d * x
n1 = 4 * t
d1 = 3 * n * x ** 3
n2 = - (d - 1) ** 2 * t
d2 = 2 * np.sqrt(2 * n * t) * el ** (3 / 2)
n3 = - (d - 1) ** (5 / 2) * x
d3 = 4 * el ** (3 / 2)
n4 = - (d - 1) ** (3 / 2)
d4 = np.sqrt(el)
return n1 / d1 + n2 / d2 + n3 / d3 + n4 / d4
def f(x):
if x < 0 or x > 1:
return np.inf, np.inf
#print(x, fun(x))
return fun(x), deriv(x)
guess1 = np.sqrt(2 * t / (3 * d * n))
res1 = root_scalar(f, x0=guess1, fprime=True)
# guess2 = 1
# res2 = root_scalar(f, x0=guess2, fprime=True)
if res1.converged:
root = res1.root
if deriv(root) > 0:
return root
return None
# print(res1)
# print(res1.root, deriv(res1.root))
# print(res2.root, deriv(res2.root))
def find_optimal_lambda(d, n, delta):
t = np.log(1 / delta)
def bound(x):
el = d + x - d * x
return 2 * t / (3 * n * x) + x * np.sqrt((d - 1) * el) + np.sqrt(2 * t * el / n)
lambda_wmin = find_minimum_bound(d, n, delta)
if lambda_wmin is None:
return 1.
else:
if bound(lambda_wmin) < bound(1.):
return np.clip(lambda_wmin, 0, 1)
else:
return 1.
| [
"numpy.sum",
"numpy.log",
"numpy.power",
"scipy.optimize.root_scalar",
"numpy.clip",
"numpy.mean",
"numpy.exp",
"numpy.sqrt"
] | [((1013, 1051), 'scipy.optimize.root_scalar', 'root_scalar', (['f'], {'x0': 'guess1', 'fprime': '(True)'}), '(f, x0=guess1, fprime=True)\n', (1024, 1051), False, 'from scipy.optimize import root_scalar\n'), ((2344, 2361), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (2350, 2361), True, 'import numpy as np\n'), ((3179, 3207), 'numpy.sqrt', 'np.sqrt', (['(2 * t / (3 * d * n))'], {}), '(2 * t / (3 * d * n))\n', (3186, 3207), True, 'import numpy as np\n'), ((3219, 3257), 'scipy.optimize.root_scalar', 'root_scalar', (['f'], {'x0': 'guess1', 'fprime': '(True)'}), '(f, x0=guess1, fprime=True)\n', (3230, 3257), False, 'from scipy.optimize import root_scalar\n'), ((3595, 3612), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (3601, 3612), True, 'import numpy as np\n'), ((368, 378), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (375, 378), True, 'import numpy as np\n'), ((1920, 1953), 'numpy.mean', 'np.mean', (['contextual_non_exp_Renyi'], {}), '(contextual_non_exp_Renyi)\n', (1927, 1953), True, 'import numpy as np\n'), ((1974, 1995), 'numpy.exp', 'np.exp', (['non_exp_Renyi'], {}), '(non_exp_Renyi)\n', (1980, 1995), True, 'import numpy as np\n'), ((2965, 2976), 'numpy.sqrt', 'np.sqrt', (['el'], {}), '(el)\n', (2972, 2976), True, 'import numpy as np\n'), ((633, 650), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (639, 650), True, 'import numpy as np\n'), ((801, 851), 'numpy.mean', 'np.mean', (['(weights ** 2 / (1 - x + x * weights) ** 3)'], {}), '(weights ** 2 / (1 - x + x * weights) ** 3)\n', (808, 851), True, 'import numpy as np\n'), ((1251, 1275), 'numpy.clip', 'np.clip', (['res1.root', '(0)', '(1)'], {}), '(res1.root, 0, 1)\n', (1258, 1275), True, 'import numpy as np\n'), ((2093, 2117), 'numpy.power', 'np.power', (['action_dist', '(2)'], {}), '(action_dist, 2)\n', (2101, 2117), True, 'import numpy as np\n'), ((2145, 2193), 'numpy.sum', 'np.sum', (['(action_dist2 / behav_action_dist)'], {'axis': '(1)'}), '(action_dist2 / behav_action_dist, axis=1)\n', (2151, 2193), True, 'import numpy as np\n'), ((2214, 2239), 'numpy.mean', 'np.mean', (['contextual_Renyi'], {}), '(contextual_Renyi)\n', (2221, 2239), True, 'import numpy as np\n'), ((2437, 2448), 'numpy.sqrt', 'np.sqrt', (['el'], {}), '(el)\n', (2444, 2448), True, 'import numpy as np\n'), ((3724, 3747), 'numpy.sqrt', 'np.sqrt', (['(2 * t * el / n)'], {}), '(2 * t * el / n)\n', (3731, 3747), True, 'import numpy as np\n'), ((3918, 3944), 'numpy.clip', 'np.clip', (['lambda_wmin', '(0)', '(1)'], {}), '(lambda_wmin, 0, 1)\n', (3925, 3944), True, 'import numpy as np\n'), ((702, 752), 'numpy.mean', 'np.mean', (['(weights ** 2 / (1 - x + x * weights) ** 2)'], {}), '(weights ** 2 / (1 - x + x * weights) ** 2)\n', (709, 752), True, 'import numpy as np\n'), ((1121, 1145), 'numpy.clip', 'np.clip', (['res1.root', '(0)', '(1)'], {}), '(res1.root, 0, 1)\n', (1128, 1145), True, 'import numpy as np\n'), ((1740, 1775), 'numpy.log', 'np.log', (['(var_j ** 0.5 / var_i ** 0.5)'], {}), '(var_j ** 0.5 / var_i ** 0.5)\n', (1746, 1775), True, 'import numpy as np\n'), ((2631, 2645), 'numpy.sqrt', 'np.sqrt', (['(d - 1)'], {}), '(d - 1)\n', (2638, 2645), True, 'import numpy as np\n'), ((2814, 2832), 'numpy.sqrt', 'np.sqrt', (['(2 * n * t)'], {}), '(2 * n * t)\n', (2821, 2832), True, 'import numpy as np\n'), ((1800, 1824), 'numpy.log', 'np.log', (['(var_j / var_star)'], {}), '(var_j / var_star)\n', (1806, 1824), True, 'import numpy as np\n'), ((2595, 2606), 'numpy.sqrt', 'np.sqrt', (['el'], {}), '(el)\n', (2602, 2606), True, 'import numpy as np\n'), ((3700, 3721), 'numpy.sqrt', 'np.sqrt', (['((d - 1) * el)'], {}), '((d - 1) * el)\n', (3707, 3721), True, 'import numpy as np\n'), ((2467, 2485), 'numpy.sqrt', 'np.sqrt', (['(2 * n * t)'], {}), '(2 * n * t)\n', (2474, 2485), True, 'import numpy as np\n'), ((2533, 2547), 'numpy.sqrt', 'np.sqrt', (['(d - 1)'], {}), '(d - 1)\n', (2540, 2547), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from keras.layers.pooling import MaxPooling2D
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping, Callback
from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU,GlobalAveragePooling2D, regularizers
from keras.layers.convolutional import Convolution2D, Cropping2D, Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import adam
from sklearn.utils import shuffle
from keras.utils import np_utils
import time, cv2, glob
global inputShape,size
def kerasModel4():
model = Sequential()
model.add(Conv2D(16, (8, 8), strides=(4, 4), padding='valid', input_shape=(size,size,1)))
model.add(Activation('relu'))
model.add(Conv2D(32, (5, 5), padding="same"))
model.add(Activation('relu'))
model.add(GlobalAveragePooling2D())
# model.add(Dropout(.2))
# model.add(Activation('relu'))
# model.add(Dense(1024))
# model.add(Dropout(.5))
model.add(Dense(512))
model.add(Dropout(.1))
model.add(Activation('relu'))
# model.add(Dense(256))
# model.add(Dropout(.5))
# model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))
return model
size=100
## load Training data : pothole
potholeTrainImages = glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.jpg")
potholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.jpeg"))
potholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.png"))
train1 = [cv2.imread(img,0) for img in potholeTrainImages]
for i in range(0,len(train1)):
train1[i] = cv2.resize(train1[i],(size,size))
temp1 = np.asarray(train1)
# ## load Training data : non-pothole
nonPotholeTrainImages = glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.jpg")
# nonPotholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.jpeg"))
# nonPotholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.png"))
train2 = [cv2.imread(img,0) for img in nonPotholeTrainImages]
# train2[train2 != np.array(None)]
for i in range(0,len(train2)):
train2[i] = cv2.resize(train2[i],(size,size))
temp2 = np.asarray(train2)
## load Testing data : non-pothole
nonPotholeTestImages = glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/test/Plain/*.jpg")
# nonPotholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.jpeg"))
# nonPotholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.png"))
test2 = [cv2.imread(img,0) for img in nonPotholeTestImages]
# train2[train2 != np.array(None)]
for i in range(0,len(test2)):
test2[i] = cv2.resize(test2[i],(size,size))
temp4 = np.asarray(test2)
## load Testing data : potholes
potholeTestImages = glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/test/Pothole/*.jpg")
# nonPotholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.jpeg"))
# nonPotholeTrainImages.extend(glob.glob("C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.png"))
test1 = [cv2.imread(img,0) for img in potholeTestImages]
# train2[train2 != np.array(None)]
for i in range(0,len(test1)):
test1[i] = cv2.resize(test1[i],(size,size))
temp3 = np.asarray(test1)
X_train = []
X_train.extend(temp1)
X_train.extend(temp2)
X_train = np.asarray(X_train)
X_test = []
X_test.extend(temp3)
X_test.extend(temp4)
X_test = np.asarray(X_test)
y_train1 = np.ones([temp1.shape[0]],dtype = int)
y_train2 = np.zeros([temp2.shape[0]],dtype = int)
y_test1 = np.ones([temp3.shape[0]],dtype = int)
y_test2 = np.zeros([temp4.shape[0]],dtype = int)
print(y_train1[0])
print(y_train2[0])
print(y_test1[0])
print(y_test2[0])
y_train = []
y_train.extend(y_train1)
y_train.extend(y_train2)
y_train = np.asarray(y_train)
y_test = []
y_test.extend(y_test1)
y_test.extend(y_test2)
y_test = np.asarray(y_test)
X_train,y_train = shuffle(X_train,y_train)
X_test,y_test = shuffle(X_test,y_test)
# X_train.reshape([-1,50,50,1])
# X_test.reshape([-1,50,50,1])/
X_train = X_train.reshape(X_train.shape[0], size, size, 1)
X_test = X_test.reshape(X_test.shape[0], size, size, 1)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print("train shape X", X_train.shape)
print("train shape y", y_train.shape)
inputShape = (size, size, 1)
model = kerasModel4()
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_train, y_train, epochs=500,validation_split=0.1)
metrics = model.evaluate(X_test, y_test)
for metric_i in range(len(model.metrics_names)):
metric_name = model.metrics_names[metric_i]
metric_value = metrics[metric_i]
print('{}: {}'.format(metric_name, metric_value))
print("Saving model weights and configuration file")
model.save('sample.h5')
model_json = model.to_json()
with open("truesample.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("truesample.h5")
print("Saved model to disk") | [
"keras.layers.Activation",
"keras.layers.Dropout",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"keras.layers.GlobalAveragePooling2D",
"cv2.imread",
"keras.utils.np_utils.to_categorical",
"keras.layers.convolutional.Conv2D",
"keras.layers.Dense",
"glob.glob",
"keras.models.Sequential",
"skl... | [((1514, 1620), 'glob.glob', 'glob.glob', (['"""C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.jpg"""'], {}), "(\n 'C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.jpg'\n )\n", (1523, 1620), False, 'import time, cv2, glob\n'), ((2009, 2027), 'numpy.asarray', 'np.asarray', (['train1'], {}), '(train1)\n', (2019, 2027), True, 'import numpy as np\n'), ((2093, 2197), 'glob.glob', 'glob.glob', (['"""C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.jpg"""'], {}), "(\n 'C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Plain/*.jpg'\n )\n", (2102, 2197), False, 'import time, cv2, glob\n'), ((2629, 2647), 'numpy.asarray', 'np.asarray', (['train2'], {}), '(train2)\n', (2639, 2647), True, 'import numpy as np\n'), ((2709, 2812), 'glob.glob', 'glob.glob', (['"""C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/test/Plain/*.jpg"""'], {}), "(\n 'C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/test/Plain/*.jpg'\n )\n", (2718, 2812), False, 'import time, cv2, glob\n'), ((3239, 3256), 'numpy.asarray', 'np.asarray', (['test2'], {}), '(test2)\n', (3249, 3256), True, 'import numpy as np\n'), ((3311, 3416), 'glob.glob', 'glob.glob', (['"""C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/test/Pothole/*.jpg"""'], {}), "(\n 'C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/test/Pothole/*.jpg'\n )\n", (3320, 3416), False, 'import time, cv2, glob\n'), ((3840, 3857), 'numpy.asarray', 'np.asarray', (['test1'], {}), '(test1)\n', (3850, 3857), True, 'import numpy as np\n'), ((3927, 3946), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (3937, 3946), True, 'import numpy as np\n'), ((4011, 4029), 'numpy.asarray', 'np.asarray', (['X_test'], {}), '(X_test)\n', (4021, 4029), True, 'import numpy as np\n'), ((4046, 4082), 'numpy.ones', 'np.ones', (['[temp1.shape[0]]'], {'dtype': 'int'}), '([temp1.shape[0]], dtype=int)\n', (4053, 4082), True, 'import numpy as np\n'), ((4095, 4132), 'numpy.zeros', 'np.zeros', (['[temp2.shape[0]]'], {'dtype': 'int'}), '([temp2.shape[0]], dtype=int)\n', (4103, 4132), True, 'import numpy as np\n'), ((4144, 4180), 'numpy.ones', 'np.ones', (['[temp3.shape[0]]'], {'dtype': 'int'}), '([temp3.shape[0]], dtype=int)\n', (4151, 4180), True, 'import numpy as np\n'), ((4192, 4229), 'numpy.zeros', 'np.zeros', (['[temp4.shape[0]]'], {'dtype': 'int'}), '([temp4.shape[0]], dtype=int)\n', (4200, 4229), True, 'import numpy as np\n'), ((4380, 4399), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (4390, 4399), True, 'import numpy as np\n'), ((4468, 4486), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (4478, 4486), True, 'import numpy as np\n'), ((4507, 4532), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (4514, 4532), False, 'from sklearn.utils import shuffle\n'), ((4548, 4571), 'sklearn.utils.shuffle', 'shuffle', (['X_test', 'y_test'], {}), '(X_test, y_test)\n', (4555, 4571), False, 'from sklearn.utils import shuffle\n'), ((4762, 4794), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train'], {}), '(y_train)\n', (4785, 4794), False, 'from keras.utils import np_utils\n'), ((4804, 4835), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test'], {}), '(y_test)\n', (4827, 4835), False, 'from keras.utils import np_utils\n'), ((731, 743), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (741, 743), False, 'from keras.models import Sequential, Model\n'), ((1637, 1744), 'glob.glob', 'glob.glob', (['"""C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.jpeg"""'], {}), "(\n 'C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.jpeg'\n )\n", (1646, 1744), False, 'import time, cv2, glob\n'), ((1762, 1868), 'glob.glob', 'glob.glob', (['"""C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.png"""'], {}), "(\n 'C:/Users/anant/Desktop/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.png'\n )\n", (1771, 1868), False, 'import time, cv2, glob\n'), ((1871, 1889), 'cv2.imread', 'cv2.imread', (['img', '(0)'], {}), '(img, 0)\n', (1881, 1889), False, 'import time, cv2, glob\n'), ((1967, 2002), 'cv2.resize', 'cv2.resize', (['train1[i]', '(size, size)'], {}), '(train1[i], (size, size))\n', (1977, 2002), False, 'import time, cv2, glob\n'), ((2453, 2471), 'cv2.imread', 'cv2.imread', (['img', '(0)'], {}), '(img, 0)\n', (2463, 2471), False, 'import time, cv2, glob\n'), ((2587, 2622), 'cv2.resize', 'cv2.resize', (['train2[i]', '(size, size)'], {}), '(train2[i], (size, size))\n', (2597, 2622), False, 'import time, cv2, glob\n'), ((3067, 3085), 'cv2.imread', 'cv2.imread', (['img', '(0)'], {}), '(img, 0)\n', (3077, 3085), False, 'import time, cv2, glob\n'), ((3198, 3232), 'cv2.resize', 'cv2.resize', (['test2[i]', '(size, size)'], {}), '(test2[i], (size, size))\n', (3208, 3232), False, 'import time, cv2, glob\n'), ((3671, 3689), 'cv2.imread', 'cv2.imread', (['img', '(0)'], {}), '(img, 0)\n', (3681, 3689), False, 'import time, cv2, glob\n'), ((3799, 3833), 'cv2.resize', 'cv2.resize', (['test1[i]', '(size, size)'], {}), '(test1[i], (size, size))\n', (3809, 3833), False, 'import time, cv2, glob\n'), ((762, 847), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(16)', '(8, 8)'], {'strides': '(4, 4)', 'padding': '"""valid"""', 'input_shape': '(size, size, 1)'}), "(16, (8, 8), strides=(4, 4), padding='valid', input_shape=(size, size, 1)\n )\n", (768, 847), False, 'from keras.layers.convolutional import Convolution2D, Cropping2D, Conv2D\n'), ((860, 878), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (870, 878), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n'), ((898, 932), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(5, 5)'], {'padding': '"""same"""'}), "(32, (5, 5), padding='same')\n", (904, 932), False, 'from keras.layers.convolutional import Convolution2D, Cropping2D, Conv2D\n'), ((952, 970), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (962, 970), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n'), ((990, 1014), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (1012, 1014), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n'), ((1173, 1183), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (1178, 1183), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n'), ((1203, 1215), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (1210, 1215), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n'), ((1234, 1252), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1244, 1252), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n'), ((1377, 1385), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (1382, 1385), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n'), ((1405, 1426), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1415, 1426), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU, GlobalAveragePooling2D, regularizers\n')] |
#!/usr/bin/python3
'''
Background Reading to understand this tool ----
Paper: Portfolio Selection - <NAME> 1952 URL:
https://www.math.ust.hk/~maykwok/courses/ma362/07F/markowitz_JF.pdf
https://www.investopedia.com/terms/e/efficientfrontier.asp
https://en.wikipedia.org/wiki/Efficient_frontier
https://en.wikipedia.org/wiki/Markowitz_model
The idea is that there is an efficent set of portfolio containing different securities
with different weights of investments and for each amount of risk investor is willing to indure.
This set of efficient portfolios can be calculated and discovered. This script helps us understand how!
Enjoy!
'''
'''
Description:
------------
This Python script calculates Markowitz Efficient Frontier
API Used: Yahoo Finance
I am studying the efficiency (Markowitz-wise) of 500 portfolios containing only
stocks of Apple & Ford with many random weights for each portfolio
Duration: Data from 2010-1-1 till now
Requirements:
------------
Make sure that Pandas, pandas_datareader, numpy, matplotlib and xlrd are installed
no need for anything else
Usage:
-----
python Calculate_Markowitz_Efficient_Frontier.py
Dr. <NAME>
Enjoy!
'''
'''
Some famous companies stocks tikers to work with
------------------------------------------------
Apple AAPL
Procter & Gamble Co PG
Microsoft Corporation MSFT
Exxon Mobil Corporation XOM
BP plc BP
AT&T Inc. T
Ford Motor Company F
General Electric Company GE
Alphabet Inc Class A (Google) GOOGL
'''
import numpy as np
import pandas as pd
from pandas_datareader import data as web
import matplotlib.pyplot as plt
# Ford is F and Apple is AAPL
Stock_tickers = ['F', 'AAPL']
ComparisionofEfficiency = pd.DataFrame()
# Duration: 2010-1-1 till now from Yahoo Finance
for ticker in Stock_tickers:
ComparisionofEfficiency[ticker] = web.DataReader(ticker, data_source='yahoo', start='2010-1-1')['Adj Close']
# Normalising to 100 and ploting the data of both stock
(ComparisionofEfficiency / ComparisionofEfficiency.iloc[0] * 100).plot(figsize=(10, 5))
# Plot Title
plt.title('Comparing Apple with Ford - Normalised to 100')
# X-Axis Label
# plt.xlabel('Dates', fontsize=12)
plt.xlabel('Dates')
# Y-Axis Label
plt.ylabel('Adjusted Closing Prices')
# Saving the plot - specifying high DPI
plt.savefig('Comparision_Normalised.png')
plt.savefig('Comparision_Normalised300DPI.png', dpi=300)
# Uncomment the following if you want to see the plot during the execution of the script
# plt.show()
# Calculating their logarithmic rate of returns
Logarithmic_Returns = np.log(ComparisionofEfficiency / ComparisionofEfficiency.shift(1))
# print(Logarithmic_Returns.head())
print("\nAnnual Logarithmic Averages of Returns")
# It is important to know the number of trading days in a year
# "The NYSE and NASDAQ average about 253 trading days a year.""
# "This is from 365.25 (days on average per year) * 5/7 (proportion work days per week) - 6 (weekday holidays) - 3*5/7 (fixed date holidays) = 252.75 ≈ 253."
print(Logarithmic_Returns.mean() * 253)
Logarithmic_Returns_Variance_Annual = Logarithmic_Returns.var() * 253
print("\nAnnual Logarithmic Returns Variance")
print(Logarithmic_Returns_Variance_Annual)
print("\nAnnual Logarithmic Covariance Matrix between securities ----")
Covariance_Matrix_Covariance_Annual = Logarithmic_Returns.cov() * 253
print(Covariance_Matrix_Covariance_Annual)
print("\nChecking the correlation between securities (Logarithmic)")
Correlation_Matrix = Logarithmic_Returns.corr()
print(Correlation_Matrix)
## Suppose I have a portfolio containing Apple and Ford with different weights
# In other words, we have invested 45% worth of stocks in Ford and 65% in Apple
Portfolio_Weights = np.array([0.45, 0.65])
print("\nExpected Annual Portfolio Return ----")
Expected_Portfolio_Return_Annually = np.sum(Portfolio_Weights * Logarithmic_Returns.mean())*253
print(Expected_Portfolio_Return_Annually)
print("\nExpected Annual Portfolio Variance ----")
Expected_Portfolio_Variance_Annually = np.dot(Portfolio_Weights.T, np.dot(Logarithmic_Returns.cov() * 253, Portfolio_Weights))
print(Expected_Portfolio_Variance_Annually)
print("\nExpected Annual Portfolio Volatility ----")
Expected_Portfolio_Volatility_Annually = (np.dot(Portfolio_Weights.T, np.dot(Logarithmic_Returns.cov() * 253, Portfolio_Weights))) ** 0.5
print(Expected_Portfolio_Volatility_Annually)
#### Markowitz Efficient Frontier Calculation for 500 portfolios of random weights' combinations ####
# Per example: Portfolio 1 could be 1% investment in Ford, 99% investment in Apple
# Portfolio 2: could be 34% in Ford, 66% in Apple
# etc..
Portfolio_Returns = []
Portfolio_Volatilities = []
for instance in range (500):
# The following two lines create two random numbers that sums to 1 ie 100%
Current_Portfolio_Random_Weights = np.random.random(len(Stock_tickers))
Current_Portfolio_Random_Weights = Current_Portfolio_Random_Weights/np.sum(Current_Portfolio_Random_Weights)
# Logarithmic_Returns for the two stocks is calculated before in the script
# Calculating each return & appending it to the corresponding list
Portfolio_Returns.append(np.sum(Current_Portfolio_Random_Weights * Logarithmic_Returns.mean()) * 253)
# Calculating each Volatility & appending it to the corresponding list
Portfolio_Volatilities.append(np.sqrt(np.dot(Current_Portfolio_Random_Weights.T, np.dot(Logarithmic_Returns.cov() * 253, Current_Portfolio_Random_Weights))))
# Transforming Python lists to numpy arrays
Portfolio_Returns = np.array(Portfolio_Returns)
Portfolio_Volatilities = np.array(Portfolio_Volatilities)
AllPortfolios = pd.DataFrame({'Portfolio Return': Portfolio_Returns, 'Portfolio Volatility': Portfolio_Volatilities})
# print(AllPortfolios.head())
# print(AllPortfolios.tail())
############# Plotting ###############
AllPortfolios.plot(x='Portfolio Volatility', y='Portfolio Return', kind='scatter', figsize=(10, 6))
# Plot Title
plt.title('Markowitz Efficient Frontier')
# X-Axis Label
plt.xlabel('Expected Portfolio Volatility')
# Y-Axis Label
plt.ylabel('Expected Portfolio Return')
# Saving the plot - specifying high DPI
plt.savefig('Markowitz.png')
plt.savefig('Markowitz300DPI.png', dpi=300)
# Uncomment the following if you want to see the plot during execution of the script
plt.show() | [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.sum",
"pandas_datareader.data.DataReader",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((1885, 1899), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1897, 1899), True, 'import pandas as pd\n'), ((2259, 2317), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparing Apple with Ford - Normalised to 100"""'], {}), "('Comparing Apple with Ford - Normalised to 100')\n", (2268, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2374, 2393), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dates"""'], {}), "('Dates')\n", (2384, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2414, 2451), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Adjusted Closing Prices"""'], {}), "('Adjusted Closing Prices')\n", (2424, 2451), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2537), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Comparision_Normalised.png"""'], {}), "('Comparision_Normalised.png')\n", (2507, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2539, 2595), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Comparision_Normalised300DPI.png"""'], {'dpi': '(300)'}), "('Comparision_Normalised300DPI.png', dpi=300)\n", (2550, 2595), True, 'import matplotlib.pyplot as plt\n'), ((3951, 3973), 'numpy.array', 'np.array', (['[0.45, 0.65]'], {}), '([0.45, 0.65])\n', (3959, 3973), True, 'import numpy as np\n'), ((5811, 5838), 'numpy.array', 'np.array', (['Portfolio_Returns'], {}), '(Portfolio_Returns)\n', (5819, 5838), True, 'import numpy as np\n'), ((5865, 5897), 'numpy.array', 'np.array', (['Portfolio_Volatilities'], {}), '(Portfolio_Volatilities)\n', (5873, 5897), True, 'import numpy as np\n'), ((5917, 6022), 'pandas.DataFrame', 'pd.DataFrame', (["{'Portfolio Return': Portfolio_Returns, 'Portfolio Volatility':\n Portfolio_Volatilities}"], {}), "({'Portfolio Return': Portfolio_Returns, 'Portfolio Volatility':\n Portfolio_Volatilities})\n", (5929, 6022), True, 'import pandas as pd\n'), ((6241, 6282), 'matplotlib.pyplot.title', 'plt.title', (['"""Markowitz Efficient Frontier"""'], {}), "('Markowitz Efficient Frontier')\n", (6250, 6282), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6346), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Expected Portfolio Volatility"""'], {}), "('Expected Portfolio Volatility')\n", (6313, 6346), True, 'import matplotlib.pyplot as plt\n'), ((6367, 6406), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Portfolio Return"""'], {}), "('Expected Portfolio Return')\n", (6377, 6406), True, 'import matplotlib.pyplot as plt\n'), ((6451, 6479), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Markowitz.png"""'], {}), "('Markowitz.png')\n", (6462, 6479), True, 'import matplotlib.pyplot as plt\n'), ((6481, 6524), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Markowitz300DPI.png"""'], {'dpi': '(300)'}), "('Markowitz300DPI.png', dpi=300)\n", (6492, 6524), True, 'import matplotlib.pyplot as plt\n'), ((6614, 6624), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6622, 6624), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2082), 'pandas_datareader.data.DataReader', 'web.DataReader', (['ticker'], {'data_source': '"""yahoo"""', 'start': '"""2010-1-1"""'}), "(ticker, data_source='yahoo', start='2010-1-1')\n", (2035, 2082), True, 'from pandas_datareader import data as web\n'), ((5201, 5241), 'numpy.sum', 'np.sum', (['Current_Portfolio_Random_Weights'], {}), '(Current_Portfolio_Random_Weights)\n', (5207, 5241), True, 'import numpy as np\n')] |
import streamlit as st
import numpy as np
import pandas as pd
import json
import pickle
import reference_book as rb
def main():
model = load_data()
st.title('Questionario - Open Sex Role Inventory')
st.text(
"""
Olá, bem vindo!
Este questionario foi baseado em um estudo chamado Open Sex-Role Inventory, realizado originalmente por <NAME> e visava ser capaz de estudar e prever a
sexualidade das pessoas utilizando uma serie de perguntas. Atualmente, este questionario é muito mais amplo(apesar de parecer dubio) e mede a sexualidade de um
individuo de forma mais complexa.
A ideia deste questionario que você está prestes a fazer não tem nada haver com o objetivo original do teste, mas, em utilizar os dados fornecidos pelos
pesquisadores responsáveis para prever algumas outras informações da sua pessoa, sendo meramente um experimento envolvendo Estatística, Inteligencia Artificial
e Programação.
Os dados informados aqui são completamente anónimos e utilizados apenas para propositos de medição da qualidade do modelo de decisão utilizado no experimento.
O questionario deve ser respondido da seguinte forma:
Você será exposto a 44 perguntas que devem ser respondidas utilizando um slider de intensidade, onde o valor 1(mais baixo) representa total discordancia com a
afirmação, o valor 3(Médio) representa neutralidade sobre a pergunta e o valor 5(Mais alto) representa total concordancia com a pergunta.
Dessa forma, você deve responder o questionario da forma mais honesta possível. Boa sorte!
1 - Discordo totalmente;
2 - Discordo;
3 - Neutro;
4 - Concordo;
5 - Concordo totalmente.
"""
)
st.sidebar.title("OSRI Questionario")
confirm_btn = st.sidebar.button(
label='Confirmar questionario'
)
# Question sliders
Q1 = st.slider(
label='Eu estudei como ganhar apostando',
min_value=1,
max_value=5,
step=1)
Q2 = st.slider(
label='Eu penso em pintar meu cabelo',
min_value=1,
max_value=5,
step=1)
Q3 = st.slider(
label='Eu gosto de arremessar facas, machados e outras coisas cortantes',
min_value=1,
max_value=5,
step=1)
Q4 = st.slider(
label='Eu presenteio as pessoas com presentes feitos a mão',
min_value=1,
max_value=5,
step=1)
Q5 = st.slider(
label='Eu tenho sonhos em que estou salvando alguém de um prédio em chamas',
min_value=1,
max_value=5,
step=1)
Q6 = st.slider(
label='Eu fico envergonhado quando alguém lê algo que eu escrevi',
min_value=1,
max_value=5,
step=1)
Q7 = st.slider(
label='Eu ja estive/estou muito interessadas em guerras históricas',
min_value=1,
max_value=5,
step=1)
Q8 = st.slider(
label='Eu sei os aniversários dos meus amigos',
min_value=1,
max_value=5,
step=1)
Q9 = st.slider(
label='Eu gosto de armas de fogo',
min_value=1,
max_value=5,
step=1)
Q10 = st.slider(
label='Eu fico mais feliz quando estou na minha cama',
min_value=1,
max_value=5,
step=1)
Q11 = st.slider(
label='Eu não trabalhei/estudei muito no ensino médio',
min_value=1,
max_value=5,
step=1)
Q12 = st.slider(
label='Eu uso loção para minhas mãos',
min_value=1,
max_value=5,
step=1)
Q13 = st.slider(
label='Eu prefiro ter aulas de matemática do que aulas de artes manuais',
min_value=1,
max_value=5,
step=1)
Q14 = st.slider(
label='Eu danço quando estou sozinho',
min_value=1,
max_value=5,
step=1)
Q15 = st.slider(
label='Eu penso/pensei que seria muito excitante se tornar um fora da lei',
min_value=1,
max_value=5,
step=1)
Q16 = st.slider(
label='Quando eu era criança, eu costumava brincar de fingir que estava em uma banda com meus amigos',
min_value=1,
max_value=5,
step=1)
Q17 = st.slider(
label='Eu considero/considerei entrar para as forças armadas',
min_value=1,
max_value=5,
step=1)
Q18 = st.slider(
label='Eu fico tonto quando me levanto bruscamente',
min_value=1,
max_value=5,
step=1)
Q19 = st.slider(
label='Eu não considero normal ficar com raiva/triste ao ouvir sobre a morte de pessoas que você não conhece',
min_value=1,
max_value=5,
step=1)
Q20 = st.slider(
label='Algumas vezes eu choro quando fico com muita raiva',
min_value=1,
max_value=5,
step=1)
Q21 = st.slider(
label='Eu não lembro de datas de aniversários',
min_value=1,
max_value=5,
step=1)
Q22 = st.slider(
label='Eu guardo as cartas que eu recebo',
min_value=1,
max_value=5,
step=1)
Q23 = st.slider(
label='Eu brinco de xingar meus amigos e não me importo que façam o mesmo',
min_value=1,
max_value=5,
step=1)
Q24 = st.slider(
label='Sou contra experimentos médicos em animais',
min_value=1,
max_value=5,
step=1)
Q25 = st.slider(
label='Eu posso fazer uma quantidade incrível de flexões',
min_value=1,
max_value=5,
step=1)
Q26 = st.slider(
label='Eu pulo quando fico muito animado',
min_value=1,
max_value=5,
step=1)
Q27 = st.slider(
label='Eu penso que um desastre natural poderia ser divertido',
min_value=1,
max_value=5,
step=1)
Q28 = st.slider(
label='Eu ando com um cobertor pela casa',
min_value=1,
max_value=5,
step=1)
Q29 = st.slider(
label='Eu costumava/costumo queimar coisas com a luz do sol e uma lupa',
min_value=1,
max_value=5,
step=1)
Q30 = st.slider(
label='Eu acho o horóscopo divertido',
min_value=1,
max_value=5,
step=1)
Q31 = st.slider(
label='Eu não levo muita bagagem quando viajo',
min_value=1,
max_value=5,
step=1)
Q32 = st.slider(
label='Eu ja pensei/penso em me tornar vegetariano',
min_value=1,
max_value=5,
step=1)
Q33 = st.slider(
label='Eu odeio sair as compras',
min_value=1,
max_value=5,
step=1)
Q34 = st.slider(
label='Eu tenho/tive um diario pessoal que guardo até hoje',
min_value=1,
max_value=5,
step=1)
Q35 = st.slider(
label='Eu desmontei/desmonto maquinas apenas para ver como elas funcionam',
min_value=1,
max_value=5,
step=1)
Q36 = st.slider(
label='Eu tenho muitas fotos das coisas que eu fiz/faço',
min_value=1,
max_value=5,
step=1)
Q37 = st.slider(
label='Eu ja joguei/jogo muitos video games',
min_value=1,
max_value=5,
step=1)
Q38 = st.slider(
label='De vez em quando deixo boas mensagens para as pessoas',
min_value=1,
max_value=5,
step=1)
Q39 = st.slider(
label='Eu ja botei fogo em vários tipos de combustíveis apenas por diversão',
min_value=1,
max_value=5,
step=1)
Q40 = st.slider(
label='Eu realmente gosto de dançar',
min_value=1,
max_value=5,
step=1)
Q41 = st.slider(
label='Em uma escada, subo dois degraus de cada vez',
min_value=1,
max_value=5,
step=1)
Q42 = st.slider(
label='Eu cozinho doces e coisas gostosas para mim mesmo as vezes',
min_value=1,
max_value=5,
step=1)
Q43 = st.slider(
label='Eu penso que um desastre natural poderia ser divertido',
min_value=1,
max_value=5,
step=1)
Q44 = st.slider(
label='Eu decoro meus pertences(Ex: adesivos no notebook)',
min_value=1,
max_value=5,
step=1)
# Features array
features = np.array([
Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9,Q10,Q11,Q12,Q13,Q14,
Q15,Q16,Q17,Q18,Q19,Q20,Q21,Q22,Q23,Q24,Q25,Q26,
Q27,Q28,Q29,Q30,Q31,Q32,Q33,Q34,Q35,Q36,Q37,Q38,
Q39,Q40,Q41,Q42,Q43,Q44
])
if confirm_btn:
ypred = model.predict(features.reshape([1, -1]))
age = ypred[0]
education = rb.education[ypred[1]]
gender = rb.gender[ypred[2]]
orientation = rb.orientation[ypred[3]]
race = rb.race[ypred[4]]
religion = rb.religion[ypred[5]]
hand = rb.hand[ypred[6]]
print(ypred)
@st.cache
def load_data():
with open("files/OSRI_Decision_Tree_model.hdf5", 'rb') as model_file:
return pickle.load(model_file)
if __name__ == "__main__":
main()
if __name__ != "__main__":
main()
| [
"streamlit.slider",
"streamlit.title",
"streamlit.sidebar.title",
"streamlit.text",
"pickle.load",
"numpy.array",
"streamlit.sidebar.button"
] | [((157, 207), 'streamlit.title', 'st.title', (['"""Questionario - Open Sex Role Inventory"""'], {}), "('Questionario - Open Sex Role Inventory')\n", (165, 207), True, 'import streamlit as st\n'), ((212, 1922), 'streamlit.text', 'st.text', (['"""\n Olá, bem vindo!\n \n Este questionario foi baseado em um estudo chamado Open Sex-Role Inventory, realizado originalmente por <NAME> e visava ser capaz de estudar e prever a \n sexualidade das pessoas utilizando uma serie de perguntas. Atualmente, este questionario é muito mais amplo(apesar de parecer dubio) e mede a sexualidade de um\n individuo de forma mais complexa.\n \n A ideia deste questionario que você está prestes a fazer não tem nada haver com o objetivo original do teste, mas, em utilizar os dados fornecidos pelos \n pesquisadores responsáveis para prever algumas outras informações da sua pessoa, sendo meramente um experimento envolvendo Estatística, Inteligencia Artificial \n e Programação.\n \n Os dados informados aqui são completamente anónimos e utilizados apenas para propositos de medição da qualidade do modelo de decisão utilizado no experimento.\n \n O questionario deve ser respondido da seguinte forma: \n Você será exposto a 44 perguntas que devem ser respondidas utilizando um slider de intensidade, onde o valor 1(mais baixo) representa total discordancia com a \n afirmação, o valor 3(Médio) representa neutralidade sobre a pergunta e o valor 5(Mais alto) representa total concordancia com a pergunta.\n \n Dessa forma, você deve responder o questionario da forma mais honesta possível. Boa sorte!\n \n 1 - Discordo totalmente;\n 2 - Discordo;\n 3 - Neutro;\n 4 - Concordo;\n 5 - Concordo totalmente.\n """'], {}), '(\n """\n Olá, bem vindo!\n \n Este questionario foi baseado em um estudo chamado Open Sex-Role Inventory, realizado originalmente por <NAME> e visava ser capaz de estudar e prever a \n sexualidade das pessoas utilizando uma serie de perguntas. Atualmente, este questionario é muito mais amplo(apesar de parecer dubio) e mede a sexualidade de um\n individuo de forma mais complexa.\n \n A ideia deste questionario que você está prestes a fazer não tem nada haver com o objetivo original do teste, mas, em utilizar os dados fornecidos pelos \n pesquisadores responsáveis para prever algumas outras informações da sua pessoa, sendo meramente um experimento envolvendo Estatística, Inteligencia Artificial \n e Programação.\n \n Os dados informados aqui são completamente anónimos e utilizados apenas para propositos de medição da qualidade do modelo de decisão utilizado no experimento.\n \n O questionario deve ser respondido da seguinte forma: \n Você será exposto a 44 perguntas que devem ser respondidas utilizando um slider de intensidade, onde o valor 1(mais baixo) representa total discordancia com a \n afirmação, o valor 3(Médio) representa neutralidade sobre a pergunta e o valor 5(Mais alto) representa total concordancia com a pergunta.\n \n Dessa forma, você deve responder o questionario da forma mais honesta possível. Boa sorte!\n \n 1 - Discordo totalmente;\n 2 - Discordo;\n 3 - Neutro;\n 4 - Concordo;\n 5 - Concordo totalmente.\n """\n )\n', (219, 1922), True, 'import streamlit as st\n'), ((1936, 1973), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""OSRI Questionario"""'], {}), "('OSRI Questionario')\n", (1952, 1973), True, 'import streamlit as st\n'), ((1992, 2041), 'streamlit.sidebar.button', 'st.sidebar.button', ([], {'label': '"""Confirmar questionario"""'}), "(label='Confirmar questionario')\n", (2009, 2041), True, 'import streamlit as st\n'), ((2093, 2183), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu estudei como ganhar apostando"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu estudei como ganhar apostando', min_value=1, max_value=\n 5, step=1)\n", (2102, 2183), True, 'import streamlit as st\n'), ((2227, 2313), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu penso em pintar meu cabelo"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu penso em pintar meu cabelo', min_value=1, max_value=5,\n step=1)\n", (2236, 2313), True, 'import streamlit as st\n'), ((2358, 2484), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu gosto de arremessar facas, machados e outras coisas cortantes"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu gosto de arremessar facas, machados e outras coisas cortantes',\n min_value=1, max_value=5, step=1)\n", (2367, 2484), True, 'import streamlit as st\n'), ((2528, 2636), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu presenteio as pessoas com presentes feitos a mão"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu presenteio as pessoas com presentes feitos a mão',\n min_value=1, max_value=5, step=1)\n", (2537, 2636), True, 'import streamlit as st\n'), ((2681, 2810), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu tenho sonhos em que estou salvando alguém de um prédio em chamas"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu tenho sonhos em que estou salvando alguém de um prédio em chamas',\n min_value=1, max_value=5, step=1)\n", (2690, 2810), True, 'import streamlit as st\n'), ((2854, 2968), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu fico envergonhado quando alguém lê algo que eu escrevi"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu fico envergonhado quando alguém lê algo que eu escrevi',\n min_value=1, max_value=5, step=1)\n", (2863, 2968), True, 'import streamlit as st\n'), ((3021, 3142), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu ja estive/estou muito interessadas em guerras históricas"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu ja estive/estou muito interessadas em guerras históricas',\n min_value=1, max_value=5, step=1)\n", (3030, 3142), True, 'import streamlit as st\n'), ((3182, 3277), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu sei os aniversários dos meus amigos"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu sei os aniversários dos meus amigos', min_value=1,\n max_value=5, step=1)\n", (3191, 3277), True, 'import streamlit as st\n'), ((3322, 3400), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu gosto de armas de fogo"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu gosto de armas de fogo', min_value=1, max_value=5, step=1)\n", (3331, 3400), True, 'import streamlit as st\n'), ((3454, 3557), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu fico mais feliz quando estou na minha cama"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu fico mais feliz quando estou na minha cama', min_value=\n 1, max_value=5, step=1)\n", (3463, 3557), True, 'import streamlit as st\n'), ((3602, 3706), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu não trabalhei/estudei muito no ensino médio"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu não trabalhei/estudei muito no ensino médio', min_value\n =1, max_value=5, step=1)\n", (3611, 3706), True, 'import streamlit as st\n'), ((3751, 3837), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu uso loção para minhas mãos"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu uso loção para minhas mãos', min_value=1, max_value=5,\n step=1)\n", (3760, 3837), True, 'import streamlit as st\n'), ((3883, 4009), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu prefiro ter aulas de matemática do que aulas de artes manuais"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu prefiro ter aulas de matemática do que aulas de artes manuais',\n min_value=1, max_value=5, step=1)\n", (3892, 4009), True, 'import streamlit as st\n'), ((4050, 4136), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu danço quando estou sozinho"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu danço quando estou sozinho', min_value=1, max_value=5,\n step=1)\n", (4059, 4136), True, 'import streamlit as st\n'), ((4182, 4310), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu penso/pensei que seria muito excitante se tornar um fora da lei"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu penso/pensei que seria muito excitante se tornar um fora da lei',\n min_value=1, max_value=5, step=1)\n", (4191, 4310), True, 'import streamlit as st\n'), ((4351, 4507), 'streamlit.slider', 'st.slider', ([], {'label': '"""Quando eu era criança, eu costumava brincar de fingir que estava em uma banda com meus amigos"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Quando eu era criança, eu costumava brincar de fingir que estava em uma banda com meus amigos'\n , min_value=1, max_value=5, step=1)\n", (4360, 4507), True, 'import streamlit as st\n'), ((4547, 4657), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu considero/considerei entrar para as forças armadas"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu considero/considerei entrar para as forças armadas',\n min_value=1, max_value=5, step=1)\n", (4556, 4657), True, 'import streamlit as st\n'), ((4703, 4803), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu fico tonto quando me levanto bruscamente"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu fico tonto quando me levanto bruscamente', min_value=1,\n max_value=5, step=1)\n", (4712, 4803), True, 'import streamlit as st\n'), ((4849, 5013), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu não considero normal ficar com raiva/triste ao ouvir sobre a morte de pessoas que você não conhece"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu não considero normal ficar com raiva/triste ao ouvir sobre a morte de pessoas que você não conhece'\n , min_value=1, max_value=5, step=1)\n", (4858, 5013), True, 'import streamlit as st\n'), ((5053, 5160), 'streamlit.slider', 'st.slider', ([], {'label': '"""Algumas vezes eu choro quando fico com muita raiva"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Algumas vezes eu choro quando fico com muita raiva',\n min_value=1, max_value=5, step=1)\n", (5062, 5160), True, 'import streamlit as st\n'), ((5206, 5301), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu não lembro de datas de aniversários"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu não lembro de datas de aniversários', min_value=1,\n max_value=5, step=1)\n", (5215, 5301), True, 'import streamlit as st\n'), ((5347, 5438), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu guardo as cartas que eu recebo"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu guardo as cartas que eu recebo', min_value=1, max_value\n =5, step=1)\n", (5356, 5438), True, 'import streamlit as st\n'), ((5483, 5611), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu brinco de xingar meus amigos e não me importo que façam o mesmo"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu brinco de xingar meus amigos e não me importo que façam o mesmo',\n min_value=1, max_value=5, step=1)\n", (5492, 5611), True, 'import streamlit as st\n'), ((5652, 5751), 'streamlit.slider', 'st.slider', ([], {'label': '"""Sou contra experimentos médicos em animais"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Sou contra experimentos médicos em animais', min_value=1,\n max_value=5, step=1)\n", (5661, 5751), True, 'import streamlit as st\n'), ((5797, 5903), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu posso fazer uma quantidade incrível de flexões"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu posso fazer uma quantidade incrível de flexões',\n min_value=1, max_value=5, step=1)\n", (5806, 5903), True, 'import streamlit as st\n'), ((5949, 6040), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu pulo quando fico muito animado"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu pulo quando fico muito animado', min_value=1, max_value\n =5, step=1)\n", (5958, 6040), True, 'import streamlit as st\n'), ((6085, 6196), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu penso que um desastre natural poderia ser divertido"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu penso que um desastre natural poderia ser divertido',\n min_value=1, max_value=5, step=1)\n", (6094, 6196), True, 'import streamlit as st\n'), ((6238, 6329), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu ando com um cobertor pela casa"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu ando com um cobertor pela casa', min_value=1, max_value\n =5, step=1)\n", (6247, 6329), True, 'import streamlit as st\n'), ((6374, 6499), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu costumava/costumo queimar coisas com a luz do sol e uma lupa"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu costumava/costumo queimar coisas com a luz do sol e uma lupa',\n min_value=1, max_value=5, step=1)\n", (6383, 6499), True, 'import streamlit as st\n'), ((6540, 6626), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu acho o horóscopo divertido"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu acho o horóscopo divertido', min_value=1, max_value=5,\n step=1)\n", (6549, 6626), True, 'import streamlit as st\n'), ((6672, 6767), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu não levo muita bagagem quando viajo"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu não levo muita bagagem quando viajo', min_value=1,\n max_value=5, step=1)\n", (6681, 6767), True, 'import streamlit as st\n'), ((6813, 6913), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu ja pensei/penso em me tornar vegetariano"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu ja pensei/penso em me tornar vegetariano', min_value=1,\n max_value=5, step=1)\n", (6822, 6913), True, 'import streamlit as st\n'), ((6959, 7036), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu odeio sair as compras"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu odeio sair as compras', min_value=1, max_value=5, step=1)\n", (6968, 7036), True, 'import streamlit as st\n'), ((7086, 7194), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu tenho/tive um diario pessoal que guardo até hoje"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu tenho/tive um diario pessoal que guardo até hoje',\n min_value=1, max_value=5, step=1)\n", (7095, 7194), True, 'import streamlit as st\n'), ((7240, 7368), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu desmontei/desmonto maquinas apenas para ver como elas funcionam"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu desmontei/desmonto maquinas apenas para ver como elas funcionam',\n min_value=1, max_value=5, step=1)\n", (7249, 7368), True, 'import streamlit as st\n'), ((7409, 7514), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu tenho muitas fotos das coisas que eu fiz/faço"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu tenho muitas fotos das coisas que eu fiz/faço',\n min_value=1, max_value=5, step=1)\n", (7418, 7514), True, 'import streamlit as st\n'), ((7560, 7653), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu ja joguei/jogo muitos video games"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu ja joguei/jogo muitos video games', min_value=1,\n max_value=5, step=1)\n", (7569, 7653), True, 'import streamlit as st\n'), ((7699, 7809), 'streamlit.slider', 'st.slider', ([], {'label': '"""De vez em quando deixo boas mensagens para as pessoas"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='De vez em quando deixo boas mensagens para as pessoas',\n min_value=1, max_value=5, step=1)\n", (7708, 7809), True, 'import streamlit as st\n'), ((7855, 7985), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu ja botei fogo em vários tipos de combustíveis apenas por diversão"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu ja botei fogo em vários tipos de combustíveis apenas por diversão',\n min_value=1, max_value=5, step=1)\n", (7864, 7985), True, 'import streamlit as st\n'), ((8026, 8111), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu realmente gosto de dançar"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu realmente gosto de dançar', min_value=1, max_value=5,\n step=1)\n", (8035, 8111), True, 'import streamlit as st\n'), ((8157, 8258), 'streamlit.slider', 'st.slider', ([], {'label': '"""Em uma escada, subo dois degraus de cada vez"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Em uma escada, subo dois degraus de cada vez', min_value=1,\n max_value=5, step=1)\n", (8166, 8258), True, 'import streamlit as st\n'), ((8304, 8425), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu cozinho doces e coisas gostosas para mim mesmo as vezes"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label=\n 'Eu cozinho doces e coisas gostosas para mim mesmo as vezes', min_value\n =1, max_value=5, step=1)\n", (8313, 8425), True, 'import streamlit as st\n'), ((8465, 8576), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu penso que um desastre natural poderia ser divertido"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu penso que um desastre natural poderia ser divertido',\n min_value=1, max_value=5, step=1)\n", (8474, 8576), True, 'import streamlit as st\n'), ((8622, 8729), 'streamlit.slider', 'st.slider', ([], {'label': '"""Eu decoro meus pertences(Ex: adesivos no notebook)"""', 'min_value': '(1)', 'max_value': '(5)', 'step': '(1)'}), "(label='Eu decoro meus pertences(Ex: adesivos no notebook)',\n min_value=1, max_value=5, step=1)\n", (8631, 8729), True, 'import streamlit as st\n'), ((8801, 9030), 'numpy.array', 'np.array', (['[Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15, Q16, Q17,\n Q18, Q19, Q20, Q21, Q22, Q23, Q24, Q25, Q26, Q27, Q28, Q29, Q30, Q31,\n Q32, Q33, Q34, Q35, Q36, Q37, Q38, Q39, Q40, Q41, Q42, Q43, Q44]'], {}), '([Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15,\n Q16, Q17, Q18, Q19, Q20, Q21, Q22, Q23, Q24, Q25, Q26, Q27, Q28, Q29,\n Q30, Q31, Q32, Q33, Q34, Q35, Q36, Q37, Q38, Q39, Q40, Q41, Q42, Q43, Q44])\n', (8809, 9030), True, 'import numpy as np\n'), ((9514, 9537), 'pickle.load', 'pickle.load', (['model_file'], {}), '(model_file)\n', (9525, 9537), False, 'import pickle\n')] |
from itertools import product
from pyshocks import get_logger
from pyshocks.burgers import WENOJS32, WENOJS53
import jax
import jax.numpy as jnp
import jax.numpy.linalg as jla
import pytest
logger = get_logger("test_weno")
# {{{ test_weno_smoothness_indicator_vectorization
@pytest.mark.parametrize(
"scheme",
[
WENOJS32(),
WENOJS53(),
],
)
def test_weno_smoothness_indicator_vectorization(scheme, rtol=2.0e-15, n=64):
"""Tests that the vectorized version of the smoothness indicator matches
the explicitly looped version.
"""
# {{{ setup
a = scheme.a
b = scheme.b
c = scheme.c
nghosts = b.shape[-1] // 2
nstencils = b.shape[0]
stencil = jnp.arange(-nghosts, nghosts + 1)
n = n + 2 * nghosts
m = jnp.s_[nghosts:-nghosts]
theta = jnp.linspace(0.0, 2.0 * jnp.pi, n)
u = jnp.sin(theta)
# }}}
# {{{ compute smoothness indicator
import numpy as np
# loop-based
beta0 = np.zeros((nstencils, n), dtype=jnp.float64)
for j in range(*m.indices(n)): # pylint: disable=no-member
for i, k in product(range(nstencils), range(a.size)):
beta0[i, j] += a[k] * jnp.sum(u[j + stencil] * b[i, k, ::-1]) ** 2
# jnp.convolve-based
from pyshocks.weno import _weno_js_smoothness
beta1 = _weno_js_smoothness(u, a, b)
# }}}
# {{{ compute stencils
# loop-based
uhat0 = np.zeros((nstencils, n), dtype=jnp.float64)
for j in range(*m.indices(n)): # pylint: disable=no-member
for i in range(nstencils):
uhat0[i, j] = jnp.sum(u[j + stencil] * c[i, ::-1])
# jnp.convolve-based
from pyshocks.weno import _weno_js_reconstruct
uhat1 = _weno_js_reconstruct(u, c)
# }}}
# {{{ check equality
error = jla.norm(beta0[:, m] - beta1[:, m]) / jla.norm(beta0[:, m])
logger.info("beta[%s]: %.8e", type(scheme).__name__, error)
assert error < rtol
error = jla.norm(uhat0[:, m] - uhat1[:, m]) / jla.norm(uhat0[:, m])
logger.info("uhat[%s]: %.8e", type(scheme).__name__, error)
assert error < rtol
# }}}
# }}}
# {{{ test_weno_smoothness_indicator
@pytest.mark.parametrize(
("scheme", "n"),
[
(WENOJS32(), 512),
(WENOJS53(), 128),
],
)
@pytest.mark.parametrize("is_smooth", [True, False])
def test_weno_smoothness_indicator(scheme, n, is_smooth):
"""Tests that the smoothness indicator actually works."""
# {{{ setup
a = scheme.a
b = scheme.b
nghosts = b.shape[-1] // 2
n = n + 2 * nghosts
m = jnp.s_[nghosts:-nghosts]
theta = jnp.linspace(0.0, 2.0 * jnp.pi, n)
if is_smooth:
u = jnp.sin(theta)
else:
u = theta < jnp.pi
# }}}
# {{{ compute smoothness indicator
from pyshocks.weno import _weno_js_smoothness
beta = _weno_js_smoothness(u, a, b)
alpha = scheme.d / (scheme.eps + beta) ** 2
omega = alpha / jnp.sum(alpha, axis=0, keepdims=True)
# }}}
# {{{ check smoothness
# NOTE: scheme.d are the "ideal" coefficients, so we're comparing how
# close we are to those
error = jnp.max(jnp.abs(omega[:, m] - scheme.d))
logger.info("error[%s, %s]: %.8e", type(scheme).__name__, is_smooth, error)
if is_smooth:
assert error < 0.1
else:
assert error > 0.1
# }}}
# }}}
# {{{
def _pyweno_reconstruct(u, order, side):
import pyweno
ul, sl = pyweno.weno.reconstruct(u, order, side, return_smoothness=True)
return jax.device_put(ul), jax.device_put(sl)
@pytest.mark.parametrize(
("scheme", "order", "n"),
[
# NOTE: pyweno only seems to support order >= 5
# (WENOJS32(), 3, 256),
(WENOJS53(), 5, 512),
],
)
def test_weno_reference(scheme, order, n, visualize=False):
"""Compares our weno reconstruction to PyWENO"""
pytest.importorskip("pyweno")
# {{{ reference values
from pyshocks import UniformGrid
grid = UniformGrid(a=0, b=2.0 * jnp.pi, n=n, nghosts=scheme.order)
from pyshocks import Quadrature, cell_average
quad = Quadrature(grid=grid, order=order)
u = cell_average(quad, jnp.sin)
uhost = u.copy()
ul, sl = _pyweno_reconstruct(uhost, order, "left")
ur, sr = _pyweno_reconstruct(uhost, order, "right")
# }}}
# {{{ compare
from pyshocks import rnorm
from pyshocks.weno import _weno_js_smoothness
betar = _weno_js_smoothness(u[::-1], scheme.a, scheme.b)[:, ::-1].T
betal = _weno_js_smoothness(u, scheme.a, scheme.b).T
errorl = rnorm(grid, sl, betal)
errorr = rnorm(grid, sr, betar)
logger.info("error smoothness: left %.5e right %.5e", errorl, errorr)
assert errorl < 1.0e-5 and errorr < 1.0e-8
from pyshocks.weno import reconstruct
urhat = reconstruct(grid, scheme, u)
ulhat = reconstruct(grid, scheme, u[::-1])[::-1]
errorl = rnorm(grid, ul, ulhat)
errorr = rnorm(grid, ur, urhat)
logger.info("error reconstruct: left %.5e right %.5e", errorl, errorr)
assert errorl < 1.0e-12 and errorr < 1.0e-12
# }}}
if not visualize:
return
s = grid.i_
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca()
ax.plot(grid.x[s], ul[s] - ulhat[s], label="left")
ax.plot(grid.x[s], ur[s] - urhat[s], label="right")
ax.grid()
ax.legend()
fig.savefig("test_weno_reference_reconstruct")
fig.clf()
ax = fig.gca()
ax.plot(grid.x[s], sl[s] - betal[s], label="left")
ax.plot(grid.x[s], sr[s] - betar[s], label="right")
ax.grid()
ax.legend()
fig.savefig("test_weno_reference_smoothness")
plt.close(fig)
# }}}
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
pytest.main([__file__])
| [
"pyshocks.Quadrature",
"pyshocks.weno._weno_js_smoothness",
"pytest.main",
"matplotlib.pyplot.figure",
"pytest.mark.parametrize",
"pyshocks.get_logger",
"jax.device_put",
"matplotlib.pyplot.close",
"jax.numpy.linspace",
"pyshocks.burgers.WENOJS32",
"jax.numpy.linalg.norm",
"pyweno.weno.reconst... | [((203, 226), 'pyshocks.get_logger', 'get_logger', (['"""test_weno"""'], {}), "('test_weno')\n", (213, 226), False, 'from pyshocks import get_logger\n'), ((2288, 2339), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_smooth"""', '[True, False]'], {}), "('is_smooth', [True, False])\n", (2311, 2339), False, 'import pytest\n'), ((717, 750), 'jax.numpy.arange', 'jnp.arange', (['(-nghosts)', '(nghosts + 1)'], {}), '(-nghosts, nghosts + 1)\n', (727, 750), True, 'import jax.numpy as jnp\n'), ((822, 856), 'jax.numpy.linspace', 'jnp.linspace', (['(0.0)', '(2.0 * jnp.pi)', 'n'], {}), '(0.0, 2.0 * jnp.pi, n)\n', (834, 856), True, 'import jax.numpy as jnp\n'), ((865, 879), 'jax.numpy.sin', 'jnp.sin', (['theta'], {}), '(theta)\n', (872, 879), True, 'import jax.numpy as jnp\n'), ((985, 1028), 'numpy.zeros', 'np.zeros', (['(nstencils, n)'], {'dtype': 'jnp.float64'}), '((nstencils, n), dtype=jnp.float64)\n', (993, 1028), True, 'import numpy as np\n'), ((1327, 1355), 'pyshocks.weno._weno_js_smoothness', '_weno_js_smoothness', (['u', 'a', 'b'], {}), '(u, a, b)\n', (1346, 1355), False, 'from pyshocks.weno import _weno_js_smoothness\n'), ((1425, 1468), 'numpy.zeros', 'np.zeros', (['(nstencils, n)'], {'dtype': 'jnp.float64'}), '((nstencils, n), dtype=jnp.float64)\n', (1433, 1468), True, 'import numpy as np\n'), ((1725, 1751), 'pyshocks.weno._weno_js_reconstruct', '_weno_js_reconstruct', (['u', 'c'], {}), '(u, c)\n', (1745, 1751), False, 'from pyshocks.weno import _weno_js_reconstruct\n'), ((2614, 2648), 'jax.numpy.linspace', 'jnp.linspace', (['(0.0)', '(2.0 * jnp.pi)', 'n'], {}), '(0.0, 2.0 * jnp.pi, n)\n', (2626, 2648), True, 'import jax.numpy as jnp\n'), ((2845, 2873), 'pyshocks.weno._weno_js_smoothness', '_weno_js_smoothness', (['u', 'a', 'b'], {}), '(u, a, b)\n', (2864, 2873), False, 'from pyshocks.weno import _weno_js_smoothness\n'), ((3442, 3505), 'pyweno.weno.reconstruct', 'pyweno.weno.reconstruct', (['u', 'order', 'side'], {'return_smoothness': '(True)'}), '(u, order, side, return_smoothness=True)\n', (3465, 3505), False, 'import pyweno\n'), ((3865, 3894), 'pytest.importorskip', 'pytest.importorskip', (['"""pyweno"""'], {}), "('pyweno')\n", (3884, 3894), False, 'import pytest\n'), ((3973, 4032), 'pyshocks.UniformGrid', 'UniformGrid', ([], {'a': '(0)', 'b': '(2.0 * jnp.pi)', 'n': 'n', 'nghosts': 'scheme.order'}), '(a=0, b=2.0 * jnp.pi, n=n, nghosts=scheme.order)\n', (3984, 4032), False, 'from pyshocks import UniformGrid\n'), ((4096, 4130), 'pyshocks.Quadrature', 'Quadrature', ([], {'grid': 'grid', 'order': 'order'}), '(grid=grid, order=order)\n', (4106, 4130), False, 'from pyshocks import Quadrature, cell_average\n'), ((4139, 4166), 'pyshocks.cell_average', 'cell_average', (['quad', 'jnp.sin'], {}), '(quad, jnp.sin)\n', (4151, 4166), False, 'from pyshocks import Quadrature, cell_average\n'), ((4557, 4579), 'pyshocks.rnorm', 'rnorm', (['grid', 'sl', 'betal'], {}), '(grid, sl, betal)\n', (4562, 4579), False, 'from pyshocks import rnorm\n'), ((4593, 4615), 'pyshocks.rnorm', 'rnorm', (['grid', 'sr', 'betar'], {}), '(grid, sr, betar)\n', (4598, 4615), False, 'from pyshocks import rnorm\n'), ((4793, 4821), 'pyshocks.weno.reconstruct', 'reconstruct', (['grid', 'scheme', 'u'], {}), '(grid, scheme, u)\n', (4804, 4821), False, 'from pyshocks.weno import reconstruct\n'), ((4889, 4911), 'pyshocks.rnorm', 'rnorm', (['grid', 'ul', 'ulhat'], {}), '(grid, ul, ulhat)\n', (4894, 4911), False, 'from pyshocks import rnorm\n'), ((4925, 4947), 'pyshocks.rnorm', 'rnorm', (['grid', 'ur', 'urhat'], {}), '(grid, ur, urhat)\n', (4930, 4947), False, 'from pyshocks import rnorm\n'), ((5186, 5198), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5196, 5198), True, 'import matplotlib.pyplot as plt\n'), ((5641, 5655), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5650, 5655), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1837), 'jax.numpy.linalg.norm', 'jla.norm', (['(beta0[:, m] - beta1[:, m])'], {}), '(beta0[:, m] - beta1[:, m])\n', (1810, 1837), True, 'import jax.numpy.linalg as jla\n'), ((1840, 1861), 'jax.numpy.linalg.norm', 'jla.norm', (['beta0[:, m]'], {}), '(beta0[:, m])\n', (1848, 1861), True, 'import jax.numpy.linalg as jla\n'), ((1963, 1998), 'jax.numpy.linalg.norm', 'jla.norm', (['(uhat0[:, m] - uhat1[:, m])'], {}), '(uhat0[:, m] - uhat1[:, m])\n', (1971, 1998), True, 'import jax.numpy.linalg as jla\n'), ((2001, 2022), 'jax.numpy.linalg.norm', 'jla.norm', (['uhat0[:, m]'], {}), '(uhat0[:, m])\n', (2009, 2022), True, 'import jax.numpy.linalg as jla\n'), ((336, 346), 'pyshocks.burgers.WENOJS32', 'WENOJS32', ([], {}), '()\n', (344, 346), False, 'from pyshocks.burgers import WENOJS32, WENOJS53\n'), ((356, 366), 'pyshocks.burgers.WENOJS53', 'WENOJS53', ([], {}), '()\n', (364, 366), False, 'from pyshocks.burgers import WENOJS32, WENOJS53\n'), ((2679, 2693), 'jax.numpy.sin', 'jnp.sin', (['theta'], {}), '(theta)\n', (2686, 2693), True, 'import jax.numpy as jnp\n'), ((2943, 2980), 'jax.numpy.sum', 'jnp.sum', (['alpha'], {'axis': '(0)', 'keepdims': '(True)'}), '(alpha, axis=0, keepdims=True)\n', (2950, 2980), True, 'import jax.numpy as jnp\n'), ((3144, 3175), 'jax.numpy.abs', 'jnp.abs', (['(omega[:, m] - scheme.d)'], {}), '(omega[:, m] - scheme.d)\n', (3151, 3175), True, 'import jax.numpy as jnp\n'), ((3518, 3536), 'jax.device_put', 'jax.device_put', (['ul'], {}), '(ul)\n', (3532, 3536), False, 'import jax\n'), ((3538, 3556), 'jax.device_put', 'jax.device_put', (['sl'], {}), '(sl)\n', (3552, 3556), False, 'import jax\n'), ((4498, 4540), 'pyshocks.weno._weno_js_smoothness', '_weno_js_smoothness', (['u', 'scheme.a', 'scheme.b'], {}), '(u, scheme.a, scheme.b)\n', (4517, 4540), False, 'from pyshocks.weno import _weno_js_smoothness\n'), ((4834, 4868), 'pyshocks.weno.reconstruct', 'reconstruct', (['grid', 'scheme', 'u[::-1]'], {}), '(grid, scheme, u[::-1])\n', (4845, 4868), False, 'from pyshocks.weno import reconstruct\n'), ((5779, 5802), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (5790, 5802), False, 'import pytest\n'), ((1598, 1634), 'jax.numpy.sum', 'jnp.sum', (['(u[j + stencil] * c[i, ::-1])'], {}), '(u[j + stencil] * c[i, ::-1])\n', (1605, 1634), True, 'import jax.numpy as jnp\n'), ((2233, 2243), 'pyshocks.burgers.WENOJS32', 'WENOJS32', ([], {}), '()\n', (2241, 2243), False, 'from pyshocks.burgers import WENOJS32, WENOJS53\n'), ((2260, 2270), 'pyshocks.burgers.WENOJS53', 'WENOJS53', ([], {}), '()\n', (2268, 2270), False, 'from pyshocks.burgers import WENOJS32, WENOJS53\n'), ((4426, 4474), 'pyshocks.weno._weno_js_smoothness', '_weno_js_smoothness', (['u[::-1]', 'scheme.a', 'scheme.b'], {}), '(u[::-1], scheme.a, scheme.b)\n', (4445, 4474), False, 'from pyshocks.weno import _weno_js_smoothness\n'), ((3718, 3728), 'pyshocks.burgers.WENOJS53', 'WENOJS53', ([], {}), '()\n', (3726, 3728), False, 'from pyshocks.burgers import WENOJS32, WENOJS53\n'), ((1193, 1232), 'jax.numpy.sum', 'jnp.sum', (['(u[j + stencil] * b[i, k, ::-1])'], {}), '(u[j + stencil] * b[i, k, ::-1])\n', (1200, 1232), True, 'import jax.numpy as jnp\n')] |
"""Capacited Vehicles Routing Problem (CVRP) using an Ant Colony Algorithm (ACO)."""
from __future__ import print_function
import random
import numpy as np
from H_Hy_Men import VRPLibReader
Q = 1
RHO = 0.2
nk = {5: [32, 33, 34, 36, 37, 38, 39], 6: [33, 37, 39, 45], 7: [45, 46, 48, 53, 54], 9: [55, 61, 65]}
def get_problem_sol_file_pair(n, k):
problem_fn = 'data/A/A-n' + str(n) + '-k' + str(k) + '.vrp'
sol_fn = 'data/A/A-n' + str(n) + '-k' + str(k)+'.sol'
write_fn = 'data/A-VRP-my-alpha/latest-A-n' + str(n) + '-k' + str(k)
return problem_fn, sol_fn, write_fn
class Config:
def __init__(self):
self.iterations = 0
self.ants = 0
self.k = 0
self.alpha = 2.0
self.beta = 5.0
self.iterations = 100
# self._read_config_f()
self.k = 5
self.ants = 31
#
# def _read_config_f(self):
# with open('data/config', 'r') as f:
# for line in f.readlines():
# contents = line.split(' ')
# if contents[0] == 'iterations':
# self.iterations = int(contents[2])
# if contents[0] == 'ants':
# self.ants = int(contents[2])
# if contents[0] == 'k':
# self.k = int(contents[2])
def set_alpha(self, alpha):
self.alpha = alpha
def set_beta(self, beta):
self.beta = beta
def read_solution_file(path):
with open(path, 'r') as f:
optimal_routes = []
cost = 0
for line in f.readlines():
contents = line.split(' ')
if 'cost' in contents[0].lower() or 'my_best' in contents[0].lower():
cost = int(contents[1])
break
elif 'route' in contents[0].lower():
route = []
for i in range(2, len(contents)):
el = contents[i].strip()
if el == '':
break
route.append(int(el))
optimal_routes.append(route)
return optimal_routes, cost
class Coords:
def __init__(self, filenames):
problem_fn, sol_fn, self.write_fn = filenames
self.capacities = []
self.no_trucks = 5
self.coords = []
self.demands = []
self.depot = None
self._read_problem_f(problem_fn)
self.distance_matrix = self._create_distance_matrix()
self._read_sol_f(sol_fn)
def _read_problem_f(self, problem_fn):
reading_coords = False
reading_demand = False
reading_depot = False
self.capacities= VRPLibReader.capacity
self.coords= VRPLibReader.site
self.demands= VRPLibReader.things
def _read_sol_f(self, sol_fn):
self.optimal_routes, self.cost = read_solution_file(sol_fn)
def _create_distance_matrix(self):
distances_m = []
for i_coord in self.coords:
distances = []
for j_coord in self.coords:
distance = int(round(np.linalg.norm(np.subtract(i_coord, j_coord))))
distances.append(distance)
distances_m.append(distances)
return distances_m
class Pheromone_Trails:
def __init__(self, no_cities):
from random import uniform
self.pheromones_matrix = []
for distances in range(no_cities):
pheromones = [uniform(0, 0.1) for _ in range(no_cities)]
self.pheromones_matrix.append(pheromones)
def get_pheromone_trail(self, city_i, city_j):
return self.pheromones_matrix[city_i][city_j]
def evaporate(self):
self.pheromones_matrix = np.multiply(self.pheromones_matrix, (1 - RHO))
def update(self, city_i, city_j, overall_trip_distance):
delta_tau = Q / overall_trip_distance
self.pheromones_matrix[city_i][city_j] += delta_tau
self.pheromones_matrix[city_j][city_i] += delta_tau
class Ant:
def __init__(self, capacity, depot, demands, distance_m, pheromone_trails, config):
self.current_city = depot
self.depot = depot
self.distance_m = distance_m
self.not_visited = self.create_not_visited(distance_m)
self.capacity = capacity
self.load = capacity
self.demands = demands
self.trips = []
self.current_trip = []
self.pheromone_trails = pheromone_trails
self.trips_distance = None
self.config = config
self.start()
def create_not_visited(self, distance_m):
nv = set()
for i, _ in enumerate(distance_m):
if i != self.depot:
nv.add(i)
return nv
def start(self):
from random import randint
start_city = randint(1, len(self.distance_m) - 1)
self.visit_city(start_city)
def visit_city(self, index):
self.not_visited.remove(index)
self.load -= self.demands[index]
distance = self.distance_m[self.current_city][index]
self.current_city = index
self.current_trip.append((index, distance))
def visit_depot(self):
self.load = self.capacity
distance = self.distance_m[self.current_city][self.depot]
self.current_city = self.depot
self.current_trip.append((self.depot, distance))
self.trips.append(self.current_trip)
self.current_trip = []
def get_intensity(self, city):
return self.pheromone_trails.get_pheromone_trail(self.current_city, city)
def get_visibility(self, city):
distance = self.distance_m[self.current_city][city]
if distance == 0:
return 1
return 1 / distance
def calc_val(self, city):
intensity = self.get_intensity(city)
visibility = self.get_visibility(city)
return pow(intensity, self.config.alpha) * pow(visibility, self.config.beta)
def get_neighbours_with_probab(self):
not_visited = list(self.not_visited)
l = [self.calc_val(city) for city in not_visited]
m = np.divide(l, sum(l))
for i in range(1, len(m)):
m[i] += m[i - 1]
return not_visited, m
def create_path(self):
while len(self.not_visited) > 0:
neighbours, probabilities = self.get_neighbours_with_probab()
r = random.random()
next_to_visit = neighbours[-1]
for i in range(len(probabilities)):
if r < probabilities[i]:
next_to_visit = neighbours[i]
break
if self.demands[next_to_visit] > self.load:
# Come back to the depot to reload
self.visit_depot()
else:
self.visit_city(next_to_visit)
self.visit_depot()
def calculate_paths_quality(self):
overall_distance = 0
for trip in self.trips:
for _, dist in trip:
overall_distance += dist
self.trips_distance = overall_distance
def leave_pheromone(self):
for trip in self.trips:
prev_city = self.depot
for i, (city, _) in enumerate(trip):
self.pheromone_trails.update(prev_city, city, self.trips_distance)
prev_city = city
self.pheromone_trails.update(prev_city, self.depot, self.trips_distance)
def reset(self):
self.current_city = 0
self.not_visited = self.create_not_visited(self.distance_m)
self.current_trip = []
self.trips = []
self.load = self.capacity
self.start()
def trips_to_str(trips):
s = ''
for index, trip in enumerate(trips):
line = 'Route #' + str(index + 1) + ": "
for city, _ in trip:
if city == 0:
break
line += str(city) + " "
s += line + '\n'
return s
def save(filename, optimal, my_best, routes):
with open(filename, 'w')as f:
f.write(routes)
f.write("my_best: " + str(my_best) + '\n')
f.write("optimal: " + str(optimal))
def plot_data(xs, ys):
import matplotlib.pyplot as plt
plt.plot(xs, ys, c='red')
plt.xlabel('beta')
plt.ylabel('error')
plt.savefig('beta1.svg')
def solve_using_ants():
config = Config()
no_ants = config.ants
iterations = config.iterations
overall_score = 0
for k, ns in nk.items():
k_score = 0
for n in ns:
data = Coords(get_problem_sol_file_pair(n, k))
no_cities = len(data.distance_matrix)
pheromone_trails = Pheromone_Trails(no_cities)
ants = [Ant(capacity=data.capacities[0], depot=data.depot, demands=data.demands,distance_m=data.distance_matrix, pheromone_trails=pheromone_trails, config=config) for _ in range(no_ants)]
import sys
best_trip = sys.maxsize
routes = ''
last_record = 100000000
for iteration in range(iterations):
for ant in ants:
ant.create_path()
ant.calculate_paths_quality()
if ant.trips_distance < best_trip:
best_trip = ant.trips_distance
routes = trips_to_str(ant.trips)
for ant in ants:
pheromone_trails.evaporate()
ant.leave_pheromone()
ant.reset()
metric = (best_trip - data.cost) / data.cost
if iteration % 100 == 0:
if abs(last_record - metric) < 0.001:
break
last_record = metric
print("n", n, "k", k, "optimal", data.cost, "my_best", best_trip,
'metric', metric)
save(data.write_fn, data.cost, best_trip, routes)
k_score += metric
k_score /= len(ns)
print('k', k, 'mean k_score', k_score)
overall_score += k_score
overall_score /= len(nk)
print('mean overall_score', overall_score)
def visualize_graph(coords, routes, title):
import networkx as nx
import matplotlib.pyplot as plt
G = nx.Graph()
for route in routes:
last_city = 0
for city in route:
G.add_edge(last_city, city)
last_city = city
G.add_edge(last_city, 0)
edgelist = [(u, v) for (u, v, d) in G.edges(data=True)]
for i, pos in enumerate(coords):
G.add_node(i, pos=pos)
pos = nx.get_node_attributes(G, 'pos')
# nodes
nx.draw_networkx_nodes(G, pos)
# edges
nx.draw_networkx_edges(G, pos, edgelist=edgelist)
# labels
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')
plt.axis('off')
plt.title(title)
plt.show()
def main():
random.seed(1)
solve_using_ants()
# Visualize one use case from the dataset
n = 32
k = 5
problem_fn, sol_fn, write_fn = get_problem_sol_file_pair(n, k)
data = Coords((problem_fn, sol_fn, write_fn))
coords = data.coords
optimal_routes = data.optimal_routes
my_routes, my_cost = read_solution_file(write_fn)
visualize_graph(coords, optimal_routes, 'optimal routes. cost: ' + str(data.cost))
visualize_graph(coords, my_routes, 'my routes. cost: ' + str(my_cost))
if __name__ == '__main__':
main() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"networkx.draw_networkx_edges",
"matplotlib.pyplot.plot",
"numpy.multiply",
"random.uniform",
"numpy.subtract",
"matplotlib.pyplot.axis",
"random.random",
"networkx.Graph",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"... | [((8135, 8160), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {'c': '"""red"""'}), "(xs, ys, c='red')\n", (8143, 8160), True, 'import matplotlib.pyplot as plt\n'), ((8165, 8183), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""beta"""'], {}), "('beta')\n", (8175, 8183), True, 'import matplotlib.pyplot as plt\n'), ((8188, 8207), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (8198, 8207), True, 'import matplotlib.pyplot as plt\n'), ((8212, 8236), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""beta1.svg"""'], {}), "('beta1.svg')\n", (8223, 8236), True, 'import matplotlib.pyplot as plt\n'), ((10159, 10169), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (10167, 10169), True, 'import networkx as nx\n'), ((10489, 10521), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""pos"""'], {}), "(G, 'pos')\n", (10511, 10521), True, 'import networkx as nx\n'), ((10539, 10569), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {}), '(G, pos)\n', (10561, 10569), True, 'import networkx as nx\n'), ((10587, 10636), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'edgelist': 'edgelist'}), '(G, pos, edgelist=edgelist)\n', (10609, 10636), True, 'import networkx as nx\n'), ((10655, 10726), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {'font_size': '(20)', 'font_family': '"""sans-serif"""'}), "(G, pos, font_size=20, font_family='sans-serif')\n", (10678, 10726), True, 'import networkx as nx\n'), ((10732, 10747), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10740, 10747), True, 'import matplotlib.pyplot as plt\n'), ((10752, 10768), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (10761, 10768), True, 'import matplotlib.pyplot as plt\n'), ((10773, 10783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10781, 10783), True, 'import matplotlib.pyplot as plt\n'), ((10802, 10816), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (10813, 10816), False, 'import random\n'), ((3679, 3723), 'numpy.multiply', 'np.multiply', (['self.pheromones_matrix', '(1 - RHO)'], {}), '(self.pheromones_matrix, 1 - RHO)\n', (3690, 3723), True, 'import numpy as np\n'), ((6328, 6343), 'random.random', 'random.random', ([], {}), '()\n', (6341, 6343), False, 'import random\n'), ((3417, 3432), 'random.uniform', 'uniform', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (3424, 3432), False, 'from random import uniform\n'), ((3071, 3100), 'numpy.subtract', 'np.subtract', (['i_coord', 'j_coord'], {}), '(i_coord, j_coord)\n', (3082, 3100), True, 'import numpy as np\n')] |
# This script output to-eng-distance and number of training data
# statistics for each language in UD treebank, according to uriel,
# and then rank them in terms of GEOGRAPHIC distance
import os
import numpy as np
from io import open
from collections import namedtuple
from conllu import parse_incr
LangObj = namedtuple("lang", ["name", "old_id", "new_id", "num_train", "distance", "obj"])
rank_obj = ["GEOGRAPHIC", "GENETIC", "SYNTACTIC"]
distance = np.load("uriel_v0_2/distances/distances.npz")
en_id = np.asscalar(np.where(distance["langs"]=="eng")[0])
# map identifier from language name to iso-639-3
fid = open("statistics/iso-639-3.tab", "r", encoding="utf-8")
_ = fid.readline()
lang_to_id = {}
for line in fid:
line_ = line.strip().split("\t")
lang_to_id[line_[-1]] = line_[0]
print("complete mapping identifier")
fout = open("statistics/distance_all.txt", "w")
fout.write("LANG\t")
for name in distance["sources"]:
fout.write(name + "\t")
fout.write("AVG\t")
fout.write("TRAIN\n")
obj_id = [np.asscalar(np.where(distance["sources"]==x)[0]) for x in rank_obj]
res = {}
cnt = 0
for root, subdirs, files in os.walk("ud-treebanks-v2.2"):
valid_dir = False
for fname in files:
if fname.endswith("conllu"):
valid_dir = True
lang = root.split("/")[1].split("-")[0].split("_")[1]
old_id = fname.split("_")[0]
break
if valid_dir:
if lang in res:
continue
if lang in lang_to_id:
identifier = lang_to_id[lang]
else:
continue
tgt_id = np.asscalar(np.where(distance["langs"]==identifier)[0])
dist = distance["data"][en_id, tgt_id]
train_num = 0
# for fname in files:
# if fname.endswith("conllu"):
# train = fname.strip().split('.')[0].split('-')[-1]
# if train != "train":
# continue
# with open(os.path.join(root, fname), "r", encoding="utf-8") as fdata:
# train_num = len(list(parse_incr(fdata)))
# break
dist_new = sum([dist[x] for x in obj_id]) / len(obj_id)
res[lang] = LangObj(lang, old_id, identifier, train_num, dist, dist_new)
cnt += 1
# if cnt == 10:
# break
for lang_obj in sorted(res.values(), key=lambda s: -s.obj):
fout.write("{}/{}\t".format(lang_obj.name, lang_obj.old_id))
for num in lang_obj.distance:
fout.write("{:.3f}\t".format(num))
fout.write("{:.3f}\t".format(lang_obj.obj))
fout.write("{}\n".format(lang_obj.num_train))
fid.close()
fout.close()
| [
"numpy.load",
"os.walk",
"numpy.where",
"collections.namedtuple",
"io.open"
] | [((311, 396), 'collections.namedtuple', 'namedtuple', (['"""lang"""', "['name', 'old_id', 'new_id', 'num_train', 'distance', 'obj']"], {}), "('lang', ['name', 'old_id', 'new_id', 'num_train', 'distance', 'obj']\n )\n", (321, 396), False, 'from collections import namedtuple\n'), ((454, 499), 'numpy.load', 'np.load', (['"""uriel_v0_2/distances/distances.npz"""'], {}), "('uriel_v0_2/distances/distances.npz')\n", (461, 499), True, 'import numpy as np\n'), ((616, 671), 'io.open', 'open', (['"""statistics/iso-639-3.tab"""', '"""r"""'], {'encoding': '"""utf-8"""'}), "('statistics/iso-639-3.tab', 'r', encoding='utf-8')\n", (620, 671), False, 'from io import open\n'), ((846, 886), 'io.open', 'open', (['"""statistics/distance_all.txt"""', '"""w"""'], {}), "('statistics/distance_all.txt', 'w')\n", (850, 886), False, 'from io import open\n'), ((1138, 1166), 'os.walk', 'os.walk', (['"""ud-treebanks-v2.2"""'], {}), "('ud-treebanks-v2.2')\n", (1145, 1166), False, 'import os\n'), ((521, 557), 'numpy.where', 'np.where', (["(distance['langs'] == 'eng')"], {}), "(distance['langs'] == 'eng')\n", (529, 557), True, 'import numpy as np\n'), ((1035, 1069), 'numpy.where', 'np.where', (["(distance['sources'] == x)"], {}), "(distance['sources'] == x)\n", (1043, 1069), True, 'import numpy as np\n'), ((1607, 1648), 'numpy.where', 'np.where', (["(distance['langs'] == identifier)"], {}), "(distance['langs'] == identifier)\n", (1615, 1648), True, 'import numpy as np\n')] |
__author__ = '<NAME>'
import numpy as np
import time
from rollup.utils import collections
def rollup(ontology,
desired_annotators = 250,
max_iters = 50000,
print_freq = 500,
checkpoints = None,
checkpoint_hook = None
):
"""
Performs a rollup on ontology using a greedy search on current leaves in the ontology
by selecting the leaf whose elimination will yield the minimum standard deviation of
the average information content of all concepts that directly annotate an object
WARNING: THIS WILL MUTATE THE INPUT ONTOLOGY GRAPH BY REMOVING LEAF NODES THAT ARE ROLLED UP,
AND ADDING ANNOTATIONS TO THE CONCEPTS TO WHICH DESCENDANT CONCEPTS ARE ROLLED TO
:param ontology: an instance of rollup.ontology.Ontology
:param desired_annotators: the number of direct annotators after rollup
:param max_iters: maximum number of iterations allowed for rollup
:param print_freq: frequency in iterations to print status
:param checkpoints: list of integers of number of annotators at which checkpoint_hook should be called
:param checkpoint_hook: callable that accepts position args: annotator_count, rollups, rollup_levels, best_lambdas, best_psis
:return: (rollup, rollup_levels, best_lambdas, best_psis)
rollup - dictionary - keys are concepts in the original graph, values are the concepts to which the
given concept represented by the key is rolled up to. For concepts that do not
get rolled up, the key and value are identical
rollup_levels - dictionary - keys are concept ids from ontology graph, values are the highest number
of edges in the original graph between the concept and the concepts it was
rolled to
best_lambdas - list of mean direct annotator IC over all algorithm iterations
best_psis - list of stdev of direct annotator IC over all algorithm iterations
"""
N = ontology.total_annotated_objects()
D = ontology.total_annotators()
annotator_ICs = ontology.annotators_information_content(N)
Gamma = np.sum([x for x in annotator_ICs.values()])
Lambda = Gamma / float(D)
Psi = ic_stdev(annotator_ICs.values(), Lambda, D)
print("Initial Direct Annotator Count:\t{0}".format(D))
print("Initial Direct Annotator Mean IC:\t{0}\n".format(Lambda))
print("Initial Direct Annotator IC Stdev:\t{0}\n".format(Psi))
# initialize variables for logging
best_lambdas = [Lambda]
best_gammas = [Gamma]
best_psis = [Psi]
leaf_counts = []
annotator_counts = []
iterations = 0
rollups = {}
rollup_levels = {}
t0 = time.time()
while D > desired_annotators and iterations < max_iters:
leaf_to_roll = None
best_Gamma = 0.0
best_Psi = float("inf")
best_D = D
leaves = ontology.leaf_nodes()
if not leaves:
print("WARNING: NO LEAVES FOUND IN GRAPH")
iterations += 1
parent_is_object_annotator = {}
leaf_count = 0
for leaf in leaves:
leaf_count += 1
tmp_Gamma = Gamma
parents = ontology.parent_concepts(leaf)
# store leaf attributes to restore later
leaf_ic = ontology.information_content(leaf, N)
for p in parents:
p_node = ontology.graph.node[p]
# need to store parent's current is_visit_annotator
parent_is_object_annotator[p] = p_node[ontology.is_direct_annotator_key]
if not p_node[ontology.is_direct_annotator_key]:
# temporarily add parent to list of annotators for rollup and add ic to Gamma
pic = ontology.information_content(p, N)
annotator_ICs[p] = pic
tmp_Gamma += pic
# temporarily remove leaf from annotator_ICs for stdev computation
del annotator_ICs[leaf]
tmp_Gamma -= leaf_ic
tmp_D = len(annotator_ICs)
tmp_Lambda = tmp_Gamma / float(tmp_D)
tmp_Psi = ic_stdev(annotator_ICs.values(), tmp_Lambda, tmp_D)
if (tmp_Lambda < 0):
print("WARNING: NEGATIVE TMP_LAMBDA")
# store current best values
if tmp_Psi < best_Psi:
best_Psi = tmp_Psi
best_Gamma = tmp_Gamma
leaf_to_roll = leaf
best_D = tmp_D
# restore leaf and parents to current state
annotator_ICs[leaf] = leaf_ic
for p in parents:
if not parent_is_object_annotator[p]:
del annotator_ICs[p]
# END for leaf in leaves
# make sure there is a leaf to roll
if leaf_to_roll == None:
print("WARNING: LEAF_TO_ROLL IS NONE")
break
# update leaf_to_roll parents
leaf_parents = ontology.parent_concepts(leaf_to_roll)
for p in leaf_parents:
p_node = ontology.graph.node[p]
parent_is_object_annotator = p_node[ontology.is_direct_annotator_key]
if not parent_is_object_annotator:
p_node[ontology.is_direct_annotator_key] = True
annotator_ICs[p] = ontology.information_content(p, N)
# roll up leaf_to_roll - remove it from graph and annotator list
ontology.graph.remove_node(leaf_to_roll)
del annotator_ICs[leaf_to_roll]
# update Gamma, N, D
D = best_D
Gamma = best_Gamma
best_gammas.append(Gamma)
Lambda = Gamma / float(D)
best_lambdas.append(Lambda)
Psi = best_Psi
best_psis.append(Psi)
leaf_counts.append(leaf_count)
annotator_counts.append(D)
if iterations % print_freq == 0:
print("Iteration:\t{0}\nD (annotators):\t{1}\nLambda:\t{2}\nPsi:\t{3}\nLeaf count:\t{4}"
.format(iterations, D, Lambda, Psi, leaf_count))
# store roll up as dict{rolled_child:parents}. Requires a check to determine if leaf_to_roll is in
# any of the values of the dict in which case the key will have to be assigned to new parent
prev_corrected_levels = []
for k, obj_list in rollups.items():
if leaf_to_roll in obj_list:
if k not in prev_corrected_levels:
rollup_levels[k] += 1
prev_corrected_levels.append(k)
obj_list.remove(leaf_to_roll)
for p in leaf_parents:
if p not in obj_list:
obj_list.append(p)
rollups[leaf_to_roll] = leaf_parents
rollup_levels[leaf_to_roll] = 1
t1 = time.time()
tdelta = t1 - t0
if iterations % print_freq == 0:
print("Reduction of D to {0} to {1} seconds".format(D, tdelta))
if D in checkpoints:
tmp_rollups = rollups.copy()
tmp_rollup_levels = rollup_levels.copy()
rolled_cids = list(tmp_rollups.keys())
for a in ontology.annotators():
if a not in rolled_cids:
tmp_rollups[a] = [a]
tmp_rollup_levels[a] = 0
checkpoint_hook(D, tmp_rollups, tmp_rollup_levels, best_lambdas, best_psis)
# need to add concepts that were annotators in the original data and were NOT rolled up to the rollup dictionary
# this will be needed to differentiate these concepts from concepts that appear in new data that were not part of
# the ontology segment represented by the dataset used to rollup concepts
rolled_cids = list(rollups.keys())
for a in ontology.annotators():
if a not in rolled_cids:
rollups[a] = [a]
rollup_levels[a] = 0
tf = time.time()
tdelta = tf - t0
print("Total time: {0} seconds".format(tdelta))
return (rollups, rollup_levels, best_lambdas, best_psis)
def serialize_rollups(rollups, file_path):
"""
stores rollup to file
:param rollups: rollup dictionary returned by rollup method
:param file_path: location to store data
:return: None
"""
with open(file_path, 'a+') as f:
first_line = True
for k, obj_list in rollups.items():
if not first_line:
line = "\n{0}:".format(k)
else:
line = "{0}:".format(k)
first_line = False
for v in obj_list:
line = "{0}{1},".format(line, v)
line = line[0:-1]
f.write(line)
def ic_stdev(ic_vals, mean_ic, total_annotators):
return np.sqrt(np.sum([(x - mean_ic) ** 2 for x in ic_vals]) / float(total_annotators))
def serialzie_rollup_levels(rollup_levels, file_path):
"""
stores rollup levels to file
:param rollup_levels: rollup_levels dict returned by rollup method
:param file_path: location to store data
:return: None
"""
with open(file_path, 'a+') as f:
first_line = True
for k, v in rollup_levels.items():
if first_line:
line = "{0},{1}".format(k, v)
first_line = False
else:
line = "\n{0},{1}".format(k, v)
f.write(line)
def map_annotations_with_rollup(rollups, unrolled_annotation_dict):
rolled_annotations_dict = {}
for cid, obj_list in unrolled_annotation_dict.items():
rids = rollups[cid]
for rid in rids:
if rid in rolled_annotations_dict:
rolled_annotations_dict[rid] = collections.merge_lists(rolled_annotations_dict[rid], obj_list)
else:
rolled_annotations_dict[rid] = obj_list
return rolled_annotations_dict
| [
"rollup.utils.collections.merge_lists",
"numpy.sum",
"time.time"
] | [((2804, 2815), 'time.time', 'time.time', ([], {}), '()\n', (2813, 2815), False, 'import time\n'), ((7957, 7968), 'time.time', 'time.time', ([], {}), '()\n', (7966, 7968), False, 'import time\n'), ((6874, 6885), 'time.time', 'time.time', ([], {}), '()\n', (6883, 6885), False, 'import time\n'), ((8797, 8844), 'numpy.sum', 'np.sum', (['[((x - mean_ic) ** 2) for x in ic_vals]'], {}), '([((x - mean_ic) ** 2) for x in ic_vals])\n', (8803, 8844), True, 'import numpy as np\n'), ((9725, 9788), 'rollup.utils.collections.merge_lists', 'collections.merge_lists', (['rolled_annotations_dict[rid]', 'obj_list'], {}), '(rolled_annotations_dict[rid], obj_list)\n', (9748, 9788), False, 'from rollup.utils import collections\n')] |
from __future__ import division
from __future__ import print_function
import codecs
import sys
import time
from os import path
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.framework import ops
from evaluation import eval_translations, Result, extract_translations
class Trainer(object):
def __init__(self, model, num_epochs, data_feeder):
self._model = model
self._num_epochs = num_epochs
self._data_feeder = data_feeder
self._tasks = []
def add_command(self, command):
self._tasks.append(command)
def train(self):
start_time = time.time()
for task in self._tasks:
task.set_start_time(start_time)
current_epoch = 0
while current_epoch < self._num_epochs:
data_feed = self._data_feeder.get_batch()
self._model.step(data_feed)
end_time = time.time()
current_epoch = self._data_feeder.epoch
for task in self._tasks:
task.maybe_execute(current_epoch, end_time)
class SemiSupTrainer(object):
def __init__(self, model, num_epochs, data_feeder, unlabeled_data_feeder):
self._model = model
self._num_epochs = num_epochs
self._data_feeder = data_feeder
self._unlabeled_data_feeder = unlabeled_data_feeder
self._tasks = []
def add_command(self, command):
self._tasks.append(command)
def train(self):
start_time = time.time()
for task in self._tasks:
task.set_start_time(start_time)
current_epoch = 0
while current_epoch < self._num_epochs:
data_feed = self._data_feeder.get_batch()
unlabeled_data_feed = self._unlabeled_data_feeder.get_batch()
self._model.step({'labeled': data_feed, 'unlabeled': unlabeled_data_feed})
end_time = time.time()
current_epoch = self._data_feeder.epoch
for task in self._tasks:
task.maybe_execute(current_epoch, end_time)
class MultitaskTrainer(object):
def __init__(self, model, num_epochs, data_feeders):
self._model = model
self._num_epochs = num_epochs
self._data_feeders = data_feeders
self._tasks = []
def add_command(self, command):
self._tasks.append(command)
def train(self):
start_time = time.time()
for task in self._tasks:
task.set_start_time(start_time)
current_epochs = 0
while current_epochs < self._num_epochs:
data_feeds = [f.get_batch() for f in self._data_feeders]
self._model.step(data_feeds)
end_time = time.time()
current_epochs = self._data_feeders[0].epoch
for task in self._tasks:
task.maybe_execute(current_epochs, end_time)
class SemiSupEpochLossLogger(object):
def __init__(self, model, log_dir):
self._model = model
with tf.variable_scope('losses'):
self._epoch_loss = tf.placeholder(dtype='float32', shape=[], name='epoch_loss')
self._walker_loss = tf.placeholder(dtype='float32', shape=[], name='walker_loss')
self._visit_loss = tf.placeholder(dtype='float32', shape=[], name='visit_loss')
self._logit_loss = tf.placeholder(dtype='float32', shape=[], name='logit_loss')
self._epoch_summary_op = tf.summary.scalar('epoch_loss', self._epoch_loss)
self._walker_summary_op = tf.summary.scalar('walker_loss', self._walker_loss)
self._visit_summary_op = tf.summary.scalar('visit_loss', self._visit_loss)
self._logit_summary_op = tf.summary.scalar('logit_loss', self._logit_loss)
self._summary_op = tf.summary.merge([self._epoch_summary_op, self._walker_summary_op, self._visit_summary_op,
self._logit_summary_op])
self._epoch = 0
self._losses_epoch = []
self._losses_walker = []
self._losses_visit = []
self._losses_logit = []
self._summary_writer = tf.summary.FileWriter(log_dir)
self._update_time = 0
def set_start_time(self, time):
self._update_time = time
def maybe_execute(self, epoch, time):
loss = self._model.loss()
self._losses_epoch.append(loss[0])
self._losses_walker.append(loss[1])
self._losses_visit.append(loss[2])
self._losses_logit.append(loss[3])
if epoch > self._epoch:
average_loss = sum(self._losses_epoch) / len(self._losses_epoch)
average_loss_walker = sum(self._losses_walker) / len(self._losses_walker)
average_loss_visit = sum(self._losses_visit) / len(self._losses_visit)
average_loss_logit = sum(self._losses_logit) / len(self._losses_logit)
summ_str = self._model.session.run(self._summary_op,
{
self._epoch_loss: average_loss,
self._walker_loss: average_loss_walker,
self._visit_loss: average_loss_visit,
self._logit_loss: average_loss_logit
})
self._summary_writer.add_summary(summ_str, epoch)
self._losses_epoch = []
self._losses_walker = []
self._losses_visit = []
self._losses_logit = []
self._epoch = epoch
class EpochLossLogger(object):
def __init__(self, model, log_dir):
self._model = model
self._epoch_loss = tf.placeholder(dtype='float32', shape=[], name='epoch_loss')
self._summary_op = tf.summary.scalar('epoch_loss', self._epoch_loss)
self._epoch = 0
self._losses_epoch = []
self._summary_writer = tf.summary.FileWriter(log_dir)
self._update_time = 0
def set_start_time(self, time):
self._update_time = time
def maybe_execute(self, epoch, time):
self._losses_epoch.append(self._model.loss())
if epoch > self._epoch:
average_loss = sum(self._losses_epoch) / len(self._losses_epoch)
summ_str = self._model.session.run(self._summary_op, {self._epoch_loss: average_loss})
self._summary_writer.add_summary(summ_str, epoch)
self._losses_epoch = []
self._epoch = epoch
class MultitaskEpochLossLogger(object):
def __init__(self, model, log_dir):
self._model = model
num_tasks = model.num_tasks()
self._epoch_loss = [tf.placeholder(dtype='float32', shape=[], name='epoch_loss') for _ in xrange(num_tasks)]
self._summary_ops = [tf.summary.scalar(('epoch_loss_%i' % i), self._epoch_loss[i]) for i in xrange(num_tasks)]
self._epoch = 0
self._losses_epoch = []
self._summary_writer = tf.summary.FileWriter(log_dir)
self._update_time = 0
def set_start_time(self, time):
self._update_time = time
def maybe_execute(self, epoch, time):
self._losses_epoch.append(self._model.losses())
if epoch > self._epoch:
for i, summary_op in enumerate(self._summary_ops):
losses = [t[i] for t in self._losses_epoch]
average_loss = sum(losses) / len(losses)
summ_str = self._model.session.run(summary_op, {self._epoch_loss[i]: average_loss})
self._summary_writer.add_summary(summ_str, epoch)
self._losses_epoch = []
self._epoch = epoch
class BasicStatsLogger(object):
def __init__(self, model, data_feeder, num_epochs, period):
self._model = model
self._data_feeder = data_feeder
self._num_epochs = num_epochs
self._period = period
self._update_time = 0
self._losses = []
def set_start_time(self, time):
self._update_time = time
def maybe_execute(self, epoch, time):
loss = self._model.loss()
if np.isnan(loss):
print('Loss is nan. Last two batches:')
print(self._model.last_examples())
raise NanLoss
self._losses.append(loss)
if time - self._update_time > self._period:
num_steps = len(self._losses)
average_loss = sum(self._losses) / num_steps
step_frequency = num_steps / (time - self._update_time)
progress = self._data_feeder.num_examples_fed / \
(self._data_feeder.num_examples_epoch * self._num_epochs) * 100
print('\rprogress %3.2f %%, loss %6.4f, step rate %3.3f steps/s' %
(progress, average_loss, step_frequency), end='')
sys.stdout.flush()
self._update_time = time
self._losses = []
class SemiSupBasicStatsLogger(object):
def __init__(self, model, data_feeder, num_epochs, period):
self._model = model
self._data_feeder = data_feeder
self._num_epochs = num_epochs
self._period = period
self._update_time = 0
self._losses = []
def set_start_time(self, time):
self._update_time = time
def maybe_execute(self, epoch, time):
loss = self._model.loss()[0]
if np.isnan(loss):
print('Loss is nan. Last two batches:')
print(self._model.last_examples())
raise NanLoss
self._losses.append(loss)
if time - self._update_time > self._period:
num_steps = len(self._losses)
average_loss = sum(self._losses) / num_steps
step_frequency = num_steps / (time - self._update_time)
progress = self._data_feeder.num_examples_fed / \
(self._data_feeder.num_examples_epoch * self._num_epochs) * 100
print('\rprogress %3.2f %%, loss %6.4f, step rate %3.3f steps/s' %
(progress, average_loss, step_frequency), end='')
sys.stdout.flush()
self._update_time = time
self._losses = []
class NanLoss(Exception):
pass
class Evaluation(object):
def __init__(self, model, data_feeder, training_lexicon, num_epochs, log_dir, modes=('all',)):
self._model = model
self._data_feeder = data_feeder
self._training_lexicon = {}
for w_s, w_t in training_lexicon:
if w_s in training_lexicon:
self._training_lexicon[w_s].add(w_t)
else:
self._training_lexicon[w_s] = {w_t}
self._epoch = 0
self._num_epochs = num_epochs
self._log_dir = log_dir
with tf.variable_scope('evaluation') as scope:
self._result_top_placeholders = self._init_summaries('top', modes)
self._result_all_placeholders = self._init_summaries('all', modes)
graph = tf.get_default_graph()
summaries_all = graph.get_collection(tf.GraphKeys.SUMMARIES)
print(summaries_all)
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES, scope='evaluation')
print(summaries)
#summaries = if s.scope ]
self._summary_op = tf.summary.merge(summaries)
self._summary_writer = tf.summary.FileWriter(log_dir)
self._modes = modes
def _init_summaries(self, model_mode, modes):
result_placeholders = {}
dtype = 'float32'
shape = []
for eval_mode in modes:
suffix = '_%s_%s' % (model_mode, eval_mode)
f1_top = tf.placeholder(dtype, shape, 'f1' + suffix)
precision_top = tf.placeholder(dtype, shape, 'precision' + suffix)
recall_top = tf.placeholder(dtype, shape, 'recall' + suffix)
result_placeholder = Result(f1_top, precision_top, recall_top)
tf.summary.scalar('f1' + suffix, f1_top)
tf.summary.scalar('precison' + suffix, f1_top)
tf.summary.scalar('recall' + suffix, f1_top)
result_placeholders[eval_mode] = result_placeholder
return result_placeholders
def set_start_time(self, time):
return
def maybe_execute(self, epoch, time):
if epoch > self._epoch + 3:
threshold = 0.5
summary_feeds = {}
for mode in self._modes: # mode can be all, uniword, multiword
source_words, target_words, predicted_targets = self.predict_translations(threshold, mode)
oov_words = self._model.get_oov_words(target_words)
result_top, result_all = self.evaluate(source_words, target_words, predicted_targets)
self._print_eval(result_top, result_all, oov_words, predicted_targets, target_words, mode)
summary_feeds.update(
{self._result_top_placeholders[mode].precision: result_top.precision,
self._result_top_placeholders[mode].recall: result_top.recall,
self._result_top_placeholders[mode].f1: result_top.f1,
self._result_all_placeholders[mode].precision: result_all.precision,
self._result_all_placeholders[mode].recall: result_all.recall,
self._result_all_placeholders[mode].f1: result_all.f1})
summary = self._model.session.run(self._summary_op, summary_feeds)
self._summary_writer.add_summary(summary, epoch)
self._epoch = epoch
if epoch == self._num_epochs:
thresholds = [t/10 for t in xrange(0, 10, 1)] + [0.92, .94, .96, .98]
for mode in self._modes:
results_top, results_all, predicted_translations = self._eval_thresholds(thresholds, mode)
self._predicted_translations_to_file(predicted_translations, thresholds, mode)
self._results_to_file(results_top, results_all, mode)
def _results_to_file(self, results_top, results_all, mode):
df = pd.DataFrame(
{'precision_1': results_top.precision,
'precision_all': results_all.precision,
'recall_1': results_top.recall,
'recall_all': results_all.recall,
'f1_1': results_top.f1,
'f1_all': results_all.f1}
)
results_file = path.join(self._log_dir, 'results.%s.csv' % mode)
df.to_csv(results_file)
def _predicted_translations_to_file(self, predicted_translations, thresholds, mode):
predicted_translations_f = path.join(self._log_dir, 'translations.%s.txt' % mode)
with codecs.open(predicted_translations_f, 'wb', 'utf-8') as f:
for t, predicted_translations_t in zip(thresholds, predicted_translations):
f.write('threshold %1.1f\n' % t)
for ts in predicted_translations_t:
ts = [t[0] for t in ts]
f.write('\t'.join(ts))
f.write('\n')
f.write('\n')
def _eval_thresholds(self, thresholds, mode):
prec_1_vals, prec_all_vals = np.zeros(shape=(len(thresholds))), np.zeros(shape=(len(thresholds)))
rec_1_vals, rec_all_vals = np.zeros(shape=(len(thresholds))), np.zeros(shape=(len(thresholds)))
f1_1_vals, f1_all_vals = np.zeros(shape=(len(thresholds))), np.zeros(shape=(len(thresholds)))
predicted_targets = []
for i, threshold in enumerate(thresholds):
source_words, target_words, predicted_targets_threshold = self.predict_translations(threshold, mode)
result_top, result_all = self.evaluate(source_words, target_words, predicted_targets_threshold)
prec_1_vals[i] = result_top.precision
prec_all_vals[i] = result_all.precision
rec_1_vals[i] = result_top.recall
rec_all_vals[i] = result_all.recall
f1_1_vals[i] = result_top.f1
f1_all_vals[i] = result_all.f1
predicted_targets.append(predicted_targets_threshold)
results_top = Result(f1_1_vals, prec_1_vals, rec_1_vals)
results_all = Result(f1_all_vals, prec_all_vals, rec_all_vals)
return results_top, results_all, predicted_targets
def _print_eval(self, result_top, result_all, oov_words, predicted_translations, target_words, mode):
print()
print('Evaluation of %s on %s' % (mode, self._data_feeder.name))
print(round(len(oov_words) / len(target_words) * 100, 2), '% out of training vocabulary.')
print('predicted @1: ')
for w_pred, w_true in zip(predicted_translations, target_words):
string1 = w_true + ':'
string2 = w_pred[0][0] + ',' + str(w_pred[0][1]) if w_pred else ''
string = string1 + string2
print(string.encode('utf-8'), ' ', end='')
print()
print('recall@1: %3.2f' % result_top.recall)
print('precision@1: %3.2f' % result_top.precision)
print('f1@1: %3.2f' % result_top.f1)
print('recall@all: %3.2f' % result_all.recall)
print('precision@all: %3.2f' % result_all.precision)
print('f1@all: %3.2f' % result_all.f1)
def evaluate(self, source_words, target_words, predicted_targets):
top_1_translation_pairs, translation_pairs = extract_translations(
self._training_lexicon, source_words, predicted_targets)
groundtruth = zip(source_words, target_words)
result_top = eval_translations(groundtruth, top_1_translation_pairs)
result_all = eval_translations(groundtruth, translation_pairs)
return result_top, result_all
def predict_translations(self, threshold, mode):
data_feeder = self._data_feeder
eval_data_epoch = data_feeder.epoch
predict_targets = []
source_words = []
target_words = []
while data_feeder.epoch == eval_data_epoch:
data_feed = data_feeder.get_batch()
source_words_batch = []
target_words_batch = []
for word_s, word_t in data_feed:
is_mwu_pair = ' ' in word_s or ' ' in word_t
if mode == 'mwu':
if is_mwu_pair:
source_words_batch.append(word_s)
target_words_batch.append(word_t)
elif mode == 'uniword':
if not is_mwu_pair:
source_words_batch.append(word_s)
target_words_batch.append(word_t)
else:
source_words_batch.append(word_s)
target_words_batch.append(word_t)
source_words.extend(source_words_batch)
target_words.extend(target_words_batch)
predicted_targets_batch = self._model.predict(
source_words_batch, source2target=True, threshold=threshold)
predict_targets.extend(predicted_targets_batch)
return source_words, target_words, predict_targets
| [
"pandas.DataFrame",
"codecs.open",
"tensorflow.summary.scalar",
"evaluation.eval_translations",
"evaluation.extract_translations",
"evaluation.Result",
"numpy.isnan",
"time.time",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.get_default_graph",
"tensorflow.summary.FileWri... | [((605, 616), 'time.time', 'time.time', ([], {}), '()\n', (614, 616), False, 'import time\n'), ((1374, 1385), 'time.time', 'time.time', ([], {}), '()\n', (1383, 1385), False, 'import time\n'), ((2184, 2195), 'time.time', 'time.time', ([], {}), '()\n', (2193, 2195), False, 'import time\n'), ((3750, 3780), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (3771, 3780), True, 'import tensorflow as tf\n'), ((5218, 5278), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float32"""', 'shape': '[]', 'name': '"""epoch_loss"""'}), "(dtype='float32', shape=[], name='epoch_loss')\n", (5232, 5278), True, 'import tensorflow as tf\n'), ((5302, 5351), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""epoch_loss"""', 'self._epoch_loss'], {}), "('epoch_loss', self._epoch_loss)\n", (5319, 5351), True, 'import tensorflow as tf\n'), ((5427, 5457), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (5448, 5457), True, 'import tensorflow as tf\n'), ((6384, 6414), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (6405, 6414), True, 'import tensorflow as tf\n'), ((7396, 7410), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (7404, 7410), True, 'import numpy as np\n'), ((8514, 8528), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (8522, 8528), True, 'import numpy as np\n'), ((10269, 10299), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (10290, 10299), True, 'import tensorflow as tf\n'), ((12700, 12916), 'pandas.DataFrame', 'pd.DataFrame', (["{'precision_1': results_top.precision, 'precision_all': results_all.\n precision, 'recall_1': results_top.recall, 'recall_all': results_all.\n recall, 'f1_1': results_top.f1, 'f1_all': results_all.f1}"], {}), "({'precision_1': results_top.precision, 'precision_all':\n results_all.precision, 'recall_1': results_top.recall, 'recall_all':\n results_all.recall, 'f1_1': results_top.f1, 'f1_all': results_all.f1})\n", (12712, 12916), True, 'import pandas as pd\n'), ((12975, 13024), 'os.path.join', 'path.join', (['self._log_dir', "('results.%s.csv' % mode)"], {}), "(self._log_dir, 'results.%s.csv' % mode)\n", (12984, 13024), False, 'from os import path\n'), ((13172, 13226), 'os.path.join', 'path.join', (['self._log_dir', "('translations.%s.txt' % mode)"], {}), "(self._log_dir, 'translations.%s.txt' % mode)\n", (13181, 13226), False, 'from os import path\n'), ((14529, 14571), 'evaluation.Result', 'Result', (['f1_1_vals', 'prec_1_vals', 'rec_1_vals'], {}), '(f1_1_vals, prec_1_vals, rec_1_vals)\n', (14535, 14571), False, 'from evaluation import eval_translations, Result, extract_translations\n'), ((14590, 14638), 'evaluation.Result', 'Result', (['f1_all_vals', 'prec_all_vals', 'rec_all_vals'], {}), '(f1_all_vals, prec_all_vals, rec_all_vals)\n', (14596, 14638), False, 'from evaluation import eval_translations, Result, extract_translations\n'), ((15684, 15761), 'evaluation.extract_translations', 'extract_translations', (['self._training_lexicon', 'source_words', 'predicted_targets'], {}), '(self._training_lexicon, source_words, predicted_targets)\n', (15704, 15761), False, 'from evaluation import eval_translations, Result, extract_translations\n'), ((15836, 15891), 'evaluation.eval_translations', 'eval_translations', (['groundtruth', 'top_1_translation_pairs'], {}), '(groundtruth, top_1_translation_pairs)\n', (15853, 15891), False, 'from evaluation import eval_translations, Result, extract_translations\n'), ((15909, 15958), 'evaluation.eval_translations', 'eval_translations', (['groundtruth', 'translation_pairs'], {}), '(groundtruth, translation_pairs)\n', (15926, 15958), False, 'from evaluation import eval_translations, Result, extract_translations\n'), ((849, 860), 'time.time', 'time.time', ([], {}), '()\n', (858, 860), False, 'import time\n'), ((1733, 1744), 'time.time', 'time.time', ([], {}), '()\n', (1742, 1744), False, 'import time\n'), ((2446, 2457), 'time.time', 'time.time', ([], {}), '()\n', (2455, 2457), False, 'import time\n'), ((2704, 2731), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""losses"""'], {}), "('losses')\n", (2721, 2731), True, 'import tensorflow as tf\n'), ((2758, 2818), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float32"""', 'shape': '[]', 'name': '"""epoch_loss"""'}), "(dtype='float32', shape=[], name='epoch_loss')\n", (2772, 2818), True, 'import tensorflow as tf\n'), ((2845, 2906), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float32"""', 'shape': '[]', 'name': '"""walker_loss"""'}), "(dtype='float32', shape=[], name='walker_loss')\n", (2859, 2906), True, 'import tensorflow as tf\n'), ((2932, 2992), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float32"""', 'shape': '[]', 'name': '"""visit_loss"""'}), "(dtype='float32', shape=[], name='visit_loss')\n", (2946, 2992), True, 'import tensorflow as tf\n'), ((3018, 3078), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float32"""', 'shape': '[]', 'name': '"""logit_loss"""'}), "(dtype='float32', shape=[], name='logit_loss')\n", (3032, 3078), True, 'import tensorflow as tf\n'), ((3110, 3159), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""epoch_loss"""', 'self._epoch_loss'], {}), "('epoch_loss', self._epoch_loss)\n", (3127, 3159), True, 'import tensorflow as tf\n'), ((3192, 3243), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""walker_loss"""', 'self._walker_loss'], {}), "('walker_loss', self._walker_loss)\n", (3209, 3243), True, 'import tensorflow as tf\n'), ((3275, 3324), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""visit_loss"""', 'self._visit_loss'], {}), "('visit_loss', self._visit_loss)\n", (3292, 3324), True, 'import tensorflow as tf\n'), ((3356, 3405), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""logit_loss"""', 'self._logit_loss'], {}), "('logit_loss', self._logit_loss)\n", (3373, 3405), True, 'import tensorflow as tf\n'), ((3431, 3551), 'tensorflow.summary.merge', 'tf.summary.merge', (['[self._epoch_summary_op, self._walker_summary_op, self._visit_summary_op,\n self._logit_summary_op]'], {}), '([self._epoch_summary_op, self._walker_summary_op, self.\n _visit_summary_op, self._logit_summary_op])\n', (3447, 3551), True, 'import tensorflow as tf\n'), ((6105, 6165), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float32"""', 'shape': '[]', 'name': '"""epoch_loss"""'}), "(dtype='float32', shape=[], name='epoch_loss')\n", (6119, 6165), True, 'import tensorflow as tf\n'), ((6219, 6278), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('epoch_loss_%i' % i)", 'self._epoch_loss[i]'], {}), "('epoch_loss_%i' % i, self._epoch_loss[i])\n", (6236, 6278), True, 'import tensorflow as tf\n'), ((8024, 8042), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8040, 8042), False, 'import sys\n'), ((9142, 9160), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9158, 9160), False, 'import sys\n'), ((9732, 9763), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""evaluation"""'], {}), "('evaluation')\n", (9749, 9763), True, 'import tensorflow as tf\n'), ((9934, 9956), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (9954, 9956), True, 'import tensorflow as tf\n'), ((10214, 10241), 'tensorflow.summary.merge', 'tf.summary.merge', (['summaries'], {}), '(summaries)\n', (10230, 10241), True, 'import tensorflow as tf\n'), ((10532, 10575), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', 'shape', "('f1' + suffix)"], {}), "(dtype, shape, 'f1' + suffix)\n", (10546, 10575), True, 'import tensorflow as tf\n'), ((10598, 10648), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', 'shape', "('precision' + suffix)"], {}), "(dtype, shape, 'precision' + suffix)\n", (10612, 10648), True, 'import tensorflow as tf\n'), ((10668, 10715), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', 'shape', "('recall' + suffix)"], {}), "(dtype, shape, 'recall' + suffix)\n", (10682, 10715), True, 'import tensorflow as tf\n'), ((10743, 10784), 'evaluation.Result', 'Result', (['f1_top', 'precision_top', 'recall_top'], {}), '(f1_top, precision_top, recall_top)\n', (10749, 10784), False, 'from evaluation import eval_translations, Result, extract_translations\n'), ((10791, 10831), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('f1' + suffix)", 'f1_top'], {}), "('f1' + suffix, f1_top)\n", (10808, 10831), True, 'import tensorflow as tf\n'), ((10838, 10884), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('precison' + suffix)", 'f1_top'], {}), "('precison' + suffix, f1_top)\n", (10855, 10884), True, 'import tensorflow as tf\n'), ((10891, 10935), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('recall' + suffix)", 'f1_top'], {}), "('recall' + suffix, f1_top)\n", (10908, 10935), True, 'import tensorflow as tf\n'), ((13236, 13288), 'codecs.open', 'codecs.open', (['predicted_translations_f', '"""wb"""', '"""utf-8"""'], {}), "(predicted_translations_f, 'wb', 'utf-8')\n", (13247, 13288), False, 'import codecs\n')] |
import numpy as np
from copy import copy
class NoiseCreator():
'''
Factory that build the action noise responsible for the exploration of the agent.
'''
def __init__(self):
self.builders = {
'OU': lambda size, kwargs : OUActionNoise(size, **kwargs),
'scaling_OU': lambda size, kwargs : Scaling_OUActionNoise(size, **kwargs),
'gaussian': lambda size, kwargs : GaussianNoise(size, **kwargs)
}
def create(self, noise, size, kwargs):
return self.builders[noise](size, kwargs)
class OUActionNoise:
"""
Ornstein-Uhlenbeck process.
Temporally correlated noise that has a zero mean used to add
exploratioin to the deterministic policy.
"""
def __init__(self,
size,
mu,
theta,
sigma):
"""
mu : mean
theta : attraction of the mean
sigma : magnitude of the wiener steps
"""
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = np.zeros(size)
self.size = size
def sample(self):
'''
sample a new point from the OU process, update the state and return a noise
of the asked size.
'''
dx = self.theta * (self.mu - self.state) + self.sigma * np.random.normal()
x = self.state + dx
self.state = x
return x
def sample_multipe(self, noise_size):
'''
sample a new point from the OU process, update the state and return a noise
of the asked size.
'''
dx = self.theta * (self.mu * np.ones(self.size) - self.state) + self.sigma * np.random.normal(size=self.state.shape)
x = self.state + dx
self.state = x
return np.squeeze(np.tile(x, (noise_size, 1)))
def reset(self):
'''
reset the temporal correlation.
'''
self.state = copy(self.mu)
def update(self):
pass
def __repr__(self):
return f'OrnsteinUhlenbeckActionNoise(mu={self.mu}, sigma={self.sigma})'
class Scaling_OUActionNoise(OUActionNoise):
"""
Ornstein-Uhlenbeck process.
Temporally correlated noise that has a zero mean used to add
exploratioin to the deterministic policy.
"""
def __init__(self,
size,
mu,
theta, theta_max, theta_grow,
sigma, sigma_min, sigma_decay):
super().__init__(size, mu, theta, sigma)
self.theta_init = theta
self.theta_max = theta_max
self.theta_grow = theta_grow
self.sigma_init = sigma
self.sigma_min = sigma_min
self.sigma_decay = sigma_decay
def update(self):
self.sigma = max(self.sigma_min, self.sigma * self.sigma_decay)
self.theta = min(self.theta_max, self.theta * self.theta_grow)
def __repr__(self):
representation =(
f'OrnsteinUhlenbeckActionNoise(mu={self.mu}, sigma={round(self.sigma, 4)}/{round(self.sigma_min, 4)}, '+
f'theta={round(self.theta, 4)}/{round(self.theta_max, 4)})')
return representation
class GaussianNoise:
'''
Simple gaussian noise.
'''
def __init__(self,
size,
mu,
sigma):
self.size = size
self.mu = mu
def sample(self):
return np.random.normal(loc=self.mu, scale=self.sigma, size=self.size)
def update(self):
return | [
"numpy.zeros",
"numpy.ones",
"copy.copy",
"numpy.tile",
"numpy.random.normal"
] | [((1086, 1100), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1094, 1100), True, 'import numpy as np\n'), ((1953, 1966), 'copy.copy', 'copy', (['self.mu'], {}), '(self.mu)\n', (1957, 1966), False, 'from copy import copy\n'), ((3429, 3492), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'self.mu', 'scale': 'self.sigma', 'size': 'self.size'}), '(loc=self.mu, scale=self.sigma, size=self.size)\n', (3445, 3492), True, 'import numpy as np\n'), ((1817, 1844), 'numpy.tile', 'np.tile', (['x', '(noise_size, 1)'], {}), '(x, (noise_size, 1))\n', (1824, 1844), True, 'import numpy as np\n'), ((1348, 1366), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (1364, 1366), True, 'import numpy as np\n'), ((1699, 1738), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.state.shape'}), '(size=self.state.shape)\n', (1715, 1738), True, 'import numpy as np\n'), ((1651, 1669), 'numpy.ones', 'np.ones', (['self.size'], {}), '(self.size)\n', (1658, 1669), True, 'import numpy as np\n')] |
# The entire file is mostly a replication of the open-source implementation
# Source:
# https://github.com/susanli2016/PyCon-Canada-2019-NLP-Tutorial/blob/master/BBC%20News_LSTM.ipynb
import csv
import json
import re
import sys
import os
import tensorflow as tf
import numpy as np
import nltk
nltk.download("stopwords")
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from pprint import pprint
from nltk.corpus import stopwords
# This is so that the following imports work
sys.path.append(os.path.realpath("."))
import src.utils as utils
from src.config import *
from src.utils import *
STOPWORDS = set(stopwords.words("english"))
# Our default configuration
VOCAB_SIZE = 5000
EMBEDDING_DIM = 64
MAX_LENGTH = 200
TRUNC_TYPE = "post"
PADDING_TYPE = "post"
OOV_TOK = "<OOV>"
TRAINING_PORTION = 0.8
NUM_EPOCHS = 10
def get_articles_and_labels(reviews, labels=[]):
"""
Given a list of reviews (in unified json format) returns a tuple of review, category
We call them articles and labels
Ideally if you want to implement the LSTM classifier for a different data-set, you should only have to change this function
"""
articles = []
# In this process we clean up the original labels
# This is required because these labels will be tokenized afterwards
# We need to maintain a mapping of what the original labels were
original_label_to_clean_label = {}
# We go through the list of reviews
for review in reviews:
article = review[MESSAGE]
label = review[DERIVED_INSIGHTS][CATEGORY]
# Now we remove stopwords
for word in STOPWORDS:
token = " " + word + " "
article = article.replace(token, " ")
article = article.replace(" ", " ")
# Remove leading and ending spaces
article.strip()
# We remove the empty/usless articles
if article == "" or article == NA_STRING:
continue
# Cleaning up the labels
cleaned_label = re.sub(r"\W+", "", label)
original_label_to_clean_label[cleaned_label] = label
articles.append(article)
labels.append(cleaned_label)
return (articles, labels, original_label_to_clean_label)
def split_data(articles, labels):
train_size = int(len(articles) * TRAINING_PORTION)
train_articles = articles[0:train_size]
train_labels = labels[0:train_size]
validation_articles = articles[train_size:]
validation_labels = labels[train_size:]
return (train_articles, train_labels, validation_articles, validation_labels)
def train(articles, labels):
print("[LOG] LSTM pre-processing started")
# Splitting the data into training and validation set
train_articles, train_labels, validation_articles, validation_labels = split_data(
articles, labels
)
# Now we tokenize the articles and labels
# We find the token for each word and store it.
article_tokenizer = Tokenizer(num_words=VOCAB_SIZE, oov_token=OOV_TOK)
article_tokenizer.fit_on_texts(train_articles)
# After we have the token for the top (5000) VOCAB_SIZE words
# We convert the text to a list of tokens
train_sequences = article_tokenizer.texts_to_sequences(train_articles)
validation_sequences = article_tokenizer.texts_to_sequences(validation_articles)
# We pad/truncate the sequences to appropriate length
train_padded = pad_sequences(
train_sequences, maxlen=MAX_LENGTH, padding=PADDING_TYPE, truncating=TRUNC_TYPE
)
validation_padded = pad_sequences(
validation_sequences,
maxlen=MAX_LENGTH,
padding=PADDING_TYPE,
truncating=TRUNC_TYPE,
)
# We do the same things on labels also
label_tokenizer = Tokenizer()
label_tokenizer.fit_on_texts(labels)
training_label_seq = np.array(label_tokenizer.texts_to_sequences(train_labels))
validation_label_seq = np.array(
label_tokenizer.texts_to_sequences(validation_labels)
)
print("[LOG] LSTM pre-processing completed")
print("[LOG] LSTM building model started")
model = tf.keras.Sequential(
[
# Add an Embedding layer expecting input vocab of size 5000, and output
# embedding dimension of size 64 we set at the top
tf.keras.layers.Embedding(VOCAB_SIZE, EMBEDDING_DIM),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(EMBEDDING_DIM)),
# use ReLU in place of tanh function since they are very good
# alternatives of each other.
tf.keras.layers.Dense(EMBEDDING_DIM, activation="relu"),
# Add a Dense layer with 6 units and softmax activation.
# When we have multiple outputs, softmax convert outputs layers into a
# probability distribution.
tf.keras.layers.Dense(len(set(labels)) + 1, activation="softmax"),
]
)
model.summary()
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
print("[LOG] LSTM building model completed")
print("[LOG] LSTM training model started")
# Getting down to business
model.fit(
train_padded,
training_label_seq,
epochs=NUM_EPOCHS,
validation_data=(validation_padded, validation_label_seq),
verbose=2,
)
print("[LOG] LSTM training model completed")
return model, article_tokenizer, label_tokenizer
def train_lstm_model():
app_configs = open_json(APP_CONFIG_FILE.format(file_name=APP_CONFIG_FILE_NAME))
# We process all algorithms on parsed data for each app
for app in app_configs:
app_config = open_json(APP_CONFIG_FILE.format(file_name=app))
# If the app json schema is not valid, we don't execute any thing.
if not utils.validate_app_config(app_config):
return
app_config = decrypt_config(app_config)
if not (
CATEGORIZATION_ALGORITHM in app_config
and app_config[CATEGORIZATION_ALGORITHM] == LSTM_CLASSIFIER
):
continue
# Loading the REVIEW's
reviews = utils.open_json(
PROCESSED_INTEGRATED_REVIEW_FILE.format(app_name=app_config[APP])
)
# reviews = utils.filter_reviews(reviews, app_config)
articles, labels, original_label_to_clean_label = get_articles_and_labels(
reviews
)
trained_model, article_tokenizer, label_tokenizer = train(articles, labels)
if not os.path.exists(TRAINED_MODELS):
os.makedirs(TRAINED_MODELS)
trained_model.save(LSTM_TRAINED_MODEL_FILE.format(app_name=app_config[APP]))
# Saving the tokenizers
dump_json(
article_tokenizer.to_json(),
LSTM_ARTICLE_TOKENIZER_FILE.format(app_name=app_config[APP]),
)
# Saving the tokenizers
dump_json(
label_tokenizer.to_json(),
LSTM_LABEL_TOKENIZER_FILE.format(app_name=app_config[APP]),
)
def predict_labels(articles, app_config, model, article_tokenizer, label_tokenizer):
""" Given an article we predict the label """
# Convert the give article to tokens
tokenized_articles = article_tokenizer.texts_to_sequences(articles)
# Add padding
tokenized_articles = pad_sequences(
tokenized_articles,
maxlen=MAX_LENGTH,
padding=PADDING_TYPE,
truncating=TRUNC_TYPE,
)
# Predict the label
predictions = model.predict(tokenized_articles)
# Now to find out the label, we have to do a reverse index search on the
# label_tokens
label_names = dict(
(token, label_name)
for (label_name, token) in label_tokenizer.word_index.items()
)
labels = [label_names[np.argmax(prediction)] for prediction in predictions]
return labels
if __name__ == "__main__":
train_lstm_model()
| [
"tensorflow.keras.preprocessing.text.Tokenizer",
"os.makedirs",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"os.path.realpath",
"src.utils.validate_app_config",
"os.path.exists",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.LSTM",
"nltk.corpus.stopwords.wo... | [((294, 320), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (307, 320), False, 'import nltk\n'), ((569, 590), 'os.path.realpath', 'os.path.realpath', (['"""."""'], {}), "('.')\n", (585, 590), False, 'import os\n'), ((685, 711), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (700, 711), False, 'from nltk.corpus import stopwords\n'), ((3017, 3067), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'VOCAB_SIZE', 'oov_token': 'OOV_TOK'}), '(num_words=VOCAB_SIZE, oov_token=OOV_TOK)\n', (3026, 3067), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((3470, 3568), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'maxlen': 'MAX_LENGTH', 'padding': 'PADDING_TYPE', 'truncating': 'TRUNC_TYPE'}), '(train_sequences, maxlen=MAX_LENGTH, padding=PADDING_TYPE,\n truncating=TRUNC_TYPE)\n', (3483, 3568), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((3603, 3706), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['validation_sequences'], {'maxlen': 'MAX_LENGTH', 'padding': 'PADDING_TYPE', 'truncating': 'TRUNC_TYPE'}), '(validation_sequences, maxlen=MAX_LENGTH, padding=PADDING_TYPE,\n truncating=TRUNC_TYPE)\n', (3616, 3706), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((3808, 3819), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (3817, 3819), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((7378, 7479), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['tokenized_articles'], {'maxlen': 'MAX_LENGTH', 'padding': 'PADDING_TYPE', 'truncating': 'TRUNC_TYPE'}), '(tokenized_articles, maxlen=MAX_LENGTH, padding=PADDING_TYPE,\n truncating=TRUNC_TYPE)\n', (7391, 7479), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2068, 2093), 're.sub', 're.sub', (['"""\\\\W+"""', '""""""', 'label'], {}), "('\\\\W+', '', label)\n", (2074, 2093), False, 'import re\n'), ((4350, 4402), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['VOCAB_SIZE', 'EMBEDDING_DIM'], {}), '(VOCAB_SIZE, EMBEDDING_DIM)\n', (4375, 4402), True, 'import tensorflow as tf\n'), ((4612, 4667), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['EMBEDDING_DIM'], {'activation': '"""relu"""'}), "(EMBEDDING_DIM, activation='relu')\n", (4633, 4667), True, 'import tensorflow as tf\n'), ((5864, 5901), 'src.utils.validate_app_config', 'utils.validate_app_config', (['app_config'], {}), '(app_config)\n', (5889, 5901), True, 'import src.utils as utils\n'), ((6576, 6606), 'os.path.exists', 'os.path.exists', (['TRAINED_MODELS'], {}), '(TRAINED_MODELS)\n', (6590, 6606), False, 'import os\n'), ((6620, 6647), 'os.makedirs', 'os.makedirs', (['TRAINED_MODELS'], {}), '(TRAINED_MODELS)\n', (6631, 6647), False, 'import os\n'), ((7844, 7865), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (7853, 7865), True, 'import numpy as np\n'), ((4446, 4481), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['EMBEDDING_DIM'], {}), '(EMBEDDING_DIM)\n', (4466, 4481), True, 'import tensorflow as tf\n')] |
import os
import os.path as osp
import shutil
import numpy as np
from mne import set_config
from moabb.datasets.download import (
fs_get_file_hash,
fs_get_file_id,
fs_get_file_list,
get_dataset_path,
)
from moabb.utils import set_download_dir
from pooch import HTTPDownloader, Unzip, retrieve
BEETL_URL = "https://ndownloader.figshare.com/files/"
class BeetlDataset:
def __init__(self, figshare_id, code, subject_list):
self.figshare_id = figshare_id
self.code = code
self.subject_list = subject_list
def data_path(self, subject):
pass
def download(self, path=None, subjects=None):
"""Download datasets for sleep task
Parameters
----------
path: str | None
Path to download the data, store in ~/mne_data if None
subjects: list | None
list of subject, default=None to select all subjects
Returns
--------
path: str
path to the downloaded data
"""
if path:
set_download_dir(path)
set_config("MNE_DATASETS_{}_PATH".format(self.code.upper()), path)
# TODO: Fix FileExistsError: [Errno 17] File exists: '/Users/X/test_compet in
# moabb/utils.py in set_download_dir(path), l. 54
subjects = self.subject_list if subjects is None else subjects
# Competition files
spath = []
for s in subjects:
spath.append(self.data_path(s))
return osp.dirname(spath[-1][0])
def get_data(self, subjects=None):
pass
class BeetlSleepTutorial(BeetlDataset):
def __init__(self):
super().__init__(
figshare_id=14779407,
code="beetlsleeptutorial",
subject_list=range(10),
)
def data_path(self, subject):
sign = self.code
key_dest = "MNE-{:s}-data".format(sign.lower())
path = osp.join(get_dataset_path(sign, None), key_dest)
filelist = fs_get_file_list(self.figshare_id)
reg = fs_get_file_hash(filelist)
fsn = fs_get_file_id(filelist)
spath = []
for f in fsn.keys():
if not osp.exists(osp.join(path, "s{}r1X.npy".format(subject))):
retrieve(
BEETL_URL + fsn[f],
reg[fsn[f]],
fsn[f],
path,
processor=Unzip(),
downloader=HTTPDownloader(progressbar=True),
)
zpath = osp.join(path, fsn[f] + ".unzip")
for i in self.subject_list:
fx, fy = "s{}r1X.npy".format(i), "s{}r1y.npy".format(i)
shutil.move(osp.join(zpath, fx), osp.join(path, fx))
shutil.move(osp.join(zpath, fy), osp.join(path, fy))
shutil.move(
osp.join(zpath, "headerInfo.npy"), osp.join(path, "headerInfo.npy")
)
os.rmdir(osp.join(path, fsn[f] + ".unzip"))
spath.append(osp.join(path, "s{}r1X.npy".format(subject)))
spath.append(osp.join(path, "s{}r1y.npy".format(subject)))
spath.append(osp.join(path, "headerInfo.npy"))
return spath
def get_data(self, path=None, subjects=None):
"""Get data as list of numpy array, labels and metadata
Parameters
----------
path: str | None
Path to download the data, store in ~/mne_data if None
subjects: list | None
list of subject, default=None to select all subjects
Returns
--------
X: ndarray, shape (n_trials, n_electrodes, n_samples)
ndarray for EEG signal
y: ndarray, shape (n_trials)
label for the EEG signal
metadata: mne.Info
metadata of acquisition as mne.Info
"""
subjects = self.subject_list if subjects is None else subjects
spath = []
for s in subjects:
files = self.data_path(s)
for f in files:
if osp.basename(f) != "headerInfo.npy":
spath.append(f)
else:
hd = f
spath.append(hd)
X, y, meta = [], [], []
for p in spath:
d = np.load(p, allow_pickle=True)
if osp.basename(p)[4] == "X":
X.append(d)
elif osp.basename(p)[4] == "y":
y.append(d)
elif osp.basename(p) == "headerInfo.npy":
meta = d
X = np.concatenate(X)
y = np.concatenate(y)
return X, y, meta
class BeetlSleepSource(BeetlDataset):
def __init__(self):
super().__init__(
figshare_id=14839659,
code="beetlsleepsource",
subject_list=range(39),
)
def data_path(self, subject):
sign = self.code
key_dest = "MNE-{:s}-data".format(sign.lower())
path = osp.join(get_dataset_path(sign, None), key_dest)
filelist = fs_get_file_list(self.figshare_id)
reg = fs_get_file_hash(filelist)
fsn = fs_get_file_id(filelist)
spath = []
for f in fsn.keys():
if not osp.exists(osp.join(path, "training_s{}r1X.npy".format(subject))):
retrieve(
BEETL_URL + fsn[f],
reg[fsn[f]],
fsn[f],
path,
processor=Unzip(),
downloader=HTTPDownloader(progressbar=True),
)
zpath = osp.join(path, fsn[f] + ".unzip", "SleepSource")
for i in self.subject_list:
for s in [1, 2]:
fx, fy = (
"training_s{}r{}X.npy".format(i, s),
"training_s{}r{}y.npy".format(i, s),
)
shutil.move(osp.join(zpath, fx), osp.join(path, fx))
shutil.move(osp.join(zpath, fy), osp.join(path, fy))
shutil.move(
osp.join(zpath, "headerInfo.npy"), osp.join(path, "headerInfo.npy")
)
os.rmdir(osp.join(path, fsn[f] + ".unzip", "SleepSource"))
os.rmdir(osp.join(path, fsn[f] + ".unzip"))
for s in [1, 2]:
spath.append(osp.join(path, "training_s{}r{}X.npy".format(subject, s)))
spath.append(osp.join(path, "training_s{}r{}y.npy".format(subject, s)))
spath.append(osp.join(path, "headerInfo.npy"))
return spath
def get_data(self, path=None, subjects=None):
"""Get data as list of numpy array, labels and metadata
Parameters
----------
path: str | None
Path to download the data, store in ~/mne_data if None
subjects: list | None
list of subject, default=None to select all subjects
Returns
--------
X: ndarray, shape (n_trials, n_electrodes, n_samples)
ndarray for EEG signal
y: ndarray, shape (n_trials)
label for the EEG signal
metadata: mne.Info
metadata of acquisition as mne.Info
"""
subjects = self.subject_list if subjects is None else subjects
spath = []
for s in subjects:
files = self.data_path(s)
for f in files:
if osp.basename(f) != "headerInfo.npy":
spath.append(f)
else:
hd = f
spath.append(hd)
X, y, meta = [], [], []
for p in spath:
d = np.load(p, allow_pickle=True)
if osp.basename(p).split(".")[0][-1] == "X":
X.append(d)
elif osp.basename(p).split(".")[0][-1] == "y":
y.append(d)
elif osp.basename(p) == "headerInfo.npy":
meta = d
X = np.concatenate(X)
y = np.concatenate(y)
return X, y, meta
class BeetlSleepLeaderboard(BeetlDataset):
def __init__(self):
super().__init__(
figshare_id=14839653,
code="beetlsleepleaderboard",
subject_list=range(18),
)
def data_path(self, subject):
sign = self.code
key_dest = "MNE-{:s}-data".format(sign.lower())
path = osp.join(get_dataset_path(sign, None), key_dest)
filelist = fs_get_file_list(self.figshare_id)
reg = fs_get_file_hash(filelist)
fsn = fs_get_file_id(filelist)
spath = []
for f in fsn.keys():
if not osp.exists(osp.join(path, "sleep_target", "leaderboard_s0r1X.npy")):
retrieve(
BEETL_URL + fsn[f],
reg[fsn[f]],
fsn[f],
path,
processor=Unzip(),
downloader=HTTPDownloader(progressbar=True),
)
zpath = osp.join(path, fsn[f] + ".unzip", "LeaderboardSleep")
os.mkdir(osp.join(path, "sleep_target"))
os.mkdir(osp.join(path, "testing"))
zptr = osp.join(zpath, "sleep_target")
ptr = osp.join(path, "sleep_target")
zpte = osp.join(zpath, "testing")
pte = osp.join(path, "testing")
for i in self.subject_list:
for s in [1, 2]:
if i < 6:
fx = "leaderboard_s{}r{}X.npy".format(i, s)
fy = "leaderboard_s{}r{}y.npy".format(i, s)
shutil.move(osp.join(zptr, fx), osp.join(ptr, fx))
shutil.move(osp.join(zptr, fy), osp.join(ptr, fy))
else:
fx = "leaderboard_s{}r{}X.npy".format(i, s)
shutil.move(osp.join(zpte, fx), osp.join(pte, fx))
hi = "headerInfo.npy"
shutil.move(osp.join(zptr, hi), osp.join(ptr, hi))
os.rmdir(zptr)
os.rmdir(zpte)
os.rmdir(zpath)
os.rmdir(osp.join(path, fsn[f] + ".unzip"))
for s in [1, 2]:
if subject < 6:
fd = "sleep_target"
fy = "leaderboard_s{}r{}y.npy".format(subject, s)
spath.append(osp.join(path, fd, fy))
else:
fd = "testing"
fx = "leaderboard_s{}r{}X.npy".format(subject, s)
spath.append(osp.join(path, fd, fx))
spath.append(osp.join(path, "sleep_target", "headerInfo.npy"))
return spath
def get_data(self, path=None, subjects=None):
"""Get data as list of numpy array, labels and metadata
Parameters
----------
path: str | None
Path to download the data, store in ~/mne_data if None
subjects: list | None
list of subject, default=None to select all subjects
Returns
--------
X_target: ndarray, shape (n_trials, n_electrodes, n_samples)
ndarray for labeled EEG signal
y_target: ndarray, shape (n_trials)
label for the EEG signal
X_testing: ndarray, shape (n_trials, n_electrodes, n_samples)
ndarray for unlabeled EEG signal
metadata: mne.Info
metadata of acquisition as mne.Info
"""
subjects = self.subject_list if subjects is None else subjects
spath = []
for s in subjects:
files = self.data_path(s)
for f in files:
if osp.basename(f) != "headerInfo.npy":
spath.append(f)
else:
hd = f
spath.append(hd)
X_target, y_target, X_testing, meta = [], [], [], []
for p in spath:
d = np.load(p, allow_pickle=True)
if osp.basename(p).split(".")[0][-1] == "X":
if int(osp.basename(p).split("s")[1].split("r")[0]) < 6:
X_target.append(d)
else:
X_testing.append(d)
elif osp.basename(p).split(".")[0][-1] == "y":
y_target.append(d)
elif osp.basename(p) == "headerInfo.npy":
meta = d
X_target = np.concatenate(X_target) if len(X_target) > 0 else np.array(X_target)
X_testing = (
np.concatenate(X_testing) if len(X_testing) > 0 else np.array(X_testing)
)
y_target = np.concatenate(y_target) if len(y_target) > 0 else np.array(y_target)
return X_target, y_target, X_testing, meta
class BeetlMILeaderboard(BeetlDataset):
def __init__(self):
super().__init__(
figshare_id=14839650,
code="beetlMIleaderboard",
subject_list=range(1, 6),
)
def data_path(self, subject):
sign = self.code
key_dest = "MNE-{:s}-data".format(sign.lower())
path = osp.join(get_dataset_path(sign, None), key_dest)
filelist = fs_get_file_list(self.figshare_id)
reg = fs_get_file_hash(filelist)
fsn = fs_get_file_id(filelist)
for f in fsn.keys():
if not osp.exists(osp.join(path, "S1", "training", "race1_padsData.npy")):
retrieve(
BEETL_URL + fsn[f],
reg[fsn[f]],
fsn[f],
path,
processor=Unzip(),
downloader=HTTPDownloader(progressbar=True),
)
zpath = osp.join(path, fsn[f] + ".unzip", "leaderboardMI")
for s in range(1, 6):
os.mkdir(osp.join(path, "S{}".format(s)))
os.mkdir(osp.join(path, "S{}".format(s), "training"))
os.mkdir(osp.join(path, "S{}".format(s), "testing"))
for s in self.subject_list:
zptr = osp.join(zpath, "S{}".format(s), "training")
zpte = osp.join(zpath, "S{}".format(s), "testing")
ptr = osp.join(path, "S{}".format(s), "training")
pte = osp.join(path, "S{}".format(s), "testing")
if s < 3:
for i in range(1, 6):
fx = "race{}_padsData.npy".format(i)
fy = "race{}_padsLabel.npy".format(i)
shutil.move(osp.join(zptr, fx), osp.join(ptr, fx))
shutil.move(osp.join(zptr, fy), osp.join(ptr, fy))
for i in range(6, 16):
fx = "race{}_padsData.npy".format(i)
shutil.move(osp.join(zpte, fx), osp.join(pte, fx))
else:
fx = "training_s{}X.npy".format(s)
fy = "training_s{}y.npy".format(s)
tfx = "testing_s{}X.npy".format(s)
shutil.move(osp.join(zptr, fx), osp.join(ptr, fx))
shutil.move(osp.join(zptr, fy), osp.join(ptr, fy))
shutil.move(osp.join(zpte, tfx), osp.join(pte, tfx))
os.rmdir(zptr)
os.rmdir(zpte)
zpths = osp.join(zpath, "S{}".format(s))
os.rmdir(zpths)
os.rmdir(osp.join(path, fsn[f] + ".unzip", "leaderboardMI"))
os.rmdir(osp.join(path, fsn[f] + ".unzip"))
spath = []
ptr = osp.join(path, "S{}".format(subject), "training")
pte = osp.join(path, "S{}".format(subject), "testing")
if subject < 3:
for i in range(1, 6):
fx = "race{}_padsData.npy".format(i)
fy = "race{}_padsLabel.npy".format(i)
spath.append(osp.join(ptr, fx))
spath.append(osp.join(ptr, fy))
for i in range(6, 16):
fx = "race{}_padsData.npy".format(i)
spath.append(osp.join(pte, fx))
else:
fx = "training_s{}X.npy".format(subject)
fy = "training_s{}y.npy".format(subject)
tfx = "testing_s{}X.npy".format(subject)
spath.append(osp.join(ptr, fx))
spath.append(osp.join(ptr, fy))
spath.append(osp.join(pte, tfx))
return spath
def get_data(self, path=None, dataset="A"):
"""Get data as list of numpy array, labels and metadata
Parameters
----------
path: str | None
Path to download the data, store in ~/mne_data if None
dataset: str
'A' or 'B' for leaderboard datasets
Returns
--------
X_target: ndarray, shape (n_trials, n_electrodes, n_samples)
ndarray for labeled EEG signal
y_target: ndarray, shape (n_trials)
label for the EEG signal
X_testing: ndarray, shape (n_trials, n_electrodes, n_samples)
ndarray for unlabeled EEG signal
"""
if dataset == "A":
subjects = range(1, 3)
elif dataset == "B":
subjects = range(3, 6)
else:
raise ValueError("leaderboard dataset should be A or B")
spath = []
for s in subjects:
files = self.data_path(s)
for f in files:
spath.append(f)
X_target, y_target, X_testing = [], [], []
for p in spath:
d = np.load(p, allow_pickle=True)
if osp.basename(p)[-5] == "l" or osp.basename(p)[-5] == "y":
y_target.append(d)
elif osp.basename(p).startswith("training"):
X_target.append(d)
elif osp.basename(p).startswith("testing"):
X_testing.append(d)
elif int(osp.basename(p)[4]) < 6 and osp.basename(p)[5] == "_":
X_target.append(d)
else:
X_testing.append(d)
X_target = np.concatenate(X_target)
X_testing = np.concatenate(X_testing)
y_target = np.concatenate(y_target)
return X_target, y_target, X_testing
| [
"numpy.load",
"moabb.datasets.download.get_dataset_path",
"os.path.basename",
"os.path.dirname",
"moabb.datasets.download.fs_get_file_id",
"moabb.datasets.download.fs_get_file_list",
"moabb.datasets.download.fs_get_file_hash",
"moabb.utils.set_download_dir",
"pooch.HTTPDownloader",
"numpy.array",
... | [((1510, 1535), 'os.path.dirname', 'osp.dirname', (['spath[-1][0]'], {}), '(spath[-1][0])\n', (1521, 1535), True, 'import os.path as osp\n'), ((2000, 2034), 'moabb.datasets.download.fs_get_file_list', 'fs_get_file_list', (['self.figshare_id'], {}), '(self.figshare_id)\n', (2016, 2034), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((2049, 2075), 'moabb.datasets.download.fs_get_file_hash', 'fs_get_file_hash', (['filelist'], {}), '(filelist)\n', (2065, 2075), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((2090, 2114), 'moabb.datasets.download.fs_get_file_id', 'fs_get_file_id', (['filelist'], {}), '(filelist)\n', (2104, 2114), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((4561, 4578), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (4575, 4578), True, 'import numpy as np\n'), ((4591, 4608), 'numpy.concatenate', 'np.concatenate', (['y'], {}), '(y)\n', (4605, 4608), True, 'import numpy as np\n'), ((5042, 5076), 'moabb.datasets.download.fs_get_file_list', 'fs_get_file_list', (['self.figshare_id'], {}), '(self.figshare_id)\n', (5058, 5076), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((5091, 5117), 'moabb.datasets.download.fs_get_file_hash', 'fs_get_file_hash', (['filelist'], {}), '(filelist)\n', (5107, 5117), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((5132, 5156), 'moabb.datasets.download.fs_get_file_id', 'fs_get_file_id', (['filelist'], {}), '(filelist)\n', (5146, 5156), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((7951, 7968), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (7965, 7968), True, 'import numpy as np\n'), ((7981, 7998), 'numpy.concatenate', 'np.concatenate', (['y'], {}), '(y)\n', (7995, 7998), True, 'import numpy as np\n'), ((8442, 8476), 'moabb.datasets.download.fs_get_file_list', 'fs_get_file_list', (['self.figshare_id'], {}), '(self.figshare_id)\n', (8458, 8476), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((8491, 8517), 'moabb.datasets.download.fs_get_file_hash', 'fs_get_file_hash', (['filelist'], {}), '(filelist)\n', (8507, 8517), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((8532, 8556), 'moabb.datasets.download.fs_get_file_id', 'fs_get_file_id', (['filelist'], {}), '(filelist)\n', (8546, 8556), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((13091, 13125), 'moabb.datasets.download.fs_get_file_list', 'fs_get_file_list', (['self.figshare_id'], {}), '(self.figshare_id)\n', (13107, 13125), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((13140, 13166), 'moabb.datasets.download.fs_get_file_hash', 'fs_get_file_hash', (['filelist'], {}), '(filelist)\n', (13156, 13166), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((13181, 13205), 'moabb.datasets.download.fs_get_file_id', 'fs_get_file_id', (['filelist'], {}), '(filelist)\n', (13195, 13205), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((18022, 18046), 'numpy.concatenate', 'np.concatenate', (['X_target'], {}), '(X_target)\n', (18036, 18046), True, 'import numpy as np\n'), ((18067, 18092), 'numpy.concatenate', 'np.concatenate', (['X_testing'], {}), '(X_testing)\n', (18081, 18092), True, 'import numpy as np\n'), ((18112, 18136), 'numpy.concatenate', 'np.concatenate', (['y_target'], {}), '(y_target)\n', (18126, 18136), True, 'import numpy as np\n'), ((1051, 1073), 'moabb.utils.set_download_dir', 'set_download_dir', (['path'], {}), '(path)\n', (1067, 1073), False, 'from moabb.utils import set_download_dir\n'), ((1940, 1968), 'moabb.datasets.download.get_dataset_path', 'get_dataset_path', (['sign', 'None'], {}), '(sign, None)\n', (1956, 1968), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((3189, 3221), 'os.path.join', 'osp.join', (['path', '"""headerInfo.npy"""'], {}), "(path, 'headerInfo.npy')\n", (3197, 3221), True, 'import os.path as osp\n'), ((4298, 4327), 'numpy.load', 'np.load', (['p'], {'allow_pickle': '(True)'}), '(p, allow_pickle=True)\n', (4305, 4327), True, 'import numpy as np\n'), ((4982, 5010), 'moabb.datasets.download.get_dataset_path', 'get_dataset_path', (['sign', 'None'], {}), '(sign, None)\n', (4998, 5010), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((6549, 6581), 'os.path.join', 'osp.join', (['path', '"""headerInfo.npy"""'], {}), "(path, 'headerInfo.npy')\n", (6557, 6581), True, 'import os.path as osp\n'), ((7658, 7687), 'numpy.load', 'np.load', (['p'], {'allow_pickle': '(True)'}), '(p, allow_pickle=True)\n', (7665, 7687), True, 'import numpy as np\n'), ((8382, 8410), 'moabb.datasets.download.get_dataset_path', 'get_dataset_path', (['sign', 'None'], {}), '(sign, None)\n', (8398, 8410), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((10607, 10655), 'os.path.join', 'osp.join', (['path', '"""sleep_target"""', '"""headerInfo.npy"""'], {}), "(path, 'sleep_target', 'headerInfo.npy')\n", (10615, 10655), True, 'import os.path as osp\n'), ((11898, 11927), 'numpy.load', 'np.load', (['p'], {'allow_pickle': '(True)'}), '(p, allow_pickle=True)\n', (11905, 11927), True, 'import numpy as np\n'), ((12351, 12375), 'numpy.concatenate', 'np.concatenate', (['X_target'], {}), '(X_target)\n', (12365, 12375), True, 'import numpy as np\n'), ((12402, 12420), 'numpy.array', 'np.array', (['X_target'], {}), '(X_target)\n', (12410, 12420), True, 'import numpy as np\n'), ((12455, 12480), 'numpy.concatenate', 'np.concatenate', (['X_testing'], {}), '(X_testing)\n', (12469, 12480), True, 'import numpy as np\n'), ((12508, 12527), 'numpy.array', 'np.array', (['X_testing'], {}), '(X_testing)\n', (12516, 12527), True, 'import numpy as np\n'), ((12557, 12581), 'numpy.concatenate', 'np.concatenate', (['y_target'], {}), '(y_target)\n', (12571, 12581), True, 'import numpy as np\n'), ((12608, 12626), 'numpy.array', 'np.array', (['y_target'], {}), '(y_target)\n', (12616, 12626), True, 'import numpy as np\n'), ((13031, 13059), 'moabb.datasets.download.get_dataset_path', 'get_dataset_path', (['sign', 'None'], {}), '(sign, None)\n', (13047, 13059), False, 'from moabb.datasets.download import fs_get_file_hash, fs_get_file_id, fs_get_file_list, get_dataset_path\n'), ((17516, 17545), 'numpy.load', 'np.load', (['p'], {'allow_pickle': '(True)'}), '(p, allow_pickle=True)\n', (17523, 17545), True, 'import numpy as np\n'), ((2539, 2572), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')"], {}), "(path, fsn[f] + '.unzip')\n", (2547, 2572), True, 'import os.path as osp\n'), ((5590, 5638), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')", '"""SleepSource"""'], {}), "(path, fsn[f] + '.unzip', 'SleepSource')\n", (5598, 5638), True, 'import os.path as osp\n'), ((8992, 9045), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')", '"""LeaderboardSleep"""'], {}), "(path, fsn[f] + '.unzip', 'LeaderboardSleep')\n", (9000, 9045), True, 'import os.path as osp\n'), ((9178, 9209), 'os.path.join', 'osp.join', (['zpath', '"""sleep_target"""'], {}), "(zpath, 'sleep_target')\n", (9186, 9209), True, 'import os.path as osp\n'), ((9232, 9262), 'os.path.join', 'osp.join', (['path', '"""sleep_target"""'], {}), "(path, 'sleep_target')\n", (9240, 9262), True, 'import os.path as osp\n'), ((9286, 9312), 'os.path.join', 'osp.join', (['zpath', '"""testing"""'], {}), "(zpath, 'testing')\n", (9294, 9312), True, 'import os.path as osp\n'), ((9335, 9360), 'os.path.join', 'osp.join', (['path', '"""testing"""'], {}), "(path, 'testing')\n", (9343, 9360), True, 'import os.path as osp\n'), ((10080, 10094), 'os.rmdir', 'os.rmdir', (['zptr'], {}), '(zptr)\n', (10088, 10094), False, 'import os\n'), ((10111, 10125), 'os.rmdir', 'os.rmdir', (['zpte'], {}), '(zpte)\n', (10119, 10125), False, 'import os\n'), ((10142, 10157), 'os.rmdir', 'os.rmdir', (['zpath'], {}), '(zpath)\n', (10150, 10157), False, 'import os\n'), ((10562, 10584), 'os.path.join', 'osp.join', (['path', 'fd', 'fx'], {}), '(path, fd, fx)\n', (10570, 10584), True, 'import os.path as osp\n'), ((13621, 13671), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')", '"""leaderboardMI"""'], {}), "(path, fsn[f] + '.unzip', 'leaderboardMI')\n", (13629, 13671), True, 'import os.path as osp\n'), ((16276, 16293), 'os.path.join', 'osp.join', (['ptr', 'fx'], {}), '(ptr, fx)\n', (16284, 16293), True, 'import os.path as osp\n'), ((16320, 16337), 'os.path.join', 'osp.join', (['ptr', 'fy'], {}), '(ptr, fy)\n', (16328, 16337), True, 'import os.path as osp\n'), ((16364, 16382), 'os.path.join', 'osp.join', (['pte', 'tfx'], {}), '(pte, tfx)\n', (16372, 16382), True, 'import os.path as osp\n'), ((2888, 2921), 'os.path.join', 'osp.join', (['zpath', '"""headerInfo.npy"""'], {}), "(zpath, 'headerInfo.npy')\n", (2896, 2921), True, 'import os.path as osp\n'), ((2923, 2955), 'os.path.join', 'osp.join', (['path', '"""headerInfo.npy"""'], {}), "(path, 'headerInfo.npy')\n", (2931, 2955), True, 'import os.path as osp\n'), ((2999, 3032), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')"], {}), "(path, fsn[f] + '.unzip')\n", (3007, 3032), True, 'import os.path as osp\n'), ((4079, 4094), 'os.path.basename', 'osp.basename', (['f'], {}), '(f)\n', (4091, 4094), True, 'import os.path as osp\n'), ((4343, 4358), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (4355, 4358), True, 'import os.path as osp\n'), ((6114, 6147), 'os.path.join', 'osp.join', (['zpath', '"""headerInfo.npy"""'], {}), "(zpath, 'headerInfo.npy')\n", (6122, 6147), True, 'import os.path as osp\n'), ((6149, 6181), 'os.path.join', 'osp.join', (['path', '"""headerInfo.npy"""'], {}), "(path, 'headerInfo.npy')\n", (6157, 6181), True, 'import os.path as osp\n'), ((6225, 6273), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')", '"""SleepSource"""'], {}), "(path, fsn[f] + '.unzip', 'SleepSource')\n", (6233, 6273), True, 'import os.path as osp\n'), ((6300, 6333), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')"], {}), "(path, fsn[f] + '.unzip')\n", (6308, 6333), True, 'import os.path as osp\n'), ((7439, 7454), 'os.path.basename', 'osp.basename', (['f'], {}), '(f)\n', (7451, 7454), True, 'import os.path as osp\n'), ((8635, 8690), 'os.path.join', 'osp.join', (['path', '"""sleep_target"""', '"""leaderboard_s0r1X.npy"""'], {}), "(path, 'sleep_target', 'leaderboard_s0r1X.npy')\n", (8643, 8690), True, 'import os.path as osp\n'), ((9071, 9101), 'os.path.join', 'osp.join', (['path', '"""sleep_target"""'], {}), "(path, 'sleep_target')\n", (9079, 9101), True, 'import os.path as osp\n'), ((9128, 9153), 'os.path.join', 'osp.join', (['path', '"""testing"""'], {}), "(path, 'testing')\n", (9136, 9153), True, 'import os.path as osp\n'), ((10025, 10043), 'os.path.join', 'osp.join', (['zptr', 'hi'], {}), '(zptr, hi)\n', (10033, 10043), True, 'import os.path as osp\n'), ((10045, 10062), 'os.path.join', 'osp.join', (['ptr', 'hi'], {}), '(ptr, hi)\n', (10053, 10062), True, 'import os.path as osp\n'), ((10183, 10216), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')"], {}), "(path, fsn[f] + '.unzip')\n", (10191, 10216), True, 'import os.path as osp\n'), ((10402, 10424), 'os.path.join', 'osp.join', (['path', 'fd', 'fy'], {}), '(path, fd, fy)\n', (10410, 10424), True, 'import os.path as osp\n'), ((11650, 11665), 'os.path.basename', 'osp.basename', (['f'], {}), '(f)\n', (11662, 11665), True, 'import os.path as osp\n'), ((13265, 13319), 'os.path.join', 'osp.join', (['path', '"""S1"""', '"""training"""', '"""race1_padsData.npy"""'], {}), "(path, 'S1', 'training', 'race1_padsData.npy')\n", (13273, 13319), True, 'import os.path as osp\n'), ((15251, 15265), 'os.rmdir', 'os.rmdir', (['zptr'], {}), '(zptr)\n', (15259, 15265), False, 'import os\n'), ((15286, 15300), 'os.rmdir', 'os.rmdir', (['zpte'], {}), '(zpte)\n', (15294, 15300), False, 'import os\n'), ((15382, 15397), 'os.rmdir', 'os.rmdir', (['zpths'], {}), '(zpths)\n', (15390, 15397), False, 'import os\n'), ((15423, 15473), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')", '"""leaderboardMI"""'], {}), "(path, fsn[f] + '.unzip', 'leaderboardMI')\n", (15431, 15473), True, 'import os.path as osp\n'), ((15500, 15533), 'os.path.join', 'osp.join', (['path', "(fsn[f] + '.unzip')"], {}), "(path, fsn[f] + '.unzip')\n", (15508, 15533), True, 'import os.path as osp\n'), ((15875, 15892), 'os.path.join', 'osp.join', (['ptr', 'fx'], {}), '(ptr, fx)\n', (15883, 15892), True, 'import os.path as osp\n'), ((15923, 15940), 'os.path.join', 'osp.join', (['ptr', 'fy'], {}), '(ptr, fy)\n', (15931, 15940), True, 'import os.path as osp\n'), ((16059, 16076), 'os.path.join', 'osp.join', (['pte', 'fx'], {}), '(pte, fx)\n', (16067, 16076), True, 'import os.path as osp\n'), ((2423, 2430), 'pooch.Unzip', 'Unzip', ([], {}), '()\n', (2428, 2430), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((2463, 2495), 'pooch.HTTPDownloader', 'HTTPDownloader', ([], {'progressbar': '(True)'}), '(progressbar=True)\n', (2477, 2495), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((2725, 2744), 'os.path.join', 'osp.join', (['zpath', 'fx'], {}), '(zpath, fx)\n', (2733, 2744), True, 'import os.path as osp\n'), ((2746, 2764), 'os.path.join', 'osp.join', (['path', 'fx'], {}), '(path, fx)\n', (2754, 2764), True, 'import os.path as osp\n'), ((2798, 2817), 'os.path.join', 'osp.join', (['zpath', 'fy'], {}), '(zpath, fy)\n', (2806, 2817), True, 'import os.path as osp\n'), ((2819, 2837), 'os.path.join', 'osp.join', (['path', 'fy'], {}), '(path, fy)\n', (2827, 2837), True, 'import os.path as osp\n'), ((4415, 4430), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (4427, 4430), True, 'import os.path as osp\n'), ((4487, 4502), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (4499, 4502), True, 'import os.path as osp\n'), ((5474, 5481), 'pooch.Unzip', 'Unzip', ([], {}), '()\n', (5479, 5481), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((5514, 5546), 'pooch.HTTPDownloader', 'HTTPDownloader', ([], {'progressbar': '(True)'}), '(progressbar=True)\n', (5528, 5546), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((7877, 7892), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (7889, 7892), True, 'import os.path as osp\n'), ((8876, 8883), 'pooch.Unzip', 'Unzip', ([], {}), '()\n', (8881, 8883), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((8916, 8948), 'pooch.HTTPDownloader', 'HTTPDownloader', ([], {'progressbar': '(True)'}), '(progressbar=True)\n', (8930, 8948), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((12270, 12285), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (12282, 12285), True, 'import os.path as osp\n'), ((13505, 13512), 'pooch.Unzip', 'Unzip', ([], {}), '()\n', (13510, 13512), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((13545, 13577), 'pooch.HTTPDownloader', 'HTTPDownloader', ([], {'progressbar': '(True)'}), '(progressbar=True)\n', (13559, 13577), False, 'from pooch import HTTPDownloader, Unzip, retrieve\n'), ((17561, 17576), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (17573, 17576), True, 'import os.path as osp\n'), ((17591, 17606), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (17603, 17606), True, 'import os.path as osp\n'), ((17671, 17686), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (17683, 17686), True, 'import os.path as osp\n'), ((5947, 5966), 'os.path.join', 'osp.join', (['zpath', 'fx'], {}), '(zpath, fx)\n', (5955, 5966), True, 'import os.path as osp\n'), ((5968, 5986), 'os.path.join', 'osp.join', (['path', 'fx'], {}), '(path, fx)\n', (5976, 5986), True, 'import os.path as osp\n'), ((6024, 6043), 'os.path.join', 'osp.join', (['zpath', 'fy'], {}), '(zpath, fy)\n', (6032, 6043), True, 'import os.path as osp\n'), ((6045, 6063), 'os.path.join', 'osp.join', (['path', 'fy'], {}), '(path, fy)\n', (6053, 6063), True, 'import os.path as osp\n'), ((15040, 15058), 'os.path.join', 'osp.join', (['zptr', 'fx'], {}), '(zptr, fx)\n', (15048, 15058), True, 'import os.path as osp\n'), ((15060, 15077), 'os.path.join', 'osp.join', (['ptr', 'fx'], {}), '(ptr, fx)\n', (15068, 15077), True, 'import os.path as osp\n'), ((15115, 15133), 'os.path.join', 'osp.join', (['zptr', 'fy'], {}), '(zptr, fy)\n', (15123, 15133), True, 'import os.path as osp\n'), ((15135, 15152), 'os.path.join', 'osp.join', (['ptr', 'fy'], {}), '(ptr, fy)\n', (15143, 15152), True, 'import os.path as osp\n'), ((15190, 15209), 'os.path.join', 'osp.join', (['zpte', 'tfx'], {}), '(zpte, tfx)\n', (15198, 15209), True, 'import os.path as osp\n'), ((15211, 15229), 'os.path.join', 'osp.join', (['pte', 'tfx'], {}), '(pte, tfx)\n', (15219, 15229), True, 'import os.path as osp\n'), ((17763, 17778), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (17775, 17778), True, 'import os.path as osp\n'), ((7703, 7718), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (7715, 7718), True, 'import os.path as osp\n'), ((9660, 9678), 'os.path.join', 'osp.join', (['zptr', 'fx'], {}), '(zptr, fx)\n', (9668, 9678), True, 'import os.path as osp\n'), ((9680, 9697), 'os.path.join', 'osp.join', (['ptr', 'fx'], {}), '(ptr, fx)\n', (9688, 9697), True, 'import os.path as osp\n'), ((9739, 9757), 'os.path.join', 'osp.join', (['zptr', 'fy'], {}), '(zptr, fy)\n', (9747, 9757), True, 'import os.path as osp\n'), ((9759, 9776), 'os.path.join', 'osp.join', (['ptr', 'fy'], {}), '(ptr, fy)\n', (9767, 9776), True, 'import os.path as osp\n'), ((9920, 9938), 'os.path.join', 'osp.join', (['zpte', 'fx'], {}), '(zpte, fx)\n', (9928, 9938), True, 'import os.path as osp\n'), ((9940, 9957), 'os.path.join', 'osp.join', (['pte', 'fx'], {}), '(pte, fx)\n', (9948, 9957), True, 'import os.path as osp\n'), ((11943, 11958), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (11955, 11958), True, 'import os.path as osp\n'), ((14492, 14510), 'os.path.join', 'osp.join', (['zptr', 'fx'], {}), '(zptr, fx)\n', (14500, 14510), True, 'import os.path as osp\n'), ((14512, 14529), 'os.path.join', 'osp.join', (['ptr', 'fx'], {}), '(ptr, fx)\n', (14520, 14529), True, 'import os.path as osp\n'), ((14571, 14589), 'os.path.join', 'osp.join', (['zptr', 'fy'], {}), '(zptr, fy)\n', (14579, 14589), True, 'import os.path as osp\n'), ((14591, 14608), 'os.path.join', 'osp.join', (['ptr', 'fy'], {}), '(ptr, fy)\n', (14599, 14608), True, 'import os.path as osp\n'), ((14762, 14780), 'os.path.join', 'osp.join', (['zpte', 'fx'], {}), '(zpte, fx)\n', (14770, 14780), True, 'import os.path as osp\n'), ((14782, 14799), 'os.path.join', 'osp.join', (['pte', 'fx'], {}), '(pte, fx)\n', (14790, 14799), True, 'import os.path as osp\n'), ((7790, 7805), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (7802, 7805), True, 'import os.path as osp\n'), ((12176, 12191), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (12188, 12191), True, 'import os.path as osp\n'), ((17887, 17902), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (17899, 17902), True, 'import os.path as osp\n'), ((17859, 17874), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (17871, 17874), True, 'import os.path as osp\n'), ((12008, 12023), 'os.path.basename', 'osp.basename', (['p'], {}), '(p)\n', (12020, 12023), True, 'import os.path as osp\n')] |
from collections import namedtuple
from inspect import getargspec
import numpy as np
import torch
from torch import optim
import torch.nn as nn
from utils import *
from action_utils import *
from ic3net_envs import predator_prey_env
Transition = namedtuple('Transition', ('state', 'action', 'action_out', 'value', 'episode_mask', 'episode_mini_mask', 'next_state',
'reward', 'misc'))
class Evaluator:
def __init__(self, args, policy_net, env):
self.args = args
self.policy_net = policy_net
self.env = env
self.display = args.display
self.last_step = False
def run_episode(self, epoch=1):
all_comms = []
episode = []
reset_args = getargspec(self.env.reset).args
if 'epoch' in reset_args:
state = self.env.reset(epoch)
else:
state = self.env.reset()
should_display = self.display # and self.last_step
if should_display:
self.env.display()
stat = dict()
info = dict()
switch_t = -1
prev_hid = torch.zeros(1, self.args.nagents, self.args.hid_size)
comms_to_prey_loc = {} # record action 0
comms_to_prey_act = {} # record action 1
comms_to_loc_full = {} # record all
comm_action_episode = np.zeros(self.args.max_steps)
for t in range(self.args.max_steps):
misc = dict()
info['step_t'] = t
if t == 0 and self.args.hard_attn and self.args.commnet:
info['comm_action'] = np.zeros(self.args.nagents, dtype=int)
# Hardcoded to record communication for agent 1 (prey)
info['record_comms'] = 1
# recurrence over time
if self.args.recurrent:
if self.args.rnn_type == 'LSTM' and t == 0:
prev_hid = self.policy_net.init_hidden(batch_size=state.shape[0])
x = [state, prev_hid]
action_out, value, prev_hid, proto_comms = self.policy_net(x, info)
# if isinstance(self.env.env.env, predator_prey_env.PredatorPreyEnv):
if self.args.env_name == 'predator_prey':
tuple_comms = tuple(proto_comms.detach().numpy())
if t < 2:
if comms_to_prey_loc.get(tuple_comms) is None:
comms_to_prey_loc[tuple_comms] = []
comms_to_prey_loc[tuple_comms].append(tuple(self.env.env.env.prey_loc[0]))
elif self.args.env_name == 'traffic_junction':
# print("car loc", self.env.env.car_loc)
# print("paths", self.env.env.car_loc)
for i in range(0, len(self.env.env.car_loc)):
p = self.env.env.car_loc[i]
# print(p)
proto = proto_comms[0][i]
action_i = self.env.env.car_last_act[i]
if self.env.env.car_route_loc[i] != -1:
if p[0] == 0 and p[1] == 0:
continue
# print("path", p, proto.shape)
tuple_comms = tuple(proto)
# print("tuple comms", proto.shape)
if comms_to_loc_full.get(tuple_comms) is None:
comms_to_loc_full[tuple_comms] = []
comms_to_loc_full[tuple_comms].append(tuple(p))
# print(action_i)
if action_i == 0:
if comms_to_prey_loc.get(tuple_comms) is None:
comms_to_prey_loc[tuple_comms] = []
# print("path", self.env.env.chosen_path[0])
comms_to_prey_loc[tuple_comms].append(tuple(p))
else:
if comms_to_prey_act.get(tuple_comms) is None:
comms_to_prey_act[tuple_comms] = []
comms_to_prey_act[tuple_comms].append(tuple(p))
if (t + 1) % self.args.detach_gap == 0:
if self.args.rnn_type == 'LSTM':
prev_hid = (prev_hid[0].detach(), prev_hid[1].detach())
else:
prev_hid = prev_hid.detach()
else:
x = state
action_out, value, proto_comms = self.policy_net(x, info)
# if isinstance(self.env.env.env, predator_prey_env.PredatorPreyEnv):
if self.args.env_name == 'predator_prey':
tuple_comms = tuple(proto_comms.detach().numpy())
if comms_to_prey_loc.get(tuple_comms) is None:
comms_to_prey_loc[tuple_comms] = []
comms_to_prey_loc[tuple_comms].append(tuple(self.env.env.env.prey_loc[0]))
elif self.args.env_name == 'traffic_junction':
# print("car loc", self.env.env.car_loc)
# print("paths", self.env.env.car_loc)
for i in range(0, len(self.env.env.car_loc)):
p = self.env.env.car_loc[i]
# print(p)
proto = proto_comms[0][i]
action_i = self.env.env.car_last_act[i]
if self.env.env.car_route_loc[i] != -1:
# print("path", p, proto.shape)
tuple_comms = tuple(proto)
# print("tuple comms", proto.shape)
if comms_to_loc_full.get(tuple_comms) is None:
comms_to_loc_full[tuple_comms] = []
comms_to_loc_full[tuple_comms].append(tuple(p))
# print(action_i)
if action_i == 0:
if comms_to_prey_loc.get(tuple_comms) is None:
comms_to_prey_loc[tuple_comms] = []
# print("path", self.env.env.chosen_path[0])
comms_to_prey_loc[tuple_comms].append(tuple(p))
else:
if comms_to_prey_act.get(tuple_comms) is None:
comms_to_prey_act[tuple_comms] = []
comms_to_prey_act[tuple_comms].append(tuple(p))
action = select_action(self.args, action_out, eval_mode=True)
action, actual = translate_action(self.args, self.env, action)
next_state, reward, done, info = self.env.step(actual)
if self.args.env_name == 'traffic_junction':
done = done or self.env.env.has_failed
# store comm_action in info for next step
if self.args.hard_attn and self.args.commnet:
info['comm_action'] = action[-1] if not self.args.comm_action_one else np.ones(self.args.nagents, dtype=int)
# print(info['comm_action'][0])
comm_action_episode[t] += info['comm_action'][0]
# print("before ", stat.get('comm_action', 0), info['comm_action'][:self.args.nfriendly])
stat['comm_action'] = stat.get('comm_action', 0) + info['comm_action'][:self.args.nfriendly]
all_comms.append(info['comm_action'][:self.args.nfriendly])
if hasattr(self.args, 'enemy_comm') and self.args.enemy_comm:
stat['enemy_comm'] = stat.get('enemy_comm', 0) + info['comm_action'][self.args.nfriendly:]
if 'alive_mask' in info:
misc['alive_mask'] = info['alive_mask'].reshape(reward.shape)
else:
misc['alive_mask'] = np.ones_like(reward)
# env should handle this make sure that reward for dead agents is not counted
# reward = reward * misc['alive_mask']
stat['reward'] = stat.get('reward', 0) + reward[:self.args.nfriendly]
if hasattr(self.args, 'enemy_comm') and self.args.enemy_comm:
stat['enemy_reward'] = stat.get('enemy_reward', 0) + reward[self.args.nfriendly:]
done = done or t == self.args.max_steps - 1
episode_mask = np.ones(reward.shape)
episode_mini_mask = np.ones(reward.shape)
if done:
episode_mask = np.zeros(reward.shape)
else:
if 'is_completed' in info:
episode_mini_mask = 1 - info['is_completed'].reshape(-1)
if should_display:
self.env.display()
trans = Transition(state, action, action_out, value, episode_mask, episode_mini_mask, next_state, reward, misc)
episode.append(trans)
state = next_state
if done:
break
stat['num_steps'] = t + 1
stat['steps_taken'] = stat['num_steps']
if hasattr(self.env, 'reward_terminal'):
reward = self.env.reward_terminal()
# We are not multiplying in case of reward terminal with alive agent
# If terminal reward is masked environment should do
# reward = reward * misc['alive_mask']
episode[-1] = episode[-1]._replace(reward = episode[-1].reward + reward)
stat['reward'] = stat.get('reward', 0) + reward[:self.args.nfriendly]
if hasattr(self.args, 'enemy_comm') and self.args.enemy_comm:
stat['enemy_reward'] = stat.get('enemy_reward', 0) + reward[self.args.nfriendly:]
if hasattr(self.env, 'get_stat'):
merge_stat(self.env.get_stat(), stat)
return episode, stat, all_comms, comms_to_prey_loc, comms_to_prey_act, comms_to_loc_full, comm_action_episode
| [
"numpy.ones_like",
"numpy.zeros",
"numpy.ones",
"inspect.getargspec",
"collections.namedtuple",
"torch.zeros"
] | [((247, 388), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'action_out', 'value', 'episode_mask',\n 'episode_mini_mask', 'next_state', 'reward', 'misc')"], {}), "('Transition', ('state', 'action', 'action_out', 'value',\n 'episode_mask', 'episode_mini_mask', 'next_state', 'reward', 'misc'))\n", (257, 388), False, 'from collections import namedtuple\n'), ((1110, 1163), 'torch.zeros', 'torch.zeros', (['(1)', 'self.args.nagents', 'self.args.hid_size'], {}), '(1, self.args.nagents, self.args.hid_size)\n', (1121, 1163), False, 'import torch\n'), ((1336, 1365), 'numpy.zeros', 'np.zeros', (['self.args.max_steps'], {}), '(self.args.max_steps)\n', (1344, 1365), True, 'import numpy as np\n'), ((746, 772), 'inspect.getargspec', 'getargspec', (['self.env.reset'], {}), '(self.env.reset)\n', (756, 772), False, 'from inspect import getargspec\n'), ((8449, 8470), 'numpy.ones', 'np.ones', (['reward.shape'], {}), '(reward.shape)\n', (8456, 8470), True, 'import numpy as np\n'), ((8503, 8524), 'numpy.ones', 'np.ones', (['reward.shape'], {}), '(reward.shape)\n', (8510, 8524), True, 'import numpy as np\n'), ((1575, 1613), 'numpy.zeros', 'np.zeros', (['self.args.nagents'], {'dtype': 'int'}), '(self.args.nagents, dtype=int)\n', (1583, 1613), True, 'import numpy as np\n'), ((7946, 7966), 'numpy.ones_like', 'np.ones_like', (['reward'], {}), '(reward)\n', (7958, 7966), True, 'import numpy as np\n'), ((8578, 8600), 'numpy.zeros', 'np.zeros', (['reward.shape'], {}), '(reward.shape)\n', (8586, 8600), True, 'import numpy as np\n'), ((7141, 7178), 'numpy.ones', 'np.ones', (['self.args.nagents'], {'dtype': 'int'}), '(self.args.nagents, dtype=int)\n', (7148, 7178), True, 'import numpy as np\n')] |
from __future__ import absolute_import, unicode_literals, print_function
import os, re, math
from ctypes import *
import numpy as N
from numpy.ctypeslib import as_array
# don't bother with parsing error
try:
lib = cdll.LoadLibrary('libnest3.so')
except:
lib = cdll.LoadLibrary(os.path.dirname(__file__) + '/libnest3.so')
# if we want to do OS X version detection:
# import platform
# if platform.system() == 'Darwin'
# '.'.join(platform.mac_ver().split('.')[:2]) --> 10.X
# libstempo.multinest.run borrows heavily from <NAME>'s pymultinest;
# it requires MultiNest v3.2 patched with cwrapper.f90
def run(LogLikelihood,
Prior,
n_dims,
n_params = None,
n_clustering_params = None, wrapped_params = None,
importance_nested_sampling = True,
multimodal = True, const_efficiency_mode = False, n_live_points = 400,
evidence_tolerance = 0.5, sampling_efficiency = 0.8,
n_iter_before_update = 100, null_log_evidence = -1e90,
max_modes = 100, mode_tolerance = -1e90,
outputfiles_basename = "./multinest-", seed = -1, verbose = False,
resume = True, context = None, write_output = True, log_zero = -1e100,
max_iter = 0, init_MPI = True, dump_callback = None):
"""
Runs MultiNest
The most important parameters are the two log-probability functions Prior
and LogLikelihood. They are called by MultiNest.
Prior should transform the unit cube into the parameter cube. Here
is an example for a uniform prior::
def Prior(cube, ndim, nparams):
for i in range(ndim):
cube[i] = cube[i] * 10 * math.pi
The LogLikelihood function gets this parameter cube and should
return the logarithm of the likelihood.
Here is the example for the eggbox problem::
def Loglike(cube, ndim, nparams):
chi = 1.
for i in range(ndim):
chi *= math.cos(cube[i] / 2.)
return math.pow(2. + chi, 5)
Some of the parameters are explained below. Otherwise consult the
MultiNest documentation.
@param importance_nested_sampling:
If True, Multinest will use Importance Nested Sampling (INS). Read http://arxiv.org/abs/1306.2144
for more details on INS. Please read the MultiNest README file before using the INS in MultiNest v3.0.
@param n_params:
Total no. of parameters, should be equal to ndims in most cases
but if you need to store some additional
parameters with the actual parameters then you need to pass
them through the likelihood routine.
@param sampling_efficiency:
defines the sampling efficiency. 0.8 and 0.3 are recommended
for parameter estimation & evidence evalutation
respectively.
use 'parameter' or 'model' to select the respective default
values
@param mode_tolerance:
MultiNest can find multiple modes & also specify which samples belong to which mode. It might be
desirable to have separate samples & mode statistics for modes with local log-evidence value greater than a
particular value in which case Ztol should be set to that value. If there isn't any particularly interesting
Ztol value, then Ztol should be set to a very large negative number (e.g. -1e90).
@param evidence_tolerance:
A value of 0.5 should give good enough accuracy.
@param n_clustering_params:
If mmodal is T, MultiNest will attempt to separate out the
modes. Mode separation is done through a clustering
algorithm. Mode separation can be done on all the parameters
(in which case nCdims should be set to ndims) & it
can also be done on a subset of parameters (in which case
nCdims < ndims) which might be advantageous as
clustering is less accurate as the dimensionality increases.
If nCdims < ndims then mode separation is done on
the first nCdims parameters.
@param null_log_evidence:
If mmodal is T, MultiNest can find multiple modes & also specify
which samples belong to which mode. It might be
desirable to have separate samples & mode statistics for modes
with local log-evidence value greater than a
particular value in which case nullZ should be set to that
value. If there isn't any particulrly interesting
nullZ value, then nullZ should be set to a very large negative
number (e.g. -1.d90).
@param init_MPI:
initialize MPI routines?, relevant only if compiling with MPI
@param log_zero:
points with loglike < logZero will be ignored by MultiNest
@param max_iter:
maximum number of iterations. 0 is unlimited.
@param write_output:
write output files? This is required for analysis.
@param dump_callback:
a callback function for dumping the current status
"""
if n_params == None:
n_params = n_dims
if n_clustering_params == None:
n_clustering_params = n_dims
if wrapped_params == None:
wrapped_params = [0] * n_dims
WrappedType = c_int * len(wrapped_params)
wraps = WrappedType(*wrapped_params)
if sampling_efficiency == 'parameter':
sampling_efficiency = 0.8
if sampling_efficiency == 'model':
sampling_efficiency = 0.3
# MV 20130923
loglike_type = CFUNCTYPE(c_double,
POINTER(c_double),c_int,c_int,c_void_p)
dumper_type = CFUNCTYPE(c_void_p,
c_int,c_int,c_int,
POINTER(c_double),POINTER(c_double),POINTER(c_double),
c_double,c_double,c_double,c_void_p)
if hasattr(LogLikelihood,'loglike') and hasattr(Prior,'remap') and hasattr(Prior,'prior'):
def loglike(cube,ndim,nparams,nullcontext):
# we're not using context with libstempo.like objects
pprior = Prior.premap(cube)
# mappers are supposed to throw a ValueError if they get out of range
try:
pars = Prior.remap(cube)
except ValueError:
return -N.inf
prior = pprior * Prior.prior(pars)
return -N.inf if not prior else math.log(prior) + LogLikelihood.loglike(pars)
else:
def loglike(cube,ndim,nparams,nullcontext):
# it's actually easier to use the context, if any, at the Python level
# and pass a null pointer to MultiNest...
args = [cube,ndim,nparams] + ([] if context is None else context)
if Prior:
Prior(*args)
return LogLikelihood(*args)
def dumper(nSamples,nlive,nPar,
physLive,posterior,paramConstr,
maxLogLike,logZ,logZerr,nullcontext):
if dump_callback:
# It's not clear to me what the desired PyMultiNest dumper callback
# syntax is... but this should pass back the right numpy arrays,
# without copies. Untested!
pc = as_array(paramConstr,shape=(nPar,4))
dump_callback(nSamples,nlive,nPar,
as_array(physLive,shape=(nPar+1,nlive)).T,
as_array(posterior,shape=(nPar+2,nSamples)).T,
(pc[0,:],pc[1,:],pc[2,:],pc[3,:]), # (mean,std,bestfit,map)
maxLogLike,logZ,logZerr)
# MV 20130923: currently we support only multinest 3.2 (24 parameters),
# but it would not be a problem to build up the parameter list dynamically
lib.run(c_bool(importance_nested_sampling),c_bool(multimodal),c_bool(const_efficiency_mode),
c_int(n_live_points),c_double(evidence_tolerance),
c_double(sampling_efficiency),c_int(n_dims),c_int(n_params),
c_int(n_clustering_params),c_int(max_modes),
c_int(n_iter_before_update),c_double(mode_tolerance),
create_string_buffer(outputfiles_basename.encode()), # MV 20130923: need a regular C string
c_int(seed),wraps,
c_bool(verbose),c_bool(resume),
c_bool(write_output),c_bool(init_MPI),
c_double(log_zero),c_int(max_iter),
loglike_type(loglike),dumper_type(dumper),
c_void_p(0))
class multinestdata(dict):
pass
class multinestpar(object):
pass
# where are the multinest files?
def _findfiles(multinestrun,dirname,suffix='-post_equal_weights.dat'):
# try chains/multinestrun-...
# chains/multinestrun/multinestrun-...
root = [dirname + '/',dirname + '/' + multinestrun]
# and if multinestrun is something like pulsar-model,
# try chains/pulsar/model/pulsar-model-...
if '-' in multinestrun:
tokens = multinestrun.split('-')[:-1]
pulsar, model = '-'.join(tokens[:-1]), tokens[-1]
root.append(dirname + '/' + pulsar + '/' + model)
return filter(lambda r: os.path.isfile(r + '/' + multinestrun + suffix),root)
def _getcomment(ret,filename):
try:
ret.comment = open(filename,'r').read()
except IOError:
pass
def _getmeta(ret,filename):
try:
meta = N.load(filename)
except IOError:
return
ret.parnames = list(meta['name'])
ret.tempopars = list(meta['val']) # somewhat legacy?
ret.tempo = {}
ml = N.argmax(ret.data[:,-1])
for i,par in enumerate(ret.parnames):
ret[par] = multinestpar()
try:
ret[par].val, ret[par].err = N.mean(ret.data[:,i]) + meta['offset'][i], math.sqrt(N.var(ret.data[:,i]))
ret[par].offset = meta['offset'][i]
except ValueError:
ret[par].val, ret[par].err = N.mean(ret.data[:,i]), math.sqrt(N.var(ret.data[:,i]))
if 'ml' in meta.dtype.names:
ret[par].ml = meta['ml'][i]
else:
ret[par].ml = ret.data[ml,i] + (meta['offset'][i] if 'offset' in meta.dtype.names else 0)
ret.tempo[par] = multinestpar()
ret.tempo[par].val, ret.tempo[par].err = meta['val'][i], meta['err'][i]
def load_mcmc(mcrun,dirname='.'):
root = _findfiles(mcrun,dirname,'-chain.npy')
ret = multinestdata()
ret.dirname = root[0]
alldata = N.load('{0}/{1}-chain.npy'.format(root[0],mcrun))
# keep all the steps
ret.data = alldata[:,:]
_getmeta(ret,'{0}/{1}-meta.npy'.format(root[0],mcrun))
_getcomment(ret,'{0}/{1}-comment.txt'.format(root[0],mcrun))
return ret
def load_emcee(emceerun,dirname='.',chains=False):
root = _findfiles(emceerun,dirname,'-chain.npy')
ret = multinestdata()
ret.dirname = root[0]
alldata = N.load('{0}/{1}-chain.npy'.format(root[0],emceerun))
# keep the last iteration of the walker cloud
ret.data = alldata[:,-1,:]
if chains:
ret.chains = alldata
_getmeta(ret,'{0}/{1}-meta.npy'.format(root[0],emceerun))
_getcomment(ret,'{0}/{1}-comment.txt'.format(root[0],emceerun))
return ret
def load(multinestrun,dirname='.'):
root = _findfiles(multinestrun,dirname,'-post_equal_weights.dat')
if not root:
# try to find a tar.gz archive
import tempfile, tarfile
root = _findfiles(multinestrun,dirname,'.tar.gz')
tar = tarfile.open('{0}/{1}.tar.gz'.format(root[0],multinestrun),mode='r|gz')
root = [tempfile.mkdtemp(prefix='/tmp/')]
tar.extractall(path=root[0])
ret = multinestdata()
ret.dirname = root[0]
# get data
ret.data = N.loadtxt('{0}/{1}-post_equal_weights.dat'.format(root[0],multinestrun))[:,:-1]
# get evidence
try:
lines = open('{0}/{1}-stats.dat'.format(root[0],multinestrun),'r').readlines()
try:
ret.ev = float(re.search(r'Global Evidence:\s*(\S*)\s*\+/-\s*(\S*)',lines[0]).group(1))
except:
ret.ev = float(re.search(r'Global Log-Evidence :\s*(\S*)\s*\+/-\s*(\S*)',lines[0]).group(1))
except IOError:
pass
# get metadata
_getmeta(ret,'{0}/{1}-meta.npy'.format(root[0],multinestrun))
_getcomment(ret,'{0}/{1}-comment.txt'.format(root[0],multinestrun))
if root[0][:4] == '/tmp':
import shutil
shutil.rmtree(root[0])
return ret
def compress(rootname):
import sys, os, glob
dirname, filename = os.path.dirname(rootname), os.path.basename(rootname)
if filename[-1] == '-':
filename = filename[:-1]
files = [filename + '-' + ending for ending in ('.txt','phys_live.points','stats.dat','ev.dat',
'post_equal_weights.dat','summary.txt','live.points',
'post_separate.dat','meta.npy','resume.dat','comment.txt')]
cd = os.getcwd()
os.chdir(dirname)
os.system('tar zcf {0}.tar.gz {1}'.format(filename,' '.join(files)))
files_exclude = [filename + '-' + ending for ending in ('IS.iterinfo','IS.points','IS.ptprob')]
for f in files + files_exclude:
if os.path.isfile(f):
os.unlink(f)
os.chdir(cd)
| [
"numpy.load",
"os.unlink",
"numpy.argmax",
"os.getcwd",
"os.path.basename",
"os.path.dirname",
"numpy.ctypeslib.as_array",
"os.path.isfile",
"tempfile.mkdtemp",
"numpy.mean",
"re.search",
"shutil.rmtree",
"math.log",
"numpy.var",
"os.chdir"
] | [((9338, 9363), 'numpy.argmax', 'N.argmax', (['ret.data[:, -1]'], {}), '(ret.data[:, -1])\n', (9346, 9363), True, 'import numpy as N\n'), ((12720, 12731), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12729, 12731), False, 'import sys, os, glob\n'), ((12736, 12753), 'os.chdir', 'os.chdir', (['dirname'], {}), '(dirname)\n', (12744, 12753), False, 'import sys, os, glob\n'), ((13026, 13038), 'os.chdir', 'os.chdir', (['cd'], {}), '(cd)\n', (13034, 13038), False, 'import sys, os, glob\n'), ((9158, 9174), 'numpy.load', 'N.load', (['filename'], {}), '(filename)\n', (9164, 9174), True, 'import numpy as N\n'), ((12161, 12183), 'shutil.rmtree', 'shutil.rmtree', (['root[0]'], {}), '(root[0])\n', (12174, 12183), False, 'import shutil\n'), ((12275, 12300), 'os.path.dirname', 'os.path.dirname', (['rootname'], {}), '(rootname)\n', (12290, 12300), False, 'import sys, os, glob\n'), ((12302, 12328), 'os.path.basename', 'os.path.basename', (['rootname'], {}), '(rootname)\n', (12318, 12328), False, 'import sys, os, glob\n'), ((12977, 12994), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (12991, 12994), False, 'import sys, os, glob\n'), ((7042, 7080), 'numpy.ctypeslib.as_array', 'as_array', (['paramConstr'], {'shape': '(nPar, 4)'}), '(paramConstr, shape=(nPar, 4))\n', (7050, 7080), False, 'from numpy.ctypeslib import as_array\n'), ((8929, 8976), 'os.path.isfile', 'os.path.isfile', (["(r + '/' + multinestrun + suffix)"], {}), "(r + '/' + multinestrun + suffix)\n", (8943, 8976), False, 'import sys, os, glob\n'), ((11314, 11346), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""/tmp/"""'}), "(prefix='/tmp/')\n", (11330, 11346), False, 'import tempfile, tarfile\n'), ((13008, 13020), 'os.unlink', 'os.unlink', (['f'], {}), '(f)\n', (13017, 13020), False, 'import sys, os, glob\n'), ((287, 312), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (302, 312), False, 'import sys, os, glob\n'), ((6246, 6261), 'math.log', 'math.log', (['prior'], {}), '(prior)\n', (6254, 6261), False, 'import os, re, math\n'), ((7153, 7196), 'numpy.ctypeslib.as_array', 'as_array', (['physLive'], {'shape': '(nPar + 1, nlive)'}), '(physLive, shape=(nPar + 1, nlive))\n', (7161, 7196), False, 'from numpy.ctypeslib import as_array\n'), ((7222, 7269), 'numpy.ctypeslib.as_array', 'as_array', (['posterior'], {'shape': '(nPar + 2, nSamples)'}), '(posterior, shape=(nPar + 2, nSamples))\n', (7230, 7269), False, 'from numpy.ctypeslib import as_array\n'), ((9495, 9517), 'numpy.mean', 'N.mean', (['ret.data[:, i]'], {}), '(ret.data[:, i])\n', (9501, 9517), True, 'import numpy as N\n'), ((9548, 9569), 'numpy.var', 'N.var', (['ret.data[:, i]'], {}), '(ret.data[:, i])\n', (9553, 9569), True, 'import numpy as N\n'), ((9686, 9708), 'numpy.mean', 'N.mean', (['ret.data[:, i]'], {}), '(ret.data[:, i])\n', (9692, 9708), True, 'import numpy as N\n'), ((9719, 9740), 'numpy.var', 'N.var', (['ret.data[:, i]'], {}), '(ret.data[:, i])\n', (9724, 9740), True, 'import numpy as N\n'), ((11705, 11773), 're.search', 're.search', (['"""Global Evidence:\\\\s*(\\\\S*)\\\\s*\\\\+/-\\\\s*(\\\\S*)"""', 'lines[0]'], {}), "('Global Evidence:\\\\s*(\\\\S*)\\\\s*\\\\+/-\\\\s*(\\\\S*)', lines[0])\n", (11714, 11773), False, 'import os, re, math\n'), ((11821, 11908), 're.search', 're.search', (['"""Global Log-Evidence :\\\\s*(\\\\S*)\\\\s*\\\\+/-\\\\s*(\\\\S*)"""', 'lines[0]'], {}), "('Global Log-Evidence :\\\\s*(\\\\S*)\\\\s*\\\\+/-\\\\s*(\\\\S*)',\n lines[0])\n", (11830, 11908), False, 'import os, re, math\n')] |
import numpy as np
def vehicle_dynamics(dynamics_param, curv, xglob, xcurv, delta_t, u):
m, lf, lr, Iz, Df, Cf, Bf, Dr, Cr, Br = dynamics_param.get_params()
xglob_next = np.zeros(len(xglob))
xcurv_next = np.zeros(len(xcurv))
delta = u[0]
a = u[1]
psi = xglob[3]
X = xglob[4]
Y = xglob[5]
vx = xcurv[0]
vy = xcurv[1]
wz = xcurv[2]
epsi = xcurv[3]
s = xcurv[4]
ey = xcurv[5]
# Compute tire slip angle
alpha_f = delta - np.arctan2(vy + lf * wz, vx)
alpha_r = -np.arctan2(vy - lf * wz, vx)
# Compute lateral force at front and rear tire
Fyf = 2 * Df * np.sin(Cf * np.arctan(Bf * alpha_f))
Fyr = 2 * Dr * np.sin(Cr * np.arctan(Br * alpha_r))
# Propagate the dynamics of delta_t
xglob_next[0] = vx + delta_t * (a - 1 / m * Fyf * np.sin(delta) + wz * vy)
xglob_next[1] = vy + delta_t * (1 / m * (Fyf * np.cos(delta) + Fyr) - wz * vx)
xglob_next[2] = wz + delta_t * (1 / Iz * (lf * Fyf * np.cos(delta) - lr * Fyr))
xglob_next[3] = psi + delta_t * (wz)
xglob_next[4] = X + delta_t * ((vx * np.cos(psi) - vy * np.sin(psi)))
xglob_next[5] = Y + delta_t * (vx * np.sin(psi) + vy * np.cos(psi))
xcurv_next[0] = vx + delta_t * (a - 1 / m * Fyf * np.sin(delta) + wz * vy)
xcurv_next[1] = vy + delta_t * (1 / m * (Fyf * np.cos(delta) + Fyr) - wz * vx)
xcurv_next[2] = wz + delta_t * (1 / Iz * (lf * Fyf * np.cos(delta) - lr * Fyr))
xcurv_next[3] = epsi + delta_t * (
wz - (vx * np.cos(epsi) - vy * np.sin(epsi)) / (1 - curv * ey) * curv
)
xcurv_next[4] = s + delta_t * ((vx * np.cos(epsi) - vy * np.sin(epsi)) / (1 - curv * ey))
xcurv_next[5] = ey + delta_t * (vx * np.sin(epsi) + vy * np.cos(epsi))
return xglob_next, xcurv_next
| [
"numpy.arctan",
"numpy.sin",
"numpy.arctan2",
"numpy.cos"
] | [((488, 516), 'numpy.arctan2', 'np.arctan2', (['(vy + lf * wz)', 'vx'], {}), '(vy + lf * wz, vx)\n', (498, 516), True, 'import numpy as np\n'), ((532, 560), 'numpy.arctan2', 'np.arctan2', (['(vy - lf * wz)', 'vx'], {}), '(vy - lf * wz, vx)\n', (542, 560), True, 'import numpy as np\n'), ((644, 667), 'numpy.arctan', 'np.arctan', (['(Bf * alpha_f)'], {}), '(Bf * alpha_f)\n', (653, 667), True, 'import numpy as np\n'), ((700, 723), 'numpy.arctan', 'np.arctan', (['(Br * alpha_r)'], {}), '(Br * alpha_r)\n', (709, 723), True, 'import numpy as np\n'), ((1094, 1105), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (1100, 1105), True, 'import numpy as np\n'), ((1113, 1124), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (1119, 1124), True, 'import numpy as np\n'), ((1167, 1178), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (1173, 1178), True, 'import numpy as np\n'), ((1186, 1197), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (1192, 1197), True, 'import numpy as np\n'), ((1704, 1716), 'numpy.sin', 'np.sin', (['epsi'], {}), '(epsi)\n', (1710, 1716), True, 'import numpy as np\n'), ((1724, 1736), 'numpy.cos', 'np.cos', (['epsi'], {}), '(epsi)\n', (1730, 1736), True, 'import numpy as np\n'), ((820, 833), 'numpy.sin', 'np.sin', (['delta'], {}), '(delta)\n', (826, 833), True, 'import numpy as np\n'), ((985, 998), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (991, 998), True, 'import numpy as np\n'), ((1254, 1267), 'numpy.sin', 'np.sin', (['delta'], {}), '(delta)\n', (1260, 1267), True, 'import numpy as np\n'), ((1419, 1432), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (1425, 1432), True, 'import numpy as np\n'), ((1610, 1622), 'numpy.cos', 'np.cos', (['epsi'], {}), '(epsi)\n', (1616, 1622), True, 'import numpy as np\n'), ((1630, 1642), 'numpy.sin', 'np.sin', (['epsi'], {}), '(epsi)\n', (1636, 1642), True, 'import numpy as np\n'), ((896, 909), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (902, 909), True, 'import numpy as np\n'), ((1330, 1343), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (1336, 1343), True, 'import numpy as np\n'), ((1504, 1516), 'numpy.cos', 'np.cos', (['epsi'], {}), '(epsi)\n', (1510, 1516), True, 'import numpy as np\n'), ((1524, 1536), 'numpy.sin', 'np.sin', (['epsi'], {}), '(epsi)\n', (1530, 1536), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.utils import data
import scipy.io as sio
import os
from tqdm import tqdm
import cv2
import json
from PIL import Image
from maskrcnn_benchmark.structures.bounding_box import BoxList
class MyDataset_train(object):
def __init__(self, json_dir, transforms=None):
self.json_dir = json_dir
with open(json_dir,'r') as f:
data = json.load(f)
self.data = data
self.transforms = transforms
def __getitem__(self, item):
img = Image.open(self.data[item]['image']).convert("RGB")
# dummy target
boxes = self.data[item]['box']
boxes = np.array(boxes)
target = BoxList(boxes, img.size, mode="xyxy")
classes = torch.ones(len(boxes))
target.add_field("labels",classes)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target, self.data[item]['image']
def __len__(self):
return len(self.data)
class MyDataset_test(object):
def __init__(self, json_dir, transforms=None):
self.json_dir = json_dir
with open(json_dir,'r') as f:
data = json.load(f)
self.data = data
self.transforms = transforms
def __getitem__(self, item):
img = Image.open(self.data[item]['image']).convert("RGB")
# dummy target
boxes = self.data[item]['box']
boxes = np.array(boxes)
target = BoxList(boxes, img.size, mode="xyxy")
classes = torch.ones(len(boxes))
target.add_field("labels",classes)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target, self.data[item]['image']
def __len__(self):
return 3000
def mat_to_json():
matfile = '/data1/zem/Resnet.CRNN/data/SynthText/gt.mat'
img_root = '/data1/zem/Resnet.CRNN/data/SynthText'
data = sio.loadmat(matfile)
num_img = len(data['wordBB'][0])
# words = []
# for val in data['txt'][0][0]:
# v = [x.split('\n') for x in val.strip().split(' ')]
# v = [[vv_ for vv_ in vv if len(vv_) > 0] for vv in v]
# words.extend(sum(v, []))
json_data = []
tbar = tqdm(range(num_img))
for i in tbar:
img_info = {}
boxes = []
wordsBB = data['wordBB'][0][i]
if len(wordsBB.shape) == 2:
wordsBB = np.expand_dims(wordsBB,axis=2)
assert len(wordsBB.shape)==3,'the shape is {}'.format(wordsBB.shape)
wordsBB = np.around(np.array(wordsBB), decimals=2).transpose(2, 1, 0)
# print(words[0])
for j in range(wordsBB.shape[0]):
x1 = wordsBB[j][0][0]
y1 = wordsBB[j][0][1]
x2 = wordsBB[j][1][0]
y2 = wordsBB[j][1][1]
x3 = wordsBB[j][2][0]
y3 = wordsBB[j][2][1]
x4 = wordsBB[j][3][0]
y4 = wordsBB[j][3][1]
x_min = min([x1, x2, x3, x4])
x_max = max([x1, x2, x3, x4])
y_min = min([y1, y2, y3, y4])
y_max = max([y1, y2, y3, y4])
box = [round(float(x_min),2), round(float(y_min),2), round(float(x_max),2), round(float(y_max),2)]
boxes.append(box)
img_path = data['imnames'][0][i][0]
img_abs_path = os.path.join(img_root, img_path)
img_info['image'] = img_abs_path
img_info['box'] = boxes
json_data.append(dict(img_info))
with open('/data1/zem/Resnet.CRNN/data/SynthText/my_gt.json','w') as f:
json.dump(json_data,f)
print('done')
if __name__ == "__main__":
with open('/data1/zem/Resnet.CRNN/data/SynthText/my_gt.json','r') as f:
data = json.load(f)
print('finish')
| [
"json.dump",
"maskrcnn_benchmark.structures.bounding_box.BoxList",
"json.load",
"scipy.io.loadmat",
"numpy.expand_dims",
"PIL.Image.open",
"numpy.array",
"os.path.join"
] | [((2008, 2028), 'scipy.io.loadmat', 'sio.loadmat', (['matfile'], {}), '(matfile)\n', (2019, 2028), True, 'import scipy.io as sio\n'), ((679, 694), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (687, 694), True, 'import numpy as np\n'), ((713, 750), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['boxes', 'img.size'], {'mode': '"""xyxy"""'}), "(boxes, img.size, mode='xyxy')\n", (720, 750), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((1490, 1505), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (1498, 1505), True, 'import numpy as np\n'), ((1524, 1561), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['boxes', 'img.size'], {'mode': '"""xyxy"""'}), "(boxes, img.size, mode='xyxy')\n", (1531, 1561), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((3426, 3458), 'os.path.join', 'os.path.join', (['img_root', 'img_path'], {}), '(img_root, img_path)\n', (3438, 3458), False, 'import os\n'), ((3662, 3685), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (3671, 3685), False, 'import json\n'), ((3827, 3839), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3836, 3839), False, 'import json\n'), ((416, 428), 'json.load', 'json.load', (['f'], {}), '(f)\n', (425, 428), False, 'import json\n'), ((1227, 1239), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1236, 1239), False, 'import json\n'), ((2504, 2535), 'numpy.expand_dims', 'np.expand_dims', (['wordsBB'], {'axis': '(2)'}), '(wordsBB, axis=2)\n', (2518, 2535), True, 'import numpy as np\n'), ((544, 580), 'PIL.Image.open', 'Image.open', (["self.data[item]['image']"], {}), "(self.data[item]['image'])\n", (554, 580), False, 'from PIL import Image\n'), ((1355, 1391), 'PIL.Image.open', 'Image.open', (["self.data[item]['image']"], {}), "(self.data[item]['image'])\n", (1365, 1391), False, 'from PIL import Image\n'), ((2642, 2659), 'numpy.array', 'np.array', (['wordsBB'], {}), '(wordsBB)\n', (2650, 2659), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
Author: <NAME>
"""
import numpy as np
import scipy
from scipy.misc import fromimage
from scipy.io import loadmat
from skimage.color import rgb2lab
from skimage.util import img_as_float
from skimage import io
from utils import *
from config import *
from init_caffe import *
from random import Random
myrandom = Random(RAND_SEED)
def transform_and_get_image(im, max_spixels, out_size):
height = im.shape[0]
width = im.shape[1]
out_height = out_size[0]
out_width = out_size[1]
pad_height = out_height - height
pad_width = out_width - width
im = np.lib.pad(im, ((0, pad_height), (0, pad_width), (0, 0)), 'constant',
constant_values=-10)
transformer = caffe.io.Transformer({'img': (1, 3, out_size[0],
out_size[1])})
transformer.set_transpose('img', (2, 0, 1))
im = np.asarray(transformer.preprocess('img', im))
im = np.expand_dims(im, axis=0)
return im
def transform_and_get_spixel_init(max_spixels, out_size):
out_height = out_size[0]
out_width = out_size[1]
spixel_init, feat_spixel_initmap, k_w, k_h = \
get_spixel_init(max_spixels, out_width, out_height)
spixel_init = spixel_init[None, None, :, :]
feat_spixel_initmap = feat_spixel_initmap[None, None, :, :]
return spixel_init, feat_spixel_initmap, k_h, k_w
def convert_label(label):
problabel = np.zeros((1, 50, label.shape[0], label.shape[1])).astype(np.float32)
ct = 0
for t in np.unique(label).tolist():
if ct >= 50:
print(np.unique(label).shape)
break
else:
problabel[:, ct, :, :] = (label == t)
ct = ct + 1
label2 = np.squeeze(np.argmax(problabel, axis = 1))
return label2, problabel
def fetch_and_transform_data2(imgname,
data_type,
out_types,
max_spixels):
image_folder = IMG_FOLDER[data_type]
image_filename = image_folder + imgname + '.jpg'
image = img_as_float(io.imread(image_filename))
im = rgb2lab(image)
gt_folder = GT_FOLDER[data_type]
gt_filename = gt_folder + imgname + '.mat'
gtseg_all = loadmat(gt_filename)
t = np.random.randint(0, len(gtseg_all['groundTruth'][0]))
gtseg = gtseg_all['groundTruth'][0][t][0][0][0]
label, problabel = convert_label(gtseg)
height = im.shape[0]
width = im.shape[1]
out_height = height
out_width = width
out_img = transform_and_get_image(im, max_spixels, [out_height, out_width])
inputs = {}
for in_name in out_types:
if in_name == 'img':
inputs['img'] = out_img
if in_name == 'spixel_init':
out_spixel_init, feat_spixel_init, spixels_h, spixels_w = \
transform_and_get_spixel_init(max_spixels, [out_height, out_width])
inputs['spixel_init'] = out_spixel_init
if in_name == 'feat_spixel_init':
inputs['feat_spixel_init'] = feat_spixel_init
if in_name == 'label':
label = np.expand_dims(np.expand_dims(label, axis=0), axis=0)
inputs['label'] = label
if in_name == 'problabel':
inputs['problabel'] = problabel
return [inputs, height, width]
def fetch_and_transform_data(imgname,
data_type,
out_types,
max_spixels):
image_folder = IMG_FOLDER[data_type]
image_filename = image_folder + imgname + '.jpg'
image = img_as_float(io.imread(image_filename))
im = rgb2lab(image)
height = im.shape[0]
width = im.shape[1]
out_height = height
out_width = width
out_img = transform_and_get_image(im, max_spixels, [out_height, out_width])
#print('*******')
#print(out_img.shape)
img11 = np.squeeze(out_img)
#np.savetxt('out_img11.txt', img11[0], fmt='%0.6f')
inputs = {}
for in_name in out_types:
if in_name == 'img':
inputs['img'] = out_img
if in_name == 'spixel_init':
out_spixel_init, feat_spixel_init, spixels_h, spixels_w = \
transform_and_get_spixel_init(max_spixels, [out_height, out_width])
inputs['spixel_init'] = out_spixel_init
if in_name == 'feat_spixel_init':
inputs['feat_spixel_init'] = feat_spixel_init
return [inputs, height, width]
def scale_image(im, s_factor):
s_img = scipy.ndimage.zoom(im, (s_factor, s_factor, 1), order = 1)
return s_img
def scale_label(label, s_factor):
s_label = scipy.ndimage.zoom(label, (s_factor, s_factor), order = 0)
return s_label
def fetch_and_transform_patch_data(imgname,
data_type,
out_types,
max_spixels,
patch_size = None):
s_factor = get_rand_scale_factor()
image_folder = IMG_FOLDER[data_type]
image_filename = image_folder + imgname + '.jpg'
image = img_as_float(io.imread(image_filename))
image = scale_image(image, s_factor)
im = rgb2lab(image)
gt_folder = GT_FOLDER[data_type]
gt_filename = gt_folder + imgname + '.mat'
gtseg_all = loadmat(gt_filename)
t = np.random.randint(0, len(gtseg_all['groundTruth'][0]))
gtseg = gtseg_all['groundTruth'][0][t][0][0][0]
# gtseg2 = np.zeros((5, gtseg.shape[0], gtseg.shape[1])).astype(gtseg.dtype)
# for kk in range(0, len(gtseg_all['groundTruth'][0])):
# gtseg2[kk, ...] = gtseg_all['groundTruth'][0][kk][0][0][0]
gtseg = scale_label(gtseg, s_factor)
# gtseg2 = scale_label(gtseg2, s_factor)
if np.random.uniform(0, 1) > 0.5:
im = im[:, ::-1, ...]
gtseg = gtseg[:, ::-1]
# gtseg2 = gtseg2[:, :, ::-1]
height = im.shape[0]
width = im.shape[1]
if patch_size == None:
out_height = height
out_width = width
else:
out_height = patch_size[0]
out_width = patch_size[1]
if out_height > height:
raise "Patch size is greater than image size"
if out_width > width:
raise "Patch size is greater than image size"
start_row = myrandom.randint(0, height - out_height)
start_col = myrandom.randint(0, width - out_width)
im_cropped = im[start_row : start_row + out_height,
start_col : start_col + out_width, :]
out_img = transform_and_get_image(im_cropped, max_spixels, [out_height, out_width])
gtseg_cropped = gtseg[start_row : start_row + out_height,
start_col : start_col + out_width]
# gtseg2_cropped = gtseg2[:, start_row : start_row + out_height,
# start_col : start_col + out_width]
label_cropped, problabel_cropped = convert_label(gtseg_cropped)
inputs = {}
for in_name in out_types:
if in_name == 'img':
inputs['img'] = out_img
if in_name == 'spixel_init':
out_spixel_init, feat_spixel_init, spixels_h, spixels_w = \
transform_and_get_spixel_init(max_spixels, [out_height, out_width])
inputs['spixel_init'] = out_spixel_init
if in_name == 'feat_spixel_init':
inputs['feat_spixel_init'] = feat_spixel_init
if in_name == 'label':
label_cropped = np.expand_dims(np.expand_dims(label_cropped, axis=0), axis=0)
inputs['label'] = label_cropped
if in_name == 'problabel':
inputs['problabel'] = problabel_cropped
# if in_name == 'multilabel':
# inputs['multilabel'] = label_cropped
return [inputs, height, width]
| [
"numpy.random.uniform",
"scipy.io.loadmat",
"numpy.argmax",
"random.Random",
"numpy.unique",
"numpy.zeros",
"numpy.expand_dims",
"scipy.ndimage.zoom",
"numpy.lib.pad",
"numpy.squeeze",
"skimage.io.imread",
"skimage.color.rgb2lab"
] | [((508, 525), 'random.Random', 'Random', (['RAND_SEED'], {}), '(RAND_SEED)\n', (514, 525), False, 'from random import Random\n'), ((772, 866), 'numpy.lib.pad', 'np.lib.pad', (['im', '((0, pad_height), (0, pad_width), (0, 0))', '"""constant"""'], {'constant_values': '(-10)'}), "(im, ((0, pad_height), (0, pad_width), (0, 0)), 'constant',\n constant_values=-10)\n", (782, 866), True, 'import numpy as np\n'), ((1127, 1153), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (1141, 1153), True, 'import numpy as np\n'), ((2302, 2316), 'skimage.color.rgb2lab', 'rgb2lab', (['image'], {}), '(image)\n', (2309, 2316), False, 'from skimage.color import rgb2lab\n'), ((2418, 2438), 'scipy.io.loadmat', 'loadmat', (['gt_filename'], {}), '(gt_filename)\n', (2425, 2438), False, 'from scipy.io import loadmat\n'), ((3807, 3821), 'skimage.color.rgb2lab', 'rgb2lab', (['image'], {}), '(image)\n', (3814, 3821), False, 'from skimage.color import rgb2lab\n'), ((4062, 4081), 'numpy.squeeze', 'np.squeeze', (['out_img'], {}), '(out_img)\n', (4072, 4081), True, 'import numpy as np\n'), ((4674, 4730), 'scipy.ndimage.zoom', 'scipy.ndimage.zoom', (['im', '(s_factor, s_factor, 1)'], {'order': '(1)'}), '(im, (s_factor, s_factor, 1), order=1)\n', (4692, 4730), False, 'import scipy\n'), ((4801, 4857), 'scipy.ndimage.zoom', 'scipy.ndimage.zoom', (['label', '(s_factor, s_factor)'], {'order': '(0)'}), '(label, (s_factor, s_factor), order=0)\n', (4819, 4857), False, 'import scipy\n'), ((5357, 5371), 'skimage.color.rgb2lab', 'rgb2lab', (['image'], {}), '(image)\n', (5364, 5371), False, 'from skimage.color import rgb2lab\n'), ((5473, 5493), 'scipy.io.loadmat', 'loadmat', (['gt_filename'], {}), '(gt_filename)\n', (5480, 5493), False, 'from scipy.io import loadmat\n'), ((1921, 1949), 'numpy.argmax', 'np.argmax', (['problabel'], {'axis': '(1)'}), '(problabel, axis=1)\n', (1930, 1949), True, 'import numpy as np\n'), ((2266, 2291), 'skimage.io.imread', 'io.imread', (['image_filename'], {}), '(image_filename)\n', (2275, 2291), False, 'from skimage import io\n'), ((3771, 3796), 'skimage.io.imread', 'io.imread', (['image_filename'], {}), '(image_filename)\n', (3780, 3796), False, 'from skimage import io\n'), ((5280, 5305), 'skimage.io.imread', 'io.imread', (['image_filename'], {}), '(image_filename)\n', (5289, 5305), False, 'from skimage import io\n'), ((5911, 5934), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (5928, 5934), True, 'import numpy as np\n'), ((1610, 1659), 'numpy.zeros', 'np.zeros', (['(1, 50, label.shape[0], label.shape[1])'], {}), '((1, 50, label.shape[0], label.shape[1]))\n', (1618, 1659), True, 'import numpy as np\n'), ((1704, 1720), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (1713, 1720), True, 'import numpy as np\n'), ((3299, 3328), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (3313, 3328), True, 'import numpy as np\n'), ((7589, 7626), 'numpy.expand_dims', 'np.expand_dims', (['label_cropped'], {'axis': '(0)'}), '(label_cropped, axis=0)\n', (7603, 7626), True, 'import numpy as np\n'), ((1770, 1786), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (1779, 1786), True, 'import numpy as np\n')] |
import numpy as np
def epoching_moat(messages, data, info, events):
"""Epoch the raw eyetracking data. This function has ragged array support.
Can deprecate once we switch to NivLink 2.0.
Parameters
----------
messages: array, shape (n_times, 1)
Array containing messages fromt the raw eyetracking file
raw : array, shape (n_times, 3)
Raw eyetracking data. The first column contains the event messages.
The second and third columns contain the eye positions in the x- and
y-dimensions, respectively.
info : instance of `ScreenInfo`
Eyetracking acquisition information.
events : array, shape (n_trials, 3)
Events data. Each row represents a trial. The first column
denotes the block. The second column denotes the trial onset
relative to the block. The third column denotes the trial length.
template : str
Template for block start note as found in the EyeLink output file.
Returns
-------
epochs : array, shape (n_trials, n_times, 2)
Epoched eyetracking timeseries data. Last dimension is the
spatial dimension of the eyetracker (xdim, ydim).
Notes
-----
Designed for MOAT dataset collected by <NAME> & <NAME>.
Key assumption is that each "Start of Run message" is aligned to the
first stimulus onset within that run/block. We only look at last 4 blocks.
"""
## 0-indexing (blocks start at 0).
events[:,0] -= 1
## Identify block starts (in samples).
block_onsets, = np.where([True if msg.startswith('Start') else False for msg in messages])
block_onsets = block_onsets[-4:]
## Convert events from seconds to samples.
blocks, raw_index = events[:,0].astype(int), events[:,1:] # Divvy up blocks & events.
raw_index[:,1] += raw_index[:,0] # Convert duration to offset.
raw_index = (raw_index * info.sfreq).astype(int) # Convert time to samples.
raw_index = (raw_index.T + block_onsets[blocks]).T # Add block offsets to samples.
## Build epochs.
n_trials = raw_index.shape[0]
n_times = np.diff(raw_index).max()
epochs = np.ones((n_trials, n_times, 2)) * np.nan
for n, (r1, r2) in enumerate(raw_index): epochs[n,:(r2-r1)] = data[r1:r2]
epochs = np.ma.masked_invalid(epochs)
return epochs.astype(float)
def set_custom_centers(info, raw_data_pos):
"""Customize AoI centers for a particular subject.
Parameters
----------
info : instance of `ScreenInfo`
Eyetracking acquisition information.
raw : array, shape (n_times, 2)
Raw eyetracking data without message column.
Returns
-------
custom_ctr_left : array, shape (1, 2)
New x, y coordinates of left ellipse
custom_ctr_right : array, shape (1, 2)
New x, y coordinates of right ellipse
"""
## Remove NaNs from eye position data.
mask = ~np.any(np.isnan(raw_data_pos),axis=1)
x = raw_data_pos[mask,0]
y = raw_data_pos[mask,1]
## Compute 2D histogram in pixel space.
xedges = np.arange(0,info.xdim+1)
yedges = np.arange(0,info.ydim+1)
H, xedges, yedges = np.histogram2d(x, y,bins=(xedges, yedges))
H = H.T
## Determine custom AoI centers.
center_mask = 300;
H_left = np.zeros(H.shape); H_right = np.zeros(H.shape);
H_left[:,np.arange(1,int(info.xdim/2)-center_mask)] = H[:,np.arange(1,int(info.xdim/2)-center_mask)]
H_right[:,np.arange(int(info.xdim/2)+center_mask,int(info.xdim))] = H[:,np.arange(int(info.xdim/2)+center_mask,int(info.xdim))]
# Get indices of maximum each half.
max_ind_left = np.unravel_index(np.argmax(H_left, axis=None), H_left.shape)
max_ind_right = np.unravel_index(np.argmax(H_right, axis=None), H_right.shape)
# Recode as custom AoI center.
custom_ctr_left = (max_ind_left[1], max_ind_left[0])
custom_ctr_right = (max_ind_right[1], max_ind_right[0])
return custom_ctr_left, custom_ctr_right
def set_screen_moat(info, custom_ctr_left=None, custom_ctr_right=None):
"""Sets screen and AoIs for MOAT experiment.
Parameters
----------
info : instance of `ScreenInfo`
Eyetracking acquisition information.
Returns
-------
info_with_aoi : instance of `ScreenInfo` with AoIs added.
Eyetracking acquisition information.
Notes
-----
Designed for MOAT dataset collected by <NAME> & <NAME>. Assumes
a fixed 135 degree rotation for each ellipse.
"""
import math
## Define ellipse centers
if custom_ctr_left is None:
ctr_left = (400,400)
else:
ctr_left = custom_ctr_left
if custom_ctr_right is None:
ctr_right = (1200,400)
else:
ctr_right = custom_ctr_right
## Define AoIs
aois = np.empty((4,5)) # center x-coord, center y-coord, x-radius, y-radius
# Large left ellipse
aois[0] = [ctr_left[0], ctr_left[1], 200, 400, np.radians(-135)]
# Large right ellipse
aois[1] = [ctr_right[0], ctr_right[1], 200, 400, np.radians(135)]
# Small left ellipse
aois[2] = [ctr_left[0], ctr_left[1], 100*np.sqrt(2), 200*np.sqrt(2), np.radians(-135)]
# Small right ellipse
aois[3] = [ctr_right[0], ctr_right[1], 100*np.sqrt(2), 200*np.sqrt(2), np.radians(135)]
## Make masks
# Define slope and intercept for inequality line
slope_left = math.sin(math.radians(45)) / math.cos(math.radians(45))
slope_right = math.sin(math.radians(-45)) / math.cos(math.radians(-45))
int_left = ctr_left[1] - slope_left * ctr_left[0]
int_right = ctr_right[1] - slope_right * ctr_right[0]
# Create screen sized array with unraveled indices.
# This gives us the cartesian coordinate grid in pixel space
[X,Y] = np.unravel_index(np.arange(info.xdim * info.ydim),(info.xdim, info.ydim))
# Make mask that keeps upper half of large left ellipse.
mask1 = np.reshape(slope_left * X + int_left < Y, (info.xdim, info.ydim)).astype(int)
# Make mask that keeps lower half of large left ellipse.
mask2 = np.reshape(slope_left * X + int_left > Y, (info.xdim, info.ydim)).astype(int)
# Make mask that keeps lower half of large right ellipse.
mask3 = np.reshape(slope_right * X + int_right > Y, (info.xdim, info.ydim)).astype(int)
# Make mask that keeps upper half of large right ellipse.
mask4 = np.reshape(slope_right * X + int_right < Y, (info.xdim, info.ydim)).astype(int)
# Screen 1: whole ellipses
info.add_ellipsoid_aoi(aois[2,0], aois[2,1], aois[2,2], aois[2,3], aois[2,4], 1)
info.add_ellipsoid_aoi(aois[3,0], aois[3,1], aois[3,2], aois[3,3], aois[3,4], 1)
# Screen 2: halved left ellipse, whole right ellipse
info.add_ellipsoid_aoi(aois[0,0], aois[0,1], aois[0,2], aois[0,3], aois[0,4], 2, mask1)
info.add_ellipsoid_aoi(aois[0,0], aois[0,1], aois[0,2], aois[0,3], aois[0,4], 2, mask2)
info.add_ellipsoid_aoi(aois[3,0], aois[3,1], aois[3,2], aois[3,3], aois[3,4], 2)
# Screen 2: whole left ellipse, halved right ellipse
info.add_ellipsoid_aoi(aois[2,0], aois[2,1], aois[2,2], aois[2,3], aois[2,4], 3)
info.add_ellipsoid_aoi(aois[1,0], aois[1,1], aois[1,2], aois[1,3], aois[1,4], 3, mask3)
info.add_ellipsoid_aoi(aois[1,0], aois[1,1], aois[1,2], aois[1,3], aois[1,4], 3, mask4)
# Screen 4: halved left ellipse, halved right ellipse
info.add_ellipsoid_aoi(aois[0,0], aois[0,1], aois[0,2], aois[0,3], aois[0,4], 4, mask1)
info.add_ellipsoid_aoi(aois[0,0], aois[0,1], aois[0,2], aois[0,3], aois[0,4], 4, mask2)
info.add_ellipsoid_aoi(aois[1,0], aois[1,1], aois[1,2], aois[1,3], aois[1,4], 4, mask3)
info.add_ellipsoid_aoi(aois[1,0], aois[1,1], aois[1,2], aois[1,3], aois[1,4], 4, mask4)
def make_screen_idx(n_trials, featmap):
"""Sets screen and AoIs for MOAT experiment.
Parameters
----------
n_trials : int
Number of trials in the experiment.
featmap : array, shape (n_trials, n_aois)
Key for mapping AoIs to cues.
Returns
-------
screen_idx : array, shape(n_trials, 1)
Vector defining which AoI configuration present
on each trial.
"""
screen_idx = np.zeros((n_trials,1))
empty_count = np.count_nonzero(featmap == 99, axis = 1)
idx_aoi1_filled = featmap[:,0] != 99
idx_aoi2_filled = featmap[:,1] != 99
idx_screen_1 = empty_count == 4
idx_screen_2 = np.logical_and(empty_count == 3, idx_aoi2_filled)
idx_screen_3 = np.logical_and(empty_count == 3, idx_aoi1_filled)
idx_screen_4 = empty_count == 2
screen_idx[idx_screen_1] = 1
screen_idx[idx_screen_2] = 2
screen_idx[idx_screen_3] = 3
screen_idx[idx_screen_4] = 4
return screen_idx
def remap_aois(fixations):
"""Recode AoIs.
Parameters
----------
fixations : dataframe
Contains eye position data mapped to NivLink AoIs (1-12).
Returns
-------
fixations : dataframe
Contains eye position data mapped to MOAT recoded AoIs (1-6).
Notes
-------
Key:
1=6=[1], 2=5=[2], 3=9=[3], 4=10=[4], 7=11=[5], 8=12=[6]
"""
fixations = fixations.replace({'AoI': 5}, 2)
fixations = fixations.replace({'AoI': 9}, 3)
fixations = fixations.replace({'AoI': 10}, 4)
fixations = fixations.replace({'AoI': 7}, 5)
fixations = fixations.replace({'AoI': 11}, 5)
fixations = fixations.replace({'AoI': 8}, 6)
fixations = fixations.replace({'AoI': 12}, 6)
return fixations
def plot_moat_heatmaps(info_with_aoi, H, contrast):
"""Plot raw data heatmaps with overlaid AoIs.
Parameters
----------
info_with_aoi : instance of `ScreenInfo`
Eyetracking acquisition information with AoIs added.
H: array, shape(xdim, ydim)
2D histogram of position in pixel space.
contrast : array, shape(0,1)
Contrast for histogram plot.
Returns
-------
fig, ax : plt.figure
Figure and axis of plot.
Notes
-----
Requires matplotlib.
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(20, 20));
axes[0,0].imshow(H, interpolation='bilinear', cmap=cm.gnuplot, clim=(contrast[0], contrast[1]));
axes[0,0].imshow(info_with_aoi.indices[:,:,0].T, alpha = 0.2, cmap = cm.gray)
axes[0,0].set_xticks([]);
axes[0,0].set_yticks([]);
axes[0,0].set_title('Simple vs. simple');
axes[0,1].imshow(H, interpolation='bilinear', cmap=cm.gnuplot, clim=(contrast[0], contrast[1]));
axes[0,1].imshow(info_with_aoi.indices[:,:,1].T, alpha = 0.2, cmap = cm.gray)
axes[0,1].set_xticks([]);
axes[0,1].set_yticks([]);
axes[0,1].set_title('Compound vs. simple');
axes[1,0].imshow(H, interpolation='bilinear', cmap=cm.gnuplot, clim=(contrast[0], contrast[1]));
axes[1,0].imshow(info_with_aoi.indices[:,:,2].T, alpha = 0.2, cmap = cm.gray)
axes[1,0].set_xticks([]);
axes[1,0].set_yticks([]);
axes[1,0].set_title('Simple vs. compound');
axes[1,1].imshow(H, interpolation='bilinear', cmap=cm.gnuplot, clim=(contrast[0], contrast[1]));
axes[1,1].imshow(info_with_aoi.indices[:,:,3].T, alpha = 0.2, cmap = cm.gray)
axes[1,1].set_xticks([]);
axes[1,1].set_yticks([]);
axes[1,1].set_title('Compound vs. compound');
return fig, axes
| [
"numpy.radians",
"numpy.count_nonzero",
"numpy.logical_and",
"numpy.argmax",
"math.radians",
"numpy.histogram2d",
"numpy.empty",
"numpy.zeros",
"numpy.ma.masked_invalid",
"numpy.ones",
"numpy.isnan",
"numpy.diff",
"numpy.arange",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"numpy.sq... | [((2349, 2377), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['epochs'], {}), '(epochs)\n', (2369, 2377), True, 'import numpy as np\n'), ((3147, 3174), 'numpy.arange', 'np.arange', (['(0)', '(info.xdim + 1)'], {}), '(0, info.xdim + 1)\n', (3156, 3174), True, 'import numpy as np\n'), ((3185, 3212), 'numpy.arange', 'np.arange', (['(0)', '(info.ydim + 1)'], {}), '(0, info.ydim + 1)\n', (3194, 3212), True, 'import numpy as np\n'), ((3234, 3277), 'numpy.histogram2d', 'np.histogram2d', (['x', 'y'], {'bins': '(xedges, yedges)'}), '(x, y, bins=(xedges, yedges))\n', (3248, 3277), True, 'import numpy as np\n'), ((3363, 3380), 'numpy.zeros', 'np.zeros', (['H.shape'], {}), '(H.shape)\n', (3371, 3380), True, 'import numpy as np\n'), ((3392, 3409), 'numpy.zeros', 'np.zeros', (['H.shape'], {}), '(H.shape)\n', (3400, 3409), True, 'import numpy as np\n'), ((4890, 4906), 'numpy.empty', 'np.empty', (['(4, 5)'], {}), '((4, 5))\n', (4898, 4906), True, 'import numpy as np\n'), ((8267, 8290), 'numpy.zeros', 'np.zeros', (['(n_trials, 1)'], {}), '((n_trials, 1))\n', (8275, 8290), True, 'import numpy as np\n'), ((8308, 8347), 'numpy.count_nonzero', 'np.count_nonzero', (['(featmap == 99)'], {'axis': '(1)'}), '(featmap == 99, axis=1)\n', (8324, 8347), True, 'import numpy as np\n'), ((8490, 8539), 'numpy.logical_and', 'np.logical_and', (['(empty_count == 3)', 'idx_aoi2_filled'], {}), '(empty_count == 3, idx_aoi2_filled)\n', (8504, 8539), True, 'import numpy as np\n'), ((8559, 8608), 'numpy.logical_and', 'np.logical_and', (['(empty_count == 3)', 'idx_aoi1_filled'], {}), '(empty_count == 3, idx_aoi1_filled)\n', (8573, 8608), True, 'import numpy as np\n'), ((10211, 10259), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'nrows': '(2)', 'figsize': '(20, 20)'}), '(ncols=2, nrows=2, figsize=(20, 20))\n', (10223, 10259), True, 'import matplotlib.pyplot as plt\n'), ((2217, 2248), 'numpy.ones', 'np.ones', (['(n_trials, n_times, 2)'], {}), '((n_trials, n_times, 2))\n', (2224, 2248), True, 'import numpy as np\n'), ((3725, 3753), 'numpy.argmax', 'np.argmax', (['H_left'], {'axis': 'None'}), '(H_left, axis=None)\n', (3734, 3753), True, 'import numpy as np\n'), ((3806, 3835), 'numpy.argmax', 'np.argmax', (['H_right'], {'axis': 'None'}), '(H_right, axis=None)\n', (3815, 3835), True, 'import numpy as np\n'), ((5035, 5051), 'numpy.radians', 'np.radians', (['(-135)'], {}), '(-135)\n', (5045, 5051), True, 'import numpy as np\n'), ((5132, 5147), 'numpy.radians', 'np.radians', (['(135)'], {}), '(135)\n', (5142, 5147), True, 'import numpy as np\n'), ((5247, 5263), 'numpy.radians', 'np.radians', (['(-135)'], {}), '(-135)\n', (5257, 5263), True, 'import numpy as np\n'), ((5366, 5381), 'numpy.radians', 'np.radians', (['(135)'], {}), '(135)\n', (5376, 5381), True, 'import numpy as np\n'), ((5867, 5899), 'numpy.arange', 'np.arange', (['(info.xdim * info.ydim)'], {}), '(info.xdim * info.ydim)\n', (5876, 5899), True, 'import numpy as np\n'), ((2179, 2197), 'numpy.diff', 'np.diff', (['raw_index'], {}), '(raw_index)\n', (2186, 2197), True, 'import numpy as np\n'), ((2995, 3017), 'numpy.isnan', 'np.isnan', (['raw_data_pos'], {}), '(raw_data_pos)\n', (3003, 3017), True, 'import numpy as np\n'), ((5219, 5229), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5226, 5229), True, 'import numpy as np\n'), ((5235, 5245), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5242, 5245), True, 'import numpy as np\n'), ((5338, 5348), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5345, 5348), True, 'import numpy as np\n'), ((5354, 5364), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5361, 5364), True, 'import numpy as np\n'), ((5482, 5498), 'math.radians', 'math.radians', (['(45)'], {}), '(45)\n', (5494, 5498), False, 'import math\n'), ((5511, 5527), 'math.radians', 'math.radians', (['(45)'], {}), '(45)\n', (5523, 5527), False, 'import math\n'), ((5556, 5573), 'math.radians', 'math.radians', (['(-45)'], {}), '(-45)\n', (5568, 5573), False, 'import math\n'), ((5586, 5603), 'math.radians', 'math.radians', (['(-45)'], {}), '(-45)\n', (5598, 5603), False, 'import math\n'), ((5997, 6062), 'numpy.reshape', 'np.reshape', (['(slope_left * X + int_left < Y)', '(info.xdim, info.ydim)'], {}), '(slope_left * X + int_left < Y, (info.xdim, info.ydim))\n', (6007, 6062), True, 'import numpy as np\n'), ((6148, 6213), 'numpy.reshape', 'np.reshape', (['(slope_left * X + int_left > Y)', '(info.xdim, info.ydim)'], {}), '(slope_left * X + int_left > Y, (info.xdim, info.ydim))\n', (6158, 6213), True, 'import numpy as np\n'), ((6300, 6367), 'numpy.reshape', 'np.reshape', (['(slope_right * X + int_right > Y)', '(info.xdim, info.ydim)'], {}), '(slope_right * X + int_right > Y, (info.xdim, info.ydim))\n', (6310, 6367), True, 'import numpy as np\n'), ((6454, 6521), 'numpy.reshape', 'np.reshape', (['(slope_right * X + int_right < Y)', '(info.xdim, info.ydim)'], {}), '(slope_right * X + int_right < Y, (info.xdim, info.ydim))\n', (6464, 6521), True, 'import numpy as np\n')] |
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as nc
import os
import glob
import fnmatch
from collections import namedtuple, OrderedDict
import scipy.io as sio
from scipy import interpolate, signal
from pyproj import Proj,transform
import sys
sys.path.append('/ocean/ssahu/CANYONS/wcvi/grid/')
from bathy_common import *
from matplotlib import path
import xarray as xr
import scipy.io as sio
import matplotlib.cm as cm
import cmocean as cmo
import matplotlib.gridspec as gridspec
from dateutil.parser import parse
from salishsea_tools import geo_tools, viz_tools, tidetools, nc_tools
import gsw
path_to_save = '/data/ssahu/NEP36_Extracted_Months/'
bathy = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc')
Z = bathy.variables['Bathymetry'][:]
zlevels = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc').variables['deptht']
lon = bathy['nav_lon'][...]
lat = bathy['nav_lat'][...]
z0 = np.ma.masked_values(Z, 0)
y_wcvi_slice = np.array(np.arange(180,350))
x_wcvi_slice = np.array(np.arange(480,650))
def tem_sal_timeseries_at_WCVI_locations(grid_scalar):#, j, i):
temp = grid_scalar.variables['votemper'][0,:, :, :]
sal = grid_scalar.variables['vosaline'][0,:, :, :]
scalar_ts = namedtuple('scalar_ts', 'temp, sal')
return scalar_ts(temp, sal)
print("Extracting June Data")
temp_june = np.empty((30,50,Z.shape[0],Z.shape[1]))
sal_june = np.empty((30,50,Z.shape[0],Z.shape[1]))
i = 0
for file in sorted(glob.glob('/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_201506*grid_T.nc')):
scalar_ts = tem_sal_timeseries_at_WCVI_locations(nc.Dataset(file))
temp_june[i,...] = scalar_ts[0]
sal_june[i,...] = scalar_ts[1]
i = i+1
print("Calculating the Spice for June for the WCVI subset; the rest of the locations will have empty spice")
pressure_loc = gsw.p_from_z(-zlevels[:],np.mean(lat))
SA_loc_jun = np.empty_like(sal_june)
CT_loc_jun = np.empty_like(sal_june)
spic_jun = np.empty_like(sal_june)
rho_jun = np.empty_like(sal_june)
for t in np.arange(sal_june.shape[0]):
for k in np.arange(sal_june.shape[1]):
for j in np.arange(180,350):
for i in np.arange(480,650):
SA_loc_jun[t,k,j,i] = gsw.SA_from_SP(sal_june[t,k,j,i], pressure_loc[k], lon[j,i], lat[j,i])
CT_loc_jun[t,k,j,i] = gsw.CT_from_pt(sal_june[t,k,j,i], temp_june[t,k,j,i])
spic_jun[t,k,j,i] = gsw.spiciness0(SA_loc_jun[t,k,j,i],CT_loc_jun[t,k,j,i])
# rho_jun[t,k,j,i] = gsw.density.rho(SA_loc_jun[t,k,j,i], CT_loc_jun[t,k,j,i], pressure_loc[k])
rho_jun[t,k,j,i] = gsw.density.rho(SA_loc_jun[t,k,j,i], CT_loc_jun[t,k,j,i], 0)
print("Writing the file for June")
bdy_file = nc.Dataset(path_to_save + 'NEP36_T_S_Spice_june_larger_offshore_rho_correct.nc', 'w', zlib=True);
bdy_file.createDimension('x', sal_june.shape[3]);
bdy_file.createDimension('y', sal_june.shape[2]);
bdy_file.createDimension('deptht', sal_june.shape[1]);
bdy_file.createDimension('time_counter', None);
x = bdy_file.createVariable('x', 'int32', ('x',), zlib=True);
x.units = 'indices';
x.longname = 'x indices of NEP36';
y = bdy_file.createVariable('y', 'int32', ('y',), zlib=True);
y.units = 'indices';
y.longname = 'y indices of NEP36';
deptht = bdy_file.createVariable('deptht', 'float32', ('deptht',), zlib=True);
deptht.units = 'm';
deptht.longname = 'Vertical T Levels';
time_counter = bdy_file.createVariable('time_counter', 'int32', ('time_counter',), zlib=True);
time_counter.units = 's';
time_counter.longname = 'time';
votemper = bdy_file.createVariable('votemper', 'float32', ('time_counter', 'deptht', 'y', 'x'), zlib=True);
vosaline = bdy_file.createVariable('vosaline', 'float32', ('time_counter', 'deptht', 'y', 'x'), zlib=True);
spiciness = bdy_file.createVariable('spiciness', 'float32', ('time_counter', 'deptht', 'y', 'x'), zlib=True);
density = bdy_file.createVariable('density', 'float32', ('time_counter', 'deptht', 'y', 'x'), zlib=True)
votemper[...] = temp_june[...];
vosaline[...] = sal_june[...];
spiciness[...] = spic_jun[...];
density[...] = rho_jun[...];
bdy_file.close()
print("The June file is successfully written")
print("End of Script: Thanks")
| [
"sys.path.append",
"netCDF4.Dataset",
"gsw.spiciness0",
"gsw.SA_from_SP",
"numpy.ma.masked_values",
"numpy.empty",
"numpy.empty_like",
"gsw.CT_from_pt",
"numpy.mean",
"numpy.arange",
"collections.namedtuple",
"gsw.density.rho",
"glob.glob"
] | [((295, 345), 'sys.path.append', 'sys.path.append', (['"""/ocean/ssahu/CANYONS/wcvi/grid/"""'], {}), "('/ocean/ssahu/CANYONS/wcvi/grid/')\n", (310, 345), False, 'import sys\n'), ((711, 816), 'netCDF4.Dataset', 'nc.Dataset', (['"""/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc"""'], {}), "(\n '/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc'\n )\n", (721, 816), True, 'import netCDF4 as nc\n'), ((1044, 1069), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Z', '(0)'], {}), '(Z, 0)\n', (1063, 1069), True, 'import numpy as np\n'), ((1485, 1527), 'numpy.empty', 'np.empty', (['(30, 50, Z.shape[0], Z.shape[1])'], {}), '((30, 50, Z.shape[0], Z.shape[1]))\n', (1493, 1527), True, 'import numpy as np\n'), ((1536, 1578), 'numpy.empty', 'np.empty', (['(30, 50, Z.shape[0], Z.shape[1])'], {}), '((30, 50, Z.shape[0], Z.shape[1]))\n', (1544, 1578), True, 'import numpy as np\n'), ((2036, 2059), 'numpy.empty_like', 'np.empty_like', (['sal_june'], {}), '(sal_june)\n', (2049, 2059), True, 'import numpy as np\n'), ((2073, 2096), 'numpy.empty_like', 'np.empty_like', (['sal_june'], {}), '(sal_june)\n', (2086, 2096), True, 'import numpy as np\n'), ((2108, 2131), 'numpy.empty_like', 'np.empty_like', (['sal_june'], {}), '(sal_june)\n', (2121, 2131), True, 'import numpy as np\n'), ((2142, 2165), 'numpy.empty_like', 'np.empty_like', (['sal_june'], {}), '(sal_june)\n', (2155, 2165), True, 'import numpy as np\n'), ((2176, 2204), 'numpy.arange', 'np.arange', (['sal_june.shape[0]'], {}), '(sal_june.shape[0])\n', (2185, 2204), True, 'import numpy as np\n'), ((2881, 2981), 'netCDF4.Dataset', 'nc.Dataset', (["(path_to_save + 'NEP36_T_S_Spice_june_larger_offshore_rho_correct.nc')", '"""w"""'], {'zlib': '(True)'}), "(path_to_save +\n 'NEP36_T_S_Spice_june_larger_offshore_rho_correct.nc', 'w', zlib=True)\n", (2891, 2981), True, 'import netCDF4 as nc\n'), ((1095, 1114), 'numpy.arange', 'np.arange', (['(180)', '(350)'], {}), '(180, 350)\n', (1104, 1114), True, 'import numpy as np\n'), ((1139, 1158), 'numpy.arange', 'np.arange', (['(480)', '(650)'], {}), '(480, 650)\n', (1148, 1158), True, 'import numpy as np\n'), ((1357, 1393), 'collections.namedtuple', 'namedtuple', (['"""scalar_ts"""', '"""temp, sal"""'], {}), "('scalar_ts', 'temp, sal')\n", (1367, 1393), False, 'from collections import namedtuple, OrderedDict\n'), ((1603, 1705), 'glob.glob', 'glob.glob', (['"""/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_201506*grid_T.nc"""'], {}), "(\n '/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_201506*grid_T.nc'\n )\n", (1612, 1705), False, 'import glob\n'), ((2008, 2020), 'numpy.mean', 'np.mean', (['lat'], {}), '(lat)\n', (2015, 2020), True, 'import numpy as np\n'), ((2219, 2247), 'numpy.arange', 'np.arange', (['sal_june.shape[1]'], {}), '(sal_june.shape[1])\n', (2228, 2247), True, 'import numpy as np\n'), ((856, 970), 'netCDF4.Dataset', 'nc.Dataset', (['"""/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc"""'], {}), "(\n '/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc'\n )\n", (866, 970), True, 'import netCDF4 as nc\n'), ((1752, 1768), 'netCDF4.Dataset', 'nc.Dataset', (['file'], {}), '(file)\n', (1762, 1768), True, 'import netCDF4 as nc\n'), ((2266, 2285), 'numpy.arange', 'np.arange', (['(180)', '(350)'], {}), '(180, 350)\n', (2275, 2285), True, 'import numpy as np\n'), ((2307, 2326), 'numpy.arange', 'np.arange', (['(480)', '(650)'], {}), '(480, 650)\n', (2316, 2326), True, 'import numpy as np\n'), ((2365, 2440), 'gsw.SA_from_SP', 'gsw.SA_from_SP', (['sal_june[t, k, j, i]', 'pressure_loc[k]', 'lon[j, i]', 'lat[j, i]'], {}), '(sal_june[t, k, j, i], pressure_loc[k], lon[j, i], lat[j, i])\n', (2379, 2440), False, 'import gsw\n'), ((2474, 2533), 'gsw.CT_from_pt', 'gsw.CT_from_pt', (['sal_june[t, k, j, i]', 'temp_june[t, k, j, i]'], {}), '(sal_june[t, k, j, i], temp_june[t, k, j, i])\n', (2488, 2533), False, 'import gsw\n'), ((2564, 2626), 'gsw.spiciness0', 'gsw.spiciness0', (['SA_loc_jun[t, k, j, i]', 'CT_loc_jun[t, k, j, i]'], {}), '(SA_loc_jun[t, k, j, i], CT_loc_jun[t, k, j, i])\n', (2578, 2626), False, 'import gsw\n'), ((2768, 2834), 'gsw.density.rho', 'gsw.density.rho', (['SA_loc_jun[t, k, j, i]', 'CT_loc_jun[t, k, j, i]', '(0)'], {}), '(SA_loc_jun[t, k, j, i], CT_loc_jun[t, k, j, i], 0)\n', (2783, 2834), False, 'import gsw\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.