code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from __future__ import print_function
import os
import logging
import numpy as np
import random
import math
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import shutil
from shutil import copyfile
from datetime import datetime
from tensorboardX import SummaryWriter
from cnnf.model_cifar import WideResNet
from cnnf.model_mnist import CNNF
from utils import *
from advertorch.attacks import GradientSignAttack, LinfPGDAttack
from advertorch.context import ctx_noparamgrad_and_eval
def train_adv(args, model, device, train_loader, optimizer, scheduler, epoch,
              cycles, mse_parameter=1.0, clean_parameter=1.0, clean='supclean'):
    """Adversarially train a CNN-F model for one epoch.

    For each batch, Linf-PGD adversarial examples are generated, then the
    model is trained on the concatenation of clean and adversarial images
    using a cross-entropy loss plus reconstruction (MSE) losses accumulated
    over `cycles` feedback iterations.

    Args:
        args: parsed CLI args; uses eps, nb_iter, eps_iter, dataset,
            res_parameter, grad_clip, log_interval.
        model: CNN-F network exposing reset() and forward with
            first/inter/step/inter_recon flags.
        device: torch device to place batches on.
        train_loader: training DataLoader.
        optimizer: optimizer for model parameters.
        scheduler: per-step LR scheduler (stepped once per batch).
        epoch: current epoch index (logging only).
        cycles: number of feedback cycles.
        mse_parameter: weight of the reconstruction losses.
        clean_parameter: weight of the clean-image cross-entropy term.
        clean: 'supclean' to also supervise on clean logits, 'no' otherwise.

    Returns:
        (average training loss, training accuracy on clean images)
    """
    model.train()
    correct = 0
    train_loss = 0.0
    model.reset()
    adversary = LinfPGDAttack(
        model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=args.eps,
        nb_iter=args.nb_iter, eps_iter=args.eps_iter, rand_init=True,
        clip_min=-1.0, clip_max=1.0, targeted=False)
    print(len(train_loader))
    for batch_idx, (images, targets) in enumerate(train_loader):
        optimizer.zero_grad()
        # Fix: honor the `device` argument instead of hard-coding .cuda().
        images = images.to(device)
        targets = targets.to(device)
        model.reset()
        with ctx_noparamgrad_and_eval(model):
            adv_images = adversary.perturb(images, targets)
        images_all = torch.cat((images, adv_images), 0)
        # Reset the model latent variables
        model.reset()
        if args.dataset == 'cifar10':
            logits, orig_feature_all, block1_all, block2_all, block3_all = \
                model(images_all, first=True, inter=True)
        elif args.dataset == 'fashion':
            logits, orig_feature_all, block1_all, block2_all = \
                model(images_all, first=True, inter=True)
        ff_prev = orig_feature_all
        # Clean-image halves of the intermediate features: these are the
        # reconstruction targets for the adversarial halves below.
        orig_feature, _ = torch.split(orig_feature_all, images.size(0))
        block1_clean, _ = torch.split(block1_all, images.size(0))
        block2_clean, _ = torch.split(block2_all, images.size(0))
        if args.dataset == 'cifar10':
            block3_clean, _ = torch.split(block3_all, images.size(0))
        logits_clean, logits_adv = torch.split(logits, images.size(0))
        if 'no' not in clean:
            loss = (clean_parameter * F.cross_entropy(logits_clean, targets)
                    + F.cross_entropy(logits_adv, targets)) / (2 * (cycles + 1))
        else:
            loss = F.cross_entropy(logits_adv, targets) / (cycles + 1)
        for i_cycle in range(cycles):
            if args.dataset == 'cifar10':
                recon, block1_recon, block2_recon, block3_recon = \
                    model(logits, step='backward', inter_recon=True)
            elif args.dataset == 'fashion':
                recon, block1_recon, block2_recon = \
                    model(logits, step='backward', inter_recon=True)
            recon_clean, recon_adv = torch.split(recon, images.size(0))
            recon_block1_clean, recon_block1_adv = torch.split(block1_recon, images.size(0))
            recon_block2_clean, recon_block2_adv = torch.split(block2_recon, images.size(0))
            if args.dataset == 'cifar10':
                recon_block3_clean, recon_block3_adv = torch.split(block3_recon, images.size(0))
                loss += (F.mse_loss(recon_adv, orig_feature)
                         + F.mse_loss(recon_block1_adv, block1_clean)
                         + F.mse_loss(recon_block2_adv, block2_clean)
                         + F.mse_loss(recon_block3_adv, block3_clean)) * mse_parameter / (4 * cycles)
            elif args.dataset == 'fashion':
                loss += (F.mse_loss(recon_adv, orig_feature)
                         + F.mse_loss(recon_block1_adv, block1_clean)
                         + F.mse_loss(recon_block2_adv, block2_clean)) * mse_parameter / (3 * cycles)
            # feedforward
            ff_current = ff_prev + args.res_parameter * (recon - ff_prev)
            logits = model(ff_current, first=False)
            ff_prev = ff_current
            logits_clean, logits_adv = torch.split(logits, images.size(0))
            if 'no' not in clean:
                loss += (clean_parameter * F.cross_entropy(logits_clean, targets)
                         + F.cross_entropy(logits_adv, targets)) / (2 * (cycles + 1))
            else:
                loss += F.cross_entropy(logits_adv, targets) / (cycles + 1)
        pred = logits_clean.argmax(dim=1, keepdim=True)  # index of the max log-probability
        correct += pred.eq(targets.view_as(pred)).sum().item()
        loss.backward()
        if args.grad_clip:
            nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()
        scheduler.step()
        # Fix: .item() detaches — accumulating the tensor kept every batch's
        # autograd graph alive, leaking memory over the epoch.
        train_loss += loss.item()
        if batch_idx % args.log_interval == 0:
            # Fix: progress count used len(images[0]) (= channel count),
            # not the batch size.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(images), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    train_loss /= len(train_loader)
    acc = correct / len(train_loader.dataset)
    return train_loss, acc
def train(args, model, device, train_loader, optimizer, scheduler, epoch,
          cycles, mse_parameter=1.0, clean_parameter=1.0, clean="no"):
    """Train the CNN-F model on clean images for one epoch.

    Uses a cross-entropy loss plus reconstruction (MSE) losses between the
    generative feedback and the original intermediate features, accumulated
    over `cycles` feedback iterations.

    Args:
        args: parsed CLI args; uses dataset, res_parameter, grad_clip,
            log_interval.
        model: CNN-F network exposing reset() and forward with
            first/inter/step/inter_recon flags.
        device: torch device to place batches on.
        train_loader: training DataLoader.
        optimizer: optimizer for model parameters.
        scheduler: per-step LR scheduler (stepped once per batch).
        epoch: current epoch index (logging only).
        cycles: number of feedback cycles.
        mse_parameter: weight of the reconstruction losses.
        clean_parameter: unused here; kept for signature parity with
            train_adv.
        clean: unused here; kept for signature parity with train_adv.

    Returns:
        (average training loss, training accuracy)
    """
    model.train()
    correct = 0
    train_loss = 0.0
    model.reset()
    for batch_idx, (images, targets) in enumerate(train_loader):
        optimizer.zero_grad()
        # Fix: honor the `device` argument instead of hard-coding .cuda().
        images = images.to(device)
        targets = targets.to(device)
        # Reset the model latent variables (was called twice; once suffices).
        model.reset()
        if args.dataset == 'cifar10':
            logits, orig_feature, block1, block2, block3 = \
                model(images, first=True, inter=True)
        elif args.dataset == 'fashion':
            # Fix: was assigned to `orig_feature_all`, leaving `orig_feature`
            # undefined (NameError) whenever --dataset fashion was used.
            logits, orig_feature, block1, block2 = \
                model(images, first=True, inter=True)
        ff_prev = orig_feature
        loss = F.cross_entropy(logits, targets) / (cycles + 1)
        for i_cycle in range(cycles):
            if args.dataset == 'cifar10':
                recon, block1_recon, block2_recon, block3_recon = \
                    model(logits, step='backward', inter_recon=True)
                loss += (F.mse_loss(recon, orig_feature)
                         + F.mse_loss(block1_recon, block1)
                         + F.mse_loss(block2_recon, block2)
                         + F.mse_loss(block3_recon, block3)) * mse_parameter / (4 * cycles)
            elif args.dataset == 'fashion':
                recon, block1_recon, block2_recon = \
                    model(logits, step='backward', inter_recon=True)
                loss += (F.mse_loss(recon, orig_feature)
                         + F.mse_loss(block1_recon, block1)
                         + F.mse_loss(block2_recon, block2)) * mse_parameter / (3 * cycles)
            # feedforward
            ff_current = ff_prev + args.res_parameter * (recon - ff_prev)
            logits = model(ff_current, first=False)
            ff_prev = ff_current
            loss += F.cross_entropy(logits, targets) / (cycles + 1)
        pred = logits.argmax(dim=1, keepdim=True)  # index of the max log-probability
        correct += pred.eq(targets.view_as(pred)).sum().item()
        loss.backward()
        if args.grad_clip:
            nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()
        scheduler.step()
        # Fix: .item() detaches — accumulating the tensor kept every batch's
        # autograd graph alive, leaking memory over the epoch.
        train_loss += loss.item()
        if batch_idx % args.log_interval == 0:
            # Fix: progress count used len(images[0]) (= channel count),
            # not the batch size.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(images), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    train_loss /= len(train_loader)
    acc = correct / len(train_loader.dataset)
    return train_loss, acc
def test(args, model, device, test_loader, cycles, epoch):
    """Evaluate the model on clean test data.

    Runs `cycles` feedback iterations per batch and reports the summed
    cross-entropy of the final-cycle output.

    Args:
        args: parsed CLI args; uses dataset and res_parameter.
        model: CNN-F network.
        device: torch device to place batches on.
        test_loader: evaluation DataLoader.
        cycles: number of feedback cycles.
        epoch: unused; kept for backward-compatible signature.

    Returns:
        (average test loss, test accuracy)
    """
    model.eval()
    test_loss = 0
    correct = 0
    # Fix: removed unused local `noise_loss` and the unused enumerate index.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            # Calculate accuracy with the original images
            model.reset()
            if args.dataset == 'cifar10':
                output, orig_feature, _, _, _ = model(data, first=True, inter=True)
            else:
                output, orig_feature, _, _ = model(data, first=True, inter=True)
            ff_prev = orig_feature
            for i_cycle in range(cycles):
                recon = model(output, step='backward')
                ff_current = ff_prev + args.res_parameter * (recon - ff_prev)
                output = model(ff_current, first=False)
                ff_prev = ff_current
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return test_loss, correct / len(test_loader.dataset)
def test_pgd(args, model, device, test_loader, epsilon=0.063):
    """Measure accuracy under a Linf PGD attack of magnitude `epsilon`.

    The attack is run against model.forward_adv; accuracy is then computed
    on the perturbed images with model.run_cycles.

    Returns:
        Robust accuracy as a fraction in [0, 1].
    """
    model.eval()
    model.reset()
    attack = LinfPGDAttack(
        model.forward_adv, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=epsilon,
        nb_iter=args.nb_iter, eps_iter=args.eps_iter, rand_init=True,
        clip_min=-1.0, clip_max=1.0, targeted=False)
    n_correct = 0
    for data, target in test_loader:
        data = data.to(device)
        target = target.to(device)
        model.reset()
        # Attack generation must not accumulate parameter gradients.
        with ctx_noparamgrad_and_eval(model):
            perturbed = attack.perturb(data, target)
        predictions = model.run_cycles(perturbed).argmax(dim=1, keepdim=True)
        n_correct += predictions.eq(target.view_as(predictions)).sum().item()
    acc = n_correct / len(test_loader.dataset)
    print('PGD attack Acc {:.3f}'.format(100. * acc))
    return acc
def main():
    """Entry point: parse CLI arguments, build the dataset/model/optimizer,
    then run the full training loop with per-epoch clean evaluation,
    periodic PGD robustness evaluation, and checkpointing."""
    parser = argparse.ArgumentParser(description='CNNF training')
    # optimization parameters
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128 for CIFAR, 64 for MNIST)')
    parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
                        help='input batch size for testing (default: 1000)')
    # NOTE(review): several help strings above/below disagree with the actual
    # defaults (e.g. test batch default is 128, epochs default is 200) — confirm.
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.15, metavar='LR',
                        help='learning rate (default: 0.05 for SGD)')
    parser.add_argument('--power', type=float, default=0.9, metavar='LR',
                        help='learning rate for poly scheduling')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--wd', default=5e-4, type=float,
                        help='weight decay (default: 5e-4)')
    parser.add_argument('--grad-clip', action='store_true', default=False,
                        help='enable gradient clipping')
    parser.add_argument('--dataset', choices=['cifar10', 'fashion'],
                        default='fashion', help='the dataset for training the model')
    parser.add_argument('--schedule', choices=['poly', 'cos', 'stepLR'],
                        default='poly', help='scheduling for learning rate')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=1, metavar='N',
                        help='how many batches to wait before logging training status')
    # adversarial training parameters
    parser.add_argument('--eps', type=float, default=0.063,
                        help='Perturbation magnitude for adv training')
    parser.add_argument('--eps-iter', type=float, default=0.02,
                        help='attack step size')
    parser.add_argument('--nb_iter', type=int, default=7,
                        help='number of steps in pgd attack')
    parser.add_argument('--clean', choices=['no', 'supclean'],
                        default='no', help='whether to use clean data in adv training')
    # hyper-parameters
    parser.add_argument('--mse-parameter', type=float, default=1.0,
                        help='weight of the reconstruction loss')
    parser.add_argument('--clean-parameter', type=float, default=1.0,
                        help='weight of the clean Xentropy loss')
    parser.add_argument('--res-parameter', type=float, default=0.1,
                        help='step size for residuals')
    # model parameters
    parser.add_argument('--layers', default=40, type=int, help='total number of layers for WRN')
    parser.add_argument('--widen-factor', default=2, type=int, help='Widen factor for WRN')
    parser.add_argument('--droprate', default=0.0, type=float, help='Dropout probability')
    parser.add_argument('--ind', type=int, default=2,
                        help='index of the intermediate layer to reconstruct to')
    parser.add_argument('--max-cycles', type=int, default=2,
                        help='the maximum cycles that the CNN-F uses')
    parser.add_argument('--save-model', default="model",  # None
                        help='Name for Saving the current Model')
    parser.add_argument('--model-dir', default="runs",  # None
                        help='Directory for Saving the current Model')
    args = parser.parse_args()
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # NOTE(review): `kwargs` is built but never passed to any DataLoader — confirm.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    seed_torch(args.seed)
    # TensorBoard writer; logs go to <model_dir>/<save_model>.
    Tensor_writer = SummaryWriter(os.path.join(args.model_dir, args.save_model))
    # CIFAR training uses flip + random-crop augmentation; all inputs are
    # normalized to [-1, 1] (matching the attack's clip_min/clip_max).
    train_transform_cifar = transforms.Compose(
        [transforms.RandomHorizontalFlip(),
         transforms.RandomCrop(32, padding=4),
         transforms.ToTensor(),
         transforms.Normalize([0.5] * 3, [0.5] * 3)])
    test_transform_cifar = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize([0.5] * 3, [0.5] * 3)])
    transform_mnist = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5,), (0.5,))])
    # Load datasets and architecture
    if args.dataset == 'fashion':
        train_loader = torch.utils.data.DataLoader(
            datasets.FashionMNIST('data', train=True, download=True,
                                  transform=transform_mnist),
            batch_size=args.batch_size, shuffle=True, drop_last=True)
        test_loader = torch.utils.data.DataLoader(
            datasets.FashionMNIST('data', train=False, transform=transform_mnist),
            batch_size=args.test_batch_size, shuffle=True, drop_last=True)
        num_classes = 10
        model = CNNF(num_classes, ind=args.ind, cycles=args.max_cycles, res_param=args.res_parameter).to(device)
    elif args.dataset == 'cifar10':
        train_data = datasets.CIFAR10(
            'data', train=True, transform=train_transform_cifar, download=True)
        test_data = datasets.CIFAR10(
            'data', train=False, transform=test_transform_cifar, download=True)
        train_loader = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            shuffle=True, num_workers=4, pin_memory=True)
        test_loader = torch.utils.data.DataLoader(
            test_data, batch_size=args.test_batch_size,
            shuffle=True, num_workers=4, pin_memory=True)
        num_classes = 10
        model = WideResNet(args.layers, 10, args.widen_factor, args.droprate, args.ind, args.max_cycles, args.res_parameter).to(device)
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr,
        momentum=args.momentum,
        weight_decay=args.wd)
    # Schedulers are stepped once per *batch* by train()/train_adv(), so the
    # lambda schedules are parameterized over epochs * batches-per-epoch.
    if(args.schedule == 'cos'):
        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lr_lambda=lambda step: get_lr(step, args.epochs * len(train_loader), 1.0, 1e-5))
    elif(args.schedule == 'stepLR'):
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
    else:
        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lr_lambda=lambda step: lr_poly(1.0, step, args.epochs * len(train_loader), args.power))
    # Begin training
    best_acc = 0
    for epoch in range(args.epochs):
        train_loss, train_acc = train(args, model, device, train_loader, optimizer, scheduler, epoch,
                                      cycles=args.max_cycles, mse_parameter=args.mse_parameter, clean_parameter=args.clean_parameter, clean=args.clean)
        test_loss, test_acc = test(args, model, device, test_loader, cycles=args.max_cycles, epoch=epoch)
        Tensor_writer.add_scalars('loss', {'train': train_loss}, epoch)
        Tensor_writer.add_scalars('acc', {'train': train_acc}, epoch)
        Tensor_writer.add_scalars('loss', {'test': test_loss}, epoch)
        Tensor_writer.add_scalars('acc', {'test': test_acc}, epoch)
        # Save the model with the best accuracy
        if test_acc > best_acc and args.save_model is not None:
            best_acc = test_acc
            experiment_fn = args.save_model
            torch.save(model.state_dict(),
                       args.model_dir + "/{}-best.pt".format(experiment_fn))
        # Every 50 epochs: snapshot the weights and evaluate PGD robustness.
        if ((epoch+1)%50)==0 and args.save_model is not None:
            experiment_fn = args.save_model
            torch.save(model.state_dict(),
                       args.model_dir + "/{}-epoch{}.pt".format(experiment_fn,epoch))
            pgd_acc = test_pgd(args, model, device, test_loader, epsilon=args.eps)
            Tensor_writer.add_scalars('pgd_acc', {'test': pgd_acc}, epoch)
    # Save final model
    if args.save_model is not None:
        experiment_fn = args.save_model
        torch.save(model.state_dict(),
                   args.model_dir + "/{}.pt".format(experiment_fn))
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"torch.cat",
"advertorch.context.ctx_noparamgrad_and_eval",
"torchvision.datasets.CIFAR10",
"torch.device",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"cnnf.model_mnist.CNNF",
"torch.utils.data.DataLoader",
"os.path.exists",
"torchvision.transforms.RandomHorizontalFlip",
"torch.nn.functional.mse_loss",
"torch.nn.functional.cross_entropy",
"cnnf.model_cifar.WideResNet",
"torch.cuda.is_available",
"torchvision.transforms.RandomCrop",
"os.makedirs",
"torchvision.datasets.FashionMNIST",
"torch.nn.CrossEntropyLoss",
"torchvision.transforms.ToTensor"
] |
[((9922, 9974), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CNNF training"""'}), "(description='CNNF training')\n", (9945, 9974), False, 'import argparse\n'), ((13812, 13855), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (13824, 13855), False, 'import torch\n'), ((1437, 1471), 'torch.cat', 'torch.cat', (['(images, adv_images)', '(0)'], {}), '((images, adv_images), 0)\n', (1446, 1471), False, 'import torch\n'), ((7814, 7829), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7827, 7829), False, 'import torch\n'), ((13668, 13698), 'os.path.exists', 'os.path.exists', (['args.model_dir'], {}), '(args.model_dir)\n', (13682, 13698), False, 'import os\n'), ((13708, 13735), 'os.makedirs', 'os.makedirs', (['args.model_dir'], {}), '(args.model_dir)\n', (13719, 13735), False, 'import os\n'), ((13773, 13798), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13796, 13798), False, 'import torch\n'), ((13990, 14035), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.save_model'], {}), '(args.model_dir, args.save_model)\n', (14002, 14035), False, 'import os\n'), ((929, 965), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (948, 965), True, 'import torch.nn as nn\n'), ((1322, 1353), 'advertorch.context.ctx_noparamgrad_and_eval', 'ctx_noparamgrad_and_eval', (['model'], {}), '(model)\n', (1346, 1353), False, 'from advertorch.context import ctx_noparamgrad_and_eval\n'), ((5863, 5895), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'targets'], {}), '(logits, targets)\n', (5878, 5895), True, 'import torch.nn.functional as F\n'), ((9193, 9229), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (9212, 9229), True, 'import torch.nn as nn\n'), ((9530, 9561), 'advertorch.context.ctx_noparamgrad_and_eval', 
'ctx_noparamgrad_and_eval', (['model'], {}), '(model)\n', (9554, 9561), False, 'from advertorch.context import ctx_noparamgrad_and_eval\n'), ((14093, 14126), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (14124, 14126), False, 'from torchvision import datasets, transforms\n'), ((14135, 14171), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (14156, 14171), False, 'from torchvision import datasets, transforms\n'), ((14180, 14201), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (14199, 14201), False, 'from torchvision import datasets, transforms\n'), ((14210, 14252), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['([0.5] * 3)', '([0.5] * 3)'], {}), '([0.5] * 3, [0.5] * 3)\n', (14230, 14252), False, 'from torchvision import datasets, transforms\n'), ((14310, 14331), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (14329, 14331), False, 'from torchvision import datasets, transforms\n'), ((14340, 14382), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['([0.5] * 3)', '([0.5] * 3)'], {}), '([0.5] * 3, [0.5] * 3)\n', (14360, 14382), False, 'from torchvision import datasets, transforms\n'), ((14435, 14456), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (14454, 14456), False, 'from torchvision import datasets, transforms\n'), ((14465, 14501), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5,)', '(0.5,)'], {}), '((0.5,), (0.5,))\n', (14485, 14501), False, 'from torchvision import datasets, transforms\n'), ((14640, 14728), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""data"""'], {'train': '(True)', 'download': '(True)', 'transform': 'transform_mnist'}), "('data', train=True, download=True, transform=\n transform_mnist)\n", (14661, 14728), False, 'from torchvision import datasets, transforms\n'), ((14885, 
14954), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""data"""'], {'train': '(False)', 'transform': 'transform_mnist'}), "('data', train=False, transform=transform_mnist)\n", (14906, 14954), False, 'from torchvision import datasets, transforms\n'), ((15227, 15315), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""data"""'], {'train': '(True)', 'transform': 'train_transform_cifar', 'download': '(True)'}), "('data', train=True, transform=train_transform_cifar,\n download=True)\n", (15243, 15315), False, 'from torchvision import datasets, transforms\n'), ((15345, 15433), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""data"""'], {'train': '(False)', 'transform': 'test_transform_cifar', 'download': '(True)'}), "('data', train=False, transform=test_transform_cifar,\n download=True)\n", (15361, 15433), False, 'from torchvision import datasets, transforms\n'), ((15466, 15584), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(train_data, batch_size=args.batch_size, shuffle\n =True, num_workers=4, pin_memory=True)\n', (15493, 15584), False, 'import torch\n'), ((15623, 15744), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'args.test_batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(test_data, batch_size=args.test_batch_size,\n shuffle=True, num_workers=4, pin_memory=True)\n', (15650, 15744), False, 'import torch\n'), ((16319, 16386), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(30)', 'gamma': '(0.1)'}), '(optimizer, step_size=30, gamma=0.1)\n', (16350, 16386), False, 'import torch\n'), ((2512, 2548), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_adv', 'targets'], {}), '(logits_adv, targets)\n', (2527, 2548), True, 'import torch.nn.functional as 
F\n'), ((6899, 6931), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'targets'], {}), '(logits, targets)\n', (6914, 6931), True, 'import torch.nn.functional as F\n'), ((15072, 15162), 'cnnf.model_mnist.CNNF', 'CNNF', (['num_classes'], {'ind': 'args.ind', 'cycles': 'args.max_cycles', 'res_param': 'args.res_parameter'}), '(num_classes, ind=args.ind, cycles=args.max_cycles, res_param=args.\n res_parameter)\n', (15076, 15162), False, 'from cnnf.model_mnist import CNNF\n'), ((2424, 2460), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_adv', 'targets'], {}), '(logits_adv, targets)\n', (2439, 2460), True, 'import torch.nn.functional as F\n'), ((4236, 4272), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_adv', 'targets'], {}), '(logits_adv, targets)\n', (4251, 4272), True, 'import torch.nn.functional as F\n'), ((8561, 8609), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (8576, 8609), True, 'import torch.nn.functional as F\n'), ((15803, 15915), 'cnnf.model_cifar.WideResNet', 'WideResNet', (['args.layers', '(10)', 'args.widen_factor', 'args.droprate', 'args.ind', 'args.max_cycles', 'args.res_parameter'], {}), '(args.layers, 10, args.widen_factor, args.droprate, args.ind,\n args.max_cycles, args.res_parameter)\n', (15813, 15915), False, 'from cnnf.model_cifar import WideResNet\n'), ((2383, 2421), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_clean', 'targets'], {}), '(logits_clean, targets)\n', (2398, 2421), True, 'import torch.nn.functional as F\n'), ((4139, 4175), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_adv', 'targets'], {}), '(logits_adv, targets)\n', (4154, 4175), True, 'import torch.nn.functional as F\n'), ((3458, 3500), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon_block3_adv', 'block3_clean'], {}), '(recon_block3_adv, block3_clean)\n', (3468, 3500), True, 
'import torch.nn.functional as F\n'), ((4098, 4136), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_clean', 'targets'], {}), '(logits_clean, targets)\n', (4113, 4136), True, 'import torch.nn.functional as F\n'), ((6427, 6459), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['block3_recon', 'block3'], {}), '(block3_recon, block3)\n', (6437, 6459), True, 'import torch.nn.functional as F\n'), ((3413, 3455), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon_block2_adv', 'block2_clean'], {}), '(recon_block2_adv, block2_clean)\n', (3423, 3455), True, 'import torch.nn.functional as F\n'), ((3685, 3727), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon_block2_adv', 'block2_clean'], {}), '(recon_block2_adv, block2_clean)\n', (3695, 3727), True, 'import torch.nn.functional as F\n'), ((6392, 6424), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['block2_recon', 'block2'], {}), '(block2_recon, block2)\n', (6402, 6424), True, 'import torch.nn.functional as F\n'), ((6630, 6662), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['block2_recon', 'block2'], {}), '(block2_recon, block2)\n', (6640, 6662), True, 'import torch.nn.functional as F\n'), ((3330, 3365), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon_adv', 'orig_feature'], {}), '(recon_adv, orig_feature)\n', (3340, 3365), True, 'import torch.nn.functional as F\n'), ((3368, 3410), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon_block1_adv', 'block1_clean'], {}), '(recon_block1_adv, block1_clean)\n', (3378, 3410), True, 'import torch.nn.functional as F\n'), ((3602, 3637), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon_adv', 'orig_feature'], {}), '(recon_adv, orig_feature)\n', (3612, 3637), True, 'import torch.nn.functional as F\n'), ((3640, 3682), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon_block1_adv', 'block1_clean'], {}), '(recon_block1_adv, block1_clean)\n', (3650, 3682), True, 'import torch.nn.functional as F\n'), ((6323, 6354), 'torch.nn.functional.mse_loss', 
'F.mse_loss', (['recon', 'orig_feature'], {}), '(recon, orig_feature)\n', (6333, 6354), True, 'import torch.nn.functional as F\n'), ((6357, 6389), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['block1_recon', 'block1'], {}), '(block1_recon, block1)\n', (6367, 6389), True, 'import torch.nn.functional as F\n'), ((6561, 6592), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon', 'orig_feature'], {}), '(recon, orig_feature)\n', (6571, 6592), True, 'import torch.nn.functional as F\n'), ((6595, 6627), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['block1_recon', 'block1'], {}), '(block1_recon, block1)\n', (6605, 6627), True, 'import torch.nn.functional as F\n')]
|
import torch
import random
import socket
import os
import numpy as np
def get_device(device):
    """Resolve a requested device ('cpu' or 'cuda') to one usable here.

    Always falls back to 'cpu' on the host named 'gemini' or when CUDA
    is unavailable; otherwise honors the request.
    """
    assert device in ('cpu', 'cuda'), \
        'device {} should be in (cpu, cuda)'.format(device)
    cuda_usable = torch.cuda.is_available() and socket.gethostname() != 'gemini'
    if not cuda_usable:
        return 'cpu'
    return 'cuda' if device == 'cuda' else 'cpu'
def seed_exp(seed, device='cuda'):
    """Seed python, numpy and torch RNGs for a reproducible experiment.

    Also seeds the CUDA RNG when device == 'cuda', and restricts torch
    to a single thread.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if device == 'cuda':
        torch.cuda.manual_seed(seed)
    torch.set_num_threads(1)
def update_arguments(model=None, dataset=None, collect=None, sim2real=None):
    """ User provides the arguments in a user-friendly way.
    This function takes care of converting them to the format used by the repo.

    Each argument is an optional dict that is normalized *in place* (names
    are replaced by full paths rooted at the repo log directories). Returns
    a list of the non-None normalized dicts, in (model, dataset, collect,
    sim2real) order. """
    from bc.settings import MODEL_LOGDIR, DATASET_LOGDIR

    def update_model_args(model):
        # Normalize model args: map input_type string to a tuple and
        # replace 'name' with the full 'model_dir' path.
        if model is None:
            return None
        # convert the input_type argument from a string to a tuple
        if isinstance(model['input_type'], (tuple, list)):
            # NOTE(review): this bare `return` yields None, which makes the
            # caller drop the model dict from the result when input_type is
            # already a tuple/list — confirm this is intended.
            return
        input_type_str2list = {
            'rgb': ('rgb', ),
            'depth': ('depth', ),
            'rgbd': ('depth', 'rgb')
        }
        assert model['input_type'] in input_type_str2list
        model['input_type'] = input_type_str2list[model['input_type']]
        # get the full paths using the user-specified settings
        model['model_dir'] = os.path.join(MODEL_LOGDIR, model['name'])
        model.pop('name')
        return model

    def update_dataset_args(dataset):
        # Normalize dataset args: expand 'name' to 'dataset_dir' and tag
        # every signal key as a ('state', key) pair.
        if dataset is None:
            return None
        dataset['dataset_dir'] = os.path.join(DATASET_LOGDIR, dataset['name'])
        dataset.pop('name')
        signal_keys_updated = []
        for signal_key in dataset['signal_keys']:
            signal_keys_updated.append(('state', signal_key))
        dataset['signal_keys'] = signal_keys_updated
        return dataset

    def update_collect_args(collect):
        # Normalize collection args: expand 'folder' to 'collect_dir'.
        if collect is None:
            return None
        collect['collect_dir'] = os.path.join(DATASET_LOGDIR, collect['folder'])
        collect.pop('folder')
        return collect

    def update_sim2real_args(sim2real):
        # Normalize sim2real args: expand the model and train/eval dataset
        # names to full directory paths.
        if sim2real is None:
            return None
        sim2real['mcts_dir'] = os.path.join(MODEL_LOGDIR, sim2real['name'])
        sim2real['trainset_dir'] = os.path.join(DATASET_LOGDIR, sim2real['trainset_name'])
        sim2real['evalset_dir'] = os.path.join(DATASET_LOGDIR, sim2real['evalset_name'])
        sim2real.pop('name')
        return sim2real

    model = update_model_args(model)
    dataset = update_dataset_args(dataset)
    collect = update_collect_args(collect)
    sim2real = update_sim2real_args(sim2real)
    # Only the argument groups that were actually provided are returned.
    return [args for args in (model, dataset, collect, sim2real) if args is not None]
|
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed",
"socket.gethostname",
"torch.set_num_threads",
"random.seed",
"torch.cuda.is_available",
"os.path.join"
] |
[((416, 433), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (427, 433), False, 'import random\n'), ((438, 458), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (452, 458), True, 'import numpy as np\n'), ((463, 486), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (480, 486), False, 'import torch\n'), ((553, 577), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (574, 577), False, 'import torch\n'), ((520, 548), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (542, 548), False, 'import torch\n'), ((1453, 1494), 'os.path.join', 'os.path.join', (['MODEL_LOGDIR', "model['name']"], {}), "(MODEL_LOGDIR, model['name'])\n", (1465, 1494), False, 'import os\n'), ((1666, 1711), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "dataset['name']"], {}), "(DATASET_LOGDIR, dataset['name'])\n", (1678, 1711), False, 'import os\n'), ((2085, 2132), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "collect['folder']"], {}), "(DATASET_LOGDIR, collect['folder'])\n", (2097, 2132), False, 'import os\n'), ((2311, 2355), 'os.path.join', 'os.path.join', (['MODEL_LOGDIR', "sim2real['name']"], {}), "(MODEL_LOGDIR, sim2real['name'])\n", (2323, 2355), False, 'import os\n'), ((2391, 2446), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "sim2real['trainset_name']"], {}), "(DATASET_LOGDIR, sim2real['trainset_name'])\n", (2403, 2446), False, 'import os\n'), ((2481, 2535), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "sim2real['evalset_name']"], {}), "(DATASET_LOGDIR, sim2real['evalset_name'])\n", (2493, 2535), False, 'import os\n'), ((202, 222), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (220, 222), False, 'import socket\n'), ((242, 267), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (265, 267), False, 'import torch\n')]
|
import os
import tempfile
import pathlib
import pytest
from wallabag.config import Configs, Options, Sections
from xdg.BaseDirectory import xdg_config_home as XDG_CONFIG_HOME
class TestConfigs():
configs = None
def teardown_method(self, method):
    # Close and delete the temporary config file created by setup_method.
    os.close(self.fd)
    os.remove(self.path)
def setup_method(self, method):
    # Create an empty temporary file to serve as the config file under test.
    self.fd, self.path = tempfile.mkstemp()
    with open(self.path, 'w') as f:
        f.write('')
    self.configs = Configs(self.path)
    # Pre-populate the parsed config depending on which test is about to run.
    if method.__name__ in ['test_get_config', 'test_is_token_expired',
                           'test_set_config', 'test_set_config_new']:
        # Minimal api/token sections with an already-expired token.
        self.configs.config.read_string("""
[api]
serverurl = https://server
[token]
expires = 1000
""")
    elif method.__name__ == 'test_is_valid__true':
        # All fields required by Configs.is_valid().
        self.configs.config.read_string("""
[api]
serverurl = url
username = user
password = <PASSWORD>
[oauth2]
client = 100
secret = 100
""")
    elif method.__name__ == 'test_get_path':
        # test_get_path needs the default (XDG-based) path, so no custom file.
        self.configs = Configs()
def test_get_path(self):
xdg_config = os.path.expanduser(XDG_CONFIG_HOME)
expected = f"{xdg_config}/wallabag-cli/config.ini"
actual = self.configs.get_path()
assert expected == str(actual)
def test_get_path_custom(self):
expected = pathlib.PurePath("custom/directory")
assert expected == self.configs.get_path(expected)
@pytest.mark.parametrize(
'condition',
[(Sections.TOKEN, Options.EXPIRES, '1000', 'get'),
(Sections.TOKEN, Options.ACCESS_TOKEN, None, 'get'),
(Sections.API, Options.SERVERURL, "https://server", 'get'),
(Sections.API, '', 0, 'getint'),
(Sections.API, '', None, 'get'),
('', '', None, 'get'),
(None, None, None, 'get'),
(None, None, 0, 'getint'),
(Sections.TOKEN, Options.EXPIRES, 1000, 'getint')])
def test_get_config(self, condition):
if condition[3] == 'get':
assert self.configs.get(condition[0], condition[1]) == condition[2]
elif condition[3] == 'getint':
assert self.configs.getint(
condition[0], condition[1]) == condition[2]
def test_is_token_expired(self):
assert self.configs.is_token_expired()
def test_is_token_expired_no_value(self):
assert self.configs.is_token_expired()
def test_is_valid__false(self):
assert not self.configs.is_valid()
def test_is_valid__true(self):
assert self.configs.is_valid()
def test_set_config(self):
self.configs.set(Sections.TOKEN, Options.EXPIRES, str(500))
assert self.configs.getint(Sections.TOKEN, Options.EXPIRES) == 500
def test_set_config_new(self):
self.configs.set(Sections.TOKEN, Options.ACCESS_TOKEN, 'abba')
assert self.configs.get(Sections.TOKEN, Options.ACCESS_TOKEN) == 'abba'
def test_load_or_create(self, monkeypatch):
self.save_called = False
def exists(path):
return False
def savemock(configs, path):
self.save_called = True
return True
monkeypatch.setattr(os.path, 'exists', exists)
monkeypatch.setattr(Configs, 'save', savemock)
self.configs.load_or_create()
assert self.save_called
def test_load_or_create_load(self, monkeypatch):
self.load_called = False
def exists(path):
return True
def loadmock(configs, path):
self.load_called = True
return True
monkeypatch.setattr(os.path, 'exists', exists)
monkeypatch.setattr(Configs, 'load', loadmock)
self.configs.load_or_create()
assert self.load_called
def test_load_or_create_value_error(self, monkeypatch):
def exists(path):
return False
def savemock(configs, path):
return False
monkeypatch.setattr(os.path, 'exists', exists)
monkeypatch.setattr(Configs, 'save', savemock)
with pytest.raises(ValueError, match=Configs.LOAD_ERROR):
self.configs.load_or_create()
@pytest.mark.parametrize(
'password',
['<PASSWORD>', 'password', '<PASSWORD>'])
def test_encryption(self, password):
self.configs.set(Sections.API, Options.PASSWORD, password)
encrypted = self.configs.config.get(Sections.API, Options.PASSWORD)
plain = self.configs.get(Sections.API, Options.PASSWORD)
assert encrypted != password
assert plain == password
|
[
"os.remove",
"tempfile.mkstemp",
"pytest.raises",
"os.close",
"pathlib.PurePath",
"wallabag.config.Configs",
"pytest.mark.parametrize",
"os.path.expanduser"
] |
[((1700, 2118), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""condition"""', "[(Sections.TOKEN, Options.EXPIRES, '1000', 'get'), (Sections.TOKEN, Options\n .ACCESS_TOKEN, None, 'get'), (Sections.API, Options.SERVERURL,\n 'https://server', 'get'), (Sections.API, '', 0, 'getint'), (Sections.\n API, '', None, 'get'), ('', '', None, 'get'), (None, None, None, 'get'),\n (None, None, 0, 'getint'), (Sections.TOKEN, Options.EXPIRES, 1000,\n 'getint')]"], {}), "('condition', [(Sections.TOKEN, Options.EXPIRES,\n '1000', 'get'), (Sections.TOKEN, Options.ACCESS_TOKEN, None, 'get'), (\n Sections.API, Options.SERVERURL, 'https://server', 'get'), (Sections.\n API, '', 0, 'getint'), (Sections.API, '', None, 'get'), ('', '', None,\n 'get'), (None, None, None, 'get'), (None, None, 0, 'getint'), (Sections\n .TOKEN, Options.EXPIRES, 1000, 'getint')])\n", (1723, 2118), False, 'import pytest\n'), ((4450, 4527), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""password"""', "['<PASSWORD>', 'password', '<PASSWORD>']"], {}), "('password', ['<PASSWORD>', 'password', '<PASSWORD>'])\n", (4473, 4527), False, 'import pytest\n'), ((269, 286), 'os.close', 'os.close', (['self.fd'], {}), '(self.fd)\n', (277, 286), False, 'import os\n'), ((295, 315), 'os.remove', 'os.remove', (['self.path'], {}), '(self.path)\n', (304, 315), False, 'import os\n'), ((382, 400), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (398, 400), False, 'import tempfile\n'), ((488, 506), 'wallabag.config.Configs', 'Configs', (['self.path'], {}), '(self.path)\n', (495, 506), False, 'from wallabag.config import Configs, Options, Sections\n'), ((1365, 1400), 'os.path.expanduser', 'os.path.expanduser', (['XDG_CONFIG_HOME'], {}), '(XDG_CONFIG_HOME)\n', (1383, 1400), False, 'import os\n'), ((1597, 1633), 'pathlib.PurePath', 'pathlib.PurePath', (['"""custom/directory"""'], {}), "('custom/directory')\n", (1613, 1633), False, 'import pathlib\n'), ((4349, 4400), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {'match': 'Configs.LOAD_ERROR'}), '(ValueError, match=Configs.LOAD_ERROR)\n', (4362, 4400), False, 'import pytest\n'), ((1304, 1313), 'wallabag.config.Configs', 'Configs', ([], {}), '()\n', (1311, 1313), False, 'from wallabag.config import Configs, Options, Sections\n')]
|
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset, sampler
from scipy.ndimage.filters import gaussian_filter
class MRIDataset(Dataset):
"""Dataset of MRI organized in a CAPS folder."""
def __init__(self, img_dir, data_file, preprocessing='linear', transform=None):
"""
Args:
img_dir (string): Directory of all the images.
data_file (string): File name of the train/test split file.
preprocessing (string): Defines the path to the data in CAPS
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.img_dir = img_dir
self.transform = transform
self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1, 'unlabeled': -1}
self.data_path = preprocessing
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument datafile is not of correct type.')
if ('diagnosis' not in list(self.df.columns.values)) or ('session_id' not in list(self.df.columns.values)) or \
('participant_id' not in list(self.df.columns.values)):
raise Exception("the data file is not in the correct format."
"Columns should include ['participant_id', 'session_id', 'diagnosis']")
self.size = self[0]['image'].numpy().size
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
img_name = self.df.loc[idx, 'participant_id']
img_label = self.df.loc[idx, 'diagnosis']
sess_name = self.df.loc[idx, 'session_id']
# Not in BIDS but in CAPS
if self.data_path == "linear":
image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
't1', 'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')
elif self.data_path == "mni":
image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
't1', 'spm', 'segmentation', 'normalized_space',
img_name + '_' + sess_name + '_space-Ixi549Space_T1w.pt')
else:
raise NotImplementedError("The data path %s is not implemented" % self.data_path)
image = torch.load(image_path)
label = self.diagnosis_code[img_label]
if self.transform:
image = self.transform(image)
sample = {'image': image, 'label': label, 'participant_id': img_name, 'session_id': sess_name,
'image_path': image_path}
return sample
def session_restriction(self, session):
"""
Allows to generate a new MRIDataset using some specific sessions only (mostly used for evaluation of test)
:param session: (str) the session wanted. Must be 'all' or 'ses-MXX'
:return: (DataFrame) the dataset with the wanted sessions
"""
from copy import copy
data_output = copy(self)
if session == "all":
return data_output
else:
df_session = self.df[self.df.session_id == session]
df_session.reset_index(drop=True, inplace=True)
data_output.df = df_session
if len(data_output) == 0:
raise Exception("The session %s doesn't exist for any of the subjects in the test data" % session)
return data_output
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
image = sample['image']
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample['image'] = smoothed_image
return sample
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, image):
return (image - image.min()) / (image.max() - image.min())
def load_data(train_val_path, diagnoses_list, split, n_splits=None, baseline=True):
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
if n_splits is None:
train_path = path.join(train_val_path, 'train')
valid_path = path.join(train_val_path, 'validation')
else:
train_path = path.join(train_val_path, 'train_splits-' + str(n_splits),
'split-' + str(split))
valid_path = path.join(train_val_path, 'validation_splits-' + str(n_splits),
'split-' + str(split))
print("Train", train_path)
print("Valid", valid_path)
for diagnosis in diagnoses_list:
if baseline:
train_diagnosis_path = path.join(train_path, diagnosis + '_baseline.tsv')
else:
train_diagnosis_path = path.join(train_path, diagnosis + '.tsv')
valid_diagnosis_path = path.join(valid_path, diagnosis + '_baseline.tsv')
train_diagnosis_df = pd.read_csv(train_diagnosis_path, sep='\t')
valid_diagnosis_df = pd.read_csv(valid_diagnosis_path, sep='\t')
train_df = pd.concat([train_df, train_diagnosis_df])
valid_df = pd.concat([valid_df, valid_diagnosis_df])
train_df.reset_index(inplace=True, drop=True)
valid_df.reset_index(inplace=True, drop=True)
return train_df, valid_df
def load_data_test(test_path, diagnoses_list):
test_df = pd.DataFrame()
for diagnosis in diagnoses_list:
test_diagnosis_path = path.join(test_path, diagnosis + '_baseline.tsv')
test_diagnosis_df = pd.read_csv(test_diagnosis_path, sep='\t')
test_df = pd.concat([test_df, test_diagnosis_df])
test_df.reset_index(inplace=True, drop=True)
return test_df
|
[
"pandas.DataFrame",
"scipy.ndimage.filters.gaussian_filter",
"numpy.nan_to_num",
"pandas.read_csv",
"torch.load",
"copy.copy",
"os.path.join",
"pandas.concat",
"torch.from_numpy"
] |
[((4592, 4606), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4604, 4606), True, 'import pandas as pd\n'), ((4622, 4636), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4634, 4636), True, 'import pandas as pd\n'), ((5913, 5927), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5925, 5927), True, 'import pandas as pd\n'), ((2569, 2591), 'torch.load', 'torch.load', (['image_path'], {}), '(image_path)\n', (2579, 2591), False, 'import torch\n'), ((3276, 3286), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (3280, 3286), False, 'from copy import copy\n'), ((3876, 3908), 'numpy.nan_to_num', 'np.nan_to_num', (['image'], {'copy': '(False)'}), '(image, copy=False)\n', (3889, 3908), True, 'import numpy as np\n'), ((3934, 3974), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': 'self.sigma'}), '(image, sigma=self.sigma)\n', (3949, 3974), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((4176, 4208), 'numpy.nan_to_num', 'np.nan_to_num', (['image'], {'copy': '(False)'}), '(image, copy=False)\n', (4189, 4208), True, 'import numpy as np\n'), ((4684, 4718), 'os.path.join', 'path.join', (['train_val_path', '"""train"""'], {}), "(train_val_path, 'train')\n", (4693, 4718), False, 'from os import path\n'), ((4740, 4779), 'os.path.join', 'path.join', (['train_val_path', '"""validation"""'], {}), "(train_val_path, 'validation')\n", (4749, 4779), False, 'from os import path\n'), ((5396, 5446), 'os.path.join', 'path.join', (['valid_path', "(diagnosis + '_baseline.tsv')"], {}), "(valid_path, diagnosis + '_baseline.tsv')\n", (5405, 5446), False, 'from os import path\n'), ((5477, 5520), 'pandas.read_csv', 'pd.read_csv', (['train_diagnosis_path'], {'sep': '"""\t"""'}), "(train_diagnosis_path, sep='\\t')\n", (5488, 5520), True, 'import pandas as pd\n'), ((5550, 5593), 'pandas.read_csv', 'pd.read_csv', (['valid_diagnosis_path'], {'sep': '"""\t"""'}), "(valid_diagnosis_path, sep='\\t')\n", (5561, 5593), True, 'import pandas 
as pd\n'), ((5614, 5655), 'pandas.concat', 'pd.concat', (['[train_df, train_diagnosis_df]'], {}), '([train_df, train_diagnosis_df])\n', (5623, 5655), True, 'import pandas as pd\n'), ((5675, 5716), 'pandas.concat', 'pd.concat', (['[valid_df, valid_diagnosis_df]'], {}), '([valid_df, valid_diagnosis_df])\n', (5684, 5716), True, 'import pandas as pd\n'), ((5997, 6046), 'os.path.join', 'path.join', (['test_path', "(diagnosis + '_baseline.tsv')"], {}), "(test_path, diagnosis + '_baseline.tsv')\n", (6006, 6046), False, 'from os import path\n'), ((6075, 6117), 'pandas.read_csv', 'pd.read_csv', (['test_diagnosis_path'], {'sep': '"""\t"""'}), "(test_diagnosis_path, sep='\\t')\n", (6086, 6117), True, 'import pandas as pd\n'), ((6136, 6175), 'pandas.concat', 'pd.concat', (['[test_df, test_diagnosis_df]'], {}), '([test_df, test_diagnosis_df])\n', (6145, 6175), True, 'import pandas as pd\n'), ((981, 1013), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'sep': '"""\t"""'}), "(data_file, sep='\\t')\n", (992, 1013), True, 'import pandas as pd\n'), ((1938, 2080), 'os.path.join', 'path.join', (['self.img_dir', '"""subjects"""', 'img_name', 'sess_name', '"""t1"""', '"""preprocessing_dl"""', "(img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')"], {}), "(self.img_dir, 'subjects', img_name, sess_name, 't1',\n 'preprocessing_dl', img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')\n", (1947, 2080), False, 'from os import path\n'), ((5222, 5272), 'os.path.join', 'path.join', (['train_path', "(diagnosis + '_baseline.tsv')"], {}), "(train_path, diagnosis + '_baseline.tsv')\n", (5231, 5272), False, 'from os import path\n'), ((5322, 5363), 'os.path.join', 'path.join', (['train_path', "(diagnosis + '.tsv')"], {}), "(train_path, diagnosis + '.tsv')\n", (5331, 5363), False, 'from os import path\n'), ((2210, 2381), 'os.path.join', 'path.join', (['self.img_dir', '"""subjects"""', 'img_name', 'sess_name', '"""t1"""', '"""spm"""', '"""segmentation"""', '"""normalized_space"""', 
"(img_name + '_' + sess_name + '_space-Ixi549Space_T1w.pt')"], {}), "(self.img_dir, 'subjects', img_name, sess_name, 't1', 'spm',\n 'segmentation', 'normalized_space', img_name + '_' + sess_name +\n '_space-Ixi549Space_T1w.pt')\n", (2219, 2381), False, 'from os import path\n'), ((4261, 4299), 'torch.from_numpy', 'torch.from_numpy', (['image[np.newaxis, :]'], {}), '(image[np.newaxis, :])\n', (4277, 4299), False, 'import torch\n')]
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
from typing import List, Callable, Tuple
import cirq
import numpy as np
import pyqsp.phases
import scipy.linalg
from plot_qsp import qsp_plot
from qsp import to_r_z_from_wx
@dataclasses.dataclass
class FixedPointAmplitudeAmplification:
"""Amplitude amplification inputs.
Based on the inputs for an amplitude amplification problem, it
creates the fixed point amplitude amplification circuit and after the
proper projection (depending on whether the number of
coefficients is even or odd) it returns the amplitude.
On a real quantum computer, we'll need to provide all of u, u_inv, a0, b0,
rotA0=e^(i * phi * (2|a0><a0|-I)) and rotB0=e^(i * phi * (2|b0><b0|-I))
for the algorithm as black boxes, but in this simulation we can just
calculate them from u, a0, b0. Finally, coeffs determine the polynomial
we'd like to convert <a0|u|b0> with.
Attributes:
u: the unitary to amplify
a0: the goal state
b0: the starting state
coeffs: the coefficients in QSP(R, <0|.|0>) convention
"""
u: cirq.Gate
a0: cirq.STATE_VECTOR_LIKE
b0: cirq.STATE_VECTOR_LIKE
coeffs: List[float]
u_inv: cirq.Gate = None
rot_a0: Callable[[float], cirq.Gate] = None
rot_b0: Callable[[float], cirq.Gate] = None
num_qubits: int = 2
_amplitude_projector: Callable[[np.ndarray], np.complex] = None
def __post_init__(self):
self.u_inv = cirq.inverse(self.u)
self.rot_a0 = self._rot_state("a0", self.a0)
self.rot_b0 = self._rot_state("b0", self.b0)
self.num_qubits = cirq.num_qubits(self.u)
self._amplitude_projector = lambda uni: (
self.a0 @ uni @ self.b0
if len(self.coeffs) % 2 == 1
else self.b0 @ uni @ self.b0
)
def _rot_state(
self, name: str, state_vector: np.ndarray
) -> Callable[[float], cirq.Gate]:
"""Rotates the state around a given state."""
return lambda phi: cirq.MatrixGate(
name=f"{name}[{phi:.2f}]",
matrix=scipy.linalg.expm(
1j * phi * (2 * np.outer(state_vector, state_vector) - np.identity(4))
),
)
def get_circuit(self) -> cirq.Circuit:
qs = cirq.LineQubit.range(self.num_qubits)
# reverse operation order for circuits
# we mandatorily start with U, as this is the U|B0> in Eq (13)
if len(self.coeffs) == 0:
return cirq.Circuit(self.u(*qs))
ops = []
i = 1
for phi in self.coeffs[::-1]:
if i % 2 == 1:
ops += [self.u(*qs)]
ops += [self.rot_a0(phi)(*qs)]
else:
ops += [self.u_inv(*qs)]
ops += [self.rot_b0(phi)(*qs)]
i += 1
return cirq.Circuit(ops)
def run(self) -> float:
return self._amplitude_projector(cirq.unitary(self.get_circuit()))
def __str__(self):
return f"""FixedPointAmplification:
num qubits: {self.num_qubits},
u: {self.u},
a0: {self.a0},
b0: {self.b0},
{self.get_circuit()}"""
class Experiment:
def __init__(
self,
coeffs: List[float],
n_points: int,
basis_a: int = 2,
basis_b: int = 3,
n_qubits: int = 2,
):
self.coeffs = coeffs
self.basis_a = basis_a
self.basis_b = basis_b
self.n_points = n_points
self.a_s = []
self.fa_s = []
self.a0 = cirq.to_valid_state_vector(basis_a, n_qubits)
self.b0 = cirq.to_valid_state_vector(basis_b, n_qubits)
def _get_u_gate_and_initial_amplitude(
self, p: float, sign: int
) -> Tuple[float, cirq.Gate]:
"""Creates a CNOT-like unitary with a real amplitude."""
u = sign * scipy.linalg.expm(1j * p * cirq.unitary(cirq.CX))
a = u[self.basis_a][self.basis_b]
new_a = a * sign * np.conj(a) / np.abs(a)
return new_a, cirq.MatrixGate(
name="u", matrix=sign * np.conj(a) / np.abs(a) * u
)
def _run_half(self, sign: int):
for p in np.linspace(1e-8, np.pi, self.n_points):
a, u = self._get_u_gate_and_initial_amplitude(p, sign)
fp_amp = self._get_fpamp(u)
self.a_s.append(a)
self.fa_s.append(fp_amp.run())
def _get_fpamp(self, u):
return FixedPointAmplitudeAmplification(u, self.a0, self.b0, self.coeffs)
def run(self) -> Tuple[List[float], List[float]]:
_, sample_fpamp = self._get_u_gate_and_initial_amplitude(0.123, -1)
print(self._get_fpamp(sample_fpamp))
self._run_half(-1)
self._run_half(1)
return self.a_s, self.fa_s
def experiment(
coeffs,
npoints=50,
title=None,
filename="fp_amp.png",
target_fn=None,
target_fn_label: str = None,
):
"""The main function to qsp the two cases presented in the paper."""
title = f"Fixed amplitude amplification for {title}"
a_s, f_as = Experiment(coeffs, npoints).run()
qsp_plot(np.real(a_s), f_as, filename, target_fn, target_fn_label, title)
if __name__ == "__main__":
experiment(
title="$T_1$",
coeffs=to_r_z_from_wx([0, 0]),
npoints=10,
filename="fp_amp_t1.png",
target_fn=lambda a_s: a_s,
target_fn_label="$T_1(a)=a$",
)
experiment(
title="$T_2$",
coeffs=to_r_z_from_wx([0, 0, 0]),
npoints=100,
filename="fp_amp_t2.png",
target_fn=lambda a_s: 2 * a_s ** 2 - 1,
target_fn_label="$T_2(a)=2a^2-1$",
)
experiment(
title="$T_3$",
coeffs=to_r_z_from_wx([0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t3.png",
target_fn=lambda a_s: 4 * a_s ** 3 - 3 * a_s,
target_fn_label="$T_3(a)=4 a^3-3 a$",
)
experiment(
title="$T_4$",
coeffs=to_r_z_from_wx([0, 0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t4.png",
target_fn=lambda a_s: 8 * a_s ** 4 - 8 * a_s ** 2 + 1,
target_fn_label="$T_4(a)=8 a^4-8 a^2 +1$",
)
experiment(
title="$T_5$",
coeffs=to_r_z_from_wx([0, 0, 0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t5.png",
target_fn=lambda a_s: 16 * a_s ** 5 - 20 * a_s ** 3 + 5 * a_s,
target_fn_label="$T_5(a)=16 a^5-20 a^3 + 5 a$",
)
# these are the same as in the Martyn et al paper
wx_phis = pyqsp.phases.FPSearch().generate(10, 0.5)
experiment(
title="FPSearch(10,0.5)",
coeffs=to_r_z_from_wx(wx_phis),
npoints=100,
filename="fp_amp_fpsearch_10_0.5.png",
)
|
[
"numpy.conj",
"numpy.outer",
"numpy.abs",
"cirq.inverse",
"cirq.unitary",
"cirq.to_valid_state_vector",
"numpy.identity",
"cirq.num_qubits",
"qsp.to_r_z_from_wx",
"cirq.Circuit",
"numpy.linspace",
"numpy.real",
"cirq.LineQubit.range"
] |
[((2056, 2076), 'cirq.inverse', 'cirq.inverse', (['self.u'], {}), '(self.u)\n', (2068, 2076), False, 'import cirq\n'), ((2209, 2232), 'cirq.num_qubits', 'cirq.num_qubits', (['self.u'], {}), '(self.u)\n', (2224, 2232), False, 'import cirq\n'), ((2865, 2902), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['self.num_qubits'], {}), '(self.num_qubits)\n', (2885, 2902), False, 'import cirq\n'), ((3424, 3441), 'cirq.Circuit', 'cirq.Circuit', (['ops'], {}), '(ops)\n', (3436, 3441), False, 'import cirq\n'), ((4102, 4147), 'cirq.to_valid_state_vector', 'cirq.to_valid_state_vector', (['basis_a', 'n_qubits'], {}), '(basis_a, n_qubits)\n', (4128, 4147), False, 'import cirq\n'), ((4166, 4211), 'cirq.to_valid_state_vector', 'cirq.to_valid_state_vector', (['basis_b', 'n_qubits'], {}), '(basis_b, n_qubits)\n', (4192, 4211), False, 'import cirq\n'), ((4716, 4756), 'numpy.linspace', 'np.linspace', (['(1e-08)', 'np.pi', 'self.n_points'], {}), '(1e-08, np.pi, self.n_points)\n', (4727, 4756), True, 'import numpy as np\n'), ((5655, 5667), 'numpy.real', 'np.real', (['a_s'], {}), '(a_s)\n', (5662, 5667), True, 'import numpy as np\n'), ((4540, 4549), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4546, 4549), True, 'import numpy as np\n'), ((5803, 5825), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0]'], {}), '([0, 0])\n', (5817, 5825), False, 'from qsp import to_r_z_from_wx\n'), ((6014, 6039), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (6028, 6039), False, 'from qsp import to_r_z_from_wx\n'), ((6247, 6275), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (6261, 6275), False, 'from qsp import to_r_z_from_wx\n'), ((6492, 6523), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (6506, 6523), False, 'from qsp import to_r_z_from_wx\n'), ((6754, 6788), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (6768, 6788), False, 'from qsp 
import to_r_z_from_wx\n'), ((7154, 7177), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['wx_phis'], {}), '(wx_phis)\n', (7168, 7177), False, 'from qsp import to_r_z_from_wx\n'), ((4527, 4537), 'numpy.conj', 'np.conj', (['a'], {}), '(a)\n', (4534, 4537), True, 'import numpy as np\n'), ((4435, 4456), 'cirq.unitary', 'cirq.unitary', (['cirq.CX'], {}), '(cirq.CX)\n', (4447, 4456), False, 'import cirq\n'), ((4638, 4647), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4644, 4647), True, 'import numpy as np\n'), ((2767, 2781), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (2778, 2781), True, 'import numpy as np\n'), ((4625, 4635), 'numpy.conj', 'np.conj', (['a'], {}), '(a)\n', (4632, 4635), True, 'import numpy as np\n'), ((2728, 2764), 'numpy.outer', 'np.outer', (['state_vector', 'state_vector'], {}), '(state_vector, state_vector)\n', (2736, 2764), True, 'import numpy as np\n')]
|
import aiohttp
from plugin_system import Plugin
plugin = Plugin("Скриншот любого сайта",
usage=["скрин [адрес сайта] - сделать скриншот сайта [адрес сайта]"])
# Желательно первой командой указывать основную (она будет в списке команд)
@plugin.on_command('скрин')
async def screen(msg, args):
if not args:
return msg.answer('Вы не указали сайт!')
async with aiohttp.ClientSession() as sess:
async with sess.get("http://mini.s-shot.ru/1024x768/1024/png/?" + args.pop()) as resp:
result = await msg.vk.upload_photo(await resp.read())
return await msg.answer('Держи', attachment=str(result))
|
[
"aiohttp.ClientSession",
"plugin_system.Plugin"
] |
[((59, 165), 'plugin_system.Plugin', 'Plugin', (['"""Скриншот любого сайта"""'], {'usage': "['скрин [адрес сайта] - сделать скриншот сайта [адрес сайта]']"}), "('Скриншот любого сайта', usage=[\n 'скрин [адрес сайта] - сделать скриншот сайта [адрес сайта]'])\n", (65, 165), False, 'from plugin_system import Plugin\n'), ((394, 417), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (415, 417), False, 'import aiohttp\n')]
|
import pytest
from hypothesis import given
from lz import right
from lz.functional import curry
from tests import strategies
from tests.hints import (FunctionCall,
PartitionedFunctionCall)
@given(strategies.partitioned_transparent_functions_calls)
def test_basic(partitioned_function_call: PartitionedFunctionCall) -> None:
(function,
(first_args_part, second_args_part),
(first_kwargs_part, second_kwargs_part)) = partitioned_function_call
applied = right.applier(function,
*second_args_part,
**first_kwargs_part)
result = curry(applied)
assert (result(*first_args_part, **second_kwargs_part)
== function(*first_args_part, *second_args_part,
**first_kwargs_part, **second_kwargs_part))
@given(strategies.non_variadic_transparent_functions_calls_with_invalid_args)
def test_invalid_args(function_call: FunctionCall) -> None:
function, invalid_args, kwargs = function_call
applied = right.applier(function, *invalid_args, **kwargs)
with pytest.raises(TypeError):
curry(applied)
@given(strategies.non_variadic_transparent_functions_calls_with_invalid_kwargs)
def test_invalid_kwargs(function_call: FunctionCall) -> None:
function, args, invalid_kwargs = function_call
applied = right.applier(function, *args, **invalid_kwargs)
with pytest.raises(TypeError):
curry(applied)
|
[
"lz.functional.curry",
"pytest.raises",
"hypothesis.given",
"lz.right.applier"
] |
[((218, 275), 'hypothesis.given', 'given', (['strategies.partitioned_transparent_functions_calls'], {}), '(strategies.partitioned_transparent_functions_calls)\n', (223, 275), False, 'from hypothesis import given\n'), ((838, 914), 'hypothesis.given', 'given', (['strategies.non_variadic_transparent_functions_calls_with_invalid_args'], {}), '(strategies.non_variadic_transparent_functions_calls_with_invalid_args)\n', (843, 914), False, 'from hypothesis import given\n'), ((1151, 1229), 'hypothesis.given', 'given', (['strategies.non_variadic_transparent_functions_calls_with_invalid_kwargs'], {}), '(strategies.non_variadic_transparent_functions_calls_with_invalid_kwargs)\n', (1156, 1229), False, 'from hypothesis import given\n'), ((497, 560), 'lz.right.applier', 'right.applier', (['function', '*second_args_part'], {}), '(function, *second_args_part, **first_kwargs_part)\n', (510, 560), False, 'from lz import right\n'), ((631, 645), 'lz.functional.curry', 'curry', (['applied'], {}), '(applied)\n', (636, 645), False, 'from lz.functional import curry\n'), ((1040, 1088), 'lz.right.applier', 'right.applier', (['function', '*invalid_args'], {}), '(function, *invalid_args, **kwargs)\n', (1053, 1088), False, 'from lz import right\n'), ((1357, 1405), 'lz.right.applier', 'right.applier', (['function', '*args'], {}), '(function, *args, **invalid_kwargs)\n', (1370, 1405), False, 'from lz import right\n'), ((1099, 1123), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1112, 1123), False, 'import pytest\n'), ((1133, 1147), 'lz.functional.curry', 'curry', (['applied'], {}), '(applied)\n', (1138, 1147), False, 'from lz.functional import curry\n'), ((1416, 1440), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1429, 1440), False, 'import pytest\n'), ((1450, 1464), 'lz.functional.curry', 'curry', (['applied'], {}), '(applied)\n', (1455, 1464), False, 'from lz.functional import curry\n')]
|
from django.test import TestCase
from app.calc import add, subtract
class CalcTest(TestCase):
def test_and_numbers(self):
"""Test that numbers are added together"""
self.assertEqual(add(3,8), 11)
def test_subtract_numbers(self):
"""Test That numbers are subtracted"""
self.assertEqual(subtract(5, 11), 6)
|
[
"app.calc.add",
"app.calc.subtract"
] |
[((204, 213), 'app.calc.add', 'add', (['(3)', '(8)'], {}), '(3, 8)\n', (207, 213), False, 'from app.calc import add, subtract\n'), ((329, 344), 'app.calc.subtract', 'subtract', (['(5)', '(11)'], {}), '(5, 11)\n', (337, 344), False, 'from app.calc import add, subtract\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# xdp_ip_whitelist.py Drop packet coming from ips not in a whitelist
#
# Based on https://github.com/iovisor/bcc/blob/master/examples/networking/xdp/xdp_drop_count.py,
# Copyright (c) 2016 PLUMgrid
# Copyright (c) 2016 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License")
# See http://apache.org/licenses/LICENSE-2.0
from bcc import BPF
import pyroute2
import time
import sys
import socket, struct
# Like unblockedIp = ['10.244.3.24']
unblockedIp = [
"192.168.1.187", # Cyril’s computer
"192.168.1.98", # Lucian’s connector
"192.168.1.1", # Router
"192.168.1.150", # Router
"192.168.1.131", # ??
"192.168.1.68", # ??
]
debug = 1
flags = 0
def usage():
print("Usage: {0} [-S] <ifdev>".format(sys.argv[0]))
print(" -S: use skb mode\n")
print("e.g.: {0} eth0\n".format(sys.argv[0]))
exit(1)
if len(sys.argv) < 2 or len(sys.argv) > 3:
usage()
if len(sys.argv) == 2:
device = sys.argv[1]
if len(sys.argv) == 3:
if "-S" in sys.argv:
# XDP_FLAGS_SKB_MODE
flags |= 2 << 0
if "-S" == sys.argv[1]:
device = sys.argv[2]
else:
device = sys.argv[1]
mode = BPF.XDP
#mode = BPF.SCHED_CLS
if mode == BPF.XDP:
ret = "XDP_DROP"
ctxtype = "xdp_md"
else:
ret = "TC_ACT_SHOT"
ctxtype = "__sk_buff"
# load BPF program
bpf_src = ''
with open("xdp_ip_whitelist.bpf") as bpf_file:
bpf_src = bpf_file.read()
ip4array = map(str,
[socket.htonl(struct.unpack("!L", socket.inet_aton(ip))[0])
for ip in unblockedIp])
bpf_src = bpf_src.replace("__IP4ARRAY__", ", ".join(ip4array))
bpf_src = bpf_src.replace("__IP4ARRAYSIZE__", str(len(ip4array)))
if debug:
print("C code of BPF program:")
print(bpf_src)
b = BPF(text = bpf_src,
cflags=["-w", "-DRETURNCODE=%s" % ret, "-DCTXTYPE=%s" % ctxtype])
fn = b.load_func("xdp_prog1", mode)
if mode == BPF.XDP:
print("XDP Mode")
b.attach_xdp(device, fn, flags)
else:
print("TC Fallback")
ip = pyroute2.IPRoute()
ipdb = pyroute2.IPDB(nl=ip)
idx = ipdb.interfaces[device].index
ip.tc("add", "clsact", idx)
ip.tc("add-filter", "bpf", idx, ":1", fd=fn.fd, name=fn.name,
parent="ffff:fff2", classid=1, direct_action=True)
dropcnt = b.get_table("dropcnt")
prev = [0] * 256
print("Accepting packets only from the following IP addresses {}, hit CTRL+C to stop".format(unblockedIp))
while 1:
try:
time.sleep(1)
except KeyboardInterrupt:
print("Removing filter from device")
break;
if mode == BPF.XDP:
b.remove_xdp(device, flags)
else:
ip.tc("del", "clsact", idx)
ipdb.release()
|
[
"bcc.BPF",
"pyroute2.IPDB",
"time.sleep",
"socket.inet_aton",
"pyroute2.IPRoute"
] |
[((1816, 1903), 'bcc.BPF', 'BPF', ([], {'text': 'bpf_src', 'cflags': "['-w', '-DRETURNCODE=%s' % ret, '-DCTXTYPE=%s' % ctxtype]"}), "(text=bpf_src, cflags=['-w', '-DRETURNCODE=%s' % ret, '-DCTXTYPE=%s' %\n ctxtype])\n", (1819, 1903), False, 'from bcc import BPF\n'), ((2066, 2084), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (2082, 2084), False, 'import pyroute2\n'), ((2096, 2116), 'pyroute2.IPDB', 'pyroute2.IPDB', ([], {'nl': 'ip'}), '(nl=ip)\n', (2109, 2116), False, 'import pyroute2\n'), ((2500, 2513), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2510, 2513), False, 'import time\n'), ((1539, 1559), 'socket.inet_aton', 'socket.inet_aton', (['ip'], {}), '(ip)\n', (1555, 1559), False, 'import socket, struct\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# TCP
import socket
# Client
# # create
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#
# # connect
# s.connect(('www.sina.com.cn', 80))
#
# # AF_INET IPV4
# # AF_INET6 IPV6
# # SOCK_STREAM 使用面向流的TCP协议
# # connect 参数是tuple 包含ip和port
#
# # send
# s.send(b'GET / HTTP/1.1\r\nHost: www.sina.com.cn\r\nConnection: close\r\n\r\n')
#
# # receive
# buffer = []
# while True:
# # 每次最多接收1k字节:
# d = s.recv(1024)
# if d:
# buffer.append(d)
# else:
# break
# data = b''.join(buffer)
#
# # close
# s.close()
#
# # handle data to file
# header, html = data.split(b'\r\n\r\n', 1)
# print(header.decode('utf-8'))
# with open('sina.html', 'wb') as f:
# f.write(html)
# Server
# create
# IPv4 TCP listening socket for the demo echo server.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind
s.bind(('127.0.0.1', 9999))
# listen: allow up to 5 pending connections in the accept queue
s.listen(5)
print('Waiting for connection...')
# accept
import threading, time
def tcplink(sock, addr):
    """Serve one TCP client: greet it, then echo greetings back for every
    message received, until the peer disconnects or sends 'exit'.

    Runs in its own thread; sleeps one second per message (demo pacing).
    """
    print('Accept new connection from %s:%s...' % addr)
    sock.send(b'Welcome!')
    while True:
        chunk = sock.recv(1024)
        time.sleep(1)
        if not chunk:
            # Empty read: peer closed the connection.
            break
        text = chunk.decode('utf-8')
        if text == 'exit':
            break
        reply = 'Hello, %s!' % text
        sock.send(reply.encode('utf-8'))
    sock.close()
    print('Connection from %s:%s closed.' % addr)
while True:
    # Accept a new connection (blocks until a client connects):
    sock, addr = s.accept()
    # Spawn a new thread to handle this TCP connection:
    t = threading.Thread(target=tcplink, args=(sock, addr))
    t.start()
|
[
"threading.Thread",
"socket.socket",
"time.sleep"
] |
[((769, 818), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (782, 818), False, 'import socket\n'), ((1420, 1471), 'threading.Thread', 'threading.Thread', ([], {'target': 'tcplink', 'args': '(sock, addr)'}), '(target=tcplink, args=(sock, addr))\n', (1436, 1471), False, 'import threading, time\n'), ((1108, 1121), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1118, 1121), False, 'import threading, time\n')]
|
# Generated by Django 3.0.6 on 2020-05-25 00:02
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the InjectionLog app.
    initial = True
    dependencies = [
        # Several models below reference the project's (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A cat owned by a user; owner is protected against deletion.
        migrations.CreateModel(
            name='Cats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('birthday', models.DateField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # GS medication brand, keyed by brand name.
        migrations.CreateModel(
            name='GSBrand',
            fields=[
                ('brand', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('concentration', models.DecimalField(decimal_places=2, max_digits=2)),
            ],
        ),
        migrations.CreateModel(
            name='WarriorAdmin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('warrior_admin', models.CharField(max_length=200)),
            ],
        ),
        # Extra per-user data: links a user account to a WarriorAdmin record.
        migrations.CreateModel(
            name='UserExtension',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('warrior_admin', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.WarriorAdmin')),
            ],
        ),
        # One injection log entry per dose given to a cat.
        migrations.CreateModel(
            name='InjectionLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_added', models.DateField(default=datetime.date.today, editable=False)),
                ('cat_weight', models.DecimalField(decimal_places=2, max_digits=2)),
                ('injection_time', models.TimeField()),
                ('injection_amount', models.DecimalField(decimal_places=1, max_digits=2)),
                ('cat_behavior_today', models.IntegerField(default=3)),
                ('injection_notes', models.TextField(null=True)),
                ('gaba_dose', models.IntegerField(null=True)),
                ('other_notes', models.TextField(null=True)),
                ('cat_name', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.Cats')),
                ('gs_brand', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.GSBrand')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.TimeField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DecimalField",
"django.db.models.IntegerField",
"django.db.models.DateField"
] |
[((263, 320), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (294, 320), False, 'from django.db import migrations, models\n'), ((449, 542), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (465, 542), False, 'from django.db import migrations, models\n'), ((566, 598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (582, 598), False, 'from django.db import migrations, models\n'), ((630, 648), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (646, 648), False, 'from django.db import migrations, models\n'), ((677, 773), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.PROTECT, to=settings.\n AUTH_USER_MODEL)\n', (694, 773), False, 'from django.db import migrations, models\n'), ((904, 971), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=100, primary_key=True, serialize=False)\n', (920, 971), False, 'from django.db import migrations, models\n'), ((1008, 1059), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(2)'}), '(decimal_places=2, max_digits=2)\n', (1027, 1059), False, 'from django.db import migrations, models\n'), ((1197, 1290), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1213, 1290), False, 'from django.db import migrations, 
models\n'), ((1323, 1355), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1339, 1355), False, 'from django.db import migrations, models\n'), ((1494, 1587), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1510, 1587), False, 'from django.db import migrations, models\n'), ((1613, 1709), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1630, 1709), False, 'from django.db import migrations, models\n'), ((1741, 1840), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""InjectionLog.WarriorAdmin"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'InjectionLog.WarriorAdmin')\n", (1758, 1840), False, 'from django.db import migrations, models\n'), ((1973, 2066), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1989, 2066), False, 'from django.db import migrations, models\n'), ((2096, 2157), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'datetime.date.today', 'editable': '(False)'}), '(default=datetime.date.today, editable=False)\n', (2112, 2157), False, 'from django.db import migrations, models\n'), ((2191, 2242), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(2)'}), '(decimal_places=2, max_digits=2)\n', (2210, 2242), False, 'from django.db import migrations, models\n'), ((2280, 2298), 
'django.db.models.TimeField', 'models.TimeField', ([], {}), '()\n', (2296, 2298), False, 'from django.db import migrations, models\n'), ((2338, 2389), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)'}), '(decimal_places=1, max_digits=2)\n', (2357, 2389), False, 'from django.db import migrations, models\n'), ((2431, 2461), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(3)'}), '(default=3)\n', (2450, 2461), False, 'from django.db import migrations, models\n'), ((2500, 2527), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (2516, 2527), False, 'from django.db import migrations, models\n'), ((2560, 2590), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (2579, 2590), False, 'from django.db import migrations, models\n'), ((2625, 2652), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (2641, 2652), False, 'from django.db import migrations, models\n'), ((2684, 2775), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""InjectionLog.Cats"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'InjectionLog.Cats')\n", (2701, 2775), False, 'from django.db import migrations, models\n'), ((2802, 2896), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""InjectionLog.GSBrand"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'InjectionLog.GSBrand')\n", (2819, 2896), False, 'from django.db import migrations, models\n'), ((2920, 3016), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.PROTECT, to=settings.\n AUTH_USER_MODEL)\n', (2937, 3016), False, 'from django.db import migrations, 
models\n')]
|
import json
import subprocess
import sys
from argparse import ArgumentParser
from os import environ
from pathlib import Path
from typing import Any, List
# Bazel sets this environment for 'bazel run' to document the workspace root
WORKSPACE_ENV_VAR = "BUILD_WORKSPACE_DIRECTORY"
def cli():
    """Define and parse the command-line interface of the apply_fixes tool.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = ArgumentParser()
    # Where the workspace root is; defaults to auto-detection via Bazel's env var.
    parser.add_argument(
        "--workspace",
        metavar="PATH",
        help="""
        Workspace for which DWYU reports are gathered and fixes are applied to the source code. If no dedicated
        workspace is provided, we assume we are running from within the workspace for which the DWYU reports have been
        generated and determine the workspace root automatically.
        By default the Bazel output directory containing the DWYU report files is deduced by following the 'bazel-bin'
        convenience symlink.""",
    )
    # Alternative output-dir deduction via 'bazel info'; optional value is the compilation mode.
    parser.add_argument(
        "--use-bazel-info",
        const="fastbuild",
        choices=["dbg", "fastbuild", "opt"],
        nargs="?",
        help="""
        Don't follow the convenience symlinks to reach the Bazel output directory containing the DWYU reports. Instead,
        use 'bazel info' to deduce the output directory.
        This option accepts an optional argument specifying the compilation mode which was used to generate the DWYU
        report files.
        Using this option is recommended if the convenience symlinks do not exist, don't follow the default
        naming scheme or do not point to the Bazel output directory containing the DWYU reports.""",
    )
    # Last-resort explicit path to bazel-bin.
    parser.add_argument(
        "--bazel-bin",
        metavar="PATH",
        help="""
        Path to the bazel-bin directory inside which the DWYU reports are located.
        Using this option is recommended if neither the convenience symlinks nor the 'bazel info' command are suited to
        deduce the Bazel output directory containing the DWYU report files.""",
    )
    parser.add_argument(
        "--buildozer",
        metavar="PATH",
        help="""
        buildozer binary which shall be used by this script. If none is provided, it is expected to find buildozer on
        PATH.""",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Don't apply fixes. Report the buildozer commands and print the adapted BUILD files to stdout.",
    )
    parser.add_argument("--verbose", action="store_true", help="Announce intermediate steps.")
    return parser.parse_args()
def get_workspace(main_args: Any) -> Path:
    """Return the root of the workspace the DWYU reports belong to.

    Prefers an explicitly provided --workspace path; otherwise falls back to
    the directory Bazel documents for 'bazel run' via the
    BUILD_WORKSPACE_DIRECTORY environment variable.

    Exits with status 1 when neither source is available. (Bug fix: the
    original printed the error but then fell through to Path(None), crashing
    with a TypeError instead of exiting cleanly.)
    """
    if main_args.workspace:
        return Path(main_args.workspace)
    workspace_root = environ.get(WORKSPACE_ENV_VAR)
    if not workspace_root:
        print(
            "ERROR:"
            f" No workspace was explicitly provided and environment variable '{WORKSPACE_ENV_VAR}' is not available."
        )
        # Abort instead of constructing Path(None) below.
        sys.exit(1)
    return Path(workspace_root)
def get_bazel_bin_dir(main_args: Any, workspace_root: Path) -> Path:
    """Deduce the bazel-bin directory that contains the DWYU report files.

    Resolution order: explicit --bazel-bin path, 'bazel info' query (when
    --use-bazel-info is set), then the 'bazel-bin' convenience symlink.
    Exits with status 1 when the symlink fallback is unusable.
    """
    if main_args.bazel_bin:
        return Path(main_args.bazel_bin)
    if main_args.use_bazel_info:
        # Ask Bazel directly; the answer honours the requested compilation mode.
        info = subprocess.run(
            ["bazel", "info", f"--compilation_mode={main_args.use_bazel_info}", "bazel-bin"],
            cwd=workspace_root,
            check=True,
            encoding="utf-8",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return Path(info.stdout.strip())
    bazel_bin_link = workspace_root / "bazel-bin"
    if bazel_bin_link.is_symlink():
        return bazel_bin_link.resolve()
    print(f"ERROR: convenience symlink '{bazel_bin_link}' does not exist or is not a symlink.")
    sys.exit(1)
def gather_reports(bazel_bin: Path) -> List[Path]:
    """Collect every DWYU report file anywhere below *bazel_bin*."""
    return [*bazel_bin.glob("**/*_dwyu_report.json")]
def make_base_cmd(buildozer: str, dry: bool) -> List[str]:
    """Return the buildozer invocation prefix; '-stdout' turns it into a dry run."""
    return [buildozer, "-stdout"] if dry else [buildozer]
def perform_fixes(workspace: Path, report: Path, buildozer: str, dry: bool = False, verbose=False):
    """Remove the unused dependencies listed in one DWYU report via buildozer.

    In dry mode buildozer prints the adapted BUILD file to stdout instead of
    modifying it. Does nothing when the report lists no unused dependencies.
    """
    with open(report, encoding="utf-8") as report_in:
        report_data = json.load(report_in)
    target = report_data["analyzed_target"]
    superfluous_deps = report_data["unused_dependencies"]
    if not superfluous_deps:
        return
    removal = "remove deps " + " ".join(superfluous_deps)
    cmd = make_base_cmd(buildozer=buildozer, dry=dry) + [removal, target]
    if dry or verbose:
        print(f"Buildozer command: {cmd}")
    subprocess.run(cmd, cwd=workspace, check=True)
def main(args: Any) -> int:
    """
    Locate the DWYU report files produced by a previous DWYU run and fix the
    corresponding BUILD files with buildozer.

    Expects 'bazel' to be available on PATH. Returns 0 on success and 1 when
    no report files could be found.
    """
    buildozer_bin = args.buildozer or "buildozer"
    workspace_root = get_workspace(args)
    if args.verbose:
        print(f"Workspace: '{workspace_root}'")
    bazel_bin = get_bazel_bin_dir(main_args=args, workspace_root=workspace_root)
    if args.verbose:
        print(f"Bazel-bin directory: '{bazel_bin}'")
    report_files = gather_reports(bazel_bin)
    if not report_files:
        print("ERROR: Did not find any DWYU report files.")
        print("Did you forget to run DWYU beforehand?")
        print(
            "By default this tool looks for DWYU report files in the output directory for a 'fastbuild' DWYU execution."
            " If you want to use another output directory, have a look at the apply_fixes CLI options via '--help'."
        )
        return 1
    for report_file in report_files:
        if args.verbose:
            print(f"Report File '{report_file}'")
        perform_fixes(
            workspace=workspace_root, report=report_file, buildozer=buildozer_bin, dry=args.dry_run, verbose=args.verbose
        )
    return 0
if __name__ == "__main__":
cli_args = cli()
sys.exit(main(cli_args))
|
[
"subprocess.run",
"json.load",
"argparse.ArgumentParser",
"os.environ.get",
"pathlib.Path",
"sys.exit"
] |
[((306, 322), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (320, 322), False, 'from argparse import ArgumentParser\n'), ((2622, 2652), 'os.environ.get', 'environ.get', (['WORKSPACE_ENV_VAR'], {}), '(WORKSPACE_ENV_VAR)\n', (2633, 2652), False, 'from os import environ\n'), ((2855, 2875), 'pathlib.Path', 'Path', (['workspace_root'], {}), '(workspace_root)\n', (2859, 2875), False, 'from pathlib import Path\n'), ((2574, 2599), 'pathlib.Path', 'Path', (['main_args.workspace'], {}), '(main_args.workspace)\n', (2578, 2599), False, 'from pathlib import Path\n'), ((2990, 3015), 'pathlib.Path', 'Path', (['main_args.bazel_bin'], {}), '(main_args.bazel_bin)\n', (2994, 3015), False, 'from pathlib import Path\n'), ((3068, 3275), 'subprocess.run', 'subprocess.run', (["['bazel', 'info', f'--compilation_mode={main_args.use_bazel_info}', 'bazel-bin'\n ]"], {'cwd': 'workspace_root', 'check': '(True)', 'encoding': '"""utf-8"""', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['bazel', 'info',\n f'--compilation_mode={main_args.use_bazel_info}', 'bazel-bin'], cwd=\n workspace_root, check=True, encoding='utf-8', stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (3082, 3275), False, 'import subprocess\n'), ((3589, 3600), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3597, 3600), False, 'import sys\n'), ((4061, 4081), 'json.load', 'json.load', (['report_in'], {}), '(report_in)\n', (4070, 4081), False, 'import json\n'), ((4470, 4516), 'subprocess.run', 'subprocess.run', (['cmd'], {'cwd': 'workspace', 'check': '(True)'}), '(cmd, cwd=workspace, check=True)\n', (4484, 4516), False, 'import subprocess\n')]
|
import math
def process_input(file_contents):
    """Normalize raw input lines: strip surrounding whitespace, drop all spaces."""
    return [raw.strip().replace(" ", "") for raw in file_contents]
def find_opening_par(input_string):
    """Return the index of the '(' matching the last ')' in *input_string*."""
    pos = input_string.rfind(")")
    depth = 1
    # Scan leftwards, tracking bracket nesting, until the match closes depth 0.
    while depth > 0:
        pos -= 1
        char = input_string[pos]
        if char == "(":
            depth -= 1
        elif char == ")":
            depth += 1
    return pos
def do_math(input_string):
    """Evaluate a space-free expression where '+' and '*' share equal
    precedence (applied left to right) and parentheses override it.

    Consumes the expression recursively from the right, one operand at a
    time; numbers in the input are single digits.
    """
    last = len(input_string) - 1
    if input_string[last] == ")":
        # Trailing group: evaluate it, then combine with whatever precedes it.
        start = find_opening_par(input_string)
        inner = do_math(input_string[start + 1:last])
        if input_string[start - 1] == "*":
            return inner * do_math(input_string[:start - 1])
        if input_string[start - 1] == "+":
            return inner + do_math(input_string[:start - 1])
        # Whole expression was parenthesized -- nothing on the left.
        return inner
    value = int(input_string[last])
    if last > 0:
        if input_string[last - 1] == "*":
            return value * do_math(input_string[:last - 1])
        if input_string[last - 1] == "+":
            return value + do_math(input_string[:last - 1])
    return value
def do_math2(input_string):
    """Evaluate a space-free expression where '+' binds tighter than '*'
    ("advanced math" rules); parenthesized groups are resolved first.
    """
    close = input_string.rfind(")")
    while close != -1:
        # Collapse the innermost trailing group into its evaluated value.
        start = find_opening_par(input_string)
        inner = str(do_math2(input_string[start + 1:close]))
        input_string = input_string.replace(input_string[start:close + 1], inner)
        close = input_string.rfind(")")
    # '*' splits the flat expression into factors; each factor is a pure sum.
    factors = input_string.split("*")
    return math.prod(sum(int(term) for term in factor.split("+")) for factor in factors)
def main():
    """Read input.txt and print the summed results for both homework variants."""
    with open("input.txt", 'r') as code_file:
        raw_lines = code_file.readlines()
    operations_list = process_input(raw_lines)
    print("The sum of the answers to all problems in the initial homework is",
          sum(do_math(op) for op in operations_list))
    print("The sum of the answers to all problems in the advanced math homework is",
          sum(do_math2(op) for op in operations_list))
# Script entry point: runs immediately (no __main__ guard in the original).
main()
|
[
"math.prod"
] |
[((1810, 1828), 'math.prod', 'math.prod', (['results'], {}), '(results)\n', (1819, 1828), False, 'import math\n')]
|
import collections
from typing import TYPE_CHECKING, List, NoReturn, Optional, Tuple, Union
from pyknp import Morpheme, Tag
from pyknp_eventgraph.builder import Builder
from pyknp_eventgraph.component import Component
from pyknp_eventgraph.helper import PAS_ORDER, convert_katakana_to_hiragana, get_parallel_tags
from pyknp_eventgraph.relation import filter_relations
if TYPE_CHECKING:
from pyknp_eventgraph.argument import Argument
from pyknp_eventgraph.event import Event
from pyknp_eventgraph.predicate import Predicate
class BasePhrase(Component):
    """A wrapper of :class:`pyknp.knp.tag.Tag`, which allows exophora to be a base phrase.

    BasePhrase is a bidirectional linked list; each of base phrases has its parent and children.

    Attributes:
        event (Event): An event that has this base phrase.
        tag (Tag, optional): A tag.
        ssid (int): A serial sentence ID.
        bid (int): A serial bunsetsu ID.
        tid (int): A serial tag ID.
        is_child (bool): If true, this base phrase is a child of a head base phrase.
        exophora (str): An exophora.
        omitted_case (str): A omitted case.
        parent (BasePhrase, optional): A parent base phrase.
        children (List[BasePhrase]): A list of child base phrases.
    """

    def __init__(
        self,
        event: "Event",
        tag: Optional[Tag],
        ssid: int,
        bid: int,
        tid: int,
        is_child: bool = False,
        exophora: str = "",
        omitted_case: str = "",
    ):
        self.event = event
        self.tag: Optional[Tag] = tag
        self.ssid = ssid
        self.bid = bid
        self.tid = tid
        self.is_child = is_child
        self.exophora = exophora
        self.omitted_case = omitted_case
        # Tree links; populated later by BasePhraseBuilder.
        self.parent: Optional["BasePhrase"] = None
        self.children: List["BasePhrase"] = []
        # Cache for the surface string (computed lazily by `surf`).
        self._surf = None

    def __hash__(self):
        return hash(self.key)

    def __eq__(self, other: "BasePhrase"):
        assert isinstance(other, BasePhrase)
        return self.key == other.key

    def __lt__(self, other: "BasePhrase"):
        assert isinstance(other, BasePhrase)
        return self.key < other.key

    @property
    def morphemes(self) -> List[Union[str, Morpheme]]:
        """Morphemes of this base phrase; plain strings stand in for exophora and omitted cases."""
        mrphs = []
        if self.omitted_case:
            if self.exophora:
                mrphs.append(self.exophora)
            else:
                # Keep only the leading content words of the tag: stop at the
                # first function word that follows a content word.
                exists_content_word = False
                for mrph in self.tag.mrph_list():
                    is_content_word = mrph.hinsi not in {"助詞", "特殊", "判定詞"}
                    if not is_content_word and exists_content_word:
                        break
                    exists_content_word = exists_content_word or is_content_word
                    mrphs.append(mrph)
            # The omitted case marker is appended as a plain string.
            mrphs.append(self.omitted_case)
        else:
            mrphs.extend(list(self.tag.mrph_list()))
        return mrphs

    @property
    def surf(self) -> str:
        """A surface string."""
        if self._surf is None:
            morphemes = self.morphemes
            if self.omitted_case:
                # Omitted arguments render as "[<base><case>]", the case
                # marker converted to hiragana.
                bases, case = morphemes[:-1], morphemes[-1]
                base = "".join(base if isinstance(base, str) else base.midasi for base in bases)
                case = convert_katakana_to_hiragana(case)
                self._surf = f"[{base}{case}]"
            else:
                self._surf = "".join(mrph.midasi for mrph in morphemes)
        return self._surf

    @property
    def key(self) -> Tuple[int, int, int, int]:
        """A key used for sorting."""
        # Omitted-case phrases are ordered by their case's PAS order first;
        # 99 places non-omitted phrases after all omitted ones.
        return PAS_ORDER.get(self.omitted_case, 99), self.ssid, self.bid, self.tid

    @property
    def is_event_head(self) -> bool:
        """True if this base phrase is the head of an event."""
        return bool(self.tag and any("節-主辞" in tag.features for tag in [self.tag] + get_parallel_tags(self.tag)))

    @property
    def is_event_end(self) -> bool:
        """True if this base phrase is the end of an event."""
        return bool(self.tag and any("節-区切" in tag.features for tag in [self.tag] + get_parallel_tags(self.tag)))

    @property
    def adnominal_events(self) -> List["Event"]:
        """A list of events modifying this predicate (adnominal)."""
        if self.omitted_case:
            # Omitted phrases have no incoming modification relations.
            return []
        else:
            return [r.modifier for r in filter_relations(self.event.incoming_relations, ["連体修飾"], [self.tid])]

    @property
    def sentential_complement_events(self) -> List["Event"]:
        """A list of events modifying this predicate (sentential complement)."""
        if self.omitted_case:
            return []
        else:
            return [r.modifier for r in filter_relations(self.event.incoming_relations, ["補文"], [self.tid])]

    @property
    def root(self) -> "BasePhrase":
        """Return the root of this base phrase."""
        root_bp = self
        while root_bp.parent:
            root_bp = root_bp.parent
        return root_bp

    def to_list(self) -> List["BasePhrase"]:
        """Expand to a list."""
        # All phrases of the tree this phrase belongs to, in sorted order.
        return sorted(self.root.modifiers(include_self=True))

    def modifiees(self, include_self: bool = False) -> List["BasePhrase"]:
        """Return a list of base phrases modified by this base phrase.

        Args:
            include_self: If true, include this base phrase to the return.
        """
        modifiee_bps = [self] if include_self else []
        def add_modifiee(bp: BasePhrase):
            # Walk up the parent chain, collecting each ancestor.
            if bp.parent:
                modifiee_bps.append(bp.parent)
                add_modifiee(bp.parent)
        add_modifiee(self)
        return modifiee_bps

    def modifiers(self, include_self: bool = False) -> List["BasePhrase"]:
        """Return a list of base phrases modifying this base phrase.

        Args:
            include_self: If true, include this base phrase to the return.
        """
        modifier_bps = [self] if include_self else []
        def add_modifier(bp: BasePhrase):
            # Depth-first collection of all descendants.
            for child_bp in bp.children:
                modifier_bps.append(child_bp)
                add_modifier(child_bp)
        add_modifier(self)
        return sorted(modifier_bps)

    def to_dict(self) -> dict:
        """Convert this object into a dictionary."""
        return dict(ssid=self.ssid, bid=self.bid, tid=self.tid, surf=self.surf)

    def to_string(self) -> str:
        """Convert this object into a string."""
        return f"<BasePhrase, ssid: {self.ssid}, bid: {self.bid}, tid: {self.tid}, surf: {self.surf}>"
class BasePhraseBuilder(Builder):
    """Builds the base-phrase trees for an event's arguments and predicate."""

    @classmethod
    def build(cls, event: "Event"):
        # Greedily dispatch base phrases to arguments.
        argument_head_bps: List[BasePhrase] = []
        for args in event.pas.arguments.values():
            for arg in args:
                head = cls._dispatch_head_base_phrase_to_argument(arg)
                argument_head_bps.append(head)
                if head.parent:
                    argument_head_bps.append(head.parent)
        # Resolve duplication.
        cls._resolve_duplication(argument_head_bps)
        # Dispatch base phrases to a predicate.
        cls._dispatch_head_base_phrase_to_predicate(event.pas.predicate, sentinels=argument_head_bps)

    @classmethod
    def _dispatch_head_base_phrase_to_argument(cls, argument: "Argument") -> BasePhrase:
        """Create (and attach) the head base phrase for one PAS argument."""
        event = argument.pas.event
        # sdist is the sentence distance of the argument; resolve its sentence ID.
        ssid = argument.pas.ssid - argument.arg.sdist
        tid = argument.arg.tid
        bid = Builder.stid_bid_map.get((ssid, tid), -1)
        tag = Builder.stid_tag_map.get((ssid, tid), None)
        if argument.arg.flag == "E":  # exophora
            head_bp = BasePhrase(event, None, ssid, bid, tid, exophora=argument.arg.midasi, omitted_case=argument.case)
        elif argument.arg.flag == "O":  # zero anaphora
            head_bp = BasePhrase(event, tag, ssid, bid, tid, omitted_case=argument.case)
        else:
            head_bp = BasePhrase(event, tag, ssid, bid, tid)
        cls._add_children(head_bp, ssid)
        cls._add_compound_phrase_component(head_bp, ssid)
        argument.head_base_phrase = head_bp
        return head_bp

    @classmethod
    def _dispatch_head_base_phrase_to_predicate(cls, predicate: "Predicate", sentinels: List[BasePhrase]) -> BasePhrase:
        """Create (and attach) the head base phrase for the predicate.

        *sentinels* are base phrases already claimed by arguments; they stop
        child collection so a phrase is not dispatched twice.
        """
        event = predicate.pas.event
        ssid = predicate.pas.event.ssid
        tid = predicate.head.tag_id
        bid = Builder.stid_bid_map.get((ssid, tid), -1)
        tag = Builder.stid_tag_map.get((ssid, tid), None)
        head_bp = BasePhrase(event, tag, ssid, bid, tid)
        cls._add_children(head_bp, ssid, sentinels=sentinels)
        if predicate.pas.event.head != predicate.pas.event.end:
            # The event's end tag differs from its head: wrap the head in a
            # parent phrase built from the end tag.
            next_tid = predicate.pas.event.end.tag_id
            next_bid = Builder.stid_bid_map.get((ssid, next_tid), -1)
            head_parent_bp = BasePhrase(event, predicate.pas.event.end, ssid, next_bid, next_tid)
            cls._add_children(head_parent_bp, ssid, sentinels=sentinels + [head_bp])
            cls._add_compound_phrase_component(head_parent_bp, ssid)
            head_bp.parent = head_parent_bp
            head_parent_bp.children.append(head_bp)
        predicate.head_base_phrase = head_bp
        return head_bp

    @classmethod
    def _add_compound_phrase_component(cls, bp: BasePhrase, ssid: int) -> NoReturn:
        """Absorb a following compound-functional-expression tag as *bp*'s parent."""
        next_tag = Builder.stid_tag_map.get((ssid, bp.tag.tag_id + 1), None)
        if next_tag and "複合辞" in next_tag.features and "補文ト" not in next_tag.features:
            next_tid = bp.tag.tag_id + 1
            next_bid = Builder.stid_bid_map.get((ssid, next_tid), -1)
            parent_bp = BasePhrase(bp.event, next_tag, ssid, next_bid, next_tid)
            cls._add_children(parent_bp, ssid, sentinels=[bp])
            # Chain further compound components recursively.
            cls._add_compound_phrase_component(parent_bp, ssid)
            bp.parent = parent_bp
            parent_bp.children.append(bp)

    @classmethod
    def _add_children(cls, parent_bp: BasePhrase, ssid: int, sentinels: List[BasePhrase] = None) -> NoReturn:
        """Attach the dependency children of *parent_bp*, skipping sentinels and clause boundaries."""
        sentinel_tags = {sentinel.tag for sentinel in sentinels} if sentinels else {}
        for child_tag in parent_bp.tag.children:  # type: Tag
            if child_tag in sentinel_tags or "節-主辞" in child_tag.features or "節-区切" in child_tag.features:
                continue
            tid = child_tag.tag_id
            bid = Builder.stid_bid_map.get((ssid, tid), -1)
            child_bp = BasePhrase(parent_bp.event, child_tag, ssid, bid, tid, is_child=True)
            cls._add_children(child_bp, ssid, sentinels)
            child_bp.parent = parent_bp
            parent_bp.children.append(child_bp)

    @classmethod
    def _resolve_duplication(cls, head_bps: List[BasePhrase]) -> NoReturn:
        """Drop child phrases that duplicate an argument head elsewhere."""
        keys = {head_bp.key[1:] for head_bp in head_bps}  # head_bp.key[0] is the case id.
        def resolver(children: List[BasePhrase]) -> NoReturn:
            # Iterate in reverse so pop(i) does not shift unvisited items.
            for i in reversed(range(len(children))):
                child_bp = children[i]
                if child_bp.omitted_case:
                    continue
                if child_bp.key[1:] in keys:
                    children.pop(i)
                else:
                    resolver(child_bp.children)
        for head in head_bps:
            resolver(head.children)
|
[
"pyknp_eventgraph.builder.Builder.stid_tag_map.get",
"pyknp_eventgraph.relation.filter_relations",
"pyknp_eventgraph.helper.convert_katakana_to_hiragana",
"pyknp_eventgraph.builder.Builder.stid_bid_map.get",
"pyknp_eventgraph.helper.get_parallel_tags",
"pyknp_eventgraph.helper.PAS_ORDER.get"
] |
[((7483, 7524), 'pyknp_eventgraph.builder.Builder.stid_bid_map.get', 'Builder.stid_bid_map.get', (['(ssid, tid)', '(-1)'], {}), '((ssid, tid), -1)\n', (7507, 7524), False, 'from pyknp_eventgraph.builder import Builder\n'), ((7539, 7582), 'pyknp_eventgraph.builder.Builder.stid_tag_map.get', 'Builder.stid_tag_map.get', (['(ssid, tid)', 'None'], {}), '((ssid, tid), None)\n', (7563, 7582), False, 'from pyknp_eventgraph.builder import Builder\n'), ((8413, 8454), 'pyknp_eventgraph.builder.Builder.stid_bid_map.get', 'Builder.stid_bid_map.get', (['(ssid, tid)', '(-1)'], {}), '((ssid, tid), -1)\n', (8437, 8454), False, 'from pyknp_eventgraph.builder import Builder\n'), ((8469, 8512), 'pyknp_eventgraph.builder.Builder.stid_tag_map.get', 'Builder.stid_tag_map.get', (['(ssid, tid)', 'None'], {}), '((ssid, tid), None)\n', (8493, 8512), False, 'from pyknp_eventgraph.builder import Builder\n'), ((9359, 9416), 'pyknp_eventgraph.builder.Builder.stid_tag_map.get', 'Builder.stid_tag_map.get', (['(ssid, bp.tag.tag_id + 1)', 'None'], {}), '((ssid, bp.tag.tag_id + 1), None)\n', (9383, 9416), False, 'from pyknp_eventgraph.builder import Builder\n'), ((3605, 3641), 'pyknp_eventgraph.helper.PAS_ORDER.get', 'PAS_ORDER.get', (['self.omitted_case', '(99)'], {}), '(self.omitted_case, 99)\n', (3618, 3641), False, 'from pyknp_eventgraph.helper import PAS_ORDER, convert_katakana_to_hiragana, get_parallel_tags\n'), ((8774, 8820), 'pyknp_eventgraph.builder.Builder.stid_bid_map.get', 'Builder.stid_bid_map.get', (['(ssid, next_tid)', '(-1)'], {}), '((ssid, next_tid), -1)\n', (8798, 8820), False, 'from pyknp_eventgraph.builder import Builder\n'), ((9568, 9614), 'pyknp_eventgraph.builder.Builder.stid_bid_map.get', 'Builder.stid_bid_map.get', (['(ssid, next_tid)', '(-1)'], {}), '((ssid, next_tid), -1)\n', (9592, 9614), False, 'from pyknp_eventgraph.builder import Builder\n'), ((10360, 10401), 'pyknp_eventgraph.builder.Builder.stid_bid_map.get', 'Builder.stid_bid_map.get', (['(ssid, tid)', '(-1)'], {}), 
'((ssid, tid), -1)\n', (10384, 10401), False, 'from pyknp_eventgraph.builder import Builder\n'), ((3291, 3325), 'pyknp_eventgraph.helper.convert_katakana_to_hiragana', 'convert_katakana_to_hiragana', (['case'], {}), '(case)\n', (3319, 3325), False, 'from pyknp_eventgraph.helper import PAS_ORDER, convert_katakana_to_hiragana, get_parallel_tags\n'), ((4370, 4439), 'pyknp_eventgraph.relation.filter_relations', 'filter_relations', (['self.event.incoming_relations', "['連体修飾']", '[self.tid]'], {}), "(self.event.incoming_relations, ['連体修飾'], [self.tid])\n", (4386, 4439), False, 'from pyknp_eventgraph.relation import filter_relations\n'), ((4704, 4771), 'pyknp_eventgraph.relation.filter_relations', 'filter_relations', (['self.event.incoming_relations', "['補文']", '[self.tid]'], {}), "(self.event.incoming_relations, ['補文'], [self.tid])\n", (4720, 4771), False, 'from pyknp_eventgraph.relation import filter_relations\n'), ((3879, 3906), 'pyknp_eventgraph.helper.get_parallel_tags', 'get_parallel_tags', (['self.tag'], {}), '(self.tag)\n', (3896, 3906), False, 'from pyknp_eventgraph.helper import PAS_ORDER, convert_katakana_to_hiragana, get_parallel_tags\n'), ((4107, 4134), 'pyknp_eventgraph.helper.get_parallel_tags', 'get_parallel_tags', (['self.tag'], {}), '(self.tag)\n', (4124, 4134), False, 'from pyknp_eventgraph.helper import PAS_ORDER, convert_katakana_to_hiragana, get_parallel_tags\n')]
|
from ape.api.config import PluginConfig
from ape.api.networks import LOCAL_NETWORK_NAME
from ape_ethereum.ecosystem import Ethereum, NetworkConfig
# Fantom networks supported by this plugin, mapped to their
# (chain_id, network_id) pairs.
NETWORKS = {
    # chain_id, network_id
    "opera": (250, 250),
    "testnet": (4002, 4002),
}
class FantomConfig(PluginConfig):
    """Network configuration for the Fantom ecosystem plugin."""

    # One confirmation and a 1-second block time for the live networks.
    opera: NetworkConfig = NetworkConfig(required_confirmations=1, block_time=1)  # type: ignore
    testnet: NetworkConfig = NetworkConfig(required_confirmations=1, block_time=1)  # type: ignore
    # Local development runs against the built-in test provider.
    local: NetworkConfig = NetworkConfig(default_provider="test")  # type: ignore
    default_network: str = LOCAL_NETWORK_NAME
class Fantom(Ethereum):
    """Fantom ecosystem: behaves like Ethereum but reads the ``fantom`` config."""

    @property
    def config(self) -> FantomConfig:  # type: ignore
        fantom_config = self.config_manager.get_config("fantom")
        return fantom_config  # type: ignore
|
[
"ape_ethereum.ecosystem.NetworkConfig"
] |
[((307, 360), 'ape_ethereum.ecosystem.NetworkConfig', 'NetworkConfig', ([], {'required_confirmations': '(1)', 'block_time': '(1)'}), '(required_confirmations=1, block_time=1)\n', (320, 360), False, 'from ape_ethereum.ecosystem import Ethereum, NetworkConfig\n'), ((406, 459), 'ape_ethereum.ecosystem.NetworkConfig', 'NetworkConfig', ([], {'required_confirmations': '(1)', 'block_time': '(1)'}), '(required_confirmations=1, block_time=1)\n', (419, 459), False, 'from ape_ethereum.ecosystem import Ethereum, NetworkConfig\n'), ((503, 541), 'ape_ethereum.ecosystem.NetworkConfig', 'NetworkConfig', ([], {'default_provider': '"""test"""'}), "(default_provider='test')\n", (516, 541), False, 'from ape_ethereum.ecosystem import Ethereum, NetworkConfig\n')]
|
from setuptools import setup
from os import path

# Single source of truth for the release version; it must appear both in
# the ``version`` field and in the GitHub archive ``download_url``.
VERSION = '0.0.5'

base_dir = path.abspath(path.dirname(__file__))

# Use the README as the long description rendered on PyPI.
with open(path.join(base_dir, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='sigils',
    version=VERSION,
    description='Extract, resolve and replace [SIGILS] embedded in text.',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    url='http://github.com/arthexis/sigils',
    # Keep the tag in the archive URL in sync with VERSION.
    download_url=f'https://github.com/arthexis/sigils/archive/v{VERSION}.tar.gz',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    keywords=["UTILS", "SIGIL", "STRING", "TEXT"],
    packages=['sigils'],
    zip_safe=True,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Topic :: Text Processing',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
    ],
    install_requires=[
        'lark-parser',
        'lru-dict'
    ],
    extras_require={
        'django': [
            'django',
        ],
        'dev': [
            'pytest',
            'black',
            'pytest-cov',
        ]
    }
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.setup"
] |
[((200, 1134), 'setuptools.setup', 'setup', ([], {'name': '"""sigils"""', 'version': '"""0.0.5"""', 'description': '"""Extract, resolve and replace [SIGILS] embedded in text."""', 'long_description': 'long_description', 'long_description_content_type': '"""text/x-rst"""', 'url': '"""http://github.com/arthexis/sigils"""', 'download_url': '"""https://github.com/arthexis/sigils/archive/v0.0.5.tar.gz"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'keywords': "['UTILS', 'SIGIL', 'STRING', 'TEXT']", 'packages': "['sigils']", 'zip_safe': '(True)', 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Text Processing', 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8']", 'install_requires': "['lark-parser', 'lru-dict']", 'extras_require': "{'django': ['django'], 'dev': ['pytest', 'black', 'pytest-cov']}"}), "(name='sigils', version='0.0.5', description=\n 'Extract, resolve and replace [SIGILS] embedded in text.',\n long_description=long_description, long_description_content_type=\n 'text/x-rst', url='http://github.com/arthexis/sigils', download_url=\n 'https://github.com/arthexis/sigils/archive/v0.0.5.tar.gz', author=\n '<NAME>', author_email='<EMAIL>', license='MIT', keywords=['UTILS',\n 'SIGIL', 'STRING', 'TEXT'], packages=['sigils'], zip_safe=True,\n classifiers=['Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Text Processing', 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8'], install_requires=[\n 'lark-parser', 'lru-dict'], extras_require={'django': ['django'], 'dev':\n ['pytest', 'black', 'pytest-cov']})\n", (205, 1134), False, 'from setuptools import 
setup\n'), ((74, 96), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (86, 96), False, 'from os import path\n'), ((108, 141), 'os.path.join', 'path.join', (['base_dir', '"""README.rst"""'], {}), "(base_dir, 'README.rst')\n", (117, 141), False, 'from os import path\n')]
|
# Experiment script: fit the GPCM family to data sampled from a known
# spectral-mixture-like kernel ("smk"), with a gap in the observations.
import lab as B
from stheno import EQ, GP, Delta, Measure
from gpcm.experiment import run, setup
args, wd = setup("smk")
# Setup experiment.
n = 801 # Need to add the last point for the call to `linspace`.
noise = 1.0
t = B.linspace(-44, 44, n)
t_plot = B.linspace(0, 10, 500)
# Setup true model and GPCM models.
# True kernel: sum of two EQ-windowed sinusoids (cosine + sine components).
kernel = EQ().stretch(1) * (lambda x: B.cos(2 * B.pi * x))
kernel = kernel + EQ().stretch(1) * (lambda x: B.sin(2 * B.pi * x))
# GPCM hyperparameters: window/scale lengths and numbers of inducing points.
window = 4
scale = 0.5
n_u = 40
n_z = 88
# Sample data.
m = Measure()
gp_f = GP(kernel, measure=m)
gp_y = gp_f + GP(noise * Delta(), measure=m)
truth, y = map(B.flatten, m.sample(gp_f(t_plot), gp_y(t)))
# Remove region [-8.8, 8.8] to test interpolation through missing data.
inds = ~((t >= -8.8) & (t <= 8.8))
t = t[inds]
y = y[inds]
def comparative_kernel(vs_):
    # Baseline GP kernel with learnable variance, length scale and noise.
    return vs_.pos(1) * EQ().stretch(vs_.pos(1.0)) + vs_.pos(noise) * Delta()
run(
    args=args,
    wd=wd,
    noise=noise,
    window=window,
    scale=scale,
    t=t,
    y=y,
    n_u=n_u,
    n_z=n_z,
    true_kernel=kernel,
    true_noisy_kernel=kernel + noise * Delta(),
    comparative_kernel=comparative_kernel,
    t_plot=t_plot,
    truth=(t_plot, truth),
    x_range={"psd": (0, 3)},
    y_range={"kernel": (-1.5, 1.5), "psd": (-100, 10)},
)
|
[
"stheno.EQ",
"lab.linspace",
"gpcm.experiment.setup",
"lab.sin",
"stheno.Delta",
"stheno.Measure",
"stheno.GP",
"lab.cos"
] |
[((110, 122), 'gpcm.experiment.setup', 'setup', (['"""smk"""'], {}), "('smk')\n", (115, 122), False, 'from gpcm.experiment import run, setup\n'), ((226, 248), 'lab.linspace', 'B.linspace', (['(-44)', '(44)', 'n'], {}), '(-44, 44, n)\n', (236, 248), True, 'import lab as B\n'), ((258, 280), 'lab.linspace', 'B.linspace', (['(0)', '(10)', '(500)'], {}), '(0, 10, 500)\n', (268, 280), True, 'import lab as B\n'), ((507, 516), 'stheno.Measure', 'Measure', ([], {}), '()\n', (514, 516), False, 'from stheno import EQ, GP, Delta, Measure\n'), ((524, 545), 'stheno.GP', 'GP', (['kernel'], {'measure': 'm'}), '(kernel, measure=m)\n', (526, 545), False, 'from stheno import EQ, GP, Delta, Measure\n'), ((356, 375), 'lab.cos', 'B.cos', (['(2 * B.pi * x)'], {}), '(2 * B.pi * x)\n', (361, 375), True, 'import lab as B\n'), ((327, 331), 'stheno.EQ', 'EQ', ([], {}), '()\n', (329, 331), False, 'from stheno import EQ, GP, Delta, Measure\n'), ((424, 443), 'lab.sin', 'B.sin', (['(2 * B.pi * x)'], {}), '(2 * B.pi * x)\n', (429, 443), True, 'import lab as B\n'), ((571, 578), 'stheno.Delta', 'Delta', ([], {}), '()\n', (576, 578), False, 'from stheno import EQ, GP, Delta, Measure\n'), ((840, 847), 'stheno.Delta', 'Delta', ([], {}), '()\n', (845, 847), False, 'from stheno import EQ, GP, Delta, Measure\n'), ((395, 399), 'stheno.EQ', 'EQ', ([], {}), '()\n', (397, 399), False, 'from stheno import EQ, GP, Delta, Measure\n'), ((1041, 1048), 'stheno.Delta', 'Delta', ([], {}), '()\n', (1046, 1048), False, 'from stheno import EQ, GP, Delta, Measure\n'), ((794, 798), 'stheno.EQ', 'EQ', ([], {}), '()\n', (796, 798), False, 'from stheno import EQ, GP, Delta, Measure\n')]
|
# -*- coding: utf-8 -*-
"""
Constants used across the project: local and remote input/output
directories, plus the webargs schemas consumed by the DEEPaaS API.
"""
import os
from webargs import fields, validate
from marshmallow import Schema, INCLUDE
# identify basedir for the package
BASE_DIR = os.path.dirname(os.path.normpath(os.path.dirname(__file__)))
# default location for input and output data, e.g. directories 'data' and 'models',
# is either set relative to the application path or via environment setting
IN_OUT_BASE_DIR = BASE_DIR
if 'APP_INPUT_OUTPUT_BASE_DIR' in os.environ:
    env_in_out_base_dir = os.environ['APP_INPUT_OUTPUT_BASE_DIR']
    # Only honour the override when it points at an existing directory;
    # otherwise warn and fall back to the package-relative default.
    if os.path.isdir(env_in_out_base_dir):
        IN_OUT_BASE_DIR = env_in_out_base_dir
    else:
        msg = "[WARNING] \"APP_INPUT_OUTPUT_BASE_DIR=" + \
        "{}\" is not a valid directory! ".format(env_in_out_base_dir) + \
        "Using \"BASE_DIR={}\" instead.".format(BASE_DIR)
        print(msg)
DATA_DIR = os.path.join(IN_OUT_BASE_DIR, 'data/')
IMG_STYLE_DIR = os.path.join(IN_OUT_BASE_DIR, 'neural_transfer/dataset/style_images')
MODEL_DIR = os.path.join(IN_OUT_BASE_DIR, 'models')
# Remote storage locations (rclone remote + public Nextcloud share).
neural_RemoteSpace = 'rshare:/neural_transfer/'
neural_RemoteShare = 'https://nc.deep-hybrid-datacloud.eu/s/9Qp4mxNBaLKmqAQ/download?path=%2F&files='
REMOTE_IMG_DATA_DIR = os.path.join(neural_RemoteSpace, 'dataset/training_dataset/')
REMOTE_IMG_STYLE_DIR = os.path.join(neural_RemoteSpace, 'styles/')
REMOTE_MODELS_DIR = os.path.join(neural_RemoteSpace, 'models/')
# Input parameters for predict() (deepaas>=1.0.0)
class PredictArgsSchema(Schema):
    """Arguments accepted by the predict() endpoint."""

    class Meta:
        unknown = INCLUDE  # support 'full_paths' parameter

    # full list of fields: https://marshmallow.readthedocs.io/en/stable/api_reference.html
    # to be able to upload a file for prediction
    img_content = fields.Field(
        required=False,
        missing=None,
        type="file",
        data_key="image_content",
        location="form",
        description="Image to be styled."
    )

    accept = fields.Str(
        # Fix: the keyword was misspelled as 'require', which webargs/marshmallow
        # silently treats as unknown metadata instead of the 'required' flag.
        required=False,
        description="Returns the image with the new style or a pdf containing the 3 images.",
        missing='image/png',
        validate=validate.OneOf(['image/png', 'application/pdf']))

    model_name = fields.Str(
        required=False,
        missing="mosaic",
        description="Name of the saved model. This module already comes with some styles, just write the name: 'mosaic', 'candy', 'rain_princess' or 'udnie'. You can see the styles in the dataset/style_images folder. Running 'get_metadata' return the list of models in the module."
    )
# Input parameters for train() (deepaas>=1.0.0)
class TrainArgsSchema(Schema):
    """Arguments accepted by the train() endpoint."""

    class Meta:
        unknown = INCLUDE  # support 'full_paths' parameter

    model_name = fields.Str(
        required=True,
        description="Name of the style image e.g. 'name.jpg' in nextcloud. This will also be the name of the model."
    )

    upload_model = fields.Boolean(
        required=False,
        # Fix: the default was 2 (copy-pasted from 'epochs'); a Boolean field
        # should default to an actual bool. 2 is truthy, so True preserves
        # downstream behaviour.
        missing=True,
        description="Upload model to nextcloud."
    )

    epochs = fields.Int(
        required=False,
        missing=2,
        description="Number of training epochs."
    )

    learning_rate = fields.Float(
        required=False,
        missing=0.003,
        description="Learning rate."
    )

    batch_size = fields.Int(
        required=False,
        missing=4,
        description="Batch size for training."
    )

    content_weight = fields.Float(
        required=False,
        missing=1e5,
        description="Weight for content-loss."
    )

    style_weight = fields.Float(
        required=False,
        missing=1e10,
        # Fix: the description was copy-pasted from an unrelated parameter
        # ("Number of iterations...").
        description="Weight for style-loss."
    )

    size_train_img = fields.Int(
        required=False,
        missing=256,
        description="Size of training images, default is 256 X 256"
    )

    log_interval = fields.Int(
        required=False,
        missing=200,
        description="Number of images after which the training loss is logged."
    )
|
[
"webargs.fields.Str",
"os.path.isdir",
"webargs.fields.Float",
"os.path.dirname",
"webargs.validate.OneOf",
"webargs.fields.Int",
"webargs.fields.Field",
"webargs.fields.Boolean",
"os.path.join"
] |
[((903, 941), 'os.path.join', 'os.path.join', (['IN_OUT_BASE_DIR', '"""data/"""'], {}), "(IN_OUT_BASE_DIR, 'data/')\n", (915, 941), False, 'import os\n'), ((958, 1027), 'os.path.join', 'os.path.join', (['IN_OUT_BASE_DIR', '"""neural_transfer/dataset/style_images"""'], {}), "(IN_OUT_BASE_DIR, 'neural_transfer/dataset/style_images')\n", (970, 1027), False, 'import os\n'), ((1040, 1079), 'os.path.join', 'os.path.join', (['IN_OUT_BASE_DIR', '"""models"""'], {}), "(IN_OUT_BASE_DIR, 'models')\n", (1052, 1079), False, 'import os\n'), ((1254, 1315), 'os.path.join', 'os.path.join', (['neural_RemoteSpace', '"""dataset/training_dataset/"""'], {}), "(neural_RemoteSpace, 'dataset/training_dataset/')\n", (1266, 1315), False, 'import os\n'), ((1339, 1382), 'os.path.join', 'os.path.join', (['neural_RemoteSpace', '"""styles/"""'], {}), "(neural_RemoteSpace, 'styles/')\n", (1351, 1382), False, 'import os\n'), ((1404, 1447), 'os.path.join', 'os.path.join', (['neural_RemoteSpace', '"""models/"""'], {}), "(neural_RemoteSpace, 'models/')\n", (1416, 1447), False, 'import os\n'), ((589, 623), 'os.path.isdir', 'os.path.isdir', (['env_in_out_base_dir'], {}), '(env_in_out_base_dir)\n', (602, 623), False, 'import os\n'), ((1772, 1910), 'webargs.fields.Field', 'fields.Field', ([], {'required': '(False)', 'missing': 'None', 'type': '"""file"""', 'data_key': '"""image_content"""', 'location': '"""form"""', 'description': '"""Image to be styled."""'}), "(required=False, missing=None, type='file', data_key=\n 'image_content', location='form', description='Image to be styled.')\n", (1784, 1910), False, 'from webargs import fields, validate\n'), ((2249, 2578), 'webargs.fields.Str', 'fields.Str', ([], {'required': '(False)', 'missing': '"""mosaic"""', 'description': '"""Name of the saved model. This module already comes with some styles, just write the name: \'mosaic\', \'candy\', \'rain_princess\' or \'udnie\'. You can see the styles in the dataset/style_images folder. 
Running \'get_metadata\' return the list of models in the module."""'}), '(required=False, missing=\'mosaic\', description=\n "Name of the saved model. This module already comes with some styles, just write the name: \'mosaic\', \'candy\', \'rain_princess\' or \'udnie\'. You can see the styles in the dataset/style_images folder. Running \'get_metadata\' return the list of models in the module."\n )\n', (2259, 2578), False, 'from webargs import fields, validate\n'), ((2787, 2932), 'webargs.fields.Str', 'fields.Str', ([], {'required': '(True)', 'description': '"""Name of the style image e.g. \'name.jpg\' in nextcloud. This will also be the name of the model."""'}), '(required=True, description=\n "Name of the style image e.g. \'name.jpg\' in nextcloud. This will also be the name of the model."\n )\n', (2797, 2932), False, 'from webargs import fields, validate\n'), ((2969, 3057), 'webargs.fields.Boolean', 'fields.Boolean', ([], {'required': '(False)', 'missing': '(2)', 'description': '"""Upload model to nextcloud."""'}), "(required=False, missing=2, description=\n 'Upload model to nextcloud.')\n", (2983, 3057), False, 'from webargs import fields, validate\n'), ((3103, 3182), 'webargs.fields.Int', 'fields.Int', ([], {'required': '(False)', 'missing': '(2)', 'description': '"""Number of training epochs."""'}), "(required=False, missing=2, description='Number of training epochs.')\n", (3113, 3182), False, 'from webargs import fields, validate\n'), ((3240, 3313), 'webargs.fields.Float', 'fields.Float', ([], {'required': '(False)', 'missing': '(0.003)', 'description': '"""Learning rate."""'}), "(required=False, missing=0.003, description='Learning rate.')\n", (3252, 3313), False, 'from webargs import fields, validate\n'), ((3368, 3445), 'webargs.fields.Int', 'fields.Int', ([], {'required': '(False)', 'missing': '(4)', 'description': '"""Batch size for training."""'}), "(required=False, missing=4, description='Batch size for training.')\n", (3378, 3445), False, 'from 
webargs import fields, validate\n'), ((3504, 3595), 'webargs.fields.Float', 'fields.Float', ([], {'required': '(False)', 'missing': '(100000.0)', 'description': '"""Weight for content-loss."""'}), "(required=False, missing=100000.0, description=\n 'Weight for content-loss.')\n", (3516, 3595), False, 'from webargs import fields, validate\n'), ((3642, 3775), 'webargs.fields.Float', 'fields.Float', ([], {'required': '(False)', 'missing': '(10000000000.0)', 'description': '"""Number of iterations on the network to compute the gradients."""'}), "(required=False, missing=10000000000.0, description=\n 'Number of iterations on the network to compute the gradients.')\n", (3654, 3775), False, 'from webargs import fields, validate\n'), ((3820, 3925), 'webargs.fields.Int', 'fields.Int', ([], {'required': '(False)', 'missing': '(256)', 'description': '"""Size of training images, default is 256 X 256"""'}), "(required=False, missing=256, description=\n 'Size of training images, default is 256 X 256')\n", (3830, 3925), False, 'from webargs import fields, validate\n'), ((3977, 4094), 'webargs.fields.Int', 'fields.Int', ([], {'required': '(False)', 'missing': '(200)', 'description': '"""Number of images after which the training loss is logged."""'}), "(required=False, missing=200, description=\n 'Number of images after which the training loss is logged.')\n", (3987, 4094), False, 'from webargs import fields, validate\n'), ((254, 279), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (269, 279), False, 'import os\n'), ((2177, 2225), 'webargs.validate.OneOf', 'validate.OneOf', (["['image/png', 'application/pdf']"], {}), "(['image/png', 'application/pdf'])\n", (2191, 2225), False, 'from webargs import fields, validate\n')]
|
from ...isa.inst import *
import numpy as np
class Vmfeq_vf(Inst):
    name = 'vmfeq.vf'

    def golden(self):
        """Reference model for vmfeq.vf: mask bit i <- (rs1 == vs2[i])."""
        if 'vs2' not in self:
            return 0
        bits = np.unpackbits(self['orig'], bitorder='little')
        start = self['vstart'] if 'vstart' in self else 0
        vl = self['vl']
        # Active-element mask: explicit v0 mask, or all-ones when unmasked.
        if 'mask' in self:
            active = np.unpackbits(self['mask'], bitorder='little')[start:vl]
        elif vl >= start:
            active = np.ones(vl - start, dtype=np.uint8)
        for idx in range(start, vl):
            if active[idx - start]:
                bits[idx] = self['rs1'] == self['vs2'][idx]
        return np.packbits(bits, bitorder='little')
class Vmfne_vf(Inst):
    name = 'vmfne.vf'

    def golden(self):
        """Reference model for vmfne.vf: mask bit i <- (rs1 != vs2[i])."""
        if 'vs2' not in self:
            return 0
        bits = np.unpackbits(self['orig'], bitorder='little')
        start = self['vstart'] if 'vstart' in self else 0
        vl = self['vl']
        # Active-element mask: explicit v0 mask, or all-ones when unmasked.
        if 'mask' in self:
            active = np.unpackbits(self['mask'], bitorder='little')[start:vl]
        elif vl >= start:
            active = np.ones(vl - start, dtype=np.uint8)
        for idx in range(start, vl):
            if active[idx - start]:
                bits[idx] = self['rs1'] != self['vs2'][idx]
        return np.packbits(bits, bitorder='little')
class Vmflt_vf(Inst):
    name = 'vmflt.vf'

    def golden(self):
        """Reference model for vmflt.vf: mask bit i <- (vs2[i] < rs1)."""
        if 'vs2' not in self:
            return 0
        bits = np.unpackbits(self['orig'], bitorder='little')
        start = self['vstart'] if 'vstart' in self else 0
        vl = self['vl']
        # Active-element mask: explicit v0 mask, or all-ones when unmasked.
        if 'mask' in self:
            active = np.unpackbits(self['mask'], bitorder='little')[start:vl]
        elif vl >= start:
            active = np.ones(vl - start, dtype=np.uint8)
        for idx in range(start, vl):
            if active[idx - start]:
                bits[idx] = self['vs2'][idx] < self['rs1']
        return np.packbits(bits, bitorder='little')
class Vmfle_vf(Inst):
    name = 'vmfle.vf'

    def golden(self):
        """Reference model for vmfle.vf: mask bit i <- (vs2[i] <= rs1)."""
        if 'vs2' not in self:
            return 0
        bits = np.unpackbits(self['orig'], bitorder='little')
        start = self['vstart'] if 'vstart' in self else 0
        vl = self['vl']
        # Active-element mask: explicit v0 mask, or all-ones when unmasked.
        if 'mask' in self:
            active = np.unpackbits(self['mask'], bitorder='little')[start:vl]
        elif vl >= start:
            active = np.ones(vl - start, dtype=np.uint8)
        for idx in range(start, vl):
            if active[idx - start]:
                bits[idx] = self['vs2'][idx] <= self['rs1']
        return np.packbits(bits, bitorder='little')
class Vmfgt_vf(Inst):
    name = 'vmfgt.vf'

    def golden(self):
        """Reference model for vmfgt.vf: mask bit i <- (vs2[i] > rs1)."""
        if 'vs2' not in self:
            return 0
        bits = np.unpackbits(self['orig'], bitorder='little')
        start = self['vstart'] if 'vstart' in self else 0
        vl = self['vl']
        # Active-element mask: explicit v0 mask, or all-ones when unmasked.
        if 'mask' in self:
            active = np.unpackbits(self['mask'], bitorder='little')[start:vl]
        elif vl >= start:
            active = np.ones(vl - start, dtype=np.uint8)
        for idx in range(start, vl):
            if active[idx - start]:
                bits[idx] = self['vs2'][idx] > self['rs1']
        return np.packbits(bits, bitorder='little')
class Vmfge_vf(Inst):
    name = 'vmfge.vf'

    def golden(self):
        """Reference model for vmfge.vf: mask bit i <- (vs2[i] >= rs1)."""
        if 'vs2' not in self:
            return 0
        bits = np.unpackbits(self['orig'], bitorder='little')
        start = self['vstart'] if 'vstart' in self else 0
        vl = self['vl']
        # Active-element mask: explicit v0 mask, or all-ones when unmasked.
        if 'mask' in self:
            active = np.unpackbits(self['mask'], bitorder='little')[start:vl]
        elif vl >= start:
            active = np.ones(vl - start, dtype=np.uint8)
        for idx in range(start, vl):
            if active[idx - start]:
                bits[idx] = self['vs2'][idx] >= self['rs1']
        return np.packbits(bits, bitorder='little')
|
[
"numpy.packbits",
"numpy.ones",
"numpy.unpackbits"
] |
[((161, 207), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (174, 207), True, 'import numpy as np\n'), ((782, 820), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (793, 820), True, 'import numpy as np\n'), ((1003, 1049), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (1016, 1049), True, 'import numpy as np\n'), ((1624, 1662), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (1635, 1662), True, 'import numpy as np\n'), ((1844, 1890), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (1857, 1890), True, 'import numpy as np\n'), ((2465, 2503), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (2476, 2503), True, 'import numpy as np\n'), ((2686, 2732), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (2699, 2732), True, 'import numpy as np\n'), ((3308, 3346), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (3319, 3346), True, 'import numpy as np\n'), ((3530, 3576), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (3543, 3576), True, 'import numpy as np\n'), ((4151, 4189), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (4162, 4189), True, 'import numpy as np\n'), ((4373, 4419), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (4386, 4419), True, 'import numpy as np\n'), ((4995, 5033), 'numpy.packbits', 
'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (5006, 5033), True, 'import numpy as np\n'), ((385, 431), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (398, 431), True, 'import numpy as np\n'), ((538, 582), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (545, 582), True, 'import numpy as np\n'), ((1227, 1273), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (1240, 1273), True, 'import numpy as np\n'), ((1380, 1424), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (1387, 1424), True, 'import numpy as np\n'), ((2068, 2114), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (2081, 2114), True, 'import numpy as np\n'), ((2221, 2265), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (2228, 2265), True, 'import numpy as np\n'), ((2910, 2956), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (2923, 2956), True, 'import numpy as np\n'), ((3063, 3107), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (3070, 3107), True, 'import numpy as np\n'), ((3754, 3800), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (3767, 3800), True, 'import numpy as np\n'), ((3907, 3951), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (3914, 3951), True, 'import numpy as np\n'), ((4597, 4643), 'numpy.unpackbits', 
'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (4610, 4643), True, 'import numpy as np\n'), ((4750, 4794), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (4757, 4794), True, 'import numpy as np\n')]
|
from typing import List, Tuple
import numpy as np
from evobench.benchmark import Benchmark
# from evobench.linkage import DependencyStructureMatrix
from evobench.model import Population, Solution
from ..operator import Operator
class RestrictedMixing(Operator):
    """Restricted-mixing operator: greedily flips genes along an ILS order."""

    def __init__(self, benchmark: Benchmark):
        super(RestrictedMixing, self).__init__(benchmark)

    def mix(
        self,
        source: Solution,
        ils: List[int], population: Population
    ) -> Tuple[Solution, np.ndarray]:
        """Flip each gene in ``ils``; keep a flip only if it does not worsen
        fitness and the resulting genome is not already in ``population``.

        Returns the mixed solution and a boolean mask of the genes kept flipped.
        """
        assert source.genome.size == self.benchmark.genome_size

        if not source.fitness:
            source.fitness = self.benchmark.evaluate_solution(source)

        candidate = Solution(source.genome.copy())
        incumbent = source.fitness
        flipped = np.zeros(self.benchmark.genome_size, dtype=bool)

        for locus in ils:
            candidate.genome[locus] = 1 - candidate.genome[locus]
            score = self.benchmark.evaluate_solution(candidate)

            # ! TODO: benchmark min/max
            if score >= incumbent and not population.contains(candidate):
                incumbent = score
                flipped[locus] = True
            else:
                # Revert the flip that did not help.
                candidate.genome[locus] = 1 - candidate.genome[locus]

        candidate.fitness = incumbent
        return candidate, flipped
|
[
"numpy.zeros"
] |
[((911, 959), 'numpy.zeros', 'np.zeros', (['self.benchmark.genome_size'], {'dtype': 'bool'}), '(self.benchmark.genome_size, dtype=bool)\n', (919, 959), True, 'import numpy as np\n')]
|
import logging
from django.template import VariableDoesNotExist
from django import template
from stack_it.contents.abstracts import BaseContentMixin
from stack_it.models import Page, Template as TemplateModel
from django.db import transaction
from django.utils.safestring import mark_safe
# Module-level logger and the tag registry used by the template-tag
# definitions below.
logger = logging.getLogger(__name__)
register = template.Library()
def get_template(request, templatename):
    """Return the ``TemplateModel`` for ``templatename``, cached per request.

    The per-request ``request.templates`` dict avoids repeated database
    round-trips when the same template path is referenced several times
    while rendering one page. The original implementation duplicated the
    get-or-create logic in both branches; this collapses the copy-paste
    without changing behaviour.
    """
    if not hasattr(request, "templates"):
        request.templates = {}
    _template = request.templates.get(templatename)
    if _template is None:
        # get_or_create may write a new row, so keep it in a transaction.
        with transaction.atomic():
            _template, _ = TemplateModel.objects.get_or_create(path=templatename)
        request.templates[templatename] = _template
    return _template
class ContentNodeMixin(template.Node):
    """Base template node that resolves (or lazily creates) a content object
    for a given key, migrating the key from another content type if needed.

    Subclasses set CONTENT_MODEL and INSTANCE_PARAMETER_NAME.
    """

    # Concrete content model class; set by subclasses.
    CONTENT_MODEL = None
    # Template used to wrap output for staff users (inline editing).
    ADMIN_TEMPLATE = "stack_it/editable.html"
    # Name of the FK kwarg on CONTENT_MODEL pointing at the owning instance.
    INSTANCE_PARAMETER_NAME = None

    def __init__(self, instance, content_type, key, widget, nodelist):
        super(ContentNodeMixin, self).__init__()
        # `instance` is a template variable name, resolved later per-context.
        self.instance = template.Variable(instance)
        # `key` may be a template Variable or a plain string (see _get_key).
        self.key_variable = key
        self.widget = widget
        self.nodelist = nodelist
        self.messages = []
        self.content_type = content_type
        # All content types other than ours, used for key migration.
        self.alternative_content_types = list(
            dict(BaseContentMixin.CONTENT_TYPES).keys()
        )
        self.alternative_content_types.remove(self.content_type)
        self.admin_template = template.loader.get_template(self.ADMIN_TEMPLATE)

    def create_content(self, instance, content_type, key, value):
        """
        Create the related content object and register it in the instance's
        per-content-type cache. Meant to be overridden.

        Returns:
            CONTENT_MODEL instance: the newly created content object.
        """
        attrs = dict(
            [
                (self.INSTANCE_PARAMETER_NAME, instance),
                ("content_type", content_type),
                ("key", key),
                ("value", value),
            ]
        )
        with transaction.atomic():
            content_instance = self.CONTENT_MODEL.objects.create(**attrs)
            getattr(instance, f"{self.content_type}s").update(
                dict(((self.key, content_instance),))
            )
        return content_instance

    def _get_instance(self, context):
        # Resolve the owning model instance from the rendering context.
        return self.instance.resolve(context)

    def _get_key(self, context):
        # Key may be a Variable (resolve it) or a literal string (no .resolve).
        try:
            return self.key_variable.resolve(context)
        except AttributeError:
            return self.key_variable

    def content(self, context):
        """Return the content object for the resolved key, creating or
        migrating it when necessary."""
        instance = self._get_instance(context)
        self.key = self._get_key(context)
        # Rendered node body is used as the default value on first creation.
        original_output = self.nodelist.render(context)
        if self.key in getattr(instance, f"{self.content_type}s").keys():
            return getattr(instance, f"{self.content_type}s").get(self.key)
        for content_type in self.alternative_content_types:
            # Check whether the key exists under another content type; if so,
            # migrate it to ours and warn.
            if self.key in getattr(instance, f"{content_type}s").keys():
                content_instance = getattr(instance, f"{content_type}s").get(self.key)
                content_instance.content_type = self.content_type
                with transaction.atomic():
                    content_instance.save()
                msg = (
                    "warning",
                    f"Automatically changed {self.key} for instance {content_instance}!",
                )
                self.messages.append(msg)
                # Move the cached entry to our content type's cache.
                getattr(instance, f"{content_type}s").pop(self.key)
                getattr(instance, f"{self.content_type}s").update(
                    {self.key: content_instance}
                )
                return content_instance
        return self.create_content(
            instance, self.content_type, self.key, original_output
        )

    def render(self, context):
        content = self.content(context)
        # Staff users get the editable wrapper; everyone else gets raw value.
        if hasattr(context["request"], "user") and context["request"].user.is_staff:
            return self.admin_template.render(
                {
                    "id": content.id,
                    "key": self.key,
                    "widget": self.widget,
                    "value": mark_safe(content.value),
                }
            )
        return mark_safe(content.value)
class TemplateContentNodeMixin(ContentNodeMixin):
    """Content node whose owning instance is a ``TemplateModel`` looked up
    by path (with the per-request cache) instead of a context variable."""

    def __init__(self, instance, content_type, key, widget, nodelist):
        super(TemplateContentNodeMixin, self).__init__(
            instance, content_type, key, widget, nodelist
        )
        # Keep the raw template path; the parent stored a template.Variable.
        self.instance = instance

    def _get_instance(self, context):
        return get_template(context["request"], self.instance)
class TextTagMixin(object):
    """Callable that resolves (or lazily creates) a text content object for
    a key on an instance, migrating the key from another content type when
    needed. Subclasses set CONTENT_MODEL and INSTANCE_PARAMETER_NAME.
    """

    CONTENT_MODEL = None
    ADMIN_TEMPLATE = "stack_it/editable.html"
    INSTANCE_PARAMETER_NAME = None

    def __init__(self, instance, content_type, key, content):
        super(TextTagMixin, self).__init__()
        self.instance = instance
        self.key = key
        # Default value used when the content object has to be created.
        self.content = content
        self.messages = []
        self.content_type = content_type
        # All content types other than ours, used for key migration.
        self.alternative_content_types = list(
            dict(BaseContentMixin.CONTENT_TYPES).keys()
        )
        self.alternative_content_types.remove(self.content_type)

    def create_content(self, instance, content_type, key, value):
        """
        Create the related content object and register it in the instance's
        per-content-type cache. Meant to be overridden.

        Returns:
            CONTENT_MODEL instance: the newly created content object.
        """
        attrs = dict(
            [
                (self.INSTANCE_PARAMETER_NAME, instance),
                ("content_type", content_type),
                ("key", key),
                ("value", value),
            ]
        )
        with transaction.atomic():
            content_instance = self.CONTENT_MODEL.objects.create(**attrs)
            getattr(instance, f"{self.content_type}s").update(
                dict(((self.key, content_instance),))
            )
        return content_instance

    def __call__(self):
        instance = self.instance
        if self.key in getattr(instance, f"{self.content_type}s").keys():
            return getattr(instance, f"{self.content_type}s").get(self.key)
        for content_type in self.alternative_content_types:
            # Key found under another content type: migrate it to ours.
            if self.key in getattr(instance, f"{content_type}s").keys():
                content_instance = getattr(instance, f"{content_type}s").get(self.key)
                content_instance.content_type = self.content_type
                # Consistency fix: wrap the write in a transaction, matching
                # the identical migration in ContentNodeMixin.content.
                with transaction.atomic():
                    content_instance.save()
                msg = (
                    "warning",
                    f"Automatically changed {self.key} for instance {content_instance}!",
                )
                self.messages.append(msg)
                getattr(instance, f"{content_type}s").pop(self.key)
                getattr(instance, f"{self.content_type}s").update(
                    {self.key: content_instance}
                )
                return content_instance
        return self.create_content(instance, self.content_type, self.key, self.content)
class ImageTagMixin(object):
CONTENT_MODEL = None
ADMIN_TEMPLATE = "stack_it/editable.html"
INSTANCE_PARAMETER_NAME = None
def __init__(self, instance, content_type, key, size, color):
super(ImageTagMixin, self).__init__()
self.instance = instance
self.key = key
self.size = size
self.color = color
self.messages = []
self.content_type = content_type
self.alternative_content_types = list(
dict(BaseContentMixin.CONTENT_TYPES).keys()
)
self.alternative_content_types.remove(self.content_type)
self.admin_template = template.loader.get_template(self.ADMIN_TEMPLATE)
def create_content(
self, instance, content_type, key, size="800x600", color=(0, 0, 0)
):
"""
Creates related content
This is meant to be overriden
Returns:
CONTENT_MODEL instance: Returns the CONTENT_MODEL instance which's just been created
"""
attrs = dict(
[
(self.INSTANCE_PARAMETER_NAME, instance),
("content_type", content_type),
("key", key),
("size", size),
("color", color),
]
)
with transaction.atomic():
content_instance = self.CONTENT_MODEL.init(**attrs)
getattr(instance, f"{self.content_type}s").update(
dict(((self.key, content_instance),))
)
return content_instance
def __call__(self):
instance = self.instance
if self.key in getattr(instance, f"{self.content_type}s").keys():
return getattr(instance, f"{self.content_type}s").get(self.key)
for content_type in self.alternative_content_types:
# Checking the key cannot be found anywhere else®
if self.key in getattr(instance, f"{content_type}s").keys():
content_instance = getattr(instance, f"{content_type}s").get(self.key)
content_instance.content_type = self.content_type
content_instance.save()
msg = (
"warning",
f"Automatically changed {self.key} for instance {content_instance}!",
)
self.messages.append(msg)
getattr(instance, f"{content_type}s").pop(self.key)
getattr(instance, f"{self.content_type}s").update(
{self.key: content_instance}
)
return content_instance
return self.create_content(
instance, self.content_type, self.key, self.size, self.color
)
class PageTagMixin(object):
CONTENT_MODEL = None
ADMIN_TEMPLATE = "stack_it/editable.html"
INSTANCE_PARAMETER_NAME = None
def __init__(self, instance, content_type, key, title):
super(PageTagMixin, self).__init__()
self.instance = instance
self.key = key
self.title = title
self.messages = []
self.content_type = content_type
self.alternative_content_types = list(
dict(BaseContentMixin.CONTENT_TYPES).keys()
)
self.alternative_content_types.remove(self.content_type)
def create_content(self, instance, content_type, key, title):
"""
Creates related content
This is meant to be overriden
Returns:
CONTENT_MODEL instance: Returns the CONTENT_MODEL instance which's just been created
"""
with transaction.atomic():
page = Page.get_or_create(title=title)
content_instance, created = self.CONTENT_MODEL.objects.get_or_create(
**dict(
[
(self.INSTANCE_PARAMETER_NAME, instance),
("content_type", content_type),
("key", key),
("value", page),
]
)
)
getattr(instance, f"{self.content_type}s").update(
dict(((self.key, content_instance),))
)
return content_instance
def __call__(self):
instance = self.instance
if self.key in getattr(instance, f"{self.content_type}s").keys():
return getattr(instance, f"{self.content_type}s").get(self.key)
for content_type in self.alternative_content_types:
# Checking the key cannot be found anywhere else®
if self.key in getattr(instance, f"{content_type}s").keys():
content_instance = getattr(instance, f"{content_type}s").get(self.key)
content_instance.content_type = self.content_type
content_instance.save()
msg = (
"warning",
f"Automatically changed {self.key} for instance {content_instance}!",
)
self.messages.append(msg)
getattr(instance, f"{content_type}s").pop(self.key)
getattr(instance, f"{self.content_type}s").update(
{self.key: content_instance}
)
return content_instance
return self.create_content(instance, self.content_type, self.key, self.title)
|
[
"stack_it.models.Template.objects.get_or_create",
"django.template.Library",
"django.utils.safestring.mark_safe",
"django.template.Variable",
"stack_it.models.Page.get_or_create",
"django.db.transaction.atomic",
"logging.getLogger",
"django.template.loader.get_template"
] |
[((329, 356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (346, 356), False, 'import logging\n'), ((368, 386), 'django.template.Library', 'template.Library', ([], {}), '()\n', (384, 386), False, 'from django import template\n'), ((1285, 1312), 'django.template.Variable', 'template.Variable', (['instance'], {}), '(instance)\n', (1302, 1312), False, 'from django import template\n'), ((1684, 1733), 'django.template.loader.get_template', 'template.loader.get_template', (['self.ADMIN_TEMPLATE'], {}), '(self.ADMIN_TEMPLATE)\n', (1712, 1733), False, 'from django import template\n'), ((4522, 4546), 'django.utils.safestring.mark_safe', 'mark_safe', (['content.value'], {}), '(content.value)\n', (4531, 4546), False, 'from django.utils.safestring import mark_safe\n'), ((8063, 8112), 'django.template.loader.get_template', 'template.loader.get_template', (['self.ADMIN_TEMPLATE'], {}), '(self.ADMIN_TEMPLATE)\n', (8091, 8112), False, 'from django import template\n'), ((814, 834), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (832, 834), False, 'from django.db import transaction\n'), ((863, 917), 'stack_it.models.Template.objects.get_or_create', 'TemplateModel.objects.get_or_create', ([], {'path': 'templatename'}), '(path=templatename)\n', (898, 917), True, 'from stack_it.models import Page, Template as TemplateModel\n'), ((2253, 2273), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (2271, 2273), False, 'from django.db import transaction\n'), ((6051, 6071), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (6069, 6071), False, 'from django.db import transaction\n'), ((8704, 8724), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (8722, 8724), False, 'from django.db import transaction\n'), ((10966, 10986), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (10984, 10986), False, 'from django.db import transaction\n'), ((11007, 11038), 
'stack_it.models.Page.get_or_create', 'Page.get_or_create', ([], {'title': 'title'}), '(title=title)\n', (11025, 11038), False, 'from stack_it.models import Page, Template as TemplateModel\n'), ((619, 639), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (637, 639), False, 'from django.db import transaction\n'), ((672, 726), 'stack_it.models.Template.objects.get_or_create', 'TemplateModel.objects.get_or_create', ([], {'path': 'templatename'}), '(path=templatename)\n', (707, 726), True, 'from stack_it.models import Page, Template as TemplateModel\n'), ((3453, 3473), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (3471, 3473), False, 'from django.db import transaction\n'), ((4449, 4473), 'django.utils.safestring.mark_safe', 'mark_safe', (['content.value'], {}), '(content.value)\n', (4458, 4473), False, 'from django.utils.safestring import mark_safe\n')]
|
from unittest import TestCase
import os
import tempfile
import numpy as np
from keras_trans_mask.backend import keras
from keras_trans_mask import CreateMask, RemoveMask, RestoreMask
class TestMasks(TestCase):
def test_over_fit(self):
input_layer = keras.layers.Input(shape=(None,))
embed_layer = keras.layers.Embedding(
input_dim=10,
output_dim=15,
)(input_layer)
mask_layer = CreateMask(mask_value=9)(input_layer)
embed_layer = RestoreMask()([embed_layer, mask_layer])
removed_layer = RemoveMask()(embed_layer)
conv_layer = keras.layers.Conv1D(
filters=32,
kernel_size=3,
padding='same',
)(removed_layer)
restored_layer = RestoreMask()([conv_layer, embed_layer])
lstm_layer = keras.layers.LSTM(units=5)(restored_layer)
dense_layer = keras.layers.Dense(units=2, activation='softmax')(lstm_layer)
model = keras.models.Model(inputs=input_layer, outputs=dense_layer)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()
x = np.array([
[1, 2, 3, 4, 5, 9, 9, 9],
[6, 7, 8, 9, 9, 9, 9, 9],
] * 1024)
y = np.array([[0], [1]] * 1024)
model_path = os.path.join(tempfile.gettempdir(), 'test_trans_mask_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'CreateMask': CreateMask,
'RemoveMask': RemoveMask,
'RestoreMask': RestoreMask,
})
model.fit(x, y, epochs=10)
|
[
"keras_trans_mask.RestoreMask",
"keras_trans_mask.RemoveMask",
"keras_trans_mask.backend.keras.layers.Embedding",
"tempfile.gettempdir",
"keras_trans_mask.backend.keras.layers.Input",
"keras_trans_mask.backend.keras.models.Model",
"keras_trans_mask.CreateMask",
"numpy.array",
"keras_trans_mask.backend.keras.layers.Conv1D",
"numpy.random.random",
"keras_trans_mask.backend.keras.layers.LSTM",
"keras_trans_mask.backend.keras.layers.Dense",
"keras_trans_mask.backend.keras.models.load_model"
] |
[((266, 299), 'keras_trans_mask.backend.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(None,)'}), '(shape=(None,))\n', (284, 299), False, 'from keras_trans_mask.backend import keras\n'), ((970, 1029), 'keras_trans_mask.backend.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layer', 'outputs': 'dense_layer'}), '(inputs=input_layer, outputs=dense_layer)\n', (988, 1029), False, 'from keras_trans_mask.backend import keras\n'), ((1146, 1215), 'numpy.array', 'np.array', (['([[1, 2, 3, 4, 5, 9, 9, 9], [6, 7, 8, 9, 9, 9, 9, 9]] * 1024)'], {}), '([[1, 2, 3, 4, 5, 9, 9, 9], [6, 7, 8, 9, 9, 9, 9, 9]] * 1024)\n', (1154, 1215), True, 'import numpy as np\n'), ((1263, 1290), 'numpy.array', 'np.array', (['([[0], [1]] * 1024)'], {}), '([[0], [1]] * 1024)\n', (1271, 1290), True, 'import numpy as np\n'), ((1441, 1577), 'keras_trans_mask.backend.keras.models.load_model', 'keras.models.load_model', (['model_path'], {'custom_objects': "{'CreateMask': CreateMask, 'RemoveMask': RemoveMask, 'RestoreMask': RestoreMask\n }"}), "(model_path, custom_objects={'CreateMask':\n CreateMask, 'RemoveMask': RemoveMask, 'RestoreMask': RestoreMask})\n", (1464, 1577), False, 'from keras_trans_mask.backend import keras\n'), ((322, 373), 'keras_trans_mask.backend.keras.layers.Embedding', 'keras.layers.Embedding', ([], {'input_dim': '(10)', 'output_dim': '(15)'}), '(input_dim=10, output_dim=15)\n', (344, 373), False, 'from keras_trans_mask.backend import keras\n'), ((443, 467), 'keras_trans_mask.CreateMask', 'CreateMask', ([], {'mask_value': '(9)'}), '(mask_value=9)\n', (453, 467), False, 'from keras_trans_mask import CreateMask, RemoveMask, RestoreMask\n'), ((503, 516), 'keras_trans_mask.RestoreMask', 'RestoreMask', ([], {}), '()\n', (514, 516), False, 'from keras_trans_mask import CreateMask, RemoveMask, RestoreMask\n'), ((568, 580), 'keras_trans_mask.RemoveMask', 'RemoveMask', ([], {}), '()\n', (578, 580), False, 'from keras_trans_mask import CreateMask, RemoveMask, 
RestoreMask\n'), ((615, 677), 'keras_trans_mask.backend.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=32, kernel_size=3, padding='same')\n", (634, 677), False, 'from keras_trans_mask.backend import keras\n'), ((765, 778), 'keras_trans_mask.RestoreMask', 'RestoreMask', ([], {}), '()\n', (776, 778), False, 'from keras_trans_mask import CreateMask, RemoveMask, RestoreMask\n'), ((827, 853), 'keras_trans_mask.backend.keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': '(5)'}), '(units=5)\n', (844, 853), False, 'from keras_trans_mask.backend import keras\n'), ((892, 941), 'keras_trans_mask.backend.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(2)', 'activation': '"""softmax"""'}), "(units=2, activation='softmax')\n", (910, 941), False, 'from keras_trans_mask.backend import keras\n'), ((1325, 1346), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1344, 1346), False, 'import tempfile\n'), ((1374, 1392), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1390, 1392), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.stats import entropy
from bisect import bisect
from scipy import stats
from scipy.stats import median_absolute_deviation as mad
from sklearn.metrics import r2_score, mean_squared_error
from pyapprox.multivariate_polynomials import conditional_moments_of_polynomial_chaos_expansion as cond_moments
def group_fix(partial_result, func, x, y_true, x_default,
rand, pool_results, file_exist=False):
"""
Function for compare results between conditioned and unconditioned QoI.
Fix parameters from the least influential group
based on results from partially sorting.
Four error measure types will be returned.
Parameters
----------
partial_result : dict,
dictionary of parameter groups, results of partial sort
func : list of function,
function for analysis (analytical formula or model)
x : np.array,
Input with shape of N * D where N is sampling size and
D is the number of parameters
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
x_default : int, float, list,
Default values of x as a scalar or list of scalars
rand : np.ndarray,
Resample index in bootstrap, shape of R * N,
where R is the number of resamples
pool_results : dict,
Index of fixed parameters and the corresponding results
file_exist : bool (default: False),
If true, reads cached partial-ranking results from a file.
Otherwise, calculates results.
Returns
----------
Tuple of:
dict_return: dictionary of uncertainty measures
mae and the uncertain ranges:
Changes in absolute mean error of the func results due to fixing
parameters
var and the uncertain ranges :
Changes in variance of the func results due to fixing parameters
ks measures and the uncertain ranges :
Changes in pearson correlation coefficients
of the func results due to fixing parameters
pool_results:
"""
num_group = len(partial_result) - 1
# store results from fixing parameters in dict
cf_upper = {i: None for i in range(num_group)}
cf_lower, cv, ks, pvalue = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_upper_upper, cf_upper_lower, ks_upper, pvalue_upper = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_lower_lower, cf_lower_upper, ks_lower, pvalue_lower = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_width, cf_width_lower, cf_width_upper, cond_mean = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
ind_fix = []
conf_level = [0.025, 0.975]
measures_all = [cf_upper, cf_lower, ks, pvalue, cv,
cf_upper_upper, cf_upper_lower, cf_lower_upper,
cf_lower_lower, ks_lower, ks_upper,
pvalue_lower, pvalue_upper,
cf_width, cf_width_lower,
cf_width_upper, cond_mean]
for i in range(num_group, -1, -1):
if file_exist:
try:
ind_fix.extend(partial_result[str(i)])
except NameError:
ind_fix = partial_result[str(i)]
else:
try:
ind_fix.extend(partial_result[i])
except NameError:
ind_fix = partial_result[i]
ind_fix.sort()
x_temp = x_default[ind_fix]
# check whether results existing
skip_calcul = results_exist(ind_fix, pool_results)
# print(skip_calcul)
if skip_calcul == False:
x_copy = np.copy(x)
x_copy[ind_fix, :] = x_temp
# compare results with insignificant parameters fixed
Nresample = rand.shape[0]
num_func = len(func)
total_resample = num_func * Nresample
pvalue_bt, ks_bt, cf_upper_bt, cf_lower_bt, cf_width_bt, y_true_width = \
np.zeros(total_resample), np.zeros(total_resample), np.zeros(total_resample), np.zeros(total_resample), \
np.zeros(total_resample), np.zeros(total_resample)
## Add the bootstrap of PCE
for jj in range(num_func):
fun = func[jj]
results_fix = fun(x_copy).flatten()
for ii in range(Nresample):
I = rand[ii]
ind_resample = jj * Nresample + ii
[cf_lower_bt[ind_resample], cf_upper_bt[ind_resample], ks_bt[ind_resample], pvalue_bt[ind_resample], y_true_width[ind_resample]] \
= error_measure(I, y_true[jj], results_fix, conf_level)
cf_width_bt = (cf_upper_bt - cf_lower_bt) / y_true_width
# End for
cf_upper[i], cf_lower[i], ks[i], pvalue[i] = cf_upper_bt.mean(), cf_lower_bt.mean(), ks_bt.mean(), pvalue_bt.mean()
cf_upper_lower[i], cf_upper_upper[i] = np.quantile(cf_upper_bt, conf_level)
cf_lower_lower[i], cf_lower_upper[i] = np.quantile(cf_lower_bt, conf_level)
cf_width[i], cf_width_lower[i], cf_width_upper[i] = cf_width_bt.mean(), *np.quantile(cf_width_bt, conf_level)
ks_lower[i], ks_upper[i] = np.quantile(ks_bt, conf_level)
pvalue_lower[i], pvalue_upper[i] = np.quantile(pvalue_bt, conf_level)
cond_mean[i] = results_fix.mean()
if len(ind_fix) == x.shape[0]:
cv[i] = 0
# cond_mean[i] = func(x_temp)[0][0]
else:
mean, variance = cond_moments(fun, x_temp, ind_fix, return_variance=True)
# cond_mean[i] = mean[0]
cv[i] = (np.sqrt(variance) / mean)[0]
# End If
# update pool_results
measure_list = [measure_ele[i] for measure_ele in measures_all]
pool_results = pool_update(ind_fix, measure_list, pool_results)
else:
# map index to calculated values
for ele in range(len(measures_all)):
measures_all[ele][i] = skip_calcul[ele]
# End if
# End for()
names = ['cf_upper', 'cf_lower', 'ks', 'pvalue', 'cv',
'cf_upper_upper', 'cf_upper_lower', 'cf_lower_upper',
'cf_lower_lower', 'ks_lower', 'ks_upper',
'pvalue_lower', 'pvalue_upper',
'cf_width', 'cf_width_lower',
'cf_width_upper', 'cond_mean']
dict_return = dict(zip(names, measures_all))
return dict_return, pool_results
def error_measure(I, y_true, results_fix, conf_level):
"""
Calculate the error measures with a resample dataset.
Parameters:
----------
I : np.array
the random index of each bootstrap
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
result_fix : list,
Conditional results with all some x fixed
conf_level: list, percentiles used to calculate the confidence intervals
Returns:
----------
List, values of uncertainty measures
"""
y_true_resample = y_true[I]
results_fix_resample = results_fix[I]
cf_lower_temp, cf_upper_temp = np.quantile(results_fix_resample, conf_level)
ks_bt_temp, pvalue_bt_temp = stats.ks_2samp(y_true_resample, results_fix_resample)
y_true_width_temp = np.quantile(y_true_resample, conf_level[1]) - np.quantile(y_true_resample, conf_level[0])
return [cf_lower_temp, cf_upper_temp, ks_bt_temp, pvalue_bt_temp, y_true_width_temp]
def uncond_cal(y_true, conf_level, rand):
"""
Calculate the unconditional results
Parameters:
----------
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
conf_level: list, percentiles used to calculate the confidence intervals
rand : np.ndarray,
Resample index in bootstrap, shape of R * N,
where R is the number of resamples
Returns:
----------
"""
# if rand is None:
# y_true_bt = y_true
# elif isinstance(rand, np.ndarray):
# y_true_bt = y_true[rand]
# else:
# AssertionError
y_true_bt = np.zeros(shape=(y_true.shape[0], rand.shape[0], y_true.shape[1]))
# import pdb; pdb.set_trace()
for ii in range(y_true.shape[0]):
y_true_bt[ii] = y_true[ii][rand]
uncond_cf_bt = np.quantile(y_true_bt, conf_level, axis=2)
uncond_cf_low, uncond_cf_up = {}, {}
uncond_cf_low['mean'] = uncond_cf_bt[0].mean()
uncond_cf_low['low'], uncond_cf_low['up'] = np.quantile(uncond_cf_bt[0], conf_level)
uncond_cf_up['mean'] = uncond_cf_bt[1].mean()
uncond_cf_up['low'], uncond_cf_up['up'] = np.quantile(uncond_cf_bt[1], conf_level)
uncond_dict = {
'uncond_cf_low' : uncond_cf_low,
'uncond_cf_up' : uncond_cf_up,
'uncond_mean': y_true_bt.mean()
}
return uncond_dict
def results_exist(parms_fixed, pool_results):
"""
Helper function to determine whether results exist.
Parameters
----------
parms_fixed : list,
Index of parameters to fix
pool_results : dict,
Contains both index of parameters fixed and the corresponding results
Returns
-------
skip_cal : bool
"""
if pool_results == {}:
skip_cal = False
elif parms_fixed in pool_results['parms']:
index_measure = pool_results['parms'].index(parms_fixed)
skip_cal = pool_results[f'measures_{index_measure}']
else:
skip_cal = False
return skip_cal
def pool_update(parms_fixed, measure_list, pool_results):
"""Update pool_results with new values.
Parameters
----------
parms_fixed : list,
Index of parameters to fix
measure_list : list,
Measures newly calculated for parameters in parms_fixed
pool_results : dict,
Contains both index of parameters fixed and the corresponding results
Returns
----------
Updated pool_results
"""
try:
pool_results['parms'].append(parms_fixed[:])
except KeyError:
pool_results['parms'] = [parms_fixed[:]]
index_measure = pool_results['parms'].index(parms_fixed)
pool_results[f'measures_{index_measure}'] = measure_list
return pool_results
|
[
"numpy.quantile",
"pyapprox.multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion",
"numpy.copy",
"numpy.zeros",
"scipy.stats.ks_2samp",
"numpy.sqrt"
] |
[((7295, 7340), 'numpy.quantile', 'np.quantile', (['results_fix_resample', 'conf_level'], {}), '(results_fix_resample, conf_level)\n', (7306, 7340), True, 'import numpy as np\n'), ((7374, 7427), 'scipy.stats.ks_2samp', 'stats.ks_2samp', (['y_true_resample', 'results_fix_resample'], {}), '(y_true_resample, results_fix_resample)\n', (7388, 7427), False, 'from scipy import stats\n'), ((8267, 8332), 'numpy.zeros', 'np.zeros', ([], {'shape': '(y_true.shape[0], rand.shape[0], y_true.shape[1])'}), '(shape=(y_true.shape[0], rand.shape[0], y_true.shape[1]))\n', (8275, 8332), True, 'import numpy as np\n'), ((8465, 8507), 'numpy.quantile', 'np.quantile', (['y_true_bt', 'conf_level'], {'axis': '(2)'}), '(y_true_bt, conf_level, axis=2)\n', (8476, 8507), True, 'import numpy as np\n'), ((8657, 8697), 'numpy.quantile', 'np.quantile', (['uncond_cf_bt[0]', 'conf_level'], {}), '(uncond_cf_bt[0], conf_level)\n', (8668, 8697), True, 'import numpy as np\n'), ((8797, 8837), 'numpy.quantile', 'np.quantile', (['uncond_cf_bt[1]', 'conf_level'], {}), '(uncond_cf_bt[1], conf_level)\n', (8808, 8837), True, 'import numpy as np\n'), ((7452, 7495), 'numpy.quantile', 'np.quantile', (['y_true_resample', 'conf_level[1]'], {}), '(y_true_resample, conf_level[1])\n', (7463, 7495), True, 'import numpy as np\n'), ((7498, 7541), 'numpy.quantile', 'np.quantile', (['y_true_resample', 'conf_level[0]'], {}), '(y_true_resample, conf_level[0])\n', (7509, 7541), True, 'import numpy as np\n'), ((3705, 3715), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (3712, 3715), True, 'import numpy as np\n'), ((5033, 5069), 'numpy.quantile', 'np.quantile', (['cf_upper_bt', 'conf_level'], {}), '(cf_upper_bt, conf_level)\n', (5044, 5069), True, 'import numpy as np\n'), ((5121, 5157), 'numpy.quantile', 'np.quantile', (['cf_lower_bt', 'conf_level'], {}), '(cf_lower_bt, conf_level)\n', (5132, 5157), True, 'import numpy as np\n'), ((5319, 5349), 'numpy.quantile', 'np.quantile', (['ks_bt', 'conf_level'], {}), '(ks_bt, 
conf_level)\n', (5330, 5349), True, 'import numpy as np\n'), ((5397, 5431), 'numpy.quantile', 'np.quantile', (['pvalue_bt', 'conf_level'], {}), '(pvalue_bt, conf_level)\n', (5408, 5431), True, 'import numpy as np\n'), ((4043, 4067), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4051, 4067), True, 'import numpy as np\n'), ((4069, 4093), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4077, 4093), True, 'import numpy as np\n'), ((4095, 4119), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4103, 4119), True, 'import numpy as np\n'), ((4121, 4145), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4129, 4145), True, 'import numpy as np\n'), ((4161, 4185), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4169, 4185), True, 'import numpy as np\n'), ((4187, 4211), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4195, 4211), True, 'import numpy as np\n'), ((5651, 5707), 'pyapprox.multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion', 'cond_moments', (['fun', 'x_temp', 'ind_fix'], {'return_variance': '(True)'}), '(fun, x_temp, ind_fix, return_variance=True)\n', (5663, 5707), True, 'from pyapprox.multivariate_polynomials import conditional_moments_of_polynomial_chaos_expansion as cond_moments\n'), ((5243, 5279), 'numpy.quantile', 'np.quantile', (['cf_width_bt', 'conf_level'], {}), '(cf_width_bt, conf_level)\n', (5254, 5279), True, 'import numpy as np\n'), ((5791, 5808), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (5798, 5808), True, 'import numpy as np\n')]
|
import re
from datetime import date
import pyexcel
def convert_to_date(datetime: str):
"""Convert the date-time string(dd/mm/yyyy) to date time object"""
if type(datetime) == str:
date_, *_ = datetime.split()
dd, mm, yyyy = [int(val) for val in date_.split("/")]
return date(year=yyyy, month=mm, day=dd)
return datetime
def rename_header(headers: list) -> list:
"""This function is replacing all the column names of the given excel sheet with the field names of the Type8"""
for i in range(len(headers)):
headers[i] = headers[i].replace("Transaction ID", "transaction_id") \
.replace("Value Date", "transaction_value_date") \
.replace("Txn Posted Date", "transaction_posted_date") \
.replace("Description", "mode_of_payment") \
.replace("Cr/Dr", "credit") \
.replace("Transaction Amount(INR)", "transaction_amount")
return headers
def xlparser(xlfile):
"""Parse the excel data coming from forms"""
xl = pyexcel.get_book(file_type="xls", file_content=xlfile)
sheets = tuple(xl.dict.keys()) # get all the sheet names from the excel file
rows = xl.dict.get(sheets[0])
headers = rename_header(rows[6][1:]) # get all the data from the first sheet
for row in rows[7:]:
data = dict(zip(headers, row[1:]))
data["mode_of_payment"] = (
re.findall(r"RAZORPAY|MSWIPE|CCARD|GOOGLE|AXISROOMS|ICICI|SELF|FINO|MAKEMYTRIP|IBIBO|Paytm",
data.get("mode_of_payment"))[0]
)
data['transaction_value_date'] = convert_to_date(data['transaction_value_date'])
data['transaction_posted_date'] = convert_to_date(data['transaction_posted_date'])
data.pop("ChequeNo.")
yield data
if __name__ == "__main__":
with open("./ICICI_648805052604_sample.xls", "rb") as f:
xlparser(f.read())
|
[
"pyexcel.get_book",
"datetime.date"
] |
[((1033, 1087), 'pyexcel.get_book', 'pyexcel.get_book', ([], {'file_type': '"""xls"""', 'file_content': 'xlfile'}), "(file_type='xls', file_content=xlfile)\n", (1049, 1087), False, 'import pyexcel\n'), ((304, 337), 'datetime.date', 'date', ([], {'year': 'yyyy', 'month': 'mm', 'day': 'dd'}), '(year=yyyy, month=mm, day=dd)\n', (308, 337), False, 'from datetime import date\n')]
|
import time
import pickle
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import os.path
def load_cookies(driver):
for cookie in pickle.load(open("TwitterCookies.pkl", "rb")):
driver.add_cookie(cookie)
def save_cookies(driver):
pickle.dump(driver.get_cookies(), open("TwitterCookies.pkl", "wb"))
# read login details from file
def account_info():
with open("account_info.txt", "r") as file:
info = file.read().split()
email = info[0]
password = info[1]
file.close()
return email, password
def login(driver, email, password):
email_xpath = '/html/body/div/div/div/div[2]/main/div/div/div[2]/form/div/div[1]/label/div/div[2]/div/input'
password_xpath = '/html/body/div/div/div/div[2]/main/div/div/div[2]/form/div/div[2]/label/div/div[2]/div/input'
login_xpath = '/html/body/div/div/div/div[2]/main/div/div/div[2]/form/div/div[3]/div/div/span/span'
time.sleep(3)
driver.find_element_by_xpath(email_xpath).send_keys(email)
time.sleep(0.5)
driver.find_element_by_xpath(password_xpath).send_keys(password)
time.sleep(0.5)
driver.find_element_by_xpath(login_xpath).click()
def tweet_picture(author_name, picture_path):
options = Options()
options.add_argument("start-maximized")
driver = webdriver.Firefox(options=options)
driver.get("https://twitter.com/login")
# check is user login before
if os.path.isfile('TwitterCookies.pkl'):
time.sleep(1)
load_cookies(driver)
else:
email, password = account_info()
login(driver, email, password)
save_cookies(driver)
# xpath's for sharing tweets
tweet_xpath = '/html/body/div/div/div/div[2]/header/div/div/div/div[1]/div[3]/a/div'
message_xpath = '/html/body/div/div/div/div[1]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[' \
'1]/div/div/div/div/div[2]/div[1]/div/div/div/div/div/div/div/div/div/div[1]/div/div/div/div[' \
'2]/div '
media_xpath = '/html/body/div/div/div/div[1]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[1]/div/div/div/div/div[2]/div[4]/div/div/div[1]/input'
post_xpath = '/html/body/div/div/div/div[1]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[' \
'1]/div/div/div/div/div[2]/div[4]/div/div/div[2]/div[4]/div/span/span '
# sharing tweet steps
time.sleep(4)
driver.find_element_by_xpath(tweet_xpath).click()
time.sleep(1)
driver.find_element_by_xpath(message_xpath).send_keys(f"Author: {author_name}")
time.sleep(1)
file_upload_button = driver.find_element_by_xpath(media_xpath)
file_upload_button.send_keys(picture_path)
time.sleep(2)
driver.find_element_by_xpath(post_xpath).click()
|
[
"selenium.webdriver.firefox.options.Options",
"selenium.webdriver.Firefox",
"time.sleep"
] |
[((990, 1003), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1000, 1003), False, 'import time\n'), ((1073, 1088), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1083, 1088), False, 'import time\n'), ((1164, 1179), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1174, 1179), False, 'import time\n'), ((1301, 1310), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (1308, 1310), False, 'from selenium.webdriver.firefox.options import Options\n'), ((1370, 1404), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'options': 'options'}), '(options=options)\n', (1387, 1404), False, 'from selenium import webdriver\n'), ((2531, 2544), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (2541, 2544), False, 'import time\n'), ((2605, 2618), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2615, 2618), False, 'import time\n'), ((2709, 2722), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2719, 2722), False, 'import time\n'), ((2844, 2857), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2854, 2857), False, 'import time\n'), ((1543, 1556), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1553, 1556), False, 'import time\n')]
|
from core.argo.core.argoLogging import get_logger
tf_logging = get_logger()
import numpy as np
from core.argo.core.hooks.AbstractImagesReconstructHook import AbstractImagesReconstructHook
from core.argo.core.utils.ImagesSaver import ImagesSaver
class ImagesReconstructHook(AbstractImagesReconstructHook):
def do_when_triggered(self, run_context, run_values):
# tf_logging.info("trigger for ImagesGeneratorHook s" + str(global_step) + " s/e" + str(global_step/global_epoch)+ " e" + str(global_epoch))
tf_logging.info("trigger for ImagesReconstructHook")
self.load_images(run_context.session)
for ds_key in self._images:
images, images_target = self._images[ds_key][1:3]
zs, means = self._model.encode(images, run_context.session)
reconstructed_images_m_sample, reconstructed_images_m_means = self._model.decode(means, run_context.session)
reconstructed_images_z_sample, reconstructed_images_z_means = self._model.decode(zs, run_context.session)
rows = int(np.ceil(len(images) / self._n_images_columns))
panel = [[] for x in range(rows * 6)]
c = 0
for i in range(0, 6 * rows, 6):
for j in range(self._n_images_columns):
panel[i].append(images[c])
panel[i + 1].append(images_target[c])
panel[i + 2].append(reconstructed_images_m_means[c])
panel[i + 3].append(reconstructed_images_m_sample[c])
panel[i + 4].append(reconstructed_images_z_means[c])
panel[i + 5].append(reconstructed_images_z_sample[c])
if c == len(images) - 1:
break
else:
c = c + 1
# "[1st] original image [2nd] recostructed mean [3rd] reconstr z"
self.images_saver.save_images(panel,
fileName="reconstruction_" + str(ds_key) + "_" + self._time_ref_shortstr + "_" + str(
self._time_ref).zfill(4),
title=self._plot_title,
fontsize=9)
|
[
"core.argo.core.argoLogging.get_logger"
] |
[((64, 76), 'core.argo.core.argoLogging.get_logger', 'get_logger', ([], {}), '()\n', (74, 76), False, 'from core.argo.core.argoLogging import get_logger\n')]
|
import sys
sys.path.append('../')
import bz2, os
import random, string
import importlib
import _pickle as pickle
from datetime import datetime, timedelta
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# OS & list MANAGEMENT FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
def find(name, path):
    """Walk *path* recursively and return the full path of the first file
    named *name*; return None when there is no match."""
    for dirpath, _subdirs, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
    return None
def getFilepaths(directory):
    """
    Walk the tree rooted at *directory* top-down and return a flat list
    containing the full path of every file found.
    """
    return [
        os.path.join(dirpath, fname)
        for dirpath, _dirnames, fnames in os.walk(directory)
        for fname in fnames
    ]
def absoluteFilePaths(directory):
    """Yield the absolute path of every file under *directory* (recursive)."""
    for dirpath, _, filenames in os.walk(directory):
        yield from (os.path.abspath(os.path.join(dirpath, name)) for name in filenames)
def import_package_string(package_string):
    """Import and return the module named by *package_string* (dotted path,
    e.g. ``day_trader.models.LU01_A3`` — no ``.py`` suffix)."""
    return importlib.import_module(package_string)
def genrs(length=10):
    """Return a random string of *length* letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choices(alphabet, k=length))
def remove_values_from_list(the_list, val):
    """Return a new list equal to *the_list* with every occurrence of *val* dropped."""
    return [item for item in the_list if item != val]
def chunks(l,n):
    """Yield successive slices of *l* of size *n* (the last may be shorter)."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def sizeFirstBin(data, col, minimum_bin_size, vals=None):
    '''Bin the data based on the vals, iterates through each val assigning the corresponding rows to a bin while that bin size has not reached the minimum_bin_size
    __________
    parameters
    - data : pd.DataFrame
    - col : the column to bin based on
    - minimum_bin_size : int. Each bin must have at least this size
    - vals : list. Will only bin the values in this list. The default is all the unique values of "col"

    Returns a dict mapping bin number (1-based) -> list of values in that bin.
    Note: the last bin may end up smaller than minimum_bin_size if the data
    runs out.
    '''
    if vals is None:
        values = sorted(data[col].unique())
    else:
        values = vals
    bins = {}
    bin_number = 1
    bin_total = 0
    # value -> number of rows carrying that value
    vc = dict(data[col].value_counts())
    for val in values:
        if bin_total < minimum_bin_size:
            # Current bin is still under the minimum: keep filling it.
            # (The original duplicated this append in two identical branches.)
            bins.setdefault(bin_number, []).append(val)
            bin_total += vc[val]
        else:
            # Current bin reached the minimum: start a new bin with this value.
            bin_number += 1
            bins[bin_number] = [val]
            bin_total = vc[val]
    return bins
def nondups(items : list):
    """Print the list/unique counts and return True when *items* contains
    no duplicate entries."""
    unique_items = set(items)
    print('List length:', len(items))
    print('Unique items:', len(unique_items))
    return len(unique_items) == len(items)
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Storage & COMPRESSION FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Article on pickling and compressed pickling functions
# https://betterprogramming.pub/load-fast-load-big-with-compressed-pickles-5f311584507e
def full_pickle(title, data):
    '''Pickle *data* to the file ``<title>.pickle``.

    Uses a ``with`` block so the file handle is closed even when pickling
    raises (the original leaked the handle on error).
    '''
    with open(title + '.pickle', 'wb') as pikd:
        pickle.dump(data, pikd)
def loosen(file):
    '''Load and return the object pickled in *file*.

    Uses a ``with`` block so the handle is closed even if unpickling raises
    (the original leaked the handle on error).
    '''
    with open(file, 'rb') as pikd:
        return pickle.load(pikd)
def compressed_pickle(title, data):
    '''
    Serialize *data* with pickle and write it bz2-compressed to ``<title>.pbz2``.
    __________
    parameters
    - title : output file name without extension (``.pbz2`` is appended automatically)
    - data : object you want to save
    '''
    out = bz2.BZ2File(title + '.pbz2', 'w')
    try:
        pickle.dump(data, out)
    finally:
        out.close()
def decompress_pickle(filename):
    '''Load and return the object stored in *filename* (a ``.pbz2`` file,
    extension included).

    Uses a ``with`` block — the original never closed the BZ2File handle,
    leaking the underlying file descriptor.
    '''
    with bz2.BZ2File(filename, 'rb') as fh:
        return pickle.load(fh)
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Time Management FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Time Stuff
def cuttomin(x):
    """Format timestamp *x* down to minute precision, discarding seconds
    and anything finer."""
    minute_fmt = '%m-%d %H:%M'
    return datetime.strftime(x, minute_fmt)
def cuttohrs(x):
    """Format timestamp *x* down to hour precision, discarding minutes
    and anything finer."""
    hour_fmt = '%m-%d %H'
    return datetime.strftime(x, hour_fmt)
def cuttodays(x):
    """Format timestamp *x* down to day precision (two-digit year),
    discarding the time of day."""
    day_fmt = '%y-%m-%d'
    return datetime.strftime(x, day_fmt)
def datetime_range(start, end, delta):
    """Yield the times from *start* (inclusive) up to *end* (exclusive)
    in steps of *delta*."""
    cursor = start
    while cursor < end:
        yield cursor
        cursor += delta
def prev_weekday(adate):
    """Return the latest weekday (Mon-Fri) strictly before *adate*."""
    one_day = timedelta(days=1)
    candidate = adate - one_day
    while candidate.weekday() > 4:  # Mon-Fri are 0-4; skip Sat/Sun
        candidate -= one_day
    return candidate
|
[
"sys.path.append",
"_pickle.load",
"datetime.datetime.strftime",
"importlib.import_module",
"_pickle.dump",
"os.walk",
"random.choices",
"bz2.BZ2File",
"datetime.timedelta",
"os.path.join"
] |
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((583, 596), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (590, 596), False, 'import bz2, os\n'), ((1107, 1125), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1114, 1125), False, 'import bz2, os\n'), ((1530, 1548), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1537, 1548), False, 'import bz2, os\n'), ((1817, 1856), 'importlib.import_module', 'importlib.import_module', (['package_string'], {}), '(package_string)\n', (1840, 1856), False, 'import importlib\n'), ((4277, 4300), '_pickle.dump', 'pickle.dump', (['data', 'pikd'], {}), '(data, pikd)\n', (4288, 4300), True, 'import _pickle as pickle\n'), ((4429, 4446), '_pickle.load', 'pickle.load', (['pikd'], {}), '(pikd)\n', (4440, 4446), True, 'import _pickle as pickle\n'), ((4957, 4984), 'bz2.BZ2File', 'bz2.BZ2File', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (4968, 4984), False, 'import bz2, os\n'), ((4996, 5013), '_pickle.load', 'pickle.load', (['data'], {}), '(data)\n', (5007, 5013), True, 'import _pickle as pickle\n'), ((5440, 5475), 'datetime.datetime.strftime', 'datetime.strftime', (['x', '"""%m-%d %H:%M"""'], {}), "(x, '%m-%d %H:%M')\n", (5457, 5475), False, 'from datetime import datetime, timedelta\n'), ((5583, 5615), 'datetime.datetime.strftime', 'datetime.strftime', (['x', '"""%m-%d %H"""'], {}), "(x, '%m-%d %H')\n", (5600, 5615), False, 'from datetime import datetime, timedelta\n'), ((5716, 5748), 'datetime.datetime.strftime', 'datetime.strftime', (['x', '"""%y-%m-%d"""'], {}), "(x, '%y-%m-%d')\n", (5733, 5748), False, 'from datetime import datetime, timedelta\n'), ((6057, 6074), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6066, 6074), False, 'from datetime import datetime, timedelta\n'), ((1932, 1994), 'random.choices', 'random.choices', (['(string.ascii_letters + string.digits)'], {'k': 'length'}), 
'(string.ascii_letters + string.digits, k=length)\n', (1946, 1994), False, 'import random, string\n'), ((4785, 4818), 'bz2.BZ2File', 'bz2.BZ2File', (["(title + '.pbz2')", '"""w"""'], {}), "(title + '.pbz2', 'w')\n", (4796, 4818), False, 'import bz2, os\n'), ((4834, 4854), '_pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (4845, 4854), True, 'import _pickle as pickle\n'), ((6141, 6158), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6150, 6158), False, 'from datetime import datetime, timedelta\n'), ((643, 667), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (655, 667), False, 'import bz2, os\n'), ((1252, 1280), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (1264, 1280), False, 'import bz2, os\n'), ((1612, 1636), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (1624, 1636), False, 'import bz2, os\n')]
|
"""Fetch JIRA issues from database
Defines a schema using SQLalchemy
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from . import jiraschema
def get_issues(user, password, host="localhost", database="jira"):
    """Generator over all JIRA issues stored in a MySQL database.

    Yields the total issue count first (so callers can size a progress bar),
    then each issue converted to a dict. Records that fail conversion are
    logged and skipped.
    """
    connstr = 'mysql+pymysql://{}:{}@{}/{}?charset=utf8'.format(user, password, host, database)
    engine = sqlalchemy.create_engine(connstr, echo=False)
    conn = engine.connect()
    conn.execution_options(stream_results=True)
    session = sessionmaker(bind=engine)()
    # This is a bit of a hack. How else are you supposed to make a progress bar with a generator?
    yield session.query(jiraschema.Issue).count()
    for issue in session.query(jiraschema.Issue):
        try:
            yield issue.as_dict()
        except Exception:
            # Do not try to attach the sqlalchemy record as extra info. There be dragons.
            logging.error("Uncaught exception trying to process a record. Oh well. Too bad.", exc_info=True)
|
[
"sqlalchemy.create_engine",
"sqlalchemy.orm.sessionmaker",
"logging.error"
] |
[((486, 531), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['connstr'], {'echo': '(False)'}), '(connstr, echo=False)\n', (510, 531), False, 'import sqlalchemy\n'), ((634, 659), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (646, 659), False, 'from sqlalchemy.orm import sessionmaker\n'), ((1077, 1182), 'logging.error', 'logging.error', (['"""Uncaught exception trying to process a record. Oh well. Too bad."""'], {'exc_info': '(True)'}), "(\n 'Uncaught exception trying to process a record. Oh well. Too bad.',\n exc_info=True)\n", (1090, 1182), False, 'import logging\n')]
|
import numpy as np
import gpflow
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from mpl_toolkits.mplot3d import axes3d, Axes3D
from BoManifolds.Riemannian_utils.sphere_utils import logmap
from BoManifolds.kernel_utils.kernels_sphere_tf import SphereGaussianKernel, SphereLaplaceKernel
from BoManifolds.plot_utils.manifold_plots import plot_sphere
# Global matplotlib configuration: render all figure text with LaTeX
# (requires a working LaTeX installation) and load the bm package so
# titles can use bold math symbols.
plt.rcParams['text.usetex'] = True  # use Latex font for plots
plt.rcParams['text.latex.preamble'] = [r'\usepackage{bm}']
"""
This example shows the use of different kernels for the hypersphere manifold S^n, used for Gaussian process regression.
The tested function corresponds to a Gaussian distribution with a mean defined on the sphere and a covariance defined on
the tangent space of the mean. Training data are generated "far" from the mean. The trained Gaussian process is then
used to determine the value of the function from test data sampled around the mean of the test function.
The kernels used are:
- Manifold-RBF kernel (geometry-aware)
- Laplace kernel (geometry-aware)
- Euclidean kernel (classical geometry-unaware)
This example works with GPflow version = 0.5 (used by GPflowOpt).
Authors: <NAME> and <NAME>, 2019
License: MIT
Contact: <EMAIL>, <EMAIL>
"""
def test_function(x, mu_test_function):
    """Evaluate a Gaussian-like density at the sphere point *x*: the point is
    mapped to the tangent space at *mu_test_function* via ``logmap`` and fed
    into a Gaussian with a fixed covariance (uses the global ``dim``)."""
    # Fixed covariance of the test density, defined in the tangent space.
    sigma_test_fct = np.array([[0.6, 0.2, 0], [0.2, 0.3, -0.01], [0, -0.01, 0.2]])
    inv_sigma = np.linalg.inv(sigma_test_fct)
    det_sigma = np.linalg.det(sigma_test_fct)
    # Project x onto the tangent space of the mean.
    x_proj = logmap(x, mu_test_function)
    normalization = np.sqrt((2 * np.pi) ** dim * det_sigma)
    return np.exp(-0.5 * x_proj.T.dot(inv_sigma.dot(x_proj))) / normalization
def plot_gaussian_process_prediction(figure_handle, mu, test_data, mean_est, mu_test_fct, title):
    """Plot function-value estimates as colored points on the unit sphere.

    Parameters:
        figure_handle: matplotlib figure to attach the 3D axis to.
        mu: mean of the Gaussian test function, plotted as a green diamond.
        test_data: points on the sphere to plot (one row per point).
        mean_est: estimated function values, used to color each point.
        mu_test_fct: mean used to normalize the color scale.
        title: figure title (LaTeX string).
    """
    ax = Axes3D(figure_handle)
    # Make the panes transparent
    ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    # Make the grid lines transparent (uses matplotlib's private _axinfo)
    ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
    ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
    ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
    # Remove axis
    ax._axis3don = False
    # Initial view
    # ax.view_init(elev=10, azim=-20.)  # (default: elev=30, azim=-60)
    ax.view_init(elev=10, azim=30.)  # (default: elev=30, azim=-60)
    # Plot sphere
    plot_sphere(ax, alpha=0.4)
    # Plot training data on the manifold, colored by estimated value
    # normalized by the density's value at its own mean.
    plt_scale_fact = test_function(mu_test_fct, mu_test_fct)[0, 0]
    nb_data_test = test_data.shape[0]
    for n in range(nb_data_test):
        ax.scatter(test_data[n, 0], test_data[n, 1], test_data[n, 2], c=pl.cm.inferno(mean_est[n] / plt_scale_fact))
    # Plot mean of Gaussian test function
    ax.scatter(mu[0], mu[1], mu[2], c='g', marker='D')
    plt.title(title, size=25)
if __name__ == "__main__":
    # Fixed seed so the sampled training/test points are reproducible.
    np.random.seed(1234)
    # Define the test function mean
    mu_test_fct = np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0])
    # Generate random data on the sphere
    nb_data = 20
    dim = 3
    mean = np.array([1, 0, 0])
    mean = mean / np.linalg.norm(mean)
    fact_cov = 0.1
    cov = fact_cov * np.eye(dim)
    data = np.random.multivariate_normal(mean, cov, nb_data)
    # Project the Gaussian samples back onto the unit sphere.
    x_man = data / np.linalg.norm(data, axis=1)[:, None]
    y_train = np.zeros((nb_data,1))
    for n in range(nb_data):
        y_train[n] = test_function(x_man[n], mu_test_fct)
    # Generate test data on the sphere
    nb_data_test = 10
    mean_test = mu_test_fct
    # NOTE(review): this divides by the norm of `mean`, not `mean_test` —
    # harmless here because both are unit vectors, but confirm the intent.
    mean_test = mean_test / np.linalg.norm(mean)
    fact_cov = 0.1
    cov_test = fact_cov * np.eye(dim)
    data = np.random.multivariate_normal(mean_test, cov_test, nb_data_test)
    x_man_test = data / np.linalg.norm(data, axis=1)[:, None]
    y_test = np.zeros((nb_data_test, 1))
    for n in range(nb_data_test):
        y_test[n] = test_function(x_man_test[n], mu_test_fct)
    # Plot training data - 3D figure
    fig = plt.figure(figsize=(5, 5))
    plot_gaussian_process_prediction(fig, mu_test_fct, x_man, y_train, mu_test_fct, r'Training data')
    # Plot true test data - 3D figure
    fig = plt.figure(figsize=(5, 5))
    plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, y_test, mu_test_fct, r'Test data (ground truth)')
    # ### Gaussian kernel
    # Define the kernel
    k_gauss = SphereGaussianKernel(input_dim=dim, active_dims=range(dim), beta_min=7.0, beta=10.0, variance=1.)
    # Kernel computation
    # NOTE(review): K1/K12/K2 are computed but never used afterwards —
    # presumably kept for inspection/debugging.
    K1 = k_gauss.compute_K_symm(x_man)
    K12 = k_gauss.compute_K(x_man, x_man_test)
    K2 = k_gauss.compute_K_symm(x_man_test)
    # GPR model
    m_gauss = gpflow.gpr.GPR(x_man, y_train, kern=k_gauss, mean_function=None)
    # Optimization of the model parameters
    m_gauss.optimize()
    # Compute posterior samples
    # Does not always work due to Cholesky decomposition used in gpflow
    # nb_samples_post = 10
    # posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
    # Prediction
    mean_est_gauss, cov_est_gauss = m_gauss.predict_f_full_cov(x_man_test)
    # mean, cov = m.predict_y(x_new) # includes noise variance (seems not to be included in predict_f functions
    var_est_gauss = np.diag(cov_est_gauss[0])[:, None]
    # Error computation (RMSE over the test points)
    error_gauss = np.sqrt(np.sum((y_test - mean_est_gauss) ** 2) / nb_data_test)
    print('Estimation error (Manifold-RBF kernel) = ', error_gauss)
    # Plot test data
    fig = plt.figure(figsize=(5, 5))
    plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_gauss, mu_test_fct, r'Manifold-RBF kernel')
    # ### Laplace kernel
    # Define the kernel
    k_laplace = SphereLaplaceKernel(input_dim=dim, active_dims=range(dim), beta=10.0, variance=1.)
    # Kernel computation
    K1 = k_laplace.compute_K_symm(x_man)
    K12 = k_laplace.compute_K(x_man, x_man_test)
    K2 = k_laplace.compute_K_symm(x_man_test)
    # GPR model
    m_laplace = gpflow.gpr.GPR(x_man, y_train, kern=k_laplace, mean_function=None)
    # Optimization of the model parameters
    m_laplace.optimize()
    # Compute posterior samples
    # Does not always work due to Cholesky decomposition used in gpflow
    # nb_samples_post = 10
    # posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
    # Prediction
    mean_est_laplace, cov_est_laplace = m_laplace.predict_f_full_cov(x_man_test)
    # mean, cov = m.predict_y(x_new) # includes noise variance (seems not to be included in predict_f functions
    var_est_laplace = np.diag(cov_est_laplace[0])[:, None]
    # Error computation (RMSE over the test points)
    error_laplace = np.sqrt(np.sum((y_test - mean_est_laplace) ** 2) / nb_data_test)
    print('Estimation error (Laplace kernel) = ', error_laplace)
    # Plot test data
    fig = plt.figure(figsize=(5, 5))
    plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_laplace, mu_test_fct, r'Laplace kernel')
    # ### Euclidean RBF (geometry-unaware baseline)
    # Define the kernel
    k_eucl = gpflow.kernels.RBF(input_dim=dim, ARD=False)
    # Kernel computation
    K1 = k_eucl.compute_K_symm(x_man)
    K12 = k_eucl.compute_K(x_man, x_man_test)
    K2 = k_eucl.compute_K_symm(x_man_test)
    # GPR model
    m_eucl = gpflow.gpr.GPR(x_man, y_train, kern=k_eucl, mean_function=None)
    # Optimization of the model parameters
    m_eucl.optimize()
    # Compute posterior samples
    # Does not always work due to Cholesky decomposition used in gpflow
    # nb_samples_post = 10
    # posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
    # Prediction
    mean_est_eucl, cov_est_eucl = m_eucl.predict_f_full_cov(x_man_test)
    # mean, cov = m_eucl.predict_y(x_new) # includes noise variance (seems not to be included in predict_f functions
    var_est_eucl = np.diag(cov_est_eucl[0])[:, None]
    # Error computation (RMSE over the test points)
    error_eucl = np.sqrt(np.sum((y_test - mean_est_eucl) ** 2) / nb_data_test)
    print('Estimation error (Euclidean-RBF kernel) = ', error_eucl)
    # Plot test data
    fig = plt.figure(figsize=(5, 5))
    plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_eucl, mu_test_fct, r'Euclidean-RBF kernel')
    plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.random.seed",
"numpy.sum",
"BoManifolds.Riemannian_utils.sphere_utils.logmap",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"gpflow.kernels.RBF",
"numpy.diag",
"numpy.linalg.det",
"matplotlib.pylab.cm.inferno",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.linalg.inv",
"gpflow.gpr.GPR",
"numpy.dot",
"numpy.zeros",
"BoManifolds.plot_utils.manifold_plots.plot_sphere",
"numpy.array",
"numpy.random.multivariate_normal",
"numpy.eye",
"numpy.sqrt"
] |
[((1336, 1397), 'numpy.array', 'np.array', (['[[0.6, 0.2, 0], [0.2, 0.3, -0.01], [0, -0.01, 0.2]]'], {}), '([[0.6, 0.2, 0], [0.2, 0.3, -0.01], [0, -0.01, 0.2]])\n', (1344, 1397), True, 'import numpy as np\n'), ((1423, 1452), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma_test_fct'], {}), '(sigma_test_fct)\n', (1436, 1452), True, 'import numpy as np\n'), ((1478, 1507), 'numpy.linalg.det', 'np.linalg.det', (['sigma_test_fct'], {}), '(sigma_test_fct)\n', (1491, 1507), True, 'import numpy as np\n'), ((1543, 1570), 'BoManifolds.Riemannian_utils.sphere_utils.logmap', 'logmap', (['x', 'mu_test_function'], {}), '(x, mu_test_function)\n', (1549, 1570), False, 'from BoManifolds.Riemannian_utils.sphere_utils import logmap\n'), ((1820, 1841), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['figure_handle'], {}), '(figure_handle)\n', (1826, 1841), False, 'from mpl_toolkits.mplot3d import axes3d, Axes3D\n'), ((2450, 2476), 'BoManifolds.plot_utils.manifold_plots.plot_sphere', 'plot_sphere', (['ax'], {'alpha': '(0.4)'}), '(ax, alpha=0.4)\n', (2461, 2476), False, 'from BoManifolds.plot_utils.manifold_plots import plot_sphere\n'), ((2878, 2903), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': '(25)'}), '(title, size=25)\n', (2887, 2903), True, 'import matplotlib.pyplot as plt\n'), ((2937, 2957), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (2951, 2957), True, 'import numpy as np\n'), ((3142, 3161), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (3150, 3161), True, 'import numpy as np\n'), ((3265, 3314), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'nb_data'], {}), '(mean, cov, nb_data)\n', (3294, 3314), True, 'import numpy as np\n'), ((3387, 3409), 'numpy.zeros', 'np.zeros', (['(nb_data, 1)'], {}), '((nb_data, 1))\n', (3395, 3409), True, 'import numpy as np\n'), ((3705, 3769), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_test', 'cov_test', 'nb_data_test'], {}), 
'(mean_test, cov_test, nb_data_test)\n', (3734, 3769), True, 'import numpy as np\n'), ((3846, 3873), 'numpy.zeros', 'np.zeros', (['(nb_data_test, 1)'], {}), '((nb_data_test, 1))\n', (3854, 3873), True, 'import numpy as np\n'), ((4018, 4044), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (4028, 4044), True, 'import matplotlib.pyplot as plt\n'), ((4196, 4222), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (4206, 4222), True, 'import matplotlib.pyplot as plt\n'), ((4688, 4752), 'gpflow.gpr.GPR', 'gpflow.gpr.GPR', (['x_man', 'y_train'], {'kern': 'k_gauss', 'mean_function': 'None'}), '(x_man, y_train, kern=k_gauss, mean_function=None)\n', (4702, 4752), False, 'import gpflow\n'), ((5491, 5517), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5501, 5517), True, 'import matplotlib.pyplot as plt\n'), ((5980, 6046), 'gpflow.gpr.GPR', 'gpflow.gpr.GPR', (['x_man', 'y_train'], {'kern': 'k_laplace', 'mean_function': 'None'}), '(x_man, y_train, kern=k_laplace, mean_function=None)\n', (5994, 6046), False, 'import gpflow\n'), ((6798, 6824), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (6808, 6824), True, 'import matplotlib.pyplot as plt\n'), ((7004, 7048), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'input_dim': 'dim', 'ARD': '(False)'}), '(input_dim=dim, ARD=False)\n', (7022, 7048), False, 'import gpflow\n'), ((7230, 7293), 'gpflow.gpr.GPR', 'gpflow.gpr.GPR', (['x_man', 'y_train'], {'kern': 'k_eucl', 'mean_function': 'None'}), '(x_man, y_train, kern=k_eucl, mean_function=None)\n', (7244, 7293), False, 'import gpflow\n'), ((8029, 8055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (8039, 8055), True, 'import matplotlib.pyplot as plt\n'), ((8181, 8191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8189, 8191), True, 'import 
matplotlib.pyplot as plt\n'), ((1653, 1701), 'numpy.sqrt', 'np.sqrt', (['((2 * np.pi) ** dim * det_sigma_test_fct)'], {}), '((2 * np.pi) ** dim * det_sigma_test_fct)\n', (1660, 1701), True, 'import numpy as np\n'), ((3180, 3200), 'numpy.linalg.norm', 'np.linalg.norm', (['mean'], {}), '(mean)\n', (3194, 3200), True, 'import numpy as np\n'), ((3241, 3252), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (3247, 3252), True, 'import numpy as np\n'), ((3615, 3635), 'numpy.linalg.norm', 'np.linalg.norm', (['mean'], {}), '(mean)\n', (3629, 3635), True, 'import numpy as np\n'), ((3681, 3692), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (3687, 3692), True, 'import numpy as np\n'), ((5252, 5277), 'numpy.diag', 'np.diag', (['cov_est_gauss[0]'], {}), '(cov_est_gauss[0])\n', (5259, 5277), True, 'import numpy as np\n'), ((6556, 6583), 'numpy.diag', 'np.diag', (['cov_est_laplace[0]'], {}), '(cov_est_laplace[0])\n', (6563, 6583), True, 'import numpy as np\n'), ((7793, 7817), 'numpy.diag', 'np.diag', (['cov_est_eucl[0]'], {}), '(cov_est_eucl[0])\n', (7800, 7817), True, 'import numpy as np\n'), ((3334, 3362), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (3348, 3362), True, 'import numpy as np\n'), ((3794, 3822), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (3808, 3822), True, 'import numpy as np\n'), ((5337, 5375), 'numpy.sum', 'np.sum', (['((y_test - mean_est_gauss) ** 2)'], {}), '((y_test - mean_est_gauss) ** 2)\n', (5343, 5375), True, 'import numpy as np\n'), ((6645, 6685), 'numpy.sum', 'np.sum', (['((y_test - mean_est_laplace) ** 2)'], {}), '((y_test - mean_est_laplace) ** 2)\n', (6651, 6685), True, 'import numpy as np\n'), ((7876, 7913), 'numpy.sum', 'np.sum', (['((y_test - mean_est_eucl) ** 2)'], {}), '((y_test - mean_est_eucl) ** 2)\n', (7882, 7913), True, 'import numpy as np\n'), ((2730, 2773), 'matplotlib.pylab.cm.inferno', 'pl.cm.inferno', (['(mean_est[n] / plt_scale_fact)'], {}), 
'(mean_est[n] / plt_scale_fact)\n', (2743, 2773), True, 'import matplotlib.pylab as pl\n'), ((3027, 3037), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3034, 3037), True, 'import numpy as np\n'), ((3043, 3053), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3050, 3053), True, 'import numpy as np\n'), ((1614, 1648), 'numpy.dot', 'np.dot', (['inv_sigma_test_fct', 'x_proj'], {}), '(inv_sigma_test_fct, x_proj)\n', (1620, 1648), True, 'import numpy as np\n')]
|
from flask import render_template, request, redirect, url_for, jsonify, json
from flask_login import login_required
from app.api.classes.location.models import Location
from app.api.classes.observatory.models import Observatory
from app.api.classes.type.models import Type
from app.api.classes.observatory.services import getAll, getObservatoryId
from app.api.classes.location.services import getLocationsAndTypes, getAllLocations, editLocation
from app.api import bp
from app.db import db
import os
import json
@bp.route('/api/addLocation', methods=['POST'])
@login_required
def add_location():
    """Create a new Location from the POSTed JSON and echo the payload back."""
    payload = request.get_json()
    new_location = Location(name=payload['name'], observatory_id=payload['observatory_id'])
    db.session().add(new_location)
    db.session().commit()
    return payload
@bp.route('/api/getLocationsAndTypes/<observatory_name>', methods=['GET'])
#@login_required
def list_locations(observatory_name):
    """Return every location (with its types) for the named observatory."""
    return getLocationsAndTypes(observatory_name)
@bp.route('/api/getLocations/', methods=['GET'])
#@login_required
def get_all_locations():
    """Return every stored location."""
    return getAllLocations()
# Preliminary route for editing a location's name.
@bp.route("/api/edit/<observatoryname>/<locationname>", methods=["POST"])
@login_required
def edit_location(observatoryname, locationname):
    """Apply a location edit and return the service-layer response."""
    return editLocation(observatoryname, locationname)
|
[
"app.api.classes.location.models.Location",
"app.api.classes.location.services.getAllLocations",
"app.db.db.session",
"app.api.classes.location.services.editLocation",
"app.api.classes.location.services.getLocationsAndTypes",
"app.api.bp.route",
"flask.request.get_json"
] |
[((519, 565), 'app.api.bp.route', 'bp.route', (['"""/api/addLocation"""'], {'methods': "['POST']"}), "('/api/addLocation', methods=['POST'])\n", (527, 565), False, 'from app.api import bp\n'), ((789, 862), 'app.api.bp.route', 'bp.route', (['"""/api/getLocationsAndTypes/<observatory_name>"""'], {'methods': "['GET']"}), "('/api/getLocationsAndTypes/<observatory_name>', methods=['GET'])\n", (797, 862), False, 'from app.api import bp\n'), ((985, 1032), 'app.api.bp.route', 'bp.route', (['"""/api/getLocations/"""'], {'methods': "['GET']"}), "('/api/getLocations/', methods=['GET'])\n", (993, 1032), False, 'from app.api import bp\n'), ((1168, 1240), 'app.api.bp.route', 'bp.route', (['"""/api/edit/<observatoryname>/<locationname>"""'], {'methods': "['POST']"}), "('/api/edit/<observatoryname>/<locationname>', methods=['POST'])\n", (1176, 1240), False, 'from app.api import bp\n'), ((613, 631), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (629, 631), False, 'from flask import render_template, request, redirect, url_for, jsonify, json\n'), ((647, 711), 'app.api.classes.location.models.Location', 'Location', ([], {'name': "req['name']", 'observatory_id': "req['observatory_id']"}), "(name=req['name'], observatory_id=req['observatory_id'])\n", (655, 711), False, 'from app.api.classes.location.models import Location\n'), ((928, 966), 'app.api.classes.location.services.getLocationsAndTypes', 'getLocationsAndTypes', (['observatory_name'], {}), '(observatory_name)\n', (948, 966), False, 'from app.api.classes.location.services import getLocationsAndTypes, getAllLocations, editLocation\n'), ((1085, 1102), 'app.api.classes.location.services.getAllLocations', 'getAllLocations', ([], {}), '()\n', (1100, 1102), False, 'from app.api.classes.location.services import getLocationsAndTypes, getAllLocations, editLocation\n'), ((1318, 1361), 'app.api.classes.location.services.editLocation', 'editLocation', (['observatoryname', 'locationname'], {}), '(observatoryname, 
locationname)\n', (1330, 1361), False, 'from app.api.classes.location.services import getLocationsAndTypes, getAllLocations, editLocation\n'), ((717, 729), 'app.db.db.session', 'db.session', ([], {}), '()\n', (727, 729), False, 'from app.db import db\n'), ((748, 760), 'app.db.db.session', 'db.session', ([], {}), '()\n', (758, 760), False, 'from app.db import db\n')]
|
import re
from datetime import datetime
from getpass import getpass
from sys import argv
from . import __version__
from .data.time import 沙坪坝校区作息时间, 虎溪校区作息时间
from .app import App
class CommandParser:
    """Interactively collects the username/password, term number, term start
    date and campus timetable from stdin."""

    def __init__(self):
        self.username = input("用户名: ").strip()
        self.password = getpass("密码: ").strip()
        self.term = self.getTerm()
        self.startdate = self.getStartDate()
        self.作息时间 = self.get作息时间()

    def getTerm(self) -> str:
        """Prompt for a term id such as ``20190`` and return it unchanged.

        Returns the raw digit string — the original annotation said ``int``
        but the matched string is what is actually returned (fixed here).
        Raises ValueError when the input is not 4+ digits plus a term digit.
        """
        term = input("学期号: ").strip()
        if re.fullmatch(r"(?P<year>\d{4,})(?P<term>\d)", term):
            return term
        else:
            raise ValueError(f"{term} 不是一个有效的学期号, 应为类似于 20190 这样的数字")

    def getStartDate(self) -> datetime:
        """Prompt for the term start date as ``YYYYMMDD`` and return it as a
        ``datetime``; raises ValueError for malformed input."""
        date = input("学期开始日期: ").strip()
        m = re.fullmatch(
            r"(?P<year>\d{4,})(?P<month>\d{2})(?P<day>\d{2})", date)
        if m:
            year = int(m["year"])
            mon = int(m["month"])
            day = int(m["day"])
            date = datetime(year, mon, day)
            return date
        else:
            raise ValueError(f"{date} 不是有效的日期号,应为类似于 20190101 这样的数字")

    def get作息时间(self) -> dict:
        """Let the user pick the campus timetable (1: Shapingba, 2: Huxi)
        and return the corresponding timetable mapping."""
        print("选择作息时间:")
        print("1) 沙坪坝校区")
        print("2) 虎溪校区")
        code = int(input("1|2> ").strip())
        # NOTE: assert is stripped under ``python -O``; kept as-is so the
        # exception type seen by callers (AssertionError) is unchanged.
        assert code in [1, 2]
        choice = {
            1: 沙坪坝校区作息时间,
            2: 虎溪校区作息时间
        }[code]
        return choice

    @staticmethod
    def help():
        """Print CLI usage."""
        print((
            "Usage: cqu_schedule\n"
            " 登录并获取学生课程表 ics 文件"
        ))
def main():
    """CLI entry point: with no extra arguments, prompt interactively and
    write the ICS file; otherwise print usage."""
    if len(argv) != 1:
        CommandParser.help()
        return
    args = CommandParser()
    app = App(username=args.username, password=args.password)
    app.writeICS(args.term, args.startdate, args.作息时间)
|
[
"getpass.getpass",
"re.fullmatch",
"datetime.datetime"
] |
[((517, 569), 're.fullmatch', 're.fullmatch', (['"""(?P<year>\\\\d{4,})(?P<term>\\\\d)"""', 'term'], {}), "('(?P<year>\\\\d{4,})(?P<term>\\\\d)', term)\n", (529, 569), False, 'import re\n'), ((772, 843), 're.fullmatch', 're.fullmatch', (['"""(?P<year>\\\\d{4,})(?P<month>\\\\d{2})(?P<day>\\\\d{2})"""', 'date'], {}), "('(?P<year>\\\\d{4,})(?P<month>\\\\d{2})(?P<day>\\\\d{2})', date)\n", (784, 843), False, 'import re\n'), ((988, 1012), 'datetime.datetime', 'datetime', (['year', 'mon', 'day'], {}), '(year, mon, day)\n', (996, 1012), False, 'from datetime import datetime\n'), ((298, 313), 'getpass.getpass', 'getpass', (['"""密码: """'], {}), "('密码: ')\n", (305, 313), False, 'from getpass import getpass\n')]
|
from django.urls import path
from django.views.generic import TemplateView
from . import views
# URL routes for the dashboard "results" section. Order matters to Django's
# resolver, so entries are kept exactly as-is. Two routes render static
# templates directly; the rest dispatch to function-based views.
urlpatterns = [
    path('search/directions', views.results_search_directions),
    path('enter', views.enter),
    path('get', views.result_get),
    path('pdf', views.result_print),
    path('preview', TemplateView.as_view(template_name='dashboard/results_preview.html')),
    path('results', TemplateView.as_view(template_name='dashboard/results.html')),
    path('journal', views.result_journal_print),
    path('journal_table', views.result_journal_table_print),
    path('filter', views.result_filter),
    path('day', views.get_day_results),
]
|
[
"django.views.generic.TemplateView.as_view",
"django.urls.path"
] |
[((117, 175), 'django.urls.path', 'path', (['"""search/directions"""', 'views.results_search_directions'], {}), "('search/directions', views.results_search_directions)\n", (121, 175), False, 'from django.urls import path\n'), ((181, 207), 'django.urls.path', 'path', (['"""enter"""', 'views.enter'], {}), "('enter', views.enter)\n", (185, 207), False, 'from django.urls import path\n'), ((213, 242), 'django.urls.path', 'path', (['"""get"""', 'views.result_get'], {}), "('get', views.result_get)\n", (217, 242), False, 'from django.urls import path\n'), ((248, 279), 'django.urls.path', 'path', (['"""pdf"""', 'views.result_print'], {}), "('pdf', views.result_print)\n", (252, 279), False, 'from django.urls import path\n'), ((459, 502), 'django.urls.path', 'path', (['"""journal"""', 'views.result_journal_print'], {}), "('journal', views.result_journal_print)\n", (463, 502), False, 'from django.urls import path\n'), ((508, 563), 'django.urls.path', 'path', (['"""journal_table"""', 'views.result_journal_table_print'], {}), "('journal_table', views.result_journal_table_print)\n", (512, 563), False, 'from django.urls import path\n'), ((569, 604), 'django.urls.path', 'path', (['"""filter"""', 'views.result_filter'], {}), "('filter', views.result_filter)\n", (573, 604), False, 'from django.urls import path\n'), ((610, 644), 'django.urls.path', 'path', (['"""day"""', 'views.get_day_results'], {}), "('day', views.get_day_results)\n", (614, 644), False, 'from django.urls import path\n'), ((301, 369), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""dashboard/results_preview.html"""'}), "(template_name='dashboard/results_preview.html')\n", (321, 369), False, 'from django.views.generic import TemplateView\n'), ((392, 452), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""dashboard/results.html"""'}), "(template_name='dashboard/results.html')\n", (412, 452), False, 'from django.views.generic 
import TemplateView\n')]
|
from django.contrib import admin

from .models import Car, License, Owner, Ownership

# Register every model with the default admin site so each one is
# browsable/editable in the Django admin UI.  (The original interleaved a
# separate `from .models import X` before each register call; the
# registrations themselves are unchanged.)
admin.site.register(Owner)
admin.site.register(Car)
admin.site.register(Ownership)
admin.site.register(License)
|
[
"django.contrib.admin.site.register"
] |
[((59, 85), 'django.contrib.admin.site.register', 'admin.site.register', (['Owner'], {}), '(Owner)\n', (78, 85), False, 'from django.contrib import admin\n'), ((110, 134), 'django.contrib.admin.site.register', 'admin.site.register', (['Car'], {}), '(Car)\n', (129, 134), False, 'from django.contrib import admin\n'), ((165, 195), 'django.contrib.admin.site.register', 'admin.site.register', (['Ownership'], {}), '(Ownership)\n', (184, 195), False, 'from django.contrib import admin\n'), ((224, 252), 'django.contrib.admin.site.register', 'admin.site.register', (['License'], {}), '(License)\n', (243, 252), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3
from scrapper import INSTANCE_URL
from bs4 import BeautifulSoup
import requests
def get_html() -> BeautifulSoup:
    """Download and parse the instance's preferences page.

    Performs a GET request against ``{INSTANCE_URL}/preferences`` and
    returns the raw HTML wrapped in a BeautifulSoup document tree.
    """
    response = requests.get(f"{INSTANCE_URL}/preferences")
    return BeautifulSoup(response.content, "html.parser")
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((322, 360), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""html.parser"""'], {}), "(html_doc, 'html.parser')\n", (335, 360), False, 'from bs4 import BeautifulSoup\n'), ((259, 302), 'requests.get', 'requests.get', (['f"""{INSTANCE_URL}/preferences"""'], {}), "(f'{INSTANCE_URL}/preferences')\n", (271, 302), False, 'import requests\n')]
|
import datetime
import jwt
import os
from functools import wraps
from flask import request, Response
# Signing key for JWTs.  The hard-coded value is a deliberately weak
# fallback for local development only; deployments override it via the
# API_KEY environment variable.
SECRET_KEY = os.environ.get(
    'API_KEY',
    "ThisIsAVeryBadAPISecretKeyThatIsOnlyUsedWhenRunningLocally")
def encode_auth_token(userName):
    """Generate a signed auth token for *userName*.

    The token is signed with SECRET_KEY (HS256) and expires 24 hours and
    one second after issuance.  On any failure the raised exception object
    itself is returned instead of a token string (callers inspect the
    result's type).
    """
    try:
        claims = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=1),
            'iat': datetime.datetime.utcnow(),
            'username': userName,
        }
        return jwt.encode(claims, SECRET_KEY, algorithm='HS256')
    except Exception as e:
        return e
def decode_auth_token(auth_token):
    """Validate a JWT and report the outcome as a string.

    On success returns ``'SUCCESS'`` immediately followed by the username
    stored in the token; on an expired or malformed token returns a
    human-readable error message instead.
    """
    try:
        claims = jwt.decode(auth_token, SECRET_KEY)
    except jwt.ExpiredSignatureError:
        return 'Signature expired. Please log in again.'
    except jwt.InvalidTokenError:
        return 'Invalid token. Please log in again.'
    return 'SUCCESS' + claims['username']
def requires_auth(f):
    """Decorator: reject the request with 401 unless a valid auth token is
    presented.

    The token is looked up, in order of precedence, in the 'capstoneAuth'
    header, the 'Authorization' header, and the 'capstoneAuth' cookie.
    On success the decoded username is stored on
    ``request.userNameFromToken`` before the wrapped endpoint runs.

    (Cleanup: the original initialized ``auth_token = False`` and guarded
    the first lookup with an always-true ``if not auth_token`` — the
    dead sentinel chain is collapsed into one ``or`` expression with the
    same precedence and truthiness semantics.)
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        auth_token = (request.headers.get('capstoneAuth')
                      or request.headers.get('Authorization')
                      or request.cookies.get('capstoneAuth'))
        if not auth_token:  # Auth token not present so send 401
            return Response('Missing Auth Token!\n' 'You have to login with proper credentials', 401,
                            {'WWW-Authenticate': 'Basic realm="Login Required"'})
        user_name = decode_auth_token(auth_token)  # Get userid from authtoken
        if user_name.startswith('SUCCESS'):
            # Strip the 'SUCCESS' prefix; the remainder is the username.
            request.userNameFromToken = user_name[7:]
            # Send control back to the actual endpoint function.
            return f(*args, **kwargs)
        return Response('\n' 'You have to login with proper credentials', 401,
                        {'WWW-Authenticate': 'Basic realm="Login Required"'})
    return decorated
|
[
"flask.request.headers.get",
"jwt.encode",
"flask.request.cookies.get",
"datetime.datetime.utcnow",
"datetime.timedelta",
"functools.wraps",
"flask.Response",
"jwt.decode"
] |
[((1313, 1321), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (1318, 1321), False, 'from functools import wraps\n'), ((591, 641), 'jwt.encode', 'jwt.encode', (['payload', 'SECRET_KEY'], {'algorithm': '"""HS256"""'}), "(payload, SECRET_KEY, algorithm='HS256')\n", (601, 641), False, 'import jwt\n'), ((922, 956), 'jwt.decode', 'jwt.decode', (['auth_token', 'SECRET_KEY'], {}), '(auth_token, SECRET_KEY)\n', (932, 956), False, 'import jwt\n'), ((505, 531), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (529, 531), False, 'import datetime\n'), ((1437, 1472), 'flask.request.headers.get', 'request.headers.get', (['"""capstoneAuth"""'], {}), "('capstoneAuth')\n", (1456, 1472), False, 'from flask import request, Response\n'), ((1525, 1561), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (1544, 1561), False, 'from flask import request, Response\n'), ((1614, 1649), 'flask.request.cookies.get', 'request.cookies.get', (['"""capstoneAuth"""'], {}), "('capstoneAuth')\n", (1633, 1649), False, 'from flask import request, Response\n'), ((1732, 1872), 'flask.Response', 'Response', (['"""Missing Auth Token!\nYou have to login with proper credentials"""', '(401)', '{\'WWW-Authenticate\': \'Basic realm="Login Required"\'}'], {}), '("""Missing Auth Token!\nYou have to login with proper credentials""",\n 401, {\'WWW-Authenticate\': \'Basic realm="Login Required"\'})\n', (1740, 1872), False, 'from flask import request, Response\n'), ((2293, 2415), 'flask.Response', 'Response', (['"""\nYou have to login with proper credentials"""', '(401)', '{\'WWW-Authenticate\': \'Basic realm="Login Required"\'}'], {}), '("""\nYou have to login with proper credentials""", 401, {\n \'WWW-Authenticate\': \'Basic realm="Login Required"\'})\n', (2301, 2415), False, 'from flask import request, Response\n'), ((418, 444), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (442, 444), False, 'import 
datetime\n'), ((447, 484), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)', 'seconds': '(1)'}), '(days=1, seconds=1)\n', (465, 484), False, 'import datetime\n')]
|
from tkinter import *
from tkinter.ttk import *
from tkinter import ttk
import tkinter.messagebox as mb
import sys, os, requests
def weatherapp():
    """Open the MaxPyOS weather-app window.

    Builds a Tk window with a city-name entry; pressing <Return> queries
    the OpenWeatherMap API, shows the current conditions in two labels,
    and swaps the window icon to match the reported weather.  The current
    condition is mirrored to Apps/WeatherApp/src/weather-condition.txt
    and cleared again when the window is closed.
    """
    def closeweatherapp():
        # Clear the persisted weather condition, then destroy the window.
        file = open('Apps/WeatherApp/src/weather-condition.txt', 'w')
        file.write("")
        file.close()
        weatherapp.destroy()
    def getWeather(canvas):
        # 'canvas' receives the <Return> key event delivered by Tk's
        # bind(); it is unused — presumably kept only to satisfy the
        # bind callback signature (TODO confirm).
        def readintoweatherconditionfile():
            # Choose the window icon matching the condition persisted by
            # the enclosing function just before this call.
            file = open('Apps/WeatherApp/src/weather-condition.txt', 'r')
            content = file.read()
            if content == "Clear":
                weatherapp.iconbitmap("Apps/WeatherApp/icons/sun-icon.ico")
            elif content == "Rain":
                weatherapp.iconbitmap("Apps/WeatherApp/icons/rain-icon.ico")
            elif content == "Clouds":
                weatherapp.iconbitmap("Apps/WeatherApp/icons/clouds-icon.ico")
            elif content == "Stormy":
                weatherapp.iconbitmap("Apps/WeatherApp/icons/stormyrain-icon.ico")
            elif content == "Haze":
                weatherapp.iconbitmap("Apps/WeatherApp/icons/windy-icon.ico")
            elif content == "Mist":
                weatherapp.iconbitmap("Apps/WeatherApp/icons/haze-icon.ico")
            else:
                # Unknown condition: keep whatever icon is currently set.
                pass
            file.close()
        city = namecity.get()
        api = "https://api.openweathermap.org/data/2.5/weather?q="+city+"&appid=06c921750b9a82d8f5d1294e1586276f"
        try:
            weatherapp.iconbitmap("Apps/WeatherApp/icons/weatherapp-icon.ico")
            json_data = requests.get(api).json()
            condition = json_data['weather'][0]['main']
            # Persist the condition so readintoweatherconditionfile() can
            # pick the matching icon below.
            file = open('Apps/WeatherApp/src/weather-condition.txt', 'w')
            file.write(condition)
            file.close()
            # OpenWeatherMap reports Kelvin; convert to whole degrees C.
            temp = int(json_data['main']['temp'] - 273.15)
            min_temp = int(json_data['main']['temp_min'] - 273.15)
            max_temp = int(json_data['main']['temp_max'] - 273.15)
            pressure = json_data['main']['pressure']
            humidity = json_data['main']['humidity']
            wind = json_data['wind']['speed']
            final_info = condition + "\n" + str(temp) + "°C"
            final_data = "\n" + "Minimal temperature: " + str(min_temp) + "°C" + "\n" + "Maximal temperature: " + str(max_temp) + "°C" + "\n" + "Humidity: " + str(humidity) + "\n"
            readintoweatherconditionfile()
            label1.config(text=final_info)
            label2.config(text=final_data)
        except:
            # NOTE(review): bare except treats ANY failure (network error,
            # malformed response, missing icon file) as "city not found".
            weatherapp.iconbitmap("Apps/WeatherApp/icons/weatherapp-icon.ico")
            file = open('Apps/WeatherApp/src/weather-condition.txt', 'w')
            file.write("")
            file.close()
            label1.config(text="This city doesn't exist !")
            label2.config(text="")
    # NOTE: the local name 'weatherapp' below shadows this function's own
    # name; the nested closures above resolve to this Tk window object.
    weatherapp = Tk()
    weatherapp.title("MaxPyOS - Weather App")
    weatherapp.geometry("600x500")
    weatherapp.resizable(False, False)
    weatherapp.iconbitmap("Apps/WeatherApp/icons/weatherapp-icon.ico")
    weatherapp.protocol("WM_DELETE_WINDOW", lambda: closeweatherapp())
    # Font tuples: f = small text, t = large text.
    f = ("poppins", 15, "bold")
    t = ("poppins", 35, "bold")
    namecity = Entry(weatherapp, justify='center', width=20, font=t)
    namecity.pack(pady=20)
    namecity.focus()
    namecity.bind('<Return>', getWeather)
    label1 = Label(weatherapp, font=t)
    label1.pack()
    label2 = Label(weatherapp, font=f)
    label2.pack()
|
[
"requests.get"
] |
[((1298, 1315), 'requests.get', 'requests.get', (['api'], {}), '(api)\n', (1310, 1315), False, 'import sys, os, requests\n')]
|
import os
import shutil
def copytree(src, dst):
    """
    Copy file tree from <src> location to <dst> location.

    Args:
        src: path of the directory whose contents are copied.
        dst: destination directory; created (with parents) if missing.

    Files are copied with metadata preserved (shutil.copy2);
    subdirectories are copied recursively via shutil.copytree.
    """
    # Ensure the destination exists, otherwise copy2() on a top-level
    # file would fail with FileNotFoundError.
    os.makedirs(dst, exist_ok=True)
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            # shutil.copytree requires that d does not exist yet.
            shutil.copytree(s, d)
        else:
            shutil.copy2(s, d)
|
[
"os.path.isdir",
"shutil.copy2",
"shutil.copytree",
"os.path.join",
"os.listdir"
] |
[((139, 154), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (149, 154), False, 'import os\n'), ((168, 191), 'os.path.join', 'os.path.join', (['src', 'item'], {}), '(src, item)\n', (180, 191), False, 'import os\n'), ((204, 227), 'os.path.join', 'os.path.join', (['dst', 'item'], {}), '(dst, item)\n', (216, 227), False, 'import os\n'), ((239, 255), 'os.path.isdir', 'os.path.isdir', (['s'], {}), '(s)\n', (252, 255), False, 'import os\n'), ((269, 290), 'shutil.copytree', 'shutil.copytree', (['s', 'd'], {}), '(s, d)\n', (284, 290), False, 'import shutil\n'), ((317, 335), 'shutil.copy2', 'shutil.copy2', (['s', 'd'], {}), '(s, d)\n', (329, 335), False, 'import shutil\n')]
|
import os, logging, json, re
import pandas as pd
import numpy as np
from BarSeqPy.translate_R_to_pandas import *
def data_prep_1(data_dir, FEBA_dir, debug_bool=False, meta_ix=7, cfg=None):
    """ First phase of data preparation for the BarSeqR computations.

    Args:
        data_dir (str): Directory holding the TSV files 'all.poolcount',
            'genes', 'exps', 'pool', and optionally the JSON lists
            'strainusage.genes.json' and 'ignore_list.json'.
        FEBA_dir (str): Directory holding 'desc_short_rules.tsv'.
        debug_bool (bool): When True, dump the processed dataframes to 'tmp/'.
        meta_ix (int): Number of metadata columns at the left of all.poolcount.
        cfg (dict): Must provide 'drop_exps' (bool) and 'okControls' (bool).

    Returns:
        list: [exps_df, all_df, genes_df, genesUsed_list, ignore_list]
            exps_df (pandas DataFrame): experiments table with added columns
                'name', 'num', 'short', 't0set'.
            all_df (pandas DataFrame): barcode counts with data columns
                renamed to the shortened experiment names.
            genes_df (pandas DataFrame): genes table (must contain
                scaffold, begin).
            genesUsed_list (list<str>): locusIds to restrict the analysis to
                (empty means no restriction file was found).
            ignore_list (list<str>): experiment names (all.poolcount
                columns) to ignore.

    Description:
        1. getDataFrames: load the genes/all/exps/rules tables with fixed
           column dtypes.
        2. checkLocusIdEquality: assert every all_df locusId exists in
           genes_df (no mutation).
        3. check_exps_df_against_all_df: assert 'SetName.Index' of each
           experiment matches an all.poolcount data column.
        4. prepare_set_names: shorten the set names (e.g.
           Keio_ML9_set2.IT004 -> set2IT004) and keep the experiments table
           and the all.poolcount columns in sync.
        5. get_special_lists: load the optional genesUsed / ignore lists
           from disk.
    """
    tables = getDataFrames(data_dir, FEBA_dir,
                           drop_exps=cfg['drop_exps'],
                           okControls=cfg['okControls'],
                           dbg_lvl=0)
    genes_df, all_df, exps_df, rules_df = tables
    # Pure validation - neither call mutates its inputs.
    checkLocusIdEquality(all_df, genes_df, debug_bool=debug_bool)
    check_exps_df_against_all_df(exps_df, all_df, meta_ix)
    # Normalize experiment names; adds 'name', 'num', 'short' to exps_df
    # and renames the data columns of all_df to match.
    exps_df, all_df, replace_col_d = prepare_set_names(exps_df, all_df, rules_df,
                                                       okControls=cfg['okControls'],
                                                       meta_ix=meta_ix,
                                                       debug_bool=debug_bool)
    genesUsed_list, ignore_list = get_special_lists(data_dir, all_df,
                                                    replace_col_d,
                                                    debug_bool=debug_bool)
    if debug_bool:
        # Snapshot the processed tables for manual inspection.
        for df, out_fp in ((exps_df, "tmp/py_test1_exps_fp.tsv"),
                           (all_df, "tmp/py_test1_all_fp.tsv"),
                           (genes_df, "tmp/py_test1_genes_fp.tsv")):
            df.to_csv(out_fp, sep="\t")
    return [exps_df, all_df, genes_df, genesUsed_list, ignore_list]
def getDataFrames(data_dir, FEBA_dir, drop_exps=False,
                  okControls=False, dbg_lvl=0):
    """
    Load the four BarSeqR input tables from disk.

    Args:
        data_dir: (str) Path to directory which contains the
                following TSV files: 'all.poolcount', 'genes',
                'exps' (plus 'pool', whose presence is checked
                by the caller pipeline).
        FEBA_dir: (str) Path to directory which contains the
                    file 'desc_short_rules.tsv'.
        drop_exps (bool): Should we drop all experiments that have
                          Drop=True already? (Currently has no effect:
                          the drop code below is disabled.)
        okControls (bool): When True, additionally require the
                          'control_group' and 'control_bool' columns
                          in the experiments table.
        dbg_lvl (int): When > 1, print all four dataframes.
    Returns:
        genes_df (pandas DataFrame): Contains columns:
            locusId, sysName, type, scaffoldId, begin, end, strand,
            name, desc, GC, nTA
        all_df (pandas DataFrame): Contains columns:
            barcode, rcbarcode, scaffold, strand, pos, locusId, f,
            setName1, ..., setNameN
        exps_df (pandas DataFrame): Contains columns:
            Index, Date_pool_expt_started, Description, SetName, Group,
            Drop (normalized to bool here), [Condition_1], [Condition_2]
        rules_df (pandas DataFrame): Contains columns:
            V1 (str): Original string to replace
            V2 (str): String to replace V1 by
    Raises:
        Exception: if a required input file or column is missing, a file
            is not readable, or a 'Drop' value cannot be interpreted
            as a boolean.
    """
    data_files = os.listdir(data_dir)
    # Fail fast if any required input file is absent.
    for x in ["all.poolcount", "genes", "exps", "pool"]:
        if x not in data_files:
            raise Exception("Input data_dir to RunFEBA must include files:\n"
                            "all.poolcount, genes, exps, and pool."
                            " Currently missing: " + x)
    all_fp = os.path.join(data_dir, "all.poolcount")
    genes_fp = os.path.join(data_dir, "genes")
    exps_fp = os.path.join(data_dir, "exps")
    short_rules_fp = os.path.join(FEBA_dir, "desc_short_rules.tsv")
    # Checking access permissions
    for x in [all_fp, genes_fp, exps_fp]:
        if not os.access(x, os.R_OK):
            raise Exception("To run, program requires read permission to file " + x)
    # Read tsv files into dataframes, making sure columns locusId and scaffoldId read as strings
    genes_dtypes = {
        'locusId': str,
        'sysName': str,
        'type': int,
        'scaffoldId': str,
        'begin': int,
        'end': int,
        'strand': str,
        'name': str,
        'desc': str,
        'GC': float,
        'nTA': int
    }
    genes_df = pd.read_table(genes_fp, dtype=genes_dtypes)
    #barcode rcbarcode scaffold strand pos locusId f
    all_dtypes = {
        'barcode': str,
        'rcbarcode': str,
        'scaffold': str,
        'strand': str,
        'pos': int,
        'locusId': str,
        'f': float
    }
    all_df = pd.read_table(all_fp, dtype=all_dtypes)
    exps_dtypes = {
        'SetName': str,
        'Index': str,
        'Date_pool_expt_started': str,
        "Description": str,
        "Group": str,
        "Drop": str,
        "Condition_1": str,
        "Condition_2": str,
        "control_group": str,
        "control_bool": str
    }
    exps_df = pd.read_table(exps_fp, dtype=exps_dtypes)
    # We update the 'Drop' experiments: normalize the raw TRUE/FALSE text
    # (or NaN for an empty cell) into real booleans.
    if 'Drop' in exps_df:
        new_drops = []
        for ix, value in exps_df['Drop'].items():
            if not isinstance(value, str):
                # A non-string is acceptable only as NaN (empty cell),
                # which means "do not drop".
                if pd.isna(value):
                    new_drops.append(False)
                else:
                    raise Exception(f"Value in 'Drop' not string: {value}")
            elif str(value).strip().upper() == "TRUE":
                new_drops.append(True)
            elif value.strip().upper() == "FALSE":
                new_drops.append(False)
            else:
                raise Exception(f"Cannot recognize Drop value in row {ix}:"
                                f" {value}")
        exps_df['Drop'] = new_drops
    else:
        # No 'Drop' column supplied: keep every experiment.
        exps_df['Drop'] = [False]*exps_df.shape[0]
    # The block below is intentionally disabled (kept as a string literal);
    # dropping the rows flagged 'Drop' is deferred to a later phase.
    """
    if drop_exps:
        # Removing Drop rows
        exps_df.drop(remove_indeces, axis=0, inplace=True)
    """
    # Remove trailing spaces:
    for x in ["Group", "Condition_1", "Condition_2", "control_bool"]:
        if x in exps_df:
            # We take the entire column (pandas Series) and remove the spaces
            # from either end
            exps_df[x] = exps_df[x].str.strip()
    rules_dtypes = {
        "V1": str,
        "V2": str
    }
    # keep_default_na=False: empty strings in the rules stay "" rather
    # than becoming NaN, so str.replace stays well-defined downstream.
    rules_df = pd.read_table(short_rules_fp, keep_default_na=False, dtype=rules_dtypes)
    # Checking genes table headers
    for x in ["scaffoldId", "locusId", "sysName", "desc", "begin", "end"]:
        if x not in genes_df.columns:
            raise Exception(f"Genes table must include header {x}")
    # Checking exps table
    for x in ["SetName", "Index", "Date_pool_expt_started", "Description"]:
        if x not in exps_df.columns:
            raise Exception(f"Experiments table must include header {x}")
    if okControls:
        for x in ["control_group", "control_bool"]:
            if x not in exps_df.columns:
                raise Exception("If okControls is set To True, then "
                                f"experiments table must include header {x}")
    # Checking all_df
    for x in ["scaffold", "locusId", "f", "pos"]:
        if x not in all_df.columns:
            raise Exception(f"All.PoolCount file must include header {x}")
    if dbg_lvl > 1:
        print(genes_df)
        print(all_df)
        print(exps_df)
        print(rules_df)
    return [genes_df, all_df, exps_df, rules_df]
def checkLocusIdEquality(all_df, genes_df, debug_bool=False):
    """ Check that every locusId used in all_df is present in genes_df.

    Args:
        all_df (pandas DataFrame): must contain column 'locusId'.
        genes_df (pandas DataFrame): must contain column 'locusId'.
        debug_bool (bool): when True, log the unique locusId arrays and
            their counts.

    Raises:
        Exception: listing the offending ids, if any locusId in all_df is
            missing from genes_df.

    Bug fix: the original raise used adjacent string literals with
    ``"', '".join(...)`` as the LAST literal, so the whole message became
    the join *separator* and was never concatenated onto the text; the
    message now lists the missing ids properly.  Membership is also tested
    against a set (O(1)) instead of scanning the array per id.
    """
    if debug_bool:
        logging.debug("Original locusId col")
        logging.debug(all_df['locusId'])
    # below both are numpy arrays of the distinct non-null ids
    unique_all_locusIds = all_df['locusId'].dropna().unique()
    unique_genes_locusIds = genes_df['locusId'].dropna().unique()
    if debug_bool:
        # All
        logging.debug("Unique All Locus Ids: ")
        logging.debug(unique_all_locusIds)
        logging.debug("Number of Unique All Locus Ids: ")
        logging.debug(len(unique_all_locusIds))
        # Genes
        logging.debug("Unique Gene Locus Ids: ")
        logging.debug(unique_genes_locusIds)
        logging.debug("Number of Unique Gene Locus Ids: ")
        logging.debug(len(unique_genes_locusIds))
    # Checking if every locusId from all.poolcount also exists in genes
    genes_locusId_set = set(unique_genes_locusIds)
    not_found_locusIds = [x for x in unique_all_locusIds
                          if x not in genes_locusId_set]
    if len(not_found_locusIds) > 0:
        raise Exception("The following locusIds were not found in the genes file."
                        " (All locusIds from all.poolcount must also be in the genes"
                        " file.): '" + "', '".join(not_found_locusIds) + "'")
def check_exps_df_against_all_df(exps_df, all_df, meta_ix):
    """
    Verify the experiment names in all_df match the experiments dataframe.

    Args:
        exps_df (pandas DataFrame): must contain columns 'SetName', 'Index'.
        all_df (pandas DataFrame): the all.poolcount table; data columns
            start at position meta_ix.
        meta_ix (int): number of leading metadata columns in all_df.

    Raises:
        Exception: if the number of data columns differs from the number
            of experiment rows, or a data column name is not some row's
            'SetName.Index'.

    Bug fix: the original error f-strings referenced ``all_fp``/``exps_fp``,
    which are not defined in this function, so a detected mismatch raised
    NameError instead of the intended message; the files are now named
    literally.
    """
    experiment_names_test = [exps_df['SetName'].iat[i] + "." + exps_df['Index'].iat[i] for i in \
                            range(len(exps_df['SetName']))]
    # Data column names of all.poolcount (everything after the metadata).
    index_names = list(all_df.head())[meta_ix:]
    # Number of rows:
    if len(index_names) != exps_df.shape[0]:
        raise Exception("Number of data columns in all.poolcount does not match"
                        " number of rows in exps\n"
                        f"{len(index_names)} != {exps_df.shape[0]}")
    for i in range(len(index_names)):
        if index_names[i] not in experiment_names_test:
            raise Exception("Column names in all.poolcount do not match names from"
                            f" exps at index {i}")
    logging.debug("There are the same experiment names in all_df and exps_df.")
def prepare_set_names(exps_df, all_df, rules_df,
                      okControls=False, meta_ix=7, debug_bool=False):
    """
    Normalize experiment/set names across exps_df and all_df.

    Args:
        exps_df (pandas DataFrame): experiments table; must contain
            'SetName', 'Index', 'Description', 'Group',
            'Date_pool_expt_started' (and 'control_bool'/'control_group'
            when okControls is True).
        all_df (pandas DataFrame): all.poolcount table; data columns start
            at position meta_ix and are named 'SetName.Index'.
        rules_df (pandas DataFrame): columns V1 -> V2 replacement rules
            used to build the 'short' descriptions.
        okControls (bool): when True, Time0 labeling and t0 groups are
            taken from the 'control_bool'/'control_group' columns.
        meta_ix (int): number of metadata columns in all_df.
        debug_bool (bool): when True, log intermediate values.

    Returns:
        (exps_df, all_df, replace_col_d): the updated tables plus the dict
        mapping each original all_df column name to its new short name.

    Description:
        We replace the SetNames with a simplified standard one, remove the
        period between SetName and Index in the all.poolcount columns, and
        make exps_df's new 'name' column agree with those columns (e.g.
        Keio_ML9_set2.IT004 -> set2IT004).  We also add the columns 'num',
        'short' and 't0set' (date + set name), forcing short == 'Time0'
        for every Time0 experiment.

    Maintenance fixes (behavior unchanged):
        * ``Series.iteritems()`` (removed in pandas 2.0) replaced with
          ``Series.items()``, matching the usage elsewhere in this module.
        * chained assignments ``df[col].loc[ix] = v`` replaced with
          ``df.loc[ix, col] = v`` to write reliably on the frame itself.
    """
    # Below is a numpy array, not a series
    uniqueSetNames_nparray = exps_df['SetName'].unique()
    # shortSetNames is a numpy ndarray, shortNamesTranslation_d maps
    # original names to short names.
    shortSetNames, shortNamesTranslation_d = ShortSetNames(uniqueSetNames_nparray)
    if debug_bool:
        logging.debug("uniqueSetNames:")
        logging.debug(uniqueSetNames_nparray)
        logging.debug("shortSetNames")
        logging.debug(shortSetNames)
        logging.debug("Above 2 arrays should be the same length.")
    # match_list holds, for each row's SetName, its index within the
    # unique-name array; applying it to shortSetNames rebuilds the full
    # per-row sequence of short names.
    match_list = match_ix(list(exps_df['SetName']), list(uniqueSetNames_nparray))
    short_names_srs = shortSetNames[match_list]
    if debug_bool:
        logging.info("short_names_srs: (shortSetNames[match_list])")
        logging.info(short_names_srs)
        logging.info("original set Names:")
        logging.info(exps_df['SetName'])
        logging.info('match_list')
        logging.info(match_list)
    # expNamesNew ends up being a list<str> of 'shortSetName + Index'.
    expNamesNew = []
    for i in range(len(short_names_srs)):
        if not short_names_srs[i] in [None, np.nan]:
            expNamesNew.append(short_names_srs[i] + exps_df['Index'][i])
        else:
            expNamesNew.append(exps_df['Index'][i])
    if debug_bool:
        logging.info('expNamesNew:')
        logging.info(expNamesNew)
    exps_df['num'] = range(1, exps_df.shape[0] + 1)
    # We replace certain strings with others using the 'rules' table.
    exps_df['short'] = applyRules(rules_df, list(exps_df['Description']))
    if okControls:
        if not "control_bool" in exps_df.columns:
            raise Exception("Using manual control label but no column "
                            "'control_bool' in Experiments file!")
        else:
            for ix, val in exps_df["control_bool"].items():
                if val.strip().upper() == "TRUE":
                    exps_df.loc[ix, "short"] = "Time0"
                else:
                    # Should not be a Time0 short
                    if exps_df["short"].loc[ix].upper() == "TIME0":
                        raise Exception("Description of experiment indicates Time0, but"
                                        f" value in control_bool is not 'True', instead '{val}'.")
    if debug_bool:
        logging.info("exps_df of col 'short':")
        logging.info(exps_df['short'])
    # We remove the "." in the names of the values. Just SetNameIndex now
    replace_col_d = {list(all_df.head())[meta_ix + i]: expNamesNew[i] for i in range(len(expNamesNew))}
    if debug_bool:
        logging.info('replace_col_d')
        logging.info(replace_col_d)
        logging.info('original all_df col names:')
        logging.info(list(all_df.columns))
    all_df = all_df.rename(columns=replace_col_d)
    if debug_bool:
        logging.info('after replacement all_df col names:')
        logging.info(list(all_df.columns))
    exps_df['name'] = expNamesNew
    # updating short to include Groups with Time0
    num_time_zero = 0
    for ix, val in exps_df['Group'].items():
        if val.strip().upper() == "TIME0":
            num_time_zero += 1
            exps_df.loc[ix, 'short'] = "Time0"
    # Updating column 't0set' which refers to the date and SetName.
    # NOTE(review): .iat[ix] is positional, so this assumes a default
    # RangeIndex on exps_df - confirm upstream never reindexes.
    exps_df['t0set'] = [exps_df['Date_pool_expt_started'].iat[ix] + " " + \
                        val for ix, val in exps_df['SetName'].items()]
    if okControls:
        if not "control_group" in exps_df.columns:
            raise Exception("Using manual control label but no column "
                            "'control_group' in Experiments file!")
        else:
            for ix, val in exps_df["control_group"].items():
                exps_df.loc[ix, 't0set'] = val
    if debug_bool:
        logging.info('exps_df short: ')
        logging.info(exps_df['short'])
        logging.info('exps_df t0set: ')
        logging.info(exps_df['t0set'])
    logging.info(f"Total number of time zeros: {num_time_zero}")
    return exps_df, all_df, replace_col_d
def ShortSetNames(set_names_nparray, dbg_lvl=0):
    """ Shorten experiment set names to the canonical setX*/testX* form.

    Args:
        set_names_nparray (numpy.ndarray): array of str, unique set names
            from the exps file.
    Returns:
        (numpy.ndarray, dict): the edited names (input is not mutated),
        plus a dict mapping each original non-simple name to the short
        candidate name ('setA', 'setB', ...) assigned to it.

    This might convert
    [ Keio_ML9_set2, ..., Keio_ML9_set3 ] to [ set2, ..., set3 ];
    names containing no trailing set/test token receive fresh candidate
    names in order.
    """
    set_names_nparray = np.copy(set_names_nparray)
    # A name is 'simple' when it ends in set/test followed by digits/caps.
    simple = [bool(re.search(r"(set|test)[0-9A-Z]+[0-9A-Z0-9]*$", nm))
              for nm in set_names_nparray]
    if dbg_lvl > 0:
        if len(simple) > 0:
            logging.debug("simple names: \n" + ",".join(list([str(x) for x in simple])))
        else:
            logging.debug("No simple names found.")
    # Trim each simple name down to its trailing set/test token; count
    # how many names remain that need a fresh candidate.
    nleft = 0
    simple_set_names = []
    for ix, is_simple in enumerate(simple):
        if is_simple:
            shortened = re.sub("^.*(set|test)", "\\1", set_names_nparray[ix])
            set_names_nparray[ix] = shortened
            simple_set_names.append(shortened)
        else:
            nleft += 1
    if dbg_lvl > 0:
        logging.debug("fixed set_names:\n" + ",".join(list(set_names_nparray)))
    candidates = ["set" + letter for letter in
                  "A.B.C.D.E.F.G.H.I.J.K.L.M.N.O.P.Q.R.S.T.U.V.W.X.Y.Z".split(".")]
    if dbg_lvl > 0:
        logging.debug(candidates)
    # Only offer candidates not already taken by a simple name.
    candidates = [c for c in candidates if c not in simple_set_names]
    if (nleft > len(candidates)):
        raise Exception(f"Too many unexpected set names: {nleft}.\n To fix this, contact developer "
                        "and say to change the number of possible extensions in list candidates (A.B...Z).")
    # The non-simple values (only used for debug output).
    oldComplex = [nm for nm in set_names_nparray if nm not in simple_set_names]
    if dbg_lvl > 0:
        logging.debug("oldComplex:\n" + ",".join(oldComplex))
    # Assign candidate names to the non-simple entries, in order.
    translation_dict = {}
    cnd_ix = 0
    for ix, is_simple in enumerate(simple):
        if not is_simple:
            logging.info(f"Set {set_names_nparray[ix]} simplified to {candidates[cnd_ix]}")
            translation_dict[set_names_nparray[ix]] = candidates[cnd_ix]
            set_names_nparray[ix] = candidates[cnd_ix]
            cnd_ix += 1
    # Any name now appearing more than once is a fatal inconsistency.
    remaining_unique = list(pd.Series(set_names_nparray).unique())
    repeats = []
    for nm in list(set_names_nparray):
        if nm in remaining_unique:
            remaining_unique.remove(nm)
        else:
            repeats.append(nm)
    if not (len(repeats) == 0):
        raise Exception("Non-unique set names! :\n" + \
                        ", ".join(repeats))
    else:
        logging.debug("Finished running short set names")
    if dbg_lvl > 0:
        logging.debug("Final set names list: " + ", ".join(set_names_nparray))
    return set_names_nparray, translation_dict
def get_special_lists(data_dir, all_df, replace_col_d, debug_bool=False):
    """
    Args:
        data_dir (str): directory possibly containing the optional files
            'strainusage.genes.json' and 'ignore_list.json'.
        all_df (pandas DataFrame): unused here; kept for interface
            stability.
        replace_col_d (dict): maps original all_df experiment names to
            their renamed (short) versions.
        debug_bool (bool): unused here; kept for interface stability.
    Returns:
        genesUsed_list (list<str>): locusIds of genes to use ([] when the
            file is absent/unreadable).
        ignore_list (list<str>): renamed experiment names to ignore ([]
            when the file is absent/unreadable).
    Raises:
        Exception: if an entry of ignore_list.json is not a key of
            replace_col_d (i.e. not an all.poolcount column).

    Description: We get the lists from the files in data_dir if they are
        there, otherwise we return them as empty lists.

    Bug fix: the genes file was previously opened via the undefined name
    'GenesUsed_fp' (a NameError whenever the file existed); both files are
    now also read through context managers so the handles are closed.
    """
    genesUsed_list = []
    ignore_list = []
    # list of locusIds
    genesUsed_fp = os.path.join(data_dir, "strainusage.genes.json")
    # list of extra ignored experiments
    ignore_list_fp = os.path.join(data_dir, "ignore_list.json")
    if os.path.isfile(genesUsed_fp) and os.access(genesUsed_fp, os.R_OK):
        with open(genesUsed_fp) as f:
            genesUsed_list = json.loads(f.read())
        logging.info(f"Loaded {len(genesUsed_list)} genes to include in the "
                      "analysis\n")
    if os.path.isfile(ignore_list_fp) and os.access(ignore_list_fp, os.R_OK):
        with open(ignore_list_fp) as f:
            pre_ignore_list = json.loads(f.read())
        for x in pre_ignore_list:
            if x in replace_col_d:
                ignore_list.append(x)
            else:
                raise Exception(f"Avoid list contains experiment {x} but experiment name"
                                " not found in all.poolcount."
                                f" Possible names: {', '.join(list(replace_col_d.keys()))}")
        # Translate the original column names into their short names.
        ignore_list = [replace_col_d[x] for x in ignore_list]
    return genesUsed_list, ignore_list
def applyRules(rules_df, desc_str_list):
    """
    Apply every V1 -> V2 substitution rule to each description string.

    Args:
        rules_df (pandas DataFrame): columns V1 (text to replace) and
            V2 (replacement text).
        desc_str_list (list<str>): description strings to rewrite.
    Returns:
        list<str>: one rewritten string per input, with the rules applied
        in table order.
    """
    replacements = list(zip(rules_df["V1"], rules_df["V2"]))
    rewritten = []
    for desc in desc_str_list:
        for old_txt, new_txt in replacements:
            desc = desc.replace(old_txt, new_txt)
        rewritten.append(desc)
    return rewritten
|
[
"logging.debug",
"numpy.copy",
"logging.info",
"os.path.isfile",
"pandas.Series",
"re.search",
"os.access",
"pandas.read_table",
"pandas.isna",
"os.path.join",
"os.listdir",
"re.sub"
] |
[((8321, 8341), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (8331, 8341), False, 'import os, logging, json, re\n'), ((8648, 8687), 'os.path.join', 'os.path.join', (['data_dir', '"""all.poolcount"""'], {}), "(data_dir, 'all.poolcount')\n", (8660, 8687), False, 'import os, logging, json, re\n'), ((8703, 8734), 'os.path.join', 'os.path.join', (['data_dir', '"""genes"""'], {}), "(data_dir, 'genes')\n", (8715, 8734), False, 'import os, logging, json, re\n'), ((8749, 8779), 'os.path.join', 'os.path.join', (['data_dir', '"""exps"""'], {}), "(data_dir, 'exps')\n", (8761, 8779), False, 'import os, logging, json, re\n'), ((8801, 8847), 'os.path.join', 'os.path.join', (['FEBA_dir', '"""desc_short_rules.tsv"""'], {}), "(FEBA_dir, 'desc_short_rules.tsv')\n", (8813, 8847), False, 'import os, logging, json, re\n'), ((9482, 9525), 'pandas.read_table', 'pd.read_table', (['genes_fp'], {'dtype': 'genes_dtypes'}), '(genes_fp, dtype=genes_dtypes)\n', (9495, 9525), True, 'import pandas as pd\n'), ((9814, 9853), 'pandas.read_table', 'pd.read_table', (['all_fp'], {'dtype': 'all_dtypes'}), '(all_fp, dtype=all_dtypes)\n', (9827, 9853), True, 'import pandas as pd\n'), ((10210, 10251), 'pandas.read_table', 'pd.read_table', (['exps_fp'], {'dtype': 'exps_dtypes'}), '(exps_fp, dtype=exps_dtypes)\n', (10223, 10251), True, 'import pandas as pd\n'), ((11528, 11600), 'pandas.read_table', 'pd.read_table', (['short_rules_fp'], {'keep_default_na': '(False)', 'dtype': 'rules_dtypes'}), '(short_rules_fp, keep_default_na=False, dtype=rules_dtypes)\n', (11541, 11600), True, 'import pandas as pd\n'), ((15136, 15211), 'logging.debug', 'logging.debug', (['"""There are the same experiment names in all_df and exps_df."""'], {}), "('There are the same experiment names in all_df and exps_df.')\n", (15149, 15211), False, 'import os, logging, json, re\n'), ((21318, 21344), 'numpy.copy', 'np.copy', (['set_names_nparray'], {}), '(set_names_nparray)\n', (21325, 21344), True, 'import numpy as 
np\n'), ((24942, 24990), 'os.path.join', 'os.path.join', (['data_dir', '"""strainusage.genes.json"""'], {}), "(data_dir, 'strainusage.genes.json')\n", (24954, 24990), False, 'import os, logging, json, re\n'), ((25052, 25094), 'os.path.join', 'os.path.join', (['data_dir', '"""ignore_list.json"""'], {}), "(data_dir, 'ignore_list.json')\n", (25064, 25094), False, 'import os, logging, json, re\n'), ((12964, 13001), 'logging.debug', 'logging.debug', (['"""Original locusId col"""'], {}), "('Original locusId col')\n", (12977, 13001), False, 'import os, logging, json, re\n'), ((13010, 13042), 'logging.debug', 'logging.debug', (["all_df['locusId']"], {}), "(all_df['locusId'])\n", (13023, 13042), False, 'import os, logging, json, re\n'), ((13249, 13288), 'logging.debug', 'logging.debug', (['"""Unique All Locus Ids: """'], {}), "('Unique All Locus Ids: ')\n", (13262, 13288), False, 'import os, logging, json, re\n'), ((13297, 13331), 'logging.debug', 'logging.debug', (['unique_all_locusIds'], {}), '(unique_all_locusIds)\n', (13310, 13331), False, 'import os, logging, json, re\n'), ((13340, 13389), 'logging.debug', 'logging.debug', (['"""Number of Unique All Locus Ids: """'], {}), "('Number of Unique All Locus Ids: ')\n", (13353, 13389), False, 'import os, logging, json, re\n'), ((13462, 13502), 'logging.debug', 'logging.debug', (['"""Unique Gene Locus Ids: """'], {}), "('Unique Gene Locus Ids: ')\n", (13475, 13502), False, 'import os, logging, json, re\n'), ((13511, 13547), 'logging.debug', 'logging.debug', (['unique_genes_locusIds'], {}), '(unique_genes_locusIds)\n', (13524, 13547), False, 'import os, logging, json, re\n'), ((13556, 13606), 'logging.debug', 'logging.debug', (['"""Number of Unique Gene Locus Ids: """'], {}), "('Number of Unique Gene Locus Ids: ')\n", (13569, 13606), False, 'import os, logging, json, re\n'), ((16443, 16475), 'logging.debug', 'logging.debug', (['"""uniqueSetNames:"""'], {}), "('uniqueSetNames:')\n", (16456, 16475), False, 'import os, logging, 
json, re\n'), ((16484, 16521), 'logging.debug', 'logging.debug', (['uniqueSetNames_nparray'], {}), '(uniqueSetNames_nparray)\n', (16497, 16521), False, 'import os, logging, json, re\n'), ((16530, 16560), 'logging.debug', 'logging.debug', (['"""shortSetNames"""'], {}), "('shortSetNames')\n", (16543, 16560), False, 'import os, logging, json, re\n'), ((16569, 16597), 'logging.debug', 'logging.debug', (['shortSetNames'], {}), '(shortSetNames)\n', (16582, 16597), False, 'import os, logging, json, re\n'), ((16606, 16664), 'logging.debug', 'logging.debug', (['"""Above 2 arrays should be the same length."""'], {}), "('Above 2 arrays should be the same length.')\n", (16619, 16664), False, 'import os, logging, json, re\n'), ((17241, 17301), 'logging.info', 'logging.info', (['"""short_names_srs: (shortSetNames[match_list])"""'], {}), "('short_names_srs: (shortSetNames[match_list])')\n", (17253, 17301), False, 'import os, logging, json, re\n'), ((17310, 17339), 'logging.info', 'logging.info', (['short_names_srs'], {}), '(short_names_srs)\n', (17322, 17339), False, 'import os, logging, json, re\n'), ((17348, 17383), 'logging.info', 'logging.info', (['"""original set Names:"""'], {}), "('original set Names:')\n", (17360, 17383), False, 'import os, logging, json, re\n'), ((17392, 17424), 'logging.info', 'logging.info', (["exps_df['SetName']"], {}), "(exps_df['SetName'])\n", (17404, 17424), False, 'import os, logging, json, re\n'), ((17433, 17459), 'logging.info', 'logging.info', (['"""match_list"""'], {}), "('match_list')\n", (17445, 17459), False, 'import os, logging, json, re\n'), ((17468, 17492), 'logging.info', 'logging.info', (['match_list'], {}), '(match_list)\n', (17480, 17492), False, 'import os, logging, json, re\n'), ((17989, 18017), 'logging.info', 'logging.info', (['"""expNamesNew:"""'], {}), "('expNamesNew:')\n", (18001, 18017), False, 'import os, logging, json, re\n'), ((18026, 18051), 'logging.info', 'logging.info', (['expNamesNew'], {}), '(expNamesNew)\n', (18038, 
18051), False, 'import os, logging, json, re\n'), ((19002, 19041), 'logging.info', 'logging.info', (['"""exps_df of col \'short\':"""'], {}), '("exps_df of col \'short\':")\n', (19014, 19041), False, 'import os, logging, json, re\n'), ((19050, 19080), 'logging.info', 'logging.info', (["exps_df['short']"], {}), "(exps_df['short'])\n", (19062, 19080), False, 'import os, logging, json, re\n'), ((19287, 19316), 'logging.info', 'logging.info', (['"""replace_col_d"""'], {}), "('replace_col_d')\n", (19299, 19316), False, 'import os, logging, json, re\n'), ((19325, 19352), 'logging.info', 'logging.info', (['replace_col_d'], {}), '(replace_col_d)\n', (19337, 19352), False, 'import os, logging, json, re\n'), ((19361, 19403), 'logging.info', 'logging.info', (['"""original all_df col names:"""'], {}), "('original all_df col names:')\n", (19373, 19403), False, 'import os, logging, json, re\n'), ((19524, 19575), 'logging.info', 'logging.info', (['"""after replacement all_df col names:"""'], {}), "('after replacement all_df col names:')\n", (19536, 19575), False, 'import os, logging, json, re\n'), ((20480, 20511), 'logging.info', 'logging.info', (['"""exps_df short: """'], {}), "('exps_df short: ')\n", (20492, 20511), False, 'import os, logging, json, re\n'), ((20520, 20550), 'logging.info', 'logging.info', (["exps_df['short']"], {}), "(exps_df['short'])\n", (20532, 20550), False, 'import os, logging, json, re\n'), ((20559, 20590), 'logging.info', 'logging.info', (['"""exps_df t0set: """'], {}), "('exps_df t0set: ')\n", (20571, 20590), False, 'import os, logging, json, re\n'), ((20599, 20629), 'logging.info', 'logging.info', (["exps_df['t0set']"], {}), "(exps_df['t0set'])\n", (20611, 20629), False, 'import os, logging, json, re\n'), ((20638, 20698), 'logging.info', 'logging.info', (['f"""Total number of time zeros: {num_time_zero}"""'], {}), "(f'Total number of time zeros: {num_time_zero}')\n", (20650, 20698), False, 'import os, logging, json, re\n'), ((22542, 22567), 
'logging.debug', 'logging.debug', (['candidates'], {}), '(candidates)\n', (22555, 22567), False, 'import os, logging, json, re\n'), ((23893, 23942), 'logging.debug', 'logging.debug', (['"""Finished running short set names"""'], {}), "('Finished running short set names')\n", (23906, 23942), False, 'import os, logging, json, re\n'), ((25103, 25131), 'os.path.isfile', 'os.path.isfile', (['genesUsed_fp'], {}), '(genesUsed_fp)\n', (25117, 25131), False, 'import os, logging, json, re\n'), ((25136, 25168), 'os.access', 'os.access', (['genesUsed_fp', 'os.R_OK'], {}), '(genesUsed_fp, os.R_OK)\n', (25145, 25168), False, 'import os, logging, json, re\n'), ((25357, 25387), 'os.path.isfile', 'os.path.isfile', (['ignore_list_fp'], {}), '(ignore_list_fp)\n', (25371, 25387), False, 'import os, logging, json, re\n'), ((25392, 25426), 'os.access', 'os.access', (['ignore_list_fp', 'os.R_OK'], {}), '(ignore_list_fp, os.R_OK)\n', (25401, 25426), False, 'import os, logging, json, re\n'), ((8940, 8961), 'os.access', 'os.access', (['x', 'os.R_OK'], {}), '(x, os.R_OK)\n', (8949, 8961), False, 'import os, logging, json, re\n'), ((21500, 21548), 're.search', 're.search', (['"""(set|test)[0-9A-Z]+[0-9A-Z0-9]*$"""', 'x'], {}), "('(set|test)[0-9A-Z]+[0-9A-Z0-9]*$', x)\n", (21509, 21548), False, 'import os, logging, json, re\n'), ((21743, 21782), 'logging.debug', 'logging.debug', (['"""No simple names found."""'], {}), "('No simple names found.')\n", (21756, 21782), False, 'import os, logging, json, re\n'), ((22082, 22134), 're.sub', 're.sub', (['"""^.*(set|test)"""', '"""\\\\1"""', 'set_names_nparray[i]'], {}), "('^.*(set|test)', '\\\\1', set_names_nparray[i])\n", (22088, 22134), False, 'import os, logging, json, re\n'), ((23291, 23369), 'logging.info', 'logging.info', (['f"""Set {set_names_nparray[i]} simplified to {candidates[cnd_ix]}"""'], {}), "(f'Set {set_names_nparray[i]} simplified to {candidates[cnd_ix]}')\n", (23303, 23369), False, 'import os, logging, json, re\n'), ((10453, 10467), 
'pandas.isna', 'pd.isna', (['value'], {}), '(value)\n', (10460, 10467), True, 'import pandas as pd\n'), ((23547, 23575), 'pandas.Series', 'pd.Series', (['set_names_nparray'], {}), '(set_names_nparray)\n', (23556, 23575), True, 'import pandas as pd\n')]
|
import matplotlib
matplotlib.rcParams['savefig.dpi'] = 600
# see https://stackoverflow.com/a/46262952 (for norm symbol)
# and https://stackoverflow.com/a/23856968
matplotlib.rcParams['text.usetex'] = True
# NOTE: matplotlib >= 3.3 deprecates (and 3.5 removes) list values for
# 'text.latex.preamble' -- it must be a single string.  Joining with
# newlines is also accepted by older versions.
matplotlib.rcParams['text.latex.preamble'] = '\n'.join([
    r'\usepackage{amsmath}',
    r'\usepackage{amsfonts}',
    r'\usepackage{amssymb}'])
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
|
[
"matplotlib.pyplot.rc"
] |
[((373, 400), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (379, 400), True, 'import matplotlib.pyplot as plt\n'), ((401, 431), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (407, 431), True, 'import matplotlib.pyplot as plt\n')]
|
import nagisa
from core.models import Music
class Nagisa():
    """Tokenizer wrapper that treats an idol's song titles as single words
    and can extract the ids of titles mentioned in free text."""

    def __init__(self, idol):
        # Fetch (id, identifications_title) pairs once; the queryset is
        # reused for both the tokenizer dictionary and title lookups.
        self.musics = Music.objects.filter(artist=idol).values("id", "identifications_title")
        self.overide_nagisa = nagisa.Tagger(
            single_word_list=list(self.musics.values_list("identifications_title", flat=True))
        )

    def extract(self, text):
        # Keep only nouns (名詞) and English words (英単語).
        return self.overide_nagisa.extract(text, extract_postags=["名詞", "英単語"])

    def hit_title_ids(self, text):
        # TODO: a bit crude, but good enough for now...
        text = text.replace("moon light", "moonlight")
        text = text.replace("in the Dark", "inthedark")
        token = self.extract(text)
        # Build a title -> id map from the queryset already fetched in
        # __init__ instead of issuing one Music.objects.get() query per
        # matched word (avoids N+1 database round trips).
        title_to_id = {m["identifications_title"]: m["id"] for m in self.musics}
        return [title_to_id[word] for word in token.words if word in title_to_id]
|
[
"core.models.Music.objects.get",
"core.models.Music.objects.filter"
] |
[((117, 150), 'core.models.Music.objects.filter', 'Music.objects.filter', ([], {'artist': 'idol'}), '(artist=idol)\n', (137, 150), False, 'from core.models import Music\n'), ((893, 938), 'core.models.Music.objects.get', 'Music.objects.get', ([], {'identifications_title': 'word'}), '(identifications_title=word)\n', (910, 938), False, 'from core.models import Music\n')]
|
import re
def count(word):
    """Estimate the number of syllables in *word* with a simple heuristic.

    Non-string input is coerced with str(). Words of length <= 3 count as
    one syllable; otherwise a trailing silent 'e'/'es' (not preceded by a
    vowel or 'l') is stripped, a leading 'y' is ignored, and each run of
    1-2 vowels counts as one syllable.
    """
    word = str(word).lower()  # str() is a no-op for str input
    if len(word) <= 3:
        return 1
    word = re.sub(r'(?:[^laeiouy]es|[^laeiouy]e)$', '', word)  # removed ed|
    word = re.sub(r'^y', '', word)
    matches = re.findall(r'[aeiouy]{1,2}', word)
    return len(matches)
|
[
"re.findall",
"re.sub"
] |
[((205, 254), 're.sub', 're.sub', (['"""(?:[^laeiouy]es|[^laeiouy]e)$"""', '""""""', 'word'], {}), "('(?:[^laeiouy]es|[^laeiouy]e)$', '', word)\n", (211, 254), False, 'import re\n'), ((280, 302), 're.sub', 're.sub', (['"""^y"""', '""""""', 'word'], {}), "('^y', '', word)\n", (286, 302), False, 'import re\n'), ((317, 350), 're.findall', 're.findall', (['"""[aeiouy]{1,2}"""', 'word'], {}), "('[aeiouy]{1,2}', word)\n", (327, 350), False, 'import re\n')]
|
import asyncio
from schedule import Scheduler
from janusbackup.logger import logger
from janusbackup.worker.jobs import BaseJob
class TestJob(BaseJob):
    # Disabled by default; presumably the worker only registers jobs with
    # is_active=True -- confirm against the job-loading code.
    is_active = False
    @staticmethod
    async def _job(*args, **kwargs):
        # Minimal smoke-test job body: just log a message.
        logger.debug("Hello world for TestJob")
    @classmethod
    def set_schedule_job(cls, scheduler: Scheduler, loop: asyncio.BaseEventLoop, *args, **kwargs):
        # Schedule the job to run every 5 seconds on the given event loop.
        # NOTE(review): cls.get_schedule_job() is *called* here, so its
        # return value (defined in BaseJob, not visible in this file) must
        # itself be the callable that `schedule` invokes -- verify it is
        # not meant to be passed uncalled.
        scheduler.every(5).seconds.do(cls.get_schedule_job(), loop=loop, *args, **kwargs)
|
[
"janusbackup.logger.logger.debug"
] |
[((242, 281), 'janusbackup.logger.logger.debug', 'logger.debug', (['"""Hello world for TestJob"""'], {}), "('Hello world for TestJob')\n", (254, 281), False, 'from janusbackup.logger import logger\n')]
|
import json
import requests
from bs4 import BeautifulSoup
def updatePlaces(key):
    '''Scrape launch locations from the schedule pages, geocode any new
    ones, and persist the {location: coordinates} cache to places.txt.

    key : Google Maps Geocoding API key, forwarded to geocode().
    Returns the updated places dict.
    '''
    link = "http://www.spaceflightinsider.com/launch-schedule/"
    places = {}
    for url in [link, link + "?past=1"]:  # upcoming and past launches
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'lxml')
        for tag in soup.select("table.launchcalendar"):
            result = {}
            details = tag.find(class_="launchdetails").find_all("tr")
            for detail in details:
                result[detail.th.string.lower()] = detail.td.get_text()
            # The last whitespace-separated token is the pad; drop it.
            place = result['location'].split(' ')
            result['location'] = ' '.join(place[:-1])
            # Only call the geocoding API for unseen locations.
            # (dict.get(key, default) evaluates its default eagerly, so the
            # previous version issued an API request for every launch even
            # when the location was already cached.)
            if result['location'] not in places:
                places[result['location']] = geocode(result['location'], key)
    with open('places.txt', 'w') as fout:
        json.dump(places, fout)
    return places
def getLaunches(past=False):
    '''Scrape the Space Flight Insider launch schedule and return a list
    of dicts, one per launch (mission, description, image, location, pad
    and -- when the location is cached in places.txt -- lat/long).

    past : bool
        When True, fetch the past-launches page instead of upcoming ones.
    '''
    url = "http://www.spaceflightinsider.com/launch-schedule/"
    if past:
        url += "?past=1"
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'lxml')
    launches = []
    places = {}
    with open('places.txt') as fin:
        # location -> coordinates cache written by updatePlaces()
        places = json.load(fin)
    for tag in soup.select("table.launchcalendar"):
        result = {}
        # Each <tr> of the details table is a (header, value) pair.
        details = tag.find(class_="launchdetails").find_all("tr")
        for detail in details:
            result[detail.th.string.lower()] = detail.td.get_text()
        # Vehicle image URL is embedded in an inline CSS style attribute;
        # the [-3] slice presumably strips the trailing "');" of the
        # url(...) wrapper -- verify against the page markup.
        style = tag.find(class_='vehicle').div['style']
        index = style.index("http")
        result['image'] = style[index:-3]
        result['mission'] = tag.find(colspan='2').get_text()
        result['description'] = tag.find(class_='description').p.get_text()
        # The last whitespace-separated token of the location is the pad.
        place = result['location'].split(' ')
        result['location'] = ' '.join(place[:-1])
        result['pad'] = place[-1]
        coordinates = places.get(result['location'], None)
        if coordinates:
            result['long'] = coordinates.get('lng', None)
            result['lat'] = coordinates.get('lat', None)
        launches.append(result)
    return launches
def geocode(address, key):
    '''Convert an address string to lat-long coordinates via the Google
    Geocoding API.

    Returns a dict like {'lat': ..., 'lng': ...} rounded to 7 decimal
    places, or {} when the address cannot be resolved.
    '''
    from urllib.parse import quote_plus  # stdlib, local to this helper
    # quote_plus escapes every URL-unsafe character (&, #, non-ASCII, ...)
    # and turns spaces into '+'; the old str.replace only handled spaces.
    url = f"https://maps.googleapis.com/maps/api/geocode/json?key={key}&address={quote_plus(address)}"
    response = requests.get(url).json()
    if not response['results']:
        print(f'geocode: no results for address {address!r}')
        return {}
    coordinates = response['results'][0]['geometry']['location']
    for k, v in coordinates.items():
        coordinates[k] = round(v, 7)
    return coordinates
if __name__ == '__main__':
    from pprint import pprint
    # Uncomment to refresh the cached coordinates (requires an API key):
    #print('Please enter your Google API key:')
    #key = input()
    #updatePlaces(key)
    launches = getLaunches()
    for l in launches:
        pprint(l['mission'])
        pprint(l['location'])
        # 'lat' is only present when the location was found in the
        # places.txt cache, so use .get() to avoid a KeyError.
        pprint(l.get('lat'))
        print()
|
[
"json.dump",
"json.load",
"pprint.pprint",
"requests.get",
"bs4.BeautifulSoup"
] |
[((1163, 1180), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1175, 1180), False, 'import requests\n'), ((1192, 1224), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.text', '"""lxml"""'], {}), "(page.text, 'lxml')\n", (1205, 1224), False, 'from bs4 import BeautifulSoup\n'), ((280, 297), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (292, 297), False, 'import requests\n'), ((313, 345), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.text', '"""lxml"""'], {}), "(page.text, 'lxml')\n", (326, 345), False, 'from bs4 import BeautifulSoup\n'), ((1312, 1326), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1321, 1326), False, 'import json\n'), ((2922, 2942), 'pprint.pprint', 'pprint', (["l['mission']"], {}), "(l['mission'])\n", (2928, 2942), False, 'from pprint import pprint\n'), ((2951, 2972), 'pprint.pprint', 'pprint', (["l['location']"], {}), "(l['location'])\n", (2957, 2972), False, 'from pprint import pprint\n'), ((2981, 2997), 'pprint.pprint', 'pprint', (["l['lat']"], {}), "(l['lat'])\n", (2987, 2997), False, 'from pprint import pprint\n'), ((913, 936), 'json.dump', 'json.dump', (['places', 'fout'], {}), '(places, fout)\n', (922, 936), False, 'import json\n'), ((2454, 2471), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2466, 2471), False, 'import requests\n')]
|
"""
Threading tests.
"""
from multiprocessing.pool import ThreadPool
from tempfile import NamedTemporaryFile
from microcosm.api import create_object_graph
from microcosm.loaders import load_from_dict
from hamcrest import assert_that, contains
from microcosm_sqlite.context import SessionContext
from microcosm_sqlite.stores import GetOrCreateSession
from microcosm_sqlite.tests.fixtures import Example, Person, PersonStore
def test_threading():
    """Concurrent reads via GetOrCreateSession return the stored rows."""
    with NamedTemporaryFile() as db_file:
        loader = load_from_dict(
            sqlite={
                "paths": {
                    "example": db_file.name,
                },
            },
        )
        graph = create_object_graph("example", testing=True, loader=loader)
        store = PersonStore()
        Person.recreate_all(graph)
        # Seed two rows inside a single committed session.
        with SessionContext(graph, Example) as context:
            washington = store.create(
                Person(id=1, first="George", last="Washington"),
            )
            jefferson = store.create(
                Person(id=2, first="Thomas", last="Jefferson"),
            )
            context.commit()
        # Fetch each person from a separate worker thread.
        workers = ThreadPool(2)
        store.get_session = GetOrCreateSession(graph)
        fetched = workers.map(lambda position: store.search()[position], range(2))
        assert_that(fetched, contains(washington, jefferson))
|
[
"tempfile.NamedTemporaryFile",
"microcosm_sqlite.tests.fixtures.Person.recreate_all",
"microcosm_sqlite.context.SessionContext",
"multiprocessing.pool.ThreadPool",
"microcosm.api.create_object_graph",
"microcosm_sqlite.tests.fixtures.Person",
"microcosm_sqlite.stores.GetOrCreateSession",
"hamcrest.contains",
"microcosm_sqlite.tests.fixtures.PersonStore"
] |
[((459, 479), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (477, 479), False, 'from tempfile import NamedTemporaryFile\n'), ((682, 741), 'microcosm.api.create_object_graph', 'create_object_graph', (['"""example"""'], {'testing': '(True)', 'loader': 'loader'}), "('example', testing=True, loader=loader)\n", (701, 741), False, 'from microcosm.api import create_object_graph\n'), ((758, 771), 'microcosm_sqlite.tests.fixtures.PersonStore', 'PersonStore', ([], {}), '()\n', (769, 771), False, 'from microcosm_sqlite.tests.fixtures import Example, Person, PersonStore\n'), ((781, 807), 'microcosm_sqlite.tests.fixtures.Person.recreate_all', 'Person.recreate_all', (['graph'], {}), '(graph)\n', (800, 807), False, 'from microcosm_sqlite.tests.fixtures import Example, Person, PersonStore\n'), ((1129, 1142), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['(2)'], {}), '(2)\n', (1139, 1142), False, 'from multiprocessing.pool import ThreadPool\n'), ((1172, 1197), 'microcosm_sqlite.stores.GetOrCreateSession', 'GetOrCreateSession', (['graph'], {}), '(graph)\n', (1190, 1197), False, 'from microcosm_sqlite.stores import GetOrCreateSession\n'), ((822, 852), 'microcosm_sqlite.context.SessionContext', 'SessionContext', (['graph', 'Example'], {}), '(graph, Example)\n', (836, 852), False, 'from microcosm_sqlite.context import SessionContext\n'), ((1300, 1316), 'hamcrest.contains', 'contains', (['gw', 'tj'], {}), '(gw, tj)\n', (1308, 1316), False, 'from hamcrest import assert_that, contains\n'), ((912, 959), 'microcosm_sqlite.tests.fixtures.Person', 'Person', ([], {'id': '(1)', 'first': '"""George"""', 'last': '"""Washington"""'}), "(id=1, first='George', last='Washington')\n", (918, 959), False, 'from microcosm_sqlite.tests.fixtures import Example, Person, PersonStore\n'), ((1022, 1068), 'microcosm_sqlite.tests.fixtures.Person', 'Person', ([], {'id': '(2)', 'first': '"""Thomas"""', 'last': '"""Jefferson"""'}), "(id=2, first='Thomas', last='Jefferson')\n", (1028, 
1068), False, 'from microcosm_sqlite.tests.fixtures import Example, Person, PersonStore\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 herrlich10
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys, shlex, time
import subprocess, multiprocessing, ctypes
import numpy as np
__author__ = 'herrlich10 <<EMAIL>>'
__version__ = '0.1.3'
# Python 2/3 compatibility: a tuple of "any string type" for isinstance()
# checks (basestring only exists on Python 2).
if sys.version_info[0] == 3:
    string_types = (str,)
else:
    string_types = (basestring,)
def cmd_for_exec(cmd, cmd_kws):
    '''
    Normalize cmd into the form subprocess expects, based on shell mode.

    With shell=True a single command string is required, so a list is
    joined with spaces.  Without shell, an argument list is required, so
    a string is split with shlex (quoted substrings are preserved).
    Callables are returned untouched.
    '''
    if callable(cmd):
        return cmd
    if cmd_kws.get('shell', False):
        # Shell mode: one command string is required.
        if not isinstance(cmd, string_types):
            cmd = ' '.join(cmd)
    elif isinstance(cmd, string_types):
        # No shell: an argv list is required.
        cmd = shlex.split(cmd)
    return cmd
def cmd_for_disp(cmd):
    '''Return a printable one-line representation of a command.'''
    return ' '.join(cmd) if isinstance(cmd, list) else cmd
def format_duration(duration, format='standard'):
    '''
    Render a duration (in seconds) as a human friendly string.

    format : 'short' gives d/h/m/s suffixes, 'long' gives full words,
        anything else gives abbreviated units (day/hr/min/sec).
    Leading zero-valued units are omitted; the seconds field is always
    shown with millisecond precision.
    '''
    if format == 'short':
        units = ['d', 'h', 'm', 's']
    elif format == 'long':
        units = [' days', ' hours', ' minutes', ' seconds']
    else:
        units = [' day', ' hr', ' min', ' sec']
    values = [int(duration // 86400), int(duration % 86400 // 3600),
              int(duration % 3600 // 60), duration % 60]
    # Index of the first positive value (fall back to the seconds field).
    start = next((i for i, v in enumerate(values) if v > 0), len(values) - 1)
    last = len(values) - 1
    parts = []
    for k in range(start, len(values)):
        spec = '%.3f' if k == last else '%d'
        parts.append(spec % values[k] + units[k])
    return ' '.join(parts)
class PooledCaller(object):
    '''
    Execute multiple command line programs, as well as python callables,
    asynchronously and parallelly across a pool of processes.
    '''
    def __init__(self, pool_size=None):
        # Default pool size: 3/4 of the logical CPU count.
        if pool_size is None:
            self.pool_size = multiprocessing.cpu_count() * 3 // 4
        else:
            self.pool_size = pool_size
        self.ps = []             # currently running workers
        self.cmd_queue = []      # queued (idx, cmd, args, kwargs) jobs
        self._n_cmds = 0         # accumulated counter for generating cmd idx
        self._pid2idx = {}       # worker pid -> job idx
        self._return_codes = []  # (job idx, return code) pairs

    def check_call(self, cmd, *args, **kwargs):
        '''
        Asynchronous check_call (queued execution, return immediately).
        See subprocess.Popen() for more information about the arguments.
        Multiple commands can be separated with ";" and executed sequentially
        within a single subprocess in linux/mac, only if shell=True.
        Python callable can also be executed in parallel via multiprocessing.
        Note that only the return code of the child process will be retrieved
        later when calling wait(), not the actual return value of the callable.
        So the result of the computation needs to be saved in a file.

        Parameters
        ----------
        cmd : list, str, or callable
            Computation in command line programs is handled with subprocess.
            Computation in python callable is handled with multiprocessing.
        shell : bool
            If provided, must be a keyword argument.
            If shell is True, the command will be executed through the shell.
        *args, **kwargs :
            If cmd is a callable, *args and **kwargs are passed to the callable as its arguments.
            If cmd is a list or str, **kwargs are passed to subprocess.Popen().
        '''
        cmd = cmd_for_exec(cmd, kwargs)
        self.cmd_queue.append((self._n_cmds, cmd, args, kwargs))
        self._n_cmds += 1

    def dispatch(self):
        # Start queued jobs while there are free slots in the pool.
        while len(self.ps) < self.pool_size and len(self.cmd_queue) > 0:
            idx, cmd, args, kwargs = self.cmd_queue.pop(0)
            print('>> job {0}: {1}'.format(idx, cmd_for_disp(cmd)))
            if callable(cmd):
                p = multiprocessing.Process(target=cmd, args=args, kwargs=kwargs)
                p.start()
            else:
                p = subprocess.Popen(cmd, **kwargs)
            self.ps.append(p)
            self._pid2idx[p.pid] = idx

    def wait(self):
        '''
        Wait for all jobs in the queue to finish.

        Returns
        -------
        codes : list
            The return code of the child process for each job.
        '''
        self._start_time = time.time()
        while len(self.ps) > 0 or len(self.cmd_queue) > 0:
            # Dispatch jobs if possible
            self.dispatch()
            # Poll workers' state. Iterate over a *copy* of self.ps:
            # removing an element from the list being iterated skips the
            # next element (the original code had this bug).
            for p in self.ps[:]:
                if isinstance(p, subprocess.Popen) and p.poll() is not None:
                    # Terminated subprocess
                    self._return_codes.append((self._pid2idx[p.pid], p.returncode))
                    self.ps.remove(p)
                elif isinstance(p, multiprocessing.Process) and not p.is_alive():
                    # Terminated python process
                    self._return_codes.append((self._pid2idx[p.pid], p.exitcode))
                    self.ps.remove(p)
            time.sleep(0.1)
        codes = [code for idx, code in sorted(self._return_codes)]
        duration = time.time() - self._start_time
        print('>> All {0} jobs done in {1}.'.format(self._n_cmds, format_duration(duration)))
        if np.any(codes):
            # The original printed the raw template '{0}' because it passed
            # codes as a second print() argument instead of formatting.
            print('returncode: {0}'.format(codes))
        else:
            print('all returncodes are 0.')
        # Reset state so the pool can be reused for another batch.
        self._n_cmds = 0
        self._pid2idx = {}
        self._return_codes = []
        return codes
class ArrayWrapper(type):
    '''
    This is the metaclass for classes that wrap an np.ndarray and delegate
    non-reimplemented operators (among other magic functions) to the wrapped array.
    '''
    def __init__(cls, name, bases, dct):
        def make_descriptor(name):
            # Read-only property forwarding the attribute to the wrapped
            # array stored in self.arr.
            return property(lambda self: getattr(self.arr, name))
        type.__init__(cls, name, bases, dct)
        # Core object-protocol slots that must never be delegated.
        ignore = 'class mro new init setattr getattr getattribute'
        ignore = set('__{0}__'.format(name) for name in ignore.split())
        # Forward every other dunder of np.ndarray that the class does not
        # define itself.  (Dunders are looked up on the type, so a plain
        # __getattr__ on the instance would not cover them.)
        for name in dir(np.ndarray):
            if name.startswith('__'):
                if name not in ignore and name not in dct:
                    setattr(cls, name, make_descriptor(name))
class SharedMemoryArray(object, metaclass=ArrayWrapper):
    '''
    This class can be used as a usual np.ndarray, but its data buffer
    is allocated in shared memory (under Cached Files in memory monitor),
    and can be passed across processes without any data copy/duplication,
    even when write access happens (which is lock-synchronized).

    The idea is to allocate memory using multiprocessing.Array, and
    access it from current or another process via a numpy.ndarray view,
    without actually copying the data.
    So it is both convenient and efficient when used with multiprocessing.

    This implementation also demonstrates the power of composition + metaclass,
    as opposed to the canonical multiple inheritance.
    '''
    def __init__(self, dtype, shape, initializer=None, lock=True):
        # dtype: anything np.dtype accepts; must map to a ctypes type via
        # dtype2ctypes below.  shape: the ndarray shape of the view.
        self.dtype = np.dtype(dtype)
        self.shape = shape
        if initializer is None:
            # Preallocate memory using multiprocessing is the preferred usage
            self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], int(np.prod(self.shape)), lock=lock)
        else:
            self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], initializer, lock=lock)
        # Build a numpy view over the shared buffer (no copy).  With
        # lock=True, multiprocessing.Array returns a synchronized wrapper
        # whose raw ctypes buffer is behind get_obj().
        if not lock:
            self.arr = np.frombuffer(self.shared_arr, dtype=self.dtype).reshape(self.shape)
        else:
            self.arr = np.frombuffer(self.shared_arr.get_obj(), dtype=self.dtype).reshape(self.shape)

    @classmethod
    def zeros(cls, shape, dtype=float, lock=True):
        '''
        Return a new array of given shape and dtype, filled with zeros.
        This is the preferred usage, which avoids holding two copies of the
        potentially very large data simultaneously in the memory.
        '''
        return cls(dtype, shape, lock=lock)

    @classmethod
    def from_array(cls, arr, lock=True):
        '''
        Initialize a new shared-memory array with an existing array.
        '''
        # return cls(arr.dtype, arr.shape, arr.ravel(), lock=lock) # Slow and memory inefficient, why?
        a = cls.zeros(arr.shape, dtype=arr.dtype, lock=lock)
        a[:] = arr # This is a more efficient way of initialization
        return a

    def __getattr__(self, attr):
        # Expose the lock methods of the synchronized wrapper; everything
        # else falls through to the wrapped ndarray.
        if attr in ['acquire', 'release']:
            return getattr(self.shared_arr, attr)
        else:
            return getattr(self.arr, attr)

    def __dir__(self):
        return list(self.__dict__.keys()) + ['acquire', 'release'] + dir(self.arr)

    # At present, only numerical dtypes are supported.
    # Maps numpy dtypes (and plain python types) to the ctypes type code
    # that multiprocessing.Array expects.
    dtype2ctypes = {
        bool: ctypes.c_bool,
        int: ctypes.c_long,
        float: ctypes.c_double,
        np.dtype('bool'): ctypes.c_bool,
        np.dtype('int64'): ctypes.c_long,
        np.dtype('int32'): ctypes.c_int,
        np.dtype('int16'): ctypes.c_short,
        np.dtype('int8'): ctypes.c_byte,
        np.dtype('uint64'): ctypes.c_ulong,
        np.dtype('uint32'): ctypes.c_uint,
        np.dtype('uint16'): ctypes.c_ushort,
        np.dtype('uint8'): ctypes.c_ubyte,
        np.dtype('float64'): ctypes.c_double,
        np.dtype('float32'): ctypes.c_float,
    }
|
[
"subprocess.Popen",
"multiprocessing.Array",
"numpy.frombuffer",
"numpy.dtype",
"shlex.split",
"time.sleep",
"numpy.any",
"time.time",
"multiprocessing.Process",
"numpy.prod",
"multiprocessing.cpu_count"
] |
[((5724, 5735), 'time.time', 'time.time', ([], {}), '()\n', (5733, 5735), False, 'import sys, shlex, time\n'), ((6640, 6653), 'numpy.any', 'np.any', (['codes'], {}), '(codes)\n', (6646, 6653), True, 'import numpy as np\n'), ((8430, 8445), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (8438, 8445), True, 'import numpy as np\n'), ((10272, 10288), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (10280, 10288), True, 'import numpy as np\n'), ((10313, 10330), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (10321, 10330), True, 'import numpy as np\n'), ((10355, 10372), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (10363, 10372), True, 'import numpy as np\n'), ((10396, 10413), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (10404, 10413), True, 'import numpy as np\n'), ((10439, 10455), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (10447, 10455), True, 'import numpy as np\n'), ((10480, 10498), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (10488, 10498), True, 'import numpy as np\n'), ((10524, 10542), 'numpy.dtype', 'np.dtype', (['"""uint32"""'], {}), "('uint32')\n", (10532, 10542), True, 'import numpy as np\n'), ((10567, 10585), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (10575, 10585), True, 'import numpy as np\n'), ((10612, 10629), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (10620, 10629), True, 'import numpy as np\n'), ((10655, 10674), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (10663, 10674), True, 'import numpy as np\n'), ((10701, 10720), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (10709, 10720), True, 'import numpy as np\n'), ((6402, 6417), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6412, 6417), False, 'import sys, shlex, time\n'), ((6504, 6515), 'time.time', 'time.time', ([], {}), '()\n', (6513, 6515), False, 'import sys, shlex, 
time\n'), ((8747, 8823), 'multiprocessing.Array', 'multiprocessing.Array', (['self.dtype2ctypes[self.dtype]', 'initializer'], {'lock': 'lock'}), '(self.dtype2ctypes[self.dtype], initializer, lock=lock)\n', (8768, 8823), False, 'import subprocess, multiprocessing, ctypes\n'), ((2030, 2046), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (2041, 2046), False, 'import sys, shlex, time\n'), ((5250, 5311), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'cmd', 'args': 'args', 'kwargs': 'kwargs'}), '(target=cmd, args=args, kwargs=kwargs)\n', (5273, 5311), False, 'import subprocess, multiprocessing, ctypes\n'), ((5376, 5407), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {}), '(cmd, **kwargs)\n', (5392, 5407), False, 'import subprocess, multiprocessing, ctypes\n'), ((3260, 3287), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3285, 3287), False, 'import subprocess, multiprocessing, ctypes\n'), ((8670, 8689), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (8677, 8689), True, 'import numpy as np\n'), ((8868, 8916), 'numpy.frombuffer', 'np.frombuffer', (['self.shared_arr'], {'dtype': 'self.dtype'}), '(self.shared_arr, dtype=self.dtype)\n', (8881, 8916), True, 'import numpy as np\n')]
|
import requests
import time
from bs4 import BeautifulSoup
from pymongo import MongoClient
from selenium import webdriver
client = MongoClient('localhost', 27017)  # local MongoDB server, default port
db = client.dbnews  # database whose 'headline' collection is filled by crawler_daum_news
def crawler_daum_news(date):
    """Crawl Daum's popular-news ranking page and store each article in MongoDB.

    Drops any existing `dbnews` database first so the collection always holds a
    fresh snapshot, scrapes the ranking list with requests/BeautifulSoup, then
    loads each article page in Selenium to read the reaction counters.

    :param date: date string such as '20210101'; empty string fetches today's ranking
    """
    db_list = client.list_database_names()
    if 'dbnews' in db_list:
        print('db 최신 뉴스로 새로고침')
        client.drop_database('dbnews')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
    }
    url = 'https://news.daum.net/ranking/popular'
    # BUG FIX: the original guard was `if url:`, which is always true for a
    # non-empty literal; only append the regDate parameter when a date is given.
    if date:
        url += '?regDate=' + date
    data = requests.get(url, headers=headers)
    soup = BeautifulSoup(data.text, 'html.parser')
    date = soup.select_one('.box_calendar > .screen_out').text
    news_list = soup.select('.list_news2 > li')
    options = webdriver.ChromeOptions()
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument("user-agent=Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko")
    driver = webdriver.Chrome(options=options)
    try:
        for news in news_list:
            rank = news.select_one('.rank_num.rank_popular > .wrap_num > .num_rank >.screen_out').text
            detail_url = news.select_one('.cont_thumb > .tit_thumb > a')['href']
            if news.select_one('a > img') is None:
                img_url = ''
            else:
                img_url = news.select_one('a > img')['src']
            title = news.select_one('.cont_thumb > .tit_thumb > a').text
            info_news = news.select_one('.cont_thumb > .tit_thumb > .info_news').text
            detail_content = news.select_one('.cont_thumb > .desc_thumb > .link_txt').text.strip()
            driver.get(detail_url)
            time.sleep(0.5)  # allow the article page to render (network dependent)
            soup2 = BeautifulSoup(driver.page_source, 'html.parser')
            emoticon_list = soup2.select_one('.list-wrapper')
            selects = emoticon_list.select('.count')
            count_list = [int(s.text) for s in selects]
            # ROBUSTNESS: pad missing reaction counters with 0 so the fixed
            # indexing below cannot raise IndexError on layout changes.
            while len(count_list) < 5:
                count_list.append(0)
            doc = {
                'rank': rank,
                'info_news': info_news,
                'title': title,
                'detail_content': detail_content,
                'date': date,
                'detail_url': detail_url,
                'img_url': img_url,
                'nr_RECOMMEND': count_list[0],
                'nr_LIKE': count_list[1],
                'nr_IMPRESS': count_list[2],
                'nr_ANGRY': count_list[3],
                'nr_SAD': count_list[4],
            }
            db.headline.insert_one(doc)
            print(rank, info_news, title, detail_content, detail_url, img_url, count_list)
    finally:
        # Always release the browser, even if scraping a page fails.
        driver.quit()
# Script entry: run the crawler for today's ranking (empty date means the
# regDate filter is omitted).
date = ''
crawler_daum_news(date)
|
[
"pymongo.MongoClient",
"time.sleep",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"requests.get",
"bs4.BeautifulSoup"
] |
[((131, 162), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (142, 162), False, 'from pymongo import MongoClient\n'), ((623, 657), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (635, 657), False, 'import requests\n'), ((669, 708), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data.text', '"""html.parser"""'], {}), "(data.text, 'html.parser')\n", (682, 708), False, 'from bs4 import BeautifulSoup\n'), ((835, 860), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (858, 860), False, 'from selenium import webdriver\n'), ((1075, 1108), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (1091, 1108), False, 'from selenium import webdriver\n'), ((1741, 1756), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1751, 1756), False, 'import time\n'), ((1791, 1839), 'bs4.BeautifulSoup', 'BeautifulSoup', (['driver.page_source', '"""html.parser"""'], {}), "(driver.page_source, 'html.parser')\n", (1804, 1839), False, 'from bs4 import BeautifulSoup\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-06 04:40
from __future__ import unicode_literals
from django.db import migrations
def create_locations(apps, schema_editor):
    """Seed Spain: the country, its 17 autonomous communities and 50 provinces.

    Region codes follow ISO 3166-2:ES; province zip codes are the official
    two-digit postal prefixes. Uses the historical model state supplied by
    the migration framework (``apps``), never the live models.
    """
    country_model = apps.get_model("postal_code_api", "Country")
    region_model = apps.get_model("postal_code_api", "Region")
    province_model = apps.get_model("postal_code_api", "Province")
    country,_ = country_model.objects.get_or_create(
        name = u'España',
        country_code= 'ES',
    )
    #https://es.wikipedia.org/wiki/ISO_3166-2:ES
    regions = {
        'ES-AN' : u'Andalucía',
        'ES-AR' : u'Aragón',
        'ES-AS' : u'Asturias',
        'ES-CN' : u'Canarias',
        'ES-CB' : u'Cantabria',
        'ES-CM' : u'Castilla La Mancha',
        'ES-CL' : u'Castilla y León',
        'ES-CT' : u'Catalunya',
        'ES-EX' : u'Extremadura',
        'ES-GA' : u'Galicia',
        'ES-IB' : u'Illes Balears',
        'ES-RI' : u'La Rioja',
        'ES-MD' : u'Comunidad de Madrid',
        'ES-MC' : u'Región de Murcia',
        'ES-NC' : u'Navarra',
        'ES-PV' : u'País Vasco',
        'ES-VC' : u'Comunidad Valenciana',
    }
    # official province code -> [name, region code]
    provinces = {
        'C' : [ u'A Coruña', 'ES-GA'],
        'VI' : [ u'Álava', 'ES-PV'],
        'AB' : [ u'Albacete', 'ES-CM'],
        'A' : [ u'Alicante', 'ES-VC'],
        'AL' : [ u'Almería', 'ES-AN'],
        'O' : [ u'Asturias', 'ES-AS'],
        'AV' : [ u'Ávila', 'ES-CL'],
        'BA' : [ u'Badajoz', 'ES-EX'],
        'IB' : [ u'Balears', 'ES-IB'],
        'B' : [ u'Barcelona', 'ES-CT'],
        'BI' : [ u'Vizcaya', 'ES-PV'],
        'BU' : [ u'Burgos', 'ES-CL'],
        'CC' : [ u'Cáceres', 'ES-EX'],
        'CA' : [ u'Cádiz', 'ES-AN'],
        'S' : [ u'Cantabria', 'ES-CB'],
        'CS' : [ u'Castellón', 'ES-VC'],
        'CR' : [ u'Ciudad Real', 'ES-CM'],
        'CO' : [ u'Córdoba', 'ES-AN'],
        'CU' : [ u'Cuenca', 'ES-CM'],
        'SS' : [ u'Gipuzcoa', 'ES-PV'],
        'GI' : [ u'Girona', 'ES-CT'],
        'GR' : [ u'Granada', 'ES-AN'],
        'GU' : [ u'Guadalajara', 'ES-CM'],
        'H' : [ u'Huelva', 'ES-AN'],
        'HU' : [ u'Huesca', 'ES-AR'],
        'J' : [ u'Jaén', 'ES-AN'],
        'LO' : [ u'La Rioja', 'ES-RI'],
        'GC' : [ u'Las Palmas', 'ES-CN'],
        'LE' : [ u'León', 'ES-CL'],
        'L' : [ u'Lleida', 'ES-CT'],
        'LU' : [ u'Lugo', 'ES-GA'],
        'M' : [ u'Madrid', 'ES-MD'],
        'MA' : [ u'Málaga', 'ES-AN'],
        'MU' : [ u'Murcia', 'ES-MC'],
        'NA' : [ u'Navarra', 'ES-NC'],
        'OR' : [ u'Ourense', 'ES-GA'],
        'P' : [ u'Palencia', 'ES-CL'],
        'PO' : [ u'Pontevedra', 'ES-GA'],
        'SA' : [ u'Salamanca', 'ES-CL'],
        'TF' : [ u'Santa Cruz de Tenerife', 'ES-CN'],
        'SG' : [ u'Segovia', 'ES-CL'],
        'SE' : [ u'Sevilla', 'ES-AN'],
        'SO' : [ u'Soria', 'ES-CL'],
        'T' : [ u'Tarragona', 'ES-CT'],
        'TE' : [ u'Teruel', 'ES-AR'],
        'TO' : [ u'Toledo', 'ES-CM'],
        'V' : [ u'Valencia', 'ES-VC'],
        'VA' : [ u'Valladolid', 'ES-CL'],
        'ZA' : [ u'Zamora', 'ES-CL'],
        'Z' : [ u'Zaragoza', 'ES-AR'],
    }
    #https://es.wikipedia.org/wiki/Anexo:Provincias_de_Espa%C3%B1a_por_c%C3%B3digo_postal
    official_code__zip_code = {
        'VI' : '01',
        'AB' : '02',
        'A' : '03',
        'AL' : '04',
        'AV' : '05',
        'BA' : '06',
        'IB' : '07',
        'B' : '08',
        'BU' : '09',
        'CC' : '10',
        'CA' : '11',
        'CS' : '12',
        'CR' : '13',
        'CO' : '14',
        'C' : '15',
        'CU' : '16',
        'GI' : '17',
        'GR' : '18',
        'GU' : '19',
        'SS' : '20',
        'H' : '21',
        'HU' : '22',
        'J' : '23',
        'LE' : '24',
        'L' : '25',
        'LO' : '26',
        'LU' : '27',
        'M' : '28',
        'MA' : '29',
        'MU' : '30',
        'NA' : '31',
        'OR' : '32',
        'O' : '33',
        'P' : '34',
        'GC' : '35',
        'PO' : '36',
        'SA' : '37',
        'TF' : '38',
        'S' : '39',
        'SG' : '40',
        'SE' : '41',
        'SO' : '42',
        'T' : '43',
        'TE' : '44',
        'TO' : '45',
        'V' : '46',
        'VA' : '47',
        'BI' : '48',
        'ZA' : '49',
        'Z' : '50',
    }
    region_code__region = {}
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; .items() behaves identically on both versions.
    for region_code,name in regions.items():
        region,_ = region_model.objects.get_or_create(
            country = country,
            name = name,
            region_code = region_code
        )
        region_code__region[region_code] = region
    for official_code, name__region_code in provinces.items():
        name,region_code = name__region_code
        region = region_code__region[region_code]
        zip_code = official_code__zip_code[official_code]
        province,_ = province_model.objects.get_or_create(
            country = country,
            region = region,
            name = name,
            official_code = official_code,
            zip_code = zip_code
        )
def delete_locations(apps, schema_editor):
    """Reverse migration: delete all countries (regions/provinces cascade)."""
    apps.get_model("postal_code_api", "Country").objects.all().delete()
class Migration(migrations.Migration):
    # Data migration seeding Spanish locations; reversible via delete_locations.
    dependencies = [
        ('postal_code_api', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(create_locations,delete_locations),
    ]
|
[
"django.db.migrations.RunPython"
] |
[((6217, 6273), 'django.db.migrations.RunPython', 'migrations.RunPython', (['create_locations', 'delete_locations'], {}), '(create_locations, delete_locations)\n', (6237, 6273), False, 'from django.db import migrations\n')]
|
from typing import List
from fractions import Fraction
from abc import ABC, abstractmethod
import spacy
import string
import random
import pandas as pd
import numpy as np
import diskcache
import sys
from somajo import SoMaJo
from spacy.lang.tr import Turkish
from spacy.lang.sv import Swedish
from spacy.lang.uk import Ukrainian
# Fallback blank spaCy language classes for languages without a pretrained
# pipeline; instantiated by get_model() when spacy.load raises OSError.
NO_MODEL_LANGUAGE_LOOKUP = {
    "turkish": Turkish,
    "swedish": Swedish,
    "ukrainian": Ukrainian,
}
def noise(text, insert_chance, delete_chance, repeat_chance):
    """Randomly insert, delete and repeat characters in *text*.

    Per character, three independent uniform draws decide deletion, repetition
    and insertion; an event fires when its draw is *below* the corresponding
    chance. All three chances must currently be equal.
    """
    assert insert_chance == delete_chance == repeat_chance
    chances = np.random.random(len(text) * 3)
    # Fast path: no event fires anywhere.
    # BUG FIX: the original tested (chances < insert_chance).all(), which
    # returned the text unmodified exactly when *every* position would have
    # been noised; "no event" means every draw is >= the threshold.
    if (chances >= insert_chance).all():
        return text
    out = ""
    for i, char in enumerate(text):
        if chances[i * 3] >= delete_chance:  # keep (i.e. not deleted)
            out += char
        if chances[(i * 3) + 1] < repeat_chance:
            out += char
        if chances[(i * 3) + 2] < insert_chance:
            out += random.choice(string.ascii_letters)
    return out
def get_model(name):
    """Load a spaCy pipeline by name, disabling tagger/parser/NER.

    Falls back to the matching blank language class when no pretrained
    package is installed for *name*.
    """
    try:
        return spacy.load(name, disable=["tagger", "parser", "ner"])
    except OSError:
        return NO_MODEL_LANGUAGE_LOOKUP[name]()
def has_space(text: str) -> bool:
    """Return True if *text* contains at least one whitespace character."""
    return any(map(str.isspace, text))
class Tokenizer(ABC):
    """Abstract base for tokenizers with a train/eval mode toggle."""

    def __init__(self):
        # Training mode may enable stochastic augmentation in subclasses.
        self.training = True

    def train(self, mode=True):
        """Switch training mode on (default) or off."""
        self.training = mode

    def eval(self):
        """Shortcut for train(False)."""
        self.train(mode=False)

    @abstractmethod
    def tokenize(self, text: str) -> List[str]:
        """Split *text* into a list of tokens."""
        pass
def remove_last_punct(text: str, punctuation) -> str:
    """Remove the final punctuation character of *text*, if the only
    characters after it are whitespace; otherwise return *text* unchanged."""
    for idx in reversed(range(len(text))):
        ch = text[idx]
        if ch in punctuation:
            return text[:idx] + text[idx + 1 :]
        if not ch.isspace():
            break
    return text
class SpacySentenceTokenizer(Tokenizer):
    """Sentence tokenizer built on a spaCy pipeline's sentencizer.

    In training mode it stochastically removes end punctuation and lowercases
    sentence-initial tokens so a downstream model sees noisier boundaries.
    """
    def __init__(
        self,
        model_name: str,
        lower_start_prob: Fraction,
        remove_end_punct_prob: Fraction,
        punctuation: str,
    ):
        super().__init__()
        self.nlp = get_model(model_name)
        self.nlp.add_pipe("sentencizer")
        self.lower_start_prob = lower_start_prob
        self.remove_end_punct_prob = remove_end_punct_prob
        self.punctuation = punctuation
    def tokenize(self, text: str) -> List[str]:
        out_sentences = []
        current_sentence = ""
        end_sentence = False
        for token in self.nlp(text):
            text = token.text
            whitespace = token.whitespace_
            if token.is_sent_start:
                # spaCy flags the first token of each sentence; the previous
                # sentence is flushed once a non-space token arrives.
                end_sentence = True
            if end_sentence and not text.isspace():
                # Augmentation: sometimes strip the trailing punctuation of
                # the sentence being flushed.
                if self.training and random.random() < self.remove_end_punct_prob:
                    current_sentence = remove_last_punct(current_sentence, self.punctuation)
                out_sentences.append(current_sentence)
                current_sentence = ""
                end_sentence = False
            if (
                self.training
                and len(current_sentence) == 0
                and random.random() < self.lower_start_prob
            ):
                # Augmentation: sometimes lowercase the sentence-initial token.
                text = text.lower()
            current_sentence += text + whitespace
        out_sentences.append(current_sentence)
        # Drop empty strings (e.g. the leading flush before the first token).
        return [x for x in out_sentences if len(x) > 0]
class SpacyWordTokenizer(Tokenizer):
    """Word tokenizer backed by a spaCy tokenizer; whitespace stays attached
    to the preceding word so the concatenation reproduces the input."""

    def __init__(self, model_name: str):
        super().__init__()
        self.tokenizer = get_model(model_name).tokenizer

    def tokenize(self, text: str) -> List[str]:
        words = []
        buffer = ""
        for tok in self.tokenizer(text):
            if not tok.text.isspace():
                # A new word starts: flush whatever was collected so far.
                words.append(buffer)
                buffer = ""
            buffer += tok.text + tok.whitespace_
        words.append(buffer)
        return [w for w in words if len(w) > 0]
class SoMaJoSentenceTokenizer(Tokenizer):
    """Sentence tokenizer backed by SoMaJo; reconstructs surface text from
    tokens using the SpaceAfter annotation, stripping only the final sentence."""

    def __init__(self, model_name: str):
        super().__init__()
        self.tokenizer = SoMaJo(model_name)

    def tokenize(self, text: str) -> List[str]:
        sentences = list(self.tokenizer.tokenize_text([text]))
        result = []
        last = len(sentences) - 1
        for idx, sentence in enumerate(sentences):
            parts = []
            for token in sentence:
                sep = "" if "SpaceAfter=No" in token.extra_info else " "
                parts.append(token.text + sep)
            joined = "".join(parts)
            if idx == last:
                # The final sentence must not carry trailing whitespace.
                joined = joined.rstrip()
            result.append(joined)
        return result
class SoMaJoWordTokenizer(Tokenizer):
    """Word tokenizer backed by SoMaJo (sentence splitting disabled)."""

    def __init__(self, model_name: str):
        super().__init__()
        self.tokenizer = SoMaJo(model_name, split_sentences=False)

    def tokenize(self, text: str) -> List[str]:
        tokens = next(self.tokenizer.tokenize_text([text]))
        out = []
        last = len(tokens) - 1
        for idx, token in enumerate(tokens):
            if "SpaceAfter=No" in token.extra_info or idx == last:
                sep = ""
            else:
                sep = " "
            # Occasionally sample extra spaces so the model learns to deal
            # with runs of whitespace.
            while random.random() < 0.05:
                sep += " "
            out.append(token.text + sep)
        return [w for w in out if len(w) > 0]
class WhitespaceTokenizer(Tokenizer):
    """Splits text into exactly two parts: content and trailing whitespace."""

    def tokenize(self, text: str) -> List[str]:
        # Scan backwards for the last non-space character; everything after
        # it is the trailing-whitespace token.
        for idx in reversed(range(len(text))):
            if not text[idx].isspace():
                return [text[: idx + 1], text[idx + 1 :]]
        # All-whitespace (or empty) input: keep it intact in the first slot.
        return [text, ""]
class SECOSCompoundTokenizer(Tokenizer):
    """Compound splitter backed by an external SECOS installation.

    Decompounding results are memoized both in-process (self.cache) and on
    disk (diskcache) because SECOS lookups are expensive.
    """
    def __init__(self, secos_path: str):
        super().__init__()
        sys.path.append(secos_path)
        import decompound_server
        # SECOS server arguments: model paths followed by tuning parameters —
        # values as configured upstream (TODO confirm against SECOS docs).
        self.decompound = decompound_server.make_decompounder(
            [
                "decompound_server.py",
                f"{secos_path}data/denews70M_trigram__candidates",
                f"{secos_path}data/denews70M_trigram__WordCount",
                "50",
                "3",
                "3",
                "5",
                "3",
                "upper",
                "0.01",
                "2020",
            ]
        )
        # Warm the in-memory cache from the persistent on-disk index.
        self.disk_cache = diskcache.Index("secos_cache")
        self.cache = {}
        for key in self.disk_cache:
            self.cache[key] = self.disk_cache[key]
    def tokenize(self, text: str) -> List[str]:
        if text.isspace():
            return [text]
        text_bytes = text.encode("utf-8")
        compounds = self.cache.get(text_bytes)
        if compounds is None:
            # SECOS expects a single word; upstream tokenizers must have
            # removed all whitespace already.
            assert not has_space(text), text
            compounds = self.decompound(text)
            if len(compounds) == 0:
                # SECOS found no split: fall back to the original word.
                compounds = text
            compound_bytes = compounds.encode("utf-8")
            self.disk_cache[text_bytes] = compound_bytes
            self.cache[text_bytes] = compound_bytes
        else:
            compounds = compounds.decode("utf-8")
        compounds = compounds.split()
        # Apply light character noise to each compound part.
        compounds = [noise(x, 0.001, 0.001, 0.001) for x in compounds]
        return compounds if len(compounds) > 0 else [noise(text, 0.001, 0.001, 0.001)]
class Labeler:
    """Turns text into per-byte boundary labels using a stack of tokenizers.

    Each tokenizer level splits the output of the previous one; the label for
    a byte marks every level whose token ends at that byte.
    """

    def __init__(self, tokenizers):
        self.tokenizers = tokenizers

    def _annotate(self, text: str, tok_index=0):
        # Recursively split *text* with each tokenizer in turn; every leaf
        # carries the set of tokenizer levels whose token ends there.
        if tok_index >= len(self.tokenizers):
            return [(text, set())]
        annotated = []
        for piece in self.tokenizers[tok_index].tokenize(text):
            annotated.extend(self._annotate(piece, tok_index=tok_index + 1))
            annotated[-1][1].add(tok_index)
        return annotated

    def _to_dense_label(self, annotations):
        # Flatten annotations into UTF-8 byte values plus one 0/1 row per
        # byte (one column per tokenizer level).
        input_bytes = []
        label = []
        n_levels = len(self.tokenizers)
        for token, annotation in annotations:
            encoded = token.encode("utf-8")
            input_bytes.extend(encoded)
            label.extend([0] * n_levels for _ in encoded)
            if label:
                # Mark the boundary levels on the token's final byte.
                for idx in annotation:
                    label[-1][idx] = 1
        return input_bytes, label

    def label(self, text):
        """Return (byte values, per-byte label rows) for *text*."""
        return self._to_dense_label(self._annotate(text))

    def visualize(self, text):
        """Pretty-print the label matrix, one column per input byte."""
        text, label = self.label(text)
        rows = [[byte, *cols] for byte, cols in zip(text, label)]
        names = [tok.__class__.__name__ for tok in self.tokenizers]
        df = pd.DataFrame(rows, columns=["byte", *names]).T
        df.columns = ["" for _ in range(len(df.columns))]
        with pd.option_context("display.max_columns", len(text)):
            print(df)
if __name__ == "__main__":
labeler = Labeler(
[
SpacySentenceTokenizer(
"de_core_news_sm", lower_start_prob=0.7, remove_end_punct_prob=0.7, punctuation=".?!"
),
SpacyWordTokenizer("de_core_news_sm"),
WhitespaceTokenizer(),
SECOSCompoundTokenizer("../../../Experiments/SECOS/"),
]
)
labeler.visualize("KNN (ANN).")
|
[
"sys.path.append",
"pandas.DataFrame",
"somajo.SoMaJo",
"decompound_server.make_decompounder",
"diskcache.Index",
"random.choice",
"spacy.load",
"random.random"
] |
[((1025, 1078), 'spacy.load', 'spacy.load', (['name'], {'disable': "['tagger', 'parser', 'ner']"}), "(name, disable=['tagger', 'parser', 'ner'])\n", (1035, 1078), False, 'import spacy\n'), ((3994, 4012), 'somajo.SoMaJo', 'SoMaJo', (['model_name'], {}), '(model_name)\n', (4000, 4012), False, 'from somajo import SoMaJo\n'), ((4741, 4782), 'somajo.SoMaJo', 'SoMaJo', (['model_name'], {'split_sentences': '(False)'}), '(model_name, split_sentences=False)\n', (4747, 4782), False, 'from somajo import SoMaJo\n'), ((5855, 5882), 'sys.path.append', 'sys.path.append', (['secos_path'], {}), '(secos_path)\n', (5870, 5882), False, 'import sys\n'), ((5943, 6168), 'decompound_server.make_decompounder', 'decompound_server.make_decompounder', (["['decompound_server.py', f'{secos_path}data/denews70M_trigram__candidates',\n f'{secos_path}data/denews70M_trigram__WordCount', '50', '3', '3', '5',\n '3', 'upper', '0.01', '2020']"], {}), "(['decompound_server.py',\n f'{secos_path}data/denews70M_trigram__candidates',\n f'{secos_path}data/denews70M_trigram__WordCount', '50', '3', '3', '5',\n '3', 'upper', '0.01', '2020'])\n", (5978, 6168), False, 'import decompound_server\n'), ((6397, 6427), 'diskcache.Index', 'diskcache.Index', (['"""secos_cache"""'], {}), "('secos_cache')\n", (6412, 6427), False, 'import diskcache\n'), ((927, 962), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (940, 962), False, 'import random\n'), ((8556, 8647), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['byte', *[x.__class__.__name__ for x in self.tokenizers]]"}), "(data, columns=['byte', *[x.__class__.__name__ for x in self.\n tokenizers]])\n", (8568, 8647), True, 'import pandas as pd\n'), ((5232, 5247), 'random.random', 'random.random', ([], {}), '()\n', (5245, 5247), False, 'import random\n'), ((3019, 3034), 'random.random', 'random.random', ([], {}), '()\n', (3032, 3034), False, 'import random\n'), ((2633, 2648), 'random.random', 'random.random', ([], 
{}), '()\n', (2646, 2648), False, 'import random\n')]
|
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import date
from decimal import Decimal as D
import mock
import pytest
from gratipay.billing.payday import Payday
from gratipay.models.community import Community
from gratipay.models.participant import Participant
from gratipay.testing import Harness
class TestClosing(Harness):
    """Tests for closing a participant account: the close() call itself, the
    /settings/close page, and the clear_* / update_is_closed helpers."""
    # close -- Participant.close() and the close page
    def test_close_closes(self):
        alice = self.make_participant('alice', claimed_time='now')
        alice.close()
        assert Participant.from_username('alice').is_closed
    def test_close_fails_if_still_a_balance(self):
        alice = self.make_participant('alice', claimed_time='now', balance=D('10.00'))
        with pytest.raises(alice.BalanceIsNotZero):
            alice.close()
    def test_close_fails_if_still_owns_a_team(self):
        alice = self.make_participant('alice', claimed_time='now')
        self.make_team(owner=alice)
        with pytest.raises(alice.StillATeamOwner):
            alice.close()
    def test_close_page_is_usually_available(self):
        self.make_participant('alice', claimed_time='now')
        body = self.client.GET('/~alice/settings/close', auth_as='alice').body
        assert 'Personal Information' in body
    def test_close_page_is_not_available_during_payday(self):
        # Starting a payday run should block account closure.
        Payday.start()
        self.make_participant('alice', claimed_time='now')
        body = self.client.GET('/~alice/settings/close', auth_as='alice').body
        assert 'Personal Information' not in body
        assert 'Try Again Later' in body
    def test_can_post_to_close_page(self):
        self.make_participant('alice', claimed_time='now')
        response = self.client.PxST('/~alice/settings/close', auth_as='alice')
        assert response.code == 302
        assert response.headers['Location'] == '/~alice/'
        assert Participant.from_username('alice').is_closed
    def test_cant_post_to_close_page_during_payday(self):
        Payday.start()
        self.make_participant('alice', claimed_time='now')
        body = self.client.POST('/~alice/settings/close', auth_as='alice').body
        assert 'Try Again Later' in body
    def test_close_page_shows_a_message_to_team_owners(self):
        alice = self.make_participant('alice', claimed_time='now')
        self.make_team('A', alice)
        body = self.client.GET('/~alice/settings/close', auth_as='alice').body
        assert 'You are the owner of the A team.' in body
    def test_close_page_shows_a_message_to_owners_of_two_teams(self):
        alice = self.make_participant('alice', claimed_time='now')
        self.make_team('A', alice)
        self.make_team('B', alice)
        body = self.client.GET('/~alice/settings/close', auth_as='alice').body
        assert 'You are the owner of the A and B teams.' in body
    def test_close_page_shows_a_message_to_owners_of_three_teams(self):
        alice = self.make_participant('alice', claimed_time='now')
        self.make_team('A', alice)
        self.make_team('B', alice)
        self.make_team('C', alice)
        body = self.client.GET('/~alice/settings/close', auth_as='alice').body
        assert 'You are the owner of the A, B and C teams.' in body
    # cs - clear_subscriptions
    def test_cs_clears_subscriptions(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        alice.set_subscription_to(self.make_team(), D('1.00'))
        nsubscriptions = lambda: self.db.one("SELECT count(*) FROM current_subscriptions "
                                             "WHERE subscriber='alice' AND amount > 0")
        assert nsubscriptions() == 1
        with self.db.get_cursor() as cursor:
            alice.clear_subscriptions(cursor)
        assert nsubscriptions() == 0
    def test_cs_doesnt_duplicate_zero_subscriptions(self):
        alice = self.make_participant('alice', claimed_time='now')
        A = self.make_team()
        alice.set_subscription_to(A, D('1.00'))
        alice.set_subscription_to(A, D('0.00'))
        nsubscriptions = lambda: self.db.one("SELECT count(*) FROM subscriptions "
                                             "WHERE subscriber='alice'")
        assert nsubscriptions() == 2
        with self.db.get_cursor() as cursor:
            alice.clear_subscriptions(cursor)
        # No extra zeroing rows should be inserted.
        assert nsubscriptions() == 2
    def test_cs_doesnt_zero_when_theres_no_subscription(self):
        alice = self.make_participant('alice')
        nsubscriptions = lambda: self.db.one("SELECT count(*) FROM subscriptions "
                                             "WHERE subscriber='alice'")
        assert nsubscriptions() == 0
        with self.db.get_cursor() as cursor:
            alice.clear_subscriptions(cursor)
        assert nsubscriptions() == 0
    def test_cs_clears_multiple_subscriptions(self):
        alice = self.make_participant('alice', claimed_time='now')
        alice.set_subscription_to(self.make_team('A'), D('1.00'))
        alice.set_subscription_to(self.make_team('B'), D('1.00'))
        alice.set_subscription_to(self.make_team('C'), D('1.00'))
        alice.set_subscription_to(self.make_team('D'), D('1.00'))
        alice.set_subscription_to(self.make_team('E'), D('1.00'))
        nsubscriptions = lambda: self.db.one("SELECT count(*) FROM current_subscriptions "
                                             "WHERE subscriber='alice' AND amount > 0")
        assert nsubscriptions() == 5
        with self.db.get_cursor() as cursor:
            alice.clear_subscriptions(cursor)
        assert nsubscriptions() == 0
    # cpi - clear_personal_information
    @mock.patch.object(Participant, '_mailer')
    def test_cpi_clears_personal_information(self, mailer):
        alice = self.make_participant( 'alice'
                                     , anonymous_giving=True
                                     , anonymous_receiving=True
                                     , avatar_url='img-url'
                                     , email_address='<EMAIL>'
                                     , claimed_time='now'
                                     , session_token='<PASSWORD>'
                                     , session_expires='2000-01-01'
                                     , giving=20
                                     , receiving=40
                                     , npatrons=21
                                      )
        alice.upsert_statement('en', 'not forgetting to be awesome!')
        alice.add_email('<EMAIL>')
        with self.db.get_cursor() as cursor:
            alice.clear_personal_information(cursor)
        new_alice = Participant.from_username('alice')
        # Both the in-memory object and a fresh DB read must be cleared.
        assert alice.get_statement(['en']) == (None, None)
        assert alice.anonymous_giving == new_alice.anonymous_giving == False
        assert alice.anonymous_receiving == new_alice.anonymous_receiving == False
        assert alice.number == new_alice.number == 'singular'
        assert alice.avatar_url == new_alice.avatar_url == None
        assert alice.email_address == new_alice.email_address == None
        assert alice.claimed_time == new_alice.claimed_time == None
        assert alice.giving == new_alice.giving == 0
        assert alice.receiving == new_alice.receiving == 0
        assert alice.npatrons == new_alice.npatrons == 0
        assert alice.session_token == new_alice.session_token == None
        assert alice.session_expires.year == new_alice.session_expires.year == date.today().year
        assert not alice.get_emails()
        # Plural participants are reset to singular as well.
        team = self.make_participant('team', number='plural')
        with self.db.get_cursor() as cursor:
            team.clear_personal_information(cursor)
        team2 = Participant.from_username('team')
        assert team.number == team2.number == 'singular'
    def test_cpi_clears_communities(self):
        alice = self.make_participant('alice')
        alice.insert_into_communities(True, 'test', 'test')
        bob = self.make_participant('bob')
        bob.insert_into_communities(True, 'test', 'test')
        assert Community.from_slug('test').nmembers == 2  # sanity check
        with self.db.get_cursor() as cursor:
            alice.clear_personal_information(cursor)
        assert Community.from_slug('test').nmembers == 1
    # uic = update_is_closed
    def test_uic_updates_is_closed(self):
        alice = self.make_participant('alice')
        alice.update_is_closed(True)
        assert alice.is_closed
        assert Participant.from_username('alice').is_closed
    def test_uic_updates_is_closed_False(self):
        alice = self.make_participant('alice')
        alice.update_is_closed(True)
        alice.update_is_closed(False)
        assert not alice.is_closed
        assert not Participant.from_username('alice').is_closed
    def test_uic_uses_supplied_cursor(self):
        alice = self.make_participant('alice')
        with self.db.get_cursor() as cursor:
            alice.update_is_closed(True, cursor)
            # Inside the transaction: object updated, DB not yet committed.
            assert alice.is_closed
            assert not Participant.from_username('alice').is_closed
        assert Participant.from_username('alice').is_closed
|
[
"mock.patch.object",
"decimal.Decimal",
"datetime.date.today",
"pytest.raises",
"gratipay.models.participant.Participant.from_username",
"gratipay.models.community.Community.from_slug",
"gratipay.billing.payday.Payday.start"
] |
[((5644, 5685), 'mock.patch.object', 'mock.patch.object', (['Participant', '"""_mailer"""'], {}), "(Participant, '_mailer')\n", (5661, 5685), False, 'import mock\n'), ((1336, 1350), 'gratipay.billing.payday.Payday.start', 'Payday.start', ([], {}), '()\n', (1348, 1350), False, 'from gratipay.billing.payday import Payday\n'), ((1983, 1997), 'gratipay.billing.payday.Payday.start', 'Payday.start', ([], {}), '()\n', (1995, 1997), False, 'from gratipay.billing.payday import Payday\n'), ((6649, 6683), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""alice"""'], {}), "('alice')\n", (6674, 6683), False, 'from gratipay.models.participant import Participant\n'), ((7718, 7751), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""team"""'], {}), "('team')\n", (7743, 7751), False, 'from gratipay.models.participant import Participant\n'), ((532, 566), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""alice"""'], {}), "('alice')\n", (557, 566), False, 'from gratipay.models.participant import Participant\n'), ((729, 766), 'pytest.raises', 'pytest.raises', (['alice.BalanceIsNotZero'], {}), '(alice.BalanceIsNotZero)\n', (742, 766), False, 'import pytest\n'), ((964, 1000), 'pytest.raises', 'pytest.raises', (['alice.StillATeamOwner'], {}), '(alice.StillATeamOwner)\n', (977, 1000), False, 'import pytest\n'), ((1871, 1905), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""alice"""'], {}), "('alice')\n", (1896, 1905), False, 'from gratipay.models.participant import Participant\n'), ((3442, 3451), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (3443, 3451), True, 'from decimal import Decimal as D\n'), ((3990, 3999), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (3991, 3999), True, 'from decimal import Decimal as D\n'), ((4038, 4047), 'decimal.Decimal', 'D', (['"""0.00"""'], {}), "('0.00')\n", 
(4039, 4047), True, 'from decimal import Decimal as D\n'), ((4978, 4987), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (4979, 4987), True, 'from decimal import Decimal as D\n'), ((5044, 5053), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (5045, 5053), True, 'from decimal import Decimal as D\n'), ((5110, 5119), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (5111, 5119), True, 'from decimal import Decimal as D\n'), ((5176, 5185), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (5177, 5185), True, 'from decimal import Decimal as D\n'), ((5242, 5251), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (5243, 5251), True, 'from decimal import Decimal as D\n'), ((8497, 8531), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""alice"""'], {}), "('alice')\n", (8522, 8531), False, 'from gratipay.models.participant import Participant\n'), ((9119, 9153), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""alice"""'], {}), "('alice')\n", (9144, 9153), False, 'from gratipay.models.participant import Participant\n'), ((704, 714), 'decimal.Decimal', 'D', (['"""10.00"""'], {}), "('10.00')\n", (705, 714), True, 'from decimal import Decimal as D\n'), ((7486, 7498), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7496, 7498), False, 'from datetime import date\n'), ((8077, 8104), 'gratipay.models.community.Community.from_slug', 'Community.from_slug', (['"""test"""'], {}), "('test')\n", (8096, 8104), False, 'from gratipay.models.community import Community\n'), ((8250, 8277), 'gratipay.models.community.Community.from_slug', 'Community.from_slug', (['"""test"""'], {}), "('test')\n", (8269, 8277), False, 'from gratipay.models.community import Community\n'), ((8768, 8802), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""alice"""'], {}), "('alice')\n", (8793, 8802), False, 'from 
gratipay.models.participant import Participant\n'), ((9059, 9093), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['"""alice"""'], {}), "('alice')\n", (9084, 9093), False, 'from gratipay.models.participant import Participant\n')]
|
import numpy as np
from pommerman import constants
from pommerman.constants import Item
from util.data import calc_dist
def staying_alive_reward(nobs, agent_id):
    """
    Return a reward if the agent with the given id is alive.
    :param nobs: The game state
    :param agent_id: The agent to check
    :return: 1.0 while the agent is listed as alive, else 0.0
    """
    return 1.0 if agent_id in nobs[0]['alive'] else 0.0
def go_down_right_reward(nobs, high_pos, agent_num, act):
    """
    Return a reward for going to the low or right side of the board.
    Rewards are only given when a new row/column high-water mark is reached.
    :param nobs: The current observation
    :param high_pos: Tuple of lowest and most-right position reached so far
    :param agent_num: The id of the agent to check (0-3)
    :param act: The actions taken this step (indexed by agent)
    :return: (reward, updated high_pos)
    """
    # Placeholder bonus for laying a bomb (action 5); currently zero.
    bomb_bonus = 0.00 if act[agent_num] == 5 else 0
    row, col = nobs[agent_num]['position']
    best_row, best_col = high_pos
    if row > best_row:
        return 1 + bomb_bonus, (row, best_col)
    if col > best_col:
        return 1 + bomb_bonus, (best_row, col)
    return 0 + bomb_bonus, high_pos
def bomb_reward(nobs, act, agent_ind):
    """Reward the chosen action, scaled inversely by calc_dist's distance.

    Action 5 (presumably the bomb action — confirm against Pommerman
    constants) earns the largest reward, action 0 earns nothing, and any
    other action earns a small distance-scaled reward.
    """
    dist = calc_dist(agent_ind, nobs)
    action = act[agent_ind]
    if action == 5:
        return 5.0 / dist
    if action == 0:
        return 0.0
    return 1.0 / dist
def skynet_reward(obs, act, nobs, fifo, agent_inds, log):
    """
    Skynet reward function rewarding enemy deaths, powerup pickups and stepping on blocks not in FIFO
    :param obs: previous observation
    :param act: actions taken this step (unused here)
    :param nobs: new observation
    :param fifo: per-agent 121 (11x11) cell queue of recently visited positions
    :param agent_inds: indices of the agents we control
    :param log: per-team accumulators [enemy-kill, powerup, exploration, ...]
    :return: list of rewards, one per agent (zero for uncontrolled agents)
    """
    # calculate rewards for player agents, rest are zero
    r = [0.0] * len(obs)
    for i in range(len(obs)):
        if i not in agent_inds:
            continue
        # log[0] tracks agents 0/1, log[1] tracks agents 2/3.
        log_ind = 0 if i <= 1 else 1
        teammate_ind = i + 2 if log_ind == 0 else i - 2
        # Count enemies alive before this step.
        n_enemies_prev = 0
        alive_prev = obs[i]['alive']
        for e in obs[i]['enemies']:
            if e.value in alive_prev:
                n_enemies_prev += 1
        prev_n_teammate = 1 if obs[i]['teammate'].value in alive_prev else 0
        prev_can_kick = obs[i]['can_kick']
        prev_n_ammo = obs[i]['ammo']
        prev_n_blast = obs[i]['blast_strength']
        # Count enemies alive after this step.
        cur_alive = nobs[i]['alive']
        n_enemy_cur = 0
        for e in nobs[i]['enemies']:
            if e.value in cur_alive:
                n_enemy_cur += 1
        cur_n_teammate = 1 if nobs[i]['teammate'].value in cur_alive else 0
        cur_can_kick = nobs[i]['can_kick']
        cur_n_ammo = nobs[i]['ammo']
        cur_n_blast = nobs[i]['blast_strength']
        cur_position = nobs[i]['position']
        # +0.5 per enemy that died this step.
        if n_enemies_prev - n_enemy_cur > 0:
            r[i] += (n_enemies_prev - n_enemy_cur) * 0.5
            log[log_ind][0] += (n_enemies_prev - n_enemy_cur) * 0.5
        # if prev_n_teammate - cur_n_teammate > 0:
        #     r[i] -= (prev_n_teammate-cur_n_teammate)*0.5
        #     log[log_ind][4] -= (prev_n_teammate-cur_n_teammate)*0.5
        # Small bonuses for picking up powerups.
        if not prev_can_kick and cur_can_kick:
            r[i] += 0.02
            log[log_ind][1] += 0.02
        # Extra-bomb pickup: ammo rose while standing on an ExtraBomb tile.
        if cur_n_ammo - prev_n_ammo > 0 and obs[i]['board'][cur_position[0]][cur_position[1]] == Item.ExtraBomb.value:
            r[i] += 0.01
            log[log_ind][1] += 0.01
        if cur_n_blast - prev_n_blast > 0:
            r[i] += 0.01
            log[log_ind][1] += 0.01
        # Exploration bonus for cells not recently visited.
        if cur_position not in fifo[i]:
            r[i] += 0.001
            log[log_ind][2] += 0.001
        # NOTE(review): list.pop() removes the *newest* entry, so this behaves
        # as a bounded stack rather than a true FIFO — confirm intent
        # (a real FIFO would use pop(0) or collections.deque.popleft()).
        if len(fifo[i]) == 121:
            fifo[i].pop()
        fifo[i].append(cur_position)
    return r
def _get_positions(board, value):
wood_bitmap = np.isin(board, value).astype(np.uint8)
wood_positions = np.where(wood_bitmap == 1)
return list(zip(wood_positions[0], wood_positions[1]))
def woods_close_to_bomb_reward(obs, bomb_pos, blast_strength, agent_ids):
    '''
    :param obs: observation
    :param bomb_pos: position bomb is layed
    :param blast_strength: current blast strength of the agent
    :param agent_ids: agent ids of teammates
    :return: reward for laying bombs near wood and enemies
    '''
    board = obs['board']
    wood_positions = _get_positions(board, constants.Item.Wood.value)
    rigid_positions = _get_positions(board, constants.Item.Rigid.value)
    # Every board id in 10..13 that is not on our team is an enemy.
    enemy_ids = [10, 11, 12, 13]
    for agent_id in agent_ids:
        enemy_ids.remove(agent_id)
    enemy_positions = []
    for enemy_id in enemy_ids:
        enemy_positions += _get_positions(board, enemy_id)
    woods_in_range = 0.0
    enemies_in_range = 0.0
    # Cast one ray per blast direction (left, right, down, up).  A ray stops
    # at the board edge, at the first rigid block, or at the first enemy or
    # wooden block, which is counted once.  The edge test deliberately looks
    # at the cell reached on the *previous* step, mirroring the original
    # per-direction loops.
    edge = len(board) - 1
    for (step_x, step_y), limit in (((-1, 0), 0), ((1, 0), edge),
                                    ((0, -1), 0), ((0, 1), edge)):
        axis = 0 if step_x else 1
        pos = np.asarray(bomb_pos)
        for i in range(1, blast_strength + 1):
            if pos[axis] == limit:
                break
            pos = (bomb_pos[0] + step_x * i, bomb_pos[1] + step_y * i)
            if pos in rigid_positions:
                break
            elif pos in enemy_positions:
                enemies_in_range += 1
                break
            elif pos in wood_positions:
                woods_in_range += 1
                break
    # for each wood close to bomb reward x
    return (0.01 * woods_in_range) + (0.3 * enemies_in_range)
|
[
"numpy.isin",
"numpy.asarray",
"numpy.where",
"util.data.calc_dist"
] |
[((1392, 1418), 'util.data.calc_dist', 'calc_dist', (['agent_ind', 'nobs'], {}), '(agent_ind, nobs)\n', (1401, 1418), False, 'from util.data import calc_dist\n'), ((4096, 4122), 'numpy.where', 'np.where', (['(wood_bitmap == 1)'], {}), '(wood_bitmap == 1)\n', (4104, 4122), True, 'import numpy as np\n'), ((5017, 5037), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (5027, 5037), True, 'import numpy as np\n'), ((5447, 5467), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (5457, 5467), True, 'import numpy as np\n'), ((5895, 5915), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (5905, 5915), True, 'import numpy as np\n'), ((6325, 6345), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (6335, 6345), True, 'import numpy as np\n'), ((4035, 4056), 'numpy.isin', 'np.isin', (['board', 'value'], {}), '(board, value)\n', (4042, 4056), True, 'import numpy as np\n')]
|
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from plotly import express as px
from attrbench.suite.dashboard.components.pages import Page
class DetailPage(Page):
    """Dashboard page that shows one plot per metric for a selected method."""

    def __init__(self, result_obj, app):
        super().__init__(result_obj)
        self.app = app
        # Cache of already-built plot component lists, keyed by method name.
        self.rendered = {}
        # Callback for method selection dropdown
        app.callback(Output("plots-div", "children"),
                     Input("method-dropdown", "value"))(self._update_method)

    def _update_method(self, method_name):
        """Return the plot components for *method_name*, building and caching
        them on first request.  Returns a plain message when nothing is
        selected.
        """
        if method_name is None:
            # Fix: was a pointless f-string with no placeholders.
            return "No method selected."
        if method_name not in self.rendered:
            contents = []
            for metric_name in self.result_obj.get_metrics():
                contents.append(html.H2(metric_name))
                metric_data = self.result_obj.data[metric_name][method_name]
                metric_shape = self.result_obj.metadata[metric_name]["shape"]
                # Multi-column metrics get one line per column; single-column
                # metrics are summarised as a violin plot.
                plot = px.line(metric_data.transpose()) if metric_shape[1] > 1 else px.violin(metric_data)
                contents.append(dcc.Graph(id=metric_name, figure=plot))
            self.rendered[method_name] = contents
        return self.rendered[method_name]

    def render(self) -> html.Div:
        """Lay out the method dropdown above the (initially empty) plot area."""
        return html.Div([
            dbc.FormGroup([
                dcc.Dropdown(
                    id="method-dropdown",
                    options=[
                        {"label": method, "value": method} for method in self.result_obj.get_methods()
                    ],
                    placeholder="Select method...")
            ]),
            html.Div(id="plots-div")
        ])
|
[
"dash_html_components.H2",
"dash_html_components.Div",
"plotly.express.violin",
"dash.dependencies.Input",
"dash_core_components.Graph",
"dash.dependencies.Output"
] |
[((474, 505), 'dash.dependencies.Output', 'Output', (['"""plots-div"""', '"""children"""'], {}), "('plots-div', 'children')\n", (480, 505), False, 'from dash.dependencies import Input, Output\n'), ((528, 561), 'dash.dependencies.Input', 'Input', (['"""method-dropdown"""', '"""value"""'], {}), "('method-dropdown', 'value')\n", (533, 561), False, 'from dash.dependencies import Input, Output\n'), ((1784, 1808), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""plots-div"""'}), "(id='plots-div')\n", (1792, 1808), True, 'import dash_html_components as html\n'), ((845, 865), 'dash_html_components.H2', 'html.H2', (['metric_name'], {}), '(metric_name)\n', (852, 865), True, 'import dash_html_components as html\n'), ((1118, 1140), 'plotly.express.violin', 'px.violin', (['metric_data'], {}), '(metric_data)\n', (1127, 1140), True, 'from plotly import express as px\n'), ((1177, 1215), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': 'metric_name', 'figure': 'plot'}), '(id=metric_name, figure=plot)\n', (1186, 1215), True, 'import dash_core_components as dcc\n')]
|
from django.db import models
# Create your models here.
class Candidate(models.Model):
    # Read-only mapping onto the pre-existing 'candidate' table.
    id = models.IntegerField(primary_key=True)
    cand_no = models.IntegerField(blank=True, null=True)  # candidate number
    cand_type = models.CharField(max_length=1, blank=True, null=True)  # single-character type code
    name = models.CharField(max_length=150, blank=True, null=True)
    class Meta:
        managed = False  # Django must not create/migrate this table
        db_table = 'candidate'
    def __str__(self):
        # e.g. "12A : Some Name"; note this raises TypeError when
        # cand_type or name is NULL (None) in the database.
        return str(self.cand_no)+self.cand_type+" : "+self.name
|
[
"django.db.models.CharField",
"django.db.models.IntegerField"
] |
[((98, 135), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (117, 135), False, 'from django.db import models\n'), ((150, 192), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (169, 192), False, 'from django.db import models\n'), ((209, 262), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'blank': '(True)', 'null': '(True)'}), '(max_length=1, blank=True, null=True)\n', (225, 262), False, 'from django.db import models\n'), ((274, 329), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'blank': '(True)', 'null': '(True)'}), '(max_length=150, blank=True, null=True)\n', (290, 329), False, 'from django.db import models\n')]
|
from machine import UART
import sys
import select
import socket
import iceboot
import gc
def go():
    """
    Stupid telnet to serial terminal for Blinkencard

    Bridges a TCP connection on port 5000 to UART2 at 9600 baud, filtering
    telnet IAC negotiation bytes out of the inbound stream.  Ctrl-A k
    closes the session and stops the server; Ctrl-A r reboots into
    'altair.bin'.
    """
    uart=UART(2, rx=16, tx=17, timeout=1)
    uart.init(9600)
    s = socket.socket()
    # Fix: SO_REUSEADDR must be set *before* bind() to take effect;
    # previously it was set after listen(), where it did nothing.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(socket.getaddrinfo('0.0.0.0', 5000)[0][-1])
    s.listen(1)
    running = True
    while running:
        print('Waiting for conection')
        cl, addr = s.accept()
        print('Connected to ' + str(addr))
        connected = True
        cmd_mode = False
        cl.setblocking(True)
        p = select.poll()
        p.register(cl, select.POLLIN)
        # Tell telnet client to not echo and not wait for newline to send
        # IAC DO LINEMODE
        cl.write(b'\xFF\xFD\x22')
        # IAC SB LINEMODE MODE 0
        cl.write(b'\xFF\xFA\x22\x01\x00')
        # IAC SE
        cl.write(b'\xFF\xF0')
        # IAC WILL ECHO
        cl.write(b'\xFF\xFB\x01')
        # flush out response for echo
        cl.settimeout(100)
        cl.recv(100)
        while connected:
            # Serial -> network.
            if uart.any():
                recv_char=uart.read(100)
                cl.write(recv_char)
            # Network -> serial, polled every 10 ms.
            events = p.poll(10)
            if events:
                for event_s, event in events:
                    if event & select.POLLIN:
                        # receive and filter
                        to_send = b''
                        control_chars = 0
                        recvd = event_s.recv(100)
                        # usocket doesn't do POLLERR or POLLHUP, it just returns empty on recv
                        if not len(recvd):
                            print('Disconnected')
                            p.unregister(event_s)
                            event_s.close()
                            connected = False
                            break
                        for test_char in recvd:
                            if control_chars:
                                # Fix: was 'control_chars -= control_chars',
                                # which zeroed the counter and skipped only
                                # one of the two bytes that follow an IAC.
                                control_chars -= 1
                                continue
                            if test_char == 0xFF:
                                # IAC: the next two bytes are negotiation.
                                control_chars = 2
                                continue
                            if test_char == 0x00:
                                continue
                            if not cmd_mode and test_char == 0x01: # ctrl-a
                                cmd_mode = True
                                continue
                            if cmd_mode:
                                cmd_mode = False
                                # Quit on 'k'
                                if (test_char == 0x6B or test_char == 0x4B):
                                    p.unregister(event_s)
                                    event_s.close()
                                    connected = False
                                    running = False
                                    break
                                if (test_char == 0x72 or test_char == 0x52):
                                    cl.write(b'Reloading config')
                                    iceboot.boot('altair.bin')
                                continue
                            to_send += bytes([test_char])
                        uart.write(to_send)
    s.close()
    gc.collect()
|
[
"select.poll",
"socket.socket",
"gc.collect",
"socket.getaddrinfo",
"iceboot.boot",
"machine.UART"
] |
[((180, 212), 'machine.UART', 'UART', (['(2)'], {'rx': '(16)', 'tx': '(17)', 'timeout': '(1)'}), '(2, rx=16, tx=17, timeout=1)\n', (184, 212), False, 'from machine import UART\n'), ((241, 256), 'socket.socket', 'socket.socket', ([], {}), '()\n', (254, 256), False, 'import socket\n'), ((3453, 3465), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3463, 3465), False, 'import gc\n'), ((638, 651), 'select.poll', 'select.poll', ([], {}), '()\n', (649, 651), False, 'import select\n'), ((268, 303), 'socket.getaddrinfo', 'socket.getaddrinfo', (['"""0.0.0.0"""', '(5000)'], {}), "('0.0.0.0', 5000)\n", (286, 303), False, 'import socket\n'), ((3224, 3250), 'iceboot.boot', 'iceboot.boot', (['"""altair.bin"""'], {}), "('altair.bin')\n", (3236, 3250), False, 'import iceboot\n')]
|
'''
name: E#09
author: <NAME>
email: <EMAIL>
link: https://www.youtube.com/channel/UCNN3bpPlWWUkUMB7gjcUFlw
MIT License https://github.com/repen/E-parsers/blob/master/License
'''
import requests
from bs4 import BeautifulSoup
base_url = "https://wallpapershome.com"
space = "/space?page=4"
# Download the gallery page and parse it.
response = requests.get(base_url + space)
html = response.text
# print(html)
soup = BeautifulSoup(html, "html.parser")
conteiner = soup.find("div", {"class":"pics"})
images = conteiner.find_all("p")
# Each thumbnail links to "...-<id>.html"; the full-resolution image is
# served from a predictable URL built from that numeric id.
urls_images = []
for image in images:
    id_img = image.a["href"].split("-")[-1].replace(".html","")
    urls_images.append("https://wallpapershome.com/images/pages/pic_h/" + id_img + ".jpg")
    # break
# Fetch every picture into memory.
# Fix: the per-URL response used to be assigned to 'images', clobbering the
# thumbnail list parsed above; use a distinct name.
images_byte = []
for url in urls_images:
    img_response = requests.get(url)
    images_byte.append(img_response.content)
    # break
# Write each downloaded picture to image<N>.jpg in the working directory.
for e, image in enumerate(images_byte):
    # print(image)
    with open("image{}.jpg".format(e), "wb") as f:
        f.write(image)
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((303, 333), 'requests.get', 'requests.get', (['(base_url + space)'], {}), '(base_url + space)\n', (315, 333), False, 'import requests\n'), ((378, 412), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (391, 412), False, 'from bs4 import BeautifulSoup\n'), ((756, 773), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (768, 773), False, 'import requests\n')]
|
#!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
import django
from django.core.management import call_command
import reviewboard
if __name__ == '__main__':
    # Point Django at Review Board's settings before any framework calls.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reviewboard.settings')
    if hasattr(django, 'setup'):
        # Django >= 1.7
        django.setup()
    # Run compilemessages from inside the reviewboard package directory so
    # the .po catalogs found there get compiled.
    os.chdir(os.path.dirname(reviewboard.__file__))
    sys.exit(call_command('compilemessages', interactive=False, verbosity=2))
|
[
"django.core.management.call_command",
"os.path.dirname",
"os.environ.setdefault",
"django.setup"
] |
[((201, 272), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""reviewboard.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'reviewboard.settings')\n", (222, 272), False, 'import os\n'), ((339, 353), 'django.setup', 'django.setup', ([], {}), '()\n', (351, 353), False, 'import django\n'), ((368, 405), 'os.path.dirname', 'os.path.dirname', (['reviewboard.__file__'], {}), '(reviewboard.__file__)\n', (383, 405), False, 'import os\n'), ((420, 483), 'django.core.management.call_command', 'call_command', (['"""compilemessages"""'], {'interactive': '(False)', 'verbosity': '(2)'}), "('compilemessages', interactive=False, verbosity=2)\n", (432, 483), False, 'from django.core.management import call_command\n')]
|
import re
import ply.lex as lex
# 'instring' is an exclusive lexer state entered while scanning a
# parenthesised string literal.
states = (
    ('instring', 'exclusive'),
)
# Token names produced by this lexer.
tokens = (
    'COMMENT', 'HEXSTRING', 'INT', 'FLOAT', 'LITERAL', 'KEYWORD', 'STRING', 'OPERATOR'
)
# Characters that terminate a token (NOTE(review): this looks like a
# PDF/PostScript-style delimiter set -- confirm).
delimiter = r'\(\)\<\>\[\]\{\}\/\%\s'
# Lookahead asserting the next character is a delimiter or end of input.
delimiter_end = r'(?=[%s]|$)' % delimiter
def t_COMMENT(t):
    # r'^%!.+\n'
    r'%.*\n'
    # Comments run from '%' to end of line and produce no token.
    pass
RE_SPC = re.compile(r'\s')  # any whitespace character (stripped inside hex strings)
RE_HEX_PAIR = re.compile(r'[0-9a-fA-F]{2}|.')  # a two-digit hex pair, or any leftover single char
@lex.TOKEN(r'<[0-9A-Fa-f\s]*>')
def t_HEXSTRING(t):
    # Angle-bracketed hex string: strip whitespace between the brackets,
    # then decode each two-digit pair into a byte (a trailing lone digit
    # matches the '.' alternative of RE_HEX_PAIR).
    stripped = RE_SPC.sub('', t.value[1:-1])
    raw = bytes(int(pair, 16) for pair in RE_HEX_PAIR.findall(stripped))
    try:
        t.value = raw.decode('ascii')
    except UnicodeDecodeError:
        # Non-ASCII payload: expose the raw bytes unchanged.
        t.value = raw
    return t
@lex.TOKEN(r'(\-|\+)?[0-9]+' + delimiter_end)
def t_INT(t):
    # Signed decimal integer; the lexeme is converted to a Python int.
    t.value = int(t.value)
    return t
@lex.TOKEN(r'(\-|\+)?([0-9]+\.|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' + delimiter_end)
def t_FLOAT(t):
    # Signed real number (with a mandatory '.') and optional exponent;
    # the lexeme is converted to a Python float.
    t.value = float(t.value)
    return t
RE_LITERAL_HEX = re.compile(r'#[0-9A-Fa-f]{2}')  # '#xx' hex escape inside a name literal
@lex.TOKEN(r'/.+?' + delimiter_end)
def t_LITERAL(t):
    # Name literal: drop the leading '/', expand every '#xx' hex escape,
    # then discard any lone '#' that was not part of a valid escape.
    def expand_hex(match):
        # '#xx' -> the latin-1 character with that hex code.
        return bytes.fromhex(match.group(0)[1:]).decode('latin-1')
    name = RE_LITERAL_HEX.sub(expand_hex, t.value[1:])
    t.value = name.replace('#', '')
    return t
def t_OPERATOR(t):
    r'{|}|<<|>>|\[|\]'
    # Structural operators: braces, '<<'/'>>' delimiters and array brackets.
    return t
t_KEYWORD = r'.+?' + delimiter_end  # fallback token: any run of characters up to the next delimiter
def t_instring(t):
    r'\('
    # An opening paren starts a string literal: remember where it began,
    # reset the piece buffer and nesting depth, and switch to the
    # exclusive 'instring' state.
    t.lexer.value_buffer = []
    t.lexer.string_startpos = t.lexpos
    t.lexer.level = 1
    t.lexer.begin('instring')
# The parens situation: it's complicated. We can have both escaped parens and unescaped parens.
# If they're escaped, there's nothing special, we unescape them and add them to the string. If
# they're not escaped, we have to count how many of them there are, to know when a rparen is the
# end of the string. The regular expression for this is messed up, so what we do is when we hit
# a paren, we look if the previous buffer ended up with a backslash. If it did, we don't to paren
# balancing.
def t_instring_lparen(t):
    r'\('
    buf = t.lexer.value_buffer
    # A trailing backslash on the pending buffer means this paren is
    # escaped: drop the backslash and do not deepen the nesting level.
    if buf and buf[-1].endswith('\\'):
        buf[-1] = buf[-1][:-1]
    else:
        t.lexer.level += 1
    buf.append('(')
def t_instring_rparen(t):
    r'\)'
    buf = t.lexer.value_buffer
    is_escaped = buf and buf[-1].endswith('\\')
    if is_escaped:
        # Escaped ')' is literal string text: drop the backslash and keep
        # the paren.  Fix: previously the ')' itself was never appended in
        # this branch (unlike the matching escaped-'(' handling in
        # t_instring_lparen), so escaped close-parens were dropped.
        buf[-1] = buf[-1][:-1]
        buf.append(')')
    else:
        t.lexer.level -= 1
        if t.lexer.level == 0:
            # Outermost paren closed: emit the accumulated STRING token.
            t.value = ''.join(buf)
            if any(ord(c) > 0x7f for c in t.value):
                # Non-ASCII content is exposed as latin-1 bytes.
                t.value = t.value.encode('latin-1')
            t.type = "STRING"
            t.lexpos = t.lexer.string_startpos
            t.lexer.begin('INITIAL')
            return t
        else:
            buf.append(')')
RE_STRING_ESCAPE = re.compile(r'\\[btnfr\\]')  # named backslash escapes
RE_STRING_OCTAL = re.compile(r'\\[0-7]{1,3}')  # octal escapes such as \101
RE_STRING_LINE_CONT = re.compile(r'\\\n|\\\r|\\\r\n')  # escaped newlines (line continuations)
# Translation table for the named escapes matched by RE_STRING_ESCAPE.
ESC_STRING = { 'b': '\b', 't': '\t', 'n': '\n', 'f': '\f', 'r': '\r', '\\': '\\' }
def repl_string_escape(m):
    # Map a matched named escape (e.g. '\n') to its control character.
    return ESC_STRING[m.group(0)[1]]
def repl_string_octal(m):
    # Replace a matched octal escape (e.g. '\101' -> 'A') with its
    # character.  Values above 0xff are left untouched because they cannot
    # be represented as a single latin-1 character.
    i = int(m.group(0)[1:], 8)
    # Fix: was 'i < 0xff', which wrongly excluded 255 (\377) even though
    # chr(255) is perfectly encodable in latin-1.
    if i <= 0xff:
        return chr(i)
    else:
        return m.group(0)
def t_instring_contents(t):
    r'[^()]+'
    # Cook the raw chunk: expand named escapes first, then octal escapes,
    # then strip line continuations, and accumulate the result in the
    # string buffer.  The order matters and matches the original.
    cooked = RE_STRING_LINE_CONT.sub(
        '',
        RE_STRING_OCTAL.sub(
            repl_string_octal,
            RE_STRING_ESCAPE.sub(repl_string_escape, t.value)))
    t.lexer.value_buffer.append(cooked)
t_instring_ignore = ''  # nothing is skipped while inside a string
t_ignore = ' \t\r\n'  # inter-token whitespace in the default state
# Error handling rule
def t_error(t):
    # Report the offending character and resume scanning just past it.
    bad_char = t.value[0]
    print("Illegal character '%r'" % bad_char)
    t.lexer.skip(1)
t_instring_error = t_error  # reuse the same error handler inside strings
lexer = lex.lex()  # build the lexer from the rules defined in this module
|
[
"ply.lex.TOKEN",
"ply.lex.lex",
"re.compile"
] |
[((327, 344), 're.compile', 're.compile', (['"""\\\\s"""'], {}), "('\\\\s')\n", (337, 344), False, 'import re\n'), ((359, 389), 're.compile', 're.compile', (['"""[0-9a-fA-F]{2}|."""'], {}), "('[0-9a-fA-F]{2}|.')\n", (369, 389), False, 'import re\n'), ((392, 422), 'ply.lex.TOKEN', 'lex.TOKEN', (['"""<[0-9A-Fa-f\\\\s]*>"""'], {}), "('<[0-9A-Fa-f\\\\s]*>')\n", (401, 422), True, 'import ply.lex as lex\n'), ((752, 797), 'ply.lex.TOKEN', 'lex.TOKEN', (["('(\\\\-|\\\\+)?[0-9]+' + delimiter_end)"], {}), "('(\\\\-|\\\\+)?[0-9]+' + delimiter_end)\n", (761, 797), True, 'import ply.lex as lex\n'), ((853, 958), 'ply.lex.TOKEN', 'lex.TOKEN', (["('(\\\\-|\\\\+)?([0-9]+\\\\.|[0-9]*\\\\.[0-9]+|[0-9]+\\\\.[0-9]*)((e|E)[0-9]+)?' +\n delimiter_end)"], {}), "(\n '(\\\\-|\\\\+)?([0-9]+\\\\.|[0-9]*\\\\.[0-9]+|[0-9]+\\\\.[0-9]*)((e|E)[0-9]+)?' +\n delimiter_end)\n", (862, 958), True, 'import ply.lex as lex\n'), ((1022, 1051), 're.compile', 're.compile', (['"""#[0-9A-Fa-f]{2}"""'], {}), "('#[0-9A-Fa-f]{2}')\n", (1032, 1051), False, 'import re\n'), ((1054, 1087), 'ply.lex.TOKEN', 'lex.TOKEN', (["('/.+?' + delimiter_end)"], {}), "('/.+?' + delimiter_end)\n", (1063, 1087), True, 'import ply.lex as lex\n'), ((3177, 3206), 're.compile', 're.compile', (['"""\\\\\\\\[btnfr\\\\\\\\]"""'], {}), "('\\\\\\\\[btnfr\\\\\\\\]')\n", (3187, 3206), False, 'import re\n'), ((3222, 3250), 're.compile', 're.compile', (['"""\\\\\\\\[0-7]{1,3}"""'], {}), "('\\\\\\\\[0-7]{1,3}')\n", (3232, 3250), False, 'import re\n'), ((3272, 3312), 're.compile', 're.compile', (['"""\\\\\\\\\\\\n|\\\\\\\\\\\\r|\\\\\\\\\\\\r\\\\n"""'], {}), "('\\\\\\\\\\\\n|\\\\\\\\\\\\r|\\\\\\\\\\\\r\\\\n')\n", (3282, 3312), False, 'import re\n'), ((4066, 4075), 'ply.lex.lex', 'lex.lex', ([], {}), '()\n', (4073, 4075), True, 'import ply.lex as lex\n')]
|
import numpy as np
import pandas as pd
def intersection_cartesian(L1: pd.DataFrame, L2: pd.DataFrame):
    """
    Intersect pairs of lines given in general form (Ax+By+C=0) using
    Cramer's rule and return the cartesian intersection points.
    :param L1: DataFrame with columns A, B, C (one line per row)
    :param L2: DataFrame with columns A, B, C (one line per row)
    :return: list of (x, y) tuples, one per row pair
    """
    required = {'A', 'B', 'C'}
    if not required.issubset(L1.columns) or not required.issubset(L2.columns):
        raise ValueError('L1 and L2 should both contains columns A, B and C, which depicts lines in general form')
    # Cramer's rule: determinant and the two numerators.
    det = L1['A'] * L2['B'] - L1['B'] * L2['A']
    x = (L1['B'] * L2['C'] - L1['C'] * L2['B']) / det
    y = (L1['C'] * L2['A'] - L1['A'] * L2['C']) / det
    return list(zip(x.values.tolist(), y.values.tolist()))
def points2line(p1, p2):
    """
    Compute Ax+By+C=0 given a list of point [(x1,y1)] and [(x2,y2)].
    Single point is also acceptable.
    :param p1: point in tuple or array (x1,y1) or a list of points in tuple or array [(x1_1,y1_1),(x1_2,y1_2),...]
    :param p2: point in tuple or array (x2,y2) or a list of points in tuple or array [(x2_1,y2_1),(x2_2,y2_2),...]
    :return: pd.DataFrame objects of lines in general form(Ax+By+C=0)
    :raises ValueError: if the inputs are ragged or not 2-column point data
    """
    p1 = np.array(p1)
    p2 = np.array(p2)
    # Object dtype means the input was ragged / non-numeric.
    # Fix: `np.object` was removed in NumPy 1.24; the builtin `object`
    # denotes the same dtype.
    if p1.dtype == object or p2.dtype == object:
        raise ValueError("p1 and p2 should matrix alike")
    elif len(p1.shape) == 2 and len(p2.shape) == 2:
        if p1.shape[1] != 2 or p2.shape[1] != 2:
            raise ValueError("p1 and p2 should be matrix with column size of exactly 2")
    # Fix: second clause previously re-tested p1.shape (copy-paste typo)
    # instead of checking that p2 is also a 1-d point.
    elif len(p1.shape) == 1 and len(p1) == 2 and len(p2.shape) == 1 and len(p2) == 2:
        # Single points: promote both to 1x2 matrices.
        p1 = p1.reshape(-1, 2)
        p2 = p2.reshape(-1, 2)
    else:
        raise ValueError("Invalid p1 and p2")
    # Line through (x1,y1),(x2,y2): A=y1-y2, B=x2-x1, C=x1*y2-x2*y1.
    a = (p1[:, 1] - p2[:, 1])
    b = (p2[:, 0] - p1[:, 0])
    c = (p1[:, 0] * p2[:, 1] - p2[:, 0] * p1[:, 1])
    return pd.DataFrame([a, b, c], index=['A', 'B', 'C']).T
def find_y_on_lines(lines: np.array, x: np.array):
    """
    find y of a list of x on a list of lines that in polar form.
    :param lines: lines in polar (phi, rho) form; a single (phi, rho) pair is promoted to a 1x2 matrix
    :param x: x coordinates to evaluate
    :return: array of y values with shape (n_lines, n_x)
    :raises ValueError: if lines/x are ragged or lines is not 2-column data
    """
    if len(lines) == 0:
        return lines
    lines = np.array(lines)
    # Fix: `np.object` was removed in NumPy 1.24; the builtin `object`
    # denotes the same (ragged / non-numeric) dtype.
    if lines.dtype == object:
        raise ValueError("lines should be matrix alike")
    elif len(lines.shape) == 1:
        if len(lines) == 2:
            lines = lines.reshape(-1, 2)
        else:
            raise ValueError("the length of line vector should 2")
    elif len(lines.shape) == 2:
        if lines.shape[1] != 2:
            raise ValueError("lines should be matrix with column size of exactly 2")
    else:
        raise ValueError("Invalid lines")
    x = np.array(x)
    if x.dtype == object:
        raise ValueError("x should be matrix alike")
    # Polar line: rho = x*cos(phi) + y*sin(phi)  =>  y = (rho - x*cos(phi)) / sin(phi)
    rho = lines[:, 1].reshape(-1, 1)
    phi = lines[:, 0].reshape(-1, 1)
    y = (rho - x * np.cos(phi)) / np.sin(phi)
    return y
def find_points_on_lines(lines: np.array, x: np.array):
    """
    find points of a list of x on a list of lines that in polar form.
    :param lines: lines in polar (phi, rho) form
    :param x: x coordinates to evaluate
    :return: a list of points, 1th dimension for different x and 2th dimension for different lines
    """
    if len(lines) == 0:
        return lines
    lines = np.array(lines)
    # Promote a single (phi, rho) pair to a 1x2 matrix.
    if len(lines.shape) == 1 and len(lines) == 2:
        lines = lines.reshape(-1, 2)
    x = np.array(x)
    y = find_y_on_lines(lines, x)
    points = []
    for ix in range(len(x)):
        # One (x, y) pair per line, for this particular x.
        column = np.zeros((len(lines), 2))
        column[:, 0] = x[ix]
        column[:, 1] = y[:, ix]
        points.append([tuple(row) for row in column.tolist()])
    return points
def interpolate_pixels_along_line(p1: np.array or tuple, p2: np.array or tuple, width=2):
    """Uses Xiaolin Wu's line algorithm to interpolate all of the pixels along a
    straight line, given two points (x0, y0) and (x1, y1)

    Wikipedia article containing pseudo code that function was based off of:
    http://en.wikipedia.org/wiki/Xiaolin_Wu's_line_algorithm
    Given by Rick(https://stackoverflow.com/users/2025958/rick)
    on https://stackoverflow.com/questions/24702868/python3-pillow-get-all-pixels-on-a-line.

    :param p1: first endpoint, (x, y) tuple or ndarray
    :param p2: second endpoint, (x, y) tuple or ndarray
    :param width: half-width of the pixel band returned around the line
    :return: list of (int, int) pixel coordinates
    :raises TypeError: if p1/p2 are neither ndarrays nor 2-element sequences
    """
    if type(p1) is np.ndarray and type(p2) is np.ndarray:
        (x1, y1) = p1.flatten()
        (x2, y2) = p2.flatten()
    elif len(p1) == 2 and len(p2) == 2:
        (x1, y1) = p1
        (x2, y2) = p2
    else:
        raise TypeError("p1 and p2 must be tuple or ndarray depicting points")
    pixels = []
    steep = np.abs(y2 - y1) > np.abs(x2 - x1)
    # Ensure that the path to be interpolated is shallow and from left to right
    if steep:
        x1, y1 = y1, x1
        x2, y2 = y2, x2
    if x1 > x2:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
    dx = x2 - x1
    dy = y2 - y1
    gradient = dy / dx  # slope
    # Get the first given coordinate and add it to the return list
    x_end = np.round(x1)
    y_end = y1 + (gradient * (x_end - x1))
    xpxl0 = x_end
    ypxl0 = np.round(y_end)
    if steep:
        pixels.extend([(ypxl0, xpxl0), (ypxl0 + 1, xpxl0)])
    else:
        pixels.extend([(xpxl0, ypxl0), (xpxl0, ypxl0 + 1)])
    interpolated_y = y_end + gradient
    # Get the second given coordinate to give the main loop a range
    x_end = np.round(x2)
    y_end = y2 + (gradient * (x_end - x2))
    xpxl1 = x_end
    ypxl1 = np.round(y_end)
    # Loop between the first x coordinate and the second x coordinate, interpolating the y coordinates
    for x in np.arange(xpxl0 + 1, xpxl1):
        if steep:
            pixels.extend([(np.floor(interpolated_y) + i, x) for i in range(1 - width, width + 1)])
        else:
            pixels.extend([(x, np.floor(interpolated_y) + i) for i in range(1 - width, width + 1)])
        interpolated_y += gradient
    # Add the second given coordinate to the given list
    if steep:
        pixels.extend([(ypxl1, xpxl1), (ypxl1 + 1, xpxl1)])
    else:
        pixels.extend([(xpxl1, ypxl1), (xpxl1, ypxl1 + 1)])
    # Fix: `np.int` was removed in NumPy 1.24; the builtin `int` denotes the
    # same (platform default) integer dtype for this conversion.
    return [tuple(px) for px in np.array(pixels, dtype=int)]
|
[
"pandas.DataFrame",
"numpy.abs",
"numpy.floor",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.cos",
"numpy.round"
] |
[((1209, 1221), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (1217, 1221), True, 'import numpy as np\n'), ((1231, 1243), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (1239, 1243), True, 'import numpy as np\n'), ((2246, 2261), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (2254, 2261), True, 'import numpy as np\n'), ((2744, 2755), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2752, 2755), True, 'import numpy as np\n'), ((3304, 3319), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (3312, 3319), True, 'import numpy as np\n'), ((3427, 3438), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3435, 3438), True, 'import numpy as np\n'), ((5098, 5110), 'numpy.round', 'np.round', (['x1'], {}), '(x1)\n', (5106, 5110), True, 'import numpy as np\n'), ((5184, 5199), 'numpy.round', 'np.round', (['y_end'], {}), '(y_end)\n', (5192, 5199), True, 'import numpy as np\n'), ((5464, 5476), 'numpy.round', 'np.round', (['x2'], {}), '(x2)\n', (5472, 5476), True, 'import numpy as np\n'), ((5550, 5565), 'numpy.round', 'np.round', (['y_end'], {}), '(y_end)\n', (5558, 5565), True, 'import numpy as np\n'), ((5683, 5710), 'numpy.arange', 'np.arange', (['(xpxl0 + 1)', 'xpxl1'], {}), '(xpxl0 + 1, xpxl1)\n', (5692, 5710), True, 'import numpy as np\n'), ((1875, 1921), 'pandas.DataFrame', 'pd.DataFrame', (['[a, b, c]'], {'index': "['A', 'B', 'C']"}), "([a, b, c], index=['A', 'B', 'C'])\n", (1887, 1921), True, 'import pandas as pd\n'), ((2946, 2957), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2952, 2957), True, 'import numpy as np\n'), ((4619, 4634), 'numpy.abs', 'np.abs', (['(y2 - y1)'], {}), '(y2 - y1)\n', (4625, 4634), True, 'import numpy as np\n'), ((4637, 4652), 'numpy.abs', 'np.abs', (['(x2 - x1)'], {}), '(x2 - x1)\n', (4643, 4652), True, 'import numpy as np\n'), ((6244, 6274), 'numpy.array', 'np.array', (['pixels'], {'dtype': 'np.int'}), '(pixels, dtype=np.int)\n', (6252, 6274), True, 'import numpy as np\n'), ((2931, 2942), 'numpy.cos', 'np.cos', 
(['phi'], {}), '(phi)\n', (2937, 2942), True, 'import numpy as np\n'), ((5758, 5782), 'numpy.floor', 'np.floor', (['interpolated_y'], {}), '(interpolated_y)\n', (5766, 5782), True, 'import numpy as np\n'), ((5876, 5900), 'numpy.floor', 'np.floor', (['interpolated_y'], {}), '(interpolated_y)\n', (5884, 5900), True, 'import numpy as np\n')]
|
import pytest
from pyasn1.type.namedval import NamedValues
from asn1PERser.codec.per.encoder import encode as per_encoder
from asn1PERser.classes.data.builtin.EnumeratedType import EnumeratedType
from asn1PERser.classes.types.constraint import ExtensionMarker
def SCHEMA_my_enum(enumerationRoot_list, extensionMarker_value=False):
    # Build an EnumeratedType subclass whose enumeration root maps each
    # name in enumerationRoot_list to its index (0..N-1); the extension
    # addition is left empty.
    class MyEnum(EnumeratedType):
        '''
        MyEnum ::= ENUMERATED {
            e0,
            e1,
            .
            .
            .
            eN-1
            eN
        }
        '''
        subtypeSpec = ExtensionMarker(extensionMarker_value)  # is an ASN.1 '...' marker present?
        enumerationRoot = NamedValues(
            *[(item, index) for index, item in enumerate(enumerationRoot_list)]
        )
        extensionAddition = NamedValues(
        )
        namedValues = enumerationRoot + extensionAddition
    return MyEnum
def SCHEMA_my_ext_enum(enumerationRoot_list, extensionAddition_list, extensionMarker_value=False):
    # Build an EnumeratedType subclass with both an enumeration root
    # (values 0..N-1) and extension additions numbered from N onwards.
    class MyEnum(EnumeratedType):
        '''
        MyEnum::= ENUMERATED
        {
            e0,
            e1,
            .
            .
            .
            eN - 1
            eN,
            ...,
            eN+1
            .
            .
            .
            eM-1,
            eM
        }
        '''
        subtypeSpec = ExtensionMarker(extensionMarker_value)  # is an ASN.1 '...' marker present?
        enumerationRoot = NamedValues(
            *[(item, index) for index, item in enumerate(enumerationRoot_list)]
        )
        extensionAddition = NamedValues(
            *[(item, index) for index, item in enumerate(extensionAddition_list, start=len(enumerationRoot_list))]
        )
        namedValues = enumerationRoot + extensionAddition
    return MyEnum
def DATA_my_enum(enum, value):
    # Instantiate the given enumerated type with one of its named values.
    return enum(value)
short_enum = ['a0', 'a1']
# 'e0' .. 'e259': generated instead of spelled out by hand, so the list
# cannot drift out of sequence or pick up typos.
enumeration_list = ['e' + str(i) for i in range(260)]
@pytest.mark.parametrize("enumerated, encoded", [
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2]), 'e0'), '00'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2]), 'e1'), '80'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:10]), 'e9'), '90'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:17]), 'e9'), '48'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33]), 'e9'), '24'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33]), 'e32'), '80'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:100]), 'e98'), 'C4'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e126'), '7E'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e127'), '7F'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e128'), '80'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e128'), '0080'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e254'), '00FE'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e255'), '00FF'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e256'), '0100'),
])
def test_no_extension_marker_enumerated_can_be_encoded(enumerated, encoded):
    # PER encoding of an ENUMERATED without an extension marker must match
    # the expected octet string.
    assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2], extensionMarker_value=True), 'e0'), '00'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2], extensionMarker_value=True), 'e1'), '40'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:10], extensionMarker_value=True), 'e9'), '48'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:17], extensionMarker_value=True), 'e9'), '24'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33], extensionMarker_value=True), 'e9'), '12'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33], extensionMarker_value=True), 'e32'), '40'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:100], extensionMarker_value=True), 'e98'), '62'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e126'), '3F00'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e127'), '3F80'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e128'), '4000'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e128'), '000080'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e254'), '0000FE'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e255'), '0000FF'),
    (DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e256'), '000100'),
])
def test_extension_marker_is_present_and_extension_addition_is_empty_but_value_is_from_root_can_be_encoded(enumerated, encoded):
    # With an extension marker present, encodings gain a leading
    # extension bit, so the expected octets differ from the unextended case.
    assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:2], extensionAddition_list=short_enum, extensionMarker_value=True), 'e0'), '00'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:2], extensionAddition_list=short_enum, extensionMarker_value=True), 'e1'), '40'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:10], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '48'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:17], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '24'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:33], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '12'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:33], extensionAddition_list=short_enum, extensionMarker_value=True), 'e32'), '40'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:100], extensionAddition_list=short_enum, extensionMarker_value=True), 'e98'), '62'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e126'), '3F00'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e127'), '3F80'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e128'), '4000'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e128'), '000080'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e254'), '0000FE'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e255'), '0000FF'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e256'), '000100'),
])
def test_extension_marker_is_present_and_extension_addition_is_not_empty_but_value_is_from_root_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:2], extensionMarker_value=True), 'e0'), '80'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:2], extensionMarker_value=True), 'e1'), '81'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:10], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:17], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:33], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:33], extensionMarker_value=True), 'e32'), 'A0'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:100], extensionMarker_value=True), 'e98'), 'C00162'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e126'), 'C0017E'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e127'), 'C0017F'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e128'), 'C00180'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e128'), 'C00180'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e254'), 'C001FE'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e255'), 'C001FF'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e256'), 'C0020100'),
])
def test_extension_marker_is_present_and_value_is_from_extension_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
|
[
"asn1PERser.classes.types.constraint.ExtensionMarker",
"asn1PERser.codec.per.encoder.encode",
"pyasn1.type.namedval.NamedValues"
] |
[((561, 599), 'asn1PERser.classes.types.constraint.ExtensionMarker', 'ExtensionMarker', (['extensionMarker_value'], {}), '(extensionMarker_value)\n', (576, 599), False, 'from asn1PERser.classes.types.constraint import ExtensionMarker\n'), ((757, 770), 'pyasn1.type.namedval.NamedValues', 'NamedValues', ([], {}), '()\n', (768, 770), False, 'from pyasn1.type.namedval import NamedValues\n'), ((1304, 1342), 'asn1PERser.classes.types.constraint.ExtensionMarker', 'ExtensionMarker', (['extensionMarker_value'], {}), '(extensionMarker_value)\n', (1319, 1342), False, 'from asn1PERser.classes.types.constraint import ExtensionMarker\n'), ((5862, 5885), 'asn1PERser.codec.per.encoder.encode', 'per_encoder', (['enumerated'], {}), '(enumerated)\n', (5873, 5885), True, 'from asn1PERser.codec.per.encoder import encode as per_encoder\n'), ((7849, 7872), 'asn1PERser.codec.per.encoder.encode', 'per_encoder', (['enumerated'], {}), '(enumerated)\n', (7860, 7872), True, 'from asn1PERser.codec.per.encoder import encode as per_encoder\n'), ((10386, 10409), 'asn1PERser.codec.per.encoder.encode', 'per_encoder', (['enumerated'], {}), '(enumerated)\n', (10397, 10409), True, 'from asn1PERser.codec.per.encoder import encode as per_encoder\n'), ((12904, 12927), 'asn1PERser.codec.per.encoder.encode', 'per_encoder', (['enumerated'], {}), '(enumerated)\n', (12915, 12927), True, 'from asn1PERser.codec.per.encoder import encode as per_encoder\n')]
|
from error.error_crossbar import *
from basic_blocks.mvm_four_bit import mvm_four_by_four
import unittest
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
class TestFourBitMAC(unittest.TestCase):
'''
Test some multiplier structures
'''
def test_multiply_and_accumulate_operation_four_by_four(self):
'''
Test the multiplication:
| 11 13| | 3 | = | 150 |
| 2 6| | 9 | = | 60 |
Remember, the matrix above is stored as its transpose in the ReRAM crossbar
'''
mat = mvm_four_by_four()
mat.set_conductance_matrix([[11, 2],
[13, 6]])
input_v = [3, 9]
mat.crossbar_multiply(input_v=input_v)
expected_matrix = [150, 60]
self.assertEqual(mat.get_shift_reg_values(), expected_matrix)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"os.path.realpath",
"sys.path.insert",
"basic_blocks.mvm_four_bit.mvm_four_by_four",
"os.path.join"
] |
[((251, 286), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parent_dir_path'], {}), '(0, parent_dir_path)\n', (266, 286), False, 'import sys\n'), ((154, 180), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (170, 180), False, 'import os\n'), ((216, 249), 'os.path.join', 'os.path.join', (['dir_path', 'os.pardir'], {}), '(dir_path, os.pardir)\n', (228, 249), False, 'import os\n'), ((1011, 1026), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1024, 1026), False, 'import unittest\n'), ((688, 706), 'basic_blocks.mvm_four_bit.mvm_four_by_four', 'mvm_four_by_four', ([], {}), '()\n', (704, 706), False, 'from basic_blocks.mvm_four_bit import mvm_four_by_four\n')]
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from rest_framework.response import Response
from rest_framework.utils import json
from rest_framework.viewsets import ViewSetMixin
from course import models
from rest_framework.views import APIView
from rest_framework import serializers, generics
from course import models
from arrange import models
from rest_framework import exceptions
from django.contrib.auth.views import LoginView, LogoutView
# Create your views here.
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = models.CourseBaseInfo
fields = "__all__"
class ClassroomSerializer(serializers.ModelSerializer):
class Meta:
model = models.ClassroomInfo
fields = "__all__"
class ClassroomDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.ClassroomInfo.objects.all()
serializer_class = ClassroomSerializer
class CourseDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.CourseBaseInfo.objects.all()
serializer_class = CourseSerializer
class CourseView(APIView):
def get(self, request, *args, **kwargs):
"""
课程详细接口
:param request:
:param args:
:param kwargs:
:return:
"""
ret = {'code': 1000, 'data': None}
try:
pk = kwargs.get('pk') # 课程id
# 课程详细对象
obj = models.CourseBaseInfo.objects.filter(pk=pk).first()
ser = CourseSerializer(instance=obj, many=False)
ret['data'] = ser.data
except Exception as e:
ret['code'] = 1001
ret['error'] = "获取课程失败"
return Response(ret)
|
[
"arrange.models.ClassroomInfo.objects.all",
"arrange.models.CourseBaseInfo.objects.filter",
"rest_framework.response.Response",
"arrange.models.CourseBaseInfo.objects.all"
] |
[((930, 964), 'arrange.models.ClassroomInfo.objects.all', 'models.ClassroomInfo.objects.all', ([], {}), '()\n', (962, 964), False, 'from arrange import models\n'), ((1084, 1119), 'arrange.models.CourseBaseInfo.objects.all', 'models.CourseBaseInfo.objects.all', ([], {}), '()\n', (1117, 1119), False, 'from arrange import models\n'), ((1760, 1773), 'rest_framework.response.Response', 'Response', (['ret'], {}), '(ret)\n', (1768, 1773), False, 'from rest_framework.response import Response\n'), ((1496, 1539), 'arrange.models.CourseBaseInfo.objects.filter', 'models.CourseBaseInfo.objects.filter', ([], {'pk': 'pk'}), '(pk=pk)\n', (1532, 1539), False, 'from arrange import models\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def capm(path):
"""Stock Market Data
monthly observations from 1960–01 to 2002–12
*number of observations* : 516
A time serie containing :
rfood
excess returns food industry
rdur
excess returns durables industry
rcon
excess returns construction industry
rmrf
excess returns market portfolio
rf
riskfree return
most of the above data are from Kenneth French's data library at
http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `capm.csv`.
Returns:
Tuple of np.ndarray `x_train` with 516 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'capm.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/Capm.csv'
maybe_download_and_extract(path, url,
save_file_name='capm.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
[
"observations.util.maybe_download_and_extract",
"os.path.expanduser",
"os.path.join"
] |
[((1100, 1124), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (1118, 1124), False, 'import os\n'), ((1264, 1342), 'observations.util.maybe_download_and_extract', 'maybe_download_and_extract', (['path', 'url'], {'save_file_name': '"""capm.csv"""', 'resume': '(False)'}), "(path, url, save_file_name='capm.csv', resume=False)\n", (1290, 1342), False, 'from observations.util import maybe_download_and_extract\n'), ((1427, 1455), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1439, 1455), False, 'import os\n'), ((1173, 1201), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1185, 1201), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
(c) 2019 - Copyright Red Hat Inc
Authors:
<NAME> <<EMAIL>>
"""
from __future__ import unicode_literals, absolute_import
import unittest
import sys
import os
import json
from mock import patch
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import tests # noqa: E402
class PagureFlaskApiPluginViewtests(tests.Modeltests):
"""Tests for the flask API of pagure for viewing plugins"""
def test_view_plugin(self):
"""Test viewing every plugin available in pagure."""
output = self.app.get("/api/0/_plugins")
self.assertEqual(output.status_code, 200)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data,
{
"plugins": [
{"Block Un-Signed commits": []},
{"Block non fast-forward pushes": ["branches"]},
{"Fedmsg": []},
{
"IRC": [
"server",
"port",
"room",
"nick",
"nick_pass",
"join",
"ssl",
]
},
{"Mail": ["mail_to"]},
{"Mirroring": ["target", "public_key", "last_log"]},
{"Pagure": []},
{
"Pagure CI": [
"ci_type",
"ci_url",
"ci_job",
"active_commit",
"active_pr",
]
},
{"Pagure requests": []},
{"Pagure tickets": []},
{"Prevent creating new branches by git push": []},
{"Read the Doc": ["api_url", "api_token", "branches"]},
],
"total_plugins": 12,
},
)
@patch.dict("pagure.config.config", {"DISABLED_PLUGINS": ["IRC"]})
def test_view_plugin_disabled(self):
"""Test viewing every plugin available in pagure with one plugin disabled."""
output = self.app.get("/api/0/_plugins")
self.assertEqual(output.status_code, 200)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data,
{
"plugins": [
{"Block Un-Signed commits": []},
{"Block non fast-forward pushes": ["branches"]},
{"Fedmsg": []},
{"Mail": ["mail_to"]},
{"Mirroring": ["target", "public_key", "last_log"]},
{"Pagure": []},
{
"Pagure CI": [
"ci_type",
"ci_url",
"ci_job",
"active_commit",
"active_pr",
]
},
{"Pagure requests": []},
{"Pagure tickets": []},
{"Prevent creating new branches by git push": []},
{"Read the Doc": ["api_url", "api_token", "branches"]},
],
"total_plugins": 12,
},
)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
[
"unittest.main",
"os.path.abspath",
"mock.patch.dict"
] |
[((2114, 2179), 'mock.patch.dict', 'patch.dict', (['"""pagure.config.config"""', "{'DISABLED_PLUGINS': ['IRC']}"], {}), "('pagure.config.config', {'DISABLED_PLUGINS': ['IRC']})\n", (2124, 2179), False, 'from mock import patch\n'), ((3522, 3548), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (3535, 3548), False, 'import unittest\n'), ((285, 310), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (300, 310), False, 'import os\n')]
|
from contextlib import contextmanager
from datetime import datetime
from six.moves import cStringIO as StringIO
from django.test import TestCase
from django.core import management
from simple_history import models as sh_models
from simple_history.management.commands import populate_history
from .. import models
@contextmanager
def replace_registry(new_value=None):
hidden_registry = sh_models.registered_models
sh_models.registered_models = new_value or {}
try:
yield
except Exception:
raise
finally:
sh_models.registered_models = hidden_registry
class TestPopulateHistory(TestCase):
command_name = 'populate_history'
command_error = (management.CommandError, SystemExit)
def test_no_args(self):
out = StringIO()
management.call_command(self.command_name,
stdout=out, stderr=StringIO())
self.assertIn(populate_history.Command.COMMAND_HINT, out.getvalue())
def test_bad_args(self):
test_data = (
(populate_history.Command.MODEL_NOT_HISTORICAL, ("tests.place",)),
(populate_history.Command.MODEL_NOT_FOUND, ("invalid.model",)),
(populate_history.Command.MODEL_NOT_FOUND, ("bad_key",)),
)
for msg, args in test_data:
out = StringIO()
self.assertRaises(self.command_error, management.call_command,
self.command_name, *args,
stdout=StringIO(), stderr=out)
self.assertIn(msg, out.getvalue())
def test_auto_populate(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
management.call_command(self.command_name, auto=True,
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Poll.history.all().count(), 1)
def test_populate_with_custom_batch_size(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
management.call_command(self.command_name, auto=True, batchsize=500,
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Poll.history.all().count(), 1)
def test_specific_populate(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
models.Book.objects.create(isbn="9780007117116")
models.Book.history.all().delete()
management.call_command(self.command_name, "tests.book",
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Book.history.all().count(), 1)
self.assertEqual(models.Poll.history.all().count(), 0)
def test_failing_wont_save(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
self.assertRaises(self.command_error,
management.call_command, self.command_name,
"tests.poll", "tests.invalid_model",
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Poll.history.all().count(), 0)
def test_multi_table(self):
data = {'rating': 5, 'name': "Tea '<NAME>"}
models.Restaurant.objects.create(**data)
models.Restaurant.updates.all().delete()
management.call_command(self.command_name, 'tests.restaurant',
stdout=StringIO(), stderr=StringIO())
update_record = models.Restaurant.updates.all()[0]
for attr, value in data.items():
self.assertEqual(getattr(update_record, attr), value)
def test_existing_objects(self):
data = {'rating': 5, 'name': "Tea '<NAME>"}
out = StringIO()
models.Restaurant.objects.create(**data)
pre_call_count = models.Restaurant.updates.count()
management.call_command(self.command_name, 'tests.restaurant',
stdout=StringIO(), stderr=out)
self.assertEqual(models.Restaurant.updates.count(), pre_call_count)
self.assertIn(populate_history.Command.EXISTING_HISTORY_FOUND,
out.getvalue())
def test_no_historical(self):
out = StringIO()
with replace_registry():
management.call_command(self.command_name, auto=True,
stdout=out)
self.assertIn(populate_history.Command.NO_REGISTERED_MODELS,
out.getvalue())
|
[
"django.core.management.call_command",
"six.moves.cStringIO",
"datetime.datetime.now"
] |
[((776, 786), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (784, 786), True, 'from six.moves import cStringIO as StringIO\n'), ((4081, 4091), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (4089, 4091), True, 'from six.moves import cStringIO as StringIO\n'), ((4568, 4578), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (4576, 4578), True, 'from six.moves import cStringIO as StringIO\n'), ((1319, 1329), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (1327, 1329), True, 'from six.moves import cStringIO as StringIO\n'), ((4624, 4689), 'django.core.management.call_command', 'management.call_command', (['self.command_name'], {'auto': '(True)', 'stdout': 'out'}), '(self.command_name, auto=True, stdout=out)\n', (4647, 4689), False, 'from django.core import management\n'), ((889, 899), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (897, 899), True, 'from six.moves import cStringIO as StringIO\n'), ((1715, 1729), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1727, 1729), False, 'from datetime import datetime\n'), ((1875, 1885), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (1883, 1885), True, 'from six.moves import cStringIO as StringIO\n'), ((1894, 1904), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (1902, 1904), True, 'from six.moves import cStringIO as StringIO\n'), ((2133, 2147), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2145, 2147), False, 'from datetime import datetime\n'), ((2308, 2318), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (2316, 2318), True, 'from six.moves import cStringIO as StringIO\n'), ((2327, 2337), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (2335, 2337), True, 'from six.moves import cStringIO as StringIO\n'), ((2552, 2566), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2564, 2566), False, 'from datetime import datetime\n'), ((2815, 2825), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (2823, 2825), True, 'from six.moves import 
cStringIO as StringIO\n'), ((2834, 2844), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (2842, 2844), True, 'from six.moves import cStringIO as StringIO\n'), ((3122, 3136), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3134, 3136), False, 'from datetime import datetime\n'), ((3393, 3403), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (3401, 3403), True, 'from six.moves import cStringIO as StringIO\n'), ((3412, 3422), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (3420, 3422), True, 'from six.moves import cStringIO as StringIO\n'), ((3780, 3790), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (3788, 3790), True, 'from six.moves import cStringIO as StringIO\n'), ((3799, 3809), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (3807, 3809), True, 'from six.moves import cStringIO as StringIO\n'), ((4310, 4320), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (4318, 4320), True, 'from six.moves import cStringIO as StringIO\n'), ((1498, 1508), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (1506, 1508), True, 'from six.moves import cStringIO as StringIO\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import networkx as nx
import numpy as np
from .defined_histogram import DefinedHistogram
from typing import List, Union
from .make_cuts import MakeCuts, filter_function_for_make_cuts
def histogram_degree_centrality(
graph: nx.Graph,
bin_directive: Union[int, List[Union[float, int]], np.ndarray, str] = 10
) -> DefinedHistogram:
"""
Generates a histogram of the vertex degree centrality of the provided graph.
Histogram function is fundamentally proxied through to numpy's `histogram` function, and bin selection
follows `numpy.histogram` processes.
:param networkx.Graph graph: the graph. No changes will be made to it.
:param bin_directive: Is passed directly through to numpy's
"histogram" (and thus, "histogram_bin_edges") functions.
See: https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.histogram_bin_edges.html#numpy.histogram_bin_edges
In short description: if an int is provided, we use `bin_directive` number of equal range bins.
If a sequence is provided, these bin edges will be used and can be sized to whatever size you prefer
Note that the np.ndarray should be ndim=1 and the values should be float or int.
:type bin_directive: Union[int, List[Union[float, int]], numpy.ndarray, str]
:return: A named tuple that contains the histogram and the bin_edges used in the histogram
:rtype: DefinedHistogram
""" # noqa:501
degree_centrality_dict = nx.degree_centrality(graph)
histogram, bin_edges = np.histogram(
list(degree_centrality_dict.values()),
bin_directive
)
return DefinedHistogram(histogram=histogram, bin_edges=bin_edges)
def cut_vertices_by_degree_centrality(
graph: nx.Graph,
cut_threshold: Union[int, float],
cut_process: MakeCuts
) -> nx.Graph:
"""
Given a graph and a cut_threshold and a cut_process, return a copy of the graph with the vertices outside of the
cut_threshold.
:param networkx.Graph graph: The graph that will be copied and pruned.
:param cut_threshold: The threshold for making cuts based on degree centrality.
:type cut_threshold: Union[int, float]
:param MakeCuts cut_process: Describes how we should make the cut; cut all edges larger or smaller than the
cut_threshold, and whether exclusive or inclusive.
:return: Pruned copy of the graph
:rtype: networkx.Graph
"""
graph_copy = graph.copy()
degree_centrality_dict = nx.degree_centrality(graph_copy)
filter_by = filter_function_for_make_cuts(cut_threshold, cut_process)
vertices_to_cut = list(filter(filter_by, degree_centrality_dict.items()))
for vertex, degree_centrality in vertices_to_cut:
graph_copy.remove_node(vertex)
return graph_copy
|
[
"networkx.degree_centrality"
] |
[((1543, 1570), 'networkx.degree_centrality', 'nx.degree_centrality', (['graph'], {}), '(graph)\n', (1563, 1570), True, 'import networkx as nx\n'), ((2548, 2580), 'networkx.degree_centrality', 'nx.degree_centrality', (['graph_copy'], {}), '(graph_copy)\n', (2568, 2580), True, 'import networkx as nx\n')]
|
from dexp.utils import xpArray
from dexp.utils.backends import Backend, CupyBackend, NumpyBackend
def nan_to_zero(array: xpArray, copy: bool = True) -> xpArray:
"""
Replaces every nan in an array to zero. It might, or not, be able to operate in-place.
To be safe, the returned array should always be used...
Parameters
----------
array : array to replace NaNs with zeros.
copy : True/False to suggest whether copy or in-place behaviour should occur.
Returns
-------
Array for which NaNs have been replace by zero.
"""
# TODO: should we remove this function?
backend = Backend.current()
if type(backend) is NumpyBackend:
xp = backend.get_xp_module()
return xp.nan_to_num(array, copy=copy)
elif type(backend) is CupyBackend:
import cupy
return cupy.nan_to_num(array)
|
[
"dexp.utils.backends.Backend.current",
"cupy.nan_to_num"
] |
[((625, 642), 'dexp.utils.backends.Backend.current', 'Backend.current', ([], {}), '()\n', (640, 642), False, 'from dexp.utils.backends import Backend, CupyBackend, NumpyBackend\n'), ((841, 863), 'cupy.nan_to_num', 'cupy.nan_to_num', (['array'], {}), '(array)\n', (856, 863), False, 'import cupy\n')]
|
import sys
import json
log_file_name = sys.argv[1]
number_of_agents = sys.argv[2]
agent_config_file_name = sys.argv[3]
best_agents_file_name = sys.argv[4]
current_iteration = sys.argv[5]
# reading contents of logfile
log_file = open(log_file_name, 'r')
lines = log_file.readlines()
log_file.close()
# convert log entries to json objects
logs = []
for i in range(len(lines)):
logs.append(json.loads(lines[i]))
# create dict pairing agent types with a list of their lifespans -- {TeamX: [1,2,3..], TeamY: [1,2,3...]}
agents = {}
# {"agentAge":33,"agentID":"506f71ff-0b92-4eb5-8fea-1750cb2d44f5","agentType":"Team5","level":"info","msg":"Agent survives till the end of the simulation","reporter":"simulation","time":"2022-01-03T11:17:44Z"}
# going through every log entry
for i in logs:
try:
# Check when agent dies -- append days lived
if i['msg'] == "Killing agent":
if i['agent_type'] not in agents: # creates new entry if the log does not already exist
agents[i['agent_type']] = []
agents[i['agent_type']].append(i["daysLived"])
# Check when agent survives -- append days lived
elif i['msg'] == "Agent survives till the end of the simulation":
if i['agent_type'] not in agents: # creates new entry if the log does not already exist
agents[i['agent_type']] = []
agents[i['agent_type']].append(i["agentAge"])
except:
continue
avgs = {}
# avg life expectancy of all agents globally
avg_of_all_agents = 0
# avg life expectancy of all agents not incl Team 4
avg_all_other_agents = 0
# number of agents not of Team 4
number_of_other_agents = 0
# go through each agent type
for agent in agents:
# get avg life expectancy per agent type and store
avg = sum(agents[agent])/len(agents[agent])
avgs[agent] = avg
# increase global life exp
avg_of_all_agents += sum(agents[agent])
if agent != "Team4":
# count number of agents not Team 4
number_of_other_agents += len(agents[agent])
# increase global life exp of not team 4
avg_all_other_agents += sum(agents[agent])
avg_of_all_agents /= sum([len(agents[a]) for a in agents])
avg_all_other_agents /= number_of_other_agents
avg_days_lived = avgs["Team4"]
print(str(avg_of_all_agents)+";"+str(avg_days_lived)+";"+str(avg_all_other_agents))
# read best_agent file
best_agent_json_file = open(best_agents_file_name)
best_agent_json = json.load(best_agent_json_file)
best_agent_json_file.close()
try:
# read agent at current_iteration+1
next_agent = best_agent_json[int(current_iteration)]
# print("Changing agent config to: {0}".format(next_agent))
# pass agent to agent_config file to create population for next run
agent_config_file = open(agent_config_file_name, 'w')
agent_config_file.write(json.dumps(next_agent, indent=4))
agent_config_file.close()
except:
pass
|
[
"json.load",
"json.loads",
"json.dumps"
] |
[((2495, 2526), 'json.load', 'json.load', (['best_agent_json_file'], {}), '(best_agent_json_file)\n', (2504, 2526), False, 'import json\n'), ((394, 414), 'json.loads', 'json.loads', (['lines[i]'], {}), '(lines[i])\n', (404, 414), False, 'import json\n'), ((2882, 2914), 'json.dumps', 'json.dumps', (['next_agent'], {'indent': '(4)'}), '(next_agent, indent=4)\n', (2892, 2914), False, 'import json\n')]
|
"""Boundary checking (improved)
This function will (hopefully!) find if data in a csv file is contained within Northern Ireland.
If not so, this will be reported back to the user.
For now, please make sure that the second geojson in the argument is a boundary of Northern Ireland.
"""
import shapely
from geojson_utils import point_in_multipolygon
import logging
import json
from dask.threaded import get
import pandas as p
import geopandas as gp
import csv
import sys
import os
from ltldoorstep.processor import DoorstepProcessor
from ltldoorstep.reports import report
DEFAULT_OUTLINE = 'example/data/osni-ni-outline-lowres.geojson'
def find_ni_data(first_file, rprt, metadata=None):
    """Check that every GeoJSON feature in *first_file* lies inside a boundary.

    The boundary defaults to the bundled Northern Ireland outline
    (``DEFAULT_OUTLINE``) but can be overridden through
    ``metadata['configuration']['boundary']`` using the ``'$->key'``
    supplementary-data reference syntax.

    Args:
        first_file (str): path of the GeoJSON file to validate
        rprt: ltldoorstep report object that collects issues
        metadata (dict|None): optional processor metadata/configuration

    Returns:
        the report object, with one issue per out-of-boundary feature

    Raises:
        RuntimeError: if the referenced boundary is missing from the
            supplementary data or from the filesystem
    """
    ni_data = DEFAULT_OUTLINE

    if metadata and 'definition' in metadata:
        if ('configuration' in metadata
                and 'boundary' in metadata['configuration']
                and metadata['configuration']['boundary'].startswith('$->')):
            # '$->key' references an entry in the supplementary data
            boundary_key = metadata['configuration']['boundary'][3:]
            if 'supplementary' not in metadata or boundary_key not in metadata['supplementary']:
                raise RuntimeError("Boundary not found in supplementary data")
            boundary = metadata['supplementary'][boundary_key]
            ni_data = boundary['location']
            rprt.add_supplementary('boundary', boundary['source'],
                                   'Boundary against which points are tested')

    if not os.path.exists(ni_data):
        raise RuntimeError("Boundary not found on filesystem")

    # Parse the input inside the `with` so the handle is closed promptly.
    with open(first_file) as data_file:
        features = json.load(data_file)['features']

    # Setting up data that will be compared to the dataset/file being passed in
    data_to_compare = gp.GeoDataFrame.from_features(features)
    rprt.set_properties(preset='geojson', headers=list(data_to_compare.columns))

    if 'geometry' in data_to_compare:
        # Boundary polygon(s) of Northern Ireland (or the configured override).
        ni_compare_data = gp.read_file(ni_data)
        # BUG FIX: DataFrame.ix was deprecated and removed from pandas;
        # .iloc[0] selects the same first row positionally.
        multipolygon = ni_compare_data.iloc[0]['geometry']
        points = data_to_compare['geometry']
        # Features whose geometry does not fall inside the boundary polygon.
        outside_points = data_to_compare[[not multipolygon.contains(p) for p in points]]

        if not outside_points.empty:
            for ix, point in outside_points.iterrows():
                geopoint = shapely.geometry.mapping(point['geometry'])
                props = dict(point)
                # the geometry itself is reported separately via `item`
                del props['geometry']
                rprt.add_issue(
                    logging.ERROR,
                    'locations-not-found',
                    _("This location is not within the given boundary"),
                    item_index=ix,
                    item=geopoint,
                    item_type=geopoint['type'],
                    item_properties=props
                )
    else:
        # NOTE(review): unlike the call above, this add_issue passes a
        # processor-id string as an extra first positional argument —
        # confirm against the report API; it looks like a signature mismatch.
        rprt.add_issue(
            'lintol/boundary-checker-improved:1',
            logging.WARNING,
            'no-location-data-found',
            _("No location data found! Please make sure that you have read the right file")
        )

    return rprt
class BoundaryCheckerImprovedProcessor(DoorstepProcessor):
    """Doorstep processor exposing the boundary check as a one-task workflow."""

    @staticmethod
    def make_report():
        # A fresh report object is created for every processor run.
        return report.GeoJSONReport(
            "GeoJSON Boundary Processor",
            "Info from GeoJSON Processor - example info")

    def get_workflow(self, filename, metadata={}):
        # Single-node dask graph: 'output' evaluates find_ni_data on the file.
        report_obj = self.make_report()
        return {'output': (find_ni_data, filename, report_obj, metadata)}
# Module-level handle the ltldoorstep runner uses to locate the processor.
processor = BoundaryCheckerImprovedProcessor

if __name__ == "__main__":
    argv = sys.argv
    processor = BoundaryCheckerImprovedProcessor()
    workflow = processor.get_workflow(argv[1])
    # NOTE(review): `compile_report`, `filename` and `metadata` are not
    # defined anywhere in this file, so running the script directly raises
    # NameError here; `workflow` is also built but never executed.
    print(compile_report(filename, metadata))
|
[
"json.load",
"shapely.geometry.mapping",
"os.path.exists",
"ltldoorstep.reports.report.GeoJSONReport",
"geopandas.read_file"
] |
[((2058, 2079), 'geopandas.read_file', 'gp.read_file', (['ni_data'], {}), '(ni_data)\n', (2070, 2079), True, 'import geopandas as gp\n'), ((4004, 4104), 'ltldoorstep.reports.report.GeoJSONReport', 'report.GeoJSONReport', (['"""GeoJSON Boundary Processor"""', '"""Info from GeoJSON Processor - example info"""'], {}), "('GeoJSON Boundary Processor',\n 'Info from GeoJSON Processor - example info')\n", (4024, 4104), False, 'from ltldoorstep.reports import report\n'), ((1463, 1486), 'os.path.exists', 'os.path.exists', (['ni_data'], {}), '(ni_data)\n', (1477, 1486), False, 'import os\n'), ((1740, 1760), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1749, 1760), False, 'import json\n'), ((2923, 2966), 'shapely.geometry.mapping', 'shapely.geometry.mapping', (["point['geometry']"], {}), "(point['geometry'])\n", (2947, 2966), False, 'import shapely\n')]
|
# -*- coding: utf-8 -*-
#code adapted from https://github.com/analyticalmindsltd/smote_variants
import numpy as np
import time
import logging
import itertools
from sklearn.neighbors import NearestNeighbors
# setting the _logger format
# Module-level logger for the sampler implementations: DEBUG level,
# streamed to stderr in "timestamp:LEVEL:message" form.
_logger = logging.getLogger('smote_variants')
_logger.setLevel(logging.DEBUG)
_logger_ch = logging.StreamHandler()
_logger_ch.setFormatter(logging.Formatter(
    "%(asctime)s:%(levelname)s:%(message)s"))
_logger.addHandler(_logger_ch)
def mode(data):
    """Return the most frequent value in *data*.

    Ties are broken in favour of the smallest value: ``np.unique`` returns
    its values sorted and we take the first index holding the maximal count.

    Args:
        data (array-like): values to take the mode of

    Returns:
        the most common element of *data*
    """
    values, counts = np.unique(data, return_counts=True)
    # np.argmax yields the first index of the maximum count — exactly what
    # the previous np.where(counts == max(counts))[0][0] computed, in one pass.
    return values[np.argmax(counts)]
class StatisticsMixin:
    """
    Mixin to compute class statistics and determine minority/majority labels
    """

    def class_label_statistics(self, X, y):
        """
        determines class sizes and minority and majority labels

        Sets ``class_stats`` (label -> count), ``min_label`` and
        ``maj_label``.  Assumes *y* contains exactly two distinct labels.

        Args:
            X (np.array): features (unused here; kept for interface symmetry)
            y (np.array): target labels
        """
        unique, counts = np.unique(y, return_counts=True)
        self.class_stats = dict(zip(unique, counts))
        # the label with the smaller count is the minority label
        self.min_label = unique[0] if counts[0] < counts[1] else unique[1]
        self.maj_label = unique[1] if counts[0] < counts[1] else unique[0]
        # (removed two dead no-op self-assignments of min_label/maj_label)

    def check_enough_min_samples_for_sampling(self, threshold=2):
        """
        Check whether the minority class has at least *threshold* samples.

        Args:
            threshold (int): minimum number of minority samples required

        Returns:
            bool: True when sampling is possible, False otherwise (logged)
        """
        if self.class_stats[self.min_label] < threshold:
            m = ("The number of minority samples (%d) is not enough "
                 "for sampling")
            m = m % self.class_stats[self.min_label]
            _logger.warning(self.__class__.__name__ + ": " + m)
            return False
        return True
class RandomStateMixin:
    """
    Mixin to set random state
    """

    def set_random_state(self, random_state):
        """
        sets the random_state member of the object

        Args:
            random_state (int/np.random.RandomState/None): the random state
                                                            initializer
        """
        # remember the raw initializer so get_params can report it
        self._random_state_init = random_state

        if random_state is None:
            self.random_state = np.random
            return
        if isinstance(random_state, int):
            # an int acts as a seed for a dedicated RandomState
            self.random_state = np.random.RandomState(random_state)
            return
        if isinstance(random_state, np.random.RandomState) or random_state is np.random:
            self.random_state = random_state
            return
        raise ValueError(
            "random state cannot be initialized by " + str(random_state))
class ParameterCheckingMixin:
    """
    Mixin to check if parameters come from a valid range

    Every check method raises ValueError (prefixed with the concrete
    class name) when the check fails, and returns None otherwise.
    """

    def check_in_range(self, x, name, r):
        """
        Check if parameter is in range

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            r (list-like(2)): the lower and upper bound of a range

        Throws:
            ValueError
        """
        if x < r[0] or x > r[1]:
            m = ("Value for parameter %s outside the range [%f,%f] not"
                 " allowed: %f")
            m = m % (name, r[0], r[1], x)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_out_range(self, x, name, r):
        """
        Check if parameter is outside of range

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            r (list-like(2)): the lower and upper bound of a range

        Throws:
            ValueError
        """
        if x >= r[0] and x <= r[1]:
            m = "Value for parameter %s in the range [%f,%f] not allowed: %f"
            m = m % (name, r[0], r[1], x)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_less_or_equal(self, x, name, val):
        """
        Check if parameter is less than or equal to value

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            val (numeric): value to compare to

        Throws:
            ValueError
        """
        if x > val:
            m = "Value for parameter %s greater than %f not allowed: %f > %f"
            m = m % (name, val, x, val)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_less_or_equal_par(self, x, name_x, y, name_y):
        """
        Check if parameter is less than or equal to another parameter

        Args:
            x (numeric): the parameter value
            name_x (str): the parameter name
            y (numeric): the other parameter value
            name_y (str): the other parameter name

        Throws:
            ValueError
        """
        if x > y:
            m = ("Value for parameter %s greater than parameter %s not"
                 " allowed: %f > %f")
            m = m % (name_x, name_y, x, y)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_less(self, x, name, val):
        """
        Check if parameter is less than value

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            val (numeric): value to compare to

        Throws:
            ValueError
        """
        if x >= val:
            m = ("Value for parameter %s greater than or equal to %f"
                 " not allowed: %f >= %f")
            m = m % (name, val, x, val)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_less_par(self, x, name_x, y, name_y):
        """
        Check if parameter is less than another parameter

        Args:
            x (numeric): the parameter value
            name_x (str): the parameter name
            y (numeric): the other parameter value
            name_y (str): the other parameter name

        Throws:
            ValueError
        """
        if x >= y:
            m = ("Value for parameter %s greater than or equal to parameter"
                 " %s not allowed: %f >= %f")
            m = m % (name_x, name_y, x, y)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_greater_or_equal(self, x, name, val):
        """
        Check if parameter is greater than or equal to value

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            val (numeric): value to compare to

        Throws:
            ValueError
        """
        if x < val:
            m = "Value for parameter %s less than %f is not allowed: %f < %f"
            m = m % (name, val, x, val)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_greater_or_equal_par(self, x, name_x, y, name_y):
        """
        Check if parameter is greater than or equal to another parameter

        Args:
            x (numeric): the parameter value
            name_x (str): the parameter name
            y (numeric): the other parameter value
            name_y (str): the other parameter name

        Throws:
            ValueError
        """
        if x < y:
            m = ("Value for parameter %s less than parameter %s is not"
                 " allowed: %f < %f")
            m = m % (name_x, name_y, x, y)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_greater(self, x, name, val):
        """
        Check if parameter is greater than value

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            val (numeric): value to compare to

        Throws:
            ValueError
        """
        if x <= val:
            m = ("Value for parameter %s less than or equal to %f not allowed"
                 " %f < %f")
            m = m % (name, val, x, val)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_greater_par(self, x, name_x, y, name_y):
        """
        Check if parameter is greater than another parameter

        Args:
            x (numeric): the parameter value
            name_x (str): the parameter name
            y (numeric): the other parameter value
            name_y (str): the other parameter name

        Throws:
            ValueError
        """
        if x <= y:
            m = ("Value for parameter %s less than or equal to parameter %s"
                 " not allowed: %f <= %f")
            m = m % (name_x, name_y, x, y)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_equal(self, x, name, val):
        """
        Check that parameter is NOT equal to value

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            val (numeric): value to compare to

        Throws:
            ValueError
        """
        if x == val:
            m = ("Value for parameter %s equal to parameter %f is not allowed:"
                 " %f == %f")
            m = m % (name, val, x, val)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_equal_par(self, x, name_x, y, name_y):
        """
        Check that parameter is NOT equal to another parameter

        Args:
            x (numeric): the parameter value
            name_x (str): the parameter name
            y (numeric): the other parameter value
            name_y (str): the other parameter name

        Throws:
            ValueError
        """
        if x == y:
            m = ("Value for parameter %s equal to parameter %s is not "
                 "allowed: %f == %f")
            m = m % (name_x, name_y, x, y)
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_isin(self, x, name, li):
        """
        Check if parameter is in list

        Args:
            x (numeric): the parameter value
            name (str): the parameter name
            li (list): list to check if parameter is in it

        Throws:
            ValueError
        """
        if x not in li:
            m = "Value for parameter %s not in list %s is not allowed: %s"
            m = m % (name, str(li), str(x))
            raise ValueError(self.__class__.__name__ + ": " + m)

    def check_n_jobs(self, x, name):
        """
        Check n_jobs parameter: must be None or a non-zero int

        Args:
            x (int/None): number of jobs
            name (str): the parameter name

        Throws:
            ValueError
        """
        if not ((x is None)
                or (x is not None and isinstance(x, int) and not x == 0)):
            m = "Value for parameter n_jobs is not allowed: %s" % str(x)
            raise ValueError(self.__class__.__name__ + ": " + m)
class ParameterCombinationsMixin:
    """
    Mixin to generate parameter combinations
    """

    @classmethod
    def generate_parameter_combinations(cls, dictionary, raw):
        """
        Generates reasonable parameter combinations

        Args:
            dictionary (dict): dictionary of parameter ranges
                               (parameter name -> list of candidate values)
            raw (bool): if True, return *dictionary* untouched

        Returns:
            list of dicts with one value per parameter (the Cartesian
            product of the ranges), or the original dict when *raw* is True
        """
        if raw:
            return dictionary
        # Sort the keys so the combination order is deterministic.
        keys = sorted(dictionary.keys())
        values = [dictionary[k] for k in keys]
        # itertools.product yields the Cartesian product lazily — no need
        # to materialize it into a list before iterating.
        return [dict(zip(keys, combo)) for combo in itertools.product(*values)]
class NoiseFilter(StatisticsMixin,
                  ParameterCheckingMixin,
                  ParameterCombinationsMixin):
    """
    Parent class of noise filtering methods
    """

    def __init__(self):
        """Constructor — noise filters carry no state by default."""
        pass

    def remove_noise(self, X, y):
        """
        Removes noise

        Args:
            X (np.array): features
            y (np.array): target labels
        """
        pass

    def get_params(self, deep=False):
        """
        Return parameters

        Returns:
            dict: dictionary of parameters
        """
        return {}

    def set_params(self, **params):
        """
        Set parameters

        Args:
            params (dict): dictionary of parameters

        Returns:
            self, to allow chaining
        """
        for name in params:
            setattr(self, name, params[name])
        return self
class OverSampling(StatisticsMixin,
                   ParameterCheckingMixin,
                   ParameterCombinationsMixin,
                   RandomStateMixin):
    """
    Base class of oversampling methods
    """

    categories = []

    # Category tags used by child samplers to describe their behaviour.
    cat_noise_removal = 'NR'
    cat_dim_reduction = 'DR'
    cat_uses_classifier = 'Clas'
    cat_sample_componentwise = 'SCmp'
    cat_sample_ordinary = 'SO'
    cat_sample_copy = 'SCpy'
    cat_memetic = 'M'
    cat_density_estimation = 'DE'
    cat_density_based = 'DB'
    cat_extensive = 'Ex'
    cat_changes_majority = 'CM'
    cat_uses_clustering = 'Clus'
    cat_borderline = 'BL'
    cat_application = 'A'

    def __init__(self):
        pass

    def det_n_to_sample(self, strategy, n_maj, n_min):
        """
        Determines the number of samples to generate

        Args:
            strategy (int/float): the fraction of the difference of the
                                  minority and majority numbers to
                                  generate, like 0.1 means that 10% of
                                  the difference will be generated
            n_maj (int): number of majority samples
            n_min (int): number of minority samples

        Returns:
            int: the number of samples to generate, never negative

        Raises:
            ValueError: for any non-numeric strategy.  NOTE: string
                strategies such as 'min2maj' are NOT supported by this
                implementation, despite what earlier docs suggested.
        """
        if isinstance(strategy, float) or isinstance(strategy, int):
            return max([0, int((n_maj - n_min)*strategy)])
        else:
            m = "Value %s for parameter strategy is not supported" % strategy
            raise ValueError(self.__class__.__name__ + ": " + m)

    def sample_between_points(self, x, y):
        """
        Sample randomly along the line between two points.

        Args:
            x (np.array): point 1
            y (np.array): point 2

        Returns:
            np.array: the new sample
        """
        return x + (y - x)*self.random_state.random_sample()

    def sample_between_points_componentwise(self, x, y, mask=None):
        """
        Sample each dimension separately between the two points.

        Args:
            x (np.array): point 1
            y (np.array): point 2
            mask (np.array): array of 0,1s - specifies which dimensions
                                to sample

        Returns:
            np.array: the new sample being generated
        """
        if mask is None:
            return x + (y - x)*self.random_state.random_sample()
        else:
            # masked dimensions (mask == 0) are copied from x unchanged
            return x + (y - x)*self.random_state.random_sample()*mask

    def sample_by_jittering(self, x, std):
        """
        Sample by jittering: uniform offset in [-std, +std).

        Args:
            x (np.array): base point
            std (float): standard deviation

        Returns:
            np.array: the new sample
        """
        return x + (self.random_state.random_sample() - 0.5)*2.0*std

    def sample_by_jittering_componentwise(self, x, std):
        """
        Sample by jittering componentwise: per-dimension uniform offsets.

        Args:
            x (np.array): base point
            std (np.array): standard deviation

        Returns:
            np.array: the new sample
        """
        return x + (self.random_state.random_sample(len(x))-0.5)*2.0 * std

    def sample_by_gaussian_jittering(self, x, std):
        """
        Sample by Gaussian jittering

        Args:
            x (np.array): base point
            std (np.array): standard deviation

        Returns:
            np.array: the new sample
        """
        return self.random_state.normal(x, std)

    def sample(self, X, y):
        """
        The sampling function reimplemented in child classes;
        the base implementation is the identity.

        Args:
            X (np.matrix): features
            y (np.array): labels

        Returns:
            np.matrix, np.array: sampled X and y
        """
        return X, y

    def fit_resample(self, X, y):
        """
        Alias of the function "sample" for compatibility with imbalanced-learn
        pipelines
        """
        return self.sample(X, y)

    def sample_with_timing(self, X, y):
        # Run sample() and log its wall-clock runtime via the module logger.
        begin = time.time()
        X_samp, y_samp = self.sample(X, y)
        _logger.info(self.__class__.__name__ + ": " +
                     ("runtime: %f" % (time.time() - begin)))
        return X_samp, y_samp

    def preprocessing_transform(self, X):
        """
        Transforms new data according to the possible transformation
        implemented by the function "sample".  Identity by default.

        Args:
            X (np.matrix): features

        Returns:
            np.matrix: transformed features
        """
        return X

    def get_params(self, deep=False):
        """
        Returns the parameters of the object as a dictionary.

        NOTE(review): the body is `pass`, so this base implementation
        actually returns None — concrete samplers override it to return
        a real dict.
        """
        pass

    def set_params(self, **params):
        """
        Set parameters

        Args:
            params (dict): dictionary of parameters

        Returns:
            self, to allow chaining
        """
        for key, value in params.items():
            setattr(self, key, value)
        return self

    def descriptor(self):
        """
        Returns:
            str: JSON description of the current sampling object
        """
        return str((self.__class__.__name__, str(self.get_params())))

    def __str__(self):
        return self.descriptor()
class FOS_1(OverSampling): #F4_SMOTE(OverSampling):
    """SMOTE-style oversampler restricted to one (class, protected-group) cell.

    NOTE(review): the prot_idx/prot_grp/maj_min arguments suggest a
    fairness-aware oversampling variant (FOS ~ Fair OverSampling) —
    confirm against the accompanying paper/repository.
    """

    categories = [OverSampling.cat_sample_ordinary,
                  OverSampling.cat_extensive]

    def __init__(self,
                 proportion=1.0,
                 n_neighbors=5,
                 n_jobs=1,
                 random_state=None):
        """
        Args:
            proportion (float): sampling proportion, must be >= 0
            n_neighbors (int): SMOTE neighbourhood size, must be >= 1
            n_jobs (int): parallelism for NearestNeighbors
            random_state (int/np.random.RandomState/None): RNG initializer
        """
        super().__init__()
        self.check_greater_or_equal(proportion, "proportion", 0)
        self.check_greater_or_equal(n_neighbors, "n_neighbors", 1)
        self.check_n_jobs(n_jobs, 'n_jobs')
        self.proportion = proportion
        self.n_neighbors = n_neighbors
        self.n_jobs = n_jobs
        self.set_random_state(random_state)

    @classmethod
    def parameter_combinations(cls, raw=False):
        # parameter grid explored during model selection
        parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
                                                 1.0, 1.1,1.5, 2.0],
                                  'n_neighbors': [3, 5, 7]}
        return cls.generate_parameter_combinations(parameter_combinations, raw)

    def sample(self, X, y,prot_idx, pv_mid_pt, prot_grp, maj_min, nsamp,
               pv_max,pv_min):
        """Generate *nsamp* synthetic rows for one protected subgroup.

        Args:
            X (np.matrix): features
            y (np.array): labels
            prot_idx (int): column index of the (binary) protected attribute
            pv_mid_pt: unused in this variant (compare FOS_2)
            prot_grp (int): protected-group value (0 or 1) to oversample
            maj_min (int): 0 = sample within the minority class,
                           1 = sample within the majority class
            nsamp (int): number of synthetic samples to generate
            pv_max, pv_min: unused in this variant

        Returns:
            np.matrix, np.array: the extended X and y
        """
        _logger.info(self.__class__.__name__ + ": " +
                     "Running sampling via %s" % self.descriptor())

        self.class_label_statistics(X, y)

        if not self.check_enough_min_samples_for_sampling():
            return X.copy(), y.copy()

        y = np.squeeze(y)
        n_to_sample = nsamp
        if maj_min == 0:
            # restrict to minority-class rows of the requested group
            X_min = X[y == self.min_label]
            y_min = y[y == self.min_label]
            prot = X_min[:,prot_idx]
            if prot_grp == 0:
                X_min = X_min[prot==prot_grp]
                y_min = y_min[prot==prot_grp]
            if prot_grp == 1:
                X_min = X_min[prot==prot_grp]
                y_min = y_min[prot==prot_grp]
        if maj_min == 1:
            # same restriction within the majority class; the generated
            # samples are labelled with the majority label below
            X_min = X[y == self.maj_label]
            y_min = y[y == self.maj_label]
            prot = X_min[:,prot_idx]
            if prot_grp == 0:
                X_min = X_min[prot==prot_grp]
                y_min = y_min[prot==prot_grp]
            if prot_grp == 1:
                X_min = X_min[prot==prot_grp]
                y_min = y_min[prot==prot_grp]
            self.min_label = np.copy(self.maj_label)

        if n_to_sample == 0:
            return X.copy(), y.copy()

        # fitting the model
        n_neigh = min([len(X_min), self.n_neighbors+1])
        nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)
        nn.fit(X_min)
        dist, ind = nn.kneighbors(X_min)

        if n_to_sample == 0:
            # NOTE(review): duplicate of the check above — dead code
            return X.copy(), y.copy()

        # generating samples
        #np.random.seed(seed=1)
        base_indices = self.random_state.choice(list(range(len(X_min))),
                                                n_to_sample)
        neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),
                                                    n_to_sample)
        X_base = X_min[base_indices]
        X_neighbor = X_min[ind[base_indices, neighbor_indices]]
        # linear interpolation between each base point and a random neighbor
        samples = X_base + np.multiply(self.random_state.rand(n_to_sample,
                                                           1),
                                       X_neighbor - X_base)

        return (np.vstack([X, samples]),
                np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))

    def get_params(self, deep=False):
        """Return the parameters of the sampler as a dictionary."""
        return {'proportion': self.proportion,
                'n_neighbors': self.n_neighbors,
                'n_jobs': self.n_jobs,
                'random_state': self._random_state_init}
class FOS_2(OverSampling): #F3a_SMOTE(OverSampling):
    """SMOTE-style oversampler that draws base points from one protected
    subgroup but searches neighbors in the whole class.

    NOTE(review): unlike FOS_1, the protected attribute is thresholded
    against pv_mid_pt rather than matched exactly, and y is not squeezed —
    confirm the asymmetry with FOS_1 is intentional.
    """

    categories = [OverSampling.cat_sample_ordinary,
                  OverSampling.cat_extensive]

    def __init__(self,
                 proportion=1.0,
                 n_neighbors=5,
                 n_jobs=1,
                 random_state=None):
        """
        Args:
            proportion (float): sampling proportion, must be >= 0
            n_neighbors (int): SMOTE neighbourhood size, must be >= 1
            n_jobs (int): parallelism for NearestNeighbors
            random_state (int/np.random.RandomState/None): RNG initializer
        """
        super().__init__()
        self.check_greater_or_equal(proportion, "proportion", 0)
        self.check_greater_or_equal(n_neighbors, "n_neighbors", 1)
        self.check_n_jobs(n_jobs, 'n_jobs')
        self.proportion = proportion
        self.n_neighbors = n_neighbors
        self.n_jobs = n_jobs
        self.set_random_state(random_state)

    @classmethod
    def parameter_combinations(cls, raw=False):
        # parameter grid explored during model selection
        parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
                                                 1.0, 1.1,1.5, 2.0],
                                  'n_neighbors': [3, 5, 7]}
        return cls.generate_parameter_combinations(parameter_combinations, raw)

    def sample(self, X, y,prot_idx, pv_mid_pt, prot_grp, maj_min, nsamp):
        """Generate *nsamp* synthetic rows, interpolating from subgroup
        base points towards neighbors drawn from the whole class.

        Args:
            X (np.matrix): features
            y (np.array): labels
            prot_idx (int): column index of the protected attribute
            pv_mid_pt (numeric): threshold splitting the protected attribute
            prot_grp (int): 0 -> base points with prot < pv_mid_pt,
                            1 -> base points with prot > pv_mid_pt
                            (NOTE(review): any other value leaves X_min1
                            unbound and raises UnboundLocalError)
            maj_min (int): 0 = sample within the minority class,
                           1 = sample within the majority class
            nsamp (int): number of synthetic samples to generate

        Returns:
            np.matrix, np.array: the extended X and y
        """
        _logger.info(self.__class__.__name__ + ": " +
                     "Running sampling via %s" % self.descriptor())

        self.class_label_statistics(X, y)

        if not self.check_enough_min_samples_for_sampling():
            return X.copy(), y.copy()

        n_to_sample = nsamp
        if maj_min == 0:
            X_min = X[y == self.min_label]
            y_min = y[y == self.min_label]
            prot = X_min[:,prot_idx]
            if prot_grp == 0:
                X_min1 = X_min[prot<pv_mid_pt]
                y_min1 = y_min[prot<pv_mid_pt]
            if prot_grp == 1:
                X_min1 = X_min[prot>pv_mid_pt]
                y_min1 = y_min[prot>pv_mid_pt]
        if maj_min == 1:
            X_min = X[y == self.maj_label]
            y_min = y[y == self.maj_label]
            prot = X_min[:,prot_idx]
            if prot_grp == 0:
                X_min1 = X_min[prot<pv_mid_pt]
                y_min1 = y_min[prot<pv_mid_pt]
            if prot_grp == 1:
                X_min1 = X_min[prot>pv_mid_pt]
                y_min1 = y_min[prot>pv_mid_pt]
            self.min_label = np.copy(self.maj_label)
        # NOTE(review): y_min1 is computed but never used below.

        if n_to_sample == 0:
            return X.copy(), y.copy()

        # fitting the model on the whole class, querying from the subgroup
        n_neigh = min([len(X_min), self.n_neighbors+1])
        nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)
        nn.fit(X_min)
        dist, ind = nn.kneighbors(X_min1)

        if n_to_sample == 0:
            # NOTE(review): duplicate of the check above — dead code
            return X.copy(), y.copy()

        # generating samples
        # NOTE(review): this seeds the GLOBAL numpy RNG as a side effect,
        # unlike FOS_1 (where the same line is commented out) and despite
        # self.random_state existing for exactly this purpose.
        np.random.seed(seed=1)
        base_indices = self.random_state.choice(list(range(len(X_min1))),
                                                n_to_sample)
        neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),
                                                    n_to_sample)
        X_base = X_min1[base_indices]
        X_neighbor = X_min[ind[base_indices, neighbor_indices]]
        # linear interpolation between each base point and a random neighbor
        samples = X_base + np.multiply(self.random_state.rand(n_to_sample,
                                                           1),
                                       X_neighbor - X_base)

        return (np.vstack([X, samples]),
                np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))

    def get_params(self, deep=False):
        """Return the parameters of the sampler as a dictionary."""
        return {'proportion': self.proportion,
                'n_neighbors': self.n_neighbors,
                'n_jobs': self.n_jobs,
                'random_state': self._random_state_init}
|
[
"numpy.random.seed",
"numpy.copy",
"logging.StreamHandler",
"logging.getLogger",
"time.time",
"logging.Formatter",
"numpy.random.RandomState",
"numpy.hstack",
"sklearn.neighbors.NearestNeighbors",
"itertools.product",
"numpy.squeeze",
"numpy.vstack",
"numpy.unique"
] |
[((260, 295), 'logging.getLogger', 'logging.getLogger', (['"""smote_variants"""'], {}), "('smote_variants')\n", (277, 295), False, 'import logging\n'), ((343, 366), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (364, 366), False, 'import logging\n'), ((392, 450), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s:%(message)s"""'], {}), "('%(asctime)s:%(levelname)s:%(message)s')\n", (409, 450), False, 'import logging\n'), ((539, 574), 'numpy.unique', 'np.unique', (['data'], {'return_counts': '(True)'}), '(data, return_counts=True)\n', (548, 574), True, 'import numpy as np\n'), ((1017, 1049), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (1026, 1049), True, 'import numpy as np\n'), ((16779, 16790), 'time.time', 'time.time', ([], {}), '()\n', (16788, 16790), False, 'import time\n'), ((19515, 19528), 'numpy.squeeze', 'np.squeeze', (['y'], {}), '(y)\n', (19525, 19528), True, 'import numpy as np\n'), ((20727, 20784), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neigh', 'n_jobs': 'self.n_jobs'}), '(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n', (20743, 20784), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((24637, 24694), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neigh', 'n_jobs': 'self.n_jobs'}), '(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n', (24653, 24694), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((24891, 24913), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (24905, 24913), True, 'import numpy as np\n'), ((20521, 20544), 'numpy.copy', 'np.copy', (['self.maj_label'], {}), '(self.maj_label)\n', (20528, 20544), True, 'import numpy as np\n'), ((21622, 21645), 'numpy.vstack', 'np.vstack', (['[X, samples]'], {}), '([X, samples])\n', (21631, 21645), True, 'import numpy as np\n'), ((24403, 24426), 'numpy.copy', 'np.copy', (['self.maj_label'], {}), 
'(self.maj_label)\n', (24410, 24426), True, 'import numpy as np\n'), ((25554, 25577), 'numpy.vstack', 'np.vstack', (['[X, samples]'], {}), '([X, samples])\n', (25563, 25577), True, 'import numpy as np\n'), ((2341, 2376), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (2362, 2376), True, 'import numpy as np\n'), ((11605, 11631), 'itertools.product', 'itertools.product', (['*values'], {}), '(*values)\n', (11622, 11631), False, 'import itertools\n'), ((21678, 21719), 'numpy.hstack', 'np.hstack', (['([self.min_label] * n_to_sample)'], {}), '([self.min_label] * n_to_sample)\n', (21687, 21719), True, 'import numpy as np\n'), ((25610, 25651), 'numpy.hstack', 'np.hstack', (['([self.min_label] * n_to_sample)'], {}), '([self.min_label] * n_to_sample)\n', (25619, 25651), True, 'import numpy as np\n'), ((16930, 16941), 'time.time', 'time.time', ([], {}), '()\n', (16939, 16941), False, 'import time\n')]
|
#!/usr/bin/env python
from telnetlib import Telnet
import time
# Connect to the LCDd (LCDproc) daemon and set up one screen with two
# string widgets (w1 = top row, w2 = bottom row of the display).
tn = Telnet('192.168.1.4', 13666, None)
# NOTE(review): on Python 3, Telnet.write() requires bytes, so these str
# arguments raise TypeError — this script appears to target Python 2.
tn.write("hello\n")
tn.write("screen_add s1\n")
tn.write("screen_set s1 -priority 1\n")
tn.write("widget_add s1 w1 string\n")
tn.write("widget_add s1 w2 string\n")
def lcd_string(x, telnet_obj, delay=2):
    """Display *x* page by page on a two-row LCD driven through LCDproc.

    The text is cut into 31-character pages — 15 characters on row 1 and
    16 on row 2, matching the widget layout set up at import time — and
    each page is shown for *delay* seconds.

    Args:
        x (str): text to display
        telnet_obj: object with a ``write(str)`` method (e.g. a Telnet
            connection to LCDd)
        delay (float): seconds to pause after writing each page
    """
    page_size = 15 + 16  # row-1 width + row-2 width
    # Step straight from page to page instead of testing i % 31 for
    # every single character index as the original loop did.
    for start in range(0, len(x), page_size):
        page = x[start:start + page_size]
        row1, row2 = page[:15], page[15:]
        telnet_obj.write("widget_set s1 w1 1 1 {" + row1 + "}\n")
        telnet_obj.write("widget_set s1 w2 1 2 {" + row2 + "}\n")
        time.sleep(delay)
|
[
"telnetlib.Telnet",
"time.sleep"
] |
[((70, 104), 'telnetlib.Telnet', 'Telnet', (['"""192.168.1.4"""', '(13666)', 'None'], {}), "('192.168.1.4', 13666, None)\n", (76, 104), False, 'from telnetlib import Telnet\n'), ((606, 623), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (616, 623), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for /v1/execution API endpoint."""
import hashlib
import unittest.mock
import gridfs.grid_file
import pytest
from decapod_common.models import cluster
from decapod_common.models import execution
from decapod_common.models import execution_step
from decapod_common.models import role
from decapod_common.models import task
@pytest.fixture
def clean_execution_collection(configure_model, pymongo_connection):
    """Fixture: wipe the MongoDB execution collection before the test runs."""
    pymongo_connection.db.execution.remove({})
@pytest.fixture
def valid_post_request(new_pcmodel):
    """Fixture: minimal valid POST body for /v1/execution/."""
    playbook_configuration = {
        "id": new_pcmodel.model_id,
        "version": new_pcmodel.version,
    }
    return {"playbook_configuration": playbook_configuration}
@pytest.fixture
def sudo_client(sudo_client_v1, public_playbook_name, sudo_role):
    """Fixture: sudo API client whose role may run the public playbook."""
    # register the playbook permission and grant it to the sudo role
    role.PermissionSet.add_permission("playbook", public_playbook_name)
    sudo_role.add_permissions("playbook", [public_playbook_name])
    sudo_role.save()

    return sudo_client_v1
@pytest.fixture
def mock_task_class(monkeypatch):
    """Fixture: replace task.PlaybookPluginTask with a MagicMock."""
    mocked = unittest.mock.MagicMock()
    monkeypatch.setattr(task, "PlaybookPluginTask", mocked)

    return mocked
@pytest.fixture
def new_execution_with_logfile(new_execution, execution_log_storage):
    """Fixture: execution whose log storage serves a fake GridFS log file."""

    def side_effect(model_id):
        # Only the known execution has a log file; anything else yields None.
        if model_id != new_execution.model_id:
            return None

        mock = unittest.mock.MagicMock(spec=gridfs.grid_file.GridOut)
        # read() returns the payload once, then EOF; iteration yields one chunk
        mock.read.side_effect = b"LOG", b""
        mock.__iter__.return_value = [b"LOG"]
        mock.content_type = "text/plain"
        mock.filename = "filename.log"
        mock.md5 = hashlib.md5(b"LOG").hexdigest()

        return mock

    execution_log_storage.get.side_effect = side_effect

    return new_execution
def create_execution_step(execution_id, srv, state):
    """Insert a fake execution-step document directly into MongoDB.

    Args:
        execution_id: id of the owning execution
        srv: server model the step ran on
        state: execution-step result state (enum exposing .value)
    """
    db_model = {
        "execution_id": execution_id,
        "role": pytest.faux.gen_alpha(),
        "name": pytest.faux.gen_alpha(),
        "result": state.value,
        "error": {},
        "server_id": srv.model_id,
        "time_started": pytest.faux.gen_integer(1, 100),
        "time_finished": pytest.faux.gen_integer(101)
    }
    execution_step.ExecutionStep.collection().insert_one(db_model)
def test_post_access(sudo_client, client_v1, sudo_user, freeze_time,
                     normal_user, valid_post_request):
    """POST /v1/execution/ requires authentication and playbook permission."""
    # anonymous -> 401
    response = client_v1.post("/v1/execution/", data=valid_post_request)
    assert response.status_code == 401
    assert response.json["error"] == "Unauthorized"

    # authenticated but unprivileged -> 403
    client_v1.login(normal_user.login, "qwerty")
    response = client_v1.post("/v1/execution/", data=valid_post_request)
    assert response.status_code == 403
    assert response.json["error"] == "Forbidden"

    # sudo with the playbook permission -> 200
    response = sudo_client.post("/v1/execution/", data=valid_post_request)
    assert response.status_code == 200
def test_post_result(sudo_client, new_pcmodel, freeze_time,
                     valid_post_request):
    """Successful POST echoes the playbook configuration and queues a task."""
    response = sudo_client.post("/v1/execution/", data=valid_post_request)

    assert response.json["data"]["playbook_configuration"]["id"] \
        == new_pcmodel.model_id
    assert response.json["data"]["playbook_configuration"]["version"] \
        == new_pcmodel.version
    assert response.json["data"]["playbook_configuration"]["playbook_name"] \
        == new_pcmodel.playbook_id
    assert response.json["data"]["state"] \
        == execution.ExecutionState.created.name

    tsk = task.Task.get_by_execution_id(
        response.json["id"], task.TaskType.playbook.name)
    assert tsk
    # a freshly created task has no lifecycle timestamps yet,
    # only created/updated stamps at the frozen time
    assert not tsk.time_started
    assert not tsk.time_completed
    assert not tsk.time_failed
    assert not tsk.time_cancelled
    assert tsk.time_updated == int(freeze_time.return_value)
    assert tsk.time_created == int(freeze_time.return_value)
def test_post_result_deleted_cluster(sudo_client, new_pcmodel, freeze_time,
                                     valid_post_request):
    """POSTing a configuration whose cluster was deleted is rejected (400)."""
    clus = cluster.ClusterModel.create(pytest.faux.gen_alpha())
    clus.delete()

    new_pcmodel.cluster = clus
    new_pcmodel.save()
    # saving bumped the configuration version; keep the request in sync
    valid_post_request["playbook_configuration"]["version"] = \
        new_pcmodel.version

    response = sudo_client.post("/v1/execution/", data=valid_post_request)
    assert response.status_code == 400
@pytest.mark.parametrize("what", ("id", "version"))
def test_post_fake_playbook_configuration(what, sudo_client,
                                          valid_post_request):
    """An unknown playbook-configuration id or version yields 400."""
    if what == "id":
        valid_post_request["playbook_configuration"]["id"] \
            = pytest.faux.gen_uuid()
    else:
        valid_post_request["playbook_configuration"]["version"] \
            = pytest.faux.gen_integer(3)

    response = sudo_client.post("/v1/execution/", data=valid_post_request)
    assert response.status_code == 400
    assert response.json["error"] == "UnknownPlaybookConfiguration"
def test_post_cannot_create_task(sudo_client, mock_task_class,
                                 valid_post_request, pymongo_connection,
                                 clean_execution_collection):
    """If task creation raises, the execution ends up stored as failed."""
    mock_task_class.side_effect = Exception

    response = sudo_client.post("/v1/execution/", data=valid_post_request)
    assert response.status_code == 400

    db_model = pymongo_connection.db.execution.find({})
    db_model = list(db_model)
    # two stored versions: the created model and the failed update
    assert len(db_model) == 2

    # take the latest version and verify it is marked failed
    db_model = max((mdl for mdl in db_model), key=lambda x: x["version"])
    assert db_model["state"] == execution.ExecutionState.failed.name
def test_delete_access(sudo_client, client_v1, sudo_user, freeze_time,
                       normal_user, valid_post_request):
    """DELETE /v1/execution/<id>/ requires authentication and permission."""
    resp = sudo_client.post("/v1/execution/", data=valid_post_request)
    assert resp.status_code == 200

    # anonymous -> 401
    response = client_v1.delete(
        "/v1/execution/{0}/".format(resp.json["id"]))
    assert response.status_code == 401
    assert response.json["error"] == "Unauthorized"

    # authenticated but unprivileged -> 403
    client_v1.login(normal_user.login, "qwerty")
    response = client_v1.delete(
        "/v1/execution/{0}/".format(resp.json["id"]))
    assert response.status_code == 403
    assert response.json["error"] == "Forbidden"

    # sudo -> 200
    response = sudo_client.delete(
        "/v1/execution/{0}/".format(resp.json["id"]))
    assert response.status_code == 200
def test_delete_not_started(sudo_client, valid_post_request):
    """Deleting an execution that never started cancels it outright."""
    created = sudo_client.post("/v1/execution/", data=valid_post_request)
    deleted = sudo_client.delete(
        "/v1/execution/{0}/".format(created.json["id"]))
    excmodel = execution.ExecutionModel.find_by_model_id(deleted.json["id"])
    assert excmodel.state == execution.ExecutionState.canceled
def test_delete_started(sudo_client, valid_post_request):
    """Deleting a started execution moves it into the canceling state."""
    resp = sudo_client.post("/v1/execution/", data=valid_post_request)
    execution_id = resp.json["id"]

    playbook_task = task.Task.get_by_execution_id(execution_id,
                                                  task.TaskType.playbook)
    playbook_task.start()
    excmodel = execution.ExecutionModel.find_by_model_id(execution_id)
    assert excmodel.state == execution.ExecutionState.started

    resp = sudo_client.delete("/v1/execution/{0}/".format(execution_id))
    assert resp.status_code == 200

    excmodel = execution.ExecutionModel.find_by_model_id(resp.json["id"])
    assert excmodel.state == execution.ExecutionState.canceling

    # a cancel task must have been scheduled
    cancel_task = task.Task.get_by_execution_id(resp.json["id"],
                                                task.TaskType.cancel)
    assert cancel_task
def test_api_get_access(sudo_client, client_v1, normal_user):
    """Listing executions demands an authenticated, authorized client."""
    anonymous = client_v1.get("/v1/execution/")
    assert anonymous.status_code == 401
    assert anonymous.json["error"] == "Unauthorized"

    client_v1.login(normal_user.login, "qwerty")
    forbidden = client_v1.get("/v1/execution/")
    assert forbidden.status_code == 403
    assert forbidden.json["error"] == "Forbidden"

    allowed = sudo_client.get("/v1/execution/")
    assert allowed.status_code == 200
def test_get(sudo_client, clean_execution_collection, valid_post_request):
    """All GET endpoints expose the single created execution."""
    created = sudo_client.post("/v1/execution/", data=valid_post_request)
    model_id = created.json["id"]

    listing = sudo_client.get("/v1/execution/")
    assert listing.status_code == 200
    assert listing.json["total"] == 1
    assert len(listing.json["items"]) == 1

    single = sudo_client.get("/v1/execution/{0}/".format(model_id))
    assert single.status_code == 200

    versions = sudo_client.get("/v1/execution/{0}/version/".format(model_id))
    assert versions.status_code == 200
    assert versions.json["total"] == 1
    assert len(versions.json["items"]) == 1

    version_one = sudo_client.get(
        "/v1/execution/{0}/version/1/".format(model_id))
    assert version_one.status_code == 200
@pytest.mark.parametrize("state", execution_step.ExecutionStepState)
def test_get_execution_steps(state, sudo_client, new_server,
                             valid_post_request):
    """The steps endpoint returns every created step with its state."""
    created = sudo_client.post("/v1/execution/", data=valid_post_request)
    model_id = created.json["id"]
    step_count = 5
    for _ in range(step_count):
        create_execution_step(model_id, new_server, state)

    resp = sudo_client.get("/v1/execution/{0}/steps/".format(model_id))
    assert resp.status_code == 200
    assert resp.json["total"] == step_count
    assert len(resp.json["items"]) == step_count
    for item in resp.json["items"]:
        assert item["data"]["execution_id"] == model_id
        assert item["data"]["result"] == state.name
def test_get_execution_log_fail(sudo_client, client_v1, normal_user,
                                new_execution_with_logfile):
    """The log endpoint enforces the usual authentication rules."""
    url = "/v1/execution/{0}/log/".format(new_execution_with_logfile.model_id)

    response = client_v1.get(url)
    assert response.status_code == 401
    assert response.json["error"] == "Unauthorized"

    client_v1.login(normal_user.login, "qwerty")
    response = client_v1.get(url)
    assert response.status_code == 403
    assert response.json["error"] == "Forbidden"

    response = sudo_client.get(url)
    assert response.status_code == 200
@pytest.mark.parametrize("download", (True, False))
def test_get_execution_plain_text_log(download, sudo_client,
                                      new_execution_with_logfile):
    """Plain-text log responses carry the right headers, ETag and body."""
    query = "?download=yes" if download else ""
    response = sudo_client.get(
        "/v1/execution/{0}/log/{1}".format(
            new_execution_with_logfile.model_id, query))

    assert response.status_code == 200
    assert response.headers.get("Content-Type").startswith("text/plain")
    expected_etag = "\"{0}\"".format(hashlib.md5(b"LOG").hexdigest())
    assert response.headers.get("ETag") == expected_etag
    assert response.data == b"LOG"

    if download:
        assert response.headers["Content-Disposition"] == \
            "attachment; filename=filename.log"
    else:
        assert "Content-Disposition" not in response.headers
@pytest.mark.parametrize("download", (False,))
def test_get_execution_json_log(download, sudo_client,
                                new_execution_with_logfile):
    """JSON log requests wrap the log into a {"data": ...} payload."""
    query = "?download=yes" if download else ""
    response = sudo_client.get(
        "/v1/execution/{0}/log/{1}".format(
            new_execution_with_logfile.model_id, query),
        content_type="application/json"
    )
    assert response.status_code == 200

    content_type = response.headers.get("Content-Type")
    if download:
        assert content_type.startswith("text/plain")
    else:
        assert content_type.startswith("application/json")
    assert response.json == {"data": "LOG"}

    if download:
        assert response.headers["Content-Disposition"] == \
            "attachment; filename=filename.log"
    else:
        assert "Content-Disposition" not in response.headers
|
[
"hashlib.md5",
"decapod_common.models.role.PermissionSet.add_permission",
"pytest.faux.gen_uuid",
"decapod_common.models.execution.ExecutionModel.find_by_model_id",
"decapod_common.models.task.Task.get_by_execution_id",
"pytest.faux.gen_integer",
"decapod_common.models.execution_step.ExecutionStep.collection",
"pytest.mark.parametrize",
"pytest.faux.gen_alpha"
] |
[((4820, 4870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""what"""', "('id', 'version')"], {}), "('what', ('id', 'version'))\n", (4843, 4870), False, 'import pytest\n'), ((9115, 9182), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""state"""', 'execution_step.ExecutionStepState'], {}), "('state', execution_step.ExecutionStepState)\n", (9138, 9182), False, 'import pytest\n'), ((10598, 10648), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""download"""', '(True, False)'], {}), "('download', (True, False))\n", (10621, 10648), False, 'import pytest\n'), ((11413, 11458), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""download"""', '(False,)'], {}), "('download', (False,))\n", (11436, 11458), False, 'import pytest\n'), ((1366, 1433), 'decapod_common.models.role.PermissionSet.add_permission', 'role.PermissionSet.add_permission', (['"""playbook"""', 'public_playbook_name'], {}), "('playbook', public_playbook_name)\n", (1399, 1433), False, 'from decapod_common.models import role\n'), ((3981, 4060), 'decapod_common.models.task.Task.get_by_execution_id', 'task.Task.get_by_execution_id', (["response.json['id']", 'task.TaskType.playbook.name'], {}), "(response.json['id'], task.TaskType.playbook.name)\n", (4010, 4060), False, 'from decapod_common.models import task\n'), ((7029, 7087), 'decapod_common.models.execution.ExecutionModel.find_by_model_id', 'execution.ExecutionModel.find_by_model_id', (["resp.json['id']"], {}), "(resp.json['id'])\n", (7070, 7087), False, 'from decapod_common.models import execution\n'), ((7292, 7362), 'decapod_common.models.task.Task.get_by_execution_id', 'task.Task.get_by_execution_id', (["resp.json['id']", 'task.TaskType.playbook'], {}), "(resp.json['id'], task.TaskType.playbook)\n", (7321, 7362), False, 'from decapod_common.models import task\n'), ((7435, 7493), 'decapod_common.models.execution.ExecutionModel.find_by_model_id', 'execution.ExecutionModel.find_by_model_id', (["resp.json['id']"], 
{}), "(resp.json['id'])\n", (7476, 7493), False, 'from decapod_common.models import execution\n'), ((7684, 7742), 'decapod_common.models.execution.ExecutionModel.find_by_model_id', 'execution.ExecutionModel.find_by_model_id', (["resp.json['id']"], {}), "(resp.json['id'])\n", (7725, 7742), False, 'from decapod_common.models import execution\n'), ((7818, 7886), 'decapod_common.models.task.Task.get_by_execution_id', 'task.Task.get_by_execution_id', (["resp.json['id']", 'task.TaskType.cancel'], {}), "(resp.json['id'], task.TaskType.cancel)\n", (7847, 7886), False, 'from decapod_common.models import task\n'), ((2430, 2453), 'pytest.faux.gen_alpha', 'pytest.faux.gen_alpha', ([], {}), '()\n', (2451, 2453), False, 'import pytest\n'), ((2471, 2494), 'pytest.faux.gen_alpha', 'pytest.faux.gen_alpha', ([], {}), '()\n', (2492, 2494), False, 'import pytest\n'), ((2607, 2638), 'pytest.faux.gen_integer', 'pytest.faux.gen_integer', (['(1)', '(100)'], {}), '(1, 100)\n', (2630, 2638), False, 'import pytest\n'), ((2665, 2693), 'pytest.faux.gen_integer', 'pytest.faux.gen_integer', (['(101)'], {}), '(101)\n', (2688, 2693), False, 'import pytest\n'), ((4513, 4536), 'pytest.faux.gen_alpha', 'pytest.faux.gen_alpha', ([], {}), '()\n', (4534, 4536), False, 'import pytest\n'), ((5091, 5113), 'pytest.faux.gen_uuid', 'pytest.faux.gen_uuid', ([], {}), '()\n', (5111, 5113), False, 'import pytest\n'), ((5204, 5230), 'pytest.faux.gen_integer', 'pytest.faux.gen_integer', (['(3)'], {}), '(3)\n', (5227, 5230), False, 'import pytest\n'), ((2704, 2745), 'decapod_common.models.execution_step.ExecutionStep.collection', 'execution_step.ExecutionStep.collection', ([], {}), '()\n', (2743, 2745), False, 'from decapod_common.models import execution_step\n'), ((2168, 2187), 'hashlib.md5', 'hashlib.md5', (["b'LOG'"], {}), "(b'LOG')\n", (2179, 2187), False, 'import hashlib\n'), ((11140, 11159), 'hashlib.md5', 'hashlib.md5', (["b'LOG'"], {}), "(b'LOG')\n", (11151, 11159), False, 'import hashlib\n')]
|
from typing import Callable, Optional
from numpy import nan
from pandas import Series, isnull, Interval
from pandas.core.dtypes.inference import is_number
class ObjectDataMixin(object):
    """Mixin exposing a validated ``data`` property kept as-is (object dtype).

    The concrete class must supply ``_validate_data``.
    """
    _data: Optional[Series]
    _validate_data: Callable[[Series], None]

    def _set_data(self, data: Series):
        self.data = data

    @property
    def data(self) -> Series:
        return self._data

    @data.setter
    def data(self, data: Series):
        # None clears the stored data; anything else is validated first.
        if data is None:
            self._data = None
            return
        self._validate_data(data)
        self._data = data
class NumericDataMixin(object):
    """Mixin coercing assigned data to int, falling back to float.

    The concrete class must supply ``_validate_data``.
    """
    _data: Optional[Series]
    _validate_data: Callable[[Series], None]

    def _set_data(self, data: Series):
        self.data = data

    @property
    def data(self) -> Series:
        return self._data

    @data.setter
    def data(self, data: Series):
        if data is None:
            self._data = None
            return
        self._validate_data(data)
        # prefer an integer dtype; values that do not parse as int
        # (e.g. '1.5') are stored as floats instead
        try:
            coerced = data.astype(int)
        except ValueError:
            coerced = data.astype(float)
        self._data = coerced
class SingleCategoryDataMixin(object):
    """Mixin normalizing assigned data into a categorical Series.

    Values are mapped to category labels: NaN stays NaN, strings and
    Intervals pass through, whole numbers become their integer string
    (e.g. 2.0 -> '2'), everything else is str()-ed.
    """
    _data: Optional[Series]
    name: str
    _validate_data: Callable[[Series], None]

    def _set_data(self, data: Series):
        # NOTE: unlike the sibling mixins, a None argument is ignored here
        # rather than clearing the stored data.
        if data is not None:
            self.data = data

    @staticmethod
    def _as_category_label(value):
        """Return the categorical label for a single raw value."""
        if isnull(value):
            return nan
        if type(value) is str:
            return value
        if type(value) is Interval:
            return value
        if is_number(value) and value == int(value):
            return str(int(value))
        return str(value)

    @property
    def data(self) -> Series:
        return self._data

    @data.setter
    def data(self, data: Series):
        if data is None:
            self._data = None
        else:
            data = Series(
                index=data.index,
                data=[self._as_category_label(v) for v in data.values],
                name=self.name
            ).astype('category')
            self._validate_data(data)
            self._data = data
class MultiCategoryDataMixin(object):
    """Mixin normalizing assigned data into a Series of string labels.

    NaN and empty strings become NaN; other strings pass through;
    everything else is str()-ed. The result is NOT cast to category
    dtype (unlike SingleCategoryDataMixin).
    """
    _data: Optional[Series]
    name: str
    _validate_data: Callable[[Series], None]

    def _set_data(self, data: Series):
        self.data = data

    @staticmethod
    def _as_label(value):
        """Return the string label (or NaN) for a single raw value."""
        if isnull(value):
            return nan
        if type(value) is str:
            return nan if value == '' else value
        return str(value)

    @property
    def data(self) -> Series:
        return self._data

    @data.setter
    def data(self, data: Series):
        if data is None:
            self._data = None
        else:
            data = Series(
                index=data.index,
                data=[self._as_label(v) for v in data.values],
                name=self.name
            )
            self._validate_data(data)
            self._data = data
|
[
"pandas.isnull",
"pandas.core.dtypes.inference.is_number"
] |
[((2550, 2559), 'pandas.isnull', 'isnull', (['d'], {}), '(d)\n', (2556, 2559), False, 'from pandas import Series, isnull, Interval\n'), ((1662, 1671), 'pandas.isnull', 'isnull', (['d'], {}), '(d)\n', (1668, 1671), False, 'from pandas import Series, isnull, Interval\n'), ((1813, 1825), 'pandas.core.dtypes.inference.is_number', 'is_number', (['d'], {}), '(d)\n', (1822, 1825), False, 'from pandas.core.dtypes.inference import is_number\n')]
|
"""
An extended shell for test selection
"""
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.core.magic import (Magics, magics_class, line_magic)
from IPython.core.history import HistoryManager
class PytestShellEmbed(InteractiveShellEmbed):
    """Custom ip shell with a slightly altered exit message"""

    def init_history(self):
        """Sets up the command history, and starts regular autosaves.

        .. note::
            A separate history db is allocated for this plugin separate
            from regular ip shell sessions such that only relevant
            commands are retained.
        """
        self.history_manager = HistoryManager(
            shell=self, parent=self, hist_file=self.pytest_hist_file)
        self.configurables.append(self.history_manager)

    def exit(self):
        """Handle interactive exit.

        This method calls the ``ask_exit`` callback and if applicable prompts
        the user to verify the current test selection
        """
        selection = getattr(self, 'selection', None)
        if selection:
            print(" \n".join(selection.keys()))
            msg = ("\nYou have selected the above {} test(s) to be run."
                   "\nWould you like to run pytest now? ([y]/n)?"
                   ).format(len(selection))
        else:
            msg = 'Do you really want to exit ([y]/n)?'
        if self.ask_yes_no(msg, 'y'):
            self.ask_exit()
@magics_class
class SelectionMagics(Magics):
    """Custom magics for performing multiple test selections
    within a single session
    """
    def ns_eval(self, line):
        '''Evaluate line in the embedded ns and return result
        '''
        ns = self.shell.user_ns
        return eval(line, ns)

    @property
    def tt(self):
        '''Test tree object taken from the embedded namespace'''
        return self.ns_eval('tt')

    @property
    def selection(self):
        '''The current test selection held by the test tree'''
        return self.tt._selection

    @property
    def tr(self):
        '''Terminal reporter used for console output'''
        return self.tt._tr

    def err(self, msg="No tests selected"):
        '''Write an error message to the terminal reporter'''
        self.tr.write("ERROR: ", red=True)
        self.tr.write_line(msg)

    @line_magic
    def add(self, line):
        '''Add tests from a test set to the current selection.

        Usage:
           add tt : add all tests in the current tree
           add tt[4] : add 5th test in the current tree
           add tt.tests[1:10] : add tests 1-9 found under the 'tests' module
        '''
        if line:
            ts = self.ns_eval(line)
            if ts:
                self.selection.addtests(ts)
            else:
                raise TypeError("'{}' is not a test set".format(ts))
        else:
            print("No test set provided?")

    @line_magic
    def remove(self, line, delim=','):
        """Remove tests from the current selection using a slice syntax
        using a ',' delimiter instead of ':'.

        Usage:
           remove   : remove all tests from the current selection
           remove -1 : remove the last item from the selection
           remove 1, : remove all but the first item (same as [1:])
           remove ,,-3 : remove every third item (same as [::-3])
        """
        selection = self.selection
        if not self.selection:
            self.err()
            return
        if not line:
            selection.clear()
            return
        # parse out slice
        if delim in line:
            slc = slice(*map(lambda x: int(x.strip()) if x.strip() else None,
                             line.split(delim)))
            for item in selection[slc]:
                selection.remove(item)
        else:  # just an index
            try:
                selection.remove(selection[int(line)])
            except ValueError:
                # BUGFIX: message used to read "is not and index or slice?"
                self.err("'{}' is not an index or slice?".format(line))

    @line_magic
    def show(self, test_set):
        '''Show all currently selected test by pretty printing
        to the console.

        Usage:
            show: print currently selected tests
        '''
        items = self.selection.values()
        if items:
            self.tt._tprint(items)
        else:
            self.err()
|
[
"IPython.core.history.HistoryManager"
] |
[((667, 739), 'IPython.core.history.HistoryManager', 'HistoryManager', ([], {'shell': 'self', 'parent': 'self', 'hist_file': 'self.pytest_hist_file'}), '(shell=self, parent=self, hist_file=self.pytest_hist_file)\n', (681, 739), False, 'from IPython.core.history import HistoryManager\n')]
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfaceclipper.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.9 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this class was contributed by
## <NAME> (<EMAIL>)
## Politecnico di Milano
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
import math
from vmtk import vmtkrenderer
from vmtk import pypes
class vmtkSurfaceTagger(pypes.pypeScript):
    def __init__(self):
        """Initialize default members, script metadata and I/O member tables."""
        pypes.pypeScript.__init__(self)
        # Input surface and the tagging strategy; see the SetInputMembers
        # doc strings below for the meaning of each method keyword.
        self.Surface = None
        self.Method = 'cliparray'
        # Cell array holding the tags and the scalar array driving the tagging.
        self.CellEntityIdsArrayName = 'CellEntityIds'
        self.CellEntityIdsArray = None
        self.ArrayName = None
        self.Array = None
        # Region boundary: a single scalar Value or a [min, max] Range.
        self.Value = None
        self.Range = None
        # Tag values assigned inside/outside the selected region.
        self.InsideTag = 2
        self.OutsideTag = 1
        self.OverwriteOutsideTag = 0
        self.InsideOut = 0
        self.TagsToModify = None
        # Offset added per disconnected region by the 'connectivity' method.
        self.ConnectivityOffset = 1
        self.TagSmallestRegion = 1
        self.CleanOutput = 1
        self.PrintTags = 1
        self.Tags = None
        # Parameters specific to the 'harmonic' method.
        self.HarmonicRadius = 1.0
        self.HarmonicGenerateTag = 0
        self.HarmonicCleaningFixPoints = 0
        self.SetScriptName('vmtksurfacetagger')
        self.SetScriptDoc('tag a surface exploiting an array defined on it')
        self.SetInputMembers([
            ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
            ['Method','method','str',1,'["cliparray","array","harmonic","connectivity","constant","drawing"]','tagging method (cliparray: exploit an array to clip the surface at a certain value tagging the two parts, it creates skew triangles that need a successive remeshing; array: the same of cliparray, but without clipping the original triangles, thus creating a zig-zag tag; harmonic: move harmonically the original points of the input surface toward the array value in order to be able to obtain a precise tag also with a successive call of the array method without the need of remeshing; connectivity: given an already tagged surface, tag disconnected part of each input tag; constant: assign a constant tag to the input surface; drawing: interactive drawing a region)'],
            ['CellEntityIdsArrayName','entityidsarray','str',1,'','name of the array where the tags are stored'],
            ['ArrayName','array','str',1,'','name of the array with which to define the boundary between tags'],
            ['Value','value','float',1,'','scalar value of the array identifying the boundary between tags'],
            ['Range','range','float',2,'','range scalar values of the array identifying the region for the new tag (alternative to value, only array method)'],
            ['InsideTag','inside','int',1,'','tag of the inside region (i.e. where the Array is lower than Value; used also in case of "constant" method)'],
            ['HarmonicRadius','harmonicradius','float',1,'','buffer zone radius for the harmonic method beyond which the points are not moved'],
            ['HarmonicGenerateTag','harmonicgeneratetag','float',1,'','toggle tagging with the array method after the harmonic movement, it is suggested not to tag directly the surface, but to recompute the array on the warped surface and to use the array method on the recomputed array'],
            ['HarmonicCleaningFixPoints','harmoniccleanfixpoints','bool',1,'','toggle if the cleaning harmonic method has to fix the points or to leave them free'],
            ['OverwriteOutsideTag','overwriteoutside','bool',1,'','overwrite outside value also when the CellEntityIdsArray already exists in the input surface'],
            ['OutsideTag','outside','int',1,'','tag of the outside region (i.e. where the Array is greater than Value)'],
            ['InsideOut','insideout','bool',1,'','toggle switching inside and outside tags ("cliparray" and "array" methods, only when specifying value and not range)'],
            ['TagsToModify','tagstomodify','int',-1,'','if set, new tag is created only in this subset of existing tags ("cliparray" only)'],
            ['ConnectivityOffset','offset','int',1,'','offset added to the entityidsarray of each disconnected parts of each input tag ("connectivity" only)'],
            ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or the largest region (drawing only)'],
            ['CleanOutput','cleanoutput','bool',1,'','toggle cleaning the unused points'],
            ['PrintTags','printtags','bool',1,'','toggle printing the set of tags']
            ])
        self.SetOutputMembers([
            ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'],
            ['CellEntityIdsArray','oentityidsarray','vtkIntArray',1,'','the output entity ids array']
            ])
def CleanSurface(self):
cleaner = vtk.vtkCleanPolyData()
cleaner.SetInputData(self.Surface)
cleaner.Update()
self.Surface = cleaner.GetOutput()
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
    def ClipArrayTagger(self,onlyRing=False):
        """Clip the surface at self.Value of self.ArrayName and tag both parts.

        When onlyRing is True, only the cut contour is returned and the
        surface is left untouched; otherwise the two clipped halves are
        re-tagged and merged back into self.Surface.
        """
        from vmtk import vmtkscripts
        # clip the surface according to the Array
        clipper = vmtkscripts.vmtkSurfaceClipper()
        clipper.Surface = self.Surface
        clipper.Interactive = False
        clipper.InsideOut = 1-self.InsideOut # inside means regions where the Array is lower than Value
        clipper.CleanOutput = self.CleanOutput
        clipper.ClipArrayName = self.ArrayName
        clipper.ClipValue = self.Value
        clipper.Execute()
        if onlyRing:
            # return the cut line between the two regions
            return clipper.CutLines
        else:
            insideSurface = clipper.Surface
            outsideSurface = clipper.ClippedSurface
            # change values of the inside tags
            insideCellEntityIdsArray = insideSurface.GetCellData().GetArray( self.CellEntityIdsArrayName )
            outsideCellEntityIdsArray = outsideSurface.GetCellData().GetArray( self.CellEntityIdsArrayName )
            if self.TagsToModify!=None:
                # retag only cells whose current tag is listed in TagsToModify
                for i in range(insideCellEntityIdsArray.GetNumberOfTuples()):
                    if insideCellEntityIdsArray.GetValue(i) in self.TagsToModify:
                        insideCellEntityIdsArray.SetValue(i,self.InsideTag)
            else:
                insideCellEntityIdsArray.FillComponent(0,self.InsideTag)
            # merge the inside and the outside surfaces
            mergeSurface = vtk.vtkAppendPolyData()
            mergeSurface.AddInputData(insideSurface)
            mergeSurface.AddInputData(outsideSurface)
            mergeSurface.Update()
            self.Surface = mergeSurface.GetOutput()
            self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
def ArrayTagger(self,surface=None,arrayName=None,insideTag=None,rangeValues=[]):
if surface == None:
surface = self.Surface
if arrayName == None:
arrayName = self.ArrayName
if insideTag == None:
insideTag = self.InsideTag
if rangeValues == []:
rangeValues = self.Range
pointsToCells = vtk.vtkPointDataToCellData()
pointsToCells.SetInputData(surface)
pointsToCells.PassPointDataOn()
pointsToCells.Update()
surface = pointsToCells.GetPolyDataOutput()
cellEntityIdsArray = surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
cellArray = surface.GetCellData().GetArray(arrayName)
for i in range(surface.GetNumberOfCells()):
if cellArray.GetValue(i) > rangeValues[0] and cellArray.GetValue(i) < rangeValues[1]:
cellEntityIdsArray.SetValue(i,insideTag)
return surface
    def CleanPreciseRingDistance(self,ring):
        """Clean the 'PreciseRingDistance' ring by removing (or zeroing) points
        whose displacement would collapse three consecutive ring points onto a
        single surface triangle.

        Depending on self.HarmonicCleaningFixPoints, either the distance
        vectors of the offending points are zeroed in place, or the points are
        dropped and a new ring polyline is rebuilt and re-projected.

        NOTE(review): assumes `ring` is a single closed loop of line cells;
        as the trailing comment says, the first and last points are not yet
        checked. In the rebuild branch the line ids reuse original ring
        indices while points are re-inserted compactly — looks like an index
        mismatch; confirm before relying on that branch.
        """
        from vmtk import vmtkscripts
        def nextPointId(ring,cellId,currentPointId):
            # the other endpoint of cell 'cellId' w.r.t. currentPointId
            idList = vtk.vtkIdList()
            ring.GetCellPoints(cellId,idList)
            if idList.GetId(0) == currentPointId:
                return idList.GetId(1)
            else:
                return idList.GetId(0)
        def nextCellId(ring,pointId,currentCellId):
            # the other cell incident to 'pointId' w.r.t. currentCellId
            idList = vtk.vtkIdList()
            ring.GetPointCells(pointId,idList)
            if idList.GetId(0) == currentCellId:
                return idList.GetId(1)
            else:
                return idList.GetId(0)
        def checkThreeConsecutivePointsOnATriangle(lastThreeCellIdLists):
            # True when a single surface cell appears in all three id lists
            for item in lastThreeCellIdLists[2]:
                if item in lastThreeCellIdLists[1]:
                    if item in lastThreeCellIdLists[0]:
                        return True
            return False
        nP = ring.GetNumberOfPoints()
        nC = ring.GetNumberOfCells()
        # print ("points and cells: ", nP, ", ", nC)
        pointLocator = vtk.vtkPointLocator()
        pointLocator.SetDataSet(self.Surface)
        pointLocator.BuildLocator()
        lastThreePointsIds = []
        lastThreeCellIdLists = []
        distanceCleaned = [0, 0, 0, 0]
        cleanRingPointsIds = set()
        currentCellId = 0
        pointIdList = vtk.vtkIdList()
        cellIdList = vtk.vtkIdList()
        ring.GetCellPoints(currentCellId,pointIdList)
        currentRingPointId = pointIdList.GetId(0)
        # walk the ring loop, keeping a sliding window of the last three
        # points and the surface cells surrounding each of them
        for i in range(nP):
            lastThreePointsIds.append(currentRingPointId)
            currentSurfPointId = pointLocator.FindClosestPoint(ring.GetPoint(currentRingPointId))
            self.Surface.GetPointCells(currentSurfPointId,cellIdList)
            cellIds=[]
            for k in range(cellIdList.GetNumberOfIds()):
                cellIds.append(cellIdList.GetId(k))
            lastThreeCellIdLists.append(cellIds)
            currentCellId = nextCellId(ring,currentRingPointId,currentCellId)
            currentRingPointId = nextPointId(ring,currentCellId,currentRingPointId)
            if i > 1:
                # print("last three points: ",lastThreePointsIds)
                # print("last three cell id Lists: ",lastThreeCellIdLists)
                answer = checkThreeConsecutivePointsOnATriangle(lastThreeCellIdLists)
                # print("answer: ", answer)
                if answer:
                    # avoid cleaning two consecutive points: drop the middle
                    # point unless the previous one was already cleaned
                    if distanceCleaned[1] == 0:
                        distanceCleaned[2] = 1
                        cleanRingPointsIds.add(lastThreePointsIds[1])
                    else:
                        distanceCleaned[1] = 1
                        cleanRingPointsIds.add(lastThreePointsIds[0])
                # print("distance cleaned: ", distanceCleaned)
                # print("")
                lastThreePointsIds.pop(0)
                lastThreeCellIdLists.pop(0)
                distanceCleaned.append(0)
                distanceCleaned.pop(0)
        cleanRingPointsIds = sorted(cleanRingPointsIds)
        print(cleanRingPointsIds)
        if self.HarmonicCleaningFixPoints:
            # keep the same ring, only zero the offending distance vectors
            outputRing = ring
            distanceArray = outputRing.GetPointData().GetArray('PreciseRingDistance')
        else:
            # rebuild a new ring without the offending points
            outputRing = vtk.vtkPolyData()
            distanceArray = vtk.vtkDoubleArray()
        if self.HarmonicCleaningFixPoints:
            for pointId in cleanRingPointsIds:
                distanceArray.SetComponent(pointId,0,0.0)
                distanceArray.SetComponent(pointId,1,0.0)
                distanceArray.SetComponent(pointId,2,0.0)
        else:
            points = vtk.vtkPoints()
            lines = vtk.vtkCellArray()
            countCleanedPoints = 0
            ko = 1
            firstInsertedId = 0
            # first surviving point of the ring becomes the loop start
            while ko:
                if firstInsertedId not in cleanRingPointsIds:
                    ko = 0
                else:
                    firstInsertedId = firstInsertedId + 1
            lastInsertedId = firstInsertedId
            for i in range(lastInsertedId+1,nP):
                # currentSurfPointId = pointLocator.FindClosestPoint(ring.GetPoint(i))
                if i not in cleanRingPointsIds:
                    # print ('IDS: ring = ',i,'; surface = ',currentSurfPointId)
                    points.InsertNextPoint(ring.GetPoint(i))
                    line = vtk.vtkLine()
                    line.GetPointIds().SetId(0,lastInsertedId)
                    line.GetPointIds().SetId(1,i)
                    lines.InsertNextCell(line)
                    lastInsertedId = i
                else:
                    countCleanedPoints = countCleanedPoints + 1
                    # print ('IDS: ring = ',i,'; surface = ',currentSurfPointId)
                    print ('IDS: ring = ',i)
            # close the loop
            line = vtk.vtkLine()
            line.GetPointIds().SetId(0,lastInsertedId)
            line.GetPointIds().SetId(1,firstInsertedId)
            lines.InsertNextCell(line)
            print('\ncleaned points: ',countCleanedPoints,'/',nP,'\n')
            outputRing.SetPoints(points)
            outputRing.SetLines(lines)
            # re-project the rebuilt ring onto the original one to recover
            # its point-data arrays
            surfaceProjection = vmtkscripts.vmtkSurfaceProjection()
            surfaceProjection.Surface = outputRing
            surfaceProjection.ReferenceSurface = ring
            surfaceProjection.Execute()
            outputRing = surfaceProjection.Surface
        # FIRST AND LAST POINTS NOT YET CHECKED
        return outputRing
    def HarmonicTagger(self):
        """Warp the surface points harmonically toward the precise Value
        isoline of self.ArrayName, so that a later 'array' tagging matches the
        boundary without remeshing.

        Pipeline: extract the precise cut ring (clip) and the zig-zag ring
        (cell-wise tag boundary); measure their distance; clean it; extend
        each distance component harmonically over a buffer zone of radius
        self.HarmonicRadius; finally warp the surface by the resulting vector.

        Side effect: writes the intermediate ring to 'zigZagRing.vtp' in the
        working directory (debug output, presumably).
        """
        from vmtk import vmtkscripts
        from vmtk import vmtkcontribscripts
        from vmtk import vtkvmtk
        def zigZagRingExtractor(surface,arrayname,tag,rangevalues):
            # tag a copy of the surface cell-wise, threshold the tagged patch
            # and return its boundary edges (the zig-zag ring)
            surf = vtk.vtkPolyData()
            surf.DeepCopy(surface)
            surf = self.ArrayTagger(surf,arrayname,tag,rangevalues)
            th = vmtkcontribscripts.vmtkThreshold()
            th.Surface = surf
            th.ArrayName = self.CellEntityIdsArrayName
            th.CellData = True
            th.LowThreshold = tag
            th.HighThreshold = tag
            th.Execute()
            surf = th.Surface
            # boundaryExtractor = vtkvmtk.vtkvmtkPolyDataBoundaryExtractor()
            # boundaryExtractor.SetInputData(surf)
            # boundaryExtractor.Update()
            # zigZagRing = boundaryExtractor.GetOutput()
            featureEdges = vtk.vtkFeatureEdges()
            featureEdges.SetInputData(surf)
            featureEdges.BoundaryEdgesOn()
            featureEdges.FeatureEdgesOff()
            featureEdges.NonManifoldEdgesOff()
            featureEdges.ManifoldEdgesOff()
            featureEdges.ColoringOff()
            featureEdges.CreateDefaultLocator()
            featureEdges.Update()
            zigZagRing = featureEdges.GetOutput()
            return zigZagRing
        # NOTE(review): 'tags' is collected but never used below — dead code?
        tags = set()
        for i in range(self.Surface.GetNumberOfCells()):
            tags.add(self.CellEntityIdsArray.GetComponent(i,0))
        tags = sorted(tags)
        # use clip-array method only to extract the ring
        preciseRing = self.ClipArrayTagger(True)
        if self.HarmonicGenerateTag:
            self.ArrayTagger()
        # 12345 is a throw-away tag used only to isolate the patch below Value
        zigZagRing = zigZagRingExtractor(self.Surface,self.ArrayName,12345,[-math.inf, self.Value])
        # distance vectors from the zig-zag ring to the precise ring
        surfaceDistance = vmtkscripts.vmtkSurfaceDistance()
        surfaceDistance.Surface = zigZagRing
        surfaceDistance.ReferenceSurface = preciseRing
        surfaceDistance.DistanceVectorsArrayName = 'PreciseRingDistance'
        surfaceDistance.Execute()
        zigZagRing = surfaceDistance.Surface
        # keep only the distance array on the ring
        passArray = vtk.vtkPassArrays()
        passArray.SetInputData(zigZagRing)
        passArray.AddPointDataArray('PreciseRingDistance')
        passArray.Update()
        zigZagRing = passArray.GetOutput()
        zigZagRing = self.CleanPreciseRingDistance(zigZagRing)
        # debug dump of the cleaned ring
        writer = vtk.vtkXMLPolyDataWriter()
        writer.SetInputData(zigZagRing)
        writer.SetFileName('zigZagRing.vtp')
        writer.SetDataModeToBinary()
        writer.Write()
        # scalar distance from every surface point to the zig-zag ring,
        # used to delimit the harmonic buffer zone
        surfaceDistance2 = vmtkscripts.vmtkSurfaceDistance()
        surfaceDistance2.Surface = self.Surface
        surfaceDistance2.ReferenceSurface = zigZagRing
        surfaceDistance2.DistanceArrayName = 'ZigZagRingDistance'
        surfaceDistance2.Execute()
        self.Surface = surfaceDistance2.Surface
        print('OK!')
        # 2435 is another throw-away tag delimiting the buffer zone boundary
        homogeneousBoundaries = zigZagRingExtractor(self.Surface,'ZigZagRingDistance',2435,[-math.inf,self.HarmonicRadius])
        pointLocator = vtk.vtkPointLocator()
        pointLocator.SetDataSet(self.Surface)
        pointLocator.BuildLocator()
        # solve one harmonic problem per displacement component:
        # zero on the buffer boundary, the measured distance on the ring
        for k in range(3):
            print("Harmonic extension of component ",k)
            boundaryIds = vtk.vtkIdList()
            temperature = vtk.vtkDoubleArray()
            temperature.SetNumberOfComponents(1)
            for i in range(homogeneousBoundaries.GetNumberOfPoints()):
                idb = pointLocator.FindClosestPoint(homogeneousBoundaries.GetPoint(i))
                boundaryIds.InsertNextId(idb)
                temperature.InsertNextTuple1(0.0)
            warpArray = zigZagRing.GetPointData().GetArray('PreciseRingDistance')
            for i in range(zigZagRing.GetNumberOfPoints()):
                idb = pointLocator.FindClosestPoint(zigZagRing.GetPoint(i))
                boundaryIds.InsertNextId(idb)
                #temperature.InsertNextTuple1(1.0)
                temperature.InsertNextTuple1(warpArray.GetComponent(i,k))
            # perform harmonic mapping using temperature as boundary condition
            harmonicMappingFilter = vtkvmtk.vtkvmtkPolyDataHarmonicMappingFilter()
            harmonicMappingFilter.SetInputData(self.Surface)
            harmonicMappingFilter.SetHarmonicMappingArrayName('WarpVector'+str(k))
            harmonicMappingFilter.SetBoundaryPointIds(boundaryIds)
            harmonicMappingFilter.SetBoundaryValues(temperature)
            harmonicMappingFilter.SetAssemblyModeToFiniteElements()
            harmonicMappingFilter.Update()
            self.Surface = harmonicMappingFilter.GetOutput()
        # assemble the three scalar solutions into a single 3-component vector
        warpVector = vtk.vtkDoubleArray()
        warpVector.SetNumberOfComponents(3)
        warpVector.SetNumberOfTuples(self.Surface.GetNumberOfPoints())
        warpVector.SetName('WarpVector')
        warpVectorX = self.Surface.GetPointData().GetArray('WarpVector0')
        warpVectorY = self.Surface.GetPointData().GetArray('WarpVector1')
        warpVectorZ = self.Surface.GetPointData().GetArray('WarpVector2')
        for i in range(self.Surface.GetNumberOfPoints()):
            warpVector.SetComponent(i,0,warpVectorX.GetComponent(i,0))
            warpVector.SetComponent(i,1,warpVectorY.GetComponent(i,0))
            warpVector.SetComponent(i,2,warpVectorZ.GetComponent(i,0))
        self.Surface.GetPointData().AddArray(warpVector)
        # warp the surface by the harmonic displacement field
        warper = vtk.vtkWarpVector()
        warper.SetInputData(self.Surface)
        warper.SetInputArrayToProcess(0,0,0,0,'WarpVector')
        warper.SetScaleFactor(1.)
        warper.Update()
        self.Surface = warper.GetOutput()
    def ConnectivityTagger(self):
        """Split every existing tag into its disconnected components, giving
        each component the tag `original + regionId * ConnectivityOffset`,
        then merge everything back into self.Surface.
        """
        self.CleanSurface()
        tags = set()
        for i in range(self.Surface.GetNumberOfCells()):
            tags.add(self.CellEntityIdsArray.GetComponent(i,0))
        tags = sorted(tags)
        if self.PrintTags:
            self.PrintLog('Initial tags: '+str(tags))
        surface = []
        mergeTags = vtk.vtkAppendPolyData()
        for k, item in enumerate(tags):
            # isolate the cells carrying this tag (tolerance for float ids)
            th = vtk.vtkThreshold()
            th.SetInputData(self.Surface)
            th.SetInputArrayToProcess(0, 0, 0, 1, self.CellEntityIdsArrayName)
            th.ThresholdBetween(item-0.001,item+0.001)
            th.Update()
            # back from unstructured grid to polydata
            gf = vtk.vtkGeometryFilter()
            gf.SetInputConnection(th.GetOutputPort())
            gf.Update()
            surface.append(gf.GetOutput())
            # label each disconnected region with a RegionId array
            connectivityFilter = vtk.vtkConnectivityFilter()
            connectivityFilter.SetInputData(surface[k])
            connectivityFilter.SetExtractionModeToAllRegions()
            connectivityFilter.ColorRegionsOn()
            connectivityFilter.Update()
            surface[k] = connectivityFilter.GetOutput()
            cellEntityIdsArray = surface[k].GetCellData().GetArray(self.CellEntityIdsArrayName)
            regionIdArray = surface[k].GetCellData().GetArray('RegionId')
            for i in range(surface[k].GetNumberOfCells()):
                tag = cellEntityIdsArray.GetComponent(i,0) +regionIdArray.GetComponent(i,0)*self.ConnectivityOffset
                cellEntityIdsArray.SetComponent(i,0,tag)
            mergeTags.AddInputData(surface[k])
        mergeTags.Update()
        self.Surface = mergeTags.GetOutput()
        self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
    def DrawingTagger(self):
        """Tag a region drawn interactively by the user on the surface.

        Delegates to vmtkSurfaceRegionDrawing: InsideTag is written inside
        the drawn contour, OutsideTag outside of it, into the
        cell-entity-ids array; self.Surface is replaced by the result.
        """
        from vmtk import vmtkscripts
        drawer = vmtkscripts.vmtkSurfaceRegionDrawing()
        drawer.Surface = self.Surface
        drawer.InsideValue = self.InsideTag
        drawer.OutsideValue = self.OutsideTag
        drawer.OverwriteOutsideValue = self.OverwriteOutsideTag
        drawer.ArrayName = self.CellEntityIdsArrayName
        drawer.TagSmallestRegion = self.TagSmallestRegion
        # Tag cell data rather than point data.
        drawer.CellData = 1
        # NOTE(review): 'ComputeDisance' looks like a typo for
        # 'ComputeDistance'; if the drawer has no member of this name the
        # assignment is silently ignored — confirm against
        # vmtkSurfaceRegionDrawing's member list before "fixing".
        drawer.ComputeDisance = 0
        drawer.Execute()
        self.Surface = drawer.Surface
def Execute(self):
if self.Surface == None:
self.PrintError('Error: no Surface.')
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
# initialize the CellEntityIdsArray with OutsideTag in some cases
if self.CellEntityIdsArray == None or (self.OverwriteOutsideTag and self.Method != "connectivity"):
self.CellEntityIdsArray = vtk.vtkIntArray()
self.CellEntityIdsArray.SetName(self.CellEntityIdsArrayName)
self.CellEntityIdsArray.SetNumberOfComponents(1)
self.CellEntityIdsArray.SetNumberOfTuples(self.Surface.GetNumberOfCells())
self.Surface.GetCellData().AddArray(self.CellEntityIdsArray)
self.CellEntityIdsArray.FillComponent(0,self.OutsideTag)
if self.Method in ['array','harmonic']: # to be extended also to other method ['cliparray','array','harmonic']:
if self.Value == None and self.Range == None:
self.PrintError("This method need the definition of a value or a range")
elif self.Range == None:
if self.InsideOut:
self.Range = [self.Value, math.inf]
else:
self.Range = [-math.inf, self.Value]
# print("range: ",self.Range)
if self.Method == 'cliparray':
self.ClipArrayTagger()
elif self.Method == 'array':
self.ArrayTagger()
elif self.Method == 'harmonic':
self.HarmonicTagger()
elif self.Method == 'connectivity':
self.ConnectivityTagger()
elif self.Method == 'constant':
self.CellEntityIdsArray.FillComponent(0,self.InsideTag)
elif self.Method == 'drawing':
self.DrawingTagger()
else:
self.PrintError("Method unknown (available: cliparray, array, connectivity, constant, drawing)")
if self.CleanOutput:
self.CleanSurface()
if self.PrintTags:
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
self.Tags = set()
for i in range(self.Surface.GetNumberOfCells()):
self.Tags.add(self.CellEntityIdsArray.GetComponent(i,0))
self.Tags = sorted(self.Tags)
self.PrintLog('Tags of the output surface: '+str(self.Tags))
# useless, already triangulated
# if self.Triangulate:
# triangleFilter = vtk.vtkTriangleFilter()
# triangleFilter.SetInputData(self.Surface)
# triangleFilter.Update()
# self.Surface = triangleFilter.GetOutput()
if __name__ == '__main__':
    # Run this module as a standalone pype script from the command line.
    pipeline = pypes.pypeMain()
    pipeline.Arguments = sys.argv
    pipeline.Execute()
|
[
"vtk.vtkPoints",
"vmtk.vmtkscripts.vmtkSurfaceDistance",
"vtk.vtkWarpVector",
"vtk.vtkThreshold",
"vmtk.vmtkscripts.vmtkSurfaceRegionDrawing",
"vmtk.pypes.pypeMain",
"vtk.vtkAppendPolyData",
"vmtk.vmtkscripts.vmtkSurfaceClipper",
"vtk.vtkIdList",
"vtk.vtkConnectivityFilter",
"vmtk.vmtkcontribscripts.vmtkThreshold",
"vtk.vtkPolyData",
"vtk.vtkCellArray",
"vtk.vtkPassArrays",
"vtk.vtkIntArray",
"vtk.vtkXMLPolyDataWriter",
"vtk.vtkLine",
"vmtk.vmtkscripts.vmtkSurfaceProjection",
"vtk.vtkDoubleArray",
"vtk.vtkPointLocator",
"vtk.vtkPointDataToCellData",
"vtk.vtkGeometryFilter",
"vtk.vtkCleanPolyData",
"vmtk.pypes.pypeScript.__init__",
"vtk.vtkFeatureEdges",
"vmtk.vtkvmtk.vtkvmtkPolyDataHarmonicMappingFilter"
] |
[((24418, 24434), 'vmtk.pypes.pypeMain', 'pypes.pypeMain', ([], {}), '()\n', (24432, 24434), False, 'from vmtk import pypes\n'), ((865, 896), 'vmtk.pypes.pypeScript.__init__', 'pypes.pypeScript.__init__', (['self'], {}), '(self)\n', (890, 896), False, 'from vmtk import pypes\n'), ((5226, 5248), 'vtk.vtkCleanPolyData', 'vtk.vtkCleanPolyData', ([], {}), '()\n', (5246, 5248), False, 'import vtk\n'), ((5615, 5647), 'vmtk.vmtkscripts.vmtkSurfaceClipper', 'vmtkscripts.vmtkSurfaceClipper', ([], {}), '()\n', (5645, 5647), False, 'from vmtk import vmtkscripts\n'), ((7572, 7600), 'vtk.vtkPointDataToCellData', 'vtk.vtkPointDataToCellData', ([], {}), '()\n', (7598, 7600), False, 'import vtk\n'), ((9245, 9266), 'vtk.vtkPointLocator', 'vtk.vtkPointLocator', ([], {}), '()\n', (9264, 9266), False, 'import vtk\n'), ((9539, 9554), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (9552, 9554), False, 'import vtk\n'), ((9576, 9591), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (9589, 9591), False, 'import vtk\n'), ((15547, 15580), 'vmtk.vmtkscripts.vmtkSurfaceDistance', 'vmtkscripts.vmtkSurfaceDistance', ([], {}), '()\n', (15578, 15580), False, 'from vmtk import vmtkscripts\n'), ((15854, 15873), 'vtk.vtkPassArrays', 'vtk.vtkPassArrays', ([], {}), '()\n', (15871, 15873), False, 'import vtk\n'), ((16128, 16154), 'vtk.vtkXMLPolyDataWriter', 'vtk.vtkXMLPolyDataWriter', ([], {}), '()\n', (16152, 16154), False, 'import vtk\n'), ((16328, 16361), 'vmtk.vmtkscripts.vmtkSurfaceDistance', 'vmtkscripts.vmtkSurfaceDistance', ([], {}), '()\n', (16359, 16361), False, 'from vmtk import vmtkscripts\n'), ((16784, 16805), 'vtk.vtkPointLocator', 'vtk.vtkPointLocator', ([], {}), '()\n', (16803, 16805), False, 'import vtk\n'), ((18397, 18417), 'vtk.vtkDoubleArray', 'vtk.vtkDoubleArray', ([], {}), '()\n', (18415, 18417), False, 'import vtk\n'), ((19143, 19162), 'vtk.vtkWarpVector', 'vtk.vtkWarpVector', ([], {}), '()\n', (19160, 19162), False, 'import vtk\n'), ((19726, 19749), 
'vtk.vtkAppendPolyData', 'vtk.vtkAppendPolyData', ([], {}), '()\n', (19747, 19749), False, 'import vtk\n'), ((21226, 21264), 'vmtk.vmtkscripts.vmtkSurfaceRegionDrawing', 'vmtkscripts.vmtkSurfaceRegionDrawing', ([], {}), '()\n', (21262, 21264), False, 'from vmtk import vmtkscripts\n'), ((6871, 6894), 'vtk.vtkAppendPolyData', 'vtk.vtkAppendPolyData', ([], {}), '()\n', (6892, 6894), False, 'import vtk\n'), ((8308, 8323), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (8321, 8323), False, 'import vtk\n'), ((8590, 8605), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (8603, 8605), False, 'import vtk\n'), ((11475, 11492), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (11490, 11492), False, 'import vtk\n'), ((11521, 11541), 'vtk.vtkDoubleArray', 'vtk.vtkDoubleArray', ([], {}), '()\n', (11539, 11541), False, 'import vtk\n'), ((11842, 11857), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (11855, 11857), False, 'import vtk\n'), ((11878, 11896), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (11894, 11896), False, 'import vtk\n'), ((13048, 13061), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (13059, 13061), False, 'import vtk\n'), ((13417, 13452), 'vmtk.vmtkscripts.vmtkSurfaceProjection', 'vmtkscripts.vmtkSurfaceProjection', ([], {}), '()\n', (13450, 13452), False, 'from vmtk import vmtkscripts\n'), ((13959, 13976), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (13974, 13976), False, 'import vtk\n'), ((14097, 14131), 'vmtk.vmtkcontribscripts.vmtkThreshold', 'vmtkcontribscripts.vmtkThreshold', ([], {}), '()\n', (14129, 14131), False, 'from vmtk import vmtkcontribscripts\n'), ((14627, 14648), 'vtk.vtkFeatureEdges', 'vtk.vtkFeatureEdges', ([], {}), '()\n', (14646, 14648), False, 'import vtk\n'), ((17006, 17021), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (17019, 17021), False, 'import vtk\n'), ((17048, 17068), 'vtk.vtkDoubleArray', 'vtk.vtkDoubleArray', ([], {}), '()\n', (17066, 17068), False, 'import vtk\n'), ((17879, 
17925), 'vmtk.vtkvmtk.vtkvmtkPolyDataHarmonicMappingFilter', 'vtkvmtk.vtkvmtkPolyDataHarmonicMappingFilter', ([], {}), '()\n', (17923, 17925), False, 'from vmtk import vtkvmtk\n'), ((19809, 19827), 'vtk.vtkThreshold', 'vtk.vtkThreshold', ([], {}), '()\n', (19825, 19827), False, 'import vtk\n'), ((20045, 20068), 'vtk.vtkGeometryFilter', 'vtk.vtkGeometryFilter', ([], {}), '()\n', (20066, 20068), False, 'import vtk\n'), ((20224, 20251), 'vtk.vtkConnectivityFilter', 'vtk.vtkConnectivityFilter', ([], {}), '()\n', (20249, 20251), False, 'import vtk\n'), ((22126, 22143), 'vtk.vtkIntArray', 'vtk.vtkIntArray', ([], {}), '()\n', (22141, 22143), False, 'import vtk\n'), ((12573, 12586), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (12584, 12586), False, 'import vtk\n')]
|
import logging
from gehomesdk.erd.converters.abstract import ErdReadOnlyConverter
from gehomesdk.erd.converters.primitives import *
from gehomesdk.erd.values.laundry import ErdTankStatus, TankStatus, TANK_STATUS_MAP
# Module-level logger named after this module for hierarchical filtering.
_LOGGER = logging.getLogger(__name__)
class TankStatusConverter(ErdReadOnlyConverter[TankStatus]):
    """Read-only converter turning a raw ERD tank-status string into a
    TankStatus value."""

    def erd_decode(self, value: str) -> TankStatus:
        """Decode *value* through ErdTankStatus and TANK_STATUS_MAP.

        Falls back to ErdTankStatus.NA when the raw value does not parse
        as a known status or has no mapping entry.
        """
        try:
            status_code = ErdTankStatus(erd_decode_int(value))
            decoded = TANK_STATUS_MAP[status_code].value
        except (KeyError, ValueError):
            decoded = ErdTankStatus.NA
        return decoded
|
[
"logging.getLogger"
] |
[((228, 255), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (245, 255), False, 'import logging\n')]
|
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Time:
Author:
Description:
"""
import unittest
from cve_manager.function.cache import LRUCache
class TestCache(unittest.TestCase):
    """Exercises basic put/get behaviour of the bounded LRUCache."""

    def setUp(self):
        # Capacity of two so the eviction bound is easy to exercise.
        self.cache = LRUCache(2)

    def test_common(self):
        self.cache.put('a', [1])
        self.cache.put('b', 2)
        # Overwriting an existing key must not grow the queue past capacity.
        self.cache.put('a', 3)
        self.assertEqual(len(self.cache.queue), 2)
        self.assertEqual(self.cache.get('a'), 3)
|
[
"cve_manager.function.cache.LRUCache"
] |
[((899, 910), 'cve_manager.function.cache.LRUCache', 'LRUCache', (['(2)'], {}), '(2)\n', (907, 910), False, 'from cve_manager.function.cache import LRUCache\n')]
|
"""
"""
# IMPORT modules. Must have unittest, and probably coast.
import coast
from coast import general_utils
import unittest
import numpy as np
import os.path as path
import xarray as xr
import matplotlib.pyplot as plt
import unit_test_files as files
class test_transect_methods(unittest.TestCase):
    """Regression tests for coast transect extraction, cross-transect
    transport and geostrophic-flow calculations against fixed reference
    values and checksums."""

    def test_determine_extract_transect_indices(self):
        """Indices of the transect between (51,-5) and (49,-9) must match
        the reference path through the model grid."""
        nemo_t = coast.Gridded(files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid)
        yt, xt, length_of_line = nemo_t.transect_indices([51, -5], [49, -9])
        # Reference j-indices of the transect line.
        yt_ref = [
            164, 163, 162, 162, 161, 160, 159, 158, 157, 156, 156, 155, 154,
            153, 152, 152, 151, 150, 149, 148, 147, 146, 146, 145, 144, 143,
            142, 142, 141, 140, 139, 138, 137, 136, 136, 135, 134,
        ]
        # Reference i-indices: 134 down to 98 inclusive.
        xt_ref = list(range(134, 97, -1))
        length_ref = 37
        self.assertTrue(xt == xt_ref, msg="check1")
        self.assertTrue(yt == yt_ref, msg="check2")
        self.assertTrue(length_of_line == length_ref, msg="check3")

    def test_calculate_transport_velocity_and_depth(self):
        """Cross-transect flow checksums plus the associated plot outputs."""
        with self.subTest("Calculate_transports and velocties and depth"):
            nemo_t = coast.Gridded(
                fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
            )
            nemo_u = coast.Gridded(
                fn_data=files.fn_nemo_grid_u_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_u_grid
            )
            nemo_v = coast.Gridded(
                fn_data=files.fn_nemo_grid_v_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_v_grid
            )
            nemo_f = coast.Gridded(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)
            tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
            tran_f.calc_flow_across_transect(nemo_u, nemo_v)
            vel_sum = tran_f.data_cross_tran_flow.normal_velocities.sum(dim=("t_dim", "z_dim", "r_dim")).item()
            trans_sum = tran_f.data_cross_tran_flow.normal_transports.sum(dim=("t_dim", "r_dim")).item()
            self.assertTrue(np.isclose(vel_sum, -253.6484375), msg="check1")
            self.assertTrue(np.isclose(trans_sum, -48.67562136873888), msg="check2")
        with self.subTest("plot_transect_on_map"):
            fig, ax = tran_f.plot_transect_on_map()
            ax.set_xlim([-20, 0])  # Problem: nice to make the land appear.
            ax.set_ylim([45, 65])  # But can not call plt.show() before adjustments are made...
            fig.savefig(files.dn_fig + "transect_map.png")
            plt.close("all")
        with self.subTest("plot_normal_velocity"):
            plot_opts = {"fig_size": (5, 3), "title": "Normal velocities"}
            fig, ax = tran_f.plot_normal_velocity(time=0, cmap="seismic", plot_info=plot_opts, smoothing_window=2)
            fig.tight_layout()
            fig.savefig(files.dn_fig + "transect_velocities.png")
            plt.close("all")
        with self.subTest("plot_depth_integrated_transport"):
            plot_opts = {"fig_size": (5, 3), "title": "Transport across AB"}
            fig, ax = tran_f.plot_depth_integrated_transport(time=0, plot_info=plot_opts, smoothing_window=2)
            fig.tight_layout()
            fig.savefig(files.dn_fig + "transect_transport.png")
            plt.close("all")

    def test_transect_density_and_pressure(self):
        """Density and pressure checksums along a t-grid transect."""
        nemo_t = coast.Gridded(
            fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
        )
        tran_t = coast.TransectT(nemo_t, (54, -15), (56, -12))
        tran_t.construct_pressure()
        density_sum = tran_t.data.density_zlevels.sum(dim=["t_dim", "r_dim", "depth_z_levels"]).compute().item()
        pressure_h_sum = tran_t.data.pressure_h_zlevels.sum(dim=["t_dim", "r_dim", "depth_z_levels"]).compute().item()
        pressure_s_sum = tran_t.data.pressure_s.sum(dim=["t_dim", "r_dim"]).compute().item()
        self.assertTrue(np.isclose(density_sum, 23800545.87457855), msg="check1")
        self.assertTrue(np.isclose(pressure_h_sum, 135536478.93335825), msg="check2")
        self.assertTrue(np.isclose(pressure_s_sum, -285918.5625), msg="check3")

    def test_cross_transect_geostrophic_flow(self):
        """Geostrophic velocity/transport checksums across an f-grid transect."""
        nemo_f = coast.Gridded(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)
        tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
        nemo_t = coast.Gridded(
            fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
        )
        tran_f.calc_geostrophic_flow(nemo_t, config_u=files.fn_config_u_grid, config_v=files.fn_config_v_grid)
        hpg_vel = tran_f.data_cross_tran_flow.normal_velocity_hpg.sum(dim=("t_dim", "depth_z_levels", "r_dim")).item()
        spg_vel = tran_f.data_cross_tran_flow.normal_velocity_spg.sum(dim=("t_dim", "r_dim")).item()
        hpg_trans = tran_f.data_cross_tran_flow.normal_transport_hpg.sum(dim=("t_dim", "r_dim")).item()
        spg_trans = tran_f.data_cross_tran_flow.normal_transport_spg.sum(dim=("t_dim", "r_dim")).item()
        self.assertTrue(np.isclose(hpg_vel, 84.8632969783), msg="check1")
        self.assertTrue(np.isclose(spg_vel, -5.09718418121), msg="check2")
        self.assertTrue(np.isclose(hpg_trans, 115.2587369660), msg="check3")
        self.assertTrue(np.isclose(spg_trans, -106.7897376093), msg="check4")
|
[
"coast.TransectT",
"matplotlib.pyplot.close",
"coast.TransectF",
"numpy.isclose",
"coast.Gridded"
] |
[((377, 481), 'coast.Gridded', 'coast.Gridded', (['files.fn_nemo_grid_t_dat'], {'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config\n =files.fn_config_t_grid)\n', (390, 481), False, 'import coast\n'), ((4641, 4752), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_t_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_t_grid)\n', (4654, 4752), False, 'import coast\n'), ((4788, 4833), 'coast.TransectT', 'coast.TransectT', (['nemo_t', '(54, -15)', '(56, -12)'], {}), '(nemo_t, (54, -15), (56, -12))\n', (4803, 4833), False, 'import coast\n'), ((5191, 5228), 'numpy.isclose', 'np.isclose', (['cksum1', '(23800545.87457855)'], {}), '(cksum1, 23800545.87457855)\n', (5201, 5228), True, 'import numpy as np\n'), ((5246, 5284), 'numpy.isclose', 'np.isclose', (['cksum2', '(135536478.93335825)'], {}), '(cksum2, 135536478.93335825)\n', (5256, 5284), True, 'import numpy as np\n'), ((5302, 5334), 'numpy.isclose', 'np.isclose', (['cksum3', '(-285918.5625)'], {}), '(cksum3, -285918.5625)\n', (5312, 5334), True, 'import numpy as np\n'), ((5543, 5616), 'coast.Gridded', 'coast.Gridded', ([], {'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_f_grid'}), '(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)\n', (5556, 5616), False, 'import coast\n'), ((5634, 5679), 'coast.TransectF', 'coast.TransectF', (['nemo_f', '(54, -15)', '(56, -12)'], {}), '(nemo_f, (54, -15), (56, -12))\n', (5649, 5679), False, 'import coast\n'), ((5697, 5808), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_t_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_t_grid)\n', (5710, 5808), False, 'import coast\n'), ((6376, 6409), 'numpy.isclose', 
'np.isclose', (['cksum1', '(84.8632969783)'], {}), '(cksum1, 84.8632969783)\n', (6386, 6409), True, 'import numpy as np\n'), ((6427, 6461), 'numpy.isclose', 'np.isclose', (['cksum2', '(-5.09718418121)'], {}), '(cksum2, -5.09718418121)\n', (6437, 6461), True, 'import numpy as np\n'), ((6479, 6512), 'numpy.isclose', 'np.isclose', (['cksum3', '(115.258736966)'], {}), '(cksum3, 115.258736966)\n', (6489, 6512), True, 'import numpy as np\n'), ((6531, 6566), 'numpy.isclose', 'np.isclose', (['cksum4', '(-106.7897376093)'], {}), '(cksum4, -106.7897376093)\n', (6541, 6566), True, 'import numpy as np\n'), ((2326, 2437), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_t_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_t_grid)\n', (2339, 2437), False, 'import coast\n'), ((2485, 2596), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_u_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_u_grid'}), '(fn_data=files.fn_nemo_grid_u_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_u_grid)\n', (2498, 2596), False, 'import coast\n'), ((2644, 2755), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_v_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_v_grid'}), '(fn_data=files.fn_nemo_grid_v_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_v_grid)\n', (2657, 2755), False, 'import coast\n'), ((2803, 2876), 'coast.Gridded', 'coast.Gridded', ([], {'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_f_grid'}), '(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)\n', (2816, 2876), False, 'import coast\n'), ((2899, 2944), 'coast.TransectF', 'coast.TransectF', (['nemo_f', '(54, -15)', '(56, -12)'], {}), '(nemo_f, (54, -15), (56, -12))\n', (2914, 2944), False, 'import coast\n'), ((3240, 3272), 'numpy.isclose', 'np.isclose', (['cksum1', 
'(-253.6484375)'], {}), '(cksum1, -253.6484375)\n', (3250, 3272), True, 'import numpy as np\n'), ((3294, 3332), 'numpy.isclose', 'np.isclose', (['cksum2', '(-48.67562136873888)'], {}), '(cksum2, -48.67562136873888)\n', (3304, 3332), True, 'import numpy as np\n'), ((3813, 3829), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3822, 3829), True, 'import matplotlib.pyplot as plt\n'), ((4181, 4197), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4190, 4197), True, 'import matplotlib.pyplot as plt\n'), ((4556, 4572), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4565, 4572), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from csv import reader
from decimal import *
def SDM(datalist):
    """Successive-difference method (Chinese: zhu cha fa).

    Pairs each element of the first half of ``datalist`` with the element
    half a list-length later and returns their exact differences as
    engineering-notation strings, e.g. ``SDM(['1', '2', '3', '4'])`` gives
    ``['2', '2']``.

    :param datalist: sequence of Decimal-convertible values (typically
        numeric strings); with an odd length the final element is ignored
    :return: list of ``len(datalist) // 2`` difference strings
    """
    half = len(datalist) // 2
    # Decimal keeps the differences free of binary floating-point error.
    return [
        (Decimal(datalist[i + half]) - Decimal(datalist[i])).to_eng_string()
        for i in range(half)
    ]
class DataReader:
    # Parses a comma-separated experiment-description file into attributes.
    # Expected layout (inferred from the indices below — confirm against a
    # sample file):
    #   row 0: result variable name at col 1, its unit at col 3, P at col 5
    #   row 1: result formula at col 1, SDM yes/no flag ('Y'/'N') at col 3
    #   rows 4+: one measured variable per row —
    #     col 0 name, col 1 unit, col 2 Ub formula, col 3 Ub value,
    #     col 4 per-variable SDM flag, col 5 formula, cols 6+ measurements.
    def __init__(self, filename):
        """Read *filename* and populate the per-variable lists.

        :param filename: path to a UTF-8 CSV file in the layout above
        :raises IOError: if the SDM flag cell is neither 'Y' nor 'N'
        """
        self.filename = filename
        with open(filename, 'rt', encoding='UTF-8') as raw_data:
            readers = reader(raw_data, delimiter=',')
            overx = list(readers)
            data = np.array(overx)
        self.data = data
        self.resultVar = data[0][1]
        self.resultUnit = data[0][3]
        self.function = data[1][1]
        self.P = float(data[0][5])
        # 'Y'/'N' cell controls whether the successive-difference method
        # applies; anything else is treated as a malformed file.
        if data[1][3] == 'Y':
            self.flag = True
        elif data[1][3] == 'N':
            self.flag = False
        else:
            raise IOError('Y or N wanted, not ' + data[1][3])
        # Rows 4 onwards each describe one measured variable.
        experimentdata = data[4:len(data)]
        tempvarlist = []
        tempunitlist = []
        tempdatalist = []
        tempUblist = []
        tempSDMflag = []
        tempUbFunclist = []
        tempfunctionlist = []
        for item in experimentdata:
            tempvarlist.append(item[0])
            tempunitlist.append(item[1])
            tempUbFunclist.append(item[2])
            temptempdata = []
            # Columns 3/4/5 are scalar metadata; the remaining non-empty
            # cells are the actual measurements.
            for j in range(3, len(item)):
                if j == 3:
                    tempUblist.append(item[j])
                elif j == 4:
                    tempSDMflag.append(item[j])
                elif j == 5:
                    tempfunctionlist.append(item[j])
                else:
                    if not item[j] == '':
                        temptempdata.append(item[j])
            tempdatalist.append(temptempdata)
        self.varList = tempvarlist
        self.unitList = tempunitlist
        self.UbList = tempUblist
        self.UbFuncList = tempUbFunclist
        self.SDMflagList = tempSDMflag
        self.TempFunctionList = tempfunctionlist
        # Apply the successive-difference method to any variable whose
        # per-variable flag is 'Y'.
        for i in range(0, len(tempSDMflag)):
            if tempSDMflag[i] == 'Y':
                tempdatalist[i] = SDM(tempdatalist[i])
        self.dataList = tempdatalist
|
[
"numpy.array",
"csv.reader"
] |
[((566, 597), 'csv.reader', 'reader', (['raw_data'], {'delimiter': '""","""'}), "(raw_data, delimiter=',')\n", (572, 597), False, 'from csv import reader\n'), ((653, 668), 'numpy.array', 'np.array', (['overx'], {}), '(overx)\n', (661, 668), True, 'import numpy as np\n')]
|
from swagger import RoutingSpecGenerator
import os
def replace_class_instances(className, fileContents):
    """Substitute every class-name placeholder in *fileContents*.

    Handles the camel-case ('**classname**'), exact ('**CLASSNAME**') and
    plural ('**PLURAL**'/'**plural**') placeholder variants.
    """
    contents = replace_class_named_instances(className, fileContents)
    contents = str(contents).replace('**CLASSNAME**', className)
    return replace_class_plural_instances(className, contents)
def replace_class_named_instances(className, fileContents):
    """Replace the '**classname**' placeholder with the camel-cased name."""
    camel = js_camel_case_string(className)
    return str(fileContents).replace('**classname**', camel)
def replace_class_plural_instances(className, fileContents):
    """Replace '**PLURAL**'/'**plural**' placeholders with pluralised names."""
    upper_plural = transform_class_name_to_plural(className)
    lower_plural = transform_class_name_to_plural(className.lower())
    return (str(fileContents)
            .replace('**PLURAL**', upper_plural)
            .replace('**plural**', lower_plural))
def transform_class_name_to_plural(className):
    """Return the English plural of *className*.

    Names ending in 'y' become '...ies' (Category -> Categories); all
    others get an 's' appended (Model -> Models).
    """
    # endswith also handles the empty string, which the old index-based
    # last-character check crashed on with IndexError.
    if className.endswith("y"):
        return className[:-1] + "ies"
    return className + "s"
def js_camel_case_string(string):
    """Lower-case the first character (JavaScript camelCase convention).

    An empty string is returned unchanged instead of raising IndexError
    as the old ``string[0]`` indexing did.
    """
    if not string:
        return string
    return string[0].lower() + string[1:]
# Directory (inside the service repository) that receives the generated
# swagger path specification files.
swagger_paths_spec_dir = r'/Source/API/service-hmlFhirConverter/src/main/resources/swagger/paths'
routingSpecGenerator = RoutingSpecGenerator.RoutingSpecGenerator()
modelNames = routingSpecGenerator.get_model_names()
# Render the routing template once per model and write it out as <Model>.yaml.
for model in modelNames:
    modelTemplate = routingSpecGenerator.get_template()
    modelTemplate = replace_class_instances(model, modelTemplate)
    routingSpecGenerator.write_file(os.path.join(swagger_paths_spec_dir, model + '.yaml'), modelTemplate)
|
[
"swagger.RoutingSpecGenerator.RoutingSpecGenerator",
"os.path.join"
] |
[((1204, 1247), 'swagger.RoutingSpecGenerator.RoutingSpecGenerator', 'RoutingSpecGenerator.RoutingSpecGenerator', ([], {}), '()\n', (1245, 1247), False, 'from swagger import RoutingSpecGenerator\n'), ((1484, 1537), 'os.path.join', 'os.path.join', (['swagger_paths_spec_dir', "(model + '.yaml')"], {}), "(swagger_paths_spec_dir, model + '.yaml')\n", (1496, 1537), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import random
import unittest
import tondo
class TestSequenceFunctions(unittest.TestCase):
JSON_ITEMS = [
'content',
'contributor',
'type',
'url'
]
def setUp(self):
self.json = tondo.loadjsons()
def test_json_integrity(self):
for json_file in self.json:
for item in self.json[json_file]:
self.assertListEqual(item.keys(), self.JSON_ITEMS)
if __name__ == '__main__':
    # Build and run the suite explicitly so the verbosity can be set.
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestSequenceFunctions)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
|
[
"unittest.TextTestRunner",
"tondo.loadjsons",
"unittest.TestLoader"
] |
[((258, 275), 'tondo.loadjsons', 'tondo.loadjsons', ([], {}), '()\n', (273, 275), False, 'import tondo\n'), ((502, 523), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (521, 523), False, 'import unittest\n'), ((573, 609), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (596, 609), False, 'import unittest\n')]
|
"""users table
Revision ID: a7acd67386c9
Revises:
Create Date: 2020-07-21 16:17:36.492935
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): '<KEY>' looks like a redacted placeholder; the module
# docstring says the revision id should be 'a7acd67386c9' — confirm and
# restore the real id, otherwise Alembic cannot locate this revision.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'user' table and its unique indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'user',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('address', sa.String(length=256), nullable=True),
        sa.Column('phone', sa.String(length=20), nullable=True),
        sa.Column('last_password_hash', sa.String(length=32), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    # Each of these columns must be unique per user.
    for column in ('address', 'phone', 'username'):
        op.create_index(op.f('ix_user_' + column), 'user', [column], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Drop the 'user' table and its indexes (exact reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column in ('username', 'phone', 'address'):
        op.drop_index(op.f('ix_user_' + column), table_name='user')
    op.drop_table('user')
    # ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"alembic.op.f",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((1275, 1296), 'alembic.op.drop_table', 'op.drop_table', (['"""user"""'], {}), "('user')\n", (1288, 1296), False, 'from alembic import op\n'), ((695, 724), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (718, 724), True, 'import sqlalchemy as sa\n'), ((751, 774), 'alembic.op.f', 'op.f', (['"""ix_user_address"""'], {}), "('ix_user_address')\n", (755, 774), False, 'from alembic import op\n'), ((830, 851), 'alembic.op.f', 'op.f', (['"""ix_user_phone"""'], {}), "('ix_user_phone')\n", (834, 851), False, 'from alembic import op\n'), ((905, 929), 'alembic.op.f', 'op.f', (['"""ix_user_username"""'], {}), "('ix_user_username')\n", (909, 929), False, 'from alembic import op\n'), ((1104, 1128), 'alembic.op.f', 'op.f', (['"""ix_user_username"""'], {}), "('ix_user_username')\n", (1108, 1128), False, 'from alembic import op\n'), ((1167, 1188), 'alembic.op.f', 'op.f', (['"""ix_user_phone"""'], {}), "('ix_user_phone')\n", (1171, 1188), False, 'from alembic import op\n'), ((1227, 1250), 'alembic.op.f', 'op.f', (['"""ix_user_address"""'], {}), "('ix_user_address')\n", (1231, 1250), False, 'from alembic import op\n'), ((397, 409), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (407, 409), True, 'import sqlalchemy as sa\n'), ((454, 474), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (463, 474), True, 'import sqlalchemy as sa\n'), ((517, 538), 'sqlalchemy.String', 'sa.String', ([], {'length': '(256)'}), '(length=256)\n', (526, 538), True, 'import sqlalchemy as sa\n'), ((579, 599), 'sqlalchemy.String', 'sa.String', ([], {'length': '(20)'}), '(length=20)\n', (588, 599), True, 'import sqlalchemy as sa\n'), ((653, 673), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (662, 673), True, 'import sqlalchemy as sa\n')]
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
import dataclasses
import context
import wallet.services.offchain.p2p_payment as pc_service
import wallet.services.offchain.utils as utils
from diem import identifier, LocalAccount, jsonrpc
from diem_utils.types.currencies import DiemCurrency
from tests.wallet_tests.resources.seeds.one_user_seeder import OneUser
from wallet import storage
from wallet.services import offchain as offchain_service
from wallet.services.account import (
generate_new_subaddress,
)
from wallet.services.offchain import offchain as offchain_service
from wallet.storage import db_session
from wallet import storage
import offchain
from wallet.types import TransactionStatus
# Fixed UUID used as a command-id fixture (not referenced in the tests
# visible in this chunk — presumably used further down the file).
CID = "35a1b548-3170-438f-bf3a-6ca0fef85d15"
# All payment tests below operate in XUS.
currency = DiemCurrency.XUS
def test_save_outbound_payment_command(monkeypatch):
    """An outbound p2p payment command is persisted as OFF_CHAIN_OUTBOUND
    and transitions to OFF_CHAIN_WAIT after the offchain worker sends it.
    """
    user = OneUser.run(
        db_session, account_amount=100_000_000_000, account_currency=currency
    )
    amount = 10_000_000_000
    # The receiver is an arbitrary freshly generated account/subaddress.
    receiver = LocalAccount.generate()
    sub_address = identifier.gen_subaddress()
    cmd = pc_service.save_outbound_payment_command(
        user.account_id, receiver.account_address, sub_address, amount, currency
    )
    assert cmd is not None
    assert cmd.reference_id() is not None
    # The command must be retrievable from storage under its reference id.
    model = storage.get_payment_command(cmd.reference_id())
    assert model is not None
    assert model.reference_id is not None
    assert model.status == TransactionStatus.OFF_CHAIN_OUTBOUND
    with monkeypatch.context() as m:
        # Stub the network call: pretend the counterparty VASP acked the cid.
        m.setattr(
            context.get().offchain_client,
            "send_command",
            lambda c, _: offchain.reply_request(c.cid),
        )
        offchain_service.process_offchain_tasks()
        db_session.refresh(model)
        # After a successful send the command waits (status name suggests
        # awaiting the counterparty's response).
        assert model.status == TransactionStatus.OFF_CHAIN_WAIT
def test_process_inbound_payment_command(monkeypatch):
    """An inbound payment command is accepted (HTTP 200), stored, and
    marked OFF_CHAIN_RECEIVER_OUTBOUND / inbound.
    """
    hrp = context.get().config.diem_address_hrp()
    user = OneUser.run(
        db_session, account_amount=100_000_000_000, account_currency=currency
    )
    amount = 10_000_000_000
    # The sender is an arbitrary external account; the receiver is a fresh
    # subaddress under the test user's account on this VASP.
    sender = LocalAccount.generate()
    sender_sub_address = identifier.gen_subaddress()
    receiver_sub_address = generate_new_subaddress(user.account_id)
    cmd = offchain.PaymentCommand.init(
        sender_account_id=identifier.encode_account(
            sender.account_address, sender_sub_address, hrp
        ),
        sender_kyc_data=utils.user_kyc_data(user.account_id),
        receiver_account_id=identifier.encode_account(
            context.get().config.vasp_address, receiver_sub_address, hrp
        ),
        amount=amount,
        currency=currency.value,
        inbound=True,
    )
    with monkeypatch.context() as m:
        client = context.get().offchain_client
        # Bypass JWS verification and inbound processing: hand the command
        # straight back as an inbound payment command.
        m.setattr(
            client,
            "deserialize_jws_request",
            lambda _, c: client.create_inbound_payment_command(c.cid, c.payment),
        )
        m.setattr(
            client,
            "process_inbound_request",
            lambda c, _: client.create_inbound_payment_command(c.cid, c.payment),
        )
        m.setattr(
            client,
            "send_command",
            lambda c, _: offchain.reply_request(c.cid),
        )
        code, resp = offchain_service.process_inbound_command(
            cmd.payment.sender.address, cmd
        )
    assert code == 200
    assert resp
    # The inbound command must be stored and flagged for our outbound reply.
    model = storage.get_payment_command(cmd.reference_id())
    assert model
    assert model.status == TransactionStatus.OFF_CHAIN_RECEIVER_OUTBOUND
    assert model.inbound, str(cmd)
def test_submit_txn_when_both_ready(monkeypatch):
    """When both actors reach ready_for_settlement, the sender submits on-chain.

    Drives an outbound PaymentCommand through the receiver's
    ready_for_settlement reply, then runs the offchain background tasks with
    the travel-rule submission stubbed (jsonrpc_txn_sample) and checks the
    payment and its transaction end up COMPLETED with the stubbed sequence
    number and blockchain version.
    """
    user = OneUser.run(
        db_session, account_amount=100_000_000_000, account_currency=currency
    )
    amount = 10_000_000_000
    receiver = LocalAccount.generate()
    sub_address = identifier.gen_subaddress()
    cmd = pc_service.save_outbound_payment_command(
        user.account_id, receiver.account_address, sub_address, amount, currency
    )
    # Re-frame the same command from the receiver's point of view, then mark
    # it ready for settlement with a signature and KYC data.
    receiver_cmd = dataclasses.replace(
        cmd, my_actor_address=cmd.payment.receiver.address
    )
    receiver_ready_cmd = receiver_cmd.new_command(
        recipient_signature=b"recipient_signature".hex(),
        status=offchain.Status.ready_for_settlement,
        kyc_data=utils.user_kyc_data(user.account_id),
    )
    model = storage.get_payment_command(cmd.reference_id())
    assert model
    assert model.status == TransactionStatus.OFF_CHAIN_OUTBOUND
    assert not model.inbound, str(model)
    with monkeypatch.context() as m:
        client = context.get().offchain_client
        # Stub JWS handling so the receiver's reply is processed in-memory.
        m.setattr(
            context.get().offchain_client,
            "deserialize_jws_request",
            lambda _, c: client.create_inbound_payment_command(c.cid, c.payment),
        )
        m.setattr(
            client,
            "process_inbound_request",
            lambda c, _: client.create_inbound_payment_command(c.cid, c.payment),
        )
        code, resp = offchain_service.process_inbound_command(
            cmd.payment.receiver.address, receiver_ready_cmd
        )
    assert code == 200
    assert resp
    model = storage.get_payment_command(cmd.reference_id())
    assert model
    assert model.status == TransactionStatus.OFF_CHAIN_INBOUND
    assert model.inbound, str(model)
    # sync command and submit
    with monkeypatch.context() as m:
        m.setattr(
            context.get().offchain_client,
            "send_command",
            lambda c, _: offchain.reply_request(c.cid),
        )
        # Replace the actual on-chain travel-rule submission with the fixed
        # sample transaction (version 3232, sequence 5).
        m.setattr(
            context.get(),
            "p2p_by_travel_rule",
            jsonrpc_txn_sample,
        )
        offchain_service.process_offchain_tasks()
        model = storage.get_payment_command(cmd.reference_id())
        assert model.status == TransactionStatus.COMPLETED, model.reference_id
        tx = storage.get_transaction_by_reference_id(model.reference_id)
        assert tx.status == TransactionStatus.COMPLETED
        assert tx.sequence == 5
        assert tx.blockchain_version == 3232
def jsonrpc_txn_sample(*args):
    """Stand-in for an on-chain submission: return a fixed sample transaction.

    Ignores all arguments; the tests assert against version 3232 and
    sequence number 5.
    """
    txn_data = jsonrpc.TransactionData(sequence_number=5)
    sample = jsonrpc.Transaction(
        version=3232, transaction=txn_data, hash="3232-hash"
    )
    return sample
|
[
"wallet.services.offchain.p2p_payment.save_outbound_payment_command",
"wallet.services.offchain.offchain.process_offchain_tasks",
"dataclasses.replace",
"wallet.services.offchain.offchain.process_inbound_command",
"diem.identifier.gen_subaddress",
"wallet.storage.get_transaction_by_reference_id",
"offchain.reply_request",
"diem.identifier.encode_account",
"wallet.services.offchain.utils.user_kyc_data",
"context.get",
"wallet.services.account.generate_new_subaddress",
"diem.jsonrpc.TransactionData",
"diem.LocalAccount.generate",
"wallet.storage.db_session.refresh",
"tests.wallet_tests.resources.seeds.one_user_seeder.OneUser.run"
] |
[((881, 960), 'tests.wallet_tests.resources.seeds.one_user_seeder.OneUser.run', 'OneUser.run', (['db_session'], {'account_amount': '(100000000000)', 'account_currency': 'currency'}), '(db_session, account_amount=100000000000, account_currency=currency)\n', (892, 960), False, 'from tests.wallet_tests.resources.seeds.one_user_seeder import OneUser\n'), ((1021, 1044), 'diem.LocalAccount.generate', 'LocalAccount.generate', ([], {}), '()\n', (1042, 1044), False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((1063, 1090), 'diem.identifier.gen_subaddress', 'identifier.gen_subaddress', ([], {}), '()\n', (1088, 1090), False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((1101, 1220), 'wallet.services.offchain.p2p_payment.save_outbound_payment_command', 'pc_service.save_outbound_payment_command', (['user.account_id', 'receiver.account_address', 'sub_address', 'amount', 'currency'], {}), '(user.account_id, receiver.\n account_address, sub_address, amount, currency)\n', (1141, 1220), True, 'import wallet.services.offchain.p2p_payment as pc_service\n'), ((1957, 2036), 'tests.wallet_tests.resources.seeds.one_user_seeder.OneUser.run', 'OneUser.run', (['db_session'], {'account_amount': '(100000000000)', 'account_currency': 'currency'}), '(db_session, account_amount=100000000000, account_currency=currency)\n', (1968, 2036), False, 'from tests.wallet_tests.resources.seeds.one_user_seeder import OneUser\n'), ((2095, 2118), 'diem.LocalAccount.generate', 'LocalAccount.generate', ([], {}), '()\n', (2116, 2118), False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((2144, 2171), 'diem.identifier.gen_subaddress', 'identifier.gen_subaddress', ([], {}), '()\n', (2169, 2171), False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((2199, 2239), 'wallet.services.account.generate_new_subaddress', 'generate_new_subaddress', (['user.account_id'], {}), '(user.account_id)\n', (2222, 2239), False, 'from wallet.services.account import 
generate_new_subaddress\n'), ((3660, 3739), 'tests.wallet_tests.resources.seeds.one_user_seeder.OneUser.run', 'OneUser.run', (['db_session'], {'account_amount': '(100000000000)', 'account_currency': 'currency'}), '(db_session, account_amount=100000000000, account_currency=currency)\n', (3671, 3739), False, 'from tests.wallet_tests.resources.seeds.one_user_seeder import OneUser\n'), ((3800, 3823), 'diem.LocalAccount.generate', 'LocalAccount.generate', ([], {}), '()\n', (3821, 3823), False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((3842, 3869), 'diem.identifier.gen_subaddress', 'identifier.gen_subaddress', ([], {}), '()\n', (3867, 3869), False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((3880, 3999), 'wallet.services.offchain.p2p_payment.save_outbound_payment_command', 'pc_service.save_outbound_payment_command', (['user.account_id', 'receiver.account_address', 'sub_address', 'amount', 'currency'], {}), '(user.account_id, receiver.\n account_address, sub_address, amount, currency)\n', (3920, 3999), True, 'import wallet.services.offchain.p2p_payment as pc_service\n'), ((4028, 4099), 'dataclasses.replace', 'dataclasses.replace', (['cmd'], {'my_actor_address': 'cmd.payment.receiver.address'}), '(cmd, my_actor_address=cmd.payment.receiver.address)\n', (4047, 4099), False, 'import dataclasses\n'), ((5867, 5926), 'wallet.storage.get_transaction_by_reference_id', 'storage.get_transaction_by_reference_id', (['model.reference_id'], {}), '(model.reference_id)\n', (5906, 5926), False, 'from wallet import storage\n'), ((1698, 1739), 'wallet.services.offchain.offchain.process_offchain_tasks', 'offchain_service.process_offchain_tasks', ([], {}), '()\n', (1737, 1739), True, 'from wallet.services.offchain import offchain as offchain_service\n'), ((1749, 1774), 'wallet.storage.db_session.refresh', 'db_session.refresh', (['model'], {}), '(model)\n', (1767, 1774), False, 'from wallet.storage import db_session\n'), ((3268, 3341), 
'wallet.services.offchain.offchain.process_inbound_command', 'offchain_service.process_inbound_command', (['cmd.payment.sender.address', 'cmd'], {}), '(cmd.payment.sender.address, cmd)\n', (3308, 3341), True, 'from wallet.services.offchain import offchain as offchain_service\n'), ((4989, 5083), 'wallet.services.offchain.offchain.process_inbound_command', 'offchain_service.process_inbound_command', (['cmd.payment.receiver.address', 'receiver_ready_cmd'], {}), '(cmd.payment.receiver.address,\n receiver_ready_cmd)\n', (5029, 5083), True, 'from wallet.services.offchain import offchain as offchain_service\n'), ((5680, 5721), 'wallet.services.offchain.offchain.process_offchain_tasks', 'offchain_service.process_offchain_tasks', ([], {}), '()\n', (5719, 5721), True, 'from wallet.services.offchain import offchain as offchain_service\n'), ((2306, 2380), 'diem.identifier.encode_account', 'identifier.encode_account', (['sender.account_address', 'sender_sub_address', 'hrp'], {}), '(sender.account_address, sender_sub_address, hrp)\n', (2331, 2380), False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((2428, 2464), 'wallet.services.offchain.utils.user_kyc_data', 'utils.user_kyc_data', (['user.account_id'], {}), '(user.account_id)\n', (2447, 2464), True, 'import wallet.services.offchain.utils as utils\n'), ((2744, 2757), 'context.get', 'context.get', ([], {}), '()\n', (2755, 2757), False, 'import context\n'), ((4293, 4329), 'wallet.services.offchain.utils.user_kyc_data', 'utils.user_kyc_data', (['user.account_id'], {}), '(user.account_id)\n', (4312, 4329), True, 'import wallet.services.offchain.utils as utils\n'), ((4575, 4588), 'context.get', 'context.get', ([], {}), '()\n', (4586, 4588), False, 'import context\n'), ((5581, 5594), 'context.get', 'context.get', ([], {}), '()\n', (5592, 5594), False, 'import context\n'), ((6155, 6197), 'diem.jsonrpc.TransactionData', 'jsonrpc.TransactionData', ([], {'sequence_number': '(5)'}), '(sequence_number=5)\n', (6178, 6197), 
False, 'from diem import identifier, LocalAccount, jsonrpc\n'), ((1565, 1578), 'context.get', 'context.get', ([], {}), '()\n', (1576, 1578), False, 'import context\n'), ((1649, 1678), 'offchain.reply_request', 'offchain.reply_request', (['c.cid'], {}), '(c.cid)\n', (1671, 1678), False, 'import offchain\n'), ((1906, 1919), 'context.get', 'context.get', ([], {}), '()\n', (1917, 1919), False, 'import context\n'), ((3206, 3235), 'offchain.reply_request', 'offchain.reply_request', (['c.cid'], {}), '(c.cid)\n', (3228, 3235), False, 'import offchain\n'), ((4636, 4649), 'context.get', 'context.get', ([], {}), '()\n', (4647, 4649), False, 'import context\n'), ((5425, 5438), 'context.get', 'context.get', ([], {}), '()\n', (5436, 5438), False, 'import context\n'), ((5509, 5538), 'offchain.reply_request', 'offchain.reply_request', (['c.cid'], {}), '(c.cid)\n', (5531, 5538), False, 'import offchain\n'), ((2533, 2546), 'context.get', 'context.get', ([], {}), '()\n', (2544, 2546), False, 'import context\n')]
|
from urllib import request, error as url_error
from django.conf import settings
from cra_helper.logging import logger
def hosted_by_liveserver(file_url: str) -> bool:
    """Return True if the Create-React-App liveserver is serving *file_url*.

    The probe only runs when ``settings.DEBUG`` is set; in production the
    check is skipped entirely and False is returned.
    """
    # Ignore the server check if we're in production
    if settings.DEBUG:
        try:
            # Use a context manager so the HTTP response is always closed
            # (the original leaked the connection).
            with request.urlopen(file_url) as resp:
                if resp.status == 200:
                    logger.debug('{} is being hosted by liveserver'.format(file_url))
                    return True
                else:
                    logger.warning('Create-React-App liveserver is up but not serving files')
                    return False
        except url_error.URLError:
            # Liveserver is down / unreachable: fall back to static assets.
            logger.debug('{} is not being hosted by liveserver'.format(file_url))
            return False
    else:
        logger.debug('Liveserver host check disabled in production')
        return False
|
[
"cra_helper.logging.logger.warning",
"urllib.request.urlopen",
"cra_helper.logging.logger.debug"
] |
[((754, 814), 'cra_helper.logging.logger.debug', 'logger.debug', (['"""Liveserver host check disabled in production"""'], {}), "('Liveserver host check disabled in production')\n", (766, 814), False, 'from cra_helper.logging import logger\n'), ((279, 304), 'urllib.request.urlopen', 'request.urlopen', (['file_url'], {}), '(file_url)\n', (294, 304), False, 'from urllib import request, error as url_error\n'), ((484, 557), 'cra_helper.logging.logger.warning', 'logger.warning', (['"""Create-React-App liveserver is up but not serving files"""'], {}), "('Create-React-App liveserver is up but not serving files')\n", (498, 557), False, 'from cra_helper.logging import logger\n')]
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# SWIG-generated bootstrap: locate and import the compiled _inertial3DSpin
# extension using whichever import machinery this interpreter supports
# (importlib on >= 2.7, imp on 2.6, plain import otherwise).
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        import importlib
        # Try the package-relative name first, then a top-level import.
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_inertial3DSpin')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_inertial3DSpin')
    _inertial3DSpin = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_inertial3DSpin', [dirname(__file__)])
        except ImportError:
            import _inertial3DSpin
            return _inertial3DSpin
        try:
            _mod = imp.load_module('_inertial3DSpin', fp, pathname, description)
        finally:
            # imp.find_module opens the file; always close it.
            if fp is not None:
                fp.close()
        return _mod
    _inertial3DSpin = swig_import_helper()
    del swig_import_helper
else:
    import _inertial3DSpin
del _swig_python_version_info
# Compatibility shims for very old interpreters.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG attribute setter: route writes through __swig_setmethods__.

    'thisown' and 'this' manage the underlying C pointer's ownership and are
    handled specially.  Unknown names are only allowed when static=0;
    otherwise adding new attributes raises AttributeError.
    """
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Static (non-dynamic) variant of _swig_setattr_nondynamic."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """SWIG attribute getter: resolve *name* via __swig_getmethods__."""
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """repr() for SWIG proxies, including the wrapped C pointer when present."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# New-style class support detection: on modern interpreters proxies derive
# from object (_newclass = 1); on ancient ones fall back to a classic class.
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
# Thin Python wrappers around the SWIG-generated C array helpers
# (allocate / free / element get / element set) for double, long, int and
# short buffers.  Each def is immediately rebound to the raw extension
# function for speed.
def new_doubleArray(nelements):
    return _inertial3DSpin.new_doubleArray(nelements)
new_doubleArray = _inertial3DSpin.new_doubleArray
def delete_doubleArray(ary):
    return _inertial3DSpin.delete_doubleArray(ary)
delete_doubleArray = _inertial3DSpin.delete_doubleArray
def doubleArray_getitem(ary, index):
    return _inertial3DSpin.doubleArray_getitem(ary, index)
doubleArray_getitem = _inertial3DSpin.doubleArray_getitem
def doubleArray_setitem(ary, index, value):
    return _inertial3DSpin.doubleArray_setitem(ary, index, value)
doubleArray_setitem = _inertial3DSpin.doubleArray_setitem
def new_longArray(nelements):
    return _inertial3DSpin.new_longArray(nelements)
new_longArray = _inertial3DSpin.new_longArray
def delete_longArray(ary):
    return _inertial3DSpin.delete_longArray(ary)
delete_longArray = _inertial3DSpin.delete_longArray
def longArray_getitem(ary, index):
    return _inertial3DSpin.longArray_getitem(ary, index)
longArray_getitem = _inertial3DSpin.longArray_getitem
def longArray_setitem(ary, index, value):
    return _inertial3DSpin.longArray_setitem(ary, index, value)
longArray_setitem = _inertial3DSpin.longArray_setitem
def new_intArray(nelements):
    return _inertial3DSpin.new_intArray(nelements)
new_intArray = _inertial3DSpin.new_intArray
def delete_intArray(ary):
    return _inertial3DSpin.delete_intArray(ary)
delete_intArray = _inertial3DSpin.delete_intArray
def intArray_getitem(ary, index):
    return _inertial3DSpin.intArray_getitem(ary, index)
intArray_getitem = _inertial3DSpin.intArray_getitem
def intArray_setitem(ary, index, value):
    return _inertial3DSpin.intArray_setitem(ary, index, value)
intArray_setitem = _inertial3DSpin.intArray_setitem
def new_shortArray(nelements):
    return _inertial3DSpin.new_shortArray(nelements)
new_shortArray = _inertial3DSpin.new_shortArray
def delete_shortArray(ary):
    return _inertial3DSpin.delete_shortArray(ary)
delete_shortArray = _inertial3DSpin.delete_shortArray
def shortArray_getitem(ary, index):
    return _inertial3DSpin.shortArray_getitem(ary, index)
shortArray_getitem = _inertial3DSpin.shortArray_getitem
def shortArray_setitem(ary, index, value):
    return _inertial3DSpin.shortArray_setitem(ary, index, value)
shortArray_setitem = _inertial3DSpin.shortArray_setitem
def getStructSize(self):
    """Return sizeof() of the C struct backing *self*.

    Resolves the module-level 'sizeof_<StructName>' constant that the
    GEN_SIZEOF SWIG macro generates; raises NameError with guidance when the
    constant is undefined.
    """
    try:
        return eval('sizeof_' + repr(self).split(';')[0].split('.')[-1])
    except NameError as e:
        typeString = 'sizeof_' + repr(self).split(';')[0].split('.')[-1]
        # str(e) instead of e.message: exceptions lost the .message attribute
        # in Python 3, so the original re-raise crashed with AttributeError.
        raise NameError(str(e) + '\nYou tried to get this size macro: ' + typeString +
                        '\n It appears to be undefined. \nYou need to run the SWIG GEN_SIZEOF' +
                        ' SWIG macro against the class/struct in your SWIG file if you want to ' +
                        ' make this call.\n')
def protectSetAttr(self, name, value):
    """Allow assignment only to attributes that already exist (or 'this').

    Installed as __setattr__ on the module's proxy classes so that typos do
    not silently create new Python-side attributes.
    """
    # Guard clause: reject names that are neither existing attributes nor
    # the special SWIG pointer slot.
    if not (hasattr(self, name) or name == 'this'):
        raise ValueError('You tried to add this variable: ' + name + '\n' +
                         'To this class: ' + str(self))
    object.__setattr__(self, name, value)
def protectAllClasses(moduleType):
    """Install protectSetAttr and getStructSize on every class in this module.

    Iterates the classes currently defined in this module and rebinds their
    __setattr__ / getStructSize via exec; classes that reject the assignment
    are skipped silently.
    """
    import inspect
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    for member in clsmembers:
        try:
            # member[0] is the class name; the exec'd assignment resolves it
            # in this module's globals.
            exec(str(member[0]) + '.__setattr__ = protectSetAttr')
            exec(str(member[0]) + '.getStructSize = getStructSize')
        except (AttributeError, TypeError) as e:
            pass
# Module entry points (update/init/reset) and struct-size constants exported
# directly from the compiled C extension.
Update_inertial3DSpin = _inertial3DSpin.Update_inertial3DSpin
SelfInit_inertial3DSpin = _inertial3DSpin.SelfInit_inertial3DSpin
CrossInit_inertial3DSpin = _inertial3DSpin.CrossInit_inertial3DSpin
Reset_inertial3DSpin = _inertial3DSpin.Reset_inertial3DSpin
sizeof_inertial3DSpinConfig = _inertial3DSpin.sizeof_inertial3DSpinConfig
sizeof_AttRefFswMsg = _inertial3DSpin.sizeof_AttRefFswMsg
class inertial3DSpinConfig(_object):
    """SWIG proxy for the C struct inertial3DSpinConfig.

    Each struct field is exposed as a Python attribute backed by the
    generated C getter/setter pair.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, inertial3DSpinConfig, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, inertial3DSpinConfig, name)
    __repr__ = _swig_repr
    __swig_setmethods__["sigma_RN"] = _inertial3DSpin.inertial3DSpinConfig_sigma_RN_set
    __swig_getmethods__["sigma_RN"] = _inertial3DSpin.inertial3DSpinConfig_sigma_RN_get
    if _newclass:
        sigma_RN = _swig_property(_inertial3DSpin.inertial3DSpinConfig_sigma_RN_get, _inertial3DSpin.inertial3DSpinConfig_sigma_RN_set)
    __swig_setmethods__["omega_spin"] = _inertial3DSpin.inertial3DSpinConfig_omega_spin_set
    __swig_getmethods__["omega_spin"] = _inertial3DSpin.inertial3DSpinConfig_omega_spin_get
    if _newclass:
        omega_spin = _swig_property(_inertial3DSpin.inertial3DSpinConfig_omega_spin_get, _inertial3DSpin.inertial3DSpinConfig_omega_spin_set)
    __swig_setmethods__["priorTime"] = _inertial3DSpin.inertial3DSpinConfig_priorTime_set
    __swig_getmethods__["priorTime"] = _inertial3DSpin.inertial3DSpinConfig_priorTime_get
    if _newclass:
        priorTime = _swig_property(_inertial3DSpin.inertial3DSpinConfig_priorTime_get, _inertial3DSpin.inertial3DSpinConfig_priorTime_set)
    __swig_setmethods__["outputDataName"] = _inertial3DSpin.inertial3DSpinConfig_outputDataName_set
    __swig_getmethods__["outputDataName"] = _inertial3DSpin.inertial3DSpinConfig_outputDataName_get
    if _newclass:
        outputDataName = _swig_property(_inertial3DSpin.inertial3DSpinConfig_outputDataName_get, _inertial3DSpin.inertial3DSpinConfig_outputDataName_set)
    __swig_setmethods__["outputMsgID"] = _inertial3DSpin.inertial3DSpinConfig_outputMsgID_set
    __swig_getmethods__["outputMsgID"] = _inertial3DSpin.inertial3DSpinConfig_outputMsgID_get
    if _newclass:
        outputMsgID = _swig_property(_inertial3DSpin.inertial3DSpinConfig_outputMsgID_get, _inertial3DSpin.inertial3DSpinConfig_outputMsgID_set)
    __swig_setmethods__["inputRefName"] = _inertial3DSpin.inertial3DSpinConfig_inputRefName_set
    __swig_getmethods__["inputRefName"] = _inertial3DSpin.inertial3DSpinConfig_inputRefName_get
    if _newclass:
        inputRefName = _swig_property(_inertial3DSpin.inertial3DSpinConfig_inputRefName_get, _inertial3DSpin.inertial3DSpinConfig_inputRefName_set)
    __swig_setmethods__["inputRefID"] = _inertial3DSpin.inertial3DSpinConfig_inputRefID_set
    __swig_getmethods__["inputRefID"] = _inertial3DSpin.inertial3DSpinConfig_inputRefID_get
    if _newclass:
        inputRefID = _swig_property(_inertial3DSpin.inertial3DSpinConfig_inputRefID_get, _inertial3DSpin.inertial3DSpinConfig_inputRefID_set)
    __swig_setmethods__["attRefOut"] = _inertial3DSpin.inertial3DSpinConfig_attRefOut_set
    __swig_getmethods__["attRefOut"] = _inertial3DSpin.inertial3DSpinConfig_attRefOut_get
    if _newclass:
        attRefOut = _swig_property(_inertial3DSpin.inertial3DSpinConfig_attRefOut_get, _inertial3DSpin.inertial3DSpinConfig_attRefOut_set)

    def __init__(self):
        # Allocate the underlying C struct and attach it to this proxy.
        this = _inertial3DSpin.new_inertial3DSpinConfig()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _inertial3DSpin.delete_inertial3DSpinConfig
    __del__ = lambda self: None
# Register the proxy class with the extension's type system.
inertial3DSpinConfig_swigregister = _inertial3DSpin.inertial3DSpinConfig_swigregister
inertial3DSpinConfig_swigregister(inertial3DSpinConfig)
# Wrapper over the C reference-computation routine; rebound to the raw
# extension function immediately below.
def computeReference_inertial3DSpin(ConfigData, omega_R0N_N, domega_R0N_N, omega_RR0_R, dt):
    return _inertial3DSpin.computeReference_inertial3DSpin(ConfigData, omega_R0N_N, domega_R0N_N, omega_RR0_R, dt)
computeReference_inertial3DSpin = _inertial3DSpin.computeReference_inertial3DSpin
class AttRefFswMsg(_object):
    """SWIG proxy for the C struct AttRefFswMsg (attitude reference message)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AttRefFswMsg, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AttRefFswMsg, name)
    __repr__ = _swig_repr
    __swig_setmethods__["sigma_RN"] = _inertial3DSpin.AttRefFswMsg_sigma_RN_set
    __swig_getmethods__["sigma_RN"] = _inertial3DSpin.AttRefFswMsg_sigma_RN_get
    if _newclass:
        sigma_RN = _swig_property(_inertial3DSpin.AttRefFswMsg_sigma_RN_get, _inertial3DSpin.AttRefFswMsg_sigma_RN_set)
    __swig_setmethods__["omega_RN_N"] = _inertial3DSpin.AttRefFswMsg_omega_RN_N_set
    __swig_getmethods__["omega_RN_N"] = _inertial3DSpin.AttRefFswMsg_omega_RN_N_get
    if _newclass:
        omega_RN_N = _swig_property(_inertial3DSpin.AttRefFswMsg_omega_RN_N_get, _inertial3DSpin.AttRefFswMsg_omega_RN_N_set)
    __swig_setmethods__["domega_RN_N"] = _inertial3DSpin.AttRefFswMsg_domega_RN_N_set
    __swig_getmethods__["domega_RN_N"] = _inertial3DSpin.AttRefFswMsg_domega_RN_N_get
    if _newclass:
        domega_RN_N = _swig_property(_inertial3DSpin.AttRefFswMsg_domega_RN_N_get, _inertial3DSpin.AttRefFswMsg_domega_RN_N_set)

    def __init__(self):
        # Allocate the underlying C struct and attach it to this proxy.
        this = _inertial3DSpin.new_AttRefFswMsg()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _inertial3DSpin.delete_AttRefFswMsg
    __del__ = lambda self: None
# Register the proxy class with the extension's type system.
AttRefFswMsg_swigregister = _inertial3DSpin.AttRefFswMsg_swigregister
AttRefFswMsg_swigregister(AttRefFswMsg)
import sys
# Lock down attribute creation on every SWIG proxy class defined above.
protectAllClasses(sys.modules[__name__])
# This file is compatible with both classic and new-style classes.
|
[
"_inertial3DSpin.longArray_setitem",
"_inertial3DSpin.delete_longArray",
"_inertial3DSpin.doubleArray_getitem",
"_inertial3DSpin.new_intArray",
"_inertial3DSpin.shortArray_setitem",
"_inertial3DSpin.intArray_getitem",
"inspect.getmembers",
"_inertial3DSpin.delete_shortArray",
"os.path.dirname",
"_inertial3DSpin.shortArray_getitem",
"_inertial3DSpin.intArray_setitem",
"_inertial3DSpin.new_inertial3DSpinConfig",
"imp.load_module",
"importlib.import_module",
"_inertial3DSpin.new_shortArray",
"_inertial3DSpin.new_AttRefFswMsg",
"_inertial3DSpin.computeReference_inertial3DSpin",
"_inertial3DSpin.new_longArray",
"_inertial3DSpin.delete_intArray",
"_inertial3DSpin.delete_doubleArray",
"_inertial3DSpin.new_doubleArray",
"_inertial3DSpin.longArray_getitem",
"_inertial3DSpin.doubleArray_setitem"
] |
[((2985, 3027), '_inertial3DSpin.new_doubleArray', '_inertial3DSpin.new_doubleArray', (['nelements'], {}), '(nelements)\n', (3016, 3027), False, 'import _inertial3DSpin\n'), ((3119, 3158), '_inertial3DSpin.delete_doubleArray', '_inertial3DSpin.delete_doubleArray', (['ary'], {}), '(ary)\n', (3153, 3158), False, 'import _inertial3DSpin\n'), ((3264, 3311), '_inertial3DSpin.doubleArray_getitem', '_inertial3DSpin.doubleArray_getitem', (['ary', 'index'], {}), '(ary, index)\n', (3299, 3311), False, 'import _inertial3DSpin\n'), ((3426, 3480), '_inertial3DSpin.doubleArray_setitem', '_inertial3DSpin.doubleArray_setitem', (['ary', 'index', 'value'], {}), '(ary, index, value)\n', (3461, 3480), False, 'import _inertial3DSpin\n'), ((3581, 3621), '_inertial3DSpin.new_longArray', '_inertial3DSpin.new_longArray', (['nelements'], {}), '(nelements)\n', (3610, 3621), False, 'import _inertial3DSpin\n'), ((3707, 3744), '_inertial3DSpin.delete_longArray', '_inertial3DSpin.delete_longArray', (['ary'], {}), '(ary)\n', (3739, 3744), False, 'import _inertial3DSpin\n'), ((3844, 3889), '_inertial3DSpin.longArray_getitem', '_inertial3DSpin.longArray_getitem', (['ary', 'index'], {}), '(ary, index)\n', (3877, 3889), False, 'import _inertial3DSpin\n'), ((3998, 4050), '_inertial3DSpin.longArray_setitem', '_inertial3DSpin.longArray_setitem', (['ary', 'index', 'value'], {}), '(ary, index, value)\n', (4031, 4050), False, 'import _inertial3DSpin\n'), ((4146, 4185), '_inertial3DSpin.new_intArray', '_inertial3DSpin.new_intArray', (['nelements'], {}), '(nelements)\n', (4174, 4185), False, 'import _inertial3DSpin\n'), ((4268, 4304), '_inertial3DSpin.delete_intArray', '_inertial3DSpin.delete_intArray', (['ary'], {}), '(ary)\n', (4299, 4304), False, 'import _inertial3DSpin\n'), ((4401, 4445), '_inertial3DSpin.intArray_getitem', '_inertial3DSpin.intArray_getitem', (['ary', 'index'], {}), '(ary, index)\n', (4433, 4445), False, 'import _inertial3DSpin\n'), ((4551, 4602), '_inertial3DSpin.intArray_setitem', 
'_inertial3DSpin.intArray_setitem', (['ary', 'index', 'value'], {}), '(ary, index, value)\n', (4583, 4602), False, 'import _inertial3DSpin\n'), ((4698, 4739), '_inertial3DSpin.new_shortArray', '_inertial3DSpin.new_shortArray', (['nelements'], {}), '(nelements)\n', (4728, 4739), False, 'import _inertial3DSpin\n'), ((4828, 4866), '_inertial3DSpin.delete_shortArray', '_inertial3DSpin.delete_shortArray', (['ary'], {}), '(ary)\n', (4861, 4866), False, 'import _inertial3DSpin\n'), ((4969, 5015), '_inertial3DSpin.shortArray_getitem', '_inertial3DSpin.shortArray_getitem', (['ary', 'index'], {}), '(ary, index)\n', (5003, 5015), False, 'import _inertial3DSpin\n'), ((5127, 5180), '_inertial3DSpin.shortArray_setitem', '_inertial3DSpin.shortArray_setitem', (['ary', 'index', 'value'], {}), '(ary, index, value)\n', (5161, 5180), False, 'import _inertial3DSpin\n'), ((6085, 6143), 'inspect.getmembers', 'inspect.getmembers', (['sys.modules[__name__]', 'inspect.isclass'], {}), '(sys.modules[__name__], inspect.isclass)\n', (6103, 6143), False, 'import inspect\n'), ((10402, 10509), '_inertial3DSpin.computeReference_inertial3DSpin', '_inertial3DSpin.computeReference_inertial3DSpin', (['ConfigData', 'omega_R0N_N', 'domega_R0N_N', 'omega_RR0_R', 'dt'], {}), '(ConfigData, omega_R0N_N,\n domega_R0N_N, omega_RR0_R, dt)\n', (10449, 10509), False, 'import _inertial3DSpin\n'), ((9897, 9939), '_inertial3DSpin.new_inertial3DSpinConfig', '_inertial3DSpin.new_inertial3DSpinConfig', ([], {}), '()\n', (9937, 9939), False, 'import _inertial3DSpin\n'), ((11838, 11872), '_inertial3DSpin.new_AttRefFswMsg', '_inertial3DSpin.new_AttRefFswMsg', ([], {}), '()\n', (11870, 11872), False, 'import _inertial3DSpin\n'), ((497, 527), 'importlib.import_module', 'importlib.import_module', (['mname'], {}), '(mname)\n', (520, 527), False, 'import importlib\n'), ((575, 617), 'importlib.import_module', 'importlib.import_module', (['"""_inertial3DSpin"""'], {}), "('_inertial3DSpin')\n", (598, 617), False, 'import 
importlib\n'), ((1075, 1136), 'imp.load_module', 'imp.load_module', (['"""_inertial3DSpin"""', 'fp', 'pathname', 'description'], {}), "('_inertial3DSpin', fp, pathname, description)\n", (1090, 1136), False, 'import imp\n'), ((925, 942), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (932, 942), False, 'from os.path import dirname\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the MIT License. See LICENSE file for more info.
import time
from asyncframes import Frame, PFrame, sleep, all_
from asyncframes.asyncio_eventloop import EventLoop
@Frame
async def main_frame():
    """Spawn 10000 parallel sub_frames and print how many returned True."""
    subframes = [sub_frame(i) for i in range(10000)]
    # all_ awaits every subframe and yields their results.
    # NOTE(review): `result == True` could simply be `result` — sub_frame
    # returns a bool; kept as-is to preserve the original expression.
    print(sum(1 if result == True else 0 for result in await all_(*subframes)))
@PFrame
async def sub_frame(i):
    # NOTE(review): time.sleep (not asyncframes.sleep) blocks the executing
    # worker — presumably intentional to simulate work inside a parallel
    # PFrame; confirm against asyncframes' threading model.
    time.sleep(0.001)
    # True for even indices, False for odd.
    return i % 2 == 0
# Drive the frame hierarchy to completion on an asyncio-backed event loop.
loop = EventLoop()
loop.run(main_frame)
|
[
"asyncframes.asyncio_eventloop.EventLoop",
"asyncframes.all_",
"time.sleep"
] |
[((504, 515), 'asyncframes.asyncio_eventloop.EventLoop', 'EventLoop', ([], {}), '()\n', (513, 515), False, 'from asyncframes.asyncio_eventloop import EventLoop\n'), ((456, 473), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (466, 473), False, 'import time\n'), ((400, 416), 'asyncframes.all_', 'all_', (['*subframes'], {}), '(*subframes)\n', (404, 416), False, 'from asyncframes import Frame, PFrame, sleep, all_\n')]
|
from sense_hat import SenseHat
# Fill the Sense HAT 8x8 LED matrix with solid magenta, rotated 270 degrees
# to match the device's mounting orientation.
sense = SenseHat()
sense.set_rotation(270)
magenta=(255,0,255)
sense.clear(magenta)
|
[
"sense_hat.SenseHat"
] |
[((40, 50), 'sense_hat.SenseHat', 'SenseHat', ([], {}), '()\n', (48, 50), False, 'from sense_hat import SenseHat\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import subprocess
from flask import Flask, Response, request
from deployer.runner import run_child
# Module-level WSGI application; routes below are registered against it.
app = Flask('deployer')
@app.route('/incoming/<deployment_name>/', methods=["POST"])
def incoming(deployment_name):
    """Trigger the deployment named *deployment_name*.

    Looks up ``<deployment_name>.conf`` under the directory given by the
    ``DEPLOYER_CONFIG`` environment variable and runs it via the deployer
    runner.  Returns 500 when the config directory is not configured, 404
    when the named config file does not exist, and 200 ('ok') once the child
    deployment has been started.
    """
    config_path = os.environ.get('DEPLOYER_CONFIG')
    if not config_path:
        return Response(response='no deployment config path', status=500)
    # Flask's default <string> converter rejects '/', so deployment_name
    # cannot traverse out of the config directory.
    final_path = os.path.join(config_path, '{}.conf'.format(deployment_name))
    if not os.path.exists(final_path):
        return Response(response='no deployment config', status=404)
    run_child(final_path)
    return Response(response='ok', status=200)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
|
[
"deployer.runner.run_child",
"flask.Flask",
"os.path.exists",
"os.environ.get",
"flask.Response"
] |
[((170, 187), 'flask.Flask', 'Flask', (['"""deployer"""'], {}), "('deployer')\n", (175, 187), False, 'from flask import Flask, Response, request\n'), ((315, 348), 'os.environ.get', 'os.environ.get', (['"""DEPLOYER_CONFIG"""'], {}), "('DEPLOYER_CONFIG')\n", (329, 348), False, 'import os, sys\n'), ((637, 658), 'deployer.runner.run_child', 'run_child', (['final_path'], {}), '(final_path)\n', (646, 658), False, 'from deployer.runner import run_child\n'), ((704, 746), 'flask.Response', 'Response', ([], {'response': 'response', 'status': 'status'}), '(response=response, status=status)\n', (712, 746), False, 'from flask import Flask, Response, request\n'), ((388, 446), 'flask.Response', 'Response', ([], {'response': '"""no deployment config path"""', 'status': '(500)'}), "(response='no deployment config path', status=500)\n", (396, 446), False, 'from flask import Flask, Response, request\n'), ((536, 562), 'os.path.exists', 'os.path.exists', (['final_path'], {}), '(final_path)\n', (550, 562), False, 'import os, sys\n'), ((579, 632), 'flask.Response', 'Response', ([], {'response': '"""no deployment config"""', 'status': '(404)'}), "(response='no deployment config', status=404)\n", (587, 632), False, 'from flask import Flask, Response, request\n')]
|
import pretty_midi
import numpy as np
'''
Note class: represent note, including:
1. the note pitch
2. the note duration
3. downbeat
4. intensity of note sound
'''
class Note:
    """A single melody note: pitch, duration, downbeat flag and velocity."""

    def __init__(self):
        # Start from a neutral note; fields are filled in by the converter.
        self.pitch = self.length = self.force = 0
        self.downbeat = False
'''
Midi2Numpy: tool to convert midi file to numpy list of Note
input_path: the path of the input midi file
track_index: the index of the melody track of midi
output_path: the path to save the numpy array
'''
def Midi2Numpy(input_path, output_path, track_index):
    """Convert one track of a MIDI file into a saved numpy array of Note.

    input_path: path of the input MIDI file
    output_path: path the numpy object array is saved to (np.save)
    track_index: index of the melody track inside the MIDI file
    """
    midi_data = pretty_midi.PrettyMIDI(input_path)
    notes = midi_data.instruments[track_index].notes
    downbeats = midi_data.get_downbeats()
    dataset = []
    for n in notes:
        note = Note()
        # A note counts as a downbeat when any downbeat time falls inside
        # its [start, end) interval; any() stops at the first match instead
        # of scanning every downbeat for every note.
        note.downbeat = any(n.start <= t < n.end for t in downbeats)
        note.pitch = n.pitch
        note.length = n.end - n.start
        note.force = n.velocity
        dataset.append(note)
    # Saved as a numpy object array; loading it back requires
    # np.load(..., allow_pickle=True).
    np.save(output_path, dataset)
# Quick manual check: copy one instrument track of a sample MIDI file into a
# fresh PrettyMIDI object and write it out twice (test.mid and a mirrored
# path under newdata/).
path = 'plag/23_ma este meg.mid'
test = pretty_midi.PrettyMIDI()
midi_data = pretty_midi.PrettyMIDI(path)
# decide the track index
track_index = 0
notes = midi_data.instruments[track_index]
test.instruments.append(notes)
test.write('test.mid')
test.write("newdata" + path[4:])
|
[
"numpy.save",
"pretty_midi.PrettyMIDI"
] |
[((1240, 1264), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', ([], {}), '()\n', (1262, 1264), False, 'import pretty_midi\n'), ((1277, 1305), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', (['path'], {}), '(path)\n', (1299, 1305), False, 'import pretty_midi\n'), ((613, 647), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', (['input_path'], {}), '(input_path)\n', (635, 647), False, 'import pretty_midi\n'), ((1167, 1196), 'numpy.save', 'np.save', (['output_path', 'dataset'], {}), '(output_path, dataset)\n', (1174, 1196), True, 'import numpy as np\n')]
|
"""Utility functions and classes for visualization and logging."""
import os
from datetime import datetime
import cv2
import imageio
import numpy as np
from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
reset_environment_and_additional_commands,
transport_wrapper,
)
class LoggerVisualizer:
    """Base class for episode loggers/visualizers.

    Creates a unique timestamped directory under
    ``experiment_output/visualizations/<exp_name>/`` and keeps per-episode
    queues of logged frames (``log_queue``) and actions (``action_queue``).
    Subclasses override ``log`` and the ``finish_episode*`` hooks.
    """

    def __init__(self, exp_name="", log_dir=""):
        if log_dir == "":
            log_dir = self.__class__.__name__
        if exp_name == "":
            exp_name = "NoNameExp"
        now = datetime.now()
        self.exp_name = exp_name
        # One directory per run: <exp_name>/<log_dir>_<timestamp>.
        log_dir = os.path.join(
            "experiment_output/visualizations",
            exp_name,
            log_dir + "_" + now.strftime("%m_%d_%Y_%H_%M_%S_%f"),
        )
        self.log_dir = log_dir
        os.makedirs(self.log_dir, exist_ok=True)
        self.log_queue = []     # per-episode logged items (frames or actions)
        self.action_queue = []  # per-episode action strings
        self.logger_index = 0   # running episode counter

    def log(self, environment, action_str):
        # Abstract hook: subclasses must override. NotImplementedError is
        # the idiomatic marker and, being an Exception subclass, is still
        # caught by any existing `except Exception` handlers.
        raise NotImplementedError("Not Implemented")

    def is_empty(self):
        """Return True when nothing has been logged this episode."""
        return len(self.log_queue) == 0

    def finish_episode_metrics(self, episode_info, task_info, metric_results):
        pass

    def finish_episode(self, environment, episode_info, task_info):
        pass
class TestMetricLogger(LoggerVisualizer):
    """Accumulates per-episode test metrics and appends each episode's
    results (task info + action sequence) to a text log file."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.total_metric_dict = {}
        log_file_name = os.path.join(
            self.log_dir, "test_metric_{}.txt".format(self.exp_name)
        )
        self.metric_log_file = open(log_file_name, "w")

    def average_dict(self):
        """Return the mean of every accumulated metric list."""
        return {key: sum(values) / len(values)
                for key, values in self.total_metric_dict.items()}

    def finish_episode_metrics(self, episode_info, task_info, metric_results=None):
        if metric_results is None:
            # Episode was aborted: drop the queues, record nothing.
            print("had to reset")
            self.log_queue = []
            self.action_queue = []
            return

        tracked = ("ep_length", "reward", "success")
        for key in metric_results.keys():
            if "metric" in key or key in tracked:
                self.total_metric_dict.setdefault(key, []).append(
                    metric_results[key]
                )
        print(
            "total",
            len(self.total_metric_dict["success"]),
            "average test metric",
            self.average_dict(),
        )

        # Persist this episode's metrics together with its action sequence.
        log_dict = {
            "task_info_metrics": metric_results,
            "action_sequence": self.action_queue,
            "logger_number": self.logger_index,
        }
        self.logger_index += 1
        self.metric_log_file.write(str(log_dict))
        self.metric_log_file.write("\n")
        print("Logging to", self.metric_log_file.name)

        self.log_queue = []
        self.action_queue = []

    def log(self, environment, action_str):
        # We can add agent arm and state location if needed
        self.action_queue.append(action_str)
        self.log_queue.append(action_str)
class BringObjImageVisualizer(LoggerVisualizer):
    """Writes a per-episode GIF of logged frames plus start/goal snapshots
    for the bring-object task. The GIF filename encodes the source/goal
    objects and the pickup/episode success flags."""

    def finish_episode(self, environment, episode_info, task_info):
        # Unique per-episode prefix: wall-clock timestamp + running counter.
        now = datetime.now()
        time_to_write = now.strftime("%m_%d_%Y_%H_%M_%S_%f")
        time_to_write += "log_ind_{}".format(self.logger_index)
        self.logger_index += 1
        print("Loggigng", time_to_write, "len", len(self.log_queue))

        source_object_id = task_info["source_object_id"]
        goal_object_id = task_info["goal_object_id"]
        pickup_success = episode_info.object_picked_up
        episode_success = episode_info._success

        # Put back if you want the images
        # for i, img in enumerate(self.log_queue):
        # image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
        # cv2.imwrite(image_dir, img[:,:,[2,1,0]])

        # Filename encodes the objects involved and the success flags, e.g.
        # <time>_from_Apple_to_Fridge_pickup_succ_episode_fail.gif
        episode_success_offset = "succ" if episode_success else "fail"
        pickup_success_offset = "succ" if pickup_success else "fail"

        gif_name = (
            time_to_write
            + "_from_"
            + source_object_id.split("|")[0]
            + "_to_"
            + goal_object_id.split("|")[0]
            + "_pickup_"
            + pickup_success_offset
            + "_episode_"
            + episode_success_offset
            + ".gif"
        )
        # Stack frames into shape (seq_len, 1, w, h, c) for the GIF writer.
        concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
        save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)

        this_controller = environment.controller
        scene = this_controller.last_event.metadata["sceneName"]
        # Reset the scene before reconstructing the start/goal states below.
        reset_environment_and_additional_commands(this_controller, scene)

        self.log_start_goal(
            environment,
            task_info["visualization_source"],
            tag="start",
            img_adr=os.path.join(self.log_dir, time_to_write),
        )
        self.log_start_goal(
            environment,
            task_info["visualization_target"],
            tag="goal",
            img_adr=os.path.join(self.log_dir, time_to_write),
        )

        self.log_queue = []
        self.action_queue = []

    def log(self, environment, action_str):
        """Record the current environment frame and the action taken."""
        image_tensor = environment.current_frame
        self.action_queue.append(action_str)
        self.log_queue.append(image_tensor)

    def log_start_goal(self, env, task_info, tag, img_adr):
        """Reconstruct a recorded start/goal state and save its frame + mask.

        Initializes the arm, transports the object to its recorded location,
        teleports the agent into the recorded pose, then writes
        ``<img_adr>_obj_<obj>_pickup_<tag>.png`` and the matching
        ``..._mask.png``.
        """
        object_location = task_info["object_location"]
        object_id = task_info["object_id"]
        agent_state = task_info["agent_pose"]
        this_controller = env.controller
        # We should not reset here
        # for start arm from high up as a cheating, this block is very important. never remove
        event1, event2, event3 = initialize_arm(this_controller)
        if not (
            event1.metadata["lastActionSuccess"]
            and event2.metadata["lastActionSuccess"]
            and event3.metadata["lastActionSuccess"]
        ):
            print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")

        event = transport_wrapper(this_controller, object_id, object_location)
        if event.metadata["lastActionSuccess"] == False:
            print("ERROR: oh no could not transport in logging")

        # Put the agent into the exact recorded pose (position, rotation,
        # camera horizon).
        event = this_controller.step(
            dict(
                action="TeleportFull",
                standing=True,
                x=agent_state["position"]["x"],
                y=agent_state["position"]["y"],
                z=agent_state["position"]["z"],
                rotation=dict(
                    x=agent_state["rotation"]["x"],
                    y=agent_state["rotation"]["y"],
                    z=agent_state["rotation"]["z"],
                ),
                horizon=agent_state["cameraHorizon"],
            )
        )
        if event.metadata["lastActionSuccess"] == False:
            print("ERROR: oh no could not teleport in logging")

        image_tensor = this_controller.last_event.frame
        image_dir = (
            img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
        )
        # Channel flip before writing — cv2.imwrite expects BGR order
        # (the env frame is presumably RGB, hence [2, 1, 0]).
        cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])

        # Saving the mask
        target_object_id = task_info["object_id"]
        all_visible_masks = this_controller.last_event.instance_masks
        if target_object_id in all_visible_masks:
            mask_frame = all_visible_masks[target_object_id]
        else:
            # Object not visible from this pose: write an all-zero mask.
            mask_frame = np.zeros(env.controller.last_event.frame[:, :, 0].shape)

        mask_dir = (
            img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + "_mask.png"
        )
        cv2.imwrite(mask_dir, mask_frame.astype(float) * 255.0)
class ImageVisualizer(LoggerVisualizer):
    """Writes a per-episode GIF of logged frames plus start/goal snapshots.
    The GIF filename encodes the target object and the pickup/episode
    success flags."""

    def finish_episode(self, environment, episode_info, task_info):
        # Unique per-episode prefix: wall-clock timestamp + running counter.
        now = datetime.now()
        time_to_write = now.strftime("%m_%d_%Y_%H_%M_%S_%f")
        time_to_write += "log_ind_{}".format(self.logger_index)
        self.logger_index += 1
        print("Loggigng", time_to_write, "len", len(self.log_queue))

        object_id = task_info["objectId"]
        pickup_success = episode_info.object_picked_up
        episode_success = episode_info._success

        # Put back if you want the images
        # for i, img in enumerate(self.log_queue):
        # image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
        # cv2.imwrite(image_dir, img[:,:,[2,1,0]])

        # Filename encodes the object and the success flags, e.g.
        # <time>_obj_Apple_pickup_succ_episode_fail.gif
        episode_success_offset = "succ" if episode_success else "fail"
        pickup_success_offset = "succ" if pickup_success else "fail"
        gif_name = (
            time_to_write
            + "_obj_"
            + object_id.split("|")[0]
            + "_pickup_"
            + pickup_success_offset
            + "_episode_"
            + episode_success_offset
            + ".gif"
        )
        # Stack frames into shape (seq_len, 1, w, h, c) for the GIF writer.
        concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
        save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)

        self.log_start_goal(
            environment,
            task_info["visualization_source"],
            tag="start",
            img_adr=os.path.join(self.log_dir, time_to_write),
        )
        self.log_start_goal(
            environment,
            task_info["visualization_target"],
            tag="goal",
            img_adr=os.path.join(self.log_dir, time_to_write),
        )

        self.log_queue = []
        self.action_queue = []

    def log(self, environment, action_str):
        """Record the current environment frame and the action taken."""
        image_tensor = environment.current_frame
        self.action_queue.append(action_str)
        self.log_queue.append(image_tensor)

    def log_start_goal(self, env, task_info, tag, img_adr):
        """Reconstruct a recorded start/goal state and save its RGB frame.

        Unlike BringObjImageVisualizer.log_start_goal, this variant resets
        the scene first, then initializes the arm, transports the object,
        teleports the agent, and writes
        ``<img_adr>_obj_<obj>_pickup_<tag>.png``.
        """
        object_location = task_info["object_location"]
        object_id = task_info["object_id"]
        agent_state = task_info["agent_pose"]
        this_controller = env.controller
        scene = this_controller.last_event.metadata[
            "sceneName"
        ]  # maybe we need to reset env actually]
        reset_environment_and_additional_commands(this_controller, scene)
        # for start arm from high up as a cheating, this block is very important. never remove
        event1, event2, event3 = initialize_arm(this_controller)
        if not (
            event1.metadata["lastActionSuccess"]
            and event2.metadata["lastActionSuccess"]
            and event3.metadata["lastActionSuccess"]
        ):
            print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")

        event = transport_wrapper(this_controller, object_id, object_location)
        if event.metadata["lastActionSuccess"] == False:
            print("ERROR: oh no could not transport in logging")

        # Put the agent into the exact recorded pose (position, rotation,
        # camera horizon).
        event = this_controller.step(
            dict(
                action="TeleportFull",
                standing=True,
                x=agent_state["position"]["x"],
                y=agent_state["position"]["y"],
                z=agent_state["position"]["z"],
                rotation=dict(
                    x=agent_state["rotation"]["x"],
                    y=agent_state["rotation"]["y"],
                    z=agent_state["rotation"]["z"],
                ),
                horizon=agent_state["cameraHorizon"],
            )
        )
        if event.metadata["lastActionSuccess"] == False:
            print("ERROR: oh no could not teleport in logging")

        image_tensor = this_controller.last_event.frame
        image_dir = (
            img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
        )
        # Channel flip before writing — cv2.imwrite expects BGR order
        # (the env frame is presumably RGB, hence [2, 1, 0]).
        cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
def save_image_list_to_gif(image_list, gif_name, gif_dir):
    """Tile a (seq_len, cols, w, h, c) image stack into a GIF at 5 fps.

    Each time step's `cols` images are concatenated side by side along the
    width axis, producing one wide frame per time step.
    """
    gif_adr = os.path.join(gif_dir, gif_name)

    seq_len, cols, w, h, c = image_list.shape

    # One wide canvas per time step: columns laid out along the width.
    pallet = np.zeros((seq_len, w, h * cols, c))

    for col_ind in range(cols):
        pallet[:, :, col_ind * h : (col_ind + 1) * h, :] = image_list[:, col_ind]

    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(gif_dir, exist_ok=True)
    imageio.mimsave(gif_adr, pallet.astype(np.uint8), format="GIF", duration=1 / 5)
    print("Saved result in ", gif_adr)
|
[
"numpy.stack",
"allenact_plugins.manipulathor_plugin.manipulathor_utils.transport_wrapper",
"os.makedirs",
"cv2.imwrite",
"numpy.zeros",
"os.path.exists",
"datetime.datetime.now",
"allenact_plugins.manipulathor_plugin.manipulathor_utils.initialize_arm",
"allenact_plugins.manipulathor_plugin.manipulathor_utils.reset_environment_and_additional_commands",
"os.path.join"
] |
[((11803, 11834), 'os.path.join', 'os.path.join', (['gif_dir', 'gif_name'], {}), '(gif_dir, gif_name)\n', (11815, 11834), False, 'import os\n'), ((11896, 11931), 'numpy.zeros', 'np.zeros', (['(seq_len, w, h * cols, c)'], {}), '((seq_len, w, h * cols, c))\n', (11904, 11931), True, 'import numpy as np\n'), ((602, 616), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (614, 616), False, 'from datetime import datetime\n'), ((867, 907), 'os.makedirs', 'os.makedirs', (['self.log_dir'], {'exist_ok': '(True)'}), '(self.log_dir, exist_ok=True)\n', (878, 907), False, 'import os\n'), ((3265, 3279), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3277, 3279), False, 'from datetime import datetime\n'), ((4723, 4788), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.reset_environment_and_additional_commands', 'reset_environment_and_additional_commands', (['this_controller', 'scene'], {}), '(this_controller, scene)\n', (4764, 4788), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((5838, 5869), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.initialize_arm', 'initialize_arm', (['this_controller'], {}), '(this_controller)\n', (5852, 5869), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm\n'), ((6150, 6212), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.transport_wrapper', 'transport_wrapper', (['this_controller', 'object_id', 'object_location'], {}), '(this_controller, object_id, object_location)\n', (6167, 6212), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((7192, 7245), 'cv2.imwrite', 'cv2.imwrite', (['image_dir', 'image_tensor[:, :, [2, 1, 0]]'], {}), '(image_dir, image_tensor[:, :, [2, 1, 0]])\n', (7203, 7245), False, 'import cv2\n'), ((7909, 7923), 'datetime.datetime.now', 'datetime.now', ([], 
{}), '()\n', (7921, 7923), False, 'from datetime import datetime\n'), ((10126, 10191), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.reset_environment_and_additional_commands', 'reset_environment_and_additional_commands', (['this_controller', 'scene'], {}), '(this_controller, scene)\n', (10167, 10191), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((10320, 10351), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.initialize_arm', 'initialize_arm', (['this_controller'], {}), '(this_controller)\n', (10334, 10351), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm\n'), ((10632, 10694), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.transport_wrapper', 'transport_wrapper', (['this_controller', 'object_id', 'object_location'], {}), '(this_controller, object_id, object_location)\n', (10649, 10694), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((11674, 11727), 'cv2.imwrite', 'cv2.imwrite', (['image_dir', 'image_tensor[:, :, [2, 1, 0]]'], {}), '(image_dir, image_tensor[:, :, [2, 1, 0]])\n', (11685, 11727), False, 'import cv2\n'), ((12059, 12082), 'os.path.exists', 'os.path.exists', (['gif_dir'], {}), '(gif_dir)\n', (12073, 12082), False, 'import os\n'), ((12092, 12112), 'os.makedirs', 'os.makedirs', (['gif_dir'], {}), '(gif_dir)\n', (12103, 12112), False, 'import os\n'), ((4485, 4517), 'numpy.stack', 'np.stack', (['self.log_queue'], {'axis': '(0)'}), '(self.log_queue, axis=0)\n', (4493, 4517), True, 'import numpy as np\n'), ((7543, 7599), 'numpy.zeros', 'np.zeros', (['env.controller.last_event.frame[:, :, 0].shape'], {}), '(env.controller.last_event.frame[:, :, 0].shape)\n', (7551, 7599), True, 'import numpy as np\n'), ((8988, 9020), 'numpy.stack', 'np.stack', (['self.log_queue'], {'axis': '(0)'}), '(self.log_queue, 
axis=0)\n', (8996, 9020), True, 'import numpy as np\n'), ((4935, 4976), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (4947, 4976), False, 'import os\n'), ((5133, 5174), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (5145, 5174), False, 'import os\n'), ((9251, 9292), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (9263, 9292), False, 'import os\n'), ((9449, 9490), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (9461, 9490), False, 'import os\n')]
|
import logging
from tornado import gen
import hijackingprevention.db_int as db_int
logger = logging.getLogger(__name__)
class User(db_int.Interface):
    """Reads, writes, and manipulates a per-site user object in the DB."""

    def __init__(self, uid, site, db):
        self.__id_type = "uid"
        self.__id = uid
        self.__data_type = 'userData_site-'
        self.__site = str(site)
        self.__db = db
        self.data = {}
        self.code = None  # verification token
        super(User, self).__init__(db, self.__data_type, str(site),
                self.__id_type, uid, self.__combine)

    @gen.coroutine
    def read_db(self):
        """Populate ``data`` (and ``code``, when present) from the DB."""
        collection = self.__db[self.__data_type + self.__site]
        user = yield collection.find_one({'uid': self.__id})
        if user is None:
            return
        self.data = user['data']
        # 'code' is optional on stored documents; keep the current value
        # (None until verified) when it is absent.
        self.code = user.get('code', self.code)

    def add_data(self, data):
        """Merge ``data`` into the user, appending to per-key lists."""
        for key, value in data.items():
            self.data.setdefault(key, []).append(value)

    def __combine(self):
        """Return the user as a plain dict ready for storage."""
        return {self.__id_type: self.__id, 'data': self.data,
                'code': self.code}
|
[
"logging.getLogger"
] |
[((94, 121), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (111, 121), False, 'import logging\n')]
|
# Standard python library
import logging
# The Fastapi web server
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
# Import uvicorn for debugging
import uvicorn
# The settings for the system
from settings import settings
# Acces to the bockchain
from blockchain import trustframework as tf
# Create the module-level logger used throughout this entry point.
logging.basicConfig(
    format='%(levelname)s - %(asctime)s - %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
# Create the FastAPI server and expose its OpenAPI schema under /api/v1.
app = FastAPI(
    title="FIWARE Canis Major with EBSI/Alastria APIs",
    description="FIWARE blockchain integration with SSI and Verifiable Credentials with interoperability EBSI-Alastria Red T",
    version="0.2.0",
    openapi_url="/api/v1/openapi.json",
)
# The routes for the Canis Major NGSI API functionality
from routers import ngsi_api
app.include_router(ngsi_api.router)
# The routes for Resolver APIs
from routers import resolver_api
app.include_router(resolver_api.router, include_in_schema=True)
# The routes for Issuer APIs (hidden from the generated OpenAPI docs)
from routers import issuer_api
app.include_router(issuer_api.router, include_in_schema=False)
# The route for Verifying a credential (hidden from the OpenAPI docs)
from routers import verify_credential_api
app.include_router(verify_credential_api.router, include_in_schema=False)
# Support for API keys to secure invocations of APIs
from fastapi_simple_security import api_key_router
app.include_router(api_key_router, prefix="/auth", tags=["API-key Authorization"])
# APIs to check for server health
from routers import server_health
app.include_router(server_health.router, include_in_schema=True)
# APIs to implement a simple, fast and secure messaging server
# (hidden from the OpenAPI docs)
from routers import secure_messaging_router
app.include_router(secure_messaging_router.router, include_in_schema=False)
# For serving static assets.
# Should be the last route added because it is serving the root ("/")
#app.mount("/", StaticFiles(directory="static", html=True), name="static")
# Template directory for dynamic HTML pages
templates = Jinja2Templates(directory="templates")
# Perform startup processing
@app.on_event("startup")
async def startup_event():
"""Connect to blockchain when starting the server"""
log.info("######### Configuration values #########")
if settings.PRODUCTION:
log.info(f"Running in PRODUCTION")
else:
log.info(f"Running in DEVELOPMENT")
log.info(f"Current directory: {settings.INITIAL_DIR}")
log.info(f"SmartContract source dir: {settings.CONTRACTS_DIR}")
log.info(f"SmartContract binary dir: {settings.CONTRACTS_OUTPUT_DIR}")
log.info(f"Blockchain IP: {settings.BLOCKCHAIN_NODE_IP}")
log.info(f"Database Dir: {settings.DATABASE_DIR}")
tf.connect_blockchain(settings.BLOCKCHAIN_NODE_IP)
log.info(f"Connected to the blockchain provider")
log.info("########################################")
# This is for running the server in test mode (reload=True enables
# auto-restart on file changes; production deployments run uvicorn/gunicorn
# externally instead).
if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
|
[
"blockchain.trustframework.connect_blockchain",
"logging.basicConfig",
"fastapi.templating.Jinja2Templates",
"uvicorn.run",
"logging.getLogger",
"fastapi.FastAPI"
] |
[((381, 476), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s - %(asctime)s - %(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\n", (400, 476), False, 'import logging\n'), ((484, 511), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (501, 511), False, 'import logging\n'), ((547, 792), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""FIWARE Canis Major with EBSI/Alastria APIs"""', 'description': '"""FIWARE blockchain integration with SSI and Verifiable Credentials with interoperability EBSI-Alastria Red T"""', 'version': '"""0.2.0"""', 'openapi_url': '"""/api/v1/openapi.json"""'}), "(title='FIWARE Canis Major with EBSI/Alastria APIs', description=\n 'FIWARE blockchain integration with SSI and Verifiable Credentials with interoperability EBSI-Alastria Red T'\n , version='0.2.0', openapi_url='/api/v1/openapi.json')\n", (554, 792), False, 'from fastapi import FastAPI\n'), ((2069, 2107), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (2084, 2107), False, 'from fastapi.templating import Jinja2Templates\n'), ((2758, 2808), 'blockchain.trustframework.connect_blockchain', 'tf.connect_blockchain', (['settings.BLOCKCHAIN_NODE_IP'], {}), '(settings.BLOCKCHAIN_NODE_IP)\n', (2779, 2808), True, 'from blockchain import trustframework as tf\n'), ((3000, 3063), 'uvicorn.run', 'uvicorn.run', (['"""main:app"""'], {'host': '"""0.0.0.0"""', 'port': '(8000)', 'reload': '(True)'}), "('main:app', host='0.0.0.0', port=8000, reload=True)\n", (3011, 3063), False, 'import uvicorn\n')]
|
import math

# Read an integer from stdin and print its factorial.
num = int(input("Enter the number:"))
try:
    result = math.factorial(num)
    print(result)
except ValueError:
    # math.factorial raises ValueError for negative input; a bare `except`
    # would also swallow unrelated errors such as KeyboardInterrupt.
    print("factorial is not defined for negative numbers")
|
[
"math.factorial"
] |
[((65, 84), 'math.factorial', 'math.factorial', (['num'], {}), '(num)\n', (79, 84), False, 'import math\n')]
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Headless backend so figures can be saved without a display server.
plt.switch_backend('agg')
from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import sys
import time
# execution) python gcn_logP.py 3 64 256 0.001 gsc
# Default option (used when the expected 5 CLI arguments are not given)
num_layer = 3       # number of graph-convolution layers
hidden_dim1 = 64    # graph-convolution feature width
hidden_dim2 = 256   # readout / MLP width
init_lr = 0.001     # initial learning rate
using_sc = 'gsc'     # 'sc, 'gsc, 'no'

if( len(sys.argv) == 6 ):
    # Note that sys.argv[0] is gcn_logP.py
    num_layer = int(sys.argv[1])
    hidden_dim1 = int(sys.argv[2])
    hidden_dim2 = int(sys.argv[3])
    init_lr = float(sys.argv[4])
    using_sc = sys.argv[5]          # 'sc, 'gsc, 'no'

# Model name encodes the hyper-parameters; also used for checkpoint/figure paths.
model_name = 'gcn_logP_' + str(num_layer) + '_' + str(hidden_dim1) + '_' + str(hidden_dim2) + '_' + str(init_lr) + '_' + using_sc
#1. Prepare data - X : fingerprint, Y : logP
#    and split to (training:validation:test) set
# read_ZINC_smiles presumably returns SMILES strings with their logP and
# TPSA values (by variable name) — TPSA is loaded but unused here.
smi_total, logP_total, tpsa_total = read_ZINC_smiles(50000)
# Sequential 30k / 10k / 10k split of the 50k molecules.
num_train = 30000
num_validation = 10000
num_test = 10000

smi_train = smi_total[0:num_train]
logP_train = logP_total[0:num_train]
smi_validation = smi_total[num_train:(num_train+num_validation)]
logP_validation = logP_total[num_train:(num_train+num_validation)]
smi_test = smi_total[(num_train+num_validation):]
logP_test = logP_total[(num_train+num_validation):]
#2. Construct a neural network
def skip_connection(input_X, new_X, act):
    """Residual connection: act(H^(l) + H^(l+1)).

    When the feature widths differ, the input is first projected to the
    output width with a bias-free dense layer so the tensors can be added.
    """
    in_width = int(input_X.get_shape()[2])
    out_width = int(new_X.get_shape()[2])

    shortcut = input_X
    if in_width != out_width:
        shortcut = tf.layers.dense(input_X, units=out_width, use_bias=False)

    return act(new_X + shortcut)
def gated_skip_connection(input_X, new_X, act):
    """Gated residual: z*H^(l) + (1-z)*H^(l+1) with z = sigmoid(W1 H + W2 H').

    `act` is accepted for signature parity with skip_connection but is not
    applied here (the gate itself provides the non-linearity).
    """
    in_width = int(input_X.get_shape()[2])
    out_width = int(new_X.get_shape()[2])

    if in_width != out_width:
        # Project the shortcut so both terms share the same width.
        input_X = tf.layers.dense(input_X, units=out_width, use_bias=False)

    # Gate coefficient computed from both the old and the new representation.
    gate_old = tf.layers.dense(input_X, units=out_width, use_bias=True)
    gate_new = tf.layers.dense(new_X, units=out_width, use_bias=True)
    z = tf.nn.sigmoid(gate_old + gate_new)

    return tf.multiply(new_X, z) + tf.multiply(input_X, 1.0 - z)
def graph_convolution(input_X, input_A, hidden_dim, act, using_sc):
    """One graph-convolution layer: H^(l+1) = A(H^(l) W + b), plus skip.

    Args:
        input_X: node features, shape (batch, num_atoms, features).
        input_A: adjacency matrices, shape (batch, num_atoms, num_atoms).
        hidden_dim: output feature width.
        act: activation applied by the chosen skip-connection variant.
        using_sc: 'sc' (plain skip), 'gsc' (gated skip) or 'no' (no skip);
            any other value falls back to the gated skip connection.
    """
    output_X = tf.layers.dense(input_X,
                               units=hidden_dim,
                               use_bias=True,
                               activation=None,
                               kernel_initializer=tf.contrib.layers.xavier_initializer())
    # Aggregate neighbour features through the adjacency matrix.
    output_X = tf.matmul(input_A, output_X)

    if( using_sc == 'sc' ):
        output_X = skip_connection(input_X, output_X, act)
    elif( using_sc == 'gsc' ):
        output_X = gated_skip_connection(input_X, output_X, act)
    elif( using_sc == 'no' ):
        output_X = act(output_X)
    else:
        # BUG FIX: the fallback previously called gated_skip_connection
        # without the required `act` argument, which would raise TypeError.
        output_X = gated_skip_connection(input_X, output_X, act)

    return output_X
# Readout
def readout(input_X, hidden_dim, act):
    """Graph readout: Z = act(sum_{v in G} NN(H^(L)_v)).

    Projects every node embedding with a dense layer, sums over the node
    axis, and applies the activation.
    """
    output_Z = tf.layers.dense(input_X,
                               units=hidden_dim,
                               use_bias=True,
                               activation=None,
                               kernel_initializer=tf.contrib.layers.xavier_initializer())
    output_Z = tf.reduce_sum(output_Z, axis=1)
    # BUG FIX: the original computed `output = act(output_Z)` but returned
    # the pre-activation sum, leaving `act` without any effect.
    return act(output_Z)
# Molecules are padded to 50 atoms, each described by a 58-dim feature
# vector (fixed by the preprocessing in convert_to_graph — verify there).
num_atoms=50
num_features=58
# Placeholders: X = node features, A = adjacency matrices, Y = logP targets.
X = tf.placeholder(tf.float64, shape=[None, num_atoms, num_features])
A = tf.placeholder(tf.float64, shape=[None, num_atoms, num_atoms])
Y = tf.placeholder(tf.float64, shape=[None, ])
is_training = tf.placeholder(tf.bool, shape=())
h = X
# Graph convolution layers
for i in range(num_layer):
    h = graph_convolution(h,
                          A,
                          hidden_dim1,
                          tf.nn.relu,
                          using_sc)
# Readout layer
h = readout(h, hidden_dim2, tf.nn.sigmoid)
# Predictor composed of MLPs(multi-layer perceptron)
h = tf.layers.dense(h,
                    units=hidden_dim2,
                    use_bias=True,
                    activation=tf.nn.relu,
                    kernel_initializer=tf.contrib.layers.xavier_initializer())
h = tf.layers.dense(h,
                    units=hidden_dim2,
                    use_bias=True,
                    activation=tf.nn.tanh,
                    kernel_initializer=tf.contrib.layers.xavier_initializer())
Y_pred = tf.layers.dense(h,
                        units=1,
                        use_bias=True,
                        kernel_initializer=tf.contrib.layers.xavier_initializer())
#3. Set a loss function, in this case we will use a MSE-loss (l2-norm)
# Flatten (batch, 1) predictions to (batch,) to match the Y placeholder.
Y_pred = tf.reshape(Y_pred, shape=[-1,])
Y_pred = tf.cast(Y_pred, tf.float64)
Y = tf.cast(Y, tf.float64)
loss = tf.reduce_mean( (Y_pred - Y)**2 )
#4. Set an optimizer
# lr is a variable (not a constant) so it can be decayed via tf.assign
# during training.
lr = tf.Variable(0.0, trainable = False) # learning rate
opt = tf.train.AdamOptimizer(lr).minimize(loss) # Note that we use the Adam optimizer in this practice.
#opt = tf.train.GradientDescentOptimizer(lr).minimize(loss)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
#5. Training & validation
batch_size = 100
epoch_size = 100
decay_rate = 0.95  # per-epoch multiplicative learning-rate decay
batch_train = int(num_train/batch_size)
batch_validation = int(num_validation/batch_size)
batch_test = int(num_test/batch_size)
total_iter = 0
total_time = 0.0
for t in range(epoch_size):
    pred_train = []
    # Exponentially decay the learning rate each epoch.
    sess.run(tf.assign( lr, init_lr*( decay_rate**t ) ))
    st = time.time()
    for i in range(batch_train):
        total_iter += 1
        # Convert this batch of SMILES strings to (features, adjacency).
        smi_batch = smi_train[i*batch_size:(i+1)*batch_size]
        X_batch, A_batch = convert_to_graph(smi_batch)
        Y_batch = logP_train[i*batch_size:(i+1)*batch_size]

        _opt, _Y, _loss = sess.run([opt, Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : True})
        pred_train.append(_Y.flatten())
        #print("Epoch :", t, "\t batch:", i, "Loss :", _loss, "\t Training")

    # Epoch-level training statistics.
    pred_train = np.concatenate(pred_train, axis=0)
    error = (logP_train-pred_train)
    mae = np.mean(np.abs(error))
    rmse = np.sqrt(np.mean(error**2))
    stdv = np.std(error)
    print ("MAE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Training, \t Epoch :", t)

    # Validation pass: forward only, no optimizer step.
    pred_validation = []
    for i in range(batch_validation):
        smi_batch = smi_validation[i*batch_size:(i+1)*batch_size]
        X_batch, A_batch = convert_to_graph(smi_batch)
        Y_batch = logP_validation[i*batch_size:(i+1)*batch_size]

        _Y, _loss = sess.run([Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : False})
        #print("Epoch :", t, "\t batch:", i, "Loss :", _loss, "\t validation")
        pred_validation.append(_Y.flatten())

    pred_validation = np.concatenate(pred_validation, axis=0)
    error = (logP_validation-pred_validation)
    mae = np.mean(np.abs(error))
    rmse = np.sqrt(np.mean(error**2))
    stdv = np.std(error)
    et = time.time()
    print ("MAE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Validation, \t Epoch :", t, "\t Time per epoch", (et-st))
    total_time += (et-st)

    ### save model
    ckpt_path = 'save/'+model_name+'.ckpt'
    saver.save(sess, ckpt_path, global_step=total_iter)
#6. Test: forward pass over the held-out set, then a truth-vs-prediction plot.
pred_test = []
for i in range(batch_test):
    smi_batch = smi_test[i*batch_size:(i+1)*batch_size]
    X_batch, A_batch = convert_to_graph(smi_batch)
    Y_batch = logP_test[i*batch_size:(i+1)*batch_size]

    _Y, _loss = sess.run([Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : False})
    pred_test.append(_Y.flatten())

# Test-set statistics.
pred_test = np.concatenate(pred_test, axis=0)
error = (logP_test-pred_test)
mae = np.mean(np.abs(error))
rmse = np.sqrt(np.mean(error**2))
stdv = np.std(error)
# BUG FIX: the first value printed is the MAE, but it was labelled "MSE :".
print ("MAE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Test", "\t Total time :", total_time)

# Scatter plot of predicted vs. true logP with the y = x reference line.
plt.figure()
plt.scatter(logP_test, pred_test, s=3)
plt.xlabel('logP - Truth', fontsize=15)
plt.ylabel('logP - Prediction', fontsize=15)
x = np.arange(-4,6)
plt.plot(x,x,c='black')
plt.tight_layout()
plt.savefig('./figures/'+model_name+'_results.png')
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.reduce_sum",
"utils.read_ZINC_smiles",
"numpy.abs",
"tensorflow.reshape",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"matplotlib.pyplot.figure",
"tensorflow.Variable",
"numpy.arange",
"numpy.mean",
"tensorflow.multiply",
"tensorflow.assign",
"matplotlib.pyplot.tight_layout",
"numpy.std",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.Session",
"matplotlib.pyplot.ylabel",
"utils.convert_to_graph",
"numpy.concatenate",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"tensorflow.layers.dense",
"time.time",
"tensorflow.nn.sigmoid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((75, 100), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (93, 100), True, 'import matplotlib.pyplot as plt\n'), ((955, 978), 'utils.read_ZINC_smiles', 'read_ZINC_smiles', (['(50000)'], {}), '(50000)\n', (971, 978), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((3789, 3854), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, num_atoms, num_features]'}), '(tf.float64, shape=[None, num_atoms, num_features])\n', (3803, 3854), True, 'import tensorflow as tf\n'), ((3859, 3921), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, num_atoms, num_atoms]'}), '(tf.float64, shape=[None, num_atoms, num_atoms])\n', (3873, 3921), True, 'import tensorflow as tf\n'), ((3926, 3966), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None]'}), '(tf.float64, shape=[None])\n', (3940, 3966), True, 'import tensorflow as tf\n'), ((3983, 4016), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()'}), '(tf.bool, shape=())\n', (3997, 4016), True, 'import tensorflow as tf\n'), ((5081, 5111), 'tensorflow.reshape', 'tf.reshape', (['Y_pred'], {'shape': '[-1]'}), '(Y_pred, shape=[-1])\n', (5091, 5111), True, 'import tensorflow as tf\n'), ((5122, 5149), 'tensorflow.cast', 'tf.cast', (['Y_pred', 'tf.float64'], {}), '(Y_pred, tf.float64)\n', (5129, 5149), True, 'import tensorflow as tf\n'), ((5154, 5176), 'tensorflow.cast', 'tf.cast', (['Y', 'tf.float64'], {}), '(Y, tf.float64)\n', (5161, 5176), True, 'import tensorflow as tf\n'), ((5184, 5217), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((Y_pred - Y) ** 2)'], {}), '((Y_pred - Y) ** 2)\n', (5198, 5217), True, 'import tensorflow as tf\n'), ((5245, 5278), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (5256, 5278), True, 'import tensorflow as tf\n'), ((5469, 5481), 'tensorflow.Session', 'tf.Session', ([], {}), 
'()\n', (5479, 5481), True, 'import tensorflow as tf\n'), ((5489, 5522), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5520, 5522), True, 'import tensorflow as tf\n'), ((5546, 5562), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5560, 5562), True, 'import tensorflow as tf\n'), ((8042, 8075), 'numpy.concatenate', 'np.concatenate', (['pred_test'], {'axis': '(0)'}), '(pred_test, axis=0)\n', (8056, 8075), True, 'import numpy as np\n'), ((8176, 8189), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (8182, 8189), True, 'import numpy as np\n'), ((8286, 8298), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8296, 8298), True, 'import matplotlib.pyplot as plt\n'), ((8299, 8337), 'matplotlib.pyplot.scatter', 'plt.scatter', (['logP_test', 'pred_test'], {'s': '(3)'}), '(logP_test, pred_test, s=3)\n', (8310, 8337), True, 'import matplotlib.pyplot as plt\n'), ((8338, 8377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""logP - Truth"""'], {'fontsize': '(15)'}), "('logP - Truth', fontsize=15)\n", (8348, 8377), True, 'import matplotlib.pyplot as plt\n'), ((8378, 8422), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""logP - Prediction"""'], {'fontsize': '(15)'}), "('logP - Prediction', fontsize=15)\n", (8388, 8422), True, 'import matplotlib.pyplot as plt\n'), ((8427, 8443), 'numpy.arange', 'np.arange', (['(-4)', '(6)'], {}), '(-4, 6)\n', (8436, 8443), True, 'import numpy as np\n'), ((8443, 8468), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {'c': '"""black"""'}), "(x, x, c='black')\n", (8451, 8468), True, 'import matplotlib.pyplot as plt\n'), ((8467, 8485), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8483, 8485), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8541), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./figures/' + model_name + '_results.png')"], {}), "('./figures/' + model_name + '_results.png')\n", (8497, 8541), True, 'import 
matplotlib.pyplot as plt\n'), ((2920, 2948), 'tensorflow.matmul', 'tf.matmul', (['input_A', 'output_X'], {}), '(input_A, output_X)\n', (2929, 2948), True, 'import tensorflow as tf\n'), ((3674, 3705), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['output_Z'], {'axis': '(1)'}), '(output_Z, axis=1)\n', (3687, 3705), True, 'import tensorflow as tf\n'), ((5918, 5929), 'time.time', 'time.time', ([], {}), '()\n', (5927, 5929), False, 'import time\n'), ((6426, 6460), 'numpy.concatenate', 'np.concatenate', (['pred_train'], {'axis': '(0)'}), '(pred_train, axis=0)\n', (6440, 6460), True, 'import numpy as np\n'), ((6579, 6592), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (6585, 6592), True, 'import numpy as np\n'), ((7195, 7234), 'numpy.concatenate', 'np.concatenate', (['pred_validation'], {'axis': '(0)'}), '(pred_validation, axis=0)\n', (7209, 7234), True, 'import numpy as np\n'), ((7363, 7376), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (7369, 7376), True, 'import numpy as np\n'), ((7387, 7398), 'time.time', 'time.time', ([], {}), '()\n', (7396, 7398), False, 'import time\n'), ((7796, 7823), 'utils.convert_to_graph', 'convert_to_graph', (['smi_batch'], {}), '(smi_batch)\n', (7812, 7823), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((8120, 8133), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (8126, 8133), True, 'import numpy as np\n'), ((8150, 8169), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (8157, 8169), True, 'import numpy as np\n'), ((2012, 2066), 'tensorflow.layers.dense', 'tf.layers.dense', (['input_X'], {'units': 'out_dim', 'use_bias': '(True)'}), '(input_X, units=out_dim, use_bias=True)\n', (2027, 2066), True, 'import tensorflow as tf\n'), ((2080, 2132), 'tensorflow.layers.dense', 'tf.layers.dense', (['new_X'], {'units': 'out_dim', 'use_bias': '(True)'}), '(new_X, units=out_dim, use_bias=True)\n', (2095, 2132), True, 'import tensorflow as tf\n'), ((2160, 2182), 
'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['(X1 + X2)'], {}), '(X1 + X2)\n', (2173, 2182), True, 'import tensorflow as tf\n'), ((2263, 2318), 'tensorflow.layers.dense', 'tf.layers.dense', (['input_X'], {'units': 'out_dim', 'use_bias': '(False)'}), '(input_X, units=out_dim, use_bias=False)\n', (2278, 2318), True, 'import tensorflow as tf\n'), ((2404, 2440), 'tensorflow.multiply', 'tf.multiply', (['new_X', 'gate_coefficient'], {}), '(new_X, gate_coefficient)\n', (2415, 2440), True, 'import tensorflow as tf\n'), ((2443, 2487), 'tensorflow.multiply', 'tf.multiply', (['input_X', '(1.0 - gate_coefficient)'], {}), '(input_X, 1.0 - gate_coefficient)\n', (2454, 2487), True, 'import tensorflow as tf\n'), ((4548, 4586), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4584, 4586), True, 'import tensorflow as tf\n'), ((4771, 4809), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4807, 4809), True, 'import tensorflow as tf\n'), ((4960, 4998), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4996, 4998), True, 'import tensorflow as tf\n'), ((5304, 5330), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (5326, 5330), True, 'import tensorflow as tf\n'), ((5865, 5905), 'tensorflow.assign', 'tf.assign', (['lr', '(init_lr * decay_rate ** t)'], {}), '(lr, init_lr * decay_rate ** t)\n', (5874, 5905), True, 'import tensorflow as tf\n'), ((6075, 6102), 'utils.convert_to_graph', 'convert_to_graph', (['smi_batch'], {}), '(smi_batch)\n', (6091, 6102), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((6515, 6528), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (6521, 6528), True, 'import numpy as np\n'), ((6549, 6568), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (6556, 6568), True, 'import numpy as np\n'), ((6836, 6863), 
'utils.convert_to_graph', 'convert_to_graph', (['smi_batch'], {}), '(smi_batch)\n', (6852, 6863), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((7299, 7312), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (7305, 7312), True, 'import numpy as np\n'), ((7333, 7352), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (7340, 7352), True, 'import numpy as np\n'), ((2865, 2903), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (2901, 2903), True, 'import tensorflow as tf\n'), ((3619, 3657), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (3655, 3657), True, 'import tensorflow as tf\n'), ((1614, 1669), 'tensorflow.layers.dense', 'tf.layers.dense', (['input_X'], {'units': 'out_dim', 'use_bias': '(False)'}), '(input_X, units=out_dim, use_bias=False)\n', (1629, 1669), True, 'import tensorflow as tf\n')]
|
# Average Pariwsie Distance:
import argparse
import pickle
import pandas as pd
from scipy.spatial import distance
# CLI: which embedding column to evaluate and which metadata label to group by.
parser = argparse.ArgumentParser(
    description="Average Pariwsie Distance Evaluation (Quality Analysis)"
)
# e.g. 'contrastive', 'barlowtwins', or 'combined' (see PairwiseDistance docstring)
parser.add_argument("embed_type", type=str, metavar="N", help="")
# e.g. 'valence_arousal', 'age', 'gender', 'noise'
parser.add_argument("label_name", type=str, metavar="N", help="")
arg = parser.parse_args()

# TODO: label class automatically update based on label name
# Collapse the fine-grained noise categories into a binary Clean/Noisy label.
noise_lable_transfer = {
    "Clean": "Clean",
    "Babble": "Noisy",
    "Telephone": "Noisy",
    "Music": "Noisy",
}
# Fold the 60s and 70s age buckets into the 50s bucket.
age_lable_transfer = {
    60: 50,
    70: 50,
}
def filter(data_dict):
    """Flatten each entry's "emotion" list to its first element.

    Entries whose emotion list is empty are dropped from the *returned*
    dict only; the input mapping keeps them. Note the copy is shallow, so
    the flattening mutates the inner records shared with ``data_dict``.
    """
    kept = data_dict.copy()
    for key, record in data_dict.items():
        emotions = record["emotion"]
        if emotions:
            record["emotion"] = emotions[0]
        else:
            del kept[key]
    return kept
class PairwiseDistance:
    """Evaluate embedding quality via pairwise distance between class centroids.

    1. extract all classes we have
    2. calculate the centroid (mean of all points belonging to the class)
    3. calculate the distance between this centroid and all other centroids
    4. print the per-class pairwise distance for train and test sets

    Args:
        embed_type: 'contrastive', 'barlowtwins', or 'combined'
        label_name: 'valence_arousal', 'age', 'gender', 'noise'
        train_result_path: path to trained embedding result (pickle)
        test_result_path: path to test embedding result (we evaluate this first)
    """

    def __init__(self, embed_type, label_name, train_result_path, test_result_path):
        self.embed_type = embed_type
        self.label_name = label_name
        self.train_result_path = train_result_path
        self.test_result_path = test_result_path

    def extract_embedding_label(self, embed_type, label_name, file_path):
        """Load the pickled result dict and return a DataFrame holding the
        embedding column and the label column; rows without an embedding
        are dropped.
        """
        with open(file_path, "rb") as fp:
            data = pickle.load(fp)
        if self.label_name == "emotion":
            # BUGFIX: the filtered dict was previously discarded
            # ("filter(data)" with no assignment), so entries with an empty
            # emotion list were never removed before building the frame.
            data = filter(data)
        df_data = pd.DataFrame.from_dict(data, orient="index")
        df_data = df_data[[embed_type, label_name]]
        df_data = df_data.dropna(subset=[embed_type])
        return df_data

    def _get_avg_pairwise_dist(self, centroids) -> dict:
        """Given a dict of label -> centroid point, return label -> summed
        euclidean distance to every other centroid.

        NOTE: despite the name, the sum is not averaged — the division is
        deliberately commented out below.
        """
        result_dict = {}
        for label, centroid in centroids.items():
            centroids_cp = centroids.copy()
            pairwise_dist = 0.0
            del centroids_cp[label]
            for other_label, other_centroid in centroids_cp.items():
                dist = distance.euclidean(centroid, other_centroid)
                pairwise_dist += dist
            result_dict[label] = pairwise_dist  # / len(centroids)
        print(result_dict)
        return result_dict

    def avg_distances_per_class(self):
        """Compute per-class centroid pairwise distances for train and test.

        Returns:
            (train_df, test_df): DataFrames indexed by class label with a
            single 'avg_pairwise_dist' column.
        """
        train_pair_distances = {}
        test_pair_distances = {}
        # data type of embedding: numpy.ndarray here
        train_df_data = self.extract_embedding_label(
            embed_type=self.embed_type,
            label_name=self.label_name,
            file_path=self.train_result_path,
        )
        test_df_data = self.extract_embedding_label(
            embed_type=self.embed_type,
            label_name=self.label_name,
            file_path=self.test_result_path,
        )
        # Collapse fine-grained noise labels into Clean/Noisy.
        if self.label_name == "noise_type":
            train_df_data["noise_type"] = train_df_data["noise_type"].replace(
                noise_lable_transfer
            )
            test_df_data["noise_type"] = test_df_data["noise_type"].replace(
                noise_lable_transfer
            )
        # Fold the 60/70 age buckets into the 50s bucket.
        if self.label_name == "age":
            train_df_data["age"] = train_df_data["age"].replace(age_lable_transfer)
            test_df_data["age"] = test_df_data["age"].replace(age_lable_transfer)
        label_classes = train_df_data[self.label_name].unique().tolist()
        # For each class: the centroid is the mean of all embedding vectors
        # belonging to that class.
        for label in label_classes:
            # training set
            train_one_class = train_df_data[train_df_data[self.label_name] == label]
            # all vectors belonging to this class
            one_class_vectors = train_one_class[self.embed_type].to_numpy()
            # get centroid of those vectors
            one_class_centriod = one_class_vectors.mean(axis=0)
            train_pair_distances[label] = one_class_centriod
            # test set
            test_one_class = test_df_data[test_df_data[self.label_name] == label]
            test_one_class_vectors = test_one_class[self.embed_type].to_numpy()
            test_one_class_centriod = test_one_class_vectors.mean(axis=0)
            test_pair_distances[label] = test_one_class_centriod
        # get average pairwise distance
        train_avg_distances = self._get_avg_pairwise_dist(train_pair_distances)
        test_avg_distances = self._get_avg_pairwise_dist(test_pair_distances)
        # return result in the pandas format
        train_avg_pair_distances = pd.DataFrame.from_dict(
            train_avg_distances,
            orient="index",
            columns=["avg_pairwise_dist"],
        )
        # FIX: corrected "Pariwsie" typo in the printed headers below.
        print(
            "--- Average Pairwise Distance of {}/{} (train)--- \n{}".format(
                self.label_name, self.embed_type, train_avg_pair_distances
            )
        )
        test_avg_pair_distances = pd.DataFrame.from_dict(
            test_avg_distances,
            orient="index",
            columns=["avg_pairwise_dist"],
        )
        print(
            "--- Average Pairwise Distance of {}/{} (test)--- \n{}".format(
                self.label_name, self.embed_type, test_avg_pair_distances
            )
        )
        # TODO: write into csv file? (better copy-paste)
        return train_avg_pair_distances, test_avg_pair_distances
if __name__ == "__main__":
    embed_type = arg.embed_type
    label_name = arg.label_name
    # TODO: auto search file in cache
    # NOTE(review): PairwiseDistance.__init__ requires train_result_path and
    # test_result_path, but both are commented out below, so this call raises
    # TypeError until the paths are restored.
    avg_pair_distance = PairwiseDistance(
        embed_type=embed_type,
        label_name=label_name,
        # train_result_path=iemocap_barlowtwins_train,
        # test_result_path=iemocap_barlowtwins_test,
    )
    avg_pair_distance.avg_distances_per_class()
|
[
"pandas.DataFrame.from_dict",
"pickle.load",
"argparse.ArgumentParser",
"scipy.spatial.distance.euclidean"
] |
[((127, 226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Average Pariwsie Distance Evaluation (Quality Analysis)"""'}), "(description=\n 'Average Pariwsie Distance Evaluation (Quality Analysis)')\n", (150, 226), False, 'import argparse\n'), ((5598, 5693), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['train_avg_distances'], {'orient': '"""index"""', 'columns': "['avg_pairwise_dist']"}), "(train_avg_distances, orient='index', columns=[\n 'avg_pairwise_dist'])\n", (5620, 5693), True, 'import pandas as pd\n'), ((5962, 6056), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['test_avg_distances'], {'orient': '"""index"""', 'columns': "['avg_pairwise_dist']"}), "(test_avg_distances, orient='index', columns=[\n 'avg_pairwise_dist'])\n", (5984, 6056), True, 'import pandas as pd\n'), ((2042, 2057), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2053, 2057), False, 'import pickle\n'), ((2154, 2198), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {'orient': '"""index"""'}), "(data, orient='index')\n", (2176, 2198), True, 'import pandas as pd\n'), ((2788, 2832), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['centroid', 'other_centroid'], {}), '(centroid, other_centroid)\n', (2806, 2832), False, 'from scipy.spatial import distance\n')]
|
import numpy as np
from ROI_Arrival import ROI_Arrival,ROI_Location
#prefined imports
import sys,time,winsound
import numpy as np
from PyQt5.QtWidgets import (QApplication, QPushButton,QWidget,QGridLayout,
QSizePolicy,QLineEdit,
QMainWindow,QAction,QVBoxLayout
,QDockWidget,QListView,
QAbstractItemView,QLabel,QFileDialog,QTextEdit,
QInputDialog,QSlider,QMdiArea,QMdiSubWindow,
QMessageBox)
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
#import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
class ROI_Viewer(QMainWindow):
    """Window for exploring ROI pulse arrival times between sync pulses.

    The arrival-time distribution is split into three regions by two
    sliders; process() histograms the arrivals, plots them, and reports
    the percentage of ROI counts falling into each region.
    """
    # Becomes True once process() has produced a spectrum.
    done = False

    def __init__(self, list_time, list_channel, sync_time, calibration):
        """
        list_time, list_channel: per-pulse arrival times and channels
            (list-mode data, numpy arrays).
        sync_time: sync-pulse arrival times (numpy array).
        calibration: channel-to-energy calibration forwarded to ROI_Arrival.
        """
        super().__init__()
        self.num_sync = sync_time.size
        self.num_pulses = list_time.size
        self.list_time, self.list_channel = list_time, list_channel
        self.sync_time, self.calibration = sync_time, calibration
        # Spacing between consecutive sync pulses (assumed uniform).
        self.sync_delta = sync_time[2] - sync_time[1]
        # Default ROI energy window [MeV]; editable through the text boxes.
        self.lower, self.upper = 9.5, 10.9
        self.font1 = QFont()
        self.font1.setPointSize(12)
        self.size_policy = QSizePolicy.Expanding
        self.menu()
        self.showMaximized()
        self.setWindowTitle('ROI Timing Arrival')
        self.geometry()
        # self.process()
        self.show()

    def menu(self):
        """Build the File menu; saving stays disabled until process() runs."""
        self.menuFile = self.menuBar().addMenu('&File')
        self.save_file = QAction('&Save Spectrum')
        self.save_file.triggered.connect(self.save_spectrum)
        self.save_file.setShortcut('CTRL+S')
        self.save_file.setEnabled(False)
        # self.save_roi=QAction('&Save ROI')
        # self.save_roi.triggered.connect(self.save_roi_csv)
        # self.save_roi.setEnabled(True)
        self.menuFile.addActions([self.save_file])

    def geometry(self):
        """Lay out the sliders, ROI entry boxes, process button and plot.

        NOTE(review): this shadows QWidget.geometry(); harmless here since
        the base method is never called on this class, but consider renaming.
        """
        r1_label = QLabel(r'Region 1-2 divider: [us]')
        r1_label.setFont(self.font1)
        r2_label = QLabel(r'Region 2-3 divider: [us]')
        r2_label.setFont(self.font1)
        # Slider for the region 1/2 boundary: 0 .. sync_delta-1 [us].
        self.r_1_slider = QSlider(Qt.Horizontal)
        self.r_1_slider.setSizePolicy(self.size_policy, self.size_policy)
        self.r_1_slider.setMinimum(0)
        self.r_1_slider.setMaximum(self.sync_delta - 1)
        self.r_1_slider.setSingleStep(1)
        self.r_1_slider.setTickInterval(50)
        self.r_1_slider.setValue(100)
        self.r_1_slider.setTickPosition(QSlider.TicksBelow)
        self.r_1_slider.valueChanged.connect(self.update_r_1)
        self.r_1_slider.setFont(self.font1)
        # Slider for the region 2/3 boundary; its minimum tracks the first
        # slider (see update_r_1) so the regions cannot cross.
        self.r_2_slider = QSlider(Qt.Horizontal)
        self.r_2_slider.setSizePolicy(self.size_policy, self.size_policy)
        self.r_2_slider.setMinimum(101)
        self.r_2_slider.setMaximum(self.sync_delta)
        self.r_2_slider.setSingleStep(1)
        self.r_2_slider.setTickInterval(50)
        self.r_2_slider.setValue(101)
        self.r_2_slider.setTickPosition(QSlider.TicksBelow)
        self.r_2_slider.valueChanged.connect(self.update_r_2)
        self.r_2_slider.setFont(self.font1)
        # Labels mirroring the current slider values.
        self.r_1_label = QLabel(self)
        self.r_1_label.setSizePolicy(self.size_policy, self.size_policy)
        self.r_1_label.setText(str(self.r_1_slider.value()))
        self.r_1_label.setFont(self.font1)
        self.r_2_label = QLabel(self)
        self.r_2_label.setSizePolicy(self.size_policy, self.size_policy)
        self.r_2_label.setText(str(self.r_2_slider.value()))
        self.r_2_label.setFont(self.font1)
        self.processer = QPushButton('Process', self)
        self.processer.clicked.connect(self.process)
        self.processer.setFont(self.font1)
        # ROI energy window entry boxes, pre-filled with the defaults.
        lower_label = QLabel('Lower ROI: [MeV]', self)
        lower_label.setFont(self.font1)
        upper_label = QLabel('Upper ROI: [MeV]', self)
        upper_label.setFont(self.font1)
        self.lower_text = QLineEdit(self)
        self.lower_text.setFont(self.font1)
        self.lower_text.setText(str(self.lower))
        self.upper_text = QLineEdit(self)
        self.upper_text.setFont(self.font1)
        self.upper_text.setText(str(self.upper))
        # Matplotlib canvas + toolbar hosting the arrival-time spectrum.
        self.time_plot = QWidget()
        self.time_figure = Figure()
        self.time_canvas = FigureCanvas(self.time_figure)
        self.time_toolbar = NavigationToolbar(self.time_canvas, self)
        layout = QVBoxLayout()
        layout.addWidget(self.time_toolbar)
        layout.addWidget(self.time_canvas)
        self.time_plot.setLayout(layout)
        self.time_ax = self.time_canvas.figure.subplots()
        self.time_ax.set_title('Time')
        main_ = QWidget()
        layout = QGridLayout(self)
        layout.addWidget(r1_label, 0, 0)
        layout.addWidget(self.r_1_slider, 0, 1)
        layout.addWidget(self.r_1_label, 0, 2)
        layout.addWidget(lower_label, 0, 3)
        layout.addWidget(self.lower_text, 0, 4)
        layout.addWidget(upper_label, 1, 3)
        layout.addWidget(self.upper_text, 1, 4)
        layout.addWidget(r2_label, 1, 0)
        layout.addWidget(self.r_2_slider, 1, 1)
        layout.addWidget(self.r_2_label, 1, 2)
        layout.addWidget(self.processer, 2, 0)
        layout.addWidget(self.time_plot, 3, 0, 1, 5)
        main_.setLayout(layout)
        self.setCentralWidget(main_)

    def update_r_1(self):
        """Keep the region 2/3 slider above the region 1/2 slider."""
        self.r_2_slider.setMinimum(self.r_1_slider.value() + 1)
        self.r_1_label.setText(str(self.r_1_slider.value()))

    def update_r_2(self):
        """Mirror the region 2/3 slider value into its label."""
        self.r_2_label.setText(str(self.r_2_slider.value()))

    def process(self):
        """Compute the ROI arrival-time spectrum and region percentages."""
        self.save_file.setEnabled(True)
        # self.save_roi.setEnabled(True)
        s1 = time.time()
        delt = (self.sync_time[2] - self.sync_time[1])
        self.lower = float(self.lower_text.text())
        self.upper = float(self.upper_text.text())
        self.arrival, self.height, self.raw = ROI_Arrival(self.sync_time, self.list_time,
                                                          self.num_sync, self.list_channel,
                                                          self.num_pulses, self.lower,
                                                          self.upper, self.calibration)
        # Histogram the arrivals into ~4 us wide bins over one sync period.
        num_bins = int(delt / 4)
        bins = np.linspace(0, delt, num_bins)
        self.bins = bins
        s = len(self.arrival)
        self.output = ROI_Location(self.arrival, bins, num_bins, s)
        r1, r2, r3 = 0, 0, 0
        print('Process ROI Arrivals in {:.3f}s'.format(time.time() - s1))
        # Accumulate counts into the three slider-defined regions.
        for i in range(num_bins):
            if bins[i] <= self.r_1_slider.value():
                r1 += self.output[i]
            elif bins[i] > self.r_1_slider.value() and bins[i] <= self.r_2_slider.value():
                r2 += self.output[i]
            else:
                r3 += self.output[i]
        self.time_ax.clear()
        self.time_ax.plot(bins, self.output, 'r*')
        self.time_ax.axvline(self.r_1_slider.value(), label='Region 1-2 divider at {:.2f}'.format(self.r_1_slider.value()))
        self.time_ax.axvline(self.r_2_slider.value(), label='Region 2-3 divider at {:.2f}'.format(self.r_2_slider.value()))
        # self.time_ax.set_yscale('log')
        self.time_ax.set_ylabel('Counts', fontsize=18)
        self.time_ax.set_xlabel(r'Arrival Time [$\mu s$]', fontsize=18)
        self.time_canvas.draw()
        self.done = True
        self.percentages = [r1 / (r1 + r2 + r3) * 100,
                            r2 / (r1 + r2 + r3) * 100,
                            r3 / (r1 + r2 + r3) * 100]
        QMessageBox.information(self,
                'ROI Perecentages', '''Region 1:{:.2f}%\nRegion 2:{:.2f}%\nRegion 3:{:.2f}%'''.format(
                    r1 / (r1 + r2 + r3) * 100,
                    r2 / (r1 + r2 + r3) * 100, r3 / (r1 + r2 + r3) * 100),
                QMessageBox.Ok)
        # print('Region 1 total ROI percentage: {:.2f}%'.format(r1/(r1+r2+r3)*100))
        # print('Region 2 total ROI percentage: {:.2f}%'.format(r2/(r1+r2+r3)*100))
        # print('Region 3 total ROI percentage: {:.2f}%'.format(r3/(r1+r2+r3)*100))

    def save_spectrum(self):
        """Save the region percentages and the binned spectrum to disk."""
        name = QFileDialog.getSaveFileName(self, 'File Name', '',
                                           'Text File (*.txt);;Comma Seperated File (*.csv)')
        # BUGFIX: getSaveFileName returns an empty string (not ' ') when the
        # dialog is cancelled, so the old check ``name[0] != ' '`` still
        # attempted to open a file with an empty name on cancel.
        if name[0]:
            with open(name[0], 'w') as f:
                f.write('%{:.2f},{:.2f},{:.2f}\n'.format(*self.percentages))
                for i in range(len(self.bins)):
                    f.write('{:.6f},{}\n'.format(self.bins[i], self.output[i]))
    # def save_roi_csv(self):
    #     name,ok=QFileDialog.getSaveFileName(self,'Safe File Name','',
    #                                      'Comma Seperated File (*.csv)')
    #     if ok:
    #         f=open(name,'w')
    #         f.write('Pulse_Height(MeV),Time(s)\n')
    #         print(len(self.height))
    #         for i in range(len(self.height)):
    #             f.write('{:.3f},{:.3f}\n'.format(self.height[i],self.raw[i]*1e-6))
    #         f.close()
    #     print('All finished')
|
[
"PyQt5.QtWidgets.QLabel",
"ROI_Arrival.ROI_Arrival",
"ROI_Arrival.ROI_Location",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtGui.QFont",
"time.time",
"matplotlib.backends.backend_qt5agg.FigureCanvas",
"matplotlib.figure.Figure",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QSlider",
"numpy.linspace",
"PyQt5.QtWidgets.QAction"
] |
[((1247, 1254), 'PyQt5.QtGui.QFont', 'QFont', ([], {}), '()\n', (1252, 1254), False, 'from PyQt5.QtGui import QFont\n'), ((1611, 1636), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""&Save Spectrum"""'], {}), "('&Save Spectrum')\n", (1618, 1636), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2041, 2075), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Region 1-2 divider: [us]"""'], {}), "('Region 1-2 divider: [us]')\n", (2047, 2075), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2131, 2165), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Region 2-3 divider: [us]"""'], {}), "('Region 2-3 divider: [us]')\n", (2137, 2165), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2237, 2259), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (2244, 2259), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2747, 2769), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (2754, 2769), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, 
QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3256, 3268), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (3262, 3268), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3468, 3480), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (3474, 3480), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3697, 3725), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Process"""', 'self'], {}), "('Process', self)\n", (3708, 3725), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3850, 3882), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Lower ROI: [MeV]"""', 'self'], {}), "('Lower ROI: [MeV]', self)\n", (3856, 3882), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3942, 3974), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Upper ROI: [MeV]"""', 'self'], {}), "('Upper ROI: [MeV]', self)\n", (3948, 3974), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, 
QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4047, 4062), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4056, 4062), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4180, 4195), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4189, 4195), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4321, 4330), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (4328, 4330), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4356, 4364), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (4362, 4364), False, 'from matplotlib.figure import Figure\n'), ((4390, 4420), 'matplotlib.backends.backend_qt5agg.FigureCanvas', 'FigureCanvas', (['self.time_figure'], {}), '(self.time_figure)\n', (4402, 4420), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((4447, 4488), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.time_canvas', 'self'], {}), '(self.time_canvas, self)\n', (4464, 4488), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as 
NavigationToolbar\n'), ((4503, 4516), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (4514, 4516), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4763, 4772), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (4770, 4772), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4788, 4805), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self'], {}), '(self)\n', (4799, 4805), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((5783, 5794), 'time.time', 'time.time', ([], {}), '()\n', (5792, 5794), False, 'import sys, time, winsound\n'), ((5986, 6127), 'ROI_Arrival.ROI_Arrival', 'ROI_Arrival', (['self.sync_time', 'self.list_time', 'self.num_sync', 'self.list_channel', 'self.num_pulses', 'self.lower', 'self.upper', 'self.calibration'], {}), '(self.sync_time, self.list_time, self.num_sync, self.\n list_channel, self.num_pulses, self.lower, self.upper, self.calibration)\n', (5997, 6127), False, 'from ROI_Arrival import ROI_Arrival, ROI_Location\n'), ((6296, 6326), 'numpy.linspace', 'np.linspace', (['(0)', 'delt', 'num_bins'], {}), '(0, delt, num_bins)\n', (6307, 6326), True, 'import numpy as np\n'), ((6396, 6441), 'ROI_Arrival.ROI_Location', 'ROI_Location', (['self.arrival', 'bins', 'num_bins', 's'], {}), '(self.arrival, bins, num_bins, s)\n', (6408, 6441), 
False, 'from ROI_Arrival import ROI_Arrival, ROI_Location\n'), ((8140, 8245), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QFileDialog.getSaveFileName', (['self', '"""File Name"""', '""""""', '"""Text File (*.txt);;Comma Seperated File (*.csv)"""'], {}), "(self, 'File Name', '',\n 'Text File (*.txt);;Comma Seperated File (*.csv)')\n", (8167, 8245), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((6517, 6528), 'time.time', 'time.time', ([], {}), '()\n', (6526, 6528), False, 'import sys, time, winsound\n')]
|
"""
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
from main import train, predict
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
trans2sec = 10 ** 12
def main():
    """Train the workload model, predict on the test set, and plot
    predicted vs. actual values together with the weighted R^2 score.
    """
    train_file = 'data/train.csv'
    test_file = 'data/test.csv'
    label_file = 'data/label.csv'
    # Load ground-truth labels, one float per line.
    test_label = []
    with open(label_file) as f:
        for line in f:
            test_label.append(float(line.strip()))
    # FIX: the path variables above were defined but the literals were
    # re-typed at the call sites; use the variables so they stay in sync.
    cluster_number, top_sql = train(train_file, 'data/', 2000, 40)
    print('Best cluster number is: ' + str(cluster_number))
    print('Typical SQL template is: ')
    print(top_sql)
    result = predict(test_file, 'data/', 0.1)
    # Plot predictions against ground truth, titled with the R^2 score.
    x = range(len(result))
    scores = r2_score(test_label, result, multioutput='variance_weighted')
    plt.scatter(x, test_label, marker='o', label='actual value')
    plt.scatter(x, result, marker='*', label='predicted value')
    plt.title("acc: " + str(scores * 100))
    plt.legend()
    plt.show()
# Entry point: run the end-to-end train/predict evaluation as a script.
if __name__ == '__main__':
    main()
|
[
"matplotlib.pyplot.show",
"main.predict",
"main.train",
"matplotlib.pyplot.legend",
"sklearn.metrics.r2_score",
"matplotlib.pyplot.scatter"
] |
[((968, 1010), 'main.train', 'train', (['"""data/train.csv"""', '"""data/"""', '(2000)', '(40)'], {}), "('data/train.csv', 'data/', 2000, 40)\n", (973, 1010), False, 'from main import train, predict\n'), ((1142, 1180), 'main.predict', 'predict', (['"""data/test.csv"""', '"""data/"""', '(0.1)'], {}), "('data/test.csv', 'data/', 0.1)\n", (1149, 1180), False, 'from main import train, predict\n'), ((1233, 1294), 'sklearn.metrics.r2_score', 'r2_score', (['test_label', 'result'], {'multioutput': '"""variance_weighted"""'}), "(test_label, result, multioutput='variance_weighted')\n", (1241, 1294), False, 'from sklearn.metrics import r2_score\n'), ((1299, 1359), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'test_label'], {'marker': '"""o"""', 'label': '"""actual value"""'}), "(x, test_label, marker='o', label='actual value')\n", (1310, 1359), True, 'import matplotlib.pyplot as plt\n'), ((1364, 1423), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'result'], {'marker': '"""*"""', 'label': '"""predicted value"""'}), "(x, result, marker='*', label='predicted value')\n", (1375, 1423), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1483), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1481, 1483), True, 'import matplotlib.pyplot as plt\n'), ((1488, 1498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1496, 1498), True, 'import matplotlib.pyplot as plt\n')]
|
from corner import corner
import numpy as np
# Shared styling for all corner plots. The `levels` values 1 - exp(-r**2/2)
# for r = 1, 2, 3 select the 2-D Gaussian mass fractions conventionally used
# as the 1/2/3-sigma contours.
CORNER_KWARGS = dict(
    smooth=0.9,
    label_kwargs=dict(fontsize=30),
    title_kwargs=dict(fontsize=16),
    color="tab:blue",
    truth_color="tab:orange",
    quantiles=[0.16, 0.84],  # bounds of the ~68% (1-sigma) credible interval
    levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9.0 / 2.0)),
    plot_density=False,
    plot_datapoints=False,
    fill_contours=True,
    max_n_ticks=3,
    verbose=False,
    use_math_text=True,
)

# LaTeX axis labels for known parameter names; unknown columns fall back to
# their raw name (underscores stripped) in plot_corner.
LABELS = dict(
    q=r"$q$",
    xeff=r"$\chi_{\rm eff}$",
    a_1=r"$a_1$",
    a_2=r"$a_2$",
    cos_tilt_1=r"$\cos \theta_1$",
    cos_tilt_2=r"$\cos \theta_2$",
)
def plot_corner(df, fname="corner.png"):
    """Render a corner plot of the samples in *df* and save it to *fname*.

    Column names found in LABELS get their LaTeX label; other columns use
    their raw name with underscores stripped.
    NOTE(review): underscores are removed, not replaced with spaces —
    confirm this fallback formatting is intended.
    """
    axis_labels = []
    for column in df.columns.values:
        axis_labels.append(LABELS.get(column, column.replace("_", "")))
    fig = corner(df, labels=axis_labels, **CORNER_KWARGS)
    fig.savefig(fname)
|
[
"corner.corner",
"numpy.exp"
] |
[((742, 784), 'corner.corner', 'corner', (['df'], {'labels': 'labels'}), '(df, labels=labels, **CORNER_KWARGS)\n', (748, 784), False, 'from corner import corner\n'), ((252, 264), 'numpy.exp', 'np.exp', (['(-0.5)'], {}), '(-0.5)\n', (258, 264), True, 'import numpy as np\n'), ((270, 280), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (276, 280), True, 'import numpy as np\n'), ((286, 304), 'numpy.exp', 'np.exp', (['(-9.0 / 2.0)'], {}), '(-9.0 / 2.0)\n', (292, 304), True, 'import numpy as np\n')]
|
#Ref: <NAME>
"""
# TTA - Should be called prediction time augmentation
#We can augment each input image, predict augmented images and average all predictions
"""
import os
import cv2
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import random
model = tf.keras.models.load_model("mitochondria_load_from_disk_focal_dice_50epochs.hdf5", compile=False)
image_directory = 'data2/test_images/test/'
mask_directory = 'data2/test_masks/test/'
SIZE = 256
image_dataset = []
mask_dataset = []
images = os.listdir(image_directory)
for i, image_name in enumerate(images): #Remember enumerate method adds a counter and returns the enumerate object
if (image_name.split('.')[1] == 'tif'):
#print(image_directory+image_name)
image = cv2.imread(image_directory+image_name)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
image_dataset.append(np.array(image))
#Iterate through all images in Uninfected folder, resize to 64 x 64
#Then save into the same numpy array 'dataset' but with label 1
masks = os.listdir(mask_directory)
for i, image_name in enumerate(masks):
if (image_name.split('.')[1] == 'tif'):
image = cv2.imread(mask_directory+image_name, 0)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
mask_dataset.append(np.array(image))
#
image_dataset = np.array(image_dataset) / 255.
#D not normalize masks, just rescale to 0 to 1.
mask_dataset = (np.array(mask_dataset)) /255.
#Demonstrate TTP on single image
n = random.randint(0, mask_dataset.shape[0])
temp_test_img = image_dataset[n,:,:,:]
temp_test_img = image_dataset[n,:,:,:]
temp_mask = mask_dataset[n,:,:]
p0 = model.predict(np.expand_dims(temp_test_img, axis=0))[0][:, :, 0]
p1 = model.predict(np.expand_dims(np.fliplr(temp_test_img), axis=0))[0][:, :, 0]
p1 = np.fliplr(p1)
p2 = model.predict(np.expand_dims(np.flipud(temp_test_img), axis=0))[0][:, :, 0]
p2 = np.flipud(p2)
p3 = model.predict(np.expand_dims(np.fliplr(np.flipud(temp_test_img)), axis=0))[0][:, :, 0]
p3 = np.fliplr(np.flipud(p3))
thresh = 0.3
p = (((p0 + p1 + p2 + p3) / 4) > thresh).astype(np.uint8)
plt.figure(figsize=(12, 12))
plt.subplot(231)
plt.title('Original mask')
plt.imshow(temp_mask, cmap='gray')
plt.subplot(232)
plt.title('Prediction No Aug')
plt.imshow(p0>thresh, cmap='gray')
plt.subplot(233)
plt.title('Prediction LR')
plt.imshow(p1>thresh, cmap='gray')
plt.subplot(234)
plt.title('Prediction UD')
plt.imshow(p2>thresh, cmap='gray')
plt.subplot(235)
plt.title('Prediction LR and UD')
plt.imshow(p3>thresh, cmap='gray')
plt.subplot(236)
plt.title('Average Prediction')
plt.imshow(p>thresh, cmap='gray')
plt.show()
#Now that we know the transformations are working, let us extend to all predictions
predictions = []
for image in image_dataset:
pred_original = model.predict(np.expand_dims(image, axis=0))[0][:, :, 0]
pred_lr = model.predict(np.expand_dims(np.fliplr(image), axis=0))[0][:, :, 0]
pred_lr = np.fliplr(pred_lr)
pred_ud = model.predict(np.expand_dims(np.flipud(image), axis=0))[0][:, :, 0]
pred_ud = np.flipud(pred_ud)
pred_lr_ud = model.predict(np.expand_dims(np.fliplr(np.flipud(image)), axis=0))[0][:, :, 0]
pred_lr_ud = np.fliplr(np.flipud(pred_lr_ud))
preds = (pred_original + pred_lr + pred_ud + pred_lr_ud) / 4
predictions.append(preds)
predictions = np.array(predictions)
threshold = 0.5
predictions_th = predictions > threshold
import random
test_img_number = random.randint(0, mask_dataset.shape[0]-1)
test_img = image_dataset[test_img_number]
ground_truth=mask_dataset[test_img_number]
#test_img_norm=test_img[:,:,0][:,:,None]
test_img_input=np.expand_dims(test_img, 0)
prediction = predictions_th[test_img_number]
plt.figure(figsize=(16, 8))
plt.subplot(231)
plt.title('Testing Image')
plt.imshow(test_img, cmap='gray')
plt.subplot(232)
plt.title('Testing Label')
plt.imshow(ground_truth, cmap='gray')
plt.subplot(233)
plt.title('Prediction on test image')
plt.imshow(prediction, cmap='gray')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"tensorflow.keras.models.load_model",
"random.randint",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.flipud",
"numpy.expand_dims",
"numpy.fliplr",
"matplotlib.pyplot.figure",
"cv2.imread",
"numpy.array",
"PIL.Image.fromarray",
"os.listdir"
] |
[((314, 416), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""mitochondria_load_from_disk_focal_dice_50epochs.hdf5"""'], {'compile': '(False)'}), "(\n 'mitochondria_load_from_disk_focal_dice_50epochs.hdf5', compile=False)\n", (340, 416), True, 'import tensorflow as tf\n'), ((564, 591), 'os.listdir', 'os.listdir', (['image_directory'], {}), '(image_directory)\n', (574, 591), False, 'import os\n'), ((1122, 1148), 'os.listdir', 'os.listdir', (['mask_directory'], {}), '(mask_directory)\n', (1132, 1148), False, 'import os\n'), ((1602, 1642), 'random.randint', 'random.randint', (['(0)', 'mask_dataset.shape[0]'], {}), '(0, mask_dataset.shape[0])\n', (1616, 1642), False, 'import random\n'), ((1911, 1924), 'numpy.fliplr', 'np.fliplr', (['p1'], {}), '(p1)\n', (1920, 1924), True, 'import numpy as np\n'), ((2012, 2025), 'numpy.flipud', 'np.flipud', (['p2'], {}), '(p2)\n', (2021, 2025), True, 'import numpy as np\n'), ((2223, 2251), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (2233, 2251), True, 'from matplotlib import pyplot as plt\n'), ((2252, 2268), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (2263, 2268), True, 'from matplotlib import pyplot as plt\n'), ((2269, 2295), 'matplotlib.pyplot.title', 'plt.title', (['"""Original mask"""'], {}), "('Original mask')\n", (2278, 2295), True, 'from matplotlib import pyplot as plt\n'), ((2296, 2330), 'matplotlib.pyplot.imshow', 'plt.imshow', (['temp_mask'], {'cmap': '"""gray"""'}), "(temp_mask, cmap='gray')\n", (2306, 2330), True, 'from matplotlib import pyplot as plt\n'), ((2331, 2347), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (2342, 2347), True, 'from matplotlib import pyplot as plt\n'), ((2348, 2378), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction No Aug"""'], {}), "('Prediction No Aug')\n", (2357, 2378), True, 'from matplotlib import pyplot as plt\n'), ((2379, 2415), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['(p0 > thresh)'], {'cmap': '"""gray"""'}), "(p0 > thresh, cmap='gray')\n", (2389, 2415), True, 'from matplotlib import pyplot as plt\n'), ((2414, 2430), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (2425, 2430), True, 'from matplotlib import pyplot as plt\n'), ((2431, 2457), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction LR"""'], {}), "('Prediction LR')\n", (2440, 2457), True, 'from matplotlib import pyplot as plt\n'), ((2458, 2494), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p1 > thresh)'], {'cmap': '"""gray"""'}), "(p1 > thresh, cmap='gray')\n", (2468, 2494), True, 'from matplotlib import pyplot as plt\n'), ((2493, 2509), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (2504, 2509), True, 'from matplotlib import pyplot as plt\n'), ((2510, 2536), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction UD"""'], {}), "('Prediction UD')\n", (2519, 2536), True, 'from matplotlib import pyplot as plt\n'), ((2537, 2573), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p2 > thresh)'], {'cmap': '"""gray"""'}), "(p2 > thresh, cmap='gray')\n", (2547, 2573), True, 'from matplotlib import pyplot as plt\n'), ((2572, 2588), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (2583, 2588), True, 'from matplotlib import pyplot as plt\n'), ((2589, 2622), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction LR and UD"""'], {}), "('Prediction LR and UD')\n", (2598, 2622), True, 'from matplotlib import pyplot as plt\n'), ((2623, 2659), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p3 > thresh)'], {'cmap': '"""gray"""'}), "(p3 > thresh, cmap='gray')\n", (2633, 2659), True, 'from matplotlib import pyplot as plt\n'), ((2658, 2674), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (2669, 2674), True, 'from matplotlib import pyplot as plt\n'), ((2675, 2706), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Prediction"""'], {}), 
"('Average Prediction')\n", (2684, 2706), True, 'from matplotlib import pyplot as plt\n'), ((2707, 2742), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p > thresh)'], {'cmap': '"""gray"""'}), "(p > thresh, cmap='gray')\n", (2717, 2742), True, 'from matplotlib import pyplot as plt\n'), ((2741, 2751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2749, 2751), True, 'from matplotlib import pyplot as plt\n'), ((3478, 3499), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (3486, 3499), True, 'import numpy as np\n'), ((3591, 3635), 'random.randint', 'random.randint', (['(0)', '(mask_dataset.shape[0] - 1)'], {}), '(0, mask_dataset.shape[0] - 1)\n', (3605, 3635), False, 'import random\n'), ((3775, 3802), 'numpy.expand_dims', 'np.expand_dims', (['test_img', '(0)'], {}), '(test_img, 0)\n', (3789, 3802), True, 'import numpy as np\n'), ((3849, 3876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (3859, 3876), True, 'from matplotlib import pyplot as plt\n'), ((3877, 3893), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (3888, 3893), True, 'from matplotlib import pyplot as plt\n'), ((3894, 3920), 'matplotlib.pyplot.title', 'plt.title', (['"""Testing Image"""'], {}), "('Testing Image')\n", (3903, 3920), True, 'from matplotlib import pyplot as plt\n'), ((3921, 3954), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_img'], {'cmap': '"""gray"""'}), "(test_img, cmap='gray')\n", (3931, 3954), True, 'from matplotlib import pyplot as plt\n'), ((3955, 3971), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (3966, 3971), True, 'from matplotlib import pyplot as plt\n'), ((3972, 3998), 'matplotlib.pyplot.title', 'plt.title', (['"""Testing Label"""'], {}), "('Testing Label')\n", (3981, 3998), True, 'from matplotlib import pyplot as plt\n'), ((3999, 4036), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ground_truth'], {'cmap': '"""gray"""'}), "(ground_truth, 
cmap='gray')\n", (4009, 4036), True, 'from matplotlib import pyplot as plt\n'), ((4037, 4053), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (4048, 4053), True, 'from matplotlib import pyplot as plt\n'), ((4054, 4091), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction on test image"""'], {}), "('Prediction on test image')\n", (4063, 4091), True, 'from matplotlib import pyplot as plt\n'), ((4092, 4127), 'matplotlib.pyplot.imshow', 'plt.imshow', (['prediction'], {'cmap': '"""gray"""'}), "(prediction, cmap='gray')\n", (4102, 4127), True, 'from matplotlib import pyplot as plt\n'), ((4129, 4139), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4137, 4139), True, 'from matplotlib import pyplot as plt\n'), ((1436, 1459), 'numpy.array', 'np.array', (['image_dataset'], {}), '(image_dataset)\n', (1444, 1459), True, 'import numpy as np\n'), ((1532, 1554), 'numpy.array', 'np.array', (['mask_dataset'], {}), '(mask_dataset)\n', (1540, 1554), True, 'import numpy as np\n'), ((2134, 2147), 'numpy.flipud', 'np.flipud', (['p3'], {}), '(p3)\n', (2143, 2147), True, 'import numpy as np\n'), ((3067, 3085), 'numpy.fliplr', 'np.fliplr', (['pred_lr'], {}), '(pred_lr)\n', (3076, 3085), True, 'import numpy as np\n'), ((3187, 3205), 'numpy.flipud', 'np.flipud', (['pred_ud'], {}), '(pred_ud)\n', (3196, 3205), True, 'import numpy as np\n'), ((813, 853), 'cv2.imread', 'cv2.imread', (['(image_directory + image_name)'], {}), '(image_directory + image_name)\n', (823, 853), False, 'import cv2\n'), ((868, 890), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (883, 890), False, 'from PIL import Image\n'), ((1248, 1290), 'cv2.imread', 'cv2.imread', (['(mask_directory + image_name)', '(0)'], {}), '(mask_directory + image_name, 0)\n', (1258, 1290), False, 'import cv2\n'), ((1305, 1327), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1320, 1327), False, 'from PIL import Image\n'), ((3334, 3355), 'numpy.flipud', 
'np.flipud', (['pred_lr_ud'], {}), '(pred_lr_ud)\n', (3343, 3355), True, 'import numpy as np\n'), ((963, 978), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (971, 978), True, 'import numpy as np\n'), ((1399, 1414), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1407, 1414), True, 'import numpy as np\n'), ((1773, 1810), 'numpy.expand_dims', 'np.expand_dims', (['temp_test_img'], {'axis': '(0)'}), '(temp_test_img, axis=0)\n', (1787, 1810), True, 'import numpy as np\n'), ((1859, 1883), 'numpy.fliplr', 'np.fliplr', (['temp_test_img'], {}), '(temp_test_img)\n', (1868, 1883), True, 'import numpy as np\n'), ((1960, 1984), 'numpy.flipud', 'np.flipud', (['temp_test_img'], {}), '(temp_test_img)\n', (1969, 1984), True, 'import numpy as np\n'), ((2923, 2952), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2937, 2952), True, 'import numpy as np\n'), ((2071, 2095), 'numpy.flipud', 'np.flipud', (['temp_test_img'], {}), '(temp_test_img)\n', (2080, 2095), True, 'import numpy as np\n'), ((3014, 3030), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (3023, 3030), True, 'import numpy as np\n'), ((3134, 3150), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (3143, 3150), True, 'import numpy as np\n'), ((3267, 3283), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (3276, 3283), True, 'import numpy as np\n')]
|
"""Added task model
Revision ID: ca6c6171cdbe
Revises: 989dbc01a9b0
Create Date: 2021-06-11 08:31:03.584401
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ca6c6171cdbe'
down_revision = '989dbc01a9b0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('task',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('assinged_on', sa.DateTime(), nullable=False),
sa.Column('due_on', sa.DateTime(), nullable=True),
sa.Column('admin_id', sa.Integer(), nullable=False),
sa.Column('is_completed', sa.Boolean(), nullable=True),
sa.Column('extractor_id', sa.Integer(), nullable=True),
sa.Column('management_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['admin_id'], ['admin.id'], ),
sa.ForeignKeyConstraint(['extractor_id'], ['extractor.id'], ),
sa.ForeignKeyConstraint(['management_id'], ['management.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('task')
# ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"sqlalchemy.DateTime",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Text",
"sqlalchemy.Boolean",
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((1295, 1316), 'alembic.op.drop_table', 'op.drop_table', (['"""task"""'], {}), "('task')\n", (1308, 1316), False, 'from alembic import op\n'), ((940, 991), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['admin_id']", "['admin.id']"], {}), "(['admin_id'], ['admin.id'])\n", (963, 991), True, 'import sqlalchemy as sa\n'), ((999, 1058), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['extractor_id']", "['extractor.id']"], {}), "(['extractor_id'], ['extractor.id'])\n", (1022, 1058), True, 'import sqlalchemy as sa\n'), ((1066, 1127), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['management_id']", "['management.id']"], {}), "(['management_id'], ['management.id'])\n", (1089, 1127), True, 'import sqlalchemy as sa\n'), ((1135, 1164), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1158, 1164), True, 'import sqlalchemy as sa\n'), ((431, 443), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (441, 443), True, 'import sqlalchemy as sa\n'), ((485, 506), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (494, 506), True, 'import sqlalchemy as sa\n'), ((554, 563), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (561, 563), True, 'import sqlalchemy as sa\n'), ((611, 624), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (622, 624), True, 'import sqlalchemy as sa\n'), ((667, 680), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (678, 680), True, 'import sqlalchemy as sa\n'), ((724, 736), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (734, 736), True, 'import sqlalchemy as sa\n'), ((785, 797), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (795, 797), True, 'import sqlalchemy as sa\n'), ((845, 857), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (855, 857), True, 'import sqlalchemy as sa\n'), ((906, 918), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (916, 918), True, 'import sqlalchemy as sa\n')]
|
import tensorflow as tf
class ResidualDense(tf.keras.layers.Layer):
def __init__(
self,
units,
activation=None,
dropout=None,
kernel_initializer=None,
kernel_regularizer=None,
output_activation=None
):
super(ResidualDense, self).__init__()
self.units = units
self.activation = activation
self.dropout = dropout
self.kernel_initializer = kernel_initializer
self.kernel_regularizer = kernel_regularizer
if output_activation is None:
self.output_activation = self.activation
else:
self.output_activation = self.output_activation
def build(self, input_shape):
last_dim_units = input_shape[-1].value
self.layer0 = tf.keras.layers.Dense(
units=self.units,
activation=self.activation,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer
)
if self.dropout is not None and self.dropout > 0:
self.dropout_layer = tf.keras.layers.Dropout(
rate=float(self.dropout)
)
self.layer1 = tf.keras.layers.Dense(
units=last_dim_units,
activation=tf.keras.activations.linear,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer
)
def call(self, inputs, training):
net = self.layer0(inputs)
if self.dropout is not None and self.dropout > 0:
net = self.dropout_layer(net, training=training)
net = self.layer1(net)
outputs = self.activation(inputs + net)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
class LayerNormalization(tf.keras.layers.Layer):
def __init__(self):
super(LayerNormalization, self).__init__()
def build(self, input_shape):
last_dim = input_shape[-1].value
self.scale = tf.Variable(
initial_value=tf.ones([last_dim]),
trainable=True,
name='layer_norm_scale',
dtype=tf.float32,
)
self.bias = tf.Variable(
initial_value=tf.zeros([last_dim]),
trainable=True,
name='layer_norm_bias',
dtype=tf.float32,
)
def call(self, inputs, epsilon=1e-6):
mean = tf.reduce_mean(
input_tensor=inputs, axis=[-1], keepdims=True
)
variance = tf.reduce_mean(
input_tensor=tf.square(inputs - mean), axis=[-1], keepdims=True
)
norm_inputs = (inputs - mean) * tf.math.rsqrt(variance + epsilon)
return norm_inputs * self.scale + self.bias
def compute_output_shape(self, input_shape):
return input_shape
class AdversarialNoise(tf.keras.layers.Layer):
def __init__(self, eps):
super(AdversarialNoise, self).__init__()
self.eps = eps
def _scale_l2(self, x):
ndim = tf.keras.backend.ndim(x)
feature_dims = [i for i in range(1, ndim)]
alpha = tf.reduce_max(
input_tensor=tf.abs(x),
axis=feature_dims,
keepdims=True
) + 1e-12
l2_norm = alpha * tf.sqrt(
tf.reduce_sum(
input_tensor=tf.pow(x / alpha, 2),
axis=feature_dims,
keepdims=True
) + 1e-6
)
x_unit = x / l2_norm
return x_unit
def _truncated_normal_eps(self, x):
ndim = tf.keras.backend.ndim(x)
sample_eps = tf.keras.backend.truncated_normal(
shape=tf.keras.backend.shape(x)[:1],
mean=tf.cast(self.eps, dtype=tf.float32) / 2.0,
stddev=tf.square(tf.cast(self.eps, dtype=tf.float32) / 4.0)
)
sample_eps = tf.tile(
input=tf.reshape(
sample_eps, [-1] + [1 for i in range(ndim-1)]
),
multiples=[1] + list(tf.keras.backend.int_shape(x)[1:])
)
return sample_eps
def call(self, inputs, loss, training):
if training:
inputs_grad = tf.gradients(
ys=loss,
xs=inputs,
aggregation_method=(
tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
)
)
inputs_grad_dense = tf.squeeze(
tf.stop_gradient(inputs_grad), axis=0
)
noise_unit = self._scale_l2(inputs_grad_dense)
sample_eps = self._truncated_normal_eps(noise_unit)
noise = noise_unit * sample_eps
return inputs + noise
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
class TargetedDense(tf.keras.layers.Dense):
def __init__(
self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
targeted_dropout_type=None,
target_rate=0.50,
dropout_rate=0.50,
**kwargs
):
super(TargetedDense, self).__init__(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs
)
self.targeted_dropout_type = targeted_dropout_type
self.target_rate = target_rate
self.dropout_rate = dropout_rate
def targeted_weight_dropout(
self, w, target_rate, dropout_rate, is_training
):
w_shape = w.shape
w = tf.reshape(w, [-1, w_shape[-1]])
norm = tf.abs(w)
idx = tf.cast(
target_rate * tf.cast(tf.shape(input=w)[0], dtype=tf.float32),
dtype=tf.int32
)
threshold = tf.sort(norm, axis=0)[idx]
mask = norm < threshold[None, :]
if not is_training:
w = (1.0 - tf.cast(mask, dtype=tf.float32)) * w
w = tf.reshape(w, w_shape)
return w
mask = tf.cast(
tf.logical_and(
tf.random.uniform(tf.shape(input=w)) < dropout_rate,
mask
), dtype=tf.float32
)
w = (1.0 - mask) * w
w = tf.reshape(w, w_shape)
return w
def targeted_unit_dropout(
self, w, target_rate, dropout_rate, is_training
):
w_shape = w.shape
w = tf.reshape(w, [-1, w_shape[-1]])
norm = tf.norm(tensor=w, axis=0)
idx = int(target_rate * int(w.shape[1]))
sorted_norms = tf.sort(norm)
threshold = sorted_norms[idx]
mask = (norm < threshold)[None, :]
mask = tf.tile(mask, [w.shape[0], 1])
mask = tf.compat.v1.where(
tf.logical_and(
(1.0 - dropout_rate) < tf.random.uniform(tf.shape(input=w)),
mask
),
tf.ones_like(w, dtype=tf.float32),
tf.zeros_like(w, dtype=tf.float32)
)
w = (1.0 - mask) * w
w = tf.reshape(w, w_shape)
return w
def call(self, inputs, training):
inputs = tf.convert_to_tensor(value=inputs, dtype=self.dtype)
rank = inputs._rank()
if (self.targeted_dropout_type == 'weight'):
self.kernel.assign(
self.targeted_weight_dropout(
self.kernel,
self.target_rate,
self.dropout_rate,
training
)
)
elif (self.targeted_dropout_type == 'unit'):
self.kernel.assign(
self.targeted_unit_dropout(
self.kernel,
self.target_rate,
self.dropout_rate,
training
)
)
if rank > 2:
# Broadcasting is required for the inputs.
outputs = tf.tensordot(inputs, self.kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not tf.executing_eagerly():
shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
outputs = tf.linalg.matmul(inputs, self.kernel)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
class VectorDense(tf.keras.layers.Layer):
def __init__(
self,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
kernel_regularizer=None,
dropout=None
):
super(VectorDense, self).__init__()
self.units = units
self.dropout = dropout
self.permute_layer = tf.keras.layers.Permute(
dims=(2, 1)
)
if self.dropout is not None and self.dropout > 0:
self.dropout_layer = tf.keras.layers.Dropout(
rate=float(self.dropout)
)
self.dense_layer = tf.keras.layers.Dense(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer
)
def call(self, inputs, training):
net = self.permute_layer(inputs)
if self.dropout is not None and self.dropout > 0:
net = self.dropout_layer(net, training=training)
net = self.dense_layer(net)
outputs = self.permute_layer(net)
return outputs
def compute_output_shape(self, input_shape):
output_shape = tf.TensorShape(input_shape).as_list()
output_shape[1] = self.units
return tf.TensorShape(output_shape)
|
[
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.keras.backend.int_shape",
"tensorflow.executing_eagerly",
"tensorflow.abs",
"tensorflow.TensorShape",
"tensorflow.sort",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.keras.layers.Permute",
"tensorflow.norm",
"tensorflow.nn.bias_add",
"tensorflow.ones",
"tensorflow.stop_gradient",
"tensorflow.keras.backend.shape",
"tensorflow.reduce_mean",
"tensorflow.keras.backend.ndim",
"tensorflow.ones_like",
"tensorflow.tile",
"tensorflow.linalg.matmul",
"tensorflow.convert_to_tensor",
"tensorflow.pow",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.tensordot",
"tensorflow.square",
"tensorflow.math.rsqrt"
] |
[((785, 949), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.units', 'activation': 'self.activation', 'kernel_initializer': 'self.kernel_initializer', 'kernel_regularizer': 'self.kernel_regularizer'}), '(units=self.units, activation=self.activation,\n kernel_initializer=self.kernel_initializer, kernel_regularizer=self.\n kernel_regularizer)\n', (806, 949), True, 'import tensorflow as tf\n'), ((1194, 1375), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'last_dim_units', 'activation': 'tf.keras.activations.linear', 'kernel_initializer': 'self.kernel_initializer', 'kernel_regularizer': 'self.kernel_regularizer'}), '(units=last_dim_units, activation=tf.keras.activations\n .linear, kernel_initializer=self.kernel_initializer, kernel_regularizer\n =self.kernel_regularizer)\n', (1215, 1375), True, 'import tensorflow as tf\n'), ((2427, 2488), 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': 'inputs', 'axis': '[-1]', 'keepdims': '(True)'}), '(input_tensor=inputs, axis=[-1], keepdims=True)\n', (2441, 2488), True, 'import tensorflow as tf\n'), ((3029, 3053), 'tensorflow.keras.backend.ndim', 'tf.keras.backend.ndim', (['x'], {}), '(x)\n', (3050, 3053), True, 'import tensorflow as tf\n'), ((3563, 3587), 'tensorflow.keras.backend.ndim', 'tf.keras.backend.ndim', (['x'], {}), '(x)\n', (3584, 3587), True, 'import tensorflow as tf\n'), ((6092, 6124), 'tensorflow.reshape', 'tf.reshape', (['w', '[-1, w_shape[-1]]'], {}), '(w, [-1, w_shape[-1]])\n', (6102, 6124), True, 'import tensorflow as tf\n'), ((6140, 6149), 'tensorflow.abs', 'tf.abs', (['w'], {}), '(w)\n', (6146, 6149), True, 'import tensorflow as tf\n'), ((6748, 6770), 'tensorflow.reshape', 'tf.reshape', (['w', 'w_shape'], {}), '(w, w_shape)\n', (6758, 6770), True, 'import tensorflow as tf\n'), ((6921, 6953), 'tensorflow.reshape', 'tf.reshape', (['w', '[-1, w_shape[-1]]'], {}), '(w, [-1, w_shape[-1]])\n', (6931, 6953), True, 'import tensorflow as tf\n'), 
((6969, 6994), 'tensorflow.norm', 'tf.norm', ([], {'tensor': 'w', 'axis': '(0)'}), '(tensor=w, axis=0)\n', (6976, 6994), True, 'import tensorflow as tf\n'), ((7067, 7080), 'tensorflow.sort', 'tf.sort', (['norm'], {}), '(norm)\n', (7074, 7080), True, 'import tensorflow as tf\n'), ((7177, 7207), 'tensorflow.tile', 'tf.tile', (['mask', '[w.shape[0], 1]'], {}), '(mask, [w.shape[0], 1])\n', (7184, 7207), True, 'import tensorflow as tf\n'), ((7530, 7552), 'tensorflow.reshape', 'tf.reshape', (['w', 'w_shape'], {}), '(w, w_shape)\n', (7540, 7552), True, 'import tensorflow as tf\n'), ((7626, 7678), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'inputs', 'dtype': 'self.dtype'}), '(value=inputs, dtype=self.dtype)\n', (7646, 7678), True, 'import tensorflow as tf\n'), ((9399, 9435), 'tensorflow.keras.layers.Permute', 'tf.keras.layers.Permute', ([], {'dims': '(2, 1)'}), '(dims=(2, 1))\n', (9422, 9435), True, 'import tensorflow as tf\n'), ((9658, 9821), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'units', 'activation': 'activation', 'use_bias': 'use_bias', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), '(units=units, activation=activation, use_bias=use_bias,\n kernel_initializer=kernel_initializer, kernel_regularizer=\n kernel_regularizer)\n', (9679, 9821), True, 'import tensorflow as tf\n'), ((10346, 10374), 'tensorflow.TensorShape', 'tf.TensorShape', (['output_shape'], {}), '(output_shape)\n', (10360, 10374), True, 'import tensorflow as tf\n'), ((2672, 2705), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['(variance + epsilon)'], {}), '(variance + epsilon)\n', (2685, 2705), True, 'import tensorflow as tf\n'), ((4180, 4284), 'tensorflow.gradients', 'tf.gradients', ([], {'ys': 'loss', 'xs': 'inputs', 'aggregation_method': 'tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N'}), '(ys=loss, xs=inputs, aggregation_method=tf.AggregationMethod.\n EXPERIMENTAL_ACCUMULATE_N)\n', (4192, 4284), True, 
'import tensorflow as tf\n'), ((6305, 6326), 'tensorflow.sort', 'tf.sort', (['norm'], {'axis': '(0)'}), '(norm, axis=0)\n', (6312, 6326), True, 'import tensorflow as tf\n'), ((6478, 6500), 'tensorflow.reshape', 'tf.reshape', (['w', 'w_shape'], {}), '(w, w_shape)\n', (6488, 6500), True, 'import tensorflow as tf\n'), ((7397, 7430), 'tensorflow.ones_like', 'tf.ones_like', (['w'], {'dtype': 'tf.float32'}), '(w, dtype=tf.float32)\n', (7409, 7430), True, 'import tensorflow as tf\n'), ((7444, 7478), 'tensorflow.zeros_like', 'tf.zeros_like', (['w'], {'dtype': 'tf.float32'}), '(w, dtype=tf.float32)\n', (7457, 7478), True, 'import tensorflow as tf\n'), ((8411, 8463), 'tensorflow.tensordot', 'tf.tensordot', (['inputs', 'self.kernel', '[[rank - 1], [0]]'], {}), '(inputs, self.kernel, [[rank - 1], [0]])\n', (8423, 8463), True, 'import tensorflow as tf\n'), ((8774, 8811), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (8790, 8811), True, 'import tensorflow as tf\n'), ((8860, 8894), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['outputs', 'self.bias'], {}), '(outputs, self.bias)\n', (8874, 8894), True, 'import tensorflow as tf\n'), ((2058, 2077), 'tensorflow.ones', 'tf.ones', (['[last_dim]'], {}), '([last_dim])\n', (2065, 2077), True, 'import tensorflow as tf\n'), ((2243, 2263), 'tensorflow.zeros', 'tf.zeros', (['[last_dim]'], {}), '([last_dim])\n', (2251, 2263), True, 'import tensorflow as tf\n'), ((2571, 2595), 'tensorflow.square', 'tf.square', (['(inputs - mean)'], {}), '(inputs - mean)\n', (2580, 2595), True, 'import tensorflow as tf\n'), ((4442, 4471), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['inputs_grad'], {}), '(inputs_grad)\n', (4458, 4471), True, 'import tensorflow as tf\n'), ((8556, 8578), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (8576, 8578), True, 'import tensorflow as tf\n'), ((10256, 10283), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), 
'(input_shape)\n', (10270, 10283), True, 'import tensorflow as tf\n'), ((3161, 3170), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (3167, 3170), True, 'import tensorflow as tf\n'), ((3666, 3691), 'tensorflow.keras.backend.shape', 'tf.keras.backend.shape', (['x'], {}), '(x)\n', (3688, 3691), True, 'import tensorflow as tf\n'), ((3718, 3753), 'tensorflow.cast', 'tf.cast', (['self.eps'], {'dtype': 'tf.float32'}), '(self.eps, dtype=tf.float32)\n', (3725, 3753), True, 'import tensorflow as tf\n'), ((6425, 6456), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (6432, 6456), True, 'import tensorflow as tf\n'), ((3794, 3829), 'tensorflow.cast', 'tf.cast', (['self.eps'], {'dtype': 'tf.float32'}), '(self.eps, dtype=tf.float32)\n', (3801, 3829), True, 'import tensorflow as tf\n'), ((6207, 6224), 'tensorflow.shape', 'tf.shape', ([], {'input': 'w'}), '(input=w)\n', (6215, 6224), True, 'import tensorflow as tf\n'), ((6609, 6626), 'tensorflow.shape', 'tf.shape', ([], {'input': 'w'}), '(input=w)\n', (6617, 6626), True, 'import tensorflow as tf\n'), ((7329, 7346), 'tensorflow.shape', 'tf.shape', ([], {'input': 'w'}), '(input=w)\n', (7337, 7346), True, 'import tensorflow as tf\n'), ((3338, 3358), 'tensorflow.pow', 'tf.pow', (['(x / alpha)', '(2)'], {}), '(x / alpha, 2)\n', (3344, 3358), True, 'import tensorflow as tf\n'), ((4017, 4046), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['x'], {}), '(x)\n', (4043, 4046), True, 'import tensorflow as tf\n')]
|
"""
Transformers
------------
Transformers defines the mutations that can be applied. The ``CATEGORIES`` dictionary lists all
valid category codes that are valid filters. The primary classes are:
1. ``LocIndex``
2. ``MutateAST``
The ``LocIndex`` is a location index within a given Abstract Syntax Tree (AST) that can be mutated.
The ``MutateAST`` class walks the AST of a given source file to identify all of the locations,
and optionally create the mutation at that node. These are implemented in the ``Genome`` object.
``MutateAST`` is constructed from ``MutateBase`` and the appropriate mixin class - either
``ConstantMixin`` for Python 3.8, or ``NameConstantMixin`` for Python 3.7.
"""
import ast
import logging
import sys
from pathlib import Path
####################################################################################################
# AST TRANSFORMERS
####################################################################################################
from typing import Any, Dict, List, NamedTuple, Optional, Set, Type, Union
try:
# Python 3.8
from typing import Protocol
except ImportError:
# Python 3.7
from typing_extensions import Protocol # type: ignore
# Module-level logger; handlers/levels are configured by the consuming application.
LOGGER = logging.getLogger(__name__)

# Two-letter category codes for each supported mutation class.
# These are the valid filter codes, and each entry is referenced by key in
# get_compatible_operation_sets() to tag its MutationOpSet.
CATEGORIES = {
    "AugAssign": "aa",
    "BinOp": "bn",
    "BinOpBC": "bc",
    "BinOpBS": "bs",
    "BoolOp": "bl",
    "Compare": "cp",
    "CompareIn": "cn",
    "CompareIs": "cs",
    "If": "if",
    "Index": "ix",
    "NameConstant": "nc",
    "SliceUS": "su",
}
####################################################################################################
# CORE TYPES
####################################################################################################
class LocIndex(NamedTuple):
    """Location index within AST to mark mutation targets.

    The ``end_lineno`` and ``end_col_offset`` properties are set to ``None`` by default as they
    are only used distinctly in Python 3.8.
    """

    # AST class name or custom category key e.g. "BinOp", "CompareIs", "SliceUS"
    ast_class: str
    lineno: int
    col_offset: int
    op_type: Any  # varies based on the visit_Node definition in MutateAST

    # New in Python 3.8 AST: https://docs.python.org/3/whatsnew/3.8.html#improved-modules
    # These values are always set to None if running Python 3.7.
    # The NodeSpan class is used to manage setting the values
    end_lineno: Optional[int] = None
    end_col_offset: Optional[int] = None
class MutationOpSet(NamedTuple):
    """Container for compatible mutation operations. Also used in the CLI display."""

    name: str  # display name e.g. "AugAssign"
    desc: str  # human-readable description used in the CLI display
    operations: Set[Any]  # interchangeable operations: AST types and/or custom string markers
    category: str  # two-letter category code, a value from CATEGORIES
class LocIndexNode(Protocol):
    """Type protocol for AST Nodes that include lineno and col_offset properties.

    Structural typing: any object carrying these two attributes satisfies the
    protocol, which lets NodeSpan wrap nodes without naming a concrete AST class.
    """

    lineno: int
    col_offset: int
class NodeSpan(NamedTuple):
    """Uniform location accessor for AST nodes across Python 3.7 and 3.8.

    Exposes the four location attributes used to populate a ``LocIndex``. The
    ``end_*`` attributes only exist on Python 3.8 nodes, so they are read with
    ``getattr`` and fall back to ``None`` on Python 3.7.
    """

    node: LocIndexNode

    @property
    def lineno(self) -> int:
        """Line number of the wrapped node."""
        return self.node.lineno

    @property
    def col_offset(self) -> int:
        """Column offset of the wrapped node."""
        return self.node.col_offset

    @property
    def end_lineno(self) -> Optional[int]:
        """End line number: defined on Python 3.8 nodes, ``None`` on Python 3.7."""
        return getattr(self.node, "end_lineno", None)

    @property
    def end_col_offset(self) -> Optional[int]:
        """End column offset: defined on Python 3.8 nodes, ``None`` on Python 3.7."""
        return getattr(self.node, "end_col_offset", None)
####################################################################################################
# MUTATE AST Definitions
# Includes MutateBase and Mixins for 3.7 and 3.8 AST support
# MutateAST is constructed from Base + Mixins depending on sys.version_info
####################################################################################################
class MutateBase(ast.NodeTransformer):
    """AST NodeTransformer to replace nodes with mutations by visits."""

    def __init__(
        self,
        target_idx: Optional[LocIndex] = None,
        mutation: Optional[Any] = None,
        readonly: bool = False,
        src_file: Optional[Union[Path, str]] = None,
    ) -> None:
        """Create the AST node transformer for mutations.

        If readonly is set to True then no transformations are applied;
        however, the locs attribute is updated with the locations of nodes that could
        be transformed. This allows the class to function both as an inspection method
        and as a mutation transformer.

        Note that different nodes handle the ``LocIndex`` differently based on the context. For
        example, ``visit_BinOp`` uses direct AST types, while ``visit_NameConstant`` uses values,
        and ``visit_AugAssign`` uses custom strings in a dictionary mapping.

        All ``visit_`` methods take the ``node`` as an argument and rely on the class properties.

        This MutateBase class is designed to be implemented with the appropriate Mixin Class
        for supporting either Python 3.7 or Python 3.8 ASTs. If the base class is used
        directly certain operations - like ``visit_If`` and ``visit_NameConstant`` will not
        work as intended.

        Args:
            target_idx: Location index for the mutation in the AST
            mutation: the mutation to apply, may be a type or a value
            readonly: flag for read-only operations, used to visit nodes instead of transform
            src_file: Source file name, used for logging purposes
        """
        # Accumulates every mutation-capable location found while visiting.
        self.locs: Set[LocIndex] = set()

        # managed via @property
        self._target_idx = target_idx
        self._mutation = mutation
        self._readonly = readonly
        self._src_file = src_file

    @property
    def target_idx(self) -> Optional[LocIndex]:
        """Location index for the mutation in the AST."""
        return self._target_idx

    @property
    def mutation(self) -> Optional[Any]:
        """The mutation to apply, may be a type or a value."""
        return self._mutation

    @property
    def readonly(self) -> bool:
        """A flag for read-only operations, used to visit nodes instead of transform."""
        return self._readonly

    @property
    def src_file(self) -> Optional[Union[Path, str]]:
        """Source file name, used for logging purposes."""
        return self._src_file

    @property
    def constant_type(self) -> Union[Type[ast.NameConstant], Type[ast.Constant]]:
        """Overridden using the MixinClasses for NameConstant(3.7) vs. Constant(3.8)."""
        raise NotImplementedError

    def visit_AugAssign(self, node: ast.AugAssign) -> ast.AST:
        """AugAssign is ``-=, +=, /=, *=`` for augmented assignment.

        Args:
            node: the AugAssign node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_AugAssign: {self.src_file}:"
        # custom mapping of string keys to ast operations that can be used
        # in the nodes since these overlap with BinOp types
        aug_mappings = {
            "AugAssign_Add": ast.Add,
            "AugAssign_Sub": ast.Sub,
            "AugAssign_Mult": ast.Mult,
            "AugAssign_Div": ast.Div,
        }

        rev_mappings = {v: k for k, v in aug_mappings.items()}
        idx_op = rev_mappings.get(type(node.op), None)

        # edge case protection in case the mapping isn't known for substitution
        # in that instance, return the node and take no action
        if not idx_op:
            LOGGER.debug(
                "%s (%s, %s): unknown aug_assignment: %s",
                log_header,
                node.lineno,
                node.col_offset,
                type(node.op),
            )
            return node

        node_span = NodeSpan(node)
        idx = LocIndex(
            ast_class="AugAssign",
            lineno=node_span.lineno,
            col_offset=node_span.col_offset,
            op_type=idx_op,
            end_lineno=node_span.end_lineno,
            end_col_offset=node_span.end_col_offset,
        )
        self.locs.add(idx)

        if idx == self.target_idx and self.mutation in aug_mappings and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
            return ast.copy_location(
                ast.AugAssign(
                    target=node.target,
                    op=aug_mappings[self.mutation](),  # awkward syntax to call type from mapping
                    value=node.value,
                ),
                node,
            )

        LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
        return node

    def visit_BinOp(self, node: ast.BinOp) -> ast.AST:
        """BinOp nodes are bit-shifts and general operators like add, divide, etc.

        Args:
            node: the BinOp node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_BinOp: {self.src_file}:"

        # default case for this node, can be BinOpBC or BinOpBS
        ast_class = "BinOp"
        op_type = type(node.op)

        # binop_bit_cmp_types: Set[type] = {ast.BitAnd, ast.BitOr, ast.BitXor}
        if op_type in {ast.BitAnd, ast.BitOr, ast.BitXor}:
            ast_class = "BinOpBC"

        # binop_bit_shift_types: Set[type] = {ast.LShift, ast.RShift}
        if op_type in {ast.LShift, ast.RShift}:
            ast_class = "BinOpBS"

        node_span = NodeSpan(node)
        idx = LocIndex(
            ast_class=ast_class,
            lineno=node_span.lineno,
            col_offset=node_span.col_offset,
            op_type=op_type,
            end_lineno=node_span.end_lineno,
            end_col_offset=node_span.end_col_offset,
        )
        self.locs.add(idx)

        if idx == self.target_idx and self.mutation and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
            return ast.copy_location(
                ast.BinOp(left=node.left, op=self.mutation(), right=node.right), node
            )

        LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
        return node

    def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST:
        """Boolean operations, AND/OR.

        Args:
            node: the BoolOp node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_BoolOp: {self.src_file}:"

        node_span = NodeSpan(node)
        idx = LocIndex(
            ast_class="BoolOp",
            lineno=node_span.lineno,
            col_offset=node_span.col_offset,
            op_type=type(node.op),
            end_lineno=node_span.end_lineno,
            end_col_offset=node_span.end_col_offset,
        )
        self.locs.add(idx)

        if idx == self.target_idx and self.mutation and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
            return ast.copy_location(ast.BoolOp(op=self.mutation(), values=node.values), node)

        LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
        return node

    def visit_Compare(self, node: ast.Compare) -> ast.AST:
        """Compare nodes are ``==, >=, is, in`` etc. There are multiple Compare categories.

        Args:
            node: the Compare node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_Compare: {self.src_file}:"

        # taking only the first operation in the compare node
        # in basic testing, things like (a==b)==1 still end up with lists of 1,
        # but since the AST docs specify a list of operations this seems safer.
        # idx = LocIndex("CompareIs", node.lineno, node.col_offset, type(node.ops[0]))
        cmpop_is_types: Set[type] = {ast.Is, ast.IsNot}
        cmpop_in_types: Set[type] = {ast.In, ast.NotIn}
        op_type = type(node.ops[0])

        node_span = NodeSpan(node)
        locidx_kwargs = {
            "lineno": node_span.lineno,
            "col_offset": node_span.col_offset,
            "op_type": op_type,
            "end_lineno": node_span.end_lineno,
            "end_col_offset": node_span.end_col_offset,
        }

        if op_type in cmpop_is_types:
            idx = LocIndex(ast_class="CompareIs", **locidx_kwargs)  # type: ignore
        elif op_type in cmpop_in_types:
            idx = LocIndex(ast_class="CompareIn", **locidx_kwargs)  # type: ignore
        else:
            idx = LocIndex(ast_class="Compare", **locidx_kwargs)  # type: ignore

        self.locs.add(idx)

        if idx == self.target_idx and self.mutation and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)

            # TODO: Determine when/how this case would actually be called
            if len(node.ops) > 1:
                # unlikely test case where the comparison has multiple values
                LOGGER.debug("%s multiple compare ops in node, len: %s", log_header, len(node.ops))
                existing_ops = [i for i in node.ops]
                mutation_ops = [self.mutation()] + existing_ops[1:]

                return ast.copy_location(
                    ast.Compare(left=node.left, ops=mutation_ops, comparators=node.comparators),
                    node,
                )

            else:
                # typical comparison case, will also catch (a==b)==1 as an example.
                LOGGER.debug("%s single comparison node operation", log_header)
                return ast.copy_location(
                    ast.Compare(
                        left=node.left, ops=[self.mutation()], comparators=node.comparators
                    ),
                    node,
                )

        LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
        return node

    def visit_If(self, node: ast.If) -> ast.AST:
        """If statements e.g. If ``x == y`` is transformed to ``if True`` and ``if False``.

        This visit method only works when the appropriate Mixin is used.

        Args:
            node: the If node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_If: {self.src_file}:"

        # default for a comparison is "If_Statement" which will be changed to True/False
        # If_Statement is not set as a mutation target, controlled in get_mutations function
        if_type = "If_Statement"

        # Py 3.7 vs 3.8 - 3.7 uses NameConstant, 3.8 uses Constant
        if_mutations = {
            "If_True": self.constant_type(value=True),
            "If_False": self.constant_type(value=False),
        }
        if type(node.test) == self.constant_type:
            if_type: str = f"If_{bool(node.test.value)}"  # type: ignore

        node_span = NodeSpan(node)
        idx = LocIndex(
            ast_class="If",
            lineno=node_span.lineno,
            col_offset=node_span.col_offset,
            op_type=if_type,
            end_lineno=node_span.end_lineno,
            end_col_offset=node_span.end_col_offset,
        )
        self.locs.add(idx)

        if idx == self.target_idx and self.mutation and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
            return ast.fix_missing_locations(
                ast.copy_location(
                    ast.If(test=if_mutations[self.mutation], body=node.body, orelse=node.orelse),
                    node,
                )
            )

        LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
        return node

    def visit_Index(self, node: ast.Index) -> ast.AST:
        """Index visit e.g. ``i[0], i[0][1]``.

        Args:
            node: the Index node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_Index: {self.src_file}:"

        # Index Node has a value attribute that can be either Num node or UnaryOp node
        # depending on whether the value is positive or negative.
        n_value = node.value
        idx = None

        index_mutations = {
            "Index_NumZero": ast.Num(n=0),
            "Index_NumPos": ast.Num(n=1),
            "Index_NumNeg": ast.UnaryOp(op=ast.USub(), operand=ast.Num(n=1)),
        }

        # location kwargs are taken from the inner value node, not the Index wrapper
        node_span = NodeSpan(n_value)
        locidx_kwargs = {
            "ast_class": "Index",
            "lineno": node_span.lineno,
            "col_offset": node_span.col_offset,
            "end_lineno": node_span.end_lineno,
            "end_col_offset": node_span.end_col_offset,
        }

        # index is a non-negative number e.g. i[0], i[1]
        if isinstance(n_value, ast.Num):
            # positive integer case
            if n_value.n != 0:
                idx = LocIndex(op_type="Index_NumPos", **locidx_kwargs)  # type: ignore
                self.locs.add(idx)

            # zero value case
            else:
                idx = LocIndex(op_type="Index_NumZero", **locidx_kwargs)  # type: ignore
                self.locs.add(idx)

        # index is a negative number e.g. i[-1]
        if isinstance(n_value, ast.UnaryOp):
            idx = LocIndex(op_type="Index_NumNeg", **locidx_kwargs)  # type: ignore
            self.locs.add(idx)

        if idx == self.target_idx and self.mutation and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
            mutation = index_mutations[self.mutation]

            # uses AST.fix_missing_locations since the values of ast.Num and ast.UnaryOp also need
            # lineno and col-offset values. This is a recursive fix.
            return ast.fix_missing_locations(ast.copy_location(ast.Index(value=mutation), node))

        LOGGER.debug(
            "%s (%s, %s): no mutations applied.", log_header, n_value.lineno, n_value.col_offset
        )
        return node

    def mixin_NameConstant(self, node: Union[ast.NameConstant, ast.Constant]) -> ast.AST:
        """Constants: ``True, False, None``.

        This method is called by using the Mixin classes for handling the difference of
        ast.NameConstant (Py 3.7) an ast.Constant (Py 3.8).

        Args:
            node: the constant node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_NameConstant: {self.src_file}:"

        node_span = NodeSpan(node)
        idx = LocIndex(
            ast_class="NameConstant",
            lineno=node_span.lineno,
            col_offset=node_span.col_offset,
            op_type=node.value,
            end_lineno=node_span.end_lineno,
            end_col_offset=node_span.end_col_offset,
        )
        self.locs.add(idx)

        # no truthiness check on self.mutation here: None is a valid mutation value
        if idx == self.target_idx and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
            return ast.copy_location(self.constant_type(value=self.mutation), node)

        LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
        return node

    def visit_Subscript(self, node: ast.Subscript) -> ast.AST:
        """Subscript slice operations e.g., ``x[1:]`` or ``y[::2]``.

        Args:
            node: the Subscript node to record and potentially mutate

        Returns:
            The mutated node when it matches ``target_idx``, otherwise the original node.
        """
        self.generic_visit(node)
        log_header = f"visit_Subscript: {self.src_file}:"
        idx = None

        # Subscripts have slice properties with col/lineno, slice itself does not have line/col
        # Index is also a valid Subscript slice property
        # NOTE(review): local name shadows the builtin ``slice`` within this method
        slice = node.slice
        if not isinstance(slice, ast.Slice):
            LOGGER.debug("%s (%s, %s): not a slice node.", log_header, node.lineno, node.col_offset)
            return node

        # Built "on the fly" based on the various conditions for operation types
        # The RangeChange options are added in the later if/else cases
        slice_mutations: Dict[str, ast.Slice] = {
            "Slice_UnboundUpper": ast.Slice(lower=slice.upper, upper=None, step=slice.step),
            "Slice_UnboundLower": ast.Slice(lower=None, upper=slice.lower, step=slice.step),
            "Slice_Unbounded": ast.Slice(lower=None, upper=None, step=slice.step),
        }

        node_span = NodeSpan(node)
        locidx_kwargs = {
            "lineno": node_span.lineno,
            "col_offset": node_span.col_offset,
            "end_lineno": node_span.end_lineno,
            "end_col_offset": node_span.end_col_offset,
        }

        # Unbounded Swap Operation
        # upper slice range e.g. x[:2] will become x[2:]
        if slice.lower is None and slice.upper is not None:
            idx = LocIndex(
                ast_class="SliceUS", op_type="Slice_UnboundLower", **locidx_kwargs  # type: ignore
            )
            self.locs.add(idx)

        # lower slice range e.g. x[1:] will become x[:1]
        if slice.upper is None and slice.lower is not None:
            idx = LocIndex(
                ast_class="SliceUS", op_type="Slice_UnboundUpper", **locidx_kwargs  # type: ignore
            )
            self.locs.add(idx)

        # Apply Mutation
        if idx == self.target_idx and not self.readonly:
            LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
            mutation = slice_mutations[str(self.mutation)]

            # uses AST.fix_missing_locations since the values of ast.Num and ast.UnaryOp also need
            # lineno and col-offset values. This is a recursive fix.
            return ast.fix_missing_locations(
                ast.copy_location(
                    ast.Subscript(value=node.value, slice=mutation, ctx=node.ctx), node
                )
            )

        LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
        return node
class NameConstantMixin:
    """Python 3.7 mixin for MutateBase: routes ``ast.NameConstant`` nodes."""

    @property
    def constant_type(self) -> Type[ast.NameConstant]:
        """The AST class representing named constants on Python 3.7."""
        return ast.NameConstant

    def visit_NameConstant(self, node: ast.NameConstant) -> ast.AST:
        """Delegate ``True``/``False``/``None`` constants to the shared mixin handler."""
        return self.mixin_NameConstant(node)  # type: ignore
class ConstantMixin:
    """Python 3.8 mixin for MutateBase: routes ``ast.Constant`` nodes."""

    @property
    def constant_type(self) -> Type[ast.Constant]:
        """The AST class representing constants on Python 3.8."""
        return ast.Constant

    def visit_Constant(self, node: ast.Constant) -> ast.AST:
        """Constants: https://bugs.python.org/issue32892

        On Python 3.8 ``ast.Constant`` covers what used to be NameConstant
        (``True, False, None``), Num (int/float), and Str (str) nodes.
        """
        # Only NameConstant-style values mutate, matching the Python 3.7 behavior;
        # numeric and string constants pass through untouched.
        if node.value is None or isinstance(node.value, bool):
            return self.mixin_NameConstant(node)  # type: ignore
        return node
# Select the mixin that matches the running interpreter's AST flavor.
if sys.version_info >= (3, 8):
    # PYTHON 3.8: ast.Constant
    class MutateAST(ConstantMixin, MutateBase):
        """Implementation of the MutateAST class based on running environment."""

else:
    # PYTHON 3.7: ast.NameConstant
    class MutateAST(NameConstantMixin, MutateBase):
        """Implementation of the MutateAST class based on running environment."""
####################################################################################################
# TRANSFORMER FUNCTIONS
####################################################################################################
def get_compatible_operation_sets() -> List[MutationOpSet]:
    """Utility function to return a list of compatible AST mutations with names.

    All of the mutation transformation sets that are supported by mutatest are defined here.
    See: https://docs.python.org/3/library/ast.html#abstract-grammar

    This is used to create the search space in finding mutations for a target, and
    also to list the support operations in the CLI help function.

    Returns:
        List of ``MutationOpSets`` that have substitutable operations
    """
    # AST operations that are sensible mutations for each other
    binop_types: Set[type] = {ast.Add, ast.Sub, ast.Div, ast.Mult, ast.Pow, ast.Mod, ast.FloorDiv}
    binop_bit_cmp_types: Set[type] = {ast.BitAnd, ast.BitOr, ast.BitXor}
    binop_bit_shift_types: Set[type] = {ast.LShift, ast.RShift}
    cmpop_types: Set[type] = {ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE}
    cmpop_is_types: Set[type] = {ast.Is, ast.IsNot}
    cmpop_in_types: Set[type] = {ast.In, ast.NotIn}
    boolop_types: Set[type] = {ast.And, ast.Or}

    # Python built-in constants (singletons) that can be used with NameConstant AST node
    named_const_singletons: Set[Union[bool, None]] = {True, False, None}

    # Custom augmentation ops to differentiate from bin_op types
    # these are defined for substitution within the visit_AugAssign node and need to match
    aug_assigns: Set[str] = {"AugAssign_Add", "AugAssign_Sub", "AugAssign_Mult", "AugAssign_Div"}

    # Custom references for substitutions of zero, positive, and negative iterable indicies
    index_types: Set[str] = {"Index_NumPos", "Index_NumNeg", "Index_NumZero"}

    # Custom references for If statement substitutions
    # only If_True and If_False will be applied as mutations
    if_types: Set[str] = {"If_True", "If_False", "If_Statement"}

    # Custom references for subscript substitutions for slice mutations
    slice_bounded_types: Set[str] = {"Slice_UnboundUpper", "Slice_UnboundLower", "Slice_Unbounded"}

    return [
        MutationOpSet(
            name="AugAssign",
            desc="Augmented assignment e.g. += -= /= *=",
            operations=aug_assigns,
            category=CATEGORIES["AugAssign"],
        ),
        MutationOpSet(
            name="BinOp",
            desc="Binary operations e.g. + - * / %",
            operations=binop_types,
            category=CATEGORIES["BinOp"],
        ),
        MutationOpSet(
            name="BinOp Bit Comparison",
            desc="Bitwise comparison operations e.g. x & y, x | y, x ^ y",
            operations=binop_bit_cmp_types,
            category=CATEGORIES["BinOpBC"],
        ),
        MutationOpSet(
            name="BinOp Bit Shifts",
            desc="Bitwise shift operations e.g. << >>",
            operations=binop_bit_shift_types,
            category=CATEGORIES["BinOpBS"],
        ),
        MutationOpSet(
            name="BoolOp",
            desc="Boolean operations e.g. and or",
            operations=boolop_types,
            category=CATEGORIES["BoolOp"],
        ),
        MutationOpSet(
            name="Compare",
            desc="Comparison operations e.g. == >= <= > <",
            operations=cmpop_types,
            category=CATEGORIES["Compare"],
        ),
        MutationOpSet(
            name="Compare In",
            desc="Compare membership e.g. in, not in",
            operations=cmpop_in_types,
            category=CATEGORIES["CompareIn"],
        ),
        MutationOpSet(
            name="Compare Is",
            # Fixed user-facing typo: "Comapre" -> "Compare"
            desc="Compare identity e.g. is, is not",
            operations=cmpop_is_types,
            category=CATEGORIES["CompareIs"],
        ),
        MutationOpSet(
            name="If",
            desc="If statement tests e.g. original statement, True, False",
            operations=if_types,
            category=CATEGORIES["If"],
        ),
        MutationOpSet(
            name="Index",
            desc="Index values for iterables e.g. i[-1], i[0], i[0][1]",
            operations=index_types,
            category=CATEGORIES["Index"],
        ),
        MutationOpSet(
            name="NameConstant",
            desc="Named constant mutations e.g. True, False, None",
            operations=named_const_singletons,
            category=CATEGORIES["NameConstant"],
        ),
        MutationOpSet(
            name="Slice Unbounded Swap",
            desc=(
                "Slice mutations to swap lower/upper values, x[2:] (unbound upper) to x[:2],"
                " (unbound lower). Steps are not changed."
            ),
            operations=slice_bounded_types,
            category=CATEGORIES["SliceUS"],
        ),
    ]
def get_mutations_for_target(target: LocIndex) -> Set[Any]:
    """Find all mutations that could apply to a target from the AST definitions.

    Args:
        target: the location index target

    Returns:
        Set of operations that can be mutated into the target location.
    """
    compatible: Set[Any] = set()

    for op_set in get_compatible_operation_sets():
        if target.op_type not in op_set.operations:
            continue

        LOGGER.debug("Potential mutatest operations found for target: %s", target.op_type)
        # All compatible operations except the target's current one.
        compatible = op_set.operations - {target.op_type}

        # Special case for If_Statement since that is a default to transform to True or False
        # but not a validation mutation target by itself
        compatible.discard("If_Statement")
        break

    return compatible
|
[
"ast.Index",
"ast.Num",
"ast.Compare",
"ast.If",
"ast.Subscript",
"ast.Slice",
"ast.USub",
"logging.getLogger"
] |
[((1217, 1244), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1234, 1244), False, 'import logging\n'), ((16067, 16079), 'ast.Num', 'ast.Num', ([], {'n': '(0)'}), '(n=0)\n', (16074, 16079), False, 'import ast\n'), ((16109, 16121), 'ast.Num', 'ast.Num', ([], {'n': '(1)'}), '(n=1)\n', (16116, 16121), False, 'import ast\n'), ((19766, 19823), 'ast.Slice', 'ast.Slice', ([], {'lower': 'slice.upper', 'upper': 'None', 'step': 'slice.step'}), '(lower=slice.upper, upper=None, step=slice.step)\n', (19775, 19823), False, 'import ast\n'), ((19859, 19916), 'ast.Slice', 'ast.Slice', ([], {'lower': 'None', 'upper': 'slice.lower', 'step': 'slice.step'}), '(lower=None, upper=slice.lower, step=slice.step)\n', (19868, 19916), False, 'import ast\n'), ((19949, 19999), 'ast.Slice', 'ast.Slice', ([], {'lower': 'None', 'upper': 'None', 'step': 'slice.step'}), '(lower=None, upper=None, step=slice.step)\n', (19958, 19999), False, 'import ast\n'), ((13219, 13294), 'ast.Compare', 'ast.Compare', ([], {'left': 'node.left', 'ops': 'mutation_ops', 'comparators': 'node.comparators'}), '(left=node.left, ops=mutation_ops, comparators=node.comparators)\n', (13230, 13294), False, 'import ast\n'), ((15356, 15432), 'ast.If', 'ast.If', ([], {'test': 'if_mutations[self.mutation]', 'body': 'node.body', 'orelse': 'node.orelse'}), '(test=if_mutations[self.mutation], body=node.body, orelse=node.orelse)\n', (15362, 15432), False, 'import ast\n'), ((16166, 16176), 'ast.USub', 'ast.USub', ([], {}), '()\n', (16174, 16176), False, 'import ast\n'), ((16186, 16198), 'ast.Num', 'ast.Num', ([], {'n': '(1)'}), '(n=1)\n', (16193, 16198), False, 'import ast\n'), ((17646, 17671), 'ast.Index', 'ast.Index', ([], {'value': 'mutation'}), '(value=mutation)\n', (17655, 17671), False, 'import ast\n'), ((21403, 21464), 'ast.Subscript', 'ast.Subscript', ([], {'value': 'node.value', 'slice': 'mutation', 'ctx': 'node.ctx'}), '(value=node.value, slice=mutation, ctx=node.ctx)\n', (21416, 21464), False, 
'import ast\n')]
|