| id (stringlengths 3-8) | content (stringlengths 100-981k) |
|---|---|
1628841
|
import bench
class Foo:
num = 20000000
def test(num):
i = 0
while i < Foo.num:
i += 1
bench.run(test)
|
1628843
|
import os
import sys
from datetime import datetime
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# local
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
add_path(os.path.abspath('..'))
from pycls.al.ActiveLearning import ActiveLearning
import pycls.core.builders as model_builder
from pycls.core.config import cfg, dump_cfg
import pycls.core.losses as losses
import pycls.core.optimizer as optim
from pycls.datasets.data import Data
import pycls.utils.checkpoint as cu
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.net as nu
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from pycls.utils.meters import ValMeter
logger = lu.get_logger(__name__)
plot_episode_xvalues = []
plot_episode_yvalues = []
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_x_values = []
plot_it_y_values = []
def argparser():
parser = argparse.ArgumentParser(description='Active Learning - Image Classification')
parser.add_argument('--cfg', dest='cfg_file', help='Config file', required=True, type=str)
parser.add_argument('--exp-name', dest='exp_name', help='Experiment Name', required=True, type=str)
return parser
def plot_arrays(x_vals, y_vals, x_name, y_name, dataset_name, out_dir, isDebug=False):
# if not du.is_master_proc():
# return
import matplotlib.pyplot as plt
temp_name = "{}_vs_{}".format(x_name, y_name)
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.title("Dataset: {}; {}".format(dataset_name, temp_name))
plt.plot(x_vals, y_vals)
if isDebug: print("plot_saved at : {}".format(os.path.join(out_dir, temp_name+'.png')))
plt.savefig(os.path.join(out_dir, temp_name+".png"))
plt.close()
def save_plot_values(temp_arrays, temp_names, out_dir, isParallel=True, saveInTextFormat=True, isDebug=True):
""" Saves arrays provided in the list in npy format """
# Return if not master process
# if isParallel:
# if not du.is_master_proc():
# return
for i in range(len(temp_arrays)):
temp_arrays[i] = np.array(temp_arrays[i])
temp_dir = out_dir
# if cfg.TRAIN.TRANSFER_EXP:
# temp_dir += os.path.join("transfer_experiment",cfg.MODEL.TRANSFER_MODEL_TYPE+"_depth_"+str(cfg.MODEL.TRANSFER_MODEL_DEPTH))+"/"
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
if saveInTextFormat:
# if isDebug: print(f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.txt in text format!!")
np.savetxt(temp_dir+'/'+temp_names[i]+".txt", temp_arrays[i], fmt="%1.2f")
else:
# if isDebug: print(f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.npy in numpy format!!")
np.save(temp_dir+'/'+temp_names[i]+".npy", temp_arrays[i])
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def main(cfg):
# Setting up GPU args
use_cuda = (cfg.NUM_GPUS > 0) and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': cfg.DATA_LOADER.NUM_WORKERS, 'pin_memory': cfg.DATA_LOADER.PIN_MEMORY} if use_cuda else {}
# Auto assign a RNG_SEED when not supplied a value
if cfg.RNG_SEED is None:
cfg.RNG_SEED = np.random.randint(100)
# Using specific GPU
# os.environ['NVIDIA_VISIBLE_DEVICES'] = str(cfg.GPU_ID)
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# print("Using GPU : {}.\n".format(cfg.GPU_ID))
# Getting the output directory ready (default is "/output")
cfg.OUT_DIR = os.path.join(os.path.abspath('..'), cfg.OUT_DIR)
if not os.path.exists(cfg.OUT_DIR):
os.mkdir(cfg.OUT_DIR)
# Create "DATASET/MODEL TYPE" specific directory
dataset_out_dir = os.path.join(cfg.OUT_DIR, cfg.DATASET.NAME, cfg.MODEL.TYPE)
if not os.path.exists(dataset_out_dir):
os.makedirs(dataset_out_dir)
# Creating the experiment directory inside the dataset specific directory
# all logs, labeled, unlabeled, validation sets are stored here
# E.g., output/CIFAR10/resnet18/{timestamp or cfg.EXP_NAME based on arguments passed}
if cfg.EXP_NAME == 'auto':
now = datetime.now()
exp_dir = f'{now.year}_{now.month}_{now.day}_{now.hour}{now.minute}{now.second}'
else:
exp_dir = cfg.EXP_NAME
exp_dir = os.path.join(dataset_out_dir, exp_dir)
if not os.path.exists(exp_dir):
os.mkdir(exp_dir)
print("Experiment Directory is {}.\n".format(exp_dir))
else:
print("Experiment Directory Already Exists: {}. Reusing it may lead to loss of old logs in the directory.\n".format(exp_dir))
cfg.EXP_DIR = exp_dir
# Save the config file in EXP_DIR
dump_cfg(cfg)
# Setup Logger
lu.setup_logging(cfg)
# Dataset preparing steps
print("\n======== PREPARING DATA AND MODEL ========\n")
cfg.DATASET.ROOT_DIR = os.path.join(os.path.abspath('..'), cfg.DATASET.ROOT_DIR)
data_obj = Data(cfg)
train_data, train_size = data_obj.getDataset(save_dir=cfg.DATASET.ROOT_DIR, isTrain=True, isDownload=True)
test_data, test_size = data_obj.getDataset(save_dir=cfg.DATASET.ROOT_DIR, isTrain=False, isDownload=True)
print("\nDataset {} Loaded Sucessfully.\nTotal Train Size: {} and Total Test Size: {}\n".format(cfg.DATASET.NAME, train_size, test_size))
logger.info("Dataset {} Loaded Sucessfully. Total Train Size: {} and Total Test Size: {}\n".format(cfg.DATASET.NAME, train_size, test_size))
trainSet_path, valSet_path = data_obj.makeTVSets(val_split_ratio=cfg.DATASET.VAL_RATIO, data=train_data, seed_id=cfg.RNG_SEED, save_dir=cfg.EXP_DIR)
trainSet, valSet = data_obj.loadTVPartitions(trainSetPath=trainSet_path, valSetPath=valSet_path)
print("Data Partitioning Complete. \nTrain Set: {}, Validation Set: {}\n".format(len(trainSet), len(valSet)))
logger.info("\nTrain Set: {}, Validation Set: {}\n".format(len(trainSet), len(valSet)))
# Preparing dataloaders for initial training
trainSet_loader = data_obj.getIndexesDataLoader(indexes=trainSet, batch_size=cfg.TRAIN.BATCH_SIZE, data=train_data)
valSet_loader = data_obj.getIndexesDataLoader(indexes=valSet, batch_size=cfg.TRAIN.BATCH_SIZE, data=train_data)
test_loader = data_obj.getTestLoader(data=test_data, test_batch_size=cfg.TRAIN.BATCH_SIZE, seed_id=cfg.RNG_SEED)
# Initialize the models
num_ensembles = cfg.ENSEMBLE.NUM_MODELS
models = []
for i in range(num_ensembles):
models.append(model_builder.build_model(cfg))
print("{} ensemble models of type: {}\n".format(cfg.ENSEMBLE.NUM_MODELS, cfg.ENSEMBLE.MODEL_TYPE))
logger.info("{} ensemble models of type: {}\n".format(cfg.ENSEMBLE.NUM_MODELS, cfg.ENSEMBLE.MODEL_TYPE))
# This is to seamlessly use the code originally written for AL episodes
cfg.EPISODE_DIR = cfg.EXP_DIR
# Train models
print("======== ENSEMBLE TRAINING ========")
logger.info("======== ENSEMBLE TRAINING ========")
best_model_paths = []
test_accs = []
for i in range(num_ensembles):
print("=== Training ensemble [{}/{}] ===".format(i+1, num_ensembles))
# Construct the optimizer
optimizer = optim.construct_optimizer(cfg, models[i])
print("optimizer: {}\n".format(optimizer))
logger.info("optimizer: {}\n".format(optimizer))
# Each ensemble gets its own output directory
cfg.EPISODE_DIR = os.path.join(cfg.EPISODE_DIR, 'model_{}'.format(i+1))
# Train the model
best_val_acc, best_val_epoch, checkpoint_file = ensemble_train_model(trainSet_loader, valSet_loader, models[i], optimizer, cfg)
best_model_paths.append(checkpoint_file)
print("Best Validation Accuracy by Model {}: {}\nBest Epoch: {}\n".format(i+1, round(best_val_acc, 4), best_val_epoch))
logger.info("Best Validation Accuracy by Model {}: {}\tBest Epoch: {}\n".format(i+1, round(best_val_acc, 4), best_val_epoch))
# Test the model
print("=== Testing ensemble [{}/{}] ===".format(i+1, num_ensembles))
test_acc = ensemble_test_model(test_loader, checkpoint_file, cfg, cur_episode=0)
test_accs.append(test_acc)
print("Test Accuracy by Model {}: {}.\n".format(i+1, round(test_acc, 4)))
logger.info("Test Accuracy by Model {}: {}.\n".format(i+1, test_acc))
# Reset EPISODE_DIR
cfg.EPISODE_DIR = cfg.EXP_DIR
# Test each best model checkpoint and report the average
print("======== ENSEMBLE TESTING ========\n")
logger.info("======== ENSEMBLE TESTING ========\n")
mean_test_acc = np.mean(test_accs)
print("Average Ensemble Test Accuracy: {}.\n".format(round(mean_test_acc, 4)))
logger.info("Average Ensemble Test Accuracy: {}.\n".format(mean_test_acc))
print("================================\n\n")
logger.info("================================\n\n")
def ensemble_train_model(train_loader, val_loader, model, optimizer, cfg):
global plot_epoch_xvalues
global plot_epoch_yvalues
global plot_it_x_values
global plot_it_y_values
start_epoch = 0
loss_fun = losses.get_loss_fun()
# Create meters
train_meter = TrainMeter(len(train_loader))
val_meter = ValMeter(len(val_loader))
# Perform the training loop
# print("Len(train_loader):{}".format(len(train_loader)))
logger.info('Start epoch: {}'.format(start_epoch + 1))
val_set_acc = 0.
temp_best_val_acc = 0.
temp_best_val_epoch = 0
# Best checkpoint model and optimizer states
best_model_state = None
best_opt_state = None
val_acc_epochs_x = []
val_acc_epochs_y = []
clf_train_iterations = cfg.OPTIM.MAX_EPOCH * int(len(train_loader)/cfg.TRAIN.BATCH_SIZE)
clf_change_lr_iter = clf_train_iterations // 25
clf_iter_count = 0
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
# Train for one epoch
train_loss, clf_iter_count = train_epoch(train_loader, model, loss_fun, optimizer, train_meter, \
cur_epoch, cfg, clf_iter_count, clf_change_lr_iter, clf_train_iterations)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# Model evaluation
if is_eval_epoch(cur_epoch):
# The original pycls code passes the test loader here, but we evaluate on the validation set
val_loader.dataset.no_aug = True
val_set_err = test_epoch(val_loader, model, val_meter, cur_epoch)
val_set_acc = 100. - val_set_err
val_loader.dataset.no_aug = False
if temp_best_val_acc < val_set_acc:
temp_best_val_acc = val_set_acc
temp_best_val_epoch = cur_epoch + 1
# Save best model and optimizer state for checkpointing
model.eval()
best_model_state = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict()
best_opt_state = optimizer.state_dict()
model.train()
# Since we start from 0 epoch
val_acc_epochs_x.append(cur_epoch+1)
val_acc_epochs_y.append(val_set_acc)
plot_epoch_xvalues.append(cur_epoch+1)
plot_epoch_yvalues.append(train_loss)
save_plot_values([plot_epoch_xvalues, plot_epoch_yvalues, plot_it_x_values, plot_it_y_values, val_acc_epochs_x, val_acc_epochs_y],\
["plot_epoch_xvalues", "plot_epoch_yvalues", "plot_it_x_values", "plot_it_y_values","val_acc_epochs_x","val_acc_epochs_y"], out_dir=cfg.EPISODE_DIR, isDebug=False)
logger.info("Successfully logged numpy arrays!!")
# Plot arrays
plot_arrays(x_vals=plot_epoch_xvalues, y_vals=plot_epoch_yvalues, \
x_name="Epochs", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
plot_arrays(x_vals=val_acc_epochs_x, y_vals=val_acc_epochs_y, \
x_name="Epochs", y_name="Validation Accuracy", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
save_plot_values([plot_epoch_xvalues, plot_epoch_yvalues, plot_it_x_values, plot_it_y_values, val_acc_epochs_x, val_acc_epochs_y], \
["plot_epoch_xvalues", "plot_epoch_yvalues", "plot_it_x_values", "plot_it_y_values","val_acc_epochs_x","val_acc_epochs_y"], out_dir=cfg.EPISODE_DIR)
print('Training Epoch: {}/{}\tTrain Loss: {}\tVal Accuracy: {}'.format(cur_epoch+1, cfg.OPTIM.MAX_EPOCH, round(train_loss, 4), round(val_set_acc, 4)))
# Save the best model checkpoint (Episode level)
checkpoint_file = cu.save_checkpoint(info="vlBest_acc_"+str(int(temp_best_val_acc)), \
model_state=best_model_state, optimizer_state=best_opt_state, epoch=temp_best_val_epoch, cfg=cfg)
print('\nWrote Best Model Checkpoint to: {}\n'.format(checkpoint_file.split('/')[-1]))
logger.info('Wrote Best Model Checkpoint to: {}\n'.format(checkpoint_file))
plot_arrays(x_vals=plot_epoch_xvalues, y_vals=plot_epoch_yvalues, \
x_name="Epochs", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
plot_arrays(x_vals=plot_it_x_values, y_vals=plot_it_y_values, \
x_name="Iterations", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
plot_arrays(x_vals=val_acc_epochs_x, y_vals=val_acc_epochs_y, \
x_name="Epochs", y_name="Validation Accuracy", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_x_values = []
plot_it_y_values = []
best_val_acc = temp_best_val_acc
best_val_epoch = temp_best_val_epoch
return best_val_acc, best_val_epoch, checkpoint_file
def ensemble_test_model(test_loader, checkpoint_file, cfg, cur_episode):
test_meter = TestMeter(len(test_loader))
model = model_builder.build_model(cfg)
model = cu.load_checkpoint(checkpoint_file, model)
test_err = test_epoch(test_loader, model, test_meter, cur_episode)
test_acc = 100. - test_err
return test_acc
def train_epoch(train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, cfg, clf_iter_count, clf_change_lr_iter, clf_max_iter):
"""Performs one epoch of training."""
global plot_epoch_xvalues
global plot_epoch_yvalues
global plot_it_x_values
global plot_it_y_values
# Shuffle the data
#loader.shuffle(train_loader, cur_epoch)
if cfg.NUM_GPUS>1: train_loader.sampler.set_epoch(cur_epoch)
# Update the learning rate
# Currently LR schedules are supported only for the 'SGD' optimizer
lr = optim.get_epoch_lr(cfg, cur_epoch)
if cfg.OPTIM.TYPE == "sgd":
optim.set_lr(optimizer, lr)
if torch.cuda.is_available():
model.cuda()
# Enable training mode
model.train()
train_meter.iter_tic() #This basically notes the start time in timer class defined in utils/timer.py
len_train_loader = len(train_loader)
for cur_iter, (inputs, labels) in enumerate(train_loader):
#ensuring that inputs are floatTensor as model weights are
inputs = inputs.type(torch.cuda.FloatTensor)
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
# Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
# if cfg.NUM_GPUS > 1:
# #Average error and losses across GPUs
# #Also this calls wait method on reductions so we are ensured
# #to obtain synchronized results
# loss, top1_err = du.scaled_all_reduce(
# [loss, top1_err]
# )
# Copy the stats from GPU to CPU (sync point)
loss, top1_err = loss.item(), top1_err.item()
# #Only master process writes the logs which are used for plotting
# if du.is_master_proc():
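# Record and plot the running loss every 19 iterations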
if cur_iter != 0 and cur_iter%19 == 0:
#because cur_epoch starts with 0
plot_it_x_values.append((cur_epoch)*len_train_loader + cur_iter)
plot_it_y_values.append(loss)
save_plot_values([plot_it_x_values, plot_it_y_values], ["plot_it_x_values", "plot_it_y_values"], out_dir=cfg.EPISODE_DIR, isDebug=False)
# print(plot_it_x_values)
# print(plot_it_y_values)
#Plot loss graphs
plot_arrays(x_vals=plot_it_x_values, y_vals=plot_it_y_values, x_name="Iterations", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR,)
print('Training Epoch: {}/{}\tIter: {}/{}'.format(cur_epoch+1, cfg.OPTIM.MAX_EPOCH, cur_iter, len(train_loader)))
#Compute the difference in time now from start time initialized just before this for loop.
train_meter.iter_toc()
train_meter.update_stats(top1_err=top1_err, loss=loss, \
lr=lr, mb_size=inputs.size(0) * cfg.NUM_GPUS)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
return loss, clf_iter_count
@torch.no_grad()
def test_epoch(test_loader, model, test_meter, cur_epoch):
"""Evaluates the model on the test set."""
global plot_epoch_xvalues
global plot_epoch_yvalues
global plot_it_x_values
global plot_it_y_values
if torch.cuda.is_available():
model.cuda()
# Enable eval mode
model.eval()
test_meter.iter_tic()
misclassifications = 0.
totalSamples = 0.
for cur_iter, (inputs, labels) in enumerate(test_loader):
with torch.no_grad():
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
inputs = inputs.type(torch.cuda.FloatTensor)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
# if cfg.NUM_GPUS > 1:
# top1_err = du.scaled_all_reduce([top1_err])
# #as above returns a list
# top1_err = top1_err[0]
# Copy the errors from GPU to CPU (sync point)
top1_err = top1_err.item()
# Multiply by Number of GPU's as top1_err is scaled by 1/Num_GPUs
misclassifications += top1_err * inputs.size(0) * cfg.NUM_GPUS
totalSamples += inputs.size(0)*cfg.NUM_GPUS
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err=top1_err, mb_size=inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch)
test_meter.reset()
return misclassifications/totalSamples
if __name__ == "__main__":
cfg.merge_from_file(argparser().parse_args().cfg_file)
main(cfg)
|
1628857
|
def extractHokageTrans(item):
"""
# Hokage Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if any([('Aim the Deepest Part of the Different World Labyrinth'.lower() in tag.lower()) for tag in item['tags']]):
if re.match('\\d+\\.', item['title']):
postfix = item['title'].split('.', 1)[-1]
return buildReleaseMessageWithType(item, 'Aim the Deepest Part of the Different World Labyrinth', vol, chp, frag=frag, postfix=postfix)
if any([('Divine Protection of Many Gods'.lower() in tag.lower()) for tag in item['tags'] + [item['title']]]):
return buildReleaseMessageWithType(item, 'Divine Protection of Many Gods', vol, chp, frag=frag, postfix=postfix)
if any([('Because Janitor-san is Not a Hero'.lower() in tag.lower()) for tag in item['tags'] + [item['title']]]):
return buildReleaseMessageWithType(item, 'Because Janitor-san is Not a Hero', vol, chp, frag=frag, postfix=postfix)
return False
|
1628918
|
from .class_weighted_bce_loss import WeightedBCEWithLogitsLoss
from .cnn_rnn import EfficientNet, EfficientNet3D, ResNet, ResNet3D
|
1628919
|
import os
import torch
from collections import OrderedDict
import re
import math
import argparse
def network_blending(low, high, res, ratio=1, name=None):
net_A = torch.load(low)
net_B = torch.load(high)
net_interp = OrderedDict()
A_names = list(net_A['g_ema'].keys())
B_names = list(net_B['g_ema'].keys())
assert all((x == y for x, y in zip(A_names, B_names)))
origin_idx = [[value, i] for i, value in enumerate(A_names)]
match_names = [x for x in origin_idx if not (x[0].startswith('style') or x[0].startswith('to_rgb') or x[0].startswith('noises'))]
mid_point_idx = [i for i, value in enumerate([x[-1] for x in match_names]) if value == res][-1] + 1
for pos, value in enumerate(match_names):
x = pos - mid_point_idx
alpha = ratio if x <= 0 else 1-ratio
for p, (k, v_A) in enumerate(net_A['g_ema'].items()):
v_B = net_B['g_ema'][k]
net_interp[k] = alpha * v_A + (1 - alpha) * v_B
ckpt = {"g_ema": net_interp, "latent_avg": net_A['latent_avg']}
if name is not None:
torch.save(ckpt, name + ".pt")
else:
name = low.split('/')[-1]
torch.save(ckpt, name + ".pt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--file1",
required=True,
default='params/ffhq.pt',
type=str,
help="Low pt file"
)
parser.add_argument(
"--file2",
required=True,
default='params/animation.pt',
type=str,
help="High pt file"
)
parser.add_argument(
"--ratio",
default=1,
type=int,
help="How much adjust the ratio"
)
parser.add_argument(
"--res",
default=8,
type=int,
help="Standard layer you want"
)
parser.add_argument(
"--name",
default='result',
type=str,
help="output.pt name"
)
args = parser.parse_args()
network_blending(args.file1, args.file2, args.res, args.ratio, args.name)
|
1628925
|
from entity import User
from .dao import BaseDao
class UserDao(BaseDao):
T = User
def add_user(self, username: str, password: str):
"""
Add a user to the db
:param username:
:param password:
"""
session = self.Session()
user = User(name=username.lower(), password=password)
session.add(user)
session.commit()
def get_by_username(self, username: str):
"""
Find user by username
:param username: username
:return:
"""
session = self.Session()
username = username.lower()
return session.query(User).filter(User.name == username).first()
|
1628933
|
import math
from scipy import interpolate
class NoneInterpolator:
def __call__(self, level):
return None
class ProfileLevels:
def __init__(self, rotorGeometry, windSpeedLevels, windDirectionLevels=None, upflowLevels=None):
self.windSpeedLevels = windSpeedLevels
self.windDirectionLevels = windDirectionLevels
self.upflowLevels = upflowLevels
self.rotorGeometry = rotorGeometry
def getWindSpeedProfile(self, row):
return self.create_interpolator(row, self.windSpeedLevels)
def getDirectionProfile(self, row):
return self.create_interpolator(row, self.windDirectionLevels)
def getUpflowProfile(self, row):
return self.create_interpolator(row, self.upflowLevels)
def create_interpolator(self, row, levels_dict):
if levels_dict is None:
return NoneInterpolator()
values = []
for level in levels_dict:
column = levels_dict[level]
if not column is None:
value = row[column]
values.append((level, value))
if len(values) >= 3:
x, y = zip(*sorted(values))
return interpolate.interp1d(x, y, kind='linear')
else:
return NoneInterpolator()
def findLowestAbove(self, level):
lowest = None
for height in self.windSpeedLevels:
if self.rotorGeometry.within_rotor(height) and height > level:
if lowest == None or height < lowest:
lowest = height
return lowest
def findHighestBelow(self, level):
highest = None
for height in self.windSpeedLevels:
if self.rotorGeometry.within_rotor(height) and height < level:
if highest == None or height > highest:
highest = height
return highest
class RotorBase:
def __init__(self, rotorGeometry):
self.rotorGeometry = rotorGeometry
self.levels = []
def __str__(self):
value = "Level\tBottom\tTop\tArea\tArea Fraction\n"
for level in self.levels:
value += "%0.2f\t%0.2f\t%0.2f\t%0.2f\t%0.2f%%\n" % (level.level, level.bottom, level.top, level.area, (level.areaFraction * 100.0))
return value
class EvenlySpacedRotor(RotorBase):
def __init__(self, rotorGeometry, numberOfRotorLevels):
RotorBase.__init__(self, rotorGeometry)
if (numberOfRotorLevels % 2) != 1:
raise Exception("Number of levels must be odd")
step = self.rotorGeometry.diameter / numberOfRotorLevels
level = self.rotorGeometry.hub_height - self.rotorGeometry.radius + step / 2
for i in range(numberOfRotorLevels):
self.levels.append(RotorLevel(self.rotorGeometry, level, level - step / 2, level + step / 2))
level += step
class ProfileLevelsRotor(RotorBase):
def __init__(self, rotorGeometry, profileLevels):
RotorBase.__init__(self, rotorGeometry)
for height in profileLevels.windSpeedLevels:
if self.rotorGeometry.within_rotor(height):
lowestAbove = profileLevels.findLowestAbove(height)
highestBelow = profileLevels.findHighestBelow(height)
if highestBelow is None:
bottom = self.rotorGeometry.lower_tip
else:
bottom = (height + highestBelow) / 2
if lowestAbove is None:
top = self.rotorGeometry.upper_tip
else:
top = (height + lowestAbove) / 2
self.levels.append(RotorLevel(self.rotorGeometry, height, bottom, top))
class RotorLevel:
def __init__(self, rotorGeometry, level, bottom, top):
self.level = level
self.bottom = bottom
self.top = top
self.middle = (self.top + self.bottom) / 2.0
self.area = self.calculateLevelArea(rotorGeometry.hub_height, rotorGeometry.radius, self.middle, top - bottom)
self.areaFraction = self.area / rotorGeometry.area
def calculateLevelArea(self, hubHeight, radius, level, step):
a1 = self.calculateArea(hubHeight, radius, level, step)
if level < hubHeight:
a2 = self.calculateArea(hubHeight, radius, level - step, step)
else:
a2 = self.calculateArea(hubHeight, radius, level + step, step)
return a1 - a2
def calculateArea(self, hubHeight, radius, level, step):
bottom = level - step / 2
top = level + step / 2
rotorBottom = hubHeight - radius
rotorTop = hubHeight + radius
if level > rotorTop or level < rotorBottom:
return 0.0
else:
if level < hubHeight:
adjacement = hubHeight - top
else:
adjacement = bottom - hubHeight
cosHalfAngle = adjacement / radius
angle = math.acos(cosHalfAngle) * 2.0
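# area of the circular segment cut off by the chord at this height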
return 0.5 * (angle - math.sin(angle)) * radius ** 2
class HubParameterBase:
def __init__(self, profileLevels, rotorGeometry):
self.rotorGeometry = rotorGeometry
self.profileLevels = profileLevels
class InterpolatedHubDirection(HubParameterBase):
def __init__(self, profileLevels, rotorGeometry):
HubParameterBase.__init__(self, profileLevels, rotorGeometry)
def hubDirection(self, row):
raise Exception("Not implemented")
class InterpolatedHubWindSpeed(HubParameterBase):
def __init__(self, profileLevels, rotorGeometry):
HubParameterBase.__init__(self, profileLevels, rotorGeometry)
def hubWindSpeed(self, row):
return self.profileLevels.getWindSpeedProfile(row)(self.rotorGeometry.hub_height)
class PiecewiseHubBase(HubParameterBase):
def __init__(self, profileLevels, rotorGeometry):
HubParameterBase.__init__(self, profileLevels, rotorGeometry)
self.highestBelow = profileLevels.findHighestBelow(self.rotorGeometry.hub_height)
self.lowestAbove = profileLevels.findLowestAbove(self.rotorGeometry.hub_height)
class PiecewiseExponentHubWindSpeed(PiecewiseHubBase):
def __init__(self, profileLevels, rotorGeometry):
PiecewiseHubBase.__init__(self, profileLevels, rotorGeometry)
def hubWindSpeed(self, row):
profile = self.profileLevels.getWindSpeedProfile(row)
speedAbove = profile(self.lowestAbove)
speedBelow = profile(self.highestBelow)
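# fit a local power-law (shear) exponent through the two measurement heights bracketing hub height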
exponent = math.log(speedAbove / speedBelow) / math.log(self.lowestAbove / self.highestBelow)
return speedBelow * (self.rotorGeometry.hub_height / self.highestBelow) ** exponent
class PiecewiseInterpolationHubDirection(PiecewiseHubBase):
def __init__(self, profileLevels, rotorGeometry):
PiecewiseHubBase.__init__(self, profileLevels, rotorGeometry)
self.direction_above_col = self.profileLevels.windDirectionLevels[self.lowestAbove]
self.direction_below_col = self.profileLevels.windDirectionLevels[self.highestBelow]
self.x = [self.highestBelow, self.lowestAbove]
def bound_direction(self, direction):
while direction < 0:
direction += 360.0
while direction > 360.0:
direction -= 360.0
return direction
def hubDirection(self, row):
profile = self.profileLevels.getWindSpeedProfile(row)
below_direction = row[self.direction_below_col]
above_direction = row[self.direction_above_col]
if below_direction is None or above_direction is None:
return None
else:
below_direction = self.bound_direction(below_direction)
above_direction = self.bound_direction(above_direction)
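# if the two directions straddle north (0/360), shift one by 360 so the interpolation takes the short way round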
if abs(below_direction - above_direction) > 180.0:
if below_direction > above_direction:
below_direction -= 360.0
else:
above_direction -= 360.0
y = [below_direction, above_direction]
inter = interpolate.interp1d(self.x, y, kind='linear')
return inter(self.rotorGeometry.hub_height)
class RotorEquivalentWindSpeed:
def __init__(self, profileLevels, rotor, hubWindSpeedCalculator, rewsVeer, rewsUpflow, exponent):
self.profileLevels = profileLevels
self.rotor = rotor
self.hubWindSpeedCalculator = hubWindSpeedCalculator
self.rewsVeer = rewsVeer
self.rewsUpflow = rewsUpflow
self.exponent = exponent
if not profileLevels.windDirectionLevels is None:
self.hubDirectionCalculator = PiecewiseInterpolationHubDirection(profileLevels, self.rotor.rotorGeometry)
else:
self.hubDirectionCalculator = None
if self.rotor.rotorGeometry.tilt is not None:
self.tilt_rad = self.to_radians(self.rotor.rotorGeometry.tilt)
else:
self.tilt_rad = None
def rews(self, row):
speed_profile = self.profileLevels.getWindSpeedProfile(row)
direction_profile = self.profileLevels.getDirectionProfile(row)
upflow_profile = self.profileLevels.getUpflowProfile(row)
equivalentWindSpeed = 0
if not self.hubDirectionCalculator is None:
hub_direction = self.hubDirectionCalculator.hubDirection(row)
else:
hub_direction = None
for level in self.rotor.levels:
speed = speed_profile(level.level)
if speed is None:
#TODO consider enforcing minimum level count
#(instead of forcing pre-filters to remove data with any level missing)
#this would be good for nacelle LiDARs (where there is always some data missing)
raise Exception("Speed cannot be None")
level_value = self.level_value(speed, level, hub_direction, direction_profile, upflow_profile)
equivalentWindSpeed += level_value ** self.exponent * level.areaFraction
equivalentWindSpeed = equivalentWindSpeed ** (1.0 / self.exponent)
return equivalentWindSpeed
def level_value(self, speed, level, hub_direction, direction_profile, upflow_profile):
return speed \
* self.direction_term(level, hub_direction, direction_profile) \
* self.upflow_term(level, speed, upflow_profile)
def rewsToHubRatio(self, row):
hub_speed = self.hubWindSpeedCalculator.hubWindSpeed(row)
return self.rews(row) / hub_speed
def direction_term(self, level, hub_direction, direction_profile):
if not self.rewsVeer:
return 1.0
direction = direction_profile(level.level)
if direction is None or hub_direction is None:
return 1.0
else:
direction_rad = self.to_radians(direction)
hub_direction_rad = self.to_radians(hub_direction)
return math.cos(direction_rad - hub_direction_rad)
def upflow_term(self, level, speed, upflow_profile):
if not self.rewsUpflow:
return 1.0
upflow = upflow_profile(level.level)
if upflow is None or self.tilt_rad is None:
return 1.0
else:
upflow_rad = math.atan2(upflow, speed)
return math.cos(upflow_rad + self.tilt_rad) / (math.cos(upflow_rad) * math.cos(self.tilt_rad))
def to_radians(self, direction):
return direction * math.pi / 180.0
class ProductionByHeight(RotorEquivalentWindSpeed):
def __init__(self, profileLevels, rotor, hubWindSpeedCalculator, power_curve):
RotorEquivalentWindSpeed.__init__(self, profileLevels, rotor, hubWindSpeedCalculator, False, False, 1.0)
self.power_curve = power_curve
def level_value(self, speed, level, hub_direction, direction_profile, upflow_profile):
return self.power_curve.power(speed)
def calculate(self, row):
hub_speed = self.hubWindSpeedCalculator.hubWindSpeed(row)
hub_power = self.power_curve.power(hub_speed)
return self.rews(row) - hub_power
|
1628984
|
import gdsfactory as gf
@gf.cell
def straight_with_padding(padding: float = 3.0) -> gf.Component:
"""
Adding padding to a cached component should raise MutabilityError
Args:
default: default padding on all sides
"""
c = gf.c.straight()
c.add_padding(default=padding) # add padding to original cell
return c
@gf.cell
def straight_with_padding_container(padding: float = 3.0) -> gf.Component:
"""Solution1: create new component (container)"""
c = gf.Component()
component = gf.c.straight()
c << component
c.add_padding(default=padding)
c.copy_child_info(component)
c.add_ports(component.ports)
return c
@gf.cell
def straight_with_padding_copy(padding: float = 3.0) -> gf.Component:
"""Solution2: create component copy
Args:
padding: default padding on all sides
"""
c = gf.c.straight()
c = c.copy()
c.add_padding(default=padding) # add padding to the copy, leaving the cached original untouched
return c
if __name__ == "__main__":
# c1 = straight_with_padding(padding=1)
# c2 = straight_with_padding(padding=3)
# c1 = straight_with_padding_container(default=1)
# c2 = straight_with_padding_container(default=3)
c1 = straight_with_padding_copy(padding=1)
c2 = straight_with_padding_copy(padding=3)
print(c1.name)
print(c2.name)
assert c1.name != c2.name, f"{c1.name} and {c2.name} must be different"
|
1628993
|
import sys
import torch
import numpy as np
from math import pi
from torch.distributions import Normal, Categorical
from .geoml.curve import CubicSpline
import math
class DistSqKL(torch.autograd.Function):
@staticmethod
def forward(ctx, net, p0, p1):
device = "cuda" if torch.cuda.is_available() else "cpu"
b_sz = p0.shape[0]
with torch.no_grad():
with torch.enable_grad():
crv, energy = connecting_geodesic(net, p0, p1, max_iter=6, n_nodes=5, eval_grid=16, l_rate=1e-3)
lm0 = crv.deriv(torch.zeros(1, device=device)).view(b_sz, -1)
lm1 = crv.deriv(torch.ones(1, device=device)).view(b_sz, -1)
ctx.save_for_backward(lm0, lm1)
net.p_sigma.zero_grad()
net.dummy_pmu.zero_grad()
return energy
@staticmethod
def backward(ctx, grad_output):
if grad_output.dim() == 1:
grad_output.unsqueeze_(1)
lm0, lm1 = ctx.saved_tensors
return None, 2 * grad_output * lm0, 2 * grad_output * lm1
def curve_energy(c, model, eval_pts):
"""Computes curve energy (in ambient/embedding space) with
Riemann sums.
params:
c: geoml.curve.CubicSpline object - the curve in latent space
model: nn.Module object - the VAE containing the decoder mu/sigma
functions
eval_pts: int - the number of (ordered) discrete points representing
the curve
"""
c = c.view(-1, model.latent_dim)
mu = model.dummy_pmu(c, False)
mu = mu.view(-1, eval_pts, model.in_dim)
delta_mu = (mu[:, 1:, :] - mu[:, :-1, :])
sigma = model.p_sigma(c, False)
sigma = sigma.view(-1, eval_pts, model.in_dim)
delta_sig = (sigma[:, :-1, :] - sigma[:, 1:, :])
d_mu = delta_mu.pow(2).sum(1)
d_sig = delta_sig.pow(2).sum(1)
return 0.5 * torch.sum(d_mu + d_sig, dim=-1)
def connecting_geodesic(net, p0, p1, optim=torch.optim.SGD, max_iter=25, n_nodes=16, eval_grid=5, l_rate=1e-3):
"""Computes the logmap of the geodesic with endpoints
p0, p1 \in M by minimizing the curve energy.
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
# The line below is written assuming p1 is the mean
curve = CubicSpline(p0, p1, num_nodes=n_nodes, device=device)
# the following code is lifted from
# geoml.geodesics.geodesic_minimizing_energy()
alpha = torch.linspace(0, 1, eval_grid, device=device).reshape((-1, 1))
if optim == torch.optim.SGD:
opt = optim([curve.parameters], momentum=0.99, lr=l_rate, nesterov=True)
else:
opt = optim([curve.parameters], lr=l_rate)
if net._mean_warmup:
curve_energies = curve_energy(curve(alpha), net, eval_grid)
else:
for _ in range(max_iter):
opt.zero_grad()
curve_energies = curve_energy(curve(alpha), net, eval_grid)
loss = curve_energies.sum()
loss.backward()
opt.step()
if torch.max(torch.abs(curve.parameters.grad)) < 1e-4:
break
return curve, curve_energies.detach_()
def log_bm_krn(x, y, t, model):
"""Log pdf of a Brownian motion (BM) transition kernel.
params:
x: torch.tensor object - a point on the manifold
y: torch.tensor object - a point on the manifold,
typically interpreted as a "mean".
t: float - the time for which the BM occur
model: nn.Module object - the model containing the embedding
mapping to the ambient space
"""
d = x.size(1)
t = t.squeeze()
_, logdet_x = model.metric(x).slogdet()
_, logdet_y = model.metric(y).slogdet()
log_H = (logdet_x - logdet_y)/2
l_sq = DistSqKL.apply(model, x, y)
return -d/2 * torch.log(2 * pi * t) + log_H - l_sq/(2 * t)
def brownian_motion_sample(num_steps, num_samples, dim, t, init_point, model):
"""Returns the points of a discretized Brownian motion (BM)
on a manifold (a.k.a. latent space).
params:
num_steps: int - the number of time steps for which
the BM will run
num_samples: int - the number of samples that will be
returned
dim: int - the dimensionality of the manifold/
latent space
t: float - the time for which the BM will run
init_point: torch.Tensor - the initial point of the BM
model: torch.nn.Module - the model containing the
embedding
"""
if init_point is None:
init_point = torch.zeros(num_samples, dim)
samples = [init_point.squeeze()]
if num_samples > 1:
samples[0] = samples[0].expand(num_samples, dim)
for _ in range(num_steps - 1):
g = model.metric(samples[-1])
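# one discrete BM step: the step covariance is (t / num_steps) times the inverse metric at the current point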
cov_mat = t/num_steps * g.squeeze().inverse()
mvn = torch.distributions.multivariate_normal.MultivariateNormal(samples[-1], covariance_matrix=cov_mat)
samples.append(mvn.sample().squeeze())
return torch.cat(samples).view(num_steps, num_samples, dim).squeeze()
def log_gauss_mix(x, mu, var):
# number of components
K = mu.shape[0]
x_xp = x.unsqueeze(1)
mu_xp = mu.unsqueeze(0)
var_xp = var.unsqueeze(0)
a = log_Normal_diag(x_xp, mu_xp, torch.log(var_xp + 1e-5), dim=2) - math.log(K)
a_max, _ = torch.max(a, 1) # MB x 1
# calculate log-sum-exp
log_mix = a_max + torch.log(torch.sum(torch.exp(a - a_max.unsqueeze(1)), 1))
return log_mix
def log_Normal_diag(x, mean, log_var, average=False, dim=None):
log_normal = -0.5 * ( log_var + torch.pow( x - mean, 2 ) / torch.exp( log_var ) )
if average:
return torch.mean( log_normal, dim )
else:
return torch.sum( log_normal, dim )
def linear_interpolation(p0, p1, n_points):
dim = p0.shape[-1]
c_pts = torch.zeros([n_points, dim])
c_pts[0] = p0
c_pts[-1] = p1
for i in range(1, (n_points + 1) - 2):
c_pts[i] = c_pts[i - 1] + 1/n_points * (p1 - p0)
return c_pts
|
1629055
|
import numpy as np
import pandas as pd
df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=['a', 'b', 'c'])
print(df)
# a b c
# 0 1 2 3
# 1 4 5 6
a_df = df.values
print(a_df)
# [[1 2 3]
# [4 5 6]]
print(type(a_df))
# <class 'numpy.ndarray'>
print(a_df.dtype)
# int64
s = df['a']
print(s)
# 0 1
# 1 4
# Name: a, dtype: int64
a_s = s.values
print(a_s)
# [1 4]
print(type(a_s))
# <class 'numpy.ndarray'>
print(a_s.dtype)
# int64
df_f = pd.DataFrame([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
print(df_f)
# 0 1 2
# 0 0.1 0.2 0.3
# 1 0.4 0.5 0.6
a_df_f = df_f.values
print(a_df_f)
# [[0.1 0.2 0.3]
# [0.4 0.5 0.6]]
print(type(a_df_f))
# <class 'numpy.ndarray'>
print(a_df_f.dtype)
# float64
df_multi = pd.read_csv('data/src/sample_pandas_normal.csv')
print(df_multi)
# name age state point
# 0 Alice 24 NY 64
# 1 Bob 42 CA 92
# 2 Charlie 18 CA 70
# 3 Dave 68 TX 70
# 4 Ellen 24 CA 88
# 5 Frank 30 NY 57
a_df_multi = df_multi.values
print(a_df_multi)
# [['Alice' 24 'NY' 64]
# ['Bob' 42 'CA' 92]
# ['Charlie' 18 'CA' 70]
# ['Dave' 68 'TX' 70]
# ['Ellen' 24 'CA' 88]
# ['Frank' 30 'NY' 57]]
print(type(a_df_multi))
# <class 'numpy.ndarray'>
print(a_df_multi.dtype)
# object
a_df_int = df_multi[['age', 'point']].values
print(a_df_int)
# [[24 64]
# [42 92]
# [18 70]
# [68 70]
# [24 88]
# [30 57]]
print(type(a_df_int))
# <class 'numpy.ndarray'>
print(a_df_int.dtype)
# int64
print(a_df_int.T)
# [[24 42 18 68 24 30]
# [64 92 70 70 88 57]]
a_df_int = df_multi.select_dtypes(include=int).values
print(a_df_int)
# [[24 64]
# [42 92]
# [18 70]
# [68 70]
# [24 88]
# [30 57]]
print(type(a_df_int))
# <class 'numpy.ndarray'>
print(a_df_int.dtype)
# int64
|
1629078
|
from pytensor import *
class RNNClassifier(Graph):
def __init__(self, vocab_size, input_size, hidden_size):
"""
RNN classification
:param vocab_size:
:param input_size:
:param hidden_size:
"""
super().__init__('RNNClassifier')
# embedding size
self.vocab_size = vocab_size
self.word_dim = input_size
# network size
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = vocab_size
# num steps
self.max_num_steps = 100
self.num_steps = 0
# word embedding
embed_argument = {'vocab_size': self.vocab_size, 'embed_size': self.input_size}
self.word_embedding = self.get_operation('Embedding', embed_argument)
# rnn
rnn_argument = {'input_size': self.input_size, 'hidden_size': self.hidden_size, 'max_num_steps': self.max_num_steps}
self.rnn = self.get_operation('RNN', rnn_argument)
# affines
self.affine = self.get_operation('Affine', {'input_size': self.hidden_size, 'hidden_size': self.output_size}, "Affine")
# softmax
self.softmaxLoss = self.get_operation('SoftmaxLoss')
def forward(self, word_lst):
# get num steps
self.num_steps = min(len(word_lst), self.max_num_steps)
# create embeddings
self.embedding_tensors = []
for word_id in word_lst:
self.embedding_tensors.append(self.word_embedding.forward([LongTensor([word_id])]))
# run RNN
self.rnn_tensors = self.rnn.forward(self.embedding_tensors)
# affine
self.output_tensor = self.affine.forward(self.rnn_tensors[-1])
self.softmax_tensor = self.softmaxLoss.forward(self.output_tensor)
return self.softmax_tensor
def loss(self, target_id):
ce_loss = self.softmaxLoss.loss(LongTensor([target_id]))
return ce_loss
class RNNLM(Graph):
def __init__(self, vocab_size, input_size, hidden_size):
super().__init__('RNN')
# embedding size
self.vocab_size = vocab_size
self.word_dim = input_size
# network size
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = vocab_size
# num steps
self.max_num_steps = 100
self.num_steps = 0
# word embedding
embed_argument = {'vocab_size': self.vocab_size, 'embed_size': self.input_size}
self.word_embedding = self.get_operation('Embedding', embed_argument)
# rnn
rnn_argument = {'input_size': self.input_size, 'hidden_size': self.hidden_size, 'max_num_steps': self.max_num_steps}
self.rnn = self.get_operation('RNN', rnn_argument)
# affines
affine_argument = {'input_size': self.hidden_size, 'hidden_size': self.output_size}
self.affines = [self.get_operation('Affine', affine_argument, "Affine") for i in range(self.max_num_steps)]
# softmax
self.softmaxLosses = [self.get_operation('SoftmaxLoss') for i in range(self.max_num_steps)]
def forward(self, word_lst):
# get num steps
self.num_steps = min(len(word_lst), self.max_num_steps)
# create embeddings
embedding_tensors = []
for word_id in word_lst:
embedding_tensors.append(self.word_embedding.forward([LongTensor([word_id])]))
# run RNN
rnn_tensors = self.rnn.forward(embedding_tensors)
# softmax tensors
softmax_tensors = []
for i in range(self.num_steps):
output_tensor = self.affines[i].forward(rnn_tensors[i])
softmax_tensor = self.softmaxLosses[i].forward(output_tensor)
softmax_tensors.append(softmax_tensor)
return softmax_tensors
def loss(self, target_ids):
ce_loss = 0.0
for i in range(self.num_steps):
cur_ce_loss = self.softmaxLosses[i].loss(LongTensor([target_ids[i]]))
ce_loss += cur_ce_loss
return ce_loss
|
1629121
|
from __future__ import unicode_literals
from django.db import migrations, connection
from bluebottle.utils.utils import update_group_permissions
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def add_group_permissions(apps, schema_editor):
tenant = Client.objects.get(schema_name=connection.tenant.schema_name)
with LocalTenant(tenant):
group_perms = {
'Staff': {
'perms': (
'add_basestatistic', 'change_basestatistic', 'delete_basestatistic',
'add_databasestatistic', 'change_databasestatistic', 'delete_databasestatistic',
'add_manualstatistic', 'change_manualstatistic', 'delete_manualstatistic',
'add_impactstatistic', 'change_impactstatistic', 'delete_impactstatistic',
)
},
}
update_group_permissions('statistics', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('statistics', '0011_auto_20200722_0810'),
]
operations = [
migrations.RunPython(
add_group_permissions,
migrations.RunPython.noop
)
]
|
1629128
|
import os
import subprocess
can_dir = os.path.dirname(os.path.abspath(__file__))
libdbc_fn = os.path.join(can_dir, "libdbc.so")
subprocess.check_call(["make"], cwd=can_dir)
from selfdrive.can.parser_pyx import CANParser # pylint: disable=no-name-in-module, import-error
assert CANParser
|
1629133
|
import numpy as np
import pymc3 as pm
import pandas as pd
import theano
import arviz as az
from arviz_json import get_dag, arviz_to_json
#Hierarchical Linear Regression Model
#Reference1: https://docs.pymc.io/notebooks/multilevel_modeling.html
#Reference2: https://docs.pymc.io/notebooks/GLM-hierarchical.html#The-data-set
#data
data_r = pd.read_csv(pm.get_data('radon.csv'))
data_r['log_radon'] = data_r['log_radon'].astype(theano.config.floatX)
county_names = data_r.county.unique()
county_idx = data_r.county_code.values
n_counties = len(data_r.county.unique())
#model-inference
fileName='radon_basement_PyMC3'
samples=3000
tune=10000
chains=2
coords = {"county":county_names, "county_idx_household":data_r["county"].tolist()}
with pm.Model(coords=coords) as model:
# Hyperpriors for group nodes
mu_a = pm.Normal('mu_a', mu=0., sigma=100)
sigma_a = pm.HalfNormal('sigma_a', 5.)
mu_b = pm.Normal('mu_b', mu=0., sigma=100)
sigma_b = pm.HalfNormal('sigma_b', 5.)
# Intercept for each county, distributed around group mean mu_a
# Above we just set mu and sd to a fixed value while here we
# plug in a common group distribution for all a and b (which are
# vectors of length n_counties).
a = pm.Normal('a', mu=mu_a, sigma=sigma_a, dims='county')
# Intercept for each county, distributed around group mean mu_a
b = pm.Normal('b', mu=mu_b, sigma=sigma_b, dims='county')
# Model error
eps = pm.HalfCauchy('eps', 5.)
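# Varying-intercept, varying-slope linear predictor: each household uses its county's a and b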
radon_est = a[county_idx] + b[county_idx]*data_r.floor.values
# Data likelihood
radon = pm.Normal('radon', mu=radon_est,
sigma=eps, observed=data_r.log_radon, dims='county_idx_household')
#Inference
trace = pm.sample(samples, chains=chains, tune=tune, target_accept=1.0)
prior = pm.sample_prior_predictive(samples=samples)
posterior_predictive = pm.sample_posterior_predictive(trace, samples=samples)
## STEP 1
# will also capture all the sampler statistics
data = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=posterior_predictive)
## STEP 2
#dag
dag = get_dag(model)
# insert dag into sampler stat attributes
data.sample_stats.attrs["graph"] = str(dag)
## STEP 3
# save data
arviz_to_json(data, fileName+'.npz')
|
1629171
|
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import tensorflow_datasets as tfds
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)
(ds_train, ds_test), ds_info = tfds.load(
"mnist",
split=["train", "test"],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalize_img(image, label):
"""Normalizes images"""
return tf.cast(image, tf.float32) / 255.0, label
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 128
# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)
# Setup for test Dataset
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.prefetch(AUTOTUNE)
model = keras.Sequential(
[
keras.Input((28, 28, 1)),
layers.Conv2D(32, 3, activation="relu"),
layers.Flatten(),
layers.Dense(10, activation="softmax"),
]
)
num_epochs = 5
optimizer = keras.optimizers.Adam()
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=False)  # model output already passes through softmax
acc_metric = keras.metrics.SparseCategoricalAccuracy()
# Training Loop
for epoch in range(num_epochs):
print(f"\nStart of Training Epoch {epoch}")
for batch_idx, (x_batch, y_batch) in enumerate(ds_train):
with tf.GradientTape() as tape:
y_pred = model(x_batch, training=True)
loss = loss_fn(y_batch, y_pred)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
acc_metric.update_state(y_batch, y_pred)
train_acc = acc_metric.result()
print(f"Accuracy over epoch {train_acc}")
acc_metric.reset_states()
# Test Loop
for batch_idx, (x_batch, y_batch) in enumerate(ds_test):
y_pred = model(x_batch, training=False)
acc_metric.update_state(y_batch, y_pred)
train_acc = acc_metric.result()
print(f"Accuracy over Test Set: {train_acc}")
acc_metric.reset_states()
|
1629200
|
from kdbinsert import KdbInsert
from optparse import OptionParser
import sys
from aselite import read_any
from remote_db import RemoteDB
from server_config import *
class RemoteInsert(KdbInsert):
#email and password are set prior to running insert if email/pw combo is present
def __init__(self):
self.email = None
self.password = None
# overloads KdbInsert.insert_into_db
def insert_into_db(self, **args):
#create database instance
db = RemoteDB()
# test if process is already in database
name = db.get_name(args['s'].get_chemical_symbols())
saddle_list = db.get_saddles(name)
for db_saddle in saddle_list:
if len(args['s']) != len(db_saddle[0]):
continue
if self.getMappings(args['s'], db_saddle[0], args['nf'], args['dc']) is not None:
#print "SQL duplicate of", name, "with id:", db_saddle[1]
return "SQL duplicate of " + name + " with id: " + str(db_saddle[1])
# add process to db
try:
db.add_process(args['or'], args['os'], args['op'], args['om'],
args['r'], args['s'], args['p'], args['m'], args['ma'], self.email, self.password)
except TypeError:
print "Account info in user_config.py is not valid. Try running remote_config.py to set up account"
return
# Indicate that the process was inserted successfully.
#print "good"
return "good"
if __name__ == "__main__":
insert_sub_class = RemoteInsert()
# Parse command line options.
parser = OptionParser(usage = "%prog [options] reactant saddle product mode")
parser.add_option("-o", "--mode", dest = "mode",
help = "optional mode file",
default = None)
options, args = parser.parse_args()
# Make sure we get the reactant, saddle, product, and mode files.
if len(args) < 3:
parser.print_help()
sys.exit()
# Load the reactant, saddle, product, and mode files.
reactant = read_any(args[0])
saddle = read_any(args[1])
product = read_any(args[2])
mode = None
if options.mode is not None:
mode = insert_sub_class.load_mode(options.mode)
# load previous params
db = RemoteDB()
params = db.get_params()
nf = params['nf']
dc = params['dc']
mac = params['mac']
insert_sub_class.insert(reactant, saddle, product, mode, nf=nf, dc=dc, mac=mac)
|
1629218
|
import unittest
from programy.utils.language.default import DefaultLangauge
#############################################################################
#
class DefaultTests(unittest.TestCase):
def test_split_into_sentences(self):
sentences = DefaultLangauge.split_into_sentences("")
self.assertEqual([], sentences)
sentences = DefaultLangauge.split_into_sentences("Hello")
self.assertEqual(["Hello"], sentences)
sentences = DefaultLangauge.split_into_sentences("Hello World")
self.assertEqual(["Hello World"], sentences)
sentences = DefaultLangauge.split_into_sentences("Hello, World")
self.assertEqual(["Hello, World"], sentences)
sentences = DefaultLangauge.split_into_sentences("Hello, World!")
self.assertEqual(["Hello, World"], sentences)
sentences = DefaultLangauge.split_into_sentences("Hello. World")
self.assertEqual(["Hello", "World"], sentences)
sentences = DefaultLangauge.split_into_sentences("Hello? World")
self.assertEqual(["Hello", "World"], sentences)
sentences = DefaultLangauge.split_into_sentences("Hello. World.?!")
self.assertEqual(["Hello", "World"], sentences)
sentences = DefaultLangauge.split_into_sentences("!Hello. World")
self.assertEqual(["Hello", "World"], sentences)
sentences = DefaultLangauge.split_into_sentences("半宽韩文字母")
self.assertEqual(["半宽韩文字母"], sentences)
sentences = DefaultLangauge.split_into_sentences("半宽韩文字母. 半宽平假名")
self.assertEqual(["半宽韩文字母", "半宽平假名"], sentences)
def test_split_into_words(self):
words = DefaultLangauge.split_into_words("")
self.assertEqual([], words)
words = DefaultLangauge.split_into_words("Hello")
self.assertEqual(["Hello"], words)
words = DefaultLangauge.split_into_words("Hello World")
self.assertEqual(["Hello", "World"], words)
words = DefaultLangauge.split_into_words(" Hello World ")
self.assertEqual(["Hello", "World"], words)
|
1629232
|
from pydantic import BaseModel, conlist
from enum import Enum
from typing import Dict, Union, List, Tuple, Any
import yaml
# Enums for objectives and constraints
class ObjectiveEnum(str, Enum):
MINIMIZE = 'MINIMIZE'
MAXIMIZE = 'MAXIMIZE'
# Allow any case
@classmethod
def _missing_(cls, name):
for member in cls:
if member.name.lower() == name.lower():
return member
class ConstraintEnum(str, Enum):
LESS_THAN = 'LESS_THAN'
GREATER_THAN = 'GREATER_THAN'
# Allow any case
@classmethod
def _missing_(cls, name):
for member in cls:
if member.name.lower() == name.lower():
return member
class VOCS(BaseModel):
variables: Dict[str, conlist(float, min_items=2, max_items=2)] = None
constraints: Dict[str, conlist(Union[float, ConstraintEnum], min_items=2, max_items=2)] = None
objectives: Dict[str, ObjectiveEnum] = None
constants: Dict[str, Any] = None
linked_variables: Dict[str, str] = None
class Config:
validate_assignment=True # Not sure this helps in this case
use_enum_values = True
@classmethod
def from_yaml(cls, yaml_text):
return cls.parse_obj(yaml.safe_load(yaml_text))
def as_yaml(self):
return yaml.dump(self.dict(), default_flow_style=None, sort_keys=False)
|
1629250
|
from . import pandas, xarray # noqa (API import)
try:
import dask as _dask # noqa (Test dask installed)
from . import dask # noqa (API import)
except ImportError:
pass
try:
import cudf as _cudf # noqa (Test cudf installed)
import cupy as _cupy # noqa (Test cupy installed)
from . import cudf # noqa (API import)
import dask_cudf as _dask_cudf # noqa (Test dask_cudf installed)
from . import dask_cudf # noqa (API import)
except Exception:
pass
|
1629290
|
from EasyLogin import EasyLogin
a=EasyLogin()
# make a GET request, and use cache to speed up
a.get("http://www.shanghairanking.com/ARWU2016.html",cache=True)
# this is the table we need
table=a.b.find("table",{"id":"UniversityRanking"})
count=0
# delete the first <tr>, which is unnecessary
a.d("tr",{})
print("Ranking\tName\tScore")
for tr in table.find_all("tr"):
data=a.text(tr) # get all text in this tr; this method returns a list of strings
print("\t".join( [ data[0],data[1],data[3] ]))
count+=1
if count==20:# only need the first 20 schools
break
|
1629293
|
from __future__ import absolute_import
from celery import Celery
from django.conf import settings
app = Celery('webalyzer')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
1629299
|
import json
import os
import shutil
from pathlib import Path
from typing import Callable
from .deck_exporter import DeckExporter
from ..anki.adapters.anki_deck import AnkiDeck
from ..representation import deck_initializer
from ..representation.deck import Deck
from ..utils.constants import DECK_FILE_NAME, DECK_FILE_EXTENSION, MEDIA_SUBDIRECTORY_NAME
from ..utils.filesystem.name_sanitizer import sanitize_anki_deck_name
from .note_sorter import NoteSorter
from ..config.config_settings import ConfigSettings
class AnkiJsonExporter(DeckExporter):
def __init__(self, collection,
config: ConfigSettings,
deck_name_sanitizer: Callable[[str], str] = sanitize_anki_deck_name,
deck_file_name: str = DECK_FILE_NAME):
self.config = config
self.collection = collection
self.last_exported_count = 0
self.deck_name_sanitizer = deck_name_sanitizer
self.deck_file_name = deck_file_name
self.note_sorter = NoteSorter(config)
def export_to_directory(self, deck: AnkiDeck, output_dir=Path("."), copy_media=True, create_deck_subdirectory=True) -> Path:
deck_directory = output_dir
if create_deck_subdirectory:
deck_directory = output_dir.joinpath(self.deck_name_sanitizer(deck.name))
deck_directory.mkdir(parents=True, exist_ok=True)
deck = deck_initializer.from_collection(self.collection, deck.name)
deck.notes = self.note_sorter.sort_notes(deck.notes)
self.last_exported_count = deck.get_note_count()
deck_filename = deck_directory.joinpath(self.deck_file_name).with_suffix(DECK_FILE_EXTENSION)
with deck_filename.open(mode='w', encoding="utf8") as deck_file:
deck_file.write(json.dumps(deck,
default=Deck.default_json,
sort_keys=True,
indent=4,
ensure_ascii=False))
self._save_changes(deck)
if copy_media:
self._copy_media(deck, deck_directory)
return deck_directory
def _save_changes(self, deck, is_export_child=False):
"""Save updates that were made during the export. E.g. UUID fields
It saves decks, deck configurations and models.
is_export_child refers to whether this deck is a child for the
_purposes of the current export operation_. For instance, if
we're exporting or snapshotting a specific subdeck, then it's
considered the "parent" here. We need the argument to avoid
duplicately saving deck configs and note models.
"""
self.collection.decks.save(deck.anki_dict)
for child_deck in deck.children:
self._save_changes(child_deck, is_export_child=True)
if not is_export_child:
for deck_config in deck.metadata.deck_configs.values():
self.collection.decks.save(deck_config.anki_dict)
for model in deck.metadata.models.values():
self.collection.models.save(model.anki_dict)
# Notes?
def _copy_media(self, deck, deck_directory):
media_directory = deck_directory.joinpath(MEDIA_SUBDIRECTORY_NAME)
media_directory.mkdir(parents=True, exist_ok=True)
for file_src in deck.get_media_file_list():
try:
shutil.copy(os.path.join(self.collection.media.dir(), file_src),
str(media_directory.resolve()))
except IOError as ioerror:
print("Failed to copy a file {}. Full error: {}".format(file_src, ioerror))
|
1629384
|
from .mlp import MLP
# Hide lines below until Lab 2
from .cnn import CNN
# Hide lines above until Lab 2
# Hide lines below until Lab 3
from .line_cnn_simple import LineCNNSimple
from .line_cnn import LineCNN
from .line_cnn_lstm import LineCNNLSTM
# Hide lines above until Lab 3
|
1629402
|
import torch
import numpy as np
from xgboost import XGBClassifier,XGBRegressor
from collections import OrderedDict
from XBNet.Seq import Seq
class XBNETClassifier(torch.nn.Module):
'''
    XBNETClassifier is a model for classification tasks that tries to combine tree-based models with
    neural networks to create a robust architecture.
    :param X_values(numpy array): Features on which the model has to be trained
    :param y_values(numpy array): Labels of the features, i.e. the target variable
    :param num_layers(int): Number of layers in the neural network
    :param num_layers_boosted(int,optional): Number of layers to be boosted in the neural network. Default value: 1
    :param input_through_cmd(Boolean): Whether layer dimensions are supplied programmatically (True) or prompted for on stdin (False)
    :param inputs_for_gui(list): Alternating input/output dimensions for the layers; used only when
    input_through_cmd is set to True
'''
def __init__(self, X_values, y_values, num_layers, num_layers_boosted=1,
input_through_cmd = False,inputs_for_gui=None):
super(XBNETClassifier, self).__init__()
self.name = "Classification"
self.layers = OrderedDict()
self.boosted_layers = {}
self.num_layers = num_layers
self.num_layers_boosted = num_layers_boosted
self.X = X_values
self.y = y_values
self.gui = input_through_cmd
self.inputs_layers_gui = inputs_for_gui
self.take_layers_dim()
self.base_tree()
self.layers[str(0)].weight = torch.nn.Parameter(torch.from_numpy(self.temp.T))
self.xg = XGBClassifier(n_estimators=100)
self.sequential = Seq(self.layers)
self.sequential.give(self.xg, self.num_layers_boosted)
self.feature_importances_ = None
def get(self, l):
'''
        Stores the labels of the current batch so they are available to the boosted
        layers during the forward pass.
        :param l(tensor): Labels of the current set of inputs being processed.
'''
self.l = l
def take_layers_dim(self):
'''
        Creates the neural network layers. Dimensions are read from inputs_for_gui when
        self.gui is True; otherwise the user is prompted on stdin.
'''
if self.gui == True:
counter = 0
for i in range(self.num_layers):
inp = self.inputs_layers_gui[counter]
counter += 1
out = self.inputs_layers_gui[counter]
counter += 1
set_bias = True
self.layers[str(i)] = torch.nn.Linear(inp, out, bias=set_bias)
if i == 0:
self.input_out_dim = out
self.labels = out
else:
print("Enter dimensions of linear layers: ")
for i in range(self.num_layers):
inp = int(input("Enter input dimensions of layer " + str(i + 1) + ": "))
out = int(input("Enter output dimensions of layer " + str(i + 1)+ ": "))
                set_bias = input("Set bias as True or False: ").strip().lower() == "true"  # bool() is True for any non-empty string
self.layers[str(i)] = torch.nn.Linear(inp, out, bias=set_bias)
if i == 0:
self.input_out_dim = out
self.labels = out
print("Enter your last layer ")
self.ch = int(input("1. Sigmoid \n2. Softmax \n3. None \n"))
if self.ch == 1:
self.layers[str(self.num_layers)] = torch.nn.Sigmoid()
elif self.ch == 2:
dimension = int(input("Enter dimension for Softmax: "))
self.layers[str(self.num_layers)] = torch.nn.Softmax(dim=dimension)
else:
pass
def base_tree(self):
'''
        Instantiates and trains an XGBClassifier on the first layer of the neural network to set its feature importances
as the weights of the layer
'''
self.temp1 = XGBClassifier(n_estimators=100).fit(self.X, self.y,eval_metric="mlogloss").feature_importances_
self.temp = self.temp1
for i in range(1, self.input_out_dim):
self.temp = np.column_stack((self.temp, self.temp1))
def forward(self, x, train=True):
x = self.sequential(x, self.l,train)
return x
def save(self,path):
'''
Saves the entire model in the provided path
:param path(string): Path where model should be saved
'''
torch.save(self,path)
class XBNETRegressor(torch.nn.Module):
'''
XBNETRegressor is a model for regression tasks that tries to combine tree-based models with
neural networks to create a robust architecture.
:param X_values(numpy array): Features on which model has to be trained
    :param y_values(numpy array): Target values corresponding to the features
:param num_layers(int): Number of layers in the neural network
:param num_layers_boosted(int,optional): Number of layers to be boosted in the neural network. Default value: 1
'''
def __init__(self, X_values, y_values, num_layers, num_layers_boosted=1):
super(XBNETRegressor, self).__init__()
self.name = "Regression"
self.layers = OrderedDict()
self.boosted_layers = {}
self.num_layers = num_layers
self.num_layers_boosted = num_layers_boosted
self.X = X_values
self.y = y_values
self.take_layers_dim()
self.base_tree()
self.layers[str(0)].weight = torch.nn.Parameter(torch.from_numpy(self.temp.T))
self.xg = XGBRegressor(n_estimators=100)
self.sequential = Seq(self.layers)
self.sequential.give(self.xg, self.num_layers_boosted)
self.sigmoid = torch.nn.Sigmoid()
self.feature_importances_ = None
def get(self, l):
'''
        Stores the labels of the current batch so they are available to the boosted
        layers during the forward pass.
        :param l(tensor): Labels of the current set of inputs being processed.
'''
self.l = l
def take_layers_dim(self):
'''
Creates the neural network by taking input from the user
'''
print("Enter dimensions of linear layers: ")
for i in range(self.num_layers):
inp = int(input("Enter input dimensions of layer " + str(i + 1) + ": "))
out = int(input("Enter output dimensions of layer " + str(i + 1)+ ": "))
            set_bias = input("Set bias as True or False: ").strip().lower() == "true"  # bool() is True for any non-empty string
self.layers[str(i)] = torch.nn.Linear(inp, out, bias=set_bias)
if i == 0:
self.input_out_dim = out
self.labels = out
print("Enter your last layer ")
self.ch = int(input("1. Sigmoid \n2. Softmax \n3. None \n"))
if self.ch == 1:
self.layers[str(self.num_layers)] = torch.nn.Sigmoid()
elif self.ch == 2:
dimension = int(input("Enter dimension for Softmax: "))
self.layers[str(self.num_layers)] = torch.nn.Softmax(dim=dimension)
else:
pass
def base_tree(self):
'''
        Instantiates and trains an XGBRegressor on the first layer of the neural network to set its feature importances
as the weights of the layer
'''
        self.temp1 = XGBRegressor(n_estimators=100).fit(self.X, self.y, eval_metric="rmse").feature_importances_  # "mlogloss" is a classification metric; use a regression metric here
self.temp = self.temp1
for i in range(1, self.input_out_dim):
self.temp = np.column_stack((self.temp, self.temp1))
def forward(self, x, train=True):
x = self.sequential(x,self.l,train)
return x
def save(self,path):
'''
Saves the entire model in the provided path
:param path(string): Path where model should be saved
'''
torch.save(self,path)
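# --- Minimal usage sketch (added for illustration; the data, shapes and layer sizes below
# are assumptions, not part of the original library). With input_through_cmd=True the layer
# dimensions are read from inputs_for_gui as alternating (input, output) pairs, one pair per
# layer, instead of prompting on stdin.
if __name__ == '__main__':
    X_demo = np.random.rand(100, 10)                 # 100 synthetic samples, 10 features
    y_demo = np.random.randint(0, 2, 100)            # synthetic binary labels
    demo_model = XBNETClassifier(X_demo, y_demo, num_layers=2,
                                 input_through_cmd=True,
                                 inputs_for_gui=[10, 2, 2, 2])  # layer 1: 10 -> 2, layer 2: 2 -> 2
    print(demo_model.sequential)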
|
1629473
|
from .cmap import CMapParser
from .document import PDFParser, RegistryPDFParser
from .objstm import ObjStmParser
from .inlineimage import InlineImageParser
|
1629476
|
import unittest
"""
1
/ \
2 3
/ \
4 5
"""
#DFS PostOrder 4 5 2 3 1 (Left-Right-Root)
def postOrderRecursive(tree_root):
def postorder(node):
if node is None:
return
postorder(node.left)
postorder(node.right)
print(node.value, end=' ')
postorder(tree_root)
return True
def postOrderHack(tree_root):
res, stack = [], [tree_root]
while stack:
node = stack.pop()
if node:
res.append(node.value)
stack.append(node.left)
stack.append(node.right)
print(res[::-1])
return True
#DFS PostOrder 4 5 2 3 1 (Left-Right-Root)
def postOrder(tree_root):
stack = [(tree_root, False)]
while stack:
node, visited = stack.pop()
if node:
if visited:
print(node.value, end=' ')
else:
stack.append((node, True))
stack.append((node.right, False))
stack.append((node.left, False))
return True
# Tests
class Test(unittest.TestCase):
class BinaryTreeNode(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert_left(self, value):
self.left = Test.BinaryTreeNode(value)
return self.left
def insert_right(self, value):
self.right = Test.BinaryTreeNode(value)
return self.right
def test_traversal(self):
tree = Test.BinaryTreeNode(1)
left = tree.insert_left(2)
tree.insert_right(3)
left.insert_left(4)
left.insert_right(5)
result = postOrder(tree)
self.assertTrue(result)
unittest.main(verbosity=2)
|
1629503
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
def __min_birth_max_death(persistence, band=0.0):
# Look for minimum birth date and maximum death date for plot optimisation
max_death = 0
min_birth = persistence[0][1][0]
for interval in reversed(persistence):
if float(interval[1][1]) != float("inf"):
if float(interval[1][1]) > max_death:
max_death = float(interval[1][1])
if float(interval[1][0]) > max_death:
max_death = float(interval[1][0])
if float(interval[1][0]) < min_birth:
min_birth = float(interval[1][0])
if band > 0.0:
max_death += band
return (min_birth, max_death)
def _array_handler(a):
if isinstance(a[0][1], np.float64) or isinstance(a[0][1], float):
return [[0, x] for x in a]
else:
return a
def plot_persistence_barcode(
persistence=[],
alpha=0.85,
max_intervals=1024,
max_barcodes=1024,
inf_delta=0.1,
legend=True,
colormap=None,
axes=None,
fontsize=14,
):
persistence = _array_handler(persistence)
if max_intervals > 0 and max_intervals < len(persistence):
# Sort by life time, then takes only the max_intervals elements
persistence = sorted(
persistence,
key=lambda life_time: life_time[1][1] - life_time[1][0],
reverse=True,
)[:max_intervals]
if colormap is None:
# colormap = plt.cm.Set1.colors
colormap = CB_color_cycle
if axes is None:
fig, axes = plt.subplots(1, 1)
persistence = sorted(persistence, key=lambda birth: birth[1][0])
(min_birth, max_death) = __min_birth_max_death(persistence)
ind = 0
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for bar code to be more
# readable
infinity = max_death + delta
axis_start = min_birth - delta
# Draw horizontal bars in loop
for interval in reversed(persistence):
if float(interval[1][1]) != float("inf"):
# Finite death case
axes.barh(
ind,
(interval[1][1] - interval[1][0]),
height=0.8,
left=interval[1][0],
alpha=alpha,
color=colormap[interval[0]],
linewidth=0.5,
)
else:
# Infinite death case for diagram to be nicer
axes.barh(
ind,
(infinity - interval[1][0]),
height=0.8,
left=interval[1][0],
alpha=alpha,
color=colormap[interval[0]],
linewidth=0.5,
)
ind = ind + 1
if legend:
dimensions = list(set(item[0] for item in persistence))
axes.legend(
handles=[
mpatches.Patch(color=colormap[dim], label="H"+str(dim))
for dim in dimensions
],
loc="upper right",
)
axes.set_title("Persistence barcode", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
axes.axis([axis_start, infinity, 0, ind])
return axes
def plot_persistence_diagram(
persistence=[],
alpha=0.6,
band=0.0,
max_intervals=1024,
max_plots=1024,
inf_delta=0.1,
legend=True,
colormap=None,
axes=None,
fontsize=14,
greyblock=False
):
persistence = _array_handler(persistence)
    if max_plots != 1024:
print("Deprecated parameter. It has been replaced by max_intervals")
max_intervals = max_plots
if max_intervals > 0 and max_intervals < len(persistence):
# Sort by life time, then takes only the max_intervals elements
persistence = sorted(
persistence,
key=lambda life_time: life_time[1][1] - life_time[1][0],
reverse=True,
)[:max_intervals]
if colormap is None:
# colormap = plt.cm.Set1.colors
colormap = CB_color_cycle
if axes is None:
fig, axes = plt.subplots(1, 1)
(min_birth, max_death) = __min_birth_max_death(persistence, band)
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for diagram to be more
# readable
infinity = max_death + delta
axis_end = max_death + delta / 2
axis_start = min_birth - delta
# bootstrap band
if band > 0.0:
x = np.linspace(axis_start, infinity, 1000)
axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
# lower diag patch
if greyblock:
axes.add_patch(mpatches.Polygon([[axis_start, axis_start],
[axis_end, axis_start],
[axis_end, axis_end]],
fill=True,
color='lightgrey'))
# Draw points in loop
pts_at_infty = False # Records presence of pts at infty
for interval in reversed(persistence):
if float(interval[1][1]) != float("inf"):
# Finite death case
axes.scatter(
interval[1][0],
interval[1][1],
alpha=alpha,
color=colormap[interval[0]],
)
else:
pts_at_infty = True
# Infinite death case for diagram to be nicer
axes.scatter(interval[1][0],
infinity,
alpha=alpha,
color=colormap[interval[0]])
if pts_at_infty:
# infinity line and text
axes.plot([axis_start, axis_end],
[axis_start, axis_end],
linewidth=1.0,
color="k")
axes.plot([axis_start, axis_end],
[infinity, infinity],
linewidth=1.0,
color="k",
alpha=alpha)
# Infinity label
yt = axes.get_yticks()
yt = yt[np.where(yt < axis_end)] # to avoid ticklabel higher than inf
yt = np.append(yt, infinity)
ytl = ["%.3f" % e for e in yt] # to avoid float precision error
ytl[-1] = r'$+\infty$'
axes.set_yticks(yt)
axes.set_yticklabels(ytl, fontsize=14, weight='bold')
if legend:
dimensions = list(set(item[0] for item in persistence))
axes.legend(
handles=[
mpatches.Patch(color=colormap[dim], label="H"+str(dim))
for dim in dimensions
]
)
axes.set_xlabel("Birth", fontsize=fontsize, weight='bold')
axes.set_ylabel("Death", fontsize=fontsize, weight='bold')
axes.set_title("Persistence diagram", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
axes.axis([axis_start, axis_end, axis_start, infinity + delta/2])
return axes
def read_pdgm(fname):
with open(fname, "rb") as f:
dgm = pickle.load(f)
return dgm
def plot_diagrams(dgm_input, dgm_regular, dgm_random):
fig = plt.figure(figsize=(16, 11))
fig.subplots_adjust(wspace=0.3, hspace=0.2)
ax1 = fig.add_subplot(231)
plot_persistence_barcode(dgm_input, axes=ax1)
ax1.set_title("")
ax1.set_xlabel(r"$\alpha$", fontsize=21, weight='bold')
ticks = ax1.get_yticks().astype('i')
ax1.set_yticklabels(ticks, fontsize=14, weight='bold')
xlim = ax1.get_xlim()
ax1.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
ticks = ax1.get_xticks()
ax1.set_xticklabels(ticks, fontsize=14, weight='bold')
K = 1.06
M = 0
ax1.text(M, K, 'A',
va='top',
ha='left',
fontsize=18,
weight='bold',
transform=ax1.transAxes)
ax2 = fig.add_subplot(232)
plot_persistence_barcode(dgm_regular, axes=ax2)
ax2.set_title("")
ax2.set_xlabel(r"$\alpha$", fontsize=21, weight='bold')
ax2.set_yticks([])
xlim = ax2.get_xlim()
ax2.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
ticks = ax2.get_xticks()
ax2.set_xticklabels(ticks, fontsize=14, weight='bold')
ax2.text(M, K, 'B',
va='top',
ha='left',
fontsize=18,
weight='bold',
transform=ax2.transAxes)
ax3 = fig.add_subplot(233)
plot_persistence_barcode(dgm_random, axes=ax3)
ax3.set_title("")
ax3.set_xlabel(r"$\alpha$", fontsize=21, weight='bold')
ax3.set_yticks([])
xlim = ax3.get_xlim()
ax3.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
ticks = ax3.get_xticks()
ax3.set_xticklabels(ticks, fontsize=14, weight='bold')
ax3.text(M, K, 'C',
va='top',
ha='left',
fontsize=18,
weight='bold',
transform=ax3.transAxes)
ax4 = fig.add_subplot(234)
plot_persistence_diagram(dgm_input, axes=ax4)
ax4.set_title("")
xlim = ax4.get_xlim()
ax4.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
ticks = ax4.get_xticks()
ax4.set_xticklabels(ticks, fontsize=14, weight='bold')
ax4.text(M, K, 'D',
va='top',
ha='left',
fontsize=18,
weight='bold',
transform=ax4.transAxes)
xlim_track, ylim_track = [], []
ax5 = fig.add_subplot(235)
plot_persistence_diagram(dgm_regular, axes=ax5)
xlim = ax5.get_xlim()
ylim = ax5.get_ylim()
xlim_track.append(xlim)
ylim_track.append(ylim)
ax5.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
ticks = ax5.get_xticks()
ax5.set_xticklabels(ticks, fontsize=14, weight='bold')
ax5.set_ylabel("")
ax5.set_title("")
ax5.text(M, K, 'E',
va='top',
ha='left',
fontsize=18,
weight='bold',
transform=ax5.transAxes)
ax6 = fig.add_subplot(236)
plot_persistence_diagram(dgm_random, axes=ax6)
xlim = ax6.get_xlim()
ylim = ax6.get_ylim()
xlim_track.append(xlim)
ylim_track.append(ylim)
ax6.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
ticks = ax6.get_xticks()
ax6.set_xticklabels(ticks, fontsize=14, weight='bold')
ax6.set_title("")
ax6.set_ylabel("")
ax6.text(M, K, 'F',
va='top',
ha='left',
fontsize=18,
weight='bold',
transform=ax6.transAxes)
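# --- Minimal usage sketch (added for illustration; the persistence values below are synthetic).
# Each entry is (dimension, (birth, death)); infinite deaths are supported and are drawn at
# max_death + delta by the functions above.
if __name__ == "__main__":
    demo_persistence = [
        (0, (0.0, float("inf"))),
        (0, (0.1, 0.6)),
        (1, (0.3, 0.8)),
    ]
    plot_persistence_barcode(demo_persistence)
    plot_persistence_diagram(demo_persistence)
    plt.show()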
|
1629506
|
from collections import deque
import numpy as np
# A circular buffer implemented as a deque to keep track of the last few
# frames in the environment that together form a state capturing temporal
# and directional information. Provides an accessor to get the current
# state at any given time, which is represented as a list of consecutive
# frames.
#
# Also takes in a pre-processor to potentially resize or modify the frames
# before inserting them into the buffer.
class FrameBuffer:
def __init__(self, frames_per_state, preprocessor=lambda x: x):
"""
@param frames_per_state: Number of consecutive frames that form a state.
        @param preprocessor: Lambda that takes a frame and returns a
        preprocessed frame.
"""
if frames_per_state <= 0:
raise RuntimeError('Frames per state should be greater than 0')
self.frames_per_state = frames_per_state
self.frames = deque(maxlen=frames_per_state)
self.preprocessor = preprocessor
def append(self, frame):
"""
Takes a frame, applies preprocessing, and appends it to the deque.
The first frame added to the buffer is duplicated `frames_per_state` times
to completely fill the buffer.
"""
frame = self.preprocessor(frame)
if len(self.frames) == 0:
self.frames.extend(self.frames_per_state * [frame])
else:
self.frames.append(frame)
def get_state(self):
"""
Fetch the current state consisting of `frames_per_state` consecutive frames.
If `frames_per_state` is 1, returns the frame instead of an array of
length 1. Otherwise, returns a Numpy array of `frames_per_state` frames.
"""
if len(self.frames) == 0:
return None
if self.frames_per_state == 1:
return self.frames[0]
return np.stack(self.frames, axis=-1)
def clear(self):
"""
Clear the frames in the buffer.
"""
self.frames.clear()
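# --- Usage sketch (added for illustration; the frame shapes and preprocessor are assumptions).
# The first append fills the buffer with copies of the frame, so a full state of
# `frames_per_state` frames is available immediately.
if __name__ == "__main__":
    demo_buffer = FrameBuffer(frames_per_state=4, preprocessor=lambda f: f / 255.0)
    demo_buffer.append(np.zeros((84, 84), dtype=np.float32))
    demo_buffer.append(np.ones((84, 84), dtype=np.float32))
    print(demo_buffer.get_state().shape)  # (84, 84, 4): consecutive frames stacked on the last axis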
|
1629511
|
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import layers
import numpy as np
import csv
import sys
import os
# Import utility functions from 'utils.py' file
from utils import checkFolders, show_variables, add_suffix, backup_configs
# Import convolution layer definitions from 'convolution_layers.py' file
from convolution_layers import conv2d_layer, inception_v3, transpose_conv2d_layer, transpose_inception_v3, dense_layer, factored_conv2d, upsample
"""
U-Net model with optional KL-divergence (post-activation),
decoder-only batch-normalization, optional upsampling,
and probabilistic loss according to MVN prediction.
"""
# Encoder component of VAE model
def encoder(self, x, training=True, reuse=None, name=None):
# Unpack data
data, mesh, __ = x
if self.use_noise_injection:
interior_indices = tf.greater(mesh, 0)
zero_tensor = tf.zeros_like(data)
noisy_data = tf.distributions.Normal(loc=data, scale=self.noise_level*tf.ones_like(data), name='noisy_data').sample()
data = tf.cond(training, lambda: noisy_data, lambda: data)
data = tf.where(interior_indices, data, zero_tensor)
if not (self.alt_res == 128):
data = tf.image.resize_images(data, [self.alt_res, self.alt_res])
# [None, 64, 64, 1] --> [None, 32, 32, 16]
h1 = conv2d_layer(data, 48, kernel_size=5, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_1')#, coordconv=self.coordconv)
h1 = layers.max_pooling2d(h1, 2, 2, padding='same', data_format='channels_last', name='e_pool_1')
# [None, 32, 32, 16] --> [None, 16, 16, 32]
if self.factor:
h2 = factored_conv2d(h1, 48, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_2')
h2 = layers.max_pooling2d(h2, 2, 2, padding='same', data_format='channels_last', name='e_pool_2')
else:
h2 = conv2d_layer(h1, 48, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_2')#, coordconv=self.coordconv)
h2 = layers.max_pooling2d(h2, 2, 2, padding='same', data_format='channels_last', name='e_pool_2')
h3 = inception_v3(h2, 80, stride=1, batch_norm=False, training=training, reuse=reuse, name='e_incept_1')
# [None, 16, 16, 64] --> [None, 8, 8, 64]
h4 = conv2d_layer(h3, 80, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_3')#, coordconv=self.coordconv)
h4 = layers.max_pooling2d(h4, 2, 2, padding='same', data_format='channels_last', name='e_pool_3')
if self.use_inception:
h5 = inception_v3(h4,150, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_incept_4')
h5 = layers.max_pooling2d(h5, 2, 2, padding='same', data_format='channels_last', name='e_pool_4')
else:
h5 = conv2d_layer(h4, 150, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_4')#, coordconv=self.coordconv)#, activation=None)
h5 = layers.max_pooling2d(h5, 2, 2, padding='same', data_format='channels_last', name='e_pool_4')
chans = 512 if self.use_kl else 256
omit = True if self.use_kl else False
h6 = inception_v3(h5, chans, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='e_incept_5',
omit_activation=omit)
h6 = layers.max_pooling2d(h6, 2, 2, padding='same', data_format='channels_last', name='e_pool_5')
if self.coordconv:
h6 = conv2d_layer(h6, chans, kernel_size=2, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_6', coordconv=self.coordconv)
if not self.use_kl:
h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
elif self.use_extra_dropout:
h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
return h1, h2, h3, h4, h5, h6
# Decoder component of VAE model
def decoder(self, z, training=True, reuse=None, name=None):
# Note: h2 and h3 have same resolution
h1, h2, h3, h4, h5, h6 = z
# h6 ~ [None, 4, 4, 256]
h = h6
h = inception_v3(h, 256, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_incept_0')
h = tf.layers.dropout(h, rate=self.dropout_rate, training=training)
if self.coordconv:
h = conv2d_layer(h, 256, kernel_size=2, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='d_conv_0', coordconv=self.coordconv)
# [None, 4, 4, 256] --> [None, 8, 8, 128]
stride = 1 if self.interpolate else 2
h = transpose_conv2d_layer(h, 150, kernel_size=2, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_0')#, coordconv=self.coordconv)
if self.interpolate:
h = upsample(h, 4*2)
h = tf.concat([h, h5],3)
# [None, 8, 8, 64] --> [None, 16, 16, 64]
h = transpose_conv2d_layer(h, 80, kernel_size=2, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_1')#, coordconv=self.coordconv)
if self.interpolate:
h = upsample(h, 4*2*2)
h = tf.concat([h, h4],3)
# [None, 16, 16, 64] --> [None, 32, 32, 32]
if self.symmetric:
h = transpose_inception_v3(h, 80, stride=stride, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_tincept_2')
else:
h = transpose_inception_v3(h, 80, stride=stride, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_tincept_2')
if self.interpolate:
h = upsample(h, 4*2*2*2)
h = tf.concat([h, h3],3)
# [None, 32, 32, 32] --> [None, 64, 64, 16]
h_m = transpose_conv2d_layer(h, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=1, training=training, reuse=reuse, name='d_tconv_2_1')#, coordconv=self.coordconv)
h_m = transpose_conv2d_layer(h_m, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_2_2')
h_s = transpose_conv2d_layer(h, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=1, training=training, reuse=reuse, name='d_tconv_2_1_s')#, coordconv=self.coordconv)
h_s = transpose_conv2d_layer(h_s, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_2_2_s')
if self.interpolate:
h_m = upsample(h_m, 4*2*2*2*2)
h_s = upsample(h_s, 4*2*2*2*2)
#h = tf.concat([h, h1],3)
# [None, 64, 64, 16] --> [None, 128, 128, 1]
s = transpose_conv2d_layer(h_s, 1, kernel_size=6, batch_norm=False, stride=2, activation=None,
add_bias=False, training=training, reuse=reuse, name='d_tconv_3_s')
h = transpose_conv2d_layer(h_m, 1, kernel_size=6, batch_norm=False, stride=2, activation=None,
add_bias=False, training=training, reuse=reuse, name='d_tconv_3_m')
# Assign name to final output
return tf.identity(h, name=name), s
# Evaluate model on specified batch of data
def evaluate_model(self, data, reuse=None, training=True, suffix=None):
# Encode input images
z = self.encoder(self, data, training=training, reuse=reuse, name=add_suffix("encoder", suffix))
# Sample in latent spaces
if self.use_kl:
h1, h2, h3, h4, h5, h6 = z
m, log_s = tf.split(h6, num_or_size_splits=2, axis=3)
h6 = self.sampleGaussian(m, log_s, name='latent_sample')
h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
z = [h1, h2, h3, h4, h5, h6]
#if self.reduce_noise:
# # Use KL divergence w.r.t. N(0, 0.1*I)
# # by comparing with 10*sigma ~ log(10*sigma) ~ log(10) + log(sigma)
# kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,tf.add(10.0*tf.ones_like(log_s),log_s))])
#else:
# kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,log_s)])
kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,log_s)])
else:
h1, h2, h3, h4, h5, h6 = z
h6 = tf.nn.leaky_relu(h6)
z = [h1, h2, h3, h4, h5, h6]
        # No latent sampling in this branch; the KL term reduces to the constant weight
kl_loss = self.kl_wt
# Decode latent vector back to original image
pred = self.decoder(self, z, training=training, reuse=reuse, name=add_suffix("pred", suffix))
# Compute marginal likelihood loss
masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, prob_loss = self.compute_ms_loss(data, pred, name=add_suffix("ms_loss", suffix))
# Assign names to outputs
masked_soln = tf.identity(masked_soln, name=add_suffix('masked_soln', suffix))
masked_pred = tf.identity(masked_pred, name=add_suffix('masked_pred', suffix))
masked_scale = tf.identity(masked_scale, name=add_suffix('masked_scale', suffix))
return masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, kl_loss, prob_loss
|
1629515
|
import os
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
import gym
import time
from agents.actor_critic_agents.A2C import A2C
from agents.DQN_agents.Dueling_DDQN import Dueling_DDQN
from agents.actor_critic_agents.SAC_Discrete import SAC_Discrete
from agents.actor_critic_agents.A3C import A3C
from agents.policy_gradient_agents.PPO import PPO
from agents.Trainer import Trainer
from utilities.data_structures.Config import Config
from agents.DQN_agents.DDQN import DDQN
from agents.DQN_agents.DDQN_With_Prioritised_Experience_Replay import DDQN_With_Prioritised_Experience_Replay
from agents.DQN_agents.DQN import DQN
from agents.DQN_agents.DQN_With_Fixed_Q_Targets import DQN_With_Fixed_Q_Targets
from agents.policy_gradient_agents.REINFORCE import REINFORCE
## envs import ##
from environments.carla_enviroments import env_v1_ObstacleAvoidance
env_title = "ObstacleAvoidance-v0"
config = Config()
config.env_title = env_title
config.seed = 1
config.environment = gym.make(env_title)
config.num_episodes_to_run = 2000
config.show_solution_score = False
config.visualise_individual_results = True
config.visualise_overall_agent_results = True
config.standard_deviation_results = 1.0
config.runs_per_agent = 1
config.use_GPU = True
config.overwrite_existing_results_file = False
config.randomise_random_seed = True
config.save_model = True
config.log_loss = False
config.log_base = time.strftime("%Y%m%d%H%M%S", time.localtime())
config.save_model_freq = 300 ## save model per 300 episodes
config.retrain = True
config.resume = False
config.resume_path = 'E:\\reinforcement-learning-based-driving-decision-in-Carla\\results\Models\ObstacleAvoidance-v0\DDQN with Prioritised Replay\\20200611150242\\rolling_score_68.0417.model'
config.backbone_pretrain = False
config.force_explore_mode = True
config.force_explore_stare_e = 0.2 ## when the std of the rolling score over the last 10-episode window is smaller than this value, start explore mode
config.force_explore_rate = 0.95 ## only force explore when the current score is bigger than force_explore_rate * max(rolling score[-10:])
## data and graphs save dir ##
data_results_root = os.path.join(os.path.dirname(__file__)+"/data_and_graphs/carla_obstacle_avoidance", config.log_base)
while os.path.exists(data_results_root):
data_results_root += '_'
os.makedirs(data_results_root)
config.file_to_save_data_results = os.path.join(data_results_root, "data.pkl")
config.file_to_save_results_graph = os.path.join(data_results_root, "data.png")
config.hyperparameters = {
"DQN_Agents": {
"learning_rate": 1e-1,
"batch_size": 256,
"buffer_size": 20000,
"epsilon": 1.0,
"epsilon_decay_rate_denominator": 1.,
"discount_rate": 0.9,
"tau": 0.01,
"alpha_prioritised_replay": 0.6,
"beta_prioritised_replay": 0.1,
"incremental_td_error": 1e-8,
"update_every_n_steps": 1,
"linear_hidden_units": [32, 108, 296, 108, 32],
"final_layer_activation": "None",
"batch_norm": True,
"gradient_clipping_norm": 0.1,
"learning_iterations": 1,
"clip_rewards": False
},
"Stochastic_Policy_Search_Agents": {
"policy_network_type": "Linear",
"noise_scale_start": 1e-2,
"noise_scale_min": 1e-3,
"noise_scale_max": 2.0,
"noise_scale_growth_factor": 2.0,
"stochastic_action_decision": False,
"num_policies": 10,
"episodes_per_policy": 1,
"num_policies_to_keep": 5,
"clip_rewards": False
},
"Policy_Gradient_Agents": {
"learning_rate": 0.05,
"linear_hidden_units": [64, 128, 64, 32],
"final_layer_activation": "SOFTMAX",
"learning_iterations_per_round": 5,
"discount_rate": 0.99,
"batch_norm": False,
"clip_epsilon": 0.1,
"episodes_per_learning_round": 4,
"normalise_rewards": True,
"gradient_clipping_norm": 7.0,
"mu": 0.0, #only required for continuous action games
"theta": 0.0, #only required for continuous action games
"sigma": 0.0, #only required for continuous action games
"epsilon_decay_rate_denominator": 1.0,
"clip_rewards": False
},
"Actor_Critic_Agents": {
"learning_rate": 0.005,
"linear_hidden_units": [20, 10],
"final_layer_activation": ["SOFTMAX", None],
"gradient_clipping_norm": 5.0,
"discount_rate": 0.99,
"epsilon_decay_rate_denominator": 1.0,
"normalise_rewards": True,
"exploration_worker_difference": 2.0,
"clip_rewards": False,
"Actor": {
"learning_rate": 0.0003,
"linear_hidden_units": [64, 64],
"final_layer_activation": "Softmax",
"batch_norm": False,
"tau": 0.005,
"gradient_clipping_norm": 5,
"initialiser": "Xavier"
},
"Critic": {
"learning_rate": 0.0003,
"linear_hidden_units": [64, 64],
"final_layer_activation": None,
"batch_norm": False,
"buffer_size": 1000000,
"tau": 0.005,
"gradient_clipping_norm": 5,
"initialiser": "Xavier"
},
"min_steps_before_learning": 400,
"batch_size": 256,
"discount_rate": 0.99,
"mu": 0.0, #for O-H noise
"theta": 0.15, #for O-H noise
"sigma": 0.25, #for O-H noise
"action_noise_std": 0.2, # for TD3
"action_noise_clipping_range": 0.5, # for TD3
"update_every_n_steps": 1,
"learning_updates_per_learning_session": 1,
"automatically_tune_entropy_hyperparameter": True,
"entropy_term_weight": None,
"add_extra_noise": False,
"do_evaluation_iterations": True
}
}
if __name__ == "__main__":
# AGENTS = [SAC_Discrete, DDQN, Dueling_DDQN, DQN, DQN_With_Fixed_Q_Targets,
# DDQN_With_Prioritised_Experience_Replay, A2C, PPO, A3C ]
AGENTS = [DQN_With_Fixed_Q_Targets]
trainer = Trainer(config, AGENTS)
trainer.run_games_for_agents()
pass
|
1629516
|
import numpy as np
from yt.testing import assert_allclose_units, fake_random_ds, requires_file
from yt.units import cm, s # type: ignore
from yt.utilities.answer_testing.framework import data_dir_load
from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection
def random_unit_vector(prng):
v = prng.random_sample(3)
while (v == 0).all():
v = prng.random_sample(3)
return v / np.sqrt((v ** 2).sum())
def random_velocity_vector(prng):
return 2e5 * prng.random_sample(3) - 1e5
def compare_vector_conversions(data_source):
prng = np.random.RandomState(8675309)
normals = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + [
random_unit_vector(prng) for i in range(2)
]
bulk_velocities = [random_velocity_vector(prng) for i in range(2)]
for bv in bulk_velocities:
bulk_velocity = bv * cm / s
data_source.set_field_parameter("bulk_velocity", bulk_velocity)
data_source.clear_data()
vmag = data_source[("gas", "velocity_magnitude")]
vrad = data_source[("gas", "velocity_spherical_radius")]
for normal in normals:
data_source.set_field_parameter("normal", normal)
data_source.clear_data()
assert_allclose_units(
vrad, data_source[("gas", "velocity_spherical_radius")]
)
vmag_new = data_source[("gas", "velocity_magnitude")]
assert_allclose_units(vmag, vmag_new)
vmag_cart = np.sqrt(
(data_source[("gas", "velocity_x")] - bulk_velocity[0]) ** 2
+ (data_source[("gas", "velocity_y")] - bulk_velocity[1]) ** 2
+ (data_source[("gas", "velocity_z")] - bulk_velocity[2]) ** 2
)
assert_allclose_units(vmag, vmag_cart)
vmag_cyl = np.sqrt(
data_source[("gas", "velocity_cylindrical_radius")] ** 2
+ data_source[("gas", "velocity_cylindrical_theta")] ** 2
+ data_source[("gas", "velocity_cylindrical_z")] ** 2
)
assert_allclose_units(vmag, vmag_cyl)
vmag_sph = np.sqrt(
data_source[("gas", "velocity_spherical_radius")] ** 2
+ data_source[("gas", "velocity_spherical_theta")] ** 2
+ data_source[("gas", "velocity_spherical_phi")] ** 2
)
assert_allclose_units(vmag, vmag_sph)
for i, d in enumerate("xyz"):
assert_allclose_units(
data_source[("gas", f"velocity_{d}")] - bulk_velocity[i],
data_source[("gas", f"relative_velocity_{d}")],
)
for i, ax in enumerate("xyz"):
data_source.set_field_parameter("axis", i)
data_source.clear_data()
assert_allclose_units(
data_source[("gas", "velocity_los")],
data_source[("gas", f"relative_velocity_{ax}")],
)
for i, ax in enumerate("xyz"):
prj = data_source.ds.proj(
("gas", "velocity_los"), i, weight_field=("gas", "density")
)
assert_allclose_units(
prj[("gas", "velocity_los")], prj[("gas", f"velocity_{ax}")]
)
data_source.clear_data()
ax = [0.1, 0.2, -0.3]
data_source.set_field_parameter("axis", ax)
ax /= np.sqrt(np.dot(ax, ax))
vlos = data_source[("gas", "relative_velocity_x")] * ax[0]
vlos += data_source[("gas", "relative_velocity_y")] * ax[1]
vlos += data_source[("gas", "relative_velocity_z")] * ax[2]
assert_allclose_units(data_source[("gas", "velocity_los")], vlos)
buf_los = off_axis_projection(
data_source,
data_source.ds.domain_center,
ax,
0.5,
128,
("gas", "velocity_los"),
weight=("gas", "density"),
)
buf_x = off_axis_projection(
data_source,
data_source.ds.domain_center,
ax,
0.5,
128,
("gas", "relative_velocity_x"),
weight=("gas", "density"),
)
buf_y = off_axis_projection(
data_source,
data_source.ds.domain_center,
ax,
0.5,
128,
("gas", "relative_velocity_y"),
weight=("gas", "density"),
)
buf_z = off_axis_projection(
data_source,
data_source.ds.domain_center,
ax,
0.5,
128,
("gas", "relative_velocity_z"),
weight=("gas", "density"),
)
vlos = buf_x * ax[0] + buf_y * ax[1] + buf_z * ax[2]
assert_allclose_units(buf_los, vlos, rtol=1.0e-6)
def test_vector_component_conversions_fake():
ds = fake_random_ds(16)
ad = ds.all_data()
compare_vector_conversions(ad)
g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@requires_file(g30)
def test_vector_component_conversions_real():
ds = data_dir_load(g30)
sp = ds.sphere(ds.domain_center, (10, "kpc"))
compare_vector_conversions(sp)
|
1629536
|
import random
import networkx as nx
from utils.link_prediction import link_prediction
class Graph:
def __init__(self):
self.g = nx.Graph()
def set_graph(self, g):
self.g = g
def get_graph(self):
return self.g
def load_graph(self, edge_list, directed=False):
        self.g = nx.read_edgelist(edge_list, delimiter=',', create_using=nx.DiGraph() if directed else nx.Graph())
def link_predict(self, params, classifier='MLP'):
return link_prediction.predict(self.g, params, classifier)
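# --- Usage sketch (added for illustration; "edges.csv" is an assumed file of "u,v" lines,
# and the params/classifier arguments to link_predict depend on the link_prediction module).
if __name__ == "__main__":
    demo_graph = Graph()
    demo_graph.load_graph("edges.csv")
    print(demo_graph.get_graph().number_of_nodes())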
|
1629553
|
from typing import Type
from starlette import status
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import JSONResponse, Response
from starlette.testclient import TestClient
from starlette_context import plugins
from starlette_context.header_keys import HeaderKeys
from starlette_context.middleware import (
ContextMiddleware,
RawContextMiddleware,
)
def gen_middleware_config(
middleware_class: Type, response: Response
) -> TestClient:
middleware = [
Middleware(
middleware_class,
plugins=(plugins.RequestIdPlugin(error_response=response),),
)
]
app = Starlette(middleware=middleware)
client = TestClient(app)
return client
def test_invalid_request_id_returns_specified_response_raw_middleware():
content = {"Error": "Invalid X-Request-ID"}
response = JSONResponse(
content=content, status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
)
client = gen_middleware_config(RawContextMiddleware, response)
response = client.get("/", headers={HeaderKeys.request_id: "invalid_uuid"})
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
assert HeaderKeys.request_id not in response.headers
body = response.json()
assert body == content
def test_invalid_request_id_returns_specified_response_context_middleware():
content = {"Error": "Invalid X-Request-ID"}
response = JSONResponse(
content=content, status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
)
client = gen_middleware_config(ContextMiddleware, response)
response = client.get("/", headers={HeaderKeys.request_id: "invalid_uuid"})
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
assert HeaderKeys.request_id not in response.headers
body = response.json()
assert body == content
def test_invalid_request_id_unspecified_response_raw_middleware():
client = gen_middleware_config(RawContextMiddleware, None)
response = client.get("/", headers={HeaderKeys.request_id: "invalid_uuid"})
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert HeaderKeys.request_id not in response.headers
assert response.content == b""
|
1629593
|
from django.conf.urls.defaults import *
urlpatterns = patterns('pypy.translator.js.examples.djangoping.views',
(r"^ping.js$", "ping_js"),
(r"^ping/$", "ping"),
(r"^$", "index"),
)
|
1629605
|
import os
import os.path
import signal
import subprocess
import time
import psutil
import pytest
PYTEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.dirname(PYTEST_DIR)
RSDS_BIN = os.path.join(ROOT, "target", "debug", "rsds-scheduler")
RSDS_WORKER_BIN = os.path.join(ROOT, "target", "debug", "rsds-worker")
def check_free_port(port):
assert isinstance(port, int)
for conn in psutil.net_connections('tcp'):
if conn.laddr.port == port and conn.status == "LISTEN":
return False
return True
class Env:
def __init__(self, work_path):
self.processes = []
self.cleanups = []
self.work_path = work_path
def start_process(self, name, args, env=None, catch_io=True):
logfile = (self.work_path / name).with_suffix(".out")
if catch_io:
with open(logfile, "w") as out:
p = subprocess.Popen(args,
preexec_fn=os.setsid,
stdout=out,
stderr=subprocess.STDOUT,
cwd=self.work_path,
env=env)
else:
p = subprocess.Popen(args,
cwd=str(self.work_path),
env=env)
self.processes.append((name, p))
return p
def kill_all(self):
for fn in self.cleanups:
fn()
for n, p in self.processes:
# Kill the whole group since the process may spawn a child
            if p.poll() is None:  # poll() returns None while the process is still running
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
class RsdsEnv(Env):
default_listen_port = 17070
def __init__(self, work_dir):
Env.__init__(self, work_dir)
self.server = None
self.workers = {}
self.id_counter = 0
self.do_final_check = True
def no_final_check(self):
self.do_final_check = False
def start_worker(self, ncpus, port, rsds_worker):
worker_id = self.id_counter
self.id_counter += 1
name = "worker{}".format(worker_id)
env = os.environ.copy()
env["PYTHONPATH"] = PYTEST_DIR + ":" + env.get("PYTHONPATH", "")
if rsds_worker:
env["RUST_BACKTRACE"] = "FULL"
program = RSDS_WORKER_BIN
else:
program = "dask-worker"
args = [program, "localhost:{}".format(port), "--nthreads", str(ncpus)]
self.workers[name] = self.start_process(name, args, env)
def start(self,
workers=(),
port=None,
scheduler=None,
rsds_worker=False):
print("Starting rsds env in ", self.work_path)
"""
Start infrastructure: server & n governors
"""
if self.server:
raise Exception("Server is already running")
port = port or self.default_listen_port
if not check_free_port(port):
raise Exception("Trying to spawn server on port {}, but it is not free".format(port))
env = os.environ.copy()
env["RUST_LOG"] = "trace"
env["RUST_BACKTRACE"] = "FULL"
args = [RSDS_BIN, "--port", str(port)]
if scheduler:
args += ["--scheduler", scheduler]
self.server = self.start_process("server", args, env=env)
assert self.server is not None
it = 0
while check_free_port(port):
time.sleep(0.05)
self.check_running_processes()
it += 1
if it > 100:
raise Exception("Server not started after 5")
for cpus in workers:
self.start_worker(cpus, port=port, rsds_worker=rsds_worker)
        time.sleep(0.2)  # TODO: Replace with a real check that the worker is ready
self.check_running_processes()
return "127.0.0.1:{}".format(port)
def check_running_processes(self):
"""Checks that everything is still running"""
for worker_name, worker in self.workers.items():
if worker.poll() is not None:
raise Exception(
"{0} crashed (log in {1}/{0}.out)".format(worker_name, self.work_path))
if self.server and self.server.poll() is not None:
self.server = None
raise Exception(
"Server crashed (log in {}/server.out)".format(self.work_path))
def new_client(self):
pass
def final_check(self):
pass
def close(self):
pass
@pytest.yield_fixture(autouse=False, scope="function")
def rsds_env(tmp_path):
"""Fixture that allows to start Rain test environment"""
os.chdir(tmp_path)
env = RsdsEnv(tmp_path)
yield env
time.sleep(0.2)
try:
env.final_check()
env.check_running_processes()
finally:
env.close()
env.kill_all()
    # Final sleep to let the server port be freed; on some slow computers
    # a new test is started before the old server is properly cleaned up
time.sleep(0.02)
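# --- Usage sketch (added for illustration; the worker counts are assumptions).
# A test receives the fixture, starts the scheduler plus workers, and gets back the
# address to connect a client to, e.g.:
#
#     def test_scheduler_starts(rsds_env):
#         address = rsds_env.start(workers=[1, 1])
#         assert address.startswith("127.0.0.1:")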
|
1629613
|
from django.core.urlresolvers import reverse
from django.test import TestCase
import json
from myshop.models import Product
from myshop.models.manufacturer import Manufacturer
class ProductSelectViewTest(TestCase):
def setUp(self):
manufacturer = Manufacturer.objects.create(name="testmanufacturer")
Product.objects.create(product_name="testproduct1", order=1, manufacturer=manufacturer)
def test_finds_product_case_insensitive(self):
response = self.client.get(reverse('shop:select-product') + "?term=Prod")
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data['count'], 1)
self.assertEqual(data['results'][0]['text'], "testproduct1")
def test_bogus_query_finds_nothing(self):
response = self.client.get(reverse('shop:select-product') + "?term=whatever")
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data['count'], 0)
|
1629640
|
from stanpyro.distributions import *
from stanpyro.dppllib import sample, param, observe, factor, array, zeros, ones, empty, matmul, true_divide, floor_divide, transpose, dtype_long, dtype_float, vmap
def convert_inputs(inputs):
return { }
def model():
# Parameters
cluster = sample('cluster', improper_uniform(shape=[]))
theta = sample('theta', improper_uniform(shape=[]))
# Model
observe('_cluster__1', normal(0, 1), cluster)
mu = None
if cluster > 0:
mu = 2
else:
mu = 0
observe('_theta__2', normal(mu, 1), theta)
def parameters_info():
return { 'cluster': { 'shape': [] },'theta': { 'shape': [] }, }
|
1629656
|
from alibi_detect.base import BaseDetector, outlier_prediction_dict
import numpy as np
from unittest import TestCase
from adserver.od_model import AlibiDetectOutlierModel
from adserver.constants import HEADER_RETURN_INSTANCE_SCORE
from typing import Dict
from adserver.base import ModelResponse
class DummyODModel(BaseDetector):
def __init__(
self,
expected_return_instance_score: bool = False,
expected_return_feature_score: bool = False,
expect_return_is_outlier: int = 0,
):
super().__init__()
self.expected_return_instance_score = expected_return_instance_score
self.expected_return_feature_score = expected_return_feature_score
self.expect_return_is_outlier = expect_return_is_outlier
def score(self, X: np.ndarray):
pass
def predict(
self,
X: np.ndarray,
outlier_type: str = "instance",
outlier_perc: float = 100.0,
batch_size: int = int(1e10),
return_feature_score: bool = True,
return_instance_score: bool = True,
) -> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
assert return_instance_score == self.expected_return_instance_score
assert return_feature_score == self.expected_return_feature_score
od = outlier_prediction_dict()
od["data"]["is_outlier"] = self.expect_return_is_outlier
return od
class TestODModel(TestCase):
def test_basic(self):
model = DummyODModel()
od_model = AlibiDetectOutlierModel("name", "s3://model", model=model)
req = [1, 2]
headers = {}
res: ModelResponse = od_model.process_event(req, headers)
        self.assertIsNotNone(res)
self.assertEqual(res.data["data"]["is_outlier"], 0)
def test_no_return_instance_score(self):
expect_return_is_outlier = 1
model = DummyODModel(
expected_return_instance_score=False,
expect_return_is_outlier=expect_return_is_outlier,
)
ad_model = AlibiDetectOutlierModel("name", "s3://model", model=model)
req = [1, 2]
headers = {HEADER_RETURN_INSTANCE_SCORE: "false"}
res: ModelResponse = ad_model.process_event(req, headers)
        self.assertIsNotNone(res)
self.assertEqual(res.data["data"]["is_outlier"], expect_return_is_outlier)
|
1629659
|
from leapp.actors import Actor
from leapp.libraries.common.rpms import get_installed_rpms
from leapp.models import LeftoverPackages, TransactionCompleted, InstalledUnsignedRPM, RPM
from leapp.tags import RPMUpgradePhaseTag, IPUWorkflowTag
class CheckLeftoverPackages(Actor):
"""
Check if there are any RHEL 7 packages present after upgrade.
    The actor produces a message containing these packages. The message is empty if there are no el7 packages left.
"""
name = 'check_leftover_packages'
consumes = (TransactionCompleted, InstalledUnsignedRPM)
produces = (LeftoverPackages,)
tags = (RPMUpgradePhaseTag, IPUWorkflowTag)
def process(self):
LEAPP_PACKAGES = ['leapp', 'leapp-repository', 'snactor', 'leapp-repository-deps-el8', 'leapp-deps-el8',
'python2-leapp']
installed_rpms = get_installed_rpms()
if not installed_rpms:
return
to_remove = LeftoverPackages()
unsigned = [pkg.name for pkg in next(self.consume(InstalledUnsignedRPM), InstalledUnsignedRPM()).items]
for rpm in installed_rpms:
rpm = rpm.strip()
if not rpm:
continue
name, version, release, epoch, packager, arch, pgpsig = rpm.split('|')
if 'el7' in release and name not in set(unsigned + LEAPP_PACKAGES):
to_remove.items.append(RPM(
name=name,
version=version,
epoch=epoch,
packager=packager,
arch=arch,
release=release,
pgpsig=pgpsig
))
self.produce(to_remove)
|
1629674
|
from python_pachyderm.service import Service, enterprise_proto
class EnterpriseMixin:
"""A mixin for enterprise-related functionality."""
def activate_enterprise(self, license_server: str, id: str, secret: str) -> None:
"""Activates enterprise by registering with a license server.
Parameters
----------
license_server : str
The Pachyderm Enterprise Server to register with.
id : str
The unique ID for this cluster.
secret : str
The secret for registering this cluster.
"""
self._req(
Service.ENTERPRISE,
"Activate",
license_server=license_server,
id=id,
secret=secret,
)
def get_enterprise_state(self) -> enterprise_proto.GetStateResponse:
"""Gets the current enterprise state of the cluster.
Returns
-------
enterprise_proto.GetStateResponse
A protobuf object that returns a state enum, info on the token,
and an empty activation code.
"""
return self._req(Service.ENTERPRISE, "GetState")
def deactivate_enterprise(self) -> None:
"""Deactivates enterprise."""
self._req(Service.ENTERPRISE, "Deactivate")
def get_activation_code(self) -> enterprise_proto.GetActivationCodeResponse:
"""Returns the enterprise code used to activate Pachdyerm Enterprise in
this cluster.
Returns
-------
enterprise_proto.GetActivationCodeResponse
A protobuf object that returns a state enum, info on the token,
and the activation code.
"""
return self._req(Service.ENTERPRISE, "GetActivationCode")
|
1629684
|
import boto3
import json
import random
import time
from datetime import datetime, timedelta
from apps import App, Humana, Evidation
import events
import csv
"""
Synthetic generator code for a day's worth of data
"""
apps = [(Humana(), range(0,60)), (Evidation(), range(55,85))]
benes = []
with open('benes.csv') as bene_file:
reader = csv.DictReader(bene_file)
for row in reader:
benes.append(row)
firehose = boto3.client('firehose')
start_time = datetime.utcnow()
sessions = []
for app in apps:
for bene in benes:
if int(bene['random']) in app[1]:
session_time = start_time + timedelta(seconds=random.uniform(10.0, 10*60*60))
session_messages = []
for message in app[0].generate_session(session_time, bene):
session_messages.append(message)
sessions.append({"time": session_time, "messages": session_messages})
sessions.sort(key = lambda s: s["time"])
for session in sessions:
for message in session["messages"]:
event = {
"instance_id": "i-000000000000000",
"image_id": "ami-00000000000000",
"component": "bb2.web",
"vpc": "bluebutton-prod-sim",
"log_name": "audit.hhs_oauth_server.request_logging",
"message": message,
}
print(json.dumps(event))
firehose.put_record(DeliveryStreamName='bfd-insights-bb2-events', Record={'Data': json.dumps(event) + '\n'})
|
1629709
|
import pyforms
from pyforms import BaseWidget
from pyforms.controls import ControlPlayer
class VideoWindow(BaseWidget):
def __init__(self):
BaseWidget.__init__(self, 'Video window')
self._player = ControlPlayer('Player')
self.formset = ['_player']
if __name__ == "__main__": pyforms.start_app(VideoWindow)
|
1629726
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from PooledEffect import PooledEffect
from EffectController import EffectController
import random
class ExplosionTip(PooledEffect, EffectController):
NUM_PARTS = 10
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
self.size = 4.0
self.speed = 0.8
self.speedSpread = 0.4
self.parts = []
for i in range(self.NUM_PARTS):
explosion = loader.loadModel('models/effects/dirt_trail')
explosion.setDepthWrite(0)
explosion.reparentTo(self)
explosion.hide()
self.parts.append(explosion)
def createTrack(self):
subTracks = Parallel()
for i in range(len(self.parts)):
self.parts[i].setScale(1.0)
self.parts[i].setColorScale(1, 1, 1, 1)
self.parts[i].setPos(0, 0, 0)
self.parts[i].setHpr(i * (360 / self.NUM_PARTS), 15, 0)
speed = self.speed + random.uniform(0.0, self.speedSpread)
fadeBlast = self.parts[i].colorScaleInterval(speed * 0.5, Vec4(0, 0, 0, 0))
waitFade = Sequence(Wait(speed), fadeBlast)
scaleBlast = self.parts[i].scaleInterval(speed, self.size, blendType='easeIn')
moveBlast = self.parts[i].posInterval(speed, Vec3(1, 0, 4), startPos=Vec3(0, 0, 0), blendType='easeOut', other=self.parts[i])
subTracks.append(Parallel(scaleBlast, moveBlast, waitFade))
self.track = Sequence(Func(self.showAll), subTracks, Func(self.hideAll), Func(self.cleanUpEffect))
def hideAll(self):
for part in self.parts:
part.hide()
def showAll(self):
for part in self.parts:
part.show()
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
if self.card:
self.card.removeNode()
self.card = None
EffectController.destroy(self)
PooledEffect.destroy(self)
return
|
1629748
|
from typing import List


class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
i, j = 0, len(nums)-1
k = 0
while k<=j:
if nums[k] == 0:
nums[i], nums[k] = nums[k],nums[i]
i+=1
k+=1
elif nums[k] == 2:
nums[j], nums[k] = nums[k],nums[j]
j-=1
elif nums[k] == 1:
k+=1
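# --- Usage sketch (added for illustration): one pass of the Dutch national flag partition,
# swapping 0s to the front and 2s to the back while 1s stay in the middle.
if __name__ == "__main__":
    demo_nums = [2, 0, 2, 1, 1, 0]
    Solution().sortColors(demo_nums)
    print(demo_nums)  # [0, 0, 1, 1, 2, 2]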
|
1629753
|
import numpy as np
import pandas as pd
import os
import os.path
fold = 4#1#4#3
resep = 143#21#17#39
gbtdepth = 2#3#2#3
neptime = 0.3
testdetp = -2
traindetp = -2
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
resmodelpath = './detcls-'+str(fold)+'-old/ckptgbt.t7'
def iou(box0, box1):
r0 = box0[3] / 2
s0 = box0[:3] - r0
e0 = box0[:3] + r0
r1 = box1[3] / 2
s1 = box1[:3] - r1
e1 = box1[:3] + r1
overlap = []
for i in range(len(s0)): overlap.append(max(0, min(e0[i], e1[i]) - max(s0[i], s1[i])))
intersection = overlap[0] * overlap[1] * overlap[2]
union = box0[3] * box0[3] * box0[3] + box1[3] * box1[3] * box1[3] - intersection
return intersection / union
def nms(output, nms_th):
if len(output) == 0: return output
output = output[np.argsort(-output[:, 0])]
bboxes = [output[0]]
for i in np.arange(1, len(output)):
bbox = output[i]
flag = 1
for j in range(len(bboxes)):
if iou(bbox[1:5], bboxes[j][1:5]) >= nms_th:
flag = -1
break
if flag == 1: bboxes.append(bbox)
bboxes = np.asarray(bboxes, np.float32)
return bboxes
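# --- Illustrative sanity check (added; the boxes below are synthetic examples, not data used
# by this script). A box overlaps itself perfectly, and nms() keeps only one of two
# near-identical detections once their IoU exceeds the threshold.
_demo_box = np.array([10.0, 20.0, 30.0, 8.0])            # x, y, z, diameter
assert abs(iou(_demo_box, _demo_box) - 1.0) < 1e-6
_demo_dets = np.array([[0.9, 10.0, 20.0, 30.0, 8.0],     # prob, x, y, z, d
                       [0.5, 10.5, 20.0, 30.0, 8.0]])
assert nms(_demo_dets, 0.1).shape[0] == 1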
# find the mapping
# load groundtruth
antclscsv = pd.read_csv('/media/data1/wentao/tianchi/luna16/CSVFILES/annotationdetclsconvfnl_v3.csv', \
names=['seriesuid', 'coordX', 'coordY', 'coordZ', 'diameter_mm', 'malignant'])
srslst = antclscsv['seriesuid'].tolist()[1:]
cdxlst = antclscsv['coordX'].tolist()[1:]
cdylst = antclscsv['coordY'].tolist()[1:]
cdzlst = antclscsv['coordZ'].tolist()[1:]
dimlst = antclscsv['diameter_mm'].tolist()[1:]
mlglst = antclscsv['malignant'].tolist()[1:]
gtdct = {}
for idx in xrange(len(srslst)):
vlu = [float(cdxlst[idx]), float(cdylst[idx]), float(cdzlst[idx]), float(dimlst[idx]), int(mlglst[idx])]
if srslst[idx].split('-')[0] not in gtdct: gtdct[srslst[idx].split('-')[0]] = [vlu]
else: gtdct[srslst[idx].split('-')[0]].append(vlu)
tedetpath = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)\
+'/val'+str(resep)+'/predanno'+str(testdetp)+'.csv'
# fid = open(tedetpath, 'r')
prdcsv = pd.read_csv(tedetpath, names=['seriesuid','coordX','coordY','coordZ','probability'])
srslst = prdcsv['seriesuid'].tolist()[1:]
cdxlst = prdcsv['coordX'].tolist()[1:]
cdylst = prdcsv['coordY'].tolist()[1:]
cdzlst = prdcsv['coordZ'].tolist()[1:]
prblst = prdcsv['probability'].tolist()[1:]
# build dict first for each seriesuid
srsdct = {}
for idx in xrange(len(srslst)):
vlu = [cdxlst[idx], cdylst[idx], cdzlst[idx], prblst[idx]]
if srslst[idx] not in srsdct: srsdct[srslst[idx]] = [vlu]
else: srsdct[srslst[idx]].append(vlu)
# pbb path, find the mapping of csv to pbb
pbbpth = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)+'/val'+str(resep)+'/'
rawpth = '/media/data1/wentao/tianchi/luna16/lunaall/'
prppth = '/media/data1/wentao/tianchi/luna16/preprocess/lunaall/'
trudat = {}
tefnmlst = []
tecdxlst = []
tecdylst = []
tecdzlst = []
telablst = []
tedimlst = []
import math
for srs, vlu in srsdct.iteritems():
pbb = np.load(os.path.join(pbbpth, srs+'_pbb.npy'))
lbb = np.load(os.path.join(pbbpth, srs+'_lbb.npy')) # list, x y z d
# sliceim,origin,spacing,isflip = load_itk_image(os.path.join(rawpth, srslst[idx]+'.mhd'))
# origin = np.load(os.path.join(prppth, srslst[idx]+'_origin.npy'))
# spacing = np.load(os.path.join(prppth, srslst[idx]+'_spacing.npy'))
# resolution = np.array([1, 1, 1])
# extendbox = np.load(os.path.join(prppth, srslst[idx]+'_extendbox.npy'))
pbbold = np.array(pbb[pbb[:,0] > testdetp])#detp])
pbb = nms(pbbold, 0.1)
# print pbb.shape, len(vlu)
assert pbb.shape[0] == len(vlu)
kptpbb = np.array(pbb)#[:5, :]) # prob, x, y, z, d
# find the true label
for idx in xrange(kptpbb.shape[0]):
tefnmlst.append(srs)
tecdxlst.append(kptpbb[idx, 1])
tecdylst.append(kptpbb[idx, 2])
tecdzlst.append(kptpbb[idx, 3])
tedimlst.append(kptpbb[idx, 4])
if lbb.shape[0] == 0 or (lbb.shape[0]==1 and abs(lbb[0,0])+abs(lbb[0,1])+abs(lbb[0,2])+abs(lbb[0,3])==0):
kptpbb[idx, 0] = 0
telablst.append(0)
continue
ispos = 0
if srs in gtdct:
for l in gtdct[srs]:
if math.pow(l[0]-kptpbb[idx,1],2.) + math.pow(l[1]-kptpbb[idx,2],2.) + math.pow(l[2]-kptpbb[idx,3],2.) < \
math.pow(max(16., l[3]/2),2.):
kptpbb[idx, 0] = l[4]
telablst.append(l[4])
ispos = 1
break
if ispos == 0:
kptpbb[idx, 0] = 0
telablst.append(0)
trudat[srs] = kptpbb
print(len(telablst), sum(telablst), np.sum(kptpbb[:,0]))
# load train data
tedetpath = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)+\
'/train'+str(resep)+'/predanno'+str(traindetp)+'.csv'
# fid = open(tedetpath, 'r')
prdcsv = pd.read_csv(tedetpath, names=['seriesuid','coordX','coordY','coordZ','probability'])
srslst = prdcsv['seriesuid'].tolist()[1:]
cdxlst = prdcsv['coordX'].tolist()[1:]
cdylst = prdcsv['coordY'].tolist()[1:]
cdzlst = prdcsv['coordZ'].tolist()[1:]
prblst = prdcsv['probability'].tolist()[1:]
# build a dict of predictions for each seriesuid
srsdct = {}
for idx in xrange(len(srslst)):
vlu = [cdxlst[idx], cdylst[idx], cdzlst[idx], prblst[idx]]
if srslst[idx] not in srsdct: srsdct[srslst[idx]] = [vlu]
else: srsdct[srslst[idx]].append(vlu)
# pbb path, find the mapping of csv to pbb
pbbpth = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)+'/train'+str(resep)+'/'
rawpth = '/media/data1/wentao/tianchi/luna16/lunaall/'
prppth = '/media/data1/wentao/tianchi/luna16/preprocess/lunaall/'
trudat = {}
trfnmlst = []
trcdxlst = []
trcdylst = []
trcdzlst = []
trlablst = []
trdimlst = []
import math
for srs, vlu in srsdct.iteritems():
pbb = np.load(os.path.join(pbbpth, srs+'_pbb.npy'))
lbb = np.load(os.path.join(pbbpth, srs+'_lbb.npy')) # list, x y z d
# sliceim,origin,spacing,isflip = load_itk_image(os.path.join(rawpth, srslst[idx]+'.mhd'))
# origin = np.load(os.path.join(prppth, srslst[idx]+'_origin.npy'))
# spacing = np.load(os.path.join(prppth, srslst[idx]+'_spacing.npy'))
# resolution = np.array([1, 1, 1])
# extendbox = np.load(os.path.join(prppth, srslst[idx]+'_extendbox.npy'))
pbbold = np.array(pbb[pbb[:,0] > traindetp])#detp])
pbb = nms(pbbold, 0.1)
# print pbb.shape, len(vlu)
assert pbb.shape[0] == len(vlu)
kptpbb = np.array(pbb)#pbb[:5, :]) # prob, x, y, z, d # :5 is the first version
# find the true label
for idx in xrange(kptpbb.shape[0]):
trfnmlst.append(srs)
trcdxlst.append(kptpbb[idx, 1])
trcdylst.append(kptpbb[idx, 2])
trcdzlst.append(kptpbb[idx, 3])
trdimlst.append(kptpbb[idx, 4])
if lbb.shape[0] == 0 or (lbb.shape[0]==1 and abs(lbb[0,0])+abs(lbb[0,1])+abs(lbb[0,2])+abs(lbb[0,3])==0):
kptpbb[idx, 0] = 0
trlablst.append(0)
continue
ispos = 0
if srs in gtdct:
for l in gtdct[srs]:
if math.pow(l[0]-kptpbb[idx,1],2.) + math.pow(l[1]-kptpbb[idx,2],2.) + math.pow(l[2]-kptpbb[idx,3],2.) < \
math.pow(max(16., l[3]/2),2.):
kptpbb[idx, 0] = l[4]
trlablst.append(l[4])
ispos = 1
break
if ispos == 0:
kptpbb[idx, 0] = 0
trlablst.append(0)
trudat[srs] = kptpbb
print(len(trlablst), sum(trlablst), np.sum(kptpbb[:,0]))
# save the data - later
# run test
import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import transforms as transforms
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from models import *
from utils import progress_bar
from torch.autograd import Variable
import numpy as np
criterion = nn.CrossEntropyLoss()
CROPSIZE = 17
blklst = []
# blklst = ['1.3.6.1.4.1.14519.5.2.1.6279.6001.121993590721161347818774929286-388', \
# '1.3.6.1.4.1.14519.5.2.1.6279.6001.121993590721161347818774929286-389', \
# '1.3.6.1.4.1.14519.5.2.1.6279.6001.132817748896065918417924920957-660']
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
best_acc_gbt = 0
# start_epoch = 50 # start from epoch 0 or last checkpoint epoch
# Cal mean std
preprocesspath = '/media/data1/wentao/tianchi/luna16/cls/crop_v3/'
preprocessallpath = '/media/data1/wentao/tianchi/luna16/preprocess/lunaall/'
pixvlu, npix = 0, 0
for fname in os.listdir(preprocesspath):
if fname.endswith('.npy'):
if fname[:-4] in blklst: continue
data = np.load(os.path.join(preprocesspath, fname))
pixvlu += np.sum(data)
npix += np.prod(data.shape)
pixmean = pixvlu / float(npix)
pixvlu = 0
for fname in os.listdir(preprocesspath):
if fname.endswith('.npy'):
if fname[:-4] in blklst: continue
data = np.load(os.path.join(preprocesspath, fname))-pixmean
pixvlu += np.sum(data * data)
pixstd = np.sqrt(pixvlu / float(npix))
# pixstd /= 255
print(pixmean, pixstd)
print('mean '+str(pixmean)+' std '+str(pixstd))
# Datatransforms
print('==> Preparing data..') # Random Crop, Zero out, x z flip, scale,
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((pixmean), (pixstd)),
])
transform_train = transforms.Compose([
# transforms.RandomScale(range(28, 38)),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomYFlip(),
transforms.RandomZFlip(),
transforms.ZeroOut(4),
transforms.ToTensor(),
transforms.Normalize((pixmean), (pixstd)), # need to cal mean and std, revise norm func
])
from dataloadernp import lunanod
import pandas as pd
import logging
# fold = 1
# gbtdepth = 3
savemodelpath = './detcls-'+str(fold)+'new/'
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
logging.basicConfig(filename=savemodelpath+'detclslog-'+str(fold), level=logging.INFO)
mxx = mxy = mxz = mxd = 0
tefnamelst = []
telabellst = []
tefeatlst = []
trfnamelst = []
trlabellst = []
trfeatlst = []
for srsid, label, x, y, z, d in zip(tefnmlst, telablst, tecdxlst, tecdylst, tecdzlst, tedimlst):
mxx = max(abs(float(x)), mxx)
mxy = max(abs(float(y)), mxy)
mxz = max(abs(float(z)), mxz)
mxd = max(abs(float(d)), mxd)
if srsid in blklst: continue
# crop raw pixel as feature
data = np.load(os.path.join(preprocessallpath, srsid+'_clean.npy'))
# print data.shape
bgx = int(min(data.shape[1],max(0,x-CROPSIZE/2)))
bgy = int(min(data.shape[2],max(0,y-CROPSIZE/2)))
bgz = int(min(data.shape[3],max(0,z-CROPSIZE/2)))
data0 = np.array(data[0,bgx:bgx+CROPSIZE, bgy:bgy+CROPSIZE, bgz:bgz+CROPSIZE])
# print data0.shape
data1 = np.ones((CROPSIZE, CROPSIZE, CROPSIZE)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
# print data1.shape
feat = np.hstack((np.reshape(data1, (-1,)) / 255, float(d)))
# if srsid.split('-')[0] in teidlst:
bgx = int(min(data.shape[1],max(0,x-32/2)))
bgy = int(min(data.shape[2],max(0,y-32/2)))
bgz = int(min(data.shape[3],max(0,z-32/2)))
data0 = np.array(data[0,bgx:bgx+32, bgy:bgy+32, bgz:bgz+32])
# print data0.shape
data1 = np.ones((32, 32, 32)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
tefnamelst.append(data1)
telabellst.append(int(label))
tefeatlst.append(feat)
print(len(telabellst), sum(telabellst))
for srsid, label, x, y, z, d in zip(trfnmlst, trlablst, trcdxlst, trcdylst, trcdzlst, trdimlst):
mxx = max(abs(float(x)), mxx)
mxy = max(abs(float(y)), mxy)
mxz = max(abs(float(z)), mxz)
mxd = max(abs(float(d)), mxd)
if srsid in blklst: continue
# crop raw pixel as feature
data = np.load(os.path.join(preprocessallpath, srsid+'_clean.npy'))
# print data.shape
bgx = int(min(data.shape[1],max(0,x-CROPSIZE/2)))
bgy = int(min(data.shape[2],max(0,y-CROPSIZE/2)))
bgz = int(min(data.shape[3],max(0,z-CROPSIZE/2)))
data0 = np.array(data[0,bgx:bgx+CROPSIZE, bgy:bgy+CROPSIZE, bgz:bgz+CROPSIZE])
# print data0.shape
data1 = np.ones((CROPSIZE, CROPSIZE, CROPSIZE)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
# print data1.shape
feat = np.hstack((np.reshape(data1, (-1,)) / 255, float(d)))
# if srsid.split('-')[0] in teidlst:
bgx = int(min(data.shape[1],max(0,x-32/2)))
bgy = int(min(data.shape[2],max(0,y-32/2)))
bgz = int(min(data.shape[3],max(0,z-32/2)))
data0 = np.array(data[0,bgx:bgx+32, bgy:bgy+32, bgz:bgz+32])
# print data0.shape
data1 = np.ones((32, 32, 32)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
trfnamelst.append(data1)
trlabellst.append(int(label))
trfeatlst.append(feat)
print(len(trlabellst), sum(trlabellst))
for idx in xrange(len(trfeatlst)):
# trfeatlst[idx][0] /= mxx
# trfeatlst[idx][1] /= mxy
# trfeatlst[idx][2] /= mxz
trfeatlst[idx][-1] /= mxd
for idx in xrange(len(tefeatlst)):
# tefeatlst[idx][0] /= mxx
# tefeatlst[idx][1] /= mxy
# tefeatlst[idx][2] /= mxz
tefeatlst[idx][-1] /= mxd
# trainset = lunanod(trfnamelst, trlabellst, trfeatlst, train=False, transform=transform_test)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=False, num_workers=30)
print(len(tefnamelst), sum(telablst), len(trfnamelst), sum(trlablst))
trainset = lunanod(preprocessallpath, trfnamelst, trlabellst, trfeatlst, train=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True, num_workers=30)
testset = lunanod(preprocessallpath, tefnamelst, telabellst, tefeatlst, train=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=False, num_workers=30)
checkpoint = torch.load(resmodelpath)#'./checkpoint-1-45/ckpt.t7')
print(checkpoint.keys())
net = DPN92_3D()
net = checkpoint['net']
# neptime = 0.2
def get_lr(epoch):
if epoch < 150*neptime:
lr = 0.1 #args.lr
elif epoch < 300*neptime:
lr = 0.01
else:
lr = 0.001
return lr
if use_cuda:
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = False #True
import pickle
from sklearn.ensemble import GradientBoostingClassifier as gbt
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
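# Training couples two classifiers: the 3D DPN is optimized with cross-entropy,
# while each epoch also collects a per-nodule feature vector (2560-d deep
# feature + flattened 17^3 crop + diameter) on which a GradientBoostingClassifier
# is fit; test() then reports both the network and the GBT accuracies.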
def train(epoch):
logging.info('\nEpoch: '+str(epoch))
net.train()
lr = get_lr(epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
train_loss = 0
correct = 0
total = 0
trainfeat = np.zeros((len(trfnamelst), 2560+CROPSIZE*CROPSIZE*CROPSIZE+1))
trainlabel = np.zeros((len(trfnamelst),))
idx = 0
for batch_idx, (inputs, targets, feat) in enumerate(trainloader):
if use_cuda:
# print(len(inputs), len(targets), len(feat), type(inputs[0]), type(targets[0]), type(feat[0]))
# print(type(targets), type(inputs), len(targets))
# targetarr = np.zeros((len(targets),))
# for idx in xrange(len(targets)):
# targetarr[idx] = targets[idx]
# print((Variable(torch.from_numpy(targetarr)).data).cpu().numpy().shape)
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs, dfeat = net(inputs)
# add feature into the array
# print(torch.stack(targets).data.numpy().shape, torch.stack(feat).data.numpy().shape)
# print((dfeat.data).cpu().numpy().shape)
trainfeat[idx:idx+len(targets), :2560] = np.array((dfeat.data).cpu().numpy())
for i in xrange(len(targets)):
trainfeat[idx+i, 2560:] = np.array((Variable(feat[i]).data).cpu().numpy())
trainlabel[idx+i] = np.array((targets[i].data).cpu().numpy())
idx += len(targets)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
m = gbt(max_depth=gbtdepth, random_state=0)
m.fit(trainfeat, trainlabel)
gbttracc = np.mean(m.predict(trainfeat) == trainlabel)
print('ep '+str(epoch)+' tracc '+str(correct/float(total))+' lr '+str(lr)+' gbtacc '+str(gbttracc))
logging.info('ep '+str(epoch)+' tracc '+str(correct/float(total))+' lr '+str(lr)+' gbtacc '+str(gbttracc))
return m
def test(epoch, m):
global best_acc
global best_acc_gbt
net.eval()
test_loss = 0
correct = 0
total = 0
testfeat = np.zeros((len(tefnamelst), 2560+CROPSIZE*CROPSIZE*CROPSIZE+1))
testlabel = np.zeros((len(tefnamelst),))
dpnpred = np.zeros((len(tefnamelst),))
idx = 0
for batch_idx, (inputs, targets, feat) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs, volatile=True), Variable(targets)
outputs, dfeat = net(inputs)
# add feature into the array
testfeat[idx:idx+len(targets), :2560] = np.array((dfeat.data).cpu().numpy())
loss = criterion(outputs, targets)
test_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
for i in xrange(len(targets)):
testfeat[idx+i, 2560:] = np.array((Variable(feat[i]).data).cpu().numpy())
testlabel[idx+i] = np.array((targets[i].data).cpu().numpy())
dpnpred[idx+i] = np.array((Variable(predicted[i]).data).cpu().numpy())
idx += len(targets)
# print(testlabel.shape, testfeat.shape, testlabel)#, trainfeat[:, 3])
gbtpred = m.predict(testfeat)
np.save(savemodelpath+'gbtpred'+str(epoch)+'.npy', gbtpred)
np.save(savemodelpath+'dpnpred'+str(epoch)+'.npy', dpnpred)
gbtteacc = np.mean(gbtpred == testlabel)
if gbtteacc > best_acc_gbt:
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
pickle.dump(m, open(savemodelpath+'gbtmodel-'+str(fold)+'.sav', 'wb'))
logging.info('Saving gbt ..')
state = {
'net': net.module if use_cuda else net,
'epoch': epoch,
}
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
torch.save(state, savemodelpath+'ckptgbt.t7')
best_acc_gbt = gbtteacc
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
logging.info('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
torch.save(state, savemodelpath+'ckpt.t7')
best_acc = acc
logging.info('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
# if epoch % 50 == 0:
torch.save(state, savemodelpath+'ckpt'+str(epoch)+'.t7')
pickle.dump(m, open(savemodelpath+'gbtmodel-'+str(fold)+'-'+str(epoch)+'.sav', 'wb'))
# best_acc = acc
print('teacc '+str(acc)+' bestacc '+str(best_acc)+' gbttestaccgbt '+str(gbtteacc)+' bestgbt '+str(best_acc_gbt))
logging.info('teacc '+str(acc)+' bestacc '+str(best_acc)+' ccgbt '+str(gbtteacc)+' bestgbt '+str(best_acc_gbt))
for epoch in range(start_epoch, int(start_epoch+350*neptime)):#200):
m = train(epoch)
test(epoch, m)
|
1629755
|
from math import ceil
from math import floor
def closest_even_integer(x):
"""
Computes closest even integer to an input float or integer.
If x is an integer, the output is x+1 if x is odd else x
"""
if not isinstance(x, (int, float)):
raise ValueError(f"Expected {x} to be an integer or a float")
if isinstance(x, int):
if x % 2 == 0:
return x
return x + 1
lower = floor(x)
upper = ceil(x)
# If the closest integer to x is smaller than x
if abs(lower - x) < abs(upper - x):
return lower if lower % 2 == 0 else upper
return upper if upper % 2 == 0 else lower
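
# A few illustrative checks (a sketch of the behaviour documented above); they
# only run if this file is executed directly.
if __name__ == "__main__":
    assert closest_even_integer(4) == 4
    assert closest_even_integer(3) == 4    # odd integers round up
    assert closest_even_integer(2.7) == 2  # 2 is the nearest even integer to 2.7
    assert closest_even_integer(3.2) == 4  # 3.2 is closer to 4 than to 2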
|
1629761
|
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
#
# BreachWarningSelfTest
#
class BreachWarningSelfTest(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "BreachWarningSelfTest"
self.parent.categories = ["Testing.IGT Tests"]
self.parent.dependencies = ["CreateModels", "BreachWarning"]
self.parent.contributors = ["<NAME>, <NAME>, <NAME> (Queen's University)"]
self.parent.helpText = """This is a self test for the breach warning module."""
    self.parent.acknowledgementText = """This work was funded by Cancer Care Ontario and the Ontario Consortium for Adaptive Interventions in Radiation Oncology (OCAIRO)"""
# Add this test to the SelfTest module's list for discovery when the module
# is created. Since this module may be discovered before SelfTests itself,
# create the list if it doesn't already exist.
try:
slicer.selfTests
except AttributeError:
slicer.selfTests = {}
slicer.selfTests['BreachWarningSelfTest'] = self.runTest
def runTest(self):
tester = BreachWarningSelfTestTest()
tester.runTest()
#
# BreachWarningSelfTestWidget
#
class BreachWarningSelfTestWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
#
# BreachWarningSelfTestLogic
#
class BreachWarningSelfTestLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget
"""
def __init__(self):
pass
class BreachWarningSelfTestTest(ScriptedLoadableModuleTest):
"""This is the test case for your scripted module.
"""
def setUp(self):
"""Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_BreachWarningSelfTest1()
def test_BreachWarningSelfTest1(self):
"""Ideally you should have several levels of tests. At the lowest level
    tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
    One of the most important features of a test is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
slicer.util.delayDisplay("Starting the test")
slicer.util.delayDisplay("Create models")
modelNodes = []
createModelsLogic = slicer.modules.createmodels.logic()
sphereRadius = 10.0
sphereModel = createModelsLogic.CreateSphere(sphereRadius) # watched model
toolModel = createModelsLogic.CreateNeedle(50.0, 1.5, 0.0, False)
slicer.util.delayDisplay("Set up module GUI")
mainWindow = slicer.util.mainWindow()
mainWindow.moduleSelector().selectModule('BreachWarning')
bwWidget = slicer.modules.breachwarning.widgetRepresentation()
bwColorPickerButton = slicer.util.findChildren(widget=bwWidget, className='ctkColorPickerButton', name='WarningColorPickerButton')[0]
bwModelNodeCombobox = slicer.util.findChildren(widget=bwWidget, name='ModelNodeComboBox')[0]
bwToolNodeCombobox = slicer.util.findChildren(widget=bwWidget, name='ToolComboBox')[0]
bwModelNodeCombobox.setCurrentNodeID(sphereModel.GetID())
warningColor = (1.0,0.0,0.0)
color = qt.QColor(warningColor[0]*255,warningColor[1]*255,warningColor[2]*255,1)
bwColorPickerButton.setColor(color)
# Transform the sphere somewhere
sphereTransform = slicer.vtkMRMLLinearTransformNode()
sphereTransformMatrix = vtk.vtkMatrix4x4()
sphereTransformMatrix.SetElement(0, 3, 80)
sphereTransformMatrix.SetElement(1, 3, 40)
sphereTransformMatrix.SetElement(2, 3, 30)
sphereTransform.SetMatrixTransformToParent(sphereTransformMatrix)
slicer.mrmlScene.AddNode(sphereTransform)
sphereModel.SetAndObserveTransformNodeID(sphereTransform.GetID())
# Create transforms node hierarchy for the tool
transforms=[]
numberOfTransforms = 3
for i in range(numberOfTransforms):
transforms.append(slicer.vtkMRMLLinearTransformNode())
transformName = "Tool_"+str(i)
transforms[i].SetName(slicer.mrmlScene.GenerateUniqueName(transformName))
slicer.mrmlScene.AddNode(transforms[i])
if i>0:
transforms[i].SetAndObserveTransformNodeID(transforms[i-1].GetID())
# Tool transform is the one at the bottom of the transform hierarchy
# (to make sure transform changes in the middle of the transform hierarchy are used correctly)
toolModel.SetAndObserveTransformNodeID(transforms[-1].GetID())
bwToolNodeCombobox.setCurrentNodeID(transforms[-1].GetID())
# Pick a transform in the middle of the transform hierarchy that we change to simulate tool motion,
# leave the rest of the transforms unchanged
toolToWorldTransform = transforms[1]
transformMatrixOutside = vtk.vtkMatrix4x4()
transformMatrixOutside.DeepCopy(sphereTransformMatrix)
transformMatrixOutside.SetElement(0, 3, transformMatrixOutside.GetElement(0,3) + sphereRadius*2.1)
transformMatrixOutside.SetElement(1, 3, transformMatrixOutside.GetElement(1,3) + sphereRadius*1.3)
transformMatrixOutside.SetElement(2, 3, transformMatrixOutside.GetElement(2, 3) + sphereRadius*3.2)
transformMatrixInside = vtk.vtkMatrix4x4()
transformMatrixInside.DeepCopy(sphereTransformMatrix)
transformMatrixInside.SetElement(0, 3, transformMatrixInside.GetElement(0,3) + sphereRadius*0.1)
transformMatrixInside.SetElement(1, 3, transformMatrixInside.GetElement(1,3) + sphereRadius*0.3)
transformMatrixInside.SetElement(2, 3, transformMatrixInside.GetElement(2,3) + sphereRadius*0.2)
# Start breach warning checks
slicer.util.delayDisplay('Tool is outside the sphere')
toolToWorldTransform.SetMatrixTransformToParent(transformMatrixOutside)
sphereColor = sphereModel.GetDisplayNode().GetColor()
self.assertNotEqual(sphereColor, warningColor)
slicer.util.delayDisplay('Tool is inside the sphere')
toolToWorldTransform.SetMatrixTransformToParent(transformMatrixInside)
sphereColor = sphereModel.GetDisplayNode().GetColor()
self.assertEqual(sphereColor, warningColor)
slicer.util.delayDisplay('Tool is outside the sphere')
toolToWorldTransform.SetMatrixTransformToParent(transformMatrixOutside)
sphereColor = sphereModel.GetDisplayNode().GetColor()
self.assertNotEqual(sphereColor, warningColor)
slicer.util.delayDisplay('Test passed!')
|
1629781
|
class InPlace:
def __init__(self, val):
self.val = val
def __ipow__(self, other):
self.val **= other
return self
def __imul__(self, other):
self.val *= other
return self
def __imatmul__(self, other):
# I guess you could think of an int as a 1x1 matrix
self.val *= other
return self
def __itruediv__(self, other):
self.val /= other
return self
def __ifloordiv__(self, other):
self.val //= other
return self
def __imod__(self, other):
self.val %= other
return self
def __iadd__(self, other):
self.val += other
return self
def __isub__(self, other):
self.val -= other
return self
def __ilshift__(self, other):
self.val <<= other
return self
def __irshift__(self, other):
self.val >>= other
return self
def __iand__(self, other):
self.val &= other
return self
def __ixor__(self, other):
self.val ^= other
return self
def __ior__(self, other):
self.val |= other
return self
i = InPlace(2)
i **= 3
assert i.val == 8
i = InPlace(2)
i *= 2
assert i.val == 4
i = InPlace(2)
i @= 2
assert i.val == 4
i = InPlace(1)
i /= 2
assert i.val == 0.5
i = InPlace(1)
i //= 2
assert i.val == 0
i = InPlace(10)
i %= 3
assert i.val == 1
i = InPlace(1)
i += 1
assert i.val == 2
i = InPlace(2)
i -= 1
assert i.val == 1
i = InPlace(2)
i <<= 3
assert i.val == 16
i = InPlace(16)
i >>= 3
assert i.val == 2
i = InPlace(0b010101)
i &= 0b111000
assert i.val == 0b010000
i = InPlace(0b010101)
i ^= 0b111000
assert i.val == 0b101101
i = InPlace(0b010101)
i |= 0b111000
assert i.val == 0b111101
|
1629793
|
import torch
import numpy as np
import cv2
import tqdm
import os
import json
from pycocotools.mask import *
from src.unet_plus import SE_Res50UNet,SE_Res101UNet
import time
local_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))
TEST_IMG_PATH = '/mnt/jinnan2_round2_test_b_20190424'
NORMAL_LIST_PATH = 'cvfly_normal_list_b.txt'
SUBMIT_PATH = './submit/cvfly_test_b_{}.json'.format(local_time)
SE50_MODEL_PATH = './models/se50/best_fold3_se50.pth'
SE101_MODEL_PATH = './models/se101/best_se101.pth'
def get_models(is_clc = False):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model50 = SE_Res50UNet(6, cls_only=is_clc)
model50.load_state_dict(torch.load(SE50_MODEL_PATH), strict=True)
model50 = model50.to(device)
model50.eval()
model101 = SE_Res101UNet(6,cls_only = is_clc)
model101.load_state_dict(torch.load(SE101_MODEL_PATH), strict=True)
model101 = model101.to(device)
model101.eval()
return model50, model101
def clc_aug(img):
img_list = []
img_list.append(img.copy())
img_list.append(np.flipud(img).copy())
img_list.append(np.fliplr(img).copy())
return img_list
def clc_aug_tensor(img,size = None):
img = cv2.resize(img, size)
assert img.shape[0] == img.shape[1]
img_list = []
img_list.append(img.copy())
img_list.append(np.flipud(img).copy())
img_list.append(np.fliplr(img).copy())
img_array = np.array(img_list)
img_array = torch.from_numpy(img_array).float().permute(0,3,1,2) / 255.
return img_array
def filter_img_tta(img50,
img101,
model50,
model101,
):
with torch.no_grad():
pred50 = model50(img50)
pred101 = model101(img101)
pred = pred50 + pred101
pred = torch.nn.functional.softmax(pred.float(), dim=-1)[0]
prob = pred[0].data.cpu().numpy()
return prob > 0.5
def seg_aug_image(img):
img_list = []
img_list.append(img)
img_list.append(np.flipud(img).copy())
img_list.append(np.fliplr(img).copy())
img_list.append(np.rot90(img, 1).copy())
img_list.append(np.rot90(img, 2).copy())
img_list.append(np.rot90(img, 3).copy())
return img_list
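# seg_restore_mask undoes the test-time augmentations applied by seg_aug_image,
# in the same list order (identity, flipud, fliplr, rot90 k=1/2/3), so that the
# per-augmentation masks can simply be averaged afterwards.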
def seg_restore_mask(img_list):
img_list[0] = img_list[0]
img_list[1] = np.flipud(img_list[1])
img_list[2] = np.fliplr(img_list[2])
img_list[3] = np.rot90(img_list[3], 3)
img_list[4] = np.rot90(img_list[4], 2)
img_list[5] = np.rot90(img_list[5], 1)
return img_list
def seg_decode_mask(mask_list):
mask = mask_list[0]
for i in range(1, len(mask_list)):
mask += mask_list[i]
mask = mask/len(mask_list)
return mask
def seg_aug_image_tensor(img,img_size):
img = cv2.resize(img, img_size)
img_list = []
img_list.append(img)
img_list.append(np.flipud(img).copy())
img_list.append(np.fliplr(img).copy())
img_list.append(np.rot90(img, 1).copy())
img_list.append(np.rot90(img, 2).copy())
img_list.append(np.rot90(img, 3).copy())
img_array = np.array(img_list)
img_array = torch.from_numpy(img_array).float().permute(0,3,1,2) / 255.
return img_array
def seg_aug(img_list, model):
mask_list = []
with torch.no_grad():
for i in range(img_list.shape[0]):
one_img = img_list[i]
one_img = one_img.unsqueeze(0).cuda()
one_img = one_img.cuda()
pred = model(one_img)
pred = pred[0]
preds_np = pred.data.cpu().permute(1,2,0).numpy()
mask_list.append(preds_np)
mask_list = seg_restore_mask(mask_list)
mask = seg_decode_mask(mask_list)
return mask
def make_submit(image_name,preds):
'''
Convert the prediction of each image to the required submit format
:param image_name: image file name
:param preds: 5 class prediction mask in numpy array
:return:
'''
submit=dict()
submit['image_name']= image_name
submit['size']=(preds.shape[1],preds.shape[2]) #(height,width)
submit['mask']=dict()
for cls_id in range(0,5): # 5 classes in this competition
mask=preds[cls_id,:,:]
cls_id_str=str(cls_id+1) # class index from 1 to 5,convert to str
fortran_mask = np.asfortranarray(mask)
rle = encode(fortran_mask) #encode the mask into rle, for detail see: https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py
submit['mask'][cls_id_str]=rle
return submit
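# Optional sanity-check helper (a sketch, not called anywhere by default): it
# decodes each RLE produced by make_submit back with pycocotools' decode() and
# confirms the mask shape matches the recorded (height, width).
def _check_submit_roundtrip(submit):
    for cls_id_str, rle in submit['mask'].items():
        mask = decode(rle)  # decode comes in via `from pycocotools.mask import *`
        assert mask.shape == tuple(submit['size'])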
def dump_2_json(submits,save_p):
'''
:param submits: submits dict
:param save_p: json dst save path
:return:
'''
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, bytes):
return str(obj, encoding='utf-8')
return json.JSONEncoder.default(self, obj)
    with open(save_p, 'w', encoding='utf-8') as f:
        f.write(json.dumps(submits, cls=MyEncoder, indent=4))
from torch.utils.data import Dataset
class cls_tta_dataset(Dataset):
def __init__(self,path,size50=(960,960),size101=(768,768)):
self.size50 = size50
self.size101 = size101
self.path = path
self.img_list = os.listdir(path)
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
name = self.img_list[idx]
img_path = os.path.join(self.path, name)
img = cv2.imread(img_path)
tensor_img50 = clc_aug_tensor(img,self.size50)
tensor_img101 = clc_aug_tensor(img, self.size101)
return tensor_img50,tensor_img101,name
#### classification (clc)
def clc():
model50, model101 = get_models(is_clc=True)
normal_list = []
img_list = os.listdir(TEST_IMG_PATH)
f = open(NORMAL_LIST_PATH, 'w')
cls_tta = cls_tta_dataset(path=TEST_IMG_PATH,size50=(960,960),size101=(768,768))
loader = torch.utils.data.DataLoader(cls_tta, batch_size=1, shuffle=False, num_workers=4)
for img50,img101,name in tqdm.tqdm(loader,ncols=50):
name = name[0]
img50 = img50.squeeze(0).cuda()
img101 = img101.squeeze(0).cuda()
if not filter_img_tta(img50,img101,
model50,
model101):
normal_list.append(name)
f.write(name + "\n")
f.close()
print('normal images: ',len(normal_list))
print('abnormal images: ',len(img_list) - len(normal_list))
#### segmentation (seg)
class seg_tta_dataset(Dataset):
def __init__(self,path,size50=(960,960),size101=(768,768)):
self.size50 = size50
self.size101 = size101
self.path = path
self.img_list = os.listdir(path)
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
name = self.img_list[idx]
img_path = os.path.join(self.path, name)
img = cv2.imread(img_path)
h, w, c = img.shape
tensor50 = seg_aug_image_tensor(img,self.size50)
tensor101 = seg_aug_image_tensor(img,self.size101)
return tensor50,tensor101,name,(h,w)
def seg():
model50, model101 = get_models(is_clc=False)
img_list = os.listdir(TEST_IMG_PATH)
with open(NORMAL_LIST_PATH) as f:
normal_list = [l.strip() for l in f.readlines()]
print('normal_list len: ', len(normal_list))
submits_dict = dict()
cls_tta = seg_tta_dataset(path=TEST_IMG_PATH,size50=(960,960),size101=(768,768))
loader = torch.utils.data.DataLoader(cls_tta, batch_size=1, shuffle=False, num_workers=4)
for tensor50,tensor101,image_id,org_size in tqdm.tqdm(loader,ncols=50):
        h, w = int(org_size[0]), int(org_size[1])  # DataLoader collation wraps these ints in 1-element tensors
image_id = image_id[0]
if image_id in normal_list:
preds_np = np.zeros((5, h, w)).astype(np.uint8)
submit = make_submit(image_id, preds_np)
submits_dict[image_id] = submit
continue
pred50 = seg_aug(tensor50[0], model50)
pred101 = seg_aug(tensor101[0], model101)
pred101 = cv2.resize(pred101, (960, 960), interpolation=cv2.INTER_CUBIC)
pred = (pred50 + pred101) / 2
pred = np.where(pred > 0.5, 1, 0).astype(np.uint8)
preds_np = pred[:, :, 1:]
preds_np = cv2.resize(preds_np, (w, h))
preds_np = np.transpose(preds_np, (2, 0, 1))
submit = make_submit(image_id, preds_np)
submits_dict[image_id] = submit
dump_2_json(submits_dict, SUBMIT_PATH)
if __name__ == '__main__':
clc()
seg()
|
1629805
|
import time
import torch
import sys
import os
import subprocess
argslist = list(sys.argv)[1:]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
os.makedirs('logs', exist_ok=True)
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("logs/{}_GPU_{}.log".format(job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
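    # drop the '--rank={}' entry that was just appended so the next worker gets
    # a fresh rank argument instead of accumulating several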
for p in workers:
p.wait()
#crashed = False
#while (!crashed):
# for p in workers:
# poll = p.poll()
# if poll != None:
# crashed = True
# time.sleep(2)
#for p in workers:
# try:
# p.terminate()
# p.kill()
# except Exception as e:
# pass
|
1629839
|
import os
import networkx as nx
def fix_layout(G):
for n in G.nodes:
node = G.nodes(data=True)[n]
if node["kind"] in {"model"}:
node["shape"] = '"square"'
node["width"] = "1"
elif node["kind"] in {"data", "prob"}:
node["shape"] = '"circle"'
elif node["kind"] in {"vote", "merge"}:
node["shape"] = '"triangle"'
elif node["kind"] in {"imputation"}:
node["shape"] = '"invtriangle"'
else:
pass
return G
def to_dot(
g,
dname="tmp",
fname="test",
extension=".dot",
return_fname=False,
ortho=False,
fi_labels=False,
):
"""
Convert a graph to a dot file.
"""
# Layout
if fi_labels:
for e in g.edges():
g.edges()[e]["label"] = "{0:.2f}".format(g.edges()[e].get("fi", 0))
dot = nx.drawing.nx_pydot.to_pydot(g)
dot.set("rankdir", "BT")
if ortho:
dot.set("splines", "ortho")
# To file
full_fname = os.path.join(dname, fname + extension)
with open(full_fname, "w") as f:
print(dot.to_string(), file=f)
if return_fname:
return full_fname
else:
return
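
if __name__ == "__main__":
    # Minimal usage sketch (assumes pydot is installed for nx_pydot): build a
    # tiny graph with 'kind' attributes, apply the layout hints, and write it
    # to ./example.dot in the current directory.
    g = nx.DiGraph()
    g.add_node("d0", kind="data")
    g.add_node("m0", kind="model")
    g.add_node("p0", kind="prob")
    g.add_edge("d0", "m0", fi=0.75)
    g.add_edge("m0", "p0", fi=1.0)
    g = fix_layout(g)
    print(to_dot(g, dname=".", fname="example", return_fname=True, fi_labels=True))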
|
1629862
|
import unittest
from boost_collections.zskiplist.zskiplist import Zskiplist
class TestZskiplist(unittest.TestCase):
def setUp(self):
self.zsl = Zskiplist()
self.zsl.zsl_insert(10, 'a')
self.zsl.zsl_insert(10, 'b')
def test_zsl_insert(self):
zsl = self.zsl
self.zsl.zsl_insert(10, 'c')
self.assertEqual(3, zsl.length)
def test_zsl_delete(self):
zsl = self.zsl
zsl.zsl_delete(10, 'b')
self.assertEqual(1, zsl.length)
retval, node = zsl.zsl_delete(10, 'c')
self.assertEqual(0, retval)
def test_zsl_get_element_by_rank(self):
zsl = self.zsl
zsl.zsl_insert(3, 'c')
ele = zsl.zsl_get_element_by_rank(1)
self.assertIsNotNone(ele)
self.assertEqual('c', ele.ele)
ele = zsl.zsl_get_element_by_rank(4)
self.assertIsNone(ele)
if __name__ == '__main__':
unittest.main()
|
1629906
|
from collections import namedtuple
import os
from utils import cleanup
class Test(object):
Case = namedtuple('Case', ['path', 'versions',
'command', 'options', 'cleanup'])
cases = dict()
# unit test cases
cases['vectorAdd.f128'] = Case(
path='samples/vectorAdd.f128', versions=[], command='./vectorAdd', options=[], cleanup=True)
cases['op_graph_simple'] = Case(
path='samples/op_graph_simple', versions=[], command='./main', options=[], cleanup=True)
cases['op_pattern_simple'] = Case(
path='samples/op_pattern_simple', versions=[], command='./main', options=[], cleanup=True)
cases['stress'] = Case(path='samples/stress', versions=[],
command='./stress', options=[], cleanup=True)
# sample test cases
cases['bfs'] = Case(path='samples/bfs', command='./bfs', versions=['vp-opt1',
'vp-opt2', 'vp-opt'], options=['../data/graph1MW_6.txt'], cleanup=True)
cases['backprop'] = Case(path='samples/backprop', command='./backprop', versions=[
'vp-opt1', 'vp-opt2', 'vp-opt'], options=['65536'], cleanup=True)
cases['cfd'] = Case(path='samples/cfd', command='./euler3d', versions=['vp-opt1',
'vp-opt2', 'vp-opt'], options=['../data/fvcorr.domn.097K'], cleanup=True)
cases['hotspot'] = Case(path='samples/hotspot', command='./hotspot', versions=['vp-opt'], options=[
'512', '2', '2', '../data/temp_512', '../data/power_512', 'output.out'], cleanup=True)
cases['hotspot3D'] = Case(path='samples/hotspot3D', command='./3D', versions=['vp-opt'], options=[
'512', '8', '100', '../data/power_512x8', '../data/temp_512x8', 'output.out'], cleanup=True)
cases['huffman'] = Case(path='samples/huffman', command='./pavle', versions=[
'vp-opt'], options=['../data/test1024_H2.206587175259.in'], cleanup=True)
cases['lavaMD'] = Case(path='samples/lavaMD', command='./lavaMD',
versions=['vp-opt'], options=['-boxes1d', '10'], cleanup=True)
cases['particlefilter'] = Case(path='samples/particlefilter', command='./particlefilter_float', versions=[
'vp-opt'], options=['-x', '128', '-y', '128', '-z', '10', '-np', '1000'], cleanup=True)
cases['pathfinder'] = Case(path='samples/pathfinder', command='./pathfinder',
versions=['vp-opt'], options=['100000', '100', '20'], cleanup=True)
cases['srad'] = Case(path='samples/srad_v1', command='./srad', versions=['vp-opt1',
'vp-opt2', 'vp-opt'], options=['10', '0.5', '502', '458'], cleanup=True)
cases['streamcluster'] = Case(path='samples/streamcluster', command='./sc_gpu', versions=['vp-opt'], options=[
'10', '20', '256', '65536', '65536', '1000', 'none', 'output.txt', '1'], cleanup=True)
# application cases
cases['barracuda'] = Case(path='samples/barracuda', command='./barracuda', versions=['vp-opt'],
options=['aln', 'sample_data/Saccharomyces_cerevisiae.SGD1.01.50.dna_rm.toplevel.fa',
'sample_data/sample_reads.fastq', '>', 'quicktest.sai'], cleanup=False)
cases['castro'] = Case(path='samples/Castro/Exec/hydro_tests/Sedov', command='Castro2d.gnu.CUDA.ex', versions=['vp-opt'],
options=['./inputs.2d.cyl_in_cartcoords'], cleanup=False)
cases['darknet'] = Case(path='samples/darknet', command='./darknet', versions=['vp-opt'],
options=['detector', 'test', './cfg/coco.data', './cfg/yolov4.cfg',
'./yolov4.weights', 'data/dog.jpg', '-i', '0', '-thresh', '0.25'], cleanup=False)
cases['deepwave'] = Case(path='samples/deepwave', command='./Deepwave_SEAM_example1.py', versions=['vp-opt'],
options=[], cleanup=False)
cases['namd'] = Case(path='samples/NAMD/Linux-x86_64-g++', command='./namd3',
versions=['vp-opt'], options=['../alain'], cleanup=False)
cases['qmcpack'] = Case(path='samples/qmcpack/workspace/NiO/dmc-a4-e48-batched_driver-DU8',
command='../../../build/bin/qmcpack', versions=['vp-opt'], options=['./NiO-fcc-S1-dmc.xml'], cleanup=False)
def __init__(self, name, arch, version=None):
self._name = name
self._arch = arch
self._version = version
self._configs = dict()
def name(self):
return self._name
def setup(self, choices):
pass
def _run_impl(self, case_name, version):
pass
def run(self, iterations=1):
cwd = os.getcwd()
for i in range(iterations):
for case_name, case in Test.cases.items():
if case_name not in self._configs:
continue
os.chdir(case.path)
if i == 0 and case.cleanup:
cleanup(self._arch)
self._run_impl(case_name, None)
os.chdir(cwd)
if self._version is None:
continue
for version in case.versions:
if version == self._version or self._version == 'all':
os.chdir(case.path + '-' + version)
if i == 0 and case.cleanup:
cleanup(self._arch)
self._run_impl(case_name, version)
os.chdir(cwd)
|
1629911
|
from django.apps import AppConfig
class OAuthConfig(AppConfig):
"""
Configuration for the OAuth app
Set the OAUTH_METHOD variable in the project's settings.py to
'token' to send Django Rest Framework tokens upon login.
    Otherwise, successful logins will redirect to the URL set by
LOGIN_REDIRECT_URL.
"""
name = 'oauth'
verbose_name = 'OAuth'
|
1629971
|
from django.contrib import admin
from src.apps.trainings.admin.network_admin import NetworkAdmin
from src.apps.trainings.models import Network
admin.site.register(Network, NetworkAdmin)
|
1629990
|
import cPickle as pickle
import uuid
import logging
import time
import heapq
import socket
NAMESPACE = uuid.UUID('7e0d7720-fa98-4270-94ff-650a2c25f3f0')
def addr_to_tuple(addr):
parts = addr.split('-')
return parts[0], int(parts[1])
def tuple_to_addr(addr):
if addr[0] == '0.0.0.0':
addr = socket.gethostbyname(socket.gethostname()), addr[1]
return '%s-%s' % addr
class Node(object):
port = 0 # XXX temporary
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('', self.port)) # XXX temporary
self.address = tuple_to_addr(self.sock.getsockname())
self.timers = []
self.logger = logging.getLogger('node.%s' % (self.address,))
self.unique_id = uuid.uuid3(NAMESPACE, self.address).int
# XXX temporary
self.unique_id = self.sock.getsockname()[1]
def start(self):
# subclasses can override this
pass
def run(self):
self.start()
self.running = True
while self.running:
if self.timers:
next_timer = self.timers[0][0]
if next_timer < time.time():
when, do, callable = heapq.heappop(self.timers)
if do:
callable()
continue
else:
next_timer = 0
timeout = max(0.1, next_timer - time.time())
self.sock.settimeout(timeout)
try:
msg, address = self.sock.recvfrom(102400)
except socket.timeout:
continue
action, kwargs = pickle.loads(msg)
self.logger.debug("received %r with args %r" % (action, kwargs))
getattr(self, 'do_%s' % action)(**kwargs)
def stop(self):
self.running = False
def set_timer(self, seconds, callable):
timer = [time.time() + seconds, True, callable]
heapq.heappush(self.timers, timer)
return timer
def cancel_timer(self, timer):
timer[1] = False
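        # a timer is [fire_time, active_flag, callback]; cancelling only clears
        # the flag, so run() discards the entry when it eventually pops it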
def send(self, destinations, action, **kwargs):
self.logger.debug("sending %s with args %s to %s" %
(action, kwargs, destinations))
pkl = pickle.dumps((action, kwargs))
for dest in destinations:
self.sock.sendto(pkl, addr_to_tuple(dest))
# tests
import unittest
import threading
class TestNode(Node):
foo_called = False
bar_called = False
def do_FOO(self, x, y):
self.foo_called = True
self.stop()
class NodeTests(unittest.TestCase):
def test_comm(self):
sender = Node()
receiver = TestNode()
rxthread = threading.Thread(target=receiver.run)
rxthread.start()
sender.send([receiver.address], 'FOO', x=10, y=20)
rxthread.join()
self.failUnless(receiver.foo_called)
def test_timeout(self):
node = TestNode()
def cb():
node.bar_called = True
node.stop()
node.set_timer(0.01, cb)
node.run()
self.failUnless(node.bar_called)
def test_cancel_timeout(self):
node = TestNode()
def fail():
raise RuntimeError("nooo")
nonex = node.set_timer(0.01, fail)
def cb():
node.bar_called = True
node.stop()
node.set_timer(0.02, cb)
node.cancel_timer(nonex)
node.run()
# this just needs to not crash
|
1630036
|
from traceback_with_variables import printing_exc, ColorSchemes
def mean(vs):
return sum(vs) / sum(1 for v in vs)
def get_avg_ratio(size1, size2):
return mean([get_ratio(h, w) for h, w in [size1, size2]])
def get_ratio(h, w):
return h / w
def main():
sizes_str = '300 200 300 0'
with printing_exc(reraise=False):
h1, w1, h2, w2 = map(int, sizes_str.split())
return get_avg_ratio((h1, w1), (h2, w2))
main()
|
1630076
|
import argparse
import sys
import os
import shutil
import time
import numpy as np
from random import sample
from sklearn import metrics
import torch
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.tensorboard import SummaryWriter
from deepKNet.data import get_train_valid_test_loader
from deepKNet.model3D import PointNet
parser = argparse.ArgumentParser(description='deepKNet model')
parser.add_argument('--root', metavar='DATA_DIR')
parser.add_argument('--target', metavar='TARGET_PROPERTY')
parser.add_argument('--nclass', type=int)
parser.add_argument('--run_name', metavar='RUNID')
parser.add_argument('--gpu_id', type=int, metavar='GPUID')
# hyper parameter tuning
parser.add_argument('--npoint', type=int, metavar='NPOINT CUTOFF')
parser.add_argument('--point_dim', type=int, metavar='NPOINT DIM')
parser.add_argument('--data_aug', type=str)
parser.add_argument('--rot_range', type=float, nargs='+')
parser.add_argument('--random_intensity', type=str)
parser.add_argument('--systematic_absence', type=str)
parser.add_argument('--conv_dims', type=int, nargs='+')
parser.add_argument('--nbert', type=int)
parser.add_argument('--fc_dims', type=int, nargs='+')
parser.add_argument('--pool', type=str)
parser.add_argument('--epochs', type=int, metavar='N')
parser.add_argument('--batch_size', type=int, metavar='N')
parser.add_argument('--optim', type=str, metavar='OPTIM')
parser.add_argument('--lr', type=float, metavar='LR')
parser.add_argument('--lr_milestones', nargs='+', type=int)
parser.add_argument('--dropout', type=float, metavar='DROPOUT')
parser.add_argument('--stn', action='store_true')
# default params
parser.add_argument('--start_epoch', default=0, type=int, metavar='N')
parser.add_argument('--weight_decay', default=0, type=float, metavar='W')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M')
n_threads = torch.get_num_threads()
parser.add_argument('--num_threads', default=n_threads, type=int, metavar='N_thread')
parser.add_argument('--num_data_workers', default=4, type=int, metavar='N')
parser.add_argument('--print_freq', default=10, type=int, metavar='N')
parser.add_argument('--test_freq', default=50, type=int, metavar='N')
parser.add_argument('--disable_cuda', action='store_true')
parser.add_argument('--resume', default='', type=str, metavar='PATH')
# parse args
args = parser.parse_args()
args.cuda = torch.cuda.is_available() and not args.disable_cuda
cuda_device = torch.device('cuda:{}'.format(args.gpu_id)) if args.cuda else None
if args.num_threads != n_threads:
torch.set_num_threads(args.num_threads)
print('User defined variables:', flush=True)
for key, val in vars(args).items():
print(' => {:17s}: {}'.format(key, val), flush=True)
best_performance = 0.
def main():
global args, best_performance, cuda_device
# get data loader
train_loader, valid_loader, test_loader = get_train_valid_test_loader(
root=args.root,
target=args.target,
npoint=args.npoint,
point_dim=args.point_dim,
data_aug=args.data_aug=='True',
rot_range=args.rot_range,
random_intensity=args.random_intensity=='True',
systematic_absence=args.systematic_absence=='True',
batch_size=args.batch_size,
num_data_workers=args.num_data_workers,
pin_memory=args.cuda)
# build model
assert(args.conv_dims[0] == args.point_dim)
if args.target == 'crystal_system':
assert(args.nclass == 7)
elif args.target == 'crystal_family':
assert(args.nclass == 6)
model = PointNet(nclass=args.nclass,
conv_dims=args.conv_dims,
nbert=args.nbert,
fc_dims=args.fc_dims,
pool=args.pool,
dropout=args.dropout,
stn=args.stn)
# number of trainable model parameters
trainable_params = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print('Number of trainable model parameters: {:d}' \
.format(trainable_params), flush=True)
if args.cuda:
print('running on GPU:{}..'.format(args.gpu_id), flush=True)
model = model.cuda(device=cuda_device)
else:
print('running on CPU..', flush=True)
# define loss function
criterion = torch.nn.NLLLoss()
if args.cuda:
criterion = criterion.cuda(device=cuda_device)
# optimization algo
if args.optim == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
elif args.optim == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise NameError('Only Adam or SGD is allowed as --optim')
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume), flush=True)
checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
args.start_epoch = checkpoint['epoch'] + 1
best_performance = checkpoint['best_performance']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']), flush=True)
else:
print("=> no checkpoint found at '{}', existing.." \
.format(args.resume), flush=True)
sys.exit(1)
# TensorBoard writer
summary_root = './runs/'
summary_file = summary_root + args.run_name
if not os.path.exists(summary_root):
os.mkdir(summary_root)
if os.path.exists(summary_file):
print('run file already exists, use a different --run_name')
sys.exit(1)
writer = SummaryWriter(summary_file)
# learning-rate scheduler
scheduler = MultiStepLR(optimizer=optimizer, milestones=args.lr_milestones,
gamma=0.1, last_epoch=-1)
for epoch in range(args.start_epoch, args.start_epoch+args.epochs):
# train for one epoch
train(train_loader, model, criterion, args.nclass, optimizer, epoch, writer)
# evaluate on validation set
performance = validate(valid_loader, model, criterion, args.nclass, epoch, writer)
scheduler.step()
# remember best auc and save checkpoint
is_best = performance > best_performance
best_performance = max(performance, best_performance)
# save checkpoint
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'best_performance': best_performance,
'optimizer': optimizer.state_dict(),
}, is_best)
if ((epoch-args.start_epoch+1)%args.test_freq == 0) or \
(epoch == args.start_epoch+args.epochs-1):
# test best model
print('---------Evaluate Model on Test Set---------------', flush=True)
best_model = load_best_model()
print('best validation performance: {:.3f}'.format(best_model['best_performance']))
model.load_state_dict(best_model['state_dict'])
validate(test_loader, model, criterion, args.nclass, epoch, writer, test_mode=True)
def train(train_loader, model, criterion, nclass, optimizer, epoch, writer):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':4.2f')
losses = AverageMeter('Loss', ':6.3f')
accuracies = AverageMeter('Accu', ':6.3f')
precisions = AverageMeter('Prec', ':6.3f')
recalls = AverageMeter('Rec', ':6.3f')
fscores = AverageMeter('Fsc', ':6.3f')
auc_scores = AverageMeter('AUC', ':6.3f')
ave_precisions = AverageMeter('AP', ':6.3f')
if nclass == 2:
report = [batch_time, data_time, losses, accuracies, precisions,
recalls, fscores, ave_precisions, auc_scores]
else:
report = [batch_time, data_time, losses, accuracies]
progress = ProgressMeter(
len(train_loader),
report,
prefix="Epoch: [{}]".format(epoch)
)
# switch to training mode
model.train()
end = time.time()
running_loss = 0.0
for idx, data in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
point_cloud, target, _ = data
# optionally skip the last batch
if target.size(0) < 16: continue
target = target.view(-1)
if args.cuda:
point_cloud = point_cloud.cuda(device=cuda_device)
target = target.cuda(device=cuda_device)
# compute output
output = model(point_cloud)
loss = criterion(output, target)
# measure accuracy and record loss
accuracy, precision, recall, fscore, auc_score, ave_precision =\
class_eval(output, target)
losses.update(loss.item(), target.size(0))
accuracies.update(accuracy.item(), target.size(0))
precisions.update(precision.item(), target.size(0))
recalls.update(recall.item(), target.size(0))
fscores.update(fscore.item(), target.size(0))
auc_scores.update(auc_score.item(), target.size(0))
ave_precisions.update(ave_precision.item(), target.size(0))
# compute gradient and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress and write to TensorBoard
running_loss += loss.item()
if (idx+1) % args.print_freq == 0:
progress.display(idx+1)
writer.add_scalar('training loss',
running_loss / args.print_freq,
epoch * len(train_loader) + idx)
running_loss = 0.0
def validate(valid_loader, model, criterion, nclass, epoch, writer, test_mode=False):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':4.2f')
losses = AverageMeter('Loss', ':6.3f')
accuracies = AverageMeter('Accu', ':6.3f')
precisions = AverageMeter('Prec', ':6.3f')
recalls = AverageMeter('Rec', ':6.3f')
fscores = AverageMeter('Fsc', ':6.3f')
auc_scores = AverageMeter('AUC', ':6.3f')
ave_precisions = AverageMeter('AP', ':6.3f')
if nclass == 2:
report = [batch_time, data_time, losses, accuracies, precisions,
recalls, fscores, ave_precisions, auc_scores]
else:
report = [batch_time, data_time, losses, accuracies]
progress = ProgressMeter(
len(valid_loader),
report,
prefix='Validate: ' if not test_mode else 'Test: '
)
# switch to evaluation mode
model.eval()
with torch.no_grad():
end = time.time()
running_loss = 0.0
for idx, data in enumerate(valid_loader):
point_cloud, target, _ = data
# optionally skip the last batch
if target.size(0) < 8: continue
target = target.view(-1)
if args.cuda:
point_cloud = point_cloud.cuda(device=cuda_device)
target = target.cuda(device=cuda_device)
# compute output
output = model(point_cloud)
loss = criterion(output, target)
# measure accuracy and record loss
accuracy, precision, recall, fscore, auc_score, ave_precision =\
class_eval(output, target)
losses.update(loss.item(), target.size(0))
accuracies.update(accuracy.item(), target.size(0))
precisions.update(precision.item(), target.size(0))
recalls.update(recall.item(), target.size(0))
fscores.update(fscore.item(), target.size(0))
auc_scores.update(auc_score.item(), target.size(0))
ave_precisions.update(ave_precision.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress and write to TensorBoard
running_loss += loss.item()
if (idx+1) % args.print_freq == 0 and not test_mode:
progress.display(idx+1)
writer.add_scalar('validation loss',
running_loss / args.print_freq,
epoch * len(valid_loader) + idx)
running_loss = 0.0
if nclass == 2:
print(' * AUC {auc.avg:.3f}'.format(auc=auc_scores), flush=True)
return auc_scores.avg
else:
print(' * ACCU {accu.avg:.3f}'.format(accu=accuracies), flush=True)
return accuracies.avg
def save_checkpoint(state, is_best):
check_root = './checkpoints/'
if not os.path.exists(check_root):
os.mkdir(check_root)
filename = check_root + args.run_name + '_checkpoint.pth.tar'
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, check_root+args.run_name+'_model_best.pth.tar')
def load_best_model():
check_root = './checkpoints/'
if not os.path.exists(check_root):
        print('{} dir does not exist, exiting...'.format(check_root), flush=True)
sys.exit(1)
filename = check_root + args.run_name + '_model_best.pth.tar'
if not os.path.isfile(filename):
        print('checkpoint {} not found, exiting...'.format(filename), flush=True)
sys.exit(1)
return torch.load(filename)
def class_eval(prediction, target):
prediction = np.exp(prediction.detach().cpu().numpy())
pred_label = np.argmax(prediction, axis=1)
target = target.detach().cpu().numpy()
target_label = np.squeeze(target)
if prediction.shape[1] == 2:
precision, recall, fscore, _ = metrics.precision_recall_fscore_support(
target_label, pred_label, average='binary', warn_for=tuple())
try:
auc_score = metrics.roc_auc_score(target_label, prediction[:,1])
except:
auc_score = np.float64(-1E8)
accuracy = metrics.accuracy_score(target_label, pred_label)
ave_precision = metrics.average_precision_score(target_label, prediction[:,1])
else:
correct = np.equal(pred_label, target_label).sum()
precision, recall = np.float64(0.0), np.float64(0.0)
fscore, auc_score = np.float64(0.0), np.float64(0.0)
accuracy = np.float64(correct/float(target_label.size))
ave_precision = np.float64(0.0)
return accuracy, precision, recall, fscore, auc_score, ave_precision
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0.
self.avg = 0.
self.sum = 0.
self.cnt = 0.
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print(' '.join(entries), flush=True)
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
if __name__ == "__main__":
main()
|
1630112
|
import unittest
from mockito import *
from meme import Meme
class MemeApiTest(unittest.TestCase):
def test_should_get_meme_by_name(self):
meme_repository_mock = Mock()
when(meme_repository_mock).get('some_name').thenReturn('ok')
Meme.meme_repository = meme_repository_mock
assert Meme.get(name='some_name') == 'ok'
def test_should_search_for_memes(self):
meme_repository_mock = Mock()
when(meme_repository_mock).search('a query', 10).thenReturn(['search_result1'])
when(meme_repository_mock).search('a query', 40).thenReturn(['search_result2'])
Meme.meme_repository = meme_repository_mock
assert Meme.search('a query') == ['search_result1']
assert Meme.search('a query', count=40) == ['search_result2']
class MemePostsApiTest(unittest.TestCase):
def test_should_get_one_post(self):
post_repository_mock = Mock()
when(post_repository_mock).get('123', '456').thenReturn('post')
Meme.Posts.post_repository = post_repository_mock
assert Meme.Posts.get(owner_guid='123', pubid='456') == 'post'
def test_should_get_popular_posts(self):
post_repository_mock = Mock()
when(post_repository_mock).popular('en', 10).thenReturn(['popular_posts1'])
when(post_repository_mock).popular('pt', 10).thenReturn(['popular_posts2'])
when(post_repository_mock).popular('en', 33).thenReturn(['popular_posts3'])
when(post_repository_mock).popular('pt', 33).thenReturn(['popular_posts4'])
Meme.Posts.post_repository = post_repository_mock
assert Meme.Posts.popular() == ['popular_posts1']
assert Meme.Posts.popular(locale='pt') == ['popular_posts2']
assert Meme.Posts.popular(count=33) == ['popular_posts3']
assert Meme.Posts.popular(locale='pt', count=33) == ['popular_posts4']
def test_should_search_for_posts(self):
post_repository_mock = Mock()
when(post_repository_mock).search('a query', 10).thenReturn(['search_result1'])
when(post_repository_mock).search('a query', 40).thenReturn(['search_result2'])
Meme.Posts.post_repository = post_repository_mock
assert Meme.Posts.search('a query') == ['search_result1']
assert Meme.Posts.search('a query', count=40) == ['search_result2']
|
1630123
|
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(XA,yA)
yP = knn.predict(XB)
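# Note: XA / yA (training features and labels) and XB (query features) are
# assumed to be defined earlier.  As a self-contained illustration one could
# substitute, for example:
#
#   from sklearn.datasets import load_iris
#   from sklearn.model_selection import train_test_split
#   X, y = load_iris(return_X_y=True)
#   XA, XB, yA, yB = train_test_split(X, y, random_state=0)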
|
1630124
|
ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
def is_s3_object_is_public(s3_obj):
# Loop over all the grants.
for grant in s3_obj.Acl().grants:
# Find the all users grantee.
if "URI" not in grant["Grantee"]:
continue
elif grant["Grantee"]["URI"] == ALL_USERS_URI:
if grant["Permission"] == "READ":
return True
return False
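# Example usage (a sketch; assumes boto3, valid AWS credentials, and a
# hypothetical bucket/key):
#
#   import boto3
#   obj = boto3.resource("s3").Object("my-bucket", "path/to/key")
#   print(is_s3_object_is_public(obj))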
|
1630129
|
import pytest
def test_api_methods(api_info):
assert api_info.endpoints.report_test_start.version >= 1
@pytest.fixture
def api_info(client):
return client.api.info()
|
1630152
|
from __future__ import unicode_literals
import unittest
import utn11
CANONICAL_PAIRS = [
('\u1010\u102D\u103A', '\u1010\u103A\u102D'),
('\u1010\u103A\u102D', '\u1010\u103A\u102D'),
('\u101B\u1031\u1037\u103E', '\u101B\u103E\u1031\u1037'),
('\u101B\u1031\u103E\u1037', '\u101B\u103E\u1031\u1037'),
('\u101B\u1037\u1031\u103E', '\u101B\u103E\u1031\u1037'),
('\u101B\u1037\u103E\u1031', '\u101B\u103E\u1031\u1037'),
('\u101B\u103E\u1031\u1037', '\u101B\u103E\u1031\u1037'),
('\u101B\u103E\u1037\u1031', '\u101B\u103E\u1031\u1037'),
# Mon:
('\u1010\u1031\u103A\u103E', '\u1010\u103E\u103A\u1031'),
('\u1010\u1031\u103E\u103A', '\u1010\u103E\u103A\u1031'),
('\u1010\u103A\u1031\u103E', '\u1010\u103E\u103A\u1031'),
('\u1010\u103A\u103E\u1031', '\u1010\u103E\u103A\u1031'),
('\u1010\u103E\u1031\u103A', '\u1010\u103E\u103A\u1031'),
('\u1010\u103E\u103A\u1031', '\u1010\u103E\u103A\u1031'),
# Mon:
('\u1000\u102C\u102F\u1031\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u102F\u1036\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1031\u102F\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1031\u1036\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1036\u102F\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1036\u1031\u102F', '\u1000\u1031\u102F\u102C\u1036'),
#
('\u1000\u102F\u102C\u1031\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u102C\u1036\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1031\u102C\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1031\u1036\u102C', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1036\u102C\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1036\u1031\u102C', '\u1000\u1031\u102F\u102C\u1036'),
#
('\u1000\u1031\u102C\u102F\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u102C\u1036\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u102F\u102C\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u102F\u1036\u102C', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u1036\u102C\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u1036\u102F\u102C', '\u1000\u1031\u102F\u102C\u1036'),
#
('\u1000\u1036\u102C\u102F\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u102C\u1031\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u102F\u102C\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u102F\u1031\u102C', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u1031\u102C\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u1031\u102F\u102C', '\u1000\u1031\u102F\u102C\u1036'),
]
class TestUtn11(unittest.TestCase):
    def test_canonicalization(self):
for raw, canonical in CANONICAL_PAIRS:
self.assertEqual(canonical, utn11.Canonicalize(raw))
self.assertTrue(utn11.CLUSTER.match(canonical) is not None)
return
if __name__ == '__main__':
unittest.main()
|
1630153
|
import time
import terminalio
import displayio
import adafruit_imageload
from adafruit_display_text import label
from adafruit_bitmap_font import bitmap_font
from adafruit_magtag.magtag import MagTag
# --| USER CONFIG |--------------------------
STATION_ID = (
"9447130" # tide location, find yours here: https://tidesandcurrents.noaa.gov/
)
METRIC = False # set to True for metric units
VSCALE = 2 # pixels per ft or m
DAILY_UPDATE_HOUR = 3 # 24 hour format
DST_ON = True # Day Light Saving currently active?
# -------------------------------------------
# don't change these
PLOT_WIDTH = 116
PLOT_HEIGHT = 116
PLOT_X = 174
PLOT_Y = 6
PLOT_Y_SCALE = round(PLOT_HEIGHT / (4 * VSCALE))
DATE_FONT = bitmap_font.load_font("/fonts/Kanit-Black-24.bdf")
TIME_FONT = bitmap_font.load_font("/fonts/Kanit-Medium-20.bdf")
# our MagTag
magtag = MagTag()
magtag.json_path = ["predictions"]
# ----------------------------
# Grid overlay for plot
# ----------------------------
grid_bmp, grid_pal = adafruit_imageload.load("/bmps/tides_bg_land.bmp")
grid_pal.make_transparent(1)
grid_overlay = displayio.TileGrid(grid_bmp, pixel_shader=grid_pal)
# ----------------------------
# Tide plot (bitmap, palette, tilegrid)
# ----------------------------
tide_plot = displayio.Bitmap(PLOT_WIDTH, PLOT_HEIGHT, 4)
tide_pal = displayio.Palette(4)
tide_pal[0] = 0x000000 # black
tide_pal[1] = 0x555555 # dark gray
tide_pal[2] = 0xAAAAAA # light gray
tide_pal[3] = 0xFFFFFF # white
tide_pal.make_transparent(3)
tide_tg = displayio.TileGrid(tide_plot, pixel_shader=tide_pal, x=PLOT_X, y=PLOT_Y)
# ----------------------------
# Plot scale labels
# ----------------------------
plot_y_pos = label.Label(terminalio.FONT, text="+99", color=0x000000)
plot_y_pos.text = "{:>3}".format(PLOT_Y_SCALE)
plot_y_pos.anchor_point = (1.0, 0.5)
plot_y_pos.anchored_position = (178, 34)
plot_y_neg = label.Label(terminalio.FONT, text="-99", color=0x000000)
plot_y_neg.text = "{:>3}".format(-1 * PLOT_Y_SCALE)
plot_y_neg.anchor_point = (1.0, 0.5)
plot_y_neg.anchored_position = (178, 92)
plot_y_labels = displayio.Group()
plot_y_labels.append(plot_y_pos)
plot_y_labels.append(plot_y_neg)
# ----------------------------
# Date label
# ----------------------------
date_label = displayio.Group()
date_text = [label.Label(DATE_FONT, text="A", color=0xFFFFFF) for _ in range(5)]
y_offset = 8
for text in date_text:
date_label.append(text)
text.anchor_point = (0.5, 0)
text.anchored_position = (20, y_offset)
y_offset += 23
# ----------------------------
# HiLo Times and Icons
# ----------------------------
tide_info = displayio.Group()
hilo_times = [label.Label(TIME_FONT, text="12:34 P", color=0x000000) for _ in range(4)]
y_offset = 18
for hilo in hilo_times:
tide_info.append(hilo)
hilo.hidden = True
hilo.anchor_point = (1, 0.5)
hilo.anchored_position = (158, y_offset)
y_offset += 28
icon_bmp, icon_pal = adafruit_imageload.load("/bmps/tides_icons.bmp")
icon_pal.make_transparent(1)
hilo_icons = [
displayio.TileGrid(
icon_bmp,
pixel_shader=icon_pal,
width=1,
height=1,
tile_width=24,
tile_height=24,
)
for _ in range(4)
]
y_offset = 6
for icon in hilo_icons:
tide_info.append(icon)
icon.hidden = True
icon.x = 46
icon.y = y_offset
y_offset += 28
# ----------------------------
# Station ID
# ----------------------------
station_info = label.Label(
terminalio.FONT, text="STATION ID: " + STATION_ID, color=0x000000
)
station_info.anchor_point = (1, 1)
station_info.anchored_position = (158, 126)
# ----------------------------
# Add all the graphic layers
# ----------------------------
magtag.splash.append(tide_tg)
magtag.splash.append(grid_overlay)
magtag.splash.append(plot_y_labels)
magtag.splash.append(tide_info)
magtag.splash.append(date_label)
magtag.splash.append(station_info)
# /////////////////////////////////////////////////////////////////////////
def get_data_source_url(station=STATION_ID, metric=METRIC, hilo_only=True):
"""Build and return the URL for the tides API."""
date = "{}{:02}{:02}".format(now.tm_year, now.tm_mon, now.tm_mday)
URL = "https://api.tidesandcurrents.noaa.gov/api/prod/datagetter?format=json"
URL += "&product=predictions"
URL += "&interval=hilo" if hilo_only else ""
URL += "&datum=mllw" # MLLW = "tides"
URL += "&units=metric" if metric else "&units=english"
URL += "&time_zone=lst_ldt" if DST_ON else "&time_zone=lst"
URL += "&begin_date=" + date
URL += "&end_date=" + date
URL += "&station=" + station
return URL
def get_tide_data():
"""Fetch JSON tide data and return parsed results in a list."""
# Get raw JSON data
magtag.url = get_data_source_url(hilo_only=False)
raw_data = magtag.fetch()
# Results will be stored in a list that is PLOT_WIDTH long
new_tide_data = [PLOT_HEIGHT] * PLOT_WIDTH
# Convert raw data to display coordinates
for data in raw_data:
_, t = data["t"].split(" ") # date and time
h, m = t.split(":") # hours and minutes
v = data["v"] # water level
x = round((PLOT_WIDTH - 1) * (60 * float(h) + float(m)) / 1434)
y = (PLOT_HEIGHT // 2) - round(VSCALE * float(v))
y = 0 if y < 0 else y
y = PLOT_HEIGHT - 1 if y >= PLOT_HEIGHT else y
new_tide_data[x] = y
return new_tide_data
def get_hilo_data():
"""Get high / low times."""
# Get raw JSON data
magtag.url = get_data_source_url(hilo_only=True)
return magtag.fetch()
def show_today():
"""Display month and day."""
month_text = (
"JAN",
"FEB",
"MAR",
"APR",
"MAY",
"JUN",
"JUL",
"AUG",
"SEP",
"OCT",
"NOV",
"DEC",
)[now.tm_mon - 1]
day_text = "{:2}".format(now.tm_mday)
date_label[0].text = month_text[0]
date_label[1].text = month_text[1]
date_label[2].text = month_text[2]
date_label[3].text = day_text[0]
date_label[4].text = day_text[1]
def plot_tides():
"""Graphical plot of water level."""
tide_plot.fill(3)
for x in range(PLOT_WIDTH):
y = tide_data[x]
for yfill in range(y, PLOT_HEIGHT):
try:
tide_plot[x, yfill] = 2
except IndexError:
pass
tide_plot[x, y] = 0
def show_hilo():
"""Show high / low times."""
for i in hilo_icons:
i.hidden = True
for t in hilo_times:
t.hidden = True
for i, data in enumerate(hilo_data):
# make it visible
hilo_icons[i].hidden = False
hilo_times[i].hidden = False
# icon
hilo_icons[i][0] = 0 if data["type"] == "H" else 1
# time
h, m = data["t"].split(" ")[1].split(":")
m = int(m)
h = int(h)
ampm = "A" if h < 12 else "P"
h = h if h < 13 else h - 12
hilo_times[i].text = "{:>2}:{:02} {}".format(h, m, ampm)
def time_to_sleep():
"""Compute amount of time to sleep."""
# daily event time
event_time = time.struct_time(
(now[0], now[1], now[2], DAILY_UPDATE_HOUR, 0, 0, -1, -1, now[8])
)
# how long is that from now?
remaining = time.mktime(event_time) - time.mktime(now)
# is that today or tomorrow?
    if remaining < 0:  # ah, it's already happened today...
remaining += 24 * 60 * 60 # wrap around to the next day
# return it
return remaining
# ===========
# M A I N
# ===========
# get current time
magtag.get_local_time()
now = time.localtime()
# show today's date
show_today()
# get and plot tide levels
tide_data = get_tide_data()
plot_tides()
# get and show hilo tide times
hilo_data = get_hilo_data()
show_hilo()
# refresh display
time.sleep(magtag.display.time_to_refresh + 1)
magtag.display.refresh()
time.sleep(magtag.display.time_to_refresh + 1)
# ZZZZZZzzzzzzzzz
now = time.localtime()
magtag.exit_and_deep_sleep(time_to_sleep())
#
# code.py runs again when board wakes up
#
|
1630162
|
import base64
from functools import wraps
from django.http import HttpResponse
from corehq.apps.api.cors import ACCESS_CONTROL_ALLOW, add_cors_headers_to_response
from corehq.apps.api.models import ApiUser
def api_user_basic_auth(permission, realm=''):
def real_decorator(view):
def wrapper(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
                        username, password = base64.b64decode(auth[1]).decode('utf-8').split(':', 1)
if ApiUser.auth(username, password, permission):
return view(request, *args, **kwargs)
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
return wrapper
return real_decorator
def allow_cors(allowed_methods):
allowed_methods = allowed_methods or []
# always allow options
allowed_methods = allowed_methods + ['OPTIONS']
def decorator(view_func):
@wraps(view_func)
def wrapped_view(request, *args, **kwargs):
if request.method == "OPTIONS":
response = HttpResponse()
response[ACCESS_CONTROL_ALLOW] = ', '.join(allowed_methods)
return add_cors_headers_to_response(response)
response = view_func(request, *args, **kwargs)
if request.method in allowed_methods:
add_cors_headers_to_response(response)
return response
return wrapped_view
return decorator
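# --- Added hedged usage sketch (not part of the original module) ---
# How the two decorators might be stacked on a plain Django view; the permission
# string, realm and view name are hypothetical placeholders.
@allow_cors(["GET"])
@api_user_basic_auth(permission="example-permission", realm="example")
def example_api_view(request, *args, **kwargs):
    return HttpResponse("ok")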
|
1630200
|
from flask import request, jsonify, url_for
from app import app
from config import ROOT_URL
from app.tasks import debug_celery_task
from app.tasks import update_local_ftp_configurations, update_local_tftp_configurations
@app.route(ROOT_URL + "debug/calculate_task", methods=['POST'])
def debug_calculate_task():
"""
Ajax view that create a simple calculation job
:return:
"""
a = request.form.get('a', type=int)
b = request.form.get('b', type=int)
task = debug_celery_task.delay(a, b)
return jsonify({}), 202, {'Location': url_for('task_status_json', task_id=task.id)}
@app.route(ROOT_URL + "export/template/<int:config_template_id>/local_ftp", methods=['POST'])
def update_local_ftp_config_task(config_template_id):
"""
used to trigger the update of the local FTP files for the given config template
:param config_template_id:
:return:
"""
task = update_local_ftp_configurations.delay(config_template_id)
return jsonify({}), 202, {'Location': url_for('task_status_json', task_id=task.id)}
@app.route(ROOT_URL + "export/template/<int:config_template_id>/local_tftp", methods=['POST'])
def update_local_tftp_config_task(config_template_id):
"""
used to trigger the update of the local TFTP files for the given config template
:param config_template_id:
:return:
"""
task = update_local_tftp_configurations.delay(config_template_id)
return jsonify({}), 202, {'Location': url_for('task_status_json', task_id=task.id)}
|
1630255
|
import uuid
import pytest
from supriya.patterns.events import CompositeEvent, NodeFreeEvent, NullEvent, Priority
id_ = uuid.uuid4()
@pytest.mark.parametrize(
"event, offset, expected",
[
(
CompositeEvent([NullEvent(delta=0.25), NodeFreeEvent(id_, delta=0.0)]),
0.0,
[(0.25, Priority.START, NodeFreeEvent(id_))],
),
(
CompositeEvent([NullEvent(delta=0.5), NodeFreeEvent(id_, delta=0.0)]),
2.5,
[(3.0, Priority.START, NodeFreeEvent(id_))],
),
],
)
def test_expand(event, offset, expected):
print(event)
actual = event.expand(offset)
assert actual == expected
|
1630264
|
from bokeh.io import output_file, show
from bokeh.models.widgets import Div
output_file("div.html")
div = Div(text="""Your <a href="https://en.wikipedia.org/wiki/HTML">HTML</a>-supported text is initialized with the <b>text</b> argument. The
remaining div arguments are <b>width</b> and <b>height</b>. For this example, those values
are <i>200</i> and <i>100</i> respectively.""",
width=200, height=100)
show(div)
|
1630279
|
from django.test import TestCase
from django.core.cache import cache
from django_redis import get_redis_connection
class CacheAwareTestCase(TestCase):
"""
Cache-aware TestCase that clears the Redis storage and cache on startup
"""
def clearCache(self):
"""
Clears the cache
Can be invoked manually if the unit test requires it
"""
cache.clear()
con = get_redis_connection("persistent")
con.flushall()
def setUp(self):
super(CacheAwareTestCase, self).setUp()
self.clearCache()
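# --- Added hedged usage sketch (not part of the original module) ---
# A test case inheriting the cache-clearing setUp; assumes the project defines
# the "persistent" Redis connection used above. The cache key is a placeholder.
class ExampleCacheTest(CacheAwareTestCase):
    def test_cache_starts_empty(self):
        self.assertIsNone(cache.get("example-key"))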
|
1630303
|
import flask as f
from ..entities._entity import EntitySerializer
from ..entities.commit import CommitSerializer
from ..entities.run import Run
class _Serializer(EntitySerializer):
def _dump(self, commits):
def _run(contender):
baseline = contender.get_baseline_run()
baseline_url, contender_url, compare_url = None, None, None
baseline_timestamp, contender_timestamp = None, None
if baseline and contender:
compare_ids = f"{baseline.id}...{contender.id}"
compare_url = f.url_for(
"api.compare-runs",
compare_ids=compare_ids,
_external=True,
)
if baseline:
baseline_timestamp = baseline.timestamp.isoformat()
baseline_url = f.url_for(
"api.run",
run_id=baseline.id,
_external=True,
)
if contender:
contender_timestamp = contender.timestamp.isoformat()
contender_url = f.url_for(
"api.run",
run_id=contender.id,
_external=True,
)
return {
"baseline": {
"machine_name": baseline.machine.name if baseline else None,
"run": baseline_url,
"run_id": baseline.id if baseline else None,
"run_name": baseline.name if baseline else None,
"run_timestamp": baseline_timestamp,
},
"contender": {
"machine_name": contender.machine.name if contender else None,
"run": contender_url,
"run_id": contender.id if contender else None,
"run_name": contender.name if contender else None,
"run_timestamp": contender_timestamp,
},
"compare": compare_url,
}
baseline_commit, contender_commit = commits
contender_runs = Run.all(commit_id=contender_commit.id)
compare_shas = f"{baseline_commit.sha}...{contender_commit.sha}"
result = {
"commits": {
"baseline": CommitSerializer().one.dump(baseline_commit),
"contender": CommitSerializer().one.dump(contender_commit),
},
"runs": [_run(r) for r in contender_runs],
"links": {
"self": f.url_for(
"api.compare-commits", compare_shas=compare_shas, _external=True
),
},
}
result["commits"]["baseline"].pop("links", None)
result["commits"]["contender"].pop("links", None)
return result
class CompareSummarySerializer:
one = _Serializer()
many = _Serializer(many=True)
|
1630343
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import time
def build_net(net_name, cross_socket):
net = core.Net(net_name)
net.Proto().type = "async_scheduling"
numa_device_option = caffe2_pb2.DeviceOption()
numa_device_option.device_type = caffe2_pb2.CPU
numa_device_option.numa_node_id = 0
net.XavierFill([], net_name + "/input_blob", shape=[1024, 1024],
device_option=numa_device_option)
if cross_socket:
numa_device_option.numa_node_id = 1
net.Copy(net_name + "/input_blob", net_name + "/output_blob",
device_option=numa_device_option)
return net
def main():
assert workspace.IsNUMAEnabled() and workspace.GetNumNUMANodes() >= 2
single_net = build_net("single_net", False)
cross_net = build_net("cross_net", True)
workspace.CreateNet(single_net)
workspace.CreateNet(cross_net)
for _ in range(4):
t = time.time()
workspace.RunNet(single_net.Name(), 5000)
print("Single socket time:", time.time() - t)
t = time.time()
workspace.RunNet(cross_net.Name(), 5000)
print("Cross socket time:", time.time() - t)
if __name__ == '__main__':
core.GlobalInit(["caffe2", "--caffe2_cpu_numa_enabled=1"])
main()
|
1630351
|
from monzo.monzo import Monzo
from monzo.errors import BadRequestError
import pytest
class TestApiErrors:
@pytest.fixture
def unauthorized_client(self):
return Monzo("gibberish")
def test_whoami(self, unauthorized_client):
with pytest.raises(BadRequestError):
unauthorized_client.whoami()
|
1630354
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import torch.nn.functional as F
from ops.TCP.TCP_module import TCP
from torch.nn.init import orthogonal_
__all__ = ['Res2Net', 'res2net50']
class MEModule(nn.Module):
""" Motion exciation module
:param reduction=16
:param n_segment=8/16
"""
def __init__(self, channel, reduction=16, n_segment=8):
super(MEModule, self).__init__()
self.channel = channel
self.reduction = reduction
self.n_segment = n_segment
self.conv1 = nn.Conv2d(
in_channels=self.channel,
out_channels=self.channel//self.reduction,
kernel_size=1,
bias=False)
self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv2 = nn.Conv2d(
in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,
kernel_size=3,
padding=1,
groups=channel//self.reduction,
bias=False)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.sigmoid = nn.Sigmoid()
self.pad = (0, 0, 0, 0, 0, 0, 0, 1)
self.conv3 = nn.Conv2d(
in_channels=self.channel//self.reduction,
out_channels=self.channel,
kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.channel)
self.identity = nn.Identity()
def forward(self, x):
nt, c, h, w = x.size()
bottleneck = self.conv1(x) # nt, c//r, h, w
bottleneck = self.bn1(bottleneck) # nt, c//r, h, w
# t feature
reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w
t_fea, __ = reshape_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w
# apply transformation conv to t+1 feature
conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w
# reshape fea: n, t, c//r, h, w
reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:])
__, tPlusone_fea = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w
# motion fea = t+1_fea - t_fea
# pad the last timestamp
diff_fea = tPlusone_fea - t_fea # n, t-1, c//r, h, w
# pad = (0,0,0,0,0,0,0,1)
diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) #nt, c//r, h, w
y = self.avg_pool(diff_fea_pluszero) # nt, c//r, 1, 1
y = self.conv3(y) # nt, c, 1, 1
y = self.bn3(y) # nt, c, 1, 1
y = self.sigmoid(y) # nt, c, 1, 1
y = y - 0.5
output = x + x * y.expand_as(x)
return output
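# --- Added hedged shape-check sketch (not part of the original module) ---
# Feeding one clip of n_segment=8 frames through the module leaves the tensor
# shape unchanged; the channel count here is an arbitrary example.
# me = MEModule(channel=64, n_segment=8)
# print(me(torch.rand(8, 64, 56, 56)).shape)  # torch.Size([8, 64, 56, 56])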
class ShiftModule(nn.Module):
"""1D Temporal convolutions, the convs are initialized to act as the "Part shift" layer
"""
def __init__(self, input_channels, n_segment=8, n_div=8, mode='shift'):
super(ShiftModule, self).__init__()
self.input_channels = input_channels
self.n_segment = n_segment
self.fold_div = n_div
self.fold = self.input_channels // self.fold_div
self.conv = nn.Conv1d(
2*self.fold, 2*self.fold,
kernel_size=3, padding=1, groups=2*self.fold,
bias=False)
# weight_size: (2*self.fold, 1, 3)
if mode == 'shift':
# import pdb; pdb.set_trace()
self.conv.weight.requires_grad = True
self.conv.weight.data.zero_()
self.conv.weight.data[:self.fold, 0, 2] = 1 # shift left
self.conv.weight.data[self.fold: 2 * self.fold, 0, 0] = 1 # shift right
if 2*self.fold < self.input_channels:
self.conv.weight.data[2 * self.fold:, 0, 1] = 1 # fixed
elif mode == 'fixed':
self.conv.weight.requires_grad = True
self.conv.weight.data.zero_()
self.conv.weight.data[:, 0, 1] = 1 # fixed
elif mode == 'norm':
self.conv.weight.requires_grad = True
def forward(self, x):
# shift by conv
# import pdb; pdb.set_trace()
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
x = x.permute([0, 3, 4, 2, 1]) # (n_batch, h, w, c, n_segment)
x = x.contiguous().view(n_batch*h*w, c, self.n_segment)
x = self.conv(x) # (n_batch*h*w, c, n_segment)
x = x.view(n_batch, h, w, c, self.n_segment)
x = x.permute([0, 4, 3, 1, 2]) # (n_batch, n_segment, c, h, w)
x = x.contiguous().view(nt, c, h, w)
return x
class Bottle2neckShift(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale=4, stype='normal'):
""" Constructor
Args:
inplanes: input channel dimensionality
planes: output channel dimensionality
stride: conv stride. Replaces pooling layer.
downsample: None when stride = 1
baseWidth: basic width of conv3x3
scale: number of scale.
            stype: 'normal': normal set. 'stage': first block of a new stage.
"""
super(Bottle2neckShift, self).__init__()
width = int(math.floor(planes * (baseWidth/64.0)))
self.me = MEModule(width*scale, reduction=16, n_segment=8)
self.conv1 = nn.Conv2d(inplanes, width*scale, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(width*scale)
if scale == 1:
self.nums = 1
else:
self.nums = scale - 1
if stype == 'stage':
self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
convs = []
bns = []
shifts = []
for i in range(self.nums):
convs.append(nn.Conv2d(width, width, kernel_size=3, stride=stride,
padding=1, bias=False))
bns.append(nn.BatchNorm2d(width))
shifts.append(ShiftModule(width, n_segment=8, n_div=2, mode='fixed'))
shifts.append(ShiftModule(width, n_segment=8, n_div=2, mode='shift'))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.shifts = nn.ModuleList(shifts)
self.conv3 = nn.Conv2d(width*scale, planes * self.expansion,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stype = stype
self.scale = scale
self.width = width
def forward(self, x):
# import pdb; pdb.set_trace()
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.me(out)
spx = torch.split(out, self.width, 1) # 4*(nt, c/4, h, w)
for i in range(self.nums):
if i == 0 or self.stype == 'stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.shifts[i](sp)
sp = self.convs[i](sp)
sp = self.relu(self.bns[i](sp))
if i == 0:
out = sp
else:
out = torch.cat((out, sp), 1)
last_sp = spx[self.nums]
last_sp = self.shifts[self.nums](last_sp)
if self.scale != 1 and self.stype == 'normal':
out = torch.cat((out, last_sp), 1)
elif self.scale != 1 and self.stype == 'stage':
if self.stype =='stage' and spx[-1].shape[1] == 208:
out = torch.cat((out, last_sp), 1)
# print(out.shape)
else:
out = torch.cat((out, self.pool(last_sp)), 1)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottle2neck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale = 4, stype='normal'):
""" Constructor
Args:
inplanes: input channel dimensionality
planes: output channel dimensionality
stride: conv stride. Replaces pooling layer.
downsample: None when stride = 1
baseWidth: basic width of conv3x3
scale: number of scale.
            stype: 'normal': normal set. 'stage': first block of a new stage.
"""
super(Bottle2neck, self).__init__()
width = int(math.floor(planes * (baseWidth/64.0)))
self.conv1 = nn.Conv2d(inplanes, width*scale, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(width*scale)
if scale == 1:
self.nums = 1
else:
self.nums = scale -1
if stype == 'stage':
self.pool = nn.AvgPool2d(kernel_size=3, stride = stride, padding=1)
convs = []
bns = []
for i in range(self.nums):
convs.append(nn.Conv2d(width, width, kernel_size=3, stride = stride, padding=1, bias=False))
bns.append(nn.BatchNorm2d(width))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = nn.Conv2d(width*scale, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stype = stype
self.scale = scale
self.width = width
def forward(self, x):
        # import pdb; pdb.set_trace()
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
for i in range(self.nums):
if i==0 or self.stype=='stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp)
sp = self.relu(self.bns[i](sp))
if i==0:
out = sp
else:
out = torch.cat((out, sp), 1)
if self.scale != 1 and self.stype=='normal':
out = torch.cat((out, spx[self.nums]),1)
elif self.scale != 1 and self.stype=='stage':
out = torch.cat((out, self.pool(spx[self.nums])),1)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Res2Net(nn.Module):
def __init__(self, block, layers, baseWidth = 26, scale = 4, num_classes=1000,
TCP_module=None, segment=None,
):
self.inplanes = 64
super(Res2Net, self).__init__()
self.baseWidth = baseWidth
self.scale = scale
self.num_segments = segment
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
if TCP_module is not None:
print('Adding TCP module...')
self.TCP = TCP_module
else:
self.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.TCP = None
for m in self.modules():
if m == self.TCP :#reverse the initialization in TCP
break
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample=downsample,
stype='stage', baseWidth = self.baseWidth, scale=self.scale))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, baseWidth = self.baseWidth, scale=self.scale))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.TCP is not None:
x = self.TCP(x)
else :
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def res2net50(pretrained=False, **kwargs):
"""Constructs a Res2Net-50 model.
Res2Net-50 refers to the Res2Net-50_26w_4s.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 4, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_4s']))
return model
def tea50_8f(TCP_module=None, **kwargs):
"""Constructs a TEA model.
part of the TEA model refers to the Res2Net-50_26w_4s.
Args:
TCP_module: if not None, generating TCP Net.
"""
model = Res2Net(Bottle2neckShift, [3, 4, 6, 3], baseWidth = 26, scale = 4,
TCP_module=TCP_module, **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_4s']),
# strict=False)
return model
def res2net50_26w_4s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 4, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_4s']))
return model
def res2net101_26w_4s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth = 26, scale = 4, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['res2net101_26w_4s']))
return model
def res2net50_26w_6s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 6, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_6s']))
return model
def res2net50_26w_8s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 8, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_8s']))
return model
def res2net50_48w_2s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_48w_2s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 48, scale = 2, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['res2net50_48w_2s']))
return model
def res2net50_14w_8s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_14w_8s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 14, scale = 8, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['res2net50_14w_8s']))
return model
if __name__ == '__main__':
images = torch.rand(8, 3, 224, 224)
    # NOTE: the original referenced an undefined `res2net50shift`; use the
    # shift-enabled builder `tea50_8f` defined above instead. Pretrained weights
    # are not loaded here because `model_urls` is not defined in this file.
    model = tea50_8f(segment=8)
output = model(images)
print(output.size())
|
1630358
|
import welleng.clearance
import welleng.io
import welleng.error
import welleng.survey
import welleng.utils
import welleng.mesh
import welleng.visual
import welleng.version
import welleng.errors.tool_errors
import welleng.exchange.wbp
import welleng.exchange.csv
import welleng.target
import welleng.connector
import welleng.exchange.edm
import welleng.fluid
import welleng.node
|
1630376
|
from pybricks.hubs import PrimeHub
from pybricks.parameters import Button
# Initialize the hub.
hub = PrimeHub()
# Configure the stop button combination. Now, your program stops
# if you press the center and Bluetooth buttons simultaneously.
hub.system.set_stop_button((Button.CENTER, Button.BLUETOOTH))
# Now we can use the center button as a normal button.
while True:
# Play a sound if the center button is pressed.
if Button.CENTER in hub.buttons.pressed():
hub.speaker.beep()
|
1630384
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib.model.utils.config import cfg
from lib.model.faster_rcnn.faster_rcnn import _fasterRCNN_BiDet
import lib.model.faster_rcnn.binary_utils as b_utils
import torch
import torch.nn as nn
import math
import pdb
def binary_conv1x1(in_planes, out_planes, stride=1, **kwargs):
"""3x3 convolution with padding"""
return b_utils.BinarizeConv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False,
**kwargs)
def binary_conv3x3(in_planes, out_planes, stride=1, **kwargs):
"""3x3 convolution with padding"""
return b_utils.BinarizeConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False,
**kwargs)
def binary_block3x3(in_planes, out_planes, stride=1, **kwargs):
"""3x3 convolution with padding"""
return b_utils.BinBlock(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False,
**kwargs)
def binary_block5x5(in_planes, out_planes, stride=1, **kwargs):
"""3x3 convolution with padding"""
return b_utils.BinBlock(in_planes, out_planes, kernel_size=5, stride=stride,
padding=2, bias=False,
**kwargs)
def conv1x1(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False)
def conv3x3(in_planes, out_planes, stride=1, **kwargs):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False, **kwargs)
class BinBasicBlock(nn.Module):
"""
Shortcut between every two adjacent convs
"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, **kwargs):
super(BinBasicBlock, self).__init__()
if downsample is not None:
res_func1 = downsample
else:
res_func1 = b_utils.myid
res_func2 = b_utils.myid
self.conv1 = binary_block3x3(inplanes, planes, stride, res_func=res_func1, **kwargs)
self.conv2 = binary_block3x3(planes, planes, res_func=res_func2, **kwargs)
self.stride = stride
def forward(self, x):
out = x
out = self.conv1(out)
out = self.conv2(out)
return out
class BiDetResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, channels=(64, 128, 256, 512), **kwargs):
super(BiDetResNet, self).__init__()
self.inplanes = channels[0]
first_inplanes = self.inplanes
self.conv1 = nn.Conv2d(3, first_inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(first_inplanes)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, first_inplanes, layers[0], **kwargs)
self.layer2 = self._make_layer(block, channels[1], layers[1], stride=2, **kwargs)
self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2, **kwargs)
if len(channels) == 4:
self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2, **kwargs)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(channels[-1] * block.expansion, num_classes, bias=True)
self.log_softmax = nn.LogSoftmax(dim=-1)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, b_utils.BinarizeConv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear) or isinstance(m, b_utils.BinarizeLinear):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, **kwargs):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
conv = nn.Conv2d
ds_out_planes = planes * block.expansion
downsample = nn.Sequential(
nn.AvgPool2d(2, stride=stride, ceil_mode=True),
conv(self.inplanes, ds_out_planes, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(ds_out_planes)
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, **kwargs))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, **kwargs))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
if hasattr(self, 'layer4'):
x = self.layer4(x)
x = self.avgpool(x).view(x.size(0), -1)
x = self.fc(x)
return self.log_softmax(x)
def bidetnet18(**kwargs):
model = BiDetResNet(BinBasicBlock, [2, 2, 2, 2], **kwargs)
return model
def bidetnet34(**kwargs):
model = BiDetResNet(BinBasicBlock, [3, 4, 6, 3], **kwargs)
return model
class bidet_resnet(_fasterRCNN_BiDet):
def __init__(self, classes, num_layers=18, class_agnostic=False, model_path=None,
fix_real_conv=True, fix_base_bn=True, fix_top_bn=True, nms_threshold=0.01, sample_sigma=0.001,
rpn_prior_weight=0.2, rpn_reg_weight=0.1, head_prior_weight=0.2, head_reg_weight=0.1):
# assume that base net can only be bireal18 or bireal34
self.depth = num_layers
self.model_path = model_path
self.dout_base_model = 256
self.pooled_feat_size = 512
self.class_agnostic = class_agnostic
self.fix_real_conv = fix_real_conv
self.fix_base_bn = fix_base_bn
self.fix_top_bn = fix_top_bn
_fasterRCNN_BiDet.__init__(self, classes, class_agnostic, sample_sigma=sample_sigma,
nms_threshold=nms_threshold,
rpn_prior_weight=rpn_prior_weight, rpn_reg_weight=rpn_reg_weight,
head_prior_weight=head_prior_weight, head_reg_weight=head_reg_weight)
def _init_modules(self):
if self.depth == 18:
resnet = bidetnet18()
elif self.depth == 34:
resnet = bidetnet34()
else:
exit(-1)
if self.model_path is not None:
print("Loading pretrained weights from %s" % self.model_path)
state_dict = torch.load(self.model_path)
resnet.load_state_dict(state_dict, strict=True)
# Build resnet
self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.maxpool,
resnet.layer1, resnet.layer2, resnet.layer3)
self.RCNN_top = nn.Sequential(resnet.layer4)
self.RCNN_cls_score = nn.Linear(self.pooled_feat_size, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(self.pooled_feat_size, 8)
else:
self.RCNN_bbox_pred = nn.Linear(self.pooled_feat_size, 8 * self.n_classes)
# Fix blocks
if self.fix_real_conv:
print("fix base net conv1 and bn1")
for p in self.RCNN_base[0].parameters(): p.requires_grad = False
for p in self.RCNN_base[1].parameters(): p.requires_grad = False
assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
if cfg.RESNET.FIXED_BLOCKS >= 3:
print("fix base net layer3")
for p in self.RCNN_base[5].parameters(): p.requires_grad = False
if cfg.RESNET.FIXED_BLOCKS >= 2:
print("fix base net layer2")
for p in self.RCNN_base[4].parameters(): p.requires_grad = False
if cfg.RESNET.FIXED_BLOCKS >= 1:
print("fix base net layer1")
for p in self.RCNN_base[3].parameters(): p.requires_grad = False
def set_bn_fix(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
for p in m.parameters(): p.requires_grad = False
if self.fix_base_bn:
print("fix rcnn base bn")
self.RCNN_base.apply(set_bn_fix)
if self.fix_top_bn:
print("fix rcnn top bn")
self.RCNN_top.apply(set_bn_fix)
def train(self, mode=True):
# Override train so that the training mode is set as we want
nn.Module.train(self, mode)
if mode:
# Set fixed blocks to be in eval mode
# base[0] and base[1] are in eval mode
self.RCNN_base.eval()
assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
if cfg.RESNET.FIXED_BLOCKS == 3:
# fix base[0], [1], [3], [4], [5]
pass
elif cfg.RESNET.FIXED_BLOCKS == 2:
# fix base[0], [1], [3], [4]
self.RCNN_base[5].train()
elif cfg.RESNET.FIXED_BLOCKS == 1:
# fix base[0], [1], [3]
self.RCNN_base[5].train()
self.RCNN_base[4].train()
elif cfg.RESNET.FIXED_BLOCKS == 0:
# fix base[0], [1]
self.RCNN_base[5].train()
self.RCNN_base[4].train()
self.RCNN_base[3].train()
if not self.fix_real_conv:
self.RCNN_base[0].train()
self.RCNN_base[1].train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
if self.fix_base_bn:
self.RCNN_base.apply(set_bn_eval)
if self.fix_top_bn:
self.RCNN_top.apply(set_bn_eval)
def _head_to_tail(self, pool5):
fc7 = self.RCNN_top(pool5).mean(3).mean(2)
return fc7
|
1630389
|
from ethereum.block import BlockHeader
from ethereum.utils import decode_hex, int256, big_endian_to_int
def is_dao_challenge(config, number, amount, skip):
return number == config['DAO_FORK_BLKNUM'] and amount == 1 and skip == 0
def build_dao_header(config):
return BlockHeader(
prevhash=decode_hex('a218e2c611f21232d857e3c8cecdcdf1f65f25a4477f98f6f47e4063807f2308'),
uncles_hash=decode_hex('1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347'),
coinbase=decode_hex('bcdfc35b86bedf72f0cda046a3c16829a2ef41d1'),
state_root=decode_hex('c5e389416116e3696cce82ec4533cce33efccb24ce245ae9546a4b8f0d5e9a75'),
tx_list_root=decode_hex('7701df8e07169452554d14aadd7bfa256d4a1d0355c1d174ab373e3e2d0a3743'),
receipts_root=decode_hex('26cf9d9422e9dd95aedc7914db690b92bab6902f5221d62694a2fa5d065f534b'),
bloom=int256.deserialize(
decode_hex('00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'),
),
difficulty=big_endian_to_int(decode_hex('38c3bf2616aa')),
number=config['DAO_FORK_BLKNUM'],
gas_limit=big_endian_to_int(decode_hex('47e7c0')),
gas_used=big_endian_to_int(decode_hex('014820')),
timestamp=big_endian_to_int(decode_hex('578f7aa8')),
extra_data=config['DAO_FORK_BLKEXTRA'],
mixhash=decode_hex('5b5acbf4bf305f948bd7be176047b20623e1417f75597341a059729165b92397'),
nonce=decode_hex('bede87201de42426')
)
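# --- Added hedged usage sketch (not part of the original module) ---
# The config keys used above are the only ones required. The block number and
# extra-data value below are given purely for illustration (1,920,000 is the
# historical mainnet DAO fork height; the extra data spells "dao-hard-fork").
if __name__ == "__main__":
    example_config = {
        "DAO_FORK_BLKNUM": 1920000,
        "DAO_FORK_BLKEXTRA": decode_hex("64616f2d686172642d666f726b"),
    }
    header = build_dao_header(example_config)
    print(header.number)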
|
1630392
|
from __future__ import print_function, absolute_import, division
class ConditionalResetAction(object):
def __init__(self, adict):
self.field = adict['field']
self.update_fields = adict.get('update_fields', None)
def process_version(self, version):
new_version = version.copy()
reset_part = new_version.get_part(self.field)
        for f in (self.update_fields or []):
update_part = new_version.get_part(f)
update_part.inc()
if new_version == version:
reset_part.inc()
else:
reset_part.reset()
return new_version
|
1630429
|
import boto3
import sys
import time
# This Lambda function was designed to pull some code from a zip file location in a place of your choice. For my test,
# I used the S3 script below:
# import boto3
# def get_s3buckets():
# s3Buckets = boto3.client('s3')
# resource = s3Buckets.list_buckets()
# for b in resource['Buckets']:
# print(b['Name'])
# You can, of course, use this to create any Lambda function you would like though. Enjoy!
def create_lambdafunction(LambdafunctionName, iamRole, PythonfunctionName):
filePath = input(
'Please enter the location of the zip file where your Python code is for your Lambda function: ')
lambdaCreation = boto3.client('lambda')
resource = lambdaCreation.list_functions()
    # Bail out if a function with this name already exists.
    for f in resource['Functions']:
        if LambdafunctionName in f['FunctionName']:
            print('Lambda function already exists')
            print('Closing in 5 seconds')
            time.sleep(5)
            exit()
    # Otherwise create the function once.
    newLambda = lambdaCreation.create_function(
        FunctionName=LambdafunctionName,
        Runtime='python3.7',
        # The role is the IAM role you created that has access to kick off the Lambda Function. You need to put in the ARN
        Role=iamRole,
        Handler='{}.lambda_handler'.format(PythonfunctionName),
        # The code below is some sample code. This will pull all of your S3 bucket names
        Code={'ZipFile': open(filePath, 'rb').read(), },
        Description='Print out all S3 bucket names'
    )
    print(newLambda)
if __name__ == '__main__':
    LambdafunctionName = sys.argv[1]
    iamRole = sys.argv[2]
    PythonfunctionName = sys.argv[3]
    create_lambdafunction(LambdafunctionName, iamRole, PythonfunctionName)
|
1630492
|
import tarfile
import sys
version = sys.argv[1]
tar = tarfile.open(f"dist/bioscrape-{version}.tar.gz")
for member in tar.getmembers():
print(member)
tar.close()
|
1630550
|
from distutils.core import setup
try:
from setuptools import find_packages
except ImportError:
print ("Please install Distutils and setuptools"
" before installing this package")
raise
setup(
name='relay.runner',
version='0.1.10.dev0',
description=(
'A smart thermostat. Given a metric, or some timeseries that should'
        ' approach a given target, add heat or coolant as necessary.'
' You can use Relay to auto-scale workers in large'
' distributed systems or do anything a thermostat might do.'
),
long_description="Check the project homepage for details",
keywords=[
'relay', 'pid', 'pid controller', 'thermostat', 'tuning',
'oscilloscope', 'auto-scale'],
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/sailthru/relay',
packages=find_packages(),
include_package_data=True,
install_requires=['argparse_tools>=1.0.6', 'colorlog', 'numpy'],
extras_require={
'webui': ['pyzmq'],
},
tests_require=['nose'],
test_suite="nose.main",
zip_safe=True,
entry_points = {
'console_scripts': [
'relay = relay.__main__:go',
],
'setuptools.installation': [
'eggsecutable = relay.__main__:go',
],
},
)
|
1630552
|
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
#check if DEFAULTTUNE is set and it's value is: x86-64-x32
defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True)
if "x86-64-x32" not in defaulttune:
skipModule("DEFAULTTUNE is not set to x86-64-x32")
class X32libTest(oeRuntimeTest):
@testcase(281)
@skipUnlessPassed("test_ssh")
def test_x32_file(self):
status1 = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0]
status2 = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0]
self.assertTrue(status1 == 0 and status2 == 0, msg="/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % self.target.run("readelf -h /bin/ls")[1])
|
1630561
|
from sqlalchemy import create_engine
from sqlalchemy import Table, Column
from sqlalchemy import Integer, String, Text
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# engine = create_engine('mysql+mysqldb://user:password@127.0.0.1:3306/dbname')
engine = create_engine('sqlite:///db.sqlite3')
Base = declarative_base()
Session = sessionmaker(bind=engine)
article_map_table = Table('article_map', Base.metadata,
Column('old_article_id', Integer, ForeignKey('article_old.id'), primary_key=True),
Column('new_article_id', Integer, ForeignKey('article_new.id'), primary_key=True),
)
class OldArticle(Base):
__tablename__ = 'article_old'
id = Column(Integer, primary_key=True)
text = Column(Text, nullable=False)
mapped_into = relationship('NewArticle', secondary=article_map_table, back_populates='mapped_from')
paragraphs = relationship('OldParagraph', backref='starts_from', lazy=True)
class NewArticle(Base):
__tablename__ = 'article_new'
id = Column(Integer, primary_key=True)
text = Column(Text, nullable=False)
mapped_from = relationship('OldArticle', secondary=article_map_table, back_populates='mapped_into')
paragraphs = relationship('NewParagraph', backref='starts_from', lazy=True)
class OldParagraph(Base):
__tablename__ = 'paragraph_old'
id = Column(Integer, primary_key=True)
level = Column(Integer, nullable=False)
name = Column(String(32), nullable=False)
starts_from_id = Column(Integer, ForeignKey('article_old.id'), nullable=False)
class NewParagraph(Base):
__tablename__ = 'paragraph_new'
id = Column(Integer, primary_key=True)
level = Column(Integer, nullable=False)
name = Column(String(32), nullable=False)
starts_from_id = Column(Integer, ForeignKey('article_new.id'), nullable=False)
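# --- Added hedged usage sketch (not part of the original module) ---
# Create the schema and link an old article to its new counterpart through the
# association table; the text values are placeholders.
if __name__ == "__main__":
    Base.metadata.create_all(engine)
    session = Session()
    old = OldArticle(text="old body")
    new = NewArticle(text="new body")
    old.mapped_into.append(new)
    session.add_all([old, new])
    session.commit()
    print(session.query(NewArticle).first().mapped_from)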
|
1630620
|
import tensorflow as tf
import numpy as np
from typing import Text, List, Dict, Any, Union, Optional, Tuple, Callable
from rasa.shared.nlu.constants import TEXT
from rasa.utils.tensorflow.model_data import FeatureSignature
from rasa.utils.tensorflow.constants import (
REGULARIZATION_CONSTANT,
CONNECTION_DENSITY,
NUM_TRANSFORMER_LAYERS,
TRANSFORMER_SIZE,
NUM_HEADS,
UNIDIRECTIONAL_ENCODER,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
MASKED_LM,
HIDDEN_LAYERS_SIZES,
DROP_RATE,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
DENSE_DIMENSION,
CONCAT_DIMENSION,
DROP_RATE_ATTENTION,
SEQUENCE,
SENTENCE,
)
from rasa.utils.tensorflow import layers
from rasa.utils.tensorflow.exceptions import TFLayerConfigException
from rasa.utils.tensorflow.transformer import TransformerEncoder
from rasa.nlu.constants import DEFAULT_TRANSFORMER_SIZE
class RasaCustomLayer(tf.keras.layers.Layer):
"""Parent class for all classes in `rasa_layers.py`.
Allows a shared implementation for adjusting `DenseForSparse`
layers during incremental training.
During fine-tuning, sparse feature sizes might change due to addition of new data.
If this happens, we need to adjust our `DenseForSparse` layers to a new size.
`ConcatenateSparseDenseFeatures`, `RasaSequenceLayer` and
`RasaFeatureCombiningLayer` all inherit from `RasaCustomLayer` and thus can
change their own `DenseForSparse` layers if it's needed.
"""
def adjust_sparse_layers_for_incremental_training(
self,
new_sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
old_sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
reg_lambda: float,
) -> None:
"""Finds and adjusts `DenseForSparse` layers during incremental training.
Recursively looks through the layers until it finds all the `DenseForSparse`
ones and adjusts those which have their sparse feature sizes increased.
This function heavily relies on the name of `DenseForSparse` layer being
in the following format - f"sparse_to_dense.{attribute}_{feature_type}" -
in order to correctly extract the attribute and feature type.
New and old sparse feature sizes could look like this:
{TEXT: {FEATURE_TYPE_SEQUENCE: [4, 24, 128], FEATURE_TYPE_SENTENCE: [4, 128]}}
Args:
new_sparse_feature_sizes: sizes of current sparse features.
old_sparse_feature_sizes: sizes of sparse features the model was
previously trained on.
reg_lambda: regularization constant.
"""
for name, layer in self._tf_layers.items():
if isinstance(layer, RasaCustomLayer):
layer.adjust_sparse_layers_for_incremental_training(
new_sparse_feature_sizes=new_sparse_feature_sizes,
old_sparse_feature_sizes=old_sparse_feature_sizes,
reg_lambda=reg_lambda,
)
elif isinstance(layer, layers.DenseForSparse):
attribute = layer.get_attribute()
feature_type = layer.get_feature_type()
if (
attribute in new_sparse_feature_sizes
and feature_type in new_sparse_feature_sizes[attribute]
):
new_feature_sizes = new_sparse_feature_sizes[attribute][
feature_type
]
old_feature_sizes = old_sparse_feature_sizes[attribute][
feature_type
]
if sum(new_feature_sizes) > sum(old_feature_sizes):
self._tf_layers[name] = self._replace_dense_for_sparse_layer(
layer_to_replace=layer,
new_sparse_feature_sizes=new_feature_sizes,
old_sparse_feature_sizes=old_feature_sizes,
attribute=attribute,
feature_type=feature_type,
reg_lambda=reg_lambda,
)
@staticmethod
def _replace_dense_for_sparse_layer(
layer_to_replace: layers.DenseForSparse,
new_sparse_feature_sizes: List[int],
old_sparse_feature_sizes: List[int],
attribute: Text,
feature_type: Text,
reg_lambda: float,
) -> layers.DenseForSparse:
"""Replaces a `DenseForSparse` layer with a new one.
Replaces an existing `DenseForSparse` layer with a new one
in order to adapt it to incremental training.
Args:
layer_to_replace: a `DenseForSparse` layer that is used to create a new one.
new_sparse_feature_sizes: sizes of sparse features that will be
the input of the layer.
old_sparse_feature_sizes: sizes of sparse features that used to be
the input of the layer.
attribute: an attribute of the data fed to the layer.
feature_type: a feature type of the data fed to the layer.
reg_lambda: regularization constant.
Returns:
New `DenseForSparse` layer.
"""
kernel = layer_to_replace.get_kernel().numpy()
bias = layer_to_replace.get_bias()
if bias is not None:
bias = bias.numpy()
units = layer_to_replace.get_units()
# split kernel by feature sizes to update the layer accordingly
kernel_splits = []
splitting_index = 0
for size in old_sparse_feature_sizes:
kernel_splits.append(kernel[splitting_index : splitting_index + size, :])
splitting_index += size
additional_sizes = [
new_size - old_size
for new_size, old_size in zip(
new_sparse_feature_sizes, old_sparse_feature_sizes
)
]
std, mean = np.std(kernel), np.mean(kernel)
additional_weights = [
np.random.normal(mean, std, size=(num_rows, units)).astype(np.float32)
for num_rows in additional_sizes
]
merged_weights = [
np.vstack((existing, new))
for existing, new in zip(kernel_splits, additional_weights)
]
# stack each merged weight to form a new weight tensor
new_weights = np.vstack(merged_weights)
kernel_init = tf.constant_initializer(new_weights)
bias_init = tf.constant_initializer(bias) if bias is not None else None
new_layer = layers.DenseForSparse(
name=f"sparse_to_dense.{attribute}_{feature_type}",
reg_lambda=reg_lambda,
units=units,
use_bias=bias is not None,
kernel_initializer=kernel_init,
bias_initializer=bias_init,
)
return new_layer
class ConcatenateSparseDenseFeatures(RasaCustomLayer):
"""Combines multiple sparse and dense feature tensors into one dense tensor.
This layer combines features from various featurisers into a single feature array
per input example. All features must be of the same feature type, i.e. sentence-
level or sequence-level (token-level).
The layer combines a given list of tensors (whether sparse or dense) by:
1. converting sparse tensors into dense ones
2. optionally, applying dropout to sparse tensors before and/or after the conversion
3. concatenating all tensors along the last dimension
Arguments:
attribute: Name of attribute (e.g. `text` or `label`) whose features will be
processed.
feature_type: Feature type to be processed -- `sequence` or `sentence`.
feature_type_signature: A list of signatures for the given attribute and feature
type.
config: A model config for correctly parametrising the layer.
Input shape:
Tuple containing one list of N-D tensors, each with shape: `(batch_size, ...,
input_dim)`.
All dense tensors must have the same shape, except possibly the last dimension.
All sparse tensors must have the same shape, including the last dimension.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)` where `units` is the sum of
the last dimension sizes across all input tensors, with sparse tensors instead
contributing `config[DENSE_DIMENSION][attribute]` units each.
Raises:
A `TFLayerConfigException` if no feature signatures are provided.
Attributes:
output_units: The last dimension size of the layer's output.
"""
SPARSE_DROPOUT = "sparse_dropout"
SPARSE_TO_DENSE = "sparse_to_dense"
DENSE_DROPOUT = "dense_dropout"
def __init__(
self,
attribute: Text,
feature_type: Text,
feature_type_signature: List[FeatureSignature],
config: Dict[Text, Any],
) -> None:
"""Creates a new `ConcatenateSparseDenseFeatures` object."""
if not feature_type_signature:
raise TFLayerConfigException(
"The feature type signature must contain some feature signatures."
)
super().__init__(
name=f"concatenate_sparse_dense_features_{attribute}_{feature_type}"
)
self._check_sparse_input_units(feature_type_signature)
self.output_units = self._calculate_output_units(
attribute, feature_type_signature, config
)
# Prepare dropout and sparse-to-dense layers if any sparse tensors are expected
self._tf_layers: Dict[Text, tf.keras.layers.Layer] = {}
if any([signature.is_sparse for signature in feature_type_signature]):
self._prepare_layers_for_sparse_tensors(attribute, feature_type, config)
def _check_sparse_input_units(
self, feature_type_signature: List[FeatureSignature]
) -> None:
"""Checks that all sparse features have the same last dimension size."""
sparse_units = [
feature_sig.units
for feature_sig in feature_type_signature
if feature_sig.is_sparse
]
if len(set(sparse_units)) > 1:
raise TFLayerConfigException(
f"All sparse features must have the same last dimension size but found "
f"different sizes: {set(sparse_units)}."
)
def _prepare_layers_for_sparse_tensors(
self, attribute: Text, feature_type: Text, config: Dict[Text, Any]
) -> None:
"""Sets up sparse tensor pre-processing before combining with dense ones."""
# For optionally applying dropout to sparse tensors
if config[SPARSE_INPUT_DROPOUT]:
self._tf_layers[self.SPARSE_DROPOUT] = layers.SparseDropout(
rate=config[DROP_RATE]
)
# For converting sparse tensors to dense
self._tf_layers[self.SPARSE_TO_DENSE] = layers.DenseForSparse(
name=f"sparse_to_dense.{attribute}_{feature_type}",
units=config[DENSE_DIMENSION][attribute],
reg_lambda=config[REGULARIZATION_CONSTANT],
)
        # For optionally applying dropout to sparse tensors after they're converted to
        # dense tensors.
if config[DENSE_INPUT_DROPOUT]:
self._tf_layers[self.DENSE_DROPOUT] = tf.keras.layers.Dropout(
rate=config[DROP_RATE]
)
@staticmethod
def _calculate_output_units(
attribute: Text,
feature_type_signature: List[FeatureSignature],
config: Dict[Text, Any],
) -> int:
"""Determines the output units from the provided feature signatures.
        Sparse features will be turned into dense ones, hence each of them contributes
        its future dense number of units.
"""
return sum(
[
config[DENSE_DIMENSION][attribute]
if signature.is_sparse
else signature.units
for signature in feature_type_signature
]
)
def _process_sparse_feature(
self, feature: tf.SparseTensor, training: bool
) -> tf.Tensor:
"""Turns sparse tensor into dense, possibly adds dropout before and/or after."""
if self.SPARSE_DROPOUT in self._tf_layers:
feature = self._tf_layers[self.SPARSE_DROPOUT](feature, training)
feature = self._tf_layers[self.SPARSE_TO_DENSE](feature)
if self.DENSE_DROPOUT in self._tf_layers:
feature = self._tf_layers[self.DENSE_DROPOUT](feature, training)
return feature
def call(
self,
inputs: Tuple[List[Union[tf.Tensor, tf.SparseTensor]]],
training: bool = False,
) -> tf.Tensor:
"""Combines sparse and dense feature tensors into one tensor.
Arguments:
inputs: Contains the input tensors, all of the same rank.
training: A flag indicating whether the layer should behave in training mode
(applying dropout to sparse tensors if applicable) or in inference mode
(not applying dropout).
Returns:
Single tensor with all input tensors combined along the last dimension.
"""
features = inputs[0]
dense_features = []
for f in features:
if isinstance(f, tf.SparseTensor):
f = self._process_sparse_feature(f, training)
dense_features.append(f)
# Now that all features are made dense, concatenate them along the last (units)
# dimension.
return tf.concat(dense_features, axis=-1)
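# Illustrative sizing sketch (hypothetical numbers, not taken from any real config):
# given one dense sequence feature with 20 units, one sparse sequence feature, and
# config[DENSE_DIMENSION][attribute] == 16, the sparse tensor is first embedded into
# 16 dense units and then concatenated with the dense feature, so the resulting
# `output_units` of ConcatenateSparseDenseFeatures is 20 + 16 == 36.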
class RasaFeatureCombiningLayer(RasaCustomLayer):
"""Combines multiple dense or sparse feature tensors into one.
This layer combines features by following these steps:
1. Apply a `ConcatenateSparseDenseFeatures` layer separately to sequence- and
sentence-level features, yielding two tensors (one for each feature type).
2. Concatenate the sequence- and sentence-level tensors along the sequence dimension
by appending sentence-level features at the first available token position after
the sequence-level (token-level) features.
Arguments:
attribute: Name of attribute (e.g. `text` or `label`) whose features will be
processed.
attribute_signature: A dictionary containing two lists of feature signatures,
one for each feature type (`sequence` or `sentence`) of the given attribute.
config: A model config used for correctly parameterising the layer and the
`ConcatenateSparseDenseFeatures` layer it uses internally.
Input shape:
Tuple of three input tensors:
sequence_features: List of 3-D dense or sparse tensors, each with shape
`(batch_size, max_seq_length, input_dim)` where `input_dim` can be
different for sparse vs dense tensors. See the input shape of
`ConcatenateSparseDenseFeatures` for more information.
sentence_features: List of 3-D dense or sparse tensors, each with shape
`(batch_size, 1, input_dim)` where `input_dim` can be different for
sparse vs dense tensors, and can differ from that in
`sequence_features`. See the input shape of
`ConcatenateSparseDenseFeatures` for more information.
sequence_feature_lengths: Dense tensor of shape `(batch_size, )`.
Output shape:
combined_features: A 3-D tensor with shape `(batch_size, sequence_length,
units)` where `units` is completely determined by the internally applied
`ConcatenateSparseDenseFeatures` layer and `sequence_length` is the combined
length of sequence- and sentence-level features: `max_seq_length + 1` if
both feature types are present, `max_seq_length` if only sequence-level
            features are present, and 1 if only sentence-level features are present.
mask_combined_sequence_sentence: A 3-D tensor with shape
`(batch_size, sequence_length, 1)`.
Raises:
A `TFLayerConfigException` if no feature signatures are provided.
Attributes:
output_units: The last dimension size of the layer's `combined_features` output.
"""
def __init__(
self,
attribute: Text,
attribute_signature: Dict[Text, List[FeatureSignature]],
config: Dict[Text, Any],
) -> None:
"""Creates a new `RasaFeatureCombiningLayer` object."""
if not attribute_signature or not (
attribute_signature.get(SENTENCE, [])
or attribute_signature.get(SEQUENCE, [])
):
raise TFLayerConfigException(
"The attribute signature must contain some feature signatures."
)
super().__init__(name=f"rasa_feature_combining_layer_{attribute}")
self._tf_layers: Dict[Text, tf.keras.layers.Layer] = {}
# Prepare sparse-dense combining layers for each present feature type
self._feature_types_present = self._get_present_feature_types(
attribute_signature
)
self._prepare_sparse_dense_concat_layers(attribute, attribute_signature, config)
# Prepare components for combining sequence- and sentence-level features
self._prepare_sequence_sentence_concat(attribute, config)
self.output_units = self._calculate_output_units(attribute, config)
@staticmethod
def _get_present_feature_types(
attribute_signature: Dict[Text, List[FeatureSignature]]
) -> Dict[Text, bool]:
"""Determines feature types that are present.
Knowing which feature types are present is important because many downstream
operations depend on it, e.g. combining sequence- and sentence-level features
is only done if both feature types are present.
"""
return {
feature_type: (
feature_type in attribute_signature
and len(attribute_signature[feature_type]) > 0
)
for feature_type in [SEQUENCE, SENTENCE]
}
def _prepare_sparse_dense_concat_layers(
self,
attribute: Text,
attribute_signature: Dict[Text, List[FeatureSignature]],
config: Dict[Text, Any],
) -> None:
"""Prepares sparse-dense combining layers for all present feature types."""
for feature_type, present in self._feature_types_present.items():
if not present:
continue
self._tf_layers[
f"sparse_dense.{feature_type}"
] = ConcatenateSparseDenseFeatures(
attribute=attribute,
feature_type=feature_type,
feature_type_signature=attribute_signature[feature_type],
config=config,
)
def _prepare_sequence_sentence_concat(
self, attribute: Text, config: Dict[Text, Any]
) -> None:
"""Sets up combining sentence- and sequence-level features (if needed).
This boils down to preparing for unifying the units of the sequence- and
sentence-level features if they differ -- the same number of units is required
for combining the features.
"""
if (
self._feature_types_present[SEQUENCE]
and self._feature_types_present[SENTENCE]
):
# The output units of this layer will be based on the output sizes of the
# sparse+dense combining layers that are internally applied to all features.
sequence_units = self._tf_layers[f"sparse_dense.{SEQUENCE}"].output_units
sentence_units = self._tf_layers[f"sparse_dense.{SENTENCE}"].output_units
# Last dimension needs to be unified if sequence- and sentence-level
# features have different sizes, e.g. due to being produced by different
# featurizers.
if sequence_units != sentence_units:
for feature_type in [SEQUENCE, SENTENCE]:
self._tf_layers[
f"unify_dims_before_seq_sent_concat.{feature_type}"
] = layers.Ffnn(
layer_name_suffix=f"unify_dims.{attribute}_{feature_type}",
layer_sizes=[config[CONCAT_DIMENSION][attribute]],
dropout_rate=config[DROP_RATE],
reg_lambda=config[REGULARIZATION_CONSTANT],
density=config[CONNECTION_DENSITY],
)
def _calculate_output_units(self, attribute: Text, config: Dict[Text, Any]) -> int:
"""Calculates the number of output units for this layer class.
The number depends mainly on whether dimension unification is used or not.
"""
# If dimension unification is used, output units are determined by the unifying
# layers.
if (
f"unify_dims_before_seq_sent_concat.{SEQUENCE}" in self._tf_layers
or f"unify_dims_before_seq_sent_concat.{SENTENCE}" in self._tf_layers
):
return config[CONCAT_DIMENSION][attribute]
# Without dimension unification, the units from the underlying sparse_dense
# layers are carried over and should be the same for sequence-level features
# (if present) as for sentence-level features.
elif self._feature_types_present[SEQUENCE]:
return self._tf_layers[f"sparse_dense.{SEQUENCE}"].output_units
return self._tf_layers[f"sparse_dense.{SENTENCE}"].output_units
def _concat_sequence_sentence_features(
self,
sequence_tensor: tf.Tensor,
sentence_tensor: tf.Tensor,
mask_combined_sequence_sentence: tf.Tensor,
) -> tf.Tensor:
"""Concatenates sequence- & sentence-level features along sequence dimension."""
# If needed, pass both feature types through a dense layer to bring them to the
# same shape.
if f"unify_dims_before_seq_sent_concat.{SEQUENCE}" in self._tf_layers:
sequence_tensor = self._tf_layers[
f"unify_dims_before_seq_sent_concat.{SEQUENCE}"
](sequence_tensor)
if f"unify_dims_before_seq_sent_concat.{SENTENCE}" in self._tf_layers:
sentence_tensor = self._tf_layers[
f"unify_dims_before_seq_sent_concat.{SENTENCE}"
](sentence_tensor)
# mask_combined_sequence_sentence has for each input example a sequence of 1s of
# the length seq_length+1, where seq_length is the number of real tokens. The
# rest is 0s which form a padding up to the max. sequence length + 1 (max.
# number of real tokens + 1). Here the mask is turned into a mask that has 0s
# everywhere and 1 only at the immediate next position after the last real
# token's position for a given input example. Example (batch size = 2, sequence
# lengths = [1, 2]):
# [[[1], [0], [0]], ___\ [[[0], [1], [0]],
# [[1], [1], [0]]] / [[0], [0], [1]]]
sentence_feature_positions_mask = (
mask_combined_sequence_sentence
* tf.math.cumprod(
1 - mask_combined_sequence_sentence,
axis=1,
exclusive=True,
reverse=True,
)
)
# The new mask is used to distribute the sentence features at the sequence
# positions marked by 1s. The sentence features' dimensionality effectively
# changes from `(batch_size, 1, feature_dim)` to `(batch_size, max_seq_length+1,
# feature_dim)`, but the array is sparse, with real features present only at
# positions determined by 1s in the mask.
sentence_tensor = sentence_feature_positions_mask * sentence_tensor
# Padding of sequence-level features is increased by 1 in the sequence
# dimension to match the shape of modified sentence-level features.
sequence_tensor = tf.pad(sequence_tensor, [[0, 0], [0, 1], [0, 0]])
# Sequence- and sentence-level features effectively get concatenated by
# summing the two padded feature arrays like this (batch size = 1):
# [[seq1, seq2, seq3, 0, 0]] + [[0, 0, 0, sent1, 0]] =
# = [[seq1, seq2, seq3, sent1, 0]]
return sequence_tensor + sentence_tensor
def _combine_sequence_level_features(
self,
sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
mask_sequence: tf.Tensor,
training: bool,
) -> Optional[tf.Tensor]:
"""Processes & combines sequence-level features if any are present."""
if self._feature_types_present[SEQUENCE]:
sequence_features_combined = self._tf_layers[f"sparse_dense.{SEQUENCE}"](
(sequence_features,), training=training
)
# Apply mask which has 1s at positions of real tokens and 0s at all padded
# token positions. This is needed because the sparse+dense combining layer
# might've turned some fake (padded) features (i.e. 0s) into non-zero
# numbers and we want those to become zeros again.
# This step isn't needed for sentence-level features because those are never
# padded -- the effective sequence length in their case is always 1.
return sequence_features_combined * mask_sequence
return None
def _combine_sentence_level_features(
self,
sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sequence_feature_lengths: tf.Tensor,
training: bool,
) -> Tuple[Optional[tf.Tensor], Optional[tf.Tensor]]:
"""Processes & combines sentence-level features if any are present."""
if self._feature_types_present[SENTENCE]:
sentence_features_combined = self._tf_layers[f"sparse_dense.{SENTENCE}"](
(sentence_features,), training=training
)
# Sentence-level features have sequence dimension of length 1, add it to
# sequence-level feature lengths.
combined_sequence_sentence_feature_lengths = sequence_feature_lengths + 1
else:
sentence_features_combined = None
# Without sentence-level features, the feature sequence lengths are
# completely determined by sequence-level features.
combined_sequence_sentence_feature_lengths = sequence_feature_lengths
return sentence_features_combined, combined_sequence_sentence_feature_lengths
def call(
self,
inputs: Tuple[
List[Union[tf.Tensor, tf.SparseTensor]],
List[Union[tf.Tensor, tf.SparseTensor]],
tf.Tensor,
],
training: bool = False,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Combines multiple 3-D dense/sparse feature tensors into one.
Arguments:
inputs: Tuple containing:
sequence_features: Dense or sparse tensors representing different
token-level features.
sentence_features: Dense or sparse tensors representing sentence-level
features.
sequence_feature_lengths: A tensor containing the real sequence length
(the number of real -- not padding -- tokens) for each example in
the batch.
training: A flag indicating whether the layer should behave in training mode
(applying dropout to sparse tensors if applicable) or in inference mode
(not applying dropout).
Returns:
combined features: A tensor containing all the features combined.
mask_combined_sequence_sentence: A binary mask with 1s in place of real
features in the combined feature tensor, and 0s in padded positions with
fake features.
"""
sequence_features = inputs[0]
sentence_features = inputs[1]
sequence_feature_lengths = inputs[2]
# This mask is specifically for sequence-level features.
mask_sequence = compute_mask(sequence_feature_lengths)
sequence_features_combined = self._combine_sequence_level_features(
sequence_features, mask_sequence, training
)
(
sentence_features_combined,
combined_sequence_sentence_feature_lengths,
) = self._combine_sentence_level_features(
sentence_features, sequence_feature_lengths, training
)
mask_combined_sequence_sentence = compute_mask(
combined_sequence_sentence_feature_lengths
)
# If both feature types are present, combine them. Otherwise, just the present
# feature type will be returned.
if (
sequence_features_combined is not None
and sentence_features_combined is not None
):
features_to_return = self._concat_sequence_sentence_features(
sequence_features_combined,
sentence_features_combined,
mask_combined_sequence_sentence,
)
elif sequence_features_combined is not None:
features_to_return = sequence_features_combined
else:
features_to_return = sentence_features_combined
return features_to_return, mask_combined_sequence_sentence
class RasaSequenceLayer(RasaCustomLayer):
"""Creates an embedding from all features for a sequence attribute; facilitates MLM.
This layer combines all features for an attribute and embeds them using a
transformer, optionally doing masked language modeling. The layer is meant only for
attributes with sequence-level features, such as `text`, `response` and
`action_text`.
Internally, this layer applies the following steps:
1. Combine features using `RasaFeatureCombiningLayer`.
2. Apply a dense layer(s) to the combined features.
3. Optionally, and only during training for the `text` attribute, apply masking to
the features and create further helper variables for masked language modeling.
4. Embed the features using a transformer, effectively reducing variable-length
sequences of features to fixed-size embeddings.
Arguments:
attribute: Name of attribute (e.g. `text` or `label`) whose features will be
processed.
attribute_signature: A dictionary containing two lists of feature signatures,
one for each feature type (`sentence` or `sequence`) of the given attribute.
config: A model config used for correctly parameterising the underlying layers.
Input shape:
Tuple of three input tensors:
sequence_features: List of 3-D dense or sparse tensors, each with shape
`(batch_size, max_seq_length, input_dim)` where `input_dim` can be
different for sparse vs dense tensors. See the input shape of
`ConcatenateSparseDenseFeatures` for more information.
sentence_features: List of 3-D dense or sparse tensors, each with shape
`(batch_size, 1, input_dim)` where `input_dim` can be different for
sparse vs dense tensors, and can differ from that in
`sequence_features`. See the input shape of
`ConcatenateSparseDenseFeatures` for more information.
sequence_feature_lengths: Dense tensor of shape `(batch_size, )`.
Output shape:
outputs: `(batch_size, seq_length, units)` where `units` matches the underlying
transformer's output size (if present), otherwise it matches the output size
of the `Ffnn` block applied to the combined features, or it's the output
size of the underlying `RasaFeatureCombiningLayer` if the `Ffnn` block has 0
layers. `seq_length` is the sum of the sequence dimension
sizes of sequence- and sentence-level features (for details, see the output
shape of `RasaFeatureCombiningLayer`). If both feature types are present,
then `seq_length` will be 1 + the length of the longest sequence of real
tokens across all examples in the given batch.
seq_sent_features: `(batch_size, seq_length, hidden_dim)`, where `hidden_dim` is
the output size of the underlying `Ffnn` block, or the output size of the
underlying `RasaFeatureCombiningLayer` if the `Ffnn` block has 0 layers.
mask_combined_sequence_sentence: `(batch_size, seq_length, 1)`
token_ids: `(batch_size, seq_length, id_dim)`. `id_dim` is 2 when no dense
sequence-level features are present. Otherwise, it's arbitrarily chosen to
match the last dimension size of the first dense sequence-level feature in
the input list of features.
mlm_boolean_mask: `(batch_size, seq_length, 1)`, empty tensor if not doing MLM.
attention_weights: `(transformer_layers, batch_size, num_transformer_heads,
seq_length, seq_length)`, empty tensor if the transformer has 0 layers.
Raises:
A `TFLayerConfigException` if no feature signatures for sequence-level features
are provided.
Attributes:
output_units: The last dimension size of the layer's first output (`outputs`).
"""
FEATURE_COMBINING = "feature_combining"
FFNN = "ffnn"
TRANSFORMER = "transformer"
MLM_INPUT_MASK = "mlm_input_mask"
SPARSE_TO_DENSE_FOR_TOKEN_IDS = "sparse_to_dense_for_token_ids"
def __init__(
self,
attribute: Text,
attribute_signature: Dict[Text, List[FeatureSignature]],
config: Dict[Text, Any],
) -> None:
"""Creates a new `RasaSequenceLayer` object."""
if not attribute_signature or not attribute_signature.get(SEQUENCE, []):
raise TFLayerConfigException(
"The attribute signature must contain some sequence-level feature"
"signatures but none were found."
)
super().__init__(name=f"rasa_sequence_layer_{attribute}")
self._tf_layers: Dict[Text, Any] = {
self.FEATURE_COMBINING: RasaFeatureCombiningLayer(
attribute, attribute_signature, config
),
self.FFNN: layers.Ffnn(
config[HIDDEN_LAYERS_SIZES][attribute],
config[DROP_RATE],
config[REGULARIZATION_CONSTANT],
config[CONNECTION_DENSITY],
layer_name_suffix=attribute,
),
}
self._enables_mlm = False
# Note: Within TED, masked language modeling becomes just input dropout,
# since there is no loss term associated with predicting the masked tokens.
self._prepare_masked_language_modeling(attribute, attribute_signature, config)
transformer_layers, transformer_units = self._prepare_transformer(
attribute, config
)
self._has_transformer = transformer_layers > 0
self.output_units = self._calculate_output_units(
attribute, transformer_layers, transformer_units, config
)
@staticmethod
def _get_transformer_dimensions(
attribute: Text, config: Dict[Text, Any]
) -> Tuple[int, int]:
"""Determines # of transformer layers & output size from the model config.
The config can contain these directly (same for all attributes) or specified
separately for each attribute.
        If a transformer is used (i.e. if `number_of_transformer_layers` is positive),
        the default `transformer_size` of `None` would break things. Thus, a
        reasonable default value is set so that the model works fine.
"""
transformer_layers = config[NUM_TRANSFORMER_LAYERS]
if isinstance(transformer_layers, dict):
transformer_layers = transformer_layers[attribute]
transformer_units = config[TRANSFORMER_SIZE]
if isinstance(transformer_units, dict):
transformer_units = transformer_units[attribute]
if transformer_layers > 0 and (not transformer_units or transformer_units < 1):
transformer_units = DEFAULT_TRANSFORMER_SIZE
return transformer_layers, transformer_units
def _prepare_transformer(
self, attribute: Text, config: Dict[Text, Any]
) -> Tuple[int, int]:
"""Creates a transformer & returns its number of layers and output units."""
transformer_layers, transformer_units = self._get_transformer_dimensions(
attribute, config
)
self._tf_layers[self.TRANSFORMER] = prepare_transformer_layer(
attribute_name=attribute,
config=config,
num_layers=transformer_layers,
units=transformer_units,
drop_rate=config[DROP_RATE],
unidirectional=config[UNIDIRECTIONAL_ENCODER],
)
return transformer_layers, transformer_units
def _prepare_masked_language_modeling(
self,
attribute: Text,
attribute_signature: Dict[Text, List[FeatureSignature]],
config: Dict[Text, Any],
) -> None:
"""Prepares masking and computing helper variables for masked language modeling.
Only done for the text attribute and only if sequence-level (token-level)
features are present (MLM requires token-level information).
"""
if attribute == TEXT and SEQUENCE in attribute_signature and config[MASKED_LM]:
self._enables_mlm = True
self._tf_layers[self.MLM_INPUT_MASK] = layers.InputMask()
# Unique IDs of different token types are needed to construct the possible
# label space for MLM. If dense features are present, they're used as such
            # IDs, otherwise sparse features are embedded by a non-trainable
# DenseForSparse layer to create small embeddings that serve as IDs.
expect_dense_seq_features = any(
[not signature.is_sparse for signature in attribute_signature[SEQUENCE]]
)
if not expect_dense_seq_features:
self._tf_layers[
self.SPARSE_TO_DENSE_FOR_TOKEN_IDS
] = layers.DenseForSparse(
units=2,
use_bias=False,
trainable=False,
name=f"{self.SPARSE_TO_DENSE_FOR_TOKEN_IDS}.{attribute}",
)
def _calculate_output_units(
self,
attribute: Text,
transformer_layers: int,
transformer_units: int,
config: Dict[Text, Any],
) -> int:
"""Determines the output units based on what layer components are present.
The size depends on which component is the last created one in the internal
pipeline that is `RasaFeatureCombiningLayer` -> `Ffnn` -> `Transformer`, since
not all the components are always created.
"""
# transformer is the last component
if transformer_layers > 0:
return transformer_units
# the Ffnn block is the last component
if len(config[HIDDEN_LAYERS_SIZES][attribute]) > 0:
# this is the output size of the last layer of the Ffnn block
return config[HIDDEN_LAYERS_SIZES][attribute][-1]
# only the RasaFeatureCombiningLayer is present
return self._tf_layers[self.FEATURE_COMBINING].output_units
def _features_as_token_ids(
self, features: List[Union[tf.Tensor, tf.SparseTensor]]
) -> Optional[tf.Tensor]:
"""Creates dense labels (token IDs) used for negative sampling in MLM."""
# If there are dense features, we use them as labels - taking the first dense
# feature in the list, but any other dense feature would do the job.
for f in features:
if not isinstance(f, tf.SparseTensor):
return tf.stop_gradient(f)
# If no dense features are found, use a sparse feature but convert it into
# a dense one first.
for f in features:
if isinstance(f, tf.SparseTensor):
return tf.stop_gradient(
self._tf_layers[self.SPARSE_TO_DENSE_FOR_TOKEN_IDS](f)
)
return None
def _create_mlm_tensors(
self,
sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
seq_sent_features: tf.Tensor,
mask_sequence: tf.Tensor,
sentence_features_present: bool,
training: bool,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Produces helper variables for masked language modelling (only in training).
The `token_ids` embeddings can be viewed as token-level labels/unique IDs of all
input tokens (to be used later in the MLM loss) because these embeddings aren't
affected by dropout or masking and are effectively always unique for different
input tokens (and same for the same tokens).
`token_ids` share the batch and sequence dimension with the combined sequence-
and sentence-level features, the last dimension is unimportant and mimics the
first dense sequence-level feature in the list of features, or alternatively the
last dimension will have size 2 if there are only sparse sequence features
present.
"""
token_ids = self._features_as_token_ids(sequence_features)
# Pad in the sequence dimension to match the shape of combined sequence- and
# sentence-level features. This means padding by 1 if sentence-level features
# are present (those effectively have sequence length of 1) and not padding
# otherwise.
if sentence_features_present:
token_ids = tf.pad(token_ids, [[0, 0], [0, 1], [0, 0]])
mask_sequence = tf.pad(mask_sequence, [[0, 0], [0, 1], [0, 0]])
# mlm_boolean_mask has the same shape as the tensor with all combined features
# (except the last dimension), with True meaning tokens that are masked and
# False meaning tokens that aren't masked or that are fake (padded) tokens.
# Note that only sequence-level features are masked, nothing happens to the
# sentence-level features in the combined features tensor.
seq_sent_features, mlm_boolean_mask = self._tf_layers[self.MLM_INPUT_MASK](
seq_sent_features, mask_sequence, training
)
return seq_sent_features, token_ids, mlm_boolean_mask
def call(
self,
inputs: Tuple[
List[Union[tf.Tensor, tf.SparseTensor]],
List[Union[tf.Tensor, tf.SparseTensor]],
tf.Tensor,
],
training: bool = False,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Combines all of an attribute's features and embeds using a transformer.
Arguments:
inputs: Tuple containing:
sequence_features: Dense or sparse tensors representing different
token-level features.
sentence_features: Dense or sparse tensors representing different
sentence-level features.
sequence_feature_lengths: A tensor containing the real sequence length
(the number of real -- not padding -- tokens) for each example in
the batch.
training: A flag indicating whether the layer should behave in training mode
(applying dropout to sparse tensors if applicable) or in inference mode
(not applying dropout).
Returns:
outputs: Tensor with all features combined, masked (if doing MLM) and
embedded with a transformer.
            seq_sent_features: Tensor with all features combined, from just before
                masking and the transformer are applied.
mask_combined_sequence_sentence: A binary mask with 1s in place of real
features in the combined feature tensor, and 0s in padded positions with
fake features.
token_ids: Tensor with dense token-level features which can serve as
IDs (unique embeddings) of all the different tokens found in the batch.
Empty tensor if not doing MLM.
mlm_boolean_mask: A boolean mask with `True` where real tokens in `outputs`
were masked and `False` elsewhere. Empty tensor if not doing MLM.
attention_weights: Tensor containing self-attention weights received
from the underlying transformer. Empty tensor if the transformer has 0
layers.
"""
sequence_features = inputs[0]
sentence_features = inputs[1]
sequence_feature_lengths = inputs[2]
# Combine all features (sparse/dense, sequence-/sentence-level) into one tensor,
# also get a binary mask that has 1s at positions with real features and 0s at
# padded positions.
seq_sent_features, mask_combined_sequence_sentence = self._tf_layers[
self.FEATURE_COMBINING
]((sequence_features, sentence_features, sequence_feature_lengths))
# Apply one or more dense layers.
seq_sent_features = self._tf_layers[self.FFNN](seq_sent_features, training)
# If using masked language modeling, mask the transformer inputs and get labels
# for the masked tokens and a boolean mask. Note that TED does not use MLM loss,
# hence using masked language modeling (if enabled) becomes just input dropout.
if self._enables_mlm and training:
mask_sequence = compute_mask(sequence_feature_lengths)
(
seq_sent_features_masked,
token_ids,
mlm_boolean_mask,
) = self._create_mlm_tensors(
sequence_features,
seq_sent_features,
mask_sequence,
sentence_features_present=len(sentence_features) > 0,
training=training,
)
else:
# tf.zeros((0,)) is an alternative to None
token_ids = tf.zeros((0,))
mlm_boolean_mask = tf.zeros((0,))
seq_sent_features_masked = seq_sent_features
        # Apply the transformer (if present), hence reducing a sequence of features per
        # input example into a simple fixed-size embedding.
if self._has_transformer:
mask_padding = 1 - mask_combined_sequence_sentence
outputs, attention_weights = self._tf_layers[self.TRANSFORMER](
seq_sent_features_masked, mask_padding, training
)
outputs = tf.nn.gelu(outputs)
else:
# tf.zeros((0,)) is an alternative to None
outputs, attention_weights = seq_sent_features_masked, tf.zeros((0,))
return (
outputs,
seq_sent_features,
mask_combined_sequence_sentence,
token_ids,
mlm_boolean_mask,
attention_weights,
)
def compute_mask(sequence_lengths: tf.Tensor) -> tf.Tensor:
"""Computes binary mask given real sequence lengths.
Takes a 1-D tensor of shape `(batch_size,)` containing the lengths of sequences
(in terms of number of tokens) in the batch. Creates a binary mask of shape
`(batch_size, max_seq_length, 1)` with 1s at positions with real tokens and 0s
elsewhere.
"""
mask = tf.sequence_mask(sequence_lengths, dtype=tf.float32)
return tf.expand_dims(mask, -1)
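# Example: compute_mask(tf.constant([1, 2])) produces a float32 mask of shape
# (2, 2, 1) equal to [[[1.], [0.]], [[1.], [1.]]] -- 1s at real-token positions,
# 0s at padded positions.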
def prepare_transformer_layer(
attribute_name: Text,
config: Dict[Text, Any],
num_layers: int,
units: int,
drop_rate: float,
unidirectional: bool,
) -> Union[
TransformerEncoder,
Callable[
[tf.Tensor, Optional[tf.Tensor], Optional[Union[tf.Tensor, bool]]],
Tuple[tf.Tensor, Optional[tf.Tensor]],
],
]:
"""Creates & returns a transformer encoder, potentially with 0 layers."""
if num_layers > 0:
return TransformerEncoder(
num_layers,
units,
config[NUM_HEADS],
units * 4,
config[REGULARIZATION_CONSTANT],
dropout_rate=drop_rate,
attention_dropout_rate=config[DROP_RATE_ATTENTION],
density=config[CONNECTION_DENSITY],
unidirectional=unidirectional,
use_key_relative_position=config[KEY_RELATIVE_ATTENTION],
use_value_relative_position=config[VALUE_RELATIVE_ATTENTION],
max_relative_position=config[MAX_RELATIVE_POSITION],
name=f"{attribute_name}_encoder",
)
# create lambda so that it can be used later without the check
return lambda x, mask, training: (x, None)
|
1630635
|
from typing import Tuple

import pandas as pd
from server.telemetry.pandas_import import TelemetryFrame, TelemetrySeries
def _describe(s: pd.Series):
ps = [0.25, 0.5, 0.75, 0.9, 0.95]
d = s.describe(percentiles=ps)
print(d)
def print_series_summary(series: TelemetrySeries, by_pid: bool = False):
if by_pid:
for pid in sorted(series.pids()):
pd_s = series.data(by_pid=pid)
print(f"Time series: {series.label()} | PID {pid}")
_describe(pd_s)
else:
pd_s = series.data()
print(f"Time series: {series.label()}")
_describe(pd_s)
def print_measurements_summaries(frame: TelemetryFrame, by_pid: bool = False):
for label in sorted(frame.labels()):
t = frame.time_series(label)
print_series_summary(t, by_pid)
print()
def measurements_per_second(series: TelemetrySeries) -> pd.Series:
return series.data().resample('1S').count()
def sum_by_second_difference(t1: TelemetrySeries, t2: TelemetrySeries) \
        -> Tuple[float, pd.Series]:
# NB assumption: for each k . t1[k] <= t2[k]
s1 = t1.data().sum()
s2 = t2.data().sum()
    diff_ratio = (s2 - s1) / s2  # close to 0 => s1 ~ s2; close to 1 => s2 predominant
x = t1.data().resample('1S').sum() # sum values in each 1-second bucket
y = t2.data().resample('1S').sum()
return diff_ratio, y.sub(x)
def sum_by_second_ratio(t1: TelemetrySeries, t2: TelemetrySeries) \
        -> Tuple[float, pd.Series]:
s1 = t1.data().sum()
s2 = t2.data().sum()
ratio = s1 / s2
x = t1.data().resample('1S').sum() # sum values in each 1-second bucket
y = t2.data().resample('1S').sum()
return ratio, x.divide(y) # result[k] = x[k] / y[k]
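# Worked example (hypothetical numbers): if t1 sums to 25 and t2 sums to 100 over the
# whole capture, sum_by_second_ratio returns ratio == 0.25 together with a per-second
# series of the same quotient; values hovering around 0.25 indicate that the
# relationship between the two series is stable over time.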
def plot_to_file(figure_name: str, data: pd.Series):
fig = data.plot().get_figure()
fig.savefig(f"{figure_name}.pdf")
|
1630675
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from gma import Aggregate
from setrans import ExpandedFeatTrans
import copy
class FlowHead(nn.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=128+128):
super(ConvGRU, self).__init__()
self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
def forward(self, h, x):
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx))
r = torch.sigmoid(self.convr(hx))
q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
return h
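# Both GRU variants implement the standard convolutional GRU update:
#   z = sigmoid(Conv([h, x]))    (update gate)
#   r = sigmoid(Conv([h, x]))    (reset gate)
#   q = tanh(Conv([r * h, x]))   (candidate state)
#   h' = (1 - z) * h + z * q
# SepConvGRU below applies this update twice per step, using separable 1x5 and 5x1
# convolutions (a horizontal pass followed by a vertical pass).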
class SepConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192+128):
super(SepConvGRU, self).__init__()
self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
def forward(self, h, x):
# horizontal
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz1(hx))
r = torch.sigmoid(self.convr1(hx))
q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
# vertical
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz2(hx))
r = torch.sigmoid(self.convr2(hx))
q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
return h
class BasicMotionEncoder(nn.Module):
def __init__(self, args):
super(BasicMotionEncoder, self).__init__()
# When both f1 and f2 are applied SS-Trans, corr_multiplier = 2.
# Otherwise corr_multiplier = 1.
cor_planes = args.corr_levels * args.corr_multiplier * (2*args.corr_radius + 1)**2
self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
class BasicUpdateBlock(nn.Module):
def __init__(self, args, hidden_dim=128, input_dim=128):
super(BasicUpdateBlock, self).__init__()
self.args = args
self.encoder = BasicMotionEncoder(args)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
self.flow_head = FlowHead(input_dim=hidden_dim, hidden_dim=256)
self.mask = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64*9, 1, padding=0))
def forward(self, net_feat, inp_feat, corr, flow, upsample=True):
        # motion_features: (126+2)=128-dimensional.
motion_features = self.encoder(flow, corr)
inp_feat = torch.cat([inp_feat, motion_features], dim=1)
net_feat = self.gru(net_feat, inp_feat)
delta_flow = self.flow_head(net_feat)
# scale mask to balance gradients
mask = .25 * self.mask(net_feat)
return net_feat, mask, delta_flow
class GMAUpdateBlock(nn.Module):
def __init__(self, args, hidden_dim=128):
super().__init__()
self.args = args
self.encoder = BasicMotionEncoder(args)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim+hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64*9, 1, padding=0))
self.use_setrans = args.use_setrans
if self.use_setrans:
self.intra_trans_config = args.intra_trans_config
self.aggregator = ExpandedFeatTrans(self.intra_trans_config, 'Motion Aggregator')
else:
# Aggregate is attention with a (learnable-weighted) skip connection, without FFN.
self.aggregator = Aggregate(args=self.args, dim=128, dim_head=128, heads=self.args.num_heads)
def forward(self, net, inp, corr, flow, attention):
# encoder: BasicMotionEncoder
# corr: [3, 676, 50, 90]
motion_features = self.encoder(flow, corr)
# motion_features: 128-dim
if self.use_setrans:
# attention is multi-mode. ExpandedFeatTrans takes multi-mode attention.
B, C, H, W = motion_features.shape
motion_features_3d = motion_features.view(B, C, H*W).permute(0, 2, 1)
# motion_features_3d: [1, 7040, 128], attention: [1, 4, 7040, 7040]
motion_features_global_3d = self.aggregator(motion_features_3d, attention)
motion_features_global = motion_features_global_3d.view(B, H, W, C).permute(0, 3, 1, 2)
else:
# attention: [8, 1, 2852, 2852]. motion_features: [8, 128, 46, 62].
motion_features_global = self.aggregator(attention, motion_features)
inp_cat = torch.cat([inp, motion_features, motion_features_global], dim=1)
# Attentional update
net = self.gru(net, inp_cat)
delta_flow = self.flow_head(net)
        # scale mask to balance gradients
mask = .25 * self.mask(net)
return net, mask, delta_flow
|
1630683
|
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http.response import HttpResponse, HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from django.shortcuts import render
from django.urls import reverse
from .models import LdapAcademiaUser
from .serializers import LdapImportExport
@user_passes_test(lambda u: u.is_staff)
def import_file(request):
file_format = request.POST.get('file_format')
file_to_import = request.FILES.get('file_to_import')
# content here
url = reverse('admin:ldap_peoples_ldapacademiauser_changelist')
if not file_to_import:
return HttpResponseRedirect(url)
if not file_format or not file_to_import:
        # write an error message
pass
response = False
if file_format == 'json':
response = LdapImportExport.import_entries_from_json(file_to_import)
elif file_format == 'ldif':
response = LdapImportExport.import_entries_from_ldif(file_to_import)
return HttpResponseRedirect(url)
|
1630702
|
import itertools
import numba as nb
import numpy as np
import pandas as pd
import pytest
from sid.contacts import _consolidate_reason_of_infection
from sid.contacts import _numpy_replace
from sid.contacts import calculate_infections_by_contacts
from sid.contacts import create_group_indexer
@pytest.mark.unit
@pytest.mark.parametrize(
"states, group_code_name, expected",
[
(
pd.DataFrame({"a": [1] * 7 + [0] * 8}),
"a",
[list(range(7, 15)), list(range(7))],
),
(
pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, 3]).astype("category")}),
"a",
[[0, 4], [1, 5], [2, 6], [3, 7]],
),
(
pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, -1])}),
"a",
[[0, 4], [1, 5], [2, 6], [3]],
),
],
)
def test_create_group_indexer(states, group_code_name, expected):
result = create_group_indexer(states, group_code_name)
result = [r.tolist() for r in result]
assert result == expected
@pytest.fixture()
def households_w_one_infected():
states = pd.DataFrame(
{
"infectious": [True] + [False] * 7,
"cd_infectious_true": [-1] * 8,
"immunity": [1.0] + [0.0] * 7,
"group_codes_households": [0] * 4 + [1] * 4,
"households": [0] * 4 + [1] * 4,
"group_codes_non_rec": [0] * 4 + [1] * 4,
"n_has_infected": 0,
"virus_strain": pd.Series(["base_strain"] + [pd.NA] * 7, dtype="category"),
}
)
params = pd.DataFrame(
columns=["value"],
data=1,
index=pd.MultiIndex.from_tuples(
[("infection_prob", "households", "households")]
),
)
indexers = {"recurrent": nb.typed.List()}
indexers["recurrent"].append(create_group_indexer(states, ["households"]))
assortative_matching_cum_probs = nb.typed.List()
assortative_matching_cum_probs.append(np.zeros((0, 0)))
group_codes_info = {"households": {"name": "group_codes_households"}}
virus_strains = {
"names": ["base_strain"],
"contagiousness_factor": np.ones(1),
"immunity_resistance_factor": np.zeros(1),
}
return {
"states": states,
"recurrent_contacts": np.ones((len(states), 1), dtype=bool),
"random_contacts": None,
"params": params,
"indexers": indexers,
"assortative_matching_cum_probs": assortative_matching_cum_probs,
"group_codes_info": group_codes_info,
"susceptibility_factor": np.ones(len(states)),
"virus_strains": virus_strains,
"seasonality_factor": pd.Series([1], index=["households"]),
}
@pytest.mark.integration
def test_calculate_infections_only_recurrent_all_participate(
households_w_one_infected,
):
(
calc_infected,
calc_n_has_additionally_infected,
calc_missed_contacts,
was_infected_by,
) = calculate_infections_by_contacts(
**households_w_one_infected,
contact_models={"households": {"is_recurrent": True}},
seed=itertools.count(),
)
states = households_w_one_infected["states"]
exp_infected = pd.Series([-1] + [0] * 3 + [-1] * 4, dtype="int8")
exp_infection_counter = pd.Series([3] + [0] * 7, dtype="int32")
assert calc_infected.equals(exp_infected)
assert (
(states["n_has_infected"] + calc_n_has_additionally_infected)
.astype(np.int32)
.equals(exp_infection_counter)
)
assert calc_missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_recurrent_sick_skips(
households_w_one_infected,
):
households_w_one_infected["recurrent_contacts"][0] = 0
(
calc_infected,
calc_n_has_additionally_infected,
calc_missed_contacts,
was_infected_by,
) = calculate_infections_by_contacts(
**households_w_one_infected,
contact_models={"households": {"is_recurrent": True}},
seed=itertools.count(),
)
exp_infected = pd.Series([-1] * 8, dtype="int8")
exp_infection_counter = pd.Series([0] * 8, dtype="int32")
assert calc_infected.equals(exp_infected)
assert calc_n_has_additionally_infected.astype(np.int32).equals(
exp_infection_counter
)
assert calc_missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_recurrent_one_skips(
households_w_one_infected,
):
# 2nd person does not participate in household meeting
households_w_one_infected["recurrent_contacts"][1] = 0
(
calc_infected,
calc_n_has_additionally_infected,
calc_missed_contacts,
was_infected_by,
) = calculate_infections_by_contacts(
**households_w_one_infected,
contact_models={"households": {"is_recurrent": True}},
seed=itertools.count(),
)
exp_infected = pd.Series([-1, -1] + [0] * 2 + [-1] * 4, dtype="int8")
exp_infection_counter = pd.Series([2] + [0] * 7, dtype="int32")
assert calc_infected.equals(exp_infected)
assert calc_n_has_additionally_infected.astype(np.int32).equals(
exp_infection_counter
)
assert calc_missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_recurrent_one_immune(
households_w_one_infected,
):
households_w_one_infected["states"].loc[1, "immunity"] = 1.0
(
calc_infected,
calc_n_has_additionally_infected,
calc_missed_contacts,
was_infected_by,
) = calculate_infections_by_contacts(
**households_w_one_infected,
contact_models={"households": {"is_recurrent": True}},
seed=itertools.count(),
)
exp_infected = pd.Series([-1, -1] + [0] * 2 + [-1] * 4, dtype="int8")
exp_infection_counter = pd.Series([2] + [0] * 7, dtype="int32")
assert calc_infected.equals(exp_infected)
assert calc_n_has_additionally_infected.astype(np.int32).equals(
exp_infection_counter
)
assert calc_missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_non_recurrent(households_w_one_infected):
random_contacts = households_w_one_infected.pop("recurrent_contacts")
random_contacts[0] = 1
params = pd.DataFrame(
columns=["value"],
data=1,
index=pd.MultiIndex.from_tuples([("infection_prob", "non_rec", "non_rec")]),
)
states = households_w_one_infected["states"]
indexers = {"random": nb.typed.List()}
indexers["random"].append(create_group_indexer(states, ["group_codes_non_rec"]))
assortative_matching_cum_probs = nb.typed.List()
assortative_matching_cum_probs.append(np.array([[0.8, 1], [0.2, 1]]))
(
calc_infected,
calc_n_has_additionally_infected,
calc_missed_contacts,
was_infected_by,
) = calculate_infections_by_contacts(
states=households_w_one_infected["states"],
random_contacts=random_contacts,
recurrent_contacts=None,
params=params,
indexers=indexers,
assortative_matching_cum_probs=assortative_matching_cum_probs,
contact_models={"non_rec": {"is_recurrent": False}},
group_codes_info={"non_rec": {"name": "group_codes_non_rec"}},
susceptibility_factor=households_w_one_infected["susceptibility_factor"],
virus_strains=households_w_one_infected["virus_strains"],
seasonality_factor=pd.Series([1], index=["non_rec"]),
seed=itertools.count(),
)
exp_infected = pd.Series([-1, -1, 0, -1, -1, -1, -1, -1], dtype="int8")
exp_infection_counter = pd.Series([1] + [0] * 7, dtype="int32")
assert calc_infected.equals(exp_infected)
assert calc_n_has_additionally_infected.astype(np.int32).equals(
exp_infection_counter
)
assert not np.any(calc_missed_contacts)
@pytest.mark.unit
def test_consolidate_reason_of_infection():
was_infected_by_recurrent = np.array([0, 1, 1, -1, -1, -1, 0, -1])
was_infected_by_random = np.array([-1, -1, -1, 0, 0, 1, 0, -1])
contact_models = {
"a": {"is_recurrent": True},
"b": {"is_recurrent": True},
"c": {"is_recurrent": False},
"d": {"is_recurrent": False},
}
result = _consolidate_reason_of_infection(
was_infected_by_recurrent, was_infected_by_random, contact_models
)
expected = pd.Series(
pd.Categorical(
["a", "b", "b", "c", "c", "d", "a", "not_infected_by_contact"],
categories=["not_infected_by_contact", "a", "b", "c", "d"],
)
)
assert result.equals(expected)
@pytest.mark.unit
def test_numpy_replace():
x = np.arange(6)
replace_to = {4: 6, 5: 7}
result = _numpy_replace(x, replace_to)
assert (result == np.array([0, 1, 2, 3, 6, 7])).all()
|
1630730
|
from pyxnat import Interface
import os.path as op
from . import skip_if_no_network
_modulepath = op.dirname(op.abspath(__file__))
fp = op.join(op.dirname(op.abspath(__file__)), 'central.cfg')
print(fp)
central = Interface(config=fp)
proj_1 = central.select.project('surfmask_smpl2')
subj_1 = proj_1.subject('CENTRAL05_S01120')
exp_1 = subj_1.experiment('CENTRAL05_E02681')
scan_1 = exp_1.scan('11')
resource_1 = exp_1.resource('obscure_algorithm_output')
proj_2 = central.select.project('NFB')
subj_2 = proj_2.subject('BOA')
exp_2 = subj_2.experiment('GHJ')
scan_2 = exp_2.scan('JKL')
resource_2 = scan_2.resource('IOP')
@skip_if_no_network
def test_project_exists():
if proj_1.exists():
assert isinstance(proj_1, object)
assert str(proj_1) != '<Project Object> NFB'
@skip_if_no_network
def test_project_not_exists():
if not proj_2.exists():
assert isinstance(proj_2, object)
assert str(proj_2) == '<Project Object> NFB'
@skip_if_no_network
def test_info_project():
assert isinstance(proj_1, object)
expected_output = '<Project Object> surfmask_smpl2 `Surface masking '\
'samples 2` (private) 1 subject 1 MR experiment (owner: nosetests) '\
'(created on 2020-10-22 15:23:39.458) https://central.xnat.org/data/'\
'projects/surfmask_smpl2?format=html'
assert list(sorted(str(proj_1))) == list(sorted(expected_output))
@skip_if_no_network
def test_subject_exists():
if subj_1.exists():
assert isinstance(subj_1, object)
assert str(subj_1) != '<Subject Object> BOA'
@skip_if_no_network
def test_subject_not_exists():
if not subj_2.exists():
assert isinstance(subj_2, object)
assert str(subj_2) == '<Subject Object> BOA'
@skip_if_no_network
def test_info_subject():
assert isinstance(subj_1, object)
expected_output = '<Subject Object> CENTRAL05_S01120 `001` (project: '\
'surfmask_smpl2) (Gender: U) 1 experiment https://central.xnat.org/'\
'data/projects/surfmask_smpl2/subjects/CENTRAL05_S01120?format=html'
assert list(sorted(str(subj_1))) == list(sorted(expected_output))
@skip_if_no_network
def test_experiment_exists():
if exp_1.exists():
assert isinstance(exp_1, object)
assert str(exp_1) != '<Experiment Object> GHJ'
@skip_if_no_network
def test_experiment_not_exists():
if not exp_2.exists():
assert isinstance(exp_2, object)
assert str(exp_2) == '<Experiment Object> GHJ'
@skip_if_no_network
def test_info_experiment():
assert isinstance(exp_1, object)
expected_output = '<Experiment Object> CENTRAL05_E02681 `001_obscured` (subject: '\
'CENTRAL05_S01120 `001`) (project: surfmask_smpl2) 4 scans 1 resource '\
'(created on 2020-10-22 15:24:30.139) https://central.xnat.org/'\
'data/projects/surfmask_smpl2/subjects/CENTRAL05_S01120/experiments/'\
'CENTRAL05_E02681?format=html'
assert list(sorted(str(exp_1))) == list(sorted(expected_output))
@skip_if_no_network
def test_scan_exists():
if scan_1.exists():
assert isinstance(scan_1, object)
assert str(scan_1) != '<Scan Object> JKL'
@skip_if_no_network
def test_scan_not_exists():
if not scan_2.exists():
assert isinstance(scan_2, object)
assert str(scan_2) == '<Scan Object> JKL'
@skip_if_no_network
def test_info_scan():
assert isinstance(scan_1, object)
expected_output = '<Scan Object> 11 (`SPGR` 175 frames) '\
'https://central.xnat.org/data/projects/surfmask_smpl2/subjects/'\
'CENTRAL05_S01120/experiments/CENTRAL05_E02681/scans/11?format=html'
assert list(sorted(str(scan_1))) == list(sorted(expected_output))
@skip_if_no_network
def test_resource_exists():
if resource_1.exists():
assert isinstance(resource_1, object)
assert str(resource_1) != '<Resource Object> IOP'
@skip_if_no_network
def test_resource_not_exists():
if not resource_2.exists():
assert isinstance(resource_2, object)
assert str(resource_2) == '<Resource Object> IOP'
@skip_if_no_network
def test_info_resource():
assert isinstance(resource_1, object)
expected_output = '<Resource Object> 123361501 '\
'`obscure_algorithm_output` (66 files 2.06 GB)'
assert list(sorted(str(resource_1))) == list(sorted(expected_output))
@skip_if_no_network
def test_create_delete_create():
p = central.select.project('nosetests5')
from uuid import uuid1
sid = uuid1().hex
s = p.subject(sid)
s.create()
assert(s.exists())
s.delete()
s.create()
s.delete()
assert(not s.exists())
|
1630777
|
import numpy as np
import skimage
from skimage import transform
from PIL import Image
from constants import scale_fact
def float_im(img):
return np.divide(img, 255.)
# Adapted from: https://stackoverflow.com/a/39382475/9768291
def crop_center(img, crop_x, crop_y):
"""
To crop the center of an image
:param img: the image
:param crop_x: how much to crop on the x-axis
:param crop_y: how much to crop on the y-axis
:return: cropped image, floated (values between 0 and 1)
"""
y, x, _ = img.shape
start_x = x//2-(crop_x // 2)
start_y = y//2-(crop_y // 2)
cropped_img = img[start_y:start_y + crop_y, start_x:start_x + crop_x]
return float_im(cropped_img)
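# Example: for a (480, 640, 3) uint8 image, crop_center(img, 100, 100) returns the
# central (100, 100, 3) patch with values scaled into [0, 1].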
# TODO: provide some way of saving FLOAT images
def save_np_img(np_img, path, name):
"""
To save the image.
:param np_img: numpy_array type image
:param path: string type of the existing path where to save the image
:param name: string type that includes the format (ex:"bob.png")
:return: numpy array
"""
assert isinstance(path, str), 'Path of wrong type! (Must be String)'
assert isinstance(name, str), 'Name of wrong type! (Must be String)'
# TODO: To transform float-arrays into int-arrays (see https://stackoverflow.com/questions/52490653/saving-float-numpy-images)
    if type(np_img[0][0][0].item()) != int:
        # PIL's Image.fromarray cannot handle 3-channel int32 ('<i4') arrays and
        # raises "TypeError: Cannot handle this data type", so cast to uint8 instead.
        np_img = np.multiply(np_img, 255).astype(np.uint8)
im = Image.fromarray(np_img)
im.save(path + name)
return np_img
def single_downscale(img, width, height):
"""
Downscales an image by the factor set in the 'constants'
:param img: the image, as a Numpy Array
:param width: width to be downscaled
:param height: height to be downscaled
:return: returns a float-type numpy by default (values between 0 and 1)
"""
# TODO: look into `skimage.transform.downscale_local_mean()`
scaled_img = skimage.transform.resize(
img,
(width // scale_fact, height // scale_fact),
mode='reflect',
anti_aliasing=True)
return scaled_img
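# Sizing illustration (assuming, purely for the sake of example, scale_fact == 4 in
# `constants`): single_downscale(img, 512, 512) would resize the image to 128x128
# before returning it as a float array.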
|
1630817
|
from typing import List


class FenwickTree:
def __init__(self, n):
self.size = n
self.tree = [0] * (n + 1)
def __lowbit(self, index):
return index & (- index)
def update(self, index, delta):
while index < self.size + 1:
self.tree[index] += delta
index += self.__lowbit(index)
def query(self, index):
res = 0
while index > 0:
res += self.tree[index]
index -= self.__lowbit(index)
return res
class Solution:
def countSmaller(self, nums: List[int]) -> List[int]:
n = len(nums)
if n < 1:
return []
if n == 1:
return [0]
s = list(set(nums))
total_rank = len(s)
rank_map = {}
import heapq
heapq.heapify(s)
for _ in range(1, total_rank + 1):
rank_map[heapq.heappop(s)] = _
res = []
ft = FenwickTree(total_rank)
for _ in range(n - 1, -1, -1):
index = rank_map[nums[_]]
ft.update(index, 1)
res = [ft.query(index - 1)] + res
return res
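# Example: Solution().countSmaller([5, 2, 6, 1]) returns [2, 1, 1, 0] -- for each
# element, the number of strictly smaller elements to its right, computed in
# O(n log n) via the Fenwick tree over value ranks.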
|
1630858
|
import struct
import binascii
from libband.commands.facilities import Facility
def lookup_packet(packet):
"""
Analyzes command sent to MSFT Band (from Bluetooth HCI log for example)
and spits out all the parameters, for example - which command it is,
how much data it expects in return, what arguments are being passed
"""
binary_packet = binascii.unhexlify(packet)
command_part_start = binary_packet.index(struct.pack("<H", 12025)) + 2
command = binary_packet[command_part_start:command_part_start+2]
data_stage_size = struct.unpack(
"<I", binary_packet[command_part_start+2:command_part_start+6]
)[0]
arguments = binary_packet[command_part_start+6:]
return {
'command': lookup_command(struct.unpack("<H", command)[0]),
'data_stage_size': data_stage_size,
'arguments': arguments
}
def lookup_command(command):
"""
Splits command encoded as ushort into Facility, TX bit and index
"""
category = Facility((command & 65280) >> 8)
is_tx_command = (command & 128) >> 7 == 1
index = (command & 127)
return category, is_tx_command, index
def make_command(facility, is_tx, index):
"""
Given Facility, TX bit and index, spits out command encoded as ushort
"""
command = facility.value << 8 | int(is_tx) << 7 | index
return command
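# Round-trip sketch: for any `facility` member of Facility and 0 <= index < 128,
# lookup_command(make_command(facility, True, index)) == (facility, True, index),
# since the low 7 bits carry the index, bit 7 carries the TX flag and the high byte
# carries the facility code.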
|
1630910
|
import pytest
import six
from mock import MagicMock
from requests import HTTPError
from threatresponse.request.response import Response
def test_that_getattr_and_setattr_are_delegated():
inner_response = MagicMock()
response = Response(inner_response)
response.foo = 'bar'
response.spam('eggs')
assert inner_response.foo == 'bar'
inner_response.spam.assert_called_once_with('eggs')
def test_that_raise_for_status_extends_error_message():
inner_response = MagicMock()
inner_response.text = '{"foo": "bar", "spam": ["eggs"]}'
error = HTTPError('Something went wrong.')
error.response = inner_response
inner_response.raise_for_status.side_effect = error
response = Response(inner_response)
with pytest.raises(HTTPError):
response.raise_for_status()
inner_response.raise_for_status.assert_called_once_with()
assert error.args == (
'Something went wrong.\n'
'{\n'
' "foo": "bar",' + (' ' if six.PY2 else '') + '\n'
' "spam": [\n'
' "eggs"\n'
' ]\n'
'}',
)
|
1630919
|
import os
from core.BaseImporter import BaseImporter
from scipy.io import loadmat
import yaml
class Importer(BaseImporter):
def __init__(self):
with open('./modules/config/Matconv/equivalences.yaml', 'r') as infile:
            self.equivalences = yaml.safe_load(infile)
self.bottom = None
def find_layer_by_type(self, dict_, type_):
for key in dict_:
if dict_[key]['type'] == type_:
return key
def load(self, file_path):
output = {}
output['layers'] = {}
output['parameters'] = {}
model = loadmat(file_path)['layers'][0]
for index, layer in enumerate(model):
layer_type = layer['type'][0][0][0]
name = layer['name'][0][0][0]
if layer_type == 'conv' and 'fc' in name:
layer_type = 'fc'
converted_type = self.find_layer_by_type(self.equivalences['layers'], layer_type)
eq_fields = self.equivalences['layers'][converted_type]
output['layers'][name] = {}
layer_obj = output['layers'][name]
for field in eq_fields:
if layer_type == 'conv' or layer_type == 'fc':
if field == 'weights_name':
weights = layer[eq_fields[field]].item()[0][0].copy()
weights = weights.transpose(3,2,0,1)
output['parameters'][name + '_w'] = weights
layer_obj['weights_name'] = name + '_w'
layer_obj['dim'] = weights.shape
elif field == 'biases_name':
try:
biases = layer[eq_fields[field]].item()[0][1].copy()
output['parameters'][name + '_b'] = biases
layer_obj['biases_name'] = name + '_b'
                        except Exception:
                            # some layers have no biases entry; skip them
                            pass
elif layer_type == 'lrn':
if field == 'local_size':
layer_obj['local_size'] = layer['param'][0][0][0][0]
elif field == 'kappa':
layer_obj['kappa'] = layer['param'][0][0][0][1]
elif field == 'alpha':
layer_obj['alpha'] = layer['param'][0][0][0][0]*layer['param'][0][0][0][1]
elif field == 'beta':
layer_obj['beta'] = layer['param'][0][0][0][3]
if field == 'bottom':
layer_obj['bottom'] = self.bottom
self.bottom = name
elif field == 'top':
if index + 1 < len(model):
layer_obj['top'] = model[index + 1]['name'][0][0][0]
else:
layer_obj['top'] = None
else:
if eq_fields[field] in layer.dtype.names:
layer_obj[field] = layer[eq_fields[field]][0][0][0]
else:
layer_obj[field] = eq_fields[field]
return output
if __name__ == '__main__':
importer = Importer()
output = importer.load('/Users/prlz77/Downloads/imagenet-vgg-f.mat')
pass
|
1630932
|
import isobar as iso
from isobar.io.midi import MidiInputDevice, MidiOutputDevice
import pytest
import time
from . import dummy_timeline
VIRTUAL_DEVICE_NAME = "Virtual Device"
no_midi = False
try:
midi_out = iso.MidiOutputDevice()
except iso.DeviceNotFoundException:
no_midi = True
@pytest.mark.skipif(no_midi, reason="Device does not have MIDI support")
def test_io_midi():
"""
Send a MIDI message through a virtual loopback device.
    Note that virtual=True is not needed for subsequent calls: the device has
    already been created, so rtmidi sees it as an existing device.
"""
events = []
def log_event(message):
nonlocal events
events.append(message)
midi_in = iso.MidiInputDevice(VIRTUAL_DEVICE_NAME, virtual=True)
midi_in.callback = log_event
midi_out = iso.MidiOutputDevice(VIRTUAL_DEVICE_NAME)
timeline = iso.Timeline(120, midi_out)
timeline.stop_when_done = True
timeline.schedule({
"note": iso.PSequence([ 60 ], 1),
"duration" : 0.1
})
timeline.run()
assert len(events) == 1
@pytest.mark.skipif(no_midi, reason="Device does not have MIDI support")
def test_io_midi_sync():
tempo = 150
midi_out = iso.MidiOutputDevice(VIRTUAL_DEVICE_NAME, virtual=True, send_clock=True)
print("Created MIDI out: %s" % midi_out)
clock = iso.Clock(tempo=tempo, clock_target=midi_out)
midi_in = iso.MidiInputDevice(VIRTUAL_DEVICE_NAME)
clock.background()
time.sleep(0.1)
clock.stop()
assert midi_in.tempo == pytest.approx(tempo, rel=0.03)
|
1630959
|
import numpy as np
import os
import pytest
import tempfile
import zipfile
import shutil
from b3get.utils import unzip_to
@pytest.fixture
def azipfile():
basedir = tempfile.mkdtemp()
input_files = [tempfile.mktemp(dir=basedir) for _ in range(16)]
for idx, fn in enumerate(input_files):
with open(fn, 'w') as of:
of.write(str(idx))
of.write('\t')
of.write(fn)
zip_path = tempfile.mktemp('.zip')
with zipfile.ZipFile(zip_path, 'w') as zf:
for f in input_files:
an = os.path.join(os.path.split(basedir)[-1], os.path.basename(f))
zf.write(f, arcname=an)
zf.close()
return zip_path, input_files
def test_fixture_values(azipfile):
zf, content = azipfile
assert os.path.isfile(zf)
assert os.stat(zf).st_size > 0
with zipfile.ZipFile(zf, 'r') as zfo:
nl = zfo.namelist()
assert len(nl) == 16
assert not nl[0].startswith('/tmp')
os.remove(zf)
[ os.remove(c) for c in content ]
def test_unzip_to_simple(azipfile):
zf, src_files = azipfile
somedir = tempfile.mkdtemp()
files = unzip_to(zf, somedir)
files = sorted(files)
src_files = sorted(src_files)
assert len(files) > 0
assert len(files) == len(src_files)
assert os.path.basename(files[0]) == os.path.basename(src_files[0])
assert os.path.basename(files[-1]) == os.path.basename(src_files[-1])
os.remove(zf)
[ os.remove(c) for c in src_files ]
shutil.rmtree(somedir)
def test_unzip_twice(azipfile):
zf, src_files = azipfile
somedir = tempfile.mkdtemp()
files = unzip_to(zf, somedir)
files = sorted(files)
src_files = sorted(src_files)
assert len(files) > 0
assert len(files) == len(src_files)
assert os.path.basename(files[0]) == os.path.basename(src_files[0])
assert os.path.basename(files[-1]) == os.path.basename(src_files[-1])
again = sorted(unzip_to(zf,somedir))
assert len(again) > 0
assert len(again) == len(src_files)
assert os.path.basename(again[0]) == os.path.basename(src_files[0])
assert os.path.basename(again[-1]) == os.path.basename(src_files[-1])
os.remove(zf)
[ os.remove(c) for c in src_files ]
shutil.rmtree(somedir)
def test_unzip_twice_impartial(azipfile):
zf, src_files = azipfile
somedir = tempfile.mkdtemp()
files = unzip_to(zf, somedir)
files = sorted(files)
src_files = sorted(src_files)
assert len(files) > 0
assert len(files) == len(src_files)
assert os.path.basename(files[0]) == os.path.basename(src_files[0])
assert os.path.basename(files[-1]) == os.path.basename(src_files[-1])
os.remove(files[-1])
os.remove(files[-2])
again = sorted(unzip_to(zf,somedir))
assert len(again) > 0
assert len(again) == len(src_files)
assert os.path.basename(again[0]) == os.path.basename(src_files[0])
assert os.path.basename(again[-1]) == os.path.basename(src_files[-1])
os.remove(zf)
[ os.remove(c) for c in src_files ]
shutil.rmtree(somedir)
|
1631034
|
from arg_parser import UserArgs
from collections import Counter
from dataset_handler.dataset import CUB_Xian, SUN_Xian, AWA1_Xian
from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit
from attribute_expert.model import AttributeExpert
from keras.utils import to_categorical
import numpy as np
class DataLoader(object):
def __init__(self, should_test_split):
# init data factory and split factory
self.data_loaders_factory = {
'CUB': CUB_Xian,
'SUN': SUN_Xian,
'AWA1': AWA1_Xian
}
self.task_factory = {
'ZSL': ZSLsplit(val_fold_id=1),
'GZSL': GZSLsplit(seen_val_seed=1002),
'IMB': ImbalancedDataSplit(classes_shuffle_seed=0, seen_val_seed=0),
'GFSL': GFSLSplit(val_seed=0, test_seed=0, fs_nsamples=UserArgs.train_max_fs_samples),
'DRAGON': DragonSplit(val_seed=0, test_seed=0,
train_dist_function=UserArgs.train_dist,
fs_nsamples=UserArgs.train_max_fs_samples)
}
self.dataset = self.data_loaders_factory[UserArgs.dataset_name](UserArgs.data_dir)
# split dataset to train, val and test
self.data = self.task_factory[UserArgs.transfer_task]._split(self.dataset, should_test_split)
self.data, \
self.X_train, self.Y_train, self.Attributes_train, self.train_classes, \
self.X_val, self.Y_val, self.Attributes_val, self.val_classes, \
self.X_test, self.Y_test, self.Attributes_test, self.test_classes, \
self.input_dim, self.categories_dim, self.attributes_dim, \
self.class_descriptions_crossval, \
self.attributes_groups_ranges_ids = AttributeExpert.prepare_data_for_model(self.data)
# one hot encoding for Y's
self.Y_train_oh = to_categorical(self.Y_train, num_classes=self.categories_dim)
self.Y_val_oh = to_categorical(self.Y_val, num_classes=self.categories_dim)
self.Y_test_oh = to_categorical(self.Y_test, num_classes=self.categories_dim)
# prepare evaluation parameters
self.train_data = (self.X_train, self.Y_train, self.Attributes_train, self.train_classes)
self.val_data = (self.X_val, self.Y_val, self.Attributes_val, self.val_classes)
self.test_data = (self.X_test, self.Y_test, self.Attributes_test, self.test_classes)
train_distribution = self.task_factory[UserArgs.transfer_task].train_distribution
# save num_training_samples_per_class
class_samples_map = Counter(self.Y_train)
self.num_training_samples_per_class = [class_samples_map[key] for key in
sorted(class_samples_map.keys(), reverse=False)]
# save many_shot and few_shot classes
self.ms_classes = self.task_factory[UserArgs.transfer_task].ms_classes
self.fs_classes = self.task_factory[UserArgs.transfer_task].fs_classes
        # separate validation into many-shot and few-shot indexes
val_ms_indexes, val_fs_indexes = self.get_ms_and_fs_indexes(self.Y_val)
X_val_many = self.X_val[val_ms_indexes]
Y_val_many = self.Y_val[val_ms_indexes]
X_val_few = self.X_val[val_fs_indexes]
Y_val_few = self.Y_val[val_fs_indexes]
self.eval_params = (self.X_val, self.Y_val, self.val_classes,
                            train_distribution, self.ms_classes, self.fs_classes, X_val_many,
Y_val_many, X_val_few, Y_val_few)
test_ms_indexes, test_fs_indexes = self.get_ms_and_fs_indexes(self.Y_test)
X_test_many = self.X_test[test_ms_indexes]
Y_test_many = self.Y_test[test_ms_indexes]
X_test_few = self.X_test[test_fs_indexes]
Y_test_few = self.Y_test[test_fs_indexes]
self.test_eval_params = (self.X_test, self.Y_test, self.test_classes,
train_distribution, self.ms_classes, self.fs_classes, X_test_many,
Y_test_many, X_test_few, Y_test_few)
print(f"""Dataset: {UserArgs.dataset_name}
Train Shape: {self.X_train.shape}
Val Shape: {self.X_val.shape}
Test Shape: {self.X_test.shape}""")
    # Split sample indexes by many-shot / few-shot classes (used when evaluating accuracies)
def get_ms_and_fs_indexes(self, Y):
# get all indexes of many_shot classes
ms_indexes = np.array([], dtype=int)
for ms_class in self.ms_classes:
cur_class_indexes = np.where(Y == ms_class)[0]
ms_indexes = np.append(ms_indexes, cur_class_indexes)
# get all indexes of few_shot classes
fs_indexes = np.array([], dtype=int)
for fs_class in self.fs_classes:
cur_class_indexes = np.where(Y == fs_class)[0]
fs_indexes = np.append(fs_indexes, cur_class_indexes)
return ms_indexes, fs_indexes
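# Standalone sketch of the index-splitting logic in get_ms_and_fs_indexes, using
# made-up labels and class lists (not from any dataset):
if __name__ == "__main__":
    Y_demo = np.array([0, 1, 2, 1, 0, 2, 3])
    ms_demo, fs_demo = [0, 1], [2, 3]
    ms_idx = np.concatenate([np.where(Y_demo == c)[0] for c in ms_demo])
    fs_idx = np.concatenate([np.where(Y_demo == c)[0] for c in fs_demo])
    print(ms_idx, fs_idx)  # [0 4 1 3] [2 5 6]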
|
1631065
|
from __future__ import print_function
import deepstate_base
import logrun
class CrashTest(deepstate_base.DeepStateTestCase):
def run_deepstate(self, deepstate):
(r, output) = logrun.logrun([deepstate, "build/examples/Crash"],
"deepstate.out", 1800)
self.assertEqual(r, 0)
self.assertTrue("Passed: Crash_SegFault" in output)
foundCrashSave = False
for line in output.split("\n"):
if ("Saved test case" in line) and (".crash" in line):
foundCrashSave = True
self.assertTrue(foundCrashSave)
|
1631084
|
from torch import nn
from torch.nn import Linear
class AE_encoder(nn.Module):
def __init__(self, ae_n_enc_1, ae_n_enc_2, ae_n_enc_3, n_input, n_z):
super(AE_encoder, self).__init__()
self.enc_1 = Linear(n_input, ae_n_enc_1)
self.enc_2 = Linear(ae_n_enc_1, ae_n_enc_2)
self.enc_3 = Linear(ae_n_enc_2, ae_n_enc_3)
self.z_layer = Linear(ae_n_enc_3, n_z)
self.act = nn.LeakyReLU(0.2, inplace=True)
def forward(self, x):
z = self.act(self.enc_1(x))
z = self.act(self.enc_2(z))
z = self.act(self.enc_3(z))
z_ae = self.z_layer(z)
return z_ae
class AE_decoder(nn.Module):
def __init__(self, ae_n_dec_1, ae_n_dec_2, ae_n_dec_3, n_input, n_z):
super(AE_decoder, self).__init__()
self.dec_1 = Linear(n_z, ae_n_dec_1)
self.dec_2 = Linear(ae_n_dec_1, ae_n_dec_2)
self.dec_3 = Linear(ae_n_dec_2, ae_n_dec_3)
self.x_bar_layer = Linear(ae_n_dec_3, n_input)
self.act = nn.LeakyReLU(0.2, inplace=True)
def forward(self, z_ae):
z = self.act(self.dec_1(z_ae))
z = self.act(self.dec_2(z))
z = self.act(self.dec_3(z))
x_hat = self.x_bar_layer(z)
return x_hat
class AE(nn.Module):
def __init__(self, ae_n_enc_1, ae_n_enc_2, ae_n_enc_3, ae_n_dec_1, ae_n_dec_2, ae_n_dec_3, n_input, n_z):
super(AE, self).__init__()
self.encoder = AE_encoder(
ae_n_enc_1=ae_n_enc_1,
ae_n_enc_2=ae_n_enc_2,
ae_n_enc_3=ae_n_enc_3,
n_input=n_input,
n_z=n_z)
self.decoder = AE_decoder(
ae_n_dec_1=ae_n_dec_1,
ae_n_dec_2=ae_n_dec_2,
ae_n_dec_3=ae_n_dec_3,
n_input=n_input,
n_z=n_z)
def forward(self, x):
z_ae = self.encoder(x)
x_hat = self.decoder(z_ae)
return x_hat, z_ae
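# Minimal forward-pass smoke test; the layer sizes below are illustrative only,
# not taken from any particular configuration.
if __name__ == "__main__":
    import torch
    model = AE(ae_n_enc_1=500, ae_n_enc_2=500, ae_n_enc_3=2000,
               ae_n_dec_1=2000, ae_n_dec_2=500, ae_n_dec_3=500,
               n_input=784, n_z=10)
    x = torch.randn(8, 784)         # batch of 8 flattened inputs
    x_hat, z_ae = model(x)
    print(x_hat.shape, z_ae.shape)  # torch.Size([8, 784]) torch.Size([8, 10])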
|
1631108
|
def limit_vals(input_value, low_limit, high_limit):
"""
Apply limits to an input value.
Parameters
----------
input_value : float
Input value.
low_limit : float
Low limit. If value falls below this limit it will be set to this value.
high_limit : float
High limit. If value falls above this limit it will be set to this value.
Returns
-------
float
Returns input value unless it falls above or below the entered limits.
"""
if input_value < low_limit:
return low_limit
elif input_value > high_limit:
return high_limit
else:
return input_value
def dec_perc_convert(input_value, input_units):
"""
Convert from decimal to percent or percent to decimal.
Parameters
----------
input_value : float
Value to be converted.
input_units : string
Units of the input value.
Enter either "percent" or "decimal"
Returns
-------
float
Returns converted value in percent or decimal.
"""
if input_units == "percent":
return input_value / 100
elif input_units == "decimal":
return input_value * 100
else:
raise Exception("Enter a valid unit value: decimal or percent")
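if __name__ == "__main__":
    # Usage sketch for the helpers above (values are illustrative only).
    print(limit_vals(12.5, 0.0, 10.0))        # 10.0 -> clipped to the high limit
    print(limit_vals(-3.0, 0.0, 10.0))        # 0.0  -> clipped to the low limit
    print(dec_perc_convert(42.0, "percent"))  # 0.42 -> percent to decimal
    print(dec_perc_convert(0.5, "decimal"))   # 50.0 -> decimal to percent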
|