# code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
# |---|---|---|
"""
In this file we run ours models one by one
"""
# Imports
import random
from random import shuffle
import numpy as np
import os
import scipy.sparse as sp
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import pickle
from torch.utils.data import DataLoader
from models import MLP_With_Average_Voting, PretrainedDensenet, PretrainedResnet, CNN_With_Average_Voting, \
MLP_With_Max_Pooling, CNN_MLP_Average_Voting, CNN_MLP_Max_Pooling, PretrainedDensenetAverageVoting, \
PretrainedDensenetRELU, PretrainedDensenetAverageVotingRELU, CNN_With_Average_VotingRELU, \
CNN_MLP_Average_VotingRELU, CNN_MLP_Max_PoolingRELU, CNN_With_Max_Pooling, CNN_With_Max_PoolingRELU
from sklearn.metrics import roc_curve, auc, roc_auc_score, average_precision_score
import re
import argparse
import logging
import pandas as pd
import json
from dataloader import get_study_level_data, get_dataloaders
# Seed for our experiments (fixed so runs are reproducible)
seed = 1997
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Setting cuda for GPU if it is available
use_cuda = torch.cuda.is_available()
if use_cuda:
    torch.cuda.manual_seed(seed)
# Base directory for checkpoints
odir_checkpoint = '/mnt/data/sotiris/checkpoints/'
# odir_checkpoint = 'drive/My Drive/MURA Project/checkpoints/'
# Initialize the logger handle to None
hdlr = None
# Initialize names of the body parts for the MURA dataset
study_wrist = 'XR_WRIST'
study_elbow = 'XR_ELBOW'
study_finger = 'XR_FINGER'
study_forearm = 'XR_FOREARM'
study_hand = 'XR_HAND'
study_humerus = 'XR_HUMERUS'
study_shoulder = 'XR_SHOULDER'
# Set checkpoints for each model (uncomment the pair for the experiment to run)
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR RESNET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'resnet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'resnet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_relu_progress.pth.tar'
# THIS IS FOR MLP + AVERAGE POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_averagevoting.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_progress.pth.tar'
# best_checkpoint_name = 'mlp_averagevoting_nodropout.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_nodropout_progress.pth.tar'
# THIS IS FOR MLP + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_maxpooling.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_progress.pth.tar'
# best_checkpoint_name = 'mlp_maxpooling_nodropout.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_nodropout_progress.pth.tar'
# FOR TESTING
# best_checkpoint_name = 'testing.pth.tar'
# progress_checkpoint = 'testing_progress.pth.tar'
# FOR BEST MODEL
best_checkpoint_name = 'densenet_maxpooling_relu/hyperopt_trial_0.pth.tar'
# NOTE(review): progress_checkpoint is None in this configuration; the training
# loop passes it to os.path.join, which would raise TypeError — this only works
# because eval_test is True below so training is skipped. Confirm before training.
progress_checkpoint = None
# Create the checkpoints directory
# NOTE: the exists/makedirs pair is racy; os.makedirs(..., exist_ok=True) would be safer.
if not os.path.exists(odir_checkpoint):
    os.makedirs(odir_checkpoint)
def print_params(model):
    '''
    Print and log the model architecture together with the counts of
    trainable, untrainable and total parameters.
    :param model: The pytorch model
    :return: Nothing.
    '''
    separator = 40 * '='
    print(separator)
    print(model)
    print(separator)
    logger.info(separator)
    logger.info(model)
    logger.info(separator)
    trainable = 0
    untrainable = 0
    for parameter in model.parameters():
        # Number of elements in this parameter tensor
        n_elements = 1
        for dim_size in parameter.size():
            n_elements *= dim_size
        if parameter.requires_grad:
            trainable += n_elements
        else:
            untrainable += n_elements
    total_params = trainable + untrainable
    summary = 'trainable:{} untrainable:{} total:{}'.format(trainable, untrainable, total_params)
    print(separator)
    print(summary)
    print(separator)
    logger.info(separator)
    logger.info(summary)
    logger.info(separator)
    logger.info('')
    logger.info('')
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    """
    Persist a training checkpoint to disk via torch.save.
    :param state: The state/checkpoint dictionary to save
    :param filename: Destination path for the checkpoint file
    :return: Nothing
    """
    torch.save(state, filename)
def init_the_logger(hdlr):
    """
    Initialize the experiment logger with a file handler.
    :param hdlr: The previous handler for the logger (removed if present)
    :return: The logger and its new handler
    """
    # Make sure the checkpoints folder exists before opening a log file in it
    if not os.path.exists(odir_checkpoint):
        os.makedirs(odir_checkpoint)
    # Name the logger after the last component of the checkpoint directory path
    od = odir_checkpoint.split('/')[-1]
    logger = logging.getLogger(od)
    # Drop the handler from a previous call so records are not duplicated
    if hdlr is not None:
        logger.removeHandler(hdlr)
    # One log file per experiment; earlier experiments used names such as
    # 'densenet_mlp_averagevoting.log', 'cnn_2layers_maxpooling.log',
    # 'mlp_averagevoting.log', etc. — edit the filename below to switch.
    hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'testing.log'))
    # Timestamped log line format
    hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)
    return logger, hdlr
# Initialize the logger (module-level; replaces any handler held in hdlr)
logger, hdlr = init_the_logger(hdlr)
def back_prop(batch_costs):
    """
    Run one backward pass and optimizer step over an accumulated batch.
    :param batch_costs: The costs for the batch
    :return: The average cost of the batch
    """
    # Mean of the per-instance losses in the batch
    mean_cost = sum(batch_costs) / float(len(batch_costs))
    mean_cost.backward()
    # Apply the update, then clear gradients for the next batch
    optimizer.step()
    optimizer.zero_grad()
    return mean_cost.cpu().item()
# HERE YOU PASS POSITIVE AND NEGATIVE WEIGHTS
# IT IS THE LOSS FROM THE PAPER
# def weighted_binary_cross_entropy(output, target, weights=None):
#     if weights is not None:
#         assert len(weights) == 2
#         loss = weights[1] * (target * torch.log(output)) + weights[0] * ((1 - target) * torch.log(1 - output))
#     else:
#         loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
#     return torch.neg(torch.mean(loss))
print()
print('Loading Data...')
print()
# Load the per-study dataframes for every MURA body part
print('Loading ELBOW')
study_data_elbow = get_study_level_data(study_elbow)
print('Loading FINGER')
study_data_finger = get_study_level_data(study_finger)
print('Loading FOREARM')
study_data_forearm = get_study_level_data(study_forearm)
print('Loading HAND')
study_data_hand = get_study_level_data(study_hand)
print('Loading WRIST')
study_data_wrist = get_study_level_data(study_wrist)
print('Loading SHOULDER')
study_data_shoulder = get_study_level_data(study_shoulder)
print('Loading HUMERUS')
study_data_humerus = get_study_level_data(study_humerus)
print()
print('Data Loaded!')
print()
# Combine the training splits of all body parts into one training set
frames_train = [study_data_elbow['train'],
                study_data_finger['train'],
                study_data_forearm['train'],
                study_data_hand['train'],
                study_data_wrist['train'],
                study_data_shoulder['train'],
                study_data_humerus['train']]
frames_dev = [study_data_elbow['valid'],
              study_data_finger['valid'],
              study_data_forearm['valid'],
              study_data_hand['valid'],
              study_data_wrist['valid'],
              study_data_shoulder['valid'],
              study_data_humerus['valid']]
for_test_dev = pd.concat(frames_dev)
# Shuffle it first and then split it
# Set random state so the shuffling will always have the same result
for_test_dev = for_test_dev.sample(frac=1, random_state=seed)
# Split the official valid set: first 700 shuffled rows become our test set,
# the remainder becomes our dev set
study_data = {'train': pd.concat(frames_train), 'valid': for_test_dev.iloc[700:], 'test': for_test_dev.iloc[:700]}
# FOR TESTING PURPOSES -- PER STUDY
# study_data_elbow = get_study_level_data(study_elbow)
# frames_train = [study_data_elbow['train']]
# frames_dev = [study_data_elbow['valid']]
# study_data_finger = get_study_level_data(study_finger)
# frames_train = [study_data_finger['train']]
# frames_dev = [study_data_finger['valid']]
# study_data_forearm = get_study_level_data(study_forearm)
# frames_train = [study_data_forearm['train']]
# frames_dev = [study_data_forearm['valid']]
# study_data_hand = get_study_level_data(study_hand)
# frames_train = [study_data_hand['train']]
# frames_dev = [study_data_hand['valid']]
# study_data_wrist = get_study_level_data(study_wrist)
# frames_train = [study_data_wrist['train']]
# frames_dev = [study_data_wrist['valid']]
# study_data_shoulder = get_study_level_data(study_shoulder)
# frames_train = [study_data_shoulder['train']]
# frames_dev = [study_data_shoulder['valid']]
# study_data_humerus = get_study_level_data(study_humerus)
# frames_train = [study_data_humerus['train']]
# frames_dev = [study_data_humerus['valid']]
# for_test_dev = pd.concat(frames_dev)
# for_test_dev = for_test_dev.sample(frac=1, random_state=seed)
# study_data = {'train': pd.concat(frames_train), 'valid': for_test_dev.iloc[70:], 'test': for_test_dev.iloc[:70]}
# END FOR TESTING PURPOSES
# Create the dataloaders for the data
data_cat = ['train', 'valid', 'test']
# batch_size=1 here means one *study* per dataloader item; batching of losses
# is done manually in the training loop below
dataloaders, image_shape = get_dataloaders(study_data, batch_size=1)
dataset_sizes = {x: len(study_data[x]) for x in data_cat}
# find weights for the positive class (as pos_weight)
# this loss will be different from the paper
# i think it makes sense to only do it in the training phase
# Abnormal is our positive / we find how many views are abnormal and normal
train_dataframe = study_data['train']
num_abnormal_images = train_dataframe[train_dataframe['Path'].str.contains('positive')]['Count'].sum()
num_normal_images = train_dataframe[train_dataframe['Path'].str.contains('negative')]['Count'].sum()
# Abnormal weight
# NOTE(review): BCEWithLogitsLoss's pos_weight is conventionally
# num_negative / num_positive; here the abnormal *fraction* (< 1) is used,
# which down-weights positives — confirm this is intended.
pos_weight = torch.FloatTensor(np.array(num_abnormal_images / (num_abnormal_images + num_normal_images)))
# normal weight
# neg_weight = torch.FloatTensor(np.array(num_normal_images / (num_abnormal_images + num_normal_images)))
# weights for weighted binary cross entropy
# weights = [neg_weight, pos_weight]
# Set the learning rate, batch size, epochs and patience
lr = 0.001
batch_size = 64
epochs = 20
max_patience = 5
# Set if you want to resume the training
resume = False
# Set if you want to just evaluate the test dataset
eval_test = True
# ================================== DEFINE MODEL ================================== #
# Exactly one model is active at a time; the alternatives below are kept for
# reference and correspond to the checkpoint-name pairs at the top of the file.
# model = PretrainedDensenetAverageVoting(hidden_size=500, num_class=1)
# model = PretrainedDensenetAverageVotingRELU(hidden_size=500, num_class=1)
# model = PretrainedDensenet(hidden_size=500, num_class=1)
# model = PretrainedDensenetRELU(hidden_size=500, num_class=1)
# model = PretrainedDensenetAverageVoting(hidden_size=500, num_class=1, frozen=False)
# model = PretrainedDensenetAverageVotingRELU(hidden_size=500, num_class=1, frozen=False)
# model = PretrainedDensenet(hidden_size=500, num_class=1, frozen=False)
model = PretrainedDensenetRELU(hidden_size=500, num_class=1, frozen=False)
# model = PretrainedResnet(hidden_size=500, num_class=1)
# model = MLP_With_Average_Voting(input_dim=3 * image_shape[0] * image_shape[1],
#                                 n_classes=1,
#                                 hidden_1=500,
#                                 hidden_2=200,
#                                 hidden_3=100,
#                                 dropout=0.3)
# model = MLP_With_Max_Pooling(input_dim=3 * image_shape[0] * image_shape[1],
#                              n_classes=1,
#                              hidden_1=500,
#                              hidden_2=200,
#                              hidden_3=100,
#                              dropout=0.3)
# model = CNN_With_Average_Voting(input_channels=3, input_shape=image_shape,
#                                 n_classes=1,
#                                 n_filters_1=10,
#                                 n_filters_2=20,
#                                 dropout=0.3)
# model = CNN_With_Max_Pooling(input_channels=3, input_shape=image_shape,
#                              n_classes=1,
#                              n_filters_1=10,
#                              n_filters_2=20,
#                              dropout=0.3)
# model = CNN_MLP_Average_Voting(input_channels=3, input_shape=image_shape,
#                                n_classes=1,
#                                n_filters_1=10,
#                                n_filters_2=20,
#                                hidden_size=500,
#                                dropout=0.3)
# model = CNN_MLP_Max_Pooling(input_channels=3,
#                             input_shape=image_shape,
#                             n_classes=1,
#                             n_filters_1=10,
#                             n_filters_2=20,
#                             hidden_size=500,
#                             dropout=0.3)
# model = CNN_With_Average_VotingRELU(input_channels=3, input_shape=image_shape,
#                                     n_classes=1,
#                                     n_filters_1=10,
#                                     n_filters_2=20,
#                                     dropout=0.3)
# model = CNN_With_Max_PoolingRELU(input_channels=3, input_shape=image_shape,
#                                  n_classes=1,
#                                  n_filters_1=10,
#                                  n_filters_2=20,
#                                  dropout=0.3)
# model = CNN_MLP_Average_VotingRELU(input_channels=3, input_shape=image_shape,
#                                    n_classes=1,
#                                    n_filters_1=10,
#                                    n_filters_2=20,
#                                    hidden_size=500,
#                                    dropout=0.3)
# model = CNN_MLP_Max_PoolingRELU(input_channels=3,
#                                 input_shape=image_shape,
#                                 n_classes=1,
#                                 n_filters_1=10,
#                                 n_filters_2=20,
#                                 hidden_size=500,
#                                 dropout=0.3)
# ================================== ================================== #
# Print the parameters of the model
print_params(model)
# Get the model parameters
paramaters = model.parameters()
# Set the loss function (applies sigmoid internally; model outputs raw logits)
loss = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
# Set the optimizer
optimizer = torch.optim.Adam(params=paramaters, lr=lr)
# Get the dataset iterators
train_iterator = dataloaders['train']
dev_iterator = dataloaders['valid']
test_iterator = dataloaders['test']
# Initialize values for best auc and best epoch
best_auc = -1000.0
best_epoch = 0
# Load json file to store model results or create an empty dictionary
results_mura = None
if os.path.exists('/mnt/data/sotiris/results_mura.json'):
    with open('/mnt/data/sotiris/results_mura.json') as fi:
        results_mura = json.load(fi)
else:
    results_mura = dict()
if use_cuda:
    print()
    print('GPU available!!')
    print()
    model = model.cuda()
def evaluate(iterator, model):
    """
    Method that evaluates the dev/test sets.

    :param iterator: The dataset iterator (yields dicts with 'images' and 'labels')
    :param model: The model
    :return: Tuple (dev_auc_easy, aucs, aps, eval_costs): the AUC of the last
             full batch, and per-batch lists of AUC scores, average precisions
             and mean losses. Instances beyond the last full batch are ignored.
    """
    # Perform all actions without keeping gradients
    with torch.no_grad():
        # Set the model to evaluation mode
        model.eval()
        # Initialize values and lists
        batch_preds = []
        eval_costs = []
        eval_cost = 0.0
        batch_labels = []
        aucs = []
        aps = []
        # FIX: initialize before the loop so the return statement cannot raise
        # UnboundLocalError when the iterator yields fewer than batch_size
        # instances (evaluate_cam already initializes it).
        dev_auc_easy = 0
        # Iterate the set
        for ev_batch in iterator:
            # Get the images and the labels
            dev_images = ev_batch['images']
            dev_labels = ev_batch['labels'].float()
            # Cast them to cuda if necessary
            if use_cuda:
                dev_images = dev_images.cuda()
                dev_labels = dev_labels.cuda()
            # Reset the gradients in the optimizer
            # (not strictly needed under no_grad; kept to mirror the training loop)
            optimizer.zero_grad()
            # Pass the images through the model to get the predictions
            dev_preds = model(dev_images)
            # Calculate the accumulated loss
            eval_cost += float(loss(dev_preds, dev_labels).cpu().item())
            # Append the labels and preds to the batch lists
            batch_labels.append(dev_labels)
            batch_preds.append(dev_preds)
            # If we have reached the batch size
            if len(batch_preds) == batch_size:
                # Get the average of the losses and append it to the list
                eval_costs.append(eval_cost / batch_size)
                # Set the accumulated loss to 0
                eval_cost = 0
                # Pass the batch predictions through a sigmoid
                sigmoid_dev_preds = torch.sigmoid(torch.stack(batch_preds))
                # Calculate auc score
                dev_auc_easy = roc_auc_score(torch.stack(batch_labels).cpu().numpy(),
                                             sigmoid_dev_preds.cpu().numpy())
                # Calculate average precision
                average_precision = average_precision_score(torch.stack(batch_labels).cpu().numpy(),
                                                            sigmoid_dev_preds.cpu().numpy())
                # Append scores to the lists
                aucs.append(dev_auc_easy)
                aps.append(average_precision)
                # Reset the lists
                batch_labels = []
                batch_preds = []
        # Return metrics
        return dev_auc_easy, aucs, aps, eval_costs
def evaluate_cam(iterator, model, num_of_images):
    """
    Method that evaluates the dev/test set and also creates the gradCAM images
    for the first `num_of_images` instances, saved under 'cam_images/'.
    :param iterator: The dataset iterator (yields dicts with 'images', 'labels', 'paths')
    :param model: The model (must expose forward_cam, get_activations_gradient,
                  get_activations for the gradCAM branch)
    :param num_of_images: The number of images to get for CAM
    :return: Metrics for the set: (last_batch_auc, per-batch aucs, per-batch aps,
             per-batch mean losses)
    """
    # Set the model to evaluation mode
    # (gradients are still computed here, unlike evaluate(), because gradCAM needs them)
    model.eval()
    # Initialize values and lists
    batch_preds = []
    eval_costs = []
    eval_cost = 0.0
    batch_labels = []
    aucs = []
    aps = []
    img_i = 0
    dev_auc_easy = 0
    # Iterate the set
    for ev_batch in iterator:
        # Get the images and the labels
        dev_images = ev_batch['images']
        dev_labels = ev_batch['labels'].float()
        # Cast them to cuda if necessary
        if use_cuda:
            dev_images = dev_images.cuda()
            dev_labels = dev_labels.cuda()
        # Reset the gradients in the optimizer
        optimizer.zero_grad()
        # Create gradCAM images only for the first n instances
        if img_i <= num_of_images:
            # Generate heatmap
            # as in: https://medium.com/@stepanulyanin/implementing-grad-cam-in-pytorch-ea0937c31e82
            import cv2
            # Get the instance's path to the image file
            pathImageFiles = ev_batch['paths']
            # Set the output image's file
            pathOutputFile = 'cam_images/test{}.jpg'.format(img_i)
            # Increment the output image id
            img_i += 1
            # Get predictions with hook on the gradients
            cam_output = model.forward_cam(dev_images)
            # Legacy for dev -- so that we don't pass it 2 times
            dev_preds = cam_output
            eval_cost += float(loss(dev_preds, dev_labels).cpu().item())
            # Get the gradient of the output with respect to the parameters of the model
            cam_output.backward()
            # Pull the gradients out of the model
            gradients = model.get_activations_gradient()
            # Pool the gradients across the channels
            pooled_gradients = torch.mean(gradients, dim=[2, 3])
            # Get the activations of the last convolutional layer
            activations = model.get_activations(dev_images).detach()
            # Weight the channels by corresponding gradients
            # NOTE(review): this loop ranges over len(ev_batch['paths'][0]) while
            # the plotting loops below range over len(ev_batch['paths']) — confirm
            # which one indexes the views.
            for v in range(len(ev_batch['paths'][0])):
                for i in range(activations.shape[1]):
                    activations[v, i, :, :] *= pooled_gradients[v, i]
            # Average the channels of the activations
            heatmaps = torch.mean(activations, dim=1)
            # Create plot for the heatmaps and the superposed image
            import matplotlib.pyplot as plt
            # One row per view: column 0 = heatmap, column 1 = superimposed image
            fig, axis = plt.subplots(len(ev_batch['paths']), 2)
            if len(ev_batch['paths']) == 1:
                axis = axis.reshape(1, 2)
            fig.suptitle('/'.join(ev_batch['paths'][0][0].split('/')[5:-1]) +
                         '\nTrue: {} -- Predicted: {:.3f}'.format(dev_labels.cpu().item(),
                                                                  F.sigmoid(cam_output).cpu().item()))
            # For every view in the instance
            for v in range(len(ev_batch['paths'])):
                # leaky relu on top of the heatmap
                # or maybe better use relu
                # heatmap = F.leaky_relu(heatmaps[v])
                # Pass the heatmaps from a relu to throw negative scores
                heatmap = F.relu(heatmaps[v])
                # Normalize the heatmap to [0, 1] (guard against all-zero heatmaps)
                h_max = torch.max(heatmap)
                if h_max != 0.0:
                    heatmap /= h_max
                # Save the heatmaps -- for debugging
                # plt.matshow(heatmap.cpu().numpy())
                # plt.savefig('{}_matrix.png'.format(v))
                # plt.clf()
                # Add the heatmap for the view in the plot
                axis[v, 0].matshow(heatmap.cpu().numpy())
                axis[v, 0].axis('off')
                # Read the image from the path
                imgOriginal = cv2.imread(pathImageFiles[v][0])
                # Resize the heatmap to the image's dimensions
                heatmap = cv2.resize(heatmap.cpu().numpy(), (imgOriginal.shape[1], imgOriginal.shape[0]))
                # Cast heatmap values to [0,255] ints
                heatmap = np.uint8(255 * heatmap)
                # # Use opencv to superimpose image
                # heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
                # img = heatmap * 0.4 + imgOriginal
                # cv2.imwrite('{}_'.format(v) + pathOutputFile, img)
                # Use matplotlib instead of opencv
                # plt.title('View: {}\nTrue: {} -- Predicted: {:.3f}'.format(v, dev_labels.cpu().item(), F.sigmoid(cam_output).cpu().item()))
                # plt.imshow(imgOriginal)
                # plt.imshow(heatmap, cmap='jet', alpha=0.4)
                # plt.savefig('{}_'.format(v) + pathOutputFile)
                # plt.clf()
                # Add superposed image to the plot
                axis[v, 1].imshow(imgOriginal)
                axis[v, 1].imshow(heatmap, cmap='jet', alpha=0.4)
                axis[v, 1].axis('off')
            # Save the instance plot
            fig.savefig(pathOutputFile, dpi=600)
            # END
        else:
            # Get the predictions from the model
            dev_preds = model(dev_images)
            # Calcualte the accumulated loss
            eval_cost += float(loss(dev_preds, dev_labels).cpu().item())
        # Append the labels and preds to the batch lists
        batch_labels.append(dev_labels)
        batch_preds.append(dev_preds)
        # If we have reached the batch size
        if len(batch_preds) == batch_size:
            # Get the average of the losses and append it to the list
            eval_costs.append(eval_cost / batch_size)
            # Set the accumulated loss to 0
            eval_cost = 0
            # Pass the batch predictions through a sigmoid
            sigmoid_dev_preds = torch.sigmoid(torch.stack(batch_preds))
            # Calculate auc score
            dev_auc_easy = roc_auc_score(torch.stack(batch_labels).cpu().detach().numpy(),
                                         sigmoid_dev_preds.cpu().detach().numpy())
            # Calculate average precision
            average_precision = average_precision_score(torch.stack(batch_labels).cpu().detach().numpy(),
                                                        sigmoid_dev_preds.cpu().detach().numpy())
            # Append scores to the lists
            aucs.append(dev_auc_easy)
            aps.append(average_precision)
            # Reset the lists
            batch_labels = []
            batch_preds = []
    # Return metrics
    return dev_auc_easy, aucs, aps, eval_costs
# Initialize values
# epoch = -1 so that range(epoch + 1, epochs) starts training from epoch 0
epoch = -1
patience = max_patience
# If resume is set
if resume:
    # Use cuda if possible
    if torch.cuda.is_available():
        print()
        print('GPU available..will resume training!!')
        print()
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    # Load the checkpoint
    # NOTE(review): requires progress_checkpoint to be a string; it is None in
    # the current configuration, so resume must stay False here.
    modelcheckpoint = torch.load(os.path.join(odir_checkpoint, progress_checkpoint), map_location=device)
    model.load_state_dict(modelcheckpoint['state_dict'])
    epoch = modelcheckpoint['epoch']
    optimizer.load_state_dict(modelcheckpoint['optimizer'])
    best_auc = modelcheckpoint['auc']
    patience = modelcheckpoint['patience']
    print()
    print('Resuming from file: {}'.format(progress_checkpoint))
    print('Best auc was: {}'.format(best_auc))
    print('The epoch was: {}'.format(epoch))
    print()
    logger.info('')
    logger.info('Resuming from file: {}'.format(progress_checkpoint))
    logger.info('Best auc was: {}'.format(best_auc))
    logger.info('The epoch was: {}'.format(epoch))
    logger.info('')
    # If resumed model has already early stopped
    if patience == 0:
        print()
        print('Resumed model has patience 0, which means it had early stopped.')
        print('Quitting training...')
        print()
        logger.info('')
        logger.info('Resumed model has patience 0, which means it had early stopped.')
        logger.info('Quitting training...')
        logger.info('')
    # If resumed model has already finished training (reached max epochs)
    if epoch == epochs - 1:
        print()
        print('Resumed model has already been trained for the max epochs.')
        print('Quitting training...')
        print()
        logger.info('')
        logger.info('Resumed model has already been trained for the max epochs.')
        logger.info('Quitting training...')
        logger.info('')
# If patience isn't 0 and the model hasn't reached the max epochs
# (and we are not in evaluate-only mode)
if patience != 0 and epoch != epochs - 1 and not eval_test:
    print()
    print('Training model...')
    logger.info('')
    logger.info('Training model...')
    # For each epoch
    for epoch in tqdm(range(epoch + 1, epochs)):
        print()
        print('Epoch: {}'.format(epoch))
        print()
        logger.info('')
        logger.info('Epoch: {}'.format(epoch))
        logger.info('')
        # Set the model to train mode
        model.train()
        # Initialize lists
        batch_costs = []
        batch_logits = []
        batch_labels = []
        epoch_costs = []
        train_aucs = []
        dev_aucs = []
        dev_aps = []
        # Reset optimizer gradients to zero
        optimizer.zero_grad()
        # Iterate the set (one study per iteration; losses batched manually)
        for batch in tqdm(train_iterator):
            # Get the images and the labels
            images = batch['images']
            labels = batch['labels'].float()
            # Cast them to cuda if necessary
            if use_cuda:
                images = images.cuda()
                labels = labels.cuda()
            # Get the logits from the model
            logits = model(images)
            # Append to list
            batch_logits.append(logits)
            batch_labels.append(labels)
            # Calculate the loss
            cost = loss(logits, labels)
            # cost = weighted_binary_cross_entropy(logits, labels, weights)
            # Perform clipping
            # torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
            # Append the loss to the list
            batch_costs.append(cost)
            # If we reached batch size
            if len(batch_costs) == batch_size:
                # Perform back propagation for the batch
                batch_aver_cost = back_prop(batch_costs)
                epoch_costs.append(batch_aver_cost)
                # Calculate the auc score
                train_auc = roc_auc_score(torch.stack(batch_labels).cpu().detach().numpy(),
                                          torch.sigmoid(torch.stack(batch_logits)).cpu().detach().numpy())
                # Append to list
                train_aucs.append(train_auc)
                batch_costs = []
                batch_logits = []
                batch_labels = []
        print('Epoch Average Loss: {}, Epoch Average AUC: {}, Epoch: {} '.format(
            sum(epoch_costs) / float(len(epoch_costs)), np.mean(train_aucs), epoch))
        logger.info('Epoch Average Loss: {}, Epoch Average AUC: {}, Epoch: {} '.format(
            sum(epoch_costs) / float(len(epoch_costs)), np.mean(train_aucs), epoch))
        print()
        print(40 * '*')
        print('Evaluating on the dev set...')
        print()
        logger.info('')
        logger.info(40 * '*')
        logger.info('Evaluating on the dev set...')
        logger.info('')
        # Evaluate on the dev set
        dev_auc, dev_aucs, dev_aps, dev_costs = evaluate(dev_iterator, model)
        print('Average Loss on Dev Set: {}, Epoch: {}'.format(np.mean(dev_costs), epoch))
        print('AUC on Dev Set: {}, Epoch: {}'.format(np.mean(dev_aucs), epoch))
        print('Average Precision on dev set: {}, Epoch: {}'.format(np.mean(dev_aps), epoch))
        print(40 * '*')
        logger.info('Average Loss on Dev Set: {}, Epoch: {}'.format(np.mean(dev_costs), epoch))
        logger.info('AUC on Dev Set: {}, Epoch: {}'.format(np.mean(dev_aucs), epoch))
        logger.info('Average Precision on dev set: {}, Epoch: {}'.format(np.mean(dev_aps), epoch))
        logger.info(40 * '*')
        logger.info('')
        # If we found new best dev auc score
        if np.mean(dev_aucs) > best_auc:
            best_auc = np.mean(dev_aucs)
            best_epoch = epoch
            state = {'epoch': epoch,
                     'state_dict': model.state_dict(),
                     'optimizer': optimizer.state_dict(),
                     'auc': best_auc,
                     'patience': max_patience,
                     'best_epoch': best_epoch}
            # Reset patience
            patience = max_patience
            # Save the best checkpoint
            save_checkpoint(state, filename=os.path.join(odir_checkpoint, best_checkpoint_name))
        else:
            # Reduce patience by 1
            patience -= 1
        # Save progress checkpoint
        # NOTE(review): progress_checkpoint is None in the current configuration;
        # os.path.join would raise TypeError here if training were enabled — confirm.
        state = {'epoch': epoch,
                 'state_dict': model.state_dict(),
                 'optimizer': optimizer.state_dict(),
                 'auc': best_auc,
                 'patience': patience,
                 'best_epoch': best_epoch}
        save_checkpoint(state, filename=os.path.join(odir_checkpoint, progress_checkpoint))
        # Save metrics and info to the json
        model_name = best_checkpoint_name.split('.')[0]
        if model_name not in results_mura.keys():
            results_mura[model_name] = list()
        results_mura[model_name].append({
            'epoch': epoch,
            'patience': patience,
            'train_loss': sum(epoch_costs) / float(len(epoch_costs)),
            'train_auc': np.mean(train_aucs),
            'dev_loss': np.mean(dev_costs),
            'dev_auc': np.mean(dev_aucs),
            'dev_ap': np.mean(dev_aps),
            'best_epoch': best_epoch,
            'best_auc': best_auc
        })
        with open('/mnt/data/sotiris/results_mura.json', 'w') as out:
            json.dump(results_mura, out)
        # If the max_patience_th time it still hasn't improved then stop the training
        if patience == 0:
            print()
            print('Early stopping at epoch: {}'.format(epoch))
            print()
            logger.info('')
            logger.info('Early stopping at epoch: {}'.format(epoch))
            logger.info('')
            break
    print()
    print(40 * '-')
    print("Best AUC {} at epoch: {}".format(best_auc, best_epoch))
    print(40 * '-')
    print()
    print('=' * 90)
    print()
    logger.info('')
    logger.info(40 * '-')
    logger.info("Best AUC {} at epoch: {}".format(best_auc, best_epoch))
    logger.info(40 * '-')
    logger.info('')
    logger.info('')
    logger.info('=' * 90)
print()
print('Evaluating on the test set...')
print()
if use_cuda:
print()
print('GPU available...')
print()
device = torch.device('cuda')
else:
device = torch.device('cpu')
# Load best checkpoint and eval on the test set
best_check = torch.load(os.path.join(odir_checkpoint, best_checkpoint_name), map_location=device)
model.load_state_dict(best_check['state_dict'])
if use_cuda:
model = model.cuda()
# Evaluate the test set
# _, test_aucs, test_aps, _ = evaluate(test_iterator, model)
# Evaluate the test set and create gradcam images
_, test_aucs, test_aps, _ = evaluate_cam(test_iterator, model, 100)
print()
print('Best Epoch:', best_check['epoch'])
print('Best Auc on Dev Set:', best_check['auc'])
print('Auc on Test set:', np.mean(test_aucs))
print()
print('=' * 90)
print()
logger.info('Best Epoch: {}'.format((best_check['epoch'])))
logger.info('Best Auc on Dev Set: {}'.format(str(best_check['auc'])))
logger.info('Auc on Test set: {}'.format(str(np.mean(test_aucs))))
logger.info('=' * 90)
| [
"logging.getLogger",
"numpy.uint8",
"torch.max",
"torch.nn.functional.sigmoid",
"numpy.array",
"torch.cuda.is_available",
"os.path.exists",
"numpy.mean",
"models.PretrainedDensenetRELU",
"torch.mean",
"numpy.random.seed",
"dataloader.get_study_level_data",
"torch.save",
"torch.nn.functiona... | [((1081, 1098), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1092, 1098), False, 'import random\n'), ((1100, 1120), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1114, 1120), True, 'import numpy as np\n'), ((1122, 1145), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1139, 1145), False, 'import torch\n'), ((1203, 1228), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1226, 1228), False, 'import torch\n'), ((13579, 13612), 'dataloader.get_study_level_data', 'get_study_level_data', (['study_elbow'], {}), '(study_elbow)\n', (13599, 13612), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((13659, 13693), 'dataloader.get_study_level_data', 'get_study_level_data', (['study_finger'], {}), '(study_finger)\n', (13679, 13693), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((13742, 13777), 'dataloader.get_study_level_data', 'get_study_level_data', (['study_forearm'], {}), '(study_forearm)\n', (13762, 13777), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((13820, 13852), 'dataloader.get_study_level_data', 'get_study_level_data', (['study_hand'], {}), '(study_hand)\n', (13840, 13852), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((13897, 13930), 'dataloader.get_study_level_data', 'get_study_level_data', (['study_wrist'], {}), '(study_wrist)\n', (13917, 13930), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((13981, 14017), 'dataloader.get_study_level_data', 'get_study_level_data', (['study_shoulder'], {}), '(study_shoulder)\n', (14001, 14017), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((14066, 14101), 'dataloader.get_study_level_data', 'get_study_level_data', (['study_humerus'], {}), '(study_humerus)\n', (14086, 14101), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((14781, 
14802), 'pandas.concat', 'pd.concat', (['frames_dev'], {}), '(frames_dev)\n', (14790, 14802), True, 'import pandas as pd\n'), ((16558, 16599), 'dataloader.get_dataloaders', 'get_dataloaders', (['study_data'], {'batch_size': '(1)'}), '(study_data, batch_size=1)\n', (16573, 16599), False, 'from dataloader import get_study_level_data, get_dataloaders\n'), ((18379, 18445), 'models.PretrainedDensenetRELU', 'PretrainedDensenetRELU', ([], {'hidden_size': '(500)', 'num_class': '(1)', 'frozen': '(False)'}), '(hidden_size=500, num_class=1, frozen=False)\n', (18401, 18445), False, 'from models import MLP_With_Average_Voting, PretrainedDensenet, PretrainedResnet, CNN_With_Average_Voting, MLP_With_Max_Pooling, CNN_MLP_Average_Voting, CNN_MLP_Max_Pooling, PretrainedDensenetAverageVoting, PretrainedDensenetRELU, PretrainedDensenetAverageVotingRELU, CNN_With_Average_VotingRELU, CNN_MLP_Average_VotingRELU, CNN_MLP_Max_PoolingRELU, CNN_With_Max_Pooling, CNN_With_Max_PoolingRELU\n'), ((21930, 21973), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'pos_weight': 'pos_weight'}), '(pos_weight=pos_weight)\n', (21950, 21973), True, 'import torch.nn as nn\n'), ((22010, 22052), 'torch.optim.Adam', 'torch.optim.Adam', ([], {'params': 'paramaters', 'lr': 'lr'}), '(params=paramaters, lr=lr)\n', (22026, 22052), False, 'import torch\n'), ((22382, 22435), 'os.path.exists', 'os.path.exists', (['"""/mnt/data/sotiris/results_mura.json"""'], {}), "('/mnt/data/sotiris/results_mura.json')\n", (22396, 22435), False, 'import os\n'), ((1248, 1276), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1270, 1276), False, 'import torch\n'), ((6506, 6537), 'os.path.exists', 'os.path.exists', (['odir_checkpoint'], {}), '(odir_checkpoint)\n', (6520, 6537), False, 'import os\n'), ((6544, 6572), 'os.makedirs', 'os.makedirs', (['odir_checkpoint'], {}), '(odir_checkpoint)\n', (6555, 6572), False, 'import os\n'), ((7797, 7824), 'torch.save', 'torch.save', (['state', 
'filename'], {}), '(state, filename)\n', (7807, 7824), False, 'import torch\n'), ((8205, 8226), 'logging.getLogger', 'logging.getLogger', (['od'], {}), '(od)\n', (8222, 8226), False, 'import logging\n'), ((12327, 12385), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(levelname)s %(message)s')\n", (12344, 12385), False, 'import logging\n'), ((15002, 15025), 'pandas.concat', 'pd.concat', (['frames_train'], {}), '(frames_train)\n', (15011, 15025), True, 'import pandas as pd\n'), ((17201, 17274), 'numpy.array', 'np.array', (['(num_abnormal_images / (num_abnormal_images + num_normal_images))'], {}), '(num_abnormal_images / (num_abnormal_images + num_normal_images))\n', (17209, 17274), True, 'import numpy as np\n'), ((32422, 32447), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (32445, 32447), False, 'import torch\n'), ((40794, 40814), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (40806, 40814), False, 'import torch\n'), ((40838, 40857), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (40850, 40857), False, 'import torch\n'), ((40934, 40985), 'os.path.join', 'os.path.join', (['odir_checkpoint', 'best_checkpoint_name'], {}), '(odir_checkpoint, best_checkpoint_name)\n', (40946, 40985), False, 'import os\n'), ((41443, 41461), 'numpy.mean', 'np.mean', (['test_aucs'], {}), '(test_aucs)\n', (41450, 41461), True, 'import numpy as np\n'), ((8040, 8071), 'os.path.exists', 'os.path.exists', (['odir_checkpoint'], {}), '(odir_checkpoint)\n', (8054, 8071), False, 'import os\n'), ((8082, 8110), 'os.makedirs', 'os.makedirs', (['odir_checkpoint'], {}), '(odir_checkpoint)\n', (8093, 8110), False, 'import os\n'), ((12225, 12269), 'os.path.join', 'os.path.join', (['odir_checkpoint', '"""testing.log"""'], {}), "(odir_checkpoint, 'testing.log')\n", (12237, 12269), False, 'import os\n'), ((22522, 22535), 'json.load', 'json.load', (['fi'], {}), '(fi)\n', (22531, 
22535), False, 'import json\n'), ((22936, 22951), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22949, 22951), False, 'import torch\n'), ((32557, 32577), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (32569, 32577), False, 'import torch\n'), ((32607, 32626), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (32619, 32626), False, 'import torch\n'), ((32690, 32740), 'os.path.join', 'os.path.join', (['odir_checkpoint', 'progress_checkpoint'], {}), '(odir_checkpoint, progress_checkpoint)\n', (32702, 32740), False, 'import os\n'), ((35127, 35147), 'tqdm.tqdm', 'tqdm', (['train_iterator'], {}), '(train_iterator)\n', (35131, 35147), False, 'from tqdm import tqdm\n'), ((27418, 27451), 'torch.mean', 'torch.mean', (['gradients'], {'dim': '[2, 3]'}), '(gradients, dim=[2, 3])\n', (27428, 27451), False, 'import torch\n'), ((27918, 27948), 'torch.mean', 'torch.mean', (['activations'], {'dim': '(1)'}), '(activations, dim=1)\n', (27928, 27948), False, 'import torch\n'), ((38054, 38071), 'numpy.mean', 'np.mean', (['dev_aucs'], {}), '(dev_aucs)\n', (38061, 38071), True, 'import numpy as np\n'), ((38108, 38125), 'numpy.mean', 'np.mean', (['dev_aucs'], {}), '(dev_aucs)\n', (38115, 38125), True, 'import numpy as np\n'), ((39853, 39881), 'json.dump', 'json.dump', (['results_mura', 'out'], {}), '(results_mura, out)\n', (39862, 39881), False, 'import json\n'), ((41678, 41696), 'numpy.mean', 'np.mean', (['test_aucs'], {}), '(test_aucs)\n', (41685, 41696), True, 'import numpy as np\n'), ((28856, 28875), 'torch.nn.functional.relu', 'F.relu', (['heatmaps[v]'], {}), '(heatmaps[v])\n', (28862, 28875), True, 'import torch.nn.functional as F\n'), ((28944, 28962), 'torch.max', 'torch.max', (['heatmap'], {}), '(heatmap)\n', (28953, 28962), False, 'import torch\n'), ((29474, 29506), 'cv2.imread', 'cv2.imread', (['pathImageFiles[v][0]'], {}), '(pathImageFiles[v][0])\n', (29484, 29506), False, 'import cv2\n'), ((29764, 29787), 'numpy.uint8', 'np.uint8', 
(['(255 * heatmap)'], {}), '(255 * heatmap)\n', (29772, 29787), True, 'import numpy as np\n'), ((31520, 31544), 'torch.stack', 'torch.stack', (['batch_preds'], {}), '(batch_preds)\n', (31531, 31544), False, 'import torch\n'), ((36795, 36814), 'numpy.mean', 'np.mean', (['train_aucs'], {}), '(train_aucs)\n', (36802, 36814), True, 'import numpy as np\n'), ((36972, 36991), 'numpy.mean', 'np.mean', (['train_aucs'], {}), '(train_aucs)\n', (36979, 36991), True, 'import numpy as np\n'), ((37424, 37442), 'numpy.mean', 'np.mean', (['dev_costs'], {}), '(dev_costs)\n', (37431, 37442), True, 'import numpy as np\n'), ((37506, 37523), 'numpy.mean', 'np.mean', (['dev_aucs'], {}), '(dev_aucs)\n', (37513, 37523), True, 'import numpy as np\n'), ((37601, 37617), 'numpy.mean', 'np.mean', (['dev_aps'], {}), '(dev_aps)\n', (37608, 37617), True, 'import numpy as np\n'), ((37723, 37741), 'numpy.mean', 'np.mean', (['dev_costs'], {}), '(dev_costs)\n', (37730, 37741), True, 'import numpy as np\n'), ((37811, 37828), 'numpy.mean', 'np.mean', (['dev_aucs'], {}), '(dev_aucs)\n', (37818, 37828), True, 'import numpy as np\n'), ((37912, 37928), 'numpy.mean', 'np.mean', (['dev_aps'], {}), '(dev_aps)\n', (37919, 37928), True, 'import numpy as np\n'), ((39076, 39126), 'os.path.join', 'os.path.join', (['odir_checkpoint', 'progress_checkpoint'], {}), '(odir_checkpoint, progress_checkpoint)\n', (39088, 39126), False, 'import os\n'), ((39534, 39553), 'numpy.mean', 'np.mean', (['train_aucs'], {}), '(train_aucs)\n', (39541, 39553), True, 'import numpy as np\n'), ((39580, 39598), 'numpy.mean', 'np.mean', (['dev_costs'], {}), '(dev_costs)\n', (39587, 39598), True, 'import numpy as np\n'), ((39624, 39641), 'numpy.mean', 'np.mean', (['dev_aucs'], {}), '(dev_aucs)\n', (39631, 39641), True, 'import numpy as np\n'), ((39666, 39682), 'numpy.mean', 'np.mean', (['dev_aps'], {}), '(dev_aps)\n', (39673, 39682), True, 'import numpy as np\n'), ((24477, 24501), 'torch.stack', 'torch.stack', (['batch_preds'], {}), 
'(batch_preds)\n', (24488, 24501), False, 'import torch\n'), ((38604, 38655), 'os.path.join', 'os.path.join', (['odir_checkpoint', 'best_checkpoint_name'], {}), '(odir_checkpoint, best_checkpoint_name)\n', (38616, 38655), False, 'import os\n'), ((24588, 24613), 'torch.stack', 'torch.stack', (['batch_labels'], {}), '(batch_labels)\n', (24599, 24613), False, 'import torch\n'), ((24816, 24841), 'torch.stack', 'torch.stack', (['batch_labels'], {}), '(batch_labels)\n', (24827, 24841), False, 'import torch\n'), ((28464, 28485), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['cam_output'], {}), '(cam_output)\n', (28473, 28485), True, 'import torch.nn.functional as F\n'), ((31623, 31648), 'torch.stack', 'torch.stack', (['batch_labels'], {}), '(batch_labels)\n', (31634, 31648), False, 'import torch\n'), ((31857, 31882), 'torch.stack', 'torch.stack', (['batch_labels'], {}), '(batch_labels)\n', (31868, 31882), False, 'import torch\n'), ((36311, 36336), 'torch.stack', 'torch.stack', (['batch_labels'], {}), '(batch_labels)\n', (36322, 36336), False, 'import torch\n'), ((36418, 36443), 'torch.stack', 'torch.stack', (['batch_logits'], {}), '(batch_logits)\n', (36429, 36443), False, 'import torch\n')] |
'''
Authors: <NAME>
Contact: <EMAIL>
'''
import logging, time
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from py4j.java_gateway import (JavaGateway, GatewayParameters)
from subprocess import call, Popen, PIPE
import random
from py4j.tests.java_gateway_test import gateway
#CNTS = [2,2,2]
# logging
logger = logging.getLogger(__name__)
def scale_action(action_space, action):
    """
    Map an action from its native range [low, high] onto [-1, 1].
    (no need for symmetric action space)

    :param action_space: (gym.spaces.box.Box)
    :param action: (np.ndarray)
    :return: (np.ndarray)
    """
    lo, hi = action_space.low, action_space.high
    fraction = (action - lo) / (hi - lo)
    return 2.0 * fraction - 1.0
def unscale_action(action_space, scaled_action):
    """
    Map an action from [-1, 1] back to its native range [low, high].
    (no need for symmetric action space)

    :param action_space: (gym.spaces.box.Box)
    :param scaled_action: (np.ndarray)
    :return: (np.ndarray)
    """
    lo, hi = action_space.low, action_space.high
    span = hi - lo
    return lo + 0.5 * (scaled_action + 1.0) * span
def refer(val):
    """Decode `val` into a fixed list of 4 octal digits, most significant first.

    Assumes val < 8**4 so the octal string fits in 4 digits — TODO confirm.
    """
    digits = oct(val)[2:].zfill(4)
    result = [0, 0, 0, 0]
    for pos in range(len(digits)):
        result[pos] = int(digits[pos])
    return result
def refer_new(val, cnts):
    """Decode a flat index `val` into per-position digits of a mixed-radix
    number system whose per-position sizes are given by `cnts`
    (most significant digit first)."""
    digits = [0] * len(cnts)
    pos = len(cnts) - 1
    remaining = val
    while remaining > 0:
        digits[pos] = remaining % cnts[pos]
        remaining //= cnts[pos]
        pos -= 1
    return digits
def referback(actions, cnts):
    """Encode per-position digits `actions` back into a flat index — the
    inverse of `refer_new`.

    Fixed: the previous implementation weighted position i by cnts[i]**p,
    which is only correct when every entry of `cnts` is equal (a uniform
    radix). The proper mixed-radix weight of a position is the product of
    the radices to its right; for uniform `cnts` this reduces to the old
    behavior, so existing callers are unaffected.

    :param actions: sequence of digits, most significant first
    :param cnts: per-position radix sizes (same length as actions)
    :return: the flat index
    """
    result = 0
    weight = 1
    # walk from the least significant (rightmost) digit to the most significant
    for i in range(len(actions) - 1, -1, -1):
        result += actions[i] * weight
        weight *= cnts[i]
    return result
def createNumpyAryFromJavaByte1DAry(javaByte1DAry, datatype):
    """Rebuild a 2-D numpy array from a raw byte buffer.

    The first 8 bytes are a header holding (rows, cols) as two C ints;
    the remainder is the array body in `datatype`.
    """
    rows, cols = np.frombuffer(javaByte1DAry, dtype=np.intc, count=2)  # header
    body = np.frombuffer(javaByte1DAry, dtype=datatype, offset=8)   # skip 8-byte header
    return body.reshape((rows, cols))
def transfer2JavaDblAry(gateway, pyArray, size):
    """Copy a Python numeric sequence into a newly allocated Java double[]
    of length `size`, converting each element to float."""
    dblAry = gateway.new_array(gateway.jvm.double, size)
    for idx, value in enumerate(pyArray):
        dblAry[idx] = float(value)
    return dblAry
def transfer2JavaStringAry(gateway, pyArray):
    """Copy a Python sequence into a newly allocated Java String[],
    converting each element with str()."""
    strAry = gateway.new_array(gateway.jvm.String, len(pyArray))
    for idx, item in enumerate(pyArray):
        strAry[idx] = str(item)
    return strAry
def transferJavaStringAry2Python(java_Str_ary):
    """Copy a Java String[] into a native Python list, element by element."""
    return [java_Str_ary[i] for i in range(len(java_Str_ary))]
#
# function to transfer data from Java_Collections array to python Numpy array
#
def transfer1DJavaArray2NumpyArray(ary):
    """Copy a 1-D Java array into a numpy float array, element by element."""
    np_ary = np.zeros(len(ary))
    for idx, value in enumerate(ary):
        np_ary[idx] = value
    return np_ary
def transfer2DJavaArray2NumpyArray(ary):
    """Copy a rectangular 2-D Java array into a numpy float array,
    element by element. Assumes every row has the same length as row 0."""
    rows, cols = len(ary), len(ary[0])
    np_ary = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            np_ary[r, c] = ary[r][c]
    return np_ary
# A power system dynamic simulation environment implementation by extending the Gym Env class defined in core.py, which is available in
# https://github.com/openai/gym/blob/master/gym/core.py
class PowerDynSimEnv(gym.Env):
    """Power system dynamic simulation environment following the Gym Env API
    (https://github.com/openai/gym/blob/master/gym/core.py).

    The dynamics are computed by an InterPSS-based Java service: this class
    launches the Java server as a subprocess, talks to it over a py4j
    gateway, translates Gym actions into simulator commands and converts
    simulator observations back into numpy arrays.
    """
    metadata = {
    }

    _case_files = ""
    _dyn_sim_config_file = ""
    _rl_config_file = ""

    # fallback agent-environment interaction time step (used when the
    # service does not supply a positive one)
    default_time_step = 0.1
    action_type = 'discrete'
    a_gateway = None
    # InterPSS dynamic simulation service (py4j entry point)
    ipss_app = None
    server_process = None
    # original action low/high values, kept so scaled [-1, 1] actions can be
    # mapped back before being applied to the environment
    original_action_space = None
    is_action_space_scaled = False
    # number of power flow base cases
    total_case_num = 1

    def __init__(self, case_files, dyn_sim_config_file, rl_config_file, jar_file,
                 server_port_num=25333, data_path="",
                 force_symmetric_continuous_action=False, verbose=0):
        """Launch the Java simulation server and build the action/observation spaces.

        :param case_files: list of power flow case files, or None
        :param dyn_sim_config_file: dynamic simulation configuration file
        :param rl_config_file: RL problem configuration file
        :param jar_file: path to the IPSS-RL Java server jar
        :param server_port_num: py4j gateway port
        :param data_path: optional data directory passed to the service
        :param force_symmetric_continuous_action: if True and the continuous
               action space is non-symmetric, rescale it to [-1, 1] per action
        :param verbose: logger level forwarded to the Java side
        """
        # instance/class-level state (not globals) so parallel processes do not collide
        jvm_flags = "-Dcom.sun.management.jmxremote=true"
        self.server_process = Popen(["java", "-Xmx1024m", jvm_flags, "-jar", jar_file,
                                      str(server_port_num), str(verbose)], close_fds=True)
        print("IPSS-RL Java server lib path:", jar_file)
        print("Java server started with PID:", self.server_process.pid)
        time.sleep(5.0)

        # keep retrying until the gateway accepts the connection
        connecting = True
        while connecting:
            try:
                self.a_gateway = JavaGateway(
                    gateway_parameters=GatewayParameters(port=server_port_num, auto_convert=True))
                self.ipss_app = self.a_gateway.entry_point
                connecting = False
            except Exception:
                time.sleep(0.1)

        if case_files is not None:
            _case_files = transfer2JavaStringAry(self.a_gateway, case_files)
        else:
            _case_files = None

        # initialize the power system simulation service
        # IpssLogger level: 0 OFF, 1 Warning, >=2 Fine
        self.ipss_app.setLoggerLevel(verbose)

        # dim_ary = {observation_history_length, observation_space_dim,
        #            action_location_num, action_level_num}
        dim_ary = self.ipss_app.initStudyCase(_case_files, dyn_sim_config_file, rl_config_file, data_path)
        observation_history_length = dim_ary[0]
        observation_space_dim = dim_ary[1]

        # agent-environment interaction time step
        self.time_step = self.ipss_app.getEnvTimeStep() if self.ipss_app.getEnvTimeStep() > 0 else self.default_time_step
        self.action_type = self.ipss_app.getActionSpaceType()
        # fixed: previously printed self.action_space, which is not defined yet here
        print('action type = ', self.action_type)

        # define action and observation spaces
        if self.action_type.lower() == 'discrete':
            print('observation_history_length,observation_space_dim, action_location_num, action_level_num = ')
            print(dim_ary[0], dim_ary[1], dim_ary[2], dim_ary[3])
            action_location_num = dim_ary[2]
            action_level_num = dim_ary[3]
            # one flat Discrete index encodes one level choice per action location
            action_num = action_level_num ** action_location_num
            self.action_space = spaces.Discrete(action_num)
            self.cnts = np.ones(action_location_num) * action_level_num
        elif self.action_type.lower() == 'continuous':
            print('observation_history_length,observation_space_dim, action_location_num = ')
            print(dim_ary[0], dim_ary[1], dim_ary[2])
            action_ranges = transfer2DJavaArray2NumpyArray(self.ipss_app.getActionValueRanges())
            print('action value ranges = ', action_ranges)
            low = action_ranges[:, 0]
            high = action_ranges[:, 1]
            print('action range low =', low, 'action range high =', high)
            print('low shape:', np.shape(low))
            self.action_space = spaces.Box(low, high, dtype=action_ranges.dtype)
            if force_symmetric_continuous_action:  # i.e., force np.abs(low) == high
                if not (np.abs(low) == high).all():
                    print('!!Warming: the original action space is non-symmetric, convert it to [-1,1] for each action')
                    self.original_action_space = spaces.Box(low, high, dtype=action_ranges.dtype)
                    ones = np.ones_like(low)
                    self.action_space = spaces.Box(-ones, ones, dtype=action_ranges.dtype)
                    self.is_action_space_scaled = True

        self.observation_space = spaces.Box(-999, 999, shape=(observation_history_length * observation_space_dim,))  # Continuous

        self.seed()
        # TODO: get the initial states
        self.state = None
        self.steps_beyond_done = None
        self.restart_simulation = True

        self.total_case_num = self.ipss_app.getTotalBaseCaseNumber()
        if self.total_case_num == 0:
            self.total_case_num = 1

    def seed(self, seed=None):
        """Seed the environment's random number generator; returns the seed used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Apply `action`, advance the simulation one interaction step and
        return (observation, reward, done, info)."""
        # Tentative workaround: some agents (e.g. OpenAI Gym DDPG) return the
        # action wrapped in an extra dimension
        if not self.action_space.contains(action):
            action = action[0]
        assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))

        # step-1: convert the Gym action into actions for the power system simulator
        actionMapped = None
        if self.action_type.lower() == 'discrete':
            # decode the flat Discrete index into one level per action location
            actionMapped = refer_new(action, self.cnts)
        elif self.action_type.lower() == 'continuous':
            actionMapped = np.asarray(action)

        actionPyAry = np.asarray(actionMapped, dtype=np.float64)
        if self.is_action_space_scaled and self.original_action_space is not None:
            # rescale the action from [-1, 1] back to [low, high]
            actionPyAry = unscale_action(self.original_action_space, actionPyAry)

        # np array size = number of elements in the array
        actionJavaAry = self.a_gateway.new_array(self.a_gateway.jvm.double, actionPyAry.size)
        if actionPyAry.size == 1:
            # fixed: previously used the raw `action`, which discarded the
            # rescaling performed just above
            actionJavaAry[0] = float(actionPyAry[0])
        else:
            i = 0
            for x in actionPyAry:
                actionJavaAry[i] = x
                i = i + 1

        # step-2: apply the actions and run the simulation one interaction step forward
        self.ipss_app.nextStepDynSim(self.time_step, actionJavaAry, self.action_type)

        # step-3: retrieve the state (a Java 1-D byte array) from the service and
        # convert it to a native numpy array
        observations = self.ipss_app.getEnvObservationsByte1DAry()
        self.state = createNumpyAryFromJavaByte1DAry(observations, datatype=np.float64)

        # step-4: check whether the simulation has finished
        done = self.ipss_app.isSimulationDone()
        if not done:
            reward = self.ipss_app.getReward()
        elif self.steps_beyond_done is None:
            self.steps_beyond_done = 0
            # even when done, ipss_app calculates and returns a corresponding reward
            reward = self.ipss_app.getReward()
        else:
            if self.steps_beyond_done == 0:
                logger.warning("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
            self.steps_beyond_done += 1
            reward = 0.0

        return np.array(self.state).ravel(), reward, done, {}

    def reset(self):
        """Reset with a randomized base case, fault bus, fault start time and
        fault duration; returns the initial observation."""
        case_Idx = np.random.randint(0, self.total_case_num)
        total_fault_buses = len(self.ipss_app.getFaultBusCandidates())
        # an integer in the range [0, total_bus_num - 1]
        fault_bus_idx = np.random.randint(0, total_fault_buses)
        fault_start_time_ary = transfer1DJavaArray2NumpyArray(self.ipss_app.getFaultStartTimeCandidates())
        fault_start_time = fault_start_time_ary[np.random.randint(0, len(fault_start_time_ary))]
        ftd_candidates = transfer1DJavaArray2NumpyArray(self.ipss_app.getFaultDurationCandidates())
        fault_duration_time = ftd_candidates[np.random.randint(0, len(ftd_candidates))]
        # reset the initial state to states of time = 0, non-fault
        self.ipss_app.reset(case_Idx, fault_bus_idx, fault_start_time, fault_duration_time)
        observations = self.ipss_app.getEnvObservationsByte1DAry()
        self.state = createNumpyAryFromJavaByte1DAry(observations, datatype=np.float64)
        self.steps_beyond_done = None
        self.restart_simulation = True
        return np.array(self.state).ravel()

    def validate(self, case_Idx, fault_bus_idx, fault_start_time, fault_duation_time):
        """Re-initialize the simulation with a specific base case and fault.

        NOTE: the last parameter keeps its historical spelling
        ('fault_duation_time') so keyword callers keep working.
        """
        self.ipss_app.reset(case_Idx, fault_bus_idx, fault_start_time, fault_duation_time)
        observations = self.ipss_app.getEnvObservationsByte1DAry()
        self.state = createNumpyAryFromJavaByte1DAry(observations, datatype=np.float64)
        self.steps_beyond_done = None
        self.restart_simulation = True
        return np.array(self.state).ravel()

    def close_connection(self):
        """Shut down the py4j gateway and terminate the Java server process."""
        self.a_gateway.shutdown()
        self.a_gateway.close(keep_callback_server=False, close_callback_server_connections=False)
        self.server_process.terminate()
        print("Java server terminated with PID:", self.server_process.pid)

    def get_base_cases(self):
        """Return the list of base cases known to the service."""
        return list(self.ipss_app.getBaseCases())

    def get_observation_names(self):
        """Return the list of observation channel names."""
        return list(self.ipss_app.getEnvObservationNames())

    def get_all_generator_activePower(self):
        """Return the active power of all generators as a numpy array."""
        return np.array(list(self.ipss_app.getGenerationPAry()))

    def get_all_load_activePower(self):
        """Return the active power of all loads as a numpy array."""
        return np.array(list(self.ipss_app.getLoadPAry()))

    def get_load_activePower_within_action_scope(self):
        """Return the active power of the loads within the agent's action scope."""
        return np.array(list(self.ipss_app.getLoadPWithinActionScope()))

    def get_load_id_within_action_scope(self):
        """Return the bus ids of the loads within the agent's action scope."""
        return list(self.ipss_app.getLoadIdWithinActionScope())

    def get_adjacency_matrix(self):
        """Return the network adjacency matrix as a 2-D numpy int array.

        The matrix is shipped as a Java 1-D byte array with a shape header.
        """
        adj_matrix_ary = self.ipss_app.getAdjacencyMatrixByte1DAry()
        return createNumpyAryFromJavaByte1DAry(adj_matrix_ary, datatype=np.int32)

    def set_branch_status(self, from_bus_num, to_bus_num, cir_id_str, status_int):
        """Set the in/out-of-service status of a branch in the simulated network."""
        self.ipss_app.setBranchStatus(from_bus_num, to_bus_num, cir_id_str, status_int)

    # def _render(self, mode='human', close=False):
| [
"logging.getLogger",
"numpy.ones_like",
"py4j.tests.java_gateway_test.gateway.new_array",
"py4j.java_gateway.GatewayParameters",
"numpy.abs",
"numpy.ones",
"numpy.asarray",
"gym.spaces.Discrete",
"time.sleep",
"gym.spaces.Box",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.f... | [((367, 394), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (384, 394), False, 'import logging, time\n'), ((1818, 1870), 'numpy.frombuffer', 'np.frombuffer', (['javaByte1DAry'], {'dtype': 'np.intc', 'count': '(2)'}), '(javaByte1DAry, dtype=np.intc, count=2)\n', (1831, 1870), True, 'import numpy as np\n'), ((1940, 1994), 'numpy.frombuffer', 'np.frombuffer', (['javaByte1DAry'], {'dtype': 'datatype', 'offset': '(8)'}), '(javaByte1DAry, dtype=datatype, offset=8)\n', (1953, 1994), True, 'import numpy as np\n'), ((2161, 2204), 'py4j.tests.java_gateway_test.gateway.new_array', 'gateway.new_array', (['gateway.jvm.double', 'size'], {}), '(gateway.jvm.double, size)\n', (2178, 2204), False, 'from py4j.tests.java_gateway_test import gateway\n'), ((2948, 2962), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (2956, 2962), True, 'import numpy as np\n'), ((3135, 3159), 'numpy.zeros', 'np.zeros', (['(size1, size2)'], {}), '((size1, size2))\n', (3143, 3159), True, 'import numpy as np\n'), ((4626, 4641), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (4636, 4641), False, 'import logging, time\n'), ((7942, 8028), 'gym.spaces.Box', 'spaces.Box', (['(-999)', '(999)'], {'shape': '(observation_history_length * observation_space_dim,)'}), '(-999, 999, shape=(observation_history_length *\n observation_space_dim,))\n', (7952, 8028), False, 'from gym import spaces\n'), ((8478, 8501), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (8495, 8501), False, 'from gym.utils import seeding\n'), ((9281, 9323), 'numpy.asarray', 'np.asarray', (['actionMapped'], {'dtype': 'np.float64'}), '(actionMapped, dtype=np.float64)\n', (9291, 9323), True, 'import numpy as np\n'), ((11543, 11584), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.total_case_num'], {}), '(0, self.total_case_num)\n', (11560, 11584), True, 'import numpy as np\n'), ((11711, 11750), 'numpy.random.randint', 'np.random.randint', 
(['(0)', 'total_fault_buses'], {}), '(0, total_fault_buses)\n', (11728, 11750), True, 'import numpy as np\n'), ((6518, 6545), 'gym.spaces.Discrete', 'spaces.Discrete', (['action_num'], {}), '(action_num)\n', (6533, 6545), False, 'from gym import spaces\n'), ((6583, 6611), 'numpy.ones', 'np.ones', (['action_location_num'], {}), '(action_location_num)\n', (6590, 6611), True, 'import numpy as np\n'), ((7262, 7310), 'gym.spaces.Box', 'spaces.Box', (['low', 'high'], {'dtype': 'action_ranges.dtype'}), '(low, high, dtype=action_ranges.dtype)\n', (7272, 7310), False, 'from gym import spaces\n'), ((9174, 9192), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (9184, 9192), True, 'import numpy as np\n'), ((13049, 13069), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (13057, 13069), True, 'import numpy as np\n'), ((13689, 13709), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (13697, 13709), True, 'import numpy as np\n'), ((5029, 5044), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5039, 5044), False, 'import logging, time\n'), ((7214, 7227), 'numpy.shape', 'np.shape', (['low'], {}), '(low)\n', (7222, 7227), True, 'import numpy as np\n'), ((11364, 11384), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (11372, 11384), True, 'import numpy as np\n'), ((4812, 4870), 'py4j.java_gateway.GatewayParameters', 'GatewayParameters', ([], {'port': 'server_port_num', 'auto_convert': '(True)'}), '(port=server_port_num, auto_convert=True)\n', (4829, 4870), False, 'from py4j.java_gateway import JavaGateway, GatewayParameters\n'), ((7620, 7668), 'gym.spaces.Box', 'spaces.Box', (['low', 'high'], {'dtype': 'action_ranges.dtype'}), '(low, high, dtype=action_ranges.dtype)\n', (7630, 7668), False, 'from gym import spaces\n'), ((7696, 7713), 'numpy.ones_like', 'np.ones_like', (['low'], {}), '(low)\n', (7708, 7713), True, 'import numpy as np\n'), ((7754, 7804), 'gym.spaces.Box', 'spaces.Box', (['(-ones)', 'ones'], 
{'dtype': 'action_ranges.dtype'}), '(-ones, ones, dtype=action_ranges.dtype)\n', (7764, 7804), False, 'from gym import spaces\n'), ((7422, 7433), 'numpy.abs', 'np.abs', (['low'], {}), '(low)\n', (7428, 7433), True, 'import numpy as np\n')] |
import sys
sys.path.append('./util')
sys.path.append('./model')
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms
import numpy as np
import argparse
import os
import time
import gc
import tensorflow as tf
from dataloader import salicon
from evaluation import cal_cc_score, cal_sim_score, cal_kld_score, cal_auc_score, cal_nss_score, add_center_bias
from unet import standard_unet
from loss import NSS, CC, KLD, cross_entropy
from sam import SAM
def _str2bool(value):
    """Parse a command-line boolean value.

    The original used `type=bool`, a classic argparse pitfall: bool() on any
    non-empty string is True, so `--resume False` would silently enable
    resuming. This parser accepts the usual spellings of true/false and
    raises a clear error for anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', '1'):
        return True
    if lowered in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


parser = argparse.ArgumentParser(description='Saliency prediction for images')
parser.add_argument('--mode', type=str, default='train', help='Selecting running mode (default: train)')
parser.add_argument('--img_dir', type=str, default='../data/images', help='Directory to the image data')
parser.add_argument('--fix_dir', type=str, default='../data/fixations', help='Directory to the raw fixation file')
parser.add_argument('--anno_dir', type=str, default='../data/maps', help='Directory to the saliency maps')
parser.add_argument('--width', type=int, default=320, help='Width of input data')
parser.add_argument('--height', type=int, default=240, help='Height of input data')
parser.add_argument('--clip', type=float, default=-1, help='Gradient clipping')
parser.add_argument('--batch', type=int, default=10, help='Batch size')
parser.add_argument('--epoch', type=int, default=30, help='Number of epochs')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
parser.add_argument('--lr_decay', type=float, default=0.25, help='Learning rate decay factor')
parser.add_argument('--lr_decay_step', type=int, default=10, help='Learning rate decay step')
parser.add_argument('--checkpoint', type=str, default='../save/', help='Checkpoint path')
# type=_str2bool (not type=bool) so that e.g. `--resume False` really disables it.
parser.add_argument('--resume', type=_str2bool, default=False, help='Resume from checkpoint or not')
parser.add_argument('--center_bias', type=_str2bool, default=True, help='Adding center bias or not')
args = parser.parse_args()
# Fix RNG seeds for repeatability (CUDA kernels may still be nondeterministic).
seed = 1
np.random.seed(seed)
torch.manual_seed(seed)
# Input preprocessing: resize to the working resolution, convert to a tensor,
# then normalize with the standard ImageNet channel mean/std.
transform = transforms.Compose([
           transforms.Resize((args.height,args.width)),
           transforms.ToTensor(),
           transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
           ])
def add_summary_value(writer, key, value, iteration):
    """Write one scalar summary for `key` at `iteration` (TF1 summary API)."""
    scalar = tf.Summary.Value(tag=key, simple_value=value)
    writer.add_summary(tf.Summary(value=[scalar]), iteration)
def clip_gradient(optimizer, grad_clip):
    """Clamp every parameter gradient in-place to [-grad_clip, grad_clip]."""
    params = (p for g in optimizer.param_groups for p in g['params'])
    for p in params:
        p.grad.data.clamp_(-grad_clip, grad_clip)
def adjust_learning_rate(optimizer, epoch):
    """Scale each param group's lr by args.lr_decay ** (epoch / args.lr_decay_step).

    NOTE(review): the factor multiplies the *current* lr on every call, so the
    decay compounds across epochs; also `epoch/args.lr_decay_step` is float
    division in Python 3, giving a smooth exponential decay rather than a
    step schedule. Confirm both behaviours are intended.
    """
    if epoch >= 1: #30-adam
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * (args.lr_decay ** (epoch/args.lr_decay_step))
def main():
    """Train the saliency model on SALICON, evaluating each epoch and
    checkpointing both the latest and the best (by mean CC) weights."""
    # IO
    # TF1-style summary writer for TensorBoard scalars.
    tf_summary_writer = tf and tf.summary.FileWriter(args.checkpoint)
    train_data = salicon(args.anno_dir,args.fix_dir,args.img_dir,args.width,args.height,'train',transform)
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=args.batch, shuffle=True, num_workers=4)
    test_data = salicon(args.anno_dir,args.fix_dir,args.img_dir,args.width,args.height,'val',transform)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=args.batch, shuffle=False, num_workers=4)
    # model construction
    #model = standard_unet().cuda()
    model = SAM().cuda()
    if args.resume:
        model.load_state_dict(torch.load(os.path.join(args.checkpoint,'model.pth')))
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-5, nesterov=True)
    # optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-3) #1e-8
    def train(iteration):
        """Run one training epoch; return the updated global iteration count."""
        model.train()
        avg_loss = 0
        for i, (img,sal_map,fix) in enumerate(trainloader):
            load_t0 = time.time()
            img, sal_map, fix = img.cuda(), sal_map.cuda(), fix.cuda()
            optimizer.zero_grad()
            pred = model(img)
            # Combined saliency loss: NSS on fixation points plus KLD and CC
            # against the ground-truth saliency map.
            loss = NSS(pred,fix) + KLD(pred,sal_map) + CC(pred,sal_map)
            # loss = cross_entropy(pred,sal_map)
            loss.backward()
            if args.clip != -1 :
                clip_gradient(optimizer,args.clip) #gradient clipping without normalization
            optimizer.step()
            # Running mean of the loss over the epoch so far.
            avg_loss = (avg_loss*np.maximum(0,i) + loss.data.cpu().numpy())/(i+1)
            load_t1 = time.time()
            if i%25 == 0:
                add_summary_value(tf_summary_writer, 'train_loss', avg_loss, iteration)
                tf_summary_writer.flush()
                # show the information
                print('Totel iter ' + repr(iteration) + ' || L: %.4f || ' % (loss.item()) +
                      'Batch time: %.4f sec. || ' % (load_t1 - load_t0) + 'LR: %.8f' % (args.lr))
            iteration += 1
        return iteration
    def test(iteration):
        """Evaluate on the validation split; log metrics and return mean CC."""
        model.eval()
        nss_score = []
        cc_score = []
        auc_score = []
        sim_score = []
        kld_score = []
        for i, (img,sal_map,fix) in enumerate(testloader):
            img = img.cuda()
            #pred = model(img,softmax=False)
            with torch.no_grad():
                pred = model(img)
            pred = pred.data.cpu().numpy()
            sal_map = sal_map.data.numpy()
            fix = fix.data.numpy()
            # computing score for each data
            for j in range(len(img)):
                cur_pred = pred[j].squeeze()
                cur_pred /= cur_pred.max() # for training with Saliency evaluation metrics
                if args.center_bias:
                    cur_pred = add_center_bias(cur_pred)
                cc_score.append(cal_cc_score(cur_pred,sal_map[j]))
                sim_score.append(cal_sim_score(cur_pred,sal_map[j]))
                kld_score.append(cal_kld_score(cur_pred,sal_map[j]))
                nss_score.append(cal_nss_score(cur_pred,fix[j]))
                auc_score.append(cal_auc_score(cur_pred,fix[j]))
        add_summary_value(tf_summary_writer, 'NSS', np.mean(nss_score), iteration)
        add_summary_value(tf_summary_writer, 'CC', np.mean(cc_score), iteration)
        add_summary_value(tf_summary_writer, 'AUC', np.mean(auc_score), iteration)
        add_summary_value(tf_summary_writer, 'SIM', np.mean(sim_score), iteration)
        add_summary_value(tf_summary_writer, 'KLD', np.mean(kld_score), iteration)
        tf_summary_writer.flush()
        return np.mean(cc_score)
    iteration = 0
    best_score = 0
    for epoch in range(args.epoch):
        adjust_learning_rate(optimizer, epoch+1)
        iteration = train(iteration)
        cur_score = test(iteration)
        # torch.save(model.module.state_dict(),os.path.join(args.checkpoint,'model.pth'))
        torch.save(model.state_dict(),os.path.join(args.checkpoint,'model.pth')) # for single-GPU training
        if cur_score > best_score:
            best_score = cur_score
            # torch.save(model.module.state_dict(),os.path.join(args.checkpoint,'model_best.pth'))
            torch.save(model.state_dict(),os.path.join(args.checkpoint,'model_best.pth')) # for single-GPU training
# evaluation-only
def evaluation():
    """Placeholder for a future evaluation-only entry point (not implemented)."""
    pass


if __name__ == '__main__':
    # Guard so importing this module does not immediately launch training.
    if args.mode == 'train':
        main()
    else:
        evaluation()
| [
"tensorflow.Summary.Value",
"loss.NSS",
"sys.path.append",
"numpy.mean",
"evaluation.cal_auc_score",
"argparse.ArgumentParser",
"loss.KLD",
"sam.SAM",
"numpy.random.seed",
"evaluation.add_center_bias",
"evaluation.cal_cc_score",
"torchvision.transforms.ToTensor",
"numpy.maximum",
"evaluati... | [((11, 36), 'sys.path.append', 'sys.path.append', (['"""./util"""'], {}), "('./util')\n", (26, 36), False, 'import sys\n'), ((37, 63), 'sys.path.append', 'sys.path.append', (['"""./model"""'], {}), "('./model')\n", (52, 63), False, 'import sys\n'), ((573, 642), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Saliency prediction for images"""'}), "(description='Saliency prediction for images')\n", (596, 642), False, 'import argparse\n'), ((2055, 2075), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2069, 2075), True, 'import numpy as np\n'), ((2076, 2099), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2093, 2099), False, 'import torch\n'), ((3161, 3260), 'dataloader.salicon', 'salicon', (['args.anno_dir', 'args.fix_dir', 'args.img_dir', 'args.width', 'args.height', '"""train"""', 'transform'], {}), "(args.anno_dir, args.fix_dir, args.img_dir, args.width, args.height,\n 'train', transform)\n", (3168, 3260), False, 'from dataloader import salicon\n'), ((3269, 3364), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'args.batch', 'shuffle': '(True)', 'num_workers': '(4)'}), '(train_data, batch_size=args.batch, shuffle=True,\n num_workers=4)\n', (3296, 3364), False, 'import torch\n'), ((3381, 3478), 'dataloader.salicon', 'salicon', (['args.anno_dir', 'args.fix_dir', 'args.img_dir', 'args.width', 'args.height', '"""val"""', 'transform'], {}), "(args.anno_dir, args.fix_dir, args.img_dir, args.width, args.height,\n 'val', transform)\n", (3388, 3478), False, 'from dataloader import salicon\n'), ((3486, 3581), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'args.batch', 'shuffle': '(False)', 'num_workers': '(4)'}), '(test_data, batch_size=args.batch, shuffle=False,\n num_workers=4)\n', (3513, 3581), False, 'import torch\n'), ((2166, 2210), 'torchvision.transforms.Resize', 'transforms.Resize', 
(['(args.height, args.width)'], {}), '((args.height, args.width))\n', (2183, 2210), False, 'from torchvision import transforms\n'), ((2243, 2264), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2262, 2264), False, 'from torchvision import transforms\n'), ((2299, 2365), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2319, 2365), False, 'from torchvision import transforms\n'), ((3105, 3143), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3126, 3143), True, 'import tensorflow as tf\n'), ((6745, 6762), 'numpy.mean', 'np.mean', (['cc_score'], {}), '(cc_score)\n', (6752, 6762), True, 'import numpy as np\n'), ((3652, 3657), 'sam.SAM', 'SAM', ([], {}), '()\n', (3655, 3657), False, 'from sam import SAM\n'), ((4147, 4158), 'time.time', 'time.time', ([], {}), '()\n', (4156, 4158), False, 'import time\n'), ((4702, 4713), 'time.time', 'time.time', ([], {}), '()\n', (4711, 4713), False, 'import time\n'), ((6335, 6353), 'numpy.mean', 'np.mean', (['nss_score'], {}), '(nss_score)\n', (6342, 6353), True, 'import numpy as np\n'), ((6417, 6434), 'numpy.mean', 'np.mean', (['cc_score'], {}), '(cc_score)\n', (6424, 6434), True, 'import numpy as np\n'), ((6499, 6517), 'numpy.mean', 'np.mean', (['auc_score'], {}), '(auc_score)\n', (6506, 6517), True, 'import numpy as np\n'), ((6582, 6600), 'numpy.mean', 'np.mean', (['sim_score'], {}), '(sim_score)\n', (6589, 6600), True, 'import numpy as np\n'), ((6665, 6683), 'numpy.mean', 'np.mean', (['kld_score'], {}), '(kld_score)\n', (6672, 6683), True, 'import numpy as np\n'), ((7088, 7130), 'os.path.join', 'os.path.join', (['args.checkpoint', '"""model.pth"""'], {}), "(args.checkpoint, 'model.pth')\n", (7100, 7130), False, 'import os\n'), ((2515, 2560), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'key', 'simple_value': 
'value'}), '(tag=key, simple_value=value)\n', (2531, 2560), True, 'import tensorflow as tf\n'), ((3727, 3769), 'os.path.join', 'os.path.join', (['args.checkpoint', '"""model.pth"""'], {}), "(args.checkpoint, 'model.pth')\n", (3739, 3769), False, 'import os\n'), ((4349, 4366), 'loss.CC', 'CC', (['pred', 'sal_map'], {}), '(pred, sal_map)\n', (4351, 4366), False, 'from loss import NSS, CC, KLD, cross_entropy\n'), ((5462, 5477), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5475, 5477), False, 'import torch\n'), ((7368, 7415), 'os.path.join', 'os.path.join', (['args.checkpoint', '"""model_best.pth"""'], {}), "(args.checkpoint, 'model_best.pth')\n", (7380, 7415), False, 'import os\n'), ((4313, 4327), 'loss.NSS', 'NSS', (['pred', 'fix'], {}), '(pred, fix)\n', (4316, 4327), False, 'from loss import NSS, CC, KLD, cross_entropy\n'), ((4329, 4347), 'loss.KLD', 'KLD', (['pred', 'sal_map'], {}), '(pred, sal_map)\n', (4332, 4347), False, 'from loss import NSS, CC, KLD, cross_entropy\n'), ((5920, 5945), 'evaluation.add_center_bias', 'add_center_bias', (['cur_pred'], {}), '(cur_pred)\n', (5935, 5945), False, 'from evaluation import cal_cc_score, cal_sim_score, cal_kld_score, cal_auc_score, cal_nss_score, add_center_bias\n'), ((5978, 6012), 'evaluation.cal_cc_score', 'cal_cc_score', (['cur_pred', 'sal_map[j]'], {}), '(cur_pred, sal_map[j])\n', (5990, 6012), False, 'from evaluation import cal_cc_score, cal_sim_score, cal_kld_score, cal_auc_score, cal_nss_score, add_center_bias\n'), ((6046, 6081), 'evaluation.cal_sim_score', 'cal_sim_score', (['cur_pred', 'sal_map[j]'], {}), '(cur_pred, sal_map[j])\n', (6059, 6081), False, 'from evaluation import cal_cc_score, cal_sim_score, cal_kld_score, cal_auc_score, cal_nss_score, add_center_bias\n'), ((6115, 6150), 'evaluation.cal_kld_score', 'cal_kld_score', (['cur_pred', 'sal_map[j]'], {}), '(cur_pred, sal_map[j])\n', (6128, 6150), False, 'from evaluation import cal_cc_score, cal_sim_score, cal_kld_score, cal_auc_score, cal_nss_score, 
add_center_bias\n'), ((6184, 6215), 'evaluation.cal_nss_score', 'cal_nss_score', (['cur_pred', 'fix[j]'], {}), '(cur_pred, fix[j])\n', (6197, 6215), False, 'from evaluation import cal_cc_score, cal_sim_score, cal_kld_score, cal_auc_score, cal_nss_score, add_center_bias\n'), ((6249, 6280), 'evaluation.cal_auc_score', 'cal_auc_score', (['cur_pred', 'fix[j]'], {}), '(cur_pred, fix[j])\n', (6262, 6280), False, 'from evaluation import cal_cc_score, cal_sim_score, cal_kld_score, cal_auc_score, cal_nss_score, add_center_bias\n'), ((4631, 4647), 'numpy.maximum', 'np.maximum', (['(0)', 'i'], {}), '(0, i)\n', (4641, 4647), True, 'import numpy as np\n')] |
import pickle
import pytest
import numpy as np
import scipy.sparse as sp
import joblib
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_raises_regexp
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.fixes import parse_version
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone, is_classifier
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.linear_model import _sgd_fast as sgd_fast
from sklearn.model_selection import RandomizedSearchCV
def _update_kwargs(kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
if "tol" not in kwargs:
kwargs["tol"] = None
if "max_iter" not in kwargs:
kwargs["max_iter"] = 5
class _SparseSGDClassifier(linear_model.SGDClassifier):
    """SGDClassifier variant that converts inputs to CSR before delegating."""

    def fit(self, X, y, *args, **kw):
        return super().fit(sp.csr_matrix(X), y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        return super().partial_fit(sp.csr_matrix(X), y, *args, **kw)

    def decision_function(self, X):
        return super().decision_function(sp.csr_matrix(X))

    def predict_proba(self, X):
        return super().predict_proba(sp.csr_matrix(X))
class _SparseSGDRegressor(linear_model.SGDRegressor):
    """SGDRegressor variant that feeds CSR-converted inputs to the parent."""

    def fit(self, X, y, *args, **kw):
        return linear_model.SGDRegressor.fit(self, sp.csr_matrix(X), y,
                                             *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        return linear_model.SGDRegressor.partial_fit(self, sp.csr_matrix(X), y,
                                                     *args, **kw)

    def decision_function(self, X, *args, **kw):
        # XXX untested as of v0.22
        return linear_model.SGDRegressor.decision_function(
            self, sp.csr_matrix(X), *args, **kw)
def SGDClassifier(**kwargs):
    """Dense SGDClassifier preconfigured with the deterministic test defaults."""
    _update_kwargs(kwargs)
    estimator = linear_model.SGDClassifier(**kwargs)
    return estimator
def SGDRegressor(**kwargs):
    """Dense SGDRegressor preconfigured with the deterministic test defaults."""
    _update_kwargs(kwargs)
    estimator = linear_model.SGDRegressor(**kwargs)
    return estimator
def SparseSGDClassifier(**kwargs):
    """CSR-input SGDClassifier preconfigured with the deterministic defaults."""
    _update_kwargs(kwargs)
    estimator = _SparseSGDClassifier(**kwargs)
    return estimator
def SparseSGDRegressor(**kwargs):
    """CSR-input SGDRegressor preconfigured with the deterministic defaults."""
    _update_kwargs(kwargs)
    estimator = _SparseSGDRegressor(**kwargs)
    return estimator
# Test Data
# test sample 1: two linearly separable clusters in 2-D
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]  # expected predictions for T
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
              [1, 1], [0.75, 0.5], [1.5, 1.5],
              [-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3: one-hot style features, used for early-stopping edge cases
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
               [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
               [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
               [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common Test Case to classification and regression
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
    """Reference averaged-SGD implementation (squared loss, L2 penalty).

    Returns (average_weights, average_intercept) after a single pass over X.
    The in-place update order mirrors the Cython implementation so that
    float rounding matches to high precision.
    """
    if weight_init is None:
        weights = np.zeros(X.shape[1])
    else:
        weights = weight_init
    average_weights = np.zeros(X.shape[1])
    intercept = intercept_init
    average_intercept = 0.0
    decay = 1.0
    # sparse data has a fixed decay of .01
    if klass in (SparseSGDClassifier, SparseSGDRegressor):
        decay = .01
    for i, entry in enumerate(X):
        # squared-loss gradient of the prediction p w.r.t. the target y[i]
        p = np.dot(entry, weights)
        p += intercept
        gradient = p - y[i]
        weights *= 1.0 - (eta * alpha)
        weights += -(eta * gradient * entry)
        intercept += -(eta * gradient) * decay
        # incremental running mean of weights/intercept over samples seen
        average_weights *= i
        average_weights += weights
        average_weights /= i + 1.0
        average_intercept *= i
        average_intercept += intercept
        average_intercept /= i + 1.0
    return average_weights, average_intercept
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_alpha(klass):
    """A negative regularization strength must be rejected with ValueError."""
    with pytest.raises(ValueError):
        klass(alpha=-0.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_penalty(klass):
    """An unknown penalty name must be rejected with ValueError."""
    with pytest.raises(ValueError):
        klass(l1_ratio=0.85, penalty='foobar')
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_loss(klass):
    """An unknown loss name must be rejected with ValueError."""
    with pytest.raises(ValueError):
        klass(loss="foobar")
def _test_warm_start(klass, X, Y, lr):
    """Explicit coef/intercept warm restart must equal warm_start=True."""
    base = klass(alpha=0.01, eta0=0.01, shuffle=False,
                 learning_rate=lr)
    base.fit(X, Y)
    explicit = klass(alpha=0.001, eta0=0.01, shuffle=False,
                     learning_rate=lr)
    explicit.fit(X, Y,
                 coef_init=base.coef_.copy(),
                 intercept_init=base.intercept_.copy())
    # Implicit warm restart through the warm_start flag must match both.
    implicit = klass(alpha=0.01, eta0=0.01, shuffle=False,
                     warm_start=True, learning_rate=lr)
    implicit.fit(X, Y)
    assert implicit.t_ == base.t_
    assert_array_almost_equal(implicit.coef_, base.coef_)
    implicit.set_params(alpha=0.001)
    implicit.fit(X, Y)
    assert implicit.t_ == explicit.t_
    assert_array_almost_equal(implicit.coef_, explicit.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize('lr',
                         ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
    """Run the warm-start equivalence check for every learning-rate schedule."""
    _test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_input_format(klass):
    """A two-dimensional target array must be rejected by fit."""
    clf = klass(alpha=0.01, shuffle=False)
    clf.fit(X, Y)
    y_col = np.array(Y)[:, np.newaxis]
    y_2d = np.c_[y_col, y_col]
    with pytest.raises(ValueError):
        clf.fit(X, y_2d)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_clone(klass):
    """clone() plus set_params must behave like constructing from scratch."""
    cloned = clone(klass(alpha=0.01, penalty='l1'))
    cloned.set_params(penalty='l2')
    cloned.fit(X, Y)
    reference = klass(alpha=0.01, penalty='l2')
    reference.fit(X, Y)
    assert_array_equal(cloned.coef_, reference.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_plain_has_no_average_attr(klass):
    """Averaging buffers must exist after fit only when average=True."""
    buffer_attrs = ('_average_coef', '_average_intercept',
                    '_standard_intercept', '_standard_coef')
    averaged = klass(average=True, eta0=.01)
    averaged.fit(X, Y)
    for attr in buffer_attrs:
        assert hasattr(averaged, attr)
    plain = klass()
    plain.fit(X, Y)
    for attr in buffer_attrs:
        assert not hasattr(plain, attr)
# TODO: remove in 1.0
@pytest.mark.parametrize('klass', [SGDClassifier, SGDRegressor])
def test_sgd_deprecated_attr(klass):
    """Reading the old public averaging attributes must raise FutureWarning."""
    est = klass(average=True, eta0=.01)
    est.fit(X, Y)
    msg = "Attribute {} was deprecated"
    deprecated_attrs = ('average_coef_', 'average_intercept_',
                        'standard_coef_', 'standard_intercept_')
    for att in deprecated_attrs:
        with pytest.warns(FutureWarning, match=msg.format(att)):
            getattr(est, att)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_late_onset_averaging_not_reached(klass):
    """average=600 must be a no-op when only 100 partial_fit calls happen."""
    late_avg = klass(average=600)
    plain = klass()
    classes = np.unique(Y)
    for _ in range(100):
        if is_classifier(late_avg):
            late_avg.partial_fit(X, Y, classes=classes)
            plain.partial_fit(X, Y, classes=classes)
        else:
            late_avg.partial_fit(X, Y)
            plain.partial_fit(X, Y)
    assert_array_almost_equal(late_avg.coef_, plain.coef_, decimal=16)
    assert_almost_equal(late_avg.intercept_, plain.intercept_, decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_late_onset_averaging_reached(klass):
    """Averaging started mid-fit must match the asgd reference seeded with
    the weights of a plain one-epoch run."""
    eta0 = .001
    alpha = .0001
    # Re-encode labels {1, 2} as {-1, +1} for the squared-loss comparison.
    Y_encode = np.array(Y)
    Y_encode[Y_encode == 1] = -1.0
    Y_encode[Y_encode == 2] = 1.0
    clf1 = klass(average=7, learning_rate="constant",
                 loss='squared_loss', eta0=eta0,
                 alpha=alpha, max_iter=2, shuffle=False)
    clf2 = klass(average=0, learning_rate="constant",
                 loss='squared_loss', eta0=eta0,
                 alpha=alpha, max_iter=1, shuffle=False)
    clf1.fit(X, Y_encode)
    clf2.fit(X, Y_encode)
    # clf2's one-epoch weights seed the reference second-epoch average.
    average_weights, average_intercept = \
        asgd(klass, X, Y_encode, eta0, alpha,
             weight_init=clf2.coef_.ravel(),
             intercept_init=clf2.intercept_)
    assert_array_almost_equal(clf1.coef_.ravel(),
                              average_weights.ravel(),
                              decimal=16)
    assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_alpha_for_optimal_learning_rate(klass):
    """alpha=0 must be rejected with the optimal schedule, since alpha is
    used to compute the optimal learning rate."""
    with pytest.raises(ValueError):
        klass(alpha=0, learning_rate="optimal")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_early_stopping(klass):
    """Fit must converge before max_iter whether early stopping is on or off."""
    X = iris.data[iris.target > 0]
    Y = iris.target[iris.target > 0]
    max_iter = 1000
    for early_stopping in (True, False):
        clf = klass(early_stopping=early_stopping, tol=1e-3,
                    max_iter=max_iter).fit(X, Y)
        assert clf.n_iter_ < max_iter
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_adaptive_longer_than_constant(klass):
    """The adaptive schedule must take more iterations than a constant one."""
    adaptive = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3,
                     max_iter=100)
    adaptive.fit(iris.data, iris.target)
    constant = klass(learning_rate="constant", eta0=0.01, tol=1e-3,
                     max_iter=100)
    constant.fit(iris.data, iris.target)
    assert adaptive.n_iter_ > constant.n_iter_
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_validation_set_not_used_for_training(klass):
    """With shuffling off, early-stopping must train only on the train split:
    fitting on the manually-extracted split must give identical coefs."""
    X, Y = iris.data, iris.target
    validation_fraction = 0.4
    seed = 42
    shuffle = False
    max_iter = 10
    clf1 = klass(early_stopping=True,
                 random_state=np.random.RandomState(seed),
                 validation_fraction=validation_fraction,
                 learning_rate='constant', eta0=0.01,
                 tol=None, max_iter=max_iter, shuffle=shuffle)
    clf1.fit(X, Y)
    assert clf1.n_iter_ == max_iter
    clf2 = klass(early_stopping=False,
                 random_state=np.random.RandomState(seed),
                 learning_rate='constant', eta0=0.01,
                 tol=None, max_iter=max_iter, shuffle=shuffle)
    # Reproduce clf1's internal train subset with the same seeded splitter.
    if is_classifier(clf2):
        cv = StratifiedShuffleSplit(test_size=validation_fraction,
                                    random_state=seed)
    else:
        cv = ShuffleSplit(test_size=validation_fraction,
                          random_state=seed)
    idx_train, idx_val = next(cv.split(X, Y))
    idx_train = np.sort(idx_train)  # remove shuffling
    clf2.fit(X[idx_train], Y[idx_train])
    assert clf2.n_iter_ == max_iter
    assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_n_iter_no_change(klass):
    """n_iter_ must grow monotonically with the patience n_iter_no_change."""
    X, Y = iris.data, iris.target
    for early_stopping in (True, False):
        n_iter_list = []
        for patience in (2, 3, 10):
            est = klass(early_stopping=early_stopping,
                        n_iter_no_change=patience,
                        tol=1e-4, max_iter=1000).fit(X, Y)
            n_iter_list.append(est.n_iter_)
        assert_array_equal(n_iter_list, sorted(n_iter_list))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
                                   SGDRegressor, SparseSGDRegressor])
def test_not_enough_sample_for_early_stopping(klass):
    """Carving out a 99% validation split must leave too little to train on."""
    clf = klass(early_stopping=True, validation_fraction=0.99)
    with pytest.raises(ValueError):
        clf.fit(X3, Y3)
###############################################################################
# Classification Test Case
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
    """Every classification loss must learn the separable toy problem."""
    for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
        clf = klass(penalty='l2', alpha=0.01, fit_intercept=True,
                    loss=loss, max_iter=10, shuffle=True)
        clf.fit(X, Y)
        assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_bad_l1_ratio(klass):
    """An l1_ratio above 1 must be rejected with ValueError."""
    with pytest.raises(ValueError):
        klass(l1_ratio=1.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_bad_learning_rate_schedule(klass):
    """An unknown learning-rate schedule name must be rejected."""
    with pytest.raises(ValueError):
        klass(learning_rate="<unknown>")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_bad_eta0(klass):
    """eta0=0 must be rejected when the constant schedule is requested."""
    with pytest.raises(ValueError):
        klass(eta0=0, learning_rate="constant")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_max_iter_param(klass):
    """A negative max_iter must be rejected with ValueError."""
    with pytest.raises(ValueError):
        klass(max_iter=-10000)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_shuffle_param(klass):
    """A non-boolean shuffle argument must be rejected with ValueError."""
    with pytest.raises(ValueError):
        klass(shuffle="false")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_early_stopping_param(klass):
    """A non-boolean early_stopping argument must be rejected."""
    with pytest.raises(ValueError):
        klass(early_stopping="false")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_validation_fraction(klass):
    """A negative validation_fraction must be rejected with ValueError."""
    with pytest.raises(ValueError):
        klass(validation_fraction=-.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_n_iter_no_change(klass):
    """A zero patience (n_iter_no_change=0) must be rejected."""
    with pytest.raises(ValueError):
        klass(n_iter_no_change=0)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_argument_coef(klass):
    """coef_init is a fit() argument, not a constructor parameter."""
    with pytest.raises(TypeError):
        klass(coef_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_provide_coef(klass):
    """A coef_init whose shape does not match the data must be rejected."""
    with pytest.raises(ValueError):
        klass().fit(X, Y, coef_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept(klass):
    """An intercept_init whose shape does not match the data must be rejected."""
    with pytest.raises(ValueError):
        klass().fit(X, Y, intercept_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_early_stopping_with_partial_fit(klass):
    """partial_fit must refuse to run when early_stopping=True."""
    with pytest.raises(ValueError):
        klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_binary(klass):
    """A scalar intercept_init must be accepted in the binary case."""
    klass().fit(X5, Y5, intercept_init=0)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
    # Checks the SGDClassifier correctly computes the average weights
    # by comparing one unshuffled epoch against the asgd reference.
    eta = .1
    alpha = 2.
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    clf = klass(loss='squared_loss',
                learning_rate='constant',
                eta0=eta, alpha=alpha,
                fit_intercept=True,
                max_iter=1, average=True, shuffle=False)
    # simple linear function without noise
    y = np.dot(X, w)
    y = np.sign(y)
    clf.fit(X, y)
    average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
    average_weights = average_weights.reshape(1, -1)
    assert_array_almost_equal(clf.coef_,
                              average_weights,
                              decimal=14)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
    """A previously fitted intercept_ must be reusable as intercept_init."""
    fitted_binary = klass().fit(X5, Y5)
    klass().fit(X5, Y5, intercept_init=fitted_binary.intercept_)
    fitted = klass().fit(X, Y)
    klass().fit(X, Y, intercept_init=fitted.intercept_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
    """Fitting a single-class target must raise ValueError."""
    clf = klass(alpha=0.01, max_iter=20)
    with pytest.raises(ValueError):
        clf.fit(X2, np.ones(9))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
    # partial_fit with class_weight='balanced' not supported"""
    # NOTE(review): the regex below must match the library's error message
    # verbatim (including the apparent "can us a" typo) — do not "fix" it here.
    regex = (r"class_weight 'balanced' is not supported for "
             r"partial_fit\. In order to use 'balanced' weights, "
             r"use compute_class_weight\('balanced', classes=classes, y=y\). "
             r"In place of y you can us a large enough sample "
             r"of the full training set target to properly "
             r"estimate the class frequency distributions\. "
             r"Pass the resulting weights as the class_weight "
             r"parameter\.")
    assert_raises_regexp(ValueError,
                         regex,
                         klass(class_weight='balanced').partial_fit,
                         X, Y, classes=np.unique(Y))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
    """Multi-class fit must expose per-class coefs and predict correctly."""
    clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
    assert clf.coef_.shape == (3, 2)
    assert clf.intercept_.shape == (3,)
    assert clf.decision_function([[0, 0]]).shape == (1, 3)
    assert_array_equal(clf.predict(T2), true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
    # Multi-class averaging: each one-vs-all binary sub-problem must match
    # the asgd reference implementation.
    eta = .001
    alpha = .01
    # Multi-class average test case
    clf = klass(loss='squared_loss',
                learning_rate='constant',
                eta0=eta, alpha=alpha,
                fit_intercept=True,
                max_iter=1, average=True, shuffle=False)
    np_Y2 = np.array(Y2)
    clf.fit(X2, np_Y2)
    classes = np.unique(np_Y2)
    for i, cl in enumerate(classes):
        # Build the +1/-1 one-vs-all target for class `cl`.
        y_i = np.ones(np_Y2.shape[0])
        y_i[np_Y2 != cl] = -1
        average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
        assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
        assert_almost_equal(average_intercept,
                            clf.intercept_[i],
                            decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
    """Multi-class fit must accept matching coef_init/intercept_init
    and still produce correctly shaped attributes and predictions."""
    clf = klass(alpha=0.01, max_iter=20)
    clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
            intercept_init=np.zeros(3))
    assert clf.coef_.shape == (3, 2)
    # Fixed: the original `assert clf.intercept_.shape, (3,)` used a comma,
    # which made (3,) the assert *message* and the check always pass.
    assert clf.intercept_.shape == (3,)
    pred = clf.predict(T2)
    assert_array_equal(pred, true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
    # The multi-class one-vs-all fit must also work with several workers.
    model = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
    assert model.coef_.shape == (3, 2)
    assert model.intercept_.shape == (3,)
    assert model.decision_function([[0, 0]]).shape == (1, 3)
    assert_array_equal(model.predict(T2), true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
    """Shape validation of coef_init/intercept_init for multi-class fits."""
    # A coef_init that does not match the dataset must be rejected ...
    with pytest.raises(ValueError):
        klass().fit(X2, Y2, coef_init=np.zeros((2, 2)))
    # ... while a matching one is accepted.
    klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
    # Same contract for intercept_init.
    with pytest.raises(ValueError):
        klass().fit(X2, Y2, intercept_init=np.zeros((1,)))
    klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
    """predict_proba / predict_log_proba are exposed only for
    probabilistic losses and raise an informative AttributeError
    otherwise.  See
    https://github.com/scikit-learn/scikit-learn/issues/10938.
    """
    for loss in linear_model.SGDClassifier.loss_functions:
        est = SGDClassifier(loss=loss)
        if loss in ('log', 'modified_huber'):
            assert hasattr(est, 'predict_proba')
            assert hasattr(est, 'predict_log_proba')
        else:
            message = ("probability estimates are not "
                       "available for loss={!r}".format(loss))
            assert not hasattr(est, 'predict_proba')
            assert not hasattr(est, 'predict_log_proba')
            # Attribute access itself must raise with the message above.
            with pytest.raises(AttributeError, match=message):
                est.predict_proba
            with pytest.raises(AttributeError, match=message):
                est.predict_log_proba
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
    # Check SGD.predict_proba for the losses that support it.
    # Hinge loss does not allow for conditional probability estimates.
    # We cannot use the factory here, because it defines predict_proba
    # anyway.
    clf = SGDClassifier(loss="hinge", alpha=0.01,
                        max_iter=10, tol=None).fit(X, Y)
    assert not hasattr(clf, "predict_proba")
    assert not hasattr(clf, "predict_log_proba")
    # log and modified_huber losses can output probability estimates.
    # Binary case: the probability of class 1 must agree with which side
    # of the boundary the probe point lies on.
    for loss in ["log", "modified_huber"]:
        clf = klass(loss=loss, alpha=0.01, max_iter=10)
        clf.fit(X, Y)
        p = clf.predict_proba([[3, 2]])
        assert p[0, 1] > 0.5
        p = clf.predict_proba([[-1, -1]])
        assert p[0, 1] < 0.5
        # Log-probabilities must preserve the same ordering.
        p = clf.predict_log_proba([[3, 2]])
        assert p[0, 1] > p[0, 0]
        p = clf.predict_log_proba([[-1, -1]])
        assert p[0, 1] < p[0, 0]
    # Log loss multiclass probability estimates: probabilities must be a
    # valid distribution ordered consistently with decision_function.
    clf = klass(loss="log", alpha=0.01, max_iter=10).fit(X2, Y2)
    d = clf.decision_function([[.1, -.1], [.3, .2]])
    p = clf.predict_proba([[.1, -.1], [.3, .2]])
    assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
    assert_almost_equal(p[0].sum(), 1)
    assert np.all(p[0] >= 0)
    p = clf.predict_proba([[-1, -1]])
    d = clf.decision_function([[-1, -1]])
    assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
    # predict_log_proba must equal log(predict_proba).
    lp = clf.predict_log_proba([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    assert_array_almost_equal(np.log(p), lp)
    lp = clf.predict_log_proba([[-1, -1]])
    p = clf.predict_proba([[-1, -1]])
    assert_array_almost_equal(np.log(p), lp)
    # Modified Huber multiclass probability estimates; requires a separate
    # test because the hard zero/one probabilities may destroy the
    # ordering present in decision_function output.
    clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
    clf.fit(X2, Y2)
    d = clf.decision_function([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    if klass != SparseSGDClassifier:
        assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
    else:    # XXX the sparse test gets a different X2 (?)
        assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
    # the following sample produces decision_function values < -1,
    # which would cause naive normalization to fail (see comment
    # in SGDClassifier.predict_proba)
    x = X.mean(axis=0)
    d = clf.decision_function([x])
    if np.all(d < -1):    # XXX not true in sparse test case (why?)
        p = clf.predict_proba([x])
        assert_array_almost_equal(p[0], [1 / 3.] * 3)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
    """L1 penalty drives the inner coefficients to exactly zero, and the
    sparse representation survives sparsify() and pickling."""
    rng = np.random.RandomState(13)
    perm = np.arange(len(X4))
    rng.shuffle(perm)
    X = X4[perm, :]
    Y = Y4[perm]
    clf = klass(penalty='l1', alpha=.2, fit_intercept=False,
                max_iter=2000, tol=None, shuffle=False)
    clf.fit(X, Y)
    assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
    assert_array_equal(clf.predict(X), Y)
    # sparsify() with dense input keeps predictions intact ...
    clf.sparsify()
    assert sp.issparse(clf.coef_)
    assert_array_equal(clf.predict(X), Y)
    # ... and so does a pickle round-trip with a sparse coef_.
    clf = pickle.loads(pickle.dumps(clf))
    assert sp.issparse(clf.coef_)
    assert_array_equal(clf.predict(X), Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
    """Shrinking a class weight must move the decision boundary."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    # Unweighted fit: the probe point lands in class +1.
    clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False,
                class_weight=None)
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # Down-weighting class 1 rotates the hyperplane clock-wise, so the
    # same probe point flips to class -1.
    clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False,
                class_weight={1: 0.001})
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
    """Explicit equal class weights approx. match no class weights."""
    clf = klass(alpha=0.1, max_iter=1000, class_weight=None)
    clf.fit([[1, 0], [1, 0], [0, 1], [0, 1]], [0, 0, 1, 1])
    clf_weighted = klass(alpha=0.1, max_iter=1000,
                         class_weight={0: 0.5, 1: 0.5})
    clf_weighted.fit([[1, 0], [0, 1]], [0, 1])
    # Only similar up to some epsilon due to the learning rate schedule.
    assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
    # A class_weight key that is not an existing label -> ValueError.
    est = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
    with pytest.raises(ValueError):
        est.fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_format(klass):
    # class_weight must be a dict or 'balanced'; a bare list -> ValueError.
    est = klass(alpha=0.1, max_iter=1000, class_weight=[0.5])
    with pytest.raises(ValueError):
        est.fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
    """class_weight and sample_weight combine multiplicatively."""
    class_weights = {1: .6, 2: .3}
    rng = np.random.RandomState(0)
    sample_weights = rng.random_sample(Y4.shape[0])
    # Pre-multiply the per-sample weights by each sample's class weight.
    combined = np.copy(sample_weights)
    combined[Y4 == 1] *= class_weights[1]
    combined[Y4 == 2] *= class_weights[2]
    clf_both = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
    clf_combined = klass(alpha=0.1, max_iter=20)
    clf_both.fit(X4, Y4, sample_weight=sample_weights)
    clf_combined.fit(X4, Y4, sample_weight=combined)
    assert_almost_equal(clf_both.coef_, clf_combined.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
    """class_weight='balanced' on balanced and imbalanced iris data.

    On the (balanced) iris dataset balanced weighting must be a no-op;
    on an artificially imbalanced version it must improve the weighted
    f1-score.
    """
    # compute reference metrics on iris dataset that is quite balanced by
    # default
    X, y = iris.data, iris.target
    X = scale(X)
    idx = np.arange(X.shape[0])
    rng = np.random.RandomState(6)
    rng.shuffle(idx)
    X = X[idx]
    y = y[idx]
    clf = klass(alpha=0.0001, max_iter=1000,
                class_weight=None, shuffle=False).fit(X, y)
    f1 = metrics.f1_score(y, clf.predict(X), average='weighted')
    assert_almost_equal(f1, 0.96, decimal=1)
    # make the same prediction using balanced class_weight
    clf_balanced = klass(alpha=0.0001, max_iter=1000,
                         class_weight="balanced",
                         shuffle=False).fit(X, y)
    f1 = metrics.f1_score(y, clf_balanced.predict(X), average='weighted')
    assert_almost_equal(f1, 0.96, decimal=1)
    # Make sure that in the balanced case it does not change anything
    # to use "balanced"
    assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
    # build an very very imbalanced dataset out of iris data by
    # replicating the class-0 samples ten extra times
    X_0 = X[y == 0, :]
    y_0 = y[y == 0]
    X_imbalanced = np.vstack([X] + [X_0] * 10)
    y_imbalanced = np.concatenate([y] + [y_0] * 10)
    # fit a model on the imbalanced data without class weight info:
    # the score should degrade below the balanced reference
    clf = klass(max_iter=1000, class_weight=None, shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average='weighted') < 0.96
    # fit a model with balanced class_weight enabled: the score should
    # recover above the reference
    clf = klass(max_iter=1000, class_weight="balanced",
                shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average='weighted') > 0.96
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
    """Per-sample weights must influence the decision boundary."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # Down-weighting the three class-1 samples rotates the hyperplane
    # clock-wise, flipping the prediction at the probe point.
    clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_sample_weights(klass):
    # A sample_weight vector whose length does not match X -> ValueError.
    est = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
    with pytest.raises(ValueError):
        est.fit(X, Y, sample_weight=np.arange(7))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_exception(klass):
    # The first partial_fit call must be given the full set of classes.
    est = klass(alpha=0.01)
    with pytest.raises(ValueError):
        est.partial_fit(X3, Y3)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_binary(klass):
    """partial_fit on a binary problem: shapes, coef_ reuse, predictions."""
    third = X.shape[0] // 3
    clf = klass(alpha=0.01)
    classes = np.unique(Y)
    clf.partial_fit(X[:third], Y[:third], classes=classes)
    assert clf.coef_.shape == (1, X.shape[1])
    assert clf.intercept_.shape == (1,)
    assert clf.decision_function([[0, 0]]).shape == (1, )
    # BUG FIX: the original `assert id1, id2` compared nothing (id2 was
    # the assert *message*), and `id(clf.coef_.data)` ids a fresh
    # memoryview object on every access, so buffer identity was never
    # actually tested.  Compare the underlying buffer address instead to
    # check that coef_ hasn't been re-allocated by the second call.
    ptr_before = clf.coef_.__array_interface__['data'][0]
    clf.partial_fit(X[third:], Y[third:])
    ptr_after = clf.coef_.__array_interface__['data'][0]
    assert ptr_before == ptr_after
    y_pred = clf.predict(T)
    assert_array_equal(y_pred, true_result)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass(klass):
    """partial_fit on a multi-class problem: shapes and coef_ reuse."""
    third = X2.shape[0] // 3
    clf = klass(alpha=0.01)
    classes = np.unique(Y2)
    clf.partial_fit(X2[:third], Y2[:third], classes=classes)
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
    assert clf.decision_function([[0, 0]]).shape == (1, 3)
    # BUG FIX: the original `assert id1, id2` compared nothing (id2 was
    # the assert *message*), and `id(clf.coef_.data)` ids a fresh
    # memoryview object on every access.  Compare the underlying buffer
    # address instead to check that coef_ hasn't been re-allocated.
    ptr_before = clf.coef_.__array_interface__['data'][0]
    clf.partial_fit(X2[third:], Y2[third:])
    ptr_after = clf.coef_.__array_interface__['data'][0]
    assert ptr_before == ptr_after
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass_average(klass):
    # Averaged multi-class partial_fit keeps the expected
    # coef_/intercept_ shapes across successive calls.
    third = X2.shape[0] // 3
    clf = klass(alpha=0.01, average=X2.shape[0])
    clf.partial_fit(X2[:third], Y2[:third], classes=np.unique(Y2))
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
    clf.partial_fit(X2[third:], Y2[third:])
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_fit_then_partial_fit(klass):
    """partial_fit must work after an initial multi-class fit.

    Non-regression test for #2496: fit used to produce a Fortran-ordered
    coef_ that a subsequent partial_fit could not handle.
    """
    est = klass()
    est.fit(X2, Y2)
    est.partial_fit(X2, Y2)  # no exception here
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
@pytest.mark.parametrize('lr',
                         ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_classif(klass, lr):
    # Two partial_fit passes must match a single fit(max_iter=2) for
    # every learning-rate schedule, on binary and multi-class data.
    for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
        ref = klass(alpha=0.01, eta0=0.01, max_iter=2,
                    learning_rate=lr, shuffle=False)
        ref.fit(X_, Y_)
        y_ref = ref.decision_function(T_)
        t_ref = ref.t_
        inc = klass(alpha=0.01, eta0=0.01, learning_rate=lr,
                    shuffle=False)
        classes = np.unique(Y_)
        for _ in range(2):
            inc.partial_fit(X_, Y_, classes=classes)
        assert inc.t_ == t_ref
        assert_array_almost_equal(y_ref, inc.decision_function(T_),
                                  decimal=2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_regression_losses(klass):
    # Every regression-style loss must reach a perfect fit on this toy
    # problem.  The RandomState object is shared across the four fits on
    # purpose (same consumption order as the original spelled-out code).
    random_state = np.random.RandomState(1)
    configs = [
        dict(learning_rate="constant", eta0=0.1,
             loss="epsilon_insensitive"),
        dict(learning_rate="constant", eta0=0.1,
             loss="squared_epsilon_insensitive"),
        dict(loss="huber"),
        dict(learning_rate="constant", eta0=0.01, loss="squared_loss"),
    ]
    for params in configs:
        est = klass(alpha=0.01, random_state=random_state, **params)
        est.fit(X, Y)
        assert 1.0 == np.mean(est.predict(X) == Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_warm_start_multiclass(klass):
    # Delegate to the shared warm-start checker on the multi-class data.
    _test_warm_start(klass, X2, Y2, "optimal")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_multiple_fit(klass):
    """Repeated fit calls with differently shaped inputs must work."""
    clf = klass(alpha=0.01, shuffle=False)
    clf.fit(X, Y)
    assert hasattr(clf, "coef_")
    # Non-regression test: refit with a different label set and fewer
    # features than the first call.
    y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
    clf.fit(X[:, :-1], y)
###############################################################################
# Regression Test Case
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_reg(klass):
    # Smoke test: regression on a symmetric toy problem must assign equal
    # weights to the two (identical) features.
    reg = klass(alpha=0.1, max_iter=2, fit_intercept=False)
    reg.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    assert reg.coef_[0] == reg.coef_[1]
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
    """The averaged regressor matches the naive ASGD implementation."""
    eta, alpha = .001, .01
    n_samples, n_features = 20, 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    # Noise-free linear target.
    y = np.dot(X, w)
    reg = klass(loss='squared_loss',
                learning_rate='constant',
                eta0=eta, alpha=alpha,
                fit_intercept=True,
                max_iter=1, average=True, shuffle=False)
    reg.fit(X, y)
    ref_weights, ref_intercept = asgd(klass, X, y, eta, alpha)
    assert_array_almost_equal(reg.coef_, ref_weights, decimal=16)
    assert_almost_equal(reg.intercept_, ref_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_partial_fit(klass):
    """Two partial_fit halves yield the same average as a single fit."""
    eta, alpha = .001, .01
    n_samples, n_features = 20, 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    y = np.dot(X, w)  # noise-free linear target
    reg = klass(loss='squared_loss',
                learning_rate='constant',
                eta0=eta, alpha=alpha,
                fit_intercept=True,
                max_iter=1, average=True, shuffle=False)
    half = int(n_samples / 2)
    reg.partial_fit(X[:half][:], y[:half])
    reg.partial_fit(X[half:][:], y[half:])
    ref_weights, ref_intercept = asgd(klass, X, y, eta, alpha)
    assert_array_almost_equal(reg.coef_, ref_weights, decimal=16)
    assert_almost_equal(reg.intercept_[0], ref_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_average_sparse(klass):
    """Average weights are computed correctly on data containing zeros."""
    eta, alpha = .001, .01
    reg = klass(loss='squared_loss',
                learning_rate='constant',
                eta0=eta, alpha=alpha,
                fit_intercept=True,
                max_iter=1, average=True, shuffle=False)
    half = int(Y3.shape[0] / 2)
    reg.partial_fit(X3[:half][:], Y3[:half])
    reg.partial_fit(X3[half:][:], Y3[half:])
    ref_weights, ref_intercept = asgd(klass, X3, Y3, eta, alpha)
    assert_array_almost_equal(reg.coef_, ref_weights, decimal=16)
    assert_almost_equal(reg.intercept_, ref_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_least_squares_fit(klass):
    """Squared loss fits a linear trend, with and without noise."""
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(-5, 5, n_samples).reshape(n_samples, 1)
    # Noise-free linear function: near-perfect score expected.
    y = 0.5 * X.ravel()
    reg = klass(loss='squared_loss', alpha=0.1, max_iter=20,
                fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.99
    # With noise the score drops but stays clearly above chance.
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    reg = klass(loss='squared_loss', alpha=0.1, max_iter=20,
                fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.5
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_epsilon_insensitive(klass):
    """Epsilon-insensitive loss fits a linear trend, w/ and w/o noise."""
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(-5, 5, n_samples).reshape(n_samples, 1)
    # Noise-free linear function: near-perfect score expected.
    y = 0.5 * X.ravel()
    reg = klass(loss='epsilon_insensitive', epsilon=0.01,
                alpha=0.1, max_iter=20,
                fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.99
    # With noise the score drops but stays clearly above chance.
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    reg = klass(loss='epsilon_insensitive', epsilon=0.01,
                alpha=0.1, max_iter=20,
                fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.5
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_huber_fit(klass):
    """Huber loss fits a linear trend, with and without noise."""
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(-5, 5, n_samples).reshape(n_samples, 1)
    # Noise-free linear function: near-perfect score expected.
    y = 0.5 * X.ravel()
    reg = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20,
                fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.99
    # With noise the score drops but stays clearly above chance.
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    reg = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20,
                fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.5
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_elasticnet_convergence(klass):
    """SGD with elasticnet penalty agrees with coordinate descent.

    For several (alpha, l1_ratio) pairs the SGD solution must be close
    (2 decimals) to the ElasticNet coordinate-descent solution on a
    noise-free linear problem.
    """
    n_samples, n_features = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    # ground_truth linear model that generate y from X and to which the
    # models should converge if the regularizer would be set to 0.0
    ground_truth_coef = rng.randn(n_features)
    y = np.dot(X, ground_truth_coef)
    # XXX: alpha = 0.1 seems to cause convergence problems
    for alpha in [0.01, 0.001]:
        for l1_ratio in [0.5, 0.8, 1.0]:
            cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
                                         fit_intercept=False)
            cd.fit(X, y)
            sgd = klass(penalty='elasticnet', max_iter=50,
                        alpha=alpha, l1_ratio=l1_ratio,
                        fit_intercept=False)
            sgd.fit(X, y)
            err_msg = ("cd and sgd did not converge to comparable "
                       "results for alpha=%f and l1_ratio=%f"
                       % (alpha, l1_ratio))
            assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
                                err_msg=err_msg)
@ignore_warnings
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_partial_fit(klass):
    """Regressor partial_fit: shapes and in-place reuse of coef_."""
    third = X.shape[0] // 3
    clf = klass(alpha=0.01)
    clf.partial_fit(X[:third], Y[:third])
    assert clf.coef_.shape == (X.shape[1], )
    assert clf.intercept_.shape == (1,)
    assert clf.predict([[0, 0]]).shape == (1, )
    # BUG FIX: the original `assert id1, id2` compared nothing (id2 was
    # the assert *message*), and `id(clf.coef_.data)` ids a fresh
    # memoryview object on every access.  Compare the underlying buffer
    # address instead to check that coef_ hasn't been re-allocated.
    ptr_before = clf.coef_.__array_interface__['data'][0]
    clf.partial_fit(X[third:], Y[third:])
    ptr_after = clf.coef_.__array_interface__['data'][0]
    assert ptr_before == ptr_after
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize('lr',
                         ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit(klass, lr):
    # Two partial_fit passes must match a single fit(max_iter=2) for
    # every learning-rate schedule.
    ref = klass(alpha=0.01, max_iter=2, eta0=0.01,
                learning_rate=lr, shuffle=False)
    ref.fit(X, Y)
    y_ref = ref.predict(T)
    t_ref = ref.t_
    inc = klass(alpha=0.01, eta0=0.01,
                learning_rate=lr, shuffle=False)
    for _ in range(2):
        inc.partial_fit(X, Y)
    assert inc.t_ == t_ref
    assert_array_almost_equal(y_ref, inc.predict(T), decimal=2)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_loss_function_epsilon(klass):
    # set_params must propagate epsilon into the loss-function table.
    reg = klass(epsilon=0.9)
    reg.set_params(epsilon=0.1)
    assert reg.loss_functions['huber'][1] == 0.1
def test_l1_ratio():
    """l1_ratio extremes must reproduce pure L1 / pure L2 penalties."""
    X, y = datasets.make_classification(n_samples=1000,
                                        n_features=100, n_informative=20,
                                        random_state=1234)

    def _fit(penalty, **extra):
        # All estimators share the same base settings and random_state.
        return SGDClassifier(alpha=0.001, penalty=penalty, tol=None,
                             max_iter=6, random_state=42, **extra).fit(X, y)

    # l1_ratio close to 1 behaves like a pure l1 penalty ...
    assert_array_almost_equal(
        _fit('elasticnet', l1_ratio=0.9999999999).coef_,
        _fit('l1').coef_)
    # ... and l1_ratio close to 0 like a pure l2 penalty.
    assert_array_almost_equal(
        _fit('elasticnet', l1_ratio=0.0000000001).coef_,
        _fit('l2').coef_)
def test_underflow_or_overlow():
    # NOTE(review): "overlow" in the name is a typo for "overflow"; kept
    # as-is because renaming would change the public test id.
    with np.errstate(all='raise'):
        # Generate some weird data with hugely unscaled features
        rng = np.random.RandomState(0)
        n_samples = 100
        n_features = 10
        X = rng.normal(size=(n_samples, n_features))
        X[:, :2] *= 1e300
        assert np.isfinite(X).all()
        # Use MinMaxScaler to scale the data without introducing a numerical
        # instability (computing the standard deviation naively is not possible
        # on this data)
        X_scaled = MinMaxScaler().fit_transform(X)
        assert np.isfinite(X_scaled).all()
        # Define a ground truth on the scaled data
        ground_truth = rng.normal(size=n_features)
        y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
        assert_array_equal(np.unique(y), [0, 1])
        model = SGDClassifier(alpha=0.1, loss='squared_hinge', max_iter=500)
        # smoke test: model is stable on scaled data
        model.fit(X_scaled, y)
        assert np.isfinite(model.coef_).all()
        # model is numerically unstable on unscaled data and must raise a
        # ValueError whose message suggests rescaling the input
        msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
                     " Scaling input data with StandardScaler or MinMaxScaler"
                     " might help.")
        assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
    # Non-regression test for numerical stability: even on scaled
    # problems the gradient can explode for some losses, and fitting must
    # not trip any floating point error (np.errstate raises on all).
    model = SGDClassifier(loss='squared_hinge', max_iter=10, shuffle=True,
                          penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
                          eta0=0.001, random_state=0, tol=None)
    with np.errstate(all='raise'):
        model.fit(iris.data, iris.target)
    assert np.isfinite(model.coef_).all()
@pytest.mark.parametrize('penalty', ['l2', 'l1', 'elasticnet'])
def test_large_regularization(penalty):
    # Non-regression test: a huge regularization parameter must shrink
    # every coefficient to exactly zero without tripping a floating point
    # error (np.errstate raises on all).
    model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
                          penalty=penalty, shuffle=False,
                          tol=None, max_iter=6)
    with np.errstate(all='raise'):
        model.fit(iris.data, iris.target)
    assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def test_tol_parameter():
    """The tol parameter controls when iteration stops."""
    X = StandardScaler().fit_transform(iris.data)
    y = iris.target == 1
    # tol=None: run exactly max_iter epochs.
    model_0 = SGDClassifier(tol=None, random_state=0, max_iter=42)
    model_0.fit(X, y)
    assert model_0.n_iter_ == 42
    # tol=0: stop before max_iter but after a non-trivial epoch count.
    max_iter = 2000
    model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter)
    model_1.fit(X, y)
    assert max_iter > model_1.n_iter_
    assert model_1.n_iter_ > 5
    # A larger tol stops even earlier.
    model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter)
    model_2.fit(X, y)
    assert model_1.n_iter_ > model_2.n_iter_
    assert model_2.n_iter_ > 3
    # Strict tol with a tiny max_iter triggers a convergence warning.
    model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0)
    model_3 = assert_warns(ConvergenceWarning, model_3.fit, X, y)
    assert model_3.n_iter_ == 3
def _test_loss_common(loss_function, cases):
# Test the different loss functions
# cases is a list of (p, y, expected)
for p, y, expected_loss, expected_dloss in cases:
assert_almost_equal(loss_function.py_loss(p, y), expected_loss)
assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss)
def test_loss_hinge():
    """Hinge loss with margin 1 (SVM hinge) and margin 0 (perceptron)."""
    # Cases are (p, y, expected_loss, expected_dloss).
    hinge_cases = [
        (1.1, 1.0, 0.0, 0.0), (-2.0, -1.0, 0.0, 0.0),
        (1.0, 1.0, 0.0, -1.0), (-1.0, -1.0, 0.0, 1.0), (0.5, 1.0, 0.5, -1.0),
        (2.0, -1.0, 3.0, 1.0), (-0.5, -1.0, 0.5, 1.0), (0.0, 1.0, 1, -1.0)
    ]
    _test_loss_common(sgd_fast.Hinge(1.0), hinge_cases)
    perceptron_cases = [
        (1.0, 1.0, 0.0, 0.0), (-0.1, -1.0, 0.0, 0.0),
        (0.0, 1.0, 0.0, -1.0), (0.0, -1.0, 0.0, 1.0), (0.5, -1.0, 0.5, 1.0),
        (2.0, -1.0, 2.0, 1.0), (-0.5, 1.0, 0.5, -1.0), (-1.0, 1.0, 1.0, -1.0),
    ]
    _test_loss_common(sgd_fast.Hinge(0.0), perceptron_cases)
def test_gradient_squared_hinge():
    """SquaredHinge loss and gradient on hand-computed cases."""
    # Cases are (p, y, expected_loss, expected_dloss).
    cases = [
        (1.0, 1.0, 0.0, 0.0), (-2.0, -1.0, 0.0, 0.0), (1.0, -1.0, 4.0, 4.0),
        (-1.0, 1.0, 4.0, -4.0), (0.5, 1.0, 0.25, -1.0), (0.5, -1.0, 2.25, 3.0)
    ]
    _test_loss_common(sgd_fast.SquaredHinge(1.0), cases)
def test_loss_log():
    # Test Log (logistic loss) against hand-computed values.
    loss = sgd_fast.Log()
    cases = [
        # (p, y, expected_loss, expected_dloss)
        (1.0, 1.0, np.log(1.0 + np.exp(-1.0)), -1.0 / (np.exp(1.0) + 1.0)),
        (1.0, -1.0, np.log(1.0 + np.exp(1.0)), 1.0 / (np.exp(-1.0) + 1.0)),
        (-1.0, -1.0, np.log(1.0 + np.exp(-1.0)), 1.0 / (np.exp(1.0) + 1.0)),
        (-1.0, 1.0, np.log(1.0 + np.exp(1.0)), -1.0 / (np.exp(-1.0) + 1.0)),
        (0.0, 1.0, np.log(2), -0.5), (0.0, -1.0, np.log(2), 0.5),
        (17.9, -1.0, 17.9, 1.0), (-17.9, 1.0, 17.9, -1.0),
    ]
    _test_loss_common(loss, cases)
    # For |p*y| > 18 the expected values are the asymptotic expressions
    # exp(-p*y) resp. p*y; presumably the implementation switches to them
    # to avoid exp() under-/overflow -- confirm against sgd_fast.Log.
    assert_almost_equal(loss.py_dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
    assert_almost_equal(loss.py_loss(18.1, 1.0), np.exp(-18.1), 16)
    assert_almost_equal(loss.py_dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
    assert_almost_equal(loss.py_loss(-18.1, 1.0), 18.1, 16)
def test_loss_squared_loss():
    """SquaredLoss on hand-computed cases."""
    # Cases are (p, y, expected_loss, expected_dloss).
    cases = [
        (0.0, 0.0, 0.0, 0.0), (1.0, 1.0, 0.0, 0.0), (1.0, 0.0, 0.5, 1.0),
        (0.5, -1.0, 1.125, 1.5), (-2.5, 2.0, 10.125, -4.5)
    ]
    _test_loss_common(sgd_fast.SquaredLoss(), cases)
def test_loss_huber():
    """Huber loss (delta=0.1) on hand-computed cases."""
    # Cases are (p, y, expected_loss, expected_dloss).
    cases = [
        (0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.005, 0.1), (0.0, 0.1, 0.005, -0.1),
        (3.95, 4.0, 0.00125, -0.05), (5.0, 2.0, 0.295, 0.1),
        (-1.0, 5.0, 0.595, -0.1)
    ]
    _test_loss_common(sgd_fast.Huber(0.1), cases)
def test_loss_modified_huber():
    """ModifiedHuber loss on hand-computed cases."""
    # Cases are (p, y, expected_loss, expected_dloss).
    cases = [
        (1.0, 1.0, 0.0, 0.0), (-1.0, -1.0, 0.0, 0.0), (2.0, 1.0, 0.0, 0.0),
        (0.0, 1.0, 1.0, -2.0), (-1.0, 1.0, 4.0, -4.0), (0.5, -1.0, 2.25, 3.0),
        (-2.0, 1.0, 8, -4.0), (-3.0, 1.0, 12, -4.0)
    ]
    _test_loss_common(sgd_fast.ModifiedHuber(), cases)
def test_loss_epsilon_insensitive():
    """EpsilonInsensitive loss (epsilon=0.1) on hand-computed cases."""
    # Cases are (p, y, expected_loss, expected_dloss).
    cases = [
        (0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.0, 0.0), (-2.05, -2.0, 0.0, 0.0),
        (3.05, 3.0, 0.0, 0.0), (2.2, 2.0, 0.1, 1.0), (2.0, -1.0, 2.9, 1.0),
        (2.0, 2.2, 0.1, -1.0), (-2.0, 1.0, 2.9, -1.0)
    ]
    _test_loss_common(sgd_fast.EpsilonInsensitive(0.1), cases)
def test_loss_squared_epsilon_insensitive():
    """SquaredEpsilonInsensitive loss (epsilon=0.1) on hand-computed cases."""
    # Cases are (p, y, expected_loss, expected_dloss).
    cases = [
        (0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.0, 0.0), (-2.05, -2.0, 0.0, 0.0),
        (3.05, 3.0, 0.0, 0.0), (2.2, 2.0, 0.01, 0.2), (2.0, -1.0, 8.41, 5.8),
        (2.0, 2.2, 0.01, -0.2), (-2.0, 1.0, 8.41, -5.8)
    ]
    _test_loss_common(sgd_fast.SquaredEpsilonInsensitive(0.1), cases)
def test_multi_thread_multi_class_and_early_stopping():
    # Non-regression test for a bad interaction between the early
    # stopping internal attribute and thread-based parallelism.
    clf = SGDClassifier(alpha=1e-3, tol=1e-3, max_iter=1000,
                        early_stopping=True, n_iter_no_change=100,
                        random_state=0, n_jobs=2)
    clf.fit(iris.data, iris.target)
    # Early stopping must kick in shortly after the patience window ...
    assert clf.n_iter_ > clf.n_iter_no_change
    assert clf.n_iter_ < clf.n_iter_no_change + 20
    # ... while the model remains a reasonable fit.
    assert clf.score(iris.data, iris.target) > 0.8
def test_multi_core_gridsearch_and_early_stopping():
    # Non-regression test for a bad interaction between the early
    # stopping internal attribute and process-based multi-core
    # parallelism during a randomized search.
    param_grid = {
        'alpha': np.logspace(-4, 4, 9),
        'n_iter_no_change': [5, 10, 50],
    }
    base_clf = SGDClassifier(tol=1e-2, max_iter=1000, early_stopping=True,
                             random_state=0)
    search = RandomizedSearchCV(base_clf, param_grid, n_iter=3, n_jobs=2,
                                random_state=0)
    search.fit(iris.data, iris.target)
    assert search.best_score_ > 0.8
@pytest.mark.parametrize("backend",
["loky", "multiprocessing", "threading"])
def test_SGDClassifier_fit_for_all_backends(backend):
# This is a non-regression smoke test. In the multi-class case,
# SGDClassifier.fit fits each class in a one-versus-all fashion using
# joblib.Parallel. However, each OvA step updates the coef_ attribute of
# the estimator in-place. Internally, SGDClassifier calls Parallel using
# require='sharedmem'. This test makes sure SGDClassifier.fit works
# consistently even when the user asks for a backend that does not provide
# sharedmem semantics.
# We further test a case where memmapping would have been used if
# SGDClassifier.fit was called from a loky or multiprocessing backend. In
# this specific case, in-place modification of clf.coef_ would have caused
# a segmentation fault when trying to write in a readonly memory mapped
# buffer.
if (parse_version(joblib.__version__) < parse_version('0.12')
and backend == 'loky'):
pytest.skip('loky backend does not exist in joblib <0.12')
random_state = np.random.RandomState(42)
# Create a classification problem with 50000 features and 20 classes. Using
# loky or multiprocessing this make the clf.coef_ exceed the threshold
# above which memmaping is used in joblib and loky (1MB as of 2018/11/1).
X = sp.random(500, 2000, density=0.02, format='csr',
random_state=random_state)
y = random_state.choice(20, 500)
# Begin by fitting a SGD classifier sequentially
clf_sequential = SGDClassifier(max_iter=1000, n_jobs=1,
random_state=42)
clf_sequential.fit(X, y)
# Fit a SGDClassifier using the specified backend, and make sure the
# coefficients are equal to those obtained using a sequential fit
clf_parallel = SGDClassifier(max_iter=1000, n_jobs=4,
random_state=42)
with joblib.parallel_backend(backend=backend):
clf_parallel.fit(X, y)
assert_array_almost_equal(clf_sequential.coef_, clf_parallel.coef_)
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"sklearn.preprocessing.LabelEncoder",
"sklearn.linear_model._sgd_fast.Huber",
"sklearn.linear_model.SGDRegressor",
"pickle.dumps",
"sklearn.utils.fixes.parse_version",
"sklearn.utils._testing.assert_array_equal",
"numpy.log",
"numpy.argsort",
"nump... | [((2703, 2767), 'numpy.array', 'np.array', (['[[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]'], {}), '([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])\n', (2711, 2767), True, 'import numpy as np\n'), ((2795, 2831), 'numpy.array', 'np.array', (['[[-1, -1], [2, 2], [3, 2]]'], {}), '([[-1, -1], [2, 2], [3, 2]])\n', (2803, 2831), True, 'import numpy as np\n'), ((2899, 3013), 'numpy.array', 'np.array', (['[[-1, 1], [-0.75, 0.5], [-1.5, 1.5], [1, 1], [0.75, 0.5], [1.5, 1.5], [-1, \n -1], [0, -0.5], [1, -1]]'], {}), '([[-1, 1], [-0.75, 0.5], [-1.5, 1.5], [1, 1], [0.75, 0.5], [1.5, \n 1.5], [-1, -1], [0, -0.5], [1, -1]])\n', (2907, 3013), True, 'import numpy as np\n'), ((3091, 3131), 'numpy.array', 'np.array', (['[[-1.5, 0.5], [1, 2], [0, -2]]'], {}), '([[-1.5, 0.5], [1, 2], [0, -2]])\n', (3099, 3131), True, 'import numpy as np\n'), ((3193, 3371), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0,\n 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, \n 0, 1, 0, 0]]'], {}), '([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0,\n 1, 0, 0, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 0]])\n', (3201, 3371), True, 'import numpy as np\n'), ((3414, 3448), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 2, 2, 2, 2]'], {}), '([1, 1, 1, 1, 2, 2, 2, 2])\n', (3422, 3448), True, 'import numpy as np\n'), ((3515, 3736), 'numpy.array', 'np.array', (['[[1, 0.9, 0.8, 0, 0, 0], [1, 0.84, 0.98, 0, 0, 0], [1, 0.96, 0.88, 0, 0, 0],\n [1, 0.91, 0.99, 0, 0, 0], [0, 0, 0, 0.89, 0.91, 1], [0, 0, 0, 0.79, \n 0.84, 1], [0, 0, 0, 0.91, 0.95, 1], [0, 0, 0, 0.93, 1, 1]]'], {}), '([[1, 0.9, 0.8, 0, 0, 0], [1, 0.84, 0.98, 0, 0, 0], [1, 0.96, 0.88,\n 0, 0, 0], [1, 0.91, 0.99, 0, 0, 0], [0, 0, 0, 0.89, 0.91, 1], [0, 0, 0,\n 0.79, 0.84, 1], [0, 0, 0, 0.91, 0.95, 1], [0, 0, 0, 0.93, 1, 1]])\n', (3523, 3736), True, 'import numpy as np\n'), ((3766, 3800), 
'numpy.array', 'np.array', (['[1, 1, 1, 1, 2, 2, 2, 2]'], {}), '([1, 1, 1, 1, 2, 2, 2, 2])\n', (3774, 3800), True, 'import numpy as np\n'), ((3809, 3829), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (3827, 3829), False, 'from sklearn import linear_model, datasets, metrics\n'), ((3901, 3965), 'numpy.array', 'np.array', (['[[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]'], {}), '([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])\n', (3909, 3965), True, 'import numpy as np\n'), ((5176, 5284), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (5199, 5284), False, 'import pytest\n'), ((5464, 5572), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (5487, 5572), False, 'import pytest\n'), ((5778, 5886), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (5801, 5886), False, 'import pytest\n'), ((6859, 6967), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (6882, 6967), False, 'import pytest\n'), ((7000, 7085), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lr"""', "['constant', 'optimal', 'invscaling', 'adaptive']"], {}), "('lr', ['constant', 'optimal', 'invscaling', 'adaptive']\n )\n", (7023, 7085), False, 'import pytest\n'), ((7179, 7287), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (7202, 7287), False, 'import pytest\n'), ((7558, 7666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (7581, 7666), False, 'import pytest\n'), ((7983, 8091), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (8006, 8091), False, 'import pytest\n'), ((8656, 8719), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SGDRegressor]'], {}), "('klass', [SGDClassifier, SGDRegressor])\n", (8679, 8719), False, 'import pytest\n'), ((9067, 9175), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (9090, 9175), False, 'import pytest\n'), ((9701, 9809), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (9724, 9809), False, 'import pytest\n'), ((10794, 10902), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (10817, 10902), False, 'import pytest\n'), ((11202, 11310), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (11225, 11310), False, 'import pytest\n'), ((11662, 11770), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (11685, 11770), False, 'import pytest\n'), ((12155, 12263), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (12178, 12263), False, 'import pytest\n'), ((13500, 13608), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (13523, 13608), False, 'import pytest\n'), ((14168, 14276), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier,\n SGDRegressor, SparseSGDRegressor])\n", (14191, 14276), False, 'import pytest\n'), ((14669, 14739), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (14692, 14739), False, 'import pytest\n'), ((15152, 15222), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (15175, 15222), False, 'import pytest\n'), ((15380, 15450), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (15403, 15450), False, 'import pytest\n'), ((15640, 15710), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (15663, 15710), False, 'import pytest\n'), ((15880, 15950), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (15903, 15950), False, 'import pytest\n'), ((16093, 16163), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (16116, 16163), False, 'import pytest\n'), ((16305, 16375), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (16328, 16375), False, 'import pytest\n'), ((16531, 16601), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (16554, 16601), False, 'import pytest\n'), ((16757, 16827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (16780, 16827), False, 'import pytest\n'), ((16975, 17045), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (16998, 17045), False, 'import pytest\n'), ((17263, 17333), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (17286, 17333), False, 'import pytest\n'), 
((17549, 17619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (17572, 17619), False, 'import pytest\n'), ((17847, 17917), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (17870, 17917), False, 'import pytest\n'), ((18099, 18169), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (18122, 18169), False, 'import pytest\n'), ((18318, 18388), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (18341, 18388), False, 'import pytest\n'), ((19341, 19411), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (19364, 19411), False, 'import pytest\n'), ((19724, 19794), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (19747, 19794), False, 'import pytest\n'), ((19991, 20061), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (20014, 20061), False, 'import pytest\n'), ((20859, 20929), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (20882, 20929), False, 'import pytest\n'), ((21252, 21322), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', 
[SGDClassifier, SparseSGDClassifier])\n", (21275, 21322), False, 'import pytest\n'), ((22113, 22183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (22136, 22183), False, 'import pytest\n'), ((22536, 22606), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (22559, 22606), False, 'import pytest\n'), ((22969, 23039), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (22992, 23039), False, 'import pytest\n'), ((23675, 23745), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (23698, 23745), False, 'import pytest\n'), ((24799, 24869), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (24822, 24869), False, 'import pytest\n'), ((27482, 27552), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (27505, 27552), False, 'import pytest\n'), ((28323, 28393), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (28346, 28393), False, 'import pytest\n'), ((29096, 29166), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (29119, 29166), False, 'import pytest\n'), ((29729, 29799), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (29752, 29799), False, 'import pytest\n'), ((30018, 30088), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (30041, 30088), False, 'import pytest\n'), ((30313, 30383), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (30336, 30383), False, 'import pytest\n'), ((31036, 31106), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (31059, 31106), False, 'import pytest\n'), ((32902, 32972), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (32925, 32972), False, 'import pytest\n'), ((33589, 33659), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (33612, 33659), False, 'import pytest\n'), ((33956, 34026), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (33979, 34026), False, 'import pytest\n'), ((34197, 34267), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (34220, 34267), False, 'import pytest\n'), ((34837, 34907), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (34860, 34907), False, 'import pytest\n'), 
((35416, 35486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (35439, 35486), False, 'import pytest\n'), ((35925, 35995), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (35948, 35995), False, 'import pytest\n'), ((36340, 36410), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (36363, 36410), False, 'import pytest\n'), ((36412, 36497), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lr"""', "['constant', 'optimal', 'invscaling', 'adaptive']"], {}), "('lr', ['constant', 'optimal', 'invscaling', 'adaptive']\n )\n", (36435, 36497), False, 'import pytest\n'), ((37158, 37228), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (37181, 37228), False, 'import pytest\n'), ((38082, 38152), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (38105, 38152), False, 'import pytest\n'), ((38242, 38312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDClassifier, SparseSGDClassifier]'], {}), "('klass', [SGDClassifier, SparseSGDClassifier])\n", (38265, 38312), False, 'import pytest\n'), ((38769, 38837), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (38792, 38837), False, 'import pytest\n'), ((39055, 39123), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), 
"('klass', [SGDRegressor, SparseSGDRegressor])\n", (39078, 39123), False, 'import pytest\n'), ((40003, 40071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (40026, 40071), False, 'import pytest\n'), ((41074, 41142), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (41097, 41142), False, 'import pytest\n'), ((41921, 41989), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (41944, 41989), False, 'import pytest\n'), ((42679, 42747), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (42702, 42747), False, 'import pytest\n'), ((43513, 43581), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (43536, 43581), False, 'import pytest\n'), ((44275, 44343), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (44298, 44343), False, 'import pytest\n'), ((45576, 45644), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (45599, 45644), False, 'import pytest\n'), ((46079, 46147), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (46102, 46147), False, 'import pytest\n'), ((46149, 46234), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lr"""', 
"['constant', 'optimal', 'invscaling', 'adaptive']"], {}), "('lr', ['constant', 'optimal', 'invscaling', 'adaptive']\n )\n", (46172, 46234), False, 'import pytest\n'), ((46715, 46783), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[SGDRegressor, SparseSGDRegressor]'], {}), "('klass', [SGDRegressor, SparseSGDRegressor])\n", (46738, 46783), False, 'import pytest\n'), ((49995, 50057), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""penalty"""', "['l2', 'l1', 'elasticnet']"], {}), "('penalty', ['l2', 'l1', 'elasticnet'])\n", (50018, 50057), False, 'import pytest\n'), ((57150, 57226), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""backend"""', "['loky', 'multiprocessing', 'threading']"], {}), "('backend', ['loky', 'multiprocessing', 'threading'])\n", (57173, 57226), False, 'import pytest\n'), ((2317, 2353), 'sklearn.linear_model.SGDClassifier', 'linear_model.SGDClassifier', ([], {}), '(**kwargs)\n', (2343, 2353), False, 'from sklearn import linear_model, datasets, metrics\n'), ((2422, 2457), 'sklearn.linear_model.SGDRegressor', 'linear_model.SGDRegressor', ([], {}), '(**kwargs)\n', (2447, 2457), False, 'from sklearn import linear_model, datasets, metrics\n'), ((4447, 4467), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (4455, 4467), True, 'import numpy as np\n'), ((6669, 6717), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf3.coef_', 'clf.coef_'], {}), '(clf3.coef_, clf.coef_)\n', (6694, 6717), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((6806, 6855), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf3.coef_', 'clf2.coef_'], {}), '(clf3.coef_, clf2.coef_)\n', (6831, 6855), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((7808, 7818), 'sklearn.base.clone', 'clone', (['clf'], {}), '(clf)\n', (7813, 7818), False, 'from sklearn.base import clone, is_classifier\n'), 
((7938, 7979), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['clf.coef_', 'clf2.coef_'], {}), '(clf.coef_, clf2.coef_)\n', (7956, 7979), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((9566, 9627), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf1.coef_', 'clf2.coef_'], {'decimal': '(16)'}), '(clf1.coef_, clf2.coef_, decimal=16)\n', (9591, 9627), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((9632, 9697), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['clf1.intercept_', 'clf2.intercept_'], {'decimal': '(16)'}), '(clf1.intercept_, clf2.intercept_, decimal=16)\n', (9651, 9697), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((9936, 9947), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (9944, 9947), True, 'import numpy as np\n'), ((10723, 10790), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['clf1.intercept_', 'average_intercept'], {'decimal': '(16)'}), '(clf1.intercept_, average_intercept, decimal=16)\n', (10742, 10790), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((13016, 13035), 'sklearn.base.is_classifier', 'is_classifier', (['clf2'], {}), '(clf2)\n', (13029, 13035), False, 'from sklearn.base import clone, is_classifier\n'), ((13333, 13351), 'numpy.sort', 'np.sort', (['idx_train'], {}), '(idx_train)\n', (13340, 13351), True, 'import numpy as np\n'), ((13454, 13496), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['clf1.coef_', 'clf2.coef_'], {}), '(clf1.coef_, clf2.coef_)\n', (13472, 13496), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((18587, 18611), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (18608, 18611), True, 'import numpy as np\n'), ((18961, 18973), 'numpy.dot', 'np.dot', (['X', 'w'], {}), '(X, w)\n', (18967, 18973), True, 'import numpy as np\n'), ((18982, 18992), 
'numpy.sign', 'np.sign', (['y'], {}), '(y)\n', (18989, 18992), True, 'import numpy as np\n'), ((19141, 19206), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf.coef_', 'average_weights'], {'decimal': '(14)'}), '(clf.coef_, average_weights, decimal=14)\n', (19166, 19206), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((19271, 19337), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['clf.intercept_', 'average_intercept'], {'decimal': '(14)'}), '(clf.intercept_, average_intercept, decimal=14)\n', (19290, 19337), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((21210, 21248), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['pred', 'true_result2'], {}), '(pred, true_result2)\n', (21228, 21248), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((21654, 21666), 'numpy.array', 'np.array', (['Y2'], {}), '(Y2)\n', (21662, 21666), True, 'import numpy as np\n'), ((21704, 21720), 'numpy.unique', 'np.unique', (['np_Y2'], {}), '(np_Y2)\n', (21713, 21720), True, 'import numpy as np\n'), ((22494, 22532), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['pred', 'true_result2'], {}), '(pred, true_result2)\n', (22512, 22532), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((22927, 22965), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['pred', 'true_result2'], {}), '(pred, true_result2)\n', (22945, 22965), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((26117, 26134), 'numpy.all', 'np.all', (['(p[0] >= 0)'], {}), '(p[0] >= 0)\n', (26123, 26134), True, 'import numpy as np\n'), ((27331, 27345), 'numpy.all', 'np.all', (['(d < -1)'], {}), '(d < -1)\n', (27337, 27345), True, 'import numpy as np\n'), ((27632, 27657), 'numpy.random.RandomState', 'np.random.RandomState', (['(13)'], {}), '(13)\n', (27653, 27657), True, 'import numpy as np\n'), ((27668, 27680), 
'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (27677, 27680), True, 'import numpy as np\n'), ((27963, 27990), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['pred', 'Y'], {}), '(pred, Y)\n', (27981, 27990), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((28060, 28082), 'scipy.sparse.issparse', 'sp.issparse', (['clf.coef_'], {}), '(clf.coef_)\n', (28071, 28082), True, 'import scipy.sparse as sp\n'), ((28113, 28140), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['pred', 'Y'], {}), '(pred, Y)\n', (28131, 28140), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((28239, 28261), 'scipy.sparse.issparse', 'sp.issparse', (['clf.coef_'], {}), '(clf.coef_)\n', (28250, 28261), True, 'import scipy.sparse as sp\n'), ((28292, 28319), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['pred', 'Y'], {}), '(pred, Y)\n', (28310, 28319), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((28459, 28532), 'numpy.array', 'np.array', (['[[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]'], {}), '([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])\n', (28467, 28532), True, 'import numpy as np\n'), ((29664, 29725), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['clf.coef_', 'clf_weighted.coef_'], {'decimal': '(2)'}), '(clf.coef_, clf_weighted.coef_, decimal=2)\n', (29683, 29725), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((30532, 30556), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (30553, 30556), True, 'import numpy as np\n'), ((30635, 30658), 'numpy.copy', 'np.copy', (['sample_weights'], {}), '(sample_weights)\n', (30642, 30658), True, 'import numpy as np\n'), ((30989, 31032), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['clf1.coef_', 'clf2.coef_'], {}), '(clf1.coef_, clf2.coef_)\n', (31008, 31032), False, 'from 
sklearn.utils._testing import assert_almost_equal\n'), ((31318, 31326), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (31323, 31326), False, 'from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler\n'), ((31337, 31358), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (31346, 31358), True, 'import numpy as np\n'), ((31369, 31393), 'numpy.random.RandomState', 'np.random.RandomState', (['(6)'], {}), '(6)\n', (31390, 31393), True, 'import numpy as np\n'), ((31619, 31659), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['f1', '(0.96)'], {'decimal': '(1)'}), '(f1, 0.96, decimal=1)\n', (31638, 31659), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((31952, 31992), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['f1', '(0.96)'], {'decimal': '(1)'}), '(f1, 0.96, decimal=1)\n', (31971, 31992), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((32092, 32151), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf.coef_', 'clf_balanced.coef_', '(6)'], {}), '(clf.coef_, clf_balanced.coef_, 6)\n', (32117, 32151), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((32277, 32304), 'numpy.vstack', 'np.vstack', (['([X] + [X_0] * 10)'], {}), '([X] + [X_0] * 10)\n', (32286, 32304), True, 'import numpy as np\n'), ((32324, 32356), 'numpy.concatenate', 'np.concatenate', (['([y] + [y_0] * 10)'], {}), '([y] + [y_0] * 10)\n', (32338, 32356), True, 'import numpy as np\n'), ((33054, 33127), 'numpy.array', 'np.array', (['[[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]'], {}), '([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])\n', (33062, 33127), True, 'import numpy as np\n'), ((34374, 34386), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (34383, 34386), True, 'import numpy as np\n'), ((34794, 34833), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', 
(['y_pred', 'true_result'], {}), '(y_pred, true_result)\n', (34812, 34833), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((35019, 35032), 'numpy.unique', 'np.unique', (['Y2'], {}), '(Y2)\n', (35028, 35032), True, 'import numpy as np\n'), ((35627, 35640), 'numpy.unique', 'np.unique', (['Y2'], {}), '(Y2)\n', (35636, 35640), True, 'import numpy as np\n'), ((37283, 37307), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (37304, 37307), True, 'import numpy as np\n'), ((39321, 39345), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (39342, 39345), True, 'import numpy as np\n'), ((39483, 39495), 'numpy.dot', 'np.dot', (['X', 'w'], {}), '(X, w)\n', (39489, 39495), True, 'import numpy as np\n'), ((39803, 39868), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf.coef_', 'average_weights'], {'decimal': '(16)'}), '(clf.coef_, average_weights, decimal=16)\n', (39828, 39868), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((39933, 39999), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['clf.intercept_', 'average_intercept'], {'decimal': '(16)'}), '(clf.intercept_, average_intercept, decimal=16)\n', (39952, 39999), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((40265, 40289), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (40286, 40289), True, 'import numpy as np\n'), ((40427, 40439), 'numpy.dot', 'np.dot', (['X', 'w'], {}), '(X, w)\n', (40433, 40439), True, 'import numpy as np\n'), ((40871, 40936), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf.coef_', 'average_weights'], {'decimal': '(16)'}), '(clf.coef_, average_weights, decimal=16)\n', (40896, 40936), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((41001, 41070), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', 
(['clf.intercept_[0]', 'average_intercept'], {'decimal': '(16)'}), '(clf.intercept_[0], average_intercept, decimal=16)\n', (41020, 41070), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((41721, 41786), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf.coef_', 'average_weights'], {'decimal': '(16)'}), '(clf.coef_, average_weights, decimal=16)\n', (41746, 41786), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((41851, 41917), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['clf.intercept_', 'average_intercept'], {'decimal': '(16)'}), '(clf.intercept_, average_intercept, decimal=16)\n', (41870, 41917), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((42082, 42106), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (42103, 42106), True, 'import numpy as np\n'), ((42842, 42866), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (42863, 42866), True, 'import numpy as np\n'), ((43666, 43690), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (43687, 43690), True, 'import numpy as np\n'), ((44501, 44525), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (44522, 44525), True, 'import numpy as np\n'), ((44761, 44789), 'numpy.dot', 'np.dot', (['X', 'ground_truth_coef'], {}), '(X, ground_truth_coef)\n', (44767, 44789), True, 'import numpy as np\n'), ((46658, 46711), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_pred', 'y_pred2'], {'decimal': '(2)'}), '(y_pred, y_pred2, decimal=2)\n', (46683, 46711), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((47033, 47135), 'sklearn.datasets.make_classification', 'datasets.make_classification', ([], {'n_samples': '(1000)', 'n_features': '(100)', 'n_informative': '(20)', 'random_state': '(1234)'}), '(n_samples=1000, n_features=100, 
n_informative=\n 20, random_state=1234)\n', (47061, 47135), False, 'from sklearn import linear_model, datasets, metrics\n'), ((47609, 47662), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['est_en.coef_', 'est_l1.coef_'], {}), '(est_en.coef_, est_l1.coef_)\n', (47634, 47662), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((48061, 48114), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['est_en.coef_', 'est_l2.coef_'], {}), '(est_en.coef_, est_l2.coef_)\n', (48086, 48114), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((51559, 51610), 'sklearn.utils._testing.assert_warns', 'assert_warns', (['ConvergenceWarning', 'model_3.fit', 'X', 'y'], {}), '(ConvergenceWarning, model_3.fit, X, y)\n', (51571, 51610), False, 'from sklearn.utils._testing import assert_warns\n'), ((52058, 52077), 'sklearn.linear_model._sgd_fast.Hinge', 'sgd_fast.Hinge', (['(1.0)'], {}), '(1.0)\n', (52072, 52077), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((52417, 52436), 'sklearn.linear_model._sgd_fast.Hinge', 'sgd_fast.Hinge', (['(0.0)'], {}), '(0.0)\n', (52431, 52436), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((52822, 52848), 'sklearn.linear_model._sgd_fast.SquaredHinge', 'sgd_fast.SquaredHinge', (['(1.0)'], {}), '(1.0)\n', (52843, 52848), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((53173, 53187), 'sklearn.linear_model._sgd_fast.Log', 'sgd_fast.Log', ([], {}), '()\n', (53185, 53187), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((54069, 54091), 'sklearn.linear_model._sgd_fast.SquaredLoss', 'sgd_fast.SquaredLoss', ([], {}), '()\n', (54089, 54091), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((54381, 54400), 'sklearn.linear_model._sgd_fast.Huber', 'sgd_fast.Huber', (['(0.1)'], {}), '(0.1)\n', (54395, 54400), True, 'from sklearn.linear_model import _sgd_fast 
as sgd_fast\n'), ((54766, 54790), 'sklearn.linear_model._sgd_fast.ModifiedHuber', 'sgd_fast.ModifiedHuber', ([], {}), '()\n', (54788, 54790), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((55181, 55213), 'sklearn.linear_model._sgd_fast.EpsilonInsensitive', 'sgd_fast.EpsilonInsensitive', (['(0.1)'], {}), '(0.1)\n', (55208, 55213), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((55619, 55658), 'sklearn.linear_model._sgd_fast.SquaredEpsilonInsensitive', 'sgd_fast.SquaredEpsilonInsensitive', (['(0.1)'], {}), '(0.1)\n', (55653, 55658), True, 'from sklearn.linear_model import _sgd_fast as sgd_fast\n'), ((56968, 57039), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['clf', 'param_grid'], {'n_iter': '(3)', 'n_jobs': '(2)', 'random_state': '(0)'}), '(clf, param_grid, n_iter=3, n_jobs=2, random_state=0)\n', (56986, 57039), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((58289, 58314), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (58310, 58314), True, 'import numpy as np\n'), ((58557, 58632), 'scipy.sparse.random', 'sp.random', (['(500)', '(2000)'], {'density': '(0.02)', 'format': '"""csr"""', 'random_state': 'random_state'}), "(500, 2000, density=0.02, format='csr', random_state=random_state)\n", (58566, 58632), True, 'import scipy.sparse as sp\n'), ((59221, 59288), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['clf_sequential.coef_', 'clf_parallel.coef_'], {}), '(clf_sequential.coef_, clf_parallel.coef_)\n', (59246, 59288), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((1242, 1258), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X'], {}), '(X)\n', (1255, 1258), True, 'import scipy.sparse as sp\n'), ((1364, 1380), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X'], {}), '(X)\n', (1377, 1380), True, 'import scipy.sparse as sp\n'), ((1484, 1500), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', 
(['X'], {}), '(X)\n', (1497, 1500), True, 'import scipy.sparse as sp\n'), ((1590, 1606), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X'], {}), '(X)\n', (1603, 1606), True, 'import scipy.sparse as sp\n'), ((1753, 1769), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X'], {}), '(X)\n', (1766, 1769), True, 'import scipy.sparse as sp\n'), ((1785, 1839), 'sklearn.linear_model.SGDRegressor.fit', 'linear_model.SGDRegressor.fit', (['self', 'X', 'y', '*args'], {}), '(self, X, y, *args, **kw)\n', (1814, 1839), False, 'from sklearn import linear_model, datasets, metrics\n'), ((1899, 1915), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X'], {}), '(X)\n', (1912, 1915), True, 'import scipy.sparse as sp\n'), ((1931, 1993), 'sklearn.linear_model.SGDRegressor.partial_fit', 'linear_model.SGDRegressor.partial_fit', (['self', 'X', 'y', '*args'], {}), '(self, X, y, *args, **kw)\n', (1968, 1993), False, 'from sklearn import linear_model, datasets, metrics\n'), ((2091, 2107), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X'], {}), '(X)\n', (2104, 2107), True, 'import scipy.sparse as sp\n'), ((2123, 2188), 'sklearn.linear_model.SGDRegressor.decision_function', 'linear_model.SGDRegressor.decision_function', (['self', 'X', '*args'], {}), '(self, X, *args, **kw)\n', (2166, 2188), False, 'from sklearn import linear_model, datasets, metrics\n'), ((4363, 4383), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (4371, 4383), True, 'import numpy as np\n'), ((4713, 4735), 'numpy.dot', 'np.dot', (['entry', 'weights'], {}), '(entry, weights)\n', (4719, 4735), True, 'import numpy as np\n'), ((5409, 5434), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5422, 5434), False, 'import pytest\n'), ((5701, 5726), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5714, 5726), False, 'import pytest\n'), ((6009, 6034), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6022, 6034), False, 'import pytest\n'), 
((7445, 7456), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (7453, 7456), True, 'import numpy as np\n'), ((7505, 7530), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7518, 7530), False, 'import pytest\n'), ((9342, 9361), 'sklearn.base.is_classifier', 'is_classifier', (['clf1'], {}), '(clf1)\n', (9355, 9361), False, 'from sklearn.base import clone, is_classifier\n'), ((11124, 11149), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11137, 11149), False, 'import pytest\n'), ((13050, 13122), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'test_size': 'validation_fraction', 'random_state': 'seed'}), '(test_size=validation_fraction, random_state=seed)\n', (13072, 13122), False, 'from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit\n'), ((13182, 13244), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'test_size': 'validation_fraction', 'random_state': 'seed'}), '(test_size=validation_fraction, random_state=seed)\n', (13194, 13244), False, 'from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit\n'), ((14507, 14532), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14520, 14532), False, 'import pytest\n'), ((15322, 15347), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15335, 15347), False, 'import pytest\n'), ((15569, 15594), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15582, 15594), False, 'import pytest\n'), ((15802, 15827), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15815, 15827), False, 'import pytest\n'), ((16032, 16057), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16045, 16057), False, 'import pytest\n'), ((16244, 16269), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16257, 16269), False, 'import pytest\n'), ((16463, 16488), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16476, 16488), False, 'import pytest\n'), ((16688, 16713), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16701, 16713), False, 'import pytest\n'), ((16911, 16936), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16924, 16936), False, 'import pytest\n'), ((17194, 17218), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (17207, 17218), False, 'import pytest\n'), ((17467, 17492), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17480, 17492), False, 'import pytest\n'), ((17760, 17785), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17773, 17785), False, 'import pytest\n'), ((18016, 18041), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18029, 18041), False, 'import pytest\n'), ((19929, 19954), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19942, 19954), False, 'import pytest\n'), ((21773, 21796), 'numpy.ones', 'np.ones', (['np_Y2.shape[0]'], {}), '(np_Y2.shape[0])\n', (21780, 21796), True, 'import numpy as np\n'), ((21910, 21975), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['average_coef', 'clf.coef_[i]'], {'decimal': '(16)'}), '(average_coef, clf.coef_[i], decimal=16)\n', (21935, 21975), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((21984, 22053), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['average_intercept', 'clf.intercept_[i]'], {'decimal': '(16)'}), '(average_intercept, clf.intercept_[i], decimal=16)\n', (22003, 22053), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((23227, 23252), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23240, 23252), False, 'import pytest\n'), ((23482, 23507), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23495, 23507), 
False, 'import pytest\n'), ((26023, 26043), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (26032, 26043), True, 'import numpy as np\n'), ((26045, 26065), 'numpy.argmax', 'np.argmax', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (26054, 26065), True, 'import numpy as np\n'), ((26239, 26255), 'numpy.argsort', 'np.argsort', (['p[0]'], {}), '(p[0])\n', (26249, 26255), True, 'import numpy as np\n'), ((26257, 26273), 'numpy.argsort', 'np.argsort', (['d[0]'], {}), '(d[0])\n', (26267, 26273), True, 'import numpy as np\n'), ((26383, 26392), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (26389, 26392), True, 'import numpy as np\n'), ((26510, 26519), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (26516, 26519), True, 'import numpy as np\n'), ((27433, 27479), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p[0]', '([1 / 3.0] * 3)'], {}), '(p[0], [1 / 3.0] * 3)\n', (27458, 27479), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((27917, 27931), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (27925, 27931), True, 'import numpy as np\n'), ((28209, 28226), 'pickle.dumps', 'pickle.dumps', (['clf'], {}), '(clf)\n', (28221, 28226), False, 'import pickle\n'), ((28744, 28757), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (28752, 28757), True, 'import numpy as np\n'), ((29077, 29091), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (29085, 29091), True, 'import numpy as np\n'), ((29966, 29991), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (29979, 29991), False, 'import pytest\n'), ((30261, 30286), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (30274, 30286), False, 'import pytest\n'), ((32569, 32616), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y', 'y_pred'], {'average': '"""weighted"""'}), "(y, y_pred, average='weighted')\n", (32585, 32616), False, 'from sklearn import linear_model, datasets, metrics\n'), ((32844, 
32891), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y', 'y_pred'], {'average': '"""weighted"""'}), "(y, y_pred, average='weighted')\n", (32860, 32891), False, 'from sklearn import linear_model, datasets, metrics\n'), ((33304, 33317), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (33312, 33317), True, 'import numpy as np\n'), ((33570, 33584), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (33578, 33584), True, 'import numpy as np\n'), ((33876, 33901), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (33889, 33901), False, 'import pytest\n'), ((34135, 34160), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (34148, 34160), False, 'import pytest\n'), ((36831, 36844), 'numpy.unique', 'np.unique', (['Y_'], {}), '(Y_)\n', (36840, 36844), True, 'import numpy as np\n'), ((37101, 37154), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_pred', 'y_pred2'], {'decimal': '(2)'}), '(y_pred, y_pred2, decimal=2)\n', (37126, 37154), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((48159, 48183), 'numpy.errstate', 'np.errstate', ([], {'all': '"""raise"""'}), "(all='raise')\n", (48170, 48183), True, 'import numpy as np\n'), ((48264, 48288), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (48285, 48288), True, 'import numpy as np\n'), ((49416, 49476), 'sklearn.utils._testing.assert_raises_regexp', 'assert_raises_regexp', (['ValueError', 'msg_regxp', 'model.fit', 'X', 'y'], {}), '(ValueError, msg_regxp, model.fit, X, y)\n', (49436, 49476), False, 'from sklearn.utils._testing import assert_raises_regexp\n'), ((49882, 49906), 'numpy.errstate', 'np.errstate', ([], {'all': '"""raise"""'}), "(all='raise')\n", (49893, 49906), True, 'import numpy as np\n'), ((50392, 50416), 'numpy.errstate', 'np.errstate', ([], {'all': '"""raise"""'}), "(all='raise')\n", (50403, 50416), True, 'import numpy as np\n'), ((50503, 50529), 
'numpy.zeros_like', 'np.zeros_like', (['model.coef_'], {}), '(model.coef_)\n', (50516, 50529), True, 'import numpy as np\n'), ((53847, 53860), 'numpy.exp', 'np.exp', (['(-18.1)'], {}), '(-18.1)\n', (53853, 53860), True, 'import numpy as np\n'), ((56774, 56795), 'numpy.logspace', 'np.logspace', (['(-4)', '(4)', '(9)'], {}), '(-4, 4, 9)\n', (56785, 56795), True, 'import numpy as np\n'), ((58210, 58268), 'pytest.skip', 'pytest.skip', (['"""loky backend does not exist in joblib <0.12"""'], {}), "('loky backend does not exist in joblib <0.12')\n", (58221, 58268), False, 'import pytest\n'), ((59144, 59184), 'joblib.parallel_backend', 'joblib.parallel_backend', ([], {'backend': 'backend'}), '(backend=backend)\n', (59167, 59184), False, 'import joblib\n'), ((12533, 12560), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (12554, 12560), True, 'import numpy as np\n'), ((12862, 12889), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (12883, 12889), True, 'import numpy as np\n'), ((19976, 19986), 'numpy.ones', 'np.ones', (['(9)'], {}), '(9)\n', (19983, 19986), True, 'import numpy as np\n'), ((20842, 20854), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (20851, 20854), True, 'import numpy as np\n'), ((22330, 22346), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (22338, 22346), True, 'import numpy as np\n'), ((22375, 22386), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (22383, 22386), True, 'import numpy as np\n'), ((23387, 23403), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (23395, 23403), True, 'import numpy as np\n'), ((23656, 23670), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (23664, 23670), True, 'import numpy as np\n'), ((26932, 26952), 'numpy.argmax', 'np.argmax', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (26941, 26952), True, 'import numpy as np\n'), ((26956, 26976), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (26965, 26976), 
True, 'import numpy as np\n'), ((27050, 27070), 'numpy.argmin', 'np.argmin', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (27059, 27070), True, 'import numpy as np\n'), ((27074, 27094), 'numpy.argmin', 'np.argmin', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (27083, 27094), True, 'import numpy as np\n'), ((42115, 42149), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'n_samples'], {}), '(xmin, xmax, n_samples)\n', (42126, 42149), True, 'import numpy as np\n'), ((42875, 42909), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'n_samples'], {}), '(xmin, xmax, n_samples)\n', (42886, 42909), True, 'import numpy as np\n'), ((43699, 43733), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'n_samples'], {}), '(xmin, xmax, n_samples)\n', (43710, 43733), True, 'import numpy as np\n'), ((44940, 45016), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'alpha', 'l1_ratio': 'l1_ratio', 'fit_intercept': '(False)'}), '(alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False)\n', (44963, 45016), False, 'from sklearn import linear_model, datasets, metrics\n'), ((45455, 45523), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['cd.coef_', 'sgd.coef_'], {'decimal': '(2)', 'err_msg': 'err_msg'}), '(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg)\n', (45474, 45523), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((48926, 48938), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (48935, 48938), True, 'import numpy as np\n'), ((49961, 49985), 'numpy.isfinite', 'np.isfinite', (['model.coef_'], {}), '(model.coef_)\n', (49972, 49985), True, 'import numpy as np\n'), ((50621, 50637), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (50635, 50637), False, 'from sklearn.preprocessing import StandardScaler\n'), ((53575, 53584), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (53581, 53584), True, 'import numpy as np\n'), ((53605, 53614), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (53611, 
53614), True, 'import numpy as np\n'), ((53772, 53785), 'numpy.exp', 'np.exp', (['(-18.1)'], {}), '(-18.1)\n', (53778, 53785), True, 'import numpy as np\n'), ((53918, 53931), 'numpy.exp', 'np.exp', (['(-18.1)'], {}), '(-18.1)\n', (53924, 53931), True, 'import numpy as np\n'), ((58108, 58141), 'sklearn.utils.fixes.parse_version', 'parse_version', (['joblib.__version__'], {}), '(joblib.__version__)\n', (58121, 58141), False, 'from sklearn.utils.fixes import parse_version\n'), ((58144, 58165), 'sklearn.utils.fixes.parse_version', 'parse_version', (['"""0.12"""'], {}), "('0.12')\n", (58157, 58165), False, 'from sklearn.utils.fixes import parse_version\n'), ((17244, 17258), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (17252, 17258), True, 'import numpy as np\n'), ((17530, 17544), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (17538, 17544), True, 'import numpy as np\n'), ((17828, 17842), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (17836, 17842), True, 'import numpy as np\n'), ((23288, 23304), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (23296, 23304), True, 'import numpy as np\n'), ((23548, 23562), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (23556, 23562), True, 'import numpy as np\n'), ((24553, 24597), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': 'message'}), '(AttributeError, match=message)\n', (24566, 24597), False, 'import pytest\n'), ((24681, 24725), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': 'message'}), '(AttributeError, match=message)\n', (24694, 24725), False, 'import pytest\n'), ((33939, 33951), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (33948, 33951), True, 'import numpy as np\n'), ((48432, 48446), 'numpy.isfinite', 'np.isfinite', (['X'], {}), '(X)\n', (48443, 48446), True, 'import numpy as np\n'), ((48654, 48668), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (48666, 48668), False, 'from sklearn.preprocessing import 
LabelEncoder, scale, MinMaxScaler\n'), ((48701, 48722), 'numpy.isfinite', 'np.isfinite', (['X_scaled'], {}), '(X_scaled)\n', (48712, 48722), True, 'import numpy as np\n'), ((49126, 49150), 'numpy.isfinite', 'np.isfinite', (['model.coef_'], {}), '(model.coef_)\n', (49137, 49150), True, 'import numpy as np\n'), ((9406, 9418), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (9415, 9418), True, 'import numpy as np\n'), ((9463, 9475), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (9472, 9475), True, 'import numpy as np\n'), ((38603, 38617), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (38615, 38617), False, 'from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler\n'), ((48845, 48875), 'numpy.dot', 'np.dot', (['X_scaled', 'ground_truth'], {}), '(X_scaled, ground_truth)\n', (48851, 48875), True, 'import numpy as np\n'), ((53282, 53294), 'numpy.exp', 'np.exp', (['(-1.0)'], {}), '(-1.0)\n', (53288, 53294), True, 'import numpy as np\n'), ((53305, 53316), 'numpy.exp', 'np.exp', (['(1.0)'], {}), '(1.0)\n', (53311, 53316), True, 'import numpy as np\n'), ((53359, 53370), 'numpy.exp', 'np.exp', (['(1.0)'], {}), '(1.0)\n', (53365, 53370), True, 'import numpy as np\n'), ((53380, 53392), 'numpy.exp', 'np.exp', (['(-1.0)'], {}), '(-1.0)\n', (53386, 53392), True, 'import numpy as np\n'), ((53436, 53448), 'numpy.exp', 'np.exp', (['(-1.0)'], {}), '(-1.0)\n', (53442, 53448), True, 'import numpy as np\n'), ((53458, 53469), 'numpy.exp', 'np.exp', (['(1.0)'], {}), '(1.0)\n', (53464, 53469), True, 'import numpy as np\n'), ((53512, 53523), 'numpy.exp', 'np.exp', (['(1.0)'], {}), '(1.0)\n', (53518, 53523), True, 'import numpy as np\n'), ((53534, 53546), 'numpy.exp', 'np.exp', (['(-1.0)'], {}), '(-1.0)\n', (53540, 53546), True, 'import numpy as np\n')] |
import inspect
import os
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from Config import Config
from NIM import benchmarks
from PIL import Image
# Render log-scale contour images for the selected NIM benchmark functions,
# saving a full-size copy under ./out and a resized copy under the project's
# data/bg/contour directory.
classes = inspect.getmembers(benchmarks, inspect.isclass)
ignore_benchmarks_name = ["Benchmark", "Eggholder", "Griewank", "Schwefel"]
unignore_benchmarks_name = ["SingleSourceFunction"]
# unignore_benchmarks_name = ["SingleSourceFunction", "MultiSourceFunction"]
for (benchmarks_name, benchmarks_class) in tqdm(classes):
    # if benchmarks_name in ignore_benchmarks_name:
    #     continue
    # Only render the explicitly whitelisted benchmark functions.
    if benchmarks_name not in unignore_benchmarks_name:
        continue
    bc = benchmarks_class(dimension=3)
    # Coarser sampling for very wide domains keeps the evaluation grid small;
    # every narrower domain is sampled at 0.1 (the old 50..200 branch assigned
    # the same value as the final else, so the two collapse into one).
    if bc.upper[1] - bc.lower[1] >= 200 or bc.upper[0] - bc.lower[0] >= 200:
        step = 1
    else:
        step = 0.1
    x = np.arange(bc.lower[0], bc.upper[0], step)
    y = np.arange(bc.lower[1], bc.upper[1], step)
    X, Y = np.meshgrid(x, y)
    # Flatten the grid to (n_points, 2) so bc.eval can be applied row-wise,
    # then restore the 2-D grid shape for contouring.
    Z = np.dstack((X, Y))
    Z = Z.reshape((Z.shape[0] * Z.shape[1], Z.shape[2]))
    Z = np.apply_along_axis(bc.eval, 1, Z)
    Z = Z.reshape((X.shape[0], X.shape[1]))
    Z = Z.astype(np.float64)
    # Replace exact zeros with a tiny positive value so np.log below is finite.
    Z[Z == 0] = 10 ** -20
    fig = plt.figure(figsize=(5.4, 5.4), dpi=120)
    plt.contourf(X, Y, np.log(Z), cmap=plt.get_cmap('Pastel2'))
    ax = plt.gca()
    ax.xaxis.set_ticks_position("top")
    ax.invert_yaxis()
    # Mark the known global optimum with a red star.
    plt.scatter(bc.get_optimum()[0][0][0], bc.get_optimum()[0][0][1], marker='*', c="r")
    # plt.scatter(bc.get_optimum()[0][1][0], bc.get_optimum()[0][1][1], marker='*', c="r")
    name = benchmarks_name + ".png"
    plt.axis('off')
    plt.savefig("./out/" + name, bbox_inches='tight', pad_inches=0)
    plt.savefig(os.path.join(Config.project_root, "data/bg/contour", name), bbox_inches='tight', pad_inches=0)
    # close() (not just clf()) releases the figure; otherwise figures accumulate
    # across loop iterations and memory grows without bound.
    plt.close(fig)
    # Context manager closes the source file handle; LANCZOS is the filter
    # formerly named ANTIALIAS (the alias was removed in Pillow 10).
    with Image.open("./out/" + name) as image:
        resized_image = image.resize((Config.contour_pixel, Config.contour_pixel), Image.LANCZOS)
    resized_image.save(os.path.join(Config.project_root, "data/bg/contour", name), quality=100)
| [
"numpy.dstack",
"PIL.Image.open",
"inspect.getmembers",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"tqdm.tqdm",
"matplotlib.pyplot.clf",
"numpy.log",
"os.path.join",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.apply_along_axis",
"numpy.meshgrid",
"numpy.arange",... | [((186, 233), 'inspect.getmembers', 'inspect.getmembers', (['benchmarks', 'inspect.isclass'], {}), '(benchmarks, inspect.isclass)\n', (204, 233), False, 'import inspect\n'), ((491, 504), 'tqdm.tqdm', 'tqdm', (['classes'], {}), '(classes)\n', (495, 504), False, 'from tqdm import tqdm\n'), ((928, 969), 'numpy.arange', 'np.arange', (['bc.lower[0]', 'bc.upper[0]', 'step'], {}), '(bc.lower[0], bc.upper[0], step)\n', (937, 969), True, 'import numpy as np\n'), ((978, 1019), 'numpy.arange', 'np.arange', (['bc.lower[1]', 'bc.upper[1]', 'step'], {}), '(bc.lower[1], bc.upper[1], step)\n', (987, 1019), True, 'import numpy as np\n'), ((1031, 1048), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1042, 1048), True, 'import numpy as np\n'), ((1057, 1074), 'numpy.dstack', 'np.dstack', (['(X, Y)'], {}), '((X, Y))\n', (1066, 1074), True, 'import numpy as np\n'), ((1140, 1174), 'numpy.apply_along_axis', 'np.apply_along_axis', (['bc.eval', '(1)', 'Z'], {}), '(bc.eval, 1, Z)\n', (1159, 1174), True, 'import numpy as np\n'), ((1280, 1319), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.4, 5.4)', 'dpi': '(120)'}), '(figsize=(5.4, 5.4), dpi=120)\n', (1290, 1319), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1450), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (1448, 1450), True, 'import matplotlib.pyplot as plt\n'), ((1460, 1469), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1467, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1768), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1761, 1768), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1837), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./out/' + name)"], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "('./out/' + name, bbox_inches='tight', pad_inches=0)\n", (1785, 1837), True, 'import matplotlib.pyplot as plt\n'), ((1953, 1962), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1960, 1962), True, 
'import matplotlib.pyplot as plt\n'), ((1976, 2003), 'PIL.Image.open', 'Image.open', (["('./out/' + name)"], {}), "('./out/' + name)\n", (1986, 2003), False, 'from PIL import Image\n'), ((1387, 1396), 'numpy.log', 'np.log', (['Z'], {}), '(Z)\n', (1393, 1396), True, 'import numpy as np\n'), ((1854, 1912), 'os.path.join', 'os.path.join', (['Config.project_root', '"""data/bg/contour"""', 'name'], {}), "(Config.project_root, 'data/bg/contour', name)\n", (1866, 1912), False, 'import os\n'), ((2123, 2181), 'os.path.join', 'os.path.join', (['Config.project_root', '"""data/bg/contour"""', 'name'], {}), "(Config.project_root, 'data/bg/contour', name)\n", (2135, 2181), False, 'import os\n'), ((1403, 1426), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Pastel2"""'], {}), "('Pastel2')\n", (1415, 1426), True, 'import matplotlib.pyplot as plt\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Cross resonance Hamiltonian tomography.
"""
from typing import List, Tuple, Iterable, Optional, Type
import warnings
import numpy as np
from qiskit import pulse, circuit, QuantumCircuit
from qiskit.circuit.parameterexpression import ParameterValueType
from qiskit.exceptions import QiskitError
from qiskit.providers import Backend
from qiskit_experiments.framework import BaseExperiment, Options
from qiskit_experiments.library.characterization.analysis import CrossResonanceHamiltonianAnalysis
class CrossResonanceHamiltonian(BaseExperiment):
r"""Cross resonance Hamiltonian tomography experiment.
# section: overview
This experiment assumes the two qubit Hamiltonian in the form
.. math::
H = \frac{I \otimes A}{2} + \frac{Z \otimes B}{2}
where :math:`A` and :math:`B` are linear combinations of
the Pauli operators :math:`\in {X, Y, Z}`.
The coefficient of each Pauli term in the Hamiltonian
can be estimated with this experiment.
This experiment is performed by stretching the pulse duration of a cross resonance pulse
and measuring the target qubit by projecting onto the x, y, and z bases.
The control qubit state dependent (controlled-) Rabi oscillation on the
target qubit is observed by repeating the experiment with the control qubit
both in the ground and excited states. The fit for the oscillations in the
three bases with the two control qubit preparations tomographically
reconstructs the Hamiltonian in the form shown above.
See Ref. [1] for more details.
More specifically, the following circuits are executed in this experiment.
.. parsed-literal::
(X measurement)
┌───┐┌────────────────────┐
q_0: ┤ P ├┤0 ├────────
└───┘│ cr_tone(duration) │┌───┐┌─┐
q_1: ─────┤1 ├┤ H ├┤M├
└────────────────────┘└───┘└╥┘
c: 1/═════════════════════════════════╩═
0
(Y measurement)
┌───┐┌────────────────────┐
q_0: ┤ P ├┤0 ├───────────────
└───┘│ cr_tone(duration) │┌─────┐┌───┐┌─┐
q_1: ─────┤1 ├┤ Sdg ├┤ H ├┤M├
└────────────────────┘└─────┘└───┘└╥┘
c: 1/════════════════════════════════════════╩═
0
(Z measurement)
┌───┐┌────────────────────┐
q_0: ┤ P ├┤0 ├───
└───┘│ cr_tone(duration) │┌─┐
q_1: ─────┤1 ├┤M├
└────────────────────┘└╥┘
c: 1/════════════════════════════╩═
0
The ``P`` gate on the control qubit (``q_0``) indicates the state preparation.
Since this experiment requires two sets of sub experiments with the control qubit in the
excited and ground state, ``P`` will become ``X`` gate or just be omitted, respectively.
Here ``cr_tone`` is implemented by a single cross resonance tone
driving the control qubit at the frequency of the target qubit.
The pulse envelope is the flat-topped Gaussian implemented by the parametric pulse
:py:class:`~qiskit.pulse.library.parametric_pulses.GaussianSquare`.
This experiment scans the flat-top width of the :py:class:`~qiskit.pulse.library.\
parametric_pulses.GaussianSquare` envelope with the fixed rising and falling edges.
The total pulse duration is implicitly computed to meet the timing constraints of
the target backend. The edge duration is usually computed as
.. math::
\tau_{\rm edges} = 2 r \sigma,
where the :math:`r` is the ratio of the actual edge duration to :math:`\sigma` of
the Gaussian rising and falling edges. Note that actual edge duration is not
identical to the net duration because of the smaller pulse amplitude of the edges.
The net edge duration is an extra fitting parameter with initial guess
.. math::
\tau_{\rm edges}' = \sqrt{2 \pi} \sigma,
which is derived by assuming a square edges with the full pulse amplitude.
# section: analysis_ref
:py:class:`CrossResonanceHamiltonianAnalysis`
# section: reference
.. ref_arxiv:: 1 1603.04821
# section: tutorial
.. ref_website:: Qiskit Textbook 6.7,
https://qiskit.org/textbook/ch-quantum-hardware/hamiltonian-tomography.html
"""
# Number of CR pulses. The flat top duration per pulse is divided by this number.
num_pulses = 1
class CRPulseGate(circuit.Gate):
"""A pulse gate of cross resonance. Definition should be provided via calibration."""
def __init__(self, width: ParameterValueType):
super().__init__("cr_gate", 2, [width])
def __init__(
self,
qubits: Tuple[int, int],
flat_top_widths: Iterable[float],
backend: Optional[Backend] = None,
cr_gate: Optional[Type[circuit.Gate]] = None,
**kwargs,
):
"""Create a new experiment.
Args:
qubits: Two-value tuple of qubit indices on which to run tomography.
The first index stands for the control qubit.
flat_top_widths: The total duration of the square part of cross resonance pulse(s)
to scan, in units of dt. The total pulse duration including Gaussian rising and
falling edges is implicitly computed with experiment parameters ``sigma`` and
``risefall``.
backend: Optional, the backend to run the experiment on.
cr_gate: Optional, circuit gate instruction of cross resonance pulse.
kwargs: Pulse parameters. See :meth:`experiment_options` for details.
Raises:
QiskitError: When ``qubits`` length is not 2.
"""
super().__init__(qubits, analysis=CrossResonanceHamiltonianAnalysis(), backend=backend)
if len(qubits) != 2:
raise QiskitError(
"Length of qubits is not 2. Please provide index for control and target qubit."
)
self.set_experiment_options(flat_top_widths=flat_top_widths, **kwargs)
self._cr_gate = cr_gate
# backend parameters required to run this experiment
# random values are populated here but these are immediately updated after backend is set
# this is to keep capability of generating circuits just for checking
self._dt = 1
self._cr_channel = 0
self._granularity = 1
@classmethod
def _default_experiment_options(cls) -> Options:
"""Default experiment options.
Experiment Options:
flat_top_widths (np.ndarray): The total duration of the square part of
cross resonance pulse(s) to scan, in units of dt. This can start from zero and
take positive real values representing the durations.
Pulse edge effect is considered as an offset to the durations.
amp (complex): Amplitude of the cross resonance tone.
amp_t (complex): Amplitude of the cancellation or rotary drive on target qubit.
sigma (float): Sigma of Gaussian rise and fall edges, in units of dt.
risefall (float): Ratio of edge durations to sigma.
"""
options = super()._default_experiment_options()
options.flat_top_widths = None
options.amp = 0.2
options.amp_t = 0.0
options.sigma = 64
options.risefall = 2
return options
def _set_backend(self, backend: Backend):
    """Capture backend constraints needed to build and interpret circuits.

    Extracts the control-channel index for the CR drive (only when a bare
    pulse gate is used), the waveform granularity, and the sample time
    resolution ``dt``. Missing information produces a warning (or is silently
    skipped for granularity) and the defaults set in ``__init__`` are kept.
    """
    super()._set_backend(backend)
    if self._cr_gate is None:
        # A bare pulse gate (CRPulseGate) is used, so we must know on which
        # control channel the cross resonance tone is played.
        try:
            control_channels = backend.configuration().control(self.physical_qubits)
            self._cr_channel = control_channels[0].index
        except AttributeError:
            warnings.warn(
                f"{backend.name()} doesn't provide cr channel mapping. "
                "Cannot find proper channel index to play the cross resonance pulse.",
                UserWarning,
            )
    # Waveform chunk-size (granularity) constraint, if the backend reports one.
    try:
        self._granularity = backend.configuration().timing_constraints["granularity"]
    except (AttributeError, KeyError):
        # Probably no chunk size restriction on waveform memory.
        pass
    # Sample time resolution; required to convert durations into seconds.
    try:
        self._dt = backend.configuration().dt
    except AttributeError:
        warnings.warn(
            f"{backend.name()} doesn't provide system time resolution dt. "
            "Cannot estimate Hamiltonian coefficients in SI units.",
            UserWarning,
        )
def _build_cr_circuit(self, pulse_gate: circuit.Gate) -> QuantumCircuit:
    """Single tone cross resonance.

    Args:
        pulse_gate: A pulse gate to represent a single cross resonance pulse.

    Returns:
        A circuit definition for the cross resonance pulse to measure.
    """
    tone = QuantumCircuit(2)
    tone.append(pulse_gate, [0, 1])
    return tone
def _build_cr_schedule(self, flat_top_width: float) -> pulse.ScheduleBlock:
    """GaussianSquared cross resonance pulse.

    Args:
        flat_top_width: Total length of flat top part of the pulse in units of dt.

    Returns:
        A schedule definition for the cross resonance pulse to measure.
    """
    opt = self.experiment_options
    # Total duration = flat top + two Gaussian edges, rounded down to the
    # nearest multiple of the backend granularity.
    requested_duration = flat_top_width + 2 * opt.sigma * opt.risefall
    valid_duration = int(self._granularity * np.floor(requested_duration / self._granularity))
    target_channel = pulse.DriveChannel(self.physical_qubits[1])
    with pulse.build(default_alignment="left", name="cr") as cross_resonance:
        # Cross resonance tone on the control channel.
        pulse.play(
            pulse.GaussianSquare(
                duration=valid_duration,
                amp=opt.amp,
                sigma=opt.sigma,
                width=flat_top_width,
            ),
            pulse.ControlChannel(self._cr_channel),
        )
        # Cancellation (rotary) tone on the target qubit, if requested.
        if np.isclose(opt.amp_t, 0.0):
            pulse.delay(valid_duration, target_channel)
        else:
            pulse.play(
                pulse.GaussianSquare(
                    duration=valid_duration,
                    amp=opt.amp_t,
                    sigma=opt.sigma,
                    width=flat_top_width,
                ),
                target_channel,
            )
        # Place holder for empty drive channels. This is necessary due to a
        # known pulse gate bug.
        pulse.delay(valid_duration, pulse.DriveChannel(self.physical_qubits[0]))
    return cross_resonance
def circuits(self) -> List[QuantumCircuit]:
    """Return a list of experiment circuits.

    Returns:
        A list of :class:`QuantumCircuit`.

    Raises:
        AttributeError: When the backend doesn't report the time resolution of waveforms.
    """
    opt = self.experiment_options
    experiment_circuits = []
    for flat_top_width in opt.flat_top_widths:
        # Resolve the CR gate; a bare pulse gate also needs its schedule for
        # calibration attachment below.
        if self._cr_gate is None:
            cr_schedule = self._build_cr_schedule(flat_top_width)
            cr_gate = self.CRPulseGate(flat_top_width)
        else:
            cr_schedule = None
            cr_gate = self._cr_gate(flat_top_width)
        for control_state in (0, 1):
            for meas_basis in ("x", "y", "z"):
                tomo_circ = QuantumCircuit(2, 1)
                if control_state:
                    # Prepare the control qubit in |1>.
                    tomo_circ.x(0)
                tomo_circ.compose(
                    other=self._build_cr_circuit(cr_gate),
                    qubits=[0, 1],
                    inplace=True,
                )
                # Basis change on the target qubit before the Z-basis measurement.
                if meas_basis == "x":
                    tomo_circ.h(1)
                elif meas_basis == "y":
                    tomo_circ.sdg(1)
                    tomo_circ.h(1)
                tomo_circ.measure(1, 0)
                tomo_circ.metadata = {
                    "experiment_type": self.experiment_type,
                    "qubits": self.physical_qubits,
                    "xval": flat_top_width * self._dt,  # in units of sec
                    "control_state": control_state,
                    "meas_basis": meas_basis,
                }
                if isinstance(cr_gate, self.CRPulseGate):
                    # Attach calibration if this is bare pulse gate
                    tomo_circ.add_calibration(
                        gate=cr_gate,
                        qubits=self.physical_qubits,
                        schedule=cr_schedule,
                    )
                experiment_circuits.append(tomo_circ)
    # Set analysis option for initial guess that depends on experiment option values.
    edge_duration = np.sqrt(2 * np.pi) * self.experiment_options.sigma * self.num_pulses
    init_guess = self.analysis.options.p0.copy()
    init_guess["t_off"] = edge_duration * self._dt
    self.analysis.set_options(p0=init_guess)
    return experiment_circuits
class EchoedCrossResonanceHamiltonian(CrossResonanceHamiltonian):
    r"""Echoed cross resonance Hamiltonian tomography experiment.

    # section: overview

        This is a variant of :py:class:`CrossResonanceHamiltonian` with an
        identical experiment framework, but the cross resonance operation is
        realized as an echoed sequence that removes unwanted single qubit
        rotations. The cross resonance circuit looks like:

        .. parsed-literal::

                 ┌────────────────────┐  ┌───┐  ┌────────────────────┐
            q_0: ┤0                   ├──┤ X ├──┤0                   ├──────────
                 │  cr_tone(duration) │┌─┴───┴─┐│  cr_tone(duration) │┌────────┐
            q_1: ┤1                   ├┤ Rz(π) ├┤1                   ├┤ Rz(-π) ├
                 └────────────────────┘└───────┘└────────────────────┘└────────┘

        Here two ``cr_tone``\ s are applied, where the latter one is played with the
        control qubit state flipped and with a phase flip of the target qubit frame.
        This operation is equivalent to applying the ``cr_tone`` with a negative amplitude.
        The Hamiltonian for this decomposition has no IX and ZI interactions,
        and also a reduced IY interaction to some extent (not completely eliminated) [1].
        Note that the CR Hamiltonian tomography experiment cannot detect the ZI term.
        However, it is sensitive to the IX and IY terms.

    # section: reference
        .. ref_arxiv:: 1 2007.02925
    """

    # Two CR tones are played per circuit in the echo sequence; this scales the
    # edge-duration offset used for the analysis initial guess.
    num_pulses = 2

    def _build_cr_circuit(self, pulse_gate: circuit.Gate) -> QuantumCircuit:
        """Build the echoed cross resonance sequence.

        Args:
            pulse_gate: A pulse gate to represent a single cross resonance pulse.

        Returns:
            A circuit definition for the echoed cross resonance pulse to measure.
        """
        echoed = QuantumCircuit(2)
        echoed.append(pulse_gate, [0, 1])  # first CR tone
        echoed.x(0)  # flip the control qubit
        echoed.rz(np.pi, 1)  # flip the target qubit frame
        echoed.append(pulse_gate, [0, 1])  # second tone: effectively negative amplitude
        echoed.rz(-np.pi, 1)  # restore the target qubit frame
        return echoed
| [
"qiskit.pulse.GaussianSquare",
"numpy.isclose",
"numpy.sqrt",
"numpy.floor",
"qiskit.exceptions.QiskitError",
"qiskit_experiments.library.characterization.analysis.CrossResonanceHamiltonianAnalysis",
"qiskit.pulse.ControlChannel",
"qiskit.pulse.build",
"qiskit.pulse.DriveChannel",
"qiskit.QuantumC... | [((10037, 10054), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (10051, 10054), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((16319, 16336), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (16333, 16336), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((6761, 6863), 'qiskit.exceptions.QiskitError', 'QiskitError', (['"""Length of qubits is not 2. Please provide index for control and target qubit."""'], {}), "(\n 'Length of qubits is not 2. Please provide index for control and target qubit.'\n )\n", (6772, 6863), False, 'from qiskit.exceptions import QiskitError\n'), ((10715, 10763), 'qiskit.pulse.build', 'pulse.build', ([], {'default_alignment': '"""left"""', 'name': '"""cr"""'}), "(default_alignment='left', name='cr')\n", (10726, 10763), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((6659, 6694), 'qiskit_experiments.library.characterization.analysis.CrossResonanceHamiltonianAnalysis', 'CrossResonanceHamiltonianAnalysis', ([], {}), '()\n', (6692, 6694), False, 'from qiskit_experiments.library.characterization.analysis import CrossResonanceHamiltonianAnalysis\n'), ((10661, 10699), 'numpy.floor', 'np.floor', (['(duration / self._granularity)'], {}), '(duration / self._granularity)\n', (10669, 10699), True, 'import numpy as np\n'), ((10864, 10965), 'qiskit.pulse.GaussianSquare', 'pulse.GaussianSquare', ([], {'duration': 'valid_duration', 'amp': 'opt.amp', 'sigma': 'opt.sigma', 'width': 'flat_top_width'}), '(duration=valid_duration, amp=opt.amp, sigma=opt.sigma,\n width=flat_top_width)\n', (10884, 10965), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((11078, 11116), 'qiskit.pulse.ControlChannel', 'pulse.ControlChannel', (['self._cr_channel'], {}), '(self._cr_channel)\n', (11098, 11116), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((11187, 11213), 'numpy.isclose', 'np.isclose', (['opt.amp_t', '(0.0)'], {}), '(opt.amp_t, 
0.0)\n', (11197, 11213), True, 'import numpy as np\n'), ((11814, 11857), 'qiskit.pulse.DriveChannel', 'pulse.DriveChannel', (['self.physical_qubits[0]'], {}), '(self.physical_qubits[0])\n', (11832, 11857), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((14186, 14204), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (14193, 14204), True, 'import numpy as np\n'), ((11263, 11367), 'qiskit.pulse.GaussianSquare', 'pulse.GaussianSquare', ([], {'duration': 'valid_duration', 'amp': 'opt.amp_t', 'sigma': 'opt.sigma', 'width': 'flat_top_width'}), '(duration=valid_duration, amp=opt.amp_t, sigma=opt.\n sigma, width=flat_top_width)\n', (11283, 11367), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((11503, 11546), 'qiskit.pulse.DriveChannel', 'pulse.DriveChannel', (['self.physical_qubits[1]'], {}), '(self.physical_qubits[1])\n', (11521, 11546), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((11628, 11671), 'qiskit.pulse.DriveChannel', 'pulse.DriveChannel', (['self.physical_qubits[1]'], {}), '(self.physical_qubits[1])\n', (11646, 11671), False, 'from qiskit import pulse, circuit, QuantumCircuit\n'), ((12739, 12759), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['(2)', '(1)'], {}), '(2, 1)\n', (12753, 12759), False, 'from qiskit import pulse, circuit, QuantumCircuit\n')] |
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import partial
import numpy as np
import logging
import torch
import distiller
from .pruner import _ParameterPruner
msglogger = logging.getLogger()
class RankedStructureParameterPruner(_ParameterPruner):
    """Base class for pruners that rank structures and prune the lowest-ranked ones."""

    def __init__(self, name, group_type, desired_sparsity, weights, group_dependency=None):
        super().__init__(name)
        self.group_type = group_type
        self.group_dependency = group_dependency
        self.params_names = weights
        assert self.params_names
        # Cached state for "Leader" group dependency (see prune_to_target_sparsity).
        self.leader_binary_map = None
        self.last_target_sparsity = None
        self.desired_sparsity = desired_sparsity

    def leader(self):
        # The "leader" is the first weights-tensor in the list.
        return self.params_names[0]

    def is_supported(self, param_name):
        """Return True if this pruner is configured to handle ``param_name``."""
        return param_name in self.params_names

    def fraction_to_prune(self, param_name):
        """Return the target sparsity for ``param_name``."""
        return self.desired_sparsity

    def set_param_mask(self, param, param_name, zeros_mask_dict, meta):
        if not self.is_supported(param_name):
            return
        target = self.fraction_to_prune(param_name)
        try:
            model = meta['model']
        except TypeError:
            # meta may be None (not subscriptable), in which case there is no model.
            model = None
        return self.prune_to_target_sparsity(param, param_name, zeros_mask_dict, target, model)

    def prune_to_target_sparsity(self, param, param_name, zeros_mask_dict, target_sparsity, model):
        if not self.is_supported(param_name):
            return
        binary_map = None
        if self.group_dependency == "Leader":
            if target_sparsity != self.last_target_sparsity:
                # Each time we change the target sparsity we need to compute and cache the
                # leader's binary-map.  We don't have control over the order that this
                # function is invoked, so the only indication that we need to compute a new
                # leader binary-map is the change of the target_sparsity.
                self.last_target_sparsity = target_sparsity
                self.leader_binary_map = self.prune_group(target_sparsity, model.state_dict()[self.leader()],
                                                          self.leader(), zeros_mask_dict=None)
            assert self.leader_binary_map is not None
            binary_map = self.leader_binary_map
        # Delegate the actual pruning to a sub-class.
        self.prune_group(target_sparsity, param, param_name, zeros_mask_dict, model, binary_map)

    def prune_group(self, fraction_to_prune, param, param_name, zeros_mask_dict, model=None, binary_map=None):
        raise NotImplementedError
# Magnitude (norm) functions used to rank structures. These are module-level
# partial objects (not lambdas) because callers compare them with
# `magnitude_fn == l1_magnitude`; functools.partial defines no __eq__, so that
# comparison is effectively an identity check against these singletons.
l1_magnitude = partial(torch.norm, p=1)  # L1 norm (sum of absolute values)
l2_magnitude = partial(torch.norm, p=2)  # L2 (Euclidean) norm
class LpRankedStructureParameterPruner(RankedStructureParameterPruner):
    """Uses Lp-norm to rank and prune structures.

    This class prunes to a prescribed percentage of structured-sparsity (level pruning), by
    first ranking (sorting) the structures based on their Lp-norm, and then pruning a percentage
    of the lower-ranking structures.
    See also: https://en.wikipedia.org/wiki/Lp_space#The_p-norm_in_finite_dimensions
    """
    def __init__(self, name, group_type, desired_sparsity, weights,
                 group_dependency=None, kwargs=None, magnitude_fn=None):
        """
        Args:
            name: pruner instance name.
            group_type: structure to prune; one of '3D'/'Filters', 'Channels', 'Rows', 'Blocks'.
            desired_sparsity: target fraction of structures to prune.
            weights: names of the weight tensors this pruner handles.
            group_dependency: "Leader" to reuse the first tensor's binary map (see base class).
            kwargs: extra arguments; for 'Blocks' must contain key 'block_shape'.
            magnitude_fn: ranking norm; expected to be l1_magnitude or l2_magnitude
                (compared by identity below to choose the threshold type).
        """
        super().__init__(name, group_type, desired_sparsity, weights, group_dependency)
        if group_type not in ['3D', 'Filters', 'Channels', 'Rows', 'Blocks']:
            raise ValueError("Structure {} was requested but "
                             "currently ranking of this shape is not supported".
                             format(group_type))
        assert magnitude_fn is not None
        self.magnitude_fn = magnitude_fn
        if group_type == 'Blocks':
            try:
                self.block_shape = kwargs['block_shape']
            except KeyError:
                raise ValueError("When defining a block pruner you must also specify the block shape")

    def prune_group(self, fraction_to_prune, param, param_name, zeros_mask_dict, model=None, binary_map=None):
        """Dispatch to the ranking/pruning function matching ``self.group_type``."""
        if fraction_to_prune == 0:
            return
        if self.group_type in ['3D', 'Filters']:
            group_pruning_fn = self.rank_and_prune_filters
        elif self.group_type == 'Channels':
            group_pruning_fn = partial(self.rank_and_prune_channels)
        elif self.group_type == 'Rows':
            group_pruning_fn = self.rank_and_prune_rows
        elif self.group_type == 'Blocks':
            group_pruning_fn = partial(self.rank_and_prune_blocks, block_shape=self.block_shape)

        binary_map = group_pruning_fn(fraction_to_prune, param, param_name,
                                      zeros_mask_dict, model, binary_map,
                                      magnitude_fn=self.magnitude_fn)
        return binary_map

    @staticmethod
    def rank_and_prune_channels(fraction_to_prune, param, param_name=None,
                                zeros_mask_dict=None, model=None, binary_map=None, magnitude_fn=l1_magnitude):
        """Rank the input channels of a 4D weights tensor by mean kernel magnitude and prune the weakest."""
        def rank_channels(fraction_to_prune, param):
            # Returns (bottom-k magnitudes, per-channel mean kernel magnitude),
            # or (None, None) when too few channels exist to prune anything.
            num_filters = param.size(0)
            num_channels = param.size(1)
            kernel_size = param.size(2) * param.size(3)

            # First, reshape the weights tensor such that each channel (kernel) in the original
            # tensor, is now a row in the 2D tensor.
            view_2d = param.view(-1, kernel_size)
            # Next, compute the sums of each kernel
            kernel_mags = magnitude_fn(view_2d, dim=1)
            # Now group by channels
            k_sums_mat = kernel_mags.view(num_filters, num_channels).t()
            channel_mags = k_sums_mat.mean(dim=1)
            k = int(fraction_to_prune * channel_mags.size(0))
            if k == 0:
                msglogger.info("Too few channels (%d)- can't prune %.1f%% channels",
                               num_channels, 100*fraction_to_prune)
                return None, None

            bottomk, _ = torch.topk(channel_mags, k, largest=False, sorted=True)
            return bottomk, channel_mags

        def binary_map_to_mask(binary_map, param):
            # Broadcast the per-channel keep/prune map back to the full 4D weight shape.
            num_filters = param.size(0)
            num_channels = param.size(1)
            a = binary_map.expand(num_filters, num_channels)
            c = a.unsqueeze(-1)
            d = c.expand(num_filters, num_channels, param.size(2) * param.size(3)).contiguous()
            return d.view(num_filters, num_channels, param.size(2), param.size(3))

        if binary_map is None:
            bottomk_channels, channel_mags = rank_channels(fraction_to_prune, param)
            if bottomk_channels is None:
                # Empty list means that fraction_to_prune is too low to prune anything
                return
            threshold = bottomk_channels[-1]
            # Keep only channels whose magnitude is strictly greater than the k-th smallest.
            binary_map = channel_mags.gt(threshold).type(param.data.type())
        if zeros_mask_dict is not None:
            zeros_mask_dict[param_name].mask = binary_map_to_mask(binary_map, param)
            # NOTE(review): the message always says "L1..." even when an L2 magnitude
            # function is in use.
            msglogger.info("L1RankedStructureParameterPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)", param_name,
                           distiller.sparsity_ch(zeros_mask_dict[param_name].mask),
                           fraction_to_prune, binary_map.sum().item(), param.size(1))
        return binary_map

    @staticmethod
    def rank_and_prune_filters(fraction_to_prune, param, param_name,
                               zeros_mask_dict, model=None, binary_map=None, magnitude_fn=l1_magnitude):
        """Rank whole filters (output channels) of a 4D weights tensor and prune the weakest."""
        assert param.dim() == 4, "This thresholding is only supported for 4D weights"
        threshold = None
        if binary_map is None:
            # First we rank the filters
            view_filters = param.view(param.size(0), -1)
            filter_mags = magnitude_fn(view_filters, dim=1)
            topk_filters = int(fraction_to_prune * filter_mags.size(0))
            if topk_filters == 0:
                msglogger.info("Too few filters - can't prune %.1f%% filters", 100*fraction_to_prune)
                return
            bottomk, _ = torch.topk(filter_mags, topk_filters, largest=False, sorted=True)
            threshold = bottomk[-1]
            msglogger.info("L1RankedStructureParameterPruner - param: %s pruned=(%d/%d)",
                           param_name,
                           topk_filters, filter_mags.size(0))
        # Then we threshold
        threshold_type = 'L1' if magnitude_fn == l1_magnitude else 'L2'
        mask, binary_map = distiller.group_threshold_mask(param, 'Filters', threshold, threshold_type, binary_map)
        if zeros_mask_dict is not None:
            zeros_mask_dict[param_name].mask = mask
        msglogger.info("L1RankedStructureParameterPruner - param: %s pruned=%.3f goal=%.3f",
                       param_name,
                       distiller.sparsity(mask),
                       fraction_to_prune)
        return binary_map

    @staticmethod
    def rank_and_prune_rows(fraction_to_prune, param, param_name,
                            zeros_mask_dict, model=None, binary_map=None, magnitude_fn=l1_magnitude):
        """Prune the rows of a matrix, based on ranked L1-norms of the matrix rows.

        PyTorch stores the weights matrices in a transposed format. I.e. before performing GEMM, a matrix is
        transposed. This is counter-intuitive. To deal with this, we can either transpose the matrix and
        then proceed to compute the masks as usual, or we can treat columns as rows, and rows as columns :-(.
        We choose the latter, because transposing very large matrices can be detrimental to performance. Note
        that computing mean L1-norm of columns is also not optimal, because consecutive column elements are far
        away from each other in memory, and this means poor use of caches and system memory.
        """
        assert param.dim() == 2, "This thresholding is only supported for 2D weights"
        ROWS_DIM = 0
        THRESHOLD_DIM = 'Cols'
        rows_mags = magnitude_fn(param, dim=ROWS_DIM)
        num_rows_to_prune = int(fraction_to_prune * rows_mags.size(0))
        if num_rows_to_prune == 0:
            msglogger.info("Too few filters - can't prune %.1f%% rows", 100*fraction_to_prune)
            return
        bottomk_rows, _ = torch.topk(rows_mags, num_rows_to_prune, largest=False, sorted=True)
        threshold = bottomk_rows[-1]
        threshold_type = 'L1' if magnitude_fn == l1_magnitude else 'L2'
        zeros_mask_dict[param_name].mask = distiller.group_threshold_mask(param, THRESHOLD_DIM,
                                                                          threshold, threshold_type)
        msglogger.info("L1RankedStructureParameterPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)", param_name,
                       distiller.sparsity(zeros_mask_dict[param_name].mask),
                       fraction_to_prune, num_rows_to_prune, rows_mags.size(0))
        # NOTE(review): unlike the other rank_and_prune_* methods, this one returns no
        # binary map and its `binary_map` argument is unused -- confirm against callers.

    @staticmethod
    def rank_and_prune_blocks(fraction_to_prune, param, param_name=None, zeros_mask_dict=None,
                              model=None, binary_map=None, block_shape=None, magnitude_fn=l1_magnitude):
        """Block-wise pruning for 4D tensors.

        The block shape is specified using a tuple: [block_repetitions, block_depth, block_height, block_width].
        The dimension 'block_repetitions' specifies in how many consecutive filters the "basic block"
        (shaped as [block_depth, block_height, block_width]) repeats to produce a (4D) "super block".

        For example:

          block_pruner:
            class: L1RankedStructureParameterPruner_AGP
            initial_sparsity : 0.05
            final_sparsity: 0.70
            group_type: Blocks
            kwargs:
              block_shape: [1,8,1,1]  # [block_repetitions, block_depth, block_height, block_width]

        Currently the only supported block shape is: block_repetitions x block_depth x 1 x 1
        """
        if len(block_shape) != 4:
            raise ValueError("The block shape must be specified as a 4-element tuple")
        block_repetitions, block_depth, block_height, block_width = block_shape
        if not block_width == block_height == 1:
            raise ValueError("Currently the only supported block shape is: block_repetitions x block_depth x 1 x 1")
        super_block_volume = distiller.volume(block_shape)
        num_super_blocks = distiller.volume(param) / super_block_volume
        if distiller.volume(param) % super_block_volume != 0:
            raise ValueError("The super-block size must divide the weight tensor exactly.")

        num_filters = param.size(0)
        num_channels = param.size(1)
        kernel_size = param.size(2) * param.size(3)

        # Choose a 3D view in which dimension 1 enumerates the elements of one block,
        # so that reducing over dim=1 yields one magnitude per block.
        if block_depth > 1:
            view_dims = (num_filters*num_channels//(block_repetitions*block_depth),
                         block_repetitions*block_depth,
                         kernel_size,)
        else:
            view_dims = (num_filters // block_repetitions,
                         block_repetitions,
                         -1,)

        def rank_blocks(fraction_to_prune, param):
            # Create a view where each block is a column
            view1 = param.view(*view_dims)
            # Next, compute the sums of each column (block)
            block_mags = magnitude_fn(view1, dim=1)
            block_mags = block_mags.view(-1)  # flatten
            k = int(fraction_to_prune * block_mags.size(0))
            if k == 0:
                msglogger.info("Too few blocks (%d)- can't prune %.1f%% blocks",
                               block_mags.size(0), 100*fraction_to_prune)
                return None, None

            bottomk, _ = torch.topk(block_mags, k, largest=False, sorted=True)
            return bottomk, block_mags

        def binary_map_to_mask(binary_map, param):
            # Broadcast the per-block keep/prune map back to the full 4D weight shape.
            a = binary_map.view(view_dims[0], view_dims[2])
            c = a.unsqueeze(1)
            d = c.expand(*view_dims).contiguous()
            return d.view(num_filters, num_channels, param.size(2), param.size(3))

        if binary_map is None:
            bottomk_blocks, block_mags = rank_blocks(fraction_to_prune, param)
            if bottomk_blocks is None:
                # Empty list means that fraction_to_prune is too low to prune anything
                return
            threshold = bottomk_blocks[-1]
            binary_map = block_mags.gt(threshold).type(param.data.type())
        if zeros_mask_dict is not None:
            zeros_mask_dict[param_name].mask = binary_map_to_mask(binary_map, param)
            msglogger.info("L1RankedStructureParameterPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)", param_name,
                           distiller.sparsity_blocks(zeros_mask_dict[param_name].mask, block_shape=block_shape),
                           fraction_to_prune, binary_map.sum().item(), num_super_blocks)
        return binary_map
class L1RankedStructureParameterPruner(LpRankedStructureParameterPruner):
    """Rank structures by their mean L1-norm and prune to a prescribed
    percentage of structured sparsity (level pruning).
    """
    def __init__(self, name, group_type, desired_sparsity, weights,
                 group_dependency=None, kwargs=None):
        super().__init__(name=name, group_type=group_type, desired_sparsity=desired_sparsity,
                         weights=weights, group_dependency=group_dependency, kwargs=kwargs,
                         magnitude_fn=l1_magnitude)
class L2RankedStructureParameterPruner(LpRankedStructureParameterPruner):
    """Rank structures by their mean L2-norm and prune to a prescribed
    percentage of structured sparsity (level pruning).
    """
    def __init__(self, name, group_type, desired_sparsity, weights,
                 group_dependency=None, kwargs=None):
        super().__init__(name=name, group_type=group_type, desired_sparsity=desired_sparsity,
                         weights=weights, group_dependency=group_dependency, kwargs=kwargs,
                         magnitude_fn=l2_magnitude)
def mask_from_filter_order(filters_ordered_by_criterion, param, num_filters, binary_map):
    """Expand a per-filter keep-list into a full weights mask.

    Args:
        filters_ordered_by_criterion: indices of the filters to KEEP (set to 1 in the
            binary map). Ignored when ``binary_map`` is supplied.
        param: 4D weights tensor (filters x channels x h x w) whose shape the mask matches.
        num_filters: number of filters, i.e. ``param.size(0)``.
        binary_map: optional pre-computed per-filter 0/1 tensor of shape (num_filters,);
            when given it is used as-is.

    Returns:
        (mask, binary_map): ``mask`` has ``param``'s shape with mask[i] == binary_map[i]
        for every filter i; ``binary_map`` is the per-filter map actually used.
    """
    if binary_map is None:
        # Allocate on the same device as the weights instead of unconditionally
        # calling .cuda(), so this also works on CPU-only runs (and on the
        # correct GPU in multi-device setups).
        binary_map = torch.zeros(num_filters, device=param.device)
        binary_map[filters_ordered_by_criterion] = 1
    # Broadcast the (num_filters,) map over all weights of each filter.
    expanded = binary_map.expand(param.size(1) * param.size(2) * param.size(3), param.size(0)).t().contiguous()
    return expanded.view(param.shape), binary_map
class ActivationRankedFilterPruner(RankedStructureParameterPruner):
    """Base class for pruners ranking convolution filters by some quality criterion of the
    corresponding feature-map channels (e.g. mean channel activation L1 value).
    """
    def __init__(self, name, group_type, desired_sparsity, weights, group_dependency=None):
        super().__init__(name, group_type, desired_sparsity, weights, group_dependency)

    @property
    def activation_rank_criterion(self):
        """Name of the module attribute holding the activation statistic (set by subclasses)."""
        raise NotImplementedError

    def prune_group(self, fraction_to_prune, param, param_name, zeros_mask_dict, model=None, binary_map=None):
        if fraction_to_prune == 0:
            return
        binary_map = self.rank_and_prune_filters(fraction_to_prune, param, param_name,
                                                 zeros_mask_dict, model, binary_map)
        return binary_map

    def rank_and_prune_filters(self, fraction_to_prune, param, param_name, zeros_mask_dict, model, binary_map=None):
        """Rank filters by the collected activation statistic and prune the requested fraction."""
        assert param.dim() == 4, "This thresholding is only supported for 4D weights"
        # Use the parameter name to locate the module that has the activation sparsity statistics
        fq_name = param_name.replace(".conv", ".relu")[:-len(".weight")]
        module = distiller.find_module_by_fq_name(model, fq_name)
        if module is None:
            raise ValueError("Could not find a layer named %s in the model."
                             "\nMake sure to use assign_layer_fq_names()" % fq_name)
        if not hasattr(module, self.activation_rank_criterion):
            # BUGFIX: the original message mixed %-style and str.format placeholders
            # ("... in module %s") and passed a surplus third argument, so the module
            # name was never substituted correctly. Use numbered fields instead.
            raise ValueError("Could not find attribute \"{0}\" in module {1}"
                             "\nMake sure to use SummaryActivationStatsCollector(\"{0}\")".
                             format(self.activation_rank_criterion, fq_name))
        quality_criterion, std = getattr(module, self.activation_rank_criterion).value()
        num_filters = param.size(0)
        num_filters_to_prune = int(fraction_to_prune * num_filters)
        if num_filters_to_prune == 0:
            msglogger.info("Too few filters - can't prune %.1f%% filters", 100*fraction_to_prune)
            return
        # Keep the filters given by argsort(quality)[:-n].
        # NOTE(review): ascending argsort with [:-n] drops the n *highest*-criterion
        # filters from the keep-set, which contradicts the original "remove the
        # bottom" description -- confirm the intended ranking direction per criterion.
        filters_ordered_by_criterion = np.argsort(quality_criterion)[:-num_filters_to_prune]
        mask, binary_map = mask_from_filter_order(filters_ordered_by_criterion, param, num_filters, binary_map)
        zeros_mask_dict[param_name].mask = mask
        msglogger.info("ActivationL1RankedStructureParameterPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)",
                       param_name,
                       distiller.sparsity_3D(zeros_mask_dict[param_name].mask),
                       fraction_to_prune, num_filters_to_prune, num_filters)
        return binary_map
class ActivationAPoZRankedFilterPruner(ActivationRankedFilterPruner):
    """Ranks filters by the mean APoZ (average percentage of zeros) of their
    activation channels and prunes a specified percentage of filters.

    "Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures,"
    <NAME>, <NAME>, <NAME>, <NAME>.  ICLR 2016.
    https://arxiv.org/abs/1607.03250
    """
    @property
    def activation_rank_criterion(self):
        # Module attribute populated by the APoZ statistics collector.
        return 'apoz_channels'
class ActivationMeanRankedFilterPruner(ActivationRankedFilterPruner):
    """Ranks filters by the mean value of their activation channels and prunes a
    specified percentage of filters.

    "Pruning Convolutional Neural Networks for Resource Efficient Inference,"
    <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.  ICLR 2017.
    https://arxiv.org/abs/1611.06440
    """
    @property
    def activation_rank_criterion(self):
        # Module attribute populated by the mean-activation statistics collector.
        return 'mean_channels'
class RandomRankedFilterPruner(RankedStructureParameterPruner):
    """Prunes filters chosen uniformly at random.

    Used as a sanity-testing baseline for the criterion-based pruning algorithms.
    """
    def __init__(self, name, group_type, desired_sparsity, weights, group_dependency=None):
        super().__init__(name, group_type, desired_sparsity, weights, group_dependency)

    def prune_group(self, fraction_to_prune, param, param_name, zeros_mask_dict, model=None, binary_map=None):
        if fraction_to_prune == 0:
            return
        return self.rank_and_prune_filters(fraction_to_prune, param, param_name,
                                           zeros_mask_dict, model, binary_map)

    def rank_and_prune_filters(self, fraction_to_prune, param, param_name, zeros_mask_dict, model, binary_map=None):
        """Keep a random subset of filters; prune the rest."""
        assert param.dim() == 4, "This thresholding is only supported for 4D weights"
        num_filters = param.size(0)
        num_filters_to_prune = int(fraction_to_prune * num_filters)
        if num_filters_to_prune == 0:
            msglogger.info("Too few filters - can't prune %.1f%% filters", 100*fraction_to_prune)
            return
        # Draw a random permutation and keep all but the trailing
        # 'num_filters_to_prune' entries.
        kept_filters = np.random.permutation(num_filters)[:-num_filters_to_prune]
        mask, binary_map = mask_from_filter_order(kept_filters, param, num_filters, binary_map)
        zeros_mask_dict[param_name].mask = mask
        msglogger.info("RandomRankedFilterPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)",
                       param_name,
                       distiller.sparsity_3D(zeros_mask_dict[param_name].mask),
                       fraction_to_prune, num_filters_to_prune, num_filters)
        return binary_map
class GradientRankedFilterPruner(RankedStructureParameterPruner):
    """Ranks filters by the per-filter sum of (weight * gradient) and prunes the
    lowest-scoring filters.
    """
    def __init__(self, name, group_type, desired_sparsity, weights, group_dependency=None):
        super().__init__(name, group_type, desired_sparsity, weights, group_dependency)

    def prune_group(self, fraction_to_prune, param, param_name, zeros_mask_dict, model=None, binary_map=None):
        if fraction_to_prune == 0:
            return
        binary_map = self.rank_and_prune_filters(fraction_to_prune, param, param_name,
                                                 zeros_mask_dict, model, binary_map)
        return binary_map

    def rank_and_prune_filters(self, fraction_to_prune, param, param_name, zeros_mask_dict, model, binary_map=None):
        """Compute gradient-weighted filter scores and prune the requested fraction."""
        assert param.dim() == 4, "This thresholding is only supported for 4D weights"
        if param.grad is None:
            # Gradients only exist after a backward pass has run on this parameter.
            msglogger.info("Skipping gradient pruning of %s because it does not have a gradient yet", param_name)
            return
        num_filters = param.size(0)
        num_filters_to_prune = int(fraction_to_prune * num_filters)
        if num_filters_to_prune == 0:
            msglogger.info("Too few filters - can't prune %.1f%% filters", 100*fraction_to_prune)
            return

        # Compute the elementwise product of the filters and the filter gradients,
        # then reduce to one score per filter.
        view_filters = param.view(param.size(0), -1)
        view_filter_grads = param.grad.view(param.size(0), -1)
        weighted_gradients = view_filter_grads * view_filters
        weighted_gradients = weighted_gradients.sum(dim=1)

        # argsort of the negated scores sorts high-to-low; slicing [:-n] keeps the
        # top (num_filters - n) filters, so the n lowest-scoring filters are pruned.
        filters_ordered_by_gradient = np.argsort(-weighted_gradients.detach().cpu().numpy())[:-num_filters_to_prune]
        mask, binary_map = mask_from_filter_order(filters_ordered_by_gradient, param, num_filters, binary_map)
        zeros_mask_dict[param_name].mask = mask
        msglogger.info("GradientRankedFilterPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)",
                       param_name,
                       distiller.sparsity_3D(zeros_mask_dict[param_name].mask),
                       fraction_to_prune, num_filters_to_prune, num_filters)
        return binary_map
| [
"logging.getLogger",
"distiller.group_threshold_mask",
"torch.topk",
"distiller.volume",
"distiller.sparsity",
"distiller.find_module_by_fq_name",
"distiller.sparsity_3D",
"numpy.argsort",
"distiller.sparsity_ch",
"functools.partial",
"distiller.sparsity_blocks",
"torch.zeros",
"numpy.random... | [((734, 753), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (751, 753), False, 'import logging\n'), ((3289, 3313), 'functools.partial', 'partial', (['torch.norm'], {'p': '(1)'}), '(torch.norm, p=1)\n', (3296, 3313), False, 'from functools import partial\n'), ((3329, 3353), 'functools.partial', 'partial', (['torch.norm'], {'p': '(2)'}), '(torch.norm, p=2)\n', (3336, 3353), False, 'from functools import partial\n'), ((9148, 9239), 'distiller.group_threshold_mask', 'distiller.group_threshold_mask', (['param', '"""Filters"""', 'threshold', 'threshold_type', 'binary_map'], {}), "(param, 'Filters', threshold, threshold_type,\n binary_map)\n", (9178, 9239), False, 'import distiller\n'), ((10939, 11007), 'torch.topk', 'torch.topk', (['rows_mags', 'num_rows_to_prune'], {'largest': '(False)', 'sorted': '(True)'}), '(rows_mags, num_rows_to_prune, largest=False, sorted=True)\n', (10949, 11007), False, 'import torch\n'), ((11160, 11239), 'distiller.group_threshold_mask', 'distiller.group_threshold_mask', (['param', 'THRESHOLD_DIM', 'threshold', 'threshold_type'], {}), '(param, THRESHOLD_DIM, threshold, threshold_type)\n', (11190, 11239), False, 'import distiller\n'), ((12992, 13021), 'distiller.volume', 'distiller.volume', (['block_shape'], {}), '(block_shape)\n', (13008, 13021), False, 'import distiller\n'), ((18225, 18273), 'distiller.find_module_by_fq_name', 'distiller.find_module_by_fq_name', (['model', 'fq_name'], {}), '(model, fq_name)\n', (18257, 18273), False, 'import distiller\n'), ((6649, 6704), 'torch.topk', 'torch.topk', (['channel_mags', 'k'], {'largest': '(False)', 'sorted': '(True)'}), '(channel_mags, k, largest=False, sorted=True)\n', (6659, 6704), False, 'import torch\n'), ((8728, 8793), 'torch.topk', 'torch.topk', (['filter_mags', 'topk_filters'], {'largest': '(False)', 'sorted': '(True)'}), '(filter_mags, topk_filters, largest=False, sorted=True)\n', (8738, 8793), False, 'import torch\n'), ((9479, 9503), 'distiller.sparsity', 
'distiller.sparsity', (['mask'], {}), '(mask)\n', (9497, 9503), False, 'import distiller\n'), ((11450, 11502), 'distiller.sparsity', 'distiller.sparsity', (['zeros_mask_dict[param_name].mask'], {}), '(zeros_mask_dict[param_name].mask)\n', (11468, 11502), False, 'import distiller\n'), ((13049, 13072), 'distiller.volume', 'distiller.volume', (['param'], {}), '(param)\n', (13065, 13072), False, 'import distiller\n'), ((14347, 14400), 'torch.topk', 'torch.topk', (['block_mags', 'k'], {'largest': '(False)', 'sorted': '(True)'}), '(block_mags, k, largest=False, sorted=True)\n', (14357, 14400), False, 'import torch\n'), ((19279, 19308), 'numpy.argsort', 'np.argsort', (['quality_criterion'], {}), '(quality_criterion)\n', (19289, 19308), True, 'import numpy as np\n'), ((19663, 19718), 'distiller.sparsity_3D', 'distiller.sparsity_3D', (['zeros_mask_dict[param_name].mask'], {}), '(zeros_mask_dict[param_name].mask)\n', (19684, 19718), False, 'import distiller\n'), ((21967, 22001), 'numpy.random.permutation', 'np.random.permutation', (['num_filters'], {}), '(num_filters)\n', (21988, 22001), True, 'import numpy as np\n'), ((22334, 22389), 'distiller.sparsity_3D', 'distiller.sparsity_3D', (['zeros_mask_dict[param_name].mask'], {}), '(zeros_mask_dict[param_name].mask)\n', (22355, 22389), False, 'import distiller\n'), ((24583, 24638), 'distiller.sparsity_3D', 'distiller.sparsity_3D', (['zeros_mask_dict[param_name].mask'], {}), '(zeros_mask_dict[param_name].mask)\n', (24604, 24638), False, 'import distiller\n'), ((4970, 5007), 'functools.partial', 'partial', (['self.rank_and_prune_channels'], {}), '(self.rank_and_prune_channels)\n', (4977, 5007), False, 'from functools import partial\n'), ((7810, 7865), 'distiller.sparsity_ch', 'distiller.sparsity_ch', (['zeros_mask_dict[param_name].mask'], {}), '(zeros_mask_dict[param_name].mask)\n', (7831, 7865), False, 'import distiller\n'), ((13105, 13128), 'distiller.volume', 'distiller.volume', (['param'], {}), '(param)\n', (13121, 13128), 
False, 'import distiller\n'), ((15363, 15452), 'distiller.sparsity_blocks', 'distiller.sparsity_blocks', (['zeros_mask_dict[param_name].mask'], {'block_shape': 'block_shape'}), '(zeros_mask_dict[param_name].mask, block_shape=\n block_shape)\n', (15388, 15452), False, 'import distiller\n'), ((16702, 16726), 'torch.zeros', 'torch.zeros', (['num_filters'], {}), '(num_filters)\n', (16713, 16726), False, 'import torch\n'), ((5177, 5242), 'functools.partial', 'partial', (['self.rank_and_prune_blocks'], {'block_shape': 'self.block_shape'}), '(self.rank_and_prune_blocks, block_shape=self.block_shape)\n', (5184, 5242), False, 'from functools import partial\n')] |
import torch
import numpy as np
from typing import Union, Optional
from ..base import Flow
from .ic_helper import (
dist_deriv,
angle_deriv,
torsion_deriv,
det3x3,
init_xyz2ics,
init_ics2xyz,
ic2xyz_deriv,
)
from .pca import WhitenFlow
__all__ = [
"RelativeInternalCoordinateTransformation",
"GlobalInternalCoordinateTransformation",
"MixedCoordinateTransformation",
]
def decompose_z_matrix(z_matrix, fixed):
    """Split a z-matrix into stages for parallel (batched) reconstruction
    of Cartesian coordinates, seeded by the fixed atoms.

    Each row of the z-matrix defines a (proper or improper) torsion by four
    atom indices (>= 0); shape (n_conditioned_atoms, 4). A row becomes
    placeable once its three reference atoms (columns 1-3) are known.

    Parameters
    ----------
    z_matrix : np.ndarray
        Z-matrix definition, shape (n_conditioned_atoms, 4).
    fixed : np.ndarray
        Atom indices used to seed the reconstruction.

    Returns
    -------
    blocks : list of np.ndarray
        One z-matrix slice per reconstruction stage,
        each of shape (n_conditioned_atoms_in_block, 4).
    index2atom : np.ndarray
        index2atom[i] is the atom id placed by the i-th row in placement order.
    atom2index : np.ndarray
        atom2index[i] is the placement-order position of atom i.
    index2order : np.ndarray
        Row indices (into the conditioned z-matrix) in placement order.

    Raises
    ------
    ValueError
        If some conditioned atoms can never be reached from the fixed atoms.
    """
    placed_atoms = [fixed]   # atom ids, in the order they are placed
    stage_blocks = []        # one z-matrix slice per reconstruction stage
    stage_rows = []          # conditioned-z-matrix row ids, per stage
    visited = np.sort(fixed)
    # drop rows whose target atom is already among the fixed atoms
    remaining = z_matrix[~np.isin(z_matrix[:, 0], visited)]
    # tag every remaining row with its position in the conditioned z-matrix
    row_ids = np.arange(len(remaining))[:, None]
    remaining = np.concatenate([row_ids, remaining], axis=1)
    while len(remaining) > 0:
        # a row is placeable once atoms 2-4 of its torsion were visited
        placeable = np.all(np.isin(remaining[:, 2:], visited), axis=-1)
        if not np.any(placeable):
            raise ValueError(
                f"Z-matrix decomposition failed. "
                f"The following atoms were not reachable from the fixed atoms: \n{remaining[:,1]}"
            )
        stage_rows.append(remaining[placeable, 0])
        new_atoms = remaining[placeable, 1]
        placed_atoms.append(new_atoms)
        stage_blocks.append(remaining[placeable][:, 1:])
        visited = np.union1d(visited, new_atoms)
        remaining = remaining[~placeable]
    index2atom = np.concatenate(placed_atoms)
    atom2index = np.argsort(index2atom)
    index2order = np.concatenate(stage_rows)
    return stage_blocks, index2atom, atom2index, index2order
def slice_initial_atoms(z_matrix):
    """Identify the three seed atoms of a global z-matrix.

    Seed rows are the ones with the most ``-1`` placeholder entries
    (3, 2, and 1 placeholders for the first, second, and third atom).

    Returns
    -------
    initial_atoms : np.ndarray
        Atom ids of the three seed atoms.
    remainder : np.ndarray
        All fully specified rows (no ``-1`` entries).
    """
    placeholder_counts = (z_matrix == -1).sum(axis=-1)
    seed_rows = np.argsort(placeholder_counts)[::-1][:3]
    return z_matrix[:, 0][seed_rows], z_matrix[placeholder_counts == 0]
def normalize_torsions(torsions):
    """Rescale torsions from [-pi, pi] into [0, 1].

    Returns the rescaled torsions and the log-volume change of the map
    (one factor of 1/(2*pi) per torsion in the last dimension).
    """
    full_circle = 2 * np.pi
    scaled = (torsions + full_circle / 2) / full_circle
    dlogp = -torsions.shape[-1] * np.log(full_circle)
    return scaled, dlogp
def normalize_angles(angles):
    """Rescale angles from [0, pi] into [0, 1].

    Returns the rescaled angles and the log-volume change of the map
    (one factor of 1/pi per angle in the last dimension).
    """
    half_circle = np.pi
    scaled = angles / half_circle
    dlogp = -angles.shape[-1] * np.log(half_circle)
    return scaled, dlogp
def unnormalize_torsions(torsions):
    """Rescale torsions from [0, 1] back into [-pi, pi].

    Returns the rescaled torsions and the log-volume change of the map
    (one factor of 2*pi per torsion in the last dimension).
    """
    full_circle = 2 * np.pi
    restored = torsions * full_circle - full_circle / 2
    dlogp = torsions.shape[-1] * np.log(full_circle)
    return restored, dlogp
def unnormalize_angles(angles):
    """Rescale angles from [0, 1] back into [0, pi].

    Returns the rescaled angles and the log-volume change of the map
    (one factor of pi per angle in the last dimension).
    """
    half_circle = np.pi
    restored = angles * half_circle
    dlogp = angles.shape[-1] * np.log(half_circle)
    return restored, dlogp
class ReferenceSystemTransformation(Flow):
    """Transform the reference frame set by the first three atoms into
    internal coordinates (origin, orientation, two distances, one angle).

    The forward pass maps *from* xyz coordinates *into* internal
    coordinates. With ``normalize_angles=True`` the angle and the Euler
    torsions alpha/gamma are rescaled into the (0, 1) interval.

    Parameters
    ----------
    normalize_angles : bool
        If True, rescale angles and torsions into the (0, 1) interval.
    eps : float
        Numerical epsilon used to enforce manifold boundaries.
    enforce_boundaries : bool
        Whether to clamp values onto the valid manifold.
    raise_warnings : bool
        If True, warn when manifold boundaries are violated.
    """

    def __init__(
        self,
        normalize_angles=True,
        eps=1e-7,
        enforce_boundaries=True,
        raise_warnings=True,
    ):
        super().__init__()
        self._normalize_angles = normalize_angles
        self._eps = eps
        self._enforce_boundaries = enforce_boundaries
        self._raise_warnings = raise_warnings

    def _forward(self, x0, x1, x2, *args, **kwargs):
        """Map the first three points into origin/orientation/ICs.

        Parameters
        ----------
        x0, x1, x2 : torch.Tensor
            xyz coordinates of the first three points.

        Returns
        -------
        x0 : torch.Tensor
            Origin of the system.
        orientation : torch.Tensor
            Euler angles (alpha, beta, gamma) concatenated along the last
            dimension; alpha and gamma are rescaled to (0, 1) when
            ``normalize_angles`` is set (beta is left unnormalized).
        d01 : torch.Tensor
            Distance between points 0 and 1.
        d12 : torch.Tensor
            Distance between points 1 and 2.
        a012 : torch.Tensor
            Angle spanned by the three points.
        dlogp : torch.Tensor
            Log-determinant of the Jacobian of the transformation.
        """
        x0, d01, d12, a012, alpha, beta, gamma, dlogp = init_xyz2ics(
            x0,
            x1,
            x2,
            eps=self._eps,
            enforce_boundaries=self._enforce_boundaries,
            raise_warnings=self._raise_warnings,
        )
        if self._normalize_angles:
            # accumulate volume terms in the same order they arise:
            # the angle first, then the torsional Euler angles
            a012, dlogp_angle = normalize_angles(a012)
            dlogp += dlogp_angle
            alpha, dlogp_alpha = normalize_torsions(alpha)
            dlogp += dlogp_alpha
            # NOTE: beta is deliberately left unnormalized here.
            gamma, dlogp_gamma = normalize_torsions(gamma)
            dlogp += dlogp_gamma
        orientation = torch.cat([alpha, beta, gamma], dim=-1)
        return x0, orientation, d01, d12, a012, dlogp

    def _inverse(self, x0, orientation, d01, d12, a012, *args, **kwargs):
        """Reconstruct the first three xyz points from the reference ICs.

        Parameters
        ----------
        x0 : torch.Tensor
            Origin of the system.
        orientation : torch.Tensor
            Euler angles (alpha, beta, gamma); alpha and gamma are in
            (0, 1) when ``normalize_angles`` is set.
        d01, d12 : torch.Tensor
            Distances between points 0-1 and 1-2.
        a012 : torch.Tensor
            Angle spanned by the three points.

        Returns
        -------
        x0, x1, x2 : torch.Tensor
            xyz coordinates of the first three points.
        dlogp : torch.Tensor
            Log-determinant of the Jacobian of the transformation.
        """
        dlogp = 0
        alpha, beta, gamma = orientation.chunk(3, dim=-1)
        if self._normalize_angles:
            alpha, dlogp_alpha = unnormalize_torsions(alpha)
            dlogp += dlogp_alpha
            # NOTE: beta is deliberately left unnormalized here.
            gamma, dlogp_gamma = unnormalize_torsions(gamma)
            dlogp += dlogp_gamma
            a012, dlogp_angle = unnormalize_angles(a012)
            dlogp += dlogp_angle
        x0, x1, x2, dlogp_rec = init_ics2xyz(
            x0,
            d01,
            d12,
            a012,
            alpha,
            beta,
            gamma,
            eps=self._eps,
            enforce_boundaries=self._enforce_boundaries,
            raise_warnings=self._raise_warnings,
        )
        dlogp += dlogp_rec
        return x0, x1, x2, dlogp
class RelativeInternalCoordinateTransformation(Flow):
    """
    Internal coordinate transformation relative to a set of fixed atoms.
    Please note that the forward transformation transforms *from* xyz coordinates *into* internal coordinates.
    By default output angles and torsions are normalized and fit into a (0, 1) interval.
    Parameters:
    ----------
    z_matrix : Union[np.ndarray, torch.LongTensor]
        z matrix used for ic transformation
    fixed_atoms : np.ndarray
        atoms not affected by transformation
    normalize_angles : bool
        bring angles and torsions into (0, 1) interval
    eps : float
        numerical epsilon used to enforce manifold boundaries
    raise_warnings : bool
        raise warnings if manifold boundaries are violated
    Attributes
    ----------
    z_matrix : np.ndarray
        z matrix used for ic transformation
    fixed_atoms : np.ndarray
        atom indices that are kept as Cartesian coordinates
    dim_bonds : int
        number of bonds
    dim_angles : int
        number of angles
    dim_torsions : int
        number of torsions
    dim_fixed : int
        number of degrees of freedom for fixed atoms
    bond_indices : np.array of int
        atom ids that are connected by a bond (shape: (dim_bonds, 2))
    angle_indices : np.array of int
        atoms ids that are connected by an angle (shape: (dim_angles, 3))
    torsion_indices : np.array of int
        atoms ids that are connected by a torsion (shape: (dim_torsions, 4))
    normalize_angles : bool
        whether this transform normalizes angles and torsions to [0,1]
    """
    @property
    def z_matrix(self):
        """Z-matrix of shape (n_conditioned_atoms, 4) defining the ICs."""
        return self._z_matrix
    @property
    def fixed_atoms(self):
        """Atom indices kept as Cartesian coordinates."""
        return self._fixed_atoms
    @property
    def dim_bonds(self):
        """Number of bonds (one per z-matrix row)."""
        return len(self.z_matrix)
    @property
    def dim_angles(self):
        """Number of angles (one per z-matrix row)."""
        return len(self.z_matrix)
    @property
    def dim_torsions(self):
        """Number of torsions (one per z-matrix row)."""
        return len(self.z_matrix)
    @property
    def dim_fixed(self):
        """Cartesian degrees of freedom of the fixed atoms (3 per atom)."""
        return 3 * len(self._fixed_atoms)
    @property
    def bond_indices(self):
        """Atom id pairs connected by a bond, shape (dim_bonds, 2)."""
        return self._bond_indices
    @property
    def angle_indices(self):
        """Atom id triples connected by an angle, shape (dim_angles, 3)."""
        return self._angle_indices
    @property
    def torsion_indices(self):
        """Atom id quadruples connected by a torsion, shape (dim_torsions, 4)."""
        return self._torsion_indices
    @property
    def normalize_angles(self):
        """Whether angles and torsions are rescaled to [0, 1]."""
        return self._normalize_angles
    def __init__(
        self,
        z_matrix: Union[np.ndarray, torch.LongTensor],
        fixed_atoms: np.ndarray,
        normalize_angles: bool = True,
        eps: float = 1e-7,
        enforce_boundaries: bool = True,
        raise_warnings: bool = True,
    ):
        super().__init__()
        self._z_matrix = z_matrix
        self._fixed_atoms = fixed_atoms
        # Decompose the z-matrix into stages so that Cartesian positions
        # can be rebuilt blockwise (batched) starting from the fixed atoms.
        (
            self._z_blocks,
            self._index2atom,
            self._atom2index,
            self._index2order,
        ) = decompose_z_matrix(z_matrix, fixed_atoms)
        # Column slices of the z-matrix give the index tuples of each IC type.
        self._bond_indices = self._z_matrix[:, :2]
        self._angle_indices = self._z_matrix[:, :3]
        self._torsion_indices = self._z_matrix[:, :4]
        self._normalize_angles = normalize_angles
        self._eps = eps
        self._enforce_boundaries = enforce_boundaries
        self._raise_warnings = raise_warnings
    def _forward(self, x, with_pose=True, *args, **kwargs):
        """Transform Cartesian coordinates into internal coordinates.

        Parameters
        ----------
        x : torch.Tensor
            Cartesian coordinates; reshaped internally to (batch, n_atoms, 3).
        with_pose : bool
            Unused here; accepted for interface compatibility.

        Returns
        -------
        bonds, angles, torsions : torch.Tensor
            Internal coordinates of the conditioned atoms.
        x_fixed : torch.Tensor
            Flattened Cartesian coordinates of the fixed atoms.
        dlogp : torch.Tensor
            Log det Jacobian of the transformation.
        """
        n_batch = x.shape[0]
        x = x.view(n_batch, -1, 3)
        # compute bonds, angles, torsions
        # together with jacobians (wrt. to diagonal atom)
        bonds, jbonds = dist_deriv(
            x[:, self._z_matrix[:, 0]],
            x[:, self._z_matrix[:, 1]],
            eps=self._eps,
            enforce_boundaries=self._enforce_boundaries,
            raise_warnings=self._raise_warnings,
        )
        angles, jangles = angle_deriv(
            x[:, self._z_matrix[:, 0]],
            x[:, self._z_matrix[:, 1]],
            x[:, self._z_matrix[:, 2]],
            eps=self._eps,
            enforce_boundaries=self._enforce_boundaries,
            raise_warnings=self._raise_warnings,
        )
        torsions, jtorsions = torsion_deriv(
            x[:, self._z_matrix[:, 0]],
            x[:, self._z_matrix[:, 1]],
            x[:, self._z_matrix[:, 2]],
            x[:, self._z_matrix[:, 3]],
            eps=self._eps,
            enforce_boundaries=self._enforce_boundaries,
            raise_warnings=self._raise_warnings,
        )
        # slice fixed coordinates needed to reconstruct the system
        x_fixed = x[:, self._fixed_atoms].view(n_batch, -1)
        # aggregated induced volume change
        dlogp = 0.0
        # transforms angles from [-pi, pi] to [0, 1]
        if self._normalize_angles:
            angles, dlogp_a = normalize_angles(angles)
            torsions, dlogp_t = normalize_torsions(torsions)
            dlogp += dlogp_a + dlogp_t
        # compute volume change
        j = torch.stack([jbonds, jangles, jtorsions], dim=-2)
        dlogp += det3x3(j).abs().log().sum(dim=1, keepdim=True)
        return bonds, angles, torsions, x_fixed, dlogp
    def _inverse(self, bonds, angles, torsions, x_fixed, **kwargs):
        """Reconstruct Cartesian coordinates from internal coordinates.

        Parameters
        ----------
        bonds, angles, torsions : torch.Tensor
            Internal coordinates of the conditioned atoms.
        x_fixed : torch.Tensor
            Flattened Cartesian coordinates of the fixed atoms.

        Returns
        -------
        x : torch.Tensor
            Flattened Cartesian coordinates of all atoms, in original order.
        dlogp : torch.Tensor
            Log det Jacobian of the transformation.
        """
        # aggregated induced volume change
        dlogp = 0
        # transforms angles from [0, 1] to [-pi, pi]
        if self._normalize_angles:
            angles, dlogp_a = unnormalize_angles(angles)
            torsions, dlogp_t = unnormalize_torsions(torsions)
            dlogp += dlogp_a + dlogp_t
        # infer dimensions from input
        n_batch = x_fixed.shape[0]
        x_fixed = x_fixed.view(n_batch, -1, 3)
        n_fixed = x_fixed.shape[-2]
        n_conditioned = bonds.shape[-1]
        assert angles.shape[-1] == n_conditioned
        assert torsions.shape[-1] == n_conditioned
        # reconstruct points; initial points are the fixed points
        points = torch.empty(
            (n_batch, n_fixed + n_conditioned, 3),
            dtype=x_fixed.dtype,
            device=x_fixed.device,
        )
        points[:, :n_fixed, :] = x_fixed.view(n_batch, -1, 3)
        # blockwise reconstruction of points left
        current_index = n_fixed
        for block in self._z_blocks:
            # map atoms from z matrix
            # to indices in reconstruction order
            ref = self._atom2index[block]
            # slice three context points
            # from the already reconstructed
            # points using the indices
            context = points[:, ref[:, 1:]]
            p0 = context[:, :, 0]
            p1 = context[:, :, 1]
            p2 = context[:, :, 2]
            # obtain index of currently placed
            # point in original z-matrix
            idx = self._index2order[ref[:, 0] - len(self._fixed_atoms)]
            # get bonds, angles, torsions
            # using this z-matrix index
            b = bonds[:, idx, None]
            a = angles[:, idx, None]
            t = torsions[:, idx, None]
            # now we have three context points
            # and correct ic values to reconstruct the current point
            p, J = ic2xyz_deriv(
                p0,
                p1,
                p2,
                b,
                a,
                t,
                eps=self._eps,
                enforce_boundaries=self._enforce_boundaries,
                raise_warnings=self._raise_warnings,
            )
            # compute jacobian
            dlogp += det3x3(J).abs().log().sum(-1)[:, None]
            # update list of reconstructed points
            points[:, current_index : current_index + p.shape[1], :] = p
            current_index += p.shape[1]
        # finally make sure that atoms are sorted
        # from reconstruction order to original order
        points = points[:, self._atom2index]
        return points.view(n_batch, -1), dlogp
class GlobalInternalCoordinateTransformation(Flow):
    """
    Global internal coordinate transformation.
    Please note that the forward transformation transforms *from* xyz coordinates *into* internal coordinates.
    By default output angles and torsions are normalized and fit into a (0, 1) interval.
    Parameters
    ----------
    z_matrix : Union[np.ndarray, torch.LongTensor]
        z matrix used for ic transformation
    normalize_angles : bool
        bring angles and torsions into (0, 1) interval
    eps : float
        numerical epsilon used to enforce manifold boundaries
    raise_warnings : bool
        raise warnings if manifold boundaries are violated
    Attributes
    ----------
    z_matrix : np.ndarray
        z matrix used by the underlying relative ic transformation
    fixed_atoms : np.ndarray
        empty array, just to satisfy the interface
    dim_bonds : int
        number of bonds
    dim_angles : int
        number of angles
    dim_torsions : int
        number of torsions
    dim_fixed : int
        is zero for this transform
    bond_indices : np.array of int
        atom ids that are connected by a bond (shape: (dim_bonds, 2))
    angle_indices : np.array of int
        atoms ids that are connected by an angle (shape: (dim_angles, 3))
    torsion_indices : np.array of int
        atoms ids that are connected by a torsion (shape: (dim_torsions, 4))
    normalize_angles : bool
        whether this transform normalizes angles and torsions to [0,1]
    """
    @property
    def z_matrix(self):
        """Z-matrix used by the underlying relative IC transformation."""
        return self._rel_ic.z_matrix
    @property
    def fixed_atoms(self):
        """Empty array; this transform keeps no fixed Cartesian atoms."""
        return np.array([], dtype=np.int64)
    @property
    def dim_bonds(self):
        # two extra bonds come from the reference system (d01 and d12)
        return len(self.z_matrix) + 2
    @property
    def dim_angles(self):
        # one extra angle comes from the reference system (a012)
        return len(self.z_matrix) + 1
    @property
    def dim_torsions(self):
        """Number of torsions (one per z-matrix row)."""
        return len(self.z_matrix)
    @property
    def dim_fixed(self):
        """Always zero; no atoms stay Cartesian in the global transform."""
        return 0
    @property
    def bond_indices(self):
        """Atom id pairs connected by a bond, shape (dim_bonds, 2)."""
        fix = self._rel_ic.fixed_atoms
        # np.vstack instead of np.row_stack: row_stack is a deprecated
        # alias (deprecated in NumPy 2.0) of vstack.
        return np.vstack(
            [np.array([[fix[1], fix[0]], [fix[2], fix[1]]]), self._rel_ic.bond_indices]
        )
    @property
    def angle_indices(self):
        """Atom id triples connected by an angle, shape (dim_angles, 3)."""
        fix = self._rel_ic.fixed_atoms
        return np.vstack(
            [np.array([[fix[2], fix[1], fix[0]]]), self._rel_ic.angle_indices]
        )
    @property
    def torsion_indices(self):
        """Atom id quadruples connected by a torsion, shape (dim_torsions, 4)."""
        return self._rel_ic.torsion_indices
    @property
    def normalize_angles(self):
        """Whether angles and torsions are rescaled to [0, 1]."""
        return self._rel_ic.normalize_angles
    def __init__(
        self,
        z_matrix,
        normalize_angles=True,
        eps: float = 1e-7,
        enforce_boundaries: bool = True,
        raise_warnings: bool = True,
    ):
        super().__init__()
        # find initial atoms (rows with -1 placeholders seed the frame)
        initial_atoms, z_matrix = slice_initial_atoms(z_matrix)
        self._rel_ic = RelativeInternalCoordinateTransformation(
            z_matrix=z_matrix,
            fixed_atoms=initial_atoms,
            normalize_angles=normalize_angles,
            eps=eps,
            enforce_boundaries=enforce_boundaries,
            raise_warnings=raise_warnings,
        )
        self._ref_ic = ReferenceSystemTransformation(
            normalize_angles=normalize_angles,
            eps=eps,
            enforce_boundaries=enforce_boundaries,
            raise_warnings=raise_warnings,
        )
    def _forward(self, x, *args, **kwargs):
        """
        Parameters:
        ----------
        x: torch.Tensor
            xyz coordinates
        Returns:
        --------
        bonds: torch.Tensor
        angles: torch.Tensor
        torsions: torch.Tensor
        x0: torch.Tensor
            the systems origin point set in the first atom.
            has shape [batch, 1, 3]
        R: torch.Tensor
            global rotation of the system - 3-vector of Euler angles
            see ReferenceSystemTransformation for more details.
        dlogp: torch.Tensor
            log det jacobian of the transformation
        """
        n_batch = x.shape[0]
        x = x.view(n_batch, -1, 3)
        # transform relative system wrt reference system
        bonds, angles, torsions, x_fixed, dlogp_rel = self._rel_ic(x, *args, **kwargs)
        x_fixed = x_fixed.view(n_batch, -1, 3)
        # transform reference system
        x0, R, d01, d12, a012, dlogp_ref = self._ref_ic(
            x_fixed[:, [0]], x_fixed[:, [1]], x_fixed[:, [2]]
        )
        # gather bonds and angles (reference-system ICs come first)
        bonds = torch.cat([d01, d12, bonds], dim=-1)
        angles = torch.cat([a012, angles], dim=-1)
        # aggregate volume change
        dlogp = dlogp_rel + dlogp_ref
        return bonds, angles, torsions, x0, R, dlogp
    def _inverse(self, bonds, angles, torsions, x0, R, *args, **kwargs):
        """
        Parameters:
        -----------
        bonds: torch.Tensor
        angles: torch.Tensor
        torsions: torch.Tensor
        x0: torch.Tensor
            system's origin. should have shape [batch, 1, 3]
        R: torch.Tensor
            global rotation of the system - 3-vector of Euler angles
            see ReferenceSystemTransformation for more details.
        Returns:
        --------
        x: torch.Tensor
            xyz coordinates
        dlogp: torch.Tensor
            log det jacobian of the transformation
        """
        # get ics of reference system (the two leading bonds, first angle)
        d01 = bonds[:, [0]]
        d12 = bonds[:, [1]]
        a012 = angles[:, [0]]
        # transform reference system back
        x0, x1, x2, dlogp_ref = self._ref_ic(x0, R, d01, d12, a012, inverse=True)
        x_init = torch.cat([x0, x1, x2], dim=1)
        # now transform relative system wrt reference system back
        x, dlogp_rel = self._rel_ic(
            bonds[:, 2:], angles[:, 1:], torsions, x_init, inverse=True
        )
        # aggregate volume change
        dlogp = dlogp_rel + dlogp_ref
        return x, dlogp
class MixedCoordinateTransformation(Flow):
    """Mixed coordinate transformation.

    Combines a relative internal-coordinate transformation of the
    conditioned atoms with a whitening transformation of the fixed atoms'
    Cartesian coordinates. The forward pass maps *from* xyz coordinates
    *into* (bonds, angles, torsions, whitened fixed coordinates). Angles
    and torsions are rescaled into the (0, 1) interval by default.

    Parameters
    ----------
    data : torch.Tensor
        Samples used to fit the whitening transformation of the fixed atoms.
    z_matrix : Union[np.ndarray, torch.Tensor]
        Z-matrix used for the IC transformation.
    fixed_atoms : np.ndarray
        Atoms that are whitened instead of being transformed to ICs.
    keepdims : Optional[int]
        Number of dimensions kept by the whitening transformation.
    normalize_angles : bool
        If True, rescale angles and torsions into the (0, 1) interval.
    eps : float
        Numerical epsilon used to enforce manifold boundaries.
    enforce_boundaries : bool
        Whether to clamp values onto the valid manifold.
    raise_warnings : bool
        If True, warn when manifold boundaries are violated.
    """

    @property
    def z_matrix(self):
        """Z-matrix used for the IC transformation."""
        return self._rel_ic.z_matrix

    @property
    def fixed_atoms(self):
        """Atom indices kept as (whitened) Cartesian coordinates."""
        return self._rel_ic.fixed_atoms

    @property
    def dim_bonds(self):
        """Number of bonds."""
        return len(self.z_matrix)

    @property
    def dim_angles(self):
        """Number of angles."""
        return len(self.z_matrix)

    @property
    def dim_torsions(self):
        """Number of torsions."""
        return len(self.z_matrix)

    @property
    def dim_fixed(self):
        """Number of learnable degrees of freedom of the fixed atoms."""
        return self._whiten.keepdims

    @property
    def bond_indices(self):
        """Atom id pairs connected by a bond, shape (dim_bonds, 2)."""
        return self._rel_ic.bond_indices

    @property
    def angle_indices(self):
        """Atom id triples connected by an angle, shape (dim_angles, 3)."""
        return self._rel_ic.angle_indices

    @property
    def torsion_indices(self):
        """Atom id quadruples connected by a torsion, shape (dim_torsions, 4)."""
        return self._rel_ic.torsion_indices

    @property
    def normalize_angles(self):
        """Whether angles and torsions are rescaled to (0, 1)."""
        return self._rel_ic.normalize_angles

    def __init__(
        self,
        data: torch.Tensor,
        z_matrix: Union[np.ndarray, torch.Tensor],
        fixed_atoms: np.ndarray,
        keepdims: Optional[int] = None,
        normalize_angles=True,
        eps: float = 1e-7,
        enforce_boundaries: bool = True,
        raise_warnings: bool = True,
    ):
        super().__init__()
        self._whiten = self._setup_whitening_layer(data, fixed_atoms, keepdims=keepdims)
        self._rel_ic = RelativeInternalCoordinateTransformation(
            z_matrix=z_matrix,
            fixed_atoms=fixed_atoms,
            normalize_angles=normalize_angles,
            eps=eps,
            enforce_boundaries=enforce_boundaries,
            raise_warnings=raise_warnings,
        )

    def _setup_whitening_layer(self, data, fixed_atoms, keepdims):
        """Fit a whitening flow on the fixed-atom coordinates of ``data``."""
        n_samples = data.shape[0]
        xyz = data.view(n_samples, -1, 3)
        fixed_xyz = xyz[:, fixed_atoms].view(n_samples, -1)
        return WhitenFlow(fixed_xyz, keepdims=keepdims, whiten_inverse=False)

    def _forward(self, x, *args, **kwargs):
        """Map Cartesian coordinates to ICs plus whitened fixed coordinates.

        Parameters
        ----------
        x : torch.Tensor
            xyz coordinates.

        Returns
        -------
        bonds, angles, torsions : torch.Tensor
            Internal coordinates of the conditioned atoms.
        z_fixed : torch.Tensor
            Whitened fixed-atom coordinates.
        dlogp : torch.Tensor
            Log det Jacobian of the transformation.
        """
        batch_size = x.shape[0]
        bonds, angles, torsions, x_fixed, dlogp_ic = self._rel_ic(x)
        z_fixed, dlogp_whiten = self._whiten(x_fixed.view(batch_size, -1))
        return bonds, angles, torsions, z_fixed, dlogp_ic + dlogp_whiten

    def _inverse(self, bonds, angles, torsions, z_fixed, *args, **kwargs):
        """Map ICs plus whitened fixed coordinates back to Cartesians.

        Parameters
        ----------
        bonds, angles, torsions : torch.Tensor
            Internal coordinates of the conditioned atoms.
        z_fixed : torch.Tensor
            Whitened fixed-atom coordinates.

        Returns
        -------
        x : torch.Tensor
            xyz coordinates.
        dlogp : torch.Tensor
            Log det Jacobian of the transformation.
        """
        batch_size = z_fixed.shape[0]
        x_fixed, dlogp_whiten = self._whiten(z_fixed, inverse=True)
        x, dlogp_ic = self._rel_ic(
            bonds, angles, torsions, x_fixed.view(batch_size, -1, 3), inverse=True
        )
        return x, dlogp_ic + dlogp_whiten
| [
"numpy.union1d",
"numpy.sort",
"numpy.log",
"torch.stack",
"numpy.isin",
"numpy.any",
"numpy.argsort",
"numpy.sum",
"numpy.array",
"numpy.concatenate",
"torch.empty",
"torch.cat"
] | [((1966, 1980), 'numpy.sort', 'np.sort', (['fixed'], {}), '(fixed)\n', (1973, 1980), True, 'import numpy as np\n'), ((3117, 3138), 'numpy.concatenate', 'np.concatenate', (['atoms'], {}), '(atoms)\n', (3131, 3138), True, 'import numpy as np\n'), ((3156, 3178), 'numpy.argsort', 'np.argsort', (['index2atom'], {}), '(index2atom)\n', (3166, 3178), True, 'import numpy as np\n'), ((3197, 3218), 'numpy.concatenate', 'np.concatenate', (['order'], {}), '(order)\n', (3211, 3218), True, 'import numpy as np\n'), ((3319, 3350), 'numpy.sum', 'np.sum', (['(z_matrix == -1)'], {'axis': '(-1)'}), '(z_matrix == -1, axis=-1)\n', (3325, 3350), True, 'import numpy as np\n'), ((2073, 2103), 'numpy.isin', 'np.isin', (['z_matrix[:, 0]', 'given'], {}), '(z_matrix[:, 0], given)\n', (2080, 2103), True, 'import numpy as np\n'), ((3017, 3040), 'numpy.union1d', 'np.union1d', (['given', 'atom'], {}), '(given, atom)\n', (3027, 3040), True, 'import numpy as np\n'), ((3898, 3912), 'numpy.log', 'np.log', (['period'], {}), '(period)\n', (3904, 3912), True, 'import numpy as np\n'), ((4057, 4071), 'numpy.log', 'np.log', (['period'], {}), '(period)\n', (4063, 4071), True, 'import numpy as np\n'), ((6692, 6731), 'torch.cat', 'torch.cat', (['[alpha, beta, gamma]'], {'dim': '(-1)'}), '([alpha, beta, gamma], dim=-1)\n', (6701, 6731), False, 'import torch\n'), ((13395, 13444), 'torch.stack', 'torch.stack', (['[jbonds, jangles, jtorsions]'], {'dim': '(-2)'}), '([jbonds, jangles, jtorsions], dim=-2)\n', (13406, 13444), False, 'import torch\n'), ((14325, 14423), 'torch.empty', 'torch.empty', (['(n_batch, n_fixed + n_conditioned, 3)'], {'dtype': 'x_fixed.dtype', 'device': 'x_fixed.device'}), '((n_batch, n_fixed + n_conditioned, 3), dtype=x_fixed.dtype,\n device=x_fixed.device)\n', (14336, 14423), False, 'import torch\n'), ((17940, 17968), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (17948, 17968), True, 'import numpy as np\n'), ((20817, 20853), 'torch.cat', 
'torch.cat', (['[d01, d12, bonds]'], {'dim': '(-1)'}), '([d01, d12, bonds], dim=-1)\n', (20826, 20853), False, 'import torch\n'), ((20872, 20905), 'torch.cat', 'torch.cat', (['[a012, angles]'], {'dim': '(-1)'}), '([a012, angles], dim=-1)\n', (20881, 20905), False, 'import torch\n'), ((21935, 21965), 'torch.cat', 'torch.cat', (['[x0, x1, x2]'], {'dim': '(1)'}), '([x0, x1, x2], dim=1)\n', (21944, 21965), False, 'import torch\n'), ((2398, 2429), 'numpy.isin', 'np.isin', (['z_matrix[:, 2:]', 'given'], {}), '(z_matrix[:, 2:], given)\n', (2405, 2429), True, 'import numpy as np\n'), ((3363, 3376), 'numpy.argsort', 'np.argsort', (['s'], {}), '(s)\n', (3373, 3376), True, 'import numpy as np\n'), ((3558, 3572), 'numpy.log', 'np.log', (['period'], {}), '(period)\n', (3564, 3572), True, 'import numpy as np\n'), ((3716, 3730), 'numpy.log', 'np.log', (['period'], {}), '(period)\n', (3722, 3730), True, 'import numpy as np\n'), ((2513, 2548), 'numpy.any', 'np.any', (['can_be_placed_in_this_stage'], {}), '(can_be_placed_in_this_stage)\n', (2519, 2548), True, 'import numpy as np\n'), ((18384, 18430), 'numpy.array', 'np.array', (['[[fix[1], fix[0]], [fix[2], fix[1]]]'], {}), '([[fix[1], fix[0]], [fix[2], fix[1]]])\n', (18392, 18430), True, 'import numpy as np\n'), ((18594, 18630), 'numpy.array', 'np.array', (['[[fix[2], fix[1], fix[0]]]'], {}), '([[fix[2], fix[1], fix[0]]])\n', (18602, 18630), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from fairseq.data import data_utils
from . import BaseWrapperDataset
class TruncateDataset(BaseWrapperDataset):
    """Wrap a dataset so that each item keeps at most the first
    ``truncation_length`` tokens."""

    def __init__(self, dataset, truncation_length):
        super().__init__(dataset)
        assert truncation_length is not None
        self.truncation_length = truncation_length
        self.dataset = dataset

    def __getitem__(self, index):
        item = self.dataset[index]
        # keep only the leading tokens when the item is too long
        if item.size(0) > self.truncation_length:
            return item[:self.truncation_length]
        return item

    @property
    def sizes(self):
        # reported sizes are capped at the truncation length
        return np.minimum(self.dataset.sizes, self.truncation_length)

    def __len__(self):
        return len(self.dataset)
class RandomCropDataset(TruncateDataset):
    """Truncate a sequence by returning a random contiguous crop of at most
    ``truncation_length`` tokens."""

    def __init__(self, dataset, truncation_length, seed=1):
        super().__init__(dataset, truncation_length)
        self.seed = seed
        self.epoch = 0

    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        self.epoch = epoch

    def __getitem__(self, index):
        # seeding on (seed, epoch, index) makes crops reproducible per epoch
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            item = self.dataset[index]
            excess = item.size(0) - self.truncation_length
            if excess <= 0:
                return item
            start = np.random.randint(0, excess)
            return item[start:start + self.truncation_length]
def maybe_shorten_dataset(
    dataset,
    split,
    shorten_data_split_list,
    shorten_method,
    tokens_per_sample,
    seed,
):
    """Optionally wrap `dataset` so items fit into `tokens_per_sample`.

    Shortening applies when `shorten_data_split_list` is empty or lists
    `split`. `shorten_method` selects the wrapper: 'truncate' keeps the
    leading tokens, 'random_crop' takes a random contiguous crop; any
    other value leaves the dataset unchanged.
    """
    applies = (
        len(shorten_data_split_list) == 0
        or split in shorten_data_split_list.split(',')
    )
    if applies:
        if shorten_method == 'truncate':
            dataset = TruncateDataset(dataset, tokens_per_sample)
        elif shorten_method == 'random_crop':
            dataset = RandomCropDataset(dataset, tokens_per_sample, seed)
    return dataset
| [
"fairseq.data.data_utils.numpy_seed",
"numpy.minimum",
"numpy.random.randint"
] | [((907, 961), 'numpy.minimum', 'np.minimum', (['self.dataset.sizes', 'self.truncation_length'], {}), '(self.dataset.sizes, self.truncation_length)\n', (917, 961), True, 'import numpy as np\n'), ((1486, 1537), 'fairseq.data.data_utils.numpy_seed', 'data_utils.numpy_seed', (['self.seed', 'self.epoch', 'index'], {}), '(self.seed, self.epoch, index)\n', (1507, 1537), False, 'from fairseq.data import data_utils\n'), ((1729, 1757), 'numpy.random.randint', 'np.random.randint', (['(0)', 'excess'], {}), '(0, excess)\n', (1746, 1757), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A toy model using Mesh TensorFlow.
Using input_reader to handle the input pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import mesh_tensorflow as mtf
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-direct-third-party-import
from mesh_tensorflow.experimental import input_reader
from mesh_tensorflow.experimental import unet
from tensorflow.contrib import summary as contrib_summary
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import device_assignment
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import flags
from tensorflow.python.tpu.ops import tpu_ops
# Module-level flag handle; values are parsed from the command line.
FLAGS = flags.FLAGS
# Hardware selection.
flags.DEFINE_boolean('use_tpu', True, 'Use TPU or GPU.')
# Optimization hyperparameters.
flags.DEFINE_float('lr', 0.003, 'Learning rate.')
flags.DEFINE_float('lr_drop_steps', 20000,
                   'Learning rate drops for every `lr_drop_steps` steps.')
flags.DEFINE_float('lr_drop_rate', 0.3,
                   'Learning rate drops by this amount.')
# Training/eval loop structure.
flags.DEFINE_integer('num_train_iterations_per_loop', 500,
                     'Number of training iterations per loop.')
flags.DEFINE_integer('num_eval_iterations_per_loop', 2,
                     'Number of eval iterations per loop.')
flags.DEFINE_integer('num_training_loops', 1000,
                     'Number of training loops.')
# Mesh-TensorFlow layout and cluster configuration.
flags.DEFINE_string('mesh_shape', 'rows:4, columns:4, cores:2',
                    'mesh shape')
flags.DEFINE_string('master', '', 'Can be a headless master.')
# Checkpointing, summaries, and output locations.
flags.DEFINE_string('checkpoint_dir', '', 'Path to saved models.')
flags.DEFINE_integer('save_checkpoints_steps', 500,
                     'Frequency for saving models.')
flags.DEFINE_boolean('write_summary', True, 'Whether to write summary.')
flags.DEFINE_string('summary_dir', '', 'Path to saved summaries.')
flags.DEFINE_string('pred_output_dir', '', 'Path to saved pred results.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
class _CkptLoaderHook(tf.estimator.SessionRunHook):
  """Session hook that restores the latest checkpoint right after the
  session is created."""

  def after_create_session(self, session, coord):
    # pylint: disable=protected-access
    savers = tf.get_collection(tf.GraphKeys.SAVERS)
    if not savers:
      return
    latest = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    if latest:
      savers[0].restore(session, latest)
class MeshContext(object):
  """Creates mtf graph, mesh, and mesh implementation."""

  def __init__(self, sess, use_tpu, mesh_shape, layout_rules):
    """Discovers devices and builds the mesh implementation.

    Args:
      sess: a tf.Session used to list devices and, on TPU, to run
        tpu.initialize_system().
      use_tpu: bool; True targets a SIMD TPU mesh, False a CPU/GPU
        placement mesh.
      mesh_shape: mesh shape accepted by mtf.convert_to_shape.
      layout_rules: mtf layout rules mapping tensor dims to mesh dims.
    """
    super(MeshContext, self).__init__()
    self._use_tpu = use_tpu
    self._mesh_shape = mtf.convert_to_shape(mesh_shape)
    self._layout_rules = layout_rules
    # TPU-only attributes; remain None on CPU/GPU.
    self._d_assignment = None
    self._num_hosts = None
    self._num_cores = None
    self._cpu_devices, self._gpu_devices = self._list_cpu_gpu_devices(sess)
    if self._use_tpu:
      # Initialize the TPU system and derive host/core counts from the
      # reported topology.
      topology = sess.run(tpu.initialize_system())
      topo_object = tpu.Topology(serialized=topology)
      self._num_cores = int(np.prod(topo_object.mesh_shape))
      self._num_hosts = int(topo_object.num_tasks)
      num_cores_per_host = int(self._num_cores // self._num_hosts)
      assert num_cores_per_host == int(topo_object.num_tpus_per_task)
      # Get a device_assignment object for mtf: one replica per core.
      self._d_assignment = device_assignment.device_assignment(
          topology, computation_shape=[1, 1, 1],
          num_replicas=self._num_cores)
      self._mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
          self._mesh_shape, self._layout_rules, None, self._d_assignment)
    else:
      self._mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
          self._mesh_shape, self._layout_rules, self._gpu_devices)

  def create_graph_mesh_and_mesh_impl(self):
    """Creates mtf graph, mesh, and mesh impl.

    This function can be called inside model_fn, which might be tpu_rewritten.

    Returns:
      graph, mesh, mesh_impl
    """
    if self._use_tpu:
      assert self._d_assignment
      graph = mtf.Graph()
      # Worker 0 caches all the TPU binaries.
      replica_cache_size = 300 * 1024 * 1024  # 300M per replica.
      # Budget worker 0 for all 8 replicas' caches; the other hosts get 0 so
      # the variable placer keeps variables off them.
      worker0_mem = replica_cache_size * 8 * self._num_hosts
      devices_memory_usage = [worker0_mem] + [0] * (self._num_hosts - 1)
      var_placer = mtf.utils.BalancedVariablePlacer(self._cpu_devices,
                                                    devices_memory_usage)
      mesh = mtf.Mesh(graph, 'my_mesh', var_placer)
      mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
          self._mesh_shape, self._layout_rules, None, self._d_assignment)
      return graph, mesh, mesh_impl
    else:
      graph = mtf.Graph()
      mesh = mtf.Mesh(graph, 'my_mesh', None)
      mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
          self._mesh_shape, self._layout_rules, self._gpu_devices)
      return graph, mesh, mesh_impl

  @property
  def device_assignment(self):
    # TPU device assignment; None when running on CPU/GPU.
    return self._d_assignment

  @property
  def num_hosts(self):
    # Number of TPU hosts (tasks); None when running on CPU/GPU.
    return self._num_hosts

  @property
  def num_cores(self):
    # Total number of TPU cores; None when running on CPU/GPU.
    return self._num_cores

  @property
  def num_cores_per_host(self):
    # Valid only on TPU, where both counts were set in __init__.
    return self._num_cores // self._num_hosts

  @property
  def mesh_impl(self):
    return self._mesh_impl

  def _list_cpu_gpu_devices(self, sess):
    """Return the list of CPU and GPU (if any) devices in legacy name."""

    def _convert_to_legacy_name(n):
      # e.g. '/device:CPU:0' -> '/cpu:0'.
      n = re.sub('device:CPU', 'cpu', n)
      n = re.sub('device:GPU', 'gpu', n)
      return n

    def _sort_device_name(devices):
      # Order deterministically by (job, replica, task) parsed from the name.
      parsed = []
      for d in devices:
        m = re.match('/job:(.*)/replica:(.*)/task:(.*)/.*', d)
        parsed.append((m.group(1), int(m.group(2)), int(m.group(3)), d))
      return [_[3] for _ in sorted(parsed)]

    all_devices = sess.list_devices()
    cpus = []
    for d in all_devices:
      if d.device_type == 'CPU':
        cpus += [_convert_to_legacy_name(d.name)]
    # Drop the coordinator's CPU; it should not hold variables.
    cpus = [n for n in _sort_device_name(cpus) if 'coordinator' not in n]
    gpus = []
    for d in all_devices:
      if d.device_type == 'GPU':
        gpus += [_convert_to_legacy_name(d.name)]
    gpus = _sort_device_name(gpus)
    return cpus, gpus
def _get_model_fn(train_or_eval, mesh_context):
  """Returns _model_fn plus captured hooks and (eval) output dtypes/shapes.

  Args:
    train_or_eval: either 'train' or 'eval'; selects the graph mode.
    mesh_context: a MeshContext providing graph/mesh/mesh_impl.

  Returns:
    (_model_fn, captured_hooks, captured_output_dtypes_shapes); the two
    _CapturedObjects are filled in while _model_fn is traced.
  """
  captured_hooks = _CapturedObject()
  captured_output_dtypes_shapes = _CapturedObject()
  assert train_or_eval in ['train', 'eval']

  def _model_fn(input_fea, input_lab):
    """Creates a model, add summary, modes (train or eval), and hooks."""
    # input_fea and input_lab should be a list (laid_out_tensors).
    if not isinstance(input_fea, list):
      input_fea = [input_fea]
    if not isinstance(input_lab, list):
      input_lab = [input_lab]

    def _add_summary(lowering, train_or_eval, tf_loss, scalars, global_step):
      """Add all summaries."""
      # Export any mtf tensors in `scalars` to plain tf float32 tensors.
      for k in scalars.keys():
        if not isinstance(scalars[k], tf.Tensor):
          scalars[k] = tf.cast(
              lowering.export_to_tf_tensor(scalars[k]), tf.float32)

      def _host_loss_summary(global_step, tf_loss, **scalars):
        """Add summary.scalar in host side."""
        gs = tf.cast(global_step, tf.int64)
        sum_loss = contrib_summary.scalar(
            '{}_loss'.format(train_or_eval), tf_loss, step=gs)
        sum_ops = [sum_loss.op]
        # BUG FIX: dict.iteritems() is Python 2 only and raises
        # AttributeError under Python 3; use items() (consistent with the
        # scalars.keys() loop above).
        for description, tf_metric in scalars.items():
          sum_metric = contrib_summary.scalar(
              '{}_{}'.format(train_or_eval, description), tf_metric, step=gs)
          sum_ops.append(sum_metric)
        # Make the returned loss depend on all summary writes.
        with tf.control_dependencies(sum_ops):
          return tf.identity(tf_loss)

      if FLAGS.use_tpu:
        # Cast the global step to tf.int32, since
        # outside_compilation does not support tf.int64.
        tf_loss = tpu.outside_compilation(
            _host_loss_summary,
            tf.cast(global_step, tf.int32),
            tf_loss,
            **scalars)
      else:
        tf_loss = _host_loss_summary(
            tf.cast(global_step, tf.int32),
            tf_loss,
            **scalars)
      return tf_loss

    global_step = tf.train.get_or_create_global_step()
    graph, mesh, mesh_impl = mesh_context.create_graph_mesh_and_mesh_impl()

    with mtf.utils.outside_all_rewrites():
      # Do not tpu_rewrite this part. Inside this unet, If you use Tensorflow,
      # instead of Mesh-Tensorflor, it will cause host to tpu send/rec.
      preds, loss, scalars, bn_update_ops = (
          unet.unet_with_spatial_partition(
              mesh, mesh_impl, train_or_eval, input_fea, input_lab))

    if train_or_eval == 'train':
      var_grads = mtf.gradients(
          [loss], [v.outputs[0] for v in graph.trainable_variables])
      # Step-wise exponential decay: lr * drop_rate^(floor(step/drop_steps)).
      lr = FLAGS.lr * tf.pow(
          FLAGS.lr_drop_rate,
          tf.floor(tf.cast(global_step, tf.float32) / FLAGS.lr_drop_steps))
      scalars['learning_rate'] = lr
      optimizer = mtf.optimize.AdafactorOptimizer(learning_rate=lr)
      update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
      # This is where the actual tf graph got built.
      lowering = mtf.Lowering(graph, {mesh: mesh_impl})
      tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
      tf_update_ops.append(tf.assign_add(global_step, 1))
      tf_update_ops.extend(
          [lowering.lowered_operation(op) for op in bn_update_ops])
    else:  # train_or_eval == 'eval':
      preds = [mtf.anonymize(pred) for pred in preds]
      # This is where the actual tf graph got built.
      lowering = mtf.Lowering(graph, {mesh: mesh_impl})
      tf_preds = [tf.cast(
          lowering.export_to_tf_tensor(pred), tf.float32) for pred in preds]

    tf_loss = tf.cast(lowering.export_to_tf_tensor(loss), tf.float32)
    if FLAGS.write_summary:
      tf_loss = _add_summary(
          lowering, train_or_eval, tf_loss, scalars, global_step)
    master_to_slice_hook = mtf.MtfRestoreHook(lowering)

    if train_or_eval == 'train':
      with mtf.utils.outside_all_rewrites():
        saver = tf.train.Saver(tf.global_variables(),
                               save_relative_paths=True)
        tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
        saver_listener = mtf.MtfCheckpointSaverListener(lowering)
        slice_to_master_hook = tf.train.CheckpointSaverHook(
            FLAGS.checkpoint_dir,
            save_steps=FLAGS.save_checkpoints_steps,
            saver=saver, listeners=[saver_listener])
        captured_hooks.capture([master_to_slice_hook, slice_to_master_hook])
      return tf.group([tf_loss] + tf_update_ops)
    else:  # train_or_eval == 'eval':
      if FLAGS.use_tpu:
        tf_preds.extend([tf_loss, global_step])
        tf_preds_dtypes = [tf_pred.dtype for tf_pred in tf_preds]
        tf_preds_shapes = [tf_pred.shape for tf_pred in tf_preds]
        captured_hooks.capture([master_to_slice_hook, None])
        captured_output_dtypes_shapes.capture(
            [tf_preds_dtypes, tf_preds_shapes])
        return tpu_ops.outfeed_enqueue_tuple(tf_preds)
      else:
        tf_preds.extend([tf_loss, global_step])
        captured_hooks.capture([master_to_slice_hook, None])
        return tf_preds

  return _model_fn, captured_hooks, captured_output_dtypes_shapes
def _get_scaffold(additional_initializers):
  """Builds a Scaffold whose init ops also run `additional_initializers`."""
  global_init = control_flow_ops.group(
      tf.global_variables_initializer(),
      *additional_initializers)
  local_init = tf.group(
      tf.local_variables_initializer(),
      tf.train.Scaffold.default_local_init_op(),
      *additional_initializers)
  return tf.train.Scaffold(init_op=global_init, local_init_op=local_init)
def _print_variable_values(sess):
  """Logs name, shape and flattened value of every trainable variable.

  May give `Protocol buffer too large` error.
  """
  np.set_printoptions(precision=4, linewidth=1000)
  tf.logging.info('Printing variables.')
  tf.logging.info('===================')
  variables = tf.trainable_variables()
  for variable, value in zip(variables, sess.run(variables)):
    tf.logging.info('{}, {}'.format(variable.name, value.shape))
    tf.logging.info('{}'.format(np.array(value).flatten()))
def _train_phase(mesh_context):
  """Handles input pipeline and trains the network."""
  # Nothing to do when no training iterations are requested.
  if FLAGS.num_train_iterations_per_loop <= 0:
    return

  def _run_train_phase():
    """The real function that runs the training phase."""
    # Setup input pipeline.
    ds_creator = unet.get_dataset_creator('train')
    mtf_shapes = unet.get_input_mtf_shapes('train')

    model_train_fn, train_hooks, _ = _get_model_fn('train', mesh_context)

    if FLAGS.use_tpu:
      assert mesh_context.device_assignment
      assert mesh_context.num_cores
      # TPU path: feed data through an infeed queue and replicate the model
      # function across all cores.
      simd_input_reader = input_reader.SimdMeshImplInputReader(
          mesh_context.mesh_impl, ds_creator, mtf_shapes, is_eval_mode=False)
      train_computation = tpu.replicate(
          computation=model_train_fn,
          inputs=[[]] * mesh_context.num_cores,
          infeed_queue=simd_input_reader.infeed_queue,
          device_assignment=mesh_context.device_assignment)
    else:
      # CPU/GPU path: place the model directly on the available devices.
      placement_input_reader = input_reader.PlacementMeshImplInputReader(
          mesh_context.mesh_impl, ds_creator, mtf_shapes, is_eval_mode=False)
      train_computation = placement_input_reader.gpu_placement(model_train_fn)

    ###########################################################
    # Training.
    master_to_slice_hook, slice_to_master_hook = train_hooks.get()
    ckpt_loader_hook = _CkptLoaderHook()
    step_counter_hook = tf.train.StepCounterHook(every_n_steps=10)
    all_hooks = [ckpt_loader_hook, master_to_slice_hook,
                 slice_to_master_hook, step_counter_hook]

    if FLAGS.write_summary:
      flush_summary = contrib_summary.flush()

    with tf.train.MonitoredTrainingSession(
        master=FLAGS.master,
        scaffold=_get_scaffold(additional_initializers=[]),
        hooks=all_hooks,
        config=tf.ConfigProto(allow_soft_placement=True)) as sess:
      if FLAGS.write_summary:
        contrib_summary.initialize(session=sess)
      if FLAGS.use_tpu:
        # Start the asynchronous infeed thread before running the computation,
        # sized to exactly the number of iterations this loop will consume.
        simd_input_reader.start_infeed_thread(
            sess, FLAGS.num_train_iterations_per_loop)
      else:
        placement_input_reader.initialize(sess)

      for step in range(FLAGS.num_train_iterations_per_loop):
        sess.run(train_computation)
        if FLAGS.write_summary:
          sess.run(flush_summary)
        tf.logging.info('train steps: {}'.format(step))

  # Build and run the phase in a fresh graph, optionally inside a summary
  # writer context so contrib_summary ops actually record.
  with tf.Graph().as_default():
    if FLAGS.write_summary:
      summary_writer = contrib_summary.create_file_writer(FLAGS.summary_dir)
      with summary_writer.as_default(), (
          contrib_summary.always_record_summaries()):
        _run_train_phase()
    else:
      _run_train_phase()
def _eval_phase(mesh_context):
  """Handles input pipeline and evaluates the network."""
  # Nothing to do when no eval iterations are requested.
  if FLAGS.num_eval_iterations_per_loop <= 0:
    return

  def _run_eval_phase():
    """The real function that runs the evaluation phase."""
    # Setup input pipeline.
    ds_creator = unet.get_dataset_creator('eval')
    mtf_shapes = unet.get_input_mtf_shapes('eval')

    model_eval_fn, eval_hooks, output_dtypes_shapes = _get_model_fn(
        'eval', mesh_context)

    if FLAGS.use_tpu:
      assert mesh_context.device_assignment
      assert mesh_context.num_cores
      simd_input_reader = input_reader.SimdMeshImplInputReader(
          mesh_context.mesh_impl, ds_creator, mtf_shapes, is_eval_mode=True)
      eval_computation = tpu.replicate(
          computation=model_eval_fn,
          inputs=[[]] * mesh_context.num_cores,
          infeed_queue=simd_input_reader.infeed_queue,
          device_assignment=mesh_context.device_assignment)

      # On TPU, eval results come back via the outfeed; dequeue ops must be
      # created on the host device that owns each core.
      output_dtypes, output_shapes = output_dtypes_shapes.get()
      outfeed_dequeue_ops = []

      # Create outfeed_dequeue_ops.
      for host_id in range(mesh_context.num_hosts):
        # pylint: disable=protected-access
        with ops.device(input_reader._host_id_to_tf_device(
            host_id, external_worker=True)):
          for device_ordinal in range(mesh_context.num_cores_per_host):
            outfeed_dequeue_op = tpu_ops.outfeed_dequeue_tuple(
                dtypes=output_dtypes,
                shapes=output_shapes,
                device_ordinal=device_ordinal)

            # We don't need output other than from core 0.
            # NOTE(review): non-first cores are still dequeued, presumably to
            # drain their outfeed queues; reduce_mean keeps that cheap —
            # confirm intent.
            if outfeed_dequeue_ops:
              outfeed_dequeue_ops.append(
                  [tf.reduce_mean(x) for x in outfeed_dequeue_op])
            else:
              outfeed_dequeue_ops.append(outfeed_dequeue_op)
    else:
      # NOTE(review): is_eval_mode=False here even though this is the eval
      # phase (the TPU branch above passes True) — verify this is intended.
      placement_input_reader = input_reader.PlacementMeshImplInputReader(
          mesh_context.mesh_impl, ds_creator, mtf_shapes, is_eval_mode=False)
      eval_computation = placement_input_reader.gpu_placement(model_eval_fn)

    ###########################################################
    # Evaluation.
    master_to_slice_hook, _ = eval_hooks.get()
    ckpt_loader_hook = _CkptLoaderHook()
    all_hooks = [ckpt_loader_hook, master_to_slice_hook]

    if FLAGS.write_summary:
      flush_summary = contrib_summary.flush()

    with tf.train.MonitoredSession(
        session_creator=tf.train.ChiefSessionCreator(
            master=FLAGS.master,
            config=tf.ConfigProto(allow_soft_placement=True)),
        hooks=all_hooks) as sess:
      if FLAGS.write_summary:
        contrib_summary.initialize(session=sess)
      if FLAGS.use_tpu:
        simd_input_reader.start_infeed_thread(
            sess, FLAGS.num_eval_iterations_per_loop)
      else:
        placement_input_reader.initialize(sess)

      pprocessor = unet.PostProcessor()
      for step in range(FLAGS.num_eval_iterations_per_loop):
        # Only get results from the 0-th core.
        if FLAGS.use_tpu:
          sess.run(eval_computation)
          results = sess.run(outfeed_dequeue_ops)[0]
        else:
          results = sess.run(eval_computation)
        pprocessor.record(results, FLAGS.pred_output_dir)

        if FLAGS.write_summary:
          sess.run(flush_summary)
        tf.logging.info('eval steps: {}'.format(step))
      pprocessor.finish()

  # Build and run the phase in a fresh graph, optionally inside a summary
  # writer context so contrib_summary ops actually record.
  with tf.Graph().as_default():
    if FLAGS.write_summary:
      summary_writer = contrib_summary.create_file_writer(FLAGS.summary_dir)
      with summary_writer.as_default(), (
          contrib_summary.always_record_summaries()):
        _run_eval_phase()
    else:
      _run_eval_phase()
def _shutdown():
  """Shuts down the TPU system via a fresh session on the master."""
  session_config = tf.ConfigProto(allow_soft_placement=True)
  with tf.Session(target=FLAGS.master, config=session_config) as sess:
    sess.run(tpu.shutdown_system())
def train_and_eval():
  """Trains and evaluates MeshTensorflow model without TPUEstimator.

  TODO(lehou): Pack everything nicely as a set of APIs.
  """
  tf.logging.info('FLAGS.master: {}'.format(FLAGS.master))

  # Device discovery (and TPU system init) happens once, up front.
  session_config = tf.ConfigProto(allow_soft_placement=True)
  with tf.Session(target=FLAGS.master, config=session_config) as sess:
    mesh_context = MeshContext(
        sess, FLAGS.use_tpu, FLAGS.mesh_shape, unet.get_layout())

  # Alternate training and evaluation for the configured number of loops.
  for _ in range(FLAGS.num_training_loops):
    _train_phase(mesh_context)
    _eval_phase(mesh_context)

  if FLAGS.use_tpu:
    _shutdown()

  tf.logging.info('finished.')
def main(_):
  """App entry point: runs the full train/eval driver (flags already parsed)."""
  train_and_eval()
# Parse flags and dispatch to main() via the TF/absl app runner.
if __name__ == '__main__':
  tf.compat.v1.app.run()
| [
"mesh_tensorflow.experimental.unet.PostProcessor",
"numpy.prod",
"mesh_tensorflow.anonymize",
"mesh_tensorflow.utils.outside_all_rewrites",
"mesh_tensorflow.experimental.unet.get_input_mtf_shapes",
"mesh_tensorflow.Mesh",
"tensorflow.compat.v1.train.Scaffold.default_local_init_op",
"numpy.array",
"t... | [((1462, 1518), 'tensorflow.python.platform.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_tpu"""', '(True)', '"""Use TPU or GPU."""'], {}), "('use_tpu', True, 'Use TPU or GPU.')\n", (1482, 1518), False, 'from tensorflow.python.platform import flags\n'), ((1519, 1568), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""lr"""', '(0.003)', '"""Learning rate."""'], {}), "('lr', 0.003, 'Learning rate.')\n", (1537, 1568), False, 'from tensorflow.python.platform import flags\n'), ((1569, 1671), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""lr_drop_steps"""', '(20000)', '"""Learning rate drops for every `lr_drop_steps` steps."""'], {}), "('lr_drop_steps', 20000,\n 'Learning rate drops for every `lr_drop_steps` steps.')\n", (1587, 1671), False, 'from tensorflow.python.platform import flags\n'), ((1687, 1765), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""lr_drop_rate"""', '(0.3)', '"""Learning rate drops by this amount."""'], {}), "('lr_drop_rate', 0.3, 'Learning rate drops by this amount.')\n", (1705, 1765), False, 'from tensorflow.python.platform import flags\n'), ((1785, 1890), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_train_iterations_per_loop"""', '(500)', '"""Number of training iterations per loop."""'], {}), "('num_train_iterations_per_loop', 500,\n 'Number of training iterations per loop.')\n", (1805, 1890), False, 'from tensorflow.python.platform import flags\n'), ((1908, 2006), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_eval_iterations_per_loop"""', '(2)', '"""Number of eval iterations per loop."""'], {}), "('num_eval_iterations_per_loop', 2,\n 'Number of eval iterations per loop.')\n", (1928, 2006), False, 'from tensorflow.python.platform import flags\n'), ((2024, 2101), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_training_loops"""', 
'(1000)', '"""Number of training loops."""'], {}), "('num_training_loops', 1000, 'Number of training loops.')\n", (2044, 2101), False, 'from tensorflow.python.platform import flags\n'), ((2124, 2201), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""mesh_shape"""', '"""rows:4, columns:4, cores:2"""', '"""mesh shape"""'], {}), "('mesh_shape', 'rows:4, columns:4, cores:2', 'mesh shape')\n", (2143, 2201), False, 'from tensorflow.python.platform import flags\n'), ((2222, 2284), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""master"""', '""""""', '"""Can be a headless master."""'], {}), "('master', '', 'Can be a headless master.')\n", (2241, 2284), False, 'from tensorflow.python.platform import flags\n'), ((2286, 2352), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""checkpoint_dir"""', '""""""', '"""Path to saved models."""'], {}), "('checkpoint_dir', '', 'Path to saved models.')\n", (2305, 2352), False, 'from tensorflow.python.platform import flags\n'), ((2353, 2440), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""save_checkpoints_steps"""', '(500)', '"""Frequency for saving models."""'], {}), "('save_checkpoints_steps', 500,\n 'Frequency for saving models.')\n", (2373, 2440), False, 'from tensorflow.python.platform import flags\n'), ((2459, 2531), 'tensorflow.python.platform.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""write_summary"""', '(True)', '"""Whether to write summary."""'], {}), "('write_summary', True, 'Whether to write summary.')\n", (2479, 2531), False, 'from tensorflow.python.platform import flags\n'), ((2532, 2598), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""summary_dir"""', '""""""', '"""Path to saved summaries."""'], {}), "('summary_dir', '', 'Path to saved summaries.')\n", (2551, 2598), False, 'from tensorflow.python.platform import flags\n'), ((2599, 2672), 
'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""pred_output_dir"""', '""""""', '"""Path to saved pred results."""'], {}), "('pred_output_dir', '', 'Path to saved pred results.')\n", (2618, 2672), False, 'from tensorflow.python.platform import flags\n'), ((13067, 13115), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'linewidth': '(1000)'}), '(precision=4, linewidth=1000)\n', (13086, 13115), True, 'import numpy as np\n'), ((13118, 13156), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Printing variables."""'], {}), "('Printing variables.')\n", (13133, 13156), True, 'import tensorflow.compat.v1 as tf\n'), ((13159, 13197), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""==================="""'], {}), "('===================')\n", (13174, 13197), True, 'import tensorflow.compat.v1 as tf\n'), ((20504, 20532), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""finished."""'], {}), "('finished.')\n", (20519, 20532), True, 'import tensorflow.compat.v1 as tf\n'), ((20598, 20620), 'tensorflow.compat.v1.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (20618, 20620), True, 'import tensorflow.compat.v1 as tf\n'), ((3589, 3627), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['tf.GraphKeys.SAVERS'], {}), '(tf.GraphKeys.SAVERS)\n', (3606, 3627), True, 'import tensorflow.compat.v1 as tf\n'), ((4064, 4096), 'mesh_tensorflow.convert_to_shape', 'mtf.convert_to_shape', (['mesh_shape'], {}), '(mesh_shape)\n', (4084, 4096), True, 'import mesh_tensorflow as mtf\n'), ((9486, 9522), 'tensorflow.compat.v1.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (9520, 9522), True, 'import tensorflow.compat.v1 as tf\n'), ((11281, 11309), 'mesh_tensorflow.MtfRestoreHook', 'mtf.MtfRestoreHook', (['lowering'], {}), '(lowering)\n', (11299, 11309), True, 'import mesh_tensorflow as mtf\n'), ((13218, 13242), 
'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (13240, 13242), True, 'import tensorflow.compat.v1 as tf\n'), ((13273, 13297), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (13295, 13297), True, 'import tensorflow.compat.v1 as tf\n'), ((13710, 13743), 'mesh_tensorflow.experimental.unet.get_dataset_creator', 'unet.get_dataset_creator', (['"""train"""'], {}), "('train')\n", (13734, 13743), False, 'from mesh_tensorflow.experimental import unet\n'), ((13761, 13795), 'mesh_tensorflow.experimental.unet.get_input_mtf_shapes', 'unet.get_input_mtf_shapes', (['"""train"""'], {}), "('train')\n", (13786, 13795), False, 'from mesh_tensorflow.experimental import unet\n'), ((14813, 14855), 'tensorflow.compat.v1.train.StepCounterHook', 'tf.train.StepCounterHook', ([], {'every_n_steps': '(10)'}), '(every_n_steps=10)\n', (14837, 14855), True, 'import tensorflow.compat.v1 as tf\n'), ((16335, 16367), 'mesh_tensorflow.experimental.unet.get_dataset_creator', 'unet.get_dataset_creator', (['"""eval"""'], {}), "('eval')\n", (16359, 16367), False, 'from mesh_tensorflow.experimental import unet\n'), ((16385, 16418), 'mesh_tensorflow.experimental.unet.get_input_mtf_shapes', 'unet.get_input_mtf_shapes', (['"""eval"""'], {}), "('eval')\n", (16410, 16418), False, 'from mesh_tensorflow.experimental import unet\n'), ((3707, 3755), 'tensorflow.compat.v1.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (3733, 3755), True, 'import tensorflow.compat.v1 as tf\n'), ((4391, 4424), 'tensorflow.contrib.tpu.Topology', 'tpu.Topology', ([], {'serialized': 'topology'}), '(serialized=topology)\n', (4403, 4424), False, 'from tensorflow.contrib import tpu\n'), ((4750, 4858), 'tensorflow.contrib.tpu.python.tpu.device_assignment.device_assignment', 'device_assignment.device_assignment', (['topology'], {'computation_shape': '[1, 1, 1]', 'num_replicas': 
'self._num_cores'}), '(topology, computation_shape=[1, 1, 1],\n num_replicas=self._num_cores)\n', (4785, 4858), False, 'from tensorflow.contrib.tpu.python.tpu import device_assignment\n'), ((4901, 5000), 'mesh_tensorflow.simd_mesh_impl.SimdMeshImpl', 'mtf.simd_mesh_impl.SimdMeshImpl', (['self._mesh_shape', 'self._layout_rules', 'None', 'self._d_assignment'], {}), '(self._mesh_shape, self._layout_rules, None,\n self._d_assignment)\n', (4932, 5000), True, 'import mesh_tensorflow as mtf\n'), ((5042, 5145), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', (['self._mesh_shape', 'self._layout_rules', 'self._gpu_devices'], {}), '(self._mesh_shape, self.\n _layout_rules, self._gpu_devices)\n', (5083, 5145), True, 'import mesh_tensorflow as mtf\n'), ((5445, 5456), 'mesh_tensorflow.Graph', 'mtf.Graph', ([], {}), '()\n', (5454, 5456), True, 'import mesh_tensorflow as mtf\n'), ((5723, 5796), 'mesh_tensorflow.utils.BalancedVariablePlacer', 'mtf.utils.BalancedVariablePlacer', (['self._cpu_devices', 'devices_memory_usage'], {}), '(self._cpu_devices, devices_memory_usage)\n', (5755, 5796), True, 'import mesh_tensorflow as mtf\n'), ((5862, 5900), 'mesh_tensorflow.Mesh', 'mtf.Mesh', (['graph', '"""my_mesh"""', 'var_placer'], {}), "(graph, 'my_mesh', var_placer)\n", (5870, 5900), True, 'import mesh_tensorflow as mtf\n'), ((5919, 6018), 'mesh_tensorflow.simd_mesh_impl.SimdMeshImpl', 'mtf.simd_mesh_impl.SimdMeshImpl', (['self._mesh_shape', 'self._layout_rules', 'None', 'self._d_assignment'], {}), '(self._mesh_shape, self._layout_rules, None,\n self._d_assignment)\n', (5950, 6018), True, 'import mesh_tensorflow as mtf\n'), ((6087, 6098), 'mesh_tensorflow.Graph', 'mtf.Graph', ([], {}), '()\n', (6096, 6098), True, 'import mesh_tensorflow as mtf\n'), ((6112, 6144), 'mesh_tensorflow.Mesh', 'mtf.Mesh', (['graph', '"""my_mesh"""', 'None'], {}), "(graph, 'my_mesh', None)\n", (6120, 6144), True, 'import mesh_tensorflow as mtf\n'), ((6163, 
6266), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', (['self._mesh_shape', 'self._layout_rules', 'self._gpu_devices'], {}), '(self._mesh_shape, self.\n _layout_rules, self._gpu_devices)\n', (6204, 6266), True, 'import mesh_tensorflow as mtf\n'), ((6825, 6855), 're.sub', 're.sub', (['"""device:CPU"""', '"""cpu"""', 'n'], {}), "('device:CPU', 'cpu', n)\n", (6831, 6855), False, 'import re\n'), ((6866, 6896), 're.sub', 're.sub', (['"""device:GPU"""', '"""gpu"""', 'n'], {}), "('device:GPU', 'gpu', n)\n", (6872, 6896), False, 'import re\n'), ((9609, 9641), 'mesh_tensorflow.utils.outside_all_rewrites', 'mtf.utils.outside_all_rewrites', ([], {}), '()\n', (9639, 9641), True, 'import mesh_tensorflow as mtf\n'), ((9850, 9940), 'mesh_tensorflow.experimental.unet.unet_with_spatial_partition', 'unet.unet_with_spatial_partition', (['mesh', 'mesh_impl', 'train_or_eval', 'input_fea', 'input_lab'], {}), '(mesh, mesh_impl, train_or_eval, input_fea,\n input_lab)\n', (9882, 9940), False, 'from mesh_tensorflow.experimental import unet\n'), ((10005, 10077), 'mesh_tensorflow.gradients', 'mtf.gradients', (['[loss]', '[v.outputs[0] for v in graph.trainable_variables]'], {}), '([loss], [v.outputs[0] for v in graph.trainable_variables])\n', (10018, 10077), True, 'import mesh_tensorflow as mtf\n'), ((10281, 10330), 'mesh_tensorflow.optimize.AdafactorOptimizer', 'mtf.optimize.AdafactorOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (10312, 10330), True, 'import mesh_tensorflow as mtf\n'), ((10481, 10519), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['graph', '{mesh: mesh_impl}'], {}), '(graph, {mesh: mesh_impl})\n', (10493, 10519), True, 'import mesh_tensorflow as mtf\n'), ((10915, 10953), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['graph', '{mesh: mesh_impl}'], {}), '(graph, {mesh: mesh_impl})\n', (10927, 10953), True, 'import mesh_tensorflow as mtf\n'), ((14000, 14108), 
'mesh_tensorflow.experimental.input_reader.SimdMeshImplInputReader', 'input_reader.SimdMeshImplInputReader', (['mesh_context.mesh_impl', 'ds_creator', 'mtf_shapes'], {'is_eval_mode': '(False)'}), '(mesh_context.mesh_impl, ds_creator,\n mtf_shapes, is_eval_mode=False)\n', (14036, 14108), False, 'from mesh_tensorflow.experimental import input_reader\n'), ((14142, 14325), 'tensorflow.contrib.tpu.replicate', 'tpu.replicate', ([], {'computation': 'model_train_fn', 'inputs': '([[]] * mesh_context.num_cores)', 'infeed_queue': 'simd_input_reader.infeed_queue', 'device_assignment': 'mesh_context.device_assignment'}), '(computation=model_train_fn, inputs=[[]] * mesh_context.\n num_cores, infeed_queue=simd_input_reader.infeed_queue,\n device_assignment=mesh_context.device_assignment)\n', (14155, 14325), False, 'from tensorflow.contrib import tpu\n'), ((14400, 14513), 'mesh_tensorflow.experimental.input_reader.PlacementMeshImplInputReader', 'input_reader.PlacementMeshImplInputReader', (['mesh_context.mesh_impl', 'ds_creator', 'mtf_shapes'], {'is_eval_mode': '(False)'}), '(mesh_context.mesh_impl,\n ds_creator, mtf_shapes, is_eval_mode=False)\n', (14441, 14513), False, 'from mesh_tensorflow.experimental import input_reader\n'), ((15022, 15045), 'tensorflow.contrib.summary.flush', 'contrib_summary.flush', ([], {}), '()\n', (15043, 15045), True, 'from tensorflow.contrib import summary as contrib_summary\n'), ((15844, 15897), 'tensorflow.contrib.summary.create_file_writer', 'contrib_summary.create_file_writer', (['FLAGS.summary_dir'], {}), '(FLAGS.summary_dir)\n', (15878, 15897), True, 'from tensorflow.contrib import summary as contrib_summary\n'), ((16648, 16755), 'mesh_tensorflow.experimental.input_reader.SimdMeshImplInputReader', 'input_reader.SimdMeshImplInputReader', (['mesh_context.mesh_impl', 'ds_creator', 'mtf_shapes'], {'is_eval_mode': '(True)'}), '(mesh_context.mesh_impl, ds_creator,\n mtf_shapes, is_eval_mode=True)\n', (16684, 16755), False, 'from 
mesh_tensorflow.experimental import input_reader\n'), ((16788, 16970), 'tensorflow.contrib.tpu.replicate', 'tpu.replicate', ([], {'computation': 'model_eval_fn', 'inputs': '([[]] * mesh_context.num_cores)', 'infeed_queue': 'simd_input_reader.infeed_queue', 'device_assignment': 'mesh_context.device_assignment'}), '(computation=model_eval_fn, inputs=[[]] * mesh_context.\n num_cores, infeed_queue=simd_input_reader.infeed_queue,\n device_assignment=mesh_context.device_assignment)\n', (16801, 16970), False, 'from tensorflow.contrib import tpu\n'), ((17921, 18034), 'mesh_tensorflow.experimental.input_reader.PlacementMeshImplInputReader', 'input_reader.PlacementMeshImplInputReader', (['mesh_context.mesh_impl', 'ds_creator', 'mtf_shapes'], {'is_eval_mode': '(False)'}), '(mesh_context.mesh_impl,\n ds_creator, mtf_shapes, is_eval_mode=False)\n', (17962, 18034), False, 'from mesh_tensorflow.experimental import input_reader\n'), ((18398, 18421), 'tensorflow.contrib.summary.flush', 'contrib_summary.flush', ([], {}), '()\n', (18419, 18421), True, 'from tensorflow.contrib import summary as contrib_summary\n'), ((18929, 18949), 'mesh_tensorflow.experimental.unet.PostProcessor', 'unet.PostProcessor', ([], {}), '()\n', (18947, 18949), False, 'from mesh_tensorflow.experimental import unet\n'), ((19525, 19578), 'tensorflow.contrib.summary.create_file_writer', 'contrib_summary.create_file_writer', (['FLAGS.summary_dir'], {}), '(FLAGS.summary_dir)\n', (19559, 19578), True, 'from tensorflow.contrib import summary as contrib_summary\n'), ((19883, 19904), 'tensorflow.contrib.tpu.shutdown_system', 'tpu.shutdown_system', ([], {}), '()\n', (19902, 19904), False, 'from tensorflow.contrib import tpu\n'), ((20339, 20356), 'mesh_tensorflow.experimental.unet.get_layout', 'unet.get_layout', ([], {}), '()\n', (20354, 20356), False, 'from mesh_tensorflow.experimental import unet\n'), ((4346, 4369), 'tensorflow.contrib.tpu.initialize_system', 'tpu.initialize_system', ([], {}), '()\n', (4367, 4369), 
False, 'from tensorflow.contrib import tpu\n'), ((4453, 4484), 'numpy.prod', 'np.prod', (['topo_object.mesh_shape'], {}), '(topo_object.mesh_shape)\n', (4460, 4484), True, 'import numpy as np\n'), ((7003, 7053), 're.match', 're.match', (['"""/job:(.*)/replica:(.*)/task:(.*)/.*"""', 'd'], {}), "('/job:(.*)/replica:(.*)/task:(.*)/.*', d)\n", (7011, 7053), False, 'import re\n'), ((8537, 8567), 'tensorflow.compat.v1.cast', 'tf.cast', (['global_step', 'tf.int64'], {}), '(global_step, tf.int64)\n', (8544, 8567), True, 'import tensorflow.compat.v1 as tf\n'), ((10624, 10653), 'tensorflow.compat.v1.assign_add', 'tf.assign_add', (['global_step', '(1)'], {}), '(global_step, 1)\n', (10637, 10653), True, 'import tensorflow.compat.v1 as tf\n'), ((10805, 10824), 'mesh_tensorflow.anonymize', 'mtf.anonymize', (['pred'], {}), '(pred)\n', (10818, 10824), True, 'import mesh_tensorflow as mtf\n'), ((11355, 11387), 'mesh_tensorflow.utils.outside_all_rewrites', 'mtf.utils.outside_all_rewrites', ([], {}), '()\n', (11385, 11387), True, 'import mesh_tensorflow as mtf\n'), ((11508, 11556), 'tensorflow.compat.v1.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.SAVERS', 'saver'], {}), '(tf.GraphKeys.SAVERS, saver)\n', (11528, 11556), True, 'import tensorflow.compat.v1 as tf\n'), ((11582, 11622), 'mesh_tensorflow.MtfCheckpointSaverListener', 'mtf.MtfCheckpointSaverListener', (['lowering'], {}), '(lowering)\n', (11612, 11622), True, 'import mesh_tensorflow as mtf\n'), ((11654, 11791), 'tensorflow.compat.v1.train.CheckpointSaverHook', 'tf.train.CheckpointSaverHook', (['FLAGS.checkpoint_dir'], {'save_steps': 'FLAGS.save_checkpoints_steps', 'saver': 'saver', 'listeners': '[saver_listener]'}), '(FLAGS.checkpoint_dir, save_steps=FLAGS.\n save_checkpoints_steps, saver=saver, listeners=[saver_listener])\n', (11682, 11791), True, 'import tensorflow.compat.v1 as tf\n'), ((11916, 11951), 'tensorflow.compat.v1.group', 'tf.group', (['([tf_loss] + tf_update_ops)'], {}), '([tf_loss] + 
tf_update_ops)\n', (11924, 11951), True, 'import tensorflow.compat.v1 as tf\n'), ((12366, 12405), 'tensorflow.python.tpu.ops.tpu_ops.outfeed_enqueue_tuple', 'tpu_ops.outfeed_enqueue_tuple', (['tf_preds'], {}), '(tf_preds)\n', (12395, 12405), False, 'from tensorflow.python.tpu.ops import tpu_ops\n'), ((12741, 12774), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12772, 12774), True, 'import tensorflow.compat.v1 as tf\n'), ((12853, 12885), 'tensorflow.compat.v1.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (12883, 12885), True, 'import tensorflow.compat.v1 as tf\n'), ((12897, 12938), 'tensorflow.compat.v1.train.Scaffold.default_local_init_op', 'tf.train.Scaffold.default_local_init_op', ([], {}), '()\n', (12936, 12938), True, 'import tensorflow.compat.v1 as tf\n'), ((15311, 15351), 'tensorflow.contrib.summary.initialize', 'contrib_summary.initialize', ([], {'session': 'sess'}), '(session=sess)\n', (15337, 15351), True, 'from tensorflow.contrib import summary as contrib_summary\n'), ((15768, 15778), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (15776, 15778), True, 'import tensorflow.compat.v1 as tf\n'), ((15950, 15991), 'tensorflow.contrib.summary.always_record_summaries', 'contrib_summary.always_record_summaries', ([], {}), '()\n', (15989, 15991), True, 'from tensorflow.contrib import summary as contrib_summary\n'), ((18682, 18722), 'tensorflow.contrib.summary.initialize', 'contrib_summary.initialize', ([], {'session': 'sess'}), '(session=sess)\n', (18708, 18722), True, 'from tensorflow.contrib import summary as contrib_summary\n'), ((19449, 19459), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (19457, 19459), True, 'import tensorflow.compat.v1 as tf\n'), ((19631, 19672), 'tensorflow.contrib.summary.always_record_summaries', 'contrib_summary.always_record_summaries', ([], {}), '()\n', (19670, 19672), True, 'from tensorflow.contrib import 
summary as contrib_summary\n'), ((19818, 19859), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (19832, 19859), True, 'import tensorflow.compat.v1 as tf\n'), ((20208, 20249), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (20222, 20249), True, 'import tensorflow.compat.v1 as tf\n'), ((8940, 8972), 'tensorflow.compat.v1.control_dependencies', 'tf.control_dependencies', (['sum_ops'], {}), '(sum_ops)\n', (8963, 8972), True, 'import tensorflow.compat.v1 as tf\n'), ((8991, 9011), 'tensorflow.compat.v1.identity', 'tf.identity', (['tf_loss'], {}), '(tf_loss)\n', (9002, 9011), True, 'import tensorflow.compat.v1 as tf\n'), ((9231, 9261), 'tensorflow.compat.v1.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (9238, 9261), True, 'import tensorflow.compat.v1 as tf\n'), ((9369, 9399), 'tensorflow.compat.v1.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (9376, 9399), True, 'import tensorflow.compat.v1 as tf\n'), ((11420, 11441), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (11439, 11441), True, 'import tensorflow.compat.v1 as tf\n'), ((15220, 15261), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (15234, 15261), True, 'import tensorflow.compat.v1 as tf\n'), ((13405, 13420), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (13413, 13420), True, 'import numpy as np\n'), ((17255, 17320), 'mesh_tensorflow.experimental.input_reader._host_id_to_tf_device', 'input_reader._host_id_to_tf_device', (['host_id'], {'external_worker': '(True)'}), '(host_id, external_worker=True)\n', (17289, 17320), False, 'from mesh_tensorflow.experimental import input_reader\n'), ((17441, 17549), 'tensorflow.python.tpu.ops.tpu_ops.outfeed_dequeue_tuple', 
'tpu_ops.outfeed_dequeue_tuple', ([], {'dtypes': 'output_dtypes', 'shapes': 'output_shapes', 'device_ordinal': 'device_ordinal'}), '(dtypes=output_dtypes, shapes=output_shapes,\n device_ordinal=device_ordinal)\n', (17470, 17549), False, 'from tensorflow.python.tpu.ops import tpu_ops\n'), ((10169, 10201), 'tensorflow.compat.v1.cast', 'tf.cast', (['global_step', 'tf.float32'], {}), '(global_step, tf.float32)\n', (10176, 10201), True, 'import tensorflow.compat.v1 as tf\n'), ((18565, 18606), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (18579, 18606), True, 'import tensorflow.compat.v1 as tf\n'), ((17752, 17769), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['x'], {}), '(x)\n', (17766, 17769), True, 'import tensorflow.compat.v1 as tf\n')] |
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import math
import numpy as np
class DataGenerator:
    """Prepare scaled, windowed train/test data and serve shuffled mini-batches.

    Features and labels are split chronologically (no shuffling) into train
    and test sets, min-max scaled with statistics fitted on the training
    split only, and converted into fixed-length sliding-window sequences.
    """

    def __init__(self, config, features, labels):
        """
        Parameters
        ----------
        config : object
            Must expose ``test_size``, ``sequence_length`` and ``batch_size``.
        features, labels : array-like
            Aligned 2D arrays of input features and target values.
        """
        self.config = config
        # Chronological split (shuffle=False) preserves the time ordering.
        self.features_train, self.features_test, self.labels_train, self.labels_test = train_test_split(
            features, labels, test_size=self.config.test_size, shuffle=False)
        # Fit scalers on the training split only to avoid test-set leakage.
        self.features_scaler = MinMaxScaler()
        self.features_scaler.fit(self.features_train)
        self.labels_scaler = MinMaxScaler()
        self.labels_scaler.fit(self.labels_train)
        self.X_train, self.Y_train = self.sliding_window(
            self.features_scaler.transform(self.features_train),
            self.labels_scaler.transform(self.labels_train),
            self.config.sequence_length)
        self.X_test, self.Y_test = self.sliding_window(
            self.features_scaler.transform(self.features_test),
            self.labels_scaler.transform(self.labels_test),
            self.config.sequence_length)
        self.num_iter_per_epoch = math.ceil(len(self.X_train) / self.config.batch_size)

    def sliding_window(self, features, labels, sequence_length, step=1):
        """Build (X, Y) pairs: X is a window of ``sequence_length`` consecutive
        feature rows and Y is the label row immediately after that window.

        Returns
        -------
        X : ndarray, shape (num_windows, sequence_length, num_features)
        Y : ndarray, shape (num_windows, num_label_columns)
        """
        X = []
        Y = []
        for i in range(0, len(features) - sequence_length, step):
            X.append(features[i:i + sequence_length])
            Y.append(labels[i + sequence_length])
        return np.array(X), np.array(Y)

    def next_batch(self):
        """Yield shuffled mini-batches of training data.

        BUGFIX: the previous loop bound ``len(...) - batch_size`` silently
        dropped the trailing batch(es), so fewer than ``num_iter_per_epoch``
        batches were produced per epoch.  Iterating up to ``len(...)`` now
        yields every sample exactly once (the final batch may be smaller).
        """
        p = np.random.permutation(len(self.X_train))
        self.X_train_shuffled = self.X_train[p]
        self.Y_train_shuffled = self.Y_train[p]
        n = len(self.X_train_shuffled)
        for i in range(0, n, self.config.batch_size):
            end = min(i + self.config.batch_size, n)
            yield self.X_train_shuffled[i:end], self.Y_train_shuffled[i:end]
| [
"sklearn.model_selection.train_test_split",
"numpy.array",
"sklearn.preprocessing.MinMaxScaler"
] | [((344, 431), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'test_size': 'self.config.test_size', 'shuffle': '(False)'}), '(features, labels, test_size=self.config.test_size, shuffle\n =False)\n', (360, 431), False, 'from sklearn.model_selection import train_test_split\n'), ((459, 473), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (471, 473), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((557, 571), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (569, 571), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1372, 1383), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1380, 1383), True, 'import numpy as np\n'), ((1396, 1407), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1404, 1407), True, 'import numpy as np\n')] |
import numpy as np
from src.maths import rotationMatrix
# Fixture rotation matrices: identity plus quarter-turn multiples about
# each axis (yaw = third argument, pitch = second, roll = first).
R = rotationMatrix(0, 0, 0)
R1, R2, R3, R4 = (rotationMatrix(0, 0, k * np.pi / 2) for k in (1, 2, 3, 4))
R5, R6, R7, R8 = (rotationMatrix(0, k * np.pi / 2, 0) for k in (1, 2, 3, 4))
R9, R10, R11, R12 = (rotationMatrix(k * np.pi / 2, 0, 0) for k in (1, 2, 3, 4))
class TestRotationMatrix:
    """Compare rotationMatrix output against analytically known matrices."""

    def _matches(self, actual, expected, tol=1e-12):
        # Element-wise agreement within an absolute tolerance.
        return bool((np.abs(actual - expected) < tol).all())

    def test_determinat(self):
        # A proper rotation matrix has determinant exactly +1.
        assert np.linalg.det(R) == 1

    def test_ortonormal(self):
        # For an orthonormal matrix the inverse equals the transpose.
        assert bool((np.linalg.inv(R) == R.T).all())

    def test_rotation_matrix_0_0_0(self):
        assert self._matches(R, np.eye(3))  # identity

    def test_rotation_matrix_0_0_90(self):
        assert self._matches(R1, np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]))

    def test_rotation_matrix_0_0_180(self):
        assert self._matches(R2, np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]))

    def test_rotation_matrix_0_0_270(self):
        assert self._matches(R3, np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]))

    def test_rotation_matrix_0_0_360(self):
        assert self._matches(R4, np.eye(3))  # full turn -> identity

    def test_rotation_matrix_0_90_0(self):
        assert self._matches(R5, np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]]))

    def test_rotation_matrix_0_180_0(self):
        assert self._matches(R6, np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]))

    def test_rotation_matrix_0_270_0(self):
        assert self._matches(R7, np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]]))

    def test_rotation_matrix_0_360_0(self):
        assert self._matches(R8, np.eye(3))  # full turn -> identity

    def test_rotation_matrix_90_0_0(self):
        assert self._matches(R9, np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]]))

    def test_rotation_matrix_180_0_0(self):
        assert self._matches(R10, np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]))

    def test_rotation_matrix_270_0_0(self):
        assert self._matches(R11, np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]))

    def test_rotation_matrix_360_0_0(self):
        assert self._matches(R12, np.eye(3))  # full turn -> identity
# Negative-angle matrices (not necessary, but nice to have): rotating by
# -theta should match rotating by 2*pi - theta about the same axis.
R13, R14, R15, R16 = (rotationMatrix(0, 0, -k * np.pi / 2) for k in (1, 2, 3, 4))
R17, R18, R19, R20 = (rotationMatrix(0, -k * np.pi / 2, 0) for k in (1, 2, 3, 4))
R21, R22, R23, R24 = (rotationMatrix(-k * np.pi / 2, 0, 0) for k in (1, 2, 3, 4))
class TestRotationMatrixSign:
    """Check that rotating by -theta equals rotating by 2*pi - theta about
    the same axis (and that full turns reproduce the identity R).

    BUGFIX: the tolerance in every assertion was written as ``1e7 - 7``
    (about ten million) instead of ``1e-7``, making each comparison
    vacuously true so the tests could never fail.
    """

    TOL = 1e-7

    def test_minus_0_0_90(self):
        assert (np.abs(R1 - R15) < self.TOL).all()

    def test_minus_0_0_180(self):
        assert (np.abs(R2 - R14) < self.TOL).all()

    def test_minus_0_0_270(self):
        assert (np.abs(R3 - R13) < self.TOL).all()

    def test_rotation_matrix_0_0_360(self):
        # A -360 degree turn must reproduce the identity rotation R.
        assert (np.abs(R16 - R) < self.TOL).all()

    def test_minus_0_90_0(self):
        assert (np.abs(R5 - R19) < self.TOL).all()

    def test_minus_0_180_0(self):
        assert (np.abs(R6 - R18) < self.TOL).all()

    def test_minus_0_270_0(self):
        assert (np.abs(R7 - R17) < self.TOL).all()

    def test_minus_0_360_0(self):
        # A +360 degree turn must reproduce the identity rotation R.
        assert (np.abs(R8 - R) < self.TOL).all()

    def test_minus_90_0_0(self):
        assert (np.abs(R9 - R23) < self.TOL).all()

    def test_minus_180_0_0(self):
        assert (np.abs(R10 - R22) < self.TOL).all()

    def test_minus_270_0_0(self):
        assert (np.abs(R11 - R21) < self.TOL).all()

    def test_minus_360_0_0(self):
        assert (np.abs(R12 - R) < self.TOL).all()
| [
"src.maths.rotationMatrix",
"numpy.eye",
"numpy.abs",
"numpy.linalg.det",
"numpy.array",
"numpy.linalg.inv"
] | [((62, 85), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (76, 85), False, 'from src.maths import rotationMatrix\n'), ((92, 123), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(np.pi / 2)'], {}), '(0, 0, np.pi / 2)\n', (106, 123), False, 'from src.maths import rotationMatrix\n'), ((129, 156), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', 'np.pi'], {}), '(0, 0, np.pi)\n', (143, 156), False, 'from src.maths import rotationMatrix\n'), ((162, 197), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(3 * np.pi / 2)'], {}), '(0, 0, 3 * np.pi / 2)\n', (176, 197), False, 'from src.maths import rotationMatrix\n'), ((203, 234), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(2 * np.pi)'], {}), '(0, 0, 2 * np.pi)\n', (217, 234), False, 'from src.maths import rotationMatrix\n'), ((241, 272), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(np.pi / 2)', '(0)'], {}), '(0, np.pi / 2, 0)\n', (255, 272), False, 'from src.maths import rotationMatrix\n'), ((278, 305), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', 'np.pi', '(0)'], {}), '(0, np.pi, 0)\n', (292, 305), False, 'from src.maths import rotationMatrix\n'), ((311, 346), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(3 * np.pi / 2)', '(0)'], {}), '(0, 3 * np.pi / 2, 0)\n', (325, 346), False, 'from src.maths import rotationMatrix\n'), ((352, 383), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(2 * np.pi)', '(0)'], {}), '(0, 2 * np.pi, 0)\n', (366, 383), False, 'from src.maths import rotationMatrix\n'), ((390, 421), 'src.maths.rotationMatrix', 'rotationMatrix', (['(np.pi / 2)', '(0)', '(0)'], {}), '(np.pi / 2, 0, 0)\n', (404, 421), False, 'from src.maths import rotationMatrix\n'), ((428, 455), 'src.maths.rotationMatrix', 'rotationMatrix', (['np.pi', '(0)', '(0)'], {}), '(np.pi, 0, 0)\n', (442, 455), False, 'from src.maths import rotationMatrix\n'), ((462, 497), 
'src.maths.rotationMatrix', 'rotationMatrix', (['(3 * np.pi / 2)', '(0)', '(0)'], {}), '(3 * np.pi / 2, 0, 0)\n', (476, 497), False, 'from src.maths import rotationMatrix\n'), ((504, 535), 'src.maths.rotationMatrix', 'rotationMatrix', (['(2 * np.pi)', '(0)', '(0)'], {}), '(2 * np.pi, 0, 0)\n', (518, 535), False, 'from src.maths import rotationMatrix\n'), ((2543, 2575), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(-np.pi / 2)'], {}), '(0, 0, -np.pi / 2)\n', (2557, 2575), False, 'from src.maths import rotationMatrix\n'), ((2582, 2610), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(-np.pi)'], {}), '(0, 0, -np.pi)\n', (2596, 2610), False, 'from src.maths import rotationMatrix\n'), ((2617, 2653), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(-3 * np.pi / 2)'], {}), '(0, 0, -3 * np.pi / 2)\n', (2631, 2653), False, 'from src.maths import rotationMatrix\n'), ((2660, 2692), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(0)', '(-2 * np.pi)'], {}), '(0, 0, -2 * np.pi)\n', (2674, 2692), False, 'from src.maths import rotationMatrix\n'), ((2700, 2732), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(-np.pi / 2)', '(0)'], {}), '(0, -np.pi / 2, 0)\n', (2714, 2732), False, 'from src.maths import rotationMatrix\n'), ((2739, 2767), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(-np.pi)', '(0)'], {}), '(0, -np.pi, 0)\n', (2753, 2767), False, 'from src.maths import rotationMatrix\n'), ((2774, 2810), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(-3 * np.pi / 2)', '(0)'], {}), '(0, -3 * np.pi / 2, 0)\n', (2788, 2810), False, 'from src.maths import rotationMatrix\n'), ((2817, 2849), 'src.maths.rotationMatrix', 'rotationMatrix', (['(0)', '(-2 * np.pi)', '(0)'], {}), '(0, -2 * np.pi, 0)\n', (2831, 2849), False, 'from src.maths import rotationMatrix\n'), ((2857, 2889), 'src.maths.rotationMatrix', 'rotationMatrix', (['(-np.pi / 2)', '(0)', '(0)'], {}), '(-np.pi / 2, 0, 0)\n', (2871, 2889), False, 
'from src.maths import rotationMatrix\n'), ((2896, 2924), 'src.maths.rotationMatrix', 'rotationMatrix', (['(-np.pi)', '(0)', '(0)'], {}), '(-np.pi, 0, 0)\n', (2910, 2924), False, 'from src.maths import rotationMatrix\n'), ((2931, 2967), 'src.maths.rotationMatrix', 'rotationMatrix', (['(-3 * np.pi / 2)', '(0)', '(0)'], {}), '(-3 * np.pi / 2, 0, 0)\n', (2945, 2967), False, 'from src.maths import rotationMatrix\n'), ((2974, 3006), 'src.maths.rotationMatrix', 'rotationMatrix', (['(-2 * np.pi)', '(0)', '(0)'], {}), '(-2 * np.pi, 0, 0)\n', (2988, 3006), False, 'from src.maths import rotationMatrix\n'), ((610, 626), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (623, 626), True, 'import numpy as np\n'), ((3088, 3104), 'numpy.abs', 'np.abs', (['(R1 - R15)'], {}), '(R1 - R15)\n', (3094, 3104), True, 'import numpy as np\n'), ((3173, 3189), 'numpy.abs', 'np.abs', (['(R2 - R14)'], {}), '(R2 - R14)\n', (3179, 3189), True, 'import numpy as np\n'), ((3258, 3274), 'numpy.abs', 'np.abs', (['(R3 - R13)'], {}), '(R3 - R13)\n', (3264, 3274), True, 'import numpy as np\n'), ((3353, 3368), 'numpy.abs', 'np.abs', (['(R16 - R)'], {}), '(R16 - R)\n', (3359, 3368), True, 'import numpy as np\n'), ((3436, 3452), 'numpy.abs', 'np.abs', (['(R5 - R19)'], {}), '(R5 - R19)\n', (3442, 3452), True, 'import numpy as np\n'), ((3521, 3537), 'numpy.abs', 'np.abs', (['(R6 - R18)'], {}), '(R6 - R18)\n', (3527, 3537), True, 'import numpy as np\n'), ((3606, 3622), 'numpy.abs', 'np.abs', (['(R7 - R17)'], {}), '(R7 - R17)\n', (3612, 3622), True, 'import numpy as np\n'), ((3691, 3705), 'numpy.abs', 'np.abs', (['(R8 - R)'], {}), '(R8 - R)\n', (3697, 3705), True, 'import numpy as np\n'), ((3773, 3789), 'numpy.abs', 'np.abs', (['(R9 - R23)'], {}), '(R9 - R23)\n', (3779, 3789), True, 'import numpy as np\n'), ((3858, 3875), 'numpy.abs', 'np.abs', (['(R10 - R22)'], {}), '(R10 - R22)\n', (3864, 3875), True, 'import numpy as np\n'), ((3944, 3961), 'numpy.abs', 'np.abs', (['(R11 - R21)'], {}), '(R11 - 
R21)\n', (3950, 3961), True, 'import numpy as np\n'), ((4030, 4045), 'numpy.abs', 'np.abs', (['(R12 - R)'], {}), '(R12 - R)\n', (4036, 4045), True, 'import numpy as np\n'), ((680, 696), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (693, 696), True, 'import numpy as np\n'), ((789, 798), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (795, 798), True, 'import numpy as np\n'), ((906, 950), 'numpy.array', 'np.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 1]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n', (914, 950), True, 'import numpy as np\n'), ((1053, 1098), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]'], {}), '([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])\n', (1061, 1098), True, 'import numpy as np\n'), ((1197, 1241), 'numpy.array', 'np.array', (['[[0, -1, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n', (1205, 1241), True, 'import numpy as np\n'), ((1331, 1340), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1337, 1340), True, 'import numpy as np\n'), ((1448, 1492), 'numpy.array', 'np.array', (['[[0, 0, -1], [0, 1, 0], [1, 0, 0]]'], {}), '([[0, 0, -1], [0, 1, 0], [1, 0, 0]])\n', (1456, 1492), True, 'import numpy as np\n'), ((1595, 1640), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 1, 0], [0, 0, -1]]'], {}), '([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])\n', (1603, 1640), True, 'import numpy as np\n'), ((1739, 1783), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 0], [-1, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])\n', (1747, 1783), True, 'import numpy as np\n'), ((1873, 1882), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1879, 1882), True, 'import numpy as np\n'), ((1990, 2034), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 0, 1], [0, -1, 0]]'], {}), '([[1, 0, 0], [0, 0, 1], [0, -1, 0]])\n', (1998, 2034), True, 'import numpy as np\n'), ((2138, 2183), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, -1, 0], [0, 0, -1]]'], {}), '([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n', (2146, 2183), True, 'import numpy as 
np\n'), ((2296, 2340), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 0, -1], [0, 1, 0]]'], {}), '([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n', (2304, 2340), True, 'import numpy as np\n'), ((2440, 2449), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2446, 2449), True, 'import numpy as np\n')] |
#################################################################################
#Script to calculate an input percentile for precipitation totals as a function of
#space, smooth window-to-window variability in percentile threshold via Fourier
#harmonics, and saves the result in netCDF4 format. Loads .npy files saved from
#calcWindowStats.py
#Arguments
#---------
#length : int
# Total number of days in the window.
#percentile : int or float
# Percentile to calculate.
#components : int
# Number of Fourier harmonics to use in the smoothing. components=3 will use
# wavenumbers 0, 1, 2, and 3 to smooth the raw signal.
#Author : <NAME>
#Last Updated : June 2021
#################################################################################
import numpy as np
from netCDF4 import Dataset, date2num
import datetime
import argparse
def getPercentiles(date, length, q):
    """Load one window's precipitation totals and reduce them to the qth
    percentile at every grid point.

    Parameters
    ----------
    date : datetime obj.
        Begin date of the window; its month/day select which file to load.
    length : int
        Window length in days (part of the file path).
    q : int
        Percentile to calculate.

    Returns
    -------
    numpy.ndarray, shape (y, x)
        qth percentile of precipitation for the window, NaNs ignored.
    """
    # Zero-pad month/day so e.g. January 1 maps to "0101" in the file name.
    mmdd = f"{date.month:02d}{date.day:02d}"
    path = f'/scratch/tdickinson/Livneh/windows/{length}/totals/precip.{mmdd}.npy'
    return np.nanpercentile(np.load(path), q=q, axis=0)
def fourierSeries(period, N):
    """Project each spatial column of *period* onto the first N Fourier
    harmonics of the annual cycle.

    Parameters
    ----------
    period : arr-like, shape (t, s)
        Raw percentile series: t time windows by s grid points.
    N : int
        Highest harmonic (wavenumber) to compute.

    Returns
    -------
    numpy.ndarray, shape (N+1, 2, s)
        harmonics[n, 0] holds a_n and harmonics[n, 1] holds b_n for every
        grid point.  Note a_0 comes out as twice the time mean; it is
        halved during reconstruction.
    """
    T = period.shape[0]
    t = np.arange(T)
    harmonics = np.zeros((N + 1, 2, period.shape[1]))
    for n in range(N + 1):
        # (2/T)-scaled projections onto the cos/sin basis at wavenumber n.
        phase = 2 * np.pi * n * t / T
        harmonics[n, 0, :] = 2 / T * (period * np.cos(phase)[:, np.newaxis]).sum(axis=0)
        harmonics[n, 1, :] = 2 / T * (period * np.sin(phase)[:, np.newaxis]).sum(axis=0)
    return harmonics
def reconstruct(P, anbn):
    """Reconstruct a smoothed signal from a reduced set of Fourier harmonics.

    Parameters
    ----------
    P : int
        Length of signal to reconstruct (P=365 for the full year).
    anbn : arr-like, shape (N+1, 2, s)
        a and b Fourier coefficients, as returned by fourierSeries.

    Returns
    -------
    result : array, shape (P, s)
        Reconstructed time series.

    Notes
    -----
    BUGFIX: the n=0 coefficient was previously halved *in place*
    (``a /= 2`` on a view into ``anbn``), silently corrupting the caller's
    coefficient array so that repeated calls returned different results.
    The halving is now done on a copy.
    """
    result = np.zeros((P, anbn.shape[-1]))
    t = np.arange(P)
    for n, (a, b) in enumerate(anbn):
        if n == 0:
            # Mean term uses a_0/2; do NOT mutate the caller's array.
            a = a / 2
        aTerm = a[np.newaxis, :] * np.cos(2 * np.pi * n * t / P)[:, np.newaxis]
        bTerm = b[np.newaxis, :] * np.sin(2 * np.pi * n * t / P)[:, np.newaxis]
        result += aTerm + bTerm
    return result
def consecutive(data, stepsize=1):
    """Group a 1D index array into runs of consecutive elements.

    Parameters
    ----------
    data : arr-like
        NumPy array of (sorted) indices.
    stepsize : int, default = 1
        Difference between successive elements that keeps them in the
        same run; the default groups strictly consecutive indices.

    Returns
    -------
    list of numpy.ndarray
        One array per run of consecutive elements.
    """
    # A run ends wherever the gap to the next element differs from stepsize.
    breaks = np.where(np.diff(data) != stepsize)[0] + 1
    return np.split(data, breaks)
def fixNegativeThresholds(raw, smoothed):
    """Remove unphysical, negative precipitation thresholds introduced by
    the truncated-Fourier smoothing.

    Each run of negative (<= 0) smoothed values in a column is replaced by
    the mean of the *raw* thresholds over the same time indices.

    Parameters
    ----------
    raw : arr-like
        Window-by-window percentiles before smoothing.
    smoothed : arr-like
        Thresholds after Fourier smoothing.

    Returns
    -------
    numpy.ndarray
        Copy of *smoothed* with negative runs replaced.
    """
    repaired = smoothed.copy()
    for col in range(repaired.shape[-1]):
        negative = np.where(smoothed[:, col] <= 0)[0]
        # Skip columns with nothing to fix and all-NaN columns
        # (NaN comparisons are False, so those yield no indices anyway).
        if negative.size == 0 or all(np.isnan(smoothed[:, col])):
            continue
        for run in consecutive(negative):
            repaired[run, col] = np.mean(raw[run, col])
    return repaired
def netCDF4Writer(data, name, length, q, n):
    """Create a netCDF4-Classic file holding the precipitation thresholds.

    Inputs
    ------
    data : dict
        Dictionary holding 'lat', 'lon' (1D coordinate arrays) and
        'threshold' (array of shape (time, lat, lon)).
    name : str
        Name for the netCDF file, path included.
    length : int
        Length of window. Used for file metadata.
    q : int
        Percentile used to form thresholds. Used for file metadata.
    n : int
        Number of retained Fourier harmonics. Used for file metadata.

    Notes
    -----
    BUGFIX: the date-building loop reused ``n`` as its loop variable,
    shadowing the harmonics parameter; the loop now uses its own index.
    """
    dataset = Dataset(name, 'w', format='NETCDF4_CLASSIC')
    # Dimensions: latitude, longitude and time (one entry per window).
    dataset.createDimension('lat', data['lat'].size)
    dataset.createDimension('lon', data['lon'].size)
    dataset.createDimension('time', data['threshold'].shape[0])
    # Compressed variables (zlib level 6 with byte shuffling).
    lat = dataset.createVariable('lat', np.float64, ('lat',), zlib=True, shuffle=True, complevel=6, fill_value=None)
    lon = dataset.createVariable('lon', np.float64, ('lon',), zlib=True, shuffle=True, complevel=6, fill_value=None)
    time = dataset.createVariable('time', np.float64, ('time',), zlib=True, shuffle=True, complevel=6, fill_value=None)
    threshold = dataset.createVariable('threshold', np.float64, ('time', 'lat', 'lon'), zlib=True, shuffle=True, complevel=6, fill_value=None)
    # Coordinate metadata (CF-style attributes).
    lat.standard_name = 'latitude'
    lat.units = 'degree_north'
    lat.actual_range = np.array([np.nanmin(data['lat']), np.nanmax(data['lat'])])
    lon.standard_name = 'longitude'
    lon.units = 'degree_east'
    lon.actual_range = np.array([np.nanmin(data['lon']), np.nanmax(data['lon'])])
    time.standard_name = 'time'
    time.calendar = 'standard'
    time.units = 'days since 1915-01-01 00:00:00'
    threshold.standard_name = 'threshold'
    threshold.long_name = f'threshold for {length}-day extreme precipitation events (percentile={q})'
    threshold.units = 'mm'
    dataset.description = f'File containing precipitation thresholds for the CONUS for each window of length {length} of the year. Thresholds were calculated using the {q} percentile and smoothed using the first {n} Fourier harmonics. Original data source was daily Livneh data plus interpolated daily PRISM data post 2011. The year attribute in the time object is arbitrary.'
    today = datetime.datetime.today()
    dataset.history = 'Created %d/%d/%d' % (today.month, today.day, today.year)
    # Store the coordinate and threshold data.
    lat[:] = data['lat']
    lon[:] = data['lon']
    # The year is an arbitrary placeholder; only day-of-year ordering matters.
    base = datetime.datetime(year=1915, month=1, day=1)
    dates = [base + i * datetime.timedelta(days=1)
             for i in range(data['threshold'].shape[0])]
    time[:] = date2num(dates, units=time.units, calendar=time.calendar)
    threshold[:] = data['threshold']
    dataset.close()
    return
# ---- Command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--length", type=int, help="number of days in window")
parser.add_argument("-p", "--percentile", type=float, help="percentile to calculate")
parser.add_argument("-c", "--components", type=int, help="number of Fourier harmonics to keep")
args = parser.parse_args()
length = args.length
percentile = args.percentile
numComponents = args.components
# Arbitrary reference year used only to step through the calendar's windows.
begin = datetime.datetime(month=1, day=1, year=1915)
data = {}
# Read the grid coordinates from one Livneh source file.
with Dataset('/scratch/tdickinson/Livneh/prec.1915.nc','r') as nc:
    data['lat'] = nc.variables['lat'][:]
    data['lon'] = nc.variables['lon'][:]
numLats = data['lat'].size
numLons = data['lon'].size
# Raw qth-percentile thresholds for each of the 365 calendar windows.
percs = np.zeros((365, numLats, numLons))*np.nan
for i in range(percs.shape[0]):
    date = begin + datetime.timedelta(days=i)
    print(date)
    percs[i,:,:] = getPercentiles(date=date, length=length, q=percentile)
# Smooth window-to-window variability with a truncated Fourier series, then
# repair any unphysical negative thresholds introduced by the truncation.
percs = percs.reshape(365, numLats*numLons)
coefs = fourierSeries(period=percs, N=numComponents)
threshold = reconstruct(P=365, anbn=coefs)
data['threshold'] = fixNegativeThresholds(raw=percs, smoothed=threshold)
data['threshold'] = data['threshold'].reshape(365, numLats, numLons)
# Write the final thresholds to netCDF.
fileName = f'/scratch/tdickinson/files/livnehPRISM.thresholds.q{int(percentile)}.n{length}.nc'
netCDF4Writer(data=data, name=fileName, length=length, q=percentile, n=numComponents)
| [
"numpy.nanpercentile",
"numpy.array",
"numpy.sin",
"datetime.datetime.today",
"numpy.nanmin",
"datetime.timedelta",
"numpy.arange",
"datetime.datetime",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.where",
"netCDF4.Dataset",
"numpy.diff",
"numpy.nanmax",
"netCDF4.date2num",
"numpy.i... | [((8394, 8419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8417, 8419), False, 'import argparse\n'), ((8801, 8845), 'datetime.datetime', 'datetime.datetime', ([], {'month': '(1)', 'day': '(1)', 'year': '(1915)'}), '(month=1, day=1, year=1915)\n', (8818, 8845), False, 'import datetime\n'), ((1644, 1657), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1651, 1657), True, 'import numpy as np\n'), ((1671, 1706), 'numpy.nanpercentile', 'np.nanpercentile', (['data'], {'q': 'q', 'axis': '(0)'}), '(data, q=q, axis=0)\n', (1687, 1706), True, 'import numpy as np\n'), ((2415, 2452), 'numpy.zeros', 'np.zeros', (['(N + 1, 2, period.shape[1])'], {}), '((N + 1, 2, period.shape[1]))\n', (2423, 2452), True, 'import numpy as np\n'), ((2481, 2493), 'numpy.arange', 'np.arange', (['T'], {}), '(T)\n', (2490, 2493), True, 'import numpy as np\n'), ((3292, 3321), 'numpy.zeros', 'np.zeros', (['(P, anbn.shape[-1])'], {}), '((P, anbn.shape[-1]))\n', (3300, 3321), True, 'import numpy as np\n'), ((3330, 3342), 'numpy.arange', 'np.arange', (['P'], {}), '(P)\n', (3339, 3342), True, 'import numpy as np\n'), ((5929, 5973), 'netCDF4.Dataset', 'Dataset', (['name', '"""w"""'], {'format': '"""NETCDF4_CLASSIC"""'}), "(name, 'w', format='NETCDF4_CLASSIC')\n", (5936, 5973), False, 'from netCDF4 import Dataset, date2num\n'), ((6899, 6921), 'numpy.nanmin', 'np.nanmin', (["data['lat']"], {}), "(data['lat'])\n", (6908, 6921), True, 'import numpy as np\n'), ((6936, 6958), 'numpy.nanmax', 'np.nanmax', (["data['lat']"], {}), "(data['lat'])\n", (6945, 6958), True, 'import numpy as np\n'), ((6982, 7010), 'numpy.array', 'np.array', (['[minimum, maximum]'], {}), '([minimum, maximum])\n', (6990, 7010), True, 'import numpy as np\n'), ((7091, 7113), 'numpy.nanmin', 'np.nanmin', (["data['lon']"], {}), "(data['lon'])\n", (7100, 7113), True, 'import numpy as np\n'), ((7128, 7150), 'numpy.nanmax', 'np.nanmax', (["data['lon']"], {}), "(data['lon'])\n", (7137, 7150), True, 
'import numpy as np\n'), ((7174, 7202), 'numpy.array', 'np.array', (['[minimum, maximum]'], {}), '([minimum, maximum])\n', (7182, 7202), True, 'import numpy as np\n'), ((7878, 7903), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (7901, 7903), False, 'import datetime\n'), ((8258, 8315), 'netCDF4.date2num', 'date2num', (['dates'], {'units': 'time.units', 'calendar': 'time.calendar'}), '(dates, units=time.units, calendar=time.calendar)\n', (8266, 8315), False, 'from netCDF4 import Dataset, date2num\n'), ((8862, 8917), 'netCDF4.Dataset', 'Dataset', (['"""/scratch/tdickinson/Livneh/prec.1915.nc"""', '"""r"""'], {}), "('/scratch/tdickinson/Livneh/prec.1915.nc', 'r')\n", (8869, 8917), False, 'from netCDF4 import Dataset, date2num\n'), ((9070, 9103), 'numpy.zeros', 'np.zeros', (['(365, numLats, numLons)'], {}), '((365, numLats, numLons))\n', (9078, 9103), True, 'import numpy as np\n'), ((9162, 9188), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (9180, 9188), False, 'import datetime\n'), ((5028, 5057), 'numpy.where', 'np.where', (['(smoothed[:, i] <= 0)'], {}), '(smoothed[:, i] <= 0)\n', (5036, 5057), True, 'import numpy as np\n'), ((3453, 3482), 'numpy.cos', 'np.cos', (['(2 * np.pi * n * t / P)'], {}), '(2 * np.pi * n * t / P)\n', (3459, 3482), True, 'import numpy as np\n'), ((3523, 3552), 'numpy.sin', 'np.sin', (['(2 * np.pi * n * t / P)'], {}), '(2 * np.pi * n * t / P)\n', (3529, 3552), True, 'import numpy as np\n'), ((5075, 5099), 'numpy.isnan', 'np.isnan', (['smoothed[:, i]'], {}), '(smoothed[:, i])\n', (5083, 5099), True, 'import numpy as np\n'), ((5259, 5277), 'numpy.mean', 'np.mean', (['raw[j, i]'], {}), '(raw[j, i])\n', (5266, 5277), True, 'import numpy as np\n'), ((8167, 8211), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(1915)', 'month': '(1)', 'day': '(1)'}), '(year=1915, month=1, day=1)\n', (8184, 8211), False, 'import datetime\n'), ((8216, 8242), 'datetime.timedelta', 
'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8234, 8242), False, 'import datetime\n'), ((4166, 4179), 'numpy.diff', 'np.diff', (['data'], {}), '(data)\n', (4173, 4179), True, 'import numpy as np\n'), ((2546, 2575), 'numpy.cos', 'np.cos', (['(2 * np.pi * n * t / T)'], {}), '(2 * np.pi * n * t / T)\n', (2552, 2575), True, 'import numpy as np\n'), ((2622, 2651), 'numpy.sin', 'np.sin', (['(2 * np.pi * n * t / T)'], {}), '(2 * np.pi * n * t / T)\n', (2628, 2651), True, 'import numpy as np\n')] |
import sys
import os
import yaml
import argparse
import numpy as np
import pandas as pd
import csv
import random
import stat
import glob
import subprocess
from statistics import mean
from pprint import pprint, pformat
import geopandas
from shapely.geometry import Point
from math import sin, cos, atan2, sqrt, pi
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.algorithms.moo.nsga3 import NSGA3
from pymoo.algorithms.moo.moead import MOEAD, ParallelMOEAD
from pymoo.factory import get_sampling, get_crossover, get_mutation, \
get_problem, get_reference_directions
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
from pymoo.core.problem import Problem
from pymoo.factory import get_performance_indicator
from moo_algs.bce_moead import BCEMOEAD
import time
from datetime import timedelta
# Absolute directory containing this script; base for all relative paths
# (SWEEP run directories, input_csv files, the execution log).
work_dir = os.path.dirname(os.path.abspath(__file__))
# Path of the execution log used by MOO_log(); assigned in __main__ from
# the --exec_log_file command-line argument.
EXEC_LOG_FILE = None
# Whether simulations are executed through the QCG PilotJob manager;
# overridden in __main__ from the --USE_PJ command-line argument.
USE_PJ = False
# QCG PilotJob LocalManager instance, created in __main__ only when USE_PJ.
QCG_MANAGER = None
class dict_to_obj:
    """Recursively turn a dict into an object whose keys are attributes.

    Nested dicts become nested ``dict_to_obj`` instances.  Lists and
    tuples are walked element-wise: dict elements are converted, every
    other element is kept as-is, and the sequence always comes back as a
    plain list.  A non-dict argument raises AssertionError.
    """

    def __init__(self, in_dict: dict):
        assert isinstance(in_dict, dict)
        for key, val in in_dict.items():
            setattr(self, key, self._convert(val))

    @staticmethod
    def _convert(val):
        # Convert a single value: recurse into dicts, map over sequences.
        if isinstance(val, (list, tuple)):
            return [dict_to_obj(item) if isinstance(item, dict) else item
                    for item in val]
        if isinstance(val, dict):
            return dict_to_obj(val)
        return val
def MOO_log(msg):
    """Append *msg* (plus a newline) to the global EXEC_LOG_FILE."""
    with open(EXEC_LOG_FILE, "a") as fh:
        fh.write("{}\n".format(msg))
def read_MOO_setting_yaml():
    """Load and return the MOO configuration dict from MOO_setting.yaml.

    The file is looked up next to this script (in *work_dir*).
    """
    cfg_path = os.path.join(work_dir, "MOO_setting.yaml")
    with open(cfg_path) as cfg_file:
        return yaml.safe_load(cfg_file)
class FLEE_MOO_Problem(Problem):
    """pymoo multi-objective problem for siting refugee camp "Z".

    The single decision variable is an integer index into
    ``accessible_camp_ipc.csv`` (a table of candidate camp coordinates with
    IPC and accessibility data).  Evaluating an individual runs the flee
    simulation in a fresh ``SWEEP/<n>`` directory and extracts five
    objectives:

      1. average distance travelled by agents arriving at the camp (minimize)
      2. camp population on the last simulated day (maximize, negated)
      3. average remaining camp capacity over the simulation (minimize)
      4. IPC (food-insecurity) phase at the camp location (minimize)
      5. accessibility score at the camp location (maximize, negated)
    """

    def __init__(self, execution_mode, simulation_period, cores,
                 work_dir=work_dir):
        # TODO: add input variables to MOO_setting.yaml file
        # n_var=1: one index into the candidate-camp table;
        # xu=19688 is assumed to be the last valid row index of
        # accessible_camp_ipc.csv -- TODO confirm against that file.
        super().__init__(n_var=1,
                         n_obj=5,
                         xl=np.array([0]),
                         xu=np.array([19688]))
        self.work_dir = work_dir
        # number of SWEEP run directories generated so far
        self.cnt_SWEEP_dir = 0
        self.execution_mode = execution_mode
        self.simulation_period = simulation_period
        self.cores = cores

    def avg_distance(self, agents_out_files, camp_name):
        """Return the mean ``distance_travelled`` of agents located in
        *camp_name* that moved, aggregated over all agents.out.* files.

        Side effect: writes the filtered rows to ``df_agents.out.csv``
        next to the first input file.
        """
        df_array = [pd.read_csv(filename, index_col=None, header=0)
                    for filename in agents_out_files]
        df = pd.concat(df_array, axis=0, ignore_index=True)

        # keep only agents in the target camp that actually moved
        df = df[(df["agent location"] == camp_name) &
                (df["distance_moved_this_timestep"] > 0)
                ]

        df.to_csv(os.path.join(
            os.path.dirname(agents_out_files[0]), "df_agents.out.csv"),
            sep=",",
            mode="w",
            index=False,
            encoding='utf-8'
        )

        return df["distance_travelled"].mean()

    def find_closest_location_to_camp(self, camp_lon, camp_lat):
        """Return ``(name, distance_km)`` of the South_Sudan location in
        ``input_csv/locations.csv`` closest to the given camp coordinates,
        using the haversine great-circle distance.
        """
        # Earth radius in kilometres and degrees->radians factor
        R = 6371
        p = pi / 180
        dist = []
        locations = []
        # Read lat/lon columns of locations.csv row by row.
        locations_path = os.path.join(self.work_dir, "input_csv", "locations.csv")
        with open(locations_path, newline='') as csvfile:
            reader = csv.reader(csvfile)
            next(reader)  # skip header
            for row in reader:
                if row[2] == 'South_Sudan':
                    locations.append(row[0])
                    lat = float(row[3])
                    lon = float(row[4])
                    MOO_log(msg="\tlocation ={}".format(row[0]))
                    MOO_log(msg="\tlongitude ={}".format(lon))
                    MOO_log(msg="\tlatitude ={}".format(lat))
                    # haversine distance between camp Z and this location
                    phi = (camp_lat - lat) * p
                    lam = (lon - camp_lon) * p
                    a = sin(phi / 2) * sin(phi / 2) + \
                        cos(lat * p) * cos(camp_lat * p) * \
                        sin(lam / 2) * sin(lam / 2)
                    c = 2 * atan2(sqrt(a), sqrt(1 - a))
                    dist.append(R * c)

        MOO_log(msg="\tall locations ={}".format(locations))
        MOO_log(msg="\tdistance between these locations and Z={}".format(dist))

        # pick the location with the shortest distance
        min_dist = np.amin(dist)
        index_min_dist = dist.index(min_dist)
        nearest_loc = locations[index_min_dist]
        return nearest_loc, min_dist

    # --------------------------------------------------------------------------
    def change_route_to_camp(self, csv_name):
        """For each camp listed in ``input_csv/<csv_name>``, create a new
        ``SWEEP/<n>/input_csv`` directory containing:

        * ``routes.csv`` with camp "Z" re-linked to its nearest location, and
        * ``campIPC.csv`` with the camp's lon/lat, IPC phase and
          accessibility score.

        Increments ``self.cnt_SWEEP_dir`` once per camp.  Raises
        RuntimeError if a target SWEEP directory already exists.
        """
        MOO_log(msg="\n[change_route_to_camp]")
        selectedCamps_csv_PATH = os.path.join(self.work_dir, "input_csv", csv_name)

        # Read the data in the selected-camps csv file row by row.
        with open(selectedCamps_csv_PATH, newline='') as csvfile:
            reader = csv.reader(csvfile)
            next(reader)  # skip header
            for row in reader:
                lon = float(row[0])
                lat = float(row[1])
                ipc = float(row[2])
                accessibility = float(row[3])
                MOO_log(msg="\tcamp lon ={}".format(lon))
                MOO_log(msg="\tcamp lat ={}".format(lat))

                # 1. Find the nearest location to camp and calculate the
                # distance between them.
                nearest_loc, min_dist = self.find_closest_location_to_camp(
                    camp_lon=float(lon), camp_lat=float(lat)
                )

                # 2. Read routes.csv and modify the data (i.e., the nearest
                # location to camp and the distance between them)
                routes_csv_PATH = os.path.join(self.work_dir, "input_csv", "routes.csv")
                df = pd.read_csv(routes_csv_PATH)
                # re-point the route whose destination is camp "Z"
                df.loc[lambda df: df['name2'] == 'Z', lambda df: '#name1'] = nearest_loc
                df.loc[lambda df: df['name2'] == 'Z', lambda df: 'distance'] = str(min_dist)
                MOO_log(msg="\tLatitude of camp Z: {} \n\t"
                        "Longitude of camp Z: {}\n\t"
                        "nearest location: {}\n\t"
                        "distance to {}:{}".format(
                            float(lon),
                            float(lat),
                            nearest_loc,
                            nearest_loc, min_dist)
                        )

                # 3. Write the updated routes.csv in the moo_ssudan SWEEP
                # directory.
                sweep_dir = os.path.join(self.work_dir, "SWEEP")
                curr_dir_count = self.cnt_SWEEP_dir
                sub_dir_SWEEP = os.path.join(
                    sweep_dir, "{}".format(curr_dir_count + 1), "input_csv"
                )
                if os.path.exists(sub_dir_SWEEP):
                    raise RuntimeError(
                        "SWEEP dir {} is exists !!!!!".format(sub_dir_SWEEP)
                    )
                os.makedirs(sub_dir_SWEEP)
                MOO_log(msg="\tgenerates SWEEP : {}".format(sub_dir_SWEEP))
                updated_routes_csv_PATH = os.path.join(sub_dir_SWEEP, "routes.csv")
                df.to_csv(updated_routes_csv_PATH, index=False)

                # 4. Write campIPC.csv in the moo_ssudan SWEEP directory
                campIPC_PATH = os.path.join(sub_dir_SWEEP, "campIPC.csv")
                with open(campIPC_PATH, "w", newline="") as fout:
                    writer = csv.writer(fout, delimiter=",")
                    writer.writerow(["lon", "lat", "ipc", "accessibility"])
                    writer.writerow([lon, lat, ipc, accessibility])

                self.cnt_SWEEP_dir += 1
                MOO_log(msg="\t{}".format("-" * 30))

    # --------------------------------------------------------------------------
    def flee_optmization(self, run_dir, camp_name):
        """Compute the five objective values from one finished simulation run.

        NOTE: the misspelled method name is kept for backward compatibility.

        Returns ``[avg_distance_travelled, last_day_population,
        remaining_capacity, ipc_phase, accessibility]``.
        """
        MOO_log(msg="\n[flee_optmization] called for "
                "run_dir = {} camp_name = {}".format(run_dir, camp_name)
                )

        # calculate camp population, obj#2
        df = pd.read_csv(os.path.join(run_dir, "out.csv"))
        sim_camp_population_last_day = df["{} sim".format(camp_name)].iloc[-1]
        sim_camp_population = df["{} sim".format(camp_name)].tolist()
        MOO_log(msg="\tsim camp {} population of the last day = {}".format(
            camp_name, sim_camp_population_last_day)
        )
        MOO_log(msg="\tsim camp {} population = {}".format(
            camp_name, sim_camp_population)
        )

        # find the agents.out files
        agents_out_files = glob.glob(
            "{}".format(os.path.join(run_dir, "agents.out.*"))
        )

        # obj#1: average distance travelled to the camp
        avg_distance_travelled = self.avg_distance(
            agents_out_files=agents_out_files, camp_name=camp_name
        )
        MOO_log(
            msg="\tInput file : {}"
            "\n\t\tavg distance travelled for agents "
            "to camp name {} = {}".format(
                [os.path.basename(filename) for filename in agents_out_files],
                camp_name,
                avg_distance_travelled
            )
        )

        # clean agents.out files to reduce the disk space usage
        clean_agents_cmd = "rm {}".format(os.path.join(
            os.path.dirname(agents_out_files[0]), "agents.out.*"))
        subprocess.check_output(
            clean_agents_cmd,
            shell=True,
        )

        # calculate camp capacity
        PopulationScaledownFactor = 100
        df = pd.read_csv(os.path.join(run_dir, "input_csv", "locations.csv"))
        camp_population = df[df["#name"] == camp_name]["population"].values[0]
        camp_population = camp_population / PopulationScaledownFactor
        MOO_log(msg="\tmax camp {} population = {}".format(
            camp_name, camp_population)
        )

        # calculate average remain camp capacity over simulation days, obj#3
        remain_camp_capacity = mean(
            [abs(camp_population - i) for i in sim_camp_population]
        )
        MOO_log(msg="\tremain camp {} capacity = {}".format(
            camp_name, remain_camp_capacity)
        )

        # calculate IPC phase, obj#4
        input_dir_SWEEP = os.path.join(run_dir, "input_csv")
        ipc_df = pd.read_csv(os.path.join(input_dir_SWEEP, "campIPC.csv"))
        camp_ipc = float(ipc_df.loc[0, "ipc"])
        # calculate accessibility score, obj#5
        camp_accessibility = float(ipc_df.loc[0, "accessibility"])
        MOO_log(msg="\tcamp {}: IPC phase = {},\taccessibility score = {}".format(
            camp_name, camp_ipc, camp_accessibility)
        )

        # return values [obj#1, obj#2, obj#3, obj#4, obj#5]
        return [avg_distance_travelled, sim_camp_population_last_day,
                remain_camp_capacity, camp_ipc, camp_accessibility]

    # ------------------------------------start-----------------------------------
    def run_simulation_with_PJ(self, sh_jobs_scripts):
        """Submit every SWEEP job script to the QCG PilotJob manager and
        block until all submitted jobs have finished.
        """
        from qcg.pilotjob.api.job import Jobs
        jobs = Jobs()
        for sh_job_scripts in sh_jobs_scripts:
            sweep_dir_name = os.path.basename(os.path.dirname(sh_job_scripts))
            jobs.add(
                name="SWEEP_{}".format(sweep_dir_name),
                exec="bash",
                args=["-l", sh_job_scripts],
                stdout="{}/{}.stdout".format(
                    os.path.dirname(sh_job_scripts),
                    "${jname}__${uniq}"
                ),
                stderr="{}/{}.stderr".format(
                    os.path.dirname(sh_job_scripts),
                    "${jname}__${uniq}"
                ),
                numCores={"exact": self.cores},
                model="default"
            )
            print("\nAdd job with :")
            print("name=SWEEP_{}".format(sweep_dir_name))
            print("args = [-l,{}]".format(sh_job_scripts))
            print("stdout = {}/{}.stdout".format(
                os.path.dirname(sh_job_scripts),
                "${jname}__${uniq}")
            )
            print("stderr = {}/{}.stderr".format(
                os.path.dirname(sh_job_scripts),
                "${jname}__${uniq}")
            )
            print("numCores=exact: {}".format(self.cores))

        ids = QCG_MANAGER.submit(jobs)
        # wait until submited jobs finish
        QCG_MANAGER.wait4(ids)
        print("\nAll new SWEEP dirs are finished...\n")

    def run_simulation_without_PJ(self, sh_jobs_scripts):
        """Run every SWEEP job script sequentially via a shell subprocess.

        Raises RuntimeError if a script cannot be launched or exits with
        a non-zero return code.
        """
        for sh_job_scripts in sh_jobs_scripts:
            try:
                p = subprocess.Popen(sh_job_scripts, shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                (stdout, stderr) = p.communicate()
            except Exception as e:
                # BUG FIX: the unreachable sys.exit() that followed this
                # raise has been removed.
                raise RuntimeError("Unexpected error: {}".format(e))

            acceptable_err_subprocesse_ret_codes = [0]
            if p.returncode not in acceptable_err_subprocesse_ret_codes:
                # BUG FIX: the error message previously referenced an
                # undefined name `command` (NameError); report the script
                # that was actually executed instead.
                raise RuntimeError(
                    "\njob execution encountered an error (return code {})"
                    "while executing '{}'".format(p.returncode, sh_job_scripts)
                )
        # BUG FIX: a stray unconditional `sys.exit(0)` here terminated the
        # whole MOO run after the first batch of simulations, so
        # _evaluate() could never compute the objective values. Removed.
    #-------------------------------------end------------------------------------

    def _evaluate(self, x, out, *args, **kwargs):
        """
        The _evaluate method takes a one-dimensional NumPy array x with n
        rows as an input. Each row represents an individual, namely, the
        index of a possible camp location. After running the simulations
        the five objective values are added to the dictionary *out* under
        the key "F".
        """
        # read the candidate-camp table (lon, lat, IPC, landcover)
        df = pd.read_csv("accessible_camp_ipc.csv")
        camp_coords_df = df[['lon', 'lat']]
        coords = camp_coords_df.to_numpy()

        # obtain coordinates of selected camps
        X_1D = x.flatten()
        X_1D = X_1D.astype('int64')
        population = coords[X_1D, :]

        MOO_log(
            msg="\n{}\nExecuting _evaluate function with input "
            "population : \n{}\n".format("-" * 30, pformat(population))
        )
        for n, row in enumerate(population, start=1):
            MOO_log("\tpotential location {}: {}".format(n, row))

        # Get IPC phase data of each camp location
        ipc = df.loc[X_1D, 'IPC']
        ipc_list = ipc.tolist()
        # Get accessibility score of each camp location
        accessibility_score = df.loc[X_1D, 'landcover']
        accessibility_list = accessibility_score.tolist()
        selected_camps = [[*a, b, c] for a, b, c in
                          zip(population, ipc_list, accessibility_list)]

        selectedCamps_csv_PATH = os.path.join(
            self.work_dir, "input_csv", "selectedCamps.csv"
        )
        # Save the selected camps to CSV for change_route_to_camp()
        with open(selectedCamps_csv_PATH, "w", newline="") as file:
            writer = csv.writer(file, delimiter=",")
            writer.writerow(["Camp Longitude", "Camp Latitude", "IPC Score", "Accessibility Score"])  # header
            writer.writerows(selected_camps)

        ####################################################################
        # Run change_route_to_camp function to update the routes.csv file #
        # according to the parameter ind, which is the coordinate of camp. #
        ####################################################################
        cnt_SWEEP_dir_before = self.cnt_SWEEP_dir
        self.change_route_to_camp(csv_name="selectedCamps.csv")

        ####################################
        # job_script parameter preparation #
        ####################################
        # list of files and folders to be included
        sel_files_folders = ["**input_csv/***", "**source_data/***",
                             "run.py",
                             "run_par.py", "simsetting.csv"
                             ]
        # Note: be careful with rync command arguments
        rync_cmd = " ".join([
            *["rsync -pthrvz --ignore-existing"],
            *["--include='{}' ".format(sel) for sel in sel_files_folders],
            *["--exclude='*'"],
            *["--exclude='SWEEP'"],
            *["{}/ .".format(self.work_dir)]
        ])

        # set the execution command for flee simulation
        if self.execution_mode.lower() == "serial":
            flee_exec_cmd = "python3 run.py input_csv source_data " \
                "{} simsetting.csv > out.csv".format(
                    self.simulation_period)
        elif self.execution_mode.lower() == "parallel":
            flee_exec_cmd = "mpirun -np {} " \
                "python3 run_par.py input_csv source_data " \
                "{} simsetting.csv > out.csv".format(
                    self.cores,
                    self.simulation_period)
        else:
            raise RuntimeError(
                "The input execution_mode {} not valid!".format(
                    self.execution_mode)
            )

        # clean the SWEEP dir after simulation finished
        clean_cmd = "find . -type f ! \( -name 'out.csv' " \
            "-o -name 'routes.csv' -o -name 'agents.out.*' " \
            "-o -name 'flee_exec_cmd.sh' "\
            "-o -name '*.stdout' "\
            "-o -name '*.stderr' "\
            "-o -name 'selectedCamps.csv' "\
            "-o -name 'campIPC.csv' "\
            "-o -name 'locations.csv' \) -exec rm -rf {} \; ;" \
            "rm -rf source_data"

        ###################################################
        # save job_script in each new generated SWEEP dir #
        ###################################################
        print("cnt_SWEEP_dir_before = {}\nself.cnt_SWEEP_dir={}\n".format(
            cnt_SWEEP_dir_before, self.cnt_SWEEP_dir)
        )
        sh_jobs_scripts = []
        for i in range(cnt_SWEEP_dir_before, self.cnt_SWEEP_dir):
            dest_SWEEP_dir = os.path.join(work_dir, "SWEEP", str(i + 1))
            # create a bash script that copies inputs, runs flee and cleans up
            flee_exec_sh = os.path.join(dest_SWEEP_dir, "flee_exec_cmd.sh")
            with open(flee_exec_sh, "w") as f:
                f.write("#!/bin/bash\n\n")
                f.write("# change dir\n\n")
                f.write("cd {}\n\n".format(dest_SWEEP_dir))
                f.write("# copying the required input files\n")
                f.write("{}\n\n".format(rync_cmd))
                f.write("# running simulation\n")
                f.write("{}\n\n".format(flee_exec_cmd))
                f.write("# cleaning the SWEEP dir after simulation finished\n")
                f.write("{}\n\n".format(clean_cmd))
                f.write("touch DONE\n")

            # change file permission to executable
            st = os.stat(flee_exec_sh)
            os.chmod(flee_exec_sh, st.st_mode | stat.S_IEXEC)

            sh_jobs_scripts.append(flee_exec_sh)

        #####################################
        # run simulation per each SWEEP dir #
        #####################################
        if USE_PJ is False:
            self.run_simulation_without_PJ(sh_jobs_scripts)
        else:
            self.run_simulation_with_PJ(sh_jobs_scripts)

        # Calculate objective values.
        # Create a csv file that only contains the header.
        with open("objectives.csv", "w", newline="") as file:
            writer = csv.writer(file, delimiter=",")
            # add header
            writer.writerow(["Objective #1", "Objective #2", "Objective #3", "Objective #4", "Objective #5"])

        # Calculate objective values and save the data in objectives.csv file
        for i in range(cnt_SWEEP_dir_before, self.cnt_SWEEP_dir):
            dest_SWEEP_dir = os.path.join("SWEEP", str(i + 1))
            row = self.flee_optmization(run_dir=dest_SWEEP_dir, camp_name="Z")
            with open("objectives.csv", "a", newline="") as file:
                writer = csv.writer(file)
                writer.writerow(row)
            MOO_log(msg="=" * 50)

        # Fetch the objective values
        objectives = pd.read_csv("objectives.csv")
        MOO_log(msg="objectives.csv =\n{}".format(pformat(objectives)))

        # objective 1: minimize average distance travelled by each arriving
        # refugee.
        f1 = objectives["Objective #1"].values
        MOO_log(msg="\tf1: {}".format(f1))

        # objective 2: maximize camp population, i.e., the number of people
        # in the camp at the end of the simulation (negated for pymoo).
        f2 = -objectives["Objective #2"].values
        MOO_log(msg="\tf2: {}".format(f2))

        # objective 3: minimize the average remain camp capacity over
        # simulation days
        f3 = objectives["Objective #3"].values
        MOO_log(msg="\tf3: {}".format(f3))

        # objective 4: minimize the IPC phase score of camp
        f4 = objectives["Objective #4"].values
        MOO_log(msg="\tf4: {}".format(f4))

        # objective 5: maximize accessibility (negated for pymoo)
        f5 = -objectives["Objective #5"].values
        MOO_log(msg="\tf5: {}".format(f5))
        MOO_log(msg="=" * 50)

        out["F"] = np.column_stack([f1, f2, f3, f4, f5])
if __name__ == "__main__":
    start_time = time.monotonic()

    # ---------------------------------------------------------------- #
    # Parse command-line arguments.                                    #
    # ---------------------------------------------------------------- #
    parser = argparse.ArgumentParser()
    parser.add_argument("--execution_mode", action="store", default="serial")
    # NOTE: numeric defaults are given as ints (argparse applied the
    # type-converter to the previous string defaults, so behavior is the same)
    parser.add_argument("--simulation_period", action="store", type=int,
                        default=-1)
    parser.add_argument("--exec_log_file", action="store",
                        default="log_MOO.txt")
    parser.add_argument("--cores", action="store", type=int, default=1)
    parser.add_argument("--USE_PJ", action="store", default="False")

    args = parser.parse_args()

    execution_mode = args.execution_mode
    simulation_period = args.simulation_period
    cores = args.cores

    if args.USE_PJ.lower() == "true":
        # run simulations through the QCG PilotJob manager
        USE_PJ = True
        from qcg.pilotjob.api.manager import LocalManager
        QCG_MANAGER = LocalManager(
            cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug']
        )
    else:
        USE_PJ = False

    EXEC_LOG_FILE = os.path.join(work_dir, args.exec_log_file)
    MOO_log(msg="run_MOO input args : {}".format(args))

    # read MOO setting from config yaml file
    MOO_CONFIG = read_MOO_setting_yaml()
    MOO_log(msg="MOO_CONFIG =\n{}".format(pformat(MOO_CONFIG)))

    problem = FLEE_MOO_Problem(
        execution_mode=execution_mode,
        simulation_period=simulation_period,
        cores=cores,
    )

    algorithm = None
    alg_name = MOO_CONFIG["alg_name"]
    crossover_func = MOO_CONFIG["crossover_func"]
    crossover_func_args = MOO_CONFIG["crossover_func_args"][crossover_func]
    mutation_func = MOO_CONFIG["mutation_func"]
    mutation_func_args = MOO_CONFIG["mutation_func_args"][mutation_func]
    alg_specific_args = MOO_CONFIG["alg_specific_args"][alg_name]

    try:
        ref_dir_func = alg_specific_args["ref_dir_name"]
        ref_dir_func_args = MOO_CONFIG["ref_dir_func"][ref_dir_func]
        ref_dir_func_args.update({"n_dim": problem.n_obj})
    except KeyError as e:
        # DO NOT raise any Exception if the alg_name does not require
        # any input reference direction function
        pass
    except Exception as e:
        print(e)
        sys.exit()

    if alg_name == "NSGA2":
        sampling_func = MOO_CONFIG["sampling_func"]
        pop_size = alg_specific_args["pop_size"]
        #################
        # set algorithm #
        #################
        algorithm = NSGA2(
            pop_size=pop_size,
            sampling=get_sampling(sampling_func),
            crossover=get_crossover(crossover_func, **crossover_func_args),
            mutation=get_mutation(mutation_func, **mutation_func_args),
            eliminate_duplicates=True
        )
        #####################
        # algorithm logging #
        #####################
        MOO_log(
            msg="algorithm = {}(\n"
            "pop_size={},\n"
            "sampling=get_sampling({}),\n"
            "crossover=get_crossover({},{}),\n"
            "mutation=get_mutation({},{}),\n"
            "eliminate_duplicates=True\n"
            ")".format(
                alg_name,
                pop_size,
                sampling_func,
                crossover_func, crossover_func_args,
                mutation_func, mutation_func_args,
            )
        )
    elif alg_name == "MOEAD":
        alg_specific_args = MOO_CONFIG["alg_specific_args"]["MOEAD"]
        n_neighbors = alg_specific_args["n_neighbors"]
        prob_neighbor_mating = alg_specific_args["prob_neighbor_mating"]
        #################
        # set algorithm #
        #################
        algorithm = MOEAD(
            ref_dirs=get_reference_directions(ref_dir_func,
                                              **ref_dir_func_args),
            n_neighbors=n_neighbors,
            prob_neighbor_mating=prob_neighbor_mating,
            crossover=get_crossover(crossover_func, **crossover_func_args),
            mutation=get_mutation(mutation_func, **mutation_func_args),
        )
        #####################
        # algorithm logging #
        #####################
        MOO_log(
            msg="algorithm = {}(\n"
            "ref_dirs = get_reference_directions({},{}),\n"
            "n_neighbors = {}\n"
            "prob_neighbor_mating = {}\n"
            "crossover=get_crossover({},{}),\n"
            "mutation=get_mutation({},{}),\n"
            ")".format(
                alg_name,
                ref_dir_func, ref_dir_func_args,
                n_neighbors,
                prob_neighbor_mating,
                crossover_func, crossover_func_args,
                mutation_func, mutation_func_args,
            )
        )
    elif alg_name == "BCE-MOEAD":
        alg_specific_args = MOO_CONFIG["alg_specific_args"]["BCE-MOEAD"]
        n_neighbors = alg_specific_args["n_neighbors"]
        prob_neighbor_mating = alg_specific_args["prob_neighbor_mating"]
        #################
        # set algorithm #
        #################
        algorithm = BCEMOEAD(
            ref_dirs=get_reference_directions(ref_dir_func,
                                              **ref_dir_func_args),
            n_neighbors=n_neighbors,
            prob_neighbor_mating=prob_neighbor_mating,
            crossover=get_crossover(crossover_func, **crossover_func_args),
            mutation=get_mutation(mutation_func, **mutation_func_args),
        )
        #####################
        # algorithm logging #
        #####################
        MOO_log(
            msg="algorithm = {}(\n"
            "ref_dirs = get_reference_directions({},{}),\n"
            "n_neighbors = {}\n"
            "prob_neighbor_mating = {}\n"
            "crossover=get_crossover({},{}),\n"
            "mutation=get_mutation({},{}),\n"
            ")".format(
                alg_name,
                ref_dir_func, ref_dir_func_args,
                n_neighbors,
                prob_neighbor_mating,
                crossover_func, crossover_func_args,
                mutation_func, mutation_func_args,
            )
        )
    elif alg_name == "NSGA3":
        pop_size = alg_specific_args["pop_size"]
        #################
        # set algorithm #
        #################
        algorithm = NSGA3(
            pop_size=pop_size,
            ref_dirs=get_reference_directions(ref_dir_func,
                                              **ref_dir_func_args),
            crossover=get_crossover(crossover_func, **crossover_func_args),
            mutation=get_mutation(mutation_func, **mutation_func_args),
        )
        #####################
        # algorithm logging #
        #####################
        # BUG FIX: removed a stray backtick that was embedded in this
        # log-format string ("pop_size = {}\n`").
        MOO_log(
            msg="algorithm = {}(\n"
            "pop_size = {}\n"
            "ref_dirs = get_reference_directions({},{}),\n"
            "crossover=get_crossover({},{}),\n"
            "mutation=get_mutation({},{}),\n"
            ")".format(
                alg_name,
                pop_size,
                ref_dir_func, ref_dir_func_args,
                crossover_func, crossover_func_args,
                mutation_func, mutation_func_args,
            )
        )

    if algorithm is None:
        # BUG FIX: MOO_CONFIG is a plain dict, so the previous
        # `MOO_CONFIG.alg_name` raised AttributeError instead of this
        # intended RuntimeError message.
        raise RuntimeError(
            "Input alg_name = {} is not valid or "
            "not supported within run_MOO.py".format(
                MOO_CONFIG["alg_name"])
        )

    # convert dict {'n_gen': 2} to tuple ('n_gen', 2)
    termination = list(MOO_CONFIG["termination"].items())[0]
    MOO_log(msg="termination = {}".format(termination))

    res = minimize(
        problem=problem,
        algorithm=algorithm,
        termination=termination,
        verbose=True
    )

    x = res.pop.get("X")
    MOO_log(msg="location index = \n {}".format(x))
    X_1D = x.flatten()
    X_1D = X_1D.astype('int64')
    # read accessible_camp_ipc.csv
    df = pd.read_csv("accessible_camp_ipc.csv")
    camp_coords_df = df[['lon', 'lat']]
    coords = camp_coords_df.to_numpy()
    # obtain coordinates of selected camps
    popu = coords[X_1D, :]

    MOO_log(msg="{}".format("#" * 50))
    MOO_log(msg="locations of camp Z:\n\t{}".format(popu))
    MOO_log(msg="corresponding objective values:\n\t{}".format(res.pop.get("F")))

    # un-negate the two maximization objectives (population, accessibility)
    out_F = res.pop.get("F")
    out_F[:, 1] = -out_F[:, 1]
    out_F[:, -1] = -out_F[:, -1]
    output = np.hstack([popu, out_F])
    with open("population.csv", "w", newline="") as file:
        writer = csv.writer(file, delimiter=",")
        writer.writerow(["lon", "lat", "obj_1", "obj_2", "obj_3", "obj_4", "obj_5"])  # header
        writer.writerows(output)
    MOO_log(msg="The output is stored in {}/population.csv\n".format(work_dir))

    if USE_PJ is True:
        # shut down the PilotJob manager cleanly
        QCG_MANAGER.finish()
        QCG_MANAGER.kill_manager_process()
        QCG_MANAGER.cleanup()

    end_time = time.monotonic()
    print('Duration:\t{}'.format(timedelta(seconds=end_time - start_time)))
| [
"pandas.read_csv",
"numpy.hstack",
"math.sqrt",
"numpy.column_stack",
"math.cos",
"numpy.array",
"sys.exit",
"pymoo.factory.get_crossover",
"pymoo.factory.get_reference_directions",
"datetime.timedelta",
"os.path.exists",
"qcg.pilotjob.api.manager.LocalManager",
"argparse.ArgumentParser",
... | [((866, 891), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (881, 891), False, 'import os\n'), ((22500, 22516), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (22514, 22516), False, 'import time\n'), ((22584, 22609), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (22607, 22609), False, 'import argparse\n'), ((23483, 23525), 'os.path.join', 'os.path.join', (['work_dir', 'args.exec_log_file'], {}), '(work_dir, args.exec_log_file)\n', (23495, 23525), False, 'import os\n'), ((30011, 30100), 'pymoo.optimize.minimize', 'minimize', ([], {'problem': 'problem', 'algorithm': 'algorithm', 'termination': 'termination', 'verbose': '(True)'}), '(problem=problem, algorithm=algorithm, termination=termination,\n verbose=True)\n', (30019, 30100), False, 'from pymoo.optimize import minimize\n'), ((30313, 30351), 'pandas.read_csv', 'pd.read_csv', (['"""accessible_camp_ipc.csv"""'], {}), "('accessible_camp_ipc.csv')\n", (30324, 30351), True, 'import pandas as pd\n'), ((30791, 30815), 'numpy.hstack', 'np.hstack', (['[popu, out_F]'], {}), '([popu, out_F])\n', (30800, 30815), True, 'import numpy as np\n'), ((31274, 31290), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (31288, 31290), False, 'import time\n'), ((1666, 1683), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1680, 1683), False, 'import yaml\n'), ((2572, 2618), 'pandas.concat', 'pd.concat', (['df_array'], {'axis': '(0)', 'ignore_index': '(True)'}), '(df_array, axis=0, ignore_index=True)\n', (2581, 2618), True, 'import pandas as pd\n'), ((3344, 3401), 'os.path.join', 'os.path.join', (['self.work_dir', '"""input_csv"""', '"""locations.csv"""'], {}), "(self.work_dir, 'input_csv', 'locations.csv')\n", (3356, 3401), False, 'import os\n'), ((4671, 4684), 'numpy.amin', 'np.amin', (['dist'], {}), '(dist)\n', (4678, 4684), True, 'import numpy as np\n'), ((5101, 5151), 'os.path.join', 'os.path.join', (['self.work_dir', '"""input_csv"""', 'csv_name'], 
{}), "(self.work_dir, 'input_csv', csv_name)\n", (5113, 5151), False, 'import os\n'), ((10031, 10084), 'subprocess.check_output', 'subprocess.check_output', (['clean_agents_cmd'], {'shell': '(True)'}), '(clean_agents_cmd, shell=True)\n', (10054, 10084), False, 'import subprocess\n'), ((10903, 10937), 'os.path.join', 'os.path.join', (['run_dir', '"""input_csv"""'], {}), "(run_dir, 'input_csv')\n", (10915, 10937), False, 'import os\n'), ((11791, 11797), 'qcg.pilotjob.api.job.Jobs', 'Jobs', ([], {}), '()\n', (11795, 11797), False, 'from qcg.pilotjob.api.job import Jobs\n'), ((14770, 14808), 'pandas.read_csv', 'pd.read_csv', (['"""accessible_camp_ipc.csv"""'], {}), "('accessible_camp_ipc.csv')\n", (14781, 14808), True, 'import pandas as pd\n'), ((15803, 15864), 'os.path.join', 'os.path.join', (['self.work_dir', '"""input_csv"""', '"""selectedCamps.csv"""'], {}), "(self.work_dir, 'input_csv', 'selectedCamps.csv')\n", (15815, 15864), False, 'import os\n'), ((16341, 16377), 'os.path.join', 'os.path.join', (['self.work_dir', '"""SWEEP"""'], {}), "(self.work_dir, 'SWEEP')\n", (16353, 16377), False, 'import os\n'), ((21391, 21420), 'pandas.read_csv', 'pd.read_csv', (['"""objectives.csv"""'], {}), "('objectives.csv')\n", (21402, 21420), True, 'import pandas as pd\n'), ((22415, 22452), 'numpy.column_stack', 'np.column_stack', (['[f1, f2, f3, f4, f5]'], {}), '([f1, f2, f3, f4, f5])\n', (22430, 22452), True, 'import numpy as np\n'), ((23334, 23406), 'qcg.pilotjob.api.manager.LocalManager', 'LocalManager', ([], {'cfg': "{'log_level': 'DEBUG'}", 'server_args': "['--log', 'debug']"}), "(cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug'])\n", (23346, 23406), False, 'from qcg.pilotjob.api.manager import LocalManager\n'), ((30891, 30922), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (30901, 30922), False, 'import csv\n'), ((1595, 1637), 'os.path.join', 'os.path.join', (['work_dir', '"""MOO_setting.yaml"""'], {}), "(work_dir, 
'MOO_setting.yaml')\n", (1607, 1637), False, 'import os\n'), ((2456, 2503), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': 'None', 'header': '(0)'}), '(filename, index_col=None, header=0)\n', (2467, 2503), True, 'import pandas as pd\n'), ((3482, 3501), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (3492, 3501), False, 'import csv\n'), ((5302, 5321), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (5312, 5321), False, 'import csv\n'), ((8782, 8814), 'os.path.join', 'os.path.join', (['run_dir', '"""out.csv"""'], {}), "(run_dir, 'out.csv')\n", (8794, 8814), False, 'import os\n'), ((10220, 10271), 'os.path.join', 'os.path.join', (['run_dir', '"""input_csv"""', '"""locations.csv"""'], {}), "(run_dir, 'input_csv', 'locations.csv')\n", (10232, 10271), False, 'import os\n'), ((10967, 11011), 'os.path.join', 'os.path.join', (['input_dir_SWEEP', '"""campIPC.csv"""'], {}), "(input_dir_SWEEP, 'campIPC.csv')\n", (10979, 11011), False, 'import os\n'), ((16005, 16036), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (16015, 16036), False, 'import csv\n'), ((19322, 19370), 'os.path.join', 'os.path.join', (['dest_SWEEP_dir', '"""flee_exec_cmd.sh"""'], {}), "(dest_SWEEP_dir, 'flee_exec_cmd.sh')\n", (19334, 19370), False, 'import os\n'), ((20092, 20113), 'os.stat', 'os.stat', (['flee_exec_sh'], {}), '(flee_exec_sh)\n', (20099, 20113), False, 'import os\n'), ((20126, 20175), 'os.chmod', 'os.chmod', (['flee_exec_sh', '(st.st_mode | stat.S_IEXEC)'], {}), '(flee_exec_sh, st.st_mode | stat.S_IEXEC)\n', (20134, 20175), False, 'import os\n'), ((20703, 20734), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (20713, 20734), False, 'import csv\n'), ((24660, 24670), 'sys.exit', 'sys.exit', ([], {}), '()\n', (24668, 24670), False, 'import sys\n'), ((31324, 31364), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(end_time - start_time)'}), '(seconds=end_time 
- start_time)\n', (31333, 31364), False, 'from datetime import timedelta\n'), ((2122, 2135), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2130, 2135), True, 'import numpy as np\n'), ((2168, 2185), 'numpy.array', 'np.array', (['[19688]'], {}), '([19688])\n', (2176, 2185), True, 'import numpy as np\n'), ((2848, 2884), 'os.path.dirname', 'os.path.dirname', (['agents_out_files[0]'], {}), '(agents_out_files[0])\n', (2863, 2884), False, 'import os\n'), ((6289, 6343), 'os.path.join', 'os.path.join', (['self.work_dir', '"""input_csv"""', '"""routes.csv"""'], {}), "(self.work_dir, 'input_csv', 'routes.csv')\n", (6301, 6343), False, 'import os\n'), ((6366, 6394), 'pandas.read_csv', 'pd.read_csv', (['routes_csv_PATH'], {}), '(routes_csv_PATH)\n', (6377, 6394), True, 'import pandas as pd\n'), ((7167, 7203), 'os.path.join', 'os.path.join', (['self.work_dir', '"""SWEEP"""'], {}), "(self.work_dir, 'SWEEP')\n", (7179, 7203), False, 'import os\n'), ((7478, 7507), 'os.path.exists', 'os.path.exists', (['sub_dir_SWEEP'], {}), '(sub_dir_SWEEP)\n', (7492, 7507), False, 'import os\n'), ((7665, 7691), 'os.makedirs', 'os.makedirs', (['sub_dir_SWEEP'], {}), '(sub_dir_SWEEP)\n', (7676, 7691), False, 'import os\n'), ((7811, 7852), 'os.path.join', 'os.path.join', (['sub_dir_SWEEP', '"""routes.csv"""'], {}), "(sub_dir_SWEEP, 'routes.csv')\n", (7823, 7852), False, 'import os\n'), ((8024, 8066), 'os.path.join', 'os.path.join', (['sub_dir_SWEEP', '"""campIPC.csv"""'], {}), "(sub_dir_SWEEP, 'campIPC.csv')\n", (8036, 8066), False, 'import os\n'), ((9319, 9356), 'os.path.join', 'os.path.join', (['run_dir', '"""agents.out.*"""'], {}), "(run_dir, 'agents.out.*')\n", (9331, 9356), False, 'import os\n'), ((9967, 10003), 'os.path.dirname', 'os.path.dirname', (['agents_out_files[0]'], {}), '(agents_out_files[0])\n', (9982, 10003), False, 'import os\n'), ((11891, 11922), 'os.path.dirname', 'os.path.dirname', (['sh_job_scripts'], {}), '(sh_job_scripts)\n', (11906, 11922), False, 'import os\n'), 
((13464, 13561), 'subprocess.Popen', 'subprocess.Popen', (['sh_job_scripts'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(sh_job_scripts, shell=True, stdout=subprocess.PIPE, stderr\n =subprocess.PIPE)\n', (13480, 13561), False, 'import subprocess\n'), ((14161, 14172), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (14169, 14172), False, 'import sys\n'), ((21248, 21264), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (21258, 21264), False, 'import csv\n'), ((23712, 23731), 'pprint.pformat', 'pformat', (['MOO_CONFIG'], {}), '(MOO_CONFIG)\n', (23719, 23731), False, 'from pprint import pprint, pformat\n'), ((24958, 24985), 'pymoo.factory.get_sampling', 'get_sampling', (['sampling_func'], {}), '(sampling_func)\n', (24970, 24985), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((25009, 25061), 'pymoo.factory.get_crossover', 'get_crossover', (['crossover_func'], {}), '(crossover_func, **crossover_func_args)\n', (25022, 25061), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((25084, 25133), 'pymoo.factory.get_mutation', 'get_mutation', (['mutation_func'], {}), '(mutation_func, **mutation_func_args)\n', (25096, 25133), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((8162, 8193), 'csv.writer', 'csv.writer', (['fout'], {'delimiter': '""","""'}), "(fout, delimiter=',')\n", (8172, 8193), False, 'import csv\n'), ((12709, 12740), 'os.path.dirname', 'os.path.dirname', (['sh_job_scripts'], {}), '(sh_job_scripts)\n', (12724, 12740), False, 'import os\n'), ((12859, 12890), 'os.path.dirname', 'os.path.dirname', (['sh_job_scripts'], {}), '(sh_job_scripts)\n', (12874, 12890), False, 'import os\n'), ((13802, 13812), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13810, 13812), False, 'import sys\n'), ((15217, 15236), 
'pprint.pformat', 'pformat', (['population'], {}), '(population)\n', (15224, 15236), False, 'from pprint import pprint, pformat\n'), ((21471, 21490), 'pprint.pformat', 'pformat', (['objectives'], {}), '(objectives)\n', (21478, 21490), False, 'from pprint import pprint, pformat\n'), ((26124, 26183), 'pymoo.factory.get_reference_directions', 'get_reference_directions', (['ref_dir_func'], {}), '(ref_dir_func, **ref_dir_func_args)\n', (26148, 26183), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((26345, 26397), 'pymoo.factory.get_crossover', 'get_crossover', (['crossover_func'], {}), '(crossover_func, **crossover_func_args)\n', (26358, 26397), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((26420, 26469), 'pymoo.factory.get_mutation', 'get_mutation', (['mutation_func'], {}), '(mutation_func, **mutation_func_args)\n', (26432, 26469), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((9682, 9708), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9698, 9708), False, 'import os\n'), ((12143, 12174), 'os.path.dirname', 'os.path.dirname', (['sh_job_scripts'], {}), '(sh_job_scripts)\n', (12158, 12174), False, 'import os\n'), ((12301, 12332), 'os.path.dirname', 'os.path.dirname', (['sh_job_scripts'], {}), '(sh_job_scripts)\n', (12316, 12332), False, 'import os\n'), ((27513, 27572), 'pymoo.factory.get_reference_directions', 'get_reference_directions', (['ref_dir_func'], {}), '(ref_dir_func, **ref_dir_func_args)\n', (27537, 27572), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((27734, 27786), 'pymoo.factory.get_crossover', 'get_crossover', (['crossover_func'], {}), '(crossover_func, **crossover_func_args)\n', (27747, 27786), False, 'from pymoo.factory import 
get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((27809, 27858), 'pymoo.factory.get_mutation', 'get_mutation', (['mutation_func'], {}), '(mutation_func, **mutation_func_args)\n', (27821, 27858), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((4313, 4325), 'math.sin', 'sin', (['(phi / 2)'], {}), '(phi / 2)\n', (4316, 4325), False, 'from math import sin, cos, atan2, sqrt, pi\n'), ((4324, 4336), 'math.sin', 'sin', (['(phi / 2)'], {}), '(phi / 2)\n', (4327, 4336), False, 'from math import sin, cos, atan2, sqrt, pi\n'), ((4373, 4385), 'math.sin', 'sin', (['(lam / 2)'], {}), '(lam / 2)\n', (4376, 4385), False, 'from math import sin, cos, atan2, sqrt, pi\n'), ((4417, 4424), 'math.sqrt', 'sqrt', (['a'], {}), '(a)\n', (4421, 4424), False, 'from math import sin, cos, atan2, sqrt, pi\n'), ((4425, 4436), 'math.sqrt', 'sqrt', (['(1 - a)'], {}), '(1 - a)\n', (4429, 4436), False, 'from math import sin, cos, atan2, sqrt, pi\n'), ((28774, 28833), 'pymoo.factory.get_reference_directions', 'get_reference_directions', (['ref_dir_func'], {}), '(ref_dir_func, **ref_dir_func_args)\n', (28798, 28833), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((28903, 28955), 'pymoo.factory.get_crossover', 'get_crossover', (['crossover_func'], {}), '(crossover_func, **crossover_func_args)\n', (28916, 28955), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((28978, 29027), 'pymoo.factory.get_mutation', 'get_mutation', (['mutation_func'], {}), '(mutation_func, **mutation_func_args)\n', (28990, 29027), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem, get_reference_directions\n'), ((4362, 4374), 'math.sin', 'sin', (['(lam / 2)'], {}), '(lam / 2)\n', (4365, 4374), False, 'from math import sin, cos, atan2, sqrt, 
pi\n'), ((4335, 4347), 'math.cos', 'cos', (['(lat * p)'], {}), '(lat * p)\n', (4338, 4347), False, 'from math import sin, cos, atan2, sqrt, pi\n'), ((4346, 4363), 'math.cos', 'cos', (['(camp_lat * p)'], {}), '(camp_lat * p)\n', (4349, 4363), False, 'from math import sin, cos, atan2, sqrt, pi\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 20:10:00 2020
@author: thorius
"""
import os
import sys
from queue import Queue
import numpy as np
import logging
import pyaudio
import time
from tflite_runtime.interpreter import Interpreter
import collections
from scipy import signal
class StreamControl():
    """Continuously capture microphone audio and classify sliding windows
    with a TFLite keyword-spotting model.

    A PyAudio callback appends ``chunk_duration``-second chunks to a rolling
    buffer of ``feed_duration`` seconds; each full window is pushed onto a
    queue and consumed by :meth:`run`, which majority-votes the per-chunk
    predictions and logs the winning label.
    """
    def __init__(self,
                 path_model = './model/E2E_1stage_v8/tflite_non_stream',
                 name_model = 'non_stream.tflite',
                 sample_rate = 16000,
                 chunk_duration = 0.25,
                 feed_duration = 1.0,
                 channels = 1,
                 threshold = 0.5,
                 time_out = 8):
        """
        Args:
            path_model: directory holding the .tflite model and labels.txt.
            name_model: file name of the TFLite model inside path_model.
            sample_rate: sample rate (Hz) the model expects (window length
                after resampling is 16000 samples).
            chunk_duration: seconds of audio per capture chunk; may be
                overridden by sys.argv[1].
            feed_duration: seconds of audio fed to the model per prediction;
                must be an integer multiple of chunk_duration.
            channels: number of input audio channels.
            threshold: minimum vote ratio required to report a keyword; may
                be overridden by sys.argv[2].
            time_out: unused; kept for backward compatibility.
        """
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        # Optional command-line overrides: <chunk_duration> [<threshold>]
        argumentList = sys.argv
        self.path_model = path_model
        self.name_model = name_model
        self.sample_rate = sample_rate
        if(len(argumentList) == 2):
            self.chunk_duration = float(sys.argv[1])
            self.threshold = threshold
        elif(len(argumentList) == 3):
            self.chunk_duration = float(sys.argv[1])
            self.threshold = float(sys.argv[2])
        else:
            self.chunk_duration = chunk_duration
            self.threshold = threshold
        # channels of audio
        self.channels = channels
        # feed_duration -- time in seconds of the input to the model
        self.feed_duration = feed_duration
        # Native capture rate of the sound device; audio is resampled down
        # to self.sample_rate before inference.
        self.device_sample_rate = 44100
        self.chunk_samples = int(self.device_sample_rate * self.chunk_duration)
        self.feed_samples = int(self.device_sample_rate * self.feed_duration)
        # Queue to communicate between the audio callback and main thread
        self.q = Queue()
        # Rolling buffer holding the most recent feed_duration of audio
        self.data = np.zeros(self.feed_samples, dtype='int16')
        with open(os.path.join(path_model, 'labels.txt'), 'r') as fd:
            labels_txt = fd.read()
            self.labels = labels_txt.split()
        # The prediction window must contain a whole number of chunks.
        # BUG FIX: the original assertion compared the same expression to
        # itself and could never fail.
        assert float(self.feed_duration / self.chunk_duration).is_integer()
        self.stream = True
    def _get_interpreter(self):
        """Lazily build and cache the TFLite interpreter.

        The original implementation re-read the model file and rebuilt the
        interpreter on every prediction; the model path never changes for an
        instance, so build it once.
        """
        if getattr(self, '_interpreter', None) is None:
            with open(os.path.join(self.path_model, self.name_model), 'rb') as f:
                model_content = f.read()
            interpreter = Interpreter(model_content=model_content)
            interpreter.allocate_tensors()
            self._interpreter = interpreter
        return self._interpreter
    def run(self):
        """Open the microphone stream and classify audio until interrupted."""
        # callback method
        def audio_callback(in_data, frame_count, time_info, status):
            # Append the newest chunk and keep only the last feed_samples.
            data0 = np.frombuffer(in_data, dtype='int16')
            self.data = np.append(self.data,data0)
            if len(self.data) > self.feed_samples:
                self.data = self.data[-self.feed_samples:]
                # Process data async by sending a queue.
                self.q.put(self.data)
            return (in_data, pyaudio.paContinue)
        self.audio = pyaudio.PyAudio()
        self.stream_in = self.audio.open(
            input=True, output=False,
            format=pyaudio.paInt16,
            channels=self.channels,
            rate=self.device_sample_rate,
            frames_per_buffer=self.chunk_samples,
            stream_callback=audio_callback)
        # Number of chunk-level predictions that vote on one window.
        size_predicts = int(self.feed_duration / self.chunk_duration)
        predictions = np.zeros([size_predicts])
        try:
            while self.stream:
                current_time = time.time()
                for i in range(size_predicts):
                    data = self.q.get()
                    predictions[i] = self.predict(data)
                # Majority vote over the chunk-level predictions.
                counter_predictions = collections.Counter(predictions)
                predictions[:size_predicts] = 0
                keymax_predictions = max(counter_predictions, key = counter_predictions.get)
                precision = counter_predictions[keymax_predictions] / size_predicts
                if(precision >= self.threshold):
                    message = time.strftime("%Y-%m-%d %H:%M:%S: ", time.localtime(time.time())) + self.labels[int(keymax_predictions)] + "(p: %0.2f)"% (precision)
                else:
                    # Below threshold: report the "background" label instead.
                    message = time.strftime("%Y-%m-%d %H:%M:%S: ", time.localtime(time.time())) + self.labels[1]
                logging.info(message)
                lastprocessing_time = time.time()
                remain_time = lastprocessing_time - current_time
                # NOTE(review): this sleeps for the *elapsed* time, not the
                # time remaining until feed_duration; kept as-is to preserve
                # behaviour -- confirm the intended pacing.
                if(remain_time < self.feed_duration):
                    time.sleep(remain_time)
        except (KeyboardInterrupt, SystemExit):
            self.stream = False
            # Stop and close the stream
            self.stream_in.stop_stream()
            self.stream_in.close()
            # Terminate the PortAudio interface
            self.audio.terminate()
    def predict(self, data):
        """Classify one audio window.

        Args:
            data: raw int16 samples at the device sample rate.
        Returns:
            The argmax label index, or -1 (also stopping the stream) if the
            resampled window does not have the expected (1, 16000) shape.
        """
        try:
            data = np.array(data, np.float32)
            data = np.expand_dims(data, axis = 0)
            # Resample from the device rate down to the model's rate.
            data = signal.resample(data, self.sample_rate, axis = 1)
            assert data.shape == (1, 16000)
            # Normalize short ints to floats in range [-1..1).
            data = np.array(data, np.float32) / 32768.0
            interpreter = self._get_interpreter()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            padded_input = np.zeros((1, 16000), dtype=np.float32)
            padded_input[:, :data.shape[1]] = data
            # set input audio data (by default data at index 0)
            interpreter.set_tensor(input_details[0]['index'], padded_input.astype(np.float32))
            # run inference
            interpreter.invoke()
            # get output: classification
            out_tflite = interpreter.get_tensor(output_details[0]['index'])
            out_tflite_argmax = np.argmax(out_tflite)
            return out_tflite_argmax
        except(AssertionError):
            self.stream = False
            return -1
def main():
    """Entry point: build the stream controller and start listening."""
    controller = StreamControl()
    controller.run()
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"tflite_runtime.interpreter.Interpreter",
"os.path.join",
"numpy.argmax",
"logging.info",
"time.sleep",
"numpy.append",
"numpy.array",
"numpy.zeros",
"scipy.signal.resample",
"collections.Counter",
"numpy.expand_dims",
"numpy.frombuffer",
"queue.Queue",
"pyaudio.PyAu... | [((739, 758), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (756, 758), False, 'import logging\n'), ((1966, 1973), 'queue.Queue', 'Queue', ([], {}), '()\n', (1971, 1973), False, 'from queue import Queue\n'), ((2056, 2098), 'numpy.zeros', 'np.zeros', (['self.feed_samples'], {'dtype': '"""int16"""'}), "(self.feed_samples, dtype='int16')\n", (2064, 2098), True, 'import numpy as np\n'), ((3043, 3060), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (3058, 3060), False, 'import pyaudio\n'), ((3475, 3500), 'numpy.zeros', 'np.zeros', (['[size_predicts]'], {}), '([size_predicts])\n', (3483, 3500), True, 'import numpy as np\n'), ((2638, 2675), 'numpy.frombuffer', 'np.frombuffer', (['in_data'], {'dtype': '"""int16"""'}), "(in_data, dtype='int16')\n", (2651, 2675), True, 'import numpy as np\n'), ((2714, 2741), 'numpy.append', 'np.append', (['self.data', 'data0'], {}), '(self.data, data0)\n', (2723, 2741), True, 'import numpy as np\n'), ((5213, 5239), 'numpy.array', 'np.array', (['data', 'np.float32'], {}), '(data, np.float32)\n', (5221, 5239), True, 'import numpy as np\n'), ((5259, 5287), 'numpy.expand_dims', 'np.expand_dims', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (5273, 5287), True, 'import numpy as np\n'), ((5322, 5369), 'scipy.signal.resample', 'signal.resample', (['data', 'self.sample_rate'], {'axis': '(1)'}), '(data, self.sample_rate, axis=1)\n', (5337, 5369), False, 'from scipy import signal\n'), ((5829, 5869), 'tflite_runtime.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'model_content'}), '(model_content=model_content)\n', (5840, 5869), False, 'from tflite_runtime.interpreter import Interpreter\n'), ((6113, 6151), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (6121, 6151), True, 'import numpy as np\n'), ((6624, 6645), 'numpy.argmax', 'np.argmax', (['out_tflite'], {}), '(out_tflite)\n', (6633, 6645), True, 'import numpy as np\n'), ((2153, 
2191), 'os.path.join', 'os.path.join', (['path_model', '"""labels.txt"""'], {}), "(path_model, 'labels.txt')\n", (2165, 2191), False, 'import os\n'), ((3599, 3610), 'time.time', 'time.time', ([], {}), '()\n', (3608, 3610), False, 'import time\n'), ((3846, 3878), 'collections.Counter', 'collections.Counter', (['predictions'], {}), '(predictions)\n', (3865, 3878), False, 'import collections\n'), ((4536, 4557), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (4548, 4557), False, 'import logging\n'), ((4613, 4624), 'time.time', 'time.time', ([], {}), '()\n', (4622, 4624), False, 'import time\n'), ((5571, 5597), 'numpy.array', 'np.array', (['data', 'np.float32'], {}), '(data, np.float32)\n', (5579, 5597), True, 'import numpy as np\n'), ((4764, 4787), 'time.sleep', 'time.sleep', (['remain_time'], {}), '(remain_time)\n', (4774, 4787), False, 'import time\n'), ((5689, 5735), 'os.path.join', 'os.path.join', (['self.path_model', 'self.name_model'], {}), '(self.path_model, self.name_model)\n', (5701, 5735), False, 'import os\n'), ((4488, 4499), 'time.time', 'time.time', ([], {}), '()\n', (4497, 4499), False, 'import time\n'), ((4303, 4314), 'time.time', 'time.time', ([], {}), '()\n', (4312, 4314), False, 'import time\n')] |
import cv2
import numpy as np
from numpy.linalg import norm
import sys
import os
import json
SZ = 20          # side length (px) of the square character images used for training
MAX_WIDTH = 1000 # maximum width of the input photo; wider images are resized down
Min_Area = 2000  # contour-area threshold: smaller contours are rejected as plate candidates
PROVINCE_START = 1000  # label offset added to province indices for the Chinese-character SVM
def imreadex(filename):
    """Read an image file into a BGR array.

    The file bytes are loaded with numpy and decoded in memory so that
    paths containing non-ASCII characters can be read.
    """
    raw = np.fromfile(filename, dtype=np.uint8)
    return cv2.imdecode(raw, cv2.IMREAD_COLOR)
def point_limit(point):
    """Clamp the x/y coordinates of *point* to be non-negative, in place."""
    for axis in (0, 1):
        if point[axis] < 0:
            point[axis] = 0
def find_waves(threshold, histogram):
    """Locate peaks (runs above *threshold*) in *histogram*.

    Returns a list of (start, end) index pairs.  A peak is only closed once
    it is wider than 2 bins, so very narrow dips inside a peak are absorbed;
    a peak still open at the end of the histogram is kept only when it is
    wider than 4 bins.  Used to split the plate image into characters.
    """
    wave_peaks = []
    in_peak = histogram[0] > threshold
    start = 0 if in_peak else -1
    for idx, value in enumerate(histogram):
        if in_peak and value < threshold:
            if idx - start > 2:
                in_peak = False
                wave_peaks.append((start, idx))
        elif not in_peak and value >= threshold:
            in_peak = True
            start = idx
    if in_peak and start != -1 and idx - start > 4:
        wave_peaks.append((start, idx))
    return wave_peaks
def seperate_card(img, waves):
    """Cut *img* into vertical strips, one per (start, end) column pair in *waves*."""
    return [img[:, left:right] for left, right in waves]
def deskew(img):
    """Straighten a sheared character image using its image moments.

    From the OpenCV digits sample: the skew is estimated from the
    second-order moments and removed with an inverse affine warp.
    """
    moments = cv2.moments(img)
    if abs(moments['mu02']) < 1e-2:
        # Essentially no vertical variance -> nothing to correct.
        return img.copy()
    skew = moments['mu11'] / moments['mu02']
    warp = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
    return cv2.warpAffine(img, warp, (SZ, SZ),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
def preprocess_hog(digits):
    """Compute a 64-dimensional HOG descriptor for each image in *digits*.

    From the OpenCV digits sample: a 16-bin gradient-orientation histogram
    is built for each of the four image quadrants, the four histograms are
    concatenated and Hellinger-normalised.
    """
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        n_bins = 16
        # Quantise angles into n_bins orientation bins.
        binned = np.int32(n_bins * ang / (2 * np.pi))
        quadrants = [(slice(None, 10), slice(None, 10)),
                     (slice(10, None), slice(None, 10)),
                     (slice(None, 10), slice(10, None)),
                     (slice(10, None), slice(10, None))]
        hists = [np.bincount(binned[q].ravel(), mag[q].ravel(), n_bins)
                 for q in quadrants]
        hist = np.hstack(hists)
        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps
        samples.append(hist)
    return np.float32(samples)
# Mapping between the pinyin directory names used in the training data and
# the province abbreviation character printed on the plate.  Entries come in
# [dir_name, character] pairs, so the character for pinyin index i sits at
# provinces[i + 1].  Not guaranteed to cover every province.
provinces = [
    "zh_cuan", "川",
    "zh_e", "鄂",
    "zh_gan", "赣",
    "zh_gan1", "甘",
    "zh_gui", "贵",
    "zh_gui1", "桂",
    "zh_hei", "黑",
    "zh_hu", "沪",
    "zh_ji", "冀",
    "zh_jin", "津",
    "zh_jing", "京",
    "zh_jl", "吉",
    "zh_liao", "辽",
    "zh_lu", "鲁",
    "zh_meng", "蒙",
    "zh_min", "闽",
    "zh_ning", "宁",
    "zh_qing", "靑",
    "zh_qiong", "琼",
    "zh_shan", "陕",
    "zh_su", "苏",
    "zh_sx", "晋",
    "zh_wan", "皖",
    "zh_xiang", "湘",
    "zh_xin", "新",
    "zh_yu", "豫",
    "zh_yu1", "渝",
    "zh_yue", "粤",
    "zh_yun", "云",
    "zh_zang", "藏",
    "zh_zhe", "浙"
]
class StatModel(object):
    """Thin wrapper around an OpenCV ML model providing load/save helpers."""
    def load(self, fn):
        """Replace the wrapped model with one loaded from file *fn*."""
        self.model = self.model.load(fn)
    def save(self, fn):
        """Persist the wrapped model to file *fn*."""
        self.model.save(fn)
class SVM(StatModel):
    """RBF-kernel C-SVC classifier built on OpenCV's SVM implementation."""
    def __init__(self, C = 1, gamma = 0.5):
        self.model = cv2.ml.SVM_create()
        self.model.setGamma(gamma)
        self.model.setC(C)
        self.model.setKernel(cv2.ml.SVM_RBF)
        self.model.setType(cv2.ml.SVM_C_SVC)
    def train(self, samples, responses):
        """Train the SVM on row-major *samples* with integer *responses*."""
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
    def predict(self, samples):
        """Return the predicted label for every row of *samples*."""
        return self.model.predict(samples)[1].ravel()
class CardPredictor:
    """Locate a Chinese licence plate in a photo and recognise its characters
    using two SVM classifiers (one for digits/letters, one for the province
    character)."""
    def __init__(self):
        # Tunable recognition parameters live in config.js so they can be
        # adjusted per image resolution.
        f = open('config.js')
        j = json.load(f)
        for c in j["config"]:
            if c["open"]:
                self.cfg = c.copy()
                break
        else:
            raise RuntimeError('没有设置有效配置参数')
    def __del__(self):
        # Persist the trained SVMs when the predictor is destroyed.
        self.save_traindata()
    def train_svm(self):
        """Load the two SVMs from disk, or train them from the folders
        train\\chars2 (digits/letters) and train\\charsChinese (provinces)."""
        # SVM for Latin letters and digits
        self.model = SVM(C=1, gamma=0.5)
        # SVM for the Chinese province characters
        self.modelchinese = SVM(C=1, gamma=0.5)
        if os.path.exists("svm.dat"):
            self.model.load("svm.dat")
        else:
            chars_train = []
            chars_label = []
            for root, dirs, files in os.walk("train\\chars2"):
                # Only single-character directory names label a class.
                if len(os.path.basename(root)) > 1:
                    continue
                root_int = ord(os.path.basename(root))
                for filename in files:
                    filepath = os.path.join(root,filename)
                    digit_img = cv2.imread(filepath)
                    digit_img = cv2.cvtColor(digit_img, cv2.COLOR_BGR2GRAY)
                    chars_train.append(digit_img)
                    #chars_label.append(1)
                    chars_label.append(root_int)
            chars_train = list(map(deskew, chars_train))
            chars_train = preprocess_hog(chars_train)
            #chars_train = chars_train.reshape(-1, 20, 20).astype(np.float32)
            chars_label = np.array(chars_label)
            print(chars_train.shape)
            self.model.train(chars_train, chars_label)
        if os.path.exists("svmchinese.dat"):
            self.modelchinese.load("svmchinese.dat")
        else:
            chars_train = []
            chars_label = []
            for root, dirs, files in os.walk("train\\charsChinese"):
                if not os.path.basename(root).startswith("zh_"):
                    continue
                pinyin = os.path.basename(root)
                index = provinces.index(pinyin) + PROVINCE_START + 1  # +1: the character follows its pinyin key
                for filename in files:
                    filepath = os.path.join(root,filename)
                    digit_img = cv2.imread(filepath)
                    digit_img = cv2.cvtColor(digit_img, cv2.COLOR_BGR2GRAY)
                    chars_train.append(digit_img)
                    #chars_label.append(1)
                    chars_label.append(index)
            chars_train = list(map(deskew, chars_train))
            chars_train = preprocess_hog(chars_train)
            #chars_train = chars_train.reshape(-1, 20, 20).astype(np.float32)
            chars_label = np.array(chars_label)
            print(chars_train.shape)
            self.modelchinese.train(chars_train, chars_label)
    def save_traindata(self):
        # Save each model only if no saved copy exists yet.
        if not os.path.exists("svm.dat"):
            self.model.save("svm.dat")
        if not os.path.exists("svmchinese.dat"):
            self.modelchinese.save("svmchinese.dat")
    def accurate_place(self, card_img_hsv, limit1, limit2, color):
        """Shrink the candidate plate to the rows/columns whose HSV hue lies
        in (limit1, limit2].  Returns column/row bounds (xl, xr, yh, yl)."""
        row_num, col_num = card_img_hsv.shape[:2]
        xl = col_num
        xr = 0
        yh = 0
        yl = row_num
        #col_num_limit = self.cfg["col_num_limit"]
        row_num_limit = self.cfg["row_num_limit"]
        col_num_limit = col_num * 0.8 if color != "green" else col_num * 0.5  # green plates have a colour gradient
        for i in range(row_num):
            count = 0
            for j in range(col_num):
                H = card_img_hsv.item(i, j, 0)
                S = card_img_hsv.item(i, j, 1)
                V = card_img_hsv.item(i, j, 2)
                if limit1 < H <= limit2 and 34 < S and 46 < V:
                    count += 1
            if count > col_num_limit:
                if yl > i:
                    yl = i
                if yh < i:
                    yh = i
        for j in range(col_num):
            count = 0
            for i in range(row_num):
                H = card_img_hsv.item(i, j, 0)
                S = card_img_hsv.item(i, j, 1)
                V = card_img_hsv.item(i, j, 2)
                if limit1 < H <= limit2 and 34 < S and 46 < V:
                    count += 1
            if count > row_num - row_num_limit:
                if xl > j:
                    xl = j
                if xr < j:
                    xr = j
        return xl, xr, yh, yl
    def predict(self, car_pic):
        """Full pipeline: locate the plate in *car_pic* (file path or BGR
        image) and recognise its characters.

        Returns (predict_result, roi, card_color): the recognised character
        list, the cropped plate image, and the plate colour.
        """
        if type(car_pic) == type(""):
            img = imreadex(car_pic)
        else:
            img = car_pic
        pic_hight, pic_width = img.shape[:2]
        if pic_width > MAX_WIDTH:
            resize_rate = MAX_WIDTH / pic_width
            img = cv2.resize(img, (MAX_WIDTH, int(pic_hight*resize_rate)), interpolation=cv2.INTER_AREA)
        blur = self.cfg["blur"]
        # Gaussian denoising
        if blur > 0:
            img = cv2.GaussianBlur(img, (blur, blur), 0)  # kernel size tuned to image resolution
        oldimg = img
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #equ = cv2.equalizeHist(img)
        #img = np.hstack((img, equ))
        # Suppress regions that cannot be part of a plate
        kernel = np.ones((20, 20), np.uint8)
        img_opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img_opening = cv2.addWeighted(img, 1, img_opening, -1, 0);
        # Find image edges
        ret, img_thresh = cv2.threshold(img_opening, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        img_edge = cv2.Canny(img_thresh, 100, 200)
        # Use close/open morphology so the edges merge into solid regions
        kernel = np.ones((self.cfg["morphologyr"], self.cfg["morphologyc"]), np.uint8)
        img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)
        img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)
        # Find the rectangular contour regions; the plate is one of them
        try:
            contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        except ValueError:
            # OpenCV 3.x returns (image, contours, hierarchy)
            image, contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = [cnt for cnt in contours if cv2.contourArea(cnt) > Min_Area]
        print('len(contours)', len(contours))
        # Reject, one by one, the rectangles that cannot be a plate
        car_contours = []
        for cnt in contours:
            rect = cv2.minAreaRect(cnt)
            area_width, area_height = rect[1]
            if area_width < area_height:
                area_width, area_height = area_height, area_width
            wh_ratio = area_width / area_height
            #print(wh_ratio)
            # A plate's aspect ratio is between 2 and 5.5; discard the rest
            if wh_ratio > 2 and wh_ratio < 5.5:
                car_contours.append(rect)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                #oldimg = cv2.drawContours(oldimg, [box], 0, (0, 0, 255), 2)
                #cv2.imshow("edge4", oldimg)
                #print(rect)
        print(len(car_contours))
        print("精确定位")
        card_imgs = []
        # The candidate rectangles may be rotated; rectify them so that
        # colour-based location can be used
        for rect in car_contours:
            if rect[2] > -1 and rect[2] < 1:  # force the angle so the left/high/right/low points come out correctly
                angle = 1
            else:
                angle = rect[2]
            rect = (rect[0], (rect[1][0]+5, rect[1][1]+5), angle)  # enlarge the rect so the plate border is not cut off
            box = cv2.boxPoints(rect)
            heigth_point = right_point = [0, 0]
            left_point = low_point = [pic_width, pic_hight]
            for point in box:
                if left_point[0] > point[0]:
                    left_point = point
                if low_point[1] > point[1]:
                    low_point = point
                if heigth_point[1] < point[1]:
                    heigth_point = point
                if right_point[0] < point[0]:
                    right_point = point
            if left_point[1] <= right_point[1]:  # positive angle
                new_right_point = [right_point[0], heigth_point[1]]
                pts2 = np.float32([left_point, heigth_point, new_right_point])  # only the character height changes
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                point_limit(new_right_point)
                point_limit(heigth_point)
                point_limit(left_point)
                card_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]
                card_imgs.append(card_img)
                #cv2.imshow("card", card_img)
                #cv2.waitKey(0)
            elif left_point[1] > right_point[1]:  # negative angle
                new_left_point = [left_point[0], heigth_point[1]]
                pts2 = np.float32([new_left_point, heigth_point, right_point])  # only the character height changes
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                point_limit(right_point)
                point_limit(heigth_point)
                point_limit(new_left_point)
                card_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]
                card_imgs.append(card_img)
                #cv2.imshow("card", card_img)
                #cv2.waitKey(0)
        # Use colour to reject rectangles that are not plates; only blue,
        # green and yellow plates are recognised
        colors = []
        for card_index,card_img in enumerate(card_imgs):
            green = yello = blue = black = white = 0
            card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
            # The conversion can fail when the rectification above went wrong
            if card_img_hsv is None:
                continue
            row_num, col_num= card_img_hsv.shape[:2]
            card_img_count = row_num * col_num
            for i in range(row_num):
                for j in range(col_num):
                    H = card_img_hsv.item(i, j, 0)
                    S = card_img_hsv.item(i, j, 1)
                    V = card_img_hsv.item(i, j, 2)
                    if 11 < H <= 34 and S > 34:  # hue ranges tuned to image resolution
                        yello += 1
                    elif 35 < H <= 99 and S > 34:
                        green += 1
                    elif 99 < H <= 124 and S > 34:
                        blue += 1
                    if 0 < H <180 and 0 < S < 255 and 0 < V < 46:
                        black += 1
                    elif 0 < H <180 and 0 < S < 43 and 221 < V < 225:
                        white += 1
            color = "no"
            limit1 = limit2 = 0
            if yello*2 >= card_img_count:
                color = "yello"
                limit1 = 11
                limit2 = 34  # some images have a colour cast towards green
            elif green*2 >= card_img_count:
                color = "green"
                limit1 = 35
                limit2 = 99
            elif blue*2 >= card_img_count:
                color = "blue"
                limit1 = 100
                limit2 = 124  # some images have a colour cast towards purple
            elif black + white >= card_img_count*0.7:  # TODO
                color = "bw"
            print(color)
            colors.append(color)
            print(blue, green, yello, black, white, card_img_count)
            #cv2.imshow("color", card_img)
            #cv2.waitKey(0)
            if limit1 == 0:
                continue
            # Plate colour determined above; below, re-locate the plate using
            # that colour to trim non-plate borders
            xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1, limit2, color)
            if yl == yh and xl == xr:
                continue
            need_accurate = False
            if yl >= yh:
                yl = 0
                yh = row_num
                need_accurate = True
            if xl >= xr:
                xl = 0
                xr = col_num
                need_accurate = True
            card_imgs[card_index] = card_img[yl:yh, xl:xr] if color != "green" or yl < (yh-yl)//4 else card_img[yl-(yh-yl)//4:yh, xl:xr]
            if need_accurate:  # x or y may not have been narrowed; try once more
                card_img = card_imgs[card_index]
                card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
                xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1, limit2, color)
                if yl == yh and xl == xr:
                    continue
                if yl >= yh:
                    yl = 0
                    yh = row_num
                if xl >= xr:
                    xl = 0
                    xr = col_num
            card_imgs[card_index] = card_img[yl:yh, xl:xr] if color != "green" or yl < (yh-yl)//4 else card_img[yl-(yh-yl)//4:yh, xl:xr]
        # Plate located; below, recognise the characters on the plate
        predict_result = []
        roi = None
        card_color = None
        for i, color in enumerate(colors):
            if color in ("blue", "yello", "green"):
                card_img = card_imgs[i]
                gray_img = cv2.cvtColor(card_img, cv2.COLOR_BGR2GRAY)
                # Yellow and green plates have dark characters on a light
                # background -- the opposite of blue plates -- so invert them
                if color == "green" or color == "yello":
                    gray_img = cv2.bitwise_not(gray_img)
                ret, gray_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                # Find peaks in the horizontal histogram
                x_histogram = np.sum(gray_img, axis=1)
                x_min = np.min(x_histogram)
                x_average = np.sum(x_histogram)/x_histogram.shape[0]
                x_threshold = (x_min + x_average)/2
                wave_peaks = find_waves(x_threshold, x_histogram)
                if len(wave_peaks) == 0:
                    print("peak less 0:")
                    continue
                # Assume the widest horizontal peak is the plate area
                wave = max(wave_peaks, key=lambda x:x[1]-x[0])
                gray_img = gray_img[wave[0]:wave[1]]
                # Find peaks in the vertical histogram
                row_num, col_num= gray_img.shape[:2]
                # Drop 1px at the top/bottom so white borders do not skew the threshold
                gray_img = gray_img[1:row_num-1]
                y_histogram = np.sum(gray_img, axis=0)
                y_min = np.min(y_histogram)
                y_average = np.sum(y_histogram)/y_histogram.shape[0]
                y_threshold = (y_min + y_average)/5  # smaller threshold so U and 0 are not split in two
                wave_peaks = find_waves(y_threshold, y_histogram)
                #for wave in wave_peaks:
                #	cv2.line(card_img, pt1=(wave[0], 5), pt2=(wave[1], 5), color=(0, 0, 255), thickness=2)
                # A plate should contain more than 6 characters
                if len(wave_peaks) <= 6:
                    print("peak less 1:", len(wave_peaks))
                    continue
                wave = max(wave_peaks, key=lambda x:x[1]-x[0])
                max_wave_dis = wave[1] - wave[0]
                # Check whether the first peak is the plate's left border
                if wave_peaks[0][1] - wave_peaks[0][0] < max_wave_dis/3 and wave_peaks[0][0] == 0:
                    wave_peaks.pop(0)
                # Merge the peaks that together form the (split) Chinese character
                cur_dis = 0
                for i,wave in enumerate(wave_peaks):
                    if wave[1] - wave[0] + cur_dis > max_wave_dis * 0.6:
                        break
                    else:
                        cur_dis += wave[1] - wave[0]
                if i > 0:
                    wave = (wave_peaks[0][0], wave_peaks[i][1])
                    wave_peaks = wave_peaks[i+1:]
                    wave_peaks.insert(0, wave)
                # Remove the separator dot on the plate
                point = wave_peaks[2]
                if point[1] - point[0] < max_wave_dis/3:
                    point_img = gray_img[:,point[0]:point[1]]
                    if np.mean(point_img) < 255/5:
                        wave_peaks.pop(2)
                if len(wave_peaks) <= 6:
                    print("peak less 2:", len(wave_peaks))
                    continue
                part_cards = seperate_card(gray_img, wave_peaks)
                for i, part_card in enumerate(part_cards):
                    # Might be a rivet that fixes the plate
                    if np.mean(part_card) < 255/5:
                        print("a point")
                        continue
                    part_card_old = part_card
                    w = abs(part_card.shape[1] - SZ)//2
                    part_card = cv2.copyMakeBorder(part_card, 0, 0, w, w, cv2.BORDER_CONSTANT, value = [0,0,0])
                    part_card = cv2.resize(part_card, (SZ, SZ), interpolation=cv2.INTER_AREA)
                    #part_card = deskew(part_card)
                    part_card = preprocess_hog([part_card])
                    if i == 0:
                        resp = self.modelchinese.predict(part_card)
                        charactor = provinces[int(resp[0]) - PROVINCE_START]
                    else:
                        resp = self.model.predict(part_card)
                        charactor = chr(resp[0])
                    # Check whether the trailing "1" is actually the plate's right border
                    if charactor == "1" and i == len(part_cards)-1:
                        if part_card_old.shape[0]/part_card_old.shape[1] >= 7:  # a too-thin "1" is treated as the border
                            continue
                    predict_result.append(charactor)
                roi = card_img
                card_color = color
                break
        return predict_result, roi, card_color  # recognised characters, located plate image, plate colour
if __name__ == '__main__':
    # Train (or load) the SVMs, then recognise the sample plate image.
    predictor = CardPredictor()
    predictor.train_svm()
    chars, plate_img, plate_color = predictor.predict(r"C:\Users\999\Documents\Upupoo\Docker\config\文件\图像数字处理\苏E05EV8.jpg")
    print(chars)
| [
"numpy.fromfile",
"numpy.sqrt",
"numpy.hstack",
"numpy.int32",
"numpy.array",
"numpy.linalg.norm",
"os.walk",
"os.path.exists",
"numpy.mean",
"cv2.threshold",
"cv2.contourArea",
"cv2.minAreaRect",
"cv2.addWeighted",
"numpy.min",
"cv2.warpAffine",
"numpy.ones",
"cv2.boxPoints",
"cv2... | [((1113, 1129), 'cv2.moments', 'cv2.moments', (['img'], {}), '(img)\n', (1124, 1129), False, 'import cv2\n'), ((1210, 1262), 'numpy.float32', 'np.float32', (['[[1, skew, -0.5 * SZ * skew], [0, 1, 0]]'], {}), '([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])\n', (1220, 1262), True, 'import numpy as np\n'), ((1266, 1345), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(SZ, SZ)'], {'flags': '(cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)'}), '(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)\n', (1280, 1345), False, 'import cv2\n'), ((2029, 2048), 'numpy.float32', 'np.float32', (['samples'], {}), '(samples)\n', (2039, 2048), True, 'import numpy as np\n'), ((250, 287), 'numpy.fromfile', 'np.fromfile', (['filename'], {'dtype': 'np.uint8'}), '(filename, dtype=np.uint8)\n', (261, 287), True, 'import numpy as np\n'), ((1452, 1484), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_32F', '(1)', '(0)'], {}), '(img, cv2.CV_32F, 1, 0)\n', (1461, 1484), False, 'import cv2\n'), ((1492, 1524), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_32F', '(0)', '(1)'], {}), '(img, cv2.CV_32F, 0, 1)\n', (1501, 1524), False, 'import cv2\n'), ((1538, 1561), 'cv2.cartToPolar', 'cv2.cartToPolar', (['gx', 'gy'], {}), '(gx, gy)\n', (1553, 1561), False, 'import cv2\n'), ((1583, 1618), 'numpy.int32', 'np.int32', (['(bin_n * ang / (2 * np.pi))'], {}), '(bin_n * ang / (2 * np.pi))\n', (1591, 1618), True, 'import numpy as np\n'), ((1851, 1867), 'numpy.hstack', 'np.hstack', (['hists'], {}), '(hists)\n', (1860, 1867), True, 'import numpy as np\n'), ((1954, 1967), 'numpy.sqrt', 'np.sqrt', (['hist'], {}), '(hist)\n', (1961, 1967), True, 'import numpy as np\n'), ((2750, 2769), 'cv2.ml.SVM_create', 'cv2.ml.SVM_create', ([], {}), '()\n', (2767, 2769), False, 'import cv2\n'), ((3197, 3209), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3206, 3209), False, 'import json\n'), ((3499, 3524), 'os.path.exists', 'os.path.exists', (['"""svm.dat"""'], {}), "('svm.dat')\n", (3513, 3524), False, 'import 
os\n'), ((4310, 4342), 'os.path.exists', 'os.path.exists', (['"""svmchinese.dat"""'], {}), "('svmchinese.dat')\n", (4324, 4342), False, 'import os\n'), ((6847, 6884), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (6859, 6884), False, 'import cv2\n'), ((6975, 7002), 'numpy.ones', 'np.ones', (['(20, 20)', 'np.uint8'], {}), '((20, 20), np.uint8)\n', (6982, 7002), True, 'import numpy as np\n'), ((7019, 7064), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img, cv2.MORPH_OPEN, kernel)\n', (7035, 7064), False, 'import cv2\n'), ((7081, 7124), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(1)', 'img_opening', '(-1)', '(0)'], {}), '(img, 1, img_opening, -1, 0)\n', (7096, 7124), False, 'import cv2\n'), ((7157, 7228), 'cv2.threshold', 'cv2.threshold', (['img_opening', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(img_opening, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (7170, 7228), False, 'import cv2\n'), ((7242, 7273), 'cv2.Canny', 'cv2.Canny', (['img_thresh', '(100)', '(200)'], {}), '(img_thresh, 100, 200)\n', (7251, 7273), False, 'import cv2\n'), ((7309, 7378), 'numpy.ones', 'np.ones', (["(self.cfg['morphologyr'], self.cfg['morphologyc'])", 'np.uint8'], {}), "((self.cfg['morphologyr'], self.cfg['morphologyc']), np.uint8)\n", (7316, 7378), True, 'import numpy as np\n'), ((7393, 7444), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_edge', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(img_edge, cv2.MORPH_CLOSE, kernel)\n', (7409, 7444), False, 'import cv2\n'), ((7459, 7510), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_edge1', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img_edge1, cv2.MORPH_OPEN, kernel)\n', (7475, 7510), False, 'import cv2\n'), ((1978, 1988), 'numpy.linalg.norm', 'norm', (['hist'], {}), '(hist)\n', (1982, 1988), False, 'from numpy.linalg import norm\n'), ((3636, 3660), 'os.walk', 'os.walk', (['"""train\\\\chars2"""'], {}), "('train\\\\chars2')\n", (3643, 
3660), False, 'import os\n'), ((4209, 4230), 'numpy.array', 'np.array', (['chars_label'], {}), '(chars_label)\n', (4217, 4230), True, 'import numpy as np\n'), ((4464, 4494), 'os.walk', 'os.walk', (['"""train\\\\charsChinese"""'], {}), "('train\\\\charsChinese')\n", (4471, 4494), False, 'import os\n'), ((5110, 5131), 'numpy.array', 'np.array', (['chars_label'], {}), '(chars_label)\n', (5118, 5131), True, 'import numpy as np\n'), ((5250, 5275), 'os.path.exists', 'os.path.exists', (['"""svm.dat"""'], {}), "('svm.dat')\n", (5264, 5275), False, 'import os\n'), ((5316, 5348), 'os.path.exists', 'os.path.exists', (['"""svmchinese.dat"""'], {}), "('svmchinese.dat')\n", (5330, 5348), False, 'import os\n'), ((6777, 6815), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(blur, blur)', '(0)'], {}), '(img, (blur, blur), 0)\n', (6793, 6815), False, 'import cv2\n'), ((7583, 7650), 'cv2.findContours', 'cv2.findContours', (['img_edge2', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (7599, 7650), False, 'import cv2\n'), ((7956, 7976), 'cv2.minAreaRect', 'cv2.minAreaRect', (['cnt'], {}), '(cnt)\n', (7971, 7976), False, 'import cv2\n'), ((8741, 8760), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (8754, 8760), False, 'import cv2\n'), ((10518, 10559), 'cv2.cvtColor', 'cv2.cvtColor', (['card_img', 'cv2.COLOR_BGR2HSV'], {}), '(card_img, cv2.COLOR_BGR2HSV)\n', (10530, 10559), False, 'import cv2\n'), ((4576, 4598), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (4592, 4598), False, 'import os\n'), ((7704, 7771), 'cv2.findContours', 'cv2.findContours', (['img_edge2', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (7720, 7771), False, 'import cv2\n'), ((8281, 8300), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (8294, 8300), False, 'import cv2\n'), ((8311, 8323), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (8318, 
8323), True, 'import numpy as np\n'), ((9215, 9270), 'numpy.float32', 'np.float32', (['[left_point, heigth_point, new_right_point]'], {}), '([left_point, heigth_point, new_right_point])\n', (9225, 9270), True, 'import numpy as np\n'), ((9293, 9344), 'numpy.float32', 'np.float32', (['[left_point, heigth_point, right_point]'], {}), '([left_point, heigth_point, right_point])\n', (9303, 9344), True, 'import numpy as np\n'), ((9353, 9387), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (9375, 9387), False, 'import cv2\n'), ((9398, 9447), 'cv2.warpAffine', 'cv2.warpAffine', (['oldimg', 'M', '(pic_width, pic_hight)'], {}), '(oldimg, M, (pic_width, pic_hight))\n', (9412, 9447), False, 'import cv2\n'), ((12316, 12357), 'cv2.cvtColor', 'cv2.cvtColor', (['card_img', 'cv2.COLOR_BGR2HSV'], {}), '(card_img, cv2.COLOR_BGR2HSV)\n', (12328, 12357), False, 'import cv2\n'), ((12905, 12947), 'cv2.cvtColor', 'cv2.cvtColor', (['card_img', 'cv2.COLOR_BGR2GRAY'], {}), '(card_img, cv2.COLOR_BGR2GRAY)\n', (12917, 12947), False, 'import cv2\n'), ((13093, 13161), 'cv2.threshold', 'cv2.threshold', (['gray_img', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (13106, 13161), False, 'import cv2\n'), ((13196, 13220), 'numpy.sum', 'np.sum', (['gray_img'], {'axis': '(1)'}), '(gray_img, axis=1)\n', (13202, 13220), True, 'import numpy as np\n'), ((13233, 13252), 'numpy.min', 'np.min', (['x_histogram'], {}), '(x_histogram)\n', (13239, 13252), True, 'import numpy as np\n'), ((13729, 13753), 'numpy.sum', 'np.sum', (['gray_img'], {'axis': '(0)'}), '(gray_img, axis=0)\n', (13735, 13753), True, 'import numpy as np\n'), ((13766, 13785), 'numpy.min', 'np.min', (['y_histogram'], {}), '(y_histogram)\n', (13772, 13785), True, 'import numpy as np\n'), ((3735, 3757), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (3751, 3757), False, 'import os\n'), ((3802, 3830), 
'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (3814, 3830), False, 'import os\n'), ((3847, 3867), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (3857, 3867), False, 'import cv2\n'), ((3885, 3928), 'cv2.cvtColor', 'cv2.cvtColor', (['digit_img', 'cv2.COLOR_BGR2GRAY'], {}), '(digit_img, cv2.COLOR_BGR2GRAY)\n', (3897, 3928), False, 'import cv2\n'), ((4710, 4738), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (4722, 4738), False, 'import os\n'), ((4755, 4775), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (4765, 4775), False, 'import cv2\n'), ((4793, 4836), 'cv2.cvtColor', 'cv2.cvtColor', (['digit_img', 'cv2.COLOR_BGR2GRAY'], {}), '(digit_img, cv2.COLOR_BGR2GRAY)\n', (4805, 4836), False, 'import cv2\n'), ((7813, 7833), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (7828, 7833), False, 'import cv2\n'), ((9842, 9897), 'numpy.float32', 'np.float32', (['[new_left_point, heigth_point, right_point]'], {}), '([new_left_point, heigth_point, right_point])\n', (9852, 9897), True, 'import numpy as np\n'), ((9920, 9971), 'numpy.float32', 'np.float32', (['[left_point, heigth_point, right_point]'], {}), '([left_point, heigth_point, right_point])\n', (9930, 9971), True, 'import numpy as np\n'), ((9980, 10014), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (10002, 10014), False, 'import cv2\n'), ((10025, 10074), 'cv2.warpAffine', 'cv2.warpAffine', (['oldimg', 'M', '(pic_width, pic_hight)'], {}), '(oldimg, M, (pic_width, pic_hight))\n', (10039, 10074), False, 'import cv2\n'), ((13047, 13072), 'cv2.bitwise_not', 'cv2.bitwise_not', (['gray_img'], {}), '(gray_img)\n', (13062, 13072), False, 'import cv2\n'), ((13269, 13288), 'numpy.sum', 'np.sum', (['x_histogram'], {}), '(x_histogram)\n', (13275, 13288), True, 'import numpy as np\n'), ((13802, 13821), 'numpy.sum', 'np.sum', (['y_histogram'], {}), '(y_histogram)\n', 
(13808, 13821), True, 'import numpy as np\n'), ((15302, 15381), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['part_card', '(0)', '(0)', 'w', 'w', 'cv2.BORDER_CONSTANT'], {'value': '[0, 0, 0]'}), '(part_card, 0, 0, w, w, cv2.BORDER_CONSTANT, value=[0, 0, 0])\n', (15320, 15381), False, 'import cv2\n'), ((15399, 15460), 'cv2.resize', 'cv2.resize', (['part_card', '(SZ, SZ)'], {'interpolation': 'cv2.INTER_AREA'}), '(part_card, (SZ, SZ), interpolation=cv2.INTER_AREA)\n', (15409, 15460), False, 'import cv2\n'), ((3673, 3695), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (3689, 3695), False, 'import os\n'), ((14872, 14890), 'numpy.mean', 'np.mean', (['point_img'], {}), '(point_img)\n', (14879, 14890), True, 'import numpy as np\n'), ((15141, 15159), 'numpy.mean', 'np.mean', (['part_card'], {}), '(part_card)\n', (15148, 15159), True, 'import numpy as np\n'), ((4507, 4529), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (4523, 4529), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Tests of the neo.core.irregularlysampledsignal.IrregularySampledSignal class
"""
import unittest
import os
import pickle
import warnings
from copy import deepcopy
import numpy as np
import quantities as pq
from numpy.testing import assert_array_equal
from neo.core.dataobject import ArrayDict
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.irregularlysampledsignal import IrregularlySampledSignal
from neo.core import Segment, ChannelIndex
from neo.core.baseneo import MergeError
from neo.test.tools import (assert_arrays_almost_equal, assert_arrays_equal,
assert_neo_object_is_compliant, assert_same_sub_schema,
assert_same_attributes, assert_same_annotations,
assert_same_array_annotations)
from neo.test.generate_datasets import (get_fake_value, get_fake_values, fake_neo,
TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
    """Check neo's fake-data generators for IrregularlySampledSignal.

    NOTE(review): the get_fake_value()/get_fake_values() calls below are
    seed- and call-order-sensitive, so the statement order must not change.
    """
    def setUp(self):
        # Fixed seed so every generated fake value is reproducible.
        np.random.seed(0)
        # Map "0", "1", ... to the canonical test annotations.
        self.annotations = dict(
            [(str(x), TEST_ANNOTATIONS[x]) for x in range(len(TEST_ANNOTATIONS))])
    def test__get_fake_values(self):
        """Fake attribute values must match per-attribute generator output."""
        self.annotations['seed'] = 0
        # Expected values, generated with the same seeds used internally
        # by get_fake_values(..., seed=0).
        times = get_fake_value('times', pq.Quantity, seed=0, dim=1)
        signal = get_fake_value('signal', pq.Quantity, seed=1, dim=2)
        name = get_fake_value('name', str, seed=2, obj=IrregularlySampledSignal)
        description = get_fake_value('description', str, seed=3, obj='IrregularlySampledSignal')
        file_origin = get_fake_value('file_origin', str)
        arr_ann = get_fake_value('array_annotations', dict, seed=5,
                                 obj=IrregularlySampledSignal, n=1)
        attrs1 = {'name': name, 'description': description, 'file_origin': file_origin}
        attrs2 = attrs1.copy()
        attrs2.update(self.annotations)
        attrs2['array_annotations'] = arr_ann
        # Both the class object and its name string must be accepted,
        # with and without annotations.
        res11 = get_fake_values(IrregularlySampledSignal, annotate=False, seed=0)
        res12 = get_fake_values('IrregularlySampledSignal', annotate=False, seed=0)
        res21 = get_fake_values(IrregularlySampledSignal, annotate=True, seed=0)
        res22 = get_fake_values('IrregularlySampledSignal', annotate=True, seed=0)
        assert_array_equal(res11.pop('times'), times)
        assert_array_equal(res12.pop('times'), times)
        assert_array_equal(res21.pop('times'), times)
        assert_array_equal(res22.pop('times'), times)
        assert_array_equal(res11.pop('signal'), signal)
        assert_array_equal(res12.pop('signal'), signal)
        assert_array_equal(res21.pop('signal'), signal)
        assert_array_equal(res22.pop('signal'), signal)
        self.assertEqual(res11, attrs1)
        self.assertEqual(res12, attrs1)
        # Array annotations need to be compared separately
        # because numpy arrays define equality differently
        arr_ann_res21 = res21.pop('array_annotations')
        arr_ann_attrs2 = attrs2.pop('array_annotations')
        self.assertEqual(res21, attrs2)
        assert_arrays_equal(arr_ann_res21['valid'], arr_ann_attrs2['valid'])
        assert_arrays_equal(arr_ann_res21['number'], arr_ann_attrs2['number'])
        arr_ann_res22 = res22.pop('array_annotations')
        self.assertEqual(res22, attrs2)
        assert_arrays_equal(arr_ann_res22['valid'], arr_ann_attrs2['valid'])
        assert_arrays_equal(arr_ann_res22['number'], arr_ann_attrs2['number'])
    def test__fake_neo__cascade(self):
        """fake_neo with cascade=True yields a compliant signal."""
        self.annotations['seed'] = None
        obj_type = IrregularlySampledSignal
        cascade = True
        res = fake_neo(obj_type=obj_type, cascade=cascade)
        self.assertTrue(isinstance(res, IrregularlySampledSignal))
        assert_neo_object_is_compliant(res)
        self.assertEqual(res.annotations, self.annotations)
    def test__fake_neo__nocascade(self):
        """fake_neo with cascade=False (name string) yields a compliant signal."""
        self.annotations['seed'] = None
        obj_type = 'IrregularlySampledSignal'
        cascade = False
        res = fake_neo(obj_type=obj_type, cascade=cascade)
        self.assertTrue(isinstance(res, IrregularlySampledSignal))
        assert_neo_object_is_compliant(res)
        self.assertEqual(res.annotations, self.annotations)
class TestIrregularlySampledSignalConstruction(unittest.TestCase):
    """Constructor behaviour: unit handling, rescaling and validation errors."""

    def _check_metadata(self, sig):
        # Assertions shared by the three creation tests: name/description/
        # file_origin, scalar annotations, and array annotations.
        self.assertEqual(sig.name, 'test')
        self.assertEqual(sig.description, 'tester')
        self.assertEqual(sig.file_origin, 'test.file')
        self.assertEqual(sig.annotations['test0'], [1, 2])
        self.assertEqual(sig.annotations['test1'], 1.1)
        self.assertEqual(sig.annotations['test2'], 'y1')
        self.assertTrue(sig.annotations['test3'])
        assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(sig.array_annotations, ArrayDict)

    def test_IrregularlySampledSignal_creation_times_units_signal_units(self):
        """Units are taken from the times and signal quantities themselves."""
        extra_anns = {'test2': 'y1', 'test3': True}
        array_anns = {'anno1': [23], 'anno2': ['A']}
        sig = IrregularlySampledSignal(
            [1.1, 1.5, 1.7] * pq.ms, signal=[20., 40., 60.] * pq.mV,
            name='test', description='tester', file_origin='test.file',
            test1=1, array_annotations=array_anns, **extra_anns)
        sig.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(sig)
        assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.ms)
        assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
        self.assertEqual(sig.units, pq.mV)
        self._check_metadata(sig)

    def test_IrregularlySampledSignal_creation_units_arg(self):
        """Units supplied via the units / time_units keyword arguments."""
        extra_anns = {'test2': 'y1', 'test3': True}
        array_anns = {'anno1': [23], 'anno2': ['A']}
        sig = IrregularlySampledSignal(
            [1.1, 1.5, 1.7], signal=[20., 40., 60.], units=pq.V,
            time_units=pq.s, name='test', description='tester',
            file_origin='test.file', test1=1,
            array_annotations=array_anns, **extra_anns)
        sig.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(sig)
        assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.s)
        assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
        self.assertEqual(sig.units, pq.V)
        self._check_metadata(sig)

    def test_IrregularlySampledSignal_creation_units_rescale(self):
        """Quantity inputs are rescaled to the explicitly requested units."""
        extra_anns = {'test2': 'y1', 'test3': True}
        array_anns = {'anno1': [23], 'anno2': ['A']}
        sig = IrregularlySampledSignal(
            [1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
            units=pq.mV, time_units=pq.ms, name='test',
            description='tester', file_origin='test.file', test1=1,
            array_annotations=array_anns, **extra_anns)
        sig.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(sig)
        assert_array_equal(sig.times, [1100, 1500, 1700] * pq.ms)
        assert_array_equal(np.asarray(sig).flatten(), np.array([2000., 4000., 6000.]))
        self.assertEqual(sig.units, pq.mV)
        self._check_metadata(sig)

    def test_IrregularlySampledSignal_different_lens_ValueError(self):
        """Mismatched times/signal lengths are rejected."""
        with self.assertRaises(ValueError):
            IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.ms, [20., 40., 60., 70.] * pq.mV)

    def test_IrregularlySampledSignal_no_signal_units_ValueError(self):
        """A plain list as signal (no units anywhere) is rejected."""
        with self.assertRaises(ValueError):
            IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.ms, [20., 40., 60.])

    def test_IrregularlySampledSignal_no_time_units_ValueError(self):
        """A plain list as times (no time units anywhere) is rejected."""
        with self.assertRaises(ValueError):
            IrregularlySampledSignal([1.1, 1.5, 1.7], [20., 40., 60.] * pq.mV)
class TestIrregularlySampledSignalProperties(unittest.TestCase):
    """Derived properties: t_start, t_stop, duration, sampling intervals, repr."""
    def setUp(self):
        # Three signals with different time/amplitude units and lengths.
        self.times = [np.arange(10.0) * pq.s, np.arange(-100.0, 100.0, 10.0) * pq.ms,
                      np.arange(100) * pq.ns]
        self.data = [np.arange(10.0) * pq.nA, np.arange(-100.0, 100.0, 10.0) * pq.mV,
                     np.random.uniform(size=100) * pq.uV]
        self.signals = [IrregularlySampledSignal(t, signal=D, testattr='test') for D, t in
                        zip(self.data, self.times)]
    def test__compliant(self):
        """Every fixture signal is schema-compliant."""
        for signal in self.signals:
            assert_neo_object_is_compliant(signal)
    def test__t_start_getter(self):
        """t_start is the first time point."""
        for signal, times in zip(self.signals, self.times):
            self.assertAlmostEqual(signal.t_start, times[0], delta=1e-15)
    def test__t_stop_getter(self):
        """t_stop is the last time point."""
        for signal, times in zip(self.signals, self.times):
            self.assertAlmostEqual(signal.t_stop, times[-1], delta=1e-15)
    def test__duration_getter(self):
        """duration is t_stop - t_start."""
        for signal, times in zip(self.signals, self.times):
            self.assertAlmostEqual(signal.duration, times[-1] - times[0], delta=1e-15)
    def test__sampling_intervals_getter(self):
        """sampling_intervals are the successive time differences."""
        for signal, times in zip(self.signals, self.times):
            assert_arrays_almost_equal(signal.sampling_intervals, np.diff(times), threshold=1e-15)
    def test_IrregularlySampledSignal_repr(self):
        """repr() matches the numpy printing style of the installed version."""
        sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
                                       name='test', description='tester', file_origin='test.file',
                                       test1=1)
        assert_neo_object_is_compliant(sig)
        # Compare version components numerically. The previous string-list
        # comparison (split(".")[:2] > ['1', '13']) was lexicographic and
        # wrong for e.g. numpy 1.9, where '9' > '13' as strings.
        if tuple(int(p) for p in np.__version__.split(".")[:2]) > (1, 13):
            # see https://github.com/numpy/numpy/blob/master/doc/release/1.14.0-notes.rst#many
            # -changes-to-array-printing-disableable-with-the-new-legacy-printing-mode
            targ = (
                '<IrregularlySampledSignal(array([[2.],\n       [4.],\n       [6.]]) * V '
                '' + 'at times [1.1 1.5 1.7] s)>')
        else:
            targ = (
                '<IrregularlySampledSignal(array([[ 2.],\n       [ 4.],\n       [ 6.]]) '
                '* V ' + 'at times [ 1.1  1.5  1.7] s)>')
        res = repr(sig)
        self.assertEqual(targ, res)
class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
    def setUp(self):
        """Build a 10-sample mV signal at log-spaced ms times, with parents."""
        self.data1 = np.arange(10.0)
        self.data1quant = self.data1 * pq.mV
        # Times are log-spaced from 10 ms to 100000 ms.
        self.time1 = np.logspace(1, 5, 10)
        self.time1quant = self.time1 * pq.ms
        self.arr_ann = {'anno1': [23], 'anno2': ['A']}
        self.signal1 = IrregularlySampledSignal(self.time1quant, signal=self.data1quant,
                                                name='spam', description='eggs',
                                                file_origin='testfile.txt', arg1='test',
                                                array_annotations=self.arr_ann)
        # Attach parent containers so tests can check they are kept/dropped.
        self.signal1.segment = Segment()
        self.signal1.channel_index = ChannelIndex([0])
    def test__compliant(self):
        """Fixture signal is compliant and carries the expected metadata."""
        assert_neo_object_is_compliant(self.signal1)
        self.assertEqual(self.signal1.name, 'spam')
        self.assertEqual(self.signal1.description, 'eggs')
        self.assertEqual(self.signal1.file_origin, 'testfile.txt')
        self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
        assert_arrays_equal(self.signal1.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(self.signal1.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(self.signal1.array_annotations, ArrayDict)
    def test__slice_should_return_IrregularlySampledSignal(self):
        """Index-slicing keeps type, data, times and all metadata."""
        result = self.signal1[3:8]
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        self.assertEqual(result.size, 5)
        self.assertEqual(result.t_start, self.time1quant[3])
        self.assertEqual(result.t_stop, self.time1quant[7])
        assert_array_equal(self.time1quant[3:8], result.times)
        assert_array_equal(self.data1[3:8].reshape(-1, 1), result.magnitude)
        # Test other attributes were copied over (in this case, defaults)
        self.assertEqual(result.file_origin, self.signal1.file_origin)
        self.assertEqual(result.name, self.signal1.name)
        self.assertEqual(result.description, self.signal1.description)
        self.assertEqual(result.annotations, self.signal1.annotations)
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
    def test__getitem_should_return_single_quantity(self):
        """Integer indexing yields a scalar quantity; out-of-range raises."""
        self.assertEqual(self.signal1[0], 0 * pq.mV)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertRaises(IndexError, self.signal1.__getitem__, 10)
    def test__getitem_out_of_bounds_IndexError(self):
        """Index past the last sample raises IndexError."""
        self.assertRaises(IndexError, self.signal1.__getitem__, 10)
def test_comparison_operators(self):
assert_array_equal(self.signal1 >= 5 * pq.mV, np.array(
[[False, False, False, False, False, True, True, True, True, True]]).T)
assert_array_equal(self.signal1 == 5 * pq.mV, np.array(
[[False, False, False, False, False, True, False, False, False, False]]).T)
assert_array_equal(self.signal1 == self.signal1, np.array(
[[True, True, True, True, True, True, True, True, True, True]]).T)
    def test__comparison_as_indexing_single_trace(self):
        """Boolean-mask indexing selects the matching sample."""
        self.assertEqual(self.signal1[self.signal1 == 5], [5 * pq.mV])
    def test__comparison_as_indexing_multi_trace(self):
        """Boolean-mask indexing on a two-channel signal flattens per channel."""
        signal = IrregularlySampledSignal(self.time1quant, np.arange(20).reshape((-1, 2)) * pq.V)
        assert_array_equal(signal[signal < 10],
                           np.array([[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]]).T * pq.V)
    def test__indexing_keeps_order_across_channels(self):
        """Mask with one hit per channel preserves channel order in the result."""
        # AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40])
        data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
        mask = np.full((5, 10), fill_value=False, dtype=bool)
        # selecting one entry per trace
        mask[[0, 1, 0, 3, 0, 2, 4, 3, 1, 4], range(10)] = True
        signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
        assert_array_equal(signal[mask], np.array([[0, 11, 2, 33, 4, 25, 46, 37, 18, 49]]) * pq.V)
    def test__indexing_keeps_order_across_time(self):
        """Mask with two hits per channel preserves temporal order per channel."""
        # AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40])
        data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
        mask = np.full((5, 10), fill_value=False, dtype=bool)
        # selecting two entries per trace
        temporal_ids = [0, 1, 0, 3, 1, 2, 4, 2, 1, 4] + [4, 3, 2, 1, 0, 1, 2, 3, 2, 1]
        mask[temporal_ids, list(range(10)) + list(range(10))] = True
        signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
        assert_array_equal(signal[mask], np.array([[0, 11, 2, 13, 4, 15, 26, 27, 18, 19],
                                                   [40, 31, 22, 33, 14, 25, 46, 37, 28,
                                                    49]]) * pq.V)
    def test__comparison_with_inconsistent_units_should_raise_Exception(self):
        """Comparing mV signal against nA raises ValueError."""
        self.assertRaises(ValueError, self.signal1.__gt__, 5 * pq.nA)
    def test_simple_statistics(self):
        """max/min and the time-weighted mean of the signal."""
        # Time-weighted mean: sum of value * following interval, over total duration.
        targmean = self.signal1[:-1] * np.diff(self.time1quant).reshape(-1, 1)
        targmean = targmean.sum() / (self.time1quant[-1] - self.time1quant[0])
        self.assertEqual(self.signal1.max(), 9 * pq.mV)
        self.assertEqual(self.signal1.min(), 0 * pq.mV)
        self.assertEqual(self.signal1.mean(), targmean)
    def test_mean_interpolation_NotImplementedError(self):
        """mean(interpolation=True) is not implemented yet."""
        self.assertRaises(NotImplementedError, self.signal1.mean, True)
    def test_resample_NotImplementedError(self):
        """resample() is not implemented yet."""
        self.assertRaises(NotImplementedError, self.signal1.resample, True)
    def test__rescale_same(self):
        """rescale() to the same units is a no-op that keeps parents."""
        result = self.signal1.copy()
        result = result.rescale(pq.mV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(result.units, 1 * pq.mV)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1))
        assert_array_equal(result.times, self.time1quant)
        assert_same_sub_schema(result, self.signal1)
        # Parent containers must be the same objects as on the original.
        self.assertIsInstance(result.channel_index, ChannelIndex)
        self.assertIsInstance(result.segment, Segment)
        self.assertIs(result.channel_index, self.signal1.channel_index)
        self.assertIs(result.segment, self.signal1.segment)
    def test__rescale_new(self):
        """rescale() to uV scales the magnitude by 1000 and keeps parents."""
        result = self.signal1.copy()
        result = result.rescale(pq.uV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(result.units, 1 * pq.uV)
        # 1 mV == 1000 uV
        assert_arrays_almost_equal(np.array(result), self.data1.reshape(-1, 1) * 1000., 1e-10)
        assert_array_equal(result.times, self.time1quant)
        self.assertIsInstance(result.channel_index, ChannelIndex)
        self.assertIsInstance(result.segment, Segment)
        self.assertIs(result.channel_index, self.signal1.channel_index)
        self.assertIs(result.segment, self.signal1.segment)
    def test__rescale_new_incompatible_ValueError(self):
        """rescale() to a dimensionally incompatible unit raises ValueError."""
        self.assertRaises(ValueError, self.signal1.rescale, pq.nA)
    def test_time_slice(self):
        """time_slice keeps the samples within [t_start, t_stop] plus metadata."""
        targdataquant = [[1.0], [2.0], [3.0]] * pq.mV
        targtime = np.logspace(1, 5, 10)
        targtimequant = targtime[1:4] * pq.ms
        targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
                                               description='eggs', file_origin='testfile.txt',
                                               arg1='test')
        # Unitless bounds: interpreted in the signal's time units (ms).
        t_start = 15
        t_stop = 250
        result = self.signal1.time_slice(t_start, t_stop)
        assert_array_equal(result, targ_signal)
        assert_array_equal(result.times, targtimequant)
        self.assertEqual(result.units, 1 * pq.mV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
    def test__time_slice_deepcopy_annotations(self):
        """Slicing deep-copies annotations: later edits do not leak either way."""
        params1 = {'test0': 'y1', 'test1': ['deeptest'], 'test2': True}
        self.signal1.annotate(**params1)
        result = self.signal1.time_slice(None, None)
        # Change annotations of original
        params2 = {'test0': 'y2', 'test2': False}
        self.signal1.annotate(**params2)
        self.signal1.annotations['test1'][0] = 'shallowtest'
        self.assertNotEqual(self.signal1.annotations['test0'], result.annotations['test0'])
        self.assertNotEqual(self.signal1.annotations['test1'], result.annotations['test1'])
        self.assertNotEqual(self.signal1.annotations['test2'], result.annotations['test2'])
        # Change annotations of result
        params3 = {'test0': 'y3'}
        result.annotate(**params3)
        result.annotations['test1'][0] = 'shallowtest2'
        self.assertNotEqual(self.signal1.annotations['test0'], result.annotations['test0'])
        self.assertNotEqual(self.signal1.annotations['test1'], result.annotations['test1'])
        self.assertNotEqual(self.signal1.annotations['test2'], result.annotations['test2'])
    def test__time_slice_deepcopy_array_annotations(self):
        """Slicing deep-copies array annotations: edits do not leak either way."""
        length = self.signal1.shape[-1]
        params1 = {'test0': ['y{}'.format(i) for i in range(length)],
                   'test1': ['deeptest' for i in range(length)],
                   'test2': [(-1)**i > 0 for i in range(length)]}
        self.signal1.array_annotate(**params1)
        result = self.signal1.time_slice(None, None)
        # Change annotations of original
        params2 = {'test0': ['x{}'.format(i) for i in range(length)],
                   'test2': [(-1) ** (i + 1) > 0 for i in range(length)]}
        self.signal1.array_annotate(**params2)
        self.signal1.array_annotations['test1'][0] = 'shallowtest'
        self.assertFalse(all(self.signal1.array_annotations['test0']
                             == result.array_annotations['test0']))
        self.assertFalse(all(self.signal1.array_annotations['test1']
                             == result.array_annotations['test1']))
        self.assertFalse(all(self.signal1.array_annotations['test2']
                             == result.array_annotations['test2']))
        # Change annotations of result
        params3 = {'test0': ['z{}'.format(i) for i in range(1, result.shape[-1]+1)]}
        result.array_annotate(**params3)
        result.array_annotations['test1'][0] = 'shallow2'
        self.assertFalse(all(self.signal1.array_annotations['test0']
                             == result.array_annotations['test0']))
        self.assertFalse(all(self.signal1.array_annotations['test1']
                             == result.array_annotations['test1']))
        self.assertFalse(all(self.signal1.array_annotations['test2']
                             == result.array_annotations['test2']))
    def test__time_slice_deepcopy_data(self):
        """Slicing deep-copies the data buffer: edits do not leak either way."""
        result = self.signal1.time_slice(None, None)
        # Change values of original array
        self.signal1[2] = 7.3*self.signal1.units
        self.assertFalse(all(self.signal1 == result))
        # Change values of sliced array
        result[3] = 9.5*result.units
        self.assertFalse(all(self.signal1 == result))
    def test_time_slice_out_of_boundries(self):
        """Bounds beyond the signal's extent return the whole signal."""
        targdataquant = self.data1quant
        targtimequant = self.time1quant
        targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
                                               description='eggs', file_origin='testfile.txt',
                                               arg1='test')
        t_start = 0
        t_stop = 2500000
        result = self.signal1.time_slice(t_start, t_stop)
        assert_array_equal(result, targ_signal)
        assert_array_equal(result.times, targtimequant)
        self.assertEqual(result.units, 1 * pq.mV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
    def test_time_slice_empty(self):
        """time_slice of an empty signal returns an empty signal with metadata."""
        targdataquant = [] * pq.mV
        targtimequant = [] * pq.ms
        targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
                                               description='eggs', file_origin='testfile.txt',
                                               arg1='test')
        t_start = 15
        t_stop = 250
        result = targ_signal.time_slice(t_start, t_stop)
        assert_array_equal(result, targ_signal)
        assert_array_equal(result.times, targtimequant)
        self.assertEqual(result.units, 1 * pq.mV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        self.assertEqual(result.array_annotations, {})
        self.assertIsInstance(result.array_annotations, ArrayDict)
    def test_time_slice_none_stop(self):
        """t_stop=None slices from t_start to the end of the signal."""
        targdataquant = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0], [9.0]] * pq.mV
        targtime = np.logspace(1, 5, 10)
        targtimequant = targtime[1:10] * pq.ms
        targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
                                               description='eggs', file_origin='testfile.txt',
                                               arg1='test')
        t_start = 15
        t_stop = None
        result = self.signal1.time_slice(t_start, t_stop)
        assert_array_equal(result, targ_signal)
        assert_array_equal(result.times, targtimequant)
        self.assertEqual(result.units, 1 * pq.mV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
    def test_time_slice_none_start(self):
        """t_start=None slices from the beginning of the signal to t_stop."""
        targdataquant = [[0.0], [1.0], [2.0], [3.0]] * pq.mV
        targtime = np.logspace(1, 5, 10)
        targtimequant = targtime[0:4] * pq.ms
        targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
                                               description='eggs', file_origin='testfile.txt',
                                               arg1='test')
        t_start = None
        t_stop = 250
        result = self.signal1.time_slice(t_start, t_stop)
        assert_array_equal(result, targ_signal)
        assert_array_equal(result.times, targtimequant)
        self.assertEqual(result.units, 1 * pq.mV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
    def test_time_slice_none_both(self):
        """Both bounds None returns a copy of the whole signal."""
        targdataquant = [[0.0], [1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0],
                         [9.0]] * pq.mV
        targtime = np.logspace(1, 5, 10)
        targtimequant = targtime[0:10] * pq.ms
        targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
                                               description='eggs', file_origin='testfile.txt',
                                               arg1='test')
        t_start = None
        t_stop = None
        result = self.signal1.time_slice(t_start, t_stop)
        assert_array_equal(result, targ_signal)
        assert_array_equal(result.times, targtimequant)
        self.assertEqual(result.units, 1 * pq.mV)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_differnt_units(self):
    """Slice with bounds in seconds while the signal's times are in ms.

    time_slice must rescale the bounds (0.015 s == 15 ms, 0.250 s == 250 ms)
    and keep samples 1..3 of the logspace time base.

    NOTE(review): "differnt" is a typo in the original test name; it is kept
    unchanged so the public test identifier stays stable.
    """
    targdataquant = [[1.0], [2.0], [3.0]] * pq.mV
    targtime = np.logspace(1, 5, 10)
    targtimequant = targtime[1:4] * pq.ms
    targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
                                           description='eggs', file_origin='testfile.txt',
                                           arg1='test')
    # The original assigned plain ints (t_start = 15, t_stop = 250) here and
    # immediately overwrote them; those dead assignments are removed.
    t_start = 0.015 * pq.s
    t_stop = .250 * pq.s
    result = self.signal1.time_slice(t_start, t_stop)
    # Data, times and units of the slice must match the expected signal.
    assert_array_equal(result, targ_signal)
    assert_array_equal(result.times, targtimequant)
    self.assertEqual(result.units, 1 * pq.mV)
    self.assertIsInstance(result, IrregularlySampledSignal)
    assert_neo_object_is_compliant(result)
    # Metadata and (array) annotations survive the slice unchanged.
    self.assertEqual(result.name, 'spam')
    self.assertEqual(result.description, 'eggs')
    self.assertEqual(result.file_origin, 'testfile.txt')
    self.assertEqual(result.annotations, {'arg1': 'test'})
    assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
    assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
    self.assertIsInstance(result.array_annotations, ArrayDict)
def test__time_slice_should_set_parents_to_None(self):
    """time_slice deep-copies the signal, so the slice must not keep
    references to the parent Segment/ChannelIndex of the original."""
    sliced = self.signal1.time_slice(1 * pq.ms, 3 * pq.ms)
    self.assertEqual(sliced.segment, None)
    self.assertEqual(sliced.channel_index, None)
def test__deepcopy_should_set_parents_objects_to_None(self):
    """A deepcopy must sever the links to parent container objects."""
    clone = deepcopy(self.signal1)
    self.assertEqual(clone.segment, None)
    self.assertEqual(clone.channel_index, None)
def test__time_shift_same_attributes(self):
    """Shifting in time preserves every attribute except the time data."""
    shifted = self.signal1.time_shift(1 * pq.ms)
    assert_same_attributes(shifted, self.signal1,
                           exclude=['times', 't_start', 't_stop'])
def test__time_shift_same_annotations(self):
    """Annotations must be carried over unchanged by time_shift."""
    shifted = self.signal1.time_shift(1 * pq.ms)
    assert_same_annotations(shifted, self.signal1)
def test__time_shift_same_array_annotations(self):
    """Array annotations must be carried over unchanged by time_shift."""
    shifted = self.signal1.time_shift(1 * pq.ms)
    assert_same_array_annotations(shifted, self.signal1)
def test__time_shift_should_set_parents_to_None(self):
    """time_shift deep-copies the signal, so the result must not keep
    references to the parent Segment/ChannelIndex of the original."""
    shifted = self.signal1.time_shift(1 * pq.ms)
    self.assertEqual(shifted.segment, None)
    self.assertEqual(shifted.channel_index, None)
def test__time_shift_by_zero(self):
    """A zero-length shift leaves the sample times untouched."""
    unshifted = self.signal1.time_shift(0 * pq.ms)
    assert_arrays_equal(unshifted.times, self.signal1.times)
def test__time_shift_same_units(self):
    """A shift given in the signal's own time units offsets every time."""
    delta = 10 * pq.ms
    shifted = self.signal1.time_shift(delta)
    assert_arrays_equal(shifted.times, self.signal1.times + delta)
def test__time_shift_different_units(self):
    """A shift given in seconds must be rescaled: 1 s == 1000 ms."""
    shifted = self.signal1.time_shift(1 * pq.s)
    assert_arrays_equal(shifted.times, self.signal1.times + 1000 * pq.ms)
def test_as_array(self):
    """as_array() returns a plain ndarray holding the signal's values."""
    arr = self.signal1.as_array()
    self.assertIsInstance(arr, np.ndarray)
    assert_array_equal(self.data1, arr.flat)
def test_as_quantity(self):
    """as_quantity() returns a pq.Quantity with the same magnitudes."""
    quant = self.signal1.as_quantity()
    self.assertIsInstance(quant, pq.Quantity)
    assert_array_equal(self.data1, quant.magnitude.flat)
def test__copy_should_preserve_parent_objects(self):
    """Unlike time_slice, copy() keeps the very same parent objects."""
    duplicate = self.signal1.copy()
    self.assertIs(duplicate.segment, self.signal1.segment)
    self.assertIs(duplicate.channel_index, self.signal1.channel_index)
class TestIrregularlySampledSignalCombination(unittest.TestCase):
    """Arithmetic combination of IrregularlySampledSignal with scalars,
    arrays and other signals.

    Each test deliberately exercises a specific operator form
    (signal + const, const - signal, signal * np.array, ...) so that the
    corresponding dunder dispatch path is covered; the expression shapes
    must not be rearranged.
    """

    def setUp(self):
        # 10 samples 0..9 mV on an irregular logspace time base (ms),
        # plus one annotation and two array annotations.
        self.data1 = np.arange(10.0)
        self.data1quant = self.data1 * pq.mV
        self.time1 = np.logspace(1, 5, 10)
        self.time1quant = self.time1 * pq.ms
        self.arr_ann = {'anno1': [23], 'anno2': ['A']}
        self.signal1 = IrregularlySampledSignal(self.time1quant, signal=self.data1quant,
                                                name='spam', description='eggs',
                                                file_origin='testfile.txt', arg1='test',
                                                array_annotations=self.arr_ann)

    def test__compliant(self):
        """The fixture signal itself must be a compliant neo object."""
        assert_neo_object_is_compliant(self.signal1)
        self.assertEqual(self.signal1.name, 'spam')
        self.assertEqual(self.signal1.description, 'eggs')
        self.assertEqual(self.signal1.file_origin, 'testfile.txt')
        self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
        assert_arrays_equal(self.signal1.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(self.signal1.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(self.signal1.array_annotations, ArrayDict)

    def test__add_const_quantity_should_preserve_data_complement(self):
        """signal + scalar Quantity: 0.065 V is rescaled to 65 mV and added."""
        result = self.signal1 + 0.065 * pq.V
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        # Metadata and annotations must be preserved by the addition.
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) + 65)
        assert_array_equal(result.times, self.time1quant)
        # Original is untouched; last sample went from 9 mV to 74 mV.
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 74 * pq.mV)

    def test__add_two_consistent_signals_should_preserve_data_complement(self):
        """signal + signal with identical time bases adds elementwise."""
        data2 = np.arange(10.0, 20.0)
        data2quant = data2 * pq.mV
        signal2 = IrregularlySampledSignal(self.time1quant, signal=data2quant)
        assert_neo_object_is_compliant(signal2)
        result = self.signal1 + signal2
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        # Metadata comes from the left operand (self.signal1).
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        # 0..9 plus 10..19 elementwise gives 10, 12, ..., 28.
        targ = IrregularlySampledSignal(self.time1quant, signal=np.arange(10.0, 30.0, 2.0),
                                        units="mV", name='spam', description='eggs',
                                        file_origin='testfile.txt', arg1='test')
        assert_neo_object_is_compliant(targ)
        assert_array_equal(result, targ)
        assert_array_equal(self.time1quant, targ.times)
        assert_array_equal(result.times, targ.times)
        assert_same_sub_schema(result, targ)

    def test__add_signals_with_inconsistent_times_AssertionError(self):
        """Adding signals with different time bases must raise.

        NOTE(review): the method name says AssertionError but the test
        (correctly) expects ValueError; name kept for identifier stability.
        """
        signal2 = IrregularlySampledSignal(self.time1quant * 2., signal=np.arange(10.0),
                                           units="mV")
        assert_neo_object_is_compliant(signal2)
        self.assertRaises(ValueError, self.signal1.__add__, signal2)

    def test__add_signals_with_inconsistent_dimension_ValueError(self):
        """Adding an array whose shape cannot broadcast must raise ValueError."""
        signal2 = np.arange(20).reshape(2, 10)
        self.assertRaises(ValueError, self.signal1.__add__, signal2)

    def test__subtract_const_should_preserve_data_complement(self):
        """signal - scalar Quantity subtracts from every sample."""
        result = self.signal1 - 65 * pq.mV
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        # Last sample: 9 mV - 65 mV = -56 mV; original unchanged.
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], -56 * pq.mV)
        assert_array_equal(result.magnitude, (self.data1 - 65).reshape(-1, 1))
        assert_array_equal(result.times, self.time1quant)

    def test__subtract_from_const_should_return_signal(self):
        """const - signal (reflected subtraction) still yields a signal."""
        result = 10 * pq.mV - self.signal1
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        # Last sample: 10 mV - 9 mV = 1 mV; original unchanged.
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 1 * pq.mV)
        assert_array_equal(result.magnitude, (10 - self.data1).reshape(-1, 1))
        assert_array_equal(result.times, self.time1quant)

    def test__mult_signal_by_const_float_should_preserve_data_complement(self):
        """signal * plain float scales every sample."""
        result = self.signal1 * 2.
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 18 * pq.mV)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) * 2)
        assert_array_equal(result.times, self.time1quant)

    def test__mult_signal_by_const_array_should_preserve_data_complement(self):
        """signal * 0-d numpy array scales every sample (array path)."""
        result = self.signal1 * np.array(2.)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 18 * pq.mV)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) * 2)
        assert_array_equal(result.times, self.time1quant)

    def test__divide_signal_by_const_should_preserve_data_complement(self):
        """signal / const: dividing by 0.5 doubles every sample."""
        result = self.signal1 / 0.5
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        # 9 mV / 0.5 == 18 mV.
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 18 * pq.mV)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) / 0.5)
        assert_array_equal(result.times, self.time1quant)

    @unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
    def test__pretty(self):
        """IPython pretty() output must match the hand-built template."""
        res = pretty(self.signal1)
        signal = self.signal1
        targ = (("IrregularlySampledSignal with %d channels of length %d; units %s; datatype %s \n"
                 "" % (signal.shape[1], signal.shape[0], signal.units.dimensionality.unicode,
                       signal.dtype))
                + ("name: '%s'\ndescription: '%s'\n" % (signal.name, signal.description))
                + ("annotations: %s\n" % str(signal.annotations))
                + ("sample times: %s" % (signal.times[:10],)))
        self.assertEqual(res, targ)

    def test__merge(self):
        """Merging two signals concatenates channels, rescales units,
        merges shared array annotations and warns about unshared ones;
        merging signals with different time bases raises MergeError."""
        data1 = np.arange(1000.0, 1066.0).reshape((11, 6)) * pq.uV
        data2 = np.arange(2.0, 2.033, 0.001).reshape((11, 3)) * pq.mV
        times1 = np.arange(11.0) * pq.ms
        times2 = np.arange(1.0, 12.0) * pq.ms
        # 'anno1' is common to both signals; 'anno2'/'anno3' are not and
        # must trigger a UserWarning on merge.
        arr_ann1 = {'anno1': np.arange(6), 'anno2': ['a', 'b', 'c', 'd', 'e', 'f']}
        arr_ann2 = {'anno1': np.arange(100, 103), 'anno3': []}
        signal1 = IrregularlySampledSignal(times1, data1, name='signal1',
                                           description='test signal', file_origin='testfile.txt',
                                           array_annotations=arr_ann1)
        signal2 = IrregularlySampledSignal(times1, data2, name='signal2',
                                           description='test signal', file_origin='testfile.txt',
                                           array_annotations=arr_ann2)
        signal3 = IrregularlySampledSignal(times2, data2, name='signal3',
                                           description='test signal', file_origin='testfile.txt')
        with warnings.catch_warnings(record=True) as w:
            merged12 = signal1.merge(signal2)
            self.assertTrue(len(w) == 1)
            self.assertEqual(w[0].category, UserWarning)
            self.assertSequenceEqual(str(w[0].message), "The following array annotations were "
                                                        "omitted, because they were only present"
                                                        " in one of the merged objects: "
                                                        "['anno2'] from the one that was merged "
                                                        "into and ['anno3'] from the one that "
                                                        "was merged into the other")
        # Expected data: data1's 6 channels followed by data2 rescaled to uV.
        target_data12 = np.hstack([data1, data2.rescale(pq.uV)])
        assert_neo_object_is_compliant(signal1)
        assert_neo_object_is_compliant(signal2)
        assert_neo_object_is_compliant(merged12)
        # Spot-check one sample from each source signal.
        self.assertAlmostEqual(merged12[5, 0], 1030.0 * pq.uV, 9)
        self.assertAlmostEqual(merged12[5, 6], 2015.0 * pq.uV, 9)
        self.assertEqual(merged12.name, 'merge(signal1, signal2)')
        self.assertEqual(merged12.file_origin, 'testfile.txt')
        assert_arrays_equal(merged12.array_annotations['anno1'],
                            np.array([0, 1, 2, 3, 4, 5, 100, 101, 102]))
        self.assertIsInstance(merged12.array_annotations, ArrayDict)
        assert_arrays_equal(merged12.magnitude, target_data12)
        self.assertRaises(MergeError, signal1.merge, signal3)
class TestAnalogSignalFunctions(unittest.TestCase):
    """Serialization tests.

    NOTE(review): the class name says "AnalogSignal" but it actually
    exercises IrregularlySampledSignal; kept for identifier stability.
    """

    def test__pickle(self):
        """A signal must survive a pickle dump/load round trip unchanged."""
        signal1 = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.s, np.arange(10.0),
                                           units="mV")
        # Context managers guarantee the file handles are closed even when
        # pickling/unpickling raises (the original leaked open handles on
        # exceptions).
        with open('./pickle', 'wb') as fobj:
            pickle.dump(signal1, fobj)
        with open('./pickle', 'rb') as fobj:
            try:
                signal2 = pickle.load(fobj)
            except ValueError:
                # Kept from the original: an unreadable pickle becomes None
                # and the comparison below reports the failure.
                signal2 = None
        assert_array_equal(signal1, signal2)
        os.remove('./pickle')
class TestIrregularlySampledSignalEquality(unittest.TestCase):
    """Equality semantics of IrregularlySampledSignal."""

    def test__signals_with_different_times_should_be_not_equal(self):
        """Identical sample values with different time units (s vs ms)
        must compare unequal."""
        sig_seconds = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.s,
                                                np.arange(10.0), units="mV")
        sig_millis = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.ms,
                                               np.arange(10.0), units="mV")
        self.assertNotEqual(sig_seconds, sig_millis)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"neo.test.tools.assert_same_array_annotations",
"neo.test.tools.assert_neo_object_is_compliant",
"neo.core.irregularlysampledsignal.IrregularlySampledSignal",
"neo.test.tools.assert_same_sub_schema",
"numpy.array",
"copy.deepcopy",
"unittest.main",
"numpy.arange",
"os.remove",
"neo.test.generate_d... | [((43190, 43243), 'unittest.skipUnless', 'unittest.skipUnless', (['HAVE_IPYTHON', '"""requires IPython"""'], {}), "(HAVE_IPYTHON, 'requires IPython')\n", (43209, 43243), False, 'import unittest\n'), ((47527, 47542), 'unittest.main', 'unittest.main', ([], {}), '()\n', (47540, 47542), False, 'import unittest\n'), ((1129, 1146), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1143, 1146), True, 'import numpy as np\n'), ((1354, 1405), 'neo.test.generate_datasets.get_fake_value', 'get_fake_value', (['"""times"""', 'pq.Quantity'], {'seed': '(0)', 'dim': '(1)'}), "('times', pq.Quantity, seed=0, dim=1)\n", (1368, 1405), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((1423, 1475), 'neo.test.generate_datasets.get_fake_value', 'get_fake_value', (['"""signal"""', 'pq.Quantity'], {'seed': '(1)', 'dim': '(2)'}), "('signal', pq.Quantity, seed=1, dim=2)\n", (1437, 1475), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((1491, 1556), 'neo.test.generate_datasets.get_fake_value', 'get_fake_value', (['"""name"""', 'str'], {'seed': '(2)', 'obj': 'IrregularlySampledSignal'}), "('name', str, seed=2, obj=IrregularlySampledSignal)\n", (1505, 1556), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((1579, 1653), 'neo.test.generate_datasets.get_fake_value', 'get_fake_value', (['"""description"""', 'str'], {'seed': '(3)', 'obj': '"""IrregularlySampledSignal"""'}), "('description', str, seed=3, obj='IrregularlySampledSignal')\n", (1593, 1653), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((1676, 1710), 'neo.test.generate_datasets.get_fake_value', 'get_fake_value', (['"""file_origin"""', 'str'], {}), "('file_origin', str)\n", (1690, 1710), False, 'from neo.test.generate_datasets import 
get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((1729, 1818), 'neo.test.generate_datasets.get_fake_value', 'get_fake_value', (['"""array_annotations"""', 'dict'], {'seed': '(5)', 'obj': 'IrregularlySampledSignal', 'n': '(1)'}), "('array_annotations', dict, seed=5, obj=\n IrregularlySampledSignal, n=1)\n", (1743, 1818), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((2069, 2134), 'neo.test.generate_datasets.get_fake_values', 'get_fake_values', (['IrregularlySampledSignal'], {'annotate': '(False)', 'seed': '(0)'}), '(IrregularlySampledSignal, annotate=False, seed=0)\n', (2084, 2134), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((2151, 2218), 'neo.test.generate_datasets.get_fake_values', 'get_fake_values', (['"""IrregularlySampledSignal"""'], {'annotate': '(False)', 'seed': '(0)'}), "('IrregularlySampledSignal', annotate=False, seed=0)\n", (2166, 2218), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((2235, 2299), 'neo.test.generate_datasets.get_fake_values', 'get_fake_values', (['IrregularlySampledSignal'], {'annotate': '(True)', 'seed': '(0)'}), '(IrregularlySampledSignal, annotate=True, seed=0)\n', (2250, 2299), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((2316, 2382), 'neo.test.generate_datasets.get_fake_values', 'get_fake_values', (['"""IrregularlySampledSignal"""'], {'annotate': '(True)', 'seed': '(0)'}), "('IrregularlySampledSignal', annotate=True, seed=0)\n", (2331, 2382), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((3184, 3252), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (["arr_ann_res21['valid']", "arr_ann_attrs2['valid']"], {}), "(arr_ann_res21['valid'], arr_ann_attrs2['valid'])\n", (3203, 
3252), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((3261, 3331), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (["arr_ann_res21['number']", "arr_ann_attrs2['number']"], {}), "(arr_ann_res21['number'], arr_ann_attrs2['number'])\n", (3280, 3331), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((3435, 3503), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (["arr_ann_res22['valid']", "arr_ann_attrs2['valid']"], {}), "(arr_ann_res22['valid'], arr_ann_attrs2['valid'])\n", (3454, 3503), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((3512, 3582), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (["arr_ann_res22['number']", "arr_ann_attrs2['number']"], {}), "(arr_ann_res22['number'], arr_ann_attrs2['number'])\n", (3531, 3582), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((3744, 3788), 'neo.test.generate_datasets.fake_neo', 'fake_neo', ([], {'obj_type': 'obj_type', 'cascade': 'cascade'}), '(obj_type=obj_type, cascade=cascade)\n', (3752, 3788), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((3865, 3900), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['res'], {}), '(res)\n', (3895, 3900), False, 'from neo.test.tools import 
assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((4127, 4171), 'neo.test.generate_datasets.fake_neo', 'fake_neo', ([], {'obj_type': 'obj_type', 'cascade': 'cascade'}), '(obj_type=obj_type, cascade=cascade)\n', (4135, 4171), False, 'from neo.test.generate_datasets import get_fake_value, get_fake_values, fake_neo, TEST_ANNOTATIONS\n'), ((4248, 4283), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['res'], {}), '(res)\n', (4278, 4283), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((4604, 4802), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['([1.1, 1.5, 1.7] * pq.ms)'], {'signal': '([20.0, 40.0, 60.0] * pq.mV)', 'name': '"""test"""', 'description': '"""tester"""', 'file_origin': '"""test.file"""', 'test1': '(1)', 'array_annotations': 'arr_ann'}), "([1.1, 1.5, 1.7] * pq.ms, signal=[20.0, 40.0, 60.0] *\n pq.mV, name='test', description='tester', file_origin='test.file',\n test1=1, array_annotations=arr_ann, **params)\n", (4628, 4802), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((4924, 4959), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['sig'], {}), '(sig)\n', (4954, 4959), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((4969, 5023), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['sig.times', '([1.1, 1.5, 1.7] * pq.ms)'], {}), '(sig.times, [1.1, 1.5, 1.7] * pq.ms)\n', (4987, 5023), False, 'from numpy.testing 
import assert_array_equal\n'), ((5915, 6128), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['[1.1, 1.5, 1.7]'], {'signal': '[20.0, 40.0, 60.0]', 'units': 'pq.V', 'time_units': 'pq.s', 'name': '"""test"""', 'description': '"""tester"""', 'file_origin': '"""test.file"""', 'test1': '(1)', 'array_annotations': 'arr_ann'}), "([1.1, 1.5, 1.7], signal=[20.0, 40.0, 60.0], units=\n pq.V, time_units=pq.s, name='test', description='tester', file_origin=\n 'test.file', test1=1, array_annotations=arr_ann, **params)\n", (5939, 6128), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((6287, 6322), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['sig'], {}), '(sig)\n', (6317, 6322), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((6332, 6385), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['sig.times', '([1.1, 1.5, 1.7] * pq.s)'], {}), '(sig.times, [1.1, 1.5, 1.7] * pq.s)\n', (6350, 6385), False, 'from numpy.testing import assert_array_equal\n'), ((7280, 7504), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['([1.1, 1.5, 1.7] * pq.s)'], {'signal': '([2.0, 4.0, 6.0] * pq.V)', 'units': 'pq.mV', 'time_units': 'pq.ms', 'name': '"""test"""', 'description': '"""tester"""', 'file_origin': '"""test.file"""', 'test1': '(1)', 'array_annotations': 'arr_ann'}), "([1.1, 1.5, 1.7] * pq.s, signal=[2.0, 4.0, 6.0] *\n pq.V, units=pq.mV, time_units=pq.ms, name='test', description='tester',\n file_origin='test.file', test1=1, array_annotations=arr_ann, **params)\n", (7304, 7504), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((7665, 7700), 'neo.test.tools.assert_neo_object_is_compliant', 
'assert_neo_object_is_compliant', (['sig'], {}), '(sig)\n', (7695, 7700), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((7710, 7767), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['sig.times', '([1100, 1500, 1700] * pq.ms)'], {}), '(sig.times, [1100, 1500, 1700] * pq.ms)\n', (7728, 7767), False, 'from numpy.testing import assert_array_equal\n'), ((10597, 10749), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['([1.1, 1.5, 1.7] * pq.s)'], {'signal': '([2.0, 4.0, 6.0] * pq.V)', 'name': '"""test"""', 'description': '"""tester"""', 'file_origin': '"""test.file"""', 'test1': '(1)'}), "([1.1, 1.5, 1.7] * pq.s, signal=[2.0, 4.0, 6.0] *\n pq.V, name='test', description='tester', file_origin='test.file', test1=1)\n", (10621, 10749), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((10829, 10864), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['sig'], {}), '(sig)\n', (10859, 10864), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((11621, 11636), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (11630, 11636), True, 'import numpy as np\n'), ((11703, 11724), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(10)'], {}), '(1, 5, 10)\n', (11714, 11724), True, 'import numpy as np\n'), ((11848, 12028), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['self.time1quant'], {'signal': 'self.data1quant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""', 'array_annotations': 'self.arr_ann'}), 
"(self.time1quant, signal=self.data1quant, name=\n 'spam', description='eggs', file_origin='testfile.txt', arg1='test',\n array_annotations=self.arr_ann)\n", (11872, 12028), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((12195, 12204), 'neo.core.Segment', 'Segment', ([], {}), '()\n', (12202, 12204), False, 'from neo.core import Segment, ChannelIndex\n'), ((12242, 12259), 'neo.core.ChannelIndex', 'ChannelIndex', (['[0]'], {}), '([0])\n', (12254, 12259), False, 'from neo.core import Segment, ChannelIndex\n'), ((12300, 12344), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['self.signal1'], {}), '(self.signal1)\n', (12330, 12344), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((13010, 13048), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (13040, 13048), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((13443, 13497), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['self.time1quant[3:8]', 'result.times'], {}), '(self.time1quant[3:8], result.times)\n', (13461, 13497), False, 'from numpy.testing import assert_array_equal\n'), ((15669, 15715), 'numpy.full', 'np.full', (['(5, 10)'], {'fill_value': '(False)', 'dtype': 'bool'}), '((5, 10), fill_value=False, dtype=bool)\n', (15676, 15715), True, 'import numpy as np\n'), ((16265, 16311), 'numpy.full', 'np.full', (['(5, 10)'], {'fill_value': '(False)', 'dtype': 'bool'}), '((5, 10), fill_value=False, dtype=bool)\n', (16272, 16311), True, 'import numpy as np\n'), ((17798, 17836), 'neo.test.tools.assert_neo_object_is_compliant', 
'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (17828, 17836), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((18417, 18466), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (18435, 18466), False, 'from numpy.testing import assert_array_equal\n'), ((18475, 18519), 'neo.test.tools.assert_same_sub_schema', 'assert_same_sub_schema', (['result', 'self.signal1'], {}), '(result, self.signal1)\n', (18497, 18519), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((18957, 18995), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (18987, 18995), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((19599, 19648), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (19617, 19648), False, 'from numpy.testing import assert_array_equal\n'), ((20133, 20154), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(10)'], {}), '(1, 5, 10)\n', (20144, 20154), True, 'import numpy as np\n'), ((20223, 20362), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['targtimequant'], {'signal': 'targdataquant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""'}), "(targtimequant, signal=targdataquant, name='spam',\n 
description='eggs', file_origin='testfile.txt', arg1='test')\n", (20247, 20362), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((20563, 20602), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ_signal'], {}), '(result, targ_signal)\n', (20581, 20602), False, 'from numpy.testing import assert_array_equal\n'), ((20611, 20658), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targtimequant'], {}), '(result.times, targtimequant)\n', (20629, 20658), False, 'from numpy.testing import assert_array_equal\n'), ((20781, 20819), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (20811, 20819), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((24682, 24821), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['targtimequant'], {'signal': 'targdataquant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""'}), "(targtimequant, signal=targdataquant, name='spam',\n description='eggs', file_origin='testfile.txt', arg1='test')\n", (24706, 24821), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((25025, 25064), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ_signal'], {}), '(result, targ_signal)\n', (25043, 25064), False, 'from numpy.testing import assert_array_equal\n'), ((25073, 25120), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targtimequant'], {}), '(result.times, targtimequant)\n', (25091, 25120), False, 'from numpy.testing import assert_array_equal\n'), ((25243, 25281), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', 
(['result'], {}), '(result)\n', (25273, 25281), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((25861, 26000), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['targtimequant'], {'signal': 'targdataquant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""'}), "(targtimequant, signal=targdataquant, name='spam',\n description='eggs', file_origin='testfile.txt', arg1='test')\n", (25885, 26000), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((26200, 26239), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ_signal'], {}), '(result, targ_signal)\n', (26218, 26239), False, 'from numpy.testing import assert_array_equal\n'), ((26248, 26295), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targtimequant'], {}), '(result.times, targtimequant)\n', (26266, 26295), False, 'from numpy.testing import assert_array_equal\n'), ((26418, 26456), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (26448, 26456), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((26959, 26980), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(10)'], {}), '(1, 5, 10)\n', (26970, 26980), True, 'import numpy as np\n'), ((27050, 27189), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['targtimequant'], {'signal': 'targdataquant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""'}), "(targtimequant, 
signal=targdataquant, name='spam',\n description='eggs', file_origin='testfile.txt', arg1='test')\n", (27074, 27189), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((27391, 27430), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ_signal'], {}), '(result, targ_signal)\n', (27409, 27430), False, 'from numpy.testing import assert_array_equal\n'), ((27439, 27486), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targtimequant'], {}), '(result.times, targtimequant)\n', (27457, 27486), False, 'from numpy.testing import assert_array_equal\n'), ((27609, 27647), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (27639, 27647), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((28220, 28241), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(10)'], {}), '(1, 5, 10)\n', (28231, 28241), True, 'import numpy as np\n'), ((28310, 28449), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['targtimequant'], {'signal': 'targdataquant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""'}), "(targtimequant, signal=targdataquant, name='spam',\n description='eggs', file_origin='testfile.txt', arg1='test')\n", (28334, 28449), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((28652, 28691), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ_signal'], {}), '(result, targ_signal)\n', (28670, 28691), False, 'from numpy.testing import assert_array_equal\n'), ((28700, 28747), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targtimequant'], {}), '(result.times, targtimequant)\n', 
(28718, 28747), False, 'from numpy.testing import assert_array_equal\n'), ((28870, 28908), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (28900, 28908), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((29547, 29568), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(10)'], {}), '(1, 5, 10)\n', (29558, 29568), True, 'import numpy as np\n'), ((29638, 29777), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['targtimequant'], {'signal': 'targdataquant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""'}), "(targtimequant, signal=targdataquant, name='spam',\n description='eggs', file_origin='testfile.txt', arg1='test')\n", (29662, 29777), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((29981, 30020), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ_signal'], {}), '(result, targ_signal)\n', (29999, 30020), False, 'from numpy.testing import assert_array_equal\n'), ((30029, 30076), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targtimequant'], {}), '(result.times, targtimequant)\n', (30047, 30076), False, 'from numpy.testing import assert_array_equal\n'), ((30199, 30237), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (30229, 30237), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((30807, 30828), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(10)'], {}), '(1, 5, 10)\n', (30818, 30828), True, 
'import numpy as np\n'), ((30897, 31036), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['targtimequant'], {'signal': 'targdataquant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""'}), "(targtimequant, signal=targdataquant, name='spam',\n description='eggs', file_origin='testfile.txt', arg1='test')\n", (30921, 31036), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((31299, 31338), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ_signal'], {}), '(result, targ_signal)\n', (31317, 31338), False, 'from numpy.testing import assert_array_equal\n'), ((31347, 31394), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targtimequant'], {}), '(result.times, targtimequant)\n', (31365, 31394), False, 'from numpy.testing import assert_array_equal\n'), ((31517, 31555), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (31547, 31555), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((32483, 32505), 'copy.deepcopy', 'deepcopy', (['self.signal1'], {}), '(self.signal1)\n', (32491, 32505), False, 'from copy import deepcopy\n'), ((32715, 32803), 'neo.test.tools.assert_same_attributes', 'assert_same_attributes', (['result', 'self.signal1'], {'exclude': "['times', 't_start', 't_stop']"}), "(result, self.signal1, exclude=['times', 't_start',\n 't_stop'])\n", (32737, 32803), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((32910, 32955), 'neo.test.tools.assert_same_annotations', 
'assert_same_annotations', (['result', 'self.signal1'], {}), '(result, self.signal1)\n', (32933, 32955), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((33072, 33123), 'neo.test.tools.assert_same_array_annotations', 'assert_same_array_annotations', (['result', 'self.signal1'], {}), '(result, self.signal1)\n', (33101, 33123), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((33556, 33610), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (['shifted.times', 'self.signal1.times'], {}), '(shifted.times, self.signal1.times)\n', (33575, 33610), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((33717, 33784), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (['shifted.times', '(self.signal1.times + 10 * pq.ms)'], {}), '(shifted.times, self.signal1.times + 10 * pq.ms)\n', (33736, 33784), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((33894, 33963), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (['shifted.times', '(self.signal1.times + 1000 * pq.ms)'], {}), '(shifted.times, self.signal1.times + 1000 * pq.ms)\n', (33913, 33963), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, 
assert_same_array_annotations\n'), ((34101, 34148), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['self.data1', 'sig_as_arr.flat'], {}), '(self.data1, sig_as_arr.flat)\n', (34119, 34148), False, 'from numpy.testing import assert_array_equal\n'), ((34289, 34344), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['self.data1', 'sig_as_q.magnitude.flat'], {}), '(self.data1, sig_as_q.magnitude.flat)\n', (34307, 34344), False, 'from numpy.testing import assert_array_equal\n'), ((34682, 34697), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (34691, 34697), True, 'import numpy as np\n'), ((34764, 34785), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(10)'], {}), '(1, 5, 10)\n', (34775, 34785), True, 'import numpy as np\n'), ((34909, 35089), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['self.time1quant'], {'signal': 'self.data1quant', 'name': '"""spam"""', 'description': '"""eggs"""', 'file_origin': '"""testfile.txt"""', 'arg1': '"""test"""', 'array_annotations': 'self.arr_ann'}), "(self.time1quant, signal=self.data1quant, name=\n 'spam', description='eggs', file_origin='testfile.txt', arg1='test',\n array_annotations=self.arr_ann)\n", (34933, 35089), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((35265, 35309), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['self.signal1'], {}), '(self.signal1)\n', (35295, 35309), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((35991, 36029), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (36021, 36029), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, 
assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((36565, 36614), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (36583, 36614), False, 'from numpy.testing import assert_array_equal\n'), ((36813, 36834), 'numpy.arange', 'np.arange', (['(10.0)', '(20.0)'], {}), '(10.0, 20.0)\n', (36822, 36834), True, 'import numpy as np\n'), ((36888, 36948), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['self.time1quant'], {'signal': 'data2quant'}), '(self.time1quant, signal=data2quant)\n', (36912, 36948), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((36957, 36996), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['signal2'], {}), '(signal2)\n', (36987, 36996), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((37110, 37148), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (37140, 37148), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((37865, 37901), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['targ'], {}), '(targ)\n', (37895, 37901), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((37911, 37943), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result', 'targ'], {}), '(result, 
targ)\n', (37929, 37943), False, 'from numpy.testing import assert_array_equal\n'), ((37952, 37999), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['self.time1quant', 'targ.times'], {}), '(self.time1quant, targ.times)\n', (37970, 37999), False, 'from numpy.testing import assert_array_equal\n'), ((38008, 38052), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'targ.times'], {}), '(result.times, targ.times)\n', (38026, 38052), False, 'from numpy.testing import assert_array_equal\n'), ((38061, 38097), 'neo.test.tools.assert_same_sub_schema', 'assert_same_sub_schema', (['result', 'targ'], {}), '(result, targ)\n', (38083, 38097), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((38323, 38362), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['signal2'], {}), '(signal2)\n', (38353, 38362), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((38807, 38845), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (38837, 38845), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((39485, 39534), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (39503, 39534), False, 'from numpy.testing import assert_array_equal\n'), ((39713, 39751), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), 
'(result)\n', (39743, 39751), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((40389, 40438), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (40407, 40438), False, 'from numpy.testing import assert_array_equal\n'), ((40627, 40665), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (40657, 40665), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((41301, 41350), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (41319, 41350), False, 'from numpy.testing import assert_array_equal\n'), ((41549, 41587), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (41579, 41587), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((42223, 42272), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (42241, 42272), False, 'from numpy.testing import assert_array_equal\n'), ((42458, 42496), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['result'], {}), '(result)\n', (42488, 42496), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, 
assert_same_annotations, assert_same_array_annotations\n'), ((43134, 43183), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['result.times', 'self.time1quant'], {}), '(result.times, self.time1quant)\n', (43152, 43183), False, 'from numpy.testing import assert_array_equal\n'), ((43286, 43306), 'IPython.lib.pretty.pretty', 'pretty', (['self.signal1'], {}), '(self.signal1)\n', (43292, 43306), False, 'from IPython.lib.pretty import pretty\n'), ((44242, 44385), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['times1', 'data1'], {'name': '"""signal1"""', 'description': '"""test signal"""', 'file_origin': '"""testfile.txt"""', 'array_annotations': 'arr_ann1'}), "(times1, data1, name='signal1', description=\n 'test signal', file_origin='testfile.txt', array_annotations=arr_ann1)\n", (44266, 44385), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((44485, 44628), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['times1', 'data2'], {'name': '"""signal2"""', 'description': '"""test signal"""', 'file_origin': '"""testfile.txt"""', 'array_annotations': 'arr_ann2'}), "(times1, data2, name='signal2', description=\n 'test signal', file_origin='testfile.txt', array_annotations=arr_ann2)\n", (44509, 44628), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((44728, 44843), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['times2', 'data2'], {'name': '"""signal3"""', 'description': '"""test signal"""', 'file_origin': '"""testfile.txt"""'}), "(times2, data2, name='signal3', description=\n 'test signal', file_origin='testfile.txt')\n", (44752, 44843), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((45722, 45761), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['signal1'], {}), '(signal1)\n', (45752, 
45761), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((45770, 45809), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['signal2'], {}), '(signal2)\n', (45800, 45809), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((45818, 45858), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['merged12'], {}), '(merged12)\n', (45848, 45858), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((46339, 46393), 'neo.test.tools.assert_arrays_equal', 'assert_arrays_equal', (['merged12.magnitude', 'target_data12'], {}), '(merged12.magnitude, target_data12)\n', (46358, 46393), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((46731, 46757), 'pickle.dump', 'pickle.dump', (['signal1', 'fobj'], {}), '(signal1, fobj)\n', (46742, 46757), False, 'import pickle\n'), ((46934, 46970), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['signal1', 'signal2'], {}), '(signal1, signal2)\n', (46952, 46970), False, 'from numpy.testing import assert_array_equal\n'), ((47000, 47021), 'os.remove', 'os.remove', (['"""./pickle"""'], {}), "('./pickle')\n", (47009, 47021), False, 'import os\n'), ((5078, 5106), 'numpy.array', 'np.array', (['[20.0, 40.0, 60.0]'], {}), '([20.0, 40.0, 60.0])\n', (5086, 5106), True, 'import numpy as np\n'), 
((5581, 5595), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (5589, 5595), True, 'import numpy as np\n'), ((5657, 5672), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (5665, 5672), True, 'import numpy as np\n'), ((6440, 6468), 'numpy.array', 'np.array', (['[20.0, 40.0, 60.0]'], {}), '([20.0, 40.0, 60.0])\n', (6448, 6468), True, 'import numpy as np\n'), ((6942, 6956), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (6950, 6956), True, 'import numpy as np\n'), ((7018, 7033), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (7026, 7033), True, 'import numpy as np\n'), ((7822, 7856), 'numpy.array', 'np.array', (['[2000.0, 4000.0, 6000.0]'], {}), '([2000.0, 4000.0, 6000.0])\n', (7830, 7856), True, 'import numpy as np\n'), ((8331, 8345), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (8339, 8345), True, 'import numpy as np\n'), ((8407, 8422), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (8415, 8422), True, 'import numpy as np\n'), ((9561, 9615), 'neo.core.irregularlysampledsignal.IrregularlySampledSignal', 'IrregularlySampledSignal', (['t'], {'signal': 'D', 'testattr': '"""test"""'}), "(t, signal=D, testattr='test')\n", (9585, 9615), False, 'from neo.core.irregularlysampledsignal import IrregularlySampledSignal\n'), ((9760, 9798), 'neo.test.tools.assert_neo_object_is_compliant', 'assert_neo_object_is_compliant', (['signal'], {}), '(signal)\n', (9790, 9798), False, 'from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal, assert_neo_object_is_compliant, assert_same_sub_schema, assert_same_attributes, assert_same_annotations, assert_same_array_annotations\n'), ((12661, 12675), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (12669, 12675), True, 'import numpy as np\n'), ((12746, 12761), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (12754, 12761), True, 'import numpy as np\n'), ((13983, 13997), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (13991, 13997), True, 'import 
numpy as np\n'), ((14062, 14077), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (14070, 14077), True, 'import numpy as np\n'), ((18123, 18137), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (18131, 18137), True, 'import numpy as np\n'), ((18202, 18217), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (18210, 18217), True, 'import numpy as np\n'), ((19282, 19296), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (19290, 19296), True, 'import numpy as np\n'), ((19361, 19376), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (19369, 19376), True, 'import numpy as np\n'), ((19531, 19547), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (19539, 19547), True, 'import numpy as np\n'), ((21106, 21120), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (21114, 21120), True, 'import numpy as np\n'), ((21185, 21200), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (21193, 21200), True, 'import numpy as np\n'), ((25568, 25582), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (25576, 25582), True, 'import numpy as np\n'), ((25647, 25662), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (25655, 25662), True, 'import numpy as np\n'), ((27934, 27948), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (27942, 27948), True, 'import numpy as np\n'), ((28013, 28028), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (28021, 28028), True, 'import numpy as np\n'), ((29195, 29209), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (29203, 29209), True, 'import numpy as np\n'), ((29274, 29289), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (29282, 29289), True, 'import numpy as np\n'), ((30524, 30538), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (30532, 30538), True, 'import numpy as np\n'), ((30603, 30618), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (30611, 30618), True, 'import numpy as np\n'), ((31842, 31856), 'numpy.array', 'np.array', (['[23]'], 
{}), '([23])\n', (31850, 31856), True, 'import numpy as np\n'), ((31921, 31936), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (31929, 31936), True, 'import numpy as np\n'), ((35626, 35640), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (35634, 35640), True, 'import numpy as np\n'), ((35711, 35726), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (35719, 35726), True, 'import numpy as np\n'), ((36316, 36330), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (36324, 36330), True, 'import numpy as np\n'), ((36395, 36410), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (36403, 36410), True, 'import numpy as np\n'), ((37435, 37449), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (37443, 37449), True, 'import numpy as np\n'), ((37514, 37529), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (37522, 37529), True, 'import numpy as np\n'), ((39132, 39146), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (39140, 39146), True, 'import numpy as np\n'), ((39211, 39226), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (39219, 39226), True, 'import numpy as np\n'), ((40038, 40052), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (40046, 40052), True, 'import numpy as np\n'), ((40117, 40132), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (40125, 40132), True, 'import numpy as np\n'), ((40952, 40966), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (40960, 40966), True, 'import numpy as np\n'), ((41031, 41046), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (41039, 41046), True, 'import numpy as np\n'), ((41464, 41477), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (41472, 41477), True, 'import numpy as np\n'), ((41874, 41888), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (41882, 41888), True, 'import numpy as np\n'), ((41953, 41968), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (41961, 41968), True, 'import numpy as np\n'), ((42783, 
42797), 'numpy.array', 'np.array', (['[23]'], {}), '([23])\n', (42791, 42797), True, 'import numpy as np\n'), ((42862, 42877), 'numpy.array', 'np.array', (["['A']"], {}), "(['A'])\n", (42870, 42877), True, 'import numpy as np\n'), ((44006, 44021), 'numpy.arange', 'np.arange', (['(11.0)'], {}), '(11.0)\n', (44015, 44021), True, 'import numpy as np\n'), ((44047, 44067), 'numpy.arange', 'np.arange', (['(1.0)', '(12.0)'], {}), '(1.0, 12.0)\n', (44056, 44067), True, 'import numpy as np\n'), ((44105, 44117), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (44114, 44117), True, 'import numpy as np\n'), ((44189, 44208), 'numpy.arange', 'np.arange', (['(100)', '(103)'], {}), '(100, 103)\n', (44198, 44208), True, 'import numpy as np\n'), ((44896, 44932), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (44919, 44932), False, 'import warnings\n'), ((46216, 46259), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 100, 101, 102]'], {}), '([0, 1, 2, 3, 4, 5, 100, 101, 102])\n', (46224, 46259), True, 'import numpy as np\n'), ((46612, 46627), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (46621, 46627), True, 'import numpy as np\n'), ((46853, 46870), 'pickle.load', 'pickle.load', (['fobj'], {}), '(fobj)\n', (46864, 46870), False, 'import pickle\n'), ((47230, 47245), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (47239, 47245), True, 'import numpy as np\n'), ((47376, 47391), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (47385, 47391), True, 'import numpy as np\n'), ((9283, 9298), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (9292, 9298), True, 'import numpy as np\n'), ((9307, 9337), 'numpy.arange', 'np.arange', (['(-100.0)', '(100.0)', '(10.0)'], {}), '(-100.0, 100.0, 10.0)\n', (9316, 9337), True, 'import numpy as np\n'), ((9369, 9383), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (9378, 9383), True, 'import numpy as np\n'), ((9414, 9429), 'numpy.arange', 
'np.arange', (['(10.0)'], {}), '(10.0)\n', (9423, 9429), True, 'import numpy as np\n'), ((9439, 9469), 'numpy.arange', 'np.arange', (['(-100.0)', '(100.0)', '(10.0)'], {}), '(-100.0, 100.0, 10.0)\n', (9448, 9469), True, 'import numpy as np\n'), ((9500, 9527), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (9517, 9527), True, 'import numpy as np\n'), ((10499, 10513), 'numpy.diff', 'np.diff', (['times'], {}), '(times)\n', (10506, 10513), True, 'import numpy as np\n'), ((10877, 10902), 'numpy.__version__.split', 'np.__version__.split', (['"""."""'], {}), "('.')\n", (10897, 10902), True, 'import numpy as np\n'), ((14599, 14676), 'numpy.array', 'np.array', (['[[False, False, False, False, False, True, True, True, True, True]]'], {}), '([[False, False, False, False, False, True, True, True, True, True]])\n', (14607, 14676), True, 'import numpy as np\n'), ((14747, 14833), 'numpy.array', 'np.array', (['[[False, False, False, False, False, True, False, False, False, False]]'], {}), '([[False, False, False, False, False, True, False, False, False, \n False]])\n', (14755, 14833), True, 'import numpy as np\n'), ((14902, 14974), 'numpy.array', 'np.array', (['[[True, True, True, True, True, True, True, True, True, True]]'], {}), '([[True, True, True, True, True, True, True, True, True, True]])\n', (14910, 14974), True, 'import numpy as np\n'), ((15862, 15874), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (15871, 15874), True, 'import numpy as np\n'), ((15883, 15897), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (15891, 15897), True, 'import numpy as np\n'), ((15947, 15996), 'numpy.array', 'np.array', (['[[0, 11, 2, 33, 4, 25, 46, 37, 18, 49]]'], {}), '([[0, 11, 2, 33, 4, 25, 46, 37, 18, 49]])\n', (15955, 15996), True, 'import numpy as np\n'), ((16553, 16565), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (16562, 16565), True, 'import numpy as np\n'), ((16574, 16588), 'numpy.array', 'np.array', (['data'], {}), 
'(data)\n', (16582, 16588), True, 'import numpy as np\n'), ((16638, 16734), 'numpy.array', 'np.array', (['[[0, 11, 2, 13, 4, 15, 26, 27, 18, 19], [40, 31, 22, 33, 14, 25, 46, 37, 28,\n 49]]'], {}), '([[0, 11, 2, 13, 4, 15, 26, 27, 18, 19], [40, 31, 22, 33, 14, 25, \n 46, 37, 28, 49]])\n', (16646, 16734), True, 'import numpy as np\n'), ((37663, 37689), 'numpy.arange', 'np.arange', (['(10.0)', '(30.0)', '(2.0)'], {}), '(10.0, 30.0, 2.0)\n', (37672, 37689), True, 'import numpy as np\n'), ((38243, 38258), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (38252, 38258), True, 'import numpy as np\n'), ((38524, 38537), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (38533, 38537), True, 'import numpy as np\n'), ((5051, 5066), 'numpy.asarray', 'np.asarray', (['sig'], {}), '(sig)\n', (5061, 5066), True, 'import numpy as np\n'), ((6413, 6428), 'numpy.asarray', 'np.asarray', (['sig'], {}), '(sig)\n', (6423, 6428), True, 'import numpy as np\n'), ((7795, 7810), 'numpy.asarray', 'np.asarray', (['sig'], {}), '(sig)\n', (7805, 7810), True, 'import numpy as np\n'), ((15350, 15394), 'numpy.array', 'np.array', (['[[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]]'], {}), '([[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]])\n', (15358, 15394), True, 'import numpy as np\n'), ((17069, 17093), 'numpy.diff', 'np.diff', (['self.time1quant'], {}), '(self.time1quant)\n', (17076, 17093), True, 'import numpy as np\n'), ((43868, 43893), 'numpy.arange', 'np.arange', (['(1000.0)', '(1066.0)'], {}), '(1000.0, 1066.0)\n', (43877, 43893), True, 'import numpy as np\n'), ((43935, 43963), 'numpy.arange', 'np.arange', (['(2.0)', '(2.033)', '(0.001)'], {}), '(2.0, 2.033, 0.001)\n', (43944, 43963), True, 'import numpy as np\n'), ((46582, 46597), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (46591, 46597), True, 'import numpy as np\n'), ((47200, 47215), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (47209, 47215), True, 'import numpy as np\n'), ((47345, 47360), 'numpy.arange', 'np.arange', 
(['(10.0)'], {}), '(10.0)\n', (47354, 47360), True, 'import numpy as np\n'), ((15236, 15249), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (15245, 15249), True, 'import numpy as np\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Sampling functions
==================
**Module name:** :mod:`strawberryfields.gbs.sample`
.. currentmodule:: strawberryfields.gbs.sample
This module provides functionality for generating GBS samples using classical simulators.
An accompanying tutorial can be found :ref:`here <gbs-sample-tutorial>`.
Generating samples
------------------
An :math:`M` mode GBS device can be programmed by specifying an :math:`(M \times M)`-dimensional
symmetric matrix :math:`A` :cite:`bradler2018gaussian`. Running this device results in samples
that carry relevant information about the encoded matrix :math:`A`. When sampling, one must also
specify the mean number of photons in the device, the form of detection used at the output:
threshold detection or photon-number-resolving (PNR) detection, as well as the amount of loss.
The :func:`sample` function provides a simulation of sampling from GBS:
.. autosummary::
sample
Here, each output sample is an :math:`M`-dimensional list. If threshold detection is used
(``threshold = True``), each element of a sample is either a zero (denoting no photons detected)
or a one (denoting one or more photons detected), conventionally called a "click".
If photon-number resolving (PNR) detection is used (``threshold = False``) then elements of a
sample are non-negative integers counting the number of photons detected in each mode. The ``loss``
parameter allows for simulation of photon loss in the device, which is the most common form of
noise in quantum photonics. Here, ``loss = 0`` describes ideal loss-free GBS, while generally
``0 <= loss <= 1`` describes the proportion of photons lost while passing through the device.
Samples can be postselected based upon their total number of photons or clicks and the ``numpy``
random seed used to generate samples can be fixed:
.. autosummary::
postselect
seed
Generating subgraphs
--------------------
There are two forms to represent a sample from a GBS device:
1. In the form returned by :func:`sample`, each sample is a length :math:`M` list of counts in
each mode, e.g., the sample ``[2, 1, 2, 0, 1]`` denotes two photons in mode 0,
1 photon in mode 1, and so forth.
2. The alternative representation is a list of modes where photons or clicks were observed, e.g.,
the above sample can be written alternatively as ``[0, 0, 1, 2, 2, 4]``.
Converting from the former representation to the latter can be achieved with:
.. autosummary::
modes_from_counts
Graphs can be encoded into GBS by setting the adjacency matrix to be :math:`A`. Resultant samples
can then be used to pick out nodes from the graph to form a subgraph. If threshold detection is
used, any nodes that click are selected as part of the subgraph. For example, if a sample is
``[1, 1, 1, 0, 1]`` then the corresponding subgraph has nodes ``[0, 1, 2, 4]``. This can be found
using:
>>> modes_from_counts([1, 1, 1, 0, 1])
[0, 1, 2, 4]
A collection of GBS samples from a graph, given by :func:`sample` in the first form, can be
converted to subgraphs using:
.. autosummary::
to_subgraphs
A typical workflow would be:
>>> g = nx.erdos_renyi_graph(5, 0.7)
>>> a = nx.to_numpy_array(g)
>>> s = sample(a, 3, 4)
>>> s = to_subgraphs(s, g)
[[0, 2], [0, 1, 2, 4], [1, 2, 3, 4], [1]]
The subgraphs sampled from GBS are likely to be dense :cite:`arrazola2018using`, motivating their
use within heuristics for problems such as maximum clique (see :mod:`~.gbs.clique`).
Code details
^^^^^^^^^^^^
"""
from typing import Optional
import networkx as nx
import numpy as np
import strawberryfields as sf
def sample(
    A: np.ndarray, n_mean: float, n_samples: int = 1, threshold: bool = True, loss: float = 0.0
) -> list:
    r"""Simulate drawing samples from a GBS device encoding matrix :math:`A`.

    The symmetric matrix is embedded into a Gaussian program, optional
    loss channels model photon loss, and the requested detector type is
    applied to every mode before running the Gaussian backend.

    Args:
        A (array): the symmetric matrix to sample from
        n_mean (float): mean photon number
        n_samples (int): number of samples
        threshold (bool): use threshold detectors if ``True``, otherwise
            photon-number-resolving detectors
        loss (float): fraction of generated photons lost in the device,
            from ``loss=0`` (ideal) to ``loss=1``

    Returns:
        list[list[int]]: the simulated GBS samples
    """
    # Validate inputs up front, in the same order as before.
    if not np.allclose(A, A.T):
        raise ValueError("Input must be a NumPy array corresponding to a symmetric matrix")
    if n_samples < 1:
        raise ValueError("Number of samples must be at least one")
    if n_mean < 0:
        raise ValueError("Mean photon number must be non-negative")
    if not 0 <= loss <= 1:
        raise ValueError("Loss parameter must take a value between zero and one")

    n_modes = len(A)
    program = sf.Program(n_modes)
    engine = sf.LocalEngine(backend="gaussian")
    photons_per_mode = n_mean / float(n_modes)

    # pylint: disable=expression-not-assigned,pointless-statement
    with program.context as q:
        sf.ops.GraphEmbed(A, mean_photon_per_mode=photons_per_mode) | q
        if loss:
            for mode in q:
                sf.ops.LossChannel(1 - loss) | mode
        detector = sf.ops.MeasureThreshold() if threshold else sf.ops.MeasureFock()
        detector | q

    result = engine.run(program, run_options={"shots": n_samples}).samples
    # A single shot comes back as one sample, so wrap it in a list.
    return [result] if n_samples == 1 else result.tolist()
def postselect(samples: list, min_count: int, max_count: int) -> list:
    """Keep only samples whose total photon/click count lies in a range.

    **Example usage:**

    >>> s = [[1, 1, 1, 1, 1], [1, 1, 0, 1, 1], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]]
    >>> postselect(s, 2, 4)
    [[1, 1, 0, 1, 1], [1, 0, 0, 0, 1]]

    Args:
        samples (list[list[int]]): a list of samples
        min_count (int): minimum total count for a sample to be kept
        max_count (int): maximum total count for a sample to be kept

    Returns:
        list[list[int]]: the postselected samples
    """
    kept = []
    for counts in samples:
        total = sum(counts)
        if min_count <= total <= max_count:
            kept.append(counts)
    return kept
def seed(value: Optional[int]) -> None:
    """Seed all NumPy-based random number generators.

    Thin wrapper around `numpy.random.seed
    <https://docs.scipy.org/doc/numpy//reference/generated/numpy.random.seed.html>`_
    so that repeated sampling runs can be made reproducible: seeding with
    the same value before calling :func:`sample` yields identical samples.

    Args:
        value (int): random seed
    """
    np.random.seed(value)
def modes_from_counts(s: list) -> list:
    r"""Expand a per-mode count vector into a sorted list of occupied modes.

    Mode index :math:`i` appears ``int(s[i])`` times in the output, so the
    result lists the mode of every individual photon or click. Since there
    are typically fewer photons than modes, this form is usually shorter
    than the count vector itself.

    **Example usage:**

    >>> modes_from_counts([0, 1, 0, 1, 2, 0])
    [1, 3, 4, 4]

    Args:
        s (list[int]): a sample of counts

    Returns:
        list[int]: modes where photons or clicks were observed, sorted in
        non-decreasing order
    """
    return sorted(
        mode for mode, count in enumerate(s) for _ in range(int(count))
    )
def to_subgraphs(samples: list, graph: nx.Graph) -> list:
    """Convert count-form GBS samples into their subgraph representation.

    A node joins a sample's subgraph whenever its mode registered at least
    one photon or click. When the graph's node labels are not simply
    ``0 .. M-1``, mode indices are relabelled to the actual node labels.

    **Example usage:**

    >>> g = nx.erdos_renyi_graph(5, 0.7)
    >>> s = [[1, 1, 1, 1, 1], [1, 1, 0, 1, 1], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]]
    >>> to_subgraphs(s, g)
    [[0, 1, 2, 3, 4], [0, 1, 3, 4], [], [0, 4]]

    Args:
        samples (list[list[int]]): a list of samples, each a list of counts
        graph (nx.Graph): the input graph

    Returns:
        list[list[int]]: one list of subgraph nodes per sample
    """
    graph_nodes = list(graph.nodes)
    # When nodes are already 0..M-1 no relabelling is needed.
    canonical = graph_nodes == list(range(len(graph_nodes)))
    subgraphs = []
    for counts in samples:
        selected = list(set(modes_from_counts(counts)))
        if not canonical:
            selected = sorted(graph_nodes[i] for i in selected)
        subgraphs.append(selected)
    return subgraphs
| [
"strawberryfields.Program",
"numpy.allclose",
"strawberryfields.ops.MeasureFock",
"strawberryfields.ops.GraphEmbed",
"strawberryfields.ops.LossChannel",
"numpy.random.seed",
"strawberryfields.ops.MeasureThreshold",
"strawberryfields.LocalEngine"
] | [((5616, 5633), 'strawberryfields.Program', 'sf.Program', (['nodes'], {}), '(nodes)\n', (5626, 5633), True, 'import strawberryfields as sf\n'), ((5644, 5678), 'strawberryfields.LocalEngine', 'sf.LocalEngine', ([], {'backend': '"""gaussian"""'}), "(backend='gaussian')\n", (5658, 5678), True, 'import strawberryfields as sf\n'), ((7642, 7663), 'numpy.random.seed', 'np.random.seed', (['value'], {}), '(value)\n', (7656, 7663), True, 'import numpy as np\n'), ((5189, 5208), 'numpy.allclose', 'np.allclose', (['A', 'A.T'], {}), '(A, A.T)\n', (5200, 5208), True, 'import numpy as np\n'), ((5829, 5892), 'strawberryfields.ops.GraphEmbed', 'sf.ops.GraphEmbed', (['A'], {'mean_photon_per_mode': 'mean_photon_per_mode'}), '(A, mean_photon_per_mode=mean_photon_per_mode)\n', (5846, 5892), True, 'import strawberryfields as sf\n'), ((6025, 6050), 'strawberryfields.ops.MeasureThreshold', 'sf.ops.MeasureThreshold', ([], {}), '()\n', (6048, 6050), True, 'import strawberryfields as sf\n'), ((6081, 6101), 'strawberryfields.ops.MeasureFock', 'sf.ops.MeasureFock', ([], {}), '()\n', (6099, 6101), True, 'import strawberryfields as sf\n'), ((5956, 5984), 'strawberryfields.ops.LossChannel', 'sf.ops.LossChannel', (['(1 - loss)'], {}), '(1 - loss)\n', (5974, 5984), True, 'import strawberryfields as sf\n')] |
from __future__ import division
import json
import tempfile
import unittest
import tttrlib
import numpy as np
# Announce which test module is running.
print("Test: ", __file__)

# Load the shared test configuration. Use a context manager so the
# settings file handle is closed deterministically (the previous
# bare open() leaked the handle and triggered ResourceWarning).
with open("./test/settings.json") as settings_file:
    settings = json.load(settings_file)
test_files = settings["test_files"]
class Tests(unittest.TestCase):
    """Round-trip write/read tests for tttrlib TTTR files.

    NOTE(review): ``test_write_tttr`` and ``test_write_tttr_other_header``
    reference a module-level ``data`` object that is never defined in this
    file — presumably a ``tttrlib.TTTR`` instance created elsewhere in a
    fuller version of this module. Verify before running; as written those
    tests will raise NameError.
    """

    @unittest.expectedFailure
    def test_reading(self):
        # Writing support is not reliable yet, so this write/read
        # round-trip is expected to fail.
        _, fn = tempfile.mkstemp()
        routine = 'SPC-130'
        for file_type in test_files:
            # Read each configured test file, write it back out, then
            # re-read the written copy and compare macro times.
            data = tttrlib.TTTR(*file_type)
            data.write_file(
                fn,
                routine
            )
            data_written = tttrlib.TTTR(fn, routine)
            self.assertEqual(
                np.allclose(
                    data.get_macro_time(),
                    data_written.get_macro_time()
                ),
                True
            )

    def test_write_tttr(self):
        # Write the (module-level) data to a temporary SPC-130 file and
        # verify the re-read copy matches event-by-event.
        _, filename = tempfile.mkstemp(
            suffix='.spc'
        )
        data.write(filename)
        d2 = tttrlib.TTTR(filename, 'SPC-130')
        self.assertEqual(np.allclose(d2.micro_times, data.micro_times), True)
        self.assertEqual(np.allclose(d2.macro_times, data.macro_times), True)
        self.assertEqual(np.allclose(d2.routing_channels, data.routing_channels), True)

    def test_write_tttr_other_header(self):
        # Same round-trip as above, but writing with a header taken from a
        # different TTTR file (PTU HydraHarp T3).
        _, filename = tempfile.mkstemp(suffix='.ptu')
        # Read header from other TTTR file
        other_header = tttrlib.TTTRHeader(settings["ptu_hh_t3_filename"], 0)
        data.write(filename, other_header)
        d2 = tttrlib.TTTR(filename)
        self.assertEqual(np.allclose(d2.micro_times, data.micro_times), True)
        self.assertEqual(np.allclose(d2.macro_times, data.macro_times), True)
        self.assertEqual(np.allclose(d2.routing_channels, data.routing_channels), True)

    # def test_write_tttr_new_header(self):
    #     _, filename = tempfile.mkstemp(suffix='.ptu')
    #     # Read header from other TTTR file
    #     other_header = tttrlib.TTTRHeader()
    #     data.write(filename, other_header)
    #     d2 = tttrlib.TTTR(filename)
    #     self.assertEqual(np.allclose(d2.micro_times, data.micro_times), True)
    #     self.assertEqual(np.allclose(d2.macro_times, data.macro_times), True)
    #     self.assertEqual(np.allclose(d2.routing_channels, data.routing_channels), True)
| [
"tttrlib.TTTR",
"tempfile.mkstemp",
"numpy.allclose",
"tttrlib.TTTRHeader"
] | [((436, 454), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (452, 454), False, 'import tempfile\n'), ((964, 995), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".spc"""'}), "(suffix='.spc')\n", (980, 995), False, 'import tempfile\n'), ((1060, 1093), 'tttrlib.TTTR', 'tttrlib.TTTR', (['filename', '"""SPC-130"""'], {}), "(filename, 'SPC-130')\n", (1072, 1093), False, 'import tttrlib\n'), ((1405, 1436), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".ptu"""'}), "(suffix='.ptu')\n", (1421, 1436), False, 'import tempfile\n'), ((1503, 1556), 'tttrlib.TTTRHeader', 'tttrlib.TTTRHeader', (["settings['ptu_hh_t3_filename']", '(0)'], {}), "(settings['ptu_hh_t3_filename'], 0)\n", (1521, 1556), False, 'import tttrlib\n'), ((1613, 1635), 'tttrlib.TTTR', 'tttrlib.TTTR', (['filename'], {}), '(filename)\n', (1625, 1635), False, 'import tttrlib\n'), ((539, 563), 'tttrlib.TTTR', 'tttrlib.TTTR', (['*file_type'], {}), '(*file_type)\n', (551, 563), False, 'import tttrlib\n'), ((678, 703), 'tttrlib.TTTR', 'tttrlib.TTTR', (['fn', 'routine'], {}), '(fn, routine)\n', (690, 703), False, 'import tttrlib\n'), ((1119, 1164), 'numpy.allclose', 'np.allclose', (['d2.micro_times', 'data.micro_times'], {}), '(d2.micro_times, data.micro_times)\n', (1130, 1164), True, 'import numpy as np\n'), ((1197, 1242), 'numpy.allclose', 'np.allclose', (['d2.macro_times', 'data.macro_times'], {}), '(d2.macro_times, data.macro_times)\n', (1208, 1242), True, 'import numpy as np\n'), ((1275, 1330), 'numpy.allclose', 'np.allclose', (['d2.routing_channels', 'data.routing_channels'], {}), '(d2.routing_channels, data.routing_channels)\n', (1286, 1330), True, 'import numpy as np\n'), ((1661, 1706), 'numpy.allclose', 'np.allclose', (['d2.micro_times', 'data.micro_times'], {}), '(d2.micro_times, data.micro_times)\n', (1672, 1706), True, 'import numpy as np\n'), ((1739, 1784), 'numpy.allclose', 'np.allclose', (['d2.macro_times', 'data.macro_times'], {}), '(d2.macro_times, 
data.macro_times)\n', (1750, 1784), True, 'import numpy as np\n'), ((1817, 1872), 'numpy.allclose', 'np.allclose', (['d2.routing_channels', 'data.routing_channels'], {}), '(d2.routing_channels, data.routing_channels)\n', (1828, 1872), True, 'import numpy as np\n')] |
#!/usr/bin/python
"""
vehicleDetection.py: version 0.1.0
History:
2017/01/29: coding style phase1:
reformat to python-guide.org code style
http://docs.python-guide.org/en/latest/writing/style/
which uses PEP 8 as a base: http://pep8.org/.
2017/01/23: Initial version converted to a class
"""
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import cv2
import glob
import time
import os
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from skimage.feature import hog
from sklearn.externals import joblib
from p5lib.roadGrid import RoadGrid
# a class for wrapping our SVM trained HOG vehicle detector.
class VehicleDetection():
# initialize
def __init__(self, projectedX, projectedY, versionName=None,
cspace='RGB', orient=9, pix_per_cell=8, cell_per_block=2,
hog_channel=0, threshold=2.5,
dataFileNamePattern="imgExt%03d.jpg"):
self.start = time.strftime("%Y%m%d%H%M%S", time.gmtime())
self.projectedX = projectedX
self.projectedY = projectedY
self.versionName = versionName
self.cspace = cspace
self.hog_channel = hog_channel
if versionName is not None:
self.trained_model = './trained/' + versionName + '.pkl'
self.trained_scalar = './trained/scaler' + versionName + '.pkl'
self.svc = joblib.load(self.trained_model)
self.orient = orient
self.pix_per_cell = pix_per_cell
self.cell_per_block = cell_per_block
if self.trained_scalar is not None and \
self.versionName is not None:
self.X_scaler = joblib.load(self.trained_scalar)
self.threshold = threshold
self.dataFileNamePattern = dataFileNamePattern
# Define a function to change the detector's threshold
    def set_threshold(self, new_threshold):
        """Set the detector's ``threshold`` attribute to ``new_threshold``."""
        self.threshold = new_threshold
# Define a function to compute binned color features
def bin_spatial(self, img, size=(32, 32)):
# Use cv2.resize().ravel() to create the feature vector
features = cv2.resize(img, size).ravel()
# Return the feature vector
return features
# Define a function to compute color histogram features
def color_hist(self, img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(
img[:, :, 0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(
img[:, :, 1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(
img[:, :, 2], bins=nbins, range=bins_range)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate(
(channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
# Define a function to return HOG features and visualization
def get_hog_features(self, img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis:
features, hog_image = hog(
img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(
img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True, visualise=vis,
feature_vector=feature_vec)
return features
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
    def extract_features(self, image, cspace='RGB', spatial_size=(32, 32),
                         hist_bins=32, hist_range=(0, 256), orient=9,
                         pix_per_cell=8, cell_per_block=2, hog_channel=0):
        """Build the scaled feature vector (spatial + histogram + HOG) for one image.

        The image is resized to 64x64, converted to the requested color
        space, and the concatenated features are normalized through the
        pre-fitted ``self.X_scaler``.

        NOTE(review): if ``cspace`` is not 'RGB' and not one of the listed
        conversions, ``feature_image`` is never assigned and this raises
        NameError — confirm callers only pass supported color spaces.
        Also, ``hog_channel`` is ignored for the 'GRAY' and 'GRAYRGB'
        branches, which run HOG on the grayscale image directly.

        Returns:
            the (1, n_features) scaled feature row, or ``None`` when the
            input image has a zero-sized dimension.
        """
        if image.shape[0] > 0 and image.shape[1] > 0:
            # The classifier was trained on 64x64 patches.
            if image.shape[0] != 64 or image.shape[1] != 64:
                image = cv2.resize(image, (64, 64))
            # apply color conversion if other than 'RGB'
            if cspace != 'RGB':
                if cspace == 'HSV':
                    feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
                elif cspace == 'LUV':
                    feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
                elif cspace == 'HLS':
                    feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
                elif cspace == 'YUV':
                    feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
                elif cspace == 'GRAY':
                    feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                elif cspace == 'GRAYRGB':
                    # Keep the RGB copy for spatial/histogram features;
                    # HOG runs on the grayscale conversion.
                    rgbfeature_image = np.copy(image)
                    feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            else:
                feature_image = np.copy(image)
            # Apply bin_spatial() to get spatial color features
            if cspace == 'GRAYRGB':
                spatial_features = self.bin_spatial(
                    rgbfeature_image, size=spatial_size)
                # Apply color_hist() also with a color space option now
                hist_features = self.color_hist(
                    rgbfeature_image, nbins=hist_bins,
                    bins_range=hist_range)
                # Call get_hog_features() with vis=False, feature_vec=True
                hog_features = self.get_hog_features(
                    feature_image, orient, pix_per_cell,
                    cell_per_block, vis=False, feature_vec=True)
                # Concatenate all feature groups into one vector
                hogFeatures = np.concatenate(
                    (spatial_features, hist_features, hog_features))
            elif cspace == 'GRAY':
                # Grayscale uses HOG features only — no spatial/histogram.
                hog_features = self.get_hog_features(
                    feature_image, orient, pix_per_cell,
                    cell_per_block, vis=False, feature_vec=True)
                hogFeatures = hog_features
            else:
                spatial_features = self.bin_spatial(
                    feature_image, size=spatial_size)
                # Apply color_hist() also with a color space option now
                hist_features = self.color_hist(
                    feature_image, nbins=hist_bins, bins_range=hist_range)
                # Call get_hog_features() with vis=False, feature_vec=True
                hog_features = self.get_hog_features(
                    feature_image[:, :, hog_channel], orient, pix_per_cell,
                    cell_per_block, vis=False, feature_vec=True)
                # Concatenate all feature groups into one vector
                hogFeatures = np.concatenate(
                    (spatial_features, hist_features, hog_features))
            return self.X_scaler.transform(hogFeatures.reshape(1, -1))
        else:
            return None
# specialized sliding window generation.
# we are looking at top down birds-eye view and
# limiting the detection to just the lanes.
# we need to use the lane lines to help generate the sliding window
# locations.
    def slidingWindows(self, lines, laneIdx, complete=False):
        """Generate 64x64 sliding-window positions along detected lanes.

        Works on the top-down (birds-eye) projection and restricts the
        search to the area between adjacent lane lines: each window is
        centered horizontally on the midpoint of the left and right lane
        polynomials at its vertical position.

        Args:
            lines: lane-line objects; each is expected to expose
                ``currentFit`` (polynomial coefficients),
                ``bottomProjectedY`` and ``getTopPoint()`` —
                presumably the Line class from the lane-finding module;
                verify against the caller.
            laneIdx: index of the ego lane among the lane gaps
            complete (bool): when True, emit windows down the full length
                of every lane; when False, only windows near the top of
                each lane (plus one near the bottom for non-ego lanes)

        Returns:
            RoadGrid: grid object holding the per-lane window boxes
        """
        # calculate the window positions
        nlanes = len(lines) - 1
        x0 = self.projectedX/2
        y0 = self.projectedY
        # create roadgrid for boxes
        window_list = RoadGrid(x0, y0, nlanes, laneIdx)
        for i in range(nlanes):
            lane_boxes = {}
            # Polynomials of the lane lines bounding this lane gap.
            leftPolynomial = np.poly1d(lines[i].currentFit)
            rightPolynomial = np.poly1d(lines[i + 1].currentFit)
            # horizontal lines
            # we treat left and right lanes differently because of the
            # projection. In the 'complete' case we are getting all
            # of the sliding windows
            if complete:
                if i < laneIdx:
                    indexedBottom = i + 1
                else:
                    indexedBottom = i
                # Walk down the lane in 32-pixel steps, one window per step.
                for j in range(
                        int(lines[indexedBottom].bottomProjectedY / 32)):
                    y1 = 32 * j
                    mid = int(
                        (rightPolynomial([y1]) +
                         leftPolynomial([y1])) / 2)
                    x1 = mid - 32
                    x2 = mid + 32
                    y2 = y1 + 64
                    # Keep only windows fully inside the projected image.
                    if (x1 > 0 and x2 < self.projectedX and
                            y1 > 0 and y2 < self.projectedY):
                        lane_boxes['%d' % (j)] = ((x1, y1), (x2, y2))
            # In the else case we are getting only the windows at the top
            # and bottom of our lanes for the sliding windows
            else:
                linetop = lines[i].getTopPoint()
                if i == laneIdx:
                    ylist = [(linetop[1], 0),
                             (linetop[1] + 32, 1),
                             (linetop[1] + 64, 2)]
                elif i < laneIdx:
                    ylist = [(linetop[1], 0),
                             (linetop[1] + 32, 1),
                             (linetop[1] + 64, 2),
                             (lines[i].bottomProjectedY - 96, 55)]
                else:
                    ylist = [(linetop[1], 0),
                             (linetop[1] + 32, 1),
                             (linetop[1] + 64, 2),
                             (lines[i + 1].bottomProjectedY - 32, 55)]
                for y1, j in ylist:
                    mid = int(
                        (rightPolynomial([y1]) + leftPolynomial([y1])) / 2)
                    x1 = mid - 32
                    x2 = mid + 32
                    y2 = y1 + 64
                    if (x1 > 0 and x2 < self.projectedX and
                            y1 > 0 and y2 < self.projectedY):
                        lane_boxes['%d' % (j)] = ((x1, y1), (x2, y2))
            window_list.map_boxes(i, lane_boxes)
        return window_list
# draw_boxes function
def draw_boxes(self, img, windows, color=(255, 255, 255), thick=20):
# Iterate through the bounding boxes in a windows list
for bbox in windows:
# Draw a rectangle given bbox coordinates
cv2.rectangle(
img, (int(bbox[0][0]), int(bbox[0][1])),
(int(bbox[1][0]), int(bbox[1][1])), color, thick)
    # Define a way for us to write out a sample of the HOG
    def drawPlots(self, imagefile, sampleTitle, images):
        """Save a grid figure comparing originals with their HOG/augmented views.

        Each row shows a text panel (filename and HOG parameters), the
        original image, and the augmented image.  The figure is written to
        *imagefile*, then re-read and vertically cropped to trim margins.

        Args:
            imagefile: path the composite image is written to (overwritten).
            sampleTitle: label used in the 'Augmented ...' column title.
            images: iterable of (filename, orient, pix_per_cell,
                cell_per_block, image1, image2) tuples, one per row.
        """
        # print("saving image and hog results to ", imagefile)
        # Setup plot
        fig = plt.figure(figsize=(12, len(images) * 9))
        # Narrow text column, two wide image columns.
        w_ratios = [2.0, 6.5, 6.5]
        h_ratios = [9.0 for n in range(len(images))]
        grid = gridspec.GridSpec(
            len(images), 3, wspace=0.05, hspace=0.0,
            width_ratios=w_ratios, height_ratios=h_ratios)
        i = 0
        for filename, orient, pix_per_cell, \
                cell_per_block, image1, image2 in images:
            # draw the images
            # next image
            title = '%s\n Orientation: %d\n'
            title += '  Pix_per_cell: %d\n'
            title += '  Cell_per_block: %d'
            title = title % \
                (filename, orient, pix_per_cell, cell_per_block)
            # Text panel: axis frame and ticks removed so only the label shows.
            ax = plt.Subplot(fig, grid[i])
            ax.text(-0.5, 0.4, title, fontsize=8)
            ax.set_xticks([])
            ax.set_yticks([])
            for sp in ax.spines.values():
                sp.set_visible(False)
            fig.add_subplot(ax)
            i += 1
            # Original image; column header only on the first row.
            ax = plt.Subplot(fig, grid[i])
            ax.imshow(image1)
            if i == 1:
                ax.set_title('Original', size=8)
            ax.set_xticks([])
            ax.set_yticks([])
            fig.add_subplot(ax)
            i += 1
            # Augmented/HOG image; column header only on the first row.
            ax = plt.Subplot(fig, grid[i])
            ax.imshow(image2)
            if i == 2:
                ax.set_title('Augmented %s' % (sampleTitle), size=8)
            ax.set_xticks([])
            ax.set_yticks([])
            fig.add_subplot(ax)
            i += 1
        plt.savefig(imagefile)
        # Re-load the saved figure and crop top/bottom whitespace margins
        # (0.65 of one row height on each side), then write it back.
        image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
        y, x, ch = image.shape
        cuttoff = int((y / len(images)) * 0.65)
        image = image[cuttoff:(y - cuttoff), :, :]
        cv2.imwrite(imagefile, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
# Define a way for us to process an image with
# a list of sliding windows and try to detect vehicles
def detectVehicles(self, image, roadgrid):
mapping = roadgrid.getMapping()
for box in mapping.keys():
if not mapping[box]['occluded'] and \
not mapping[box]['found'] and \
mapping[box]['vehicle'] is None:
window = mapping[box]['window']
wimage = image[
window[0][1]:window[1][1],
window[0][0]:window[1][0]]
wfeatures = self.extract_features(
wimage, cspace=self.cspace, spatial_size=(32, 32),
orient=self.orient, pix_per_cell=self.pix_per_cell,
cell_per_block=self.cell_per_block,
hog_channel=self.hog_channel,
hist_bins=32, hist_range=(0, 256))
if wfeatures is not None:
confidence = self.svc.decision_function(
wfeatures.reshape(1, -1))
if confidence[0] > self.threshold:
roadgrid.setFound(box)
return roadgrid
# Define a way for us to collect data from images and videos
def collectData(self, frame, image, windows):
baseDir = "collected/%s/%04d/" % (self.start, frame)
if not os.path.exists(baseDir):
os.makedirs(baseDir)
i = 0
for window in [lane for lane in windows]:
wimage = image[window[0][1]:window[
1][1], window[0][0]:window[1][0]]
outfilename = baseDir + self.dataFileNamePattern % (i)
cv2.imwrite(outfilename,
cv2.cvtColor(wimage, cv2.COLOR_RGB2BGR))
i += 1
| [
"os.path.exists",
"numpy.histogram",
"numpy.copy",
"matplotlib.pyplot.savefig",
"cv2.resize",
"os.makedirs",
"sklearn.externals.joblib.load",
"matplotlib.pyplot.Subplot",
"numpy.concatenate",
"cv2.cvtColor",
"p5lib.roadGrid.RoadGrid",
"skimage.feature.hog",
"time.gmtime",
"numpy.poly1d",
... | [((2488, 2544), 'numpy.histogram', 'np.histogram', (['img[:, :, 0]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 0], bins=nbins, range=bins_range)\n', (2500, 2544), True, 'import numpy as np\n'), ((2582, 2638), 'numpy.histogram', 'np.histogram', (['img[:, :, 1]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 1], bins=nbins, range=bins_range)\n', (2594, 2638), True, 'import numpy as np\n'), ((2676, 2732), 'numpy.histogram', 'np.histogram', (['img[:, :, 2]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 2], bins=nbins, range=bins_range)\n', (2688, 2732), True, 'import numpy as np\n'), ((2836, 2906), 'numpy.concatenate', 'np.concatenate', (['(channel1_hist[0], channel2_hist[0], channel3_hist[0])'], {}), '((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n', (2850, 2906), True, 'import numpy as np\n'), ((7957, 7990), 'p5lib.roadGrid.RoadGrid', 'RoadGrid', (['x0', 'y0', 'nlanes', 'laneIdx'], {}), '(x0, y0, nlanes, laneIdx)\n', (7965, 7990), False, 'from p5lib.roadGrid import RoadGrid\n'), ((12611, 12633), 'matplotlib.pyplot.savefig', 'plt.savefig', (['imagefile'], {}), '(imagefile)\n', (12622, 12633), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1075), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (1073, 1075), False, 'import time\n'), ((1462, 1493), 'sklearn.externals.joblib.load', 'joblib.load', (['self.trained_model'], {}), '(self.trained_model)\n', (1473, 1493), False, 'from sklearn.externals import joblib\n'), ((1732, 1764), 'sklearn.externals.joblib.load', 'joblib.load', (['self.trained_scalar'], {}), '(self.trained_scalar)\n', (1743, 1764), False, 'from sklearn.externals import joblib\n'), ((3314, 3511), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'orient', 'pixels_per_cell': '(pix_per_cell, pix_per_cell)', 'cells_per_block': '(cell_per_block, cell_per_block)', 'transform_sqrt': '(True)', 'visualise': 'vis', 'feature_vector': 'feature_vec'}), '(img, orientations=orient, 
pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True,\n visualise=vis, feature_vector=feature_vec)\n', (3317, 3511), False, 'from skimage.feature import hog\n'), ((3702, 3899), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'orient', 'pixels_per_cell': '(pix_per_cell, pix_per_cell)', 'cells_per_block': '(cell_per_block, cell_per_block)', 'transform_sqrt': '(True)', 'visualise': 'vis', 'feature_vector': 'feature_vec'}), '(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True,\n visualise=vis, feature_vector=feature_vec)\n', (3705, 3899), False, 'from skimage.feature import hog\n'), ((8081, 8111), 'numpy.poly1d', 'np.poly1d', (['lines[i].currentFit'], {}), '(lines[i].currentFit)\n', (8090, 8111), True, 'import numpy as np\n'), ((8142, 8176), 'numpy.poly1d', 'np.poly1d', (['lines[i + 1].currentFit'], {}), '(lines[i + 1].currentFit)\n', (8151, 8176), True, 'import numpy as np\n'), ((11801, 11826), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'grid[i]'], {}), '(fig, grid[i])\n', (11812, 11826), True, 'import matplotlib.pyplot as plt\n'), ((12086, 12111), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'grid[i]'], {}), '(fig, grid[i])\n', (12097, 12111), True, 'import matplotlib.pyplot as plt\n'), ((12343, 12368), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'grid[i]'], {}), '(fig, grid[i])\n', (12354, 12368), True, 'import matplotlib.pyplot as plt\n'), ((12663, 12684), 'cv2.imread', 'cv2.imread', (['imagefile'], {}), '(imagefile)\n', (12673, 12684), False, 'import cv2\n'), ((12866, 12904), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (12878, 12904), False, 'import cv2\n'), ((14289, 14312), 'os.path.exists', 'os.path.exists', (['baseDir'], {}), '(baseDir)\n', (14303, 14312), False, 'import os\n'), ((14326, 14346), 'os.makedirs', 
'os.makedirs', (['baseDir'], {}), '(baseDir)\n', (14337, 14346), False, 'import os\n'), ((2186, 2207), 'cv2.resize', 'cv2.resize', (['img', 'size'], {}), '(img, size)\n', (2196, 2207), False, 'import cv2\n'), ((4489, 4516), 'cv2.resize', 'cv2.resize', (['image', '(64, 64)'], {}), '(image, (64, 64))\n', (4499, 4516), False, 'import cv2\n'), ((5451, 5465), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (5458, 5465), True, 'import numpy as np\n'), ((6245, 6308), 'numpy.concatenate', 'np.concatenate', (['(spatial_features, hist_features, hog_features)'], {}), '((spatial_features, hist_features, hog_features))\n', (6259, 6308), True, 'import numpy as np\n'), ((14637, 14676), 'cv2.cvtColor', 'cv2.cvtColor', (['wimage', 'cv2.COLOR_RGB2BGR'], {}), '(wimage, cv2.COLOR_RGB2BGR)\n', (14649, 14676), False, 'import cv2\n'), ((4736, 4774), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, cv2.COLOR_RGB2HSV)\n', (4748, 4774), False, 'import cv2\n'), ((7274, 7337), 'numpy.concatenate', 'np.concatenate', (['(spatial_features, hist_features, hog_features)'], {}), '((spatial_features, hist_features, hog_features))\n', (7288, 7337), True, 'import numpy as np\n'), ((4849, 4887), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2LUV'], {}), '(image, cv2.COLOR_RGB2LUV)\n', (4861, 4887), False, 'import cv2\n'), ((4962, 5000), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HLS'], {}), '(image, cv2.COLOR_RGB2HLS)\n', (4974, 5000), False, 'import cv2\n'), ((5075, 5113), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2YUV'], {}), '(image, cv2.COLOR_RGB2YUV)\n', (5087, 5113), False, 'import cv2\n'), ((5189, 5228), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (5201, 5228), False, 'import cv2\n'), ((5310, 5324), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (5317, 5324), True, 'import numpy as np\n'), ((5361, 5400), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 
'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (5373, 5400), False, 'import cv2\n')] |
import sys
import copy
import numpy as np
from pyrigidbody3d import geometry
from pyrigidbody3d import rigidbody
from pyrigidbody3d import world
# real-time updates are a bit choppy
import meshcat
import meshcat.geometry as g
import meshcat.transformations as tf
import math
import time
import numpy as np
# Simulation parameters: 60 Hz step and 20 constraint-solver iterations.
SIMULATION_TIME_STEP = 1. / 60.#240.
NUM_SOLVER_ITERATIONS = 20
# Sphere radius shared by the physics body and the rendered mesh.
RADIUS=0.5
# Physics world; gravity has a -y component as well as -z, so the sphere
# drifts sideways while falling.
physics_world = world.World(NUM_SOLVER_ITERATIONS)
physics_world.gravity = np.array([0.0, -2.0, -9.8])
# Open the meshcat visualizer (launches a browser viewer).
vis = meshcat.Visualizer().open()
#physics plane
plane = geometry.Plane()
# inv_mass=0.0 makes the plane a static (immovable) body.
plane_id = rigidbody.RigidBody(inv_mass=0.0, collision_shape=plane)
physics_world.bodies.append(plane_id)
#rendering plane
ground = g.Box([10,10,0.01])
vis['ground'].set_object(ground,g.MeshLambertMaterial( color=0xffffff, wireframe=False))
#physics sphere
sphere = geometry.Sphere(RADIUS)
sphere_id = rigidbody.RigidBody(inv_mass=1.0, collision_shape=sphere)
# Start the sphere 2.6 units above the ground plane.
sphere_id.world_pose.position = np.array([0., 0., 2.6])
physics_world.bodies.append(sphere_id)
#rendering sphere
sphere = g.Sphere([RADIUS])
vis['sphere'].set_object(sphere,g.MeshPhongMaterial(color=0x5555ff, wireframe=True))
dt = SIMULATION_TIME_STEP
#todo: convert the sphere orientation quaternion to mat3x3
# Initial transform: identity rotation with the sphere's start position.
mat4 = tf.rotation_matrix(0, [0, 0, 1])
mat4[:3, 3] = sphere_id.world_pose.position
vis['sphere'].set_transform(mat4)
#real-time updates are a bit choppy, so record an animation instead
#for _ in range(200):
#  physics_world.step(dt)
#  mat4[:3, 3] = sphere_id.world_pose.position
#  vis['sphere'].set_transform(mat4)
#  time.sleep(0.5*SIMULATION_TIME_STEP)
from meshcat.animation import Animation
import meshcat.transformations as tf
# Reset the sphere so the recorded animation starts from the drop point.
sphere_id.world_pose.position = np.array([0., 0., 2.6])
anim = Animation()
# Step the physics 200 frames, recording the sphere transform per frame.
for frame_index in range(200):
  physics_world.step(dt)
  mat4 = sphere_id.world_pose.matrix()
  with anim.at_frame(vis, frame_index) as frame:
    frame["sphere"].set_transform(mat4)
# `set_animation` actually sends the animation to the
# viewer. By default, the viewer will play the animation
# right away. To avoid that, you can also pass `play=False`.
vis.set_animation(anim)#, play=False)
| [
"meshcat.geometry.MeshLambertMaterial",
"meshcat.Visualizer",
"pyrigidbody3d.geometry.Sphere",
"meshcat.animation.Animation",
"meshcat.geometry.Sphere",
"numpy.array",
"meshcat.transformations.rotation_matrix",
"meshcat.geometry.MeshPhongMaterial",
"pyrigidbody3d.geometry.Plane",
"pyrigidbody3d.wo... | [((404, 438), 'pyrigidbody3d.world.World', 'world.World', (['NUM_SOLVER_ITERATIONS'], {}), '(NUM_SOLVER_ITERATIONS)\n', (415, 438), False, 'from pyrigidbody3d import world\n'), ((463, 490), 'numpy.array', 'np.array', (['[0.0, -2.0, -9.8]'], {}), '([0.0, -2.0, -9.8])\n', (471, 490), True, 'import numpy as np\n'), ((549, 565), 'pyrigidbody3d.geometry.Plane', 'geometry.Plane', ([], {}), '()\n', (563, 565), False, 'from pyrigidbody3d import geometry\n'), ((577, 633), 'pyrigidbody3d.rigidbody.RigidBody', 'rigidbody.RigidBody', ([], {'inv_mass': '(0.0)', 'collision_shape': 'plane'}), '(inv_mass=0.0, collision_shape=plane)\n', (596, 633), False, 'from pyrigidbody3d import rigidbody\n'), ((699, 720), 'meshcat.geometry.Box', 'g.Box', (['[10, 10, 0.01]'], {}), '([10, 10, 0.01])\n', (704, 720), True, 'import meshcat.geometry as g\n'), ((834, 857), 'pyrigidbody3d.geometry.Sphere', 'geometry.Sphere', (['RADIUS'], {}), '(RADIUS)\n', (849, 857), False, 'from pyrigidbody3d import geometry\n'), ((870, 927), 'pyrigidbody3d.rigidbody.RigidBody', 'rigidbody.RigidBody', ([], {'inv_mass': '(1.0)', 'collision_shape': 'sphere'}), '(inv_mass=1.0, collision_shape=sphere)\n', (889, 927), False, 'from pyrigidbody3d import rigidbody\n'), ((960, 985), 'numpy.array', 'np.array', (['[0.0, 0.0, 2.6]'], {}), '([0.0, 0.0, 2.6])\n', (968, 985), True, 'import numpy as np\n'), ((1051, 1069), 'meshcat.geometry.Sphere', 'g.Sphere', (['[RADIUS]'], {}), '([RADIUS])\n', (1059, 1069), True, 'import meshcat.geometry as g\n'), ((1249, 1281), 'meshcat.transformations.rotation_matrix', 'tf.rotation_matrix', (['(0)', '[0, 0, 1]'], {}), '(0, [0, 0, 1])\n', (1267, 1281), True, 'import meshcat.transformations as tf\n'), ((1713, 1738), 'numpy.array', 'np.array', (['[0.0, 0.0, 2.6]'], {}), '([0.0, 0.0, 2.6])\n', (1721, 1738), True, 'import numpy as np\n'), ((1745, 1756), 'meshcat.animation.Animation', 'Animation', ([], {}), '()\n', (1754, 1756), False, 'from meshcat.animation import 
Animation\n'), ((751, 805), 'meshcat.geometry.MeshLambertMaterial', 'g.MeshLambertMaterial', ([], {'color': '(16777215)', 'wireframe': '(False)'}), '(color=16777215, wireframe=False)\n', (772, 805), True, 'import meshcat.geometry as g\n'), ((1102, 1152), 'meshcat.geometry.MeshPhongMaterial', 'g.MeshPhongMaterial', ([], {'color': '(5592575)', 'wireframe': '(True)'}), '(color=5592575, wireframe=True)\n', (1121, 1152), True, 'import meshcat.geometry as g\n'), ((497, 517), 'meshcat.Visualizer', 'meshcat.Visualizer', ([], {}), '()\n', (515, 517), False, 'import meshcat\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper methods for tensor data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import binascii
import numpy as np
from tensorboard import build_with_tf, util
# Prefer TensorFlow's own CLI slice parser when this build bundles TF;
# otherwise fall back to the copy vendored inside TensorBoard.
USE_TF = build_with_tf.use_tf()
if USE_TF:
  from tensorflow.python.debug.cli import command_parser
else:
  from tensorboard.utils import command_parser
from tensorboard.plugins.debugger import health_pill_calc
def numel(shape):
  """Return the total element count implied by a tensor (ndarray) shape.

  Args:
    shape: A list or tuple representing a tensor (ndarray) shape.

  Returns:
    The product of all dimensions; 1 for an empty shape (i.e., a scalar).
  """
  total = 1
  for extent in shape:
    total = total * extent
  return total
def parse_time_indices(s):
  """Parse a string as time indices.

  Args:
    s: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10'

  Returns:
    A slice object.

  Raises:
    ValueError: If `s` does not represent valid time indices.
  """
  # The underlying parser expects a bracketed slicing expression.
  bracketed = s if s.startswith('[') else '[' + s + ']'
  parsed = command_parser._parse_slices(bracketed)
  if len(parsed) == 1:
    return parsed[0]
  raise ValueError(
      'Invalid number of slicing objects in time indices (%d)' % len(parsed))
def translate_dtype(dtype):
  """Translate numpy dtype into a string.

  The 'object' type is understood as a TensorFlow string and translated into
  'string'.

  Args:
    dtype: A numpy dtype object.

  Returns:
    A string representing the data type.
  """
  name = str(dtype)
  if name == 'object':
    # String-type TensorFlow Tensors show up as object-type numpy arrays;
    # report them as 'string' for clarity.
    return 'string'
  return name
def process_buffers_for_display(s, limit=40):
  """Make buffers printable for the frontend, truncating long ones.

  Each buffer is capped at *limit* bytes (with a note appended describing
  the truncation) and run through `binascii.b2a_qp` so it is printable and
  JSON-serializable.  Nested lists/tuples are processed recursively.

  Args:
    s: A single buffer or a nested list/tuple of buffers.
    limit: Per-buffer length cap beyond which truncation occurs.

  Returns:
    A processed buffer, or a nested list of processed buffers.
  """
  if isinstance(s, (list, tuple)):
    return [process_buffers_for_display(item, limit=limit) for item in s]
  if len(s) <= limit:
    return binascii.b2a_qp(s)
  return (binascii.b2a_qp(s[:limit]) +
          b' (length-%d truncated at %d bytes)' % (len(s), limit))
def array_view(array, slicing=None, mapping=None):
  """View a slice or the entirety of an ndarray.

  Args:
    array: The input array, as an numpy.ndarray.
    slicing: Optional slicing string, e.g., "[:, 1:3, :]".
    mapping: Optional mapping string. Supported mappings:
      `None` or case-insensitive `'None'`: Unmapped nested list.
      `'image/png'`: Image encoding of a 2D sliced array or 3D sliced array
        with 3 as the last dimension. If the sliced array is not 2D or 3D
        with 3 as the last dimension, a `ValueError` will be thrown.
      `health-pill`: A succinct summary of the numeric values of a tensor.

  Returns:
    1. dtype as a `str`.
    2. shape of the sliced array, as a tuple of `int`s.
    3. the potentially sliced values, as a nested `list`.
  """
  dtype = translate_dtype(array.dtype)
  if slicing:
    sliced_array = array[command_parser._parse_slices(slicing)]
  else:
    sliced_array = array
  if np.isscalar(sliced_array) and str(dtype) == 'string':
    # Slicing a string tensor down to one element yields a bare Python
    # string instead of an ndarray; re-wrap it so the returned shape and
    # nesting preserve the rank of the original array.
    rank = len(array.shape)
    nested = sliced_array
    for _ in range(rank):
      nested = [nested]
    return dtype, (1,) * rank, nested
  shape = sliced_array.shape
  if mapping == "image/png":
    rank = len(shape)
    if rank == 2:
      return dtype, shape, array_to_base64_png(sliced_array)
    if rank == 3:
      raise NotImplementedError(
          "image/png mapping for 3D array has not been implemented")
    raise ValueError("Invalid rank for image/png mapping: %d" % rank)
  if mapping == 'health-pill':
    return dtype, shape, health_pill_calc.calc_health_pill(array)
  if mapping is None or mapping == '' or mapping.lower() == 'none':
    return dtype, shape, sliced_array.tolist()
  raise ValueError("Invalid mapping: %s" % mapping)
# Number of color channels in the PNGs produced by array_to_base64_png.
IMAGE_COLOR_CHANNELS = 3
# Sentinel pixel colors used to mark non-finite values in rendered images.
POSITIVE_INFINITY_RGB = (0, 62, 212)  # +inf --> Blue.
NEGATIVE_INFINITY_RGB = (255, 127, 0)  # -inf --> Orange.
NAN_RGB = (221, 47, 45)  # nan -> Red.
def array_to_base64_png(array):
  """Convert an array into base64-enoded PNG image.

  Args:
    array: A 2D np.ndarray or nested list of items.

  Returns:
    A base64-encoded string the image. The image is grayscale if the array
    is 2D. The image is RGB color if the image is 3D with lsat dimension
    equal to 3.

  Raises:
    ValueError: If the input `array` is not rank-2, or if the rank-2
      `array` is empty.
  """
  # TODO(cais): Deal with 3D case.
  # TODO(cais): If there are None values in here, replace them with all NaNs.
  array = np.array(array, dtype=np.float32)
  if len(array.shape) != 2:
    raise ValueError(
        "Expected rank-2 array; received rank-%d array." % len(array.shape))
  if not np.size(array):
    raise ValueError(
        "Cannot encode an empty array (size: %s) as image." % (array.shape,))
  # Masks for the non-finite categories; `&`/`~` on boolean arrays are
  # equivalent to np.logical_and / np.logical_not.
  infinite = np.isinf(array)
  positive = array > 0.0
  pos_inf_mask = infinite & positive
  neg_inf_mask = infinite & ~positive
  nan_mask = np.isnan(array)
  finite_indices = np.where(~infinite & ~nan_mask)
  if np.size(finite_indices):
    # Finite subset is not empty: min-max scale finite values to 0..255.
    lo = np.min(array[finite_indices])
    hi = np.max(array[finite_indices])
    scaled = np.array((array - lo) / (hi - lo) * 255,
                      dtype=np.uint8)
    rgb = np.repeat(np.expand_dims(scaled, -1), IMAGE_COLOR_CHANNELS, axis=-1)
  else:
    rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8)
  # Color-code pixels that correspond to infinities and nans.
  rgb[pos_inf_mask] = POSITIVE_INFINITY_RGB
  rgb[neg_inf_mask] = NEGATIVE_INFINITY_RGB
  rgb[nan_mask] = NAN_RGB
  return base64.b64encode(util.encode_png(rgb))
| [
"numpy.isscalar",
"numpy.logical_and",
"numpy.size",
"numpy.logical_not",
"numpy.max",
"numpy.array",
"numpy.zeros",
"tensorboard.utils.command_parser._parse_slices",
"tensorboard.build_with_tf.use_tf",
"numpy.isnan",
"binascii.b2a_qp",
"numpy.min",
"tensorboard.util.encode_png",
"numpy.ex... | [((943, 965), 'tensorboard.build_with_tf.use_tf', 'build_with_tf.use_tf', ([], {}), '()\n', (963, 965), False, 'from tensorboard import build_with_tf, util\n'), ((1713, 1744), 'tensorboard.utils.command_parser._parse_slices', 'command_parser._parse_slices', (['s'], {}), '(s)\n', (1741, 1744), False, 'from tensorboard.utils import command_parser\n'), ((6521, 6554), 'numpy.array', 'np.array', (['array'], {'dtype': 'np.float32'}), '(array, dtype=np.float32)\n', (6529, 6554), True, 'import numpy as np\n'), ((6824, 6839), 'numpy.isinf', 'np.isinf', (['array'], {}), '(array)\n', (6832, 6839), True, 'import numpy as np\n'), ((6893, 6933), 'numpy.logical_and', 'np.logical_and', (['is_infinity', 'is_positive'], {}), '(is_infinity, is_positive)\n', (6907, 6933), True, 'import numpy as np\n'), ((7067, 7082), 'numpy.isnan', 'np.isnan', (['array'], {}), '(array)\n', (7075, 7082), True, 'import numpy as np\n'), ((7228, 7251), 'numpy.size', 'np.size', (['finite_indices'], {}), '(finite_indices)\n', (7235, 7251), True, 'import numpy as np\n'), ((4527, 4552), 'numpy.isscalar', 'np.isscalar', (['sliced_array'], {}), '(sliced_array)\n', (4538, 4552), True, 'import numpy as np\n'), ((6691, 6705), 'numpy.size', 'np.size', (['array'], {}), '(array)\n', (6698, 6705), True, 'import numpy as np\n'), ((7027, 7054), 'numpy.logical_not', 'np.logical_not', (['is_positive'], {}), '(is_positive)\n', (7041, 7054), True, 'import numpy as np\n'), ((7300, 7329), 'numpy.min', 'np.min', (['array[finite_indices]'], {}), '(array[finite_indices])\n', (7306, 7329), True, 'import numpy as np\n'), ((7343, 7372), 'numpy.max', 'np.max', (['array[finite_indices]'], {}), '(array[finite_indices])\n', (7349, 7372), True, 'import numpy as np\n'), ((7386, 7454), 'numpy.array', 'np.array', (['((array - minval) / (maxval - minval) * 255)'], {'dtype': 'np.uint8'}), '((array - minval) / (maxval - minval) * 255, dtype=np.uint8)\n', (7394, 7454), True, 'import numpy as np\n'), ((7574, 7637), 'numpy.zeros', 
'np.zeros', (['(array.shape + (IMAGE_COLOR_CHANNELS,))'], {'dtype': 'np.uint8'}), '(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8)\n', (7582, 7637), True, 'import numpy as np\n'), ((7865, 7885), 'tensorboard.util.encode_png', 'util.encode_png', (['rgb'], {}), '(rgb)\n', (7880, 7885), False, 'from tensorboard import build_with_tf, util\n'), ((3494, 3512), 'binascii.b2a_qp', 'binascii.b2a_qp', (['s'], {}), '(s)\n', (3509, 3512), False, 'import binascii\n'), ((4441, 4478), 'tensorboard.utils.command_parser._parse_slices', 'command_parser._parse_slices', (['slicing'], {}), '(slicing)\n', (4469, 4478), False, 'from tensorboard.utils import command_parser\n'), ((7126, 7153), 'numpy.logical_not', 'np.logical_not', (['is_infinity'], {}), '(is_infinity)\n', (7140, 7153), True, 'import numpy as np\n'), ((7198, 7220), 'numpy.logical_not', 'np.logical_not', (['is_nan'], {}), '(is_nan)\n', (7212, 7220), True, 'import numpy as np\n'), ((7497, 7523), 'numpy.expand_dims', 'np.expand_dims', (['scaled', '(-1)'], {}), '(scaled, -1)\n', (7511, 7523), True, 'import numpy as np\n'), ((3371, 3397), 'binascii.b2a_qp', 'binascii.b2a_qp', (['s[:limit]'], {}), '(s[:limit])\n', (3386, 3397), False, 'import binascii\n'), ((5518, 5558), 'tensorboard.plugins.debugger.health_pill_calc.calc_health_pill', 'health_pill_calc.calc_health_pill', (['array'], {}), '(array)\n', (5551, 5558), False, 'from tensorboard.plugins.debugger import health_pill_calc\n')] |
"""Train a Sarsa(lambda) agent on TestEnv and print the learned Q-table."""
import numpy as np
from RL_brain import SarsaLambda
from environment import TestEnv
np.set_printoptions(precision=2, suppress=True)
env = TestEnv(10)
print("Observation_space{}\nAction_space{}".
      format(env.observation_space, env.action_space))
# Learner over the environment's discrete action set;
# reward_decay=1 means returns are undiscounted.
RL = SarsaLambda(range(env.action_space.n), reward_decay=1)
# Run 50 training episodes.
for i in range(50):
    s = env.reset()
    while True:
        # env.render()
        u = RL.choose_action(s)
        s_, r_, done, _ = env.step(u)
        # Update the learner with the observed transition (s, u, r_, s_).
        RL.learn(s, u, r_, s_)
        if done:
            print('Completed')
            break
        s = s_
print(RL.q_table)
env.close()
| [
"environment.TestEnv",
"numpy.set_printoptions"
] | [((97, 144), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (116, 144), True, 'import numpy as np\n'), ((152, 163), 'environment.TestEnv', 'TestEnv', (['(10)'], {}), '(10)\n', (159, 163), False, 'from environment import TestEnv\n')] |
from sklearn.decomposition import NMF, LatentDirichletAllocation
import numpy as np
import os
def write_lines(string, saveFile):
    """Append *string* plus a trailing newline to *saveFile*.

    The original opened the file without ever closing it (resource leak);
    a context manager now guarantees the handle is flushed and closed.

    Args:
        string: Text to append (a newline is added automatically).
        saveFile: Path of the file to append to.
    """
    with open(saveFile, "a") as handle:
        handle.write(string + "\n")
###
def save_topics(model, feature_names, saveFile, topic_nums, top_words_nums):
    """Append each topic's top words to *saveFile*.

    Args:
        model: Fitted topic model exposing ``components_`` (topic-word weights).
        feature_names: Vocabulary list aligned with the model's columns.
        saveFile: Path of the text file to append to.
        topic_nums: Unused here; kept for interface compatibility.
        top_words_nums: Number of highest-weighted words kept per topic.
    """
    for topic_index, weights in enumerate(model.components_):
        write_lines("Topic %s:" % (topic_index), saveFile)
        # argsort ascending, then slice backwards: top_words_nums indices
        # of the highest-weighted words, largest first.
        top_indices = weights.argsort()[:-top_words_nums - 1:-1]
        for word_index in top_indices:
            write_lines(feature_names[word_index].replace("\n", ""), saveFile)
        write_lines("", saveFile)
def get_labels(poiTagDir):
    """Read the POI tag file and return its lines.

    The original never closed the file handle; ``with`` fixes that leak.
    ``readlines()`` reproduces the original append-loop exactly, so each
    returned label still carries its trailing newline.

    Args:
        poiTagDir: Path to the tag file, one label per line.

    Returns:
        List of raw lines (newlines preserved).
    """
    with open(poiTagDir, "r") as f:
        return f.readlines()
def run_lda(documents, feature_names, saveFileDir, topic_nums=10, top_words_nums=20):
    """Fit an online LDA model and dump its outputs under *saveFileDir*.

    Writes five files, all prefixed ``LDA_TopWords_Topic<topic_nums>``:
    the per-topic top words, the topic-word matrix, the document-topic
    matrix, each document's dominant topic, and the model perplexity.

    Args:
        documents: Document-term matrix to fit/transform.
        feature_names: Vocabulary aligned with the matrix columns.
        saveFileDir: Directory the output files are written into.
        topic_nums: Number of LDA topics.
        top_words_nums: Top words to record per topic.
    """
    model = LatentDirichletAllocation(
        n_topics=topic_nums, max_iter=5, learning_method='online',
        learning_offset=50., random_state=0)
    # fit() returns the estimator itself, so splitting construction and
    # fitting is equivalent to the original chained call.
    model.fit(documents)
    header = "%s/LDA_TopWords_Topic%s" % (saveFileDir, topic_nums)
    top_words_file = "%s.txt" % (header)
    # Start the top-words file from scratch (write_lines appends).
    if os.path.exists(top_words_file):
        os.remove(top_words_file)
    ## Save Topic top words
    save_topics(model, feature_names, top_words_file, topic_nums, top_words_nums)
    ## Save Topic-words Matrix
    np.savetxt("%s_Topic_Words_matrix.txt" % (header), model.components_, fmt="%.6f")
    ## Save documents-topics
    doc_topic = model.transform(documents)
    np.savetxt("%s_Document_Topics_matrix.txt" % (header), doc_topic, fmt="%.6f")
    dominant = np.argmax(doc_topic, axis=1).reshape(len(doc_topic), 1)
    np.savetxt("%s_Document_Topic.txt" % (header), dominant, fmt="%d")
    ## Save perplexity
    np.savetxt("%s_perplexity.txt" % (header), [-1, model.perplexity(documents)], fmt="%.6f")
| [
"os.path.exists",
"numpy.argmax",
"numpy.savetxt",
"sklearn.decomposition.LatentDirichletAllocation",
"os.remove"
] | [((1269, 1293), 'os.path.exists', 'os.path.exists', (['saveFile'], {}), '(saveFile)\n', (1283, 1293), False, 'import os\n'), ((1474, 1563), 'numpy.savetxt', 'np.savetxt', (["('%s_Topic_Words_matrix.txt' % saveFileHeader)", 'lda.components_'], {'fmt': '"""%.6f"""'}), "('%s_Topic_Words_matrix.txt' % saveFileHeader, lda.components_,\n fmt='%.6f')\n", (1484, 1563), True, 'import numpy as np\n'), ((1647, 1741), 'numpy.savetxt', 'np.savetxt', (["('%s_Document_Topics_matrix.txt' % saveFileHeader)", 'documents_topics'], {'fmt': '"""%.6f"""'}), "('%s_Document_Topics_matrix.txt' % saveFileHeader,\n documents_topics, fmt='%.6f')\n", (1657, 1741), True, 'import numpy as np\n'), ((1303, 1322), 'os.remove', 'os.remove', (['saveFile'], {}), '(saveFile)\n', (1312, 1322), False, 'import os\n'), ((981, 1108), 'sklearn.decomposition.LatentDirichletAllocation', 'LatentDirichletAllocation', ([], {'n_topics': 'topic_nums', 'max_iter': '(5)', 'learning_method': '"""online"""', 'learning_offset': '(50.0)', 'random_state': '(0)'}), "(n_topics=topic_nums, max_iter=5, learning_method=\n 'online', learning_offset=50.0, random_state=0)\n", (1006, 1108), False, 'from sklearn.decomposition import NMF, LatentDirichletAllocation\n'), ((1797, 1832), 'numpy.argmax', 'np.argmax', (['documents_topics'], {'axis': '(1)'}), '(documents_topics, axis=1)\n', (1806, 1832), True, 'import numpy as np\n')] |
import sys
import numpy
from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui, congruence
from oasys.widgets.exchange import DataExchangeObject
from orangecontrib.xoppy.util.xoppy_xraylib_util import xpower_calc
from oasys.widgets.exchange import DataExchangeObject
from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget
import scipy.constants as codata
class Transfocator(XoppyWidget):
name = "Transfocator"
id = "orange.widgets.dataxpower"
description = "Power Absorbed and Transmitted by Optical Elements"
icon = "icons/xoppy_xpower.png"
priority = 10
category = ""
keywords = ["xoppy", "power", "Transfocator"]
inputs = [("ExchangeData", DataExchangeObject, "acceptExchangeData")]
SOURCE = Setting(2)
PINHOLE_APERTURE = Setting(1e-4)
NUMBER_LENS = Setting(26)
SUBSTANCE = Setting('Be')
THICK = Setting(0.05)
DENS = Setting('?')
ENER_MIN = Setting(10000.0)
ENER_MAX = Setting(200000.0)
ENER_N = Setting(2000)
SOURCE_FILE = Setting("?")
FILE_DUMP = 0
    def build_gui(self):
        """Build the control-area GUI: one labelled widget per setting.

        Labels and visibility conditions come from ``unitLabels()`` /
        ``unitFlags()``; ``idx`` walks both lists in parallel, so the order of
        the widget sections below must match the order of those lists.
        """
        self.leftWidgetPart.setSizePolicy(QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding))
        self.leftWidgetPart.setMaximumWidth(self.CONTROL_AREA_WIDTH + 20)
        self.leftWidgetPart.updateGeometry()
        box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters", orientation="vertical", width=self.CONTROL_AREA_WIDTH-10)
        idx = -1
        # widget index 1
        idx += 1
        box1 = gui.widgetBox(box)
        self.box_source = gui.comboBox(box1, self, "SOURCE",
                                    label=self.unitLabels()[idx], addSpace=False,
                                    items=['From Oasys wire', 'Normalized to 1',
                                           'From external file. '],
                                    valueType=int, orientation="horizontal", labelWidth=150)
        self.show_at(self.unitFlags()[idx], box1)
        #widget index 1.5
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "PINHOLE_APERTURE",
                          label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 2
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "NUMBER_LENS",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=int, orientation="horizontal", labelWidth=150)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 3
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "SUBSTANCE",
                          label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 4
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "THICK",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=float, orientation="horizontal", labelWidth=150)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 5
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "DENS",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=float, orientation="horizontal", labelWidth=150)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 6
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "ENER_MIN",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 7
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "ENER_MAX",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 8
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "ENER_N",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)
        # widget index 9 *********** File Browser ******************
        idx += 1
        box1 = gui.widgetBox(box)
        file_box_id = oasysgui.widgetBox(box1, "", addSpace=False, orientation="horizontal")
        self.file_id = oasysgui.lineEdit(file_box_id, self, "SOURCE_FILE", self.unitLabels()[idx],
                                           labelWidth=100, valueType=str, orientation="horizontal")
        gui.button(file_box_id, self, "...", callback=self.select_input_file, width=25)
        self.show_at(self.unitFlags()[idx], box1)
        #widget index 10
        idx += 1
        box1 = gui.widgetBox(box)
        gui.separator(box1, height=7)
        gui.comboBox(box1, self, "FILE_DUMP",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=['No', 'Yes (transfo.spec)'],
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)
        # No spectrum received yet; set by acceptExchangeData / xoppy_calc_xpower.
        self.input_spectrum = None
def select_input_file(self):
self.file_id.setText(oasysgui.selectFileFromDialog(self, self.SOURCE_FILE,
"Open 2-columns file with spectral power",
file_extension_filter="ascii dat (*.dat *.txt *spec)"))
def unitLabels(self):
return ['Input beam:','Pinhole aperture [m]', 'Number of Lens','Element','Thickness [mm]',
'Density g/cm^3',
'From energy [eV]: ',
'To energy [eV]:',
'Energy points: ',
'File with input beam spectral power:',"Dump file"]
def unitFlags(self):
return ['True','True','True','True','True',
'True',
'self.SOURCE == 1',
'self.SOURCE == 1',
'self.SOURCE == 1',
'self.SOURCE == 2',
'True']
    def get_help_name(self):
        """Return the key used to locate this widget's help page."""
        return 'Transfocator'
def selectFile(self):
self.le_source_file.setText(oasysgui.selectFileFromDialog(self, self.SOURCE_FILE, "Open Source File", file_extension_filter="*.*"))
    def acceptExchangeData(self, exchangeData):
        """Receive spectral data from an upstream XOPPY widget and recompute.

        The sending widget's name decides which column of the exchanged
        ``xoppy_data`` array holds the flux (``index_flux``) and whether that
        flux is expressed per 0.1% bandwidth and must be converted to a
        per-eV quantity (``no_bandwidth`` is True when no conversion is
        needed).  The resulting (energy, spectral power) stack is stored in
        ``self.input_spectrum`` and the calculation is triggered.  Any
        failure is reported to the user in a message box.
        """
        self.input_spectrum = None
        self.SOURCE = 0
        # self.box_source.setCurrentIndex(self.SOURCE)
        try:
            if not exchangeData is None:
                if exchangeData.get_program_name() == "XOPPY":
                    no_bandwidth = False
                    # Map each known upstream widget to its flux column.
                    if exchangeData.get_widget_name() =="UNDULATOR_FLUX" :
                        # self.SOURCE_FILE = "xoppy_undulator_flux"
                        no_bandwidth = True
                        index_flux = 2
                    elif exchangeData.get_widget_name() == "BM" :
                        if exchangeData.get_content("is_log_plot") == 1:
                            raise Exception("Logaritmic X scale of Xoppy Energy distribution not supported")
                        if exchangeData.get_content("calculation_type") == 0 and exchangeData.get_content("psi") == 0:
                            # self.SOURCE_FILE = "xoppy_bm_flux"
                            no_bandwidth = True
                            index_flux = 6
                        else:
                            raise Exception("Xoppy result is not an Flux vs Energy distribution integrated in Psi")
                    elif exchangeData.get_widget_name() =="XWIGGLER" :
                        # self.SOURCE_FILE = "xoppy_xwiggler_flux"
                        no_bandwidth = True
                        index_flux = 2
                    elif exchangeData.get_widget_name() =="WS" :
                        # self.SOURCE_FILE = "xoppy_xwiggler_flux"
                        no_bandwidth = True
                        index_flux = 2
                    elif exchangeData.get_widget_name() =="XTUBES" :
                        # self.SOURCE_FILE = "xoppy_xtubes_flux"
                        index_flux = 1
                        no_bandwidth = True
                    elif exchangeData.get_widget_name() =="XTUBE_W" :
                        # self.SOURCE_FILE = "xoppy_xtube_w_flux"
                        index_flux = 1
                        no_bandwidth = True
                    elif exchangeData.get_widget_name() =="BLACK_BODY" :
                        # self.SOURCE_FILE = "xoppy_black_body_flux"
                        no_bandwidth = True
                        index_flux = 2
                    elif exchangeData.get_widget_name() =="UNDULATOR_RADIATION" :
                        # self.SOURCE_FILE = "xoppy_undulator_radiation"
                        no_bandwidth = True
                        index_flux = 1
                    elif exchangeData.get_widget_name() =="POWER" :
                        # self.SOURCE_FILE = "xoppy_undulator_power"
                        no_bandwidth = True
                        index_flux = -1
                    elif exchangeData.get_widget_name() =="POWER3D" :
                        # self.SOURCE_FILE = "xoppy_power3d"
                        no_bandwidth = True
                        index_flux = 1
                    else:
                        raise Exception("Xoppy Source not recognized")
                    # self.SOURCE_FILE += "_" + str(id(self)) + ".dat"
                    spectrum = exchangeData.get_content("xoppy_data")
                    # 3D radiation data is integrated over the two spatial axes
                    # (area element (h[1]-h[0])*(v[1]-v[0])) and converted with
                    # the electron charge to get spectral power vs energy.
                    if exchangeData.get_widget_name() =="UNDULATOR_RADIATION" or \
                        exchangeData.get_widget_name() =="POWER3D":
                        [p, e, h, v ] = spectrum
                        tmp = p.sum(axis=2).sum(axis=1)*(h[1]-h[0])*(v[1]-v[0])*codata.e*1e3
                        spectrum = numpy.vstack((e,p.sum(axis=2).sum(axis=1)*(h[1]-h[0])*(v[1]-v[0])*
                                                      codata.e*1e3))
                        self.input_spectrum = spectrum
                    else:
                        # Convert flux per 0.1% bandwidth to flux per eV when needed.
                        if not no_bandwidth:
                            spectrum[:,index_flux] /= 0.001*spectrum[:,0]
                        self.input_spectrum = numpy.vstack((spectrum[:,0],spectrum[:,index_flux]))
            self.process_showers()
            self.compute()
        except Exception as exception:
            QMessageBox.critical(self, "Error",
                                 str(exception),
                                 QMessageBox.Ok)
            #raise exception
    def check_fields(self):
        """Validate the GUI settings; the congruence helpers raise on bad values."""
        self.NUMBER_LENS = congruence.checkStrictlyPositiveNumber(self.NUMBER_LENS, "Number of Lens")
        self.THICK = congruence.checkStrictlyPositiveNumber(self.THICK, "Thickness")
        # The energy grid is only user-defined for a normalized source.
        if self.SOURCE == 1:
            self.ENER_MIN = congruence.checkPositiveNumber(self.ENER_MIN, "Energy from")
            self.ENER_MAX = congruence.checkStrictlyPositiveNumber(self.ENER_MAX, "Energy to")
            congruence.checkLessThan(self.ENER_MIN, self.ENER_MAX, "Energy from", "Energy to")
            # NOTE(review): the validated value is stored in self.NPOINTS while the
            # underlying setting is self.ENER_N -- confirm this asymmetry is intentional.
            self.NPOINTS = congruence.checkStrictlyPositiveNumber(self.ENER_N, "Energy Points")
        elif self.SOURCE == 2:
            congruence.checkFile(self.SOURCE_FILE)
    def do_xoppy_calculation(self):
        """Framework entry point: run the transfocator power calculation."""
        return self.xoppy_calc_xpower()
    def extract_data_from_xoppy_output(self, calculation_output):
        """Return the calculation output unchanged (it is already a DataExchangeObject)."""
        return calculation_output
    def get_data_exchange_widget_name(self):
        """Name under which the outgoing exchange data is registered."""
        return "POWER"
def getTitles(self):
return ['Input Beam','Transmitivity','Absorption','Intensity']
def getXTitles(self):
return ["Energy [eV]","Energy [eV]","Energy [eV]","Energy [eV]"]
def getYTitles(self):
return ["Source",'Transmitivity','Absorption',"Intensity"]
def getVariablesToPlot(self):
return [(0, 1),(0, 2),(0, 3),(0, 4)]
def getLogPlot(self):
return [(False,False),(False, False),(False, False),(False,False) ]
    def Transmitivity(self):
        # NOTE(review): E and T are not defined anywhere in this method or in
        # the visible class, so calling this raises NameError. This looks like
        # an unfinished stub -- confirm the intended implementation (probably
        # the energy grid and transmissivity arrays) before relying on it.
        return E,T
    def xoppy_calc_xpower(self):
        """Assemble the (energy, source, transmitted, absorbed, intensity) table.

        Builds the input spectrum according to ``self.SOURCE`` (received beam,
        normalized-to-1 grid, or external 2-column file), then collects the
        per-lens transmissivities into a cumulative product and packs everything
        into a DataExchangeObject registered as "POWER".

        NOTE(review): as written this method cannot run -- ``out_dictionary``
        and ``substance`` are never defined here (presumably a call to the
        imported ``xpower_calc`` that produced ``out_dictionary`` was lost),
        and ``output_file`` is computed but never used. Confirm against the
        original widget before use.
        """
        Result=[]
        Result_Absorption = []
        list=[]
        cumulated_data = {}
        # Build energies/source according to the selected input-beam mode.
        if self.SOURCE == 0:
            if self.input_spectrum is None:
                raise Exception("No input beam")
            else:
                energies = self.input_spectrum[0,:].copy()
                source = self.input_spectrum[1,:].copy()
        elif self.SOURCE == 1:
            energies = numpy.linspace(self.ENER_MIN,self.ENER_MAX,self.ENER_N)
            source = numpy.ones(energies.size)
            tmp = numpy.vstack( (energies,source))
            self.input_spectrum = source
        elif self.SOURCE == 2:
            if self.SOURCE == 2: source_file = self.SOURCE_FILE
            try:
                tmp = numpy.loadtxt(source_file)
                energies = tmp[:,0]
                source = tmp[:,1]
                self.input_spectrum = source
            except:
                print("Error loading file %s "%(source_file))
                raise
        # Optional dump file for the backend calculation.
        if self.FILE_DUMP == 0:
            output_file = None
        else:
            output_file = "Transfo.spec"
        try:
            print(out_dictionary["info"])
        except:
            pass
        #calculate attenuators total
        # Columns 0/1 of the backend data are energy and source spectral power.
        Result.append((out_dictionary['data'][0]).tolist())
        Result.append((out_dictionary['data'][1]).tolist())
        # Every 5th column starting at 4 holds one lens transmissivity;
        # their element-wise product is the total transmissivity.
        for k in range(self.NUMBER_LENS):
            list.append(out_dictionary['data'][4 + 5*k])
        Result.append(List_Product(list))
        # Absorption is the complement of the total transmissivity.
        for k in range(len(Result[0])):
            Result_Absorption.append(1-Result[-1][k])
        Result.append(Result_Absorption)
        Result.append((out_dictionary['data'][5*len(substance)+1]).tolist())
        cumulated_data['data']=numpy.array(Result)
        #send exchange
        calculated_data = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name())
        try:
            calculated_data.add_content("xoppy_data", cumulated_data["data"].T)
        except:
            pass
        return calculated_data
def List_Product(factor_lists):
    """Element-wise product across a list of equal-length sequences.

    Used to combine per-lens transmissivity curves into the total
    transmissivity of the whole stack.

    Parameters
    ----------
    factor_lists: list of sequences
        Equal-length numeric sequences; the k-th entries of all of them are
        multiplied together.

    Returns
    -------
    list
        A list whose k-th entry is the product of the k-th entries of every
        input sequence (empty when ``factor_lists`` is empty).
    """
    # zip(*...) walks the k-th elements of every sequence together, replacing
    # the original index arithmetic and its builtin-shadowing `list` parameter.
    products = []
    for column in zip(*factor_lists):
        product = 1
        for value in column:
            product *= value
        products.append(product)
    return products
if __name__ == "__main__":
    # Manual smoke test: feed the widget a synthetic exchange object and show it.
    from oasys.widgets.exchange import DataExchangeObject

    input_data_type = "POWER"

    if input_data_type == "POWER":
        # create fake UNDULATOR_FLUX xoppy exchange data
        e = numpy.linspace(1000.0, 10000.0, 100)
        source = e/10
        received_data = DataExchangeObject("XOPPY", "POWER")
        received_data.add_content("xoppy_data", numpy.vstack((e,e,source)).T)
        received_data.add_content("xoppy_code", "US")
    elif input_data_type == "POWER3D":
        # create undulator_radiation xoppy exchange data
        from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation
        e, h, v, p, code = xoppy_calc_undulator_radiation(ELECTRONENERGY=6.04,ELECTRONENERGYSPREAD=0.001,ELECTRONCURRENT=0.2,\
                                       ELECTRONBEAMSIZEH=0.000395,ELECTRONBEAMSIZEV=9.9e-06,\
                                       ELECTRONBEAMDIVERGENCEH=1.05e-05,ELECTRONBEAMDIVERGENCEV=3.9e-06,\
                                       PERIODID=0.018,NPERIODS=222,KV=1.68,DISTANCE=30.0,
                                       SETRESONANCE=0,HARMONICNUMBER=1,
                                       GAPH=0.001,GAPV=0.001,\
                                       HSLITPOINTS=41,VSLITPOINTS=41,METHOD=0,
                                       PHOTONENERGYMIN=7000,PHOTONENERGYMAX=8100,PHOTONENERGYPOINTS=20,
                                       USEEMITTANCES=1)
        # Fixed: a dead DataExchangeObject("XOPPY", "POWER3D") was created here
        # and immediately overwritten; only the UNDULATOR_RADIATION object is used.
        received_data = DataExchangeObject("XOPPY", "UNDULATOR_RADIATION")
        received_data.add_content("xoppy_data", [p, e, h, v])
        received_data.add_content("xoppy_code", code)

    app = QApplication(sys.argv)
    w = Transfocator()
    w.acceptExchangeData(received_data)
    w.show()
    app.exec()
    w.saveSettings()
"oasys.widgets.gui.widgetBox",
"oasys.widgets.congruence.checkStrictlyPositiveNumber",
"numpy.array",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QSizePolicy",
"orangewidget.settings.Setting",
"oasys.widgets.congruence.checkFile",
"oasys.widgets.congruence.checkPositiveNumber",
"orangecontrib.x... | [((881, 891), 'orangewidget.settings.Setting', 'Setting', (['(2)'], {}), '(2)\n', (888, 891), False, 'from orangewidget.settings import Setting\n'), ((915, 930), 'orangewidget.settings.Setting', 'Setting', (['(0.0001)'], {}), '(0.0001)\n', (922, 930), False, 'from orangewidget.settings import Setting\n'), ((947, 958), 'orangewidget.settings.Setting', 'Setting', (['(26)'], {}), '(26)\n', (954, 958), False, 'from orangewidget.settings import Setting\n'), ((975, 988), 'orangewidget.settings.Setting', 'Setting', (['"""Be"""'], {}), "('Be')\n", (982, 988), False, 'from orangewidget.settings import Setting\n'), ((1001, 1014), 'orangewidget.settings.Setting', 'Setting', (['(0.05)'], {}), '(0.05)\n', (1008, 1014), False, 'from orangewidget.settings import Setting\n'), ((1026, 1038), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1033, 1038), False, 'from orangewidget.settings import Setting\n'), ((1054, 1070), 'orangewidget.settings.Setting', 'Setting', (['(10000.0)'], {}), '(10000.0)\n', (1061, 1070), False, 'from orangewidget.settings import Setting\n'), ((1086, 1103), 'orangewidget.settings.Setting', 'Setting', (['(200000.0)'], {}), '(200000.0)\n', (1093, 1103), False, 'from orangewidget.settings import Setting\n'), ((1117, 1130), 'orangewidget.settings.Setting', 'Setting', (['(2000)'], {}), '(2000)\n', (1124, 1130), False, 'from orangewidget.settings import Setting\n'), ((1149, 1161), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1156, 1161), False, 'from orangewidget.settings import Setting\n'), ((16710, 16732), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (16722, 16732), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy\n'), ((1456, 1589), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['self.controlArea', "(self.name + ' Input Parameters')"], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 
10)'}), "(self.controlArea, self.name + ' Input Parameters',\n orientation='vertical', width=self.CONTROL_AREA_WIDTH - 10)\n", (1474, 1589), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((1660, 1678), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (1673, 1678), False, 'from orangewidget import gui\n'), ((2201, 2219), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2214, 2219), False, 'from orangewidget import gui\n'), ((2500, 2518), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2513, 2518), False, 'from orangewidget import gui\n'), ((2862, 2880), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2875, 2880), False, 'from orangewidget import gui\n'), ((3154, 3172), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3167, 3172), False, 'from orangewidget import gui\n'), ((3511, 3529), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3524, 3529), False, 'from orangewidget import gui\n'), ((3841, 3859), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3854, 3859), False, 'from orangewidget import gui\n'), ((4175, 4193), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4188, 4193), False, 'from orangewidget import gui\n'), ((4509, 4527), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4522, 4527), False, 'from orangewidget import gui\n'), ((4885, 4903), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4898, 4903), False, 'from orangewidget import gui\n'), ((4926, 4996), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['box1', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(box1, '', addSpace=False, orientation='horizontal')\n", (4944, 4996), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((5202, 5281), 'orangewidget.gui.button', 'gui.button', 
(['file_box_id', 'self', '"""..."""'], {'callback': 'self.select_input_file', 'width': '(25)'}), "(file_box_id, self, '...', callback=self.select_input_file, width=25)\n", (5212, 5281), False, 'from orangewidget import gui\n'), ((5391, 5409), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5404, 5409), False, 'from orangewidget import gui\n'), ((5418, 5447), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (5431, 5447), False, 'from orangewidget import gui\n'), ((11270, 11344), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.NUMBER_LENS', '"""Number of Lens"""'], {}), "(self.NUMBER_LENS, 'Number of Lens')\n", (11308, 11344), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((11366, 11429), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.THICK', '"""Thickness"""'], {}), "(self.THICK, 'Thickness')\n", (11404, 11429), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((14460, 14479), 'numpy.array', 'numpy.array', (['Result'], {}), '(Result)\n', (14471, 14479), False, 'import numpy\n'), ((15177, 15213), 'numpy.linspace', 'numpy.linspace', (['(1000.0)', '(10000.0)', '(100)'], {}), '(1000.0, 10000.0, 100)\n', (15191, 15213), False, 'import numpy\n'), ((15260, 15296), 'oasys.widgets.exchange.DataExchangeObject', 'DataExchangeObject', (['"""XOPPY"""', '"""POWER"""'], {}), "('XOPPY', 'POWER')\n", (15278, 15296), False, 'from oasys.widgets.exchange import DataExchangeObject\n'), ((1249, 1320), 'PyQt5.QtWidgets.QSizePolicy', 'QSizePolicy', (['QSizePolicy.MinimumExpanding', 'QSizePolicy.MinimumExpanding'], {}), '(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n', (1260, 1320), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy\n'), ((5847, 6007), 'oasys.widgets.gui.selectFileFromDialog', 
'oasysgui.selectFileFromDialog', (['self', 'self.SOURCE_FILE', '"""Open 2-columns file with spectral power"""'], {'file_extension_filter': '"""ascii dat (*.dat *.txt *spec)"""'}), "(self, self.SOURCE_FILE,\n 'Open 2-columns file with spectral power', file_extension_filter=\n 'ascii dat (*.dat *.txt *spec)')\n", (5876, 6007), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((6831, 6937), 'oasys.widgets.gui.selectFileFromDialog', 'oasysgui.selectFileFromDialog', (['self', 'self.SOURCE_FILE', '"""Open Source File"""'], {'file_extension_filter': '"""*.*"""'}), "(self, self.SOURCE_FILE, 'Open Source File',\n file_extension_filter='*.*')\n", (6860, 6937), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((11487, 11547), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.ENER_MIN', '"""Energy from"""'], {}), "(self.ENER_MIN, 'Energy from')\n", (11517, 11547), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((11576, 11642), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.ENER_MAX', '"""Energy to"""'], {}), "(self.ENER_MAX, 'Energy to')\n", (11614, 11642), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((11655, 11741), 'oasys.widgets.congruence.checkLessThan', 'congruence.checkLessThan', (['self.ENER_MIN', 'self.ENER_MAX', '"""Energy from"""', '"""Energy to"""'], {}), "(self.ENER_MIN, self.ENER_MAX, 'Energy from',\n 'Energy to')\n", (11679, 11741), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((11765, 11833), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.ENER_N', '"""Energy Points"""'], {}), "(self.ENER_N, 'Energy Points')\n", (11803, 11833), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((15646, 16131), 'orangecontrib.xoppy.util.xoppy_undulators.xoppy_calc_undulator_radiation', 
'xoppy_calc_undulator_radiation', ([], {'ELECTRONENERGY': '(6.04)', 'ELECTRONENERGYSPREAD': '(0.001)', 'ELECTRONCURRENT': '(0.2)', 'ELECTRONBEAMSIZEH': '(0.000395)', 'ELECTRONBEAMSIZEV': '(9.9e-06)', 'ELECTRONBEAMDIVERGENCEH': '(1.05e-05)', 'ELECTRONBEAMDIVERGENCEV': '(3.9e-06)', 'PERIODID': '(0.018)', 'NPERIODS': '(222)', 'KV': '(1.68)', 'DISTANCE': '(30.0)', 'SETRESONANCE': '(0)', 'HARMONICNUMBER': '(1)', 'GAPH': '(0.001)', 'GAPV': '(0.001)', 'HSLITPOINTS': '(41)', 'VSLITPOINTS': '(41)', 'METHOD': '(0)', 'PHOTONENERGYMIN': '(7000)', 'PHOTONENERGYMAX': '(8100)', 'PHOTONENERGYPOINTS': '(20)', 'USEEMITTANCES': '(1)'}), '(ELECTRONENERGY=6.04, ELECTRONENERGYSPREAD=\n 0.001, ELECTRONCURRENT=0.2, ELECTRONBEAMSIZEH=0.000395,\n ELECTRONBEAMSIZEV=9.9e-06, ELECTRONBEAMDIVERGENCEH=1.05e-05,\n ELECTRONBEAMDIVERGENCEV=3.9e-06, PERIODID=0.018, NPERIODS=222, KV=1.68,\n DISTANCE=30.0, SETRESONANCE=0, HARMONICNUMBER=1, GAPH=0.001, GAPV=0.001,\n HSLITPOINTS=41, VSLITPOINTS=41, METHOD=0, PHOTONENERGYMIN=7000,\n PHOTONENERGYMAX=8100, PHOTONENERGYPOINTS=20, USEEMITTANCES=1)\n', (15676, 16131), False, 'from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation\n'), ((16466, 16504), 'oasys.widgets.exchange.DataExchangeObject', 'DataExchangeObject', (['"""XOPPY"""', '"""POWER3D"""'], {}), "('XOPPY', 'POWER3D')\n", (16484, 16504), False, 'from oasys.widgets.exchange import DataExchangeObject\n'), ((16529, 16579), 'oasys.widgets.exchange.DataExchangeObject', 'DataExchangeObject', (['"""XOPPY"""', '"""UNDULATOR_RADIATION"""'], {}), "('XOPPY', 'UNDULATOR_RADIATION')\n", (16547, 16579), False, 'from oasys.widgets.exchange import DataExchangeObject\n'), ((11877, 11915), 'oasys.widgets.congruence.checkFile', 'congruence.checkFile', (['self.SOURCE_FILE'], {}), '(self.SOURCE_FILE)\n', (11897, 11915), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((13131, 13188), 'numpy.linspace', 'numpy.linspace', (['self.ENER_MIN', 'self.ENER_MAX', 'self.ENER_N'], 
{}), '(self.ENER_MIN, self.ENER_MAX, self.ENER_N)\n', (13145, 13188), False, 'import numpy\n'), ((13208, 13233), 'numpy.ones', 'numpy.ones', (['energies.size'], {}), '(energies.size)\n', (13218, 13233), False, 'import numpy\n'), ((13252, 13284), 'numpy.vstack', 'numpy.vstack', (['(energies, source)'], {}), '((energies, source))\n', (13264, 13284), False, 'import numpy\n'), ((15345, 15373), 'numpy.vstack', 'numpy.vstack', (['(e, e, source)'], {}), '((e, e, source))\n', (15357, 15373), False, 'import numpy\n'), ((10874, 10929), 'numpy.vstack', 'numpy.vstack', (['(spectrum[:, 0], spectrum[:, index_flux])'], {}), '((spectrum[:, 0], spectrum[:, index_flux]))\n', (10886, 10929), False, 'import numpy\n'), ((13460, 13486), 'numpy.loadtxt', 'numpy.loadtxt', (['source_file'], {}), '(source_file)\n', (13473, 13486), False, 'import numpy\n')] |
'''
Test Vid4 (SR) and REDS4 (SR-clean, SR-blur, deblur-clean, deblur-compression) datasets
'''
import os
import os.path as osp
import glob
import logging
import numpy as np
import cv2
import torch
import utils.util as util
import data.util as data_util
import models.archs.EDVR_arch as EDVR_arch
def main():
    """Evaluate a pretrained EDVR model on Vid4 / REDS4 and log PSNR statistics.

    All options (dataset ``data_mode``, ``stage``, ``flip_test``) are
    hard-coded below; restored frames (optional) and the log are written under
    ``../results/<data_mode>``.  PSNR is reported separately for center and
    border frames of each clip.

    Fix vs. original: the per-clip center-PSNR average is now guarded against
    clips with no center frames (``max_idx <= 2 * border_frame``), which
    previously raised ZeroDivisionError; the border average already had the
    symmetric guard.
    """
    #################
    # configurations
    #################
    device = torch.device('cuda')
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    data_mode = 'Vid4'  # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp
    # Vid4: SR
    # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur);
    #        blur (deblur-clean), blur_comp (deblur-compression).
    stage = 1  # 1 or 2, use two stage strategy for REDS dataset.
    flip_test = False
    ############################################################################
    #### model: pick the pretrained checkpoint matching data_mode/stage.
    if data_mode == 'Vid4':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth'
        else:
            raise ValueError('Vid4 does not support stage 2.')
    elif data_mode == 'sharp_bicubic':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth'
    elif data_mode == 'blur_bicubic':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth'
    elif data_mode == 'blur':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth'
    elif data_mode == 'blur_comp':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth'
    else:
        raise NotImplementedError

    if data_mode == 'Vid4':
        N_in = 7  # use N_in images to restore one HR image
    else:
        N_in = 5

    predeblur, HR_in = False, False
    back_RBs = 40
    if data_mode == 'blur_bicubic':
        predeblur = True
    if data_mode == 'blur' or data_mode == 'blur_comp':
        predeblur, HR_in = True, True
    if stage == 2:
        HR_in = True
        back_RBs = 20
    model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)

    #### dataset
    if data_mode == 'Vid4':
        test_dataset_folder = '../datasets/Vid4/BIx4'
        GT_dataset_folder = '../datasets/Vid4/GT'
    else:
        if stage == 1:
            test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode)
        else:
            test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4'
            print('You should modify the test_dataset_folder path for stage 2')
        GT_dataset_folder = '../datasets/REDS4/GT'

    #### evaluation
    crop_border = 0
    border_frame = N_in // 2  # border frames when evaluate
    # temporal padding mode
    if data_mode == 'Vid4' or data_mode == 'sharp_bicubic':
        padding = 'new_info'
    else:
        padding = 'replicate'
    save_imgs = True

    save_folder = '../results/{}'.format(data_mode)
    util.mkdirs(save_folder)
    util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True)
    logger = logging.getLogger('base')

    #### log info
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))

    #### set up the models
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    model = model.to(device)

    avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], []
    subfolder_name_l = []

    subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*')))
    subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*')))
    # for each subfolder
    for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l):
        subfolder_name = osp.basename(subfolder)
        subfolder_name_l.append(subfolder_name)
        save_subfolder = osp.join(save_folder, subfolder_name)

        img_path_l = sorted(glob.glob(osp.join(subfolder, '*')))
        max_idx = len(img_path_l)
        if save_imgs:
            util.mkdirs(save_subfolder)

        #### read LQ and GT images
        imgs_LQ = data_util.read_img_seq(subfolder)
        img_GT_l = []
        for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))):
            img_GT_l.append(data_util.read_img(None, img_GT_path))

        avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0

        # process each image
        for img_idx, img_path in enumerate(img_path_l):
            img_name = osp.splitext(osp.basename(img_path))[0]
            # Indices of the N_in neighbouring frames fed to the network.
            select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding)
            imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)

            if flip_test:
                output = util.flipx4_forward(model, imgs_in)
            else:
                output = util.single_forward(model, imgs_in)
            output = util.tensor2img(output.squeeze(0))

            if save_imgs:
                cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output)

            # calculate PSNR
            output = output / 255.
            GT = np.copy(img_GT_l[img_idx])
            # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel
            if data_mode == 'Vid4':  # bgr2y, [0, 1]
                GT = data_util.bgr2ycbcr(GT, only_y=True)
                output = data_util.bgr2ycbcr(output, only_y=True)

            output, GT = util.crop_border([output, GT], crop_border)
            crt_psnr = util.calculate_psnr(output * 255, GT * 255)
            logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr))

            if img_idx >= border_frame and img_idx < max_idx - border_frame:  # center frames
                avg_psnr_center += crt_psnr
                N_center += 1
            else:  # border frames
                avg_psnr_border += crt_psnr
                N_border += 1
        avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border)
        # Guard both averages against empty groups (short clips may have no
        # center frames at all).
        avg_psnr_center = 0 if N_center == 0 else avg_psnr_center / N_center
        avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border

        avg_psnr_l.append(avg_psnr)
        avg_psnr_center_l.append(avg_psnr_center)
        avg_psnr_border_l.append(avg_psnr_border)

        logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; '
                    'Center PSNR: {:.6f} dB for {} frames; '
                    'Border PSNR: {:.6f} dB for {} frames.'.format(subfolder_name, avg_psnr,
                                                                   (N_center + N_border),
                                                                   avg_psnr_center, N_center,
                                                                   avg_psnr_border, N_border))

    logger.info('################ Tidy Outputs ################')
    for subfolder_name, psnr, psnr_center, psnr_border in zip(subfolder_name_l, avg_psnr_l,
                                                              avg_psnr_center_l, avg_psnr_border_l):
        logger.info('Folder {} - Average PSNR: {:.6f} dB. '
                    'Center PSNR: {:.6f} dB. '
                    'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center,
                                                     psnr_border))
    logger.info('################ Final Results ################')
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))
    logger.info('Total Average PSNR: {:.6f} dB for {} clips. '
                'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format(
                    sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l),
                    sum(avg_psnr_center_l) / len(avg_psnr_center_l),
                    sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
# Standard script entry point.
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"utils.util.setup_logger",
"numpy.copy",
"models.archs.EDVR_arch.EDVR",
"data.util.read_img",
"torch.LongTensor",
"torch.load",
"os.path.join",
"utils.util.single_forward",
"data.util.read_img_seq",
"data.util.index_generation",
"utils.util.calculate_psnr",
"os.path.base... | [((391, 411), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (403, 411), False, 'import torch\n'), ((2495, 2570), 'models.archs.EDVR_arch.EDVR', 'EDVR_arch.EDVR', (['(128)', 'N_in', '(8)', '(5)', 'back_RBs'], {'predeblur': 'predeblur', 'HR_in': 'HR_in'}), '(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)\n', (2509, 2570), True, 'import models.archs.EDVR_arch as EDVR_arch\n'), ((3384, 3408), 'utils.util.mkdirs', 'util.mkdirs', (['save_folder'], {}), '(save_folder)\n', (3395, 3408), True, 'import utils.util as util\n'), ((3413, 3510), 'utils.util.setup_logger', 'util.setup_logger', (['"""base"""', 'save_folder', '"""test"""'], {'level': 'logging.INFO', 'screen': '(True)', 'tofile': '(True)'}), "('base', save_folder, 'test', level=logging.INFO, screen=\n True, tofile=True)\n", (3430, 3510), True, 'import utils.util as util\n'), ((3519, 3544), 'logging.getLogger', 'logging.getLogger', (['"""base"""'], {}), "('base')\n", (3536, 3544), False, 'import logging\n'), ((3899, 3921), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (3909, 3921), False, 'import torch\n'), ((4340, 4363), 'os.path.basename', 'osp.basename', (['subfolder'], {}), '(subfolder)\n', (4352, 4363), True, 'import os.path as osp\n'), ((4437, 4474), 'os.path.join', 'osp.join', (['save_folder', 'subfolder_name'], {}), '(save_folder, subfolder_name)\n', (4445, 4474), True, 'import os.path as osp\n'), ((4691, 4724), 'data.util.read_img_seq', 'data_util.read_img_seq', (['subfolder'], {}), '(subfolder)\n', (4713, 4724), True, 'import data.util as data_util\n'), ((4111, 4145), 'os.path.join', 'osp.join', (['test_dataset_folder', '"""*"""'], {}), "(test_dataset_folder, '*')\n", (4119, 4145), True, 'import os.path as osp\n'), ((4186, 4218), 'os.path.join', 'osp.join', (['GT_dataset_folder', '"""*"""'], {}), "(GT_dataset_folder, '*')\n", (4194, 4218), True, 'import os.path as osp\n'), ((4609, 4636), 'utils.util.mkdirs', 'util.mkdirs', 
(['save_subfolder'], {}), '(save_subfolder)\n', (4620, 4636), True, 'import utils.util as util\n'), ((5151, 5218), 'data.util.index_generation', 'data_util.index_generation', (['img_idx', 'max_idx', 'N_in'], {'padding': 'padding'}), '(img_idx, max_idx, N_in, padding=padding)\n', (5177, 5218), True, 'import data.util as data_util\n'), ((5740, 5766), 'numpy.copy', 'np.copy', (['img_GT_l[img_idx]'], {}), '(img_GT_l[img_idx])\n', (5747, 5766), True, 'import numpy as np\n'), ((6056, 6099), 'utils.util.crop_border', 'util.crop_border', (['[output, GT]', 'crop_border'], {}), '([output, GT], crop_border)\n', (6072, 6099), True, 'import utils.util as util\n'), ((6123, 6166), 'utils.util.calculate_psnr', 'util.calculate_psnr', (['(output * 255)', '(GT * 255)'], {}), '(output * 255, GT * 255)\n', (6142, 6166), True, 'import utils.util as util\n'), ((4514, 4538), 'os.path.join', 'osp.join', (['subfolder', '"""*"""'], {}), "(subfolder, '*')\n", (4522, 4538), True, 'import os.path as osp\n'), ((4791, 4818), 'os.path.join', 'osp.join', (['subfolder_GT', '"""*"""'], {}), "(subfolder_GT, '*')\n", (4799, 4818), True, 'import os.path as osp\n'), ((4850, 4887), 'data.util.read_img', 'data_util.read_img', (['None', 'img_GT_path'], {}), '(None, img_GT_path)\n', (4868, 4887), True, 'import data.util as data_util\n'), ((5371, 5406), 'utils.util.flipx4_forward', 'util.flipx4_forward', (['model', 'imgs_in'], {}), '(model, imgs_in)\n', (5390, 5406), True, 'import utils.util as util\n'), ((5450, 5485), 'utils.util.single_forward', 'util.single_forward', (['model', 'imgs_in'], {}), '(model, imgs_in)\n', (5469, 5485), True, 'import utils.util as util\n'), ((5927, 5963), 'data.util.bgr2ycbcr', 'data_util.bgr2ycbcr', (['GT'], {'only_y': '(True)'}), '(GT, only_y=True)\n', (5946, 5963), True, 'import data.util as data_util\n'), ((5989, 6029), 'data.util.bgr2ycbcr', 'data_util.bgr2ycbcr', (['output'], {'only_y': '(True)'}), '(output, only_y=True)\n', (6008, 6029), True, 'import data.util as 
data_util\n'), ((5099, 5121), 'os.path.basename', 'osp.basename', (['img_path'], {}), '(img_path)\n', (5111, 5121), True, 'import os.path as osp\n'), ((5265, 5293), 'torch.LongTensor', 'torch.LongTensor', (['select_idx'], {}), '(select_idx)\n', (5281, 5293), False, 'import torch\n')] |
"""
qgs tensor module
=================
This module computes and holds the tensor representing the tendencies of the model's equations.
Notes
-----
These are computed using the analytical expressions from:
* De Cruz, L., Demaeyer, J. and Vannitsem, S.: *The Modular Arbitrary-Order Ocean-Atmosphere Model: MAOOAM v1.0*,
  Geosci. Model Dev., **9**, 2793-2808, `doi:10.5194/gmd-9-2793-2016 <http://dx.doi.org/10.5194/gmd-9-2793-2016>`_, 2016.
* Reinhold, B. B., & Pierrehumbert, R. T. (1987). *Theories of multiple equilibria and weather regimes—A critical reexamination.
  Part II: Baroclinic two-layer models*. Journal of the atmospheric sciences, **44** (21), 3282-3303.
  `link <https://journals.ametsoc.org/doi/abs/10.1175/1520-0469(1987)044%3C3282%3ATOMEAW%3E2.0.CO%3B2>`_
"""
import numpy as np
import sparse as sp
class QgsTensor(object):
    r"""qgs tendencies tensor class.

    Parameters
    ----------
    atmospheric_inner_products: AtmosphericInnerProducts or None
        The inner products of the atmospheric basis functions on which the model's PDE atmospheric equations are projected.
        If None, disable the atmospheric tendencies.
    oceanic_inner_products: OceanicInnerProducts or None
        The inner products of the oceanic basis functions on which the model's PDE oceanic equations are projected.
        If None, disable the oceanic tendencies.

    Attributes
    ----------
    atmospheric_inner_products: AtmosphericInnerProducts or None
        The inner products of the atmospheric basis functions on which the model's PDE equations are projected.
        If None, the atmospheric tendencies are disabled.
    oceanic_inner_products: OceanicInnerProducts or None
        The inner products of the oceanic basis functions on which the model's PDE equations are projected.
        If None, the oceanic tendencies are disabled.
    params: QgParams
        The models parameters.
    tensor: sparse.COO(float)
        The tensor :math:`\mathcal{T}_{i,j,k}` :math:`i`-th components.
    jacobian_tensor: sparse.COO(float)
        The jacobian tensor :math:`\mathcal{T}_{i,j,k} + \mathcal{T}_{i,k,j}` :math:`i`-th components.
    """

    def __init__(self, atmospheric_inner_products=None, oceanic_inner_products=None):
        self.atmospheric_inner_products = atmospheric_inner_products
        self.oceanic_inner_products = oceanic_inner_products
        # Both inner-product objects carry a reference to the model parameters;
        # take it from whichever one is provided (oceanic wins if both are).
        if self.atmospheric_inner_products is not None:
            self.params = self.atmospheric_inner_products.params
        if self.oceanic_inner_products is not None:
            self.params = self.oceanic_inner_products.params
        self.tensor = None
        self.jacobian_tensor = None
        self.compute_tensor()

    def _psi_a(self, i):
        r"""Transform the :math:`\psi_{\mathrm a}` :math:`i`-th coefficient into the effective model's variable.

        Parameters
        ----------
        i: int
            The :math:`i`-th coefficients of :math:`\psi_{\mathrm a}`

        Returns
        -------
        int
            The effective model's variable.
        """
        return i

    def _theta_a(self, i):
        r"""Transform the :math:`\theta_{\mathrm a}` :math:`i`-th coefficient into the effective model's variable.

        Parameters
        ----------
        i: int
            The :math:`i`-th coefficients of :math:`\theta_{\mathrm a}`

        Returns
        -------
        int
            The effective model's variable.
        """
        return i + self.params.nmod[0]

    def _psi_o(self, i):
        r"""Transform the :math:`\psi_{\mathrm o}` :math:`i`-th coefficient into the effective model's variable.

        Parameters
        ----------
        i: int
            The :math:`i`-th coefficients of :math:`\psi_{\mathrm o}`

        Returns
        -------
        int
            The effective model's variable.
        """
        return i + 2 * self.params.nmod[0]

    def _deltaT_o(self, i):
        r"""Transform the :math:`\delta T_{\mathrm o}` :math:`i`-th coefficient into the effective model's variable.

        Parameters
        ----------
        i: int
            The :math:`i`-th coefficients of :math:`\delta T_{\mathrm o}`

        Returns
        -------
        int
            The effective model's variable.
        """
        return i + 2 * self.params.nmod[0] + self.params.nmod[1]

    def _deltaT_g(self, i):
        r"""Transform the :math:`\delta T_{\mathrm g}` :math:`i`-th coefficient into the effective model's variable.

        Parameters
        ----------
        i: int
            The :math:`i`-th coefficients of :math:`\delta T_{\mathrm g}`

        Returns
        -------
        int
            The effective model's variable.
        """
        return i + 2 * self.params.nmod[0]

    def compute_tensor(self):
        """Routine to compute the tensor."""
        aips = self.atmospheric_inner_products
        oips = self.oceanic_inner_products
        par = self.params
        atp = par.atemperature_params
        ap = par.atmospheric_params
        op = par.oceanic_params
        scp = par.scale_params
        gp = par.ground_params
        namod = par.nmod[0]
        ngomod = par.nmod[1]
        ndim = par.ndim
        if par.gotemperature_params is not None:
            ocean = par.gotemperature_params._name == "Oceanic Temperature"
            ground_temp = par.gotemperature_params._name == "Ground Temperature"
        else:
            ocean = False
            ground_temp = False
        # 0-th tensor component is an empty matrix
        tensor = sp.zeros((ndim+1, ndim + 1, ndim + 1), dtype=np.float64, format='dok')
        jacobian_tensor = sp.zeros((ndim+1, ndim + 1, ndim + 1), dtype=np.float64, format='dok')
        ## Problem with matmul with object and DOK archi : Temporary fix until a better solution is found
        # NOTE: np.float (deprecated in NumPy 1.20, removed in 1.24) replaced
        # by np.float64 -- identical behaviour for these arrays.
        hk = np.array(gp.hk, dtype=np.float64)
        g = aips.g.to_coo()
        #################
        # psi_a part
        for i in range(1, namod + 1):
            t = np.zeros((ndim + 1, ndim + 1), dtype=np.float64)
            for j in range(1, namod + 1):
                t[self._psi_a(j), 0] = -((aips.c[(i - 1), (j - 1)] * scp.beta) / aips.a[(i - 1), (i - 1)]) \
                                       - (ap.kd * _kronecker_delta((i - 1), (j - 1))) / 2
                t[self._theta_a(j), 0] = (ap.kd * _kronecker_delta((i - 1), (j - 1))) / 2
                if gp.hk is not None:
                    # orographic coupling term
                    oro = (g[(i - 1), (j - 1), :] @ hk) / (2 * aips.a[(i - 1), (i - 1)])
                    t[self._psi_a(j), 0] -= oro
                    t[self._theta_a(j), 0] += oro
                for k in range(1, namod + 1):
                    t[self._psi_a(j), self._psi_a(k)] = - aips.b[(i - 1), (j - 1), (k - 1)] \
                                                        / aips.a[(i - 1), (i - 1)]
                    t[self._theta_a(j), self._theta_a(k)] = - aips.b[(i - 1), (j - 1), (k - 1)] \
                                                            / aips.a[(i - 1), (i - 1)]
            if ocean:
                for j in range(1, ngomod + 1):
                    t[self._psi_o(j), 0] = ap.kd * aips.d[(i - 1), (j - 1)] / \
                                           (2 * aips.a[(i - 1), (i - 1)])
            t = self.simplify_matrix(t)
            tensor[self._psi_a(i)] = t
            jacobian_tensor[self._psi_a(i)] = t + t.T
        # theta_a part
        for i in range(1, namod + 1):
            t = np.zeros((ndim + 1, ndim + 1), dtype=np.float64)
            if par.Cpa is not None:
                t[0, 0] = par.Cpa[i - 1] / (1 - aips.a[0, 0] * ap.sig0)
            if atp.hd is not None and atp.thetas is not None:
                t[0, 0] += atp.hd * atp.thetas[(i - 1)] / (1. - ap.sig0 * aips.a[(i - 1), (i - 1)])
            for j in range(1, namod + 1):
                t[self._psi_a(j), 0] = (aips.a[(i - 1), (j - 1)] * ap.kd * ap.sig0) \
                                       / (-2 + 2 * aips.a[(i - 1), (i - 1)] * ap.sig0)
                if par.LSBpa is not None and par.Lpa is not None:
                    heat = 2. * (par.LSBpa + atp.sc * par.Lpa) * _kronecker_delta((i - 1), (j - 1))
                else:
                    heat = 0
                t[self._theta_a(j), 0] = (-((ap.sig0 * (2. * aips.c[(i - 1), (j - 1)]
                                                        * scp.beta + aips.a[(i - 1), (j - 1)] * (ap.kd + 4. * ap.kdp))))
                                          + heat) / (-2. + 2. * aips.a[(i - 1), (i - 1)] * ap.sig0)
                if atp.hd is not None:
                    t[self._theta_a(j), 0] += (atp.hd * _kronecker_delta((i - 1), (j - 1))) / (ap.sig0 * aips.a[(i - 1), (i - 1)] - 1.)
                if gp.hk is not None:
                    oro = (ap.sig0 * g[(i - 1), (j - 1), :] @ hk) / (2 * aips.a[(i - 1), (i - 1)] * ap.sig0 - 2.)
                    t[self._theta_a(j), 0] -= oro
                    t[self._psi_a(j), 0] += oro
                for k in range(1, namod + 1):
                    t[self._psi_a(j), self._theta_a(k)] = (aips.g[(i - 1), (j - 1), (k - 1)]
                                                           - aips.b[(i - 1), (j - 1), (k - 1)] * ap.sig0) / \
                                                          (-1 + aips.a[(i - 1), (i - 1)] * ap.sig0)
                    t[self._theta_a(j), self._psi_a(k)] = (aips.b[(i - 1), (j - 1), (k - 1)] * ap.sig0) \
                                                          / (1 - aips.a[(i - 1), (i - 1)] * ap.sig0)
            if ocean:
                for j in range(1, ngomod + 1):
                    t[self._psi_o(j), 0] = ap.kd * (aips.d[(i - 1), (j - 1)] * ap.sig0) \
                                           / (2 - 2 * aips.a[(i - 1), (i - 1)] * ap.sig0)
                    if par.LSBpgo is not None and par.Lpa is not None:
                        t[self._deltaT_o(j), 0] = aips.s[(i - 1), (j - 1)] * (2 * par.LSBpgo + par.Lpa) \
                                                  / (2 - 2 * aips.a[(i - 1), (i - 1)] * ap.sig0)
            if ground_temp and i <= ngomod:
                t[self._deltaT_g(i), 0] = (2 * par.LSBpgo + par.Lpa) / (2 - 2 * aips.a[(i - 1), (i - 1)] * ap.sig0)
            t = self.simplify_matrix(t)
            tensor[self._theta_a(i)] = t
            jacobian_tensor[self._theta_a(i)] = t + t.T
        if ocean:
            # psi_o part
            for i in range(1, ngomod + 1):
                t = np.zeros((ndim + 1, ndim + 1), dtype=np.float64)
                for j in range(1, namod + 1):
                    t[self._psi_a(j), 0] = oips.K[(i - 1), (j - 1)] * op.d \
                                           / (oips.M[(i - 1), (i - 1)] + par.G)
                    t[self._theta_a(j), 0] = -(oips.K[(i - 1), (j - 1)]) * op.d \
                                             / (oips.M[(i - 1), (i - 1)] + par.G)
                for j in range(1, ngomod + 1):
                    t[self._psi_o(j), 0] = -((oips.N[(i - 1), (j - 1)] * scp.beta +
                                              oips.M[(i - 1), (i - 1)] * (op.r + op.d) *
                                              _kronecker_delta((i - 1), (j - 1)))) / (oips.M[(i - 1), (i - 1)] + par.G)
                    for k in range(1, ngomod + 1):
                        t[self._psi_o(j), self._psi_o(k)] = -(oips.C[(i - 1), (j - 1), (k - 1)]) \
                                                            / (oips.M[(i - 1), (i - 1)] + par.G)
                t = self.simplify_matrix(t)
                tensor[self._psi_o(i)] = t
                jacobian_tensor[self._psi_o(i)] = t + t.T
            # deltaT_o part
            ## Problem with matmul with object and DOK archi : Temporary fix until a better solution is found
            # NOTE: np.float64 here as well (np.float alias removed from NumPy).
            Cpgo = np.array(par.Cpgo, dtype=np.float64)
            W = oips.W.to_coo()
            #################
            for i in range(1, ngomod + 1):
                t = np.zeros((ndim + 1, ndim + 1), dtype=np.float64)
                t[0, 0] = W[(i - 1), :] @ Cpgo
                for j in range(1, namod + 1):
                    t[self._theta_a(j), 0] = oips.W[(i - 1), (j - 1)] * (2 * atp.sc * par.Lpgo + par.sbpa)
                for j in range(1, ngomod + 1):
                    t[self._deltaT_o(j), 0] = - (par.Lpgo + par.sbpgo) * _kronecker_delta((i - 1), (j - 1))
                    for k in range(1, ngomod + 1):
                        t[self._psi_o(j), self._deltaT_o(k)] = -(oips.O[(i - 1), (j - 1), (k - 1)])
                t = self.simplify_matrix(t)
                tensor[self._deltaT_o(i)] = t
                jacobian_tensor[self._deltaT_o(i)] = t + t.T
        # deltaT_g part
        if ground_temp:
            for i in range(1, ngomod + 1):
                t = np.zeros((ndim + 1, ndim + 1), dtype=np.float64)
                t[0, 0] = par.Cpgo[(i - 1)]
                t[self._theta_a(i), 0] = 2 * atp.sc * par.Lpgo + par.sbpa
                t[self._deltaT_g(i), 0] = - (par.Lpgo + par.sbpgo)
                t = self.simplify_matrix(t)
                tensor[self._deltaT_g(i)] = t
                jacobian_tensor[self._deltaT_g(i)] = t + t.T
        self.tensor = tensor.to_coo()
        self.jacobian_tensor = jacobian_tensor.to_coo()

    @staticmethod
    def simplify_matrix(matrix):
        r"""Routine that simplifies the component of the 3D tensors :math:`\mathcal{T}`.

        For each index :math:`i`, it upper-triangularizes the
        matrix :math:`\mathcal{T}_{i,j,k} \quad 0 \leq j,k \leq \mathrm{ndim}`.

        Parameters
        ----------
        matrix: ~numpy.ndarray
            :math:`i`-th matrix component of the tensor :math:`\mathcal{T}_{i,j,k}` to simplify.

        Returns
        -------
        ~numpy.ndarray
            The upper-triangularized matrix.
        """
        # Fold the strict lower triangle onto the upper triangle.
        return np.triu(matrix) + np.tril(matrix, -1).T
def _kronecker_delta(i, j):
if i == j:
return 1
else:
return 0
if __name__ == '__main__':
    from params.params import QgParams
    from inner_products.analytic import AtmosphericInnerProducts, OceanicInnerProducts
    # Demonstration: build a small coupled ocean-atmosphere configuration
    # (2x2 atmospheric modes, 2x4 oceanic modes) and its inner products.
    params = QgParams()
    params.set_atmospheric_modes(2, 2)
    params.set_oceanic_modes(2, 4)
    aip = AtmosphericInnerProducts(params)
    oip = OceanicInnerProducts(params)
    # Couple the atmospheric inner products to the oceanic basis.
    aip.connect_to_ocean(oip)
agotensor = QgsTensor(aip, oip) | [
"params.params.QgParams",
"sparse.zeros",
"numpy.array",
"numpy.zeros",
"inner_products.analytic.OceanicInnerProducts",
"inner_products.analytic.AtmosphericInnerProducts",
"numpy.tril",
"numpy.triu"
] | [((14107, 14117), 'params.params.QgParams', 'QgParams', ([], {}), '()\n', (14115, 14117), False, 'from params.params import QgParams\n'), ((14202, 14234), 'inner_products.analytic.AtmosphericInnerProducts', 'AtmosphericInnerProducts', (['params'], {}), '(params)\n', (14226, 14234), False, 'from inner_products.analytic import AtmosphericInnerProducts, OceanicInnerProducts\n'), ((14245, 14273), 'inner_products.analytic.OceanicInnerProducts', 'OceanicInnerProducts', (['params'], {}), '(params)\n', (14265, 14273), False, 'from inner_products.analytic import AtmosphericInnerProducts, OceanicInnerProducts\n'), ((5577, 5649), 'sparse.zeros', 'sp.zeros', (['(ndim + 1, ndim + 1, ndim + 1)'], {'dtype': 'np.float64', 'format': '"""dok"""'}), "((ndim + 1, ndim + 1, ndim + 1), dtype=np.float64, format='dok')\n", (5585, 5649), True, 'import sparse as sp\n'), ((5674, 5746), 'sparse.zeros', 'sp.zeros', (['(ndim + 1, ndim + 1, ndim + 1)'], {'dtype': 'np.float64', 'format': '"""dok"""'}), "((ndim + 1, ndim + 1, ndim + 1), dtype=np.float64, format='dok')\n", (5682, 5746), True, 'import sparse as sp\n'), ((5865, 5896), 'numpy.array', 'np.array', (['gp.hk'], {'dtype': 'np.float'}), '(gp.hk, dtype=np.float)\n', (5873, 5896), True, 'import numpy as np\n'), ((6026, 6074), 'numpy.zeros', 'np.zeros', (['(ndim + 1, ndim + 1)'], {'dtype': 'np.float64'}), '((ndim + 1, ndim + 1), dtype=np.float64)\n', (6034, 6074), True, 'import numpy as np\n'), ((7476, 7524), 'numpy.zeros', 'np.zeros', (['(ndim + 1, ndim + 1)'], {'dtype': 'np.float64'}), '((ndim + 1, ndim + 1), dtype=np.float64)\n', (7484, 7524), True, 'import numpy as np\n'), ((11760, 11794), 'numpy.array', 'np.array', (['par.Cpgo'], {'dtype': 'np.float'}), '(par.Cpgo, dtype=np.float)\n', (11768, 11794), True, 'import numpy as np\n'), ((13807, 13822), 'numpy.triu', 'np.triu', (['matrix'], {}), '(matrix)\n', (13814, 13822), True, 'import numpy as np\n'), ((10448, 10496), 'numpy.zeros', 'np.zeros', (['(ndim + 1, ndim + 1)'], {'dtype': 
'np.float64'}), '((ndim + 1, ndim + 1), dtype=np.float64)\n', (10456, 10496), True, 'import numpy as np\n'), ((11921, 11969), 'numpy.zeros', 'np.zeros', (['(ndim + 1, ndim + 1)'], {'dtype': 'np.float64'}), '((ndim + 1, ndim + 1), dtype=np.float64)\n', (11929, 11969), True, 'import numpy as np\n'), ((12746, 12794), 'numpy.zeros', 'np.zeros', (['(ndim + 1, ndim + 1)'], {'dtype': 'np.float64'}), '((ndim + 1, ndim + 1), dtype=np.float64)\n', (12754, 12794), True, 'import numpy as np\n'), ((13825, 13844), 'numpy.tril', 'np.tril', (['matrix', '(-1)'], {}), '(matrix, -1)\n', (13832, 13844), True, 'import numpy as np\n')] |
import logging
import numpy as np
from ctapipe.calib.camera import CameraCalibrator
from ctapipe.io import (
EventSource,
read_table,
)
from numba import njit
from scipy.interpolate import interp1d
from traitlets.config import Config
from lstchain.io import standard_config
from lstchain.io.config import read_configuration_file
# Public API of this module.
__all__ = [
    'add_noise_in_pixels',
    'calculate_required_additional_nsb',
    'calculate_noise_parameters',
    'random_psf_smearer',
    'set_numba_seed',
    'tune_nsb_on_waveform',
]
# Module-level logger.
log = logging.getLogger(__name__)
# number of neighbors of completely surrounded pixels of hexagonal cameras:
N_PIXEL_NEIGHBORS = 6
# Equal probability for smeared charge to land in each of the 6 neighbors.
SMEAR_PROBABILITIES = np.full(N_PIXEL_NEIGHBORS, 1 / N_PIXEL_NEIGHBORS)
def add_noise_in_pixels(rng, image, extra_noise_in_dim_pixels,
                        extra_bias_in_dim_pixels, transition_charge,
                        extra_noise_in_bright_pixels):
    """
    Add Poissonian noise to the pixels of a camera image.

    Pixels are split into "dim" and "bright" by `transition_charge`. Dim
    pixels receive Poisson noise of mean `extra_noise_in_dim_pixels` plus a
    net bias of `extra_bias_in_dim_pixels`. Bright pixels receive Poisson
    noise of mean `extra_noise_in_bright_pixels` with its mean subtracted,
    so their average charge is unchanged (above `transition_charge` the
    integration window is assumed to be set by the Cherenkov light, so the
    added NSB should not bias the charge). All tuning values are obtained
    by comparing the starting MC and data.

    Parameters
    ----------
    rng : `numpy.random.default_rng`
        Random number generator
    image: `np.ndarray`
        Charges (p.e.) in the camera
    extra_noise_in_dim_pixels: `float`
        Mean additional number of p.e. (Poisson) for dim pixels
    extra_bias_in_dim_pixels: `float`
        Mean bias (w.r.t. original charge) of dim pixels; should be 0 for
        non-peak-search pulse integrators
    transition_charge: `float`
        Border between "dim" and "bright" pixels
    extra_noise_in_bright_pixels: `float`
        Mean additional number of p.e. (Poisson, mean-subtracted) for
        bright pixels

    Returns
    -------
    image: `np.ndarray`
        Modified (noisier) image
    """
    is_bright = image > transition_charge
    # Per-pixel mean of the Poisson noise to inject:
    mean_noise = np.where(is_bright,
                          extra_noise_in_bright_pixels,
                          extra_noise_in_dim_pixels)
    # Deterministic offset: cancels the injected mean in bright pixels and
    # applies the requested net bias in dim pixels.
    offset = np.where(is_bright,
                      -extra_noise_in_bright_pixels,
                      extra_bias_in_dim_pixels - extra_noise_in_dim_pixels)
    return image + rng.poisson(mean_noise) + offset
@njit(cache=True)
def set_numba_seed(seed):
    # Seed the RNG from *inside* jitted code: numba-compiled functions use
    # their own random state, so calling np.random.seed here (rather than in
    # plain Python) is what makes other @njit functions (e.g.
    # random_psf_smearer) reproducible.
    np.random.seed(seed)
@njit(cache=True)
def random_psf_smearer(image, fraction, indices, indptr):
    """
    Randomly redistribute (smear) a fraction of each pixel's charge onto its
    immediate neighbors, mimicking an optical PSF broadening.

    Parameters
    ----------
    image: `np.ndarray`
        Charges (p.e.) in the camera
    fraction: `float`
        Fraction of each pixel's light to spread over its neighbors,
        following Poisson statistics. The charge is always shared among 6
        neighbor slots, so edge pixels (with fewer neighbors) lose some
        light off the camera.
    indices : `camera_geometry.neighbor_matrix_sparse.indices`
        Pixel indices.
    indptr : camera_geometry.neighbor_matrix_sparse.indptr

    Returns
    -------
    `np.ndarray`
        Modified (smeared) image
    """
    out = image.copy()
    for pix in range(len(image)):
        charge = image[pix]
        if charge <= 0:
            continue
        # Number of p.e. leaving this pixel:
        to_smear = np.random.poisson(charge * fraction)
        if to_smear == 0:
            continue
        out[pix] -= to_smear
        # Neighbors of this pixel in the sparse neighbor matrix:
        neigh = indices[indptr[pix]: indptr[pix + 1]]
        # Always draw 6 shares; shares of missing (edge) neighbors are lost.
        shares = np.random.multinomial(to_smear, SMEAR_PROBABILITIES)
        for k in range(len(neigh)):
            out[neigh[k]] += shares[k]
    return out
def calculate_noise_parameters(simtel_filename, data_dl1_filename,
                               config_filename=None):
    """
    Calculates the parameters needed to increase the noise in an MC DL1 file
    to match the noise in a real data DL1 file, using add_noise_in_pixels
    The returned parameters are those needed by the function add_noise_in_pixels (see
    description in its documentation above).

    Parameters
    ----------
    simtel_filename: `str`
        a simtel file containing showers, from the same
        production (same NSB and telescope settings) as the MC DL1 file below. It
        must contain pixel-wise info on true number of p.e.'s from C-photons (
        will be used to identify pixels which only contain noise).
    data_dl1_filename: `str`
        a real data DL1 file (processed with calibration
        settings corresponding to those with which the MC is to be processed).
        It must contain calibrated images, i.e. "DL1a" data. This file has the
        "target" noise which we want to have in the MC files, for better
        agreement of data and simulations.
    config_filename: `str`
        configuration file containing the calibration
        settings used for processing both the data and the MC files above

    Returns
    -------
    extra_noise_in_dim_pixels: `float`
        Extra noise of dim pixels (p.e.).
    extra_bias_in_dim_pixels: `float`
        Extra bias of dim pixels (p.e.).
    extra_noise_in_bright_pixels: `float`
        Extra noise of bright pixels (p.e.)
    """
    log.setLevel(logging.INFO)
    if config_filename is None:
        config = standard_config
    else:
        config = read_configuration_file(config_filename)
    # Real data DL1 tables:
    data_dl1_calibration = read_table(data_dl1_filename,
                                      '/dl1/event/telescope/monitoring/calibration')
    data_dl1_pedestal = read_table(data_dl1_filename,
                                   '/dl1/event/telescope/monitoring/pedestal')
    data_dl1_parameters = read_table(data_dl1_filename,
                                     '/dl1/event/telescope/parameters/LST_LSTCam')
    data_dl1_image = read_table(data_dl1_filename,
                                '/dl1/event/telescope/image/LST_LSTCam')
    unusable = data_dl1_calibration['unusable_pixels']
    # Locate pixels with HG declared unusable either in original calibration or
    # in interleaved events:
    bad_pixels = unusable[0][0]  # original calibration
    for tf in unusable[1:][0]:  # calibrations with interleaveds
        bad_pixels = np.logical_or(bad_pixels, tf)
    good_pixels = ~bad_pixels
    # First index: 1,2,... = values from interleaveds (0 is for original
    # calibration run)
    # Second index: 0 = high gain
    # Third index: pixels
    # HG adc to pe conversion factors from interleaved calibrations:
    data_HG_dc_to_pe = data_dl1_calibration['dc_to_pe'][:, 0, :]
    # Pixel-wise pedestal standard deviation (for an unbiased extractor),
    # in adc counts:
    data_HG_ped_std = data_dl1_pedestal['charge_std'][1:, 0, :]
    # indices which connect each pedestal calculation to a given calibration:
    calibration_id = data_dl1_pedestal['calibration_id'][1:]
    # convert pedestal st deviations to p.e.
    dummy = []
    for i, x in enumerate(data_HG_ped_std[:, ]):
        dummy.append(x * data_HG_dc_to_pe[calibration_id[i],])
    dummy = np.array(dummy)
    # Average for all interleaved calibrations (in case there are more than one)
    data_HG_ped_std_pe = np.mean(dummy, axis=0)  # one value per pixel
    # Identify noisy pixels, likely containing stars - we want to adjust MC to
    # the average diffuse NSB across the camera
    data_median_std_ped_pe = np.median(data_HG_ped_std_pe)
    data_std_std_ped_pe = np.std(data_HG_ped_std_pe)
    log.info(f'Real data: median across camera of good pixels\' pedestal std '
             f'{data_median_std_ped_pe:.3f} p.e.')
    # Pixels beyond median + 3 sigma are considered star-lit / anomalous:
    brightness_limit = data_median_std_ped_pe + 3 * data_std_std_ped_pe
    too_bright_pixels = (data_HG_ped_std_pe > brightness_limit)
    log.info(f'Number of pixels beyond 3 std dev of median: '
             f'{too_bright_pixels.sum()}, (above {brightness_limit:.2f} p.e.)')
    # event_type == 2 selects interleaved pedestal events:
    ped_mask = data_dl1_parameters['event_type'] == 2
    # The charges in the images below are obtained with the extractor for
    # showers, usually a biased one, like e.g. LocalPeakWindowSum
    data_ped_charges = data_dl1_image['image'][ped_mask]
    # Exclude too bright pixels, besides those with unusable calibration:
    good_pixels &= ~too_bright_pixels
    # recalculate the median of the pixels' std dev, with good_pixels:
    data_median_std_ped_pe = np.median(data_HG_ped_std_pe[good_pixels])
    log.info(f'Good and not too bright pixels: {good_pixels.sum()}')
    # all_good is an events*pixels boolean array of valid signals:
    all_good = np.reshape(np.tile(good_pixels, data_ped_charges.shape[0]),
                          data_ped_charges.shape)
    # histogram of pedestal charges (biased extractor) from good and not noisy
    # pixels:
    qbins = 100
    qrange = (-10, 15)
    dataq = np.histogram(data_ped_charges[all_good].flatten(), bins=qbins,
                         range=qrange, density=True)
    # Find the peak of the pedestal biased charge distribution of real data.
    # Use an interpolated version of the histogram, for robustness:
    func = interp1d(0.5*(dataq[1][1:]+dataq[1][:-1]), dataq[0],
                    kind='quadratic', fill_value='extrapolate')
    xx = np.linspace(qrange[0], qrange[1], 100*qbins)
    mode_data = xx[np.argmax(func(xx))]
    # Event reader for simtel file:
    mc_reader = EventSource(input_url=simtel_filename, config=Config(config))
    # Obtain the configuration with which the pedestal calculations were
    # performed:
    ped_config = config['LSTCalibrationCalculator']['PedestalIntegrator']
    tel_id = ped_config['tel_id']
    # Obtain the (unbiased) extractor used for pedestal calculations:
    pedestal_extractor_type = ped_config['charge_product']
    pedestal_calibrator = CameraCalibrator(
        image_extractor_type=pedestal_extractor_type,
        config=Config(ped_config),
        subarray=mc_reader.subarray
    )
    # Obtain the (usually biased) extractor used for shower images:
    shower_extractor_type = config['image_extractor']
    shower_calibrator = CameraCalibrator(
        image_extractor_type=shower_extractor_type,
        config=Config(config),
        subarray=mc_reader.subarray
    )
    # Since these extractors are now for use on MC, we have to apply the pulse
    # integration correction (in data that is currently, as of
    # lstchain v0.7.5, replaced by an empirical (hard-coded) correction of the
    # adc to pe conversion factors )
    pedestal_calibrator.image_extractors[ped_config['charge_product']].apply_integration_correction = True
    shower_calibrator.image_extractors[shower_extractor_type].apply_integration_correction = True
    # Pulse integration window width of the (biased) extractor for showers:
    shower_extractor_window_width = config[config['image_extractor']]['window_width']
    # Pulse integration window width for the pedestal estimation:
    pedestal_extractor_config = ped_config[pedestal_extractor_type]
    pedestal_extractor_window_width = pedestal_extractor_config['window_width']
    # MC pedestals integrated with the unbiased pedestal extractor
    mc_ped_charges = []
    # MC pedestals integrated with the biased shower extractor
    mc_ped_charges_biased = []
    for event in mc_reader:
        if tel_id not in event.trigger.tels_with_trigger:
            continue
        # Extract the signals as we do for pedestals (unbiased fixed window
        # extractor):
        pedestal_calibrator(event)
        charges = event.dl1.tel[tel_id].image
        # True number of pe's from Cherenkov photons (to identify noise-only pixels)
        true_image = event.simulation.tel[tel_id].true_image
        mc_ped_charges.append(charges[true_image == 0])
        # Now extract the signal as we would do for shower events (usually
        # with a biased extractor, e.g. LocalPeakWindowSum):
        shower_calibrator(event)
        charges_biased = event.dl1.tel[tel_id].image
        mc_ped_charges_biased.append(charges_biased[true_image == 0])
    # All pixels behave (for now) in the same way in MC, just put them together
    mc_ped_charges = np.concatenate(mc_ped_charges)
    mc_ped_charges_biased = np.concatenate(mc_ped_charges_biased)
    mcq = np.histogram(mc_ped_charges_biased, bins=qbins, range=qrange,
                       density=True)
    # Find the peak of the pedestal biased charge distribution of MC. Use
    # an interpolated version of the histogram, for robustness:
    func = interp1d(0.5*(mcq[1][1:]+mcq[1][:-1]), mcq[0],
                    kind='quadratic', fill_value='extrapolate')
    xx = np.linspace(qrange[0], qrange[1], 100*qbins)
    mode_mc = xx[np.argmax(func(xx))]
    mc_unbiased_std_ped_pe = np.std(mc_ped_charges)
    # Find the additional noise (in data w.r.t. MC) for the unbiased extractor,
    # and scale it to the width of the window for integration of shower images.
    # The idea is that when a strong signal is present, the biased extractor
    # will integrate around it, and the additional noise is unbiased because
    # it won't modify the integration range.
    extra_noise_in_bright_pixels = \
        ((data_median_std_ped_pe**2 - mc_unbiased_std_ped_pe**2) *
         shower_extractor_window_width / pedestal_extractor_window_width)
    # Just in case, makes sure we just add noise if the MC noise is smaller
    # than the real data's:
    extra_noise_in_bright_pixels = max(0., extra_noise_in_bright_pixels)
    # Shift between the data and MC pedestal-peak positions:
    bias = mode_data - mode_mc
    extra_bias_in_dim_pixels = max(bias, 0)
    # differences of values to peak charge:
    dq = data_ped_charges[all_good].flatten() - mode_data
    dqmc = mc_ped_charges_biased - mode_mc
    # maximum distance (in pe) from peak, to avoid strong impact of outliers:
    maxq = 10
    # calculate widening of the noise bump:
    added_noise = (np.sum(dq[dq<maxq]**2)/len(dq[dq<maxq]) -
                   np.sum(dqmc[dqmc<maxq]**2)/len(dqmc[dqmc < maxq]))
    added_noise = (max(0, added_noise))**0.5
    extra_noise_in_dim_pixels = added_noise
    return extra_noise_in_dim_pixels, extra_bias_in_dim_pixels, \
        extra_noise_in_bright_pixels
def tune_nsb_on_waveform(waveform, added_nsb_fraction, original_nsb,
                         dt, pulse_templates, gain, charge_spe_cumulative_pdf):
    """
    Inject single-photon pulses into existing R1 waveforms (in place) to
    increase the NSB level.

    Parameters
    ----------
    waveform: charge (p.e. / ns) in each pixel and sampled time
    added_nsb_fraction: fraction of the original NSB in simulation to be added
    original_nsb: original NSB rate (astropy unit Hz)
    dt: time between waveform samples (astropy unit s)
    pulse_templates: `lstchain.data.NormalizedPulseTemplate` containing
        the single p.e. pulse template used for the injection
    gain: gain channel identifier for each pixel
    charge_spe_cumulative_pdf: `scipy.interpolate.interp1d` Single p.e. gain
        fluctuation cumulative pdf used to randomise the normalisation of
        injected pulses
    """
    n_pixels, n_samples = waveform.shape
    # Photons are drawn over a window starting 20 samples before readout, so
    # pulses rising just before the window still contribute.
    duration = (20 + n_samples) * dt  # TODO check needed time window, effect of edges
    t = np.arange(-20, n_samples) * dt.value
    mean_added_nsb = (added_nsb_fraction * original_nsb) * duration
    rng = np.random.default_rng()
    # Per-pixel number of injected photons, their arrival times and amplitudes:
    n_injected = rng.poisson(mean_added_nsb, n_pixels)
    max_injected = max(n_injected)
    window_start = -20 * dt.value
    injection_times = rng.uniform(window_start, window_start + duration.value,
                                  (n_pixels, max_injected))
    injection_amps = charge_spe_cumulative_pdf(rng.uniform(size=(n_pixels, max_injected)))
    # Subtract the mean injected charge so the waveform baseline stays put:
    waveform -= (added_nsb_fraction * original_nsb * dt).value
    for pix in range(n_pixels):
        channel = 'HG' if gain[pix] else 'LG'
        for ph in range(n_injected[pix]):
            waveform[pix] += (injection_amps[pix][ph]
                              * pulse_templates(t[20:] - injection_times[pix][ph], channel))
def calculate_required_additional_nsb(simtel_filename, data_dl1_filename, config=None):
    # TODO check if good estimation
    # TODO reduce duplicated code with 'calculate_noise_parameters'
    """
    Calculates the additional NSB needed in the MC waveforms
    to match a real data DL1 file

    Parameters
    ----------
    simtel_filename: a simtel file containing showers, from the production
        (same NSB and telescope settings) as the one on which the correction will
        be applied. It must contain pixel-wise info on true number of p.e.'s from
        C-photons (will be used to identify pixels which only contain noise).
    data_dl1_filename: a real data DL1 file (processed with calibration
        settings corresponding to those with which the MC is to be processed).
        It must contain calibrated images, i.e. "DL1a" data. This file has the
        "target" NSB which we want to have in the MC files, for better
        agreement of data and simulations.
    config: configuration containing the calibration
        settings used for processing both the data and the MC files above

    Returns
    -------
    extra_nsb: Fraction of the additional NSB in data compared to MC.
    data_ped_variance: Pedestal variance from data
    mc_ped_variance: Pedestal variance from MC
    """
    log.setLevel(logging.INFO)
    if config is None:
        config = standard_config
    # Real data DL1 tables:
    data_dl1_calibration = read_table(data_dl1_filename,
                                      '/dl1/event/telescope/monitoring/calibration')
    data_dl1_pedestal = read_table(data_dl1_filename,
                                   '/dl1/event/telescope/monitoring/pedestal')
    unusable = data_dl1_calibration['unusable_pixels']
    # Locate pixels with HG declared unusable either in original calibration or
    # in interleaved events:
    bad_pixels = unusable[0][0]  # original calibration
    for tf in unusable[1:][0]:  # calibrations with interleaved
        bad_pixels = np.logical_or(bad_pixels, tf)
    good_pixels = ~bad_pixels
    # First index: 1,2,... = values from interleaved (0 is for original
    # calibration run)
    # Second index: 0 = high gain
    # Third index: pixels
    # HG adc to pe conversion factors from interleaved calibrations:
    data_HG_dc_to_pe = data_dl1_calibration['dc_to_pe'][:, 0, :]
    # Pixel-wise pedestal standard deviation (for an unbiased extractor),
    # in adc counts:
    data_HG_ped_std = data_dl1_pedestal['charge_std'][1:, 0, :]
    # indices which connect each pedestal calculation to a given calibration:
    calibration_id = data_dl1_pedestal['calibration_id'][1:]
    # convert pedestal st deviations to p.e.
    dummy = []
    for i, x in enumerate(data_HG_ped_std[:, ]):
        dummy.append(x * data_HG_dc_to_pe[calibration_id[i],])
    dummy = np.array(dummy)
    # Average for all interleaved calibrations (in case there are more than one)
    data_HG_ped_std_pe = np.mean(dummy, axis=0)  # one value per pixel
    # Identify noisy pixels, likely containing stars - we want to adjust MC to
    # the average diffuse NSB across the camera
    data_median_std_ped_pe = np.median(data_HG_ped_std_pe)
    data_std_std_ped_pe = np.std(data_HG_ped_std_pe)
    log.info(f'Real data: median across camera of good pixels\' pedestal std '
             f'{data_median_std_ped_pe:.3f} p.e.')
    # Pixels beyond median + 3 sigma are considered star-lit / anomalous:
    brightness_limit = data_median_std_ped_pe + 3 * data_std_std_ped_pe
    too_bright_pixels = (data_HG_ped_std_pe > brightness_limit)
    log.info(f'Number of pixels beyond 3 std dev of median: '
             f'{too_bright_pixels.sum()}, (above {brightness_limit:.2f} p.e.)')
    # Exclude too bright pixels, besides those with unusable calibration:
    good_pixels &= ~too_bright_pixels
    # recalculate the median of the pixels' std dev, with good_pixels:
    data_median_std_ped_pe = np.median(data_HG_ped_std_pe[good_pixels])
    log.info(f'Good and not too bright pixels: {good_pixels.sum()}')
    # Event reader for simtel file:
    mc_reader = EventSource(input_url=simtel_filename, config=Config(config))
    # Obtain the configuration with which the pedestal calculations were
    # performed:
    ped_config = config['LSTCalibrationCalculator']['PedestalIntegrator']
    tel_id = ped_config['tel_id']
    # Obtain the (unbiased) extractor used for pedestal calculations:
    pedestal_calibrator = CameraCalibrator(
        image_extractor_type=ped_config['charge_product'],
        config=Config(config['LSTCalibrationCalculator']),
        subarray=mc_reader.subarray)
    # Since these extractors are now for use on MC, we have to apply the pulse
    # integration correction (in data that is currently, as of
    # lstchain v0.7.5, replaced by an empirical (hard-coded) correction of the
    # adc to pe conversion factors )
    pedestal_calibrator.image_extractors[ped_config['charge_product']].apply_integration_correction = True
    # MC pedestals integrated with the unbiased pedestal extractor
    mc_ped_charges = []
    for event in mc_reader:
        if tel_id not in event.trigger.tels_with_trigger:
            continue
        # Extract the signals as we do for pedestals (unbiased fixed window
        # extractor):
        pedestal_calibrator(event)
        charges = event.dl1.tel[tel_id].image
        # True number of pe's from Cherenkov photons (to identify noise-only pixels)
        true_image = event.simulation.tel[tel_id].true_image
        mc_ped_charges.append(charges[true_image == 0])
    # All pixels behave (for now) in the same way in MC, just put them together
    mc_ped_charges = np.concatenate(mc_ped_charges)
    mc_unbiased_std_ped_pe = np.std(mc_ped_charges)
    # Find the additional noise (in data w.r.t. MC) for the unbiased extractor
    # The idea is that pedestal std scales with NSB (But better correction with sqrt(variance ratio-1) observed)
    data_ped_variance = data_median_std_ped_pe ** 2
    mc_ped_variance = mc_unbiased_std_ped_pe ** 2
    extra_nsb = ((data_ped_variance - mc_ped_variance)
                 / mc_ped_variance)
    return extra_nsb, data_ped_variance, mc_ped_variance
| [
"logging.getLogger",
"numpy.random.default_rng",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.histogram",
"numpy.random.poisson",
"numpy.where",
"numpy.random.multinomial",
"numpy.linspace",
"numpy.random.seed",
"numpy.concatenate",
"lstchain.io.config.... | [((540, 567), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (557, 567), False, 'import logging\n'), ((690, 739), 'numpy.full', 'np.full', (['N_PIXEL_NEIGHBORS', '(1 / N_PIXEL_NEIGHBORS)'], {}), '(N_PIXEL_NEIGHBORS, 1 / N_PIXEL_NEIGHBORS)\n', (697, 739), True, 'import numpy as np\n'), ((2814, 2830), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (2818, 2830), False, 'from numba import njit\n'), ((2885, 2901), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (2889, 2901), False, 'from numba import njit\n'), ((2504, 2589), 'numpy.where', 'np.where', (['bright_pixels', 'extra_noise_in_bright_pixels', 'extra_noise_in_dim_pixels'], {}), '(bright_pixels, extra_noise_in_bright_pixels, extra_noise_in_dim_pixels\n )\n', (2512, 2589), True, 'import numpy as np\n'), ((2617, 2730), 'numpy.where', 'np.where', (['bright_pixels', '(-extra_noise_in_bright_pixels)', '(extra_bias_in_dim_pixels - extra_noise_in_dim_pixels)'], {}), '(bright_pixels, -extra_noise_in_bright_pixels, \n extra_bias_in_dim_pixels - extra_noise_in_dim_pixels)\n', (2625, 2730), True, 'import numpy as np\n'), ((2861, 2881), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2875, 2881), True, 'import numpy as np\n'), ((6259, 6335), 'ctapipe.io.read_table', 'read_table', (['data_dl1_filename', '"""/dl1/event/telescope/monitoring/calibration"""'], {}), "(data_dl1_filename, '/dl1/event/telescope/monitoring/calibration')\n", (6269, 6335), False, 'from ctapipe.io import EventSource, read_table\n'), ((6381, 6454), 'ctapipe.io.read_table', 'read_table', (['data_dl1_filename', '"""/dl1/event/telescope/monitoring/pedestal"""'], {}), "(data_dl1_filename, '/dl1/event/telescope/monitoring/pedestal')\n", (6391, 6454), False, 'from ctapipe.io import EventSource, read_table\n'), ((6502, 6577), 'ctapipe.io.read_table', 'read_table', (['data_dl1_filename', '"""/dl1/event/telescope/parameters/LST_LSTCam"""'], {}), 
"(data_dl1_filename, '/dl1/event/telescope/parameters/LST_LSTCam')\n", (6512, 6577), False, 'from ctapipe.io import EventSource, read_table\n'), ((6619, 6689), 'ctapipe.io.read_table', 'read_table', (['data_dl1_filename', '"""/dl1/event/telescope/image/LST_LSTCam"""'], {}), "(data_dl1_filename, '/dl1/event/telescope/image/LST_LSTCam')\n", (6629, 6689), False, 'from ctapipe.io import EventSource, read_table\n'), ((7853, 7868), 'numpy.array', 'np.array', (['dummy'], {}), '(dummy)\n', (7861, 7868), True, 'import numpy as np\n'), ((7976, 7998), 'numpy.mean', 'np.mean', (['dummy'], {'axis': '(0)'}), '(dummy, axis=0)\n', (7983, 7998), True, 'import numpy as np\n'), ((8178, 8207), 'numpy.median', 'np.median', (['data_HG_ped_std_pe'], {}), '(data_HG_ped_std_pe)\n', (8187, 8207), True, 'import numpy as np\n'), ((8234, 8260), 'numpy.std', 'np.std', (['data_HG_ped_std_pe'], {}), '(data_HG_ped_std_pe)\n', (8240, 8260), True, 'import numpy as np\n'), ((9134, 9176), 'numpy.median', 'np.median', (['data_HG_ped_std_pe[good_pixels]'], {}), '(data_HG_ped_std_pe[good_pixels])\n', (9143, 9176), True, 'import numpy as np\n'), ((9858, 9962), 'scipy.interpolate.interp1d', 'interp1d', (['(0.5 * (dataq[1][1:] + dataq[1][:-1]))', 'dataq[0]'], {'kind': '"""quadratic"""', 'fill_value': '"""extrapolate"""'}), "(0.5 * (dataq[1][1:] + dataq[1][:-1]), dataq[0], kind='quadratic',\n fill_value='extrapolate')\n", (9866, 9962), False, 'from scipy.interpolate import interp1d\n'), ((9984, 10030), 'numpy.linspace', 'np.linspace', (['qrange[0]', 'qrange[1]', '(100 * qbins)'], {}), '(qrange[0], qrange[1], 100 * qbins)\n', (9995, 10030), True, 'import numpy as np\n'), ((12893, 12923), 'numpy.concatenate', 'np.concatenate', (['mc_ped_charges'], {}), '(mc_ped_charges)\n', (12907, 12923), True, 'import numpy as np\n'), ((12952, 12989), 'numpy.concatenate', 'np.concatenate', (['mc_ped_charges_biased'], {}), '(mc_ped_charges_biased)\n', (12966, 12989), True, 'import numpy as np\n'), ((13001, 13076), 
'numpy.histogram', 'np.histogram', (['mc_ped_charges_biased'], {'bins': 'qbins', 'range': 'qrange', 'density': '(True)'}), '(mc_ped_charges_biased, bins=qbins, range=qrange, density=True)\n', (13013, 13076), True, 'import numpy as np\n'), ((13249, 13347), 'scipy.interpolate.interp1d', 'interp1d', (['(0.5 * (mcq[1][1:] + mcq[1][:-1]))', 'mcq[0]'], {'kind': '"""quadratic"""', 'fill_value': '"""extrapolate"""'}), "(0.5 * (mcq[1][1:] + mcq[1][:-1]), mcq[0], kind='quadratic',\n fill_value='extrapolate')\n", (13257, 13347), False, 'from scipy.interpolate import interp1d\n'), ((13369, 13415), 'numpy.linspace', 'np.linspace', (['qrange[0]', 'qrange[1]', '(100 * qbins)'], {}), '(qrange[0], qrange[1], 100 * qbins)\n', (13380, 13415), True, 'import numpy as np\n'), ((13482, 13504), 'numpy.std', 'np.std', (['mc_ped_charges'], {}), '(mc_ped_charges)\n', (13488, 13504), True, 'import numpy as np\n'), ((16025, 16048), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (16046, 16048), True, 'import numpy as np\n'), ((18086, 18162), 'ctapipe.io.read_table', 'read_table', (['data_dl1_filename', '"""/dl1/event/telescope/monitoring/calibration"""'], {}), "(data_dl1_filename, '/dl1/event/telescope/monitoring/calibration')\n", (18096, 18162), False, 'from ctapipe.io import EventSource, read_table\n'), ((18225, 18298), 'ctapipe.io.read_table', 'read_table', (['data_dl1_filename', '"""/dl1/event/telescope/monitoring/pedestal"""'], {}), "(data_dl1_filename, '/dl1/event/telescope/monitoring/pedestal')\n", (18235, 18298), False, 'from ctapipe.io import EventSource, read_table\n'), ((19473, 19488), 'numpy.array', 'np.array', (['dummy'], {}), '(dummy)\n', (19481, 19488), True, 'import numpy as np\n'), ((19596, 19618), 'numpy.mean', 'np.mean', (['dummy'], {'axis': '(0)'}), '(dummy, axis=0)\n', (19603, 19618), True, 'import numpy as np\n'), ((19799, 19828), 'numpy.median', 'np.median', (['data_HG_ped_std_pe'], {}), '(data_HG_ped_std_pe)\n', (19808, 19828), True, 'import numpy 
as np\n'), ((19855, 19881), 'numpy.std', 'np.std', (['data_HG_ped_std_pe'], {}), '(data_HG_ped_std_pe)\n', (19861, 19881), True, 'import numpy as np\n'), ((20503, 20545), 'numpy.median', 'np.median', (['data_HG_ped_std_pe[good_pixels]'], {}), '(data_HG_ped_std_pe[good_pixels])\n', (20512, 20545), True, 'import numpy as np\n'), ((22249, 22279), 'numpy.concatenate', 'np.concatenate', (['mc_ped_charges'], {}), '(mc_ped_charges)\n', (22263, 22279), True, 'import numpy as np\n'), ((22309, 22331), 'numpy.std', 'np.std', (['mc_ped_charges'], {}), '(mc_ped_charges)\n', (22315, 22331), True, 'import numpy as np\n'), ((3779, 3821), 'numpy.random.poisson', 'np.random.poisson', (['(image[pixel] * fraction)'], {}), '(image[pixel] * fraction)\n', (3796, 3821), True, 'import numpy as np\n'), ((4302, 4354), 'numpy.random.multinomial', 'np.random.multinomial', (['to_smear', 'SMEAR_PROBABILITIES'], {}), '(to_smear, SMEAR_PROBABILITIES)\n', (4323, 4354), True, 'import numpy as np\n'), ((6162, 6202), 'lstchain.io.config.read_configuration_file', 'read_configuration_file', (['config_filename'], {}), '(config_filename)\n', (6185, 6202), False, 'from lstchain.io.config import read_configuration_file\n'), ((7018, 7047), 'numpy.logical_or', 'np.logical_or', (['bad_pixels', 'tf'], {}), '(bad_pixels, tf)\n', (7031, 7047), True, 'import numpy as np\n'), ((9341, 9388), 'numpy.tile', 'np.tile', (['good_pixels', 'data_ped_charges.shape[0]'], {}), '(good_pixels, data_ped_charges.shape[0])\n', (9348, 9388), True, 'import numpy as np\n'), ((15910, 15935), 'numpy.arange', 'np.arange', (['(-20)', 'n_samples'], {}), '(-20, n_samples)\n', (15919, 15935), True, 'import numpy as np\n'), ((18639, 18668), 'numpy.logical_or', 'np.logical_or', (['bad_pixels', 'tf'], {}), '(bad_pixels, tf)\n', (18652, 18668), True, 'import numpy as np\n'), ((10168, 10182), 'traitlets.config.Config', 'Config', (['config'], {}), '(config)\n', (10174, 10182), False, 'from traitlets.config import Config\n'), ((10625, 10643), 
'traitlets.config.Config', 'Config', (['ped_config'], {}), '(ped_config)\n', (10631, 10643), False, 'from traitlets.config import Config\n'), ((10919, 10933), 'traitlets.config.Config', 'Config', (['config'], {}), '(config)\n', (10925, 10933), False, 'from traitlets.config import Config\n'), ((14598, 14624), 'numpy.sum', 'np.sum', (['(dq[dq < maxq] ** 2)'], {}), '(dq[dq < maxq] ** 2)\n', (14604, 14624), True, 'import numpy as np\n'), ((14659, 14689), 'numpy.sum', 'np.sum', (['(dqmc[dqmc < maxq] ** 2)'], {}), '(dqmc[dqmc < maxq] ** 2)\n', (14665, 14689), True, 'import numpy as np\n'), ((20715, 20729), 'traitlets.config.Config', 'Config', (['config'], {}), '(config)\n', (20721, 20729), False, 'from traitlets.config import Config\n'), ((21118, 21160), 'traitlets.config.Config', 'Config', (["config['LSTCalibrationCalculator']"], {}), "(config['LSTCalibrationCalculator'])\n", (21124, 21160), False, 'from traitlets.config import Config\n')] |
import base64
import datetime
import io
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from xlrd.xldate import xldate_as_datetime
from yattag import Doc
plt.rcParams.update({"figure.autolayout": True})  # use tight auto-layout for every figure produced below
import matplotlib.gridspec as gridspec
import pandas as pd
import scipy.stats
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import logging
"""
TF_CPP_MIN_LOG_LEVEL:
Defaults to 0, so all logs are shown. Set TF_CPP_MIN_LOG_LEVEL to 1 to filter out INFO logs, 2 to additionally filter out WARNING, 3 to additionally filter out ERROR.
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
from tensorflow import keras
class NNetwork(object):
def __init__(self, network_count=200, epochs=1000):
logging.getLogger().setLevel(logging.INFO)
self.xl_dateformat = r"%Y-%m-%dT%H:%M"
self.model = None
self.pretrained_networks = []
self.software_version = "2.0.1"
self.input_filename = None
self.today = str(datetime.date.today())
self.avg_time_elapsed = 0
self.predictors_scaler = MinMaxScaler(feature_range=(-1, 1))
self.targets_scaler = MinMaxScaler(feature_range=(-1, 1))
self.history = None
self.file = None
self.skipped_rows = []
self.ruleset = []
self.layer1_neurons = 12
self.network_count = network_count
self.epochs = epochs
self.predictors = None
self.targets = None
self.predictions = None
self.avg_case_results_am = None
self.avg_case_results_pm = None
self.worst_case_results_am = None
self.worst_case_results_pm = None
self.WB_bandwidth = None
self.post_process_check = False # Is post-processed better than raw. If False, uses raw results, if true, uses post-processed results
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(self.layer1_neurons, input_dim=5, activation="tanh")
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer, metrics=["mse"])
    def import_data_from_csv(self, filename):
        """
        Imports data to the network by a comma-separated values (CSV) file.
        Load data to a network that are stored in .csv file format.
        The data loaded from this method can be used both for training reasons as
        well as to make predictions.
        Side effects: populates self.file (cleaned DataFrame), self.predictors /
        self.targets (model inputs and outputs), self.durations and
        self.time_of_collection (elapsed seconds and AM/PM flag per sample),
        and self.skipped_rows (rows rejected by the validation rules).
        :param filename: String containing the filename of the .csv file containing the input data (e.g "input_data.csv")
        """
        df = pd.read_csv(filename)
        self.file = df.copy()
        global FRC_IN
        global FRC_OUT
        global WATTEMP
        global COND
        # Locate the fields used as inputs/predictors and outputs in the loaded file
        # and split them
        # NOTE(review): if none of the three known column layouts matches, the
        # globals below stay undefined and the rules further down raise
        # NameError — confirm callers guarantee one of these schemas.
        if "se1_frc" in self.file.columns:
            FRC_IN = "se1_frc"
            WATTEMP = "se1_wattemp"
            COND = "se1_cond"
            FRC_OUT = "se4_frc"
        elif "ts_frc1" in self.file.columns:
            FRC_IN = "ts_frc1"
            WATTEMP = "ts_wattemp"
            COND = "ts_cond"
            FRC_OUT = "hh_frc1"
        elif "ts_frc" in self.file.columns:
            FRC_IN = "ts_frc"
            WATTEMP = "ts_wattemp"
            COND = "ts_cond"
            FRC_OUT = "hh_frc"
        # Standardize the DataFrame by specifying rules
        # To add a new rule, call the method execute_rule with the parameters (description, affected_column, query)
        self.execute_rule("Invalid tapstand FRC", FRC_IN, self.file[FRC_IN].isnull())
        self.execute_rule("Invalid household FRC", FRC_OUT, self.file[FRC_OUT].isnull())
        self.execute_rule(
            "Invalid tapstand date/time",
            "ts_datetime",
            self.valid_dates(self.file["ts_datetime"]),
        )
        self.execute_rule(
            "Invalid household date/time",
            "hh_datetime",
            self.valid_dates(self.file["hh_datetime"]),
        )
        self.skipped_rows = df.loc[df.index.difference(self.file.index)]
        self.file.reset_index(drop=True, inplace=True)  # fix dropped indices in pandas
        # Locate the rows of the missing data
        # Water temperature / conductivity are optional predictors: drop only
        # the incomplete rows when fewer than 90% of rows are missing;
        # otherwise the whole column is left out of the predictors below.
        drop_threshold = 0.90 * len(self.file.loc[:, [FRC_IN]])
        nan_rows_watt = self.file.loc[self.file[WATTEMP].isnull()]
        if len(nan_rows_watt) < drop_threshold:
            self.execute_rule(
                "Missing Water Temperature Measurement",
                WATTEMP,
                self.file[WATTEMP].isnull(),
            )
        nan_rows_cond = self.file.loc[self.file[COND].isnull()]
        if len(nan_rows_cond) < drop_threshold:
            self.execute_rule("Missing EC Measurement", COND, self.file[COND].isnull())
        self.skipped_rows = df.loc[df.index.difference(self.file.index)]
        self.file.reset_index(drop=True, inplace=True)
        start_date = self.file["ts_datetime"]
        end_date = self.file["hh_datetime"]
        durations = []
        all_dates = []
        collection_time = []
        for i in range(len(start_date)):
            try:
                # excel type
                # Timestamps stored as Excel serial day numbers.
                start = float(start_date[i])
                end = float(end_date[i])
                start = xldate_as_datetime(start, datemode=0)
                # Flag afternoon collections (hour 13-23) with 1, else 0.
                if start.hour > 12:
                    collection_time = np.append(collection_time, 1)
                else:
                    collection_time = np.append(collection_time, 0)
                end = xldate_as_datetime(end, datemode=0)
            except ValueError:
                # kobo type
                # Timestamps stored as strings, e.g. "2020/01/31T12:00"; the
                # slash-to-dash replacement makes them match xl_dateformat.
                start = start_date[i][:16].replace("/", "-")
                end = end_date[i][:16].replace("/", "-")
                start = datetime.datetime.strptime(start, self.xl_dateformat)
                if start.hour > 12:
                    collection_time = np.append(collection_time, 1)
                else:
                    collection_time = np.append(collection_time, 0)
                end = datetime.datetime.strptime(end, self.xl_dateformat)
            durations.append((end - start).total_seconds())
            all_dates.append(datetime.datetime.strftime(start, self.xl_dateformat))
        self.durations = durations
        self.time_of_collection = collection_time
        self.avg_time_elapsed = np.mean(durations)
        # Extract the column of dates for all data and put them in YYYY-MM-DD format
        self.file["formatted_date"] = all_dates
        predictors = {
            FRC_IN: self.file[FRC_IN],
            "elapsed time": (np.array(self.durations) / 3600),  # seconds -> hours
            "time of collection (0=AM, 1=PM)": self.time_of_collection,
        }
        self.targets = self.file.loc[:, FRC_OUT]
        self.var_names = [
            "Tapstand FRC (mg/L)",
            "Elapsed Time",
            "time of collection (0=AM, 1=PM)",
        ]
        self.predictors = pd.DataFrame(predictors)
        # Optional predictors: only include water temperature / conductivity
        # when they are sufficiently populated (see drop_threshold above).
        if len(nan_rows_watt) < drop_threshold:
            self.predictors[WATTEMP] = self.file[WATTEMP]
            self.var_names.append("Water Temperature(" + r"$\degree$" + "C)")
            self.median_wattemp = np.median(self.file[WATTEMP].dropna().to_numpy())
            self.upper95_wattemp = np.percentile(
                self.file[WATTEMP].dropna().to_numpy(), 95
            )
        if len(nan_rows_cond) < drop_threshold:
            self.predictors[COND] = self.file[COND]
            self.var_names.append("EC (" + r"$\mu$" + "s/cm)")
            self.median_cond = np.median(self.file[COND].dropna().to_numpy())
            self.upper95_cond = np.percentile(self.file[COND].dropna().to_numpy(), 95)
        self.targets = self.targets.values.reshape(-1, 1)
        self.datainputs = self.predictors
        self.dataoutputs = self.targets
        self.input_filename = filename
def set_up_model(self):
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(
self.layer1_neurons,
input_dim=len(self.datainputs.columns),
activation="tanh",
)
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer)
def train_SWOT_network(self, directory):
"""Train the set of 200 neural networks on SWOT data
Trains an ensemble of 200 neural networks on se1_frc, water temperature,
water conductivity."""
if not os.path.exists(directory):
os.makedirs(directory)
self.predictors_scaler = self.predictors_scaler.fit(self.predictors)
self.targets_scaler = self.targets_scaler.fit(self.targets)
x = self.predictors
t = self.targets
self.calibration_predictions = []
self.trained_models = {}
for i in range(self.network_count):
logging.info('Training Network ' + str(i))
model_out = self.train_network(x, t, directory)
self.trained_models.update({'model_' + str(i): model_out})
    def train_network(self, x, t, directory):
        """
        Trains a single Neural Network on imported data.
        This method trains Neural Network on data that have previously been imported
        to the network using the import_data_from_csv() method.
        The network used is a Multilayer Perceptron (MLP). Input and Output data are
        normalized using MinMax Normalization.
        The input dataset is split in training and validation datasets, where one
        third of the inputs is the training dataset and the remaining two thirds
        are the validation dataset (train_size=0.333 below).
        The trained member's denormalized predictions over the full calibration
        set are appended to self.calibration_predictions.
        :param x: predictor values (fed through self.predictors_scaler)
        :param t: target values (fed through self.targets_scaler)
        :param directory: output directory; not used in this method
        :return: the trained keras model
        """
        tf.keras.backend.clear_session()
        early_stopping_monitor = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
                                                      restore_best_weights=True)
        x_norm = self.predictors_scaler.transform(x)
        t_norm = self.targets_scaler.transform(t)
        # Clone the architecture only; weights are re-initialized below so each
        # ensemble member starts from a different random state.
        trained_model = keras.models.clone_model(self.model)
        x_norm_train, x_norm_val, t_norm_train, t_norm_val = train_test_split(x_norm, t_norm, train_size=0.333,
                                                                              shuffle=True)
        # Random uniform re-initialization in [-0.05, 0.05] for every weight tensor.
        new_weights = [np.random.uniform(-0.05, 0.05, w.shape) for w in trained_model.get_weights()]
        trained_model.set_weights(new_weights)
        trained_model.compile(loss='mse', optimizer=self.optimizer)
        # Full-batch training (batch_size = training-set size) with early stopping
        # on validation loss, restoring the best weights seen.
        trained_model.fit(x_norm_train, t_norm_train, epochs=self.epochs, validation_data=(x_norm_val, t_norm_val),
                          callbacks=[early_stopping_monitor], verbose=0, batch_size=len(t_norm_train))
        self.calibration_predictions.append(self.targets_scaler.inverse_transform(trained_model.predict(x_norm)))
        return trained_model
def calibration_performance_evaluation(self, filename):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)
FRC_X = self.datainputs[FRC_IN].to_numpy()
capture_all = (
np.less_equal(Y_true, np.max(Y_pred, axis=0))
* np.greater_equal(Y_true, np.min(Y_pred, axis=0))
* 1
)
capture_90 = (
np.less_equal(Y_true, np.percentile(Y_pred, 95, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 5, axis=0))
* 1
)
capture_80 = (
np.less_equal(Y_true, np.percentile(Y_pred, 90, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 10, axis=0))
* 1
)
capture_70 = (
np.less_equal(Y_true, np.percentile(Y_pred, 85, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 15, axis=0))
* 1
)
capture_60 = (
np.less_equal(Y_true, np.percentile(Y_pred, 80, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 20, axis=0))
* 1
)
capture_50 = (
np.less_equal(Y_true, np.percentile(Y_pred, 75, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 25, axis=0))
* 1
)
capture_40 = (
np.less_equal(Y_true, np.percentile(Y_pred, 70, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 30, axis=0))
* 1
)
capture_30 = (
np.less_equal(Y_true, np.percentile(Y_pred, 65, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 35, axis=0))
* 1
)
capture_20 = (
np.less_equal(Y_true, np.percentile(Y_pred, 60, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 40, axis=0))
* 1
)
capture_10 = (
np.less_equal(Y_true, np.percentile(Y_pred, 55, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 45, axis=0))
* 1
)
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
length_20 = np.sum(np.less(Y_true, 0.2))
test_len = len(Y_true)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture = [
capture_10_sum / test_len,
capture_20_sum / test_len,
capture_30_sum / test_len,
capture_40_sum / test_len,
capture_50_sum / test_len,
capture_60_sum / test_len,
capture_70_sum / test_len,
capture_80_sum / test_len,
capture_90_sum / test_len,
capture_all_sum / test_len,
]
capture_20 = [
capture_10_20_sum / length_20,
capture_20_20_sum / length_20,
capture_30_20_sum / length_20,
capture_40_20_sum / length_20,
capture_50_20_sum / length_20,
capture_60_20_sum / length_20,
capture_70_20_sum / length_20,
capture_80_20_sum / length_20,
capture_90_20_sum / length_20,
capture_all_20_sum / length_20,
]
self.percent_capture_cal = capture_all_sum / test_len
self.percent_capture_02_cal = capture_all_20_sum / length_20
self.CI_reliability_cal = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
self.CI_reliability_02_cal = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
# Rank Histogram
rank = []
for a in range(0, len(Y_true)):
n_lower = np.sum(np.greater(Y_true[a], Y_pred[:, a]))
n_equal = np.sum(np.equal(Y_true[a], Y_pred[:, a]))
deviate_rank = np.random.random_integers(0, n_equal)
rank = np.append(rank, n_lower + deviate_rank)
rank_hist = np.histogram(rank, bins=self.network_count + 1)
delta = np.sum((rank_hist[0] - (test_len / ((self.network_count + 1)))) ** 2)
delta_0 = self.network_count * test_len / (self.network_count + 1)
self.delta_score_cal = delta / delta_0
c = self.network_count
alpha = np.zeros((test_len, (c + 1)))
beta = np.zeros((test_len, (c + 1)))
low_outlier = 0
high_outlier = 0
for a in range(0, test_len):
observation = Y_true[a]
forecast = np.sort(Y_pred[:, a])
for b in range(1, c):
if observation > forecast[b]:
alpha[a, b] = forecast[b] - forecast[b - 1]
beta[a, b] = 0
elif forecast[b] > observation > forecast[b - 1]:
alpha[a, b] = observation - forecast[b - 1]
beta[a, b] = forecast[b] - observation
else:
alpha[a, b] = 0
beta[a, b] = forecast[b] - forecast[b - 1]
# overwrite boundaries in case of outliers
if observation < forecast[0]:
beta[a, 0] = forecast[0] - observation
low_outlier += 1
if observation > forecast[c - 1]:
alpha[a, c] = observation - forecast[c - 1]
high_outlier += 1
alpha_bar = np.mean(alpha, axis=0)
beta_bar = np.mean(beta, axis=0)
g_bar = alpha_bar + beta_bar
o_bar = beta_bar / (alpha_bar + beta_bar)
if low_outlier > 0:
o_bar[0] = low_outlier / test_len
g_bar[0] = beta_bar[0] / o_bar[0]
else:
o_bar[0] = 0
g_bar[0] = 0
if high_outlier > 0:
o_bar[c] = high_outlier / test_len
g_bar[c] = alpha_bar[c] / o_bar[c]
else:
o_bar[c] = 0
g_bar[c] = 0
p_i = np.arange(0 / c, (c + 1) / c, 1 / c)
self.CRPS_cal = np.sum(
g_bar * ((1 - o_bar) * (p_i**2) + o_bar * ((1 - p_i) ** 2))
)
CI_x = [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
fig = plt.figure(figsize=(15, 10), dpi=100)
gridspec.GridSpec(2, 3)
plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
plt.axhline(0.2, c="k", ls="--", label="Point-of-consumption FRC = 0.2 mg/L")
plt.scatter(
FRC_X, Y_true, edgecolors="k", facecolors="None", s=20, label="Observed"
)
plt.scatter(
FRC_X,
np.median(Y_pred, axis=0),
facecolors="r",
edgecolors="None",
s=10,
label="Forecast Median",
)
plt.vlines(
FRC_X,
np.min(Y_pred, axis=0),
np.max(Y_pred, axis=0),
color="r",
label="Forecast Range",
)
plt.xlabel("Point-of-Distribution FRC (mg/L)")
plt.ylabel("Point-of-Consumption FRC (mg/L)")
plt.xlim([0, np.max(FRC_X)])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax1 = fig.axes[0]
ax1.set_title("(a)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (0, 2), colspan=1, rowspan=1)
plt.plot(CI_x, CI_x, c="k")
plt.scatter(CI_x, capture, label="All observations")
plt.scatter(CI_x, capture_20, label="Point-of-Consumption FRC below 0.2 mg/L")
plt.xlabel("Ensemble Confidence Interval")
plt.ylabel("Percent Capture")
plt.ylim([0, 1])
plt.xlim([0, 1])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax2 = fig.axes[1]
ax2.set_title("(b)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (1, 2), colspan=1, rowspan=1)
plt.hist(rank, bins=(self.network_count + 1), density=True)
plt.xlabel("Rank")
plt.ylabel("Probability")
ax3 = fig.axes[2]
ax3.set_title("(c)", y=0.88, x=0.05)
plt.savefig(
os.path.splitext(filename)[0] + "_Calibration_Diagnostic_Figs.png",
format="png",
bbox_inches="tight",
)
plt.close()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format="png", bbox_inches="tight")
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def get_bw(self):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)[:, :, 0]
s2 = []
xt_yt = []
for a in range(0, len(Y_true)):
observation = Y_true[a]
forecast = np.sort(Y_pred[:, a])
s2 = np.append(s2, np.var(forecast))
xt_yt = np.append(xt_yt, (np.mean(forecast) - observation) ** 2)
WB_bw = np.mean(xt_yt) - (1 + 1 / self.network_count) * np.mean(s2)
return WB_bw
    def post_process_performance_eval(self, bandwidth):
        """Score KDE-smoothed (post-processed) ensemble forecasts.

        Each observation's ensemble is smoothed with a Gaussian KDE using the
        given bandwidth factor, then the same diagnostics as the raw-ensemble
        evaluation are computed from the smoothed CDF: mean CRPS, confidence-
        interval reliability (all observations and observations < 0.2 mg/L),
        and percent capture for the full forecast range.

        :param bandwidth: scalar passed to scipy's gaussian_kde as bw_method
        :return: tuple (mean_CRPS, CI reliability sum of squares,
                 CI reliability sum of squares below 0.2 mg/L,
                 percent capture, percent capture below 0.2 mg/L)
        """
        Y_true = np.squeeze(np.array(self.targets))
        Y_pred = np.array(self.calibration_predictions)[:, :, 0]
        test_len = len(Y_true)
        min_CI = []
        max_CI = []
        CI_90_Lower = []
        CI_90_Upper = []
        CI_80_Lower = []
        CI_80_Upper = []
        CI_70_Lower = []
        CI_70_Upper = []
        CI_60_Lower = []
        CI_60_Upper = []
        CI_50_Lower = []
        CI_50_Upper = []
        CI_40_Lower = []
        CI_40_Upper = []
        CI_30_Lower = []
        CI_30_Upper = []
        CI_20_Lower = []
        CI_20_Upper = []
        CI_10_Lower = []
        CI_10_Upper = []
        CI_median = []
        CRPS = []
        Kernel_Risk = []
        # Fixed FRC evaluation grid: [-10, 10] mg/L in 0.001 steps; each pdf
        # sample is multiplied by the step so cumsum approximates the CDF.
        evaluation_range = np.arange(-10, 10.001, 0.001)
        # compute CRPS as well as the confidence intervals of each ensemble forecast
        for a in range(0, test_len):
            scipy_kde = scipy.stats.gaussian_kde(Y_pred[:, a], bw_method=bandwidth)
            scipy_pdf = scipy_kde.evaluate(evaluation_range) * 0.001
            scipy_cdf = np.cumsum(scipy_pdf)
            # Forecast bounds: last grid point with CDF exactly 0 and the
            # grid point where the CDF is maximal.
            # NOTE(review): if the KDE pdf never underflows to exactly 0 at
            # the low end, np.where(scipy_cdf == 0)[0] is empty and np.max
            # raises — presumably the grid extends far enough below the data
            # that this never happens; confirm for very wide bandwidths.
            min_CI = np.append(
                min_CI, evaluation_range[np.max(np.where(scipy_cdf == 0)[0])]
            )
            max_CI = np.append(max_CI, evaluation_range[np.argmax(scipy_cdf)])
            # Central CI bounds read off the smoothed CDF at the matching
            # quantiles (e.g. 90% CI spans the 5th to 95th percentiles).
            CI_90_Lower = np.append(
                CI_90_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.05)))]
            )
            CI_90_Upper = np.append(
                CI_90_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.95)))]
            )
            CI_80_Lower = np.append(
                CI_80_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.1)))]
            )
            CI_80_Upper = np.append(
                CI_80_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.9)))]
            )
            CI_70_Lower = np.append(
                CI_70_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.15)))]
            )
            CI_70_Upper = np.append(
                CI_70_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.85)))]
            )
            CI_60_Lower = np.append(
                CI_60_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.2)))]
            )
            CI_60_Upper = np.append(
                CI_60_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.8)))]
            )
            CI_50_Lower = np.append(
                CI_50_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.25)))]
            )
            CI_50_Upper = np.append(
                CI_50_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.75)))]
            )
            CI_40_Lower = np.append(
                CI_40_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.3)))]
            )
            CI_40_Upper = np.append(
                CI_40_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.7)))]
            )
            CI_30_Lower = np.append(
                CI_30_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.35)))]
            )
            CI_30_Upper = np.append(
                CI_30_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.65)))]
            )
            CI_20_Lower = np.append(
                CI_20_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.4)))]
            )
            CI_20_Upper = np.append(
                CI_20_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.6)))]
            )
            CI_10_Lower = np.append(
                CI_10_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.45)))]
            )
            CI_10_Upper = np.append(
                CI_10_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.55)))]
            )
            CI_median = np.append(
                CI_median, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.50)))]
            )
            # Probability mass below the 0.2 mg/L safety threshold.
            Kernel_Risk = np.append(Kernel_Risk, scipy_kde.integrate_box_1d(-10, 0.2))
            # CRPS: integral of (CDF - step-at-observation)^2 over the grid.
            Heaviside = (evaluation_range >= Y_true[a]).astype(int)
            CRPS_dif = (scipy_cdf - Heaviside) ** 2
            CRPS = np.append(CRPS, np.sum(CRPS_dif * 0.001))
        mean_CRPS = np.mean(CRPS)
        # Capture indicators per nominal CI, as in the raw-ensemble evaluation.
        capture_all = (
            np.less_equal(Y_true, max_CI) * np.greater_equal(Y_true, min_CI) * 1
        )
        capture_90 = (
            np.less_equal(Y_true, CI_90_Upper)
            * np.greater_equal(Y_true, CI_90_Lower)
            * 1
        )
        capture_80 = (
            np.less_equal(Y_true, CI_80_Upper)
            * np.greater_equal(Y_true, CI_80_Lower)
            * 1
        )
        capture_70 = (
            np.less_equal(Y_true, CI_70_Upper)
            * np.greater_equal(Y_true, CI_70_Lower)
            * 1
        )
        capture_60 = (
            np.less_equal(Y_true, CI_60_Upper)
            * np.greater_equal(Y_true, CI_60_Lower)
            * 1
        )
        capture_50 = (
            np.less_equal(Y_true, CI_50_Upper)
            * np.greater_equal(Y_true, CI_50_Lower)
            * 1
        )
        capture_40 = (
            np.less_equal(Y_true, CI_40_Upper)
            * np.greater_equal(Y_true, CI_40_Lower)
            * 1
        )
        capture_30 = (
            np.less_equal(Y_true, CI_30_Upper)
            * np.greater_equal(Y_true, CI_30_Lower)
            * 1
        )
        capture_20 = (
            np.less_equal(Y_true, CI_20_Upper)
            * np.greater_equal(Y_true, CI_20_Lower)
            * 1
        )
        capture_10 = (
            np.less_equal(Y_true, CI_10_Upper)
            * np.greater_equal(Y_true, CI_10_Lower)
            * 1
        )
        length_20 = np.sum(np.less(Y_true, 0.2))
        capture_all_20 = capture_all * np.less(Y_true, 0.2)
        capture_90_20 = capture_90 * np.less(Y_true, 0.2)
        capture_80_20 = capture_80 * np.less(Y_true, 0.2)
        capture_70_20 = capture_70 * np.less(Y_true, 0.2)
        capture_60_20 = capture_60 * np.less(Y_true, 0.2)
        capture_50_20 = capture_50 * np.less(Y_true, 0.2)
        capture_40_20 = capture_40 * np.less(Y_true, 0.2)
        capture_30_20 = capture_30 * np.less(Y_true, 0.2)
        capture_20_20 = capture_20 * np.less(Y_true, 0.2)
        capture_10_20 = capture_10 * np.less(Y_true, 0.2)
        capture_all_sum = np.sum(capture_all)
        capture_90_sum = np.sum(capture_90)
        capture_80_sum = np.sum(capture_80)
        capture_70_sum = np.sum(capture_70)
        capture_60_sum = np.sum(capture_60)
        capture_50_sum = np.sum(capture_50)
        capture_40_sum = np.sum(capture_40)
        capture_30_sum = np.sum(capture_30)
        capture_20_sum = np.sum(capture_20)
        capture_10_sum = np.sum(capture_10)
        capture_all_20_sum = np.sum(capture_all_20)
        capture_90_20_sum = np.sum(capture_90_20)
        capture_80_20_sum = np.sum(capture_80_20)
        capture_70_20_sum = np.sum(capture_70_20)
        capture_60_20_sum = np.sum(capture_60_20)
        capture_50_20_sum = np.sum(capture_50_20)
        capture_40_20_sum = np.sum(capture_40_20)
        capture_30_20_sum = np.sum(capture_30_20)
        capture_20_20_sum = np.sum(capture_20_20)
        capture_10_20_sum = np.sum(capture_10_20)
        # Reliability: squared deviation of observed capture from nominal CI.
        capture_sum_squares = (
            (0.1 - capture_10_sum / test_len) ** 2
            + (0.2 - capture_20_sum / test_len) ** 2
            + (0.3 - capture_30_sum / test_len) ** 2
            + (0.4 - capture_40_sum / test_len) ** 2
            + (0.5 - capture_50_sum / test_len) ** 2
            + (0.6 - capture_60_sum / test_len) ** 2
            + (0.7 - capture_70_sum / test_len) ** 2
            + (0.8 - capture_80_sum / test_len) ** 2
            + (0.9 - capture_90_sum / test_len) ** 2
            + (1 - capture_all_sum / test_len) ** 2
        )
        capture_20_sum_squares = (
            (0.1 - capture_10_20_sum / length_20) ** 2
            + (0.2 - capture_20_20_sum / length_20) ** 2
            + (0.3 - capture_30_20_sum / length_20) ** 2
            + (0.4 - capture_40_20_sum / length_20) ** 2
            + (0.5 - capture_50_20_sum / length_20) ** 2
            + (0.6 - capture_60_20_sum / length_20) ** 2
            + (0.7 - capture_70_20_sum / length_20) ** 2
            + (0.8 - capture_80_20_sum / length_20) ** 2
            + (0.9 - capture_90_20_sum / length_20) ** 2
            + (1 - capture_all_20_sum / length_20) ** 2
        )
        return (
            mean_CRPS,
            capture_sum_squares,
            capture_20_sum_squares,
            capture_all_sum / test_len,
            capture_all_20_sum / length_20,
        )
def post_process_cal(self):
self.WB_bandwidth = self.get_bw()
(
self.CRPS_post_cal,
self.CI_reliability_post_cal,
self.CI_reliability_02_post_cal,
self.percent_capture_post_cal,
self.percent_capture_02_post_cal,
) = self.post_process_performance_eval(self.WB_bandwidth)
CRPS_Skill = (self.CRPS_post_cal - self.CRPS_cal) / (0 - self.CRPS_cal)
CI_Skill = (self.CI_reliability_post_cal - self.CI_reliability_cal) / (
0 - self.CI_reliability_cal
)
CI_20_Skill = (self.CI_reliability_02_post_cal - self.CI_reliability_02_cal) / (
0 - self.CI_reliability_02_cal
)
PC_Skill = (self.percent_capture_post_cal - self.percent_capture_cal) / (
1 - self.percent_capture_cal
)
PC_20_Skill = (
self.percent_capture_02_post_cal - self.percent_capture_02_cal
) / (1 - self.percent_capture_02_cal)
Net_Score = CRPS_Skill + CI_Skill + CI_20_Skill + PC_Skill + PC_20_Skill
if Net_Score > 0:
self.post_process_check = True
else:
self.post_process_check = False
def full_performance_evaluation(self, directory):
x_norm = self.predictors_scaler.transform(self.predictors)
t_norm = self.targets_scaler.transform(self.targets)
base_model = self.model
base_model.save(directory + "\\base_network.h5")
x_cal_norm, x_test_norm, t_cal_norm, t_test_norm = train_test_split(
x_norm, t_norm, test_size=0.25, shuffle=False, random_state=10
)
self.verifying_observations = self.targets_scaler.inverse_transform(t_test_norm)
self.test_x_data = self.predictors_scaler.inverse_transform(x_test_norm)
early_stopping_monitor = keras.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=10, restore_best_weights=True
)
self.verifying_predictions = []
for i in range(0, self.network_count):
tf.keras.backend.clear_session()
self.model = keras.models.load_model(directory + "\\base_network.h5")
x_norm_train, x_norm_val, t_norm_train, t_norm_val = train_test_split(
x_cal_norm,
t_cal_norm,
train_size=1 / 3,
shuffle=True,
random_state=i**2,
)
new_weights = [
np.random.uniform(-0.05, 0.05, w.shape)
for w in self.model.get_weights()
]
self.model.set_weights(new_weights)
self.model.fit(
x_norm_train,
t_norm_train,
epochs=self.epochs,
validation_data=(x_norm_val, t_norm_val),
callbacks=[early_stopping_monitor],
verbose=0,
batch_size=len(t_norm_train),
)
self.verifying_predictions.append(self.targets_scaler.inverse_transform(self.model.predict(x_test_norm)))
Y_true = np.array(self.verifying_observations)
Y_pred = np.array(self.verifying_predictions)
FRC_X = self.test_x_data[:, 0]
capture_all = (
np.less_equal(Y_true, np.max(Y_pred, axis=0))
* np.greater_equal(Y_true, np.min(Y_pred, axis=0))
* 1
)
capture_90 = (
np.less_equal(Y_true, np.percentile(Y_pred, 95, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 5, axis=0))
* 1
)
capture_80 = (
np.less_equal(Y_true, np.percentile(Y_pred, 90, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 10, axis=0))
* 1
)
capture_70 = (
np.less_equal(Y_true, np.percentile(Y_pred, 85, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 15, axis=0))
* 1
)
capture_60 = (
np.less_equal(Y_true, np.percentile(Y_pred, 80, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 20, axis=0))
* 1
)
capture_50 = (
np.less_equal(Y_true, np.percentile(Y_pred, 75, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 25, axis=0))
* 1
)
capture_40 = (
np.less_equal(Y_true, np.percentile(Y_pred, 70, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 30, axis=0))
* 1
)
capture_30 = (
np.less_equal(Y_true, np.percentile(Y_pred, 65, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 35, axis=0))
* 1
)
capture_20 = (
np.less_equal(Y_true, np.percentile(Y_pred, 60, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 40, axis=0))
* 1
)
capture_10 = (
np.less_equal(Y_true, np.percentile(Y_pred, 55, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 45, axis=0))
* 1
)
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
length_20 = np.sum(np.less(Y_true, 0.2))
test_len = len(Y_true)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture = [
capture_10_sum / test_len,
capture_20_sum / test_len,
capture_30_sum / test_len,
capture_40_sum / test_len,
capture_50_sum / test_len,
capture_60_sum / test_len,
capture_70_sum / test_len,
capture_80_sum / test_len,
capture_90_sum / test_len,
capture_all_sum / test_len,
]
capture_20 = [
capture_10_20_sum / length_20,
capture_20_20_sum / length_20,
capture_30_20_sum / length_20,
capture_40_20_sum / length_20,
capture_50_20_sum / length_20,
capture_60_20_sum / length_20,
capture_70_20_sum / length_20,
capture_80_20_sum / length_20,
capture_90_20_sum / length_20,
capture_all_20_sum / length_20,
]
self.percent_capture_cal = capture_all_sum / test_len
self.percent_capture_02_cal = capture_all_20_sum / length_20
self.CI_reliability_cal = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
self.CI_reliability_02_cal = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
# Rank Histogram
rank = []
for a in range(0, len(Y_true)):
n_lower = np.sum(np.greater(Y_true[a], Y_pred[:, a]))
n_equal = np.sum(np.equal(Y_true[a], Y_pred[:, a]))
deviate_rank = np.random.random_integers(0, n_equal)
rank = np.append(rank, n_lower + deviate_rank)
rank_hist = np.histogram(rank, bins=self.network_count + 1)
delta = np.sum((rank_hist[0] - (test_len / ((self.network_count + 1)))) ** 2)
delta_0 = self.network_count * test_len / (self.network_count + 1)
self.delta_score_cal = delta / delta_0
CI_x = [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
fig = plt.figure(figsize=(15, 10), dpi=100)
gridspec.GridSpec(2, 3)
plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
plt.axhline(0.2, c="k", ls="--", label="Point-of-consumption FRC = 0.2 mg/L")
plt.scatter(
FRC_X, Y_true, edgecolors="k", facecolors="None", s=20, label="Observed"
)
plt.scatter(
FRC_X,
np.median(Y_pred, axis=0),
facecolors="r",
edgecolors="None",
s=10,
label="Forecast Median",
)
plt.vlines(
FRC_X,
np.min(Y_pred, axis=0),
np.max(Y_pred, axis=0),
color="r",
label="Forecast Range",
)
plt.xlabel("Point-of-Distribution FRC (mg/L)")
plt.ylabel("Point-of-Consumption FRC (mg/L)")
plt.subplot2grid((2, 3), (0, 2), colspan=1, rowspan=1)
plt.plot(CI_x, CI_x, c='k')
plt.scatter(CI_x, capture)
plt.scatter(CI_x, capture_20)
plt.xlabel("Ensemble Confidence Interval")
plt.ylabel("Percent Capture")
plt.ylim([0, 1])
plt.xlim([0, 1])
plt.subplot2grid((2, 3), (1, 2), colspan=1, rowspan=1)
plt.hist(rank, bins=(self.network_count + 1), density=True)
plt.xlabel('Rank')
plt.ylabel('Probability')
plt.savefig(directory + "\\Verification_Diagnostic_Figs.png", format='png')
plt.close()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format='png')
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def set_inputs_for_table(self, storage_target):
frc = np.arange(0.20, 2.05, 0.05)
lag_time = [storage_target for i in range(0, len(frc))]
am_collect = [0 for i in range(0, len(frc))]
pm_collect = [1 for i in range(0, len(frc))]
temp_med_am = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": am_collect,
}
temp_med_pm = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": pm_collect,
}
temp_95_am = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": am_collect,
}
temp_95_pm = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": pm_collect,
}
if WATTEMP in self.datainputs.columns:
watt_med = [self.median_wattemp for i in range(0, len(frc))]
watt_95 = [self.upper95_wattemp for i in range(0, len(frc))]
temp_med_am.update({"ts_wattemp": watt_med})
temp_med_pm.update({"ts_wattemp": watt_med})
temp_95_am.update({"ts_wattemp": watt_95})
temp_95_pm.update({"ts_wattemp": watt_95})
if COND in self.datainputs.columns:
cond_med = [self.median_cond for i in range(0, len(frc))]
cond_95 = [self.upper95_cond for i in range(0, len(frc))]
temp_med_am.update({"ts_cond": cond_med})
temp_med_pm.update({"ts_cond": cond_med})
temp_95_am.update({"ts_cond": cond_95})
temp_95_pm.update({"ts_cond": cond_95})
self.avg_case_predictors_am = pd.DataFrame(temp_med_am)
self.avg_case_predictors_pm = pd.DataFrame(temp_med_pm)
self.worst_case_predictors_am = pd.DataFrame(temp_95_am)
self.worst_case_predictors_pm = pd.DataFrame(temp_95_pm)
def post_process_predictions(self, results_table_frc):
# results_table_frc=results_table_frc.to_numpy()
evaluation_range = np.arange(-10, 10.001, 0.001)
test1_frc = np.arange(0.2, 2.05, 0.05)
bandwidth = self.WB_bandwidth
Max_CI = []
Min_CI = []
CI_99_Upper = []
CI_99_Lower = []
CI_95_Upper = []
CI_95_Lower = []
Median_Results = []
risk_00_kernel_frc = []
risk_20_kernel_frc = []
risk_25_kernel_frc = []
risk_30_kernel_frc = []
for a in range(0, len(test1_frc)):
scipy_kde = scipy.stats.gaussian_kde(results_table_frc[a, :], bw_method=bandwidth)
risk_00_kernel_frc = np.append(risk_00_kernel_frc, scipy_kde.integrate_box_1d(-10, 0))
risk_20_kernel_frc = np.append(risk_20_kernel_frc, scipy_kde.integrate_box_1d(-10, 0.2))
risk_25_kernel_frc = np.append(risk_25_kernel_frc, scipy_kde.integrate_box_1d(-10, 0.25))
risk_30_kernel_frc = np.append(risk_30_kernel_frc, scipy_kde.integrate_box_1d(-10, 0.3))
scipy_pdf = scipy_kde.evaluate(evaluation_range) * 0.001
scipy_cdf = np.cumsum(scipy_pdf)
Min_CI = np.append(Min_CI, evaluation_range[np.max(np.where(scipy_cdf == 0)[0])])
Max_CI = np.append(Max_CI, evaluation_range[np.argmax(scipy_cdf)])
CI_99_Upper = np.append(CI_99_Upper,
evaluation_range[np.argmin(np.abs((scipy_cdf - 0.995)))])
CI_99_Lower = np.append(CI_99_Lower,
evaluation_range[np.argmin(np.abs((scipy_cdf - 0.005)))])
CI_95_Upper = np.append(CI_95_Upper,
evaluation_range[np.argmin(np.abs((scipy_cdf - 0.975)))])
CI_95_Lower = np.append(CI_95_Lower,
evaluation_range[np.argmin(np.abs((scipy_cdf - 0.025)))])
Median_Results = np.append(Median_Results,
evaluation_range[np.argmin(np.abs((scipy_cdf - 0.5)))])
temp_key = {"Tapstand FRC":np.arange(0.20,2.05,0.05),"median": Median_Results, "Ensemble Minimum": Min_CI, "Ensemble Maximum": Max_CI,
"Lower 99 CI": CI_99_Lower, "Upper 99 CI": CI_99_Upper, "Lower 95 CI": CI_95_Lower,
"Upper 95 CI": CI_95_Upper, 'probability==0': risk_00_kernel_frc,
"probability<=0.20": risk_20_kernel_frc, "probability<=0.25": risk_25_kernel_frc,
"probability<=0.30": risk_30_kernel_frc}
post_processed_df = pd.DataFrame(temp_key)
return post_processed_df
    def predict(self):
        """
        To make the predictions, a pretrained model must be loaded using the import_pretrained_model() method.
        The SWOT ANN uses an ensemble of 200 ANNs. All of the 200 ANNs make a prediction on the inputs and the results are
        stored. The median of all the 200 predictions is calculated and stored here.
        The method also calculates the probabilities of the target FRC levels to be less than 0.2, 0.25 and 0.3 mg/L respectively.
        The predictions are target FRC values in mg/L, and the probability values range from 0 to 1.
        All of the above results are saved in the self.results class field.
        V2.0 Notes: If at least 1 WQ variable is provided, we do a scenario analysis, providing targets for the average case
        (median water quality) and the "worst case" using the upper 95th percentile water quality

        Reads: self.predictors_scaler, self.targets_scaler, self.trained_models,
        self.network_count, self.post_process_check, and the four scenario
        predictor tables built by set_inputs_for_table().
        Writes: self.{avg,worst}_case_results_{am,pm} and, when
        self.post_process_check is True, the corresponding *_post DataFrames.
        """
        # Initialize empty arrays for the probabilities to be appended in.
        avg_case_results_am = {}
        avg_case_results_pm = {}
        worst_case_results_am = {}
        worst_case_results_pm = {}
        # Normalize the inputs using the input scaler loaded
        input_scaler = self.predictors_scaler
        avg_case_inputs_norm_am = input_scaler.transform(self.avg_case_predictors_am)
        avg_case_inputs_norm_pm = input_scaler.transform(self.avg_case_predictors_pm)
        worst_case_inputs_norm_am = input_scaler.transform(self.worst_case_predictors_am)
        worst_case_inputs_norm_pm = input_scaler.transform(self.worst_case_predictors_pm)
        ##AVERAGE CASE TARGET w AM COLLECTION
        # Iterate through all loaded pretrained networks, make predictions based on the inputs,
        # calculate the median of the predictions and store everything to self.results
        for j in range(0, self.network_count):
            key = "se4_frc_net-" + str(j)
            # Inverse-transform back to mg/L; sum(..., []) flattens the
            # per-sample single-element lists into one flat list.
            predictions = self.targets_scaler.inverse_transform(
                self.trained_models["model_" + str(j)].predict(avg_case_inputs_norm_am)).tolist()
            temp = sum(predictions, [])
            avg_case_results_am.update({key: temp})
        self.avg_case_results_am = pd.DataFrame(avg_case_results_am)
        self.avg_case_results_am["median"] = self.avg_case_results_am.median(axis=1)
        # NOTE(review): this predictor-inclusion loop is duplicated by the
        # identical loop immediately below — the second pass rewrites the same
        # values and appears redundant.
        for i in self.avg_case_predictors_am.keys():
            self.avg_case_results_am.update({i: self.avg_case_predictors_am[i].tolist()})
            self.avg_case_results_am[i] = self.avg_case_predictors_am[i].tolist()
        # Include the inputs/predictors in the self.results variable
        for i in self.avg_case_predictors_am.keys():
            self.avg_case_results_am.update({i: self.avg_case_predictors_am[i].tolist()})
            self.avg_case_results_am[i] = self.avg_case_predictors_am[i].tolist()
        if self.post_process_check == False:
            # Calculate all the probability fields and store them to self.results
            # results_table_frc_avg = self.results.iloc[:, 0:(self.network_count - 1)]
            # NOTE(review): iloc[:, 0:(self.network_count - 1)] covers only
            # the first network_count - 1 member columns, excluding the last
            # ensemble member — confirm whether 0:self.network_count was
            # intended (the same slice recurs throughout this method).
            # NOTE(review): this AM block uses np.less_equal while the PM and
            # worst-case blocks below use np.less for the same
            # "probability<=X" fields — confirm which comparison is intended.
            self.avg_case_results_am["probability<=0.20"] = np.sum(
                np.less_equal(self.avg_case_results_am.iloc[:, 0:(self.network_count - 1)], 0.2),
                axis=1) / self.network_count
            self.avg_case_results_am["probability<=0.25"] = np.sum(
                np.less_equal(self.avg_case_results_am.iloc[:, 0:(self.network_count - 1)], 0.25),
                axis=1) / self.network_count
            self.avg_case_results_am["probability<=0.30"] = np.sum(
                np.less_equal(self.avg_case_results_am.iloc[:, 0:(self.network_count - 1)], 0.3),
                axis=1) / self.network_count
        else:
            # Post-processing enabled: smooth the raw ensemble with a KDE.
            self.avg_case_results_am_post = self.post_process_predictions(
                self.avg_case_results_am.iloc[:, 0:(self.network_count - 1)].to_numpy())
        ##AVERAGE CASE TARGET w PM COLLECTION
        # Iterate through all loaded pretrained networks, make predictions based on the inputs,
        # calculate the median of the predictions and store everything to self.results
        for j in range(0, self.network_count):
            key = "se4_frc_net-" + str(j)
            predictions = self.targets_scaler.inverse_transform(
                self.trained_models["model_" + str(j)].predict(avg_case_inputs_norm_pm)).tolist()
            temp = sum(predictions, [])
            avg_case_results_pm.update({key: temp})
        self.avg_case_results_pm = pd.DataFrame(avg_case_results_pm)
        self.avg_case_results_pm["median"] = self.avg_case_results_pm.median(axis=1)
        # Include the inputs/predictors in the self.results variable
        for i in self.avg_case_predictors_pm.keys():
            self.avg_case_results_pm.update({i: self.avg_case_predictors_pm[i].tolist()})
            self.avg_case_results_pm[i] = self.avg_case_predictors_pm[i].tolist()
        if self.post_process_check == False:
            # Calculate all the probability fields and store them to self.results
            # results_table_frc_avg = self.results.iloc[:, 0:(self.network_count - 1)]
            self.avg_case_results_pm["probability<=0.20"] = (
                np.sum(
                    np.less(
                        self.avg_case_results_pm.iloc[:, 0 : (self.network_count - 1)],
                        0.2,
                    ),
                    axis=1,
                )
                / self.network_count
            )
            self.avg_case_results_pm["probability<=0.25"] = (
                np.sum(
                    np.less(
                        self.avg_case_results_pm.iloc[:, 0 : (self.network_count - 1)],
                        0.25,
                    ),
                    axis=1,
                )
                / self.network_count
            )
            self.avg_case_results_pm["probability<=0.30"] = (
                np.sum(
                    np.less(
                        self.avg_case_results_pm.iloc[:, 0 : (self.network_count - 1)],
                        0.3,
                    ),
                    axis=1,
                )
                / self.network_count
            )
        else:
            self.avg_case_results_pm_post = self.post_process_predictions(
                self.avg_case_results_pm.iloc[:, 0:(self.network_count - 1)].to_numpy())
        # Worst-case scenarios only exist when at least one water-quality
        # variable (temperature or conductivity) is in the training inputs.
        if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
            ##WORST CASE TARGET w AM COLLECTION
            for j in range(0, self.network_count):
                key = "se4_frc_net-" + str(j)
                predictions = self.targets_scaler.inverse_transform(
                    self.trained_models["model_" + str(j)].predict(worst_case_inputs_norm_am)).tolist()
                temp = sum(predictions, [])
                worst_case_results_am.update({key: temp})
            self.worst_case_results_am = pd.DataFrame(worst_case_results_am)
            self.worst_case_results_am["median"] = self.worst_case_results_am.median(axis=1)
            # Include the inputs/predictors in the self.results variable
            for i in self.worst_case_predictors_am.keys():
                self.worst_case_results_am.update({i: self.worst_case_predictors_am[i].tolist()})
                self.worst_case_results_am[i] = self.worst_case_predictors_am[i].tolist()
            if self.post_process_check == False:
                # Calculate all the probability fields and store them to self.results
                # results_table_frc_avg = self.results.iloc[:, 0:(self.network_count - 1)]
                self.worst_case_results_am["probability<=0.20"] = (
                    np.sum(
                        np.less(
                            self.worst_case_results_am.iloc[
                                :, 0 : (self.network_count - 1)
                            ],
                            0.2,
                        ),
                        axis=1,
                    )
                    / self.network_count
                )
                self.worst_case_results_am["probability<=0.25"] = (
                    np.sum(
                        np.less(
                            self.worst_case_results_am.iloc[
                                :, 0 : (self.network_count - 1)
                            ],
                            0.25,
                        ),
                        axis=1,
                    )
                    / self.network_count
                )
                self.worst_case_results_am["probability<=0.30"] = (
                    np.sum(
                        np.less(
                            self.worst_case_results_am.iloc[
                                :, 0 : (self.network_count - 1)
                            ],
                            0.3,
                        ),
                        axis=1,
                    )
                    / self.network_count
                )
            else:
                self.worst_case_results_am_post = self.post_process_predictions(
                    self.worst_case_results_am.iloc[:, 0:(self.network_count - 1)].to_numpy())
            ##WORST CASE TARGET w PM COLLECTION
            for j in range(0, self.network_count):
                key = "se4_frc_net-" + str(j)
                predictions = self.targets_scaler.inverse_transform(
                    self.trained_models["model_" + str(j)].predict(worst_case_inputs_norm_pm)).tolist()
                temp = sum(predictions, [])
                worst_case_results_pm.update({key: temp})
            self.worst_case_results_pm = pd.DataFrame(worst_case_results_pm)
            self.worst_case_results_pm["median"] = self.worst_case_results_pm.median(axis=1)
            # Include the inputs/predictors in the self.results variable
            for i in self.worst_case_predictors_pm.keys():
                self.worst_case_results_pm.update({i: self.worst_case_predictors_pm[i].tolist()})
                self.worst_case_results_pm[i] = self.worst_case_predictors_pm[i].tolist()
            if self.post_process_check == False:
                # Calculate all the probability fields and store them to self.results
                # results_table_frc_avg = self.results.iloc[:, 0:(self.network_count - 1)]
                self.worst_case_results_pm["probability<=0.20"] = (
                    np.sum(
                        np.less(
                            self.worst_case_results_pm.iloc[
                                :, 0 : (self.network_count - 1)
                            ],
                            0.2,
                        ),
                        axis=1,
                    )
                    / self.network_count
                )
                self.worst_case_results_pm["probability<=0.25"] = (
                    np.sum(
                        np.less(
                            self.worst_case_results_pm.iloc[
                                :, 0 : (self.network_count - 1)
                            ],
                            0.25,
                        ),
                        axis=1,
                    )
                    / self.network_count
                )
                self.worst_case_results_pm["probability<=0.30"] = (
                    np.sum(
                        np.less(
                            self.worst_case_results_pm.iloc[
                                :, 0 : (self.network_count - 1)
                            ],
                            0.3,
                        ),
                        axis=1,
                    )
                    / self.network_count
                )
            else:
                self.worst_case_results_pm_post = self.post_process_predictions(
                    self.worst_case_results_pm.iloc[:, 0:(self.network_count - 1)].to_numpy())
def results_visualization(self, filename, storage_target):
test1_frc = np.arange(0.2, 2.05, 0.05)
# Variables to plot - Full range, 95th percentile, 99th percentile, median, the three risks
if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
if self.post_process_check == False:
results_table_frc_avg_am = self.avg_case_results_am.iloc[
:, 0 : (self.network_count - 1)
]
results_table_frc_avg_pm = self.avg_case_results_pm.iloc[
:, 0 : (self.network_count - 1)
]
results_table_frc_worst_am = self.worst_case_results_am.iloc[
:, 0 : (self.network_count - 1)
]
results_table_frc_worst_pm = self.worst_case_results_pm.iloc[
:, 0 : (self.network_count - 1)
]
preds_fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(
2, 2, figsize=(6.69, 6.69), dpi=300
)
ax1.fill_between(
test1_frc,
np.percentile(results_table_frc_avg_am, 97.5, axis=1),
np.percentile(results_table_frc_avg_am, 2.5, axis=1),
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax1.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax1.plot(
test1_frc,
np.min(results_table_frc_avg_am, axis=1),
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax1.plot(
test1_frc,
np.max(results_table_frc_avg_am, axis=1),
"#ffa600",
linewidth=0.5,
)
ax1.plot(
test1_frc,
np.median(results_table_frc_avg_am, axis=1),
"#ffa600",
linewidth=1,
label="Median Prediction",
)
ax1.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax1.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax1.set_xlim([0.19, 2.0])
ax1.set_xticks(np.arange(0.2, 2.2, 0.2))
ax1.set_xlabel("Tap Stand FRC (mg/L)")
ax1.set_ylabel("Household FRC (mg/L)")
ax1.set_title("Average Case - AM Collection")
ax2.fill_between(
test1_frc,
np.percentile(results_table_frc_avg_pm, 97.5, axis=1),
np.percentile(results_table_frc_avg_pm, 2.5, axis=1),
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax2.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax2.plot(
test1_frc,
np.min(results_table_frc_avg_pm, axis=1),
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax2.plot(
test1_frc,
np.max(results_table_frc_avg_pm, axis=1),
"#ffa600",
linewidth=0.5,
)
ax2.plot(
test1_frc,
np.median(results_table_frc_avg_pm, axis=1),
"#ffa600",
linewidth=1,
label="Median Prediction",
)
ax2.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax2.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax2.set_xlim([0.19, 2.0])
ax2.set_xticks(np.arange(0.2, 2.2, 0.2))
ax2.set_xlabel("Tap Stand FRC (mg/L)")
ax2.set_ylabel("Household FRC (mg/L)")
ax2.set_title("Average Case - PM Collection")
ax3.fill_between(
test1_frc,
np.percentile(results_table_frc_worst_am, 97.5, axis=1),
np.percentile(results_table_frc_worst_am, 2.5, axis=1),
facecolor="#b80000",
alpha=0.5,
label="95th Percentile Range",
)
ax3.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax3.plot(
test1_frc,
np.min(results_table_frc_worst_am, axis=1),
"#b80000",
linewidth=0.5,
label="Minimum/Maximum",
)
ax3.plot(
test1_frc,
np.max(results_table_frc_worst_am, axis=1),
"#b80000",
linewidth=0.5,
)
ax3.plot(
test1_frc,
np.median(results_table_frc_worst_am, axis=1),
"#b80000",
linewidth=1,
label="Median Prediction",
)
ax3.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax3.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax3.set_xlim([0.19, 2.0])
ax3.set_xticks(np.arange(0.2, 2.2, 0.2))
ax3.set_xlabel("Tap Stand FRC (mg/L)")
ax3.set_ylabel("Household FRC (mg/L)")
ax3.set_title("Worst Case - AM Collection")
ax4.fill_between(
test1_frc,
np.percentile(results_table_frc_worst_pm, 97.5, axis=1),
np.percentile(results_table_frc_worst_pm, 2.5, axis=1),
facecolor="#b80000",
alpha=0.5,
label="95th Percentile Range",
)
ax4.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax4.plot(
test1_frc,
np.min(results_table_frc_worst_pm, axis=1),
"#b80000",
linewidth=0.5,
label="Minimum/Maximum",
)
ax4.plot(
test1_frc,
np.max(results_table_frc_worst_pm, axis=1),
"#b80000",
linewidth=0.5,
)
ax4.plot(
test1_frc,
np.median(results_table_frc_worst_pm, axis=1),
"#b80000",
linewidth=1,
label="Median Prediction",
)
ax4.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax4.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax4.set_xlim([0.19, 2.0])
ax4.set_xticks(np.arange(0.2, 2.2, 0.2))
ax4.set_xlabel("Tap Stand FRC (mg/L)")
ax4.set_ylabel("Household FRC (mg/L)")
ax4.set_title("Worst Case - PM Collection")
plt.subplots_adjust(wspace=0.25)
plt.savefig(
os.path.splitext(filename)[0] + "_Predictions_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig1.pickle', 'wb'))
StringIOBytes_preds = io.BytesIO()
plt.savefig(StringIOBytes_preds, format="png", bbox_inches="tight")
StringIOBytes_preds.seek(0)
preds_base_64_pngData = base64.b64encode(StringIOBytes_preds.read())
plt.close()
risk_fig = plt.figure(figsize=(6.69, 3.35), dpi=300)
plt.plot(
test1_frc,
np.sum(np.less(results_table_frc_avg_am, 0.20), axis=1)
/ self.network_count,
c="#ffa600",
label="Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection",
)
plt.plot(
test1_frc,
np.sum(np.less(results_table_frc_avg_pm, 0.20), axis=1)
/ self.network_count,
c="#ffa600",
ls="--",
label="Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection",
)
plt.plot(
test1_frc,
np.sum(np.less(results_table_frc_worst_am, 0.2), axis=1)
/ self.network_count,
c="#b80000",
label="Risk of Household FRC < 0.20 mg/L - Worst Case, AM Collection",
)
plt.plot(
test1_frc,
np.sum(np.less(results_table_frc_worst_pm, 0.2), axis=1)
/ self.network_count,
c="#b80000",
ls="--",
label="Risk of Household FRC < 0.20 mg/L - Worst Case, PM Collection",
)
plt.xlim([0.2, 2])
plt.xlabel("Tapstand FRC (mg/L)")
plt.ylim([0, 1])
plt.ylabel("Risk of Point-of-Consumption FRC < 0.2 mg/L")
plt.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
plt.subplots_adjust(bottom=0.15, right=0.95)
plt.savefig(
os.path.splitext(filename)[0] + "_Risk_Fig.png",
format="png",
bbox_inches="tight",
)
StringIOBytes_risk = io.BytesIO()
plt.savefig(StringIOBytes_risk, format="png", bbox_inches="tight")
StringIOBytes_risk.seek(0)
risk_base_64_pngData = base64.b64encode(StringIOBytes_risk.read())
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig2.pickle', 'wb'))
plt.close()
elif self.post_process_check == True:
preds_fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(
2, 2, figsize=(6.69, 6.69), dpi=300
)
ax1.fill_between(
test1_frc,
self.avg_case_results_am_post["Upper 95 CI"],
self.avg_case_results_am_post["Lower 95 CI"],
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax1.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax1.plot(
test1_frc,
self.avg_case_results_am_post["Ensemble Minimum"],
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax1.plot(
test1_frc,
self.avg_case_results_am_post["Ensemble Maximum"],
"#ffa600",
linewidth=0.5,
)
ax1.plot(
test1_frc,
self.avg_case_results_am_post["median"],
"#ffa600",
linewidth=1,
label="Median Prediction",
)
ax1.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax1.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax1.set_xlim([0.19, 2.0])
ax1.set_xticks(np.arange(0.2, 2.2, 0.2))
ax1.set_xlabel("Tap Stand FRC (mg/L)")
ax1.set_ylabel("Household FRC (mg/L)")
ax1.set_title("Average Case - AM Collection")
ax2.fill_between(
test1_frc,
self.avg_case_results_pm_post["Upper 95 CI"],
self.avg_case_results_pm_post["Lower 95 CI"],
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax2.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["Ensemble Minimum"],
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["Ensemble Maximum"],
"#ffa600",
linewidth=0.5,
)
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["median"],
"#ffa600",
linewidth=1,
label="median Prediction",
)
ax2.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax2.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax2.set_xlim([0.19, 2.0])
ax2.set_xticks(np.arange(0.2, 2.2, 0.2))
ax2.set_xlabel("Tap Stand FRC (mg/L)")
ax2.set_ylabel("Household FRC (mg/L)")
ax2.set_title("Average Case - PM Collection")
ax3.fill_between(
test1_frc,
self.worst_case_results_am_post["Upper 95 CI"],
self.worst_case_results_am_post["Lower 95 CI"],
facecolor="#b80000",
alpha=0.5,
label="95th Percentile Range",
)
ax3.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax3.plot(
test1_frc,
self.worst_case_results_am_post["Ensemble Minimum"],
"#b80000",
linewidth=0.5,
label="Minimum/Maximum",
)
ax3.plot(
test1_frc,
self.worst_case_results_am_post["Ensemble Maximum"],
"#b80000",
linewidth=0.5,
)
ax3.plot(
test1_frc,
self.worst_case_results_am_post["median"],
"#b80000",
linewidth=1,
label="Median Prediction",
)
ax3.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax3.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax3.set_xlim([0.19, 2.0])
ax3.set_xticks(np.arange(0.2, 2.2, 0.2))
ax3.set_xlabel("Tap Stand FRC (mg/L)")
ax3.set_ylabel("Household FRC (mg/L)")
ax3.set_title("Worst Case - AM Collection")
ax4.fill_between(
test1_frc,
self.worst_case_results_pm_post["Upper 95 CI"],
self.worst_case_results_pm_post["Lower 95 CI"],
facecolor="#b80000",
alpha=0.5,
label="95th Percentile Range",
)
ax4.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax4.plot(
test1_frc,
self.worst_case_results_pm_post["Ensemble Minimum"],
"#b80000",
linewidth=0.5,
label="Minimum/Maximum",
)
ax4.plot(
test1_frc,
self.worst_case_results_pm_post["Ensemble Maximum"],
"#b80000",
linewidth=0.5,
)
ax4.plot(
test1_frc,
self.worst_case_results_pm_post["median"],
"#b80000",
linewidth=1,
label="Median Prediction",
)
ax4.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax4.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax4.set_xlim([0.19, 2.0])
ax4.set_xticks(np.arange(0.2, 2.2, 0.2))
ax4.set_xlabel("Tap Stand FRC (mg/L)")
ax4.set_ylabel("Household FRC (mg/L)")
ax4.set_title("Worst Case - PM Collection")
plt.subplots_adjust(wspace=0.25)
plt.savefig(
os.path.splitext(filename)[0] + "_Predictions_Fig.png",
format="png",
bbox_inches="tight",
)
StringIOBytes_preds = io.BytesIO()
plt.savefig(StringIOBytes_preds, format="png", bbox_inches="tight")
StringIOBytes_preds.seek(0)
preds_base_64_pngData = base64.b64encode(StringIOBytes_preds.read())
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig1.pickle', 'wb'))
plt.close()
risk_fig = plt.figure(figsize=(6.69, 3.35), dpi=300)
plt.plot(
test1_frc,
self.avg_case_results_am_post["probability<=0.20"],
c="#ffa600",
label="Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection",
)
plt.plot(
test1_frc,
self.avg_case_results_pm_post["probability<=0.20"],
c="#ffa600",
ls="--",
label="Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection",
)
plt.plot(
test1_frc,
self.worst_case_results_am_post["probability<=0.20"],
c="#b80000",
label="Risk of Household FRC < 0.20 mg/L - Worst Case, AM Collection",
)
plt.plot(
test1_frc,
self.worst_case_results_pm_post["probability<=0.20"],
c="#b80000",
ls="--",
label="Risk of Household FRC < 0.20 mg/L - Worst Case, PM Collection",
)
plt.xlim([0.2, 2])
plt.xlabel("Tapstand FRC (mg/L)")
plt.ylim([0, 1])
plt.ylabel("Risk of Point-of-Consumption FRC < 0.2 mg/L")
plt.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
plt.savefig(
os.path.splitext(filename)[0] + "_Risk_Fig.png",
format="png",
bbox_inches="tight",
)
StringIOBytes_risk = io.BytesIO()
plt.savefig(StringIOBytes_risk, format="png", bbox_inches="tight")
StringIOBytes_risk.seek(0)
risk_base_64_pngData = base64.b64encode(StringIOBytes_risk.read())
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig2.pickle', 'wb'))
plt.close()
if WATTEMP in self.datainputs.columns and COND in self.datainputs.columns:
hist_fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(
6, 1, figsize=(3.35, 6.69), dpi=300
)
ax1.set_ylabel("Frequency")
ax1.set_xlabel("Tapstand FRC (mg/L)")
ax1.hist(self.datainputs.iloc[:, 0], bins=30, color="grey")
ax2.set_ylabel("Frequency")
ax2.set_xlabel("Elapsed Time (hours)")
ax2.hist(self.datainputs.iloc[:, 1], bins=30, color="grey")
ax3.set_ylabel("Frequency")
ax3.set_xlabel("Collection Time (0=AM, 1=PM)")
ax3.hist(self.datainputs.iloc[:, 2], bins=30, color="grey")
ax4.set_ylabel("Frequency")
ax4.set_xlabel("Water Temperature(" + r"$\degree$" + "C)")
ax4.hist(self.datainputs.iloc[:, 3], bins=30, color="grey")
ax4.axvline(
self.median_wattemp,
c="#ffa600",
ls="--",
label="Average Case Value Used",
)
ax4.axvline(
self.upper95_wattemp,
c="#b80000",
ls="--",
label="Worst Case Value Used",
)
ax4.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
ax5.set_ylabel("Frequency")
ax5.set_xlabel("Electrical Conductivity (" + r"$\mu$" + "s/cm)")
ax5.hist(self.datainputs.iloc[:, 4], bins=30, color="grey")
ax5.axvline(
self.median_cond,
c="#ffa600",
ls="--",
label="Average Case Value Used",
)
ax5.axvline(
self.upper95_cond,
c="#b80000",
ls="--",
label="Worst Case Value used",
)
ax5.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
ax6.set_ylabel("Frequency")
ax6.set_xlabel("Household FRC (mg/L)")
ax6.hist(self.dataoutputs, bins=30, color="grey")
plt.subplots_adjust(
left=0.18, hspace=0.60, top=0.99, bottom=0.075, right=0.98
)
plt.savefig(
os.path.splitext(filename)[0] + "_Histograms_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig3.pickle', 'wb'))
plt.close()
StringIOBytes_histogram = io.BytesIO()
plt.savefig(StringIOBytes_histogram, format="png", bbox_inches="tight")
StringIOBytes_histogram.seek(0)
hist_base_64_pngData = base64.b64encode(StringIOBytes_histogram.read())
elif WATTEMP in self.datainputs.columns:
hist_fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(
6, 1, figsize=(3.35, 6.69), dpi=300
)
ax1.set_ylabel("Frequency")
ax1.set_xlabel("Tapstand FRC (mg/L)")
ax1.hist(self.datainputs.iloc[:, 0], bins=30, color="grey")
ax2.set_ylabel("Frequency")
ax2.set_xlabel("Elapsed Time (hours)")
ax2.hist(self.datainputs.iloc[:, 1], bins=30, color="grey")
ax3.set_ylabel("Frequency")
ax3.set_xlabel("Collection Time (0=AM, 1=PM)")
ax3.hist(self.datainputs.iloc[:, 2], bins=30, color="grey")
ax4.set_ylabel("Frequency")
ax4.set_xlabel("Water Temperature(" + r"$\degree$" + "C)")
ax4.hist(self.datainputs.iloc[:, 3], bins=30, color="grey")
ax4.axvline(
self.median_wattemp,
c="#ffa600",
ls="--",
label="Average Case Value Used",
)
ax4.axvline(
self.upper95_wattemp,
c="#b80000",
ls="--",
label="Worst Case Value Used",
)
ax4.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
ax5.set_ylabel("Frequency")
ax5.set_xlabel("Household FRC (mg/L)")
ax5.hist(self.dataoutputs, bins=30, color="grey")
plt.subplots_adjust(
left=0.18, hspace=0.60, top=0.99, bottom=0.075, right=0.98
)
plt.savefig(
os.path.splitext(filename)[0] + "_Histograms_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig3.pickle', 'wb'))
plt.close()
StringIOBytes_histogram = io.BytesIO()
plt.savefig(StringIOBytes_histogram, format="png", bbox_inches="tight")
StringIOBytes_histogram.seek(0)
hist_base_64_pngData = base64.b64encode(StringIOBytes_histogram.read())
elif COND in self.datainputs.columns:
hist_fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(
6, 1, figsize=(3.35, 6.69), dpi=300
)
ax1.set_ylabel("Frequency")
ax1.set_xlabel("Tapstand FRC (mg/L)")
ax1.hist(self.datainputs.iloc[:, 0], bins=30, color="grey")
ax2.set_ylabel("Frequency")
ax2.set_xlabel("Elapsed Time (hours)")
ax2.hist(self.datainputs.iloc[:, 1], bins=30, color="grey")
ax3.set_ylabel("Frequency")
ax3.set_xlabel("Collection Time (0=AM, 1=PM)")
ax3.hist(self.datainputs.iloc[:, 2], bins=30, color="grey")
ax4.set_ylabel("Frequency")
ax4.set_xlabel("Electrical Conductivity (" + r"$\mu$" + "s/cm)")
ax4.hist(self.datainputs.iloc[:, 4], bins=30, color="grey")
ax4.axvline(
self.median_cond,
c="#ffa600",
ls="--",
label="Average Case Value Used",
)
ax4.axvline(
self.upper95_cond,
c="#b80000",
ls="--",
label="Worst Case Value used",
)
ax4.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
ax5.set_ylabel("Frequency")
ax5.set_xlabel("Household FRC (mg/L)")
ax5.hist(self.dataoutputs, bins=30, color="grey")
plt.subplots_adjust(
left=0.18, hspace=0.60, top=0.99, bottom=0.075, right=0.98
)
plt.savefig(
os.path.splitext(filename)[0] + "_Histograms_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig3.pickle', 'wb'))
plt.close()
StringIOBytes_histogram = io.BytesIO()
plt.savefig(StringIOBytes_histogram, format="png", bbox_inches="tight")
StringIOBytes_histogram.seek(0)
hist_base_64_pngData = base64.b64encode(StringIOBytes_histogram.read())
else:
if self.post_process_check == False:
results_table_frc_avg_am = self.avg_case_results_am.iloc[
:, 0 : (self.network_count - 1)
]
results_table_frc_avg_pm = self.avg_case_results_pm.iloc[
:, 0 : (self.network_count - 1)
]
preds_fig, (ax1, ax2) = plt.subplots(
1, 2, figsize=(6.69, 3.35), dpi=300
)
ax1.fill_between(
test1_frc,
np.percentile(results_table_frc_avg_am, 97.5, axis=1),
np.percentile(results_table_frc_avg_am, 2.5, axis=1),
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax1.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax1.plot(
test1_frc,
np.min(results_table_frc_avg_am, axis=1),
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax1.plot(
test1_frc,
np.max(results_table_frc_avg_am, axis=1),
"#ffa600",
linewidth=0.5,
)
ax1.plot(
test1_frc,
np.median(results_table_frc_avg_am, axis=1),
"#ffa600",
linewidth=1,
label="Median Prediction",
)
ax1.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax1.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax1.set_xlim([0.19, 2.0])
ax1.set_xticks(np.arange(0.2, 2.2, 0.2))
ax1.set_xlabel("Tap Stand FRC (mg/L)")
ax1.set_ylabel("Household FRC (mg/L)")
ax1.set_title("AM Collection")
ax2.fill_between(
test1_frc,
np.percentile(results_table_frc_avg_pm, 97.5, axis=1),
np.percentile(results_table_frc_avg_pm, 2.5, axis=1),
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax2.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax2.plot(
test1_frc,
np.min(results_table_frc_avg_pm, axis=1),
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax2.plot(
test1_frc,
np.max(results_table_frc_avg_pm, axis=1),
"#ffa600",
linewidth=0.5,
)
ax2.plot(
test1_frc,
np.median(results_table_frc_avg_pm, axis=1),
"#ffa600",
linewidth=1,
label="Median Prediction",
)
ax2.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax2.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax2.set_xlim([0.19, 2.0])
ax2.set_xticks(np.arange(0.2, 2.2, 0.2))
ax2.set_xlabel("Tap Stand FRC (mg/L)")
ax2.set_ylabel("Household FRC (mg/L)")
ax2.set_title("PM Collection")
plt.subplots_adjust(wspace=0.25)
plt.savefig(
os.path.splitext(filename)[0] + "_Predictions_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig1.pickle', 'wb'))
StringIOBytes_preds = io.BytesIO()
plt.savefig(StringIOBytes_preds, format="png", bbox_inches="tight")
StringIOBytes_preds.seek(0)
preds_base_64_pngData = base64.b64encode(StringIOBytes_preds.read())
plt.close()
risk_fig = plt.figure(figsize=(6.69, 3.35), dpi=300)
plt.plot(
test1_frc,
np.sum(np.less(results_table_frc_avg_am, 0.20), axis=1)
/ self.network_count,
c="#ffa600",
label="Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection",
)
plt.plot(
test1_frc,
np.sum(np.less(results_table_frc_avg_pm, 0.20), axis=1)
/ self.network_count,
c="#ffa600",
ls="--",
label="Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection",
)
plt.xlim([0.2, 2])
plt.xlabel("Tapstand FRC (mg/L)")
plt.ylim([0, 1])
plt.ylabel("Risk of Point-of-Consumption FRC < 0.2 mg/L")
plt.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
plt.subplots_adjust(bottom=0.15, right=0.95)
plt.savefig(
os.path.splitext(filename)[0] + "_Risk_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig2.pickle', 'wb'))
StringIOBytes_risk = io.BytesIO()
plt.savefig(StringIOBytes_risk, format="png", bbox_inches="tight")
StringIOBytes_risk.seek(0)
risk_base_64_pngData = base64.b64encode(StringIOBytes_risk.read())
plt.close()
elif self.post_process_check == True:
preds_fig, (ax1, ax2) = plt.subplots(
1, 2, figsize=(6.69, 3.35), dpi=300
)
ax1.fill_between(
test1_frc,
self.avg_case_results_am_post["Upper 95 CI"],
self.avg_case_results_am_post["Lower 95 CI"],
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax1.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax1.plot(
test1_frc,
self.avg_case_results_am_post["Ensemble Minimum"],
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax1.plot(
test1_frc,
self.avg_case_results_am_post["Ensemble Maximum"],
"#ffa600",
linewidth=0.5,
)
ax1.plot(
test1_frc,
self.avg_case_results_am_post["median"],
"#ffa600",
linewidth=1,
label="Median Prediction",
)
ax1.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax1.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax1.set_xlim([0.19, 2.0])
ax1.set_xticks(np.arange(0.2, 2.2, 0.2))
ax1.set_xlabel("Tap Stand FRC (mg/L)")
ax1.set_ylabel("Household FRC (mg/L)")
ax1.set_title("AM Collection")
ax2.fill_between(
test1_frc,
self.avg_case_results_pm_post["Upper 95 CI"],
self.avg_case_results_pm_post["Lower 95 CI"],
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax2.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["Ensemble Minimum"],
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["Ensemble Maximum"],
"#ffa600",
linewidth=0.5,
)
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["median"],
"#ffa600",
linewidth=1,
label="median Prediction",
)
ax2.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax2.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax2.set_xlim([0.19, 2.0])
ax2.set_xticks(np.arange(0.2, 2.2, 0.2))
ax2.set_xlabel("Tap Stand FRC (mg/L)")
ax2.set_ylabel("Household FRC (mg/L)")
ax2.set_title("PM Collection")
plt.subplots_adjust(wspace=0.25)
plt.tight_layout()
plt.savefig(
os.path.splitext(filename)[0] + "_Predictions_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig1.pickle', 'wb'))
StringIOBytes_preds = io.BytesIO()
plt.savefig(StringIOBytes_preds, format="png", bbox_inches="tight")
StringIOBytes_preds.seek(0)
preds_base_64_pngData = base64.b64encode(StringIOBytes_preds.read())
plt.close()
risk_fig = plt.figure(figsize=(6.69, 3.35), dpi=300)
plt.plot(
test1_frc,
self.avg_case_results_am_post["probability<=0.20"],
c="#ffa600",
label="Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection",
)
plt.plot(
test1_frc,
self.avg_case_results_pm_post["probability<=0.20"],
c="#ffa600",
ls="--",
label="Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection",
)
plt.xlim([0.2, 2])
plt.xlabel("Tapstand FRC (mg/L)")
plt.ylim([0, 1])
plt.ylabel("Risk of Point-of-Consumption FRC < 0.2 mg/L")
plt.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
plt.savefig(
os.path.splitext(filename)[0] + "_Risk_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig2.pickle', 'wb'))
StringIOBytes_risk = io.BytesIO()
plt.savefig(StringIOBytes_risk, format="png", bbox_inches="tight")
StringIOBytes_risk.seek(0)
risk_base_64_pngData = base64.b64encode(StringIOBytes_risk.read())
plt.close()
hist_fig, (ax1, ax2, ax3, ax4) = plt.subplots(
4, 1, figsize=(3.35, 6.69), dpi=300
)
ax1.set_ylabel("Frequency")
ax1.set_xlabel("Tapstand FRC (mg/L)")
ax1.hist(self.datainputs.iloc[:, 0], bins=30, color="grey")
ax2.set_ylabel("Frequency")
ax2.set_xlabel("Elapsed Time (hours)")
ax2.hist(self.datainputs.iloc[:, 1], bins=30, color="grey")
ax3.set_ylabel("Frequency")
ax3.set_xlabel("Collection Time (0=AM, 1=PM)")
ax3.hist(self.datainputs.iloc[:, 2], bins=30, color="grey")
ax4.set_ylabel("Frequency")
ax4.set_xlabel("Household FRC (mg/L)")
ax4.hist(self.dataoutputs, bins=30, color="grey")
plt.subplots_adjust(
left=0.18, hspace=0.60, top=0.99, bottom=0.075, right=0.98
)
plt.savefig(
os.path.splitext(filename)[0] + "_Histograms_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig3.pickle', 'wb'))
plt.close()
StringIOBytes_histogram = io.BytesIO()
plt.savefig(StringIOBytes_histogram, format="png", bbox_inches="tight")
StringIOBytes_histogram.seek(0)
hist_base_64_pngData = base64.b64encode(StringIOBytes_histogram.read())
return hist_base_64_pngData, risk_base_64_pngData, preds_base_64_pngData
def display_results(self):
"""
Display the results of the predictions as a console output.
Display and return all the contents of the self.results variable which is a pandas Dataframe object
:return: A Pandas Dataframe object (self.results) containing all the result of the predictions
"""
if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
if self.post_process_check == False:
logging.info(self.avg_case_results_am)
logging.info(self.worst_case_results_am)
logging.info(self.avg_case_results_pm)
logging.info(self.worst_case_results_pm)
return self.avg_case_results_am, self.avg_case_results_pm, self.worst_case_results_am, self.worst_case_results_pm
else:
logging.info(self.avg_case_results_am_post)
logging.info(self.worst_case_results_am_post)
logging.info(self.avg_case_results_pm_post)
logging.info(self.worst_case_results_pm_post)
return self.avg_case_results_am_post, self.avg_case_results_pm_post, self.worst_case_results_am_post, self.worst_case_results_pm_post
else:
if self.post_process_check == False:
logging.info(self.avg_case_results_am)
logging.info(self.avg_case_results_pm)
return self.avg_case_results_am, self.avg_case_results_pm
else:
logging.info(self.avg_case_results_am_post)
logging.info(self.avg_case_results_pm_post)
return self.avg_case_results_am_post, self.avg_case_results_pm_post
def export_results_to_csv(self, filename):
self.avg_case_results_am.to_csv(
os.path.splitext(filename)[0] + "_average_case_am.csv", index=False
)
self.avg_case_results_pm.to_csv(
os.path.splitext(filename)[0] + "_average_case_pm.csv", index=False
)
if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
self.worst_case_results_am.to_csv(
os.path.splitext(filename)[0] + "_worst_case_am.csv", index=False
)
self.worst_case_results_pm.to_csv(
os.path.splitext(filename)[0] + "_worst_case_pm.csv", index=False
)
if self.post_process_check == True:
self.avg_case_results_am_post.to_csv(
os.path.splitext(filename)[0] + "_average_case_am.csv", index=False
)
self.avg_case_results_pm_post.to_csv(
os.path.splitext(filename)[0] + "_average_case_pm.csv", index=False
)
if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
self.worst_case_results_am_post.to_csv(
os.path.splitext(filename)[0] + "_worst_case_am.csv", index=False
)
self.worst_case_results_pm_post.to_csv(
os.path.splitext(filename)[0] + "_worst_case_pm.csv", index=False
)
def generate_model_performance(self):
"""Generates training performance graphs
Plots the model performance metrics (MSE and R^2 vs # of epochs) after training and returns a
base64 encoded image. The NN has to be trained first otherwise the image will be empty.
Returns: Base64 data stream"""
fig, axs = plt.subplots(1, 2, sharex=True)
ax = axs[0]
ax.boxplot(
[self.total_mse_train, self.total_mse_val, self.total_mse_test],
labels=["Training", "Validation", "Testing"],
)
ax.set_title("Mean Squared Error")
tr_legend = "Training Avg MSE: {mse:.4f}".format(mse=self.avg_mse_train)
val_legend = "Validation Avg MSE: {mse:.4f}".format(mse=self.avg_mse_val)
ts_legend = "Testing Avg MSE: {mse:.4f}".format(mse=self.avg_mse_test)
ax.legend([tr_legend, val_legend, ts_legend])
ax = axs[1]
ax.boxplot(
[
self.total_rsquared_train,
self.total_rsquared_val,
self.total_rsquared_test,
],
labels=["Training", "Validation", "Testing"],
)
ax.set_title("R^2")
tr_legend = "Training Avg. R^2: {rs:.3f}".format(rs=self.avg_rsq_train)
val_legend = "Validation Avg. R^2: {rs:.3f}".format(rs=self.avg_rsq_val)
ts_legend = "Validation Avg. R^2: {rs:.3f}".format(rs=self.avg_rsq_test)
ax.legend([tr_legend, val_legend, ts_legend])
fig.suptitle(
"Performance metrics across 100 training runs on "
+ str(self.epochs)
+ " epochs, with "
+ str(self.layer1_neurons)
+ " neurons on hidden layer."
)
fig.set_size_inches(12, 8)
# plt.show()
# Uncomment the next lines to save the graph to disk
# plt.savefig("model_metrics\\" + str(self.epochs) + "_epochs_" + str(self.layer1_neurons) + "_neurons.png",
# dpi=100)
# plt.close()
plt.show()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format='png')
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def generate_2d_scatterplot(self):
"""Generate a 2d scatterplot of the predictions
Plots three, 2-dimensional scatterplots of the predictions as a function of the inputs
The 3 scatterplots are plotting: predictions vs se1_frc and water temperature, predictions
vs water conductivity and water temperature, and predictions vs se1_frc and water conductivity.
A histogram of the prediction set is also generated and plotted. A prediction using the
predict() method must be made first.
Returns: a base64 data represenation of the image."""
df = self.results
# Uncomment the following line to load the results direclty from an csv file
# df = pd.read_csv('results.csv')
# Filter out outlier values
df = df.drop(df[df[FRC_IN] > 2.8].index)
frc = df[FRC_IN]
watt = df[WATTEMP]
cond = df[COND]
c = df["median"]
# sort data for the cdf
sorted_data = np.sort(c)
# The following lines of code calculate the width of the histogram bars
# and align the range of the histogram and the pdf
if min(c) < 0:
lo_limit = 0
else:
lo_limit = round(min(c), 2)
logging.info(lo_limit)
if max(c) <= 0.75:
divisions = 16
hi_limit = 0.75
elif max(c) < 1:
divisions = 21
hi_limit = 1
elif max(c) <= 1.5:
divisions = 31
hi_limit = 1.5
elif max(c) <= 2:
divisions = 41
hi_limit = 2
divisions = round((hi_limit - lo_limit) / 0.05, 0) + 1
logging.info(divisions)
# Get the data between the limits
sorted_data = sorted_data[sorted_data > lo_limit]
sorted_data = sorted_data[sorted_data < hi_limit]
# create a colorbar for the se4_frc and divide it in 0.2 mg/L intervals
cmap = plt.cm.jet_r
cmaplist = [cmap(i) for i in range(cmap.N)]
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"Custom cmap", cmaplist, cmap.N
)
bounds = np.linspace(0, 1.4, 8)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
fig = plt.figure(figsize=(19.2, 10.8), dpi=100)
ax = fig.add_subplot(221)
img = ax.scatter(frc, watt, c=c, s=5, cmap=cmap, norm=norm, alpha=1)
ax.set_xlabel("FRC at tapstand (mg/L)")
ax.set_ylabel("Water Temperature (" + "\u00b0" + "C)")
ax.grid(linewidth=0.2)
ax = fig.add_subplot(222)
img = ax.scatter(frc, cond, c=c, s=5, cmap=cmap, norm=norm, alpha=1)
ax.set_xlabel("FRC at tapstand (mg/L)")
ax.set_ylabel("Water Conductivity (\u03BCS/cm)")
ax.grid(linewidth=0.2)
ax = fig.add_subplot(223)
img = ax.scatter(watt, cond, c=c, s=5, cmap=cmap, norm=norm, alpha=1)
ax.set_xlabel("Water Temperature (" + "\u00b0" + "C)")
ax.set_ylabel("Water Conductivity (\u03BCS/cm)")
ax.grid(linewidth=0.2)
ax = fig.add_subplot(224)
img = ax.hist(
c,
bins=np.linspace(lo_limit, hi_limit, divisions),
edgecolor="black",
linewidth=0.1,
)
ax.grid(linewidth=0.1)
line02 = ax.axvline(0.2, color="r", linestyle="dashed", linewidth=2)
line03 = ax.axvline(0.3, color="y", linestyle="dashed", linewidth=2)
ax.set_xlabel("FRC at household (mg/L)")
ax.set_ylabel("# of instances")
axcdf = ax.twinx()
(cdf,) = axcdf.step(sorted_data, np.arange(sorted_data.size), color="g")
ax.legend(
(line02, line03, cdf), ("0.2 mg/L", "0.3 mg/L", "CDF"), loc="center right"
)
ax2 = fig.add_axes([0.93, 0.1, 0.01, 0.75])
cb = mpl.colorbar.ColorbarBase(
ax2,
cmap=cmap,
norm=norm,
spacing="proportional",
ticks=bounds,
boundaries=bounds,
)
cb.ax.set_ylabel("FRC at se4 (mg/L)", rotation=270, labelpad=20)
plt.show()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format="png")
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def generate_input_info_plots(self, filename):
"""Generates histograms of the inputs to the ANN
Plots one histogram for each input field on the neural network
along with the mean and median values."""
df = self.datainputs
# df = df.drop(df[df["se1_frc"] > 2.8].index)
frc = df[FRC_IN]
watt = df[WATTEMP]
cond = df[COND]
dfo = self.file
dfo = dfo.drop(dfo[dfo[FRC_IN] > 2.8].index)
frc4 = dfo[FRC_OUT]
fig = plt.figure(figsize=(19.2, 10.8), dpi=100)
# fig.suptitle('Total samples: '+ str(len(frc))) # +
# "\n" + "SWOT version: " + self.software_version +
# "\n" + "Input Filename: " + os.path.basename(self.input_filename) +
# "\n" + "Generated on: " + self.today)
axInitialFRC = fig.add_subplot(221)
axInitialFRC.hist(frc, bins=20, edgecolor="black", linewidth=0.1)
axInitialFRC.set_xlabel("Initial FRC (mg/L)")
axInitialFRC.set_ylabel("# of instances")
mean = round(np.mean(frc), 2)
median = np.median(frc)
mean_line = axInitialFRC.axvline(
mean, color="r", linestyle="dashed", linewidth=2
)
median_line = axInitialFRC.axvline(
median, color="y", linestyle="dashed", linewidth=2
)
axInitialFRC.legend(
(mean_line, median_line),
("Mean: " + str(mean) + " mg/L", "Median: " + str(median) + " mg/L"),
)
ax = fig.add_subplot(222)
ax.hist(watt, bins=20, edgecolor="black", linewidth=0.1)
ax.set_xlabel("Water Temperature (" + "\u00b0" + "C)")
ax.set_ylabel("# of instances")
mean = round(np.mean(watt), 2)
median = np.median(watt)
mean_line = ax.axvline(mean, color="r", linestyle="dashed", linewidth=2)
median_line = ax.axvline(median, color="y", linestyle="dashed", linewidth=2)
ax.legend(
(mean_line, median_line),
(
"Mean: " + str(mean) + "\u00b0" + "C",
"Median: " + str(median) + "\u00b0" + "C",
),
)
ax = fig.add_subplot(223)
ax.hist(cond, bins=20, edgecolor="black", linewidth=0.1)
ax.set_xlabel("Water Conductivity (\u03BCS/cm)")
ax.set_ylabel("# of instances")
mean = round(np.mean(cond), 2)
median = np.median(cond)
mean_line = ax.axvline(mean, color="r", linestyle="dashed", linewidth=2)
median_line = ax.axvline(median, color="y", linestyle="dashed", linewidth=2)
ax.legend(
(mean_line, median_line),
(
"Mean: " + str(mean) + " \u03BCS/cm",
"Median: " + str(median) + " \u03BCS/cm",
),
)
axHouseholdFRC = fig.add_subplot(224)
axHouseholdFRC.hist(
frc4, bins=np.linspace(0, 2, 41), edgecolor="black", linewidth=0.1
)
axHouseholdFRC.set_xlabel("Household FRC (\u03BCS/cm)")
axHouseholdFRC.set_ylabel("# of instances")
mean = round(np.mean(frc4), 2)
median = np.median(frc4)
mean_line = axHouseholdFRC.axvline(
mean, color="r", linestyle="dashed", linewidth=2
)
median_line = axHouseholdFRC.axvline(
median, color="y", linestyle="dashed", linewidth=2
)
axHouseholdFRC.legend(
(mean_line, median_line),
(
"Mean: " + str(mean) + " \u03BCS/cm",
"Median: " + str(median) + " \u03BCS/cm",
),
)
fig.savefig(os.path.splitext(filename)[0] + ".png", format="png")
# plt.show()
# create figure for initial and household FRC separately in a single image
figFRC = plt.figure(figsize=(19.2 / 1.45, 6.4), dpi=100)
axInitialFRC = figFRC.add_subplot(211)
axInitialFRC.hist(frc, bins=20, edgecolor="black", linewidth=0.1)
axInitialFRC.set_xlabel("Initial FRC (mg/L)")
axInitialFRC.set_ylabel("# of instances")
mean = round(np.mean(frc), 2)
median = np.median(frc)
mean_line = axInitialFRC.axvline(
mean, color="r", linestyle="dashed", linewidth=2
)
median_line = axInitialFRC.axvline(
median, color="y", linestyle="dashed", linewidth=2
)
axInitialFRC.legend(
(mean_line, median_line),
("Mean: " + str(mean) + " mg/L", "Median: " + str(median) + " mg/L"),
)
axHouseholdFRC = figFRC.add_subplot(212)
axHouseholdFRC.hist(
frc4, bins=np.linspace(0, 2, 41), edgecolor="black", linewidth=0.1
)
axHouseholdFRC.set_xlabel("Household FRC (mg/L)")
axHouseholdFRC.set_ylabel("# of instances")
mean = round(np.mean(frc4), 2)
median = np.median(frc4)
mean_line = axHouseholdFRC.axvline(
mean, color="r", linestyle="dashed", linewidth=2
)
median_line = axHouseholdFRC.axvline(
median, color="y", linestyle="dashed", linewidth=2
)
axHouseholdFRC.legend(
(mean_line, median_line),
("Mean: " + str(mean) + " mg/L", "Median: " + str(median) + " mg/L"),
)
figFRC.savefig(os.path.splitext(filename)[0] + "-frc.jpg", format="jpg")
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format="png")
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def generate_html_report(self, filename, storage_target):
    """Generate the SWOT ANN html report and write it to *filename*.

    The report references the figure files previously written to disk by
    results_visualization() by basename only, so the images must live in
    the same directory as the html file. It embeds the average-case (and,
    when temperature/conductivity inputs exist, worst-case) target tables,
    the skipped-rows table, and the list of validation rules applied.

    :param filename: path of the html file to write
    :param storage_target: storage duration forwarded to the target tables
    :return: the html document as a string (also written to disk)
    """
    df = self.datainputs
    frc = df[FRC_IN]
    # self.generate_input_info_plots(filename).decode('UTF-8')
    # Renders the result figures to disk; the returned base64 payloads are
    # decoded (which raises on invalid data) but the decoded values are
    # otherwise unused here.
    hist, risk, pred = self.results_visualization(filename, storage_target)
    hist.decode("UTF-8")
    risk.decode("UTF-8")
    pred.decode("UTF-8")
    # scatterplots_b64 = self.generate_2d_scatterplot().decode('UTF-8')
    # A worst-case table is only produced when water temperature or
    # conductivity columns are present in the inputs.
    if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
        avg_html_table, worst_html_table = self.prepare_table_for_html_report(
            storage_target
        )
    else:
        avg_html_table = self.prepare_table_for_html_report(storage_target)
    skipped_rows_table = self.skipped_rows_html()
    # yattag triple: doc holds the tree, tag/text/line build it.
    doc, tag, text, line = Doc().ttl()
    with tag("h1", klass="title"):
        text("SWOT ARTIFICIAL NEURAL NETWORK REPORT")
    with tag("p", klass="swot_version"):
        text("SWOT ANN Code Version: " + self.software_version)
    with tag("p", klass="input_filename"):
        text("Input File Name: " + os.path.basename(self.input_filename))
    with tag("p", klass="date"):
        text("Date Generated: " + self.today)
    with tag("p", klass="time_difference"):
        # avg_time_elapsed is in seconds; split into whole hours/minutes.
        text(
            "Average time between tapstand and household: "
            + str(int(self.avg_time_elapsed // 3600))
            + " hours and "
            + str(int((self.avg_time_elapsed // 60) % 60))
            + " minutes"
        )
    with tag("p"):
        text("Total Samples: " + str(len(frc)))
    # Header differs depending on whether the post-processed ensemble is
    # being reported.
    if self.post_process_check == False:
        with tag("h2", klass="Header"):
            text("Predicted Risk - Raw Ensemble:")
    else:
        with tag("h2", klass="Header"):
            text("Predicted Risk - Post-Processed Ensemble:")
    if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
        # Branch with worst-case scenario content (temperature and/or
        # conductivity available).
        with tag("p", klass="Predictions Fig Text"):
            text(
                "Household FRC forecast from an ensemble of "
                + str(self.network_count)
                + " ANN models. The predictions of each model are grouped into a probability density function to predict the risk of FRC below threshold values of 0.20 mg/L using a fixed input variable set for worst case and average case scenarios (shown in the risk tables below). Note that if FRC is collected using pool testers instead of a cholorimeter, the predicted FRC may be disproportionately clustered towards the centre of the observations, resulting in some observations with low FRC to not be captured within the ensemble forecast. In these cases, the predicted risk in the next figure and in the subsequent risk tables may be underpredicted. Average case predictions use median collected values for conductivity and water temperature; worst-case scenario uses 95th percentile values for conductivity and water temeperature"
            )
        with tag("div", id="Predictions Graphs"):
            doc.stag(
                "img",
                src=os.path.basename(
                    os.path.splitext(filename)[0] + "_Predictions_Fig.png"
                ),
            )
            # doc.asis('<object data="cid:'+os.path.basename(os.path.splitext(filename)[0]+'.PNG') + '" type="image/jpeg"></object>')
            # doc.asis(
            #     '<object data="'
            #     + os.path.basename(
            #         os.path.splitext(filename)[0] + "_Predictions_Fig.png"
            #     )
            #     + '" type="image/jpeg"></object>'
            # )
        with tag("p", klass="Risk Fig Text"):
            text(
                "Figure and tables showing predicted risk of household FRC below 0.2 mg/L for average and worst case scenarios for both AM and PM collection. Risk obtained from forecast pdf (above) and taken as cumulative probability of houeshold FRC below 0.2 mg/L. Note that 0% predicted risk of household FRC below 0.2 mg/L does not mean that there is no possibility of household FRC being below 0.2 mg/L, simply that the predicted risk is too low to be measured. The average case target may, in some, cases be more conservative than the worst case targets as the worst case target is derived on the assumption that higher conductivity and water temperature will lead to greater decay (as confirmed by FRC decay chemisty and results at past sites). However, this may not be true in all cases, so the most conservative target is always recommended."
            )
        with tag("div", id="Risk Graphs"):
            doc.stag(
                "img",
                src=os.path.basename(
                    os.path.splitext(filename)[0] + "_Risk_Fig.png"
                ),
            )
            # doc.asis('<object data="cid:'+os.path.basename(os.path.splitext(filename)[0]+'.PNG') + '" type="image/jpeg"></object>')
            # doc.asis(
            #     '<object data="'
            #     + os.path.basename(os.path.splitext(filename)[0] + "_Risk_Fig.png")
            #     + '" type="image/jpeg"></object>'
            # )
        with tag("h2", klass="Header"):
            text("Average Case Targets Table")
        with tag("table", id="average case table"):
            doc.asis(avg_html_table)
        with tag("h2", klass="Header"):
            text("Worst Case Targets Table")
        with tag("table", id="worst case table"):
            doc.asis(worst_html_table)
        with tag("p", klass="Histograms Text"):
            text(
                "Histograms for the input variables used to generate predictions and risk recommendations above. Average case and worst case conductivity and water temperature are shown for context of values used to generate targets."
            )
        with tag("div", id="Histograms"):
            doc.stag(
                "img",
                src=os.path.basename(
                    os.path.splitext(filename)[0] + "_Histograms_Fig.png"
                ),
            )
            # doc.asis('<object data="cid:'+os.path.basename(os.path.splitext(filename)[0]+'.PNG') + '" type="image/jpeg"></object>')
            # doc.asis(
            #     '<object data="'
            #     + os.path.basename(
            #         os.path.splitext(filename)[0] + "_Histograms_Fig.png"
            #     )
            #     + '" type="image/jpeg"></object>'
            # )
    else:
        # Branch without worst-case content (no temperature/conductivity).
        with tag("p", klass="Predictions Fig Text"):
            text(
                "Household FRC forecast from an ensemble of "
                + str(self.network_count)
                + " ANN models. The predictions of each model are grouped into a probability density function to predict the risk of FRC below threshold values of 0.20 mg/L using a fixed input variable set(shown in the risk table below). Note that if FRC is collected using pool testers instead of a cholorimeter, the predicted FRC may be disproportionately clustered towards the centre of the observations, resulting in some observations with low FRC to not be captured within the ensemble forecast. In these cases, the predicted risk in the next figure and in the subsequent risk table may be underpredicted."
            )
        with tag("div", id="Predictions Graphs"):
            doc.stag(
                "img",
                src=os.path.basename(
                    os.path.splitext(filename)[0] + "_Predictions_Fig.png"
                ),
            )
            # doc.asis('<object data="cid:'+os.path.basename(os.path.splitext(filename)[0]+'.PNG') + '" type="image/jpeg"></object>')
            # doc.asis(
            #     '<object data="'
            #     + os.path.basename(
            #         os.path.splitext(filename)[0] + "_Predictions_Fig.png"
            #     )
            #     + '" type="image/jpeg"></object>'
            # )
        with tag("p", klass="Risk Fig Text"):
            text(
                "Figure and tables showing predicted risk of household FRC below 0.2 mg/L for both AM and PM collection. Risk obtained from forecast probability density function (above) and taken as cumulative probability of houeshold FRC below 0.2 mg/L. Note that 0% predicted risk of household FRC below 0.2 mg/L does not mean that there is no possibility of household FRC being below 0.2 mg/L, simply that the predicted risk is too low to be measured."
            )
        with tag("div", id="Risk Graphs"):
            doc.stag(
                "img",
                src=os.path.basename(
                    os.path.splitext(filename)[0] + "_Risk_Fig.png"
                ),
            )
            # doc.asis('<object data="cid:'+os.path.basename(os.path.splitext(filename)[0]+'.PNG') + '" type="image/jpeg"></object>')
            # doc.asis(
            #     '<object data="'
            #     + os.path.basename(os.path.splitext(filename)[0] + "_Risk_Fig.png")
            #     + '" type="image/jpeg"></object>'
            # )
        with tag("h2", klass="Header"):
            text("Targets Table")
        with tag("table", id="average case table"):
            doc.asis(avg_html_table)
        with tag("p", klass="Histograms Text"):
            text(
                "Histograms for the input variables used to generate predictions and risk recommendations above."
            )
        with tag("div", id="Histograms"):
            doc.stag(
                "img",
                src=os.path.basename(
                    os.path.splitext(filename)[0] + "_Histograms_Fig.png"
                ),
            )
            # doc.asis('<object data="cid:'+os.path.basename(os.path.splitext(filename)[0]+'.PNG') + '" type="image/jpeg"></object>')
            # doc.asis(
            #     '<object data="'
            #     + os.path.basename(
            #         os.path.splitext(filename)[0] + "_Histograms_Fig.png"
            #     )
            #     + '" type="image/jpeg"></object>'
            # )
    # Diagnostics section is common to both branches.
    with tag("h2", klass="Header"):
        text("Model Diagnostic Figures")
    with tag("p", klass="Performance Indicator General Text"):
        text(
            "These figures evaluate the performance of the ANN ensemble model after training. These figures serve as an indicator of the similarity between the distribution of forecasts produced by the ANN ensembles and the observed data and can be used to evaluate the soundness of the models, and of the confidence we can have in the targets."
        )
    with tag("p", klass="Performance annotation 1"):
        text(
            "Subplot A: Household FRC forecasts from an ensemble of"
            + str(self.network_count)
            + " neural networks using the full provided dataset."
        )
    with tag("p", klass="Performance annotation 2"):
        text(
            "Subplot B: Confidence Interval (CI) reliability diagram. Each point shows the percentage of observations captured within each ensemble CI. An ideal model will have all points on the 1:1 line. If points are below the line, indicates forecast underdispersion (may lead to overly optimistic targets). If points are above the line, indicates overdispersion (may result in overly conservative targets)."
        )
    with tag("p", klass="Performance annotation 3"):
        text(
            "Subplot C: Rank Histogram. This creates a histogram of the relative location of all recorded observations relative to each ensemble member. An ideal model has a flat rank histogram. A U-shaped rank histogram indicates forecast underdispersion (may lead to overly optimistic targets). An arch-shaped rank histogram indicates overdispersion (may result in overly conservative targets)."
        )
    with tag("div", id="diagnostic_graphs"):
        doc.stag(
            "img",
            src=os.path.basename(
                os.path.splitext(filename)[0] + "_Calibration_Diagnostic_Figs.png"
            ),
        )
        # doc.asis(
        #     '<object data="'
        #     + os.path.basename(
        #         os.path.splitext(filename)[0] + "_Calibration_Diagnostic_Figs.png"
        #     )
        #     + '" type="image/jpeg"></object>'
        # )
    doc.asis(skipped_rows_table)
    # Append the list of validation rules that fired and count the total
    # number of rows they matched.
    totalmatches = 0
    if len(self.ruleset):
        with tag("ul", id="ann_ruleset"):
            for rule in self.ruleset:
                # rule is (description, column, match_count)
                totalmatches += rule[2]
                line("li", "%s. Matches: %d" % (rule[0], rule[2]))
    with tag("div", id="pythonSkipped_count"):
        text(totalmatches)
    # NOTE(review): plain open/close kept as-is; a with-block would be safer.
    file = open(filename, "w+")
    file.write(doc.getvalue())
    file.close()
    return doc.getvalue()
def generate_metadata(self):
    """Return a metadata dict describing this run.

    :return: dict with key ``"average_time"`` holding the average
        tapstand-to-household elapsed time, in seconds.
    """
    return {"average_time": self.avg_time_elapsed}
def prepare_table_for_html_report(self, storage_target):
    """Format the prediction results into html table(s) for the report.

    Always builds the average-case table; additionally builds the
    worst-case table when water temperature or conductivity inputs are
    present.

    :param storage_target: storage duration shown in the targets column
    :return: ``avg_html_str``, or ``(avg_html_str, worst_html_str)`` when
        the worst-case table is produced
    """
    # Pick raw vs post-processed ensemble results once; the column layout
    # below is identical for both, so the duplicated branch bodies of the
    # previous version collapse into a single construction path.
    if not self.post_process_check:
        avg_am, avg_pm = self.avg_case_results_am, self.avg_case_results_pm
    else:
        avg_am, avg_pm = (
            self.avg_case_results_am_post,
            self.avg_case_results_pm_post,
        )

    avg_table_df = pd.DataFrame()
    avg_table_df["Input FRC (mg/L)"] = self.avg_case_results_am[FRC_IN]
    avg_table_df["Storage Duration for Target"] = storage_target
    if WATTEMP in self.datainputs.columns:
        avg_table_df["Water Temperature (Degrees C)"] = self.avg_case_results_am[
            WATTEMP
        ]
    if COND in self.datainputs.columns:
        avg_table_df[
            "Electrical Conductivity (s*10^-6/cm)"
        ] = self.avg_case_results_am[COND]
    avg_table_df[
        "Median Predicted Household FRC Concentration (mg/L) - AM Collection"
    ] = np.round(avg_am["median"], decimals=3)
    avg_table_df[
        "Median Predicted Household FRC Concentration (mg/L) - PM Collection"
    ] = np.round(avg_pm["median"], decimals=3)
    avg_table_df[
        "Predicted Risk of Household FRC below 0.20 mg/L - AM Collection"
    ] = np.round(avg_am["probability<=0.20"], decimals=3)
    avg_table_df[
        "Predicted Risk of Household FRC below 0.20 mg/L - PM Collection"
    ] = np.round(avg_pm["probability<=0.20"], decimals=3)

    str_io = io.StringIO()
    avg_table_df.to_html(buf=str_io, table_id="annTable")
    avg_html_str = str_io.getvalue()

    # No temperature/conductivity inputs: only the average-case table.
    if WATTEMP not in self.datainputs.columns and COND not in self.datainputs.columns:
        return avg_html_str

    if not self.post_process_check:
        worst_am, worst_pm = (
            self.worst_case_results_am,
            self.worst_case_results_pm,
        )
    else:
        worst_am, worst_pm = (
            self.worst_case_results_am_post,
            self.worst_case_results_pm_post,
        )

    worst_table_df = pd.DataFrame()
    worst_table_df["Input FRC (mg/L)"] = self.worst_case_results_am[FRC_IN]
    worst_table_df["Storage Duration for Target"] = storage_target
    if WATTEMP in self.datainputs.columns:
        worst_table_df[
            "Water Temperature(" + r"$\degree$" + "C)"
        ] = self.worst_case_results_am[WATTEMP]
    if COND in self.datainputs.columns:
        worst_table_df[
            "Electrical Conductivity (" + r"$\mu$" + "s/cm)"
        ] = self.worst_case_results_am[COND]
    worst_table_df[
        "Median Predicted FRC level at Household (mg/L) - AM Collection"
    ] = np.round(worst_am["median"], decimals=3)
    worst_table_df[
        "Median Predicted FRC level at Household (mg/L) - PM Collection"
    ] = np.round(worst_pm["median"], decimals=3)
    # Bug fix: these headers previously read "AMM Collection".
    worst_table_df[
        "Predicted Risk of Household FRC below 0.20 mg/L - AM Collection"
    ] = np.round(worst_am["probability<=0.20"], decimals=3)
    worst_table_df[
        "Predicted Risk of Household FRC below 0.20 mg/L - PM Collection"
    ] = np.round(worst_pm["probability<=0.20"], decimals=3)

    str_io = io.StringIO()
    worst_table_df.to_html(buf=str_io, table_id="annTable")
    worst_html_str = str_io.getvalue()
    return avg_html_str, worst_html_str
def skipped_rows_html(self):
    """Render the rows skipped during import as an html table.

    Cells in a rule-required column that are empty or NaN are wrapped in
    a ``red-cell`` div so the report highlights why the row was dropped.

    :return: html table string, or ``""`` when no rows were skipped.
    """
    if self.skipped_rows.empty:
        return ""

    shown_columns = [
        "ts_datetime",
        FRC_IN,
        "hh_datetime",
        FRC_OUT,
        WATTEMP,
        COND,
    ]
    # Columns that a validation rule depends on; rule is (desc, column, n).
    mandatory_columns = [rule[1] for rule in self.ruleset]

    doc, tag, text = Doc().tagtext()
    with tag(
        "table",
        klass="table center fill-whitespace",
        id="pythonSkipped",
        border="1",
    ):
        with tag("thead"), tag("tr"):
            for column_name in shown_columns:
                with tag("th"):
                    text(column_name)
        with tag("tbody"):
            for _, record in self.skipped_rows[shown_columns].iterrows():
                with tag("tr"):
                    for column_name in shown_columns:
                        cell = record[column_name]
                        with tag("td"):
                            # `cell != cell` is the NaN test.
                            if column_name in mandatory_columns and (
                                not cell or cell != cell
                            ):
                                with tag("div", klass="red-cell"):
                                    text("")
                            else:
                                text(cell)
    return doc.getvalue()
def valid_dates(self, series):
    """Build a boolean mask of rows in *series* that do NOT hold a usable date.

    A row is considered valid (mask entry ``False``) when it parses either
    as a text timestamp matching ``self.xl_dateformat`` (first 16 chars,
    with ``/`` normalised to ``-``) or as a numeric Excel serial date.

    :param series: pandas Series of raw date cells
    :return: list of bool, one per row; ``True`` means invalid/missing
    """
    mask = []
    for i in series.index.to_numpy():
        row = series[i]
        if row is None:  # was `row == None`; identity test is correct here
            mask.append(True)
            continue
        if isinstance(row, str) and not row.replace(".", "", 1).isdigit():
            # Text timestamp: try the configured Excel date format.
            try:
                datetime.datetime.strptime(
                    row[:16].replace("/", "-"), self.xl_dateformat
                )
                mask.append(False)
            except Exception:  # narrowed from bare `except:`
                mask.append(True)
        else:
            # Numeric (or numeric-string) value: treat as Excel serial date.
            try:
                start = float(row)
                start = xldate_as_datetime(start, datemode=0)
                mask.append(False)
            except Exception:  # narrowed from bare `except:`
                mask.append(True)
    return mask
def execute_rule(self, description, column, matches):
    """Record a validation rule and drop the rows it matched.

    :param description: human-readable rule description (shown in report)
    :param column: name of the column the rule depends on
    :param matches: boolean mask over ``self.file`` rows; True = drop row
    """
    # Compute the match count once instead of calling sum(matches) twice.
    match_count = sum(matches)
    self.ruleset.append((description, column, match_count))
    if match_count:
        # Remove the offending rows from the working dataframe in place.
        self.file.drop(self.file.loc[matches].index, inplace=True)
def run_swot(self, input_file, results_file, report_file, storage_target):
    """Run the full SWOT pipeline: import, train, evaluate, predict, report.

    :param input_file: csv file with the collected field samples
    :param results_file: destination csv for the numeric results
    :param report_file: destination html file for the generated report
    :param storage_target: storage duration used for the FRC targets
    :return: metadata dict (see generate_metadata)
    """
    # Bug fix: the column-name constants are read as module globals by the
    # other methods (e.g. generate_html_report, prepare_table_for_html_report),
    # but were previously assigned as dead local variables here. Declare them
    # global so the template detection below actually takes effect.
    global FRC_IN, WATTEMP, COND, FRC_OUT
    now = datetime.datetime.now()
    directory = (
        "model_retraining"
        + os.sep
        + now.strftime("%m%d%Y_%H%M%S")
        + "_"
        + os.path.basename(input_file)
    )
    # Uncomment for Excel processing
    # file = pd.read_excel(input_file)
    file = pd.read_csv(input_file)
    # Support for 3 different input templates: se1_frc, ts_frc, and ts_frc1
    if "se1_frc" in file.columns:
        FRC_IN = "se1_frc"
        WATTEMP = "se1_wattemp"
        COND = "se1_cond"
        FRC_OUT = "se4_frc"
    elif "ts_frc1" in file.columns:
        FRC_IN = "ts_frc1"
        WATTEMP = "ts_wattemp"
        COND = "ts_cond"
        FRC_OUT = "hh_frc1"
    elif "ts_frc" in file.columns:
        FRC_IN = "ts_frc"
        WATTEMP = "ts_wattemp"
        COND = "ts_cond"
        FRC_OUT = "hh_frc"
    self.import_data_from_csv(input_file)
    self.set_up_model()
    self.train_SWOT_network(directory)
    self.calibration_performance_evaluation(report_file)
    self.post_process_cal()
    # self.full_performance_evaluation(directory)
    self.set_inputs_for_table(storage_target)
    self.predict()
    self.display_results()
    self.export_results_to_csv(results_file)
    self.generate_html_report(report_file, storage_target)
    metadata = self.generate_metadata()
    return metadata
| [
"logging.getLogger",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.less_equal",
"matplotlib.colorbar.ColorbarBase",
"io.BytesIO",
"numpy.equal",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.array",
"matplotlib.colors.BoundaryNorm",
"tensorflow.keras.laye... | [((194, 242), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (213, 242), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1199), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1176, 1199), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1230, 1265), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1242, 1265), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1943, 2000), 'tensorflow.keras.optimizers.Nadam', 'keras.optimizers.Nadam', ([], {'lr': '(0.01)', 'beta_1': '(0.9)', 'beta_2': '(0.999)'}), '(lr=0.01, beta_1=0.9, beta_2=0.999)\n', (1965, 2000), False, 'from tensorflow import keras\n'), ((2022, 2047), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (2045, 2047), False, 'from tensorflow import keras\n'), ((2787, 2808), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (2798, 2808), True, 'import pandas as pd\n'), ((6559, 6577), 'numpy.mean', 'np.mean', (['durations'], {}), '(durations)\n', (6566, 6577), True, 'import numpy as np\n'), ((7142, 7166), 'pandas.DataFrame', 'pd.DataFrame', (['predictors'], {}), '(predictors)\n', (7154, 7166), True, 'import pandas as pd\n'), ((8120, 8177), 'tensorflow.keras.optimizers.Nadam', 'keras.optimizers.Nadam', ([], {'lr': '(0.01)', 'beta_1': '(0.9)', 'beta_2': '(0.999)'}), '(lr=0.01, beta_1=0.9, beta_2=0.999)\n', (8142, 8177), False, 'from tensorflow import keras\n'), ((8199, 8224), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (8222, 8224), False, 'from tensorflow import keras\n'), ((10185, 10217), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (10215, 10217), True, 'import tensorflow as tf\n'), ((10251, 
10357), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=0, patience=10,\n restore_best_weights=True)\n", (10280, 10357), False, 'from tensorflow import keras\n'), ((10546, 10582), 'tensorflow.keras.models.clone_model', 'keras.models.clone_model', (['self.model'], {}), '(self.model)\n', (10570, 10582), False, 'from tensorflow import keras\n'), ((10645, 10709), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_norm', 't_norm'], {'train_size': '(0.333)', 'shuffle': '(True)'}), '(x_norm, t_norm, train_size=0.333, shuffle=True)\n', (10661, 10709), False, 'from sklearn.model_selection import train_test_split\n'), ((11446, 11468), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (11454, 11468), True, 'import numpy as np\n'), ((11486, 11524), 'numpy.array', 'np.array', (['self.calibration_predictions'], {}), '(self.calibration_predictions)\n', (11494, 11524), True, 'import numpy as np\n'), ((14165, 14184), 'numpy.sum', 'np.sum', (['capture_all'], {}), '(capture_all)\n', (14171, 14184), True, 'import numpy as np\n'), ((14210, 14228), 'numpy.sum', 'np.sum', (['capture_90'], {}), '(capture_90)\n', (14216, 14228), True, 'import numpy as np\n'), ((14254, 14272), 'numpy.sum', 'np.sum', (['capture_80'], {}), '(capture_80)\n', (14260, 14272), True, 'import numpy as np\n'), ((14298, 14316), 'numpy.sum', 'np.sum', (['capture_70'], {}), '(capture_70)\n', (14304, 14316), True, 'import numpy as np\n'), ((14342, 14360), 'numpy.sum', 'np.sum', (['capture_60'], {}), '(capture_60)\n', (14348, 14360), True, 'import numpy as np\n'), ((14386, 14404), 'numpy.sum', 'np.sum', (['capture_50'], {}), '(capture_50)\n', (14392, 14404), True, 'import numpy as np\n'), ((14430, 14448), 'numpy.sum', 'np.sum', (['capture_40'], {}), '(capture_40)\n', (14436, 14448), True, 'import numpy as np\n'), 
((14474, 14492), 'numpy.sum', 'np.sum', (['capture_30'], {}), '(capture_30)\n', (14480, 14492), True, 'import numpy as np\n'), ((14518, 14536), 'numpy.sum', 'np.sum', (['capture_20'], {}), '(capture_20)\n', (14524, 14536), True, 'import numpy as np\n'), ((14562, 14580), 'numpy.sum', 'np.sum', (['capture_10'], {}), '(capture_10)\n', (14568, 14580), True, 'import numpy as np\n'), ((14611, 14633), 'numpy.sum', 'np.sum', (['capture_all_20'], {}), '(capture_all_20)\n', (14617, 14633), True, 'import numpy as np\n'), ((14662, 14683), 'numpy.sum', 'np.sum', (['capture_90_20'], {}), '(capture_90_20)\n', (14668, 14683), True, 'import numpy as np\n'), ((14712, 14733), 'numpy.sum', 'np.sum', (['capture_80_20'], {}), '(capture_80_20)\n', (14718, 14733), True, 'import numpy as np\n'), ((14762, 14783), 'numpy.sum', 'np.sum', (['capture_70_20'], {}), '(capture_70_20)\n', (14768, 14783), True, 'import numpy as np\n'), ((14812, 14833), 'numpy.sum', 'np.sum', (['capture_60_20'], {}), '(capture_60_20)\n', (14818, 14833), True, 'import numpy as np\n'), ((14862, 14883), 'numpy.sum', 'np.sum', (['capture_50_20'], {}), '(capture_50_20)\n', (14868, 14883), True, 'import numpy as np\n'), ((14912, 14933), 'numpy.sum', 'np.sum', (['capture_40_20'], {}), '(capture_40_20)\n', (14918, 14933), True, 'import numpy as np\n'), ((14962, 14983), 'numpy.sum', 'np.sum', (['capture_30_20'], {}), '(capture_30_20)\n', (14968, 14983), True, 'import numpy as np\n'), ((15012, 15033), 'numpy.sum', 'np.sum', (['capture_20_20'], {}), '(capture_20_20)\n', (15018, 15033), True, 'import numpy as np\n'), ((15062, 15083), 'numpy.sum', 'np.sum', (['capture_10_20'], {}), '(capture_10_20)\n', (15068, 15083), True, 'import numpy as np\n'), ((17652, 17699), 'numpy.histogram', 'np.histogram', (['rank'], {'bins': '(self.network_count + 1)'}), '(rank, bins=self.network_count + 1)\n', (17664, 17699), True, 'import numpy as np\n'), ((17716, 17781), 'numpy.sum', 'np.sum', (['((rank_hist[0] - test_len / (self.network_count + 1)) 
** 2)'], {}), '((rank_hist[0] - test_len / (self.network_count + 1)) ** 2)\n', (17722, 17781), True, 'import numpy as np\n'), ((17957, 17984), 'numpy.zeros', 'np.zeros', (['(test_len, c + 1)'], {}), '((test_len, c + 1))\n', (17965, 17984), True, 'import numpy as np\n'), ((18002, 18029), 'numpy.zeros', 'np.zeros', (['(test_len, c + 1)'], {}), '((test_len, c + 1))\n', (18010, 18029), True, 'import numpy as np\n'), ((19035, 19057), 'numpy.mean', 'np.mean', (['alpha'], {'axis': '(0)'}), '(alpha, axis=0)\n', (19042, 19057), True, 'import numpy as np\n'), ((19077, 19098), 'numpy.mean', 'np.mean', (['beta'], {'axis': '(0)'}), '(beta, axis=0)\n', (19084, 19098), True, 'import numpy as np\n'), ((19573, 19609), 'numpy.arange', 'np.arange', (['(0 / c)', '((c + 1) / c)', '(1 / c)'], {}), '(0 / c, (c + 1) / c, 1 / c)\n', (19582, 19609), True, 'import numpy as np\n'), ((19635, 19700), 'numpy.sum', 'np.sum', (['(g_bar * ((1 - o_bar) * p_i ** 2 + o_bar * (1 - p_i) ** 2))'], {}), '(g_bar * ((1 - o_bar) * p_i ** 2 + o_bar * (1 - p_i) ** 2))\n', (19641, 19700), True, 'import numpy as np\n'), ((19817, 19854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)', 'dpi': '(100)'}), '(figsize=(15, 10), dpi=100)\n', (19827, 19854), True, 'import matplotlib.pyplot as plt\n'), ((19863, 19886), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(3)'], {}), '(2, 3)\n', (19880, 19886), True, 'import matplotlib.gridspec as gridspec\n'), ((19896, 19950), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 3)', '(0, 0)'], {'colspan': '(2)', 'rowspan': '(2)'}), '((2, 3), (0, 0), colspan=2, rowspan=2)\n', (19912, 19950), True, 'import matplotlib.pyplot as plt\n'), ((19959, 20036), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.2)'], {'c': '"""k"""', 'ls': '"""--"""', 'label': '"""Point-of-consumption FRC = 0.2 mg/L"""'}), "(0.2, c='k', ls='--', label='Point-of-consumption FRC = 0.2 mg/L')\n", (19970, 20036), True, 'import matplotlib.pyplot as plt\n'), 
((20045, 20135), 'matplotlib.pyplot.scatter', 'plt.scatter', (['FRC_X', 'Y_true'], {'edgecolors': '"""k"""', 'facecolors': '"""None"""', 's': '(20)', 'label': '"""Observed"""'}), "(FRC_X, Y_true, edgecolors='k', facecolors='None', s=20, label=\n 'Observed')\n", (20056, 20135), True, 'import matplotlib.pyplot as plt\n'), ((20544, 20590), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Point-of-Distribution FRC (mg/L)"""'], {}), "('Point-of-Distribution FRC (mg/L)')\n", (20554, 20590), True, 'import matplotlib.pyplot as plt\n'), ((20599, 20644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point-of-Consumption FRC (mg/L)"""'], {}), "('Point-of-Consumption FRC (mg/L)')\n", (20609, 20644), True, 'import matplotlib.pyplot as plt\n'), ((20690, 20822), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.001, 0.999)', 'shadow': '(False)', 'labelspacing': '(0.1)', 'fontsize': '"""small"""', 'handletextpad': '(0.1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(0.001, 0.999), shadow=False, labelspacing=0.1,\n fontsize='small', handletextpad=0.1, loc='upper left')\n", (20700, 20822), True, 'import matplotlib.pyplot as plt\n'), ((20982, 21036), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 3)', '(0, 2)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((2, 3), (0, 2), colspan=1, rowspan=1)\n', (20998, 21036), True, 'import matplotlib.pyplot as plt\n'), ((21045, 21072), 'matplotlib.pyplot.plot', 'plt.plot', (['CI_x', 'CI_x'], {'c': '"""k"""'}), "(CI_x, CI_x, c='k')\n", (21053, 21072), True, 'import matplotlib.pyplot as plt\n'), ((21081, 21133), 'matplotlib.pyplot.scatter', 'plt.scatter', (['CI_x', 'capture'], {'label': '"""All observations"""'}), "(CI_x, capture, label='All observations')\n", (21092, 21133), True, 'import matplotlib.pyplot as plt\n'), ((21142, 21220), 'matplotlib.pyplot.scatter', 'plt.scatter', (['CI_x', 'capture_20'], {'label': '"""Point-of-Consumption FRC below 0.2 mg/L"""'}), "(CI_x, capture_20, label='Point-of-Consumption FRC 
below 0.2 mg/L')\n", (21153, 21220), True, 'import matplotlib.pyplot as plt\n'), ((21229, 21271), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ensemble Confidence Interval"""'], {}), "('Ensemble Confidence Interval')\n", (21239, 21271), True, 'import matplotlib.pyplot as plt\n'), ((21280, 21309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent Capture"""'], {}), "('Percent Capture')\n", (21290, 21309), True, 'import matplotlib.pyplot as plt\n'), ((21318, 21334), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (21326, 21334), True, 'import matplotlib.pyplot as plt\n'), ((21343, 21359), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (21351, 21359), True, 'import matplotlib.pyplot as plt\n'), ((21368, 21500), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.001, 0.999)', 'shadow': '(False)', 'labelspacing': '(0.1)', 'fontsize': '"""small"""', 'handletextpad': '(0.1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(0.001, 0.999), shadow=False, labelspacing=0.1,\n fontsize='small', handletextpad=0.1, loc='upper left')\n", (21378, 21500), True, 'import matplotlib.pyplot as plt\n'), ((21660, 21714), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 3)', '(1, 2)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((2, 3), (1, 2), colspan=1, rowspan=1)\n', (21676, 21714), True, 'import matplotlib.pyplot as plt\n'), ((21723, 21780), 'matplotlib.pyplot.hist', 'plt.hist', (['rank'], {'bins': '(self.network_count + 1)', 'density': '(True)'}), '(rank, bins=self.network_count + 1, density=True)\n', (21731, 21780), True, 'import matplotlib.pyplot as plt\n'), ((21791, 21809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rank"""'], {}), "('Rank')\n", (21801, 21809), True, 'import matplotlib.pyplot as plt\n'), ((21818, 21843), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (21828, 21843), True, 'import matplotlib.pyplot as plt\n'), ((22094, 22105), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22103, 22105), True, 'import matplotlib.pyplot as plt\n'), ((22133, 22145), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (22143, 22145), False, 'import io\n'), ((22154, 22217), 'matplotlib.pyplot.savefig', 'plt.savefig', (['myStringIOBytes'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(myStringIOBytes, format='png', bbox_inches='tight')\n", (22165, 22217), True, 'import matplotlib.pyplot as plt\n'), ((22394, 22416), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (22402, 22416), True, 'import numpy as np\n'), ((23655, 23684), 'numpy.arange', 'np.arange', (['(-10)', '(10.001)', '(0.001)'], {}), '(-10, 10.001, 0.001)\n', (23664, 23684), True, 'import numpy as np\n'), ((27070, 27083), 'numpy.mean', 'np.mean', (['CRPS'], {}), '(CRPS)\n', (27077, 27083), True, 'import numpy as np\n'), ((29191, 29210), 'numpy.sum', 'np.sum', (['capture_all'], {}), '(capture_all)\n', (29197, 29210), True, 'import numpy as np\n'), ((29236, 29254), 'numpy.sum', 'np.sum', (['capture_90'], {}), '(capture_90)\n', (29242, 29254), True, 'import numpy as np\n'), ((29280, 29298), 'numpy.sum', 'np.sum', (['capture_80'], {}), '(capture_80)\n', (29286, 29298), True, 'import numpy as np\n'), ((29324, 29342), 'numpy.sum', 'np.sum', (['capture_70'], {}), '(capture_70)\n', (29330, 29342), True, 'import numpy as np\n'), ((29368, 29386), 'numpy.sum', 'np.sum', (['capture_60'], {}), '(capture_60)\n', (29374, 29386), True, 'import numpy as np\n'), ((29412, 29430), 'numpy.sum', 'np.sum', (['capture_50'], {}), '(capture_50)\n', (29418, 29430), True, 'import numpy as np\n'), ((29456, 29474), 'numpy.sum', 'np.sum', (['capture_40'], {}), '(capture_40)\n', (29462, 29474), True, 'import numpy as np\n'), ((29500, 29518), 'numpy.sum', 'np.sum', (['capture_30'], {}), '(capture_30)\n', (29506, 29518), True, 'import numpy as np\n'), ((29544, 29562), 'numpy.sum', 'np.sum', (['capture_20'], {}), '(capture_20)\n', (29550, 
29562), True, 'import numpy as np\n'), ((29588, 29606), 'numpy.sum', 'np.sum', (['capture_10'], {}), '(capture_10)\n', (29594, 29606), True, 'import numpy as np\n'), ((29637, 29659), 'numpy.sum', 'np.sum', (['capture_all_20'], {}), '(capture_all_20)\n', (29643, 29659), True, 'import numpy as np\n'), ((29688, 29709), 'numpy.sum', 'np.sum', (['capture_90_20'], {}), '(capture_90_20)\n', (29694, 29709), True, 'import numpy as np\n'), ((29738, 29759), 'numpy.sum', 'np.sum', (['capture_80_20'], {}), '(capture_80_20)\n', (29744, 29759), True, 'import numpy as np\n'), ((29788, 29809), 'numpy.sum', 'np.sum', (['capture_70_20'], {}), '(capture_70_20)\n', (29794, 29809), True, 'import numpy as np\n'), ((29838, 29859), 'numpy.sum', 'np.sum', (['capture_60_20'], {}), '(capture_60_20)\n', (29844, 29859), True, 'import numpy as np\n'), ((29888, 29909), 'numpy.sum', 'np.sum', (['capture_50_20'], {}), '(capture_50_20)\n', (29894, 29909), True, 'import numpy as np\n'), ((29938, 29959), 'numpy.sum', 'np.sum', (['capture_40_20'], {}), '(capture_40_20)\n', (29944, 29959), True, 'import numpy as np\n'), ((29988, 30009), 'numpy.sum', 'np.sum', (['capture_30_20'], {}), '(capture_30_20)\n', (29994, 30009), True, 'import numpy as np\n'), ((30038, 30059), 'numpy.sum', 'np.sum', (['capture_20_20'], {}), '(capture_20_20)\n', (30044, 30059), True, 'import numpy as np\n'), ((30088, 30109), 'numpy.sum', 'np.sum', (['capture_10_20'], {}), '(capture_10_20)\n', (30094, 30109), True, 'import numpy as np\n'), ((33030, 33115), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_norm', 't_norm'], {'test_size': '(0.25)', 'shuffle': '(False)', 'random_state': '(10)'}), '(x_norm, t_norm, test_size=0.25, shuffle=False, random_state=10\n )\n', (33046, 33115), False, 'from sklearn.model_selection import train_test_split\n'), ((33337, 33443), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 
'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=0, patience=10,\n restore_best_weights=True)\n", (33366, 33443), False, 'from tensorflow import keras\n'), ((34587, 34624), 'numpy.array', 'np.array', (['self.verifying_observations'], {}), '(self.verifying_observations)\n', (34595, 34624), True, 'import numpy as np\n'), ((34642, 34678), 'numpy.array', 'np.array', (['self.verifying_predictions'], {}), '(self.verifying_predictions)\n', (34650, 34678), True, 'import numpy as np\n'), ((37307, 37326), 'numpy.sum', 'np.sum', (['capture_all'], {}), '(capture_all)\n', (37313, 37326), True, 'import numpy as np\n'), ((37352, 37370), 'numpy.sum', 'np.sum', (['capture_90'], {}), '(capture_90)\n', (37358, 37370), True, 'import numpy as np\n'), ((37396, 37414), 'numpy.sum', 'np.sum', (['capture_80'], {}), '(capture_80)\n', (37402, 37414), True, 'import numpy as np\n'), ((37440, 37458), 'numpy.sum', 'np.sum', (['capture_70'], {}), '(capture_70)\n', (37446, 37458), True, 'import numpy as np\n'), ((37484, 37502), 'numpy.sum', 'np.sum', (['capture_60'], {}), '(capture_60)\n', (37490, 37502), True, 'import numpy as np\n'), ((37528, 37546), 'numpy.sum', 'np.sum', (['capture_50'], {}), '(capture_50)\n', (37534, 37546), True, 'import numpy as np\n'), ((37572, 37590), 'numpy.sum', 'np.sum', (['capture_40'], {}), '(capture_40)\n', (37578, 37590), True, 'import numpy as np\n'), ((37616, 37634), 'numpy.sum', 'np.sum', (['capture_30'], {}), '(capture_30)\n', (37622, 37634), True, 'import numpy as np\n'), ((37660, 37678), 'numpy.sum', 'np.sum', (['capture_20'], {}), '(capture_20)\n', (37666, 37678), True, 'import numpy as np\n'), ((37704, 37722), 'numpy.sum', 'np.sum', (['capture_10'], {}), '(capture_10)\n', (37710, 37722), True, 'import numpy as np\n'), ((37753, 37775), 'numpy.sum', 'np.sum', (['capture_all_20'], {}), '(capture_all_20)\n', (37759, 37775), True, 'import numpy as np\n'), ((37804, 37825), 'numpy.sum', 'np.sum', (['capture_90_20'], {}), '(capture_90_20)\n', 
(37810, 37825), True, 'import numpy as np\n'), ((37854, 37875), 'numpy.sum', 'np.sum', (['capture_80_20'], {}), '(capture_80_20)\n', (37860, 37875), True, 'import numpy as np\n'), ((37904, 37925), 'numpy.sum', 'np.sum', (['capture_70_20'], {}), '(capture_70_20)\n', (37910, 37925), True, 'import numpy as np\n'), ((37954, 37975), 'numpy.sum', 'np.sum', (['capture_60_20'], {}), '(capture_60_20)\n', (37960, 37975), True, 'import numpy as np\n'), ((38004, 38025), 'numpy.sum', 'np.sum', (['capture_50_20'], {}), '(capture_50_20)\n', (38010, 38025), True, 'import numpy as np\n'), ((38054, 38075), 'numpy.sum', 'np.sum', (['capture_40_20'], {}), '(capture_40_20)\n', (38060, 38075), True, 'import numpy as np\n'), ((38104, 38125), 'numpy.sum', 'np.sum', (['capture_30_20'], {}), '(capture_30_20)\n', (38110, 38125), True, 'import numpy as np\n'), ((38154, 38175), 'numpy.sum', 'np.sum', (['capture_20_20'], {}), '(capture_20_20)\n', (38160, 38175), True, 'import numpy as np\n'), ((38204, 38225), 'numpy.sum', 'np.sum', (['capture_10_20'], {}), '(capture_10_20)\n', (38210, 38225), True, 'import numpy as np\n'), ((40794, 40841), 'numpy.histogram', 'np.histogram', (['rank'], {'bins': '(self.network_count + 1)'}), '(rank, bins=self.network_count + 1)\n', (40806, 40841), True, 'import numpy as np\n'), ((40858, 40923), 'numpy.sum', 'np.sum', (['((rank_hist[0] - test_len / (self.network_count + 1)) ** 2)'], {}), '((rank_hist[0] - test_len / (self.network_count + 1)) ** 2)\n', (40864, 40923), True, 'import numpy as np\n'), ((41142, 41179), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)', 'dpi': '(100)'}), '(figsize=(15, 10), dpi=100)\n', (41152, 41179), True, 'import matplotlib.pyplot as plt\n'), ((41188, 41211), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(3)'], {}), '(2, 3)\n', (41205, 41211), True, 'import matplotlib.gridspec as gridspec\n'), ((41221, 41275), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 3)', '(0, 0)'], {'colspan': 
'(2)', 'rowspan': '(2)'}), '((2, 3), (0, 0), colspan=2, rowspan=2)\n', (41237, 41275), True, 'import matplotlib.pyplot as plt\n'), ((41284, 41361), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.2)'], {'c': '"""k"""', 'ls': '"""--"""', 'label': '"""Point-of-consumption FRC = 0.2 mg/L"""'}), "(0.2, c='k', ls='--', label='Point-of-consumption FRC = 0.2 mg/L')\n", (41295, 41361), True, 'import matplotlib.pyplot as plt\n'), ((41370, 41460), 'matplotlib.pyplot.scatter', 'plt.scatter', (['FRC_X', 'Y_true'], {'edgecolors': '"""k"""', 'facecolors': '"""None"""', 's': '(20)', 'label': '"""Observed"""'}), "(FRC_X, Y_true, edgecolors='k', facecolors='None', s=20, label=\n 'Observed')\n", (41381, 41460), True, 'import matplotlib.pyplot as plt\n'), ((41869, 41915), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Point-of-Distribution FRC (mg/L)"""'], {}), "('Point-of-Distribution FRC (mg/L)')\n", (41879, 41915), True, 'import matplotlib.pyplot as plt\n'), ((41924, 41969), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point-of-Consumption FRC (mg/L)"""'], {}), "('Point-of-Consumption FRC (mg/L)')\n", (41934, 41969), True, 'import matplotlib.pyplot as plt\n'), ((41979, 42033), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 3)', '(0, 2)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((2, 3), (0, 2), colspan=1, rowspan=1)\n', (41995, 42033), True, 'import matplotlib.pyplot as plt\n'), ((42042, 42069), 'matplotlib.pyplot.plot', 'plt.plot', (['CI_x', 'CI_x'], {'c': '"""k"""'}), "(CI_x, CI_x, c='k')\n", (42050, 42069), True, 'import matplotlib.pyplot as plt\n'), ((42078, 42104), 'matplotlib.pyplot.scatter', 'plt.scatter', (['CI_x', 'capture'], {}), '(CI_x, capture)\n', (42089, 42104), True, 'import matplotlib.pyplot as plt\n'), ((42113, 42142), 'matplotlib.pyplot.scatter', 'plt.scatter', (['CI_x', 'capture_20'], {}), '(CI_x, capture_20)\n', (42124, 42142), True, 'import matplotlib.pyplot as plt\n'), ((42151, 42193), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Ensemble Confidence Interval"""'], {}), "('Ensemble Confidence Interval')\n", (42161, 42193), True, 'import matplotlib.pyplot as plt\n'), ((42202, 42231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent Capture"""'], {}), "('Percent Capture')\n", (42212, 42231), True, 'import matplotlib.pyplot as plt\n'), ((42240, 42256), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (42248, 42256), True, 'import matplotlib.pyplot as plt\n'), ((42265, 42281), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (42273, 42281), True, 'import matplotlib.pyplot as plt\n'), ((42291, 42345), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 3)', '(1, 2)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((2, 3), (1, 2), colspan=1, rowspan=1)\n', (42307, 42345), True, 'import matplotlib.pyplot as plt\n'), ((42354, 42411), 'matplotlib.pyplot.hist', 'plt.hist', (['rank'], {'bins': '(self.network_count + 1)', 'density': '(True)'}), '(rank, bins=self.network_count + 1, density=True)\n', (42362, 42411), True, 'import matplotlib.pyplot as plt\n'), ((42422, 42440), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rank"""'], {}), "('Rank')\n", (42432, 42440), True, 'import matplotlib.pyplot as plt\n'), ((42449, 42474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (42459, 42474), True, 'import matplotlib.pyplot as plt\n'), ((42483, 42558), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directory + '\\\\Verification_Diagnostic_Figs.png')"], {'format': '"""png"""'}), "(directory + '\\\\Verification_Diagnostic_Figs.png', format='png')\n", (42494, 42558), True, 'import matplotlib.pyplot as plt\n'), ((42567, 42578), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (42576, 42578), True, 'import matplotlib.pyplot as plt\n'), ((42606, 42618), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (42616, 42618), False, 'import io\n'), ((42627, 42669), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['myStringIOBytes'], {'format': '"""png"""'}), "(myStringIOBytes, format='png')\n", (42638, 42669), True, 'import matplotlib.pyplot as plt\n'), ((42874, 42900), 'numpy.arange', 'np.arange', (['(0.2)', '(2.05)', '(0.05)'], {}), '(0.2, 2.05, 0.05)\n', (42883, 42900), True, 'import numpy as np\n'), ((44555, 44580), 'pandas.DataFrame', 'pd.DataFrame', (['temp_med_am'], {}), '(temp_med_am)\n', (44567, 44580), True, 'import pandas as pd\n'), ((44619, 44644), 'pandas.DataFrame', 'pd.DataFrame', (['temp_med_pm'], {}), '(temp_med_pm)\n', (44631, 44644), True, 'import pandas as pd\n'), ((44685, 44709), 'pandas.DataFrame', 'pd.DataFrame', (['temp_95_am'], {}), '(temp_95_am)\n', (44697, 44709), True, 'import pandas as pd\n'), ((44750, 44774), 'pandas.DataFrame', 'pd.DataFrame', (['temp_95_pm'], {}), '(temp_95_pm)\n', (44762, 44774), True, 'import pandas as pd\n'), ((44919, 44948), 'numpy.arange', 'np.arange', (['(-10)', '(10.001)', '(0.001)'], {}), '(-10, 10.001, 0.001)\n', (44928, 44948), True, 'import numpy as np\n'), ((44969, 44995), 'numpy.arange', 'np.arange', (['(0.2)', '(2.05)', '(0.05)'], {}), '(0.2, 2.05, 0.05)\n', (44978, 44995), True, 'import numpy as np\n'), ((47406, 47428), 'pandas.DataFrame', 'pd.DataFrame', (['temp_key'], {}), '(temp_key)\n', (47418, 47428), True, 'import pandas as pd\n'), ((49648, 49681), 'pandas.DataFrame', 'pd.DataFrame', (['avg_case_results_am'], {}), '(avg_case_results_am)\n', (49660, 49681), True, 'import pandas as pd\n'), ((51925, 51958), 'pandas.DataFrame', 'pd.DataFrame', (['avg_case_results_pm'], {}), '(avg_case_results_pm)\n', (51937, 51958), True, 'import pandas as pd\n'), ((59374, 59400), 'numpy.arange', 'np.arange', (['(0.2)', '(2.05)', '(0.05)'], {}), '(0.2, 2.05, 0.05)\n', (59383, 59400), True, 'import numpy as np\n'), ((108693, 108724), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)'}), '(1, 2, sharex=True)\n', (108705, 108724), True, 'import matplotlib.pyplot as plt\n'), ((110374, 110384), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (110382, 110384), True, 'import matplotlib.pyplot as plt\n'), ((110412, 110424), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (110422, 110424), False, 'import io\n'), ((110433, 110475), 'matplotlib.pyplot.savefig', 'plt.savefig', (['myStringIOBytes'], {'format': '"""png"""'}), "(myStringIOBytes, format='png')\n", (110444, 110475), True, 'import matplotlib.pyplot as plt\n'), ((111609, 111619), 'numpy.sort', 'np.sort', (['c'], {}), '(c)\n', (111616, 111619), True, 'import numpy as np\n'), ((112289, 112312), 'logging.info', 'logging.info', (['divisions'], {}), '(divisions)\n', (112301, 112312), False, 'import logging\n'), ((112648, 112725), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""Custom cmap"""', 'cmaplist', 'cmap.N'], {}), "('Custom cmap', cmaplist, cmap.N)\n", (112692, 112725), True, 'import matplotlib as mpl\n'), ((112765, 112787), 'numpy.linspace', 'np.linspace', (['(0)', '(1.4)', '(8)'], {}), '(0, 1.4, 8)\n', (112776, 112787), True, 'import numpy as np\n'), ((112803, 112842), 'matplotlib.colors.BoundaryNorm', 'mpl.colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (112826, 112842), True, 'import matplotlib as mpl\n'), ((112858, 112899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19.2, 10.8)', 'dpi': '(100)'}), '(figsize=(19.2, 10.8), dpi=100)\n', (112868, 112899), True, 'import matplotlib.pyplot as plt\n'), ((114433, 114546), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax2'], {'cmap': 'cmap', 'norm': 'norm', 'spacing': '"""proportional"""', 'ticks': 'bounds', 'boundaries': 'bounds'}), "(ax2, cmap=cmap, norm=norm, spacing='proportional',\n ticks=bounds, boundaries=bounds)\n", (114458, 114546), True, 'import matplotlib as mpl\n'), ((114708, 114718), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (114716, 114718), True, 'import matplotlib.pyplot as plt\n'), ((114746, 
114758), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (114756, 114758), False, 'import io\n'), ((114767, 114809), 'matplotlib.pyplot.savefig', 'plt.savefig', (['myStringIOBytes'], {'format': '"""png"""'}), "(myStringIOBytes, format='png')\n", (114778, 114809), True, 'import matplotlib.pyplot as plt\n'), ((115459, 115500), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19.2, 10.8)', 'dpi': '(100)'}), '(figsize=(19.2, 10.8), dpi=100)\n', (115469, 115500), True, 'import matplotlib.pyplot as plt\n'), ((116064, 116078), 'numpy.median', 'np.median', (['frc'], {}), '(frc)\n', (116073, 116078), True, 'import numpy as np\n'), ((116727, 116742), 'numpy.median', 'np.median', (['watt'], {}), '(watt)\n', (116736, 116742), True, 'import numpy as np\n'), ((117372, 117387), 'numpy.median', 'np.median', (['cond'], {}), '(cond)\n', (117381, 117387), True, 'import numpy as np\n'), ((118099, 118114), 'numpy.median', 'np.median', (['frc4'], {}), '(frc4)\n', (118108, 118114), True, 'import numpy as np\n'), ((118766, 118813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19.2 / 1.45, 6.4)', 'dpi': '(100)'}), '(figsize=(19.2 / 1.45, 6.4), dpi=100)\n', (118776, 118813), True, 'import matplotlib.pyplot as plt\n'), ((119095, 119109), 'numpy.median', 'np.median', (['frc'], {}), '(frc)\n', (119104, 119109), True, 'import numpy as np\n'), ((119833, 119848), 'numpy.median', 'np.median', (['frc4'], {}), '(frc4)\n', (119842, 119848), True, 'import numpy as np\n'), ((120353, 120365), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (120363, 120365), False, 'import io\n'), ((120374, 120416), 'matplotlib.pyplot.savefig', 'plt.savefig', (['myStringIOBytes'], {'format': '"""png"""'}), "(myStringIOBytes, format='png')\n", (120385, 120416), True, 'import matplotlib.pyplot as plt\n'), ((134148, 134162), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (134160, 134162), True, 'import pandas as pd\n'), ((136503, 136516), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', 
(136514, 136516), False, 'import io\n'), ((142220, 142243), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (142241, 142243), False, 'import datetime\n'), ((142533, 142556), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (142544, 142556), True, 'import pandas as pd\n'), ((1073, 1094), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1092, 1094), False, 'import datetime\n'), ((2084, 2155), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['self.layer1_neurons'], {'input_dim': '(5)', 'activation': '"""tanh"""'}), "(self.layer1_neurons, input_dim=5, activation='tanh')\n", (2102, 2155), False, 'from tensorflow import keras\n'), ((2189, 2231), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (2207, 2231), False, 'from tensorflow import keras\n'), ((8456, 8498), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (8474, 8498), False, 'from tensorflow import keras\n'), ((8801, 8826), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (8815, 8826), False, 'import os\n'), ((8840, 8862), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (8851, 8862), False, 'import os\n'), ((10812, 10851), 'numpy.random.uniform', 'np.random.uniform', (['(-0.05)', '(0.05)', 'w.shape'], {}), '(-0.05, 0.05, w.shape)\n', (10829, 10851), True, 'import numpy as np\n'), ((13515, 13535), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13522, 13535), True, 'import numpy as np\n'), ((13573, 13593), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13580, 13593), True, 'import numpy as np\n'), ((13631, 13651), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13638, 13651), True, 'import numpy as np\n'), ((13689, 13709), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), 
'(Y_true, 0.2)\n', (13696, 13709), True, 'import numpy as np\n'), ((13747, 13767), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13754, 13767), True, 'import numpy as np\n'), ((13805, 13825), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13812, 13825), True, 'import numpy as np\n'), ((13863, 13883), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13870, 13883), True, 'import numpy as np\n'), ((13921, 13941), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13928, 13941), True, 'import numpy as np\n'), ((13979, 13999), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (13986, 13999), True, 'import numpy as np\n'), ((14037, 14057), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (14044, 14057), True, 'import numpy as np\n'), ((14086, 14106), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (14093, 14106), True, 'import numpy as np\n'), ((17534, 17571), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', 'n_equal'], {}), '(0, n_equal)\n', (17559, 17571), True, 'import numpy as np\n'), ((17591, 17630), 'numpy.append', 'np.append', (['rank', '(n_lower + deviate_rank)'], {}), '(rank, n_lower + deviate_rank)\n', (17600, 17630), True, 'import numpy as np\n'), ((18178, 18199), 'numpy.sort', 'np.sort', (['Y_pred[:, a]'], {}), '(Y_pred[:, a])\n', (18185, 18199), True, 'import numpy as np\n'), ((20205, 20230), 'numpy.median', 'np.median', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (20214, 20230), True, 'import numpy as np\n'), ((20407, 20429), 'numpy.min', 'np.min', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (20413, 20429), True, 'import numpy as np\n'), ((20443, 20465), 'numpy.max', 'np.max', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (20449, 20465), True, 'import numpy as np\n'), ((22434, 22472), 'numpy.array', 'np.array', (['self.calibration_predictions'], {}), 
'(self.calibration_predictions)\n', (22442, 22472), True, 'import numpy as np\n'), ((22618, 22639), 'numpy.sort', 'np.sort', (['Y_pred[:, a]'], {}), '(Y_pred[:, a])\n', (22625, 22639), True, 'import numpy as np\n'), ((22782, 22796), 'numpy.mean', 'np.mean', (['xt_yt'], {}), '(xt_yt)\n', (22789, 22796), True, 'import numpy as np\n'), ((22948, 22970), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (22956, 22970), True, 'import numpy as np\n'), ((22989, 23027), 'numpy.array', 'np.array', (['self.calibration_predictions'], {}), '(self.calibration_predictions)\n', (22997, 23027), True, 'import numpy as np\n'), ((23984, 24004), 'numpy.cumsum', 'np.cumsum', (['scipy_pdf'], {}), '(scipy_pdf)\n', (23993, 24004), True, 'import numpy as np\n'), ((28560, 28580), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28567, 28580), True, 'import numpy as np\n'), ((28621, 28641), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28628, 28641), True, 'import numpy as np\n'), ((28679, 28699), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28686, 28699), True, 'import numpy as np\n'), ((28737, 28757), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28744, 28757), True, 'import numpy as np\n'), ((28795, 28815), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28802, 28815), True, 'import numpy as np\n'), ((28853, 28873), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28860, 28873), True, 'import numpy as np\n'), ((28911, 28931), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28918, 28931), True, 'import numpy as np\n'), ((28969, 28989), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (28976, 28989), True, 'import numpy as np\n'), ((29027, 29047), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (29034, 29047), True, 'import numpy as np\n'), ((29085, 29105), 
'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (29092, 29105), True, 'import numpy as np\n'), ((29143, 29163), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (29150, 29163), True, 'import numpy as np\n'), ((33563, 33595), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (33593, 33595), True, 'import tensorflow as tf\n'), ((33622, 33678), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (["(directory + '\\\\base_network.h5')"], {}), "(directory + '\\\\base_network.h5')\n", (33645, 33678), False, 'from tensorflow import keras\n'), ((33745, 33842), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_cal_norm', 't_cal_norm'], {'train_size': '(1 / 3)', 'shuffle': '(True)', 'random_state': '(i ** 2)'}), '(x_cal_norm, t_cal_norm, train_size=1 / 3, shuffle=True,\n random_state=i ** 2)\n', (33761, 33842), False, 'from sklearn.model_selection import train_test_split\n'), ((36657, 36677), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (36664, 36677), True, 'import numpy as np\n'), ((36715, 36735), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (36722, 36735), True, 'import numpy as np\n'), ((36773, 36793), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (36780, 36793), True, 'import numpy as np\n'), ((36831, 36851), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (36838, 36851), True, 'import numpy as np\n'), ((36889, 36909), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (36896, 36909), True, 'import numpy as np\n'), ((36947, 36967), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (36954, 36967), True, 'import numpy as np\n'), ((37005, 37025), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (37012, 37025), True, 'import numpy as np\n'), ((37063, 37083), 'numpy.less', 'np.less', (['Y_true', 
'(0.2)'], {}), '(Y_true, 0.2)\n', (37070, 37083), True, 'import numpy as np\n'), ((37121, 37141), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (37128, 37141), True, 'import numpy as np\n'), ((37179, 37199), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (37186, 37199), True, 'import numpy as np\n'), ((37228, 37248), 'numpy.less', 'np.less', (['Y_true', '(0.2)'], {}), '(Y_true, 0.2)\n', (37235, 37248), True, 'import numpy as np\n'), ((40676, 40713), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', 'n_equal'], {}), '(0, n_equal)\n', (40701, 40713), True, 'import numpy as np\n'), ((40733, 40772), 'numpy.append', 'np.append', (['rank', '(n_lower + deviate_rank)'], {}), '(rank, n_lower + deviate_rank)\n', (40742, 40772), True, 'import numpy as np\n'), ((41530, 41555), 'numpy.median', 'np.median', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (41539, 41555), True, 'import numpy as np\n'), ((41732, 41754), 'numpy.min', 'np.min', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (41738, 41754), True, 'import numpy as np\n'), ((41768, 41790), 'numpy.max', 'np.max', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (41774, 41790), True, 'import numpy as np\n'), ((45965, 45985), 'numpy.cumsum', 'np.cumsum', (['scipy_pdf'], {}), '(scipy_pdf)\n', (45974, 45985), True, 'import numpy as np\n'), ((46917, 46943), 'numpy.arange', 'np.arange', (['(0.2)', '(2.05)', '(0.05)'], {}), '(0.2, 2.05, 0.05)\n', (46926, 46943), True, 'import numpy as np\n'), ((54335, 54370), 'pandas.DataFrame', 'pd.DataFrame', (['worst_case_results_am'], {}), '(worst_case_results_am)\n', (54347, 54370), True, 'import pandas as pd\n'), ((57044, 57079), 'pandas.DataFrame', 'pd.DataFrame', (['worst_case_results_pm'], {}), '(worst_case_results_pm)\n', (57056, 57079), True, 'import pandas as pd\n'), ((103726, 103775), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(3.35, 6.69)', 'dpi': '(300)'}), '(4, 1, 
figsize=(3.35, 6.69), dpi=300)\n', (103738, 103775), True, 'import matplotlib.pyplot as plt\n'), ((104471, 104549), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.18)', 'hspace': '(0.6)', 'top': '(0.99)', 'bottom': '(0.075)', 'right': '(0.98)'}), '(left=0.18, hspace=0.6, top=0.99, bottom=0.075, right=0.98)\n', (104490, 104549), True, 'import matplotlib.pyplot as plt\n'), ((104857, 104868), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (104866, 104868), True, 'import matplotlib.pyplot as plt\n'), ((104908, 104920), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (104918, 104920), False, 'import io\n'), ((104933, 105004), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_histogram'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_histogram, format='png', bbox_inches='tight')\n", (104944, 105004), True, 'import matplotlib.pyplot as plt\n'), ((111874, 111896), 'logging.info', 'logging.info', (['lo_limit'], {}), '(lo_limit)\n', (111886, 111896), False, 'import logging\n'), ((114211, 114238), 'numpy.arange', 'np.arange', (['sorted_data.size'], {}), '(sorted_data.size)\n', (114220, 114238), True, 'import numpy as np\n'), ((116030, 116042), 'numpy.mean', 'np.mean', (['frc'], {}), '(frc)\n', (116037, 116042), True, 'import numpy as np\n'), ((116692, 116705), 'numpy.mean', 'np.mean', (['watt'], {}), '(watt)\n', (116699, 116705), True, 'import numpy as np\n'), ((117337, 117350), 'numpy.mean', 'np.mean', (['cond'], {}), '(cond)\n', (117344, 117350), True, 'import numpy as np\n'), ((118064, 118077), 'numpy.mean', 'np.mean', (['frc4'], {}), '(frc4)\n', (118071, 118077), True, 'import numpy as np\n'), ((119061, 119073), 'numpy.mean', 'np.mean', (['frc'], {}), '(frc)\n', (119068, 119073), True, 'import numpy as np\n'), ((119798, 119811), 'numpy.mean', 'np.mean', (['frc4'], {}), '(frc4)\n', (119805, 119811), True, 'import numpy as np\n'), ((134825, 134881), 'numpy.round', 'np.round', 
(["self.avg_case_results_am['median']"], {'decimals': '(3)'}), "(self.avg_case_results_am['median'], decimals=3)\n", (134833, 134881), True, 'import numpy as np\n'), ((135010, 135066), 'numpy.round', 'np.round', (["self.avg_case_results_pm['median']"], {'decimals': '(3)'}), "(self.avg_case_results_pm['median'], decimals=3)\n", (135018, 135066), True, 'import numpy as np\n'), ((135191, 135258), 'numpy.round', 'np.round', (["self.avg_case_results_am['probability<=0.20']"], {'decimals': '(3)'}), "(self.avg_case_results_am['probability<=0.20'], decimals=3)\n", (135199, 135258), True, 'import numpy as np\n'), ((135383, 135450), 'numpy.round', 'np.round', (["self.avg_case_results_pm['probability<=0.20']"], {'decimals': '(3)'}), "(self.avg_case_results_pm['probability<=0.20'], decimals=3)\n", (135391, 135450), True, 'import numpy as np\n'), ((135716, 135777), 'numpy.round', 'np.round', (["self.avg_case_results_am_post['median']"], {'decimals': '(3)'}), "(self.avg_case_results_am_post['median'], decimals=3)\n", (135724, 135777), True, 'import numpy as np\n'), ((135906, 135967), 'numpy.round', 'np.round', (["self.avg_case_results_pm_post['median']"], {'decimals': '(3)'}), "(self.avg_case_results_pm_post['median'], decimals=3)\n", (135914, 135967), True, 'import numpy as np\n'), ((136092, 136164), 'numpy.round', 'np.round', (["self.avg_case_results_am_post['probability<=0.20']"], {'decimals': '(3)'}), "(self.avg_case_results_am_post['probability<=0.20'], decimals=3)\n", (136100, 136164), True, 'import numpy as np\n'), ((136289, 136361), 'numpy.round', 'np.round', (["self.avg_case_results_pm_post['probability<=0.20']"], {'decimals': '(3)'}), "(self.avg_case_results_pm_post['probability<=0.20'], decimals=3)\n", (136297, 136361), True, 'import numpy as np\n'), ((136733, 136747), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (136745, 136747), True, 'import pandas as pd\n'), ((139392, 139405), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (139403, 139405), False, 
'import io\n'), ((142394, 142422), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (142410, 142422), False, 'import os\n'), ((818, 837), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (835, 837), False, 'import logging\n'), ((5480, 5517), 'xlrd.xldate.xldate_as_datetime', 'xldate_as_datetime', (['start'], {'datemode': '(0)'}), '(start, datemode=0)\n', (5498, 5517), False, 'from xlrd.xldate import xldate_as_datetime\n'), ((5734, 5769), 'xlrd.xldate.xldate_as_datetime', 'xldate_as_datetime', (['end'], {'datemode': '(0)'}), '(end, datemode=0)\n', (5752, 5769), False, 'from xlrd.xldate import xldate_as_datetime\n'), ((6385, 6438), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['start', 'self.xl_dateformat'], {}), '(start, self.xl_dateformat)\n', (6411, 6438), False, 'import datetime\n'), ((6804, 6828), 'numpy.array', 'np.array', (['self.durations'], {}), '(self.durations)\n', (6812, 6828), True, 'import numpy as np\n'), ((17406, 17441), 'numpy.greater', 'np.greater', (['Y_true[a]', 'Y_pred[:, a]'], {}), '(Y_true[a], Y_pred[:, a])\n', (17416, 17441), True, 'import numpy as np\n'), ((17472, 17505), 'numpy.equal', 'np.equal', (['Y_true[a]', 'Y_pred[:, a]'], {}), '(Y_true[a], Y_pred[:, a])\n', (17480, 17505), True, 'import numpy as np\n'), ((20666, 20679), 'numpy.max', 'np.max', (['FRC_X'], {}), '(FRC_X)\n', (20672, 20679), True, 'import numpy as np\n'), ((22671, 22687), 'numpy.var', 'np.var', (['forecast'], {}), '(forecast)\n', (22677, 22687), True, 'import numpy as np\n'), ((22830, 22841), 'numpy.mean', 'np.mean', (['s2'], {}), '(s2)\n', (22837, 22841), True, 'import numpy as np\n'), ((27024, 27048), 'numpy.sum', 'np.sum', (['(CRPS_dif * 0.001)'], {}), '(CRPS_dif * 0.001)\n', (27030, 27048), True, 'import numpy as np\n'), ((27121, 27150), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'max_CI'], {}), '(Y_true, max_CI)\n', (27134, 27150), True, 'import numpy as np\n'), ((27153, 27185), 'numpy.greater_equal', 
'np.greater_equal', (['Y_true', 'min_CI'], {}), '(Y_true, min_CI)\n', (27169, 27185), True, 'import numpy as np\n'), ((27235, 27269), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_90_Upper'], {}), '(Y_true, CI_90_Upper)\n', (27248, 27269), True, 'import numpy as np\n'), ((27284, 27321), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_90_Lower'], {}), '(Y_true, CI_90_Lower)\n', (27300, 27321), True, 'import numpy as np\n'), ((27383, 27417), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_80_Upper'], {}), '(Y_true, CI_80_Upper)\n', (27396, 27417), True, 'import numpy as np\n'), ((27432, 27469), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_80_Lower'], {}), '(Y_true, CI_80_Lower)\n', (27448, 27469), True, 'import numpy as np\n'), ((27531, 27565), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_70_Upper'], {}), '(Y_true, CI_70_Upper)\n', (27544, 27565), True, 'import numpy as np\n'), ((27580, 27617), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_70_Lower'], {}), '(Y_true, CI_70_Lower)\n', (27596, 27617), True, 'import numpy as np\n'), ((27679, 27713), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_60_Upper'], {}), '(Y_true, CI_60_Upper)\n', (27692, 27713), True, 'import numpy as np\n'), ((27728, 27765), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_60_Lower'], {}), '(Y_true, CI_60_Lower)\n', (27744, 27765), True, 'import numpy as np\n'), ((27827, 27861), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_50_Upper'], {}), '(Y_true, CI_50_Upper)\n', (27840, 27861), True, 'import numpy as np\n'), ((27876, 27913), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_50_Lower'], {}), '(Y_true, CI_50_Lower)\n', (27892, 27913), True, 'import numpy as np\n'), ((27975, 28009), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_40_Upper'], {}), '(Y_true, CI_40_Upper)\n', (27988, 28009), True, 'import numpy as np\n'), ((28024, 28061), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 
'CI_40_Lower'], {}), '(Y_true, CI_40_Lower)\n', (28040, 28061), True, 'import numpy as np\n'), ((28123, 28157), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_30_Upper'], {}), '(Y_true, CI_30_Upper)\n', (28136, 28157), True, 'import numpy as np\n'), ((28172, 28209), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_30_Lower'], {}), '(Y_true, CI_30_Lower)\n', (28188, 28209), True, 'import numpy as np\n'), ((28271, 28305), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_20_Upper'], {}), '(Y_true, CI_20_Upper)\n', (28284, 28305), True, 'import numpy as np\n'), ((28320, 28357), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_20_Lower'], {}), '(Y_true, CI_20_Lower)\n', (28336, 28357), True, 'import numpy as np\n'), ((28419, 28453), 'numpy.less_equal', 'np.less_equal', (['Y_true', 'CI_10_Upper'], {}), '(Y_true, CI_10_Upper)\n', (28432, 28453), True, 'import numpy as np\n'), ((28468, 28505), 'numpy.greater_equal', 'np.greater_equal', (['Y_true', 'CI_10_Lower'], {}), '(Y_true, CI_10_Lower)\n', (28484, 28505), True, 'import numpy as np\n'), ((33977, 34016), 'numpy.random.uniform', 'np.random.uniform', (['(-0.05)', '(0.05)', 'w.shape'], {}), '(-0.05, 0.05, w.shape)\n', (33994, 34016), True, 'import numpy as np\n'), ((40548, 40583), 'numpy.greater', 'np.greater', (['Y_true[a]', 'Y_pred[:, a]'], {}), '(Y_true[a], Y_pred[:, a])\n', (40558, 40583), True, 'import numpy as np\n'), ((40614, 40647), 'numpy.equal', 'np.equal', (['Y_true[a]', 'Y_pred[:, a]'], {}), '(Y_true[a], Y_pred[:, a])\n', (40622, 40647), True, 'import numpy as np\n'), ((60272, 60321), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(6.69, 6.69)', 'dpi': '(300)'}), '(2, 2, figsize=(6.69, 6.69), dpi=300)\n', (60284, 60321), True, 'import matplotlib.pyplot as plt\n'), ((67944, 67976), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.25)'}), '(wspace=0.25)\n', (67963, 67976), True, 'import matplotlib.pyplot as plt\n'), ((68303, 
68315), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (68313, 68315), False, 'import io\n'), ((68332, 68399), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_preds'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_preds, format='png', bbox_inches='tight')\n", (68343, 68399), True, 'import matplotlib.pyplot as plt\n'), ((68545, 68556), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (68554, 68556), True, 'import matplotlib.pyplot as plt\n'), ((68585, 68626), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.69, 3.35)', 'dpi': '(300)'}), '(figsize=(6.69, 3.35), dpi=300)\n', (68595, 68626), True, 'import matplotlib.pyplot as plt\n'), ((69975, 69993), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.2, 2]'], {}), '([0.2, 2])\n', (69983, 69993), True, 'import matplotlib.pyplot as plt\n'), ((70010, 70043), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tapstand FRC (mg/L)"""'], {}), "('Tapstand FRC (mg/L)')\n", (70020, 70043), True, 'import matplotlib.pyplot as plt\n'), ((70060, 70076), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (70068, 70076), True, 'import matplotlib.pyplot as plt\n'), ((70093, 70150), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Risk of Point-of-Consumption FRC < 0.2 mg/L"""'], {}), "('Risk of Point-of-Consumption FRC < 0.2 mg/L')\n", (70103, 70150), True, 'import matplotlib.pyplot as plt\n'), ((70167, 70332), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.999, 0.999)', 'shadow': '(False)', 'fontsize': '"""small"""', 'ncol': '(1)', 'labelspacing': '(0.1)', 'columnspacing': '(0.2)', 'handletextpad': '(0.1)', 'loc': '"""upper right"""'}), "(bbox_to_anchor=(0.999, 0.999), shadow=False, fontsize='small',\n ncol=1, labelspacing=0.1, columnspacing=0.2, handletextpad=0.1, loc=\n 'upper right')\n", (70177, 70332), True, 'import matplotlib.pyplot as plt\n'), ((70519, 70563), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': 
'(0.15)', 'right': '(0.95)'}), '(bottom=0.15, right=0.95)\n', (70538, 70563), True, 'import matplotlib.pyplot as plt\n'), ((70792, 70804), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (70802, 70804), False, 'import io\n'), ((70821, 70887), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_risk'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_risk, format='png', bbox_inches='tight')\n", (70832, 70887), True, 'import matplotlib.pyplot as plt\n'), ((71120, 71131), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (71129, 71131), True, 'import matplotlib.pyplot as plt\n'), ((81989, 82038), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(1)'], {'figsize': '(3.35, 6.69)', 'dpi': '(300)'}), '(6, 1, figsize=(3.35, 6.69), dpi=300)\n', (82001, 82038), True, 'import matplotlib.pyplot as plt\n'), ((84700, 84778), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.18)', 'hspace': '(0.6)', 'top': '(0.99)', 'bottom': '(0.075)', 'right': '(0.98)'}), '(left=0.18, hspace=0.6, top=0.99, bottom=0.075, right=0.98)\n', (84719, 84778), True, 'import matplotlib.pyplot as plt\n'), ((85122, 85133), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (85131, 85133), True, 'import matplotlib.pyplot as plt\n'), ((85177, 85189), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (85187, 85189), False, 'import io\n'), ((85206, 85277), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_histogram'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_histogram, format='png', bbox_inches='tight')\n", (85217, 85277), True, 'import matplotlib.pyplot as plt\n'), ((90964, 91013), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6.69, 3.35)', 'dpi': '(300)'}), '(1, 2, figsize=(6.69, 3.35), dpi=300)\n', (90976, 91013), True, 'import matplotlib.pyplot as plt\n'), ((94814, 94846), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.25)'}), 
'(wspace=0.25)\n', (94833, 94846), True, 'import matplotlib.pyplot as plt\n'), ((95173, 95185), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (95183, 95185), False, 'import io\n'), ((95202, 95269), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_preds'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_preds, format='png', bbox_inches='tight')\n", (95213, 95269), True, 'import matplotlib.pyplot as plt\n'), ((95415, 95426), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (95424, 95426), True, 'import matplotlib.pyplot as plt\n'), ((95455, 95496), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.69, 3.35)', 'dpi': '(300)'}), '(figsize=(6.69, 3.35), dpi=300)\n', (95465, 95496), True, 'import matplotlib.pyplot as plt\n'), ((96181, 96199), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.2, 2]'], {}), '([0.2, 2])\n', (96189, 96199), True, 'import matplotlib.pyplot as plt\n'), ((96216, 96249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tapstand FRC (mg/L)"""'], {}), "('Tapstand FRC (mg/L)')\n", (96226, 96249), True, 'import matplotlib.pyplot as plt\n'), ((96266, 96282), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (96274, 96282), True, 'import matplotlib.pyplot as plt\n'), ((96299, 96356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Risk of Point-of-Consumption FRC < 0.2 mg/L"""'], {}), "('Risk of Point-of-Consumption FRC < 0.2 mg/L')\n", (96309, 96356), True, 'import matplotlib.pyplot as plt\n'), ((96373, 96538), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.999, 0.999)', 'shadow': '(False)', 'fontsize': '"""small"""', 'ncol': '(1)', 'labelspacing': '(0.1)', 'columnspacing': '(0.2)', 'handletextpad': '(0.1)', 'loc': '"""upper right"""'}), "(bbox_to_anchor=(0.999, 0.999), shadow=False, fontsize='small',\n ncol=1, labelspacing=0.1, columnspacing=0.2, handletextpad=0.1, loc=\n 'upper right')\n", (96383, 96538), True, 'import matplotlib.pyplot as plt\n'), 
((96725, 96769), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.15)', 'right': '(0.95)'}), '(bottom=0.15, right=0.95)\n', (96744, 96769), True, 'import matplotlib.pyplot as plt\n'), ((97088, 97100), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (97098, 97100), False, 'import io\n'), ((97117, 97183), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_risk'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_risk, format='png', bbox_inches='tight')\n", (97128, 97183), True, 'import matplotlib.pyplot as plt\n'), ((97326, 97337), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (97335, 97337), True, 'import matplotlib.pyplot as plt\n'), ((105697, 105735), 'logging.info', 'logging.info', (['self.avg_case_results_am'], {}), '(self.avg_case_results_am)\n', (105709, 105735), False, 'import logging\n'), ((105752, 105792), 'logging.info', 'logging.info', (['self.worst_case_results_am'], {}), '(self.worst_case_results_am)\n', (105764, 105792), False, 'import logging\n'), ((105809, 105847), 'logging.info', 'logging.info', (['self.avg_case_results_pm'], {}), '(self.avg_case_results_pm)\n', (105821, 105847), False, 'import logging\n'), ((105864, 105904), 'logging.info', 'logging.info', (['self.worst_case_results_pm'], {}), '(self.worst_case_results_pm)\n', (105876, 105904), False, 'import logging\n'), ((106069, 106112), 'logging.info', 'logging.info', (['self.avg_case_results_am_post'], {}), '(self.avg_case_results_am_post)\n', (106081, 106112), False, 'import logging\n'), ((106129, 106174), 'logging.info', 'logging.info', (['self.worst_case_results_am_post'], {}), '(self.worst_case_results_am_post)\n', (106141, 106174), False, 'import logging\n'), ((106191, 106234), 'logging.info', 'logging.info', (['self.avg_case_results_pm_post'], {}), '(self.avg_case_results_pm_post)\n', (106203, 106234), False, 'import logging\n'), ((106251, 106296), 'logging.info', 'logging.info', 
(['self.worst_case_results_pm_post'], {}), '(self.worst_case_results_pm_post)\n', (106263, 106296), False, 'import logging\n'), ((106526, 106564), 'logging.info', 'logging.info', (['self.avg_case_results_am'], {}), '(self.avg_case_results_am)\n', (106538, 106564), False, 'import logging\n'), ((106581, 106619), 'logging.info', 'logging.info', (['self.avg_case_results_pm'], {}), '(self.avg_case_results_pm)\n', (106593, 106619), False, 'import logging\n'), ((106728, 106771), 'logging.info', 'logging.info', (['self.avg_case_results_am_post'], {}), '(self.avg_case_results_am_post)\n', (106740, 106771), False, 'import logging\n'), ((106788, 106831), 'logging.info', 'logging.info', (['self.avg_case_results_pm_post'], {}), '(self.avg_case_results_pm_post)\n', (106800, 106831), False, 'import logging\n'), ((113756, 113798), 'numpy.linspace', 'np.linspace', (['lo_limit', 'hi_limit', 'divisions'], {}), '(lo_limit, hi_limit, divisions)\n', (113767, 113798), True, 'import numpy as np\n'), ((117861, 117882), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(41)'], {}), '(0, 2, 41)\n', (117872, 117882), True, 'import numpy as np\n'), ((119601, 119622), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(41)'], {}), '(0, 2, 41)\n', (119612, 119622), True, 'import numpy as np\n'), ((121496, 121501), 'yattag.Doc', 'Doc', ([], {}), '()\n', (121499, 121501), False, 'from yattag import Doc\n'), ((137572, 137630), 'numpy.round', 'np.round', (["self.worst_case_results_am['median']"], {'decimals': '(3)'}), "(self.worst_case_results_am['median'], decimals=3)\n", (137580, 137630), True, 'import numpy as np\n'), ((137768, 137826), 'numpy.round', 'np.round', (["self.worst_case_results_pm['median']"], {'decimals': '(3)'}), "(self.worst_case_results_pm['median'], decimals=3)\n", (137776, 137826), True, 'import numpy as np\n'), ((137966, 138035), 'numpy.round', 'np.round', (["self.worst_case_results_am['probability<=0.20']"], {'decimals': '(3)'}), 
"(self.worst_case_results_am['probability<=0.20'], decimals=3)\n", (137974, 138035), True, 'import numpy as np\n'), ((138212, 138281), 'numpy.round', 'np.round', (["self.worst_case_results_pm['probability<=0.20']"], {'decimals': '(3)'}), "(self.worst_case_results_pm['probability<=0.20'], decimals=3)\n", (138220, 138281), True, 'import numpy as np\n'), ((138475, 138538), 'numpy.round', 'np.round', (["self.worst_case_results_am_post['median']"], {'decimals': '(3)'}), "(self.worst_case_results_am_post['median'], decimals=3)\n", (138483, 138538), True, 'import numpy as np\n'), ((138676, 138739), 'numpy.round', 'np.round', (["self.worst_case_results_pm_post['median']"], {'decimals': '(3)'}), "(self.worst_case_results_pm_post['median'], decimals=3)\n", (138684, 138739), True, 'import numpy as np\n'), ((138879, 138953), 'numpy.round', 'np.round', (["self.worst_case_results_am_post['probability<=0.20']"], {'decimals': '(3)'}), "(self.worst_case_results_am_post['probability<=0.20'], decimals=3)\n", (138887, 138953), True, 'import numpy as np\n'), ((139130, 139204), 'numpy.round', 'np.round', (["self.worst_case_results_pm_post['probability<=0.20']"], {'decimals': '(3)'}), "(self.worst_case_results_pm_post['probability<=0.20'], decimals=3)\n", (139138, 139204), True, 'import numpy as np\n'), ((139972, 139977), 'yattag.Doc', 'Doc', ([], {}), '()\n', (139975, 139977), False, 'from yattag import Doc\n'), ((5592, 5621), 'numpy.append', 'np.append', (['collection_time', '(1)'], {}), '(collection_time, 1)\n', (5601, 5621), True, 'import numpy as np\n'), ((5682, 5711), 'numpy.append', 'np.append', (['collection_time', '(0)'], {}), '(collection_time, 0)\n', (5691, 5711), True, 'import numpy as np\n'), ((5972, 6025), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start', 'self.xl_dateformat'], {}), '(start, self.xl_dateformat)\n', (5998, 6025), False, 'import datetime\n'), ((6243, 6294), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['end', 
'self.xl_dateformat'], {}), '(end, self.xl_dateformat)\n', (6269, 6294), False, 'import datetime\n'), ((11635, 11657), 'numpy.max', 'np.max', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (11641, 11657), True, 'import numpy as np\n'), ((11698, 11720), 'numpy.min', 'np.min', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (11704, 11720), True, 'import numpy as np\n'), ((11805, 11838), 'numpy.percentile', 'np.percentile', (['Y_pred', '(95)'], {'axis': '(0)'}), '(Y_pred, 95, axis=0)\n', (11818, 11838), True, 'import numpy as np\n'), ((11879, 11911), 'numpy.percentile', 'np.percentile', (['Y_pred', '(5)'], {'axis': '(0)'}), '(Y_pred, 5, axis=0)\n', (11892, 11911), True, 'import numpy as np\n'), ((11996, 12029), 'numpy.percentile', 'np.percentile', (['Y_pred', '(90)'], {'axis': '(0)'}), '(Y_pred, 90, axis=0)\n', (12009, 12029), True, 'import numpy as np\n'), ((12070, 12103), 'numpy.percentile', 'np.percentile', (['Y_pred', '(10)'], {'axis': '(0)'}), '(Y_pred, 10, axis=0)\n', (12083, 12103), True, 'import numpy as np\n'), ((12188, 12221), 'numpy.percentile', 'np.percentile', (['Y_pred', '(85)'], {'axis': '(0)'}), '(Y_pred, 85, axis=0)\n', (12201, 12221), True, 'import numpy as np\n'), ((12262, 12295), 'numpy.percentile', 'np.percentile', (['Y_pred', '(15)'], {'axis': '(0)'}), '(Y_pred, 15, axis=0)\n', (12275, 12295), True, 'import numpy as np\n'), ((12380, 12413), 'numpy.percentile', 'np.percentile', (['Y_pred', '(80)'], {'axis': '(0)'}), '(Y_pred, 80, axis=0)\n', (12393, 12413), True, 'import numpy as np\n'), ((12454, 12487), 'numpy.percentile', 'np.percentile', (['Y_pred', '(20)'], {'axis': '(0)'}), '(Y_pred, 20, axis=0)\n', (12467, 12487), True, 'import numpy as np\n'), ((12572, 12605), 'numpy.percentile', 'np.percentile', (['Y_pred', '(75)'], {'axis': '(0)'}), '(Y_pred, 75, axis=0)\n', (12585, 12605), True, 'import numpy as np\n'), ((12646, 12679), 'numpy.percentile', 'np.percentile', (['Y_pred', '(25)'], {'axis': '(0)'}), '(Y_pred, 25, axis=0)\n', 
(12659, 12679), True, 'import numpy as np\n'), ((12764, 12797), 'numpy.percentile', 'np.percentile', (['Y_pred', '(70)'], {'axis': '(0)'}), '(Y_pred, 70, axis=0)\n', (12777, 12797), True, 'import numpy as np\n'), ((12838, 12871), 'numpy.percentile', 'np.percentile', (['Y_pred', '(30)'], {'axis': '(0)'}), '(Y_pred, 30, axis=0)\n', (12851, 12871), True, 'import numpy as np\n'), ((12956, 12989), 'numpy.percentile', 'np.percentile', (['Y_pred', '(65)'], {'axis': '(0)'}), '(Y_pred, 65, axis=0)\n', (12969, 12989), True, 'import numpy as np\n'), ((13030, 13063), 'numpy.percentile', 'np.percentile', (['Y_pred', '(35)'], {'axis': '(0)'}), '(Y_pred, 35, axis=0)\n', (13043, 13063), True, 'import numpy as np\n'), ((13148, 13181), 'numpy.percentile', 'np.percentile', (['Y_pred', '(60)'], {'axis': '(0)'}), '(Y_pred, 60, axis=0)\n', (13161, 13181), True, 'import numpy as np\n'), ((13222, 13255), 'numpy.percentile', 'np.percentile', (['Y_pred', '(40)'], {'axis': '(0)'}), '(Y_pred, 40, axis=0)\n', (13235, 13255), True, 'import numpy as np\n'), ((13340, 13373), 'numpy.percentile', 'np.percentile', (['Y_pred', '(55)'], {'axis': '(0)'}), '(Y_pred, 55, axis=0)\n', (13353, 13373), True, 'import numpy as np\n'), ((13414, 13447), 'numpy.percentile', 'np.percentile', (['Y_pred', '(45)'], {'axis': '(0)'}), '(Y_pred, 45, axis=0)\n', (13427, 13447), True, 'import numpy as np\n'), ((21948, 21974), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (21964, 21974), False, 'import os\n'), ((24185, 24205), 'numpy.argmax', 'np.argmax', (['scipy_cdf'], {}), '(scipy_cdf)\n', (24194, 24205), True, 'import numpy as np\n'), ((34777, 34799), 'numpy.max', 'np.max', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (34783, 34799), True, 'import numpy as np\n'), ((34840, 34862), 'numpy.min', 'np.min', (['Y_pred'], {'axis': '(0)'}), '(Y_pred, axis=0)\n', (34846, 34862), True, 'import numpy as np\n'), ((34947, 34980), 'numpy.percentile', 'np.percentile', (['Y_pred', '(95)'], 
{'axis': '(0)'}), '(Y_pred, 95, axis=0)\n', (34960, 34980), True, 'import numpy as np\n'), ((35021, 35053), 'numpy.percentile', 'np.percentile', (['Y_pred', '(5)'], {'axis': '(0)'}), '(Y_pred, 5, axis=0)\n', (35034, 35053), True, 'import numpy as np\n'), ((35138, 35171), 'numpy.percentile', 'np.percentile', (['Y_pred', '(90)'], {'axis': '(0)'}), '(Y_pred, 90, axis=0)\n', (35151, 35171), True, 'import numpy as np\n'), ((35212, 35245), 'numpy.percentile', 'np.percentile', (['Y_pred', '(10)'], {'axis': '(0)'}), '(Y_pred, 10, axis=0)\n', (35225, 35245), True, 'import numpy as np\n'), ((35330, 35363), 'numpy.percentile', 'np.percentile', (['Y_pred', '(85)'], {'axis': '(0)'}), '(Y_pred, 85, axis=0)\n', (35343, 35363), True, 'import numpy as np\n'), ((35404, 35437), 'numpy.percentile', 'np.percentile', (['Y_pred', '(15)'], {'axis': '(0)'}), '(Y_pred, 15, axis=0)\n', (35417, 35437), True, 'import numpy as np\n'), ((35522, 35555), 'numpy.percentile', 'np.percentile', (['Y_pred', '(80)'], {'axis': '(0)'}), '(Y_pred, 80, axis=0)\n', (35535, 35555), True, 'import numpy as np\n'), ((35596, 35629), 'numpy.percentile', 'np.percentile', (['Y_pred', '(20)'], {'axis': '(0)'}), '(Y_pred, 20, axis=0)\n', (35609, 35629), True, 'import numpy as np\n'), ((35714, 35747), 'numpy.percentile', 'np.percentile', (['Y_pred', '(75)'], {'axis': '(0)'}), '(Y_pred, 75, axis=0)\n', (35727, 35747), True, 'import numpy as np\n'), ((35788, 35821), 'numpy.percentile', 'np.percentile', (['Y_pred', '(25)'], {'axis': '(0)'}), '(Y_pred, 25, axis=0)\n', (35801, 35821), True, 'import numpy as np\n'), ((35906, 35939), 'numpy.percentile', 'np.percentile', (['Y_pred', '(70)'], {'axis': '(0)'}), '(Y_pred, 70, axis=0)\n', (35919, 35939), True, 'import numpy as np\n'), ((35980, 36013), 'numpy.percentile', 'np.percentile', (['Y_pred', '(30)'], {'axis': '(0)'}), '(Y_pred, 30, axis=0)\n', (35993, 36013), True, 'import numpy as np\n'), ((36098, 36131), 'numpy.percentile', 'np.percentile', (['Y_pred', '(65)'], {'axis': 
'(0)'}), '(Y_pred, 65, axis=0)\n', (36111, 36131), True, 'import numpy as np\n'), ((36172, 36205), 'numpy.percentile', 'np.percentile', (['Y_pred', '(35)'], {'axis': '(0)'}), '(Y_pred, 35, axis=0)\n', (36185, 36205), True, 'import numpy as np\n'), ((36290, 36323), 'numpy.percentile', 'np.percentile', (['Y_pred', '(60)'], {'axis': '(0)'}), '(Y_pred, 60, axis=0)\n', (36303, 36323), True, 'import numpy as np\n'), ((36364, 36397), 'numpy.percentile', 'np.percentile', (['Y_pred', '(40)'], {'axis': '(0)'}), '(Y_pred, 40, axis=0)\n', (36377, 36397), True, 'import numpy as np\n'), ((36482, 36515), 'numpy.percentile', 'np.percentile', (['Y_pred', '(55)'], {'axis': '(0)'}), '(Y_pred, 55, axis=0)\n', (36495, 36515), True, 'import numpy as np\n'), ((36556, 36589), 'numpy.percentile', 'np.percentile', (['Y_pred', '(45)'], {'axis': '(0)'}), '(Y_pred, 45, axis=0)\n', (36569, 36589), True, 'import numpy as np\n'), ((46137, 46157), 'numpy.argmax', 'np.argmax', (['scipy_cdf'], {}), '(scipy_cdf)\n', (46146, 46157), True, 'import numpy as np\n'), ((50587, 50665), 'numpy.less_equal', 'np.less_equal', (['self.avg_case_results_am.iloc[:, 0:self.network_count - 1]', '(0.2)'], {}), '(self.avg_case_results_am.iloc[:, 0:self.network_count - 1], 0.2)\n', (50600, 50665), True, 'import numpy as np\n'), ((50798, 50877), 'numpy.less_equal', 'np.less_equal', (['self.avg_case_results_am.iloc[:, 0:self.network_count - 1]', '(0.25)'], {}), '(self.avg_case_results_am.iloc[:, 0:self.network_count - 1], 0.25)\n', (50811, 50877), True, 'import numpy as np\n'), ((51010, 51088), 'numpy.less_equal', 'np.less_equal', (['self.avg_case_results_am.iloc[:, 0:self.network_count - 1]', '(0.3)'], {}), '(self.avg_case_results_am.iloc[:, 0:self.network_count - 1], 0.3)\n', (51023, 51088), True, 'import numpy as np\n'), ((52660, 52732), 'numpy.less', 'np.less', (['self.avg_case_results_pm.iloc[:, 0:self.network_count - 1]', '(0.2)'], {}), '(self.avg_case_results_pm.iloc[:, 0:self.network_count - 1], 0.2)\n', (52667, 
52732), True, 'import numpy as np\n'), ((53012, 53085), 'numpy.less', 'np.less', (['self.avg_case_results_pm.iloc[:, 0:self.network_count - 1]', '(0.25)'], {}), '(self.avg_case_results_pm.iloc[:, 0:self.network_count - 1], 0.25)\n', (53019, 53085), True, 'import numpy as np\n'), ((53365, 53437), 'numpy.less', 'np.less', (['self.avg_case_results_pm.iloc[:, 0:self.network_count - 1]', '(0.3)'], {}), '(self.avg_case_results_pm.iloc[:, 0:self.network_count - 1], 0.3)\n', (53372, 53437), True, 'import numpy as np\n'), ((60445, 60498), 'numpy.percentile', 'np.percentile', (['results_table_frc_avg_am', '(97.5)'], {'axis': '(1)'}), '(results_table_frc_avg_am, 97.5, axis=1)\n', (60458, 60498), True, 'import numpy as np\n'), ((60520, 60572), 'numpy.percentile', 'np.percentile', (['results_table_frc_avg_am', '(2.5)'], {'axis': '(1)'}), '(results_table_frc_avg_am, 2.5, axis=1)\n', (60533, 60572), True, 'import numpy as np\n'), ((60878, 60918), 'numpy.min', 'np.min', (['results_table_frc_avg_am'], {'axis': '(1)'}), '(results_table_frc_avg_am, axis=1)\n', (60884, 60918), True, 'import numpy as np\n'), ((61126, 61166), 'numpy.max', 'np.max', (['results_table_frc_avg_am'], {'axis': '(1)'}), '(results_table_frc_avg_am, axis=1)\n', (61132, 61166), True, 'import numpy as np\n'), ((61329, 61372), 'numpy.median', 'np.median', (['results_table_frc_avg_am'], {'axis': '(1)'}), '(results_table_frc_avg_am, axis=1)\n', (61338, 61372), True, 'import numpy as np\n'), ((62049, 62073), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (62058, 62073), True, 'import numpy as np\n'), ((62333, 62386), 'numpy.percentile', 'np.percentile', (['results_table_frc_avg_pm', '(97.5)'], {'axis': '(1)'}), '(results_table_frc_avg_pm, 97.5, axis=1)\n', (62346, 62386), True, 'import numpy as np\n'), ((62408, 62460), 'numpy.percentile', 'np.percentile', (['results_table_frc_avg_pm', '(2.5)'], {'axis': '(1)'}), '(results_table_frc_avg_pm, 2.5, axis=1)\n', (62421, 62460), True, 
'import numpy as np\n'), ((62766, 62806), 'numpy.min', 'np.min', (['results_table_frc_avg_pm'], {'axis': '(1)'}), '(results_table_frc_avg_pm, axis=1)\n', (62772, 62806), True, 'import numpy as np\n'), ((63014, 63054), 'numpy.max', 'np.max', (['results_table_frc_avg_pm'], {'axis': '(1)'}), '(results_table_frc_avg_pm, axis=1)\n', (63020, 63054), True, 'import numpy as np\n'), ((63217, 63260), 'numpy.median', 'np.median', (['results_table_frc_avg_pm'], {'axis': '(1)'}), '(results_table_frc_avg_pm, axis=1)\n', (63226, 63260), True, 'import numpy as np\n'), ((63937, 63961), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (63946, 63961), True, 'import numpy as np\n'), ((64221, 64276), 'numpy.percentile', 'np.percentile', (['results_table_frc_worst_am', '(97.5)'], {'axis': '(1)'}), '(results_table_frc_worst_am, 97.5, axis=1)\n', (64234, 64276), True, 'import numpy as np\n'), ((64298, 64352), 'numpy.percentile', 'np.percentile', (['results_table_frc_worst_am', '(2.5)'], {'axis': '(1)'}), '(results_table_frc_worst_am, 2.5, axis=1)\n', (64311, 64352), True, 'import numpy as np\n'), ((64658, 64700), 'numpy.min', 'np.min', (['results_table_frc_worst_am'], {'axis': '(1)'}), '(results_table_frc_worst_am, axis=1)\n', (64664, 64700), True, 'import numpy as np\n'), ((64908, 64950), 'numpy.max', 'np.max', (['results_table_frc_worst_am'], {'axis': '(1)'}), '(results_table_frc_worst_am, axis=1)\n', (64914, 64950), True, 'import numpy as np\n'), ((65113, 65158), 'numpy.median', 'np.median', (['results_table_frc_worst_am'], {'axis': '(1)'}), '(results_table_frc_worst_am, axis=1)\n', (65122, 65158), True, 'import numpy as np\n'), ((65836, 65860), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (65845, 65860), True, 'import numpy as np\n'), ((66118, 66173), 'numpy.percentile', 'np.percentile', (['results_table_frc_worst_pm', '(97.5)'], {'axis': '(1)'}), '(results_table_frc_worst_pm, 97.5, axis=1)\n', (66131, 66173), 
True, 'import numpy as np\n'), ((66195, 66249), 'numpy.percentile', 'np.percentile', (['results_table_frc_worst_pm', '(2.5)'], {'axis': '(1)'}), '(results_table_frc_worst_pm, 2.5, axis=1)\n', (66208, 66249), True, 'import numpy as np\n'), ((66555, 66597), 'numpy.min', 'np.min', (['results_table_frc_worst_pm'], {'axis': '(1)'}), '(results_table_frc_worst_pm, axis=1)\n', (66561, 66597), True, 'import numpy as np\n'), ((66805, 66847), 'numpy.max', 'np.max', (['results_table_frc_worst_pm'], {'axis': '(1)'}), '(results_table_frc_worst_pm, axis=1)\n', (66811, 66847), True, 'import numpy as np\n'), ((67010, 67055), 'numpy.median', 'np.median', (['results_table_frc_worst_pm'], {'axis': '(1)'}), '(results_table_frc_worst_pm, axis=1)\n', (67019, 67055), True, 'import numpy as np\n'), ((67732, 67756), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (67741, 67756), True, 'import numpy as np\n'), ((71238, 71287), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(6.69, 6.69)', 'dpi': '(300)'}), '(2, 2, figsize=(6.69, 6.69), dpi=300)\n', (71250, 71287), True, 'import matplotlib.pyplot as plt\n'), ((78897, 78929), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.25)'}), '(wspace=0.25)\n', (78916, 78929), True, 'import matplotlib.pyplot as plt\n'), ((79166, 79178), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (79176, 79178), False, 'import io\n'), ((79195, 79262), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_preds'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_preds, format='png', bbox_inches='tight')\n", (79206, 79262), True, 'import matplotlib.pyplot as plt\n'), ((79498, 79509), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (79507, 79509), True, 'import matplotlib.pyplot as plt\n'), ((79538, 79579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.69, 3.35)', 'dpi': '(300)'}), '(figsize=(6.69, 3.35), dpi=300)\n', 
(79548, 79579), True, 'import matplotlib.pyplot as plt\n'), ((79596, 79763), 'matplotlib.pyplot.plot', 'plt.plot', (['test1_frc', "self.avg_case_results_am_post['probability<=0.20']"], {'c': '"""#ffa600"""', 'label': '"""Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection"""'}), "(test1_frc, self.avg_case_results_am_post['probability<=0.20'], c=\n '#ffa600', label=\n 'Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection')\n", (79604, 79763), True, 'import matplotlib.pyplot as plt\n'), ((79869, 80045), 'matplotlib.pyplot.plot', 'plt.plot', (['test1_frc', "self.avg_case_results_pm_post['probability<=0.20']"], {'c': '"""#ffa600"""', 'ls': '"""--"""', 'label': '"""Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection"""'}), "(test1_frc, self.avg_case_results_pm_post['probability<=0.20'], c=\n '#ffa600', ls='--', label=\n 'Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection')\n", (79877, 80045), True, 'import matplotlib.pyplot as plt\n'), ((80171, 80338), 'matplotlib.pyplot.plot', 'plt.plot', (['test1_frc', "self.worst_case_results_am_post['probability<=0.20']"], {'c': '"""#b80000"""', 'label': '"""Risk of Household FRC < 0.20 mg/L - Worst Case, AM Collection"""'}), "(test1_frc, self.worst_case_results_am_post['probability<=0.20'], c\n ='#b80000', label=\n 'Risk of Household FRC < 0.20 mg/L - Worst Case, AM Collection')\n", (80179, 80338), True, 'import matplotlib.pyplot as plt\n'), ((80444, 80620), 'matplotlib.pyplot.plot', 'plt.plot', (['test1_frc', "self.worst_case_results_pm_post['probability<=0.20']"], {'c': '"""#b80000"""', 'ls': '"""--"""', 'label': '"""Risk of Household FRC < 0.20 mg/L - Worst Case, PM Collection"""'}), "(test1_frc, self.worst_case_results_pm_post['probability<=0.20'], c\n ='#b80000', ls='--', label=\n 'Risk of Household FRC < 0.20 mg/L - Worst Case, PM Collection')\n", (80452, 80620), True, 'import matplotlib.pyplot as plt\n'), ((80746, 80764), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.2, 2]'], 
{}), '([0.2, 2])\n', (80754, 80764), True, 'import matplotlib.pyplot as plt\n'), ((80781, 80814), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tapstand FRC (mg/L)"""'], {}), "('Tapstand FRC (mg/L)')\n", (80791, 80814), True, 'import matplotlib.pyplot as plt\n'), ((80831, 80847), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (80839, 80847), True, 'import matplotlib.pyplot as plt\n'), ((80864, 80921), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Risk of Point-of-Consumption FRC < 0.2 mg/L"""'], {}), "('Risk of Point-of-Consumption FRC < 0.2 mg/L')\n", (80874, 80921), True, 'import matplotlib.pyplot as plt\n'), ((80938, 81103), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.999, 0.999)', 'shadow': '(False)', 'fontsize': '"""small"""', 'ncol': '(1)', 'labelspacing': '(0.1)', 'columnspacing': '(0.2)', 'handletextpad': '(0.1)', 'loc': '"""upper right"""'}), "(bbox_to_anchor=(0.999, 0.999), shadow=False, fontsize='small',\n ncol=1, labelspacing=0.1, columnspacing=0.2, handletextpad=0.1, loc=\n 'upper right')\n", (80948, 81103), True, 'import matplotlib.pyplot as plt\n'), ((81503, 81515), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (81513, 81515), False, 'import io\n'), ((81532, 81598), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_risk'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_risk, format='png', bbox_inches='tight')\n", (81543, 81598), True, 'import matplotlib.pyplot as plt\n'), ((81831, 81842), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (81840, 81842), True, 'import matplotlib.pyplot as plt\n'), ((85522, 85571), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(1)'], {'figsize': '(3.35, 6.69)', 'dpi': '(300)'}), '(6, 1, figsize=(3.35, 6.69), dpi=300)\n', (85534, 85571), True, 'import matplotlib.pyplot as plt\n'), ((87280, 87358), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.18)', 'hspace': '(0.6)', 'top': '(0.99)', 
'bottom': '(0.075)', 'right': '(0.98)'}), '(left=0.18, hspace=0.6, top=0.99, bottom=0.075, right=0.98)\n', (87299, 87358), True, 'import matplotlib.pyplot as plt\n'), ((87702, 87713), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (87711, 87713), True, 'import matplotlib.pyplot as plt\n'), ((87757, 87769), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (87767, 87769), False, 'import io\n'), ((87786, 87857), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_histogram'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_histogram, format='png', bbox_inches='tight')\n", (87797, 87857), True, 'import matplotlib.pyplot as plt\n'), ((91137, 91190), 'numpy.percentile', 'np.percentile', (['results_table_frc_avg_am', '(97.5)'], {'axis': '(1)'}), '(results_table_frc_avg_am, 97.5, axis=1)\n', (91150, 91190), True, 'import numpy as np\n'), ((91212, 91264), 'numpy.percentile', 'np.percentile', (['results_table_frc_avg_am', '(2.5)'], {'axis': '(1)'}), '(results_table_frc_avg_am, 2.5, axis=1)\n', (91225, 91264), True, 'import numpy as np\n'), ((91570, 91610), 'numpy.min', 'np.min', (['results_table_frc_avg_am'], {'axis': '(1)'}), '(results_table_frc_avg_am, axis=1)\n', (91576, 91610), True, 'import numpy as np\n'), ((91818, 91858), 'numpy.max', 'np.max', (['results_table_frc_avg_am'], {'axis': '(1)'}), '(results_table_frc_avg_am, axis=1)\n', (91824, 91858), True, 'import numpy as np\n'), ((92021, 92064), 'numpy.median', 'np.median', (['results_table_frc_avg_am'], {'axis': '(1)'}), '(results_table_frc_avg_am, axis=1)\n', (92030, 92064), True, 'import numpy as np\n'), ((92741, 92765), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (92750, 92765), True, 'import numpy as np\n'), ((93010, 93063), 'numpy.percentile', 'np.percentile', (['results_table_frc_avg_pm', '(97.5)'], {'axis': '(1)'}), '(results_table_frc_avg_pm, 97.5, axis=1)\n', (93023, 93063), True, 'import numpy as np\n'), ((93085, 93137), 
'numpy.percentile', 'np.percentile', (['results_table_frc_avg_pm', '(2.5)'], {'axis': '(1)'}), '(results_table_frc_avg_pm, 2.5, axis=1)\n', (93098, 93137), True, 'import numpy as np\n'), ((93443, 93483), 'numpy.min', 'np.min', (['results_table_frc_avg_pm'], {'axis': '(1)'}), '(results_table_frc_avg_pm, axis=1)\n', (93449, 93483), True, 'import numpy as np\n'), ((93691, 93731), 'numpy.max', 'np.max', (['results_table_frc_avg_pm'], {'axis': '(1)'}), '(results_table_frc_avg_pm, axis=1)\n', (93697, 93731), True, 'import numpy as np\n'), ((93894, 93937), 'numpy.median', 'np.median', (['results_table_frc_avg_pm'], {'axis': '(1)'}), '(results_table_frc_avg_pm, axis=1)\n', (93903, 93937), True, 'import numpy as np\n'), ((94614, 94638), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (94623, 94638), True, 'import numpy as np\n'), ((97429, 97478), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6.69, 3.35)', 'dpi': '(300)'}), '(1, 2, figsize=(6.69, 3.35), dpi=300)\n', (97441, 97478), True, 'import matplotlib.pyplot as plt\n'), ((101273, 101305), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.25)'}), '(wspace=0.25)\n', (101292, 101305), True, 'import matplotlib.pyplot as plt\n'), ((101322, 101340), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (101338, 101340), True, 'import matplotlib.pyplot as plt\n'), ((101667, 101679), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (101677, 101679), False, 'import io\n'), ((101696, 101763), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_preds'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_preds, format='png', bbox_inches='tight')\n", (101707, 101763), True, 'import matplotlib.pyplot as plt\n'), ((101910, 101921), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (101919, 101921), True, 'import matplotlib.pyplot as plt\n'), ((101950, 101991), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.69, 3.35)', 'dpi': '(300)'}), '(figsize=(6.69, 3.35), dpi=300)\n', (101960, 101991), True, 'import matplotlib.pyplot as plt\n'), ((102008, 102175), 'matplotlib.pyplot.plot', 'plt.plot', (['test1_frc', "self.avg_case_results_am_post['probability<=0.20']"], {'c': '"""#ffa600"""', 'label': '"""Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection"""'}), "(test1_frc, self.avg_case_results_am_post['probability<=0.20'], c=\n '#ffa600', label=\n 'Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection')\n", (102016, 102175), True, 'import matplotlib.pyplot as plt\n'), ((102281, 102457), 'matplotlib.pyplot.plot', 'plt.plot', (['test1_frc', "self.avg_case_results_pm_post['probability<=0.20']"], {'c': '"""#ffa600"""', 'ls': '"""--"""', 'label': '"""Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection"""'}), "(test1_frc, self.avg_case_results_pm_post['probability<=0.20'], c=\n '#ffa600', ls='--', label=\n 'Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection')\n", (102289, 102457), True, 'import matplotlib.pyplot as plt\n'), ((102583, 102601), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.2, 2]'], {}), '([0.2, 2])\n', (102591, 102601), True, 'import matplotlib.pyplot as plt\n'), ((102618, 102651), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tapstand FRC (mg/L)"""'], {}), "('Tapstand FRC (mg/L)')\n", (102628, 102651), True, 'import matplotlib.pyplot as plt\n'), ((102668, 102684), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (102676, 102684), True, 'import matplotlib.pyplot as plt\n'), ((102701, 102758), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Risk of Point-of-Consumption FRC < 0.2 mg/L"""'], {}), "('Risk of Point-of-Consumption FRC < 0.2 mg/L')\n", (102711, 102758), True, 'import matplotlib.pyplot as plt\n'), ((102775, 102940), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.999, 0.999)', 'shadow': '(False)', 
'fontsize': '"""small"""', 'ncol': '(1)', 'labelspacing': '(0.1)', 'columnspacing': '(0.2)', 'handletextpad': '(0.1)', 'loc': '"""upper right"""'}), "(bbox_to_anchor=(0.999, 0.999), shadow=False, fontsize='small',\n ncol=1, labelspacing=0.1, columnspacing=0.2, handletextpad=0.1, loc=\n 'upper right')\n", (102785, 102940), True, 'import matplotlib.pyplot as plt\n'), ((103430, 103442), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (103440, 103442), False, 'import io\n'), ((103459, 103525), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_risk'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_risk, format='png', bbox_inches='tight')\n", (103470, 103525), True, 'import matplotlib.pyplot as plt\n'), ((103668, 103679), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (103677, 103679), True, 'import matplotlib.pyplot as plt\n'), ((107017, 107043), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (107033, 107043), False, 'import os\n'), ((107148, 107174), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (107164, 107174), False, 'import os\n'), ((118590, 118616), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (118606, 118616), False, 'import os\n'), ((120268, 120294), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (120284, 120294), False, 'import os\n'), ((121804, 121841), 'os.path.basename', 'os.path.basename', (['self.input_filename'], {}), '(self.input_filename)\n', (121820, 121841), False, 'import os\n'), ((141727, 141764), 'xlrd.xldate.xldate_as_datetime', 'xldate_as_datetime', (['start'], {'datemode': '(0)'}), '(start, datemode=0)\n', (141745, 141764), False, 'from xlrd.xldate import xldate_as_datetime\n'), ((6100, 6129), 'numpy.append', 'np.append', (['collection_time', '(1)'], {}), '(collection_time, 1)\n', (6109, 6129), True, 'import numpy as np\n'), ((6190, 6219), 'numpy.append', 'np.append', 
(['collection_time', '(0)'], {}), '(collection_time, 0)\n', (6199, 6219), True, 'import numpy as np\n'), ((22727, 22744), 'numpy.mean', 'np.mean', (['forecast'], {}), '(forecast)\n', (22734, 22744), True, 'import numpy as np\n'), ((24302, 24326), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.05)'], {}), '(scipy_cdf - 0.05)\n', (24308, 24326), True, 'import numpy as np\n'), ((24438, 24462), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.95)'], {}), '(scipy_cdf - 0.95)\n', (24444, 24462), True, 'import numpy as np\n'), ((24574, 24597), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.1)'], {}), '(scipy_cdf - 0.1)\n', (24580, 24597), True, 'import numpy as np\n'), ((24709, 24732), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.9)'], {}), '(scipy_cdf - 0.9)\n', (24715, 24732), True, 'import numpy as np\n'), ((24844, 24868), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.15)'], {}), '(scipy_cdf - 0.15)\n', (24850, 24868), True, 'import numpy as np\n'), ((24980, 25004), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.85)'], {}), '(scipy_cdf - 0.85)\n', (24986, 25004), True, 'import numpy as np\n'), ((25116, 25139), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.2)'], {}), '(scipy_cdf - 0.2)\n', (25122, 25139), True, 'import numpy as np\n'), ((25251, 25274), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.8)'], {}), '(scipy_cdf - 0.8)\n', (25257, 25274), True, 'import numpy as np\n'), ((25386, 25410), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.25)'], {}), '(scipy_cdf - 0.25)\n', (25392, 25410), True, 'import numpy as np\n'), ((25522, 25546), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.75)'], {}), '(scipy_cdf - 0.75)\n', (25528, 25546), True, 'import numpy as np\n'), ((25658, 25681), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.3)'], {}), '(scipy_cdf - 0.3)\n', (25664, 25681), True, 'import numpy as np\n'), ((25793, 25816), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.7)'], {}), '(scipy_cdf - 0.7)\n', (25799, 25816), True, 'import numpy as np\n'), ((25928, 25952), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.35)'], {}), '(scipy_cdf - 
0.35)\n', (25934, 25952), True, 'import numpy as np\n'), ((26064, 26088), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.65)'], {}), '(scipy_cdf - 0.65)\n', (26070, 26088), True, 'import numpy as np\n'), ((26200, 26223), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.4)'], {}), '(scipy_cdf - 0.4)\n', (26206, 26223), True, 'import numpy as np\n'), ((26335, 26358), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.6)'], {}), '(scipy_cdf - 0.6)\n', (26341, 26358), True, 'import numpy as np\n'), ((26470, 26494), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.45)'], {}), '(scipy_cdf - 0.45)\n', (26476, 26494), True, 'import numpy as np\n'), ((26606, 26630), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.55)'], {}), '(scipy_cdf - 0.55)\n', (26612, 26630), True, 'import numpy as np\n'), ((26738, 26761), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.5)'], {}), '(scipy_cdf - 0.5)\n', (26744, 26761), True, 'import numpy as np\n'), ((46272, 46297), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.995)'], {}), '(scipy_cdf - 0.995)\n', (46278, 46297), True, 'import numpy as np\n'), ((46415, 46440), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.005)'], {}), '(scipy_cdf - 0.005)\n', (46421, 46440), True, 'import numpy as np\n'), ((46558, 46583), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.975)'], {}), '(scipy_cdf - 0.975)\n', (46564, 46583), True, 'import numpy as np\n'), ((46701, 46726), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.025)'], {}), '(scipy_cdf - 0.025)\n', (46707, 46726), True, 'import numpy as np\n'), ((46853, 46876), 'numpy.abs', 'np.abs', (['(scipy_cdf - 0.5)'], {}), '(scipy_cdf - 0.5)\n', (46859, 46876), True, 'import numpy as np\n'), ((55132, 55206), 'numpy.less', 'np.less', (['self.worst_case_results_am.iloc[:, 0:self.network_count - 1]', '(0.2)'], {}), '(self.worst_case_results_am.iloc[:, 0:self.network_count - 1], 0.2)\n', (55139, 55206), True, 'import numpy as np\n'), ((55590, 55665), 'numpy.less', 'np.less', (['self.worst_case_results_am.iloc[:, 0:self.network_count - 1]', '(0.25)'], {}), 
'(self.worst_case_results_am.iloc[:, 0:self.network_count - 1], 0.25)\n', (55597, 55665), True, 'import numpy as np\n'), ((56049, 56123), 'numpy.less', 'np.less', (['self.worst_case_results_am.iloc[:, 0:self.network_count - 1]', '(0.3)'], {}), '(self.worst_case_results_am.iloc[:, 0:self.network_count - 1], 0.3)\n', (56056, 56123), True, 'import numpy as np\n'), ((57841, 57915), 'numpy.less', 'np.less', (['self.worst_case_results_pm.iloc[:, 0:self.network_count - 1]', '(0.2)'], {}), '(self.worst_case_results_pm.iloc[:, 0:self.network_count - 1], 0.2)\n', (57848, 57915), True, 'import numpy as np\n'), ((58299, 58374), 'numpy.less', 'np.less', (['self.worst_case_results_pm.iloc[:, 0:self.network_count - 1]', '(0.25)'], {}), '(self.worst_case_results_pm.iloc[:, 0:self.network_count - 1], 0.25)\n', (58306, 58374), True, 'import numpy as np\n'), ((58758, 58832), 'numpy.less', 'np.less', (['self.worst_case_results_pm.iloc[:, 0:self.network_count - 1]', '(0.3)'], {}), '(self.worst_case_results_pm.iloc[:, 0:self.network_count - 1], 0.3)\n', (58765, 58832), True, 'import numpy as np\n'), ((73012, 73036), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (73021, 73036), True, 'import numpy as np\n'), ((74897, 74921), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (74906, 74921), True, 'import numpy as np\n'), ((76792, 76816), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (76801, 76816), True, 'import numpy as np\n'), ((78685, 78709), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (78694, 78709), True, 'import numpy as np\n'), ((88099, 88148), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(1)'], {'figsize': '(3.35, 6.69)', 'dpi': '(300)'}), '(6, 1, figsize=(3.35, 6.69), dpi=300)\n', (88111, 88148), True, 'import matplotlib.pyplot as plt\n'), ((89857, 89935), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', 
([], {'left': '(0.18)', 'hspace': '(0.6)', 'top': '(0.99)', 'bottom': '(0.075)', 'right': '(0.98)'}), '(left=0.18, hspace=0.6, top=0.99, bottom=0.075, right=0.98)\n', (89876, 89935), True, 'import matplotlib.pyplot as plt\n'), ((90279, 90290), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (90288, 90290), True, 'import matplotlib.pyplot as plt\n'), ((90334, 90346), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (90344, 90346), False, 'import io\n'), ((90363, 90434), 'matplotlib.pyplot.savefig', 'plt.savefig', (['StringIOBytes_histogram'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(StringIOBytes_histogram, format='png', bbox_inches='tight')\n", (90374, 90434), True, 'import matplotlib.pyplot as plt\n'), ((99203, 99227), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (99212, 99227), True, 'import numpy as np\n'), ((101073, 101097), 'numpy.arange', 'np.arange', (['(0.2)', '(2.2)', '(0.2)'], {}), '(0.2, 2.2, 0.2)\n', (101082, 101097), True, 'import numpy as np\n'), ((104623, 104649), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (104639, 104649), False, 'import os\n'), ((107372, 107398), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (107388, 107398), False, 'import os\n'), ((107515, 107541), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (107531, 107541), False, 'import os\n'), ((107706, 107732), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (107722, 107732), False, 'import os\n'), ((107854, 107880), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (107870, 107880), False, 'import os\n'), ((24085, 24109), 'numpy.where', 'np.where', (['(scipy_cdf == 0)'], {}), '(scipy_cdf == 0)\n', (24093, 24109), True, 'import numpy as np\n'), ((46050, 46074), 'numpy.where', 'np.where', (['(scipy_cdf == 0)'], {}), '(scipy_cdf == 0)\n', (46058, 46074), True, 'import numpy as 
np\n'), ((68026, 68052), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (68042, 68052), False, 'import os\n'), ((68711, 68749), 'numpy.less', 'np.less', (['results_table_frc_avg_am', '(0.2)'], {}), '(results_table_frc_avg_am, 0.2)\n', (68718, 68749), True, 'import numpy as np\n'), ((69030, 69068), 'numpy.less', 'np.less', (['results_table_frc_avg_pm', '(0.2)'], {}), '(results_table_frc_avg_pm, 0.2)\n', (69037, 69068), True, 'import numpy as np\n'), ((69378, 69418), 'numpy.less', 'np.less', (['results_table_frc_worst_am', '(0.2)'], {}), '(results_table_frc_worst_am, 0.2)\n', (69385, 69418), True, 'import numpy as np\n'), ((69696, 69736), 'numpy.less', 'np.less', (['results_table_frc_worst_pm', '(0.2)'], {}), '(results_table_frc_worst_pm, 0.2)\n', (69703, 69736), True, 'import numpy as np\n'), ((70613, 70639), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (70629, 70639), False, 'import os\n'), ((84868, 84894), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (84884, 84894), False, 'import os\n'), ((94896, 94922), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (94912, 94922), False, 'import os\n'), ((95581, 95619), 'numpy.less', 'np.less', (['results_table_frc_avg_am', '(0.2)'], {}), '(results_table_frc_avg_am, 0.2)\n', (95588, 95619), True, 'import numpy as np\n'), ((95900, 95938), 'numpy.less', 'np.less', (['results_table_frc_avg_pm', '(0.2)'], {}), '(results_table_frc_avg_pm, 0.2)\n', (95907, 95938), True, 'import numpy as np\n'), ((96819, 96845), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (96835, 96845), False, 'import os\n'), ((108098, 108124), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (108114, 108124), False, 'import os\n'), ((108258, 108284), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (108274, 108284), False, 'import os\n'), ((78979, 79005), 
'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (78995, 79005), False, 'import os\n'), ((81324, 81350), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (81340, 81350), False, 'import os\n'), ((87448, 87474), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (87464, 87474), False, 'import os\n'), ((101390, 101416), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (101406, 101416), False, 'import os\n'), ((103161, 103187), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (103177, 103187), False, 'import os\n'), ((132995, 133021), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (133011, 133021), False, 'import os\n'), ((90025, 90051), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (90041, 90051), False, 'import os\n'), ((123906, 123932), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (123922, 123932), False, 'import os\n'), ((125514, 125540), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (125530, 125540), False, 'import os\n'), ((126832, 126858), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (126848, 126858), False, 'import os\n'), ((128338, 128364), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (128354, 128364), False, 'import os\n'), ((129550, 129576), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (129566, 129576), False, 'import os\n'), ((130544, 130570), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (130560, 130570), False, 'import os\n')] |
import pylab
import numpy
class GeneralRandom:
    """Draw random numbers from an arbitrary 1-D probability distribution.

    A reverse-lookup table for the inverse CDF is precomputed once, so
    uniform variates can be mapped cheaply onto the requested density.
    Only the two plotting helpers require pylab/matplotlib; the numeric
    core uses numpy alone.
    """

    def __init__(self, x=numpy.arange(-1.0, 1.0, .01), p=None, Nrl=1000):
        """Initialize the lookup table (with default values if necessary).

        Inputs:
          x   = random number values
          p   = probability density profile at those points
                (default: exp(-10 * x**2))
          Nrl = number of reverse lookup values between 0 and 1

        NOTE: the default ``x`` array is built once at class-definition
        time; that is safe here because it is never mutated.
        """
        # 'p == None' on a numpy array compares element-wise and is
        # ambiguous in boolean context -- an identity test is required.
        if p is None:
            p = numpy.exp(-10 * x ** 2.0)
        self.set_pdf(x, p, Nrl)

    def set_pdf(self, x, p, Nrl=1000):
        """Generate the lookup tables.

        x is the value of the random variate, pdf its probability
        density, cdf the cumulative pdf and inversecdf the inverse
        lookup table.
        """
        self.x = x
        self.pdf = p / p.sum()  # normalize it
        self.cdf = self.pdf.cumsum()
        self.inversecdfbins = Nrl
        self.Nrl = Nrl
        y = numpy.arange(Nrl) / float(Nrl)
        self.inversecdf = numpy.zeros(Nrl)
        self.inversecdf[0] = self.x[0]
        cdf_idx = 0
        # Python 3: 'xrange' no longer exists -- use 'range'.
        for n in range(1, self.inversecdfbins):
            # Bound check FIRST so self.cdf[cdf_idx] is never read out of
            # range (the original tested the value before the bound).
            while cdf_idx < Nrl and self.cdf[cdf_idx] < y[n]:
                cdf_idx += 1
            # Linear interpolation between the bracketing cdf samples.
            self.inversecdf[n] = self.x[cdf_idx - 1] + \
                (self.x[cdf_idx] - self.x[cdf_idx - 1]) * \
                (y[n] - self.cdf[cdf_idx - 1]) / \
                (self.cdf[cdf_idx] - self.cdf[cdf_idx - 1])
            if cdf_idx >= Nrl:
                break
        self.delta_inversecdf = numpy.concatenate((numpy.diff(self.inversecdf), [0]))

    def random(self, N=1000):
        """Return N random numbers with the requested distribution.

        Returns an array of shape (1, N).
        """
        idx_f = numpy.random.uniform(size=N, high=self.Nrl - 1)
        idx = numpy.array([idx_f], 'i')  # truncation to integer bin indices
        y = self.inversecdf[idx] + (idx_f - idx) * self.delta_inversecdf[idx]
        return y

    def plot_pdf(self):
        """Plot the normalized pdf (requires pylab/matplotlib)."""
        pylab.plot(self.x, self.pdf)

    def self_test(self, N=1000):
        """Visual sanity check: cdf, inverse cdf and a histogram of N samples."""
        pylab.figure()
        # The cdf
        pylab.subplot(2, 2, 1)
        pylab.plot(self.x, self.cdf)
        # The inverse cdf
        pylab.subplot(2, 2, 2)
        y = pylab.arange(self.Nrl) / float(self.Nrl)
        pylab.plot(y, self.inversecdf)
        # The actually generated numbers
        pylab.subplot(2, 2, 3)
        y = self.random(N)
        # numpy removed the 'new' and 'normed' histogram keywords long
        # ago; 'density=True' is the modern equivalent of 'normed=True'.
        p1, edges = pylab.histogram(y, bins=50,
                                    range=(self.x.min(), self.x.max()),
                                    density=True)
        x1 = 0.5 * (edges[0:-1] + edges[1:])
        pylab.plot(x1, p1 / p1.max())
        pylab.plot(self.x, self.pdf / self.pdf.max())
| [
"pylab.zeros",
"pylab.subplot",
"pylab.arange",
"pylab.plot",
"pylab.array",
"pylab.figure",
"pylab.diff",
"numpy.random.uniform",
"pylab.exp"
] | [((168, 197), 'pylab.arange', 'pylab.arange', (['(-1.0)', '(1.0)', '(0.01)'], {}), '(-1.0, 1.0, 0.01)\n', (180, 197), False, 'import pylab\n'), ((987, 1003), 'pylab.zeros', 'pylab.zeros', (['Nrl'], {}), '(Nrl)\n', (998, 1003), False, 'import pylab\n'), ((1585, 1632), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'N', 'high': '(self.Nrl - 1)'}), '(size=N, high=self.Nrl - 1)\n', (1605, 1632), False, 'import numpy\n'), ((1645, 1670), 'pylab.array', 'pylab.array', (['[idx_f]', '"""i"""'], {}), "([idx_f], 'i')\n", (1656, 1670), False, 'import pylab\n'), ((1785, 1813), 'pylab.plot', 'pylab.plot', (['self.x', 'self.pdf'], {}), '(self.x, self.pdf)\n', (1795, 1813), False, 'import pylab\n'), ((1856, 1870), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (1868, 1870), False, 'import pylab\n'), ((1888, 1910), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1901, 1910), False, 'import pylab\n'), ((1913, 1941), 'pylab.plot', 'pylab.plot', (['self.x', 'self.cdf'], {}), '(self.x, self.cdf)\n', (1923, 1941), False, 'import pylab\n'), ((1967, 1989), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1980, 1989), False, 'import pylab\n'), ((2039, 2069), 'pylab.plot', 'pylab.plot', (['y', 'self.inversecdf'], {}), '(y, self.inversecdf)\n', (2049, 2069), False, 'import pylab\n'), ((2113, 2135), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2126, 2135), False, 'import pylab\n'), ((474, 499), 'pylab.exp', 'pylab.exp', (['(-10 * x ** 2.0)'], {}), '(-10 * x ** 2.0)\n', (483, 499), False, 'import pylab\n'), ((916, 933), 'pylab.arange', 'pylab.arange', (['Nrl'], {}), '(Nrl)\n', (928, 933), False, 'import pylab\n'), ((1996, 2018), 'pylab.arange', 'pylab.arange', (['self.Nrl'], {}), '(self.Nrl)\n', (2008, 2018), False, 'import pylab\n'), ((1425, 1452), 'pylab.diff', 'pylab.diff', (['self.inversecdf'], {}), '(self.inversecdf)\n', (1435, 1452), False, 'import pylab\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Interactive quadratic-equation root calculator: loops until the user
# enters "0".  Empty input starts a calculation; invalid input is
# reported and the loop continues.
while True:
    try:
        print('-' * 111)  # decorative separator between runs
        x = input('<< TO CONTINUE PRESS "ENTER" OR TO KILL THE APPLICATION PRESS "0" >> ')
        if x == '':
            print()
            print('<< THIS APPLICATION CALCULATES ROOTS FOR A GIVEN QUADRATIC EQUATION >>')
            print()
            print('>>> THE EQUATION WILL BE IN FORM OF << a.X^2 + b.X + c >>')
            print()
            a = int(input('enter value of a: '))
            b = int(input('enter value of b: '))
            c = int(input('enter value of c: '))
            # In Python 3 a negative number raised to 0.5 yields a complex
            # number, so complex roots are handled transparently.
            discriminant = (b ** 2) - 4 * a * c
            d = discriminant ** (1 / 2)
            roots_1 = ((-b) + d) / (2 * a)  # ZeroDivisionError when a == 0
            roots_2 = ((-b) - d) / (2 * a)
            root_declare = f'>>> The roots are X = {roots_1} and X = {roots_2}.'
            print()
            print(f'>>> THE EQUATION IS ( {a}.X^2 + {b}.X + {c} ).')
            print()
            print(root_declare)
            print()
            print(f'>>> The value of discriminant is {discriminant}.')
            print()

            def root_indicator():
                """Describe the character of the roots from the discriminant."""
                if discriminant > 0:
                    return ('The roots of given equation are real.')
                elif discriminant < 0:
                    return ('The roots of given equation are imaginary.')
                elif discriminant == 0:
                    return ('There is one real root.')

            print(f'>>> {root_indicator()}')
            print()
            print('')
            plot_enable = input('<<< To show the plot press " y " OR To cancel plotting press ENTER >>> ')
            if plot_enable == 'y':
                def plot_show():
                    """Plot y = a*x^2 + b*x + c with axes through the origin."""
                    # 100 linearly spaced sample points
                    x = np.linspace(-10 ** 2, 10 ** 2, 10 ** 2)
                    # the function being plotted
                    y = (a * (x ** 2)) + (b * x) + c
                    # setting the axes at the centre
                    fig = plt.figure()
                    ax = fig.add_subplot(1, 1, 1)
                    ax.spines['left'].set_position('center')
                    ax.spines['bottom'].set_position('zero')
                    ax.spines['right'].set_color('none')
                    ax.spines['top'].set_color('none')
                    ax.xaxis.set_ticks_position('bottom')
                    ax.yaxis.set_ticks_position('left')
                    # plot the function
                    plt.plot(x, y, 'r')
                    # show the plot
                    plt.show()
                plot_show()
            elif plot_enable == '':
                print()
                print('>>> Graphing Cancelled')
        elif int(x) == 0:
            break
    except ValueError:
        # Non-numeric input anywhere above lands here.  The original bare
        # 'except:' consulted 'a', which is undefined on the first pass
        # and crashed with a NameError; catching the specific exceptions
        # fixes that and no longer swallows KeyboardInterrupt.
        print()
        print('>> Please check the entered value, enter numerals only. <<')
    except ZeroDivisionError:
        # a == 0 makes the equation linear; the root formula divides by 0.
        print('>> a value must be larger than "zero" and enter numerals only. <<')
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((1838, 1877), 'numpy.linspace', 'np.linspace', (['(-10 ** 2)', '(10 ** 2)', '(10 ** 2)'], {}), '(-10 ** 2, 10 ** 2, 10 ** 2)\n', (1849, 1877), True, 'import numpy as np\n'), ((1894, 1933), 'numpy.linspace', 'np.linspace', (['(-10 ** 2)', '(10 ** 2)', '(10 ** 2)'], {}), '(-10 ** 2, 10 ** 2, 10 ** 2)\n', (1905, 1933), True, 'import numpy as np\n'), ((2108, 2120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2118, 2120), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2599), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""'], {}), "(x, y, 'r')\n", (2588, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2664, 2666), True, 'import matplotlib.pyplot as plt\n')] |
from typing import List
import numpy as np
class BenchmarkResult:
    """Stores metric traces gathered while benchmarking several loops."""

    def __init__(self, loop_names: List[str], n_repeats: int, metric_names: List[str]):
        """
        :param loop_names: List of loop names
        :param n_repeats: Number of random restarts in benchmarking
        :param metric_names: List of metric names
        """
        self.loop_names = loop_names
        self.n_repeats = n_repeats
        self.metric_names = metric_names
        # Nested mapping: loop name -> metric name -> one (initially
        # empty) list of values per repeat.
        self._results = {
            loop: {metric: [[] for _ in range(n_repeats)] for metric in metric_names}
            for loop in loop_names
        }

    def add_results(self, loop_name: str, i_repeat: int, metric_name: str, metric_values: np.ndarray) -> None:
        """Store the metric trace for one (loop, repeat, metric) cell.

        :param loop_name: Name of loop
        :param i_repeat: Index of repeat
        :param metric_name: Name of metric
        :param metric_values: Metric values to add (flattened before storing)
        """
        flat_values = metric_values.flatten()
        self._results[loop_name][metric_name][i_repeat] = flat_values

    def extract_metric_as_array(self, loop_name: str, metric_name: str) -> np.ndarray:
        """Collect every repeat's trace for a given loop/metric pair.

        :param loop_name: Name of loop to return results for
        :param metric_name: Name of metric to extract
        :return: 2-d numpy array of shape (n_repeats x n_iterations)
        """
        per_repeat = self._results[loop_name][metric_name]
        return np.array(per_repeat)
| [
"numpy.array"
] | [((1664, 1711), 'numpy.array', 'np.array', (['self._results[loop_name][metric_name]'], {}), '(self._results[loop_name][metric_name])\n', (1672, 1711), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
# strategies[g][s][k] is the defender's value in state Sk for discount
# factor gamma[g] under strategy s, where s = 0 is the uniform random
# strategy, s = 1 the optimal mixed strategy and s = 2 the min-max pure
# strategy (matching the legend order used in the plots below).
strategies = np.array([
    [
        #gamma = 0.5
        [-9.563336572173805, -8.193748914742143, -10.270220524396596, -3.0000000000000004, -7.553846153846153, -7.904142011834319, -3.0, -3.0, -3.0, -3.0],
        [-7.378487640724603, -3.3197512739857147, -7.496142688168831, -3.0000000000000004, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0],
        [-9.314285714285715, -6.0, -11.8, -6.0, -6.0, -6.0, -6.0, -6.0, -6.0, -6.0]
    ],
    [
        #gamma = 0.55
        [-10.524329851693846, -9.121195380360348, -11.382794510864285, -3.333333333333334, -8.308123249299719, -8.767977779347031, -3.333333333333334, -3.333333333333334, -3.333333333333334, -3.333333333333334],
        [-7.853847828218031, -3.71793284163171, -7.966237505840968, -3.333333333333334, -3.333333333333333, -3.333333333333333, -3.333333333333334, -3.333333333333334, -3.333333333333334, -3.333333333333334],
        [-10.028985507246377, -6.666666666666666, -13.11111111111111, -6.666666666666666, -6.666666666666666, -6.666666666666666, -6.666666666666666, -6.666666666666666, -6.666666666666666, -6.666666666666666]
    ],
    [
        #gamma = 0.6
        [-11.719684181565645, -10.273338406900049, -12.76628614916286, -3.75, -9.231481481481477, -9.840534979423865, -3.75, -3.75, -3.75, -3.75],
        [-8.42965572232598, -4.21204039850597, -8.528273788767603, -3.7499999999999982, -3.7499999999999982, -3.7499999999999982, -3.7499999999999982, -3.7499999999999982, -3.7499999999999982, -3.7499999999999982],
        [-10.911764705882351, -7.499999999999999, -14.441176470588236, -7.499999999999999, -7.499999999999999, -7.499999999999999, -7.499999999999999, -7.499999999999999, -7.499999999999999, -7.499999999999999]
    ],
    [
        #gamma = 0.65
        [-13.246274902675406, -11.742746583844642, -14.533245644719841, -4.285714285714285, -10.388807069219439, -11.206747339173734, -4.285714285714285, -4.285714285714285, -4.285714285714285, -4.285714285714285],
        [-9.145905020699661, -4.841618667247477, -9.218958552423087, -4.285714285714285, -4.285714285714285, -4.285714285714285, -4.285714285714283, -4.285714285714283, -4.285714285714283, -4.285714285714283],
        [-12.03411513859275, -8.57142857142857, -15.616204690831555, -8.57142857142857, -8.57142857142857, -8.57142857142857, -8.57142857142857, -8.57142857142857, -8.57142857142857, -8.57142857142857]
    ],
    [
        #gamma = 0.7
        [-15.26280962470669, -13.681019910677742, -16.868493443177165, -4.999999999999998, -11.883720930232553, -13.004326663061104, -4.999999999999998, -4.999999999999998, -4.999999999999998, -4.999999999999998],
        [-10.068193838520614, -5.671752735193775, -10.099054988853647, -4.999999999999998, -4.999999999999998, -4.999999999999998, -4.999999999999998, -4.999999999999998, -4.999999999999998, -4.999999999999998],
        [-13.515151515151512, -9.999999999999996, -17.15151515151515, -9.999999999999996, -9.999999999999996, -9.999999999999996, -9.999999999999996, -9.999999999999996, -9.999999999999996, -9.999999999999996]
    ],
    [
        #gamma = 0.75
        [-18.048619027086353, -16.354914089347076, -20.098144329896904, -5.999999999999999, -13.893333333333327, -15.47199999999999, -5.999999999999998, -5.999999999999998, -5.999999999999998, -5.999999999999998],
        [-11.364085664816024, -6.829408299891047, -11.362958194659226, -5.999999999999998, -5.999999999999998, -5.999999999999998, -5.999999999999998, -5.999999999999998, -5.999999999999998, -5.999999999999998],
        [-15.569230769230767, -11.999999999999996, -19.261538461538457, -11.999999999999996, -11.999999999999996, -11.999999999999996, -11.999999999999996, -11.999999999999996, -11.999999999999996, -11.999999999999996]
    ],
    [
        #gamma = 0.8
        [-22.14552188552189, -20.28181818181818, -24.856363636363632, -7.500000000000002, -16.749999999999993, -19.062499999999993, -7.5, -7.500000000000002, -7.500000000000002, -7.500000000000002],
        [-13.227691215343736, -8.540503875101978, -13.175865235686418, -7.5, -7.500000000000001, -7.5, -7.499999999999998, -7.5, -7.5, -7.5],
        [-18.625, -15.0, -22.375, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0]
    ],
    [
        #gamma = 0.85
        [-28.76278844268961, -26.61680527433105, -32.56131830251732, -9.999999999999993, -21.169811320754697, -24.752580989676016, -9.999999999999993, -9.999999999999993, -9.999999999999993, -9.999999999999993],
        [-16.183356468130675, -11.33189687650437, -16.033301790463963, -9.999999999999993, -9.999999999999991, -9.999999999999993, -9.999999999999993, -9.999999999999993, -9.999999999999993, -9.999999999999993],
        [-23.68253968253967, -19.999999999999986, -27.49206349206348, -19.999999999999986, -19.999999999999986, -19.999999999999986, -19.999999999999986, -19.999999999999986, -19.999999999999986, -19.999999999999986]
    ],
    [
        #gamma = 0.9
        [-41.27742867847752, -38.58932362753994, -47.172156505914224, -14.999999999999755, -29.095238095237843, -35.13605442176845, -14.999999999999753, -14.999999999999753, -14.999999999999753, -14.999999999999753],
        [-21.789898957859354, -16.75709624029196, -21.448166972857727, -14.99999999999974, -14.99999999999974, -14.999999999999744, -14.999999999999735, -14.999999999999744, -14.999999999999744, -14.999999999999744],
        [-33.74193548387047, -29.999999999999503, -37.61290322580595, -29.999999999999503, -29.999999999999503, -29.999999999999503, -29.999999999999503, -29.999999999999503, -29.999999999999503, -29.999999999999503]
    ],
    [
        #gamma = 0.95
        [-74.330382553884, -70.25959327963282, -85.68377649107512, -29.99999408538547, -49.09676827893381, -60.80124278465696, -29.999994085385474, -29.99999408538546, -29.999994085385453, -29.999994085385445],
        [-37.67557701062915, -32.430971145564975, -36.94165998316571, -29.999994085385467, -29.999994085385474, -29.99999408538546, -29.999994085385474, -29.99999408538546, -29.999994085385453, -29.999994085385445],
        [-63.80326685929545, -59.99998817077086, -67.73769308880364, -59.99998817077086, -59.99998817077086, -59.99998817077086, -59.99998817077086, -59.99998817077086, -59.99998817077086, -59.99998817077086]
    ]
])
# Discount factors matching the first axis of `strategies`.
gamma = np.array([0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95])
# Plots 1-6: defender's value in states S0..S5 as a function of gamma,
# one figure per state, each comparing the three defender strategies.
# The six copy-pasted per-state sections are folded into a single loop;
# the figures produced are identical to the originals.
for state in range(6):
    # Collect the value of state S<state> for every gamma, per strategy
    # (axis 1 of `strategies`: 0 = URS, 1 = OPT, 2 = MMP).
    v_urs = [strategies[i][0][state] for i in range(len(strategies))]
    v_opt = [strategies[i][1][state] for i in range(len(strategies))]
    v_mmp = [strategies[i][2][state] for i in range(len(strategies))]
    plt.plot(gamma, np.asarray(v_urs), marker='o')
    plt.plot(gamma, np.asarray(v_opt), marker='x')
    plt.plot(gamma, np.asarray(v_mmp), marker='+')
    plt.ylabel("Defender's Utility $\longrightarrow$")
    plt.xlabel("$\gamma \longrightarrow$")
    plt.title(f"Defender's value in state S{state}.")
    plt.legend(['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy'], loc='lower left')
    plt.show()
#Plot 7
V6_URS = []
V6_OPT = []
V6_MMP = []
for i in range(len(strategies)):
V6_URS.append(strategies[i][0][6])
V6_OPT.append(strategies[i][1][6])
V6_MMP.append(strategies[i][2][6])
plt.plot(gamma, np.asarray(V6_URS), marker='o')
plt.plot(gamma, np.asarray(V6_OPT), marker='x')
plt.plot(gamma, np.asarray(V6_MMP), marker='+')
plt.ylabel("Defender's Utility $\longrightarrow$")
plt.xlabel("$\gamma \longrightarrow$")
plt.title("Defender's value in state S6.")
plt.legend(['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy'], loc='lower left')
plt.show()
#Plot 8
V7_URS = []
V7_OPT = []
V7_MMP = []
for i in range(len(strategies)):
V7_URS.append(strategies[i][0][7])
V7_OPT.append(strategies[i][1][7])
V7_MMP.append(strategies[i][2][7])
plt.plot(gamma, np.asarray(V7_URS), marker='o')
plt.plot(gamma, np.asarray(V7_OPT), marker='x')
plt.plot(gamma, np.asarray(V7_MMP), marker='+')
plt.ylabel("Defender's Utility $\longrightarrow$")
plt.xlabel("$\gamma \longrightarrow$")
plt.title("Defender's value in state S7.")
plt.legend(['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy'], loc='lower left')
plt.show()
#Plot 9
V8_URS = []
V8_OPT = []
V8_MMP = []
for i in range(len(strategies)):
V8_URS.append(strategies[i][0][8])
V8_OPT.append(strategies[i][1][8])
V8_MMP.append(strategies[i][2][8])
plt.plot(gamma, np.asarray(V8_URS), marker='o')
plt.plot(gamma, np.asarray(V8_OPT), marker='x')
plt.plot(gamma, np.asarray(V8_MMP), marker='+')
plt.ylabel("Defender's Utility $\longrightarrow$")
plt.xlabel("$\gamma \longrightarrow$")
plt.title("Defender's value in state S8.")
plt.legend(['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy'], loc='lower left')
plt.show()
#Plot 10
V9_URS = []
V9_OPT = []
V9_MMP = []
for i in range(len(strategies)):
V9_URS.append(strategies[i][0][9])
V9_OPT.append(strategies[i][1][9])
V9_MMP.append(strategies[i][2][9])
plt.plot(gamma, np.asarray(V9_URS), marker='o')
plt.plot(gamma, np.asarray(V9_OPT), marker='x')
plt.plot(gamma, np.asarray(V9_MMP), marker='+')
plt.ylabel("Defender's Utility $\longrightarrow$")
plt.xlabel("$\gamma \longrightarrow$")
plt.title("Defender's value in state S9.")
plt.legend(['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy'], loc='lower left')
plt.show() | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((65, 6140), 'numpy.array', 'np.array', (['[[[-9.563336572173805, -8.193748914742143, -10.270220524396596, -\n 3.0000000000000004, -7.553846153846153, -7.904142011834319, -3.0, -3.0,\n -3.0, -3.0], [-7.378487640724603, -3.3197512739857147, -\n 7.496142688168831, -3.0000000000000004, -3.0, -3.0, -3.0, -3.0, -3.0, -\n 3.0], [-9.314285714285715, -6.0, -11.8, -6.0, -6.0, -6.0, -6.0, -6.0, -\n 6.0, -6.0]], [[-10.524329851693846, -9.121195380360348, -\n 11.382794510864285, -3.333333333333334, -8.308123249299719, -\n 8.767977779347031, -3.333333333333334, -3.333333333333334, -\n 3.333333333333334, -3.333333333333334], [-7.853847828218031, -\n 3.71793284163171, -7.966237505840968, -3.333333333333334, -\n 3.333333333333333, -3.333333333333333, -3.333333333333334, -\n 3.333333333333334, -3.333333333333334, -3.333333333333334], [-\n 10.028985507246377, -6.666666666666666, -13.11111111111111, -\n 6.666666666666666, -6.666666666666666, -6.666666666666666, -\n 6.666666666666666, -6.666666666666666, -6.666666666666666, -\n 6.666666666666666]], [[-11.719684181565645, -10.273338406900049, -\n 12.76628614916286, -3.75, -9.231481481481477, -9.840534979423865, -3.75,\n -3.75, -3.75, -3.75], [-8.42965572232598, -4.21204039850597, -\n 8.528273788767603, -3.7499999999999982, -3.7499999999999982, -\n 3.7499999999999982, -3.7499999999999982, -3.7499999999999982, -\n 3.7499999999999982, -3.7499999999999982], [-10.911764705882351, -\n 7.499999999999999, -14.441176470588236, -7.499999999999999, -\n 7.499999999999999, -7.499999999999999, -7.499999999999999, -\n 7.499999999999999, -7.499999999999999, -7.499999999999999]], [[-\n 13.246274902675406, -11.742746583844642, -14.533245644719841, -\n 4.285714285714285, -10.388807069219439, -11.206747339173734, -\n 4.285714285714285, -4.285714285714285, -4.285714285714285, -\n 4.285714285714285], [-9.145905020699661, -4.841618667247477, -\n 9.218958552423087, -4.285714285714285, -4.285714285714285, -\n 4.285714285714285, -4.285714285714283, 
-4.285714285714283, -\n 4.285714285714283, -4.285714285714283], [-12.03411513859275, -\n 8.57142857142857, -15.616204690831555, -8.57142857142857, -\n 8.57142857142857, -8.57142857142857, -8.57142857142857, -\n 8.57142857142857, -8.57142857142857, -8.57142857142857]], [[-\n 15.26280962470669, -13.681019910677742, -16.868493443177165, -\n 4.999999999999998, -11.883720930232553, -13.004326663061104, -\n 4.999999999999998, -4.999999999999998, -4.999999999999998, -\n 4.999999999999998], [-10.068193838520614, -5.671752735193775, -\n 10.099054988853647, -4.999999999999998, -4.999999999999998, -\n 4.999999999999998, -4.999999999999998, -4.999999999999998, -\n 4.999999999999998, -4.999999999999998], [-13.515151515151512, -\n 9.999999999999996, -17.15151515151515, -9.999999999999996, -\n 9.999999999999996, -9.999999999999996, -9.999999999999996, -\n 9.999999999999996, -9.999999999999996, -9.999999999999996]], [[-\n 18.048619027086353, -16.354914089347076, -20.098144329896904, -\n 5.999999999999999, -13.893333333333327, -15.47199999999999, -\n 5.999999999999998, -5.999999999999998, -5.999999999999998, -\n 5.999999999999998], [-11.364085664816024, -6.829408299891047, -\n 11.362958194659226, -5.999999999999998, -5.999999999999998, -\n 5.999999999999998, -5.999999999999998, -5.999999999999998, -\n 5.999999999999998, -5.999999999999998], [-15.569230769230767, -\n 11.999999999999996, -19.261538461538457, -11.999999999999996, -\n 11.999999999999996, -11.999999999999996, -11.999999999999996, -\n 11.999999999999996, -11.999999999999996, -11.999999999999996]], [[-\n 22.14552188552189, -20.28181818181818, -24.856363636363632, -\n 7.500000000000002, -16.749999999999993, -19.062499999999993, -7.5, -\n 7.500000000000002, -7.500000000000002, -7.500000000000002], [-\n 13.227691215343736, -8.540503875101978, -13.175865235686418, -7.5, -\n 7.500000000000001, -7.5, -7.499999999999998, -7.5, -7.5, -7.5], [-\n 18.625, -15.0, -22.375, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0\n ]], 
[[-28.76278844268961, -26.61680527433105, -32.56131830251732, -\n 9.999999999999993, -21.169811320754697, -24.752580989676016, -\n 9.999999999999993, -9.999999999999993, -9.999999999999993, -\n 9.999999999999993], [-16.183356468130675, -11.33189687650437, -\n 16.033301790463963, -9.999999999999993, -9.999999999999991, -\n 9.999999999999993, -9.999999999999993, -9.999999999999993, -\n 9.999999999999993, -9.999999999999993], [-23.68253968253967, -\n 19.999999999999986, -27.49206349206348, -19.999999999999986, -\n 19.999999999999986, -19.999999999999986, -19.999999999999986, -\n 19.999999999999986, -19.999999999999986, -19.999999999999986]], [[-\n 41.27742867847752, -38.58932362753994, -47.172156505914224, -\n 14.999999999999755, -29.095238095237843, -35.13605442176845, -\n 14.999999999999753, -14.999999999999753, -14.999999999999753, -\n 14.999999999999753], [-21.789898957859354, -16.75709624029196, -\n 21.448166972857727, -14.99999999999974, -14.99999999999974, -\n 14.999999999999744, -14.999999999999735, -14.999999999999744, -\n 14.999999999999744, -14.999999999999744], [-33.74193548387047, -\n 29.999999999999503, -37.61290322580595, -29.999999999999503, -\n 29.999999999999503, -29.999999999999503, -29.999999999999503, -\n 29.999999999999503, -29.999999999999503, -29.999999999999503]], [[-\n 74.330382553884, -70.25959327963282, -85.68377649107512, -\n 29.99999408538547, -49.09676827893381, -60.80124278465696, -\n 29.999994085385474, -29.99999408538546, -29.999994085385453, -\n 29.999994085385445], [-37.67557701062915, -32.430971145564975, -\n 36.94165998316571, -29.999994085385467, -29.999994085385474, -\n 29.99999408538546, -29.999994085385474, -29.99999408538546, -\n 29.999994085385453, -29.999994085385445], [-63.80326685929545, -\n 59.99998817077086, -67.73769308880364, -59.99998817077086, -\n 59.99998817077086, -59.99998817077086, -59.99998817077086, -\n 59.99998817077086, -59.99998817077086, -59.99998817077086]]]'], {}), '([[[-9.563336572173805, 
-8.193748914742143, -10.270220524396596, -\n 3.0000000000000004, -7.553846153846153, -7.904142011834319, -3.0, -3.0,\n -3.0, -3.0], [-7.378487640724603, -3.3197512739857147, -\n 7.496142688168831, -3.0000000000000004, -3.0, -3.0, -3.0, -3.0, -3.0, -\n 3.0], [-9.314285714285715, -6.0, -11.8, -6.0, -6.0, -6.0, -6.0, -6.0, -\n 6.0, -6.0]], [[-10.524329851693846, -9.121195380360348, -\n 11.382794510864285, -3.333333333333334, -8.308123249299719, -\n 8.767977779347031, -3.333333333333334, -3.333333333333334, -\n 3.333333333333334, -3.333333333333334], [-7.853847828218031, -\n 3.71793284163171, -7.966237505840968, -3.333333333333334, -\n 3.333333333333333, -3.333333333333333, -3.333333333333334, -\n 3.333333333333334, -3.333333333333334, -3.333333333333334], [-\n 10.028985507246377, -6.666666666666666, -13.11111111111111, -\n 6.666666666666666, -6.666666666666666, -6.666666666666666, -\n 6.666666666666666, -6.666666666666666, -6.666666666666666, -\n 6.666666666666666]], [[-11.719684181565645, -10.273338406900049, -\n 12.76628614916286, -3.75, -9.231481481481477, -9.840534979423865, -3.75,\n -3.75, -3.75, -3.75], [-8.42965572232598, -4.21204039850597, -\n 8.528273788767603, -3.7499999999999982, -3.7499999999999982, -\n 3.7499999999999982, -3.7499999999999982, -3.7499999999999982, -\n 3.7499999999999982, -3.7499999999999982], [-10.911764705882351, -\n 7.499999999999999, -14.441176470588236, -7.499999999999999, -\n 7.499999999999999, -7.499999999999999, -7.499999999999999, -\n 7.499999999999999, -7.499999999999999, -7.499999999999999]], [[-\n 13.246274902675406, -11.742746583844642, -14.533245644719841, -\n 4.285714285714285, -10.388807069219439, -11.206747339173734, -\n 4.285714285714285, -4.285714285714285, -4.285714285714285, -\n 4.285714285714285], [-9.145905020699661, -4.841618667247477, -\n 9.218958552423087, -4.285714285714285, -4.285714285714285, -\n 4.285714285714285, -4.285714285714283, -4.285714285714283, -\n 4.285714285714283, -4.285714285714283], 
[-12.03411513859275, -\n 8.57142857142857, -15.616204690831555, -8.57142857142857, -\n 8.57142857142857, -8.57142857142857, -8.57142857142857, -\n 8.57142857142857, -8.57142857142857, -8.57142857142857]], [[-\n 15.26280962470669, -13.681019910677742, -16.868493443177165, -\n 4.999999999999998, -11.883720930232553, -13.004326663061104, -\n 4.999999999999998, -4.999999999999998, -4.999999999999998, -\n 4.999999999999998], [-10.068193838520614, -5.671752735193775, -\n 10.099054988853647, -4.999999999999998, -4.999999999999998, -\n 4.999999999999998, -4.999999999999998, -4.999999999999998, -\n 4.999999999999998, -4.999999999999998], [-13.515151515151512, -\n 9.999999999999996, -17.15151515151515, -9.999999999999996, -\n 9.999999999999996, -9.999999999999996, -9.999999999999996, -\n 9.999999999999996, -9.999999999999996, -9.999999999999996]], [[-\n 18.048619027086353, -16.354914089347076, -20.098144329896904, -\n 5.999999999999999, -13.893333333333327, -15.47199999999999, -\n 5.999999999999998, -5.999999999999998, -5.999999999999998, -\n 5.999999999999998], [-11.364085664816024, -6.829408299891047, -\n 11.362958194659226, -5.999999999999998, -5.999999999999998, -\n 5.999999999999998, -5.999999999999998, -5.999999999999998, -\n 5.999999999999998, -5.999999999999998], [-15.569230769230767, -\n 11.999999999999996, -19.261538461538457, -11.999999999999996, -\n 11.999999999999996, -11.999999999999996, -11.999999999999996, -\n 11.999999999999996, -11.999999999999996, -11.999999999999996]], [[-\n 22.14552188552189, -20.28181818181818, -24.856363636363632, -\n 7.500000000000002, -16.749999999999993, -19.062499999999993, -7.5, -\n 7.500000000000002, -7.500000000000002, -7.500000000000002], [-\n 13.227691215343736, -8.540503875101978, -13.175865235686418, -7.5, -\n 7.500000000000001, -7.5, -7.499999999999998, -7.5, -7.5, -7.5], [-\n 18.625, -15.0, -22.375, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0\n ]], [[-28.76278844268961, -26.61680527433105, -32.56131830251732, -\n 
9.999999999999993, -21.169811320754697, -24.752580989676016, -\n 9.999999999999993, -9.999999999999993, -9.999999999999993, -\n 9.999999999999993], [-16.183356468130675, -11.33189687650437, -\n 16.033301790463963, -9.999999999999993, -9.999999999999991, -\n 9.999999999999993, -9.999999999999993, -9.999999999999993, -\n 9.999999999999993, -9.999999999999993], [-23.68253968253967, -\n 19.999999999999986, -27.49206349206348, -19.999999999999986, -\n 19.999999999999986, -19.999999999999986, -19.999999999999986, -\n 19.999999999999986, -19.999999999999986, -19.999999999999986]], [[-\n 41.27742867847752, -38.58932362753994, -47.172156505914224, -\n 14.999999999999755, -29.095238095237843, -35.13605442176845, -\n 14.999999999999753, -14.999999999999753, -14.999999999999753, -\n 14.999999999999753], [-21.789898957859354, -16.75709624029196, -\n 21.448166972857727, -14.99999999999974, -14.99999999999974, -\n 14.999999999999744, -14.999999999999735, -14.999999999999744, -\n 14.999999999999744, -14.999999999999744], [-33.74193548387047, -\n 29.999999999999503, -37.61290322580595, -29.999999999999503, -\n 29.999999999999503, -29.999999999999503, -29.999999999999503, -\n 29.999999999999503, -29.999999999999503, -29.999999999999503]], [[-\n 74.330382553884, -70.25959327963282, -85.68377649107512, -\n 29.99999408538547, -49.09676827893381, -60.80124278465696, -\n 29.999994085385474, -29.99999408538546, -29.999994085385453, -\n 29.999994085385445], [-37.67557701062915, -32.430971145564975, -\n 36.94165998316571, -29.999994085385467, -29.999994085385474, -\n 29.99999408538546, -29.999994085385474, -29.99999408538546, -\n 29.999994085385453, -29.999994085385445], [-63.80326685929545, -\n 59.99998817077086, -67.73769308880364, -59.99998817077086, -\n 59.99998817077086, -59.99998817077086, -59.99998817077086, -\n 59.99998817077086, -59.99998817077086, -59.99998817077086]]])\n', (73, 6140), True, 'import numpy as np\n'), ((6280, 6345), 'numpy.array', 'np.array', (['[0.5, 0.55, 0.6, 
0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]'], {}), '([0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])\n', (6288, 6345), True, 'import numpy as np\n'), ((6680, 6731), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (6690, 6731), True, 'import matplotlib.pyplot as plt\n'), ((6731, 6771), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (6741, 6771), True, 'import matplotlib.pyplot as plt\n'), ((6770, 6812), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S0."""'], {}), '("Defender\'s value in state S0.")\n', (6779, 6812), True, 'import matplotlib.pyplot as plt\n'), ((6814, 6926), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (6824, 6926), True, 'import matplotlib.pyplot as plt\n'), ((6924, 6934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6932, 6934), True, 'import matplotlib.pyplot as plt\n'), ((7278, 7329), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (7288, 7329), True, 'import matplotlib.pyplot as plt\n'), ((7329, 7369), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (7339, 7369), True, 'import matplotlib.pyplot as plt\n'), ((7368, 7410), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S1."""'], {}), '("Defender\'s value in state S1.")\n', (7377, 7410), True, 'import matplotlib.pyplot as plt\n'), ((7412, 7524), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max 
Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (7422, 7524), True, 'import matplotlib.pyplot as plt\n'), ((7522, 7532), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7530, 7532), True, 'import matplotlib.pyplot as plt\n'), ((7876, 7927), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (7886, 7927), True, 'import matplotlib.pyplot as plt\n'), ((7927, 7967), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (7937, 7967), True, 'import matplotlib.pyplot as plt\n'), ((7966, 8008), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S2."""'], {}), '("Defender\'s value in state S2.")\n', (7975, 8008), True, 'import matplotlib.pyplot as plt\n'), ((8010, 8122), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (8020, 8122), True, 'import matplotlib.pyplot as plt\n'), ((8120, 8130), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8128, 8130), True, 'import matplotlib.pyplot as plt\n'), ((8474, 8525), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (8484, 8525), True, 'import matplotlib.pyplot as plt\n'), ((8525, 8565), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (8535, 8565), True, 'import matplotlib.pyplot as plt\n'), ((8564, 8606), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S3."""'], {}), '("Defender\'s value in state 
S3.")\n', (8573, 8606), True, 'import matplotlib.pyplot as plt\n'), ((8608, 8720), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (8618, 8720), True, 'import matplotlib.pyplot as plt\n'), ((8718, 8728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8726, 8728), True, 'import matplotlib.pyplot as plt\n'), ((9072, 9123), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (9082, 9123), True, 'import matplotlib.pyplot as plt\n'), ((9123, 9163), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (9133, 9163), True, 'import matplotlib.pyplot as plt\n'), ((9162, 9204), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S4."""'], {}), '("Defender\'s value in state S4.")\n', (9171, 9204), True, 'import matplotlib.pyplot as plt\n'), ((9206, 9318), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (9216, 9318), True, 'import matplotlib.pyplot as plt\n'), ((9316, 9326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9324, 9326), True, 'import matplotlib.pyplot as plt\n'), ((9670, 9721), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (9680, 9721), True, 'import matplotlib.pyplot as plt\n'), ((9721, 9761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (9731, 
9761), True, 'import matplotlib.pyplot as plt\n'), ((9760, 9802), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S5."""'], {}), '("Defender\'s value in state S5.")\n', (9769, 9802), True, 'import matplotlib.pyplot as plt\n'), ((9804, 9916), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (9814, 9916), True, 'import matplotlib.pyplot as plt\n'), ((9914, 9924), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9922, 9924), True, 'import matplotlib.pyplot as plt\n'), ((10268, 10319), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (10278, 10319), True, 'import matplotlib.pyplot as plt\n'), ((10319, 10359), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (10329, 10359), True, 'import matplotlib.pyplot as plt\n'), ((10358, 10400), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S6."""'], {}), '("Defender\'s value in state S6.")\n', (10367, 10400), True, 'import matplotlib.pyplot as plt\n'), ((10402, 10514), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (10412, 10514), True, 'import matplotlib.pyplot as plt\n'), ((10512, 10522), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10520, 10522), True, 'import matplotlib.pyplot as plt\n'), ((10866, 10917), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', 
(10876, 10917), True, 'import matplotlib.pyplot as plt\n'), ((10917, 10957), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (10927, 10957), True, 'import matplotlib.pyplot as plt\n'), ((10956, 10998), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S7."""'], {}), '("Defender\'s value in state S7.")\n', (10965, 10998), True, 'import matplotlib.pyplot as plt\n'), ((11000, 11112), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (11010, 11112), True, 'import matplotlib.pyplot as plt\n'), ((11110, 11120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11118, 11120), True, 'import matplotlib.pyplot as plt\n'), ((11464, 11515), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (11474, 11515), True, 'import matplotlib.pyplot as plt\n'), ((11515, 11555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (11525, 11555), True, 'import matplotlib.pyplot as plt\n'), ((11554, 11596), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S8."""'], {}), '("Defender\'s value in state S8.")\n', (11563, 11596), True, 'import matplotlib.pyplot as plt\n'), ((11598, 11710), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (11608, 11710), True, 'import matplotlib.pyplot as plt\n'), ((11708, 11718), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(11716, 11718), True, 'import matplotlib.pyplot as plt\n'), ((12063, 12114), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Defender\'s Utility $\\\\longrightarrow$"""'], {}), '("Defender\'s Utility $\\\\longrightarrow$")\n', (12073, 12114), True, 'import matplotlib.pyplot as plt\n'), ((12114, 12154), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma \\\\longrightarrow$"""'], {}), "('$\\\\gamma \\\\longrightarrow$')\n", (12124, 12154), True, 'import matplotlib.pyplot as plt\n'), ((12153, 12195), 'matplotlib.pyplot.title', 'plt.title', (['"""Defender\'s value in state S9."""'], {}), '("Defender\'s value in state S9.")\n', (12162, 12195), True, 'import matplotlib.pyplot as plt\n'), ((12197, 12309), 'matplotlib.pyplot.legend', 'plt.legend', (["['Uniform Random Strategy', 'Optimal Mixed Strategy', 'Min Max Pure Strategy']"], {'loc': '"""lower left"""'}), "(['Uniform Random Strategy', 'Optimal Mixed Strategy',\n 'Min Max Pure Strategy'], loc='lower left')\n", (12207, 12309), True, 'import matplotlib.pyplot as plt\n'), ((12307, 12317), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12315, 12317), True, 'import matplotlib.pyplot as plt\n'), ((6552, 6570), 'numpy.asarray', 'np.asarray', (['V0_URS'], {}), '(V0_URS)\n', (6562, 6570), True, 'import numpy as np\n'), ((6600, 6618), 'numpy.asarray', 'np.asarray', (['V0_OPT'], {}), '(V0_OPT)\n', (6610, 6618), True, 'import numpy as np\n'), ((6648, 6666), 'numpy.asarray', 'np.asarray', (['V0_MMP'], {}), '(V0_MMP)\n', (6658, 6666), True, 'import numpy as np\n'), ((7150, 7168), 'numpy.asarray', 'np.asarray', (['V1_URS'], {}), '(V1_URS)\n', (7160, 7168), True, 'import numpy as np\n'), ((7198, 7216), 'numpy.asarray', 'np.asarray', (['V1_OPT'], {}), '(V1_OPT)\n', (7208, 7216), True, 'import numpy as np\n'), ((7246, 7264), 'numpy.asarray', 'np.asarray', (['V1_MMP'], {}), '(V1_MMP)\n', (7256, 7264), True, 'import numpy as np\n'), ((7748, 7766), 'numpy.asarray', 'np.asarray', (['V2_URS'], {}), '(V2_URS)\n', (7758, 
7766), True, 'import numpy as np\n'), ((7796, 7814), 'numpy.asarray', 'np.asarray', (['V2_OPT'], {}), '(V2_OPT)\n', (7806, 7814), True, 'import numpy as np\n'), ((7844, 7862), 'numpy.asarray', 'np.asarray', (['V2_MMP'], {}), '(V2_MMP)\n', (7854, 7862), True, 'import numpy as np\n'), ((8346, 8364), 'numpy.asarray', 'np.asarray', (['V3_URS'], {}), '(V3_URS)\n', (8356, 8364), True, 'import numpy as np\n'), ((8394, 8412), 'numpy.asarray', 'np.asarray', (['V3_OPT'], {}), '(V3_OPT)\n', (8404, 8412), True, 'import numpy as np\n'), ((8442, 8460), 'numpy.asarray', 'np.asarray', (['V3_MMP'], {}), '(V3_MMP)\n', (8452, 8460), True, 'import numpy as np\n'), ((8944, 8962), 'numpy.asarray', 'np.asarray', (['V4_URS'], {}), '(V4_URS)\n', (8954, 8962), True, 'import numpy as np\n'), ((8992, 9010), 'numpy.asarray', 'np.asarray', (['V4_OPT'], {}), '(V4_OPT)\n', (9002, 9010), True, 'import numpy as np\n'), ((9040, 9058), 'numpy.asarray', 'np.asarray', (['V4_MMP'], {}), '(V4_MMP)\n', (9050, 9058), True, 'import numpy as np\n'), ((9542, 9560), 'numpy.asarray', 'np.asarray', (['V5_URS'], {}), '(V5_URS)\n', (9552, 9560), True, 'import numpy as np\n'), ((9590, 9608), 'numpy.asarray', 'np.asarray', (['V5_OPT'], {}), '(V5_OPT)\n', (9600, 9608), True, 'import numpy as np\n'), ((9638, 9656), 'numpy.asarray', 'np.asarray', (['V5_MMP'], {}), '(V5_MMP)\n', (9648, 9656), True, 'import numpy as np\n'), ((10140, 10158), 'numpy.asarray', 'np.asarray', (['V6_URS'], {}), '(V6_URS)\n', (10150, 10158), True, 'import numpy as np\n'), ((10188, 10206), 'numpy.asarray', 'np.asarray', (['V6_OPT'], {}), '(V6_OPT)\n', (10198, 10206), True, 'import numpy as np\n'), ((10236, 10254), 'numpy.asarray', 'np.asarray', (['V6_MMP'], {}), '(V6_MMP)\n', (10246, 10254), True, 'import numpy as np\n'), ((10738, 10756), 'numpy.asarray', 'np.asarray', (['V7_URS'], {}), '(V7_URS)\n', (10748, 10756), True, 'import numpy as np\n'), ((10786, 10804), 'numpy.asarray', 'np.asarray', (['V7_OPT'], {}), '(V7_OPT)\n', (10796, 10804), 
True, 'import numpy as np\n'), ((10834, 10852), 'numpy.asarray', 'np.asarray', (['V7_MMP'], {}), '(V7_MMP)\n', (10844, 10852), True, 'import numpy as np\n'), ((11336, 11354), 'numpy.asarray', 'np.asarray', (['V8_URS'], {}), '(V8_URS)\n', (11346, 11354), True, 'import numpy as np\n'), ((11384, 11402), 'numpy.asarray', 'np.asarray', (['V8_OPT'], {}), '(V8_OPT)\n', (11394, 11402), True, 'import numpy as np\n'), ((11432, 11450), 'numpy.asarray', 'np.asarray', (['V8_MMP'], {}), '(V8_MMP)\n', (11442, 11450), True, 'import numpy as np\n'), ((11935, 11953), 'numpy.asarray', 'np.asarray', (['V9_URS'], {}), '(V9_URS)\n', (11945, 11953), True, 'import numpy as np\n'), ((11983, 12001), 'numpy.asarray', 'np.asarray', (['V9_OPT'], {}), '(V9_OPT)\n', (11993, 12001), True, 'import numpy as np\n'), ((12031, 12049), 'numpy.asarray', 'np.asarray', (['V9_MMP'], {}), '(V9_MMP)\n', (12041, 12049), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
get_ipython().system('pip install dask')
from dask import dataframe
import pandas as pd
import yfinance as yf
import os
import logging
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage, leaves_list
import seaborn as sns
import matplotlib.pyplot as plt
import bahc
class MeanVariancePortfolio():
    def __init__(self, cfg):
        # cfg: configuration object; only cfg.data_dir is read here
        # (directory containing the per-ticker parquet price files).
        self.cfg = cfg
        # Eagerly load and preprocess all closing prices at construction time.
        self.data = self.load_data()
def load_data(self):
return self.preprocess(pd.concat([pd.read_parquet(os.path.join(self.cfg.data_dir, f))['Close'] for f in os.listdir(self.cfg.data_dir)]))
def preprocess(self, x, percent0 = 0.5, percent1 = 0.2):
tmp = x.dropna(axis=0, thresh=int(percent0*x.shape[1])).dropna(axis=1, thresh=int(percent1*x.shape[0])).fillna(method="ffill")
dropped = set(x.columns) - set(tmp.columns)
logging.info("Preprocessing dropped the following stocks" + "-".join(list(dropped)))
return tmp
def clean_portfolio(self):
self.data.fillna(method = 'ffill', inplace = True)
columns_missing = self.data.columns[self.data.isna().sum() > 10].values
self.data.drop(columns_missing, inplace= True, axis=1)
self.data.fillna(method = 'bfill', inplace = True)
return self
def min_var_portfolio(mu, cov, target_return):
inv_cov = np.linalg.inv(cov)
ones = np.ones(len(mu))[:, np.newaxis]
a = ones.T @ inv_cov @ ones
b = mu.T @ inv_cov @ ones
c = mu.T.to_numpy() @ inv_cov @ mu
a = a[0][0]
b = b.loc['mu', 0]
c = c.loc[0, 'mu']
num1 = (a * inv_cov @ mu - b * inv_cov @ ones) * target_return
num2 = (c * inv_cov @ ones- b * inv_cov @ mu)
den = a*c - b**2
w = (num1 + num2) / den
var = w.T.to_numpy() @ cov.to_numpy() @ w.to_numpy()
return w, var**0.5
def __call__(self, training_period = 10, num_assets = 50, rf = 0.05, bahc_bool = False, plot_bool = True):
def get_log_returns_matrix(portfolio_data):
log_returns_matrix = np.log(portfolio_data/portfolio_data.shift(1))
log_returns_matrix.fillna(0, inplace=True)
log_returns_matrix = log_returns_matrix[(log_returns_matrix.T != 0).any()]
return log_returns_matrix
def get_stocks_reordered(log_returns_matrix):
cov_daily = log_returns_matrix.cov()
stocks = list(cov_daily.columns)
link = linkage(cov_daily, 'average')
reordered_cov_daily = cov_daily.copy()
stocks_reordered = [stocks[i] for i in leaves_list(link)]
reordered_cov_daily = reordered_cov_daily[stocks_reordered]
reordered_cov_daily = reordered_cov_daily.reindex(stocks_reordered)
return stocks_reordered, reordered_cov_daily
def get_bahc_cov_matrix(log_returns_matrix, stocks_reordered):
cov_bahc = pd.DataFrame(bahc.filterCovariance(np.array(log_returns_matrix).T))
cov_bahc.columns, cov_bahc.index = log_returns_matrix.columns, log_returns_matrix.columns
cov_bahc = cov_bahc[stocks_reordered]
cov_bahc = cov_bahc.reindex(stocks_reordered)
return cov_bahc
def get_weights(mu_vector, cov_matrix, rf):
ones = np.ones(mu_vector.shape[0])[:, np.newaxis]
num = np.linalg.inv(cov_matrix) @ (mu_vector - rf * ones)
den = ones.T @ np.linalg.inv(cov_matrix) @ (mu_vector - rf * ones)
w = (np.asarray(num) / np.asarray(den))
weights = pd.DataFrame(index = mu_vector.index, columns = ['Weights'])
weights['Weights'] = w
return w, weights
def get_cumulative_returns(w, log_returns_matrix):
weighted_returns = (w.T * log_returns_matrix)
portfolio_returns = weighted_returns.sum(axis=1)
cumulative_returns = (portfolio_returns + 1).cumprod()
return cumulative_returns
def plot_cumulative_returns(cumulative_returns):
fig = plt.figure()
ax1 = fig.add_axes([0.1,0.1,0.8,0.8])
ax1.plot(cumulative_returns)
ax1.set_xlabel('Date')
ax1.set_ylabel("Cumulative Returns")
ax1.set_title("Portfolio Cumulative Returns")
plt.show()
portfolio = self.clean_portfolio()
portfolio_data = self.data.iloc[:, :num_assets]
stocks_universe = portfolio_data.columns
log_returns_matrix = get_log_returns_matrix(portfolio_data)
mu_vector = pd.DataFrame(index = stocks_universe, columns = ['mu'])
if bahc_bool == False:
cov_matrix = log_returns_matrix.cov() * 252
else:
stocks_reordered, _ = get_stocks_reordered(log_returns_matrix)
cov_matrix = get_bahc_cov_matrix(log_returns_matrix, stocks_reordered) * 252
for stock in stocks_universe:
series = portfolio_data[stock]
log_returns = np.log(series/series.shift(1)).dropna()
ann_log_return = np.sum(log_returns) / training_period
mu_vector.loc[stock] = ann_log_return
w_tangency, _ = get_weights(mu_vector, cov_matrix, rf)
cumulative_returns_tangent = get_cumulative_returns(w_tangency, log_returns_matrix)
if(plot_bool): plot_cumulative_returns(cumulative_returns_tangent)
return cumulative_returns_tangent
| [
"scipy.cluster.hierarchy.leaves_list",
"os.listdir",
"numpy.ones",
"numpy.asarray",
"os.path.join",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.linalg.inv",
"numpy.array",
"scipy.cluster.hierarchy.linkage",
"pandas.DataFrame",
"matplotlib.pyplot.show"
] | [((1396, 1414), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (1409, 1414), True, 'import numpy as np\n'), ((4661, 4712), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'stocks_universe', 'columns': "['mu']"}), "(index=stocks_universe, columns=['mu'])\n", (4673, 4712), True, 'import pandas as pd\n'), ((2529, 2558), 'scipy.cluster.hierarchy.linkage', 'linkage', (['cov_daily', '"""average"""'], {}), "(cov_daily, 'average')\n", (2536, 2558), False, 'from scipy.cluster.hierarchy import dendrogram, linkage, leaves_list\n'), ((3644, 3700), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'mu_vector.index', 'columns': "['Weights']"}), "(index=mu_vector.index, columns=['Weights'])\n", (3656, 3700), True, 'import pandas as pd\n'), ((4146, 4158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4156, 4158), True, 'import matplotlib.pyplot as plt\n'), ((4404, 4414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4412, 4414), True, 'import matplotlib.pyplot as plt\n'), ((3378, 3405), 'numpy.ones', 'np.ones', (['mu_vector.shape[0]'], {}), '(mu_vector.shape[0])\n', (3385, 3405), True, 'import numpy as np\n'), ((3439, 3464), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_matrix'], {}), '(cov_matrix)\n', (3452, 3464), True, 'import numpy as np\n'), ((3587, 3602), 'numpy.asarray', 'np.asarray', (['num'], {}), '(num)\n', (3597, 3602), True, 'import numpy as np\n'), ((3605, 3620), 'numpy.asarray', 'np.asarray', (['den'], {}), '(den)\n', (3615, 3620), True, 'import numpy as np\n'), ((5160, 5179), 'numpy.sum', 'np.sum', (['log_returns'], {}), '(log_returns)\n', (5166, 5179), True, 'import numpy as np\n'), ((2661, 2678), 'scipy.cluster.hierarchy.leaves_list', 'leaves_list', (['link'], {}), '(link)\n', (2672, 2678), False, 'from scipy.cluster.hierarchy import dendrogram, linkage, leaves_list\n'), ((3518, 3543), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_matrix'], {}), '(cov_matrix)\n', (3531, 3543), True, 'import numpy as np\n'), ((606, 
635), 'os.listdir', 'os.listdir', (['self.cfg.data_dir'], {}), '(self.cfg.data_dir)\n', (616, 635), False, 'import os\n'), ((3027, 3055), 'numpy.array', 'np.array', (['log_returns_matrix'], {}), '(log_returns_matrix)\n', (3035, 3055), True, 'import numpy as np\n'), ((551, 585), 'os.path.join', 'os.path.join', (['self.cfg.data_dir', 'f'], {}), '(self.cfg.data_dir, f)\n', (563, 585), False, 'import os\n')] |
"""
The idea of late parallelization is that we don't have to
slice our tensor network in the beginning of the contraction.
"""
import psutil
import numpy as np
from functools import partial
from loguru import logger as log
import qtree
import qtensor as qtn
from qtensor.optimisation import Optimizer, RGreedyOptimizer
from qtensor.optimisation import GreedyParvars
def slice_greedy(graph, p_bunch, ordering_algo='greedy'):
    """Greedily pick ``p_bunch`` parallel variables, slicing ``graph`` in place.

    Raises when the searcher reports an estimated out-of-memory condition.
    Returns the list of selected parallel variables.
    """
    orderer = qtn.toolbox.get_ordering_algo(ordering_algo)
    searcher = GreedyParvars(graph)
    _peo, _path = orderer._get_ordering_ints(graph)  # result not used further
    taken = 0
    while taken < p_bunch:
        failed = searcher.step()
        n_parvars = len(searcher.result)
        log.debug('Parvars count: {}. Amps count: {}', n_parvars, 2**n_parvars)
        if failed:
            raise Exception('Estimated OOM')
        taken += 1
    return searcher.result
class LateParOptimizer(Optimizer):
    """Optimizer that delays slicing: it searches, step by step, for the best
    point in the contraction at which to slice parallel variables."""

    def __init__(self, target_tw=None, par_vars=None,
                 p_bunch=None,
                 n_bunches=None,
                 ordering_algo='greedy', slicing_algo='greedy'):
        """
        The optimizer works in the following way:

        1. Find ordering with provided optimizer
        2. For each step:
        3.    Contract graph up to the step;
        4.    Find p_bunch indices to slice;
        5.    Find new ordering;
        6.    Save the step with best performance.

        Args:
            target_tw (int): Slice until reached this tw.
                If None, use system memory to estimate.
                Defaults to None.
            par_vars (int): number of parallel vars to split. Overrides target_tw.
            n_bunches: How many bunches to slice. Overrides target_tw if not None.
        """
        self.orderer = qtn.toolbox.get_ordering_algo(ordering_algo)
        # Bug fix: the original unconditionally overwrote self.target_tw with
        # the (possibly None) argument, discarding the memory-based estimate.
        if target_tw is None:
            self.target_tw = self._get_max_tw()
        else:
            self.target_tw = target_tw
        self.n_bunches = n_bunches
        self.par_vars = par_vars
        if not n_bunches:
            # Default: slice one variable at a time, one bunch per parallel var.
            self.p_bunch = 1
            self.n_bunches = par_vars
        else:
            if p_bunch is None:
                self.p_bunch = par_vars // n_bunches
            else:
                self.p_bunch = p_bunch

        if slicing_algo == 'greedy':
            self.slicer = partial(slice_greedy, ordering_algo=ordering_algo)
        else:
            raise ValueError(f'Invalid slicing algorithm: {slicing_algo}')

    def _get_max_tw(self):
        """Estimate the largest treewidth whose contraction fits in RAM."""
        if hasattr(self, 'max_tw') and self.max_tw is not None:
            return self.max_tw
        mem = psutil.virtual_memory()
        avail = mem.available
        log.info('Memory available: {}', avail)
        # Cost = 16*2**tw  =>  tw = log2(cost/16) = log2(cost) - 4.
        # Use builtin int(): np.int was deprecated in NumPy 1.20 and removed
        # in NumPy 1.24.
        return int(np.log2(avail)) - 4

    def find_slice_at_step(self, ordering, graph, p_bunch):
        """
        Scaling:
            O(n*(Slicer(n)+Ordering(n))) where n is the number of nodes in the graph.
            O(2n^2) for greedy

        Returns:
            graph: sliced graph
            p_vars: parallel_vars
            step: index in ordering at which to slice
            peo: peo after slice
            treewidth: treewidth after slice
        """
        slice_candidates = []
        largest_tw = 0
        for node in ordering[:-p_bunch]:
            # Room for optimization: stop earlier
            # Room for optimization: do not copy graph
            sliced_graph = graph.copy()
            slice_vars = self.slicer(sliced_graph, p_bunch=p_bunch)
            _peo, _path = self.orderer._get_ordering_ints(sliced_graph)
            step_tw = qtn.utils.n_neighbors(graph, node) + 1
            largest_tw = max(step_tw, largest_tw)
            # Candidate cost: the worse of what was contracted so far and
            # what remains after slicing here.
            _tw = max(largest_tw, max(_path))
            slice_candidates.append(
                (slice_vars, _peo, _tw, sliced_graph)
            )
            qtn.utils.eliminate_node_no_structure(graph, node)

        slice_vars, peos, tws, graphs = zip(*slice_candidates)
        best_steps, *_ = np.where(tws == np.min(tws))
        best_step = best_steps[0]
        best_peo = peos[best_step]
        best_tw = tws[best_step]
        assert len(ordering[:best_step]) + len(best_peo) + p_bunch == len(ordering), \
            f"Invalid ordering: total nodes: {len(ordering)}," \
            f" step: {best_step}, len next_peo: {len(best_peo)}"
        return graphs[best_step], slice_vars[best_step], best_step, best_peo, best_tw

    def optimize(self, tensor_net):
        """
        Args:
            (qtensor.TensorNet): Tensor network to optimize
        Returns:
            parallel_scheme list((contraction_order, slice_vars)):
                Map from parallel var to step at which it is removed
        Mutates:
            self.treewidth
        """
        line_graph = tensor_net.get_line_graph()
        free_vars = tensor_net.free_vars
        ignored_vars = tensor_net.ket_vars + tensor_net.bra_vars

        if free_vars:
            current_graph = qtree.graph_model.make_clique_on(line_graph, free_vars)
        else:
            current_graph = line_graph

        current_ordering, tw_path = self.orderer._get_ordering_ints(current_graph)
        contraction_schedule = []
        log.info(f"Initial treewidth: {max(tw_path)}")
        # --
        if self.n_bunches is not None:
            # Iterate for fixed par_vars
            self.target_tw = 0
            bunches = [self.par_vars // self.n_bunches] * self.n_bunches
            _remaining = self.par_vars % self.n_bunches
            bunches = bunches + [_remaining]
            bunches = [x for x in bunches if x != 0]
        else:
            # Iterate until reach target_tw
            n_iter = len(current_ordering)
            bunches = [self.p_bunch] * n_iter
        # --

        for p_bunch in bunches:
            _a_bunch_of_stuff = self.find_slice_at_step(
                current_ordering, current_graph, p_bunch
            )
            current_graph, slice_vars, step, next_ordering, next_tw = _a_bunch_of_stuff
            contraction_schedule.append(
                (current_ordering[:step], slice_vars)
            )
            current_ordering = [x for x in next_ordering if x not in slice_vars]
            log.info(f"Sliced {len(slice_vars)}, next treewidth: {next_tw}")
            if next_tw <= self.target_tw:
                break

        log.info(f"Removed {sum(bunches)} variables, reduced tw by {max(tw_path)-next_tw}")
        # Contract leftovers
        if free_vars:
            if not all(x in current_ordering for x in free_vars):
                log.warning(f"Not all free variables are in the last ordering chunk!")
            current_ordering = qtree.graph_model.get_equivalent_peo(
                current_graph, current_ordering, free_vars
            )
            next_tw_eq = qtree.graph_model.get_treewidth_from_peo(
                current_graph, current_ordering
            )
            assert next_tw == next_tw_eq

        self.treewidth = next_tw
        contraction_schedule.append((current_ordering, tuple()))
        # Schedule entries are (ordering_chunk, sliced_vars). The bra/ket
        # (ignored) variables must be prepended to the first *ordering* chunk.
        # Bug fix: the original swapped the unpacked names and concatenated
        # ignored_vars onto the sliced-variables tuple instead.
        first_ordering, first_slices = contraction_schedule[0]
        first_ordering = ignored_vars + first_ordering
        contraction_schedule[0] = (first_ordering, first_slices)
        return contraction_schedule
| [
"qtensor.toolbox.get_ordering_algo",
"qtree.graph_model.get_equivalent_peo",
"loguru.logger.info",
"loguru.logger.debug",
"qtree.graph_model.make_clique_on",
"qtree.graph_model.get_treewidth_from_peo",
"qtensor.utils.n_neighbors",
"loguru.logger.warning",
"psutil.virtual_memory",
"functools.partia... | [((479, 523), 'qtensor.toolbox.get_ordering_algo', 'qtn.toolbox.get_ordering_algo', (['ordering_algo'], {}), '(ordering_algo)\n', (508, 523), True, 'import qtensor as qtn\n'), ((539, 559), 'qtensor.optimisation.GreedyParvars', 'GreedyParvars', (['graph'], {}), '(graph)\n', (552, 559), False, 'from qtensor.optimisation import GreedyParvars\n'), ((722, 789), 'loguru.logger.debug', 'log.debug', (['"""Parvars count: {}. Amps count: {}"""', 'pv_cnt', '(2 ** pv_cnt)'], {}), "('Parvars count: {}. Amps count: {}', pv_cnt, 2 ** pv_cnt)\n", (731, 789), True, 'from loguru import logger as log\n'), ((1801, 1845), 'qtensor.toolbox.get_ordering_algo', 'qtn.toolbox.get_ordering_algo', (['ordering_algo'], {}), '(ordering_algo)\n', (1830, 1845), True, 'import qtensor as qtn\n'), ((2655, 2678), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (2676, 2678), False, 'import psutil\n'), ((2717, 2756), 'loguru.logger.info', 'log.info', (['"""Memory available: {}"""', 'avail'], {}), "('Memory available: {}', avail)\n", (2725, 2756), True, 'from loguru import logger as log\n'), ((2378, 2428), 'functools.partial', 'partial', (['slice_greedy'], {'ordering_algo': 'ordering_algo'}), '(slice_greedy, ordering_algo=ordering_algo)\n', (2385, 2428), False, 'from functools import partial\n'), ((3959, 4009), 'qtensor.utils.eliminate_node_no_structure', 'qtn.utils.eliminate_node_no_structure', (['graph', 'node'], {}), '(graph, node)\n', (3996, 4009), True, 'import qtensor as qtn\n'), ((5083, 5138), 'qtree.graph_model.make_clique_on', 'qtree.graph_model.make_clique_on', (['line_graph', 'free_vars'], {}), '(line_graph, free_vars)\n', (5115, 5138), False, 'import qtree\n'), ((6777, 6862), 'qtree.graph_model.get_equivalent_peo', 'qtree.graph_model.get_equivalent_peo', (['current_graph', 'current_ordering', 'free_vars'], {}), '(current_graph, current_ordering, free_vars\n )\n', (6813, 6862), False, 'import qtree\n'), ((6914, 6987), 
'qtree.graph_model.get_treewidth_from_peo', 'qtree.graph_model.get_treewidth_from_peo', (['current_graph', 'current_ordering'], {}), '(current_graph, current_ordering)\n', (6954, 6987), False, 'import qtree\n'), ((2849, 2863), 'numpy.log2', 'np.log2', (['avail'], {}), '(avail)\n', (2856, 2863), True, 'import numpy as np\n'), ((3707, 3741), 'qtensor.utils.n_neighbors', 'qtn.utils.n_neighbors', (['graph', 'node'], {}), '(graph, node)\n', (3728, 3741), True, 'import qtensor as qtn\n'), ((4115, 4126), 'numpy.min', 'np.min', (['tws'], {}), '(tws)\n', (4121, 4126), True, 'import numpy as np\n'), ((6675, 6745), 'loguru.logger.warning', 'log.warning', (['f"""Not all free variables are in the last ordering chunk!"""'], {}), "(f'Not all free variables are in the last ordering chunk!')\n", (6686, 6745), True, 'from loguru import logger as log\n')] |
'''
Created on 31 Oct 2014
@author: <NAME> (<EMAIL>)
@copyright: (c) 2014 <NAME>
@license: MIT
'''
# standard library
import unittest
# external libraries
import numpy as np
import nose
# local libraries
from zignal import Audio
class Test_single_channel(unittest.TestCase):
    """Peak detection on a mono Audio signal."""

    def setUp(self):
        # Fresh 4-sample silent buffer for every test.
        self.y = np.zeros(4)

    def check_values(self, values, expected, position):
        """Assert that Audio.peak() reports `expected` at sample `position`."""
        audio = Audio(fs=10, initialdata=values)
        peak, idx = audio.peak()
        self.assertTrue(len(peak) == 1)
        self.assertTrue(len(idx) == 1)
        print("index: %3i peak: %f" % (idx, peak))
        print(audio)
        self.assertAlmostEqual(peak, expected, places=3)
        self.assertEqual(idx, position)

    def test_positive(self):
        # Largest magnitude is a positive sample.
        self.y[1] = 2.0
        self.y[2] = 2.2     # <-- peak
        self.y[3] = -1.2
        print("init data: %s" % self.y)
        self.check_values(self.y, 2.2, 2)

    def test_negative(self):
        # Largest magnitude is a negative sample.
        self.y[1] = 2.0
        self.y[2] = 3.19
        self.y[3] = -3.2    # <-- peak
        print("init data: %s" % self.y)
        self.check_values(self.y, -3.2, 3)
class Test_multi_channel(unittest.TestCase):
    """Peak detection on a two-channel Audio signal."""

    def setUp(self):
        # Fresh 4-sample, 2-channel silent buffer for every test.
        self.y = np.zeros((4, 2))

    def check_values(self, values, expected, position):
        """Assert per-channel peak values and their sample indices."""
        audio = Audio(fs=10, initialdata=values)
        peak, idx = audio.peak()
        self.assertTrue(len(peak) == 2)
        self.assertTrue(len(idx) == 2)
        print("index: %s peak: %s" % (idx, peak))
        print(audio)
        self.assertAlmostEqual(peak[0], expected[0], places=3)
        self.assertAlmostEqual(peak[1], expected[1], places=3)
        self.assertEqual(idx[0], position[0])
        self.assertEqual(idx[1], position[1])

    def test_positive(self):
        self.y[1][0] = 1.0
        self.y[2][0] = 2.3   # <-- peak, channel 0
        self.y[1][1] = -4.1  # <-- peak, channel 1
        self.y[2][1] = 3.0
        print(self.y)
        self.check_values(self.y, [2.3, -4.1], [2, 1])

    def test_negative(self):
        self.y[1][0] = 1.0
        self.y[2][0] = 2.0   # <-- peak, channel 0
        self.y[0][1] = -4.0  # <-- peak, channel 1
        self.y[1][1] = 3.0
        print(self.y)
        self.check_values(self.y, [2, -4], [2, 0])
if __name__ == "__main__":
    # Run this module's tests through nose with verbose output and
    # DEBUG-level logging in a fixed, readable format.
    noseargs = [__name__,
                "--verbosity=2",
                "--logging-format=%(asctime)s %(levelname)-8s: %(name)-15s "+
                "%(module)-15s %(funcName)-20s %(message)s",
                "--logging-level=DEBUG",
                __file__,
                ]
    nose.run(argv=noseargs)
| [
"zignal.Audio",
"numpy.zeros",
"nose.run"
] | [((2514, 2537), 'nose.run', 'nose.run', ([], {'argv': 'noseargs'}), '(argv=noseargs)\n', (2522, 2537), False, 'import nose\n'), ((318, 329), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (326, 329), True, 'import numpy as np\n'), ((399, 431), 'zignal.Audio', 'Audio', ([], {'fs': '(10)', 'initialdata': 'values'}), '(fs=10, initialdata=values)\n', (404, 431), False, 'from zignal import Audio\n'), ((1190, 1206), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (1198, 1206), True, 'import numpy as np\n'), ((1276, 1308), 'zignal.Audio', 'Audio', ([], {'fs': '(10)', 'initialdata': 'values'}), '(fs=10, initialdata=values)\n', (1281, 1308), False, 'from zignal import Audio\n')] |
import numpy as np
from scipy.spatial.distance import pdist, squareform
from itertools import permutations
import matplotlib.pyplot as plt
def generate_points(num=7):
    """Draw `num` random 2-D city locations, uniform over [0, 1) x [0, 1)."""
    return np.random.rand(num, 2)
def calculate_distance_matrix(point_array):
    """Return the full, symmetric pairwise Euclidean distance matrix.

    `squareform` expands the condensed pdist vector; this is a little less
    efficient than working with the condensed form but simpler to index.
    """
    condensed = pdist(point_array, 'euclidean')
    return squareform(condensed)
def generate_paths(num=7):
    """Yield every visiting order of `num` cities as a tuple of indices."""
    return permutations(list(range(num)))
def generate_path_lengths(distance_matrix):
    """Yield (total_length, route) for every visiting order of the cities.

    The length sums consecutive hops only (an open path — the tour is not
    closed back to the start).
    """
    num_cities = distance_matrix.shape[0]
    for route in permutations(list(range(num_cities))):
        hops = sum(distance_matrix[src, dst]
                   for src, dst in zip(route, route[1:]))
        yield hops, route
def main():
    """Brute-force a 7-city TSP instance and visualise the best open path."""
    cities = generate_points(num=7)
    distances = calculate_distance_matrix(cities)
    best_length, best_route = min(generate_path_lengths(distances))
    print("Points:\n", cities)
    print("Shortest path:\n", best_route)
    print("Path length:\n", best_length)
    # Draw the optimal route as a red polyline over the scattered cities.
    tour = cities[list(best_route)]
    plt.plot(tour[:, 0], tour[:, 1], c='r')
    plt.scatter(cities[:, 0], cities[:, 1])
    plt.show()
if __name__ == "__main__":
    # Run the demo only when executed as a script (not on import).
    main()
| [
"numpy.random.rand",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show"
] | [((179, 201), 'numpy.random.rand', 'np.random.rand', (['num', '(2)'], {}), '(num, 2)\n', (193, 201), True, 'import numpy as np\n'), ((1037, 1096), 'matplotlib.pyplot.plot', 'plt.plot', (['ordered_points[:, 0]', 'ordered_points[:, 1]'], {'c': '"""r"""'}), "(ordered_points[:, 0], ordered_points[:, 1], c='r')\n", (1045, 1096), True, 'import matplotlib.pyplot as plt\n'), ((1101, 1140), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points[:, 0]', 'points[:, 1]'], {}), '(points[:, 0], points[:, 1])\n', (1112, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1151, 1153), True, 'import matplotlib.pyplot as plt\n'), ((396, 427), 'scipy.spatial.distance.pdist', 'pdist', (['point_array', '"""euclidean"""'], {}), "(point_array, 'euclidean')\n", (401, 427), False, 'from scipy.spatial.distance import pdist, squareform\n')] |
"""
Process an input dataset into a format suitable for machine learning.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import gzip
import pandas as pd
import numpy as np
import csv
import numbers
import tempfile
from rdkit.Chem import rdmolfiles
from rdkit.Chem import rdmolops
from rdkit import Chem
import time
import sys
import pdb
from deepchem.utils.save import log
from deepchem.utils.save import load_csv_files
#from deepchem.utils.save import load_sdf_files
#from deepchem.utils.save import encode_fasta_sequence
from deepchem.feat import UserDefinedFeaturizer
from dcCustom.data import DiskDataset
from dcCustom.feat import Protein
def convert_df_to_numpy(df, tasks, verbose=False):
  """Transforms a dataframe containing deepchem input into numpy arrays.

  Parameters
  ----------
  df: pd.DataFrame
    Dataframe with one column per task.
  tasks: list
    Names of the task columns to extract.
  verbose: bool
    Unused; kept for backward compatibility with existing callers.

  Returns
  -------
  (y, w): tuple of float np.ndarray, each (n_samples, n_tasks)
    Label matrix and matching weight matrix. Entries whose value in `df`
    is the empty string are treated as missing: label 0, weight 0.
  """
  n_samples = df.shape[0]
  n_tasks = len(tasks)
  y = np.hstack(
      [np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
  w = np.ones((n_samples, n_tasks))
  # Empty strings mark missing labels. This vectorized mask replaces the
  # original per-element double loops.
  missing = np.asarray(y == "")
  if missing.shape != y.shape:
    # All-numeric labels: the comparison collapsed to a scalar; nothing missing.
    missing = np.zeros(y.shape, dtype=bool)
  y = np.where(missing, 0, y)
  w[missing] = 0.
  return y.astype(float), w.astype(float)
def featurize_protein(df, field, source_field, prot_seq_dict, log_every_N=500, verbose=True):
  '''Build Protein objects for every row of the dataframe.

  Mirrors the interface of the molecule featurizers but does no real
  featurization: each (source, name) pair is simply wrapped together with
  its sequence looked up in prot_seq_dict.'''
  names = df[field].tolist()
  origins = df[source_field].tolist()
  wrapped = []
  for origin, name in zip(origins, names):
    sequence = prot_seq_dict[(origin, name)]
    wrapped.append([Protein(name, source=origin, sequence=sequence)])
  # Shape: (n_rows, 1) object array, matching the molecule featurizers.
  return np.array(wrapped)
def featurize_smiles_df(df, featurizer, field, log_every_N=1000, verbose=True):
  """Featurize individual compounds in dataframe.

  Given a featurizer that operates on individual chemical compounds
  or macromolecules, compute & add features for that compound to the
  features dataframe.

  RDKit parsing noise is redirected to ./logs/error.log while the loop
  runs; stderr is always restored afterwards.

  Returns
  -------
  (features, valid_inds): np.ndarray of per-molecule features and a boolean
    mask marking rows that featurized successfully.
  """
  sample_elems = df[field].tolist()

  features = []
  stderr_fileno = sys.stderr.fileno()
  stderr_save = os.dup(stderr_fileno)
  stderr_fd = open('./logs/error.log', 'a')
  os.dup2(stderr_fd.fileno(), stderr_fileno)
  try:
    for ind, elem in enumerate(sample_elems):
      mol = Chem.MolFromSmiles(elem)
      # TODO (ytz) this is a bandage solution to reorder the atoms so
      # that they're always in the same canonical order. Presumably this
      # should be correctly implemented in the future for graph mols.
      if mol:
        new_order = rdmolfiles.CanonicalRankAtoms(mol)
        mol = rdmolops.RenumberAtoms(mol, new_order)
      if ind % log_every_N == 0:
        log("Featurizing sample %d" % ind, verbose)
      features.append(featurizer.featurize([mol], smiles=elem))
  finally:
    # Bug fix: restore stderr and release both descriptors even if
    # featurization raises (previously an exception left stderr hijacked
    # and leaked the duplicated descriptor).
    stderr_fd.close()
    os.dup2(stderr_save, stderr_fileno)
    os.close(stderr_save)

  valid_inds = np.array(
      [1 if elt.size > 0 else 0 for elt in features], dtype=bool)
  features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
  return np.array(features), valid_inds
def featurize_smiles_np(arr, featurizer, log_every_N=1000, verbose=True):
  """Featurize individual compounds held in a numpy array of SMILES strings.

  Each string is parsed with RDKit, canonically reordered when parsing
  succeeds, and passed to the featurizer; rows whose featurization produced
  nothing are dropped from the flattened result.
  """
  feats = []
  for idx, smiles in enumerate(arr.tolist()):
    mol = Chem.MolFromSmiles(smiles)
    if mol:
      canonical_order = rdmolfiles.CanonicalRankAtoms(mol)
      mol = rdmolops.RenumberAtoms(mol, canonical_order)
    if idx % log_every_N == 0:
      log("Featurizing sample %d" % idx, verbose)
    feats.append(featurizer.featurize([mol]))
  keep = np.array([1 if f.size > 0 else 0 for f in feats], dtype=bool)
  feats = [f for ok, f in zip(keep, feats) if ok]
  flattened = np.squeeze(np.array(feats))
  return flattened.reshape(-1,)
def get_user_specified_features(df, featurizer, verbose=True):
  """Extract and merge user specified features.

  Merge features included in dataset provided by user
  into final features dataframe

  Three types of featurization here:

    1) Molecule featurization
      -) Smiles string featurization
      -) Rdkit MOL featurization
    2) Complex featurization
      -) PDB files for interacting molecules.
    3) User specified featurizations.

  Returns
  -------
  np.ndarray with one row per sample and one column per feature field.
  """
  time1 = time.time()
  df[featurizer.feature_fields] = df[featurizer.feature_fields].apply(
      pd.to_numeric)
  # Bug fix: DataFrame.as_matrix was deprecated in pandas 0.23 and removed
  # in pandas 1.0; select the columns and convert with to_numpy() instead.
  X_shard = df[featurizer.feature_fields].to_numpy()
  time2 = time.time()
  log("TIMING: user specified processing took %0.3f s" % (time2 - time1),
      verbose)
  return X_shard
def featurize_mol_df(df, featurizer, field, verbose=True, log_every_N=1000):
  """Featurize rdkit Mol objects stored in a dataframe column.

  Unlike SMILES-based featurization, Mol objects (typically parsed from
  .sdf files) keep their 3-D structure, which featurizers such as
  CoulombMatrix require.
  """
  mols = df[field].tolist()
  feats = []
  for idx, mol in enumerate(mols):
    if idx % log_every_N == 0:
      log("Featurizing sample %d" % idx, verbose)
    feats.append(featurizer.featurize([mol]))
  keep = np.array([1 if f.size > 0 else 0 for f in feats], dtype=bool)
  feats = [f for ok, f in zip(keep, feats) if ok]
  return np.squeeze(np.array(feats)), keep
class DataLoader(object):
  """
  Handles loading/featurizing of chemical samples (datapoints).

  Currently knows how to load csv-files/pandas-dataframes/SDF-files. Writes a
  dataframe object to disk as output. Subclasses implement `get_shards` and
  `featurize_shard` for a particular input format.
  """

  def __init__(self,
               tasks,
               smiles_field=None,
               id_field=None,
               mol_field=None,
               featurizer=None,
               protein_field=None,
               source_field=None,
               verbose=True,
               prot_seq_dict=None,
               log_every_n=1000):
    """Extracts data from input as Pandas data frame.

    Parameters
    ----------
    tasks: list
      Names of the label columns.
    smiles_field: str
      Column holding SMILES strings.
    id_field: str
      Column holding sample identifiers; defaults to `smiles_field`.
    mol_field: str
      Column holding rdkit Mol objects, if any.
    featurizer: Featurizer
      Object used to featurize each compound.
    protein_field / source_field: str
      Columns naming the protein and its source dataset.
    prot_seq_dict: dict
      Maps (source, protein_name) -> sequence.
    log_every_n: int
      Logging frequency in samples.
    """
    if not isinstance(tasks, list):
      raise ValueError("tasks must be a list.")
    self.verbose = verbose
    self.tasks = tasks
    self.smiles_field = smiles_field
    # Fall back to the SMILES column as identifier when no id column is given.
    if id_field is None:
      self.id_field = smiles_field
    else:
      self.id_field = id_field
    self.mol_field = mol_field
    self.protein_field = protein_field
    self.source_field = source_field
    self.prot_seq_dict = prot_seq_dict
    self.user_specified_features = None
    if isinstance(featurizer, UserDefinedFeaturizer):
      self.user_specified_features = featurizer.feature_fields
    self.featurizer = featurizer
    self.log_every_n = log_every_n

  def featurize(self, input_files, data_dir=None, shard_size=8192):
    """Featurize provided files and write to specified location.

    For large datasets, automatically shards into smaller chunks
    for convenience.

    Parameters
    ----------
    input_files: list
      List of input filenames.
    data_dir: str
      (Optional) Directory to store featurized dataset.
    shard_size: int
      (Optional) Number of examples stored in each shard.
    """
    log("Loading raw samples now.", self.verbose)
    log("shard_size: %d" % shard_size, self.verbose)

    if not isinstance(input_files, list):
      input_files = [input_files]

    def shard_generator():
      for shard_num, shard in enumerate(
          self.get_shards(input_files, shard_size)):
        time1 = time.time()
        X, valid_inds = self.featurize_shard(shard)
        ids = shard[self.id_field].values
        # Keep ids only for rows that featurized successfully.
        ids = ids[valid_inds]
        if len(self.tasks) > 0:
          # Featurize task results iff they exist.
          # Bug fix: the original passed self.id_field as the (unused) third
          # positional `verbose` argument of convert_df_to_numpy.
          y, w = convert_df_to_numpy(shard, self.tasks)
          # Filter out examples where featurization failed.
          y, w = (y[valid_inds], w[valid_inds])
          assert len(X) == len(ids) == len(y) == len(w)
        else:
          # For prospective data where results are unknown, it makes
          # no sense to have y values or weights.
          y, w = (None, None)
          assert len(X) == len(ids)

        time2 = time.time()
        log("TIMING: featurizing shard %d took %0.3f s" %
            (shard_num, time2 - time1), self.verbose)
        yield X, y, w, ids

    return DiskDataset.create_dataset(
        shard_generator(), data_dir, self.tasks, verbose=self.verbose)

  def get_shards(self, input_files, shard_size):
    """Stub for children classes."""
    raise NotImplementedError

  def featurize_shard(self, shard):
    """Featurizes a shard of an input dataframe."""
    raise NotImplementedError
class CSVLoader(DataLoader):
  """
  Handles loading of CSV files.
  """

  def get_shards(self, input_files, shard_size, verbose=True):
    """Generator over dataframe shards read from the input CSV files."""
    return load_csv_files(input_files, shard_size, verbose=verbose)

  def featurize_shard(self, shard):
    """Featurize the compounds and proteins of one dataframe shard."""
    compound_feats, valid_inds = featurize_smiles_df(
        shard, self.featurizer, field=self.smiles_field)
    if len(compound_feats.shape) > 2:
      compound_feats = np.squeeze(compound_feats)
    protein_feats = featurize_protein(
        shard, field=self.protein_field, source_field=self.source_field,
        prot_seq_dict=self.prot_seq_dict)
    # For e.g. 1024-bit ECFP, compound_feats has shape (shard_size, 1024);
    # the protein column is appended alongside the compound features.
    return np.concatenate((compound_feats, protein_feats), axis=1), valid_inds
| [
"os.dup2",
"numpy.ones",
"rdkit.Chem.rdmolops.RenumberAtoms",
"rdkit.Chem.MolFromSmiles",
"numpy.zeros_like",
"rdkit.Chem.rdmolfiles.CanonicalRankAtoms",
"os.dup",
"numpy.squeeze",
"numpy.array",
"numpy.concatenate",
"dcCustom.feat.Protein",
"deepchem.utils.save.log",
"sys.stderr.fileno",
... | [((908, 919), 'time.time', 'time.time', ([], {}), '()\n', (917, 919), False, 'import time\n'), ((1028, 1039), 'time.time', 'time.time', ([], {}), '()\n', (1037, 1039), False, 'import time\n'), ((1047, 1076), 'numpy.ones', 'np.ones', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (1054, 1076), True, 'import numpy as np\n'), ((2150, 2168), 'numpy.array', 'np.array', (['proteins'], {}), '(proteins)\n', (2158, 2168), True, 'import numpy as np\n'), ((2539, 2558), 'sys.stderr.fileno', 'sys.stderr.fileno', ([], {}), '()\n', (2556, 2558), False, 'import sys\n'), ((2575, 2596), 'os.dup', 'os.dup', (['stderr_fileno'], {}), '(stderr_fileno)\n', (2581, 2596), False, 'import os\n'), ((3259, 3294), 'os.dup2', 'os.dup2', (['stderr_save', 'stderr_fileno'], {}), '(stderr_save, stderr_fileno)\n', (3266, 3294), False, 'import os\n'), ((3313, 3383), 'numpy.array', 'np.array', (['[(1 if elt.size > 0 else 0) for elt in features]'], {'dtype': 'bool'}), '([(1 if elt.size > 0 else 0) for elt in features], dtype=bool)\n', (3321, 3383), True, 'import numpy as np\n'), ((4221, 4291), 'numpy.array', 'np.array', (['[(1 if elt.size > 0 else 0) for elt in features]'], {'dtype': 'bool'}), '([(1 if elt.size > 0 else 0) for elt in features], dtype=bool)\n', (4229, 4291), True, 'import numpy as np\n'), ((4922, 4933), 'time.time', 'time.time', ([], {}), '()\n', (4931, 4933), False, 'import time\n'), ((5096, 5107), 'time.time', 'time.time', ([], {}), '()\n', (5105, 5107), False, 'import time\n'), ((5110, 5195), 'deepchem.utils.save.log', 'log', (["('TIMING: user specified processing took %0.3f s' % (time2 - time1))", 'verbose'], {}), "('TIMING: user specified processing took %0.3f s' % (time2 - time1), verbose\n )\n", (5113, 5195), False, 'from deepchem.utils.save import log\n'), ((5888, 5958), 'numpy.array', 'np.array', (['[(1 if elt.size > 0 else 0) for elt in features]'], {'dtype': 'bool'}), '([(1 if elt.size > 0 else 0) for elt in features], dtype=bool)\n', (5896, 5958), True, 
'import numpy as np\n'), ((2741, 2765), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['elem'], {}), '(elem)\n', (2759, 2765), False, 'from rdkit import Chem\n'), ((3542, 3560), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (3550, 3560), True, 'import numpy as np\n'), ((3934, 3958), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['elem'], {}), '(elem)\n', (3952, 3958), False, 'from rdkit import Chem\n'), ((4401, 4419), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (4409, 4419), True, 'import numpy as np\n'), ((7842, 7887), 'deepchem.utils.save.log', 'log', (['"""Loading raw samples now."""', 'self.verbose'], {}), "('Loading raw samples now.', self.verbose)\n", (7845, 7887), False, 'from deepchem.utils.save import log\n'), ((7892, 7940), 'deepchem.utils.save.log', 'log', (["('shard_size: %d' % shard_size)", 'self.verbose'], {}), "('shard_size: %d' % shard_size, self.verbose)\n", (7895, 7940), False, 'from deepchem.utils.save import log\n'), ((9538, 9594), 'deepchem.utils.save.load_csv_files', 'load_csv_files', (['input_files', 'shard_size'], {'verbose': 'verbose'}), '(input_files, shard_size, verbose=verbose)\n', (9552, 9594), False, 'from deepchem.utils.save import load_csv_files\n'), ((1089, 1105), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (1102, 1105), True, 'import numpy as np\n'), ((3007, 3041), 'rdkit.Chem.rdmolfiles.CanonicalRankAtoms', 'rdmolfiles.CanonicalRankAtoms', (['mol'], {}), '(mol)\n', (3036, 3041), False, 'from rdkit.Chem import rdmolfiles\n'), ((3054, 3092), 'rdkit.Chem.rdmolops.RenumberAtoms', 'rdmolops.RenumberAtoms', (['mol', 'new_order'], {}), '(mol, new_order)\n', (3076, 3092), False, 'from rdkit.Chem import rdmolops\n'), ((3130, 3173), 'deepchem.utils.save.log', 'log', (["('Featurizing sample %d' % ind)", 'verbose'], {}), "('Featurizing sample %d' % ind, verbose)\n", (3133, 3173), False, 'from deepchem.utils.save import log\n'), ((3989, 4023), 
'rdkit.Chem.rdmolfiles.CanonicalRankAtoms', 'rdmolfiles.CanonicalRankAtoms', (['mol'], {}), '(mol)\n', (4018, 4023), False, 'from rdkit.Chem import rdmolfiles\n'), ((4036, 4074), 'rdkit.Chem.rdmolops.RenumberAtoms', 'rdmolops.RenumberAtoms', (['mol', 'new_order'], {}), '(mol, new_order)\n', (4058, 4074), False, 'from rdkit.Chem import rdmolops\n'), ((4112, 4155), 'deepchem.utils.save.log', 'log', (["('Featurizing sample %d' % ind)", 'verbose'], {}), "('Featurizing sample %d' % ind, verbose)\n", (4115, 4155), False, 'from deepchem.utils.save import log\n'), ((5780, 5823), 'deepchem.utils.save.log', 'log', (["('Featurizing sample %d' % ind)", 'verbose'], {}), "('Featurizing sample %d' % ind, verbose)\n", (5783, 5823), False, 'from deepchem.utils.save import log\n'), ((6064, 6082), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (6072, 6082), True, 'import numpy as np\n'), ((9841, 9865), 'numpy.squeeze', 'np.squeeze', (['mol_features'], {}), '(mol_features)\n', (9851, 9865), True, 'import numpy as np\n'), ((10103, 10151), 'numpy.concatenate', 'np.concatenate', (['(mol_features, proteins)'], {'axis': '(1)'}), '((mol_features, proteins), axis=1)\n', (10117, 10151), True, 'import numpy as np\n'), ((955, 980), 'numpy.array', 'np.array', (['df[task].values'], {}), '(df[task].values)\n', (963, 980), True, 'import numpy as np\n'), ((2024, 2071), 'dcCustom.feat.Protein', 'Protein', (['prot'], {'source': 'source', 'sequence': 'sequence'}), '(prot, source=source, sequence=sequence)\n', (2031, 2071), False, 'from dcCustom.feat import Protein\n'), ((8156, 8167), 'time.time', 'time.time', ([], {}), '()\n', (8165, 8167), False, 'import time\n'), ((8826, 8837), 'time.time', 'time.time', ([], {}), '()\n', (8835, 8837), False, 'import time\n'), ((8846, 8942), 'deepchem.utils.save.log', 'log', (["('TIMING: featurizing shard %d took %0.3f s' % (shard_num, time2 - time1))", 'self.verbose'], {}), "('TIMING: featurizing shard %d took %0.3f s' % (shard_num, time2 - time1\n ), 
self.verbose)\n", (8849, 8942), False, 'from deepchem.utils.save import log\n')] |
"""Columbia Uncompressed Image Splicing Detection Evaluation Dataset
- https://www.ee.columbia.edu/ln/dvmm/downloads/authsplcuncmp/
- Detecting Image Splicing Using Geometry Invariants And Camera Characteristics Consistency, <NAME>, <NAME>
"""
import tarfile
from pathlib import Path
from typing import Any, Dict
import cv2
import numpy as np
import toml
import torch
from src.datasets.utils import download_raw_dataset
from torch.utils.data import Dataset
METADATA_FILENAME = Path("data/raw/columbia/metadata.toml")
DL_DATA_DIRNAME = Path("data/downloaded/columbia")
PROCESSED_DATA_DIRNAMES = [DL_DATA_DIRNAME / "4cam_auth", DL_DATA_DIRNAME / "4cam_splc"]
class ColumbiaDataset(Dataset):
    """Columbia uncompressed splicing dataset.

    Yields RGB images, a binary authentic/spliced label, and a per-pixel
    splice-localization map derived from the green regions of the edgemasks.
    """

    def __init__(self, root_dir=DL_DATA_DIRNAME, spliced_only=False) -> None:
        self._prepare_data()
        self.to_label = {"4cam_auth": 0, "4cam_splc": 1}
        root_dir = Path(root_dir)
        self.img_paths = []
        if not spliced_only:
            # Authentic images live under 4cam_auth.
            authentic = list((root_dir / "4cam_auth").glob("*.tif"))
            assert (
                len(authentic) == 183
            ), "Incorrect expected number of authentic images in dataset!"
            self.img_paths.extend(authentic)
        # Spliced images live under 4cam_splc.
        spliced = list((root_dir / "4cam_splc").glob("*.tif"))
        assert (
            len(spliced) == 180
        ), "Incorrect expected number of spliced images in dataset!"
        self.img_paths.extend(spliced)

    def _prepare_data(self) -> None:
        # Download and unpack only when the extracted directories are missing.
        if all(p.exists() for p in PROCESSED_DATA_DIRNAMES):
            return
        metadata = toml.load(METADATA_FILENAME)
        download_raw_dataset(metadata, DL_DATA_DIRNAME)
        print("Unzipping Columbia...")
        for filename in metadata["filename"]:
            # NOTE(review): extractall on an archive from the network is a
            # path-traversal risk for untrusted sources — confirm provenance.
            tar = tarfile.open(DL_DATA_DIRNAME / filename, "r:bz2")
            tar.extractall(DL_DATA_DIRNAME)
            tar.close()

    def __getitem__(self, idx) -> Dict[str, Any]:
        """
        Returns
        -------
        Dict[str, Any]
            img : torch.ByteTensor
                [C, H, W], range [0, 255]
            label : int
                One of {0, 1}
            map : np.ndarray (uint8)
                [H, W], values one of {0, 1}
        """
        img_path = self.img_paths[idx]
        # cv2 loads BGR; reorder to RGB, giving [H, W, C].
        img = cv2.imread(str(img_path))[:, :, [2, 1, 0]]
        assert img.dtype == np.uint8, "Image should be of type int!"
        assert (
            img.min() >= 0 and img.max() <= 255
        ), "Image should be bounded between [0, 255]!"
        img = torch.from_numpy(img).permute(2, 0, 1)  # [C, H, W]
        label = self.to_label[img_path.parent.name]
        _, height, width = img.shape
        binary_map = np.zeros((height, width), dtype=np.uint8)
        if label:
            mask_path = img_path.parent / "edgemask" / f"{img_path.stem}_edgemask.jpg"
            mask = cv2.imread(str(mask_path))[:, :, [2, 1, 0]]  # [H, W, C]
            # FIXME Should I include bright red too?
            # Spliced region = bright green (0,255,0) or regular green (0,200,0).
            bright = (mask == np.array([0, 255, 0])).all(axis=-1)
            regular = (mask == np.array([0, 200, 0])).all(axis=-1)
            binary_map[bright | regular] = 1
        return {"img": img, "label": label, "map": binary_map}

    def __len__(self):
        return len(self.img_paths)
| [
"tarfile.open",
"pathlib.Path",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"toml.load",
"src.datasets.utils.download_raw_dataset"
] | [((480, 519), 'pathlib.Path', 'Path', (['"""data/raw/columbia/metadata.toml"""'], {}), "('data/raw/columbia/metadata.toml')\n", (484, 519), False, 'from pathlib import Path\n'), ((538, 570), 'pathlib.Path', 'Path', (['"""data/downloaded/columbia"""'], {}), "('data/downloaded/columbia')\n", (542, 570), False, 'from pathlib import Path\n'), ((878, 892), 'pathlib.Path', 'Path', (['root_dir'], {}), '(root_dir)\n', (882, 892), False, 'from pathlib import Path\n'), ((2977, 2998), 'numpy.array', 'np.array', (['[0, 255, 0]'], {}), '([0, 255, 0])\n', (2985, 2998), True, 'import numpy as np\n'), ((3023, 3044), 'numpy.array', 'np.array', (['[0, 200, 0]'], {}), '([0, 200, 0])\n', (3031, 3044), True, 'import numpy as np\n'), ((1717, 1745), 'toml.load', 'toml.load', (['METADATA_FILENAME'], {}), '(METADATA_FILENAME)\n', (1726, 1745), False, 'import toml\n'), ((1789, 1836), 'src.datasets.utils.download_raw_dataset', 'download_raw_dataset', (['metadata', 'DL_DATA_DIRNAME'], {}), '(metadata, DL_DATA_DIRNAME)\n', (1809, 1836), False, 'from src.datasets.utils import download_raw_dataset\n'), ((3427, 3468), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (3435, 3468), True, 'import numpy as np\n'), ((3741, 3782), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (3749, 3782), True, 'import numpy as np\n'), ((1994, 2043), 'tarfile.open', 'tarfile.open', (['(DL_DATA_DIRNAME / filename)', '"""r:bz2"""'], {}), "(DL_DATA_DIRNAME / filename, 'r:bz2')\n", (2006, 2043), False, 'import tarfile\n'), ((2797, 2818), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2813, 2818), False, 'import torch\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import numpy as np
import mxnet as mx
from mxnet.test_utils import *
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import with_seed
# * GroupAdaGrad
class PyGroupAdaGrad(mx.optimizer.Optimizer):
    """Pure-python reference implementation of the Group AdaGrad optimizer.

    Parameters
    ----------
    eps: float, optional
        Small value to avoid division by 0.
    """

    def __init__(self, eps=1e-5, **kwargs):
        super(PyGroupAdaGrad, self).__init__(**kwargs)
        self.float_stable_eps = eps

    def create_state(self, index, weight):
        # One accumulated-history scalar per row (group) of the 2-D weight.
        assert len(weight.shape) == 2
        return mx.nd.zeros(
            (weight.shape[0], 1), weight.context, stype=weight.stype)

    def update(self, index, weight, grad, state):
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        # Reference implementation does not support weight decay.
        assert wd == 0

        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
        # Accumulate the per-row mean of squared gradients, then scale the step.
        state[:] += mx.nd.mean(mx.nd.square(grad), axis=1, keepdims=True)
        weight[:] -= lr * grad / mx.nd.sqrt(state + self.float_stable_eps)
def test_group_adagrad():
    """Compare contrib GroupAdaGrad with the python reference over a grid of
    eps / clip_gradient / rescale_grad options and sparse storage types."""
    mx.random.seed(0)
    opt1 = PyGroupAdaGrad
    opt2 = mx.optimizer.contrib.GroupAdaGrad
    shape = (3, 4)
    eps_options = [{}, {'eps': 1e-8}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    # Dense, fully row-sparse, and dense-weight/sparse-grad configurations.
    stype_variants = (
        {},
        {'w_stype': 'row_sparse', 'g_stype': 'row_sparse'},
        {'g_stype': 'row_sparse'},
    )
    for dtype in [np.float32]:
        for options in itertools.product(eps_options, cg_options, rg_options):
            kwarg = dict(wd=0.0)
            for option in options:
                kwarg.update(option)
            for stypes in stype_variants:
                compare_optimizer(
                    opt1(**kwarg),
                    opt2(**kwarg),
                    shape,
                    dtype,
                    compare_states=False,
                    **stypes)
@with_seed()
def test_adamw():
    """Exercise the (multi-)adamw_update operators, in both single and
    multi-precision form, against a pure-python reference update."""
    def get_refs(m, v, weight, grad_rescale, beta1, beta2, lr, eta, wd, epsilon, clip_grad=-1):
        """Reference AdamW step: returns the expected (mean, v, weight)."""
        if clip_grad >= 0:
            grad_rescale = mx.nd.clip(grad_rescale, -clip_grad, clip_grad)
        mean_ref = beta1*m + (1-beta1)*grad_rescale
        v_ref = beta2*v + (1-beta2)*(grad_rescale**2)
        weight_ref = weight - eta * (lr * mean_ref / (v_ref.sqrt() + epsilon) + weight * wd)
        return mean_ref, v_ref, weight_ref
    def run_adamw_test(nElem=1, aggregate=False):
        """Run the four sub-tests for nElem weights (aggregate ops if > 1)."""
        aggregate = aggregate or nElem > 1
        rescale_factor = 10
        eta, lr, wd, epsilon = 1, 1, 0.1, 1e-8
        beta1, beta2 = 0.9, 0.999
        clip_gradient = np.random.uniform(rescale_factor, rescale_factor)
        weight, grad, m, v, etas, lrs, wds, weight_ref = [], [], [], [], [], [], [], []
        # Random weights/grads/optimizer state, one entry per element.
        for i in range(nElem):
            shape = (np.random.randint(3, high=10), np.random.randint(3, high=10))
            weight.append(mx.nd.random.uniform(shape=shape))
            grad.append(mx.nd.random.uniform(-1.0, 1.0, shape=shape))
            m.append(mx.nd.random.uniform(shape=shape))
            v.append(mx.nd.random.uniform(shape=shape))
            etas.append(eta - 1 / np.random.uniform(9, 10))
            lrs.append(lr - 1 / np.random.uniform(9, 10))
            wds.append(wd - 1 / np.random.uniform(95, 105))
            weight_ref.append(weight[i].copy())
        # Aggregate ops take per-element lists; the scalar ops take one value each.
        if aggregate:
            kwargs = {'etas': etas, 'lrs': lrs, 'wds': wds}
        else:
            kwargs = {'eta': etas[0], 'lr': lrs[0], 'wd': wds[0]}
        kwargs.update([('epsilon', epsilon), ('beta1', beta1), ('beta2', beta2), ('clip_gradient', clip_gradient)])
        # Test 1: Update is skipped for rescale = nan scalar
        rescale_grad = mx.nd.array([rescale_factor])
        tested_grad = [rescale_grad * 0, rescale_grad * np.nan, rescale_grad * np.inf]
        tested_rescaled_grad = [np.nan]
        tested_rescaled_grad.extend(tested_grad)
        for rescaled_grad in tested_rescaled_grad:
            if aggregate:
                mx.nd.contrib.multi_adamw_update(weight, grad, m, v,
                                                 rescaled_grad, out=weight, **kwargs)
            else:
                mx.nd.contrib.adamw_update(weight[0], grad[0], m[0], v[0],
                                           rescaled_grad, out=weight[0], **kwargs)
            # weights should remain unchanged
            for j in range(nElem):
                assert_almost_equal(weight_ref[j], weight[j])
        # Test 2: Same as Test 1 for multi-precision update
        weight_fp16, grad_fp16, weight_fp16_refs = [], [], []
        for i in range(nElem):
            weight_fp16.append(weight[i].astype('float16'))
            grad_fp16.append(grad[i].astype('float16'))
            weight_fp16_refs.append(weight_fp16[i].copy())
        for rescaled_grad in tested_grad:
            if aggregate:
                mx.nd.contrib.multi_mp_adamw_update(weight_fp16, grad_fp16, m, v, weight,
                                                    rescaled_grad, out=weight_fp16, **kwargs)
            else:
                mx.nd.contrib.mp_adamw_update(weight_fp16[0], grad_fp16[0], m[0], v[0], weight[0],
                                              rescaled_grad, out=weight_fp16[0], **kwargs)
            # weights should remain unchanged
            for i in range(nElem):
                assert_almost_equal(weight_ref[i], weight[i])
                assert_almost_equal(weight_fp16_refs[i], weight_fp16[i])
        # Test 3: Reference normal update
        grad_rescale, weight_test, m_refs, v_refs, weight_refs = [], [], [], [], []
        for i in range(nElem):
            grad_rescale.append(rescale_grad * grad[i])
            # NOTE(review): this rebinds `weight_ref` (previously the list of
            # pristine weights) to a single NDArray; the list is not needed
            # after this point, so behaviour is unaffected.
            m_ref, v_ref, weight_ref = get_refs(m[i], v[i], weight[i], grad_rescale[i], beta1, beta2, lrs[i], etas[i], wds[i], epsilon, clip_gradient)
            m_refs.append(m_ref)
            v_refs.append(v_ref)
            weight_refs.append(weight_ref)
            weight_test.append(weight[i].copy())
        # op normal update
        if aggregate:
            mx.nd.contrib.multi_adamw_update(weight_test, grad, m, v,
                                             rescale_grad, out=weight_test, **kwargs)
        else:
            mx.nd.contrib.adamw_update(weight_test[0], grad[0], m[0], v[0],
                                       rescale_grad, out=weight_test[0], **kwargs)
        # Compare results
        atol = 1e-4 if aggregate else 1e-5
        rtol = 1e-4 if aggregate else None
        for i in range(nElem):
            assert_almost_equal(weight_refs[i], weight_test[i], rtol=rtol, atol=atol)
            assert_almost_equal(m_refs[i], m[i], rtol=rtol, atol=atol)
            assert_almost_equal(v_refs[i], v[i], atol=atol)
        # Test 4: Reference normal multi-precision update
        grad_rescale, m_refs, v_refs, weight_refs, weight_fp16_refs = [], [], [], [], []
        for i in range(nElem):
            grad_rescale.append(rescale_grad * grad_fp16[i].astype('float32'))
            m_ref, v_ref, weight_ref = get_refs(m[i], v[i], weight[i], grad_rescale[i], beta1, beta2, lrs[i], etas[i], wds[i], epsilon, clip_gradient)
            m_refs.append(m_ref)
            v_refs.append(v_ref)
            weight_refs.append(weight_ref)
            weight_fp16_refs.append(weight_ref.astype('float16'))
        # op normal multi-precision update
        if aggregate:
            mx.nd.contrib.multi_mp_adamw_update(weight_fp16, grad_fp16, m, v, weight,
                                                rescale_grad, out=weight_fp16, **kwargs)
        else:
            mx.nd.contrib.mp_adamw_update(weight_fp16[0], grad_fp16[0], m[0], v[0], weight[0],
                                          rescale_grad, out=weight_fp16[0], **kwargs)
        # Compare results
        for i in range(nElem):
            assert_almost_equal(m_refs[i], m[i], rtol=rtol, atol=atol)
            assert_almost_equal(v_refs[i], v[i], atol=atol)
            assert_almost_equal(weight_refs[i], weight[i], rtol=rtol, atol=atol)
            assert_almost_equal(weight_fp16_refs[i], weight_fp16[i], rtol=1e-3, atol=atol)
    # Testing aggregated Adam update for one element
    run_adamw_test(1, aggregate=True)
    # Testing Adam update, if nElem = 0, OR
    # aggregated Adam update, if nElem > 0
    for nElem in range(6):
        run_adamw_test(nElem+1)
if __name__ == '__main__':
    # Allow running this test file directly: discover and run tests via nose.
    import nose
    nose.runmodule()
| [
"mxnet.nd.random.uniform",
"common.with_seed",
"mxnet.nd.contrib.multi_adamw_update",
"mxnet.nd.zeros",
"mxnet.nd.square",
"mxnet.nd.clip",
"itertools.product",
"mxnet.nd.sqrt",
"mxnet.nd.contrib.mp_adamw_update",
"nose.runmodule",
"mxnet.nd.contrib.adamw_update",
"mxnet.nd.contrib.multi_mp_ad... | [((3318, 3329), 'common.with_seed', 'with_seed', ([], {}), '()\n', (3327, 3329), False, 'from common import with_seed\n'), ((2170, 2187), 'mxnet.random.seed', 'mx.random.seed', (['(0)'], {}), '(0)\n', (2184, 2187), True, 'import mxnet as mx\n'), ((9810, 9826), 'nose.runmodule', 'nose.runmodule', ([], {}), '()\n', (9824, 9826), False, 'import nose\n'), ((1502, 1571), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(weight.shape[0], 1)', 'weight.context'], {'stype': 'weight.stype'}), '((weight.shape[0], 1), weight.context, stype=weight.stype)\n', (1513, 1571), True, 'import mxnet as mx\n'), ((2509, 2563), 'itertools.product', 'itertools.product', (['eps_options', 'cg_options', 'rg_options'], {}), '(eps_options, cg_options, rg_options)\n', (2526, 2563), False, 'import itertools\n'), ((4016, 4065), 'numpy.random.uniform', 'np.random.uniform', (['rescale_factor', 'rescale_factor'], {}), '(rescale_factor, rescale_factor)\n', (4033, 4065), True, 'import numpy as np\n'), ((5102, 5131), 'mxnet.nd.array', 'mx.nd.array', (['[rescale_factor]'], {}), '([rescale_factor])\n', (5113, 5131), True, 'import mxnet as mx\n'), ((1909, 1966), 'mxnet.nd.clip', 'mx.nd.clip', (['grad', '(-self.clip_gradient)', 'self.clip_gradient'], {}), '(grad, -self.clip_gradient, self.clip_gradient)\n', (1919, 1966), True, 'import mxnet as mx\n'), ((2000, 2018), 'mxnet.nd.square', 'mx.nd.square', (['grad'], {}), '(grad)\n', (2012, 2018), True, 'import mxnet as mx\n'), ((2069, 2112), 'mxnet.nd.sqrt', 'mx.nd.sqrt', (['(history + self.float_stable_eps)'], {}), '(history + self.float_stable_eps)\n', (2079, 2112), True, 'import mxnet as mx\n'), ((3498, 3545), 'mxnet.nd.clip', 'mx.nd.clip', (['grad_rescale', '(-clip_grad)', 'clip_grad'], {}), '(grad_rescale, -clip_grad, clip_grad)\n', (3508, 3545), True, 'import mxnet as mx\n'), ((7455, 7558), 'mxnet.nd.contrib.multi_adamw_update', 'mx.nd.contrib.multi_adamw_update', (['weight_test', 'grad', 'm', 'v', 'rescale_grad'], {'out': 
'weight_test'}), '(weight_test, grad, m, v, rescale_grad, out\n =weight_test, **kwargs)\n', (7487, 7558), True, 'import mxnet as mx\n'), ((7625, 7736), 'mxnet.nd.contrib.adamw_update', 'mx.nd.contrib.adamw_update', (['weight_test[0]', 'grad[0]', 'm[0]', 'v[0]', 'rescale_grad'], {'out': 'weight_test[0]'}), '(weight_test[0], grad[0], m[0], v[0],\n rescale_grad, out=weight_test[0], **kwargs)\n', (7651, 7736), True, 'import mxnet as mx\n'), ((8796, 8914), 'mxnet.nd.contrib.multi_mp_adamw_update', 'mx.nd.contrib.multi_mp_adamw_update', (['weight_fp16', 'grad_fp16', 'm', 'v', 'weight', 'rescale_grad'], {'out': 'weight_fp16'}), '(weight_fp16, grad_fp16, m, v, weight,\n rescale_grad, out=weight_fp16, **kwargs)\n', (8831, 8914), True, 'import mxnet as mx\n'), ((8985, 9115), 'mxnet.nd.contrib.mp_adamw_update', 'mx.nd.contrib.mp_adamw_update', (['weight_fp16[0]', 'grad_fp16[0]', 'm[0]', 'v[0]', 'weight[0]', 'rescale_grad'], {'out': 'weight_fp16[0]'}), '(weight_fp16[0], grad_fp16[0], m[0], v[0],\n weight[0], rescale_grad, out=weight_fp16[0], **kwargs)\n', (9014, 9115), True, 'import mxnet as mx\n'), ((4206, 4235), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'high': '(10)'}), '(3, high=10)\n', (4223, 4235), True, 'import numpy as np\n'), ((4237, 4266), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'high': '(10)'}), '(3, high=10)\n', (4254, 4266), True, 'import numpy as np\n'), ((4294, 4327), 'mxnet.nd.random.uniform', 'mx.nd.random.uniform', ([], {'shape': 'shape'}), '(shape=shape)\n', (4314, 4327), True, 'import mxnet as mx\n'), ((4353, 4397), 'mxnet.nd.random.uniform', 'mx.nd.random.uniform', (['(-1.0)', '(1.0)'], {'shape': 'shape'}), '(-1.0, 1.0, shape=shape)\n', (4373, 4397), True, 'import mxnet as mx\n'), ((4420, 4453), 'mxnet.nd.random.uniform', 'mx.nd.random.uniform', ([], {'shape': 'shape'}), '(shape=shape)\n', (4440, 4453), True, 'import mxnet as mx\n'), ((4476, 4509), 'mxnet.nd.random.uniform', 'mx.nd.random.uniform', ([], {'shape': 'shape'}), 
'(shape=shape)\n', (4496, 4509), True, 'import mxnet as mx\n'), ((5402, 5496), 'mxnet.nd.contrib.multi_adamw_update', 'mx.nd.contrib.multi_adamw_update', (['weight', 'grad', 'm', 'v', 'rescaled_grad'], {'out': 'weight'}), '(weight, grad, m, v, rescaled_grad, out=\n weight, **kwargs)\n', (5434, 5496), True, 'import mxnet as mx\n'), ((5575, 5677), 'mxnet.nd.contrib.adamw_update', 'mx.nd.contrib.adamw_update', (['weight[0]', 'grad[0]', 'm[0]', 'v[0]', 'rescaled_grad'], {'out': 'weight[0]'}), '(weight[0], grad[0], m[0], v[0], rescaled_grad,\n out=weight[0], **kwargs)\n', (5601, 5677), True, 'import mxnet as mx\n'), ((6276, 6395), 'mxnet.nd.contrib.multi_mp_adamw_update', 'mx.nd.contrib.multi_mp_adamw_update', (['weight_fp16', 'grad_fp16', 'm', 'v', 'weight', 'rescaled_grad'], {'out': 'weight_fp16'}), '(weight_fp16, grad_fp16, m, v, weight,\n rescaled_grad, out=weight_fp16, **kwargs)\n', (6311, 6395), True, 'import mxnet as mx\n'), ((6478, 6609), 'mxnet.nd.contrib.mp_adamw_update', 'mx.nd.contrib.mp_adamw_update', (['weight_fp16[0]', 'grad_fp16[0]', 'm[0]', 'v[0]', 'weight[0]', 'rescaled_grad'], {'out': 'weight_fp16[0]'}), '(weight_fp16[0], grad_fp16[0], m[0], v[0],\n weight[0], rescaled_grad, out=weight_fp16[0], **kwargs)\n', (6507, 6609), True, 'import mxnet as mx\n'), ((4545, 4569), 'numpy.random.uniform', 'np.random.uniform', (['(9)', '(10)'], {}), '(9, 10)\n', (4562, 4569), True, 'import numpy as np\n'), ((4603, 4627), 'numpy.random.uniform', 'np.random.uniform', (['(9)', '(10)'], {}), '(9, 10)\n', (4620, 4627), True, 'import numpy as np\n'), ((4661, 4687), 'numpy.random.uniform', 'np.random.uniform', (['(95)', '(105)'], {}), '(95, 105)\n', (4678, 4687), True, 'import numpy as np\n')] |
import os
import sys
import math
import fire
import json
from tqdm import tqdm
from math import floor, log2
from random import random
from shutil import rmtree
from functools import partial
import multiprocessing
from contextlib import contextmanager, ExitStack
import numpy as np
import torch
from torch import nn, einsum
from torch.utils import data
from torch.optim import Adam
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from einops import rearrange, repeat
from kornia.filters import filter2D
import torchvision
from torchvision import transforms
from stylegan2_pytorch.version import __version__
from stylegan2_pytorch.diff_augment import DiffAugment
from vector_quantize_pytorch import VectorQuantize
from PIL import Image
from pathlib import Path
try:
from apex import amp
APEX_AVAILABLE = True
except:
APEX_AVAILABLE = False
import aim
assert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.'
# constants
NUM_CORES = multiprocessing.cpu_count()
EXTS = ['jpg', 'jpeg', 'png']
# helper classes
class NanException(Exception):
    """Raised when a NaN loss is detected during training."""
class EMA():
    """Exponential moving average: new_avg = beta * old + (1 - beta) * new."""

    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_average(self, old, new):
        # No running average yet: seed it with the new value.
        if old is None:
            return new
        return old * self.beta + (1 - self.beta) * new
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.shape[0]
        return x.reshape(batch, -1)
class RandomApply(nn.Module):
    """Apply `fn` with probability `prob`, otherwise apply `fn_else`."""

    def __init__(self, prob, fn, fn_else = lambda x: x):
        super().__init__()
        self.fn = fn
        self.fn_else = fn_else
        self.prob = prob

    def forward(self, x):
        chosen = self.fn if random() < self.prob else self.fn_else
        return chosen(x)
class Residual(nn.Module):
    """Skip connection: returns fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        out = self.fn(x)
        return out + x
class PreNorm(nn.Module):
    """Channel-normalize the input before applying `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = ChanNorm(dim)

    def forward(self, x):
        normed = self.norm(x)
        return self.fn(normed)
class ChanNorm(nn.Module):
    """Normalize over the channel dim (dim=1) with learnable per-channel
    scale `g` and shift `b`."""

    def __init__(self, dim, eps = 1e-5):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        # Population (biased) standard deviation over channels.
        std = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt()
        mean = torch.mean(x, dim=1, keepdim=True)
        normed = (x - mean) / (std + self.eps)
        return normed * self.g + self.b
class PermuteToFrom(nn.Module):
    """Run `fn` on channels-last data: NCHW -> NHWC, apply fn, permute back.
    `fn` must return a (tensor, loss) pair."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        channels_last = x.permute(0, 2, 3, 1)
        out, loss = self.fn(channels_last)
        return out.permute(0, 3, 1, 2), loss
class Blur(nn.Module):
    """Binomial [1, 2, 1] blur built from a separable outer-product kernel."""

    def __init__(self):
        super().__init__()
        self.register_buffer('f', torch.Tensor([1, 2, 1]))

    def forward(self, x):
        kernel_1d = self.f
        # Outer product of the 1-D taps yields the 2-D kernel; filter2D
        # normalizes it so the taps sum to 1.
        kernel = kernel_1d[None, None, :] * kernel_1d[None, :, None]
        return filter2D(x, kernel, normalized=True)
# attention
class DepthWiseConv2d(nn.Module):
    """Depthwise spatial conv (groups == in channels) followed by a 1x1
    pointwise conv."""

    def __init__(self, dim_in, dim_out, kernel_size, padding = 0, stride = 1, bias = True):
        super().__init__()
        depthwise = nn.Conv2d(dim_in, dim_in, kernel_size=kernel_size, padding=padding,
                              groups=dim_in, stride=stride, bias=bias)
        pointwise = nn.Conv2d(dim_in, dim_out, kernel_size=1, bias=bias)
        self.net = nn.Sequential(depthwise, pointwise)

    def forward(self, x):
        return self.net(x)
class LinearAttention(nn.Module):
    """Multi-head linear attention over 2-D feature maps (softmax over keys
    instead of over query-key scores)."""

    def __init__(self, dim, dim_head = 64, heads = 8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        inner_dim = dim_head * heads

        self.nonlin = nn.GELU()
        self.to_q = nn.Conv2d(dim, inner_dim, 1, bias = False)
        self.to_kv = DepthWiseConv2d(dim, inner_dim * 2, 3, padding = 1, bias = False)
        self.to_out = nn.Conv2d(inner_dim, dim, 1)

    def forward(self, fmap):
        h, x, y = self.heads, *fmap.shape[-2:]
        q = self.to_q(fmap)
        k, v = self.to_kv(fmap).chunk(2, dim = 1)
        # Split heads and flatten the spatial grid into a sequence.
        q, k, v = (rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h) for t in (q, k, v))

        q = q.softmax(dim = -1) * self.scale
        k = k.softmax(dim = -2)

        # Linear attention: aggregate values against keys first, then queries.
        context = einsum('b n d, b n e -> b d e', k, v)
        out = einsum('b n d, b d e -> b n e', q, context)
        out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)
        return self.to_out(self.nonlin(out))
# one layer of self-attention and feedforward, for images
def attn_and_ff(chan):
    """One self-attention plus feedforward layer (each pre-normed and
    residual) for `chan`-channel feature maps."""
    attention = Residual(PreNorm(chan, LinearAttention(chan)))
    feedforward = Residual(PreNorm(chan, nn.Sequential(
        nn.Conv2d(chan, chan * 2, 1),
        leaky_relu(),
        nn.Conv2d(chan * 2, chan, 1),
    )))
    return nn.Sequential(attention, feedforward)
# helpers
def exists(val):
    """True when `val` is not None."""
    return val is not None
class null_context:
    """Do-nothing context manager (stand-in when no real context is needed)."""

    def __enter__(self):
        return None

    def __exit__(self, *exc):
        # Returning False lets any exception propagate, like the original
        # generator-based context manager.
        return False
def combine_contexts(contexts):
    """Build a context-manager factory that enters every context in
    `contexts` (each a zero-arg callable returning a context manager) and
    yields the list of entered values."""
    @contextmanager
    def multi_contexts():
        with ExitStack() as stack:
            entered = [stack.enter_context(make_ctx()) for make_ctx in contexts]
            yield entered
    return multi_contexts
def default(value, d):
    """Return `value` unless it is None, in which case return the fallback `d`."""
    if value is None:
        return d
    return value
def cycle(iterable):
    """Loop over `iterable` forever, restarting it each pass.

    Unlike itertools.cycle, this re-iterates the source each time around,
    so it works with re-iterable objects such as DataLoaders.
    """
    while True:
        for item in iterable:
            yield item
def cast_list(el):
    """Wrap `el` in a list unless it already is one."""
    if isinstance(el, list):
        return el
    return [el]
def is_empty(t):
    """True for a zero-element tensor, or for a non-tensor that is None."""
    if isinstance(t, torch.Tensor):
        return t.nelement() == 0
    return t is None
def raise_if_nan(t):
    """Abort by raising NanException when the scalar tensor `t` is NaN."""
    if not torch.isnan(t):
        return
    raise NanException
def gradient_accumulate_contexts(gradient_accumulate_every, is_ddp, ddps):
    """Yield once per gradient-accumulation step, each time inside a context
    that suppresses DDP gradient sync on all but the final step."""
    if is_ddp:
        # no_sync on every step except the last, which triggers the all-reduce.
        no_sync = combine_contexts(map(lambda ddp: ddp.no_sync, ddps))
        contexts = [no_sync] * (gradient_accumulate_every - 1) + [null_context]
    else:
        contexts = [null_context] * gradient_accumulate_every

    for context in contexts:
        with context():
            yield
def loss_backwards(fp16, loss, optimizer, loss_id, **kwargs):
    """Backpropagate `loss`, routing through apex AMP loss scaling when fp16."""
    if not fp16:
        loss.backward(**kwargs)
        return
    with amp.scale_loss(loss, optimizer, loss_id) as scaled_loss:
        scaled_loss.backward(**kwargs)
def gradient_penalty(images, output, weight = 10):
    """Gradient penalty: penalize deviation of the per-sample gradient norm
    (of `output` w.r.t. `images`) from 1, scaled by `weight`."""
    batch_size = images.shape[0]
    gradients = torch_grad(
        outputs=output,
        inputs=images,
        grad_outputs=torch.ones(output.size(), device=images.device),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    per_sample_norm = gradients.reshape(batch_size, -1).norm(2, dim=1)
    return weight * ((per_sample_norm - 1) ** 2).mean()
def calc_pl_lengths(styles, images):
    """Path-length term: per-sample norm of the gradient of a noise-weighted
    image sum with respect to the style vectors."""
    device = images.device
    num_pixels = images.shape[2] * images.shape[3]
    # Random projection, scaled so magnitude is invariant to pixel count.
    pl_noise = torch.randn(images.shape, device=device) / math.sqrt(num_pixels)
    outputs = (images * pl_noise).sum()

    pl_grads = torch_grad(
        outputs=outputs,
        inputs=styles,
        grad_outputs=torch.ones(outputs.shape, device=device),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]

    return (pl_grads ** 2).sum(dim=2).mean(dim=1).sqrt()
def noise(n, latent_dim, device):
    """Sample n standard-normal latent vectors, placed on the given GPU."""
    z = torch.randn(n, latent_dim)
    return z.cuda(device)
def noise_list(n, layers, latent_dim, device):
    """A single (latent batch, layer count) pair — the un-mixed style case."""
    block = noise(n, latent_dim, device)
    return [(block, layers)]
def mixed_list(n, layers, latent_dim, device):
    """Style mixing: split the layers at a uniform random crossover point and
    draw an independent latent for each side."""
    crossover = int(torch.rand(()).numpy() * layers)
    first = noise_list(n, crossover, latent_dim, device)
    second = noise_list(n, layers - crossover, latent_dim, device)
    return first + second
def latent_to_w(style_vectorizer, latent_descr):
    """Map each latent z in the (z, num_layers) pairs through the style network."""
    ws = []
    for z, num_layers in latent_descr:
        ws.append((style_vectorizer(z), num_layers))
    return ws
def image_noise(n, im_size, device):
    """Per-pixel uniform noise in [0, 1), shaped (n, im_size, im_size, 1),
    placed on the given GPU."""
    u = torch.FloatTensor(n, im_size, im_size, 1).uniform_(0., 1.)
    return u.cuda(device)
def leaky_relu(p=0.2):
    """In-place LeakyReLU with the project's default negative slope 0.2."""
    return nn.LeakyReLU(p, inplace=True)
def evaluate_in_chunks(max_batch_size, model, *args):
    """Run `model` over `args` split along dim 0 into batches of at most
    `max_batch_size`, concatenating the per-batch outputs."""
    chunks_per_arg = [arg.split(max_batch_size, dim=0) for arg in args]
    outputs = [model(*chunk_args) for chunk_args in zip(*chunks_per_arg)]
    if len(outputs) == 1:
        return outputs[0]
    return torch.cat(outputs, dim=0)
def styles_def_to_tensor(styles_def):
    """Expand (w, num_layers) pairs into a (batch, total_layers, dim) tensor."""
    expanded = [w[:, None, :].expand(-1, num_layers, -1) for w, num_layers in styles_def]
    return torch.cat(expanded, dim=1)
def set_requires_grad(model, bool):
    """Freeze or unfreeze every parameter of `model`."""
    for param in model.parameters():
        param.requires_grad = bool
def slerp(val, low, high):
    """Spherical linear interpolation between batched vectors `low` and
    `high` at interpolation fraction `val`."""
    low_norm = low / torch.norm(low, dim=1, keepdim=True)
    high_norm = high / torch.norm(high, dim=1, keepdim=True)
    # Angle between the (unit) endpoint directions.
    omega = torch.acos((low_norm * high_norm).sum(1))
    so = torch.sin(omega)
    low_coef = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1)
    high_coef = (torch.sin(val * omega) / so).unsqueeze(1)
    return low_coef * low + high_coef * high
# losses
def gen_hinge_loss(fake, real):
    """Generator hinge loss: mean of the fake logits (`real` is unused but
    kept for a uniform loss-function signature)."""
    return fake.mean()
def hinge_loss(real, fake):
    """Discriminator hinge loss: mean of relu(1 + real) + relu(1 - fake)."""
    losses = F.relu(1 + real) + F.relu(1 - fake)
    return losses.mean()
def dual_contrastive_loss(real_logits, fake_logits):
    """Dual contrastive loss: each real logit must win a softmax contest
    against all fake logits, and (with signs flipped) vice versa."""
    device = real_logits.device
    # Flatten both logit tensors to 1-D.
    real_logits, fake_logits = map(lambda t: rearrange(t, '... -> (...)'), (real_logits, fake_logits))

    def loss_half(t1, t2):
        # Build rows [t1_i, t2_0, ..., t2_m]; cross-entropy with target 0
        # pushes each t1_i above every element of t2.
        t1 = rearrange(t1, 'i -> i ()')
        t2 = repeat(t2, 'j -> i j', i = t1.shape[0])
        t = torch.cat((t1, t2), dim = -1)
        return F.cross_entropy(t, torch.zeros(t1.shape[0], device = device, dtype = torch.long))

    return loss_half(real_logits, fake_logits) + loss_half(-fake_logits, -real_logits)
# dataset
def convert_rgb_to_transparent(image):
    """Ensure the image has an alpha channel (mode RGBA)."""
    if image.mode == 'RGBA':
        return image
    return image.convert('RGBA')
def convert_transparent_to_rgb(image):
    """Ensure the image has no alpha channel (mode RGB)."""
    if image.mode == 'RGB':
        return image
    return image.convert('RGB')
class expand_greyscale(object):
    """Transform mapping a 1/2/3/4-channel CHW tensor to 3 channels (or 4
    when `transparent`), replicating greyscale and synthesizing alpha."""

    def __init__(self, transparent):
        self.transparent = transparent

    def __call__(self, tensor):
        channels = tensor.shape[0]
        num_target_channels = 4 if self.transparent else 3

        if channels == num_target_channels:
            return tensor

        alpha = None
        if channels == 1:
            # Pure greyscale: replicate to three color channels.
            color = tensor.expand(3, -1, -1)
        elif channels == 2:
            # Greyscale + alpha.
            color = tensor[:1].expand(3, -1, -1)
            alpha = tensor[1:]
        else:
            raise Exception(f'image with invalid number of channels given {channels}')

        if self.transparent and alpha is None:
            alpha = torch.ones(1, *tensor.shape[1:], device=tensor.device)

        return torch.cat((color, alpha)) if self.transparent else color
def resize_to_minimum_size(min_size, image):
    """Upscale `image` when its largest side is below `min_size`; leave
    sufficiently large images untouched."""
    if max(*image.size) >= min_size:
        return image
    return torchvision.transforms.functional.resize(image, min_size)
class Dataset(data.Dataset):
    """Image-folder dataset: recursively loads jpg/jpeg/png files, resizes and
    crops to `image_size`, with optional alpha channel and crop augmentation."""

    def __init__(self, folder, image_size, transparent = False, aug_prob = 0.):
        super().__init__()
        self.folder = folder
        self.image_size = image_size

        self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')]
        assert len(self.paths) > 0, f'No images were found in {folder} for training'

        convert_image_fn = convert_transparent_to_rgb if not transparent else convert_rgb_to_transparent
        num_channels = 3 if not transparent else 4  # NOTE(review): unused

        pipeline = [
            transforms.Lambda(convert_image_fn),
            transforms.Lambda(partial(resize_to_minimum_size, image_size)),
            transforms.Resize(image_size),
            RandomApply(
                aug_prob,
                transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)),
                transforms.CenterCrop(image_size),
            ),
            transforms.ToTensor(),
            transforms.Lambda(expand_greyscale(transparent)),
        ]
        self.transform = transforms.Compose(pipeline)

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, index):
        image = Image.open(self.paths[index])
        return self.transform(image)
# augmentations
def random_hflip(tensor, prob):
    """Randomly flip a NCHW batch along its width axis.

    NOTE(review): the tensor is left UNCHANGED when `prob > random()`, so a
    higher `prob` means less flipping; callers pass prob=0.5, which is
    symmetric either way.
    """
    if prob > random():
        return tensor
    return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
    """Wrap a discriminator `D` so that, with probability `prob`, inputs are
    augmented (coin-flip horizontal flip + DiffAugment) before being scored.

    Parameters
    ----------
    D : nn.Module
        The discriminator to wrap.
    image_size : int
        Unused; retained for backward compatibility with existing callers.
    """

    def __init__(self, D, image_size):
        super().__init__()
        self.D = D

    def forward(self, images, prob = 0., types = None, detach = False):
        """Score `images` with the wrapped discriminator.

        `types` lists DiffAugment policy names; it previously defaulted to a
        mutable `[]` (a Python pitfall — the same list object is shared across
        calls), so it is now created fresh per call.
        """
        if types is None:
            types = []

        if random() < prob:
            images = random_hflip(images, prob=0.5)
            images = DiffAugment(images, types=types)

        if detach:
            images = images.detach()

        return self.D(images)
# stylegan2 classes
class EqualLinear(nn.Module):
    """Linear layer with an equalized learning-rate multiplier applied to
    both the weight and the bias at run time."""

    def __init__(self, in_dim, out_dim, lr_mul = 1, bias = True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim))

        self.lr_mul = lr_mul

    def forward(self, input):
        # NOTE(review): forward always reads self.bias, so construction with
        # bias=False would raise AttributeError here.
        scaled_weight = self.weight * self.lr_mul
        scaled_bias = self.bias * self.lr_mul
        return F.linear(input, scaled_weight, bias=scaled_bias)
class StyleVectorizer(nn.Module):
    """Mapping network: `depth` alternating EqualLinear + leaky-ReLU layers
    applied to an L2-normalized latent."""

    def __init__(self, emb, depth, lr_mul = 0.1):
        super().__init__()
        layers = []
        for _ in range(depth):
            layers.append(EqualLinear(emb, emb, lr_mul))
            layers.append(leaky_relu())
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        normalized = F.normalize(x, dim=1)
        return self.net(normalized)
class RGBBlock(nn.Module):
    """Project features to RGB(A) via a style-modulated 1x1 conv, add the
    previous resolution's RGB, and optionally upsample for the next stage."""

    def __init__(self, latent_dim, input_channel, upsample, rgba = False):
        super().__init__()
        self.input_channel = input_channel
        self.to_style = nn.Linear(latent_dim, input_channel)

        out_filters = 4 if rgba else 3
        # No demodulation for the RGB projection.
        self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)

        if upsample:
            self.upsample = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
                Blur(),
            )
        else:
            self.upsample = None

    def forward(self, x, prev_rgb, istyle):
        b, c, h, w = x.shape
        style = self.to_style(istyle)
        x = self.conv(x, style)

        if prev_rgb is not None:
            x = x + prev_rgb

        if self.upsample is not None:
            x = self.upsample(x)

        return x
class Conv2DMod(nn.Module):
    """StyleGAN2 modulated convolution: a per-sample style scales the shared
    conv weights (optionally demodulated), implemented as one grouped conv."""

    def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps = 1e-8, **kwargs):
        super().__init__()
        self.filters = out_chan
        self.demod = demod
        self.kernel = kernel
        self.stride = stride
        self.dilation = dilation
        self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel)))
        self.eps = eps
        nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')

    def _get_same_padding(self, size, kernel, dilation, stride):
        # Padding that keeps the spatial size for this kernel/dilation/stride.
        return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2

    def forward(self, x, y):
        b, c, h, w = x.shape

        # Modulate: scale the shared weights per sample by (style + 1).
        style = y[:, None, :, None, None]
        weights = self.weight[None, :, :, :, :] * (style + 1)

        if self.demod:
            # Demodulate: normalize each output filter's expected norm.
            inv_norm = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
            weights = weights * inv_norm

        # Fold the batch into the channel dim and run one grouped convolution.
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.filters, *ws)

        padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride)
        x = F.conv2d(x, weights, padding=padding, groups=b)

        return x.reshape(-1, self.filters, h, w)
class GeneratorBlock(nn.Module):
    """One generator stage: optional 2x upsample, then two style-modulated
    convs with per-pixel noise injection, plus an RGB branch."""

    def __init__(self, latent_dim, input_channels, filters, upsample = True, upsample_rgb = True, rgba = False):
        super().__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None

        # First modulated conv: style and noise projections for it.
        self.to_style1 = nn.Linear(latent_dim, input_channels)
        self.to_noise1 = nn.Linear(1, filters)
        self.conv1 = Conv2DMod(input_channels, filters, 3)

        # Second modulated conv: its own style and noise projections.
        self.to_style2 = nn.Linear(latent_dim, filters)
        self.to_noise2 = nn.Linear(1, filters)
        self.conv2 = Conv2DMod(filters, filters, 3)

        self.activation = leaky_relu()
        self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, rgba)

    def forward(self, x, prev_rgb, istyle, inoise):
        if exists(self.upsample):
            x = self.upsample(x)

        # Crop the shared noise image (NHWC) to this stage's spatial size,
        # then project to per-filter noise and permute to NCHW.
        inoise = inoise[:, :x.shape[2], :x.shape[3], :]
        noise1 = self.to_noise1(inoise).permute((0, 3, 2, 1))
        noise2 = self.to_noise2(inoise).permute((0, 3, 2, 1))

        style1 = self.to_style1(istyle)
        x = self.conv1(x, style1)
        x = self.activation(x + noise1)

        style2 = self.to_style2(istyle)
        x = self.conv2(x, style2)
        x = self.activation(x + noise2)

        rgb = self.to_rgb(x, prev_rgb, istyle)
        return x, rgb
class DiscriminatorBlock(nn.Module):
    """Residual discriminator block: two 3x3 convs plus a strided 1x1 skip
    path; outputs are averaged with 1/sqrt(2) scaling."""
    def __init__(self, input_channels, filters, downsample=True):
        super().__init__()
        self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
        self.net = nn.Sequential(
            nn.Conv2d(input_channels, filters, 3, padding=1),
            leaky_relu(),
            nn.Conv2d(filters, filters, 3, padding=1),
            leaky_relu()
        )
        if downsample:
            # blur before strided conv to reduce aliasing
            self.downsample = nn.Sequential(
                Blur(),
                nn.Conv2d(filters, filters, 3, padding = 1, stride = 2)
            )
        else:
            self.downsample = None

    def forward(self, x):
        shortcut = self.conv_res(x)
        out = self.net(x)
        if exists(self.downsample):
            out = self.downsample(out)
        out = (out + shortcut) * (1 / math.sqrt(2))
        return out
class Generator(nn.Module):
    """StyleGAN2 synthesis network: maps per-layer style vectors plus a
    spatial noise image to an RGB image, upsampling from a 4x4 base."""
    def __init__(self, image_size, latent_dim, network_capacity = 16, transparent = False, attn_layers = [], no_const = False, fmap_max = 512):
        super().__init__()
        self.image_size = image_size
        self.latent_dim = latent_dim
        # number of blocks needed to grow a 4x4 base up to image_size
        self.num_layers = int(log2(image_size) - 1)
        # channel widths, widest first, each capped at fmap_max
        filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]
        set_fmap_max = partial(min, fmap_max)
        filters = list(map(set_fmap_max, filters))
        init_channels = filters[0]
        filters = [init_channels, *filters]
        in_out_pairs = zip(filters[:-1], filters[1:])
        self.no_const = no_const
        if no_const:
            # derive the initial 4x4 block from the averaged style instead of a constant
            self.to_initial_block = nn.ConvTranspose2d(latent_dim, init_channels, 4, 1, 0, bias=False)
        else:
            # learned constant 4x4 input shared across all samples
            self.initial_block = nn.Parameter(torch.randn((1, init_channels, 4, 4)))
        self.initial_conv = nn.Conv2d(filters[0], filters[0], 3, padding=1)
        self.blocks = nn.ModuleList([])
        self.attns = nn.ModuleList([])
        for ind, (in_chan, out_chan) in enumerate(in_out_pairs):
            not_first = ind != 0
            not_last = ind != (self.num_layers - 1)
            num_layer = self.num_layers - ind
            # optional self-attention at the layer indices listed in attn_layers
            attn_fn = attn_and_ff(in_chan) if num_layer in attn_layers else None
            self.attns.append(attn_fn)
            block = GeneratorBlock(
                latent_dim,
                in_chan,
                out_chan,
                upsample = not_first,
                upsample_rgb = not_last,
                rgba = transparent
            )
            self.blocks.append(block)
    def forward(self, styles, input_noise):
        # styles: one style vector per layer, stacked along dim 1;
        # returns the RGB image accumulated across all blocks
        batch_size = styles.shape[0]
        image_size = self.image_size
        if self.no_const:
            avg_style = styles.mean(dim=1)[:, :, None, None]
            x = self.to_initial_block(avg_style)
        else:
            x = self.initial_block.expand(batch_size, -1, -1, -1)
        rgb = None
        styles = styles.transpose(0, 1)
        x = self.initial_conv(x)
        # each block consumes one style slice and adds its RGB contribution
        for style, block, attn in zip(styles, self.blocks, self.attns):
            if exists(attn):
                x = attn(x)
            x, rgb = block(x, rgb, style, input_noise)
        return rgb
class Discriminator(nn.Module):
    """StyleGAN2 discriminator: residual down-blocks with optional
    self-attention and feature-quantization at chosen layers, ending
    in a single real/fake logit plus an auxiliary quantization loss."""
    def __init__(self, image_size, network_capacity = 16, fq_layers = [], fq_dict_size = 256, attn_layers = [], transparent = False, fmap_max = 512):
        super().__init__()
        num_layers = int(log2(image_size) - 1)
        num_init_filters = 3 if not transparent else 4
        blocks = []
        # channel widths per layer, each capped at fmap_max
        filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)]
        set_fmap_max = partial(min, fmap_max)
        filters = list(map(set_fmap_max, filters))
        chan_in_out = list(zip(filters[:-1], filters[1:]))
        blocks = []
        attn_blocks = []
        quantize_blocks = []
        for ind, (in_chan, out_chan) in enumerate(chan_in_out):
            num_layer = ind + 1
            is_not_last = ind != (len(chan_in_out) - 1)
            block = DiscriminatorBlock(in_chan, out_chan, downsample = is_not_last)
            blocks.append(block)
            # optional self-attention at the layer indices listed in attn_layers
            attn_fn = attn_and_ff(out_chan) if num_layer in attn_layers else None
            attn_blocks.append(attn_fn)
            # optional vector-quantization regularizer at layers in fq_layers
            quantize_fn = PermuteToFrom(VectorQuantize(out_chan, fq_dict_size)) if num_layer in fq_layers else None
            quantize_blocks.append(quantize_fn)
        self.blocks = nn.ModuleList(blocks)
        self.attn_blocks = nn.ModuleList(attn_blocks)
        self.quantize_blocks = nn.ModuleList(quantize_blocks)
        chan_last = filters[-1]
        # final feature map is 2x2, flattened into the logit head
        latent_dim = 2 * 2 * chan_last
        self.final_conv = nn.Conv2d(chan_last, chan_last, 3, padding=1)
        self.flatten = Flatten()
        self.to_logit = nn.Linear(latent_dim, 1)
    def forward(self, x):
        b, *_ = x.shape
        # accumulate the auxiliary quantization loss across quantized layers
        quantize_loss = torch.zeros(1).to(x)
        for (block, attn_block, q_block) in zip(self.blocks, self.attn_blocks, self.quantize_blocks):
            x = block(x)
            if exists(attn_block):
                x = attn_block(x)
            if exists(q_block):
                x, _, loss = q_block(x)
                quantize_loss += loss
        x = self.final_conv(x)
        x = self.flatten(x)
        x = self.to_logit(x)
        return x.squeeze(), quantize_loss
class StyleGAN2(nn.Module):
    """Container bundling the mapping net (S), generator (G), discriminator
    (D), their EMA copies (SE/GE), both optimizers and optional apex fp16."""
    def __init__(self, image_size, latent_dim = 512, fmap_max = 512, style_depth = 8, network_capacity = 16, transparent = False, fp16 = False, cl_reg = False, steps = 1, lr = 1e-4, ttur_mult = 2, fq_layers = [], fq_dict_size = 256, attn_layers = [], no_const = False, lr_mlp = 0.1, rank = 0):
        super().__init__()
        self.lr = lr
        self.steps = steps
        self.ema_updater = EMA(0.995)
        self.S = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)
        self.G = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const, fmap_max = fmap_max)
        self.D = Discriminator(image_size, network_capacity, fq_layers = fq_layers, fq_dict_size = fq_dict_size, attn_layers = attn_layers, transparent = transparent, fmap_max = fmap_max)
        # exponential-moving-average copies of S and G, used for evaluation
        self.SE = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)
        self.GE = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const)
        self.D_cl = None
        if cl_reg:
            from contrastive_learner import ContrastiveLearner
            # experimental contrastive loss discriminator regularization
            assert not transparent, 'contrastive loss regularization does not work with transparent images yet'
            self.D_cl = ContrastiveLearner(self.D, image_size, hidden_layer='flatten')
        # wrapper for augmenting all images going into the discriminator
        self.D_aug = AugWrapper(self.D, image_size)
        # turn off grad for exponential moving averages
        set_requires_grad(self.SE, False)
        set_requires_grad(self.GE, False)
        # init optimizers
        generator_params = list(self.G.parameters()) + list(self.S.parameters())
        self.G_opt = Adam(generator_params, lr = self.lr, betas=(0.5, 0.9))
        # discriminator runs at a faster LR (two time-scale update rule)
        self.D_opt = Adam(self.D.parameters(), lr = self.lr * ttur_mult, betas=(0.5, 0.9))
        # init weights
        self._init_weights()
        self.reset_parameter_averaging()
        self.cuda(rank)
        # startup apex mixed precision
        self.fp16 = fp16
        if fp16:
            (self.S, self.G, self.D, self.SE, self.GE), (self.G_opt, self.D_opt) = amp.initialize([self.S, self.G, self.D, self.SE, self.GE], [self.G_opt, self.D_opt], opt_level='O1', num_losses=3)
    def _init_weights(self):
        # kaiming init for convs/linears; zero the noise-injection layers
        for m in self.modules():
            if type(m) in {nn.Conv2d, nn.Linear}:
                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
        for block in self.G.blocks:
            nn.init.zeros_(block.to_noise1.weight)
            nn.init.zeros_(block.to_noise2.weight)
            nn.init.zeros_(block.to_noise1.bias)
            nn.init.zeros_(block.to_noise2.bias)
    def EMA(self):
        # blend current S/G weights into the SE/GE moving-average copies
        def update_moving_average(ma_model, current_model):
            for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
                old_weight, up_weight = ma_params.data, current_params.data
                ma_params.data = self.ema_updater.update_average(old_weight, up_weight)
        update_moving_average(self.SE, self.S)
        update_moving_average(self.GE, self.G)
    def reset_parameter_averaging(self):
        # hard-copy current weights into the EMA models
        self.SE.load_state_dict(self.S.state_dict())
        self.GE.load_state_dict(self.G.state_dict())
    def forward(self, x):
        # identity; the training logic lives in Trainer
        return x
class Trainer():
    """Orchestrates StyleGAN2 training: data loading, the alternating D/G
    optimization loop, EMA updates, evaluation images, FID scoring,
    checkpointing and experiment logging.

    Fixes vs. previous revision:
    - calculate_fid: real images were all written to the same literal file
      '(unknown).png' (the computed per-image filename was unused), so FID was
      measured against a single real sample; now each image gets its own file.
    - set_data_src: removed a duplicated `num_workers = num_workers = ...`
      assignment.
    """
    def __init__(
        self,
        name = 'default',
        results_dir = 'results',
        models_dir = 'models',
        base_dir = './',
        image_size = 128,
        network_capacity = 16,
        fmap_max = 512,
        transparent = False,
        batch_size = 4,
        mixed_prob = 0.9,
        gradient_accumulate_every=1,
        lr = 2e-4,
        lr_mlp = 0.1,
        ttur_mult = 2,
        rel_disc_loss = False,
        num_workers = None,
        save_every = 1000,
        evaluate_every = 1000,
        num_image_tiles = 8,
        trunc_psi = 0.6,
        fp16 = False,
        cl_reg = False,
        no_pl_reg = False,
        fq_layers = [],
        fq_dict_size = 256,
        attn_layers = [],
        no_const = False,
        aug_prob = 0.,
        aug_types = ['translation', 'cutout'],
        top_k_training = False,
        generator_top_k_gamma = 0.99,
        generator_top_k_frac = 0.5,
        dual_contrast_loss = False,
        dataset_aug_prob = 0.,
        calculate_fid_every = None,
        calculate_fid_num_images = 12800,
        clear_fid_cache = False,
        is_ddp = False,
        rank = 0,
        world_size = 1,
        log = False,
        *args,
        **kwargs
    ):
        # extra args/kwargs are forwarded verbatim to StyleGAN2 in init_GAN
        self.GAN_params = [args, kwargs]
        self.GAN = None
        self.name = name
        base_dir = Path(base_dir)
        self.base_dir = base_dir
        self.results_dir = base_dir / results_dir
        self.models_dir = base_dir / models_dir
        self.fid_dir = base_dir / 'fid' / name
        self.config_path = self.models_dir / name / '.config.json'
        assert log2(image_size).is_integer(), 'image size must be a power of 2 (64, 128, 256, 512, 1024)'
        self.image_size = image_size
        self.network_capacity = network_capacity
        self.fmap_max = fmap_max
        self.transparent = transparent
        self.fq_layers = cast_list(fq_layers)
        self.fq_dict_size = fq_dict_size
        self.has_fq = len(self.fq_layers) > 0
        self.attn_layers = cast_list(attn_layers)
        self.no_const = no_const
        self.aug_prob = aug_prob
        self.aug_types = aug_types
        self.lr = lr
        self.lr_mlp = lr_mlp
        self.ttur_mult = ttur_mult
        self.rel_disc_loss = rel_disc_loss
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.mixed_prob = mixed_prob
        self.num_image_tiles = num_image_tiles
        self.evaluate_every = evaluate_every
        self.save_every = save_every
        self.steps = 0
        # running average of the mapping-network output, for truncation
        self.av = None
        self.trunc_psi = trunc_psi
        self.no_pl_reg = no_pl_reg
        self.pl_mean = None
        self.gradient_accumulate_every = gradient_accumulate_every
        assert not fp16 or fp16 and APEX_AVAILABLE, 'Apex is not available for you to use mixed precision training'
        self.fp16 = fp16
        self.cl_reg = cl_reg
        self.d_loss = 0
        self.g_loss = 0
        self.q_loss = None
        self.last_gp_loss = None
        self.last_cr_loss = None
        self.last_fid = None
        # moving average for the path-length regularization target
        self.pl_length_ma = EMA(0.99)
        self.init_folders()
        self.loader = None
        self.dataset_aug_prob = dataset_aug_prob
        self.calculate_fid_every = calculate_fid_every
        self.calculate_fid_num_images = calculate_fid_num_images
        self.clear_fid_cache = clear_fid_cache
        self.top_k_training = top_k_training
        self.generator_top_k_gamma = generator_top_k_gamma
        self.generator_top_k_frac = generator_top_k_frac
        self.dual_contrast_loss = dual_contrast_loss
        assert not (is_ddp and cl_reg), 'Contrastive loss regularization does not work well with multi GPUs yet'
        self.is_ddp = is_ddp
        self.is_main = rank == 0
        self.rank = rank
        self.world_size = world_size
        self.logger = aim.Session(experiment=name) if log else None

    @property
    def image_extension(self):
        """File extension for saved samples ('png' keeps the alpha channel)."""
        return 'jpg' if not self.transparent else 'png'

    @property
    def checkpoint_num(self):
        """Index of the checkpoint corresponding to the current step."""
        return floor(self.steps // self.save_every)

    @property
    def hparams(self):
        return {'image_size': self.image_size, 'network_capacity': self.network_capacity}

    def init_GAN(self):
        """Instantiate the StyleGAN2 model (and DDP wrappers when distributed)."""
        args, kwargs = self.GAN_params
        self.GAN = StyleGAN2(lr = self.lr, lr_mlp = self.lr_mlp, ttur_mult = self.ttur_mult, image_size = self.image_size, network_capacity = self.network_capacity, fmap_max = self.fmap_max, transparent = self.transparent, fq_layers = self.fq_layers, fq_dict_size = self.fq_dict_size, attn_layers = self.attn_layers, fp16 = self.fp16, cl_reg = self.cl_reg, no_const = self.no_const, rank = self.rank, *args, **kwargs)
        if self.is_ddp:
            ddp_kwargs = {'device_ids': [self.rank]}
            self.S_ddp = DDP(self.GAN.S, **ddp_kwargs)
            self.G_ddp = DDP(self.GAN.G, **ddp_kwargs)
            self.D_ddp = DDP(self.GAN.D, **ddp_kwargs)
            self.D_aug_ddp = DDP(self.GAN.D_aug, **ddp_kwargs)
        if exists(self.logger):
            self.logger.set_params(self.hparams)

    def write_config(self):
        """Persist the architecture config next to the model checkpoints."""
        self.config_path.write_text(json.dumps(self.config()))

    def load_config(self):
        """Restore the architecture config (if saved) and rebuild the GAN."""
        config = self.config() if not self.config_path.exists() else json.loads(self.config_path.read_text())
        self.image_size = config['image_size']
        self.network_capacity = config['network_capacity']
        self.transparent = config['transparent']
        self.fq_layers = config['fq_layers']
        self.fq_dict_size = config['fq_dict_size']
        # .pop with defaults keeps configs from older versions loadable
        self.fmap_max = config.pop('fmap_max', 512)
        self.attn_layers = config.pop('attn_layers', [])
        self.no_const = config.pop('no_const', False)
        self.lr_mlp = config.pop('lr_mlp', 0.1)
        del self.GAN
        self.init_GAN()

    def config(self):
        return {'image_size': self.image_size, 'network_capacity': self.network_capacity, 'lr_mlp': self.lr_mlp, 'transparent': self.transparent, 'fq_layers': self.fq_layers, 'fq_dict_size': self.fq_dict_size, 'attn_layers': self.attn_layers, 'no_const': self.no_const}

    def set_data_src(self, folder):
        """Point the trainer at an image folder and (re)build the cycling loader."""
        self.dataset = Dataset(folder, self.image_size, transparent = self.transparent, aug_prob = self.dataset_aug_prob)
        num_workers = default(self.num_workers, NUM_CORES if not self.is_ddp else 0)
        sampler = DistributedSampler(self.dataset, rank=self.rank, num_replicas=self.world_size, shuffle=True) if self.is_ddp else None
        dataloader = data.DataLoader(self.dataset, num_workers = num_workers, batch_size = math.ceil(self.batch_size / self.world_size), sampler = sampler, shuffle = not self.is_ddp, drop_last = True, pin_memory = True)
        self.loader = cycle(dataloader)
        # auto set augmentation prob for user if dataset is detected to be low
        num_samples = len(self.dataset)
        if not exists(self.aug_prob) and num_samples < 1e5:
            self.aug_prob = min(0.5, (1e5 - num_samples) * 3e-6)
            print(f'autosetting augmentation probability to {round(self.aug_prob * 100)}%')

    def train(self):
        """Run one training step: optional contrastive reg, one discriminator
        update and one generator update (each gradient-accumulated), then
        EMA / checkpoint / evaluation housekeeping."""
        assert exists(self.loader), 'You must first initialize the data source with `.set_data_src(<folder of images>)`'
        if not exists(self.GAN):
            self.init_GAN()
        self.GAN.train()
        total_disc_loss = torch.tensor(0.).cuda(self.rank)
        total_gen_loss = torch.tensor(0.).cuda(self.rank)
        batch_size = math.ceil(self.batch_size / self.world_size)
        image_size = self.GAN.G.image_size
        latent_dim = self.GAN.G.latent_dim
        num_layers = self.GAN.G.num_layers
        aug_prob = self.aug_prob
        aug_types = self.aug_types
        aug_kwargs = {'prob': aug_prob, 'types': aug_types}
        apply_gradient_penalty = self.steps % 4 == 0
        apply_path_penalty = not self.no_pl_reg and self.steps > 5000 and self.steps % 32 == 0
        apply_cl_reg_to_generated = self.steps > 20000
        # when distributed, use the DDP-wrapped modules
        S = self.GAN.S if not self.is_ddp else self.S_ddp
        G = self.GAN.G if not self.is_ddp else self.G_ddp
        D = self.GAN.D if not self.is_ddp else self.D_ddp
        D_aug = self.GAN.D_aug if not self.is_ddp else self.D_aug_ddp
        backwards = partial(loss_backwards, self.fp16)
        if exists(self.GAN.D_cl):
            # contrastive-learning regularization of the discriminator
            self.GAN.D_opt.zero_grad()
            if apply_cl_reg_to_generated:
                for i in range(self.gradient_accumulate_every):
                    get_latents_fn = mixed_list if random() < self.mixed_prob else noise_list
                    style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)
                    noise = image_noise(batch_size, image_size, device=self.rank)
                    w_space = latent_to_w(self.GAN.S, style)
                    w_styles = styles_def_to_tensor(w_space)
                    generated_images = self.GAN.G(w_styles, noise)
                    self.GAN.D_cl(generated_images.clone().detach(), accumulate=True)
            for i in range(self.gradient_accumulate_every):
                image_batch = next(self.loader).cuda(self.rank)
                self.GAN.D_cl(image_batch, accumulate=True)
            loss = self.GAN.D_cl.calculate_loss()
            self.last_cr_loss = loss.clone().detach().item()
            backwards(loss, self.GAN.D_opt, loss_id = 0)
            self.GAN.D_opt.step()
        # setup losses
        if not self.dual_contrast_loss:
            D_loss_fn = hinge_loss
            G_loss_fn = gen_hinge_loss
            G_requires_reals = False
        else:
            D_loss_fn = dual_contrastive_loss
            G_loss_fn = dual_contrastive_loss
            G_requires_reals = True
        # train discriminator
        avg_pl_length = self.pl_mean
        self.GAN.D_opt.zero_grad()
        for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[D_aug, S, G]):
            # mix two latents with probability mixed_prob (style mixing)
            get_latents_fn = mixed_list if random() < self.mixed_prob else noise_list
            style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)
            noise = image_noise(batch_size, image_size, device=self.rank)
            w_space = latent_to_w(S, style)
            w_styles = styles_def_to_tensor(w_space)
            generated_images = G(w_styles, noise)
            fake_output, fake_q_loss = D_aug(generated_images.clone().detach(), detach = True, **aug_kwargs)
            image_batch = next(self.loader).cuda(self.rank)
            image_batch.requires_grad_()
            real_output, real_q_loss = D_aug(image_batch, **aug_kwargs)
            real_output_loss = real_output
            fake_output_loss = fake_output
            if self.rel_disc_loss:
                # relativistic formulation: score relative to the other batch's mean
                real_output_loss = real_output_loss - fake_output.mean()
                fake_output_loss = fake_output_loss - real_output.mean()
            divergence = D_loss_fn(real_output_loss, fake_output_loss)
            disc_loss = divergence
            if self.has_fq:
                quantize_loss = (fake_q_loss + real_q_loss).mean()
                self.q_loss = float(quantize_loss.detach().item())
                disc_loss = disc_loss + quantize_loss
            if apply_gradient_penalty:
                gp = gradient_penalty(image_batch, real_output)
                self.last_gp_loss = gp.clone().detach().item()
                self.track(self.last_gp_loss, 'GP')
                disc_loss = disc_loss + gp
            disc_loss = disc_loss / self.gradient_accumulate_every
            disc_loss.register_hook(raise_if_nan)
            backwards(disc_loss, self.GAN.D_opt, loss_id = 1)
            total_disc_loss += divergence.detach().item() / self.gradient_accumulate_every
        self.d_loss = float(total_disc_loss)
        self.track(self.d_loss, 'D')
        self.GAN.D_opt.step()
        # train generator
        self.GAN.G_opt.zero_grad()
        for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[S, G, D_aug]):
            # NOTE: reuses the get_latents_fn chosen in the discriminator loop
            style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)
            noise = image_noise(batch_size, image_size, device=self.rank)
            w_space = latent_to_w(S, style)
            w_styles = styles_def_to_tensor(w_space)
            generated_images = G(w_styles, noise)
            fake_output, _ = D_aug(generated_images, **aug_kwargs)
            fake_output_loss = fake_output
            real_output = None
            if G_requires_reals:
                image_batch = next(self.loader).cuda(self.rank)
                real_output, _ = D_aug(image_batch, detach = True, **aug_kwargs)
                real_output = real_output.detach()
            if self.top_k_training:
                # train only on the k hardest (lowest-scoring) fakes; k anneals down
                epochs = (self.steps * batch_size * self.gradient_accumulate_every) / len(self.dataset)
                k_frac = max(self.generator_top_k_gamma ** epochs, self.generator_top_k_frac)
                k = math.ceil(batch_size * k_frac)
                if k != batch_size:
                    fake_output_loss, _ = fake_output_loss.topk(k=k, largest=False)
            loss = G_loss_fn(fake_output_loss, real_output)
            gen_loss = loss
            if apply_path_penalty:
                pl_lengths = calc_pl_lengths(w_styles, generated_images)
                avg_pl_length = np.mean(pl_lengths.detach().cpu().numpy())
                if not is_empty(self.pl_mean):
                    pl_loss = ((pl_lengths - self.pl_mean) ** 2).mean()
                    if not torch.isnan(pl_loss):
                        gen_loss = gen_loss + pl_loss
            gen_loss = gen_loss / self.gradient_accumulate_every
            gen_loss.register_hook(raise_if_nan)
            backwards(gen_loss, self.GAN.G_opt, loss_id = 2)
            total_gen_loss += loss.detach().item() / self.gradient_accumulate_every
        self.g_loss = float(total_gen_loss)
        self.track(self.g_loss, 'G')
        self.GAN.G_opt.step()
        # calculate moving averages
        if apply_path_penalty and not np.isnan(avg_pl_length):
            self.pl_mean = self.pl_length_ma.update_average(self.pl_mean, avg_pl_length)
            self.track(self.pl_mean, 'PL')
        if self.is_main and self.steps % 10 == 0 and self.steps > 20000:
            self.GAN.EMA()
        if self.is_main and self.steps <= 25000 and self.steps % 1000 == 2:
            self.GAN.reset_parameter_averaging()
        # save from NaN errors
        if any(torch.isnan(l) for l in (total_gen_loss, total_disc_loss)):
            print(f'NaN detected for generator or discriminator. Loading from checkpoint #{self.checkpoint_num}')
            self.load(self.checkpoint_num)
            raise NanException
        # periodically save results
        if self.is_main:
            if self.steps % self.save_every == 0:
                self.save(self.checkpoint_num)
            if self.steps % self.evaluate_every == 0 or (self.steps % 100 == 0 and self.steps < 2500):
                self.evaluate(floor(self.steps / self.evaluate_every))
            if exists(self.calculate_fid_every) and self.steps % self.calculate_fid_every == 0 and self.steps != 0:
                num_batches = math.ceil(self.calculate_fid_num_images / self.batch_size)
                fid = self.calculate_fid(num_batches)
                self.last_fid = fid
                with open(str(self.results_dir / self.name / 'fid_scores.txt'), 'a') as f:
                    f.write(f'{self.steps},{fid}\n')
        self.steps += 1
        self.av = None

    @torch.no_grad()
    def evaluate(self, num = 0, trunc = 1.0):
        """Save sample grids: current weights, EMA weights, and a style-mixing grid."""
        self.GAN.eval()
        ext = self.image_extension
        num_rows = self.num_image_tiles
        latent_dim = self.GAN.G.latent_dim
        image_size = self.GAN.G.image_size
        num_layers = self.GAN.G.num_layers
        # latents and noise
        latents = noise_list(num_rows ** 2, num_layers, latent_dim, device=self.rank)
        n = image_noise(num_rows ** 2, image_size, device=self.rank)
        # regular
        generated_images = self.generate_truncated(self.GAN.S, self.GAN.G, latents, n, trunc_psi = self.trunc_psi)
        torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}.{ext}'), nrow=num_rows)
        # moving averages
        generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)
        torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-ema.{ext}'), nrow=num_rows)
        # mixing regularities
        def tile(a, dim, n_tile):
            # repeat-interleave `a` n_tile times along `dim`
            init_dim = a.size(dim)
            repeat_idx = [1] * a.dim()
            repeat_idx[dim] = n_tile
            a = a.repeat(*(repeat_idx))
            order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])).cuda(self.rank)
            return torch.index_select(a, dim, order_index)
        nn = noise(num_rows, latent_dim, device=self.rank)
        tmp1 = tile(nn, 0, num_rows)
        tmp2 = nn.repeat(num_rows, 1)
        tt = int(num_layers / 2)
        # first half of the layers from one latent, second half from another
        mixed_latents = [(tmp1, tt), (tmp2, num_layers - tt)]
        generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, mixed_latents, n, trunc_psi = self.trunc_psi)
        torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-mr.{ext}'), nrow=num_rows)

    @torch.no_grad()
    def calculate_fid(self, num_batches):
        """Compute FID between cached real samples and freshly generated EMA fakes."""
        from pytorch_fid import fid_score
        torch.cuda.empty_cache()
        real_path = self.fid_dir / 'real'
        fake_path = self.fid_dir / 'fake'
        # remove any existing files used for fid calculation and recreate directories
        if not real_path.exists() or self.clear_fid_cache:
            rmtree(real_path, ignore_errors=True)
            os.makedirs(real_path)
            for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'):
                real_batch = next(self.loader)
                for k, image in enumerate(real_batch.unbind(0)):
                    # fix: use the per-image filename; previously every image was
                    # written to the same '(unknown).png' path, so FID compared
                    # against a single real sample
                    filename = str(k + batch_num * self.batch_size)
                    torchvision.utils.save_image(image, str(real_path / f'{filename}.png'))
        # generate a bunch of fake images in results / name / fid_fake
        rmtree(fake_path, ignore_errors=True)
        os.makedirs(fake_path)
        self.GAN.eval()
        ext = self.image_extension
        latent_dim = self.GAN.G.latent_dim
        image_size = self.GAN.G.image_size
        num_layers = self.GAN.G.num_layers
        for batch_num in tqdm(range(num_batches), desc='calculating FID - saving generated'):
            # latents and noise
            latents = noise_list(self.batch_size, num_layers, latent_dim, device=self.rank)
            noise = image_noise(self.batch_size, image_size, device=self.rank)
            # moving averages
            generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, noise, trunc_psi = self.trunc_psi)
            for j, image in enumerate(generated_images.unbind(0)):
                torchvision.utils.save_image(image, str(fake_path / f'{str(j + batch_num * self.batch_size)}-ema.{ext}'))
        return fid_score.calculate_fid_given_paths([str(real_path), str(fake_path)], 256, noise.device, 2048)

    @torch.no_grad()
    def truncate_style(self, tensor, trunc_psi = 0.75):
        """Pull w vectors toward the mean style by a factor of trunc_psi."""
        S = self.GAN.S
        batch_size = self.batch_size
        latent_dim = self.GAN.G.latent_dim
        if not exists(self.av):
            # estimate the mean w from 2000 random latents, cached in self.av
            z = noise(2000, latent_dim, device=self.rank)
            samples = evaluate_in_chunks(batch_size, S, z).cpu().numpy()
            self.av = np.mean(samples, axis = 0)
            self.av = np.expand_dims(self.av, axis = 0)
        av_torch = torch.from_numpy(self.av).cuda(self.rank)
        tensor = trunc_psi * (tensor - av_torch) + av_torch
        return tensor

    @torch.no_grad()
    def truncate_style_defs(self, w, trunc_psi = 0.75):
        """Apply truncation to every (tensor, num_layers) entry of a style def."""
        w_space = []
        for tensor, num_layers in w:
            tensor = self.truncate_style(tensor, trunc_psi = trunc_psi)
            w_space.append((tensor, num_layers))
        return w_space

    @torch.no_grad()
    def generate_truncated(self, S, G, style, noi, trunc_psi = 0.75, num_image_tiles = 8):
        """Map z styles through S, truncate, and synthesize images with G."""
        w = map(lambda t: (S(t[0]), t[1]), style)
        w_truncated = self.truncate_style_defs(w, trunc_psi = trunc_psi)
        w_styles = styles_def_to_tensor(w_truncated)
        generated_images = evaluate_in_chunks(self.batch_size, G, w_styles, noi)
        return generated_images.clamp_(0., 1.)

    @torch.no_grad()
    def generate_interpolation(self, num = 0, num_image_tiles = 8, trunc = 1.0, num_steps = 100, save_frames = False):
        """Save an animated GIF interpolating (slerp) between two latent grids."""
        self.GAN.eval()
        ext = self.image_extension
        num_rows = num_image_tiles
        latent_dim = self.GAN.G.latent_dim
        image_size = self.GAN.G.image_size
        num_layers = self.GAN.G.num_layers
        # latents and noise
        latents_low = noise(num_rows ** 2, latent_dim, device=self.rank)
        latents_high = noise(num_rows ** 2, latent_dim, device=self.rank)
        n = image_noise(num_rows ** 2, image_size, device=self.rank)
        ratios = torch.linspace(0., 8., num_steps)
        frames = []
        for ratio in tqdm(ratios):
            interp_latents = slerp(ratio, latents_low, latents_high)
            latents = [(interp_latents, num_layers)]
            generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)
            images_grid = torchvision.utils.make_grid(generated_images, nrow = num_rows)
            pil_image = transforms.ToPILImage()(images_grid.cpu())
            if self.transparent:
                # composite RGBA frames onto white so the GIF is viewable
                background = Image.new("RGBA", pil_image.size, (255, 255, 255))
                pil_image = Image.alpha_composite(background, pil_image)
            frames.append(pil_image)
        frames[0].save(str(self.results_dir / self.name / f'{str(num)}.gif'), save_all=True, append_images=frames[1:], duration=80, loop=0, optimize=True)
        if save_frames:
            folder_path = (self.results_dir / self.name / f'{str(num)}')
            folder_path.mkdir(parents=True, exist_ok=True)
            for ind, frame in enumerate(frames):
                frame.save(str(folder_path / f'{str(ind)}.{ext}'))

    def print_log(self):
        """Print the current loss/metric snapshot as one line, skipping unset values."""
        data = [
            ('G', self.g_loss),
            ('D', self.d_loss),
            ('GP', self.last_gp_loss),
            ('PL', self.pl_mean),
            ('CR', self.last_cr_loss),
            ('Q', self.q_loss),
            ('FID', self.last_fid)
        ]
        data = [d for d in data if exists(d[1])]
        log = ' | '.join(map(lambda n: f'{n[0]}: {n[1]:.2f}', data))
        print(log)

    def track(self, value, name):
        """Forward a scalar metric to the experiment logger, if configured."""
        if not exists(self.logger):
            return
        self.logger.track(value, name = name)

    def model_name(self, num):
        return str(self.models_dir / self.name / f'model_{num}.pt')

    def init_folders(self):
        (self.results_dir / self.name).mkdir(parents=True, exist_ok=True)
        (self.models_dir / self.name).mkdir(parents=True, exist_ok=True)

    def clear(self):
        """Delete all results, models, fid caches and config for this run."""
        rmtree(str(self.models_dir / self.name), True)
        rmtree(str(self.results_dir / self.name), True)
        rmtree(str(self.fid_dir), True)
        rmtree(str(self.config_path), True)
        self.init_folders()

    def save(self, num):
        """Write checkpoint `num` (GAN weights, package version, amp state)."""
        save_data = {
            'GAN': self.GAN.state_dict(),
            'version': __version__
        }
        if self.GAN.fp16:
            save_data['amp'] = amp.state_dict()
        torch.save(save_data, self.model_name(num))
        self.write_config()

    def load(self, num = -1):
        """Load checkpoint `num`; -1 means the latest one found on disk."""
        self.load_config()
        name = num
        if num == -1:
            file_paths = list(Path(self.models_dir / self.name).glob('model_*.pt'))
            saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths))
            if len(saved_nums) == 0:
                return
            name = saved_nums[-1]
            print(f'continuing from previous epoch - {name}')
        self.steps = name * self.save_every
        load_data = torch.load(self.model_name(name))
        if 'version' in load_data:
            print(f"loading from version {load_data['version']}")
        try:
            self.GAN.load_state_dict(load_data['GAN'])
        except Exception as e:
            print('unable to load save model. please try downgrading the package to the version specified by the saved model')
            raise e
        if self.GAN.fp16 and 'amp' in load_data:
            amp.load_state_dict(load_data['amp'])
class ModelLoader:
    """Inference helper: loads a trained Trainer checkpoint and exposes
    noise -> style and style -> image conversions using the EMA networks."""
    def __init__(self, *, base_dir, name = 'default', load_from = -1):
        self.model = Trainer(name = name, base_dir = base_dir)
        self.model.load(load_from)

    def noise_to_styles(self, noise, trunc_psi = None):
        # map z through the EMA style vectorizer, optionally truncated
        latent = noise.cuda()
        styles = self.model.GAN.SE(latent)
        if exists(trunc_psi):
            styles = self.model.truncate_style(styles)
        return styles

    def styles_to_images(self, w):
        # broadcast one w vector across every generator layer and synthesize
        batch_size, *_ = w.shape
        num_layers = self.model.GAN.GE.num_layers
        image_size = self.model.image_size
        style_defs = [(w, num_layers)]
        style_tensor = styles_def_to_tensor(style_defs)
        spatial_noise = image_noise(batch_size, image_size, device = 0)
        images = self.model.GAN.GE(style_tensor, spatial_noise)
        images.clamp_(0., 1.)
        return images
| [
"apex.amp.scale_loss",
"torchvision.transforms.ToPILImage",
"kornia.filters.filter2D",
"math.log2",
"torch.cuda.is_available",
"torch.flip",
"numpy.arange",
"torch.nn.ModuleList",
"torch.linspace",
"torchvision.transforms.Resize",
"torch.index_select",
"torch.optim.Adam",
"contrastive_learne... | [((1029, 1054), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1052, 1054), False, 'import torch\n'), ((1137, 1164), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1162, 1164), False, 'import multiprocessing\n'), ((5726, 5740), 'torch.isnan', 'torch.isnan', (['t'], {}), '(t)\n', (5737, 5740), False, 'import torch\n'), ((8053, 8082), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['p'], {'inplace': '(True)'}), '(p, inplace=True)\n', (8065, 8082), False, 'from torch import nn, einsum\n'), ((8358, 8391), 'torch.cat', 'torch.cat', (['chunked_outputs'], {'dim': '(0)'}), '(chunked_outputs, dim=0)\n', (8367, 8391), False, 'import torch\n'), ((8828, 8844), 'torch.sin', 'torch.sin', (['omega'], {}), '(omega)\n', (8837, 8844), False, 'import torch\n'), ((12208, 12237), 'torch.flip', 'torch.flip', (['tensor'], {'dims': '(3,)'}), '(tensor, dims=(3,))\n', (12218, 12237), False, 'import torch\n'), ((41317, 41332), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (41330, 41332), False, 'import torch\n'), ((43246, 43261), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (43259, 43261), False, 'import torch\n'), ((45164, 45179), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (45177, 45179), False, 'import torch\n'), ((45758, 45773), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (45771, 45773), False, 'import torch\n'), ((46050, 46065), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (46063, 46065), False, 'import torch\n'), ((46467, 46482), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (46480, 46482), False, 'import torch\n'), ((2609, 2643), 'torch.mean', 'torch.mean', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (2619, 2643), False, 'import torch\n'), ((3061, 3084), 'torch.Tensor', 'torch.Tensor', (['[1, 2, 1]'], {}), '([1, 2, 1])\n', (3073, 3084), False, 'import torch\n'), ((3231, 3262), 'kornia.filters.filter2D', 'filter2D', (['x', 'f'], 
{'normalized': '(True)'}), '(x, f, normalized=True)\n', (3239, 3262), False, 'from kornia.filters import filter2D\n'), ((3970, 3979), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (3977, 3979), False, 'from torch import nn, einsum\n'), ((4000, 4040), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'inner_dim', '(1)'], {'bias': '(False)'}), '(dim, inner_dim, 1, bias=False)\n', (4009, 4040), False, 'from torch import nn, einsum\n'), ((4152, 4180), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_dim', 'dim', '(1)'], {}), '(inner_dim, dim, 1)\n', (4161, 4180), False, 'from torch import nn, einsum\n'), ((4539, 4576), 'torch.einsum', 'einsum', (['"""b n d, b n e -> b d e"""', 'k', 'v'], {}), "('b n d, b n e -> b d e', k, v)\n", (4545, 4576), False, 'from torch import nn, einsum\n'), ((4591, 4634), 'torch.einsum', 'einsum', (['"""b n d, b d e -> b n e"""', 'q', 'context'], {}), "('b n d, b d e -> b n e', q, context)\n", (4597, 4634), False, 'from torch import nn, einsum\n'), ((4649, 4710), 'einops.rearrange', 'rearrange', (['out', '"""(b h) (x y) d -> b (h d) x y"""'], {'h': 'h', 'x': 'x', 'y': 'y'}), "(out, '(b h) (x y) d -> b (h d) x y', h=h, x=x, y=y)\n", (4658, 4710), False, 'from einops import rearrange, repeat\n'), ((7000, 7040), 'torch.randn', 'torch.randn', (['images.shape'], {'device': 'device'}), '(images.shape, device=device)\n', (7011, 7040), False, 'import torch\n'), ((7043, 7064), 'math.sqrt', 'math.sqrt', (['num_pixels'], {}), '(num_pixels)\n', (7052, 7064), False, 'import math\n'), ((8667, 8703), 'torch.norm', 'torch.norm', (['low'], {'dim': '(1)', 'keepdim': '(True)'}), '(low, dim=1, keepdim=True)\n', (8677, 8703), False, 'import torch\n'), ((8727, 8764), 'torch.norm', 'torch.norm', (['high'], {'dim': '(1)', 'keepdim': '(True)'}), '(high, dim=1, keepdim=True)\n', (8737, 8764), False, 'import torch\n'), ((9360, 9386), 'einops.rearrange', 'rearrange', (['t1', '"""i -> i ()"""'], {}), "(t1, 'i -> i ()')\n", (9369, 9386), False, 'from einops import rearrange, repeat\n'), 
((9400, 9437), 'einops.repeat', 'repeat', (['t2', '"""j -> i j"""'], {'i': 't1.shape[0]'}), "(t2, 'j -> i j', i=t1.shape[0])\n", (9406, 9437), False, 'from einops import rearrange, repeat\n'), ((9452, 9479), 'torch.cat', 'torch.cat', (['(t1, t2)'], {'dim': '(-1)'}), '((t1, t2), dim=-1)\n', (9461, 9479), False, 'import torch\n'), ((10832, 10889), 'torchvision.transforms.functional.resize', 'torchvision.transforms.functional.resize', (['image', 'min_size'], {}), '(image, min_size)\n', (10872, 10889), False, 'import torchvision\n'), ((12049, 12065), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (12059, 12065), False, 'from PIL import Image\n'), ((12165, 12173), 'random.random', 'random', ([], {}), '()\n', (12171, 12173), False, 'from random import random\n'), ((13008, 13080), 'torch.nn.functional.linear', 'F.linear', (['input', '(self.weight * self.lr_mul)'], {'bias': '(self.bias * self.lr_mul)'}), '(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul)\n', (13016, 13080), True, 'import torch.nn.functional as F\n'), ((13338, 13360), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (13351, 13360), False, 'from torch import nn, einsum\n'), ((13400, 13421), 'torch.nn.functional.normalize', 'F.normalize', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (13411, 13421), True, 'import torch.nn.functional as F\n'), ((13646, 13682), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'input_channel'], {}), '(latent_dim, input_channel)\n', (13655, 13682), False, 'from torch import nn, einsum\n'), ((14691, 14779), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.weight'], {'a': '(0)', 'mode': '"""fan_in"""', 'nonlinearity': '"""leaky_relu"""'}), "(self.weight, a=0, mode='fan_in', nonlinearity=\n 'leaky_relu')\n", (14714, 14779), False, 'from torch import nn, einsum\n'), ((15461, 15508), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', 'weights'], {'padding': 'padding', 'groups': 'b'}), '(x, weights, padding=padding, 
groups=b)\n', (15469, 15508), True, 'import torch.nn.functional as F\n'), ((15885, 15922), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'input_channels'], {}), '(latent_dim, input_channels)\n', (15894, 15922), False, 'from torch import nn, einsum\n'), ((15948, 15969), 'torch.nn.Linear', 'nn.Linear', (['(1)', 'filters'], {}), '(1, filters)\n', (15957, 15969), False, 'from torch import nn, einsum\n'), ((16063, 16093), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'filters'], {}), '(latent_dim, filters)\n', (16072, 16093), False, 'from torch import nn, einsum\n'), ((16119, 16140), 'torch.nn.Linear', 'nn.Linear', (['(1)', 'filters'], {}), '(1, filters)\n', (16128, 16140), False, 'from torch import nn, einsum\n'), ((17061, 17129), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'filters', '(1)'], {'stride': '(2 if downsample else 1)'}), '(input_channels, filters, 1, stride=2 if downsample else 1)\n', (17070, 17129), False, 'from torch import nn, einsum\n'), ((18167, 18189), 'functools.partial', 'partial', (['min', 'fmap_max'], {}), '(min, fmap_max)\n', (18174, 18189), False, 'from functools import partial\n'), ((18661, 18708), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[0]', 'filters[0]', '(3)'], {'padding': '(1)'}), '(filters[0], filters[0], 3, padding=1)\n', (18670, 18708), False, 'from torch import nn, einsum\n'), ((18731, 18748), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (18744, 18748), False, 'from torch import nn, einsum\n'), ((18770, 18787), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (18783, 18787), False, 'from torch import nn, einsum\n'), ((20486, 20508), 'functools.partial', 'partial', (['min', 'fmap_max'], {}), '(min, fmap_max)\n', (20493, 20508), False, 'from functools import partial\n'), ((21277, 21298), 'torch.nn.ModuleList', 'nn.ModuleList', (['blocks'], {}), '(blocks)\n', (21290, 21298), False, 'from torch import nn, einsum\n'), ((21326, 21352), 'torch.nn.ModuleList', 'nn.ModuleList', (['attn_blocks'], 
{}), '(attn_blocks)\n', (21339, 21352), False, 'from torch import nn, einsum\n'), ((21384, 21414), 'torch.nn.ModuleList', 'nn.ModuleList', (['quantize_blocks'], {}), '(quantize_blocks)\n', (21397, 21414), False, 'from torch import nn, einsum\n'), ((21514, 21559), 'torch.nn.Conv2d', 'nn.Conv2d', (['chan_last', 'chan_last', '(3)'], {'padding': '(1)'}), '(chan_last, chan_last, 3, padding=1)\n', (21523, 21559), False, 'from torch import nn, einsum\n'), ((21617, 21641), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', '(1)'], {}), '(latent_dim, 1)\n', (21626, 21641), False, 'from torch import nn, einsum\n'), ((24043, 24095), 'torch.optim.Adam', 'Adam', (['generator_params'], {'lr': 'self.lr', 'betas': '(0.5, 0.9)'}), '(generator_params, lr=self.lr, betas=(0.5, 0.9))\n', (24047, 24095), False, 'from torch.optim import Adam\n'), ((27032, 27046), 'pathlib.Path', 'Path', (['base_dir'], {}), '(base_dir)\n', (27036, 27046), False, 'from pathlib import Path\n'), ((29770, 29806), 'math.floor', 'floor', (['(self.steps // self.save_every)'], {}), '(self.steps // self.save_every)\n', (29775, 29806), False, 'from math import floor, log2\n'), ((33204, 33248), 'math.ceil', 'math.ceil', (['(self.batch_size / self.world_size)'], {}), '(self.batch_size / self.world_size)\n', (33213, 33248), False, 'import math\n'), ((33981, 34015), 'functools.partial', 'partial', (['loss_backwards', 'self.fp16'], {}), '(loss_backwards, self.fp16)\n', (33988, 34015), False, 'from functools import partial\n'), ((42867, 42889), 'torch.nn.repeat', 'nn.repeat', (['num_rows', '(1)'], {}), '(num_rows, 1)\n', (42876, 42889), False, 'from torch import nn, einsum\n'), ((43354, 43378), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (43376, 43378), False, 'import torch\n'), ((44144, 44181), 'shutil.rmtree', 'rmtree', (['fake_path'], {'ignore_errors': '(True)'}), '(fake_path, ignore_errors=True)\n', (44150, 44181), False, 'from shutil import rmtree\n'), ((44190, 44212), 'os.makedirs', 
'os.makedirs', (['fake_path'], {}), '(fake_path)\n', (44201, 44212), False, 'import os\n'), ((47090, 47125), 'torch.linspace', 'torch.linspace', (['(0.0)', '(8.0)', 'num_steps'], {}), '(0.0, 8.0, num_steps)\n', (47104, 47125), False, 'import torch\n'), ((47166, 47178), 'tqdm.tqdm', 'tqdm', (['ratios'], {}), '(ratios)\n', (47170, 47178), False, 'from tqdm import tqdm\n'), ((2407, 2431), 'torch.ones', 'torch.ones', (['(1)', 'dim', '(1)', '(1)'], {}), '(1, dim, 1, 1)\n', (2417, 2431), False, 'import torch\n'), ((2463, 2488), 'torch.zeros', 'torch.zeros', (['(1)', 'dim', '(1)', '(1)'], {}), '(1, dim, 1, 1)\n', (2474, 2488), False, 'import torch\n'), ((3476, 3589), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': 'kernel_size', 'padding': 'padding', 'groups': 'dim_in', 'stride': 'stride', 'bias': 'bias'}), '(dim_in, dim_in, kernel_size=kernel_size, padding=padding, groups=\n dim_in, stride=stride, bias=bias)\n', (3485, 3589), False, 'from torch import nn, einsum\n'), ((3608, 3660), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_out'], {'kernel_size': '(1)', 'bias': 'bias'}), '(dim_in, dim_out, kernel_size=1, bias=bias)\n', (3617, 3660), False, 'from torch import nn, einsum\n'), ((5252, 5263), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (5261, 5263), False, 'from contextlib import contextmanager, ExitStack\n'), ((6293, 6333), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer', 'loss_id'], {}), '(loss, optimizer, loss_id)\n', (6307, 6333), False, 'from apex import amp\n'), ((7434, 7460), 'torch.randn', 'torch.randn', (['n', 'latent_dim'], {}), '(n, latent_dim)\n', (7445, 7460), False, 'import torch\n'), ((9261, 9289), 'einops.rearrange', 'rearrange', (['t', '"""... -> (...)"""'], {}), "(t, '... 
-> (...)')\n", (9270, 9289), False, 'from einops import rearrange, repeat\n'), ((9516, 9573), 'torch.zeros', 'torch.zeros', (['t1.shape[0]'], {'device': 'device', 'dtype': 'torch.long'}), '(t1.shape[0], device=device, dtype=torch.long)\n', (9527, 9573), False, 'import torch\n'), ((10603, 10657), 'torch.ones', 'torch.ones', (['(1)', '*tensor.shape[1:]'], {'device': 'tensor.device'}), '(1, *tensor.shape[1:], device=tensor.device)\n', (10613, 10657), False, 'import torch\n'), ((10709, 10734), 'torch.cat', 'torch.cat', (['(color, alpha)'], {}), '((color, alpha))\n', (10718, 10734), False, 'import torch\n'), ((12435, 12443), 'random.random', 'random', ([], {}), '()\n', (12441, 12443), False, 'from random import random\n'), ((12525, 12557), 'stylegan2_pytorch.diff_augment.DiffAugment', 'DiffAugment', (['images'], {'types': 'types'}), '(images, types=types)\n', (12536, 12557), False, 'from stylegan2_pytorch.diff_augment import DiffAugment\n'), ((12826, 12854), 'torch.randn', 'torch.randn', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (12837, 12854), False, 'import torch\n'), ((14610, 14658), 'torch.randn', 'torch.randn', (['(out_chan, in_chan, kernel, kernel)'], {}), '((out_chan, in_chan, kernel, kernel))\n', (14621, 14658), False, 'import torch\n'), ((15771, 15836), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(scale_factor=2, mode='bilinear', align_corners=False)\n", (15782, 15836), False, 'from torch import nn, einsum\n'), ((17181, 17229), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'filters', '(3)'], {'padding': '(1)'}), '(input_channels, filters, 3, padding=1)\n', (17190, 17229), False, 'from torch import nn, einsum\n'), ((17269, 17310), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters', 'filters', '(3)'], {'padding': '(1)'}), '(filters, filters, 3, padding=1)\n', (17278, 17310), False, 'from torch import nn, einsum\n'), ((18466, 18532), 'torch.nn.ConvTranspose2d', 
'nn.ConvTranspose2d', (['latent_dim', 'init_channels', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(latent_dim, init_channels, 4, 1, 0, bias=False)\n', (18484, 18532), False, 'from torch import nn, einsum\n'), ((23584, 23646), 'contrastive_learner.ContrastiveLearner', 'ContrastiveLearner', (['self.D', 'image_size'], {'hidden_layer': '"""flatten"""'}), "(self.D, image_size, hidden_layer='flatten')\n", (23602, 23646), False, 'from contrastive_learner import ContrastiveLearner\n'), ((24473, 24591), 'apex.amp.initialize', 'amp.initialize', (['[self.S, self.G, self.D, self.SE, self.GE]', '[self.G_opt, self.D_opt]'], {'opt_level': '"""O1"""', 'num_losses': '(3)'}), "([self.S, self.G, self.D, self.SE, self.GE], [self.G_opt,\n self.D_opt], opt_level='O1', num_losses=3)\n", (24487, 24591), False, 'from apex import amp\n'), ((24847, 24885), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['block.to_noise1.weight'], {}), '(block.to_noise1.weight)\n', (24861, 24885), False, 'from torch import nn, einsum\n'), ((24898, 24936), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['block.to_noise2.weight'], {}), '(block.to_noise2.weight)\n', (24912, 24936), False, 'from torch import nn, einsum\n'), ((24949, 24985), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['block.to_noise1.bias'], {}), '(block.to_noise1.bias)\n', (24963, 24985), False, 'from torch import nn, einsum\n'), ((24998, 25034), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['block.to_noise2.bias'], {}), '(block.to_noise2.bias)\n', (25012, 25034), False, 'from torch import nn, einsum\n'), ((29562, 29590), 'aim.Session', 'aim.Session', ([], {'experiment': 'name'}), '(experiment=name)\n', (29573, 29590), False, 'import aim\n'), ((30528, 30557), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.GAN.S'], {}), '(self.GAN.S, **ddp_kwargs)\n', (30531, 30557), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((30583, 30612), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.GAN.G'], {}), '(self.GAN.G, 
**ddp_kwargs)\n', (30586, 30612), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((30638, 30667), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.GAN.D'], {}), '(self.GAN.D, **ddp_kwargs)\n', (30641, 30667), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((30697, 30730), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.GAN.D_aug'], {}), '(self.GAN.D_aug, **ddp_kwargs)\n', (30700, 30730), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((32119, 32216), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['self.dataset'], {'rank': 'self.rank', 'num_replicas': 'self.world_size', 'shuffle': '(True)'}), '(self.dataset, rank=self.rank, num_replicas=self.\n world_size, shuffle=True)\n', (32137, 32216), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((42715, 42754), 'torch.index_select', 'torch.index_select', (['a', 'dim', 'order_index'], {}), '(a, dim, order_index)\n', (42733, 42754), False, 'import torch\n'), ((43623, 43660), 'shutil.rmtree', 'rmtree', (['real_path'], {'ignore_errors': '(True)'}), '(real_path, ignore_errors=True)\n', (43629, 43660), False, 'from shutil import rmtree\n'), ((43673, 43695), 'os.makedirs', 'os.makedirs', (['real_path'], {}), '(real_path)\n', (43684, 43695), False, 'import os\n'), ((45525, 45549), 'numpy.mean', 'np.mean', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (45532, 45549), True, 'import numpy as np\n'), ((45574, 45605), 'numpy.expand_dims', 'np.expand_dims', (['self.av'], {'axis': '(0)'}), '(self.av, axis=0)\n', (45588, 45605), True, 'import numpy as np\n'), ((47449, 47509), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['generated_images'], {'nrow': 'num_rows'}), '(generated_images, nrow=num_rows)\n', (47476, 47509), False, 'import torchvision\n'), ((49545, 49561), 'apex.amp.state_dict', 'amp.state_dict', ([], {}), '()\n', (49559, 49561), False, 'from apex 
import amp\n'), ((50586, 50623), 'apex.amp.load_state_dict', 'amp.load_state_dict', (["load_data['amp']"], {}), "(load_data['amp'])\n", (50605, 50623), False, 'from apex import amp\n'), ((1832, 1840), 'random.random', 'random', ([], {}), '()\n', (1838, 1840), False, 'from random import random\n'), ((2531, 2580), 'torch.var', 'torch.var', (['x'], {'dim': '(1)', 'unbiased': '(False)', 'keepdim': '(True)'}), '(x, dim=1, unbiased=False, keepdim=True)\n', (2540, 2580), False, 'import torch\n'), ((4363, 4412), 'einops.rearrange', 'rearrange', (['t', '"""b (h c) x y -> (b h) (x y) c"""'], {'h': 'h'}), "(t, 'b (h c) x y -> (b h) (x y) c', h=h)\n", (4372, 4412), False, 'from einops import rearrange, repeat\n'), ((7203, 7243), 'torch.ones', 'torch.ones', (['outputs.shape'], {'device': 'device'}), '(outputs.shape, device=device)\n', (7213, 7243), False, 'import torch\n'), ((9086, 9102), 'torch.nn.functional.relu', 'F.relu', (['(1 + real)'], {}), '(1 + real)\n', (9092, 9102), True, 'import torch.nn.functional as F\n'), ((9105, 9121), 'torch.nn.functional.relu', 'F.relu', (['(1 - fake)'], {}), '(1 - fake)\n', (9111, 9121), True, 'import torch.nn.functional as F\n'), ((11499, 11534), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['convert_image_fn'], {}), '(convert_image_fn)\n', (11516, 11534), False, 'from torchvision import transforms\n'), ((11624, 11653), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_size'], {}), '(image_size)\n', (11641, 11653), False, 'from torchvision import transforms\n'), ((11817, 11838), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11836, 11838), False, 'from torchvision import transforms\n'), ((12910, 12930), 'torch.zeros', 'torch.zeros', (['out_dim'], {}), '(out_dim)\n', (12921, 12930), False, 'import torch\n'), ((13853, 13918), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(scale_factor=2, mode='bilinear', 
align_corners=False)\n", (13864, 13918), False, 'from torch import nn, einsum\n'), ((17421, 17472), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters', 'filters', '(3)'], {'padding': '(1)', 'stride': '(2)'}), '(filters, filters, 3, padding=1, stride=2)\n', (17430, 17472), False, 'from torch import nn, einsum\n'), ((17693, 17705), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (17702, 17705), False, 'import math\n'), ((18028, 18044), 'math.log2', 'log2', (['image_size'], {}), '(image_size)\n', (18032, 18044), False, 'from math import floor, log2\n'), ((18593, 18630), 'torch.randn', 'torch.randn', (['(1, init_channels, 4, 4)'], {}), '((1, init_channels, 4, 4))\n', (18604, 18630), False, 'import torch\n'), ((20258, 20274), 'math.log2', 'log2', (['image_size'], {}), '(image_size)\n', (20262, 20274), False, 'from math import floor, log2\n'), ((21718, 21732), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (21729, 21732), False, 'import torch\n'), ((24717, 24802), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'a': '(0)', 'mode': '"""fan_in"""', 'nonlinearity': '"""leaky_relu"""'}), "(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu'\n )\n", (24740, 24802), False, 'from torch import nn, einsum\n'), ((27308, 27324), 'math.log2', 'log2', (['image_size'], {}), '(image_size)\n', (27312, 27324), False, 'from math import floor, log2\n'), ((32328, 32372), 'math.ceil', 'math.ceil', (['(self.batch_size / self.world_size)'], {}), '(self.batch_size / self.world_size)\n', (32337, 32372), False, 'import math\n'), ((33091, 33108), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (33103, 33108), False, 'import torch\n'), ((33149, 33166), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (33161, 33166), False, 'import torch\n'), ((38708, 38738), 'math.ceil', 'math.ceil', (['(batch_size * k_frac)'], {}), '(batch_size * k_frac)\n', (38717, 38738), False, 'import math\n'), ((39806, 39829), 'numpy.isnan', 'np.isnan', 
(['avg_pl_length'], {}), '(avg_pl_length)\n', (39814, 39829), True, 'import numpy as np\n'), ((40238, 40252), 'torch.isnan', 'torch.isnan', (['l'], {}), '(l)\n', (40249, 40252), False, 'import torch\n'), ((40968, 41026), 'math.ceil', 'math.ceil', (['(self.calculate_fid_num_images / self.batch_size)'], {}), '(self.calculate_fid_num_images / self.batch_size)\n', (40977, 41026), False, 'import math\n'), ((45628, 45653), 'torch.from_numpy', 'torch.from_numpy', (['self.av'], {}), '(self.av)\n', (45644, 45653), False, 'import torch\n'), ((47536, 47559), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (47557, 47559), False, 'from torchvision import transforms\n'), ((47654, 47704), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'pil_image.size', '(255, 255, 255)'], {}), "('RGBA', pil_image.size, (255, 255, 255))\n", (47663, 47704), False, 'from PIL import Image\n'), ((47733, 47777), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['background', 'pil_image'], {}), '(background, pil_image)\n', (47754, 47777), False, 'from PIL import Image\n'), ((7635, 7649), 'torch.rand', 'torch.rand', (['()'], {}), '(())\n', (7645, 7649), False, 'import torch\n'), ((7946, 7987), 'torch.FloatTensor', 'torch.FloatTensor', (['n', 'im_size', 'im_size', '(1)'], {}), '(n, im_size, im_size, 1)\n', (7963, 7987), False, 'import torch\n'), ((11566, 11609), 'functools.partial', 'partial', (['resize_to_minimum_size', 'image_size'], {}), '(resize_to_minimum_size, image_size)\n', (11573, 11609), False, 'from functools import partial\n'), ((11689, 11767), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['image_size'], {'scale': '(0.5, 1.0)', 'ratio': '(0.98, 1.02)'}), '(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02))\n', (11717, 11767), False, 'from torchvision import transforms\n'), ((11769, 11802), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (11790, 11802), False, 'from 
torchvision import transforms\n'), ((21130, 21168), 'vector_quantize_pytorch.VectorQuantize', 'VectorQuantize', (['out_chan', 'fq_dict_size'], {}), '(out_chan, fq_dict_size)\n', (21144, 21168), False, 'from vector_quantize_pytorch import VectorQuantize\n'), ((35714, 35722), 'random.random', 'random', ([], {}), '()\n', (35720, 35722), False, 'from random import random\n'), ((40780, 40819), 'math.floor', 'floor', (['(self.steps / self.evaluate_every)'], {}), '(self.steps / self.evaluate_every)\n', (40785, 40819), False, 'from math import floor, log2\n'), ((8856, 8886), 'torch.sin', 'torch.sin', (['((1.0 - val) * omega)'], {}), '((1.0 - val) * omega)\n', (8865, 8886), False, 'import torch\n'), ((8915, 8937), 'torch.sin', 'torch.sin', (['(val * omega)'], {}), '(val * omega)\n', (8924, 8937), False, 'import torch\n'), ((11159, 11176), 'pathlib.Path', 'Path', (['f"""{folder}"""'], {}), "(f'{folder}')\n", (11163, 11176), False, 'from pathlib import Path\n'), ((39280, 39300), 'torch.isnan', 'torch.isnan', (['pl_loss'], {}), '(pl_loss)\n', (39291, 39300), False, 'import torch\n'), ((4978, 5006), 'torch.nn.Conv2d', 'nn.Conv2d', (['chan', '(chan * 2)', '(1)'], {}), '(chan, chan * 2, 1)\n', (4987, 5006), False, 'from torch import nn, einsum\n'), ((5022, 5050), 'torch.nn.Conv2d', 'nn.Conv2d', (['(chan * 2)', 'chan', '(1)'], {}), '(chan * 2, chan, 1)\n', (5031, 5050), False, 'from torch import nn, einsum\n'), ((34248, 34256), 'random.random', 'random', ([], {}), '()\n', (34254, 34256), False, 'from random import random\n'), ((49780, 49813), 'pathlib.Path', 'Path', (['(self.models_dir / self.name)'], {}), '(self.models_dir / self.name)\n', (49784, 49813), False, 'from pathlib import Path\n'), ((42630, 42647), 'numpy.arange', 'np.arange', (['n_tile'], {}), '(n_tile)\n', (42639, 42647), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class BatchDatasetTest(test.TestCase):
  """Graph-mode (TF 1.x) tests for `Dataset.batch()` and `Dataset.padded_batch()`."""

  def testBatchDataset(self):
    """Tests a map -> repeat -> batch pipeline, including a remainder batch."""
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count) -> BatchDataset(batch_size).
    # Three components with different ranks so batching is exercised across
    # heterogeneous shapes: a vector, a rank-2 array, and a scaled vector.
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    # `count` and `batch_size` are fed at session-run time so that one graph
    # can cover several batching scenarios below.
    count = array_ops.placeholder(dtypes.int64, shape=[])
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])

    def _map_fn(x, y, z):
      # Element-wise square of every component.
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
                .repeat(count).batch(batch_size).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    # The leading (batch) dimension of every output is statically unknown;
    # the remaining dimensions come straight from each component.
    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
                     [t.shape.as_list() for t in get_next])

    with self.test_session() as sess:
      # Batch of a finite input, where the batch_size divides the
      # total number of elements.
      sess.run(init_op, feed_dict={count: 28, batch_size: 14})
      num_batches = (28 * 7) // 14
      for i in range(num_batches):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(14):
            # The repeated input cycles with period 7, hence the modulo.
            self.assertAllEqual(component[(i*14 + j) % 7]**2,
                                result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Batch of a finite input, where the batch_size does not
      # divide the total number of elements.
      sess.run(init_op, feed_dict={count: 14, batch_size: 8})

      # We expect (num_batches - 1) full-sized batches.
      num_batches = int(math.ceil((14 * 7) / 8))
      for i in range(num_batches - 1):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(8):
            self.assertAllEqual(component[(i*8 + j) % 7]**2,
                                result_component[j])
      # The final batch is smaller: it carries the (14 * 7) % 8 leftover
      # elements rather than a full batch of 8.
      result = sess.run(get_next)
      for component, result_component in zip(components, result):
        for j in range((14 * 7) % 8):
          self.assertAllEqual(component[((num_batches - 1)*8 + j) % 7]**2,
                              result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Batch of an empty input should fail straight away.
      sess.run(init_op, feed_dict={count: 0, batch_size: 8})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Empty batch should be an initialization time error.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, batch_size: 0})

  def testPaddedBatchDataset(self):
    """Tests `padded_batch()` with dynamic (-1), constant, and too-short shapes."""
    seq_lens = array_ops.placeholder(dtypes.int32, shape=[None])
    padded_shape = array_ops.placeholder(dtypes.int64, shape=[1])

    # Each scalar length x is mapped to the vector [x, x, ..., x] of length x,
    # so elements have varying shapes and must be padded to form a batch.
    iterator = (dataset_ops.Dataset.from_tensor_slices(seq_lens)
                .map(lambda x: array_ops.fill([x], x)).padded_batch(
                    4,
                    padded_shapes=padded_shape).make_initializable_iterator())

    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      # Test with random sequence lengths, and max padding.
      random_seq_lens = np.random.randint(20, size=(32,)).astype(np.int32)
      sess.run(init_op, feed_dict={padded_shape: [-1],
                                   seq_lens: random_seq_lens})
      for i in range(8):
        result = sess.run(get_next)
        # With padded_shape [-1] the pad length is the max length in the batch.
        padded_len = np.max(result)
        self.assertEqual((4, padded_len), result.shape)
        for j in range(4):
          seq_len = random_seq_lens[(i*4)+j]
          self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
          # Beyond the element's own length the row holds the default pad (0).
          self.assertAllEqual(result[j, seq_len:], [0] * (padded_len - seq_len))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test with random sequence lengths, and constant padding.
      sess.run(init_op, feed_dict={padded_shape: [25],
                                   seq_lens: random_seq_lens})
      for i in range(8):
        result = sess.run(get_next)
        self.assertEqual((4, 25), result.shape)
        for j in range(4):
          seq_len = random_seq_lens[(i*4)+j]
          self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
          self.assertAllEqual(result[j, seq_len:], [0] * (25 - seq_len))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test correct handling of empty tensors.
      sess.run(init_op, feed_dict={padded_shape: [-1],
                                   seq_lens: [0, 0, 0, 0]})
      result = sess.run(get_next)
      self.assertAllEqual([[], [], [], []], result)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test error handling with constant sequence lengths, and
      # too-short padding.
      sess.run(init_op, feed_dict={padded_shape: [5],
                                   seq_lens: [6, 5, 5, 5]})
      with self.assertRaises(errors.DataLossError):
        result = sess.run(get_next)

  def testPaddedBatchDatasetNonDefaultPadding(self):
    """Tests `padded_batch()` with explicit `padding_values` per component."""
    seq_lens = array_ops.placeholder(dtypes.int32, shape=[None])
    padded_shape = array_ops.placeholder(dtypes.int64, shape=[1])

    def fill_tuple(x):
      # Produces a (int vector, string vector) pair of length x each.
      filled = array_ops.fill([x], x)
      return (filled, string_ops.as_string(filled))

    iterator = (dataset_ops.Dataset.from_tensor_slices(seq_lens).map(fill_tuple)
                .padded_batch(
                    4,
                    padded_shapes=(padded_shape, padded_shape),
                    padding_values=(-1, "<end>")).make_initializable_iterator())

    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      # Test with random sequence lengths, and max padding.
      random_seq_lens = np.random.randint(20, size=(32,)).astype(np.int32)
      sess.run(init_op, feed_dict={padded_shape: [-1],
                                   seq_lens: random_seq_lens})
      for i in range(8):
        result = sess.run(get_next)
        padded_len = np.max(result[0])
        self.assertEqual((4, padded_len), result[0].shape)
        self.assertEqual((4, padded_len), result[1].shape)
        for j in range(4):
          seq_len = random_seq_lens[(i*4)+j]
          self.assertAllEqual(result[0][j, :seq_len], [seq_len] * seq_len)
          # Padding uses the explicit values (-1 and b"<end>") rather than
          # the type defaults.
          self.assertAllEqual(result[0][j, seq_len:],
                              [-1] * (padded_len - seq_len))
          self.assertAllEqual(result[1][j, :seq_len],
                              [compat.as_bytes(str(seq_len))] * seq_len)
          self.assertAllEqual(result[1][j, seq_len:],
                              [b"<end>"] * (padded_len - seq_len))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testPaddedBatchDatasetShapeSpecifications(self):
    """Tests that all accepted `padded_shapes` spellings infer the same shapes."""
    int_placeholder = array_ops.placeholder(dtypes.int32)
    float_placeholder = array_ops.placeholder(dtypes.float32)
    string_placeholder = array_ops.placeholder(dtypes.string)
    input_dataset = dataset_ops.Dataset.from_tensors(
        (int_placeholder, float_placeholder, string_placeholder))

    # Test different ways of specifying the `padded_shapes` argument.
    dynamic_padding_from_tensor_shapes = input_dataset.padded_batch(
        32,
        padded_shapes=(tensor_shape.TensorShape([None]),
                       tensor_shape.TensorShape([None, None]),
                       tensor_shape.TensorShape([37])))
    dynamic_padding_from_lists = input_dataset.padded_batch(
        32, padded_shapes=([None], [None, None], [37]))
    dynamic_padding_from_lists_with_minus_one = input_dataset.padded_batch(
        32, padded_shapes=([-1], [-1, -1], [37]))
    dynamic_padding_from_tensors = input_dataset.padded_batch(
        32,
        padded_shapes=(constant_op.constant([-1], dtype=dtypes.int64),
                       constant_op.constant([-1, -1], dtype=dtypes.int64),
                       constant_op.constant([37], dtype=dtypes.int64)))

    # All four spellings must yield identical static output shapes
    # (unknown batch dimension; -1 / None / unknown-TensorShape all mean
    # "pad dynamically", 37 is a fixed pad length).
    for dataset in [dynamic_padding_from_tensor_shapes,
                    dynamic_padding_from_lists,
                    dynamic_padding_from_lists_with_minus_one,
                    dynamic_padding_from_tensors]:
      self.assertEqual([None, None], dataset.output_shapes[0].as_list())
      self.assertEqual([None, None, None], dataset.output_shapes[1].as_list())
      self.assertEqual([None, 37], dataset.output_shapes[2].as_list())
# Run every test in this module through the TensorFlow test runner when the
# file is executed as a script.
if __name__ == "__main__":
  test.main()
| [
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"math.ceil",
"tensorflow.python.ops.string_ops.as_string",
"tensorflow.python.framework.constant_op.constant",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"tensorflow.python.framewor... | [((10147, 10158), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10156, 10158), False, 'from tensorflow.python.platform import test\n'), ((1791, 1836), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {'shape': '[]'}), '(dtypes.int64, shape=[])\n', (1812, 1836), False, 'from tensorflow.python.ops import array_ops\n'), ((1854, 1899), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {'shape': '[]'}), '(dtypes.int64, shape=[])\n', (1875, 1899), False, 'from tensorflow.python.ops import array_ops\n'), ((4310, 4359), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int32'], {'shape': '[None]'}), '(dtypes.int32, shape=[None])\n', (4331, 4359), False, 'from tensorflow.python.ops import array_ops\n'), ((4379, 4425), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {'shape': '[1]'}), '(dtypes.int64, shape=[1])\n', (4400, 4425), False, 'from tensorflow.python.ops import array_ops\n'), ((6770, 6819), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int32'], {'shape': '[None]'}), '(dtypes.int32, shape=[None])\n', (6791, 6819), False, 'from tensorflow.python.ops import array_ops\n'), ((6839, 6885), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {'shape': '[1]'}), '(dtypes.int64, shape=[1])\n', (6860, 6885), False, 'from tensorflow.python.ops import array_ops\n'), ((8530, 8565), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int32'], {}), '(dtypes.int32)\n', (8551, 8565), False, 'from tensorflow.python.ops import array_ops\n'), ((8590, 8627), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (8611, 8627), False, 'from tensorflow.python.ops import array_ops\n'), ((8653, 8689), 
'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {}), '(dtypes.string)\n', (8674, 8689), False, 'from tensorflow.python.ops import array_ops\n'), ((8710, 8804), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors', 'dataset_ops.Dataset.from_tensors', (['(int_placeholder, float_placeholder, string_placeholder)'], {}), '((int_placeholder, float_placeholder,\n string_placeholder))\n', (8742, 8804), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((1644, 1656), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (1653, 1656), True, 'import numpy as np\n'), ((6925, 6947), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[x]', 'x'], {}), '([x], x)\n', (6939, 6947), False, 'from tensorflow.python.ops import array_ops\n'), ((1676, 1697), 'numpy.array', 'np.array', (['[[1, 2, 3]]'], {}), '([[1, 2, 3]])\n', (1684, 1697), True, 'import numpy as np\n'), ((1747, 1761), 'numpy.array', 'np.array', (['(37.0)'], {}), '(37.0)\n', (1755, 1761), True, 'import numpy as np\n'), ((1764, 1776), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (1773, 1776), True, 'import numpy as np\n'), ((1940, 1958), 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['x'], {}), '(x)\n', (1955, 1958), False, 'from tensorflow.python.ops import math_ops\n'), ((1960, 1978), 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['y'], {}), '(y)\n', (1975, 1978), False, 'from tensorflow.python.ops import math_ops\n'), ((1980, 1998), 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['z'], {}), '(z)\n', (1995, 1998), False, 'from tensorflow.python.ops import math_ops\n'), ((3216, 3237), 'math.ceil', 'math.ceil', (['(14 * 7 / 8)'], {}), '(14 * 7 / 8)\n', (3225, 3237), False, 'import math\n'), ((5108, 5122), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (5114, 5122), True, 'import numpy as np\n'), ((6970, 6998), 'tensorflow.python.ops.string_ops.as_string', 'string_ops.as_string', 
(['filled'], {}), '(filled)\n', (6990, 6998), False, 'from tensorflow.python.ops import string_ops\n'), ((7725, 7742), 'numpy.max', 'np.max', (['result[0]'], {}), '(result[0])\n', (7731, 7742), True, 'import numpy as np\n'), ((1700, 1712), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (1709, 1712), True, 'import numpy as np\n'), ((4857, 4890), 'numpy.random.randint', 'np.random.randint', (['(20)'], {'size': '(32,)'}), '(20, size=(32,))\n', (4874, 4890), True, 'import numpy as np\n'), ((7474, 7507), 'numpy.random.randint', 'np.random.randint', (['(20)'], {'size': '(32,)'}), '(20, size=(32,))\n', (7491, 7507), True, 'import numpy as np\n'), ((8985, 9017), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['[None]'], {}), '([None])\n', (9009, 9017), False, 'from tensorflow.python.framework import tensor_shape\n'), ((9042, 9080), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['[None, None]'], {}), '([None, None])\n', (9066, 9080), False, 'from tensorflow.python.framework import tensor_shape\n'), ((9105, 9135), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['[37]'], {}), '([37])\n', (9129, 9135), False, 'from tensorflow.python.framework import tensor_shape\n'), ((9479, 9525), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[-1]'], {'dtype': 'dtypes.int64'}), '([-1], dtype=dtypes.int64)\n', (9499, 9525), False, 'from tensorflow.python.framework import constant_op\n'), ((9550, 9600), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[-1, -1]'], {'dtype': 'dtypes.int64'}), '([-1, -1], dtype=dtypes.int64)\n', (9570, 9600), False, 'from tensorflow.python.framework import constant_op\n'), ((9625, 9671), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[37]'], {'dtype': 'dtypes.int64'}), '([37], dtype=dtypes.int64)\n', (9645, 9671), False, 'from 
tensorflow.python.framework import constant_op\n'), ((4443, 4491), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (['seq_lens'], {}), '(seq_lens)\n', (4481, 4491), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((4523, 4545), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[x]', 'x'], {}), '([x], x)\n', (4537, 4545), False, 'from tensorflow.python.ops import array_ops\n'), ((7016, 7064), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (['seq_lens'], {}), '(seq_lens)\n', (7054, 7064), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((2016, 2066), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (['components'], {}), '(components)\n', (2054, 2066), False, 'from tensorflow.python.data.ops import dataset_ops\n')] |
import theano.tensor as T
import numpy as np
from ..utils.utils_functions import ITLFunctions
__all__ = [
'dummy_score',
'get_accuracy',
'score_accuracy',
'score_ensemble_ambiguity',
'score_rms',
'score_silverman',
'mutual_information_cs',
'mutual_information_ed',
'mutual_information_parzen'
]
# noinspection PyUnusedLocal
def dummy_score(_input, _output, _target, model):
    """Placeholder score that always evaluates to zero.

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample (unused).
    _output : theano.tensor.matrix
        Model output (unused).
    _target : theano.tensor.matrix
        Target sample; only its shape is used.
    model : Model
        Model (unused).

    Returns
    -------
    theano.tensor.matrix
        A tensor of zeros with the same shape as ``_target``.
    """
    return T.zeros(_target.shape)
#
# Classification Functions
#
def get_accuracy(Y, _target):
    """Return the fraction of entries in ``Y`` that equal ``_target``."""
    n_correct = np.sum(Y == _target)
    # noinspection PyTypeChecker
    return float(n_correct) / float(_target.shape[0])
# noinspection PyUnusedLocal
def score_accuracy(_input, _output, _target, model):
    """Accuracy score for classifier models.

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample (unused).
    _output : theano.tensor.matrix
        Output sample.
    _target : theano.tensor.matrix
        Target sample (assumed one-hot along the last axis).
    model : Model
        Model (unused).

    Returns
    -------
    theano.tensor.matrix
        Accuracy expression for the classifier.
    """
    n_classes = _target.shape[-1]
    return n_classes * T.mean(_target * _output)
# noinspection PyUnusedLocal
def score_ensemble_ambiguity(_input, _output, _target, model):
    """Ambiguity score for an Ensemble model.

    Averages, over the ensemble members, the mean squared difference
    between each member's (non-probabilistic) output and ``_output``.

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample.
    _output : theano.tensor.matrix
        Output sample (typically the ensemble output).
    _target : theano.tensor.matrix
        Target sample (unused).
    model : Model
        Ensemble model.

    Returns
    -------
    float
        Ambiguity score expression.
    """
    ensemble = model
    total = 0
    for member in ensemble.get_models():
        diff = member.output(_input, prob=False) - _output
        total += T.mean(T.sqr(diff))
    return total / ensemble.get_num_models()
# noinspection PyUnusedLocal
def score_silverman(_input, _output, _target, model):
    """Silverman-rule kernel size computed on the model output.

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample.
    _output : theano.tensor.matrix
        Output sample (unused; the model's own output is used instead).
    _target : theano.tensor.matrix
        Target sample (unused).
    model : Model
        Model whose output feeds the Silverman rule.

    Returns
    -------
    float
        Kernel size given by the Silverman rule.
    """
    model_out = model.output(_input)
    return ITLFunctions.silverman(model_out)
#
# Regression Functions
#
# noinspection PyUnusedLocal
def score_rms(_input, _output, _target, model):
    """Squared-error score for regressor models.

    NOTE(review): despite the name, no square root is taken here -- the
    expression is the mean *squared* error; confirm callers expect MSE.

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample (unused).
    _output : theano.tensor.matrix
        Output sample.
    _target : theano.tensor.matrix
        Target sample.
    model : Model
        Model (unused).

    Returns
    -------
    theano.tensor.matrix
        Mean of the squared differences between output and target.
    """
    squared_error = T.power(_output - _target, 2.0)
    return T.mean(squared_error)
# noinspection PyUnusedLocal
def mutual_information_cs(_input, _output, _target, model):
    """Quadratic Mutual Information (Cauchy-Schwarz) between output and target.

    The kernel size is chosen by the Silverman rule applied to ``_target``.

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample (unused).
    _output : theano.tensor.matrix
        Output sample.
    _target : theano.tensor.matrix
        Target sample.
    model : Model
        Model (unused).

    Returns
    -------
    theano.tensor.matrix
        Cauchy-Schwarz quadratic mutual information expression.
    """
    kernel_size = ITLFunctions.silverman(_target)
    return ITLFunctions.mutual_information_cs([_output], _target, kernel_size)
# noinspection PyUnusedLocal
def mutual_information_ed(_input, _output, _target, model):
    """Quadratic Mutual Information (Euclidean) between output and target.

    The kernel size is chosen by the Silverman rule applied to ``_target``.

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample (unused).
    _output : theano.tensor.matrix
        Output sample.
    _target : theano.tensor.matrix
        Target sample.
    model : Model
        Model (unused).

    Returns
    -------
    theano.tensor.matrix
        Euclidean quadratic mutual information expression.
    """
    kernel_size = ITLFunctions.silverman(_target)
    return ITLFunctions.mutual_information_ed([_output], _target, kernel_size)
# noinspection PyUnusedLocal
def mutual_information_parzen(_input, _output, _target, model):
    """Mutual Information estimated with a Parzen window.

    The kernel size is chosen by the Silverman rule applied to ``_target``.
    Note that, unlike the quadratic variants above, ``_output`` is passed
    directly (not wrapped in a list).

    Parameters
    ----------
    _input : theano.tensor.matrix
        Input sample (unused).
    _output : theano.tensor.matrix
        Output sample.
    _target : theano.tensor.matrix
        Target sample.
    model : Model
        Model (unused).

    Returns
    -------
    theano.tensor.matrix
        Parzen-window mutual information expression.
    """
    kernel_size = ITLFunctions.silverman(_target)
    return ITLFunctions.mutual_information_parzen(_output, _target, kernel_size)
| [
"numpy.sum",
"theano.tensor.mean",
"theano.tensor.zeros",
"theano.tensor.power"
] | [((869, 891), 'theano.tensor.zeros', 'T.zeros', (['_target.shape'], {}), '(_target.shape)\n', (876, 891), True, 'import theano.tensor as T\n'), ((1575, 1600), 'theano.tensor.mean', 'T.mean', (['(_target * _output)'], {}), '(_target * _output)\n', (1581, 1600), True, 'import theano.tensor as T\n'), ((3240, 3271), 'theano.tensor.power', 'T.power', (['(_output - _target)', '(2.0)'], {}), '(_output - _target, 2.0)\n', (3247, 3271), True, 'import theano.tensor as T\n'), ((1019, 1039), 'numpy.sum', 'np.sum', (['(Y == _target)'], {}), '(Y == _target)\n', (1025, 1039), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# wujian@2018
"""
Do WPE Dereverbration Algorithm
"""
import argparse
from distutils.util import strtobool
import numpy as np
from libs.data_handler import SpectrogramReader, WaveWriter
from libs.opts import StftParser
from libs.utils import get_logger, inverse_stft
from libs.wpe import wpe
logger = get_logger(__name__)
def run(args):
    """Dereverberate every utterance in ``args.wav_scp`` with (G)WPE.

    Each utterance's spectrogram is read, dereverberated either by the
    bundled ``wpe`` implementation or by ``nara_wpe`` (when
    ``--nara-wpe`` is set), synthesized back to the time domain and
    written under ``args.dst_dir``.
    """
    stft_kwargs = {
        "frame_len": args.frame_len,
        "frame_hop": args.frame_hop,
        "window": args.window,
        # center=False keeps results comparable with kaldi
        "center": args.center,
        # T x F layout
        "transpose": True
    }

    def _dereverb(spectra):
        # spectra: F x N x T complex spectrogram
        if args.nara_wpe:
            from nara_wpe.wpe import wpe_v8
            return wpe_v8(spectra,
                          taps=args.taps,
                          delay=args.delay,
                          iterations=args.num_iters,
                          psd_context=args.context)
        return wpe(spectra,
                   num_iters=args.num_iters,
                   context=args.context,
                   taps=args.taps,
                   delay=args.delay)

    spectrogram_reader = SpectrogramReader(
        args.wav_scp, round_power_of_two=args.round_power_of_two, **stft_kwargs)

    num_done = 0
    with WaveWriter(args.dst_dir, sr=args.sr) as writer:
        for key, reverbed in spectrogram_reader:
            logger.info(f"Processing utt {key}...")
            # Promote single-channel T x F input to N x T x F (N == 1)
            if reverbed.ndim == 2:
                reverbed = reverbed[None, ...]
            # N x T x F => F x N x T
            reverbed = np.transpose(reverbed, (2, 0, 1))
            try:
                dereverb = _dereverb(reverbed)
            except np.linalg.LinAlgError:
                logger.warn(f"{key}: Failed cause LinAlgError in wpe")
                continue
            # F x N x T => N x T x F
            dereverb = np.transpose(dereverb, (1, 2, 0))
            # Synthesize each channel back to the time domain
            samps = np.stack(
                [inverse_stft(spectra, **stft_kwargs) for spectra in dereverb])
            writer.write(key, samps)
            # show progress cause slow speed
            num_done += 1
            if not num_done % 100:
                logger.info(f"Processed {num_done:d} utterances...")
    logger.info(
        f"Processed {num_done:d} utterances over {len(spectrogram_reader):d}")
if __name__ == "__main__":
    # Build the CLI: two positionals plus a table of optional flags.
    parser = argparse.ArgumentParser(
        description="Command to do GWPE dereverbration algorithm (recommended "
        "configuration: 512/128/blackman)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[StftParser.parser])
    parser.add_argument("wav_scp",
                        type=str,
                        help="Multi-channel rspecifier in kaldi format")
    parser.add_argument("dst_dir",
                        type=str,
                        help="Location to dump dereverbrated files")
    optional_specs = [
        ("--taps", dict(default=10, type=int,
                        help="Value of taps used in GWPE algorithm")),
        ("--delay", dict(default=3, type=int,
                         help="Value of delay used in GWPE algorithm")),
        ("--context", dict(default=1, dest="context", type=int,
                           help="Context value to compute PSD "
                                "matrix in GWPE algorithm")),
        ("--num-iters", dict(default=3, type=int,
                             help="Number of iterations to step in GWPE")),
        ("--sample-rate", dict(type=int, default=16000, dest="sr",
                               help="Waveform data sample rate")),
        ("--nara-wpe", dict(type=strtobool, default=False,
                            help="Use nara-wpe package")),
    ]
    for flag, kwargs in optional_specs:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    run(args)
| [
"libs.utils.inverse_stft",
"argparse.ArgumentParser",
"libs.wpe.wpe",
"libs.utils.get_logger",
"libs.data_handler.SpectrogramReader",
"nara_wpe.wpe.wpe_v8",
"numpy.transpose",
"libs.data_handler.WaveWriter"
] | [((327, 347), 'libs.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (337, 347), False, 'from libs.utils import get_logger, inverse_stft\n'), ((621, 715), 'libs.data_handler.SpectrogramReader', 'SpectrogramReader', (['args.wav_scp'], {'round_power_of_two': 'args.round_power_of_two'}), '(args.wav_scp, round_power_of_two=args.round_power_of_two,\n **stft_kwargs)\n', (638, 715), False, 'from libs.data_handler import SpectrogramReader, WaveWriter\n'), ((2509, 2737), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command to do GWPE dereverbration algorithm (recommended configuration: 512/128/blackman)"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'parents': '[StftParser.parser]'}), "(description=\n 'Command to do GWPE dereverbration algorithm (recommended configuration: 512/128/blackman)'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[\n StftParser.parser])\n", (2532, 2737), False, 'import argparse\n'), ((748, 784), 'libs.data_handler.WaveWriter', 'WaveWriter', (['args.dst_dir'], {'sr': 'args.sr'}), '(args.dst_dir, sr=args.sr)\n', (758, 784), False, 'from libs.data_handler import SpectrogramReader, WaveWriter\n'), ((1039, 1072), 'numpy.transpose', 'np.transpose', (['reverbed', '(2, 0, 1)'], {}), '(reverbed, (2, 0, 1))\n', (1051, 1072), True, 'import numpy as np\n'), ((1982, 2015), 'numpy.transpose', 'np.transpose', (['dereverb', '(1, 2, 0)'], {}), '(dereverb, (1, 2, 0))\n', (1994, 2015), True, 'import numpy as np\n'), ((1239, 1347), 'nara_wpe.wpe.wpe_v8', 'wpe_v8', (['reverbed'], {'taps': 'args.taps', 'delay': 'args.delay', 'iterations': 'args.num_iters', 'psd_context': 'args.context'}), '(reverbed, taps=args.taps, delay=args.delay, iterations=args.\n num_iters, psd_context=args.context)\n', (1245, 1347), False, 'from nara_wpe.wpe import wpe_v8\n'), ((1548, 1648), 'libs.wpe.wpe', 'wpe', (['reverbed'], {'num_iters': 'args.num_iters', 'context': 'args.context', 'taps': 
'args.taps', 'delay': 'args.delay'}), '(reverbed, num_iters=args.num_iters, context=args.context, taps=args.\n taps, delay=args.delay)\n', (1551, 1648), False, 'from libs.wpe import wpe\n'), ((2096, 2132), 'libs.utils.inverse_stft', 'inverse_stft', (['spectra'], {}), '(spectra, **stft_kwargs)\n', (2108, 2132), False, 'from libs.utils import get_logger, inverse_stft\n')] |
from pathlib import Path
from tqdm import tqdm
import numpy as np
import zarr
import pandas as pd
import joblib
from sklearn.neighbors import NearestNeighbors
from model_repair.override_for_release import get_interface
from model_repair.cache42.cached_42 import cache_42
# @cache_42(ignore_args=["gcfg"], force_recompute=True)
@cache_42(ignore_args=["gcfg"])
def show_clusters_content(cfg, embed_out, cluster_out, gcfg, cache):
    """Export representative examples and a summary plot for each cluster.

    For every cluster id, picks ``cfg["n_examples"]`` objects -- either the
    nearest neighbors of the cluster center in embedding space (default) or
    a random sample when ``cfg["get_nearest"]`` is False -- and writes per-
    example images plus one description plot per cluster under the cache
    directory. Results are memoized by the ``cache_42`` decorator (``gcfg``
    is excluded from the cache key).

    Returns the ``cache`` handle it was given.
    """
    # Load dataframe
    path_df = cluster_out.get_path() / "clustered.feather"
    objs_info = pd.read_feather(str(path_df))
    # presumably an array of cluster centers indexed by cluster id
    # (used below as best_centers[np.newaxis, cluster_id]) -- TODO confirm
    best_centers = joblib.load(str(cluster_out.get_path() / "best_centers.joblib"))
    # Load embedding data
    z_file_embeds = zarr.open(str(embed_out.get_path() / "embedded.zarr"), mode='r')
    embedded_nd = np.array(z_file_embeds["embedded_nd"])
    interface = get_interface(gcfg)
    # Cluster labels are assumed contiguous from 0; negative labels (noise)
    # are excluded from the sanity check and never matched in the loop.
    n_clusters = objs_info["cluster"].unique().max() + 1
    assert objs_info[objs_info["cluster"] >= 0]["idx_into_embeds"].max() < embedded_nd.shape[0]
    out_path_images = Path(cache.get_path()) / "images"
    out_path_plot = Path(cache.get_path()) / "plots"
    for cluster_id in tqdm(range(n_clusters)):
        # Rows and embeddings belonging to this cluster only
        objs_f = objs_info[objs_info["cluster"]==cluster_id]
        embedded_nd_f = embedded_nd[objs_f["idx_into_embeds"]]
        # Never request more examples than the cluster actually contains
        if cfg["n_examples"] > len(objs_f):
            how_many = len(objs_f)
        else:
            how_many = cfg["n_examples"]
        if "get_nearest" not in cfg or cfg["get_nearest"]:
            # Default path: take the nearest neighbors of the cluster center
            neigh = NearestNeighbors(n_neighbors=how_many)
            neigh.fit(embedded_nd_f)
            dists, found_ids = neigh.kneighbors(best_centers[np.newaxis, cluster_id], how_many, return_distance=True)
            assert dists.shape[0] == 1 # Just in case
            dists, found_ids = dists[0], found_ids[0] # Remove first dim
            N_found = dists.shape[0]
        else:
            # Pick examples at random
            # NOTE(review): np.random.choice samples WITH replacement here,
            # so duplicates are possible -- confirm this is intended.
            N_found = min(embedded_nd_f.shape[0], how_many)
            dists = [0 for e in range(embedded_nd_f.shape[0])]
            found_ids = np.random.choice(embedded_nd_f.shape[0], how_many)
        show_objs = []
        show_dists = []
        for i in range(N_found):
            # One image per selected example, grouped by cluster id
            interface.export_datapoint(objs_f.iloc[found_ids[i]], dists[i], i, out_path_images / f"{cluster_id}")
            show_objs.append(objs_f.iloc[found_ids[i]])
            show_dists.append(dists[i])
        # One summary plot per cluster
        interface.plot_cluster_description(show_objs, show_dists, out_path_plot / f"{cluster_id}.png")
    return cache
| [
"numpy.random.choice",
"numpy.array",
"model_repair.override_for_release.get_interface",
"sklearn.neighbors.NearestNeighbors",
"model_repair.cache42.cached_42.cache_42"
] | [((332, 362), 'model_repair.cache42.cached_42.cache_42', 'cache_42', ([], {'ignore_args': "['gcfg']"}), "(ignore_args=['gcfg'])\n", (340, 362), False, 'from model_repair.cache42.cached_42 import cache_42\n'), ((774, 812), 'numpy.array', 'np.array', (["z_file_embeds['embedded_nd']"], {}), "(z_file_embeds['embedded_nd'])\n", (782, 812), True, 'import numpy as np\n'), ((830, 849), 'model_repair.override_for_release.get_interface', 'get_interface', (['gcfg'], {}), '(gcfg)\n', (843, 849), False, 'from model_repair.override_for_release import get_interface\n'), ((1503, 1541), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'how_many'}), '(n_neighbors=how_many)\n', (1519, 1541), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((2060, 2110), 'numpy.random.choice', 'np.random.choice', (['embedded_nd_f.shape[0]', 'how_many'], {}), '(embedded_nd_f.shape[0], how_many)\n', (2076, 2110), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, division, print_function)
from collections import OrderedDict
import numpy as np
from .util import extract_vars, get_id, get_iterable, is_mapping, to_np
from .py3compat import viewkeys
from .latlonutils import _lat_varname, _lon_varname, _ll_to_xy, _xy_to_ll
from .metadecorators import set_latlon_metadata
from .config import xarray_enabled
if xarray_enabled():
from xarray import DataArray
def get_lat(wrfin, timeidx=0, method="cat", squeeze=True,
            cache=None, meta=True, _key=None,
            stagger=None):
    """Return the two dimensional latitude coordinate variable.

    Args:
        wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
            iterable): WRF-ARW NetCDF data, or an iterable sequence of
            the aforementioned types.

        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): Desired
            time index; a positive or negative integer, or
            :data:`wrf.ALL_TIMES` (alias for None) for all times.
            Default is 0.

        method (:obj:`str`, optional): Aggregation method for sequences,
            either 'cat' (combine along Time) or 'join' (new dimension
            for the file index). Default is 'cat'.

        squeeze (:obj:`bool`, optional): Set to False to keep dimensions
            of size 1 in the output shape. Default is True.

        cache (:obj:`dict`, optional): Mapping of (varname, ndarray) with
            pre-extracted NetCDF variables, used to avoid repeated
            extraction. Default is None.

        meta (:obj:`bool`, optional): Set to False to return a
            :class:`numpy.ndarray` without metadata instead of a
            :class:`xarray.DataArray`. Default is True.

        _key (:obj:`int`, optional): Internal caching key. Default is
            None.

        stagger (:obj:`str`): Grid selection:

            - 'm': mass grid (default).
            - 'u': u-wind grid (staggered west_east dimension).
            - 'v': v-wind grid (staggered south_north dimension).

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: The two
        dimensional latitude coordinate variable.

    """
    varname = _lat_varname(wrfin, stagger)
    extracted = extract_vars(wrfin, timeidx, varname, method, squeeze,
                            cache, meta, _key)
    return extracted[varname]
def get_lon(wrfin, timeidx=0, method="cat", squeeze=True,
            cache=None, meta=True, _key=None,
            stagger=None):
    """Return the two dimensional longitude coordinate variable.

    Args:
        wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
            iterable): WRF-ARW NetCDF data, or an iterable sequence of
            the aforementioned types.

        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): Desired
            time index; a positive or negative integer, or
            :data:`wrf.ALL_TIMES` (alias for None) for all times.
            Default is 0.

        method (:obj:`str`, optional): Aggregation method for sequences,
            either 'cat' (combine along Time) or 'join' (new dimension
            for the file index). Default is 'cat'.

        squeeze (:obj:`bool`, optional): Set to False to keep dimensions
            of size 1 in the output shape. Default is True.

        cache (:obj:`dict`, optional): Mapping of (varname, ndarray) with
            pre-extracted NetCDF variables, used to avoid repeated
            extraction. Default is None.

        meta (:obj:`bool`, optional): Set to False to return a
            :class:`numpy.ndarray` without metadata instead of a
            :class:`xarray.DataArray`. Default is True.

        _key (:obj:`int`, optional): Internal caching key. Default is
            None.

        stagger (:obj:`str`): Grid selection:

            - 'm': mass grid (default).
            - 'u': u-wind grid (staggered west_east dimension).
            - 'v': v-wind grid (staggered south_north dimension).

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: The two
        dimensional longitude coordinate variable.

    """
    varname = _lon_varname(wrfin, stagger)
    extracted = extract_vars(wrfin, timeidx, varname, method, squeeze,
                            cache, meta, _key)
    return extracted[varname]
def _llxy_mapping(wrfin, x_or_lat, y_or_lon, func, timeidx, stagger,
                  squeeze, meta, as_int=None):
    """Return the x,y/lat,lon coordinates for a dictionary input.

    The leftmost dimension(s) for the result is:

        - return_val[key,...,0,...] will contain the x/lat values.
        - return_val[key,...,1,...] will contain the y/lon values.

    Nested dictionaries are allowed.

    Args:
        wrfin (:obj:`dict`): A mapping of key name to a WRF NetCDF file
            object or sequence of WRF NetCDF file objects.

        x_or_lat (:obj:`float` or sequence): A single latitude/x value or
            a sequence of latitude/x values to be converted.

        y_or_lon (:obj:`float` or sequence): A single longitude/y value
            or a sequence of longitude/y values to be converted.

        func (function): Either the xy_to_ll or ll_to_xy function.

        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`): The desired time
            index; a positive or negative integer, or
            :data:`wrf.ALL_TIMES` (alias for None) for all times.

        stagger (:obj:`str`): Grid selection -- 'm' (mass, default),
            'u' (staggered west_east), or 'v' (staggered south_north).

        squeeze (:obj:`bool`, optional): Set to False to keep dimensions
            of size 1 in the output shape. Default is True.

        meta (:obj:`bool`, optional): Set to False to return a
            :class:`numpy.ndarray` instead of a
            :class:`xarray.DataArray`. Default is True.

        as_int (:obj:`bool`, optional): Set to True to return the x,y
            values as :obj:`int`. Only used when *func* is ll_to_xy.

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: The
        lat,lon/x,y coordinate value(s) whose leftmost dimensions are the
        dictionary keys, followed by a dimension of size
        2 (0=X, 1=Y)/(0=lat, 1=lon).

    Raises:
        ValueError: If the per-key results do not all have the same shape.

    """
    keynames = []
    # This might not work once mapping iterators are implemented
    numkeys = len(wrfin)

    key_iter = iter(viewkeys(wrfin))
    first_key = next(key_iter)
    keynames.append(first_key)

    first_args = [wrfin[first_key], x_or_lat, y_or_lon, timeidx, squeeze,
                  meta, stagger]
    if as_int is not None:
        first_args.append(as_int)

    first_array = func(*first_args)

    # Create the output data numpy array based on the first array
    outdims = [numkeys]
    outdims += first_array.shape
    outdata = np.empty(outdims, first_array.dtype)
    outdata[0, :] = first_array[:]

    idx = 1
    for key in key_iter:
        keynames.append(key)
        # BUG FIX: previously indexed with wrfin[first_key], which
        # re-processed the first entry for every subsequent key.
        args = [wrfin[key], x_or_lat, y_or_lon, timeidx, squeeze,
                meta, stagger]
        if as_int is not None:
            args.append(as_int)

        result_array = func(*args)

        if outdata.shape[1:] != result_array.shape:
            raise ValueError("data sequences must have the "
                             "same size for all dictionary keys")
        outdata[idx, :] = to_np(result_array)[:]
        idx += 1

    if xarray_enabled() and meta:
        outname = str(first_array.name)
        # Note: assumes that all entries in dict have same coords
        outcoords = OrderedDict(first_array.coords)

        # First find and store all the existing key coord names/values
        # This is applicable only if there are nested dictionaries.
        key_coordnames = []
        coord_vals = []
        existing_cnt = 0
        while True:
            key_coord_name = "key_{}".format(existing_cnt)

            if key_coord_name not in first_array.dims:
                break

            key_coordnames.append(key_coord_name)
            coord_vals.append(to_np(first_array.coords[key_coord_name]))

            existing_cnt += 1

        # Now add the key coord name and values for THIS dictionary.
        # Put the new key_n name at the bottom, but the new values will
        # be at the top to be associated with key_0 (left most). This
        # effectively shifts the existing 'key_n' coordinate values to the
        # right one dimension so *this* dicionary's key coordinate values
        # are at 'key_0'.
        key_coordnames.append(key_coord_name)
        coord_vals.insert(0, keynames)

        # make it so that key_0 is leftmost
        outdims = key_coordnames + list(first_array.dims[existing_cnt:])

        # Create the new 'key_n', value pairs
        for coordname, coordval in zip(key_coordnames, coord_vals):
            outcoords[coordname] = coordval

        outattrs = OrderedDict(first_array.attrs)

        outarr = DataArray(outdata, name=outname, coords=outcoords,
                           dims=outdims, attrs=outattrs)
    else:
        outarr = outdata

    return outarr
@set_latlon_metadata(xy=True)
def ll_to_xy(wrfin, latitude, longitude, timeidx=0,
             squeeze=True, meta=True, stagger=None, as_int=True):
    """Return the x,y coordinates for a specified latitude and longitude.

    The *latitude* and *longitude* arguments can be a single value or a
    sequence of values.  The leftmost dimension of the returned array
    represents two different quantities:

        - return_val[0,...] will contain the X (west_east) values.
        - return_val[1,...] will contain the Y (south_north) values.

    Args:
        wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
            iterable): WRF-ARW NetCDF data, or an iterable sequence of
            the aforementioned types.

        latitude (:obj:`float` or sequence): A single latitude or a
            sequence of latitude values to be converted.

        longitude (:obj:`float` or sequence): A single longitude or a
            sequence of latitude values to be converted.

        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): Desired
            time index; a positive or negative integer, or
            :data:`wrf.ALL_TIMES` (alias for None) for all times.
            Default is 0.

        squeeze (:obj:`bool`, optional): Set to False to keep dimensions
            of size 1 in the output shape. Default is True.

        meta (:obj:`bool`, optional): Set to False to return a
            :class:`numpy.ndarray` instead of a
            :class:`xarray.DataArray`. Default is True.

        stagger (:obj:`str`): Grid selection -- 'm' (mass, default),
            'u' (staggered west_east), or 'v' (staggered south_north).

        as_int (:obj:`bool`): Set to False to return the x,y values as
            :obj:`float` instead of :obj:`int`.

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: The x,y
        coordinate value(s) whose leftmost dimension is 2 (0=X, 1=Y).

    """
    # Dictionary input: compute per key and stack along a new leading dim
    if is_mapping(wrfin):
        return _llxy_mapping(wrfin, latitude, longitude, ll_to_xy,
                             timeidx, stagger, squeeze, meta, as_int)

    cache_key = get_id(wrfin)
    wrf_seq = get_iterable(wrfin)

    return _ll_to_xy(latitude, longitude, wrf_seq, timeidx, stagger,
                     "cat", squeeze, None, cache_key, as_int)
@set_latlon_metadata(xy=True)
def ll_to_xy_proj(latitude, longitude, meta=True, squeeze=True, as_int=True,
                  map_proj=None, truelat1=None, truelat2=None, stand_lon=None,
                  ref_lat=None, ref_lon=None, pole_lat=None, pole_lon=None,
                  known_x=None, known_y=None, dx=None, dy=None,
                  latinc=None, loninc=None):
    """Return the x, y grid coordinates for a given latitude and longitude,
    using manually supplied map-projection parameters.

    The *latitude* and *longitude* arguments may be scalars or sequences.
    The leftmost dimension of the returned array holds two quantities:

        - return_val[0, ...]: the X (west_east) values.
        - return_val[1, ...]: the Y (south_north) values.

    Args:
        latitude (:obj:`float` or sequence): Latitude value(s) to convert.
        longitude (:obj:`float` or sequence): Longitude value(s) to convert.
        meta (:obj:`bool`, optional): If False, return a
            :class:`numpy.ndarray` instead of a :class:`xarray.DataArray`.
            Default is True.
        squeeze (:obj:`bool`, optional): If False, keep size-1 dimensions in
            the output shape. Default is True.
        as_int (:obj:`bool`): If False, return x, y as :obj:`float` instead
            of :obj:`int`.
        map_proj (:obj:`int`): Model projection [1=Lambert Conformal,
            2=Polar Stereographic, 3=Mercator, 6=Lat-Lon]. Required.
        truelat1 (:obj:`float`): True latitude 1. Required for
            map_proj = 1, 2, 3 (defaults to 0 otherwise).
        truelat2 (:obj:`float`): True latitude 2. Optional for map_proj = 1.
        stand_lon (:obj:`float`): Standard longitude. Required.
        ref_lat (:obj:`float`): A reference latitude. Required.
        ref_lon (:obj:`float`): A reference longitude. Required.
        known_x (:obj:`float`): Known x-coordinate for *ref_lon*. Required.
        known_y (:obj:`float`): Known y-coordinate for *ref_lat*. Required.
        pole_lat (:obj:`float`): Pole latitude. Optional for map_proj = 6
            (defaults to 90 otherwise).
        pole_lon (:obj:`float`): Pole longitude. Optional for map_proj = 6
            (defaults to 0 otherwise).
        dx (:obj:`float`): x spacing in meters at the true latitude.
            Required for map_proj = 1, 2, 3.
        dy (:obj:`float`): y spacing in meters at the true latitude.
            Required for map_proj = 1, 2, 3.
        latinc (:obj:`float`): Required for map_proj = 6. Defined as
            (dy*360.0)/2.0/Constants.PI/Constants.WRF_EARTH_RADIUS.
        loninc (:obj:`float`): Required for map_proj = 6. Defined as
            (dx*360.0)/2.0/Constants.PI/Constants.WRF_EARTH_RADIUS.

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: The x, y
        coordinate value(s) whose leftmost dimension is 2 (0=X, 1=Y).
    """
    # Collect the projection keywords explicitly instead of scraping locals().
    projparams = {
        "map_proj": map_proj,
        "truelat1": truelat1,
        "truelat2": truelat2,
        "stand_lon": stand_lon,
        "ref_lat": ref_lat,
        "ref_lon": ref_lon,
        "pole_lat": pole_lat,
        "pole_lon": pole_lon,
        "known_x": known_x,
        "known_y": known_y,
        "dx": dx,
        "dy": dy,
        "latinc": latinc,
        "loninc": loninc,
    }
    return _ll_to_xy(latitude, longitude, None, 0, True, "cat", squeeze, None,
                     None, as_int, **projparams)
@set_latlon_metadata(xy=False)
def xy_to_ll(wrfin, x, y, timeidx=0, squeeze=True, meta=True, stagger=None):
    """Return the latitude and longitude for the given x, y coordinates.

    The *x* and *y* arguments may be scalars or sequences. The leftmost
    dimension of the returned array holds two quantities:

        - return_val[0, ...]: the latitude values.
        - return_val[1, ...]: the longitude values.

    Args:
        wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an
            iterable): WRF-ARW NetCDF data, or an iterable sequence of the
            aforementioned types.
        x (:obj:`float` or sequence): x-coordinate value(s) to convert.
        y (:obj:`float` or sequence): y-coordinate value(s) to convert.
        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): Desired
            time index, or :data:`wrf.ALL_TIMES` (alias for None) for all
            times. Default is 0.
        squeeze (:obj:`bool`, optional): If False, keep size-1 dimensions in
            the output shape. Default is True.
        meta (:obj:`bool`, optional): If False, return a
            :class:`numpy.ndarray` instead of a :class:`xarray.DataArray`.
            Default is True.
        stagger (:obj:`str`): Grid selection: 'm' mass grid (default),
            'u' staggered west_east (x) dimension, 'v' staggered
            south_north (y) dimension.

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: Latitude and
        longitude values whose leftmost dimension is 2
        (0=latitude, 1=longitude).
    """
    # Mappings (e.g. a dict of files) are delegated and handled key-by-key.
    if is_mapping(wrfin):
        return _llxy_mapping(wrfin, x, y, xy_to_ll,
                             timeidx, stagger, squeeze, meta)
    cache_key = get_id(wrfin)
    wrf_seq = get_iterable(wrfin)
    return _xy_to_ll(x, y, wrf_seq, timeidx, stagger, "cat", True, None,
                     cache_key)
@set_latlon_metadata(xy=False)
def xy_to_ll_proj(x, y, meta=True, squeeze=True, map_proj=None, truelat1=None,
                  truelat2=None, stand_lon=None, ref_lat=None, ref_lon=None,
                  pole_lat=None, pole_lon=None, known_x=None, known_y=None,
                  dx=None, dy=None, latinc=None, loninc=None):
    """Return the latitude and longitude for the given x, y coordinates,
    using manually supplied map-projection parameters.

    The *x* and *y* arguments may be scalars or sequences. The leftmost
    dimension of the returned array holds two quantities:

        - return_val[0, ...]: the latitude values.
        - return_val[1, ...]: the longitude values.

    Args:
        x (:obj:`float` or sequence): x-coordinate value(s) to convert.
        y (:obj:`float` or sequence): y-coordinate value(s) to convert.
        meta (:obj:`bool`, optional): If False, return a
            :class:`numpy.ndarray` instead of a :class:`xarray.DataArray`.
            Default is True.
        squeeze (:obj:`bool`, optional): If False, keep size-1 dimensions in
            the output shape. Default is True.
        map_proj (:obj:`int`): Model projection [1=Lambert Conformal,
            2=Polar Stereographic, 3=Mercator, 6=Lat-Lon]. Required.
        truelat1 (:obj:`float`): True latitude 1. Required for
            map_proj = 1, 2, 3 (defaults to 0 otherwise).
        truelat2 (:obj:`float`): True latitude 2. Optional for map_proj = 1.
        stand_lon (:obj:`float`): Standard longitude. Required.
        ref_lat (:obj:`float`): A reference latitude. Required.
        ref_lon (:obj:`float`): A reference longitude. Required.
        known_x (:obj:`float`): Known x-coordinate for *ref_lon*. Required.
        known_y (:obj:`float`): Known y-coordinate for *ref_lat*. Required.
        pole_lat (:obj:`float`): Pole latitude. Optional for map_proj = 6
            (defaults to 90 otherwise).
        pole_lon (:obj:`float`): Pole longitude. Optional for map_proj = 6
            (defaults to 0 otherwise).
        dx (:obj:`float`): x spacing in meters at the true latitude.
            Required for map_proj = 1, 2, 3.
        dy (:obj:`float`): y spacing in meters at the true latitude.
            Required for map_proj = 1, 2, 3.
        latinc (:obj:`float`): Required for map_proj = 6. Defined as
            (dy*360.0)/2.0/Constants.PI/Constants.WRF_EARTH_RADIUS.
        loninc (:obj:`float`): Required for map_proj = 6. Defined as
            (dx*360.0)/2.0/Constants.PI/Constants.WRF_EARTH_RADIUS.

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: Latitude and
        longitude values whose leftmost dimension is 2
        (0=latitude, 1=longitude).
    """
    # Collect the projection keywords explicitly instead of scraping locals().
    projparams = {
        "map_proj": map_proj,
        "truelat1": truelat1,
        "truelat2": truelat2,
        "stand_lon": stand_lon,
        "ref_lat": ref_lat,
        "ref_lon": ref_lon,
        "pole_lat": pole_lat,
        "pole_lon": pole_lon,
        "known_x": known_x,
        "known_y": known_y,
        "dx": dx,
        "dy": dy,
        "latinc": latinc,
        "loninc": loninc,
    }
    return _xy_to_ll(x, y, None, 0, None, "cat", squeeze, None, None,
                     **projparams)
| [
"collections.OrderedDict",
"numpy.empty",
"xarray.DataArray"
] | [((10697, 10733), 'numpy.empty', 'np.empty', (['outdims', 'first_array.dtype'], {}), '(outdims, first_array.dtype)\n', (10705, 10733), True, 'import numpy as np\n'), ((11629, 11660), 'collections.OrderedDict', 'OrderedDict', (['first_array.coords'], {}), '(first_array.coords)\n', (11640, 11660), False, 'from collections import OrderedDict\n'), ((13057, 13087), 'collections.OrderedDict', 'OrderedDict', (['first_array.attrs'], {}), '(first_array.attrs)\n', (13068, 13087), False, 'from collections import OrderedDict\n'), ((13114, 13199), 'xarray.DataArray', 'DataArray', (['outdata'], {'name': 'outname', 'coords': 'outcoords', 'dims': 'outdims', 'attrs': 'outattrs'}), '(outdata, name=outname, coords=outcoords, dims=outdims, attrs=outattrs\n )\n', (13123, 13199), False, 'from xarray import DataArray\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate a correlation matrix plot"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from scipy import cluster
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from . set_style import set_plot_style
from .. pygeostat_parameters import Parameters
@set_plot_style
def correlation_matrix_plot(correlation_data, figsize=None, ax=None, cax=None, title=None, xticklabels=None,
                ticklabels=None, yticklabels=None, rotateticks=None, cbar=None, annotation=None, lower_matrix=False,
                lw=0.5, hierarchy=None, dendrogram=False, vlim=(-1, 1), cbar_label=None, cmap=None,
                plot_style=None, custom_style=None, output_file=None, out_kws=None, sigfigs=3, **kwargs):
    """
    This function uses matplotlib to create a correlation matrix heatmap illustrating the
    correlation coefficient between each pair of variables.

    The only parameter needed is the correlation matrix. All of the other arguments are optional.
    Figure size will likely have to be manually adjusted. If the label parameters are left to their
    default value of ``None`` and the input matrix is contained in a pandas dataframe, the
    index/column information will be used to label the columns and rows. If a numpy array is
    passed, axis tick labels will need to be provided. Axis tick labels are automatically checked
    for overlap and if needed, are rotated. If rotation is necessary, consider condensing the
    variables names or plotting a larger figure as the result is odd. If ``cbar`` is left to its
    default value of ``None``, a colorbar will only be plotted if the ``lower_matrix`` is set to True. It
    can also be turned on or off manually. If ``annotation`` is left to its default value of ``None``,
    annotations will only be placed if a full matrix is being plotted. It can also be turned on or
    off manually.

    The parameter ``ticklabels`` is odd in that it can take a few forms, all of which are a tuple
    with the first value controlling the x-axis and second value controlling the y-axis (x, y). If
    left to its default of ``None``, another pygeostat function will check to see if the labels
    overlap, if so it will rotate the axis labels by a default angle of (45, -45) if required. If
    a value of ``True`` is passed for either axis, the respective default values previously stated
    is used. If either value is a float, that value is used to rotate the axis labels.

    The correlation matrix can be ordered based on hierarchical clustering. The following is a list
    of permissible arguments: ``'single', 'complete', 'average', 'weighted', 'centroid', 'median',
    'ward'``. The dendrogram if plotted will have a height equal to 15% the height of the
    correlation matrix. This is currently hard coded.

    .. seealso::
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy.cluster.hierarchy.linkage

    Please review the documentation of the :func:`gs.set_style()
    <pygeostat.plotting.set_style.set_style>` and :func:`gs.export_image()
    <pygeostat.plotting.export_image.export_image>` functions for details on their parameters so that
    their use in this function can be understood.

    Parameters:
        correlation_data: Pandas dataframe or numpy matrix containing the required loadings or correlation
            matrix
        figsize (tuple): Figure size (width, height)
        ax (mpl.axis): Matplotlib axis to plot the figure
        cax (mpl.axis): Matplotlib axis to plot the colorbar onto (required when ``ax`` cannot
            be divided)
        title (str): Title for the plot
        ticklabels (list): Tick labels for both axes
        xticklabels (list): Tick labels along the x-axis (overwritten if ticklabels is passed)
        yticklabels (list): Tick labels along the y-axis (overwritten if ticklabels is passed)
        rotateticks (bool or float tuple): Bool or float values to control axis label rotations.
            See above for more info.
        cbar (bool): Indicate if a colorbar should be plotted or not
        annotation (bool): Indicate if the cells should be annotated or not
        lower_matrix (bool): Indicate if only the lower matrix should be plotted
        lw (float): Line width of lines in correlation matrix
        hierarchy (str): Indicate the type of hierarchical clustering to use to reorder the
            correlation matrix. Please see above for more details
        dendrogram (bool): Indicate if a dendrogram should be plotted. The argument ``hierarchy``
            must be set to ``true`` for this argument to have any effect
        vlim (tuple): vlim for the data on the correlation_matrix_plot, default = (-1, 1)
        cbar_label (str): string for the colorbar label
        cmap (str): valid Matplotlib colormap
        plot_style (str): Use a predefined set of matplotlib plotting parameters as specified by
            :class:`gs.GridDef <pygeostat.data.grid_definition.GridDef>`. Use ``False`` or ``None``
            to turn it off
        custom_style (dict): Alter some of the predefined parameters in the ``plot_style`` selected.
        output_file (str): Output figure file name and location
        out_kws (dict): Optional dictionary of permissible keyword arguments to pass to
            :func:`gs.export_image() <pygeostat.plotting.export_image.export_image>`
        sigfigs (int): significant digits for labeling of colorbar and cells
        **kwargs: Optional permissible keyword arguments to pass to matplotlib's pcolormesh
            function

    Returns:
        ax (ax): matplotlib Axes object with the correlation matrix plot

    **Examples:**

    Calculate the correlation matrix variables in a pandas dataframe

    .. plot::

        import pygeostat as gs
        data_file = gs.ExampleData("point3d_ind_mv")
        data = data_file[data_file.variables]
        data_cor = data.corr()
        gs.correlation_matrix_plot(data_cor, cmap = 'bwr')

    |

    Again for illustration, convert the correlation dataframe into a numpy matrix. By using a
    numpy matrix, the axis labels will need to me manually entered. Reduce the figure size as
    well:

    .. plot::

        import pygeostat as gs
        data_file = gs.ExampleData("point3d_ind_mv")
        data = data_file[data_file.variables]
        data_cor = data.corr()
        gs.correlation_matrix_plot(data_cor.values, cmap = 'bwr')

    |

    Plotting a lower correlation matrix while having
    annotations:

    .. plot::

        import pygeostat as gs
        data_file = gs.ExampleData("point3d_ind_mv")
        data = data_file[data_file.variables]
        data_cor = data.corr()
        gs.correlation_matrix_plot(data_cor, lower_matrix=True, annotation=True)
    """
    from . export_image import export_image
    from . utils import titleoverlap, _tickoverlap, get_contcbarargs
    from mpl_toolkits.axes_grid1 import ImageGrid
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from mpl_toolkits.axes_grid1 import axes_divider
    # Sanity checks
    if lower_matrix and dendrogram:
        raise NotImplementedError("Dendrogram plotting while using the ``lower_matrix`` functionality is"
                                  " not currently supported")
    # Convert the numpy array to a pd.DataFrame (needed for hierarchy)
    if isinstance(correlation_data, np.ndarray):
        correlation_data = pd.DataFrame(data=correlation_data)
    if not isinstance(correlation_data, pd.DataFrame):
        raise ValueError('correlation_data must be convertable to pandas dataframe')
    # Set-up some parameters
    nx = correlation_data.shape[1]
    ny = correlation_data.shape[0]
    # Handle dictionary defaults
    if out_kws is None:
        out_kws = dict()
    # Plot colorbar if required
    if cbar is None:
        if lower_matrix or annotation is False:
            cbar = True
        else:
            cbar = False
    # Determine hierarchy if needed
    if hierarchy in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
        # Determine the clustering
        linkage = cluster.hierarchy.linkage(correlation_data, method=hierarchy)
        dendo = cluster.hierarchy.dendrogram(linkage, no_plot=True)
        hierarchy = True
        # Reorder the correlation matrix and the tick labels
        ind = dendo['leaves']
        xlab = list(correlation_data.columns)
        xorder = [xlab[i] for i in ind]
        correlation_data = correlation_data[xorder]
        ylab = list(correlation_data.index)
        yorder = [ylab[i] for i in ind]
        correlation_data = correlation_data.loc[yorder]
        if ticklabels:
            ticklabels = [ticklabels[i] for i in ind]
        if xticklabels:
            xticklabels = [xticklabels[i] for i in ind]
        if yticklabels:
            yticklabels = [yticklabels[i] for i in ind]
    # Copy the data and convert it to a numpy matrix if a pandas dataframe was passed
    if isinstance(correlation_data, pd.DataFrame):
        plot_data = correlation_data.values
    else:
        plot_data = np.asarray(correlation_data)
    # ``LocatableAxes`` was removed from modern matplotlib (its functionality was merged into
    # ``Axes``); fall back to an empty tuple so the isinstance() check below evaluates False
    # (i.e. any user-passed axes gets divided) instead of raising AttributeError.
    _locatable_axes = getattr(axes_divider, 'LocatableAxes', ())
    # Set-up plot else coerce the passed axes as required
    if ax is None:
        if cbar:
            if hierarchy:
                cbar_mode = 'each'
            else:
                cbar_mode = 'single'
        else:
            cbar_mode = None
        if hierarchy and dendrogram:
            nrows_ncols = (2, 1)
        else:
            nrows_ncols = (1, 1)
        # Setup up a new plot
        fig = plt.figure(figsize=figsize)
        imggrid = ImageGrid(fig, 111, nrows_ncols, axes_pad=(0.07, 0), cbar_mode=cbar_mode, cbar_size=0.075)
        ax = imggrid[0]
        if hierarchy and dendrogram:
            ax_dendo = imggrid[1]
            fig.delaxes(imggrid.cbar_axes[1])
        if cbar:
            cax = imggrid.cbar_axes[0]
    elif (cbar or hierarchy) and not isinstance(ax, _locatable_axes):
        fig = plt.gcf()
        divider = make_axes_locatable(ax)
        if cbar:
            cax = divider.append_axes("right", size=0.075, pad=0.07, aspect='auto')
        if hierarchy and dendrogram:
            ax_dendo = divider.append_axes("bottom", size=0.4, pad=0.0, aspect='auto')
    elif hierarchy:
        # NOTE: message previously read "cannotation" due to a bad annot->annotation rename
        raise ValueError("`ax` cannot be divided meaning the dendrogram cannot be plotted")
    elif cbar and cax is None:
        raise ValueError("A colorbar axes `cax` must be passed as the passed `ax` cannot be"
                         " divided.")
    ax.set_aspect('equal')
    # Set the axis ticklabels if possible
    if ticklabels:
        xlabels = ticklabels
        ylabels = ticklabels
    else:
        # Handle xticklabels
        if xticklabels is None and hasattr(correlation_data, 'columns'):
            xticklabels = list(correlation_data.columns)
        elif isinstance(xticklabels, bool) and xticklabels:
            xticklabels = list(correlation_data.columns)
        if xticklabels is not None:
            xlabels = xticklabels
        else:
            xlabels = None
        # Handle yticklabels
        if yticklabels is None and hasattr(correlation_data, 'index'):
            yticklabels = list(correlation_data.index)
        elif isinstance(yticklabels, bool) and yticklabels:
            yticklabels = list(correlation_data.index)
        if yticklabels is not None:
            ylabels = yticklabels
        else:
            ylabels = None
    # Set-up figure aesthetics
    if lower_matrix:
        gridclr = 'white'
        ax.set(xlim=(0, nx - 1), ylim=(0, ny - 1))
        xticklocs = np.arange(nx - 1)
        yticklocs = np.arange(ny - 1)
        # Trim the labels
        if isinstance(ylabels, list):
            ylabels = ylabels[1:]
    else:
        gridclr = 'black'
        ax.xaxis.tick_top()
        ax.set(xlim=(0, nx), ylim=(0, ny))
        xticklocs = np.arange(nx)
        yticklocs = np.arange(ny)
    # Set-up x-axis labels and grid locations
    ax.set_xticks(xticklocs + 0.5)
    ax.set_xticks(xticklocs, minor=True)
    if xlabels is not None:
        if lower_matrix:
            va = 'top'
        else:
            va = 'bottom'
        ax.set_xticklabels(xlabels, va=va, ha='center', rotation='horizontal')
        ax.tick_params(axis='x', pad=2)
    else:
        ax.get_xaxis().set_ticks([])
    # Set-up y-axis labels and grid locations
    ax.invert_yaxis()
    ax.set_yticks(yticklocs + 0.5)
    ax.set_yticks(yticklocs, minor=True)
    if ylabels is not None:
        ax.set_yticklabels(ylabels, va='center', ha='right', rotation='vertical')
        ax.tick_params(axis='y', pad=1)
    else:
        ax.get_yaxis().set_ticks([])
    # Set-up the figure spines
    for spine in ax.spines:
        if lower_matrix:
            ax.spines[spine].set_color('white')
    # Mask the data if lower_matrix is being used
    if lower_matrix:
        # ``np.bool`` was removed in NumPy >= 1.24; the builtin ``bool`` is the documented
        # replacement and is what the old alias pointed at.
        mask = np.zeros_like(correlation_data, dtype=bool)
        mask[np.triu_indices_from(mask)] = True
        plot_data = np.ma.masked_where(mask, plot_data)
        plot_data = plot_data[1:, :(ny - 1)]
    # Plot the grid
    ax.grid(True, which='minor', color=gridclr, zorder=3, lw=lw)
    # Tick rotation
    plt.draw()
    # Check if the axis tick labels overlap, if so rotate them
    if rotateticks is None:
        rotateticks = Parameters['plotting.rotateticks']
    if rotateticks is None:
        # The plots tick labels will not be properly accessible until the figure is "drawn", once
        # the command below is run, ax.get_ticklabel() will actually work properly.
        rotateticks = _tickoverlap(ax)
    # Rotate if required
    # NOTE(review): these ``is not False or ... is not None`` conditions are tautologies (always
    # True); fixing them to ``and`` would change behavior, so they are preserved as-is.
    if rotateticks[0] is not False or rotateticks[0] is not None:
        if rotateticks[0] is True:
            rotateticks[0] = 45
        xlabels = ax.get_xticklabels()
        for xlabel in xlabels:
            if lower_matrix:
                xlabel.set_ha('center')
                xlabel.set_va('top')
            else:
                xlabel.set_ha('center')
                xlabel.set_va('bottom')
            xlabel.set_rotation(rotateticks[0])
    if rotateticks[1] is not False or rotateticks[1] is not None:
        if rotateticks[1] is True:
            rotateticks[1] = -45
        ylabels = ax.get_yticklabels()
        for ylabel in ylabels:
            ylabel.set_ha('right')
            ylabel.set_va('center')
            ylabel.set_rotation(rotateticks[1])
        ax.tick_params(axis='y', pad=2)
    # Plot the title if required
    if title:
        titletxt = ax.set_title(title)
        # Due to placing the xticklabels on the top, if a title is plotted it is going to overlap.
        # The following code checks for overlap and bumps the title up step by step until there
        # isn't any
        if not lower_matrix:
            # Check to see if there is overlap with the title and the xticklabels The plots tick
            # labels will not be properly accessible until the figure is "drawn", once the command
            # below is run, ax.titleoverlap() will actually work properly.
            plt.draw()
            _titleoverlap = titleoverlap(ax, titletxt)
            # If there is, clear the title and start moving a suptitle up until there isn't overlap
            if _titleoverlap:
                titletxt.set_text('')
                shifttitle = True
                y = 1.01
            else:
                shifttitle = False
            while _titleoverlap:
                titletxt = ax.set_title(title, y=y)
                plt.draw()
                _titleoverlap = titleoverlap(ax, titletxt)
                y = y + 0.01
            # Now that a spot without overlap has been found, add the pad of 0.015 (0.01 already
            # added to y) so that it is slightly farther away from the axis labels
            if shifttitle:
                titletxt = ax.set_title(title, y=(y + 0.005))
    # Plot the figure
    if cmap is None:
        cmap = Parameters['plotting.cmap']
    # Forward **kwargs to pcolormesh as documented in the docstring (they were previously
    # accepted but silently dropped).
    plot = ax.pcolormesh(plot_data, cmap=cmap, norm=plt.Normalize(vmin=vlim[0], vmax=vlim[1]),
                         zorder=0, **kwargs)
    # Annotate if required
    if not isinstance(annotation, bool):
        if lower_matrix:
            annotation = False
        else:
            annotation = True
    if annotation:
        # Set-up a colormap to use to color the annotations. This was manually tuned so if you can
        # come up with something better do it
        clrvals = [0.85, 0.85, 0.85, 0.85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        if cmap is None:
            cmap = []
            for clr in clrvals:
                cmap.append(str(clr))
        if isinstance(cmap, list):
            cmap = mpl.colors.ListedColormap(cmap)
        clrnorm = mpl.colors.Normalize(vmin=vlim[0], vmax=vlim[1])
        clrmap = mpl.cm.ScalarMappable(norm=clrnorm, cmap=cmap)
        for y in range(plot_data.shape[0]):
            for x in range(plot_data.shape[1]):
                # ``np.float`` was removed in NumPy >= 1.24; it was an alias of the builtin
                # ``float`` (and np.float64 subclasses float), so behavior is unchanged.
                if isinstance(plot_data[y, x], float):
                    # try:
                    #     color = clrmap.to_rgba(plot_data[y, x])
                    # except (KeyError, ValueError):
                    color = 'black'
                    ax.text(x + 0.5, y + 0.5, ('{:.%ig}' % sigfigs).format(plot_data[y, x]),
                            ha='center', va='center', color=color)
    if cbar:
        # handle parms for colorbars and colormaps
        vlim, ticklocs, ticklabels = get_contcbarargs(np.linspace(vlim[0], vlim[1], 5),
                                                      sigfigs, vlim)
        # Plot the colorbar
        cbar = plt.colorbar(plot, cax=cax, ticks=ticklocs)
        # Configure the color bar
        cbar.ax.set_yticklabels(ticklabels, ha='left')
        cbar.ax.tick_params(axis='y', pad=2)
        if cbar_label is not None:
            cbar.set_label(cbar_label, ha='center', va='top', labelpad=2)
    # Plot the dendrogram if required
    if hierarchy and dendrogram:
        # Get the line coordinates and scale
        ylines = np.array(dendo['dcoord'])
        ydendo = ny * 0.15
        ylines = ylines * (ydendo / ylines.max())
        xlines = np.array(dendo['icoord']) / 10
        lines = []
        for (xline, yline) in zip(xlines, ylines):
            lines.append(list(zip(xline, yline)))
        # Plot the dendrogram
        coll = mpl.collections.LineCollection(lines, color='k', lw=lw)
        ax_dendo.add_collection(coll)
        # Fix the subplots aesthetics
        ax_dendo.set_ylim(ydendo, 0)
        for spine in ax_dendo.spines:
            ax_dendo.spines[spine].set_visible(False)
        ax_dendo.yaxis.set_visible(False)
        ax_dendo.xaxis.set_visible(False)
        ax_dendo.patch.set_visible(False)
    # Export figure
    if output_file or ('pdfpages' in out_kws):
        export_image(output_file, **out_kws)
    return ax
| [
"matplotlib.collections.LineCollection",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.Normalize",
"numpy.asarray",
"numpy.ma.masked_where",
"matplotlib.colors.ListedColormap",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"scipy.cluster.hierarchy.linkage",
"mpl_toolkits.axes_grid1.Image... | [((13743, 13753), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (13751, 13753), True, 'import matplotlib.pyplot as plt\n'), ((7952, 7987), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'correlation_data'}), '(data=correlation_data)\n', (7964, 7987), True, 'import pandas as pd\n'), ((8665, 8726), 'scipy.cluster.hierarchy.linkage', 'cluster.hierarchy.linkage', (['correlation_data'], {'method': 'hierarchy'}), '(correlation_data, method=hierarchy)\n', (8690, 8726), False, 'from scipy import cluster\n'), ((8743, 8794), 'scipy.cluster.hierarchy.dendrogram', 'cluster.hierarchy.dendrogram', (['linkage'], {'no_plot': '(True)'}), '(linkage, no_plot=True)\n', (8771, 8794), False, 'from scipy import cluster\n'), ((9637, 9665), 'numpy.asarray', 'np.asarray', (['correlation_data'], {}), '(correlation_data)\n', (9647, 9665), True, 'import numpy as np\n'), ((10080, 10107), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (10090, 10107), True, 'import matplotlib.pyplot as plt\n'), ((10126, 10220), 'mpl_toolkits.axes_grid1.ImageGrid', 'ImageGrid', (['fig', '(111)', 'nrows_ncols'], {'axes_pad': '(0.07, 0)', 'cbar_mode': 'cbar_mode', 'cbar_size': '(0.075)'}), '(fig, 111, nrows_ncols, axes_pad=(0.07, 0), cbar_mode=cbar_mode,\n cbar_size=0.075)\n', (10135, 10220), False, 'from mpl_toolkits.axes_grid1 import ImageGrid\n'), ((12146, 12163), 'numpy.arange', 'np.arange', (['(nx - 1)'], {}), '(nx - 1)\n', (12155, 12163), True, 'import numpy as np\n'), ((12184, 12201), 'numpy.arange', 'np.arange', (['(ny - 1)'], {}), '(ny - 1)\n', (12193, 12201), True, 'import numpy as np\n'), ((12427, 12440), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (12436, 12440), True, 'import numpy as np\n'), ((12461, 12474), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (12470, 12474), True, 'import numpy as np\n'), ((13438, 13484), 'numpy.zeros_like', 'np.zeros_like', (['correlation_data'], {'dtype': 
'np.bool'}), '(correlation_data, dtype=np.bool)\n', (13451, 13484), True, 'import numpy as np\n'), ((13553, 13588), 'numpy.ma.masked_where', 'np.ma.masked_where', (['mask', 'plot_data'], {}), '(mask, plot_data)\n', (13571, 13588), True, 'import numpy as np\n'), ((18171, 18214), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['plot'], {'cax': 'cax', 'ticks': 'ticklocs'}), '(plot, cax=cax, ticks=ticklocs)\n', (18183, 18214), True, 'import matplotlib.pyplot as plt\n'), ((18591, 18616), 'numpy.array', 'np.array', (["dendo['dcoord']"], {}), "(dendo['dcoord'])\n", (18599, 18616), True, 'import numpy as np\n'), ((18907, 18962), 'matplotlib.collections.LineCollection', 'mpl.collections.LineCollection', (['lines'], {'color': '"""k"""', 'lw': 'lw'}), "(lines, color='k', lw=lw)\n", (18937, 18962), True, 'import matplotlib as mpl\n'), ((10509, 10518), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10516, 10518), True, 'import matplotlib.pyplot as plt\n'), ((10537, 10560), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (10556, 10560), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((13498, 13524), 'numpy.triu_indices_from', 'np.triu_indices_from', (['mask'], {}), '(mask)\n', (13518, 13524), True, 'import numpy as np\n'), ((15607, 15617), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (15615, 15617), True, 'import matplotlib.pyplot as plt\n'), ((16559, 16600), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ([], {'vmin': 'vlim[0]', 'vmax': 'vlim[1]'}), '(vmin=vlim[0], vmax=vlim[1])\n', (16572, 16600), True, 'import matplotlib.pyplot as plt\n'), ((17241, 17272), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['cmap'], {}), '(cmap)\n', (17266, 17272), True, 'import matplotlib as mpl\n'), ((17295, 17343), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vlim[0]', 'vmax': 'vlim[1]'}), '(vmin=vlim[0], vmax=vlim[1])\n', (17315, 17343), True, 'import matplotlib 
as mpl\n'), ((17365, 17411), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'clrnorm', 'cmap': 'cmap'}), '(norm=clrnorm, cmap=cmap)\n', (17386, 17411), True, 'import matplotlib as mpl\n'), ((18022, 18054), 'numpy.linspace', 'np.linspace', (['vlim[0]', 'vlim[1]', '(5)'], {}), '(vlim[0], vlim[1], 5)\n', (18033, 18054), True, 'import numpy as np\n'), ((18711, 18736), 'numpy.array', 'np.array', (["dendo['icoord']"], {}), "(dendo['icoord'])\n", (18719, 18736), True, 'import numpy as np\n'), ((16054, 16064), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (16062, 16064), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
def test_list_indexing(todo_list):
try:
assert(todo_list[2] == 'REPLACED')
except AssertionError as e:
print("The element 'TO_REPLACE_1' is not correctly replaced!")
raise(e)
try:
assert(todo_list[-1][-1][-1] == 'REPLACED')
except AssertionError as e:
print("The element 'TO_REPLACE_2' is not correctly replaced!")
raise(e)
print('Well done!')
def test_slicing_1(lst):
c_answer = [2, 3, 4, 5, 6]
try:
assert(lst == c_answer)
except AssertionError:
print('The slice is incorrect!')
raise IncorrectAnswer(lst, c_answer)
else:
print("Well done!")
def test_slicing_2(lst):
c_answer = [5, 7, 9, 11]
try:
assert(lst == c_answer)
except AssertionError:
print('The slice is incorrect!')
raise IncorrectAnswer(lst, c_answer)
else:
print("Well done!")
def test_create_array_with_zeros(arr):
c_answer = np.zeros((2, 3, 5, 3, 7))
try:
assert(np.all(arr.shape == c_answer.shape))
except AssertionError as e:
print("Your array has the wrong shape, namely %r, but I expected %r" % (arr.shape, c_answer.shape,))
raise(e)
try:
assert(np.all(arr == 0.0))
except AssertionError as e:
print("Your array does not contain zeros ... Did you use np.zeros()?")
raise(e)
print("Well done!")
def test_fill_array_with_complement(arr):
c_answer = 1.0 / np.arange(1, 9)
try:
np.testing.assert_array_almost_equal(arr, c_answer, 4)
except AssertionError as e:
print("Your array (%r) does not match the correct answer (%r)!" % (arr, c_answer))
raise(e)
else:
print("AWESOME!")
def test_set_odd_indices_to_zero(arr):
c_answer = np.arange(3, 25)
c_answer[1::2] = 0.0
try:
np.testing.assert_array_almost_equal(arr, c_answer, 4)
except AssertionError as e:
print("Your array (%r) does not match the correct answer (%r)!" % (arr, c_answer))
raise(e)
else:
print("Good job!")
def test_set_lower_right_value_to_one(arr):
c_answer = np.zeros((3, 3))
c_answer[-1, -1] = 1.0
try:
np.testing.assert_array_almost_equal(arr, c_answer, 4)
except AssertionError as e:
print("Your array: \n\n%r\n\ndoes not match the correct answer:\n\n%r!" % (arr, c_answer))
raise(e)
else:
print("Superb!")
def test_bloodpressure_index(arr):
np.random.seed(42)
bp_data = np.random.normal(loc=100, scale=5, size=(20, 24, 30, 2))
c_answer = bp_data[:, :, 17, 1]
try:
assert(arr.shape == (20, 24))
except AssertionError as e:
print("The result of your indexing operation is of shape %r, "
"while it should be %r, namely 20 subjects by 24 hours" % (arr.shape, (20, 24)))
raise(e)
try:
np.testing.assert_array_almost_equal(arr, c_answer, 4)
except AssertionError as e:
print("Your answer is not correct! Did you perhaps forget that Python has zero-based indexing? (First index is 0!)")
raise(e)
print("You're incredible!")
def test_boolean_indexing(arr):
my_array = np.array([[0, 1, -1, -2],
[2, -5, 1, 4],
[10, -2, -4, 20]])
c_answer = my_array[my_array ** 2 > 4]
try:
np.testing.assert_array_equal(arr, c_answer)
except AssertionError as e:
print("Incorrect answer! I expected %r, but I got %r" % (c_answer, arr))
raise(e)
print("EPIC!")
def test_tvalue_computation(arr, h0, tval_ans):
c_tval = (arr.mean() - h0) / (arr.std() / np.sqrt(arr.size - 1))
try:
np.testing.assert_almost_equal(tval_ans, c_tval)
except AssertionError as e:
print("T-value is incorrect! Your t-value is %.3f, while it should be %.3f" % (tval_ans, c_tval))
raise(e)
print("Correct! You stats wizard!")
def test_array_product_and_sum(arr):
arr_A = np.arange(10).reshape((5, 2))
arr_B = np.arange(10, 20).reshape((5, 2))
c_answer = (arr_A * arr_B) + 5
try:
np.testing.assert_array_equal(arr, c_answer)
except AssertionError as e:
print("Your answer is incorrect! I got:\n\n%r\n\nbut I expected:\n\n%r" % (arr, c_answer))
raise(e)
else:
print("Great!")
def test_compute_range_vectorized(arr, ans):
c_answer = arr.max(axis=0) - arr.min(axis=0)
try:
assert(ans.shape == c_answer.shape)
except AssertionError as e:
print("The shape of your answer is incorrect! I got %r, "
"but I expected %r for input-array of shape %r" % (ans.shape, c_answer.shape, arr.shape))
raise(e)
try:
np.testing.assert_array_almost_equal(ans, c_answer, 4)
except AssertionError as e:
print("Your answer is incorrect! Your answer is:\n\n%r/n/n But I expected:\n\n%r" % (ans, c_answer))
raise(e)
print("Easy peasy!") | [
"numpy.random.normal",
"numpy.testing.assert_array_almost_equal",
"numpy.sqrt",
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_almost_equal",
"numpy.random.seed",
"numpy.all",
"numpy.arange"
] | [((1025, 1050), 'numpy.zeros', 'np.zeros', (['(2, 3, 5, 3, 7)'], {}), '((2, 3, 5, 3, 7))\n', (1033, 1050), True, 'import numpy as np\n'), ((1867, 1883), 'numpy.arange', 'np.arange', (['(3)', '(25)'], {}), '(3, 25)\n', (1876, 1883), True, 'import numpy as np\n'), ((2229, 2245), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2237, 2245), True, 'import numpy as np\n'), ((2575, 2593), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2589, 2593), True, 'import numpy as np\n'), ((2608, 2664), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(100)', 'scale': '(5)', 'size': '(20, 24, 30, 2)'}), '(loc=100, scale=5, size=(20, 24, 30, 2))\n', (2624, 2664), True, 'import numpy as np\n'), ((3303, 3362), 'numpy.array', 'np.array', (['[[0, 1, -1, -2], [2, -5, 1, 4], [10, -2, -4, 20]]'], {}), '([[0, 1, -1, -2], [2, -5, 1, 4], [10, -2, -4, 20]])\n', (3311, 3362), True, 'import numpy as np\n'), ((1075, 1110), 'numpy.all', 'np.all', (['(arr.shape == c_answer.shape)'], {}), '(arr.shape == c_answer.shape)\n', (1081, 1110), True, 'import numpy as np\n'), ((1295, 1313), 'numpy.all', 'np.all', (['(arr == 0.0)'], {}), '(arr == 0.0)\n', (1301, 1313), True, 'import numpy as np\n'), ((1542, 1557), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (1551, 1557), True, 'import numpy as np\n'), ((1575, 1629), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['arr', 'c_answer', '(4)'], {}), '(arr, c_answer, 4)\n', (1611, 1629), True, 'import numpy as np\n'), ((1931, 1985), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['arr', 'c_answer', '(4)'], {}), '(arr, c_answer, 4)\n', (1967, 1985), True, 'import numpy as np\n'), ((2295, 2349), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['arr', 'c_answer', '(4)'], {}), '(arr, c_answer, 4)\n', (2331, 2349), True, 'import numpy as np\n'), ((2983, 3037), 'numpy.testing.assert_array_almost_equal', 
'np.testing.assert_array_almost_equal', (['arr', 'c_answer', '(4)'], {}), '(arr, c_answer, 4)\n', (3019, 3037), True, 'import numpy as np\n'), ((3470, 3514), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr', 'c_answer'], {}), '(arr, c_answer)\n', (3499, 3514), True, 'import numpy as np\n'), ((3819, 3867), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['tval_ans', 'c_tval'], {}), '(tval_ans, c_tval)\n', (3849, 3867), True, 'import numpy as np\n'), ((4257, 4301), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr', 'c_answer'], {}), '(arr, c_answer)\n', (4286, 4301), True, 'import numpy as np\n'), ((4892, 4946), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['ans', 'c_answer', '(4)'], {}), '(ans, c_answer, 4)\n', (4928, 4946), True, 'import numpy as np\n'), ((3774, 3795), 'numpy.sqrt', 'np.sqrt', (['(arr.size - 1)'], {}), '(arr.size - 1)\n', (3781, 3795), True, 'import numpy as np\n'), ((4128, 4141), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4137, 4141), True, 'import numpy as np\n'), ((4170, 4187), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {}), '(10, 20)\n', (4179, 4187), True, 'import numpy as np\n')] |
import numpy as np
from numpy import pi, exp
from scipy.special import gamma as gamma_func
from scipy.spatial import distance
class Distribution:
_parameter_names = ''
multi = False
"""Base probability distribution class"""
def __init__(self, **kwargs):
pass
@property
def rvs(self):
"""Returns single random sample"""
return self.sample(1)[0]
def sample(self):
#draw samples from distribution
raise NotImplementedError()
def pdf(self, x):
# compute p(x)
raise NotImplementedError()
@property
def _param_values(self):
return ",".join(f"{value}" for value in self.__dict__.values())
@property
def _get_params_str(self):
"""Return name and value for each parameter"""
d = dict(zip(self._parameter_names, self.__dict__.values()))
return ",".join(f"{param}={value}" for param, value in d.items())
@property
def parameters(self):
return ",".join(f"{key}={value}" for key, value in self.__dict__.items())
def __repr__(self):
return f'{self.__class__.__name__}({self.parameters})'
def __str__(self):
return f'{self.__class__.__name__} distribution with {self._get_params_str})'
# @classmethod
# def from_params(cls, params):
# """ Create a distribution from parameter tuple
# --------
# >>> x = (0,1)
# >>> Gaussian(x)
# """
#
# kwargs = dict(zip(cls.parameters(), params))
# return cls(**kwargs)
class Gaussian(Distribution):
"""Gaussian distribution"""
_parameter_names = "mean", "standard deviation"
def __init__(self, mu=0, sd=1):
self.mu = mu
self.sd= sd
@property
def var(self):
return self.sd**2
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
mu_new = self.mu + other.mu
sd_new = np.sqrt(self.var + other.var)
return Gaussian(mu_new, sd_new)
def pdf(self, x):
return exp(-(x-self.mu)**2 / (2*self.sd)) / (self.sd*np.sqrt( 2.0 * pi))
def sample(self, n, seed=None):
rng = np.random.default_rng(seed)
return rng.normal(self.mu, self.sd, size=n)
class Uniform(Distribution):
_parameter_names = "lower bound", "upper bound"
def __init__(self, a=0, b=1):
self.a = a
self.b = b
def pdf(self, x):
if (x >= self.a) and (x <= self.b):
return 1/(self.b-self.a)
else:
return 0
def sample(self, n, seed=None):
rng = np.random.default_rng(seed)
return rng.uniform(self.a, self.b, n)
class Exponential(Distribution):
_parameter_names = "scale"
def __init__(self, l=1):
self.l = l
def pdf(self, x):
if x < 0:
return 0
return self.l*np.exp(-self.l*x)
def sample(self, n, seed=None):
rng = np.random.default_rng(seed)
beta = 1/self.l
return rng.exponential(scale=beta, size=n)
class Gamma(Distribution):
_parameter_names = "shape", "scale"
def __init__(self, k, t):
self.k = k
self.t = t
def pdf(self, x):
return x**(self.k-1)*exp(-x/self.t)/((self.t*self.k)*gamma_func(self.k))
def sample(self, n, seed=None):
rng = np.random.default_rng(seed)
return rng.gamma(self.k, self.t, size=n)
class Logistic(Distribution):
_parameter_names = "mean", "standard deviation"
def __init__(self, mu, sd):
self.mu = mu
self.sd = sd
def pdf(self, x):
e1 = np.exp((x-self.mu)/(2*self.sd))
e2 = np.exp(-(x-self.mu)/(2*self.sd))
return 1/(self.sd*(e1 + e2)**2)
def sample(self, n, seed=None):
rng = np.random.default_rng(seed)
return rng.logistic(self.mu, self.sd, size=n)
class Lognormal(Distribution):
_parameter_names = "mean", "standard deviation"
def __init__(self, mu=0, sd=1):
self.mu = mu
self.sd = sd
def pdf(self, x):
d = x*self.sd*np.sqrt( 2.0 * pi)
return 1/d*exp(-(np.log(x)-self.mu)**2/(2*self.sd**2))
def sample(self, n, seed=None):
rng = np.random.default_rng(seed)
return rng.lognormal(self.mu, self.sd, size=n)
class MultivariateGaussian(Distribution):
_parameter_names = "mean", "covariance"
multi=True
def __init__(self, mu, cov, dim=2):
self.mu = mu
self.cov= cov
self.dim = dim
def mahalanobis(self,x):
return distance.mahalanobis(x, self.mu, self.inv_cov)
@property
def inv_cov(self):
return np.linalg.inv(self.cov)
def pdf(self, x):
det = np.linalg.det(self.cov)
return exp(-(self.mahalanobis(x)**2)/2) / (np.sqrt((2*pi)**self.dim)*det)
def sample(self, n, seed=None):
rng = np.random.default_rng(seed)
return rng.multivariate_normal(self.mu, self.cov, size=n)
mvg = MultivariateGaussian(mu=np.array([1,1]), cov=np.matrix('1, 0; 0, 1'))
| [
"numpy.sqrt",
"numpy.random.default_rng",
"numpy.log",
"numpy.linalg.det",
"numpy.exp",
"numpy.array",
"numpy.linalg.inv",
"scipy.special.gamma",
"numpy.matrix",
"scipy.spatial.distance.mahalanobis"
] | [((2130, 2159), 'numpy.sqrt', 'np.sqrt', (['(self.var + other.var)'], {}), '(self.var + other.var)\n', (2137, 2159), True, 'import numpy as np\n'), ((2358, 2385), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (2379, 2385), True, 'import numpy as np\n'), ((2828, 2855), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (2849, 2855), True, 'import numpy as np\n'), ((3174, 3201), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (3195, 3201), True, 'import numpy as np\n'), ((3566, 3593), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (3587, 3593), True, 'import numpy as np\n'), ((3837, 3874), 'numpy.exp', 'np.exp', (['((x - self.mu) / (2 * self.sd))'], {}), '((x - self.mu) / (2 * self.sd))\n', (3843, 3874), True, 'import numpy as np\n'), ((3882, 3920), 'numpy.exp', 'np.exp', (['(-(x - self.mu) / (2 * self.sd))'], {}), '(-(x - self.mu) / (2 * self.sd))\n', (3888, 3920), True, 'import numpy as np\n'), ((4006, 4033), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (4027, 4033), True, 'import numpy as np\n'), ((4426, 4453), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (4447, 4453), True, 'import numpy as np\n'), ((4764, 4810), 'scipy.spatial.distance.mahalanobis', 'distance.mahalanobis', (['x', 'self.mu', 'self.inv_cov'], {}), '(x, self.mu, self.inv_cov)\n', (4784, 4810), False, 'from scipy.spatial import distance\n'), ((4863, 4886), 'numpy.linalg.inv', 'np.linalg.inv', (['self.cov'], {}), '(self.cov)\n', (4876, 4886), True, 'import numpy as np\n'), ((4924, 4947), 'numpy.linalg.det', 'np.linalg.det', (['self.cov'], {}), '(self.cov)\n', (4937, 4947), True, 'import numpy as np\n'), ((5081, 5108), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (5102, 5108), True, 'import numpy as np\n'), ((5206, 5222), 'numpy.array', 'np.array', (['[1, 1]'], 
{}), '([1, 1])\n', (5214, 5222), True, 'import numpy as np\n'), ((5227, 5250), 'numpy.matrix', 'np.matrix', (['"""1, 0; 0, 1"""'], {}), "('1, 0; 0, 1')\n", (5236, 5250), True, 'import numpy as np\n'), ((2240, 2280), 'numpy.exp', 'exp', (['(-(x - self.mu) ** 2 / (2 * self.sd))'], {}), '(-(x - self.mu) ** 2 / (2 * self.sd))\n', (2243, 2280), False, 'from numpy import pi, exp\n'), ((3105, 3124), 'numpy.exp', 'np.exp', (['(-self.l * x)'], {}), '(-self.l * x)\n', (3111, 3124), True, 'import numpy as np\n'), ((4294, 4311), 'numpy.sqrt', 'np.sqrt', (['(2.0 * pi)'], {}), '(2.0 * pi)\n', (4301, 4311), True, 'import numpy as np\n'), ((2286, 2303), 'numpy.sqrt', 'np.sqrt', (['(2.0 * pi)'], {}), '(2.0 * pi)\n', (2293, 2303), True, 'import numpy as np\n'), ((3464, 3480), 'numpy.exp', 'exp', (['(-x / self.t)'], {}), '(-x / self.t)\n', (3467, 3480), False, 'from numpy import pi, exp\n'), ((3496, 3514), 'scipy.special.gamma', 'gamma_func', (['self.k'], {}), '(self.k)\n', (3506, 3514), True, 'from scipy.special import gamma as gamma_func\n'), ((4999, 5028), 'numpy.sqrt', 'np.sqrt', (['((2 * pi) ** self.dim)'], {}), '((2 * pi) ** self.dim)\n', (5006, 5028), True, 'import numpy as np\n'), ((4338, 4347), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (4344, 4347), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, same_padding=False, bn=False):
super(Conv2d, self).__init__()
padding = int((kernel_size - 1) / 2) if same_padding else 0
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class FC(nn.Module):
def __init__(self, in_features, out_features, relu=True):
super(FC, self).__init__()
self.fc = nn.Linear(in_features, out_features)
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.fc(x)
if self.relu is not None:
x = self.relu(x)
return x
def save_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='w')
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='r')
for k, v in net.state_dict().items():
if k in h5f: #layer exists in saved model
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
else:
print("WARNING: saved model does not have layer {}".format(k))
def np_to_variable(x, is_cuda=True, is_training=False, dtype=torch.FloatTensor):
if is_training:
v = Variable(torch.from_numpy(x).type(dtype))
else:
with torch.no_grad():
v = Variable(torch.from_numpy(x).type(dtype), requires_grad = False)
if is_cuda:
v = v.cuda()
return v
def set_trainable(model, requires_grad):
for param in model.parameters():
param.requires_grad = requires_grad
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
if m.bias is not None:
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"numpy.asarray",
"torch.from_numpy",
"torch.nn.Conv2d",
"h5py.File",
"torch.nn.Linear",
"torch.no_grad"
] | [((1274, 1300), 'h5py.File', 'h5py.File', (['fname'], {'mode': '"""w"""'}), "(fname, mode='w')\n", (1283, 1300), False, 'import h5py\n'), ((1449, 1475), 'h5py.File', 'h5py.File', (['fname'], {'mode': '"""r"""'}), "(fname, mode='r')\n", (1458, 1475), False, 'import h5py\n'), ((430, 504), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size', 'stride'], {'padding': 'padding'}), '(in_channels, out_channels, kernel_size, stride, padding=padding)\n', (439, 504), True, 'import torch.nn as nn\n'), ((993, 1029), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (1002, 1029), True, 'import torch.nn as nn\n'), ((523, 587), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {'eps': '(0.001)', 'momentum': '(0)', 'affine': '(True)'}), '(out_channels, eps=0.001, momentum=0, affine=True)\n', (537, 587), True, 'import torch.nn as nn\n'), ((624, 645), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (631, 645), True, 'import torch.nn as nn\n'), ((1050, 1071), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1057, 1071), True, 'import torch.nn as nn\n'), ((1938, 1953), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1951, 1953), False, 'import torch\n'), ((1613, 1631), 'numpy.asarray', 'np.asarray', (['h5f[k]'], {}), '(h5f[k])\n', (1623, 1631), True, 'import numpy as np\n'), ((1882, 1901), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1898, 1901), False, 'import torch\n'), ((1980, 1999), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1996, 1999), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
from DREAM.Settings.Equations.EquationException import EquationException
from DREAM.Settings.Equations.IonSpecies import IonSpecies, IONS_PRESCRIBED, IONIZATION_MODE_FLUID, IONIZATION_MODE_KINETIC, IONIZATION_MODE_KINETIC_APPROX_JAC, ION_OPACITY_MODE_TRANSPARENT
from . UnknownQuantity import UnknownQuantity
# Model to use for ion heat
IONS_T_I_NEGLECT = 1
IONS_T_I_INCLUDE = 2
class Ions(UnknownQuantity):
def __init__(self, settings, ionization=IONIZATION_MODE_FLUID):
"""
Constructor.
"""
super().__init__(settings=settings)
self.ions = list()
self.r = None
self.t = None
self.ionization = ionization
self.typeTi = IONS_T_I_NEGLECT
def addIon(self, name, Z, iontype=IONS_PRESCRIBED, Z0=None, isotope=0, SPIMolarFraction=-1, opacity_mode=ION_OPACITY_MODE_TRANSPARENT, T=None, n=None, r=None, t=None, tritium=False):
"""
Adds a new ion species to the plasma.
:param str name: Name by which the ion species will be referred to.
:param int Z: Ion charge number.
:param int isotope: Ion mass number.
:param int iontype: Method to use for evolving ions in time.
:param int Z0: Charge state to populate (used for populating exactly one charge state for the ion).
:param n: Ion density (can be either a scalar, 1D array or 2D array, depending on the other input parameters)
:param float SPIMolarFraction: Molar fraction of the SPI injection (if any). A negative value means that this species is not part of the SPI injection
:param numpy.ndarray r: Radial grid on which the input density is defined.
:param T: Ion initial temperature (can be scalar for uniform temperature, otherwise 1D array matching `r` in size)
:param numpy.ndarray r: Radial grid on which the input density and temperature is defined.
:param numpy.ndarray t: Time grid on which the input density is defined.
:param bool tritium: If ``True``, the ion species is treated as Tritium.
"""
if (self.r is not None) and (r is not None) and (np.any(self.r != r)):
raise EquationException("The radial grid must be the same for all ion species.")
if (self.t is not None) and (t is not None) and (np.any(self.t != t)):
raise EquationException("The time grid must be the same for all ion species.")
if T is not None:
self.typeTi = IONS_T_I_INCLUDE
ion = IonSpecies(settings=self.settings, name=name, Z=Z, ttype=iontype, Z0=Z0, isotope=isotope, SPIMolarFraction=SPIMolarFraction, opacity_mode=opacity_mode, T=T, n=n, r=r, t=t, interpr=self.r, interpt=None, tritium=tritium)
self.ions.append(ion)
self.r = ion.getR()
if ion.getTime() is not None:
self.t = ion.getTime()
def getCharges(self):
"""
Returns a list of the charges of the various ion species
contained by this object.
"""
return [ion.getZ() for ion in self.ions]
def getIsotopes(self):
"""
Returns a list of the isotopes of the various ion species
contained by this object.
"""
return [ion.getIsotope() for ion in self.ions]
def getSPIMolarFraction(self):
"""
Returns a list of the SPI molar fractions of the various ion species
contained by this object.
"""
return [ion.getSPIMolarFraction() for ion in self.ions]
def getIon(self, i=None):
"""
Returns the ion species with the specified index or name.
:param i: Index or name of ion species to retrieve.
"""
if type(i) == int: return self.ions[i]
elif type(i) == str:
for j in range(0, len(self.ions)):
if self.ions[j].getName() == i:
return self.ions[j]
raise EquationException("No ion with name '{}' has been defined.".format(i))
else:
raise EquationException("Invalid call to 'getIon()'.")
def setIonization(self, ionization=IONIZATION_MODE_FLUID):
"""
Sets which model to use for ionization.
:param int ionization: Flag indicating which model to use for ionization.
"""
self.ionization=ionization
def getTritiumSpecies(self):
"""
Returns a list of names of the ion species which are treated
as Tritium.
"""
trit = []
for ion in self.ions:
if ion.tritium:
trit.append(ion.getName())
return trit
def getTypes(self):
"""
Returns a list of ion types for the various ion species
contained by this object.
"""
return [ion.getType() for ion in self.ions]
def getOpacityModes(self):
"""
Returns a list of ion opacity modes for the various ion species
contained by this object.
"""
return [ion.getOpacityMode() for ion in self.ions]
def setIonType(self, index, ttype):
"""
Modifies the type of equation used for the specified ion species.
:param index: Index or name of ion species to set type for.
:param int ttype: Type of equation to use for evolving the ion species.
"""
ion = self.getIon(index)
# Note that the DREAM kernel only uses positive type indices.
# The negative type indices are interface extensions which can
# only be used with the 'initialize()' methods.
if ttype <= 0:
raise DREAMException("Trying to set invalid ion type for ion species '{}': {}.".format(ion.name, ttype))
ion.ttype = ttype
def fromdict(self, data):
"""
Load settings from the specified dictionary.
:param dict data: Dictionary containing all settings to load.
"""
names = data['names'].split(';')[:-1]
Z = data['Z']
isotopes = data['isotopes']
types = data['types']
opacity_modes = data['opacity_modes']
SPIMolarFraction = data['SPIMolarFraction']
nZSPI = len(Z)-np.sum(SPIMolarFraction<0)
if nZSPI>0:
nShard = int(np.sum(SPIMolarFraction>=0)/nZSPI)
else:
nShard = 0
if 'tritiumnames' in data:
tritiumnames = data['tritiumnames'].split(';')[:-1]
else:
tritiumnames = []
initial = None
prescribed = None
initialTi = None
self.typeTi = IONS_T_I_NEGLECT
if 'typeTi' in data:
self.typeTi = int(data['typeTi'])
if 'initial' in data:
initial = data['initial']
if 'prescribed' in data:
prescribed = data['prescribed']
if 'initialTi' in data:
initialTi = data['initialTi']
iidx, pidx, spiidx = 0, 0, 0
for i in range(len(Z)):
if types[i] == IONS_PRESCRIBED:
n = prescribed['x'][pidx:(pidx+Z[i]+1)]
r = prescribed['r']
t = prescribed['t']
pidx += Z[i]+1
else:
n = initial['x'][iidx:(iidx+Z[i]+1)]
r = initial['r']
t = None #initial['t']
iidx += Z[i]+1
if self.typeTi==IONS_T_I_INCLUDE and initialTi is not None:
T = initialTi['x'][i]
else:
T = None
if SPIMolarFraction[spiidx]>=0:
SPIMolarFractionSingleSpecies = SPIMolarFraction[spiidx:spiidx+nShard]
spiidx+=nShard
else:
SPIMolarFractionSingleSpecies = SPIMolarFraction[spiidx]
spiidx+=1
tritium = (names[i] in tritiumnames)
self.addIon(name=names[i], Z=Z[i], isotope=isotopes[i], SPIMolarFraction=SPIMolarFractionSingleSpecies, iontype=types[i], opacity_mode=opacity_modes[i], T=T, n=n, r=r, t=t, tritium=tritium)
if 'ionization' in data:
self.ionization = int(data['ionization'])
self.verifySettings()
def todict(self):
"""
Returns a Python dictionary containing all settings of
this Ions object.
"""
Z = self.getCharges()
itypes = self.getTypes()
iopacity_modes =self.getOpacityModes()
isotopes = self.getIsotopes()
initial = None
initialTi = None
prescribed = None
names = ""
tritiumnames = ""
SPIMolarFraction = None
for ion in self.ions:
names += '{};'.format(ion.getName())
if ion.tritium:
tritiumnames += '{};'.format(ion.getName())
if ion.getTime() is None:
if initial is None:
initial = np.copy(ion.getDensity())
else:
initial = np.concatenate((initial, ion.getDensity()))
else:
if prescribed is None:
prescribed = np.copy(ion.getDensity())
else:
prescribed = np.concatenate((prescribed, ion.getDensity()))
if initialTi is None:
initialTi = np.copy(ion.getTemperature())
else:
initialTi = np.concatenate((initialTi, ion.getTemperature()))
if SPIMolarFraction is None:
SPIMolarFraction = np.copy(ion.getSPIMolarFraction())
else:
SPIMolarFraction = np.concatenate((SPIMolarFraction, ion.getSPIMolarFraction()))
data = {
'names': names,
'Z': Z,
'isotopes':isotopes,
'SPIMolarFraction':SPIMolarFraction,
'types': itypes,
'opacity_modes':iopacity_modes
}
if len(tritiumnames) > 0:
data['tritiumnames'] = tritiumnames
if initial is not None:
data['initial'] = {
'r': self.r,
'x': initial
}
if prescribed is not None:
data['prescribed'] = {
'r': self.r,
't': self.t,
'x': prescribed
}
data['initialTi'] = {
'r': self.r,
'x': initialTi
}
data['ionization'] = self.ionization
data['typeTi'] = self.typeTi
return data
def verifySettings(self):
"""
Verify that all settings are consistent.
"""
# Make sure there are no double names
for i in range(0, len(self.ions)):
for j in range(0, len(self.ions)):
if i == j: continue
if self.ions[i].getName() == self.ions[j].getName():
raise EquationException("ions: More than one ion species is named '{}'.".format(self.ions[i].getName()))
self.ions[i].verifySettings()
if (self.ionization != IONIZATION_MODE_FLUID) and (self.ionization != IONIZATION_MODE_KINETIC) and (self.ionization != IONIZATION_MODE_KINETIC_APPROX_JAC):
raise EquationException("ions: Invalid ionization mode: {}.".format(self.ionization))
def getFreeElectronDensity(self, t=0):
"""
Returns the plasma free electron density at the given time index, based
on the prescribed/initialized ion densities.
:param int t: Index of time for which to retrieve the free electron density.
"""
n_free = np.zeros( self.r.shape )
for ion in self.ions:
for Z0 in range(1,ion.Z + 1):
if len( ion.n.shape ) == 3:
n_free = n_free + Z0 * ion.n[Z0,t,:]
elif len( ion.n.shape ) == 2:
n_free = n_free + Z0 * ion.n[Z0,:]
return n_free, self.r
| [
"DREAM.Settings.Equations.IonSpecies.IonSpecies",
"DREAM.Settings.Equations.EquationException.EquationException",
"numpy.any",
"numpy.sum",
"numpy.zeros"
] | [((2623, 2855), 'DREAM.Settings.Equations.IonSpecies.IonSpecies', 'IonSpecies', ([], {'settings': 'self.settings', 'name': 'name', 'Z': 'Z', 'ttype': 'iontype', 'Z0': 'Z0', 'isotope': 'isotope', 'SPIMolarFraction': 'SPIMolarFraction', 'opacity_mode': 'opacity_mode', 'T': 'T', 'n': 'n', 'r': 'r', 't': 't', 'interpr': 'self.r', 'interpt': 'None', 'tritium': 'tritium'}), '(settings=self.settings, name=name, Z=Z, ttype=iontype, Z0=Z0,\n isotope=isotope, SPIMolarFraction=SPIMolarFraction, opacity_mode=\n opacity_mode, T=T, n=n, r=r, t=t, interpr=self.r, interpt=None, tritium\n =tritium)\n', (2633, 2855), False, 'from DREAM.Settings.Equations.IonSpecies import IonSpecies, IONS_PRESCRIBED, IONIZATION_MODE_FLUID, IONIZATION_MODE_KINETIC, IONIZATION_MODE_KINETIC_APPROX_JAC, ION_OPACITY_MODE_TRANSPARENT\n'), ((11718, 11740), 'numpy.zeros', 'np.zeros', (['self.r.shape'], {}), '(self.r.shape)\n', (11726, 11740), True, 'import numpy as np\n'), ((2253, 2272), 'numpy.any', 'np.any', (['(self.r != r)'], {}), '(self.r != r)\n', (2259, 2272), True, 'import numpy as np\n'), ((2293, 2367), 'DREAM.Settings.Equations.EquationException.EquationException', 'EquationException', (['"""The radial grid must be the same for all ion species."""'], {}), "('The radial grid must be the same for all ion species.')\n", (2310, 2367), False, 'from DREAM.Settings.Equations.EquationException import EquationException\n'), ((2425, 2444), 'numpy.any', 'np.any', (['(self.t != t)'], {}), '(self.t != t)\n', (2431, 2444), True, 'import numpy as np\n'), ((2465, 2537), 'DREAM.Settings.Equations.EquationException.EquationException', 'EquationException', (['"""The time grid must be the same for all ion species."""'], {}), "('The time grid must be the same for all ion species.')\n", (2482, 2537), False, 'from DREAM.Settings.Equations.EquationException import EquationException\n'), ((6318, 6346), 'numpy.sum', 'np.sum', (['(SPIMolarFraction < 0)'], {}), '(SPIMolarFraction < 0)\n', (6324, 6346), True, 'import 
numpy as np\n'), ((4133, 4181), 'DREAM.Settings.Equations.EquationException.EquationException', 'EquationException', (['"""Invalid call to \'getIon()\'."""'], {}), '("Invalid call to \'getIon()\'.")\n', (4150, 4181), False, 'from DREAM.Settings.Equations.EquationException import EquationException\n'), ((6390, 6419), 'numpy.sum', 'np.sum', (['(SPIMolarFraction >= 0)'], {}), '(SPIMolarFraction >= 0)\n', (6396, 6419), True, 'import numpy as np\n')] |
#!/usr/bin/python
# This script prints the predicted and real body part in the validation dataset (images/test)
import os
# Must be set BEFORE TensorFlow/Keras are imported so device selection takes effect.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
from keras import optimizers
from keras.models import load_model
from keras.preprocessing import image
import csv
import re
import argparse
import tensorflow as tf
parser = argparse.ArgumentParser(description='Get prediction on validation dataset')
parser.add_argument("net", help="net to use")
ARGS = parser.parse_args()
# Readable labels for the model's 12 output classes (loop-invariant, so built once).
CATEGORIES = {0: 'anal', 1: 'arms', 2: 'armsAndHands',
              3: 'face', 4: 'feet', 5: 'genitalsFemale',
              6: 'genitalsMale', 7: 'hands', 8: 'head',
              9: 'legs', 10: 'legsAndfeet', 11: 'torso'}
# Loading and Compiling Model
MODEL = load_model(ARGS.net)
MODEL.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='categorical_crossentropy',
              metrics=['acc'])
# 'with' guarantees the CSV file is flushed and closed even on error
# (the original left it open for the process lifetime).
with open('predict_top1.csv', 'a', newline="") as csvFile:
    csvWriter = csv.writer(csvFile)
    # Predict every image under images/test/<bodyPart>/.
    for bodyPart in os.listdir('./images/test'):
        for imageFile in os.listdir('./images/test/' + bodyPart):
            # The directory name is the ground-truth class.
            realClass = bodyPart
            # Convert the image to a (1, 299, 299, 3) numpy batch.
            IMG = image.load_img('./images/test/' + bodyPart + '/' + imageFile,
                                 target_size=(299, 299))
            IMAGES = np.expand_dims(image.img_to_array(IMG), axis=0)
            # The actual prediction (training=False: inference mode).
            CLASSES = MODEL(IMAGES, training=False)
            # Pick the most probable class; fall back to 'other' when no
            # class clears the 0.5 confidence threshold.
            max_prob = 0.0   # renamed from 'max' to avoid shadowing the builtin
            maxClass = ''
            other = True
            for i, c in enumerate(tf.unstack(CLASSES, axis=1)):
                prob = float(c[0])
                if prob > max_prob:
                    max_prob = prob
                    maxClass = CATEGORIES[i]
                if prob > 0.5:
                    other = False
            if other:
                maxClass = 'other'
            match = 1 if maxClass == realClass else 0
            print('Image {}: predict {}, real {}'.format(imageFile, maxClass, realClass))
            csvWriter.writerow([imageFile, maxClass, realClass, match])
| [
"tensorflow.unstack",
"keras.preprocessing.image.img_to_array",
"os.listdir",
"keras.models.load_model",
"argparse.ArgumentParser",
"csv.writer",
"numpy.vstack",
"numpy.expand_dims",
"keras.optimizers.RMSprop",
"keras.preprocessing.image.load_img"
] | [((407, 482), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get prediction on validation dataset"""'}), "(description='Get prediction on validation dataset')\n", (430, 482), False, 'import argparse\n'), ((626, 645), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (636, 645), False, 'import csv\n'), ((685, 705), 'keras.models.load_model', 'load_model', (['ARGS.net'], {}), '(ARGS.net)\n', (695, 705), False, 'from keras.models import load_model\n'), ((890, 917), 'os.listdir', 'os.listdir', (['"""./images/test"""'], {}), "('./images/test')\n", (900, 917), False, 'import os\n'), ((940, 979), 'os.listdir', 'os.listdir', (["('./images/test/' + bodyPart)"], {}), "('./images/test/' + bodyPart)\n", (950, 979), False, 'import os\n'), ((730, 758), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(2e-05)'}), '(lr=2e-05)\n', (748, 758), False, 'from keras import optimizers\n'), ((1104, 1194), 'keras.preprocessing.image.load_img', 'image.load_img', (["('./images/test/' + bodyPart + '/' + imageFile)"], {'target_size': '(299, 299)'}), "('./images/test/' + bodyPart + '/' + imageFile, target_size=(\n 299, 299))\n", (1118, 1194), False, 'from keras.preprocessing import image\n'), ((1196, 1219), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['IMG'], {}), '(IMG)\n', (1214, 1219), False, 'from keras.preprocessing import image\n'), ((1232, 1257), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1246, 1257), True, 'import numpy as np\n'), ((1275, 1289), 'numpy.vstack', 'np.vstack', (['[X]'], {}), '([X])\n', (1284, 1289), True, 'import numpy as np\n'), ((1752, 1779), 'tensorflow.unstack', 'tf.unstack', (['CLASSES'], {'axis': '(1)'}), '(CLASSES, axis=1)\n', (1762, 1779), True, 'import tensorflow as tf\n')] |
from ast import literal_eval
import copy
import yaml
import numpy as np
import os
import argparse
class AttrDict(dict):
    """Dictionary whose entries are also readable/writable as attributes.

    Instance ``__dict__`` entries take precedence over dict keys, in both
    lookup and assignment, mirroring the original semantics.
    """

    def __getattr__(self, name):
        # Prefer real instance attributes, then fall back to dict entries.
        try:
            return self.__dict__[name]
        except KeyError:
            pass
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Existing instance attributes are updated in place; everything
        # else is stored as a dict entry.
        target = self.__dict__ if name in self.__dict__ else self
        target[name] = value
__C = AttrDict()
# Public alias: the rest of the project imports this as `cfg`.
cfg = __C
# --------------------------------------------------------------------------- #
# general options
# --------------------------------------------------------------------------- #
##OS options
__C.DATA_DIRECTORY = "parsed_dataset-p1"
__C.OUTPUT_DIRECTORY = "output"
__C.EXP_NAME = 'p1-run-1'
__C.OUTPUT_FILE_NAME = "predict.json"
__C.MODE = "train"
## Dataset options
__C.SPLIT = "test" #NOT USE
__C.GENERATE_VOCABULARIES = False
__C.LOAD_VOCABULARIES = False
__C.INPUT_VOCAB_PATH = ""
__C.TARGET_VOCAB_PATH = ""
__C.INIT_WRD_EMB_FROM_FILE = False
__C.WRD_EMB_INIT_FILE = ''
# --------------------------------------------------------------------------- #
# model options
# --------------------------------------------------------------------------- #
## Command Encoder
__C.CMD_D_EMBED = 32
__C.CMD_D_ENC = 64
__C.CMD_D_H = 64 # Same as ENC_DIM
## Situation Encoder (LGCN)
__C.SITU_D_FEAT = 64 # 3 * D_CNN_OUTPUT if CNN then LGCN
__C.SITU_D_CTX = 64 # 512
__C.SITU_D_CMD = 64 # 512
__C.SITU_D_CNN_OUTPUT = 64
#1
## Decoder
__C.DEC_D_H = 64
__C.DEC_NUM_LAYER = 1
__C.DEC_CONDITIONAL_ATTENTION = True
__C.H_FEAT = 14
__C.W_FEAT = 14
__C.D_FEAT = 1152 # 1024+128
__C.T_ENCODER = 45
__C.ADD_POS_ENC = True
__C.PE_DIM = 128
__C.PE_SCALE = 1.
__C.MSG_ITER_NUM = 4
__C.STEM_NORMALIZE = True
__C.STEM_LINEAR = True
__C.STEM_CNN = False
__C.STEM_CNN_DIM = 512
__C.STEM_RENORMALIZE = False
# __C.WRD_EMB_DIM = 300
__C.WRD_EMB_FIXED = False
# __C.ENC_DIM = 512
__C.CMD_DIM = 512
__C.CMD_INPUT_ACT = 'ELU'
__C.CTX_DIM = 512
__C.OUT_QUESTION_MUL = True
__C.OUT_CLASSIFIER_DIM = 512
__C.USE_EMA = True
__C.EMA_DECAY_RATE = 0.999
# Dropouts (values are KEEP probabilities, not drop probabilities)
__C.encInputDropout = 0.8
__C.locDropout = 1.
__C.cmdDropout = 0.92
__C.memoryDropout = 0.85
__C.readDropout = 0.85
__C.outputDropout = 0.85
__C.decoderDropout = 0.85
__C.MASK_PADUNK_IN_LOGITS = True
__C.BUILD_VQA = True
__C.BUILD_REF = False
# CLEVR-Ref configs
__C.BBOX_IOU_THRESH = .5
__C.IMG_H = 320 # size in loc
__C.IMG_W = 480 # size in loc
# Loss option
__C.AUXILIARY_TASK = False
# --------------------------------------------------------------------------- #
# training options
# --------------------------------------------------------------------------- #
__C.TRAIN = AttrDict()
__C.TRAIN.BATCH_SIZE = 200
__C.VAL_BATCH_SIZE = 4000
__C.TRAIN.START_EPOCH = 0
__C.TRAIN.CLIP_GRADIENTS = True
__C.TRAIN.GRAD_MAX_NORM = 8.
__C.TRAIN.SOLVER = AttrDict()
# __C.TRAIN.SOLVER.LR = 3e-4
__C.TRAIN.SOLVER.LR = 8e-4
__C.TRAIN.SOLVER.LR_DECAY = 0.9
__C.TRAIN.SOLVER.ADAM_BETA1 = 0.9
__C.TRAIN.SOLVER.ADAM_BETA2 = 0.999
__C.TRAIN.SOLVER.LR_DECAY_STEP = 20000
__C.TRAIN.MAX_EPOCH = 100
__C.TRAIN.RUN_EVAL = True
__C.TRAIN.USE_MULTI_GPU = True
# Logging / evaluation / checkpoint cadence (in steps or epochs; see trainer)
__C.PRINT_EVERY = 100
__C.EVALUATE_EVERY = 10
__C.SAVE_EVERY = 20
#GSCAN Specific
__C.TRAIN.K = 0
__C.TRAIN.WEIGHT_TARGET_LOSS = 0.3 # change only when auxiliary is used
# --------------------------------------------------------------------------- #
# test options
# --------------------------------------------------------------------------- #
__C.TEST = AttrDict()
__C.TEST.SPLIT = "" #\TODO test split not initialized yet
__C.TEST.MAX_DECODING_STEP = 30
__C.TEST.BATCH_SIZE = 1
__C.TEST.EPOCH = -1 # Needs to be supplied
__C.TEST.DUMP_PRED = False
__C.TEST.RESULT_DIR = './exp_clevr/results/%s/%04d'
__C.TEST.NUM_VIS = 0
__C.TEST.VIS_DIR_PREFIX = 'vis'
__C.TEST.VIS_FILTER_EDGE = True
__C.TEST.VIS_EDGE_SCALE = 1.
__C.TEST.VIS_FINAL_REL_TH = .025
__C.TEST.VIS_FINAL_ABS_TH = .025
__C.TEST.VIS_MSG_TH = .1
# --------------------------------------------------------------------------- #
# post-processing configs after loading
# --------------------------------------------------------------------------- #
def _postprocess_cfg():  # NoQA
    """Normalize config values after all merges have been applied."""
    # NOTE(review): GPUS has no default in this file, so the original line
    # raised AttributeError whenever no config file / CLI option supplied it.
    # Guard so configs without GPUS still pass post-processing; configs that
    # do define GPUS get identical normalization (strip spaces and parens).
    if 'GPUS' in __C:
        __C.GPUS = __C.GPUS.replace(' ', '').replace('(', '').replace(')', '')
    assert __C.EXP_NAME != '<fill-with-filename>', 'EXP_NAME must be specified'
# --------------------------------------------------------------------------- #
def build_cfg_from_argparse(args_list=None):
    """Load config with command line options (`--cfg` and a list of options)"""
    cli = argparse.ArgumentParser()
    cli.add_argument('--cfg', default='')
    cli.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    parsed = cli.parse_args(args_list)
    # A yaml file (if any) is merged first, then individual overrides.
    if parsed.cfg:
        _merge_cfg_from_file(parsed.cfg)
    if parsed.opts:
        _merge_cfg_from_list(parsed.opts)
    _postprocess_cfg()
    return __C
def _merge_cfg_from_file(cfg_filename):
    """Load a yaml config file and merge it into the global config.

    If EXP_NAME was left as the placeholder, it is derived from the
    config file's basename.
    """
    with open(cfg_filename, 'r') as f:
        # safe_load: `yaml.load` without an explicit Loader is deprecated and
        # can execute arbitrary Python via yaml tags; config files only need
        # plain scalars/lists/dicts, which safe_load fully supports.
        yaml_cfg = yaml.safe_load(f)
    if yaml_cfg is not None:
        _merge_a_into_b(AttrDict(yaml_cfg), __C)
    if __C.EXP_NAME == '<fill-with-filename>':
        __C.EXP_NAME = os.path.basename(cfg_filename).replace('.yaml', '')
def _merge_cfg_from_cfg(cfg_other):
    """Merge `cfg_other` into the global config."""
    # Thin wrapper: key validation and type coercion happen in _merge_a_into_b.
    _merge_a_into_b(cfg_other, __C)
def _merge_cfg_from_list(cfg_list):
    """Merge config keys, values in a list (e.g., from command line) into the
    global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
    """
    assert len(cfg_list) % 2 == 0
    # Pairs of (dotted key, raw value).
    for full_key, raw_value in zip(cfg_list[0::2], cfg_list[1::2]):
        parts = full_key.split('.')
        node = __C
        # Walk down to the dict that owns the final sub-key, validating
        # every intermediate key along the way.
        for part in parts[:-1]:
            assert part in node, 'Non-existent key: {}'.format(full_key)
            node = node[part]
        leaf = parts[-1]
        assert leaf in node, 'Non-existent key: {}'.format(full_key)
        decoded = _decode_cfg_value(raw_value)
        node[leaf] = _check_and_coerce_cfg_value_type(
            decoded, node[leaf], leaf, full_key
        )
def _merge_a_into_b(a, b, stack=None):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    `stack` tracks the key path during recursion so error messages show
    the full dotted key.
    """
    assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
    assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
    for k, v_ in a.items():
        full_key = '.'.join(stack) + '.' + k if stack is not None else k
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('Non-existent config key: {}'.format(full_key))
        v = copy.deepcopy(v_)
        v = _decode_cfg_value(v)
        v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
        # Recursively merge dicts.  (The original wrapped this call in a
        # no-op `try/except BaseException: raise`; removed as dead code.)
        if isinstance(v, AttrDict):
            stack_push = [k] if stack is None else stack + [k]
            _merge_a_into_b(v, b[k], stack=stack_push)
        else:
            b[k] = v
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to AttrDict objects
if isinstance(v, dict):
return AttrDict(v)
# All remaining processing is only applied to strings
if not isinstance(v, str):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
"""Checks that `value_a`, which is intended to replace `value_b` is of the
right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
# The types must match (with some exceptions)
type_b = type(value_b)
type_a = type(value_a)
if type_a is type_b:
return value_a
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, str):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)
elif isinstance(value_a, list) and isinstance(value_b, tuple):
value_a = tuple(value_a)
else:
raise ValueError(
'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
)
return value_a
| [
"argparse.ArgumentParser",
"yaml.load",
"ast.literal_eval",
"numpy.array",
"os.path.basename",
"copy.deepcopy"
] | [((4642, 4667), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4665, 4667), False, 'import argparse\n'), ((5148, 5160), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (5157, 5160), False, 'import yaml\n'), ((6805, 6822), 'copy.deepcopy', 'copy.deepcopy', (['v_'], {}), '(v_)\n', (6818, 6822), False, 'import copy\n'), ((7773, 7788), 'ast.literal_eval', 'literal_eval', (['v'], {}), '(v)\n', (7785, 7788), False, 'from ast import literal_eval\n'), ((9050, 9088), 'numpy.array', 'np.array', (['value_a'], {'dtype': 'value_b.dtype'}), '(value_a, dtype=value_b.dtype)\n', (9058, 9088), True, 'import numpy as np\n'), ((5309, 5339), 'os.path.basename', 'os.path.basename', (['cfg_filename'], {}), '(cfg_filename)\n', (5325, 5339), False, 'import os\n')] |
import unittest
import numpy as np
import pickle
from syft.nn.linear import LinearClassifier
from syft.he.paillier import KeyPair, PaillierTensor
class PySonarNotebooks(unittest.TestCase):
    def modelTrainingDemoNotebook(self):
        """If this test fails, you probably broke the demo notebook located at
        PySonar/notebooks/Sonar - Decentralized Model Training Simulation
        (local blockchain).ipynb """
        # Generating a keypair and encrypting the classifier must not raise.
        pubkey, prikey = KeyPair().generate(n_length=1024)
        classifier = LinearClassifier(desc="DiabetesClassifier", n_inputs=10,
                                      n_labels=1)
        classifier.encrypt(pubkey)
        self.assertTrue(True)
class PySyftNotebooks(unittest.TestCase):
    def paillierHEExampleNotebook(self):
        """If this test fails, you probably broke the demo notebook located at
        PySyft/notebooks/Syft - Paillier Homomorphic Encryption Example.ipynb
        """
        pubkey, prikey = KeyPair().generate()
        x = PaillierTensor(pubkey, np.array([1, 2, 3, 4, 5.]))
        # Identity, addition, scaling and division must all survive an
        # encrypt/decrypt round trip.
        self.assertEqual(x.decrypt(prikey), np.array([1., 2., 3., 4., 5.]))
        self.assertEqual((x + x[0]).decrypt(prikey),
                         np.array([2., 3., 4., 5., 6.]))
        self.assertEqual((x * 5).decrypt(prikey),
                         np.array([5., 10., 15., 20., 25.]))
        self.assertEqual((x + x / 5).decrypt(prikey),
                         np.array([1.2, 2.4, 3.6, 4.8, 6.]))
        # Keys must survive a serialize/deserialize round trip.
        serialized_pub = pubkey.serialize()
        serialized_pri = prikey.serialize()
        pubkey2, prikey2 = KeyPair().deserialize(serialized_pub, serialized_pri)
        self.assertEqual(prikey2.decrypt(x), np.array([1., 2., 3., 4., 5.]))
        # Encrypted tensors must survive pickling.
        y = PaillierTensor(pubkey, np.ones(5) / 2)
        self.assertEqual(prikey.decrypt(y), np.array([.5, .5, .5, .5, .5]))
        restored = pickle.loads(pickle.dumps(y))
        self.assertEqual(prikey.decrypt(restored),
                         np.array([.5, .5, .5, .5, .5]))

    def paillierLinearClassifierNotebook(self):
        """If this test fails, you probably broke the demo notebook located at
        PySyft/notebooks/Syft - Paillier Homomorphic Encryption Example.ipynb
        """
        pubkey, prikey = KeyPair().generate(n_length=1024)
        model = LinearClassifier(n_inputs=4, n_labels=2).encrypt(pubkey)
        inputs = np.array([[0, 0, 1, 1], [0, 0, 1, 0],
                           [1, 0, 1, 1], [0, 0, 1, 0]])
        targets = np.array([[0, 1], [0, 0], [1, 1], [0, 0]])
        # Three passes of encrypted learning, then decrypt and run forward.
        for _ in range(3):
            for row in range(len(inputs)):
                model.learn(input=inputs[row], target=targets[row], alpha=0.5)
        model = model.decrypt(prikey)
        for row in range(len(inputs)):
            model.forward(inputs[row])
| [
"numpy.ones",
"syft.nn.linear.LinearClassifier",
"pickle.dumps",
"numpy.array",
"syft.he.paillier.KeyPair",
"pickle.loads"
] | [((497, 565), 'syft.nn.linear.LinearClassifier', 'LinearClassifier', ([], {'desc': '"""DiabetesClassifier"""', 'n_inputs': '(10)', 'n_labels': '(1)'}), "(desc='DiabetesClassifier', n_inputs=10, n_labels=1)\n", (513, 565), False, 'from syft.nn.linear import LinearClassifier\n'), ((1829, 1844), 'pickle.dumps', 'pickle.dumps', (['y'], {}), '(y)\n', (1841, 1844), False, 'import pickle\n'), ((1858, 1877), 'pickle.loads', 'pickle.loads', (['y_str'], {}), '(y_str)\n', (1870, 1877), False, 'import pickle\n'), ((2342, 2408), 'numpy.array', 'np.array', (['[[0, 0, 1, 1], [0, 0, 1, 0], [1, 0, 1, 1], [0, 0, 1, 0]]'], {}), '([[0, 0, 1, 1], [0, 0, 1, 0], [1, 0, 1, 1], [0, 0, 1, 0]])\n', (2350, 2408), True, 'import numpy as np\n'), ((2452, 2494), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [1, 1], [0, 0]]'], {}), '([[0, 1], [0, 0], [1, 1], [0, 0]])\n', (2460, 2494), True, 'import numpy as np\n'), ((960, 987), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5.0]'], {}), '([1, 2, 3, 4, 5.0])\n', (968, 987), True, 'import numpy as np\n'), ((1053, 1088), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0, 5.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0])\n', (1061, 1088), True, 'import numpy as np\n'), ((1159, 1194), 'numpy.array', 'np.array', (['[2.0, 3.0, 4.0, 5.0, 6.0]'], {}), '([2.0, 3.0, 4.0, 5.0, 6.0])\n', (1167, 1194), True, 'import numpy as np\n'), ((1262, 1301), 'numpy.array', 'np.array', (['[5.0, 10.0, 15.0, 20.0, 25.0]'], {}), '([5.0, 10.0, 15.0, 20.0, 25.0])\n', (1270, 1301), True, 'import numpy as np\n'), ((1373, 1408), 'numpy.array', 'np.array', (['[1.2, 2.4, 3.6, 4.8, 6.0]'], {}), '([1.2, 2.4, 3.6, 4.8, 6.0])\n', (1381, 1408), True, 'import numpy as np\n'), ((1630, 1665), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0, 5.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0])\n', (1638, 1665), True, 'import numpy as np\n'), ((1780, 1815), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5, 0.5, 0.5])\n', (1788, 1815), True, 'import numpy as np\n'), ((1943, 
1978), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5, 0.5, 0.5])\n', (1951, 1978), True, 'import numpy as np\n'), ((451, 460), 'syft.he.paillier.KeyPair', 'KeyPair', ([], {}), '()\n', (458, 460), False, 'from syft.he.paillier import KeyPair, PaillierTensor\n'), ((904, 913), 'syft.he.paillier.KeyPair', 'KeyPair', ([], {}), '()\n', (911, 913), False, 'from syft.he.paillier import KeyPair, PaillierTensor\n'), ((1518, 1527), 'syft.he.paillier.KeyPair', 'KeyPair', ([], {}), '()\n', (1525, 1527), False, 'from syft.he.paillier import KeyPair, PaillierTensor\n'), ((1699, 1709), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1706, 1709), True, 'import numpy as np\n'), ((2219, 2228), 'syft.he.paillier.KeyPair', 'KeyPair', ([], {}), '()\n', (2226, 2228), False, 'from syft.he.paillier import KeyPair, PaillierTensor\n'), ((2269, 2309), 'syft.nn.linear.LinearClassifier', 'LinearClassifier', ([], {'n_inputs': '(4)', 'n_labels': '(2)'}), '(n_inputs=4, n_labels=2)\n', (2285, 2309), False, 'from syft.nn.linear import LinearClassifier\n')] |
# --------------------------------------------------------
# Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model.config import cfg
from model.bbox_transform import bbox_transform_inv, clip_boxes
from model.nms_wrapper import nms
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors, reject_inds_1, reject_inds_2):
    """A simplified version compared to fast/er RCNN
    For details please see the technical report
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    top_n_pre = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    top_n_post = cfg[cfg_key].RPN_POST_NMS_TOP_N
    iou_thresh = cfg[cfg_key].RPN_NMS_THRESH
    im_info = im_info[0]
    # Foreground scores and regressed, image-clipped proposal boxes.
    scores = rpn_cls_prob[:, :, :, num_anchors:].reshape((-1, 1))
    deltas = rpn_bbox_pred.reshape((-1, 4))
    proposals = clip_boxes(bbox_transform_inv(anchors, deltas), im_info[:2])
    ######################REJECT VIA RPN################
    # Mark every rejected anchor index with a sentinel score of -2, then
    # keep only the survivors (reject via frcn and rpn).
    for reject_inds in (reject_inds_1, reject_inds_2):
        if reject_inds.size != 0:
            scores[np.unique(reject_inds)] = -2
    survivors = np.where(scores != -2)[0]
    proposals = proposals[survivors]
    scores = scores[survivors]
    #####################################################
    # Keep the highest-scoring region proposals before NMS.
    ranking = scores.ravel().argsort()[::-1]
    if top_n_pre > 0:
        ranking = ranking[:top_n_pre]
    proposals = proposals[ranking, :]
    scores = scores[ranking]
    # Non-maximal suppression, then keep the top proposals after NMS.
    keep = nms(np.hstack((proposals, scores)), iou_thresh)
    if top_n_post > 0:
        keep = keep[:top_n_post]
    proposals = proposals[keep, :]
    scores = scores[keep]
    # Only support single image as input: prepend a zero batch-index column.
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob, scores | [
"numpy.unique",
"numpy.hstack",
"numpy.where",
"model.bbox_transform.clip_boxes",
"numpy.zeros",
"model.bbox_transform.bbox_transform_inv"
] | [((1137, 1179), 'model.bbox_transform.bbox_transform_inv', 'bbox_transform_inv', (['anchors', 'rpn_bbox_pred'], {}), '(anchors, rpn_bbox_pred)\n', (1155, 1179), False, 'from model.bbox_transform import bbox_transform_inv, clip_boxes\n'), ((1194, 1228), 'model.bbox_transform.clip_boxes', 'clip_boxes', (['proposals', 'im_info[:2]'], {}), '(proposals, im_info[:2])\n', (1204, 1228), False, 'from model.bbox_transform import bbox_transform_inv, clip_boxes\n'), ((2324, 2375), 'numpy.zeros', 'np.zeros', (['(proposals.shape[0], 1)'], {'dtype': 'np.float32'}), '((proposals.shape[0], 1), dtype=np.float32)\n', (2332, 2375), True, 'import numpy as np\n'), ((1410, 1434), 'numpy.unique', 'np.unique', (['reject_inds_1'], {}), '(reject_inds_1)\n', (1419, 1434), True, 'import numpy as np\n'), ((1517, 1541), 'numpy.unique', 'np.unique', (['reject_inds_2'], {}), '(reject_inds_2)\n', (1526, 1541), True, 'import numpy as np\n'), ((1587, 1609), 'numpy.where', 'np.where', (['(scores != -2)'], {}), '(scores != -2)\n', (1595, 1609), True, 'import numpy as np\n'), ((2068, 2098), 'numpy.hstack', 'np.hstack', (['(proposals, scores)'], {}), '((proposals, scores))\n', (2077, 2098), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_broadcast():
    """BroadcastTo on GPU must match numpy.broadcast_to for several shapes."""
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    # (input shape, dtype, target shape) triples, mirroring the original cases.
    cases = [((3, 1, 5, 1), np.float32, (3, 4, 5, 6)),
             ((3, 1, 5, 1), np.float16, (3, 4, 5, 6)),
             ((4, 5), np.float32, (2, 3, 4, 5))]
    for in_shape, dtype, out_shape in cases:
        x_np = np.random.rand(*in_shape).astype(dtype)
        output = P.BroadcastTo(out_shape)(Tensor(x_np))
        expect = np.broadcast_to(x_np, out_shape)
        assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_broadcast_dyn_init():
    """
    Test running the op with -1's in the init shape to support varied inputs.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    # (input shape, dtype, init shape with -1 wildcards, resolved numpy shape)
    cases = [((3, 1, 5, 1), np.float32, (-1, 4, 5, 6), (3, 4, 5, 6)),
             ((3, 1, 5, 1), np.float16, (-1, 4, 5, 6), (3, 4, 5, 6)),
             ((4, 5), np.float32, (2, 3, -1, 5), (2, 3, 4, 5))]
    for in_shape, dtype, ms_shape, np_shape in cases:
        x_np = np.random.rand(*in_shape).astype(dtype)
        output = P.BroadcastTo(ms_shape)(Tensor(x_np))
        expect = np.broadcast_to(x_np, np_shape)
        assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_broadcast_dyn_invalid_init():
    """
    Test running the op with -1's in the init shape in incorrect positions.
    Expected to fail.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    data = np.random.rand(4, 5).astype(np.float32)
    # A -1 in this position cannot be resolved from the input -> ValueError.
    with pytest.raises(ValueError):
        P.BroadcastTo((2, -1, 4, 5))(Tensor(data))
| [
"numpy.random.rand",
"mindspore.context.set_context",
"mindspore.ops.operations.BroadcastTo",
"mindspore.common.tensor.Tensor",
"pytest.raises",
"numpy.broadcast_to"
] | [((935, 1000), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""'}), "(mode=context.GRAPH_MODE, device_target='GPU')\n", (954, 1000), True, 'import mindspore.context as context\n'), ((1145, 1173), 'numpy.broadcast_to', 'np.broadcast_to', (['x_np', 'shape'], {}), '(x_np, shape)\n', (1160, 1173), True, 'import numpy as np\n'), ((1344, 1373), 'numpy.broadcast_to', 'np.broadcast_to', (['x1_np', 'shape'], {}), '(x1_np, shape)\n', (1359, 1373), True, 'import numpy as np\n'), ((1563, 1592), 'numpy.broadcast_to', 'np.broadcast_to', (['x1_np', 'shape'], {}), '(x1_np, shape)\n', (1578, 1592), True, 'import numpy as np\n'), ((1857, 1922), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""'}), "(mode=context.GRAPH_MODE, device_target='GPU')\n", (1876, 1922), True, 'import mindspore.context as context\n'), ((2102, 2133), 'numpy.broadcast_to', 'np.broadcast_to', (['x_np', 'np_shape'], {}), '(x_np, np_shape)\n', (2117, 2133), True, 'import numpy as np\n'), ((2307, 2339), 'numpy.broadcast_to', 'np.broadcast_to', (['x1_np', 'np_shape'], {}), '(x1_np, np_shape)\n', (2322, 2339), True, 'import numpy as np\n'), ((2564, 2596), 'numpy.broadcast_to', 'np.broadcast_to', (['x1_np', 'np_shape'], {}), '(x1_np, np_shape)\n', (2579, 2596), True, 'import numpy as np\n'), ((2889, 2954), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""'}), "(mode=context.GRAPH_MODE, device_target='GPU')\n", (2908, 2954), True, 'import mindspore.context as context\n'), ((1097, 1117), 'mindspore.ops.operations.BroadcastTo', 'P.BroadcastTo', (['shape'], {}), '(shape)\n', (1110, 1117), True, 'from mindspore.ops import operations as P\n'), ((1118, 1130), 'mindspore.common.tensor.Tensor', 'Tensor', (['x_np'], {}), '(x_np)\n', (1124, 1130), False, 'from mindspore.common.tensor import Tensor\n'), ((1295, 1315), 
'mindspore.ops.operations.BroadcastTo', 'P.BroadcastTo', (['shape'], {}), '(shape)\n', (1308, 1315), True, 'from mindspore.ops import operations as P\n'), ((1316, 1329), 'mindspore.common.tensor.Tensor', 'Tensor', (['x1_np'], {}), '(x1_np)\n', (1322, 1329), False, 'from mindspore.common.tensor import Tensor\n'), ((1514, 1534), 'mindspore.ops.operations.BroadcastTo', 'P.BroadcastTo', (['shape'], {}), '(shape)\n', (1527, 1534), True, 'from mindspore.ops import operations as P\n'), ((1535, 1548), 'mindspore.common.tensor.Tensor', 'Tensor', (['x1_np'], {}), '(x1_np)\n', (1541, 1548), False, 'from mindspore.common.tensor import Tensor\n'), ((2051, 2074), 'mindspore.ops.operations.BroadcastTo', 'P.BroadcastTo', (['ms_shape'], {}), '(ms_shape)\n', (2064, 2074), True, 'from mindspore.ops import operations as P\n'), ((2075, 2087), 'mindspore.common.tensor.Tensor', 'Tensor', (['x_np'], {}), '(x_np)\n', (2081, 2087), False, 'from mindspore.common.tensor import Tensor\n'), ((2255, 2278), 'mindspore.ops.operations.BroadcastTo', 'P.BroadcastTo', (['ms_shape'], {}), '(ms_shape)\n', (2268, 2278), True, 'from mindspore.ops import operations as P\n'), ((2279, 2292), 'mindspore.common.tensor.Tensor', 'Tensor', (['x1_np'], {}), '(x1_np)\n', (2285, 2292), False, 'from mindspore.common.tensor import Tensor\n'), ((2512, 2535), 'mindspore.ops.operations.BroadcastTo', 'P.BroadcastTo', (['ms_shape'], {}), '(ms_shape)\n', (2525, 2535), True, 'from mindspore.ops import operations as P\n'), ((2536, 2549), 'mindspore.common.tensor.Tensor', 'Tensor', (['x1_np'], {}), '(x1_np)\n', (2542, 2549), False, 'from mindspore.common.tensor import Tensor\n'), ((3044, 3069), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3057, 3069), False, 'import pytest\n'), ((1038, 1064), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', '(5)', '(1)'], {}), '(3, 1, 5, 1)\n', (1052, 1064), True, 'import numpy as np\n'), ((1236, 1262), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', 
'(5)', '(1)'], {}), '(3, 1, 5, 1)\n', (1250, 1262), True, 'import numpy as np\n'), ((1461, 1481), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (1475, 1481), True, 'import numpy as np\n'), ((1992, 2018), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', '(5)', '(1)'], {}), '(3, 1, 5, 1)\n', (2006, 2018), True, 'import numpy as np\n'), ((2196, 2222), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', '(5)', '(1)'], {}), '(3, 1, 5, 1)\n', (2210, 2222), True, 'import numpy as np\n'), ((2459, 2479), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (2473, 2479), True, 'import numpy as np\n'), ((2995, 3015), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (3009, 3015), True, 'import numpy as np\n'), ((3079, 3102), 'mindspore.ops.operations.BroadcastTo', 'P.BroadcastTo', (['ms_shape'], {}), '(ms_shape)\n', (3092, 3102), True, 'from mindspore.ops import operations as P\n'), ((3103, 3115), 'mindspore.common.tensor.Tensor', 'Tensor', (['x_np'], {}), '(x_np)\n', (3109, 3115), False, 'from mindspore.common.tensor import Tensor\n')] |
"""
Tests for simulation smoothing
Author: <NAME>
License: Simplified-BSD
"""
import os
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pandas as pd
from statsmodels import datasets
from statsmodels.tsa.statespace import mlemodel, sarimax, structural
from statsmodels.tsa.statespace.simulation_smoother import (
SIMULATION_STATE, SIMULATION_DISTURBANCE, SIMULATION_ALL)
current_path = os.path.dirname(os.path.abspath(__file__))
class MultivariateVARKnown(object):
"""
Tests for simulation smoothing values in a couple of special cases of
variates. Both computed values and KFAS values are used for comparison
against the simulation smoother output.
"""
    @classmethod
    def setup_class(cls, missing=None, test_against_KFAS=True,
                    *args, **kwargs):
        # Build a 3-variable VAR(1) state space model on macro growth rates
        # with fixed, known system matrices; smooth it and attach a
        # simulation smoother for the tests below.
        # `missing` masks parts of the data with NaN:
        #   None | 'all' | 'partial' | 'mixed'.
        cls.test_against_KFAS = test_against_KFAS
        # Data
        dta = datasets.macrodata.load_pandas().data
        dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
                                  freq='QS')
        # Log-differences (growth rates); drop the initial NaN row.
        obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:]

        if missing == 'all':
            obs.iloc[0:50, :] = np.nan
        elif missing == 'partial':
            obs.iloc[0:50, 0] = np.nan
        elif missing == 'mixed':
            obs.iloc[0:50, 0] = np.nan
            obs.iloc[19:70, 1] = np.nan
            obs.iloc[39:90, 2] = np.nan
            obs.iloc[119:130, 0] = np.nan
            obs.iloc[119:130, 2] = np.nan
            obs.iloc[-10:, :] = np.nan

        if test_against_KFAS:
            # The KFAS reference output was generated on the first 9 rows only.
            obs = obs.iloc[:9]

        # Create the model
        mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
        mod['design'] = np.eye(3)
        mod['obs_cov'] = np.array([
            [0.0000640649, 0., 0.],
            [0., 0.0000572802, 0.],
            [0., 0., 0.0017088585]])
        mod['transition'] = np.array([
            [-0.1119908792, 0.8441841604, 0.0238725303],
            [0.2629347724, 0.4996718412, -0.0173023305],
            [-3.2192369082, 4.1536028244, 0.4514379215]])
        mod['selection'] = np.eye(3)
        mod['state_cov'] = np.array([
            [0.0000640649, 0.0000388496, 0.0002148769],
            [0.0000388496, 0.0000572802, 0.000001555],
            [0.0002148769, 0.000001555, 0.0017088585]])
        mod.initialize_approximate_diffuse(1e6)
        # Univariate filtering matches the KFAS treatment of missing data.
        mod.ssm.filter_univariate = True
        cls.model = mod
        cls.results = mod.smooth([], return_ssm=True)
        cls.sim = cls.model.simulation_smoother()
    def test_loglike(self):
        # Total smoothed log-likelihood must match the subclass-provided
        # reference value `true_llf`.
        assert_allclose(np.sum(self.results.llf_obs), self.true_llf)
def test_simulate_0(self):
n = 10
# Test with all inputs as zeros
measurement_shocks = np.zeros((n, self.model.k_endog))
state_shocks = np.zeros((n, self.model.ssm.k_posdef))
initial_state = np.zeros(self.model.k_states)
obs, states = self.model.ssm.simulate(
nsimulations=n, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
assert_allclose(obs, np.zeros((n, self.model.k_endog)))
assert_allclose(states, np.zeros((n, self.model.k_states)))
def test_simulate_1(self):
n = 10
# Test with np.arange / 10 measurement shocks only
measurement_shocks = np.reshape(
np.arange(n * self.model.k_endog) / 10.,
(n, self.model.k_endog))
state_shocks = np.zeros((n, self.model.ssm.k_posdef))
initial_state = np.zeros(self.model.k_states)
obs, states = self.model.ssm.simulate(
nsimulations=n, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
assert_allclose(obs, np.reshape(
np.arange(n * self.model.k_endog) / 10.,
(n, self.model.k_endog)))
assert_allclose(states, np.zeros((n, self.model.k_states)))
def test_simulate_2(self):
n = 10
Z = self.model['design']
T = self.model['transition']
# Test with non-zero state shocks and initial state
measurement_shocks = np.zeros((n, self.model.k_endog))
state_shocks = np.ones((n, self.model.ssm.k_posdef))
initial_state = np.ones(self.model.k_states) * 2.5
obs, states = self.model.ssm.simulate(
nsimulations=n, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
desired_obs = np.zeros((n, self.model.k_endog))
desired_state = np.zeros((n, self.model.k_states))
desired_state[0] = initial_state
desired_obs[0] = np.dot(Z, initial_state)
for i in range(1, n):
desired_state[i] = np.dot(T, desired_state[i-1]) + state_shocks[i]
desired_obs[i] = np.dot(Z, desired_state[i])
assert_allclose(obs, desired_obs)
assert_allclose(states, desired_state)
    def test_simulation_smoothing_0(self):
        """Simulation smoothing with all variates set to zero."""
        # Simulation smoothing when setting all variates to zeros
        # In this case:
        # - unconditional disturbances are zero, because they are simply
        #   transformed to have the appropriate variance matrix, but keep the
        #   same mean - of zero
        # - generated states are zeros, because initial state is
        #   zeros and all state disturbances are zeros
        # - generated observations are zeros, because states are zeros and all
        #   measurement disturbances are zeros
        # - The simulated state is equal to the smoothed state from the
        #   original model, because
        #   simulated state = (generated state - smoothed generated state +
        #                      smoothed state)
        #   and here generated state = smoothed generated state = 0
        # - The simulated measurement disturbance is equal to the smoothed
        #   measurement disturbance for very similar reasons, because
        #   simulated measurement disturbance = (
        #       generated measurement disturbance -
        #       smoothed generated measurement disturbance +
        #       smoothed measurement disturbance)
        #   and here generated measurement disturbance and
        #   smoothed generated measurement disturbance are zero.
        # - The simulated state disturbance is equal to the smoothed
        #   state disturbance for exactly the same reason as above.
        sim = self.sim
        Z = self.model['design']
        # One variate per endog and per state disturbance, per time point
        n_disturbance_variates = (
            (self.model.k_endog + self.model.ssm.k_posdef) * self.model.nobs)
        # Test against known quantities (see above for description)
        sim.simulate(disturbance_variates=np.zeros(n_disturbance_variates),
                     initial_state_variates=np.zeros(self.model.k_states))
        assert_allclose(sim.generated_measurement_disturbance, 0)
        assert_allclose(sim.generated_state_disturbance, 0)
        assert_allclose(sim.generated_state, 0)
        assert_allclose(sim.generated_obs, 0)
        assert_allclose(sim.simulated_state, self.results.smoothed_state)
        # The measurement-disturbance comparison is skipped under collapsed
        # filtering, where that output is not comparable
        if not self.model.ssm.filter_collapsed:
            assert_allclose(sim.simulated_measurement_disturbance,
                            self.results.smoothed_measurement_disturbance)
        assert_allclose(sim.simulated_state_disturbance,
                        self.results.smoothed_state_disturbance)
        # Test against R package KFAS values
        if self.test_against_KFAS:
            path = os.path.join(current_path, 'results',
                                'results_simulation_smoothing0.csv')
            true = pd.read_csv(path)
            assert_allclose(sim.simulated_state,
                            true[['state1', 'state2', 'state3']].T,
                            atol=1e-7)
            assert_allclose(sim.simulated_measurement_disturbance,
                            true[['eps1', 'eps2', 'eps3']].T,
                            atol=1e-7)
            assert_allclose(sim.simulated_state_disturbance,
                            true[['eta1', 'eta2', 'eta3']].T,
                            atol=1e-7)
            # Signal = Z alpha_t, compared column-by-column against KFAS
            signals = np.zeros((3, self.model.nobs))
            for t in range(self.model.nobs):
                signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
            assert_allclose(signals, true[['signal1', 'signal2', 'signal3']].T,
                            atol=1e-7)
    def test_simulation_smoothing_1(self):
        """Simulation smoothing with known measurement disturbance variates.

        The measurement disturbance variates are np.arange / 10.; all other
        variates are zero.  The expected quantities are computed by hand:
        the generated measurement disturbances are the variates transformed
        by the Cholesky factor of the observation covariance, and the
        simulated quantities follow from re-smoothing a model whose data is
        exactly those generated disturbances.
        """
        # Test with measurement disturbance as np.arange / 10., all other
        # disturbances are zeros
        sim = self.sim
        Z = self.model['design']
        # Construct the variates
        measurement_disturbance_variates = np.reshape(
            np.arange(self.model.nobs * self.model.k_endog) / 10.,
            (self.model.nobs, self.model.k_endog))
        disturbance_variates = np.r_[
            measurement_disturbance_variates.ravel(),
            np.zeros(self.model.nobs * self.model.ssm.k_posdef)]
        # Compute some additional known quantities
        # Generated disturbances = chol(obs_cov) @ variates, per time point
        generated_measurement_disturbance = np.zeros(
            measurement_disturbance_variates.shape)
        chol = np.linalg.cholesky(self.model['obs_cov'])
        for t in range(self.model.nobs):
            generated_measurement_disturbance[t] = np.dot(
                chol, measurement_disturbance_variates[t])
        # Re-smooth a copy of the model whose "data" is the generated
        # measurement disturbances (generated states / obs are zero here)
        generated_model = mlemodel.MLEModel(
            generated_measurement_disturbance, k_states=self.model.k_states,
            k_posdef=self.model.ssm.k_posdef)
        for name in ['design', 'obs_cov', 'transition',
                     'selection', 'state_cov']:
            generated_model[name] = self.model[name]
        generated_model.initialize_approximate_diffuse(1e6)
        generated_model.ssm.filter_univariate = True
        generated_res = generated_model.ssm.smooth()
        # simulated = generated - smoothed(generated) + smoothed(original)
        simulated_state = (
            0 - generated_res.smoothed_state + self.results.smoothed_state)
        if not self.model.ssm.filter_collapsed:
            simulated_measurement_disturbance = (
                generated_measurement_disturbance.T -
                generated_res.smoothed_measurement_disturbance +
                self.results.smoothed_measurement_disturbance)
        simulated_state_disturbance = (
            0 - generated_res.smoothed_state_disturbance +
            self.results.smoothed_state_disturbance)
        # Test against known values
        sim.simulate(disturbance_variates=disturbance_variates,
                     initial_state_variates=np.zeros(self.model.k_states))
        assert_allclose(sim.generated_measurement_disturbance,
                        generated_measurement_disturbance)
        assert_allclose(sim.generated_state_disturbance, 0)
        assert_allclose(sim.generated_state, 0)
        assert_allclose(sim.generated_obs,
                        generated_measurement_disturbance.T)
        assert_allclose(sim.simulated_state, simulated_state)
        if not self.model.ssm.filter_collapsed:
            assert_allclose(sim.simulated_measurement_disturbance,
                            simulated_measurement_disturbance)
        assert_allclose(sim.simulated_state_disturbance,
                        simulated_state_disturbance)
        # Test against R package KFAS values
        if self.test_against_KFAS:
            path = os.path.join(current_path, 'results',
                                'results_simulation_smoothing1.csv')
            true = pd.read_csv(path)
            assert_allclose(sim.simulated_state,
                            true[['state1', 'state2', 'state3']].T,
                            atol=1e-7)
            assert_allclose(sim.simulated_measurement_disturbance,
                            true[['eps1', 'eps2', 'eps3']].T,
                            atol=1e-7)
            assert_allclose(sim.simulated_state_disturbance,
                            true[['eta1', 'eta2', 'eta3']].T,
                            atol=1e-7)
            # Signal = Z alpha_t, compared against KFAS output
            signals = np.zeros((3, self.model.nobs))
            for t in range(self.model.nobs):
                signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
            assert_allclose(signals, true[['signal1', 'signal2', 'signal3']].T,
                            atol=1e-7)
    def test_simulation_smoothing_2(self):
        """Simulation smoothing with known measurement and state variates.

        Both measurement and state disturbance variates are np.arange / 10.;
        the initial state variates are zero.  Generated disturbances, the
        generated state path, and generated observations are all computed by
        hand and compared against the smoother's outputs; the simulated
        quantities are then verified via re-smoothing the generated data.
        """
        # Test with measurement and state disturbances as np.arange / 10.,
        # initial state variates are zeros.
        sim = self.sim
        Z = self.model['design']
        T = self.model['transition']
        # Construct the variates
        measurement_disturbance_variates = np.reshape(
            np.arange(self.model.nobs * self.model.k_endog) / 10.,
            (self.model.nobs, self.model.k_endog))
        state_disturbance_variates = np.reshape(
            np.arange(self.model.nobs * self.model.ssm.k_posdef) / 10.,
            (self.model.nobs, self.model.ssm.k_posdef))
        disturbance_variates = np.r_[
            measurement_disturbance_variates.ravel(),
            state_disturbance_variates.ravel()]
        initial_state_variates = np.zeros(self.model.k_states)
        # Compute some additional known quantities
        # Generated disturbances = chol(cov) @ variates, per time point
        generated_measurement_disturbance = np.zeros(
            measurement_disturbance_variates.shape)
        chol = np.linalg.cholesky(self.model['obs_cov'])
        for t in range(self.model.nobs):
            generated_measurement_disturbance[t] = np.dot(
                chol, measurement_disturbance_variates[t])
        generated_state_disturbance = np.zeros(
            state_disturbance_variates.shape)
        chol = np.linalg.cholesky(self.model['state_cov'])
        for t in range(self.model.nobs):
            generated_state_disturbance[t] = np.dot(
                chol, state_disturbance_variates[t])
        # Generated state recursion: alpha_{t+1} = T alpha_t + eta_t,
        # y_t = Z alpha_t + eps_t, seeded from the smoothed initial state
        generated_obs = np.zeros((self.model.k_endog, self.model.nobs))
        generated_state = np.zeros((self.model.k_states, self.model.nobs+1))
        chol = np.linalg.cholesky(self.results.initial_state_cov)
        generated_state[:, 0] = (
            self.results.initial_state + np.dot(chol, initial_state_variates))
        for t in range(self.model.nobs):
            generated_state[:, t+1] = (np.dot(T, generated_state[:, t]) +
                                       generated_state_disturbance.T[:, t])
            generated_obs[:, t] = (np.dot(Z, generated_state[:, t]) +
                                   generated_measurement_disturbance.T[:, t])
        # Re-smooth a copy of the model on the generated observations
        generated_model = mlemodel.MLEModel(
            generated_obs.T, k_states=self.model.k_states,
            k_posdef=self.model.ssm.k_posdef)
        for name in ['design', 'obs_cov', 'transition',
                     'selection', 'state_cov']:
            generated_model[name] = self.model[name]
        generated_model.initialize_approximate_diffuse(1e6)
        generated_model.ssm.filter_univariate = True
        generated_res = generated_model.ssm.smooth()
        # simulated = generated - smoothed(generated) + smoothed(original)
        simulated_state = (
            generated_state[:, :-1] - generated_res.smoothed_state +
            self.results.smoothed_state)
        if not self.model.ssm.filter_collapsed:
            simulated_measurement_disturbance = (
                generated_measurement_disturbance.T -
                generated_res.smoothed_measurement_disturbance +
                self.results.smoothed_measurement_disturbance)
        simulated_state_disturbance = (
            generated_state_disturbance.T -
            generated_res.smoothed_state_disturbance +
            self.results.smoothed_state_disturbance)
        # Test against known values
        sim.simulate(disturbance_variates=disturbance_variates,
                     initial_state_variates=np.zeros(self.model.k_states))
        assert_allclose(sim.generated_measurement_disturbance,
                        generated_measurement_disturbance)
        assert_allclose(sim.generated_state_disturbance,
                        generated_state_disturbance)
        assert_allclose(sim.generated_state, generated_state)
        assert_allclose(sim.generated_obs, generated_obs)
        assert_allclose(sim.simulated_state, simulated_state)
        if not self.model.ssm.filter_collapsed:
            assert_allclose(sim.simulated_measurement_disturbance.T,
                            simulated_measurement_disturbance.T)
        assert_allclose(sim.simulated_state_disturbance,
                        simulated_state_disturbance)
        # Test against R package KFAS values
        if self.test_against_KFAS:
            path = os.path.join(current_path, 'results',
                                'results_simulation_smoothing2.csv')
            true = pd.read_csv(path)
            assert_allclose(sim.simulated_state.T,
                            true[['state1', 'state2', 'state3']],
                            atol=1e-7)
            assert_allclose(sim.simulated_measurement_disturbance,
                            true[['eps1', 'eps2', 'eps3']].T,
                            atol=1e-7)
            assert_allclose(sim.simulated_state_disturbance,
                            true[['eta1', 'eta2', 'eta3']].T,
                            atol=1e-7)
            # Signal = Z alpha_t, compared against KFAS output
            signals = np.zeros((3, self.model.nobs))
            for t in range(self.model.nobs):
                signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
            assert_allclose(signals, true[['signal1', 'signal2', 'signal3']].T,
                            atol=1e-7)
class TestMultivariateVARKnown(MultivariateVARKnown):
    """Known-variates tests on the fully observed (truncated) dataset."""

    @classmethod
    def setup_class(cls, *args, **kwargs):
        super().setup_class()
        cls.true_llf = 39.01246166
class TestMultivariateVARKnownMissingAll(MultivariateVARKnown):
    """Known-variates tests with the first 50 observations entirely missing.

    Notes
    -----
    No KFAS comparison is possible here: when an entry is missing, KFAS
    does not draw a simulation-smoothed value for it, whereas we draw from
    the unconditional distribution.  Neither convention is definitively
    preferable, but the difference makes it impossible to line up the
    variates and replicate KFAS's results.
    """

    @classmethod
    def setup_class(cls, *args, **kwargs):
        super().setup_class(missing='all', test_against_KFAS=False)
        cls.true_llf = 1305.739288
class TestMultivariateVARKnownMissingPartial(MultivariateVARKnown):
    """Known-variates tests with one series missing for the first 50 obs."""

    @classmethod
    def setup_class(cls, *args, **kwargs):
        super().setup_class(missing='partial', test_against_KFAS=False)
        cls.true_llf = 1518.449598
class TestMultivariateVARKnownMissingMixed(MultivariateVARKnown):
    """Known-variates tests with a mixed pattern of missing entries."""

    @classmethod
    def setup_class(cls, *args, **kwargs):
        super().setup_class(missing='mixed', test_against_KFAS=False)
        cls.true_llf = 1117.265303
class TestDFM(TestMultivariateVARKnown):
    """Known-variates tests on a collapsed dynamic factor model."""
    # KFAS comparison is disabled for the collapsed-filter case
    test_against_KFAS = False

    @classmethod
    def setup_class(cls, which='none', *args, **kwargs):
        """Set up a 2-state DFM with known parameters and collapsed filtering.

        Parameters
        ----------
        which : {'none', 'all', 'partial', 'mixed'}
            Pattern of NaN entries injected into the observations.
        """
        # Data: quarterly growth rates (log-diff * 400) of real GDP,
        # consumption, and investment
        dta = datasets.macrodata.load_pandas().data
        dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
                                  freq='QS')
        levels = dta[['realgdp', 'realcons', 'realinv']]
        obs = np.log(levels).diff().iloc[1:] * 400
        # Inject missing observations according to the requested pattern
        if which == 'all':
            obs.iloc[:50, :] = np.nan
            obs.iloc[119:130, :] = np.nan
        elif which == 'partial':
            obs.iloc[0:50, 0] = np.nan
            obs.iloc[119:130, 0] = np.nan
        elif which == 'mixed':
            obs.iloc[0:50, 0] = np.nan
            obs.iloc[19:70, 1] = np.nan
            obs.iloc[39:90, 2] = np.nan
            obs.iloc[119:130, 0] = np.nan
            obs.iloc[119:130, 2] = np.nan
        # Create the model with typical state space
        mod = mlemodel.MLEModel(obs, k_states=2, k_posdef=2, **kwargs)
        mod['design'] = np.array([[-32.47143586, 17.33779024],
                                  [-7.40264169, 1.69279859],
                                  [-209.04702853, 125.2879374]])
        mod['obs_cov'] = np.diag(
            np.array([0.0622668, 1.95666886, 58.37473642]))
        mod['transition'] = np.array([[0.29935707, 0.33289005],
                                      [-0.7639868, 1.2844237]])
        mod['selection'] = np.eye(2)
        mod['state_cov'] = np.array([[1.2, -0.25],
                                     [-0.25, 1.1]])
        mod.initialize_approximate_diffuse(1e6)
        mod.ssm.filter_univariate = True
        # Exercise the collapsed-observations filtering path
        mod.ssm.filter_collapsed = True
        cls.model = mod
        cls.results = mod.smooth([], return_ssm=True)
        cls.sim = cls.model.simulation_smoother()

    def test_loglike(self):
        # No reference log-likelihood is available for this model, so the
        # inherited check is disabled
        pass
class MultivariateVAR(object):
    """
    More generic tests for simulation smoothing; use actual N(0,1) variates

    Subclasses are expected to provide ``cls.variates`` (the stored draws),
    ``cls.true`` (the stored KFAS reference output), and ``cls.true_llf``.
    """
    @classmethod
    def setup_class(cls, missing='none', *args, **kwargs):
        """Set up the same known-parameter trivariate VAR(1) model.

        Parameters
        ----------
        missing : {'none', 'all', 'partial', 'mixed'}
            Pattern of NaN entries injected into the observations.
        """
        # Data: quarterly log-differences of real GDP, consumption, and
        # investment from the macrodata dataset
        dta = datasets.macrodata.load_pandas().data
        dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
                                  freq='QS')
        obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:]
        # Inject missing observations according to the requested pattern
        if missing == 'all':
            obs.iloc[0:50, :] = np.nan
        elif missing == 'partial':
            obs.iloc[0:50, 0] = np.nan
        elif missing == 'mixed':
            obs.iloc[0:50, 0] = np.nan
            obs.iloc[19:70, 1] = np.nan
            obs.iloc[39:90, 2] = np.nan
            obs.iloc[119:130, 0] = np.nan
            obs.iloc[119:130, 2] = np.nan
            obs.iloc[-10:, :] = np.nan
        # Create the model with fixed (known) system matrices
        mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
        mod['design'] = np.eye(3)
        mod['obs_cov'] = np.array([
            [0.0000640649, 0., 0.],
            [0., 0.0000572802, 0.],
            [0., 0., 0.0017088585]])
        mod['transition'] = np.array([
            [-0.1119908792, 0.8441841604, 0.0238725303],
            [0.2629347724, 0.4996718412, -0.0173023305],
            [-3.2192369082, 4.1536028244, 0.4514379215]])
        mod['selection'] = np.eye(3)
        mod['state_cov'] = np.array([
            [0.0000640649, 0.0000388496, 0.0002148769],
            [0.0000388496, 0.0000572802, 0.000001555],
            [0.0002148769, 0.000001555, 0.0017088585]])
        mod.initialize_approximate_diffuse(1e6)
        # Univariate filtering to match the KFAS reference output
        mod.ssm.filter_univariate = True
        cls.model = mod
        cls.results = mod.smooth([], return_ssm=True)
        cls.sim = cls.model.simulation_smoother()

    def test_loglike(self):
        """The smoothed log-likelihood matches the known value."""
        assert_allclose(np.sum(self.results.llf_obs), self.true_llf)

    def test_simulation_smoothing(self):
        """Simulation smoothing with stored N(0, 1) variates matches KFAS."""
        sim = self.sim
        Z = self.model['design']
        # Simulate with known variates: the last 3 entries are the initial
        # state variates, the rest are the disturbance variates
        sim.simulate(disturbance_variates=self.variates[:-3],
                     initial_state_variates=self.variates[-3:])
        # Test against R package KFAS values
        assert_allclose(sim.simulated_state.T,
                        self.true[['state1', 'state2', 'state3']],
                        atol=1e-7)
        assert_allclose(sim.simulated_measurement_disturbance,
                        self.true[['eps1', 'eps2', 'eps3']].T,
                        atol=1e-7)
        assert_allclose(sim.simulated_state_disturbance,
                        self.true[['eta1', 'eta2', 'eta3']].T,
                        atol=1e-7)
        # Signal = Z alpha_t, compared against KFAS output
        signals = np.zeros((3, self.model.nobs))
        for t in range(self.model.nobs):
            signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
        assert_allclose(signals,
                        self.true[['signal1', 'signal2', 'signal3']].T,
                        atol=1e-7)
class TestMultivariateVAR(MultivariateVAR):
    """Run the generic N(0, 1)-variate tests against stored KFAS output."""

    @classmethod
    def setup_class(cls):
        super().setup_class()
        # Load the stored variates and the stored KFAS reference results
        results_dir = os.path.join(current_path, 'results')
        variates_path = os.path.join(
            results_dir, 'results_simulation_smoothing3_variates.csv')
        cls.variates = pd.read_csv(variates_path).values.squeeze()
        cls.true = pd.read_csv(
            os.path.join(results_dir, 'results_simulation_smoothing3.csv'))
        cls.true_llf = 1695.34872
def test_misc():
    """Test variate drawing and the simulation-output option flags."""
    # Create the model and simulation smoother
    dta = datasets.macrodata.load_pandas().data
    dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
                              freq='QS')
    obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:]
    mod = sarimax.SARIMAX(obs['realgdp'], order=(1, 0, 0))
    mod['design', 0, 0] = 0.
    mod['obs_cov', 0, 0] = 1.
    mod.update(np.r_[1., 1.])
    sim = mod.simulation_smoother()
    # Test that the simulation smoother is drawing variates correctly:
    # re-seeding and drawing the same number of standard normals by hand
    # must reproduce exactly what the smoother draws internally.
    np.random.seed(1234)
    n_disturbance_variates = mod.nobs * (mod.k_endog + mod.k_states)
    variates = np.random.normal(size=n_disturbance_variates)
    np.random.seed(1234)
    sim.simulate()
    # Measurement-disturbance variates come first, then state disturbances
    assert_allclose(sim.generated_measurement_disturbance[:, 0],
                    variates[:mod.nobs])
    assert_allclose(sim.generated_state_disturbance[:, 0],
                    variates[mod.nobs:])
    # Test that we can change the options of the simulations smoother
    assert_equal(sim.simulation_output, mod.ssm.smoother_output)
    sim.simulation_output = 0
    assert_equal(sim.simulation_output, 0)
    # Each simulate_* flag toggles the corresponding SIMULATION_* bit
    sim.simulate_state = True
    assert_equal(sim.simulation_output, SIMULATION_STATE)
    sim.simulate_state = False
    assert_equal(sim.simulation_output, 0)
    sim.simulate_disturbance = True
    assert_equal(sim.simulation_output, SIMULATION_DISTURBANCE)
    sim.simulate_disturbance = False
    assert_equal(sim.simulation_output, 0)
    sim.simulate_all = True
    assert_equal(sim.simulation_output, SIMULATION_ALL)
    sim.simulate_all = False
    assert_equal(sim.simulation_output, 0)
def test_simulation_smoothing_obs_intercept():
    """With the level absorbed by an obs intercept, the state smooths to 0."""
    nobs = 10
    level = 100
    endog = np.ones(nobs) * level
    # Random-walk unobserved-components model where the exogenous regressor
    # carries the constant level; the state itself should then be zero.
    mod = structural.UnobservedComponents(endog, 'rwalk', exog=np.ones(nobs))
    mod.update([1, level])
    sim = mod.simulation_smoother()
    sim.simulate(disturbance_variates=np.zeros(mod.nobs * 2),
                 initial_state_variates=np.zeros(1))
    assert_equal(sim.simulated_state[0], 0)
def test_simulation_smoothing_state_intercept():
    """The simulated state reproduces a constant state intercept."""
    nobs = 10
    level = 100
    endog = np.ones(nobs) * level
    # SARIMAX with only a constant trend plus measurement error; the known
    # initialization pins the state at the intercept with zero variance.
    mod = sarimax.SARIMAX(endog, order=(0, 0, 0), trend='c',
                          measurement_error=True)
    mod.initialize_known([100], [[0]])
    mod.update([level, 1., 1.])
    sim = mod.simulation_smoother()
    sim.simulate(disturbance_variates=np.zeros(mod.nobs * 2),
                 initial_state_variates=np.zeros(1))
    assert_equal(sim.simulated_state[0], level)
def test_simulation_smoothing_state_intercept_diffuse():
    """A constant state intercept is recovered under diffuse initialization.

    Checks both a fully observed series and one with a missing value.
    """
    nobs = 10
    level = 100
    endog = np.ones(nobs) * level
    for add_missing in (False, True):
        if add_missing:
            # Second pass: knock out one observation
            endog[5] = np.nan
        mod = sarimax.SARIMAX(endog, order=(0, 0, 0), trend='c',
                              measurement_error=True,
                              initialization='diffuse')
        mod.update([level, 1., 1.])
        sim = mod.simulation_smoother()
        sim.simulate(disturbance_variates=np.zeros(mod.nobs * 2),
                     initial_state_variates=np.zeros(1))
        assert_equal(sim.simulated_state[0], level)
| [
"numpy.testing.assert_equal",
"pandas.read_csv",
"numpy.log",
"numpy.array",
"numpy.arange",
"pandas.date_range",
"numpy.testing.assert_allclose",
"numpy.dot",
"numpy.random.seed",
"numpy.random.normal",
"numpy.eye",
"numpy.ones",
"statsmodels.datasets.macrodata.load_pandas",
"numpy.linalg... | [((443, 468), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (458, 468), False, 'import os\n'), ((24681, 24742), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""1959-01-01"""', 'end': '"""2009-7-01"""', 'freq': '"""QS"""'}), "(start='1959-01-01', end='2009-7-01', freq='QS')\n", (24694, 24742), True, 'import pandas as pd\n'), ((24858, 24906), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'sarimax.SARIMAX', (["obs['realgdp']"], {'order': '(1, 0, 0)'}), "(obs['realgdp'], order=(1, 0, 0))\n", (24873, 24906), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((25107, 25127), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (25121, 25127), True, 'import numpy as np\n'), ((25212, 25257), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_disturbance_variates'}), '(size=n_disturbance_variates)\n', (25228, 25257), True, 'import numpy as np\n'), ((25262, 25282), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (25276, 25282), True, 'import numpy as np\n'), ((25306, 25392), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_measurement_disturbance[:, 0]', 'variates[:mod.nobs]'], {}), '(sim.generated_measurement_disturbance[:, 0], variates[:mod.\n nobs])\n', (25321, 25392), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((25412, 25487), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_state_disturbance[:, 0]', 'variates[mod.nobs:]'], {}), '(sim.generated_state_disturbance[:, 0], variates[mod.nobs:])\n', (25427, 25487), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((25583, 25643), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', 'mod.ssm.smoother_output'], {}), '(sim.simulation_output, mod.ssm.smoother_output)\n', (25595, 25643), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((25678, 25716), 
'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', '(0)'], {}), '(sim.simulation_output, 0)\n', (25690, 25716), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((25752, 25805), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', 'SIMULATION_STATE'], {}), '(sim.simulation_output, SIMULATION_STATE)\n', (25764, 25805), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((25841, 25879), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', '(0)'], {}), '(sim.simulation_output, 0)\n', (25853, 25879), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((25921, 25980), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', 'SIMULATION_DISTURBANCE'], {}), '(sim.simulation_output, SIMULATION_DISTURBANCE)\n', (25933, 25980), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((26022, 26060), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', '(0)'], {}), '(sim.simulation_output, 0)\n', (26034, 26060), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((26094, 26145), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', 'SIMULATION_ALL'], {}), '(sim.simulation_output, SIMULATION_ALL)\n', (26106, 26145), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((26179, 26217), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulation_output', '(0)'], {}), '(sim.simulation_output, 0)\n', (26191, 26217), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((26603, 26642), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulated_state[0]', '(0)'], {}), '(sim.simulated_state[0], 0)\n', (26615, 26642), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((26777, 26851), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'sarimax.SARIMAX', (['endog'], {'order': '(0, 0, 0)', 'trend': '"""c"""', 'measurement_error': 
'(True)'}), "(endog, order=(0, 0, 0), trend='c', measurement_error=True)\n", (26792, 26851), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((27109, 27156), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulated_state[0]', 'intercept'], {}), '(sim.simulated_state[0], intercept)\n', (27121, 27156), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((27333, 27437), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'sarimax.SARIMAX', (['endog'], {'order': '(0, 0, 0)', 'trend': '"""c"""', 'measurement_error': '(True)', 'initialization': '"""diffuse"""'}), "(endog, order=(0, 0, 0), trend='c', measurement_error=True,\n initialization='diffuse')\n", (27348, 27437), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((27678, 27725), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulated_state[0]', 'intercept'], {}), '(sim.simulated_state[0], intercept)\n', (27690, 27725), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((27790, 27894), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'sarimax.SARIMAX', (['endog'], {'order': '(0, 0, 0)', 'trend': '"""c"""', 'measurement_error': '(True)', 'initialization': '"""diffuse"""'}), "(endog, order=(0, 0, 0), trend='c', measurement_error=True,\n initialization='diffuse')\n", (27805, 27894), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((28135, 28182), 'numpy.testing.assert_equal', 'assert_equal', (['sim.simulated_state[0]', 'intercept'], {}), '(sim.simulated_state[0], intercept)\n', (28147, 28182), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((972, 1033), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""1959-01-01"""', 'end': '"""2009-7-01"""', 'freq': '"""QS"""'}), "(start='1959-01-01', end='2009-7-01', freq='QS')\n", (985, 1033), True, 'import pandas as pd\n'), ((1668, 1724), 'statsmodels.tsa.statespace.mlemodel.MLEModel', 'mlemodel.MLEModel', 
(['obs'], {'k_states': '(3)', 'k_posdef': '(3)'}), '(obs, k_states=3, k_posdef=3, **kwargs)\n', (1685, 1724), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((1749, 1758), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1755, 1758), True, 'import numpy as np\n'), ((1784, 1875), 'numpy.array', 'np.array', (['[[6.40649e-05, 0.0, 0.0], [0.0, 5.72802e-05, 0.0], [0.0, 0.0, 0.0017088585]]'], {}), '([[6.40649e-05, 0.0, 0.0], [0.0, 5.72802e-05, 0.0], [0.0, 0.0, \n 0.0017088585]])\n', (1792, 1875), True, 'import numpy as np\n'), ((1978, 2128), 'numpy.array', 'np.array', (['[[-0.1119908792, 0.8441841604, 0.0238725303], [0.2629347724, 0.4996718412, \n -0.0173023305], [-3.2192369082, 4.1536028244, 0.4514379215]]'], {}), '([[-0.1119908792, 0.8441841604, 0.0238725303], [0.2629347724, \n 0.4996718412, -0.0173023305], [-3.2192369082, 4.1536028244, 0.4514379215]])\n', (1986, 2128), True, 'import numpy as np\n'), ((2194, 2203), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2200, 2203), True, 'import numpy as np\n'), ((2231, 2368), 'numpy.array', 'np.array', (['[[6.40649e-05, 3.88496e-05, 0.0002148769], [3.88496e-05, 5.72802e-05, \n 1.555e-06], [0.0002148769, 1.555e-06, 0.0017088585]]'], {}), '([[6.40649e-05, 3.88496e-05, 0.0002148769], [3.88496e-05, \n 5.72802e-05, 1.555e-06], [0.0002148769, 1.555e-06, 0.0017088585]])\n', (2239, 2368), True, 'import numpy as np\n'), ((2849, 2882), 'numpy.zeros', 'np.zeros', (['(n, self.model.k_endog)'], {}), '((n, self.model.k_endog))\n', (2857, 2882), True, 'import numpy as np\n'), ((2906, 2944), 'numpy.zeros', 'np.zeros', (['(n, self.model.ssm.k_posdef)'], {}), '((n, self.model.ssm.k_posdef))\n', (2914, 2944), True, 'import numpy as np\n'), ((2969, 2998), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (2977, 2998), True, 'import numpy as np\n'), ((3575, 3613), 'numpy.zeros', 'np.zeros', (['(n, self.model.ssm.k_posdef)'], {}), '((n, self.model.ssm.k_posdef))\n', (3583, 
3613), True, 'import numpy as np\n'), ((3638, 3667), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (3646, 3667), True, 'import numpy as np\n'), ((4258, 4291), 'numpy.zeros', 'np.zeros', (['(n, self.model.k_endog)'], {}), '((n, self.model.k_endog))\n', (4266, 4291), True, 'import numpy as np\n'), ((4315, 4352), 'numpy.ones', 'np.ones', (['(n, self.model.ssm.k_posdef)'], {}), '((n, self.model.ssm.k_posdef))\n', (4322, 4352), True, 'import numpy as np\n'), ((4617, 4650), 'numpy.zeros', 'np.zeros', (['(n, self.model.k_endog)'], {}), '((n, self.model.k_endog))\n', (4625, 4650), True, 'import numpy as np\n'), ((4675, 4709), 'numpy.zeros', 'np.zeros', (['(n, self.model.k_states)'], {}), '((n, self.model.k_states))\n', (4683, 4709), True, 'import numpy as np\n'), ((4776, 4800), 'numpy.dot', 'np.dot', (['Z', 'initial_state'], {}), '(Z, initial_state)\n', (4782, 4800), True, 'import numpy as np\n'), ((4976, 5009), 'numpy.testing.assert_allclose', 'assert_allclose', (['obs', 'desired_obs'], {}), '(obs, desired_obs)\n', (4991, 5009), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5018, 5056), 'numpy.testing.assert_allclose', 'assert_allclose', (['states', 'desired_state'], {}), '(states, desired_state)\n', (5033, 5056), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6937, 6994), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_measurement_disturbance', '(0)'], {}), '(sim.generated_measurement_disturbance, 0)\n', (6952, 6994), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7003, 7054), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_state_disturbance', '(0)'], {}), '(sim.generated_state_disturbance, 0)\n', (7018, 7054), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7063, 7102), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_state', '(0)'], {}), '(sim.generated_state, 0)\n', (7078, 
7102), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7111, 7148), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_obs', '(0)'], {}), '(sim.generated_obs, 0)\n', (7126, 7148), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7157, 7222), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state', 'self.results.smoothed_state'], {}), '(sim.simulated_state, self.results.smoothed_state)\n', (7172, 7222), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7421, 7515), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state_disturbance', 'self.results.smoothed_state_disturbance'], {}), '(sim.simulated_state_disturbance, self.results.\n smoothed_state_disturbance)\n', (7436, 7515), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9220, 9268), 'numpy.zeros', 'np.zeros', (['measurement_disturbance_variates.shape'], {}), '(measurement_disturbance_variates.shape)\n', (9228, 9268), True, 'import numpy as np\n'), ((9297, 9338), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (["self.model['obs_cov']"], {}), "(self.model['obs_cov'])\n", (9315, 9338), True, 'import numpy as np\n'), ((9525, 9646), 'statsmodels.tsa.statespace.mlemodel.MLEModel', 'mlemodel.MLEModel', (['generated_measurement_disturbance'], {'k_states': 'self.model.k_states', 'k_posdef': 'self.model.ssm.k_posdef'}), '(generated_measurement_disturbance, k_states=self.model.\n k_states, k_posdef=self.model.ssm.k_posdef)\n', (9542, 9646), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((10711, 10804), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_measurement_disturbance', 'generated_measurement_disturbance'], {}), '(sim.generated_measurement_disturbance,\n generated_measurement_disturbance)\n', (10726, 10804), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10833, 10884), 
'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_state_disturbance', '(0)'], {}), '(sim.generated_state_disturbance, 0)\n', (10848, 10884), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10893, 10932), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_state', '(0)'], {}), '(sim.generated_state, 0)\n', (10908, 10932), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10941, 11012), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_obs', 'generated_measurement_disturbance.T'], {}), '(sim.generated_obs, generated_measurement_disturbance.T)\n', (10956, 11012), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11045, 11098), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state', 'simulated_state'], {}), '(sim.simulated_state, simulated_state)\n', (11060, 11098), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11285, 11362), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state_disturbance', 'simulated_state_disturbance'], {}), '(sim.simulated_state_disturbance, simulated_state_disturbance)\n', (11300, 11362), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((13217, 13246), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (13225, 13246), True, 'import numpy as np\n'), ((13343, 13391), 'numpy.zeros', 'np.zeros', (['measurement_disturbance_variates.shape'], {}), '(measurement_disturbance_variates.shape)\n', (13351, 13391), True, 'import numpy as np\n'), ((13420, 13461), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (["self.model['obs_cov']"], {}), "(self.model['obs_cov'])\n", (13438, 13461), True, 'import numpy as np\n'), ((13660, 13702), 'numpy.zeros', 'np.zeros', (['state_disturbance_variates.shape'], {}), '(state_disturbance_variates.shape)\n', (13668, 13702), True, 'import numpy as np\n'), ((13731, 13774), 
'numpy.linalg.cholesky', 'np.linalg.cholesky', (["self.model['state_cov']"], {}), "(self.model['state_cov'])\n", (13749, 13774), True, 'import numpy as np\n'), ((13947, 13994), 'numpy.zeros', 'np.zeros', (['(self.model.k_endog, self.model.nobs)'], {}), '((self.model.k_endog, self.model.nobs))\n', (13955, 13994), True, 'import numpy as np\n'), ((14021, 14073), 'numpy.zeros', 'np.zeros', (['(self.model.k_states, self.model.nobs + 1)'], {}), '((self.model.k_states, self.model.nobs + 1))\n', (14029, 14073), True, 'import numpy as np\n'), ((14087, 14137), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['self.results.initial_state_cov'], {}), '(self.results.initial_state_cov)\n', (14105, 14137), True, 'import numpy as np\n'), ((14617, 14720), 'statsmodels.tsa.statespace.mlemodel.MLEModel', 'mlemodel.MLEModel', (['generated_obs.T'], {'k_states': 'self.model.k_states', 'k_posdef': 'self.model.ssm.k_posdef'}), '(generated_obs.T, k_states=self.model.k_states, k_posdef=\n self.model.ssm.k_posdef)\n', (14634, 14720), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((15860, 15953), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_measurement_disturbance', 'generated_measurement_disturbance'], {}), '(sim.generated_measurement_disturbance,\n generated_measurement_disturbance)\n', (15875, 15953), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((15982, 16059), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_state_disturbance', 'generated_state_disturbance'], {}), '(sim.generated_state_disturbance, generated_state_disturbance)\n', (15997, 16059), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16092, 16145), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_state', 'generated_state'], {}), '(sim.generated_state, generated_state)\n', (16107, 16145), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16154, 16203), 
'numpy.testing.assert_allclose', 'assert_allclose', (['sim.generated_obs', 'generated_obs'], {}), '(sim.generated_obs, generated_obs)\n', (16169, 16203), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16212, 16265), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state', 'simulated_state'], {}), '(sim.simulated_state, simulated_state)\n', (16227, 16265), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16456, 16533), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state_disturbance', 'simulated_state_disturbance'], {}), '(sim.simulated_state_disturbance, simulated_state_disturbance)\n', (16471, 16533), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((19341, 19402), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""1959-01-01"""', 'end': '"""2009-7-01"""', 'freq': '"""QS"""'}), "(start='1959-01-01', end='2009-7-01', freq='QS')\n", (19354, 19402), True, 'import pandas as pd\n'), ((20068, 20124), 'statsmodels.tsa.statespace.mlemodel.MLEModel', 'mlemodel.MLEModel', (['obs'], {'k_states': '(2)', 'k_posdef': '(2)'}), '(obs, k_states=2, k_posdef=2, **kwargs)\n', (20085, 20124), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((20149, 20250), 'numpy.array', 'np.array', (['[[-32.47143586, 17.33779024], [-7.40264169, 1.69279859], [-209.04702853, \n 125.2879374]]'], {}), '([[-32.47143586, 17.33779024], [-7.40264169, 1.69279859], [-\n 209.04702853, 125.2879374]])\n', (20157, 20250), True, 'import numpy as np\n'), ((20436, 20497), 'numpy.array', 'np.array', (['[[0.29935707, 0.33289005], [-0.7639868, 1.2844237]]'], {}), '([[0.29935707, 0.33289005], [-0.7639868, 1.2844237]])\n', (20444, 20497), True, 'import numpy as np\n'), ((20563, 20572), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (20569, 20572), True, 'import numpy as np\n'), ((20600, 20638), 'numpy.array', 'np.array', (['[[1.2, -0.25], [-0.25, 1.1]]'], {}), '([[1.2, 
-0.25], [-0.25, 1.1]])\n', (20608, 20638), True, 'import numpy as np\n'), ((21264, 21325), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""1959-01-01"""', 'end': '"""2009-7-01"""', 'freq': '"""QS"""'}), "(start='1959-01-01', end='2009-7-01', freq='QS')\n", (21277, 21325), True, 'import pandas as pd\n'), ((21898, 21954), 'statsmodels.tsa.statespace.mlemodel.MLEModel', 'mlemodel.MLEModel', (['obs'], {'k_states': '(3)', 'k_posdef': '(3)'}), '(obs, k_states=3, k_posdef=3, **kwargs)\n', (21915, 21954), False, 'from statsmodels.tsa.statespace import mlemodel, sarimax, structural\n'), ((21979, 21988), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (21985, 21988), True, 'import numpy as np\n'), ((22014, 22105), 'numpy.array', 'np.array', (['[[6.40649e-05, 0.0, 0.0], [0.0, 5.72802e-05, 0.0], [0.0, 0.0, 0.0017088585]]'], {}), '([[6.40649e-05, 0.0, 0.0], [0.0, 5.72802e-05, 0.0], [0.0, 0.0, \n 0.0017088585]])\n', (22022, 22105), True, 'import numpy as np\n'), ((22208, 22358), 'numpy.array', 'np.array', (['[[-0.1119908792, 0.8441841604, 0.0238725303], [0.2629347724, 0.4996718412, \n -0.0173023305], [-3.2192369082, 4.1536028244, 0.4514379215]]'], {}), '([[-0.1119908792, 0.8441841604, 0.0238725303], [0.2629347724, \n 0.4996718412, -0.0173023305], [-3.2192369082, 4.1536028244, 0.4514379215]])\n', (22216, 22358), True, 'import numpy as np\n'), ((22424, 22433), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (22430, 22433), True, 'import numpy as np\n'), ((22461, 22598), 'numpy.array', 'np.array', (['[[6.40649e-05, 3.88496e-05, 0.0002148769], [3.88496e-05, 5.72802e-05, \n 1.555e-06], [0.0002148769, 1.555e-06, 0.0017088585]]'], {}), '([[6.40649e-05, 3.88496e-05, 0.0002148769], [3.88496e-05, \n 5.72802e-05, 1.555e-06], [0.0002148769, 1.555e-06, 0.0017088585]])\n', (22469, 22598), True, 'import numpy as np\n'), ((23281, 23378), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state.T', "self.true[['state1', 'state2', 'state3']]"], {'atol': '(1e-07)'}), 
"(sim.simulated_state.T, self.true[['state1', 'state2',\n 'state3']], atol=1e-07)\n", (23296, 23378), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((23430, 23539), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_measurement_disturbance', "self.true[['eps1', 'eps2', 'eps3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_measurement_disturbance, self.true[['eps1',\n 'eps2', 'eps3']].T, atol=1e-07)\n", (23445, 23539), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((23591, 23694), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state_disturbance', "self.true[['eta1', 'eta2', 'eta3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_state_disturbance, self.true[['eta1', 'eta2',\n 'eta3']].T, atol=1e-07)\n", (23606, 23694), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((23756, 23786), 'numpy.zeros', 'np.zeros', (['(3, self.model.nobs)'], {}), '((3, self.model.nobs))\n', (23764, 23786), True, 'import numpy as np\n'), ((23901, 23989), 'numpy.testing.assert_allclose', 'assert_allclose', (['signals', "self.true[['signal1', 'signal2', 'signal3']].T"], {'atol': '(1e-07)'}), "(signals, self.true[['signal1', 'signal2', 'signal3']].T,\n atol=1e-07)\n", (23916, 23989), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((24191, 24278), 'os.path.join', 'os.path.join', (['current_path', '"""results"""', '"""results_simulation_smoothing3_variates.csv"""'], {}), "(current_path, 'results',\n 'results_simulation_smoothing3_variates.csv')\n", (24203, 24278), False, 'import os\n'), ((24376, 24450), 'os.path.join', 'os.path.join', (['current_path', '"""results"""', '"""results_simulation_smoothing3.csv"""'], {}), "(current_path, 'results', 'results_simulation_smoothing3.csv')\n", (24388, 24450), False, 'import os\n'), ((24498, 24515), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (24509, 24515), True, 'import pandas as pd\n'), ((24627, 24659), 
'statsmodels.datasets.macrodata.load_pandas', 'datasets.macrodata.load_pandas', ([], {}), '()\n', (24657, 24659), False, 'from statsmodels import datasets\n'), ((26313, 26326), 'numpy.ones', 'np.ones', (['nobs'], {}), '(nobs)\n', (26320, 26326), True, 'import numpy as np\n'), ((26740, 26753), 'numpy.ones', 'np.ones', (['nobs'], {}), '(nobs)\n', (26747, 26753), True, 'import numpy as np\n'), ((27262, 27275), 'numpy.ones', 'np.ones', (['nobs'], {}), '(nobs)\n', (27269, 27275), True, 'import numpy as np\n'), ((914, 946), 'statsmodels.datasets.macrodata.load_pandas', 'datasets.macrodata.load_pandas', ([], {}), '()\n', (944, 946), False, 'from statsmodels import datasets\n'), ((2687, 2715), 'numpy.sum', 'np.sum', (['self.results.llf_obs'], {}), '(self.results.llf_obs)\n', (2693, 2715), True, 'import numpy as np\n'), ((3211, 3244), 'numpy.zeros', 'np.zeros', (['(n, self.model.k_endog)'], {}), '((n, self.model.k_endog))\n', (3219, 3244), True, 'import numpy as np\n'), ((3278, 3312), 'numpy.zeros', 'np.zeros', (['(n, self.model.k_states)'], {}), '((n, self.model.k_states))\n', (3286, 3312), True, 'import numpy as np\n'), ((4015, 4049), 'numpy.zeros', 'np.zeros', (['(n, self.model.k_states)'], {}), '((n, self.model.k_states))\n', (4023, 4049), True, 'import numpy as np\n'), ((4377, 4405), 'numpy.ones', 'np.ones', (['self.model.k_states'], {}), '(self.model.k_states)\n', (4384, 4405), True, 'import numpy as np\n'), ((4939, 4966), 'numpy.dot', 'np.dot', (['Z', 'desired_state[i]'], {}), '(Z, desired_state[i])\n', (4945, 4966), True, 'import numpy as np\n'), ((7283, 7389), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_measurement_disturbance', 'self.results.smoothed_measurement_disturbance'], {}), '(sim.simulated_measurement_disturbance, self.results.\n smoothed_measurement_disturbance)\n', (7298, 7389), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7635, 7709), 'os.path.join', 'os.path.join', (['current_path', '"""results"""', 
'"""results_simulation_smoothing0.csv"""'], {}), "(current_path, 'results', 'results_simulation_smoothing0.csv')\n", (7647, 7709), False, 'import os\n'), ((7761, 7778), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (7772, 7778), True, 'import pandas as pd\n'), ((7792, 7884), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state', "true[['state1', 'state2', 'state3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_state, true[['state1', 'state2', 'state3']].T,\n atol=1e-07)\n", (7807, 7884), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7948, 8052), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_measurement_disturbance', "true[['eps1', 'eps2', 'eps3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_measurement_disturbance, true[['eps1', 'eps2',\n 'eps3']].T, atol=1e-07)\n", (7963, 8052), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8116, 8214), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state_disturbance', "true[['eta1', 'eta2', 'eta3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_state_disturbance, true[['eta1', 'eta2',\n 'eta3']].T, atol=1e-07)\n", (8131, 8214), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8288, 8318), 'numpy.zeros', 'np.zeros', (['(3, self.model.nobs)'], {}), '((3, self.model.nobs))\n', (8296, 8318), True, 'import numpy as np\n'), ((8445, 8524), 'numpy.testing.assert_allclose', 'assert_allclose', (['signals', "true[['signal1', 'signal2', 'signal3']].T"], {'atol': '(1e-07)'}), "(signals, true[['signal1', 'signal2', 'signal3']].T, atol=1e-07)\n", (8460, 8524), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9431, 9480), 'numpy.dot', 'np.dot', (['chol', 'measurement_disturbance_variates[t]'], {}), '(chol, measurement_disturbance_variates[t])\n', (9437, 9480), True, 'import numpy as np\n'), ((11159, 11252), 'numpy.testing.assert_allclose', 'assert_allclose', 
(['sim.simulated_measurement_disturbance', 'simulated_measurement_disturbance'], {}), '(sim.simulated_measurement_disturbance,\n simulated_measurement_disturbance)\n', (11174, 11252), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11487, 11561), 'os.path.join', 'os.path.join', (['current_path', '"""results"""', '"""results_simulation_smoothing1.csv"""'], {}), "(current_path, 'results', 'results_simulation_smoothing1.csv')\n", (11499, 11561), False, 'import os\n'), ((11613, 11630), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (11624, 11630), True, 'import pandas as pd\n'), ((11643, 11735), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state', "true[['state1', 'state2', 'state3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_state, true[['state1', 'state2', 'state3']].T,\n atol=1e-07)\n", (11658, 11735), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11799, 11903), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_measurement_disturbance', "true[['eps1', 'eps2', 'eps3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_measurement_disturbance, true[['eps1', 'eps2',\n 'eps3']].T, atol=1e-07)\n", (11814, 11903), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11967, 12065), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state_disturbance', "true[['eta1', 'eta2', 'eta3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_state_disturbance, true[['eta1', 'eta2',\n 'eta3']].T, atol=1e-07)\n", (11982, 12065), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12139, 12169), 'numpy.zeros', 'np.zeros', (['(3, self.model.nobs)'], {}), '((3, self.model.nobs))\n', (12147, 12169), True, 'import numpy as np\n'), ((12296, 12375), 'numpy.testing.assert_allclose', 'assert_allclose', (['signals', "true[['signal1', 'signal2', 'signal3']].T"], {'atol': '(1e-07)'}), "(signals, true[['signal1', 'signal2', 'signal3']].T, 
atol=1e-07)\n", (12311, 12375), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((13554, 13603), 'numpy.dot', 'np.dot', (['chol', 'measurement_disturbance_variates[t]'], {}), '(chol, measurement_disturbance_variates[t])\n', (13560, 13603), True, 'import numpy as np\n'), ((13861, 13904), 'numpy.dot', 'np.dot', (['chol', 'state_disturbance_variates[t]'], {}), '(chol, state_disturbance_variates[t])\n', (13867, 13904), True, 'import numpy as np\n'), ((14213, 14249), 'numpy.dot', 'np.dot', (['chol', 'initial_state_variates'], {}), '(chol, initial_state_variates)\n', (14219, 14249), True, 'import numpy as np\n'), ((16326, 16423), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_measurement_disturbance.T', 'simulated_measurement_disturbance.T'], {}), '(sim.simulated_measurement_disturbance.T,\n simulated_measurement_disturbance.T)\n', (16341, 16423), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16658, 16732), 'os.path.join', 'os.path.join', (['current_path', '"""results"""', '"""results_simulation_smoothing2.csv"""'], {}), "(current_path, 'results', 'results_simulation_smoothing2.csv')\n", (16670, 16732), False, 'import os\n'), ((16784, 16801), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (16795, 16801), True, 'import pandas as pd\n'), ((16814, 16906), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state.T', "true[['state1', 'state2', 'state3']]"], {'atol': '(1e-07)'}), "(sim.simulated_state.T, true[['state1', 'state2', 'state3']],\n atol=1e-07)\n", (16829, 16906), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16970, 17074), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_measurement_disturbance', "true[['eps1', 'eps2', 'eps3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_measurement_disturbance, true[['eps1', 'eps2',\n 'eps3']].T, atol=1e-07)\n", (16985, 17074), False, 'from numpy.testing import assert_allclose, 
assert_equal\n'), ((17138, 17236), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.simulated_state_disturbance', "true[['eta1', 'eta2', 'eta3']].T"], {'atol': '(1e-07)'}), "(sim.simulated_state_disturbance, true[['eta1', 'eta2',\n 'eta3']].T, atol=1e-07)\n", (17153, 17236), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17310, 17340), 'numpy.zeros', 'np.zeros', (['(3, self.model.nobs)'], {}), '((3, self.model.nobs))\n', (17318, 17340), True, 'import numpy as np\n'), ((17467, 17546), 'numpy.testing.assert_allclose', 'assert_allclose', (['signals', "true[['signal1', 'signal2', 'signal3']].T"], {'atol': '(1e-07)'}), "(signals, true[['signal1', 'signal2', 'signal3']].T, atol=1e-07)\n", (17482, 17546), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((19283, 19315), 'statsmodels.datasets.macrodata.load_pandas', 'datasets.macrodata.load_pandas', ([], {}), '()\n', (19313, 19315), False, 'from statsmodels import datasets\n'), ((20360, 20406), 'numpy.array', 'np.array', (['[0.0622668, 1.95666886, 58.37473642]'], {}), '([0.0622668, 1.95666886, 58.37473642])\n', (20368, 20406), True, 'import numpy as np\n'), ((21206, 21238), 'statsmodels.datasets.macrodata.load_pandas', 'datasets.macrodata.load_pandas', ([], {}), '()\n', (21236, 21238), False, 'from statsmodels import datasets\n'), ((22917, 22945), 'numpy.sum', 'np.sum', (['self.results.llf_obs'], {}), '(self.results.llf_obs)\n', (22923, 22945), True, 'import numpy as np\n'), ((23856, 23892), 'numpy.dot', 'np.dot', (['Z', 'sim.simulated_state[:, t]'], {}), '(Z, sim.simulated_state[:, t])\n', (23862, 23892), True, 'import numpy as np\n'), ((26402, 26415), 'numpy.ones', 'np.ones', (['nobs'], {}), '(nobs)\n', (26409, 26415), True, 'import numpy as np\n'), ((26522, 26544), 'numpy.zeros', 'np.zeros', (['(mod.nobs * 2)'], {}), '(mod.nobs * 2)\n', (26530, 26544), True, 'import numpy as np\n'), ((26586, 26597), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (26594, 26597), 
True, 'import numpy as np\n'), ((27028, 27050), 'numpy.zeros', 'np.zeros', (['(mod.nobs * 2)'], {}), '(mod.nobs * 2)\n', (27036, 27050), True, 'import numpy as np\n'), ((27092, 27103), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (27100, 27103), True, 'import numpy as np\n'), ((27597, 27619), 'numpy.zeros', 'np.zeros', (['(mod.nobs * 2)'], {}), '(mod.nobs * 2)\n', (27605, 27619), True, 'import numpy as np\n'), ((27661, 27672), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (27669, 27672), True, 'import numpy as np\n'), ((28054, 28076), 'numpy.zeros', 'np.zeros', (['(mod.nobs * 2)'], {}), '(mod.nobs * 2)\n', (28062, 28076), True, 'import numpy as np\n'), ((28118, 28129), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (28126, 28129), True, 'import numpy as np\n'), ((3474, 3507), 'numpy.arange', 'np.arange', (['(n * self.model.k_endog)'], {}), '(n * self.model.k_endog)\n', (3483, 3507), True, 'import numpy as np\n'), ((4862, 4893), 'numpy.dot', 'np.dot', (['T', 'desired_state[i - 1]'], {}), '(T, desired_state[i - 1])\n', (4868, 4893), True, 'import numpy as np\n'), ((6820, 6852), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (6828, 6852), True, 'import numpy as np\n'), ((6898, 6927), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (6906, 6927), True, 'import numpy as np\n'), ((8396, 8432), 'numpy.dot', 'np.dot', (['Z', 'sim.simulated_state[:, t]'], {}), '(Z, sim.simulated_state[:, t])\n', (8402, 8432), True, 'import numpy as np\n'), ((8861, 8908), 'numpy.arange', 'np.arange', (['(self.model.nobs * self.model.k_endog)'], {}), '(self.model.nobs * self.model.k_endog)\n', (8870, 8908), True, 'import numpy as np\n'), ((9071, 9122), 'numpy.zeros', 'np.zeros', (['(self.model.nobs * self.model.ssm.k_posdef)'], {}), '(self.model.nobs * self.model.ssm.k_posdef)\n', (9079, 9122), True, 'import numpy as np\n'), ((10672, 10701), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), 
'(self.model.k_states)\n', (10680, 10701), True, 'import numpy as np\n'), ((12247, 12283), 'numpy.dot', 'np.dot', (['Z', 'sim.simulated_state[:, t]'], {}), '(Z, sim.simulated_state[:, t])\n', (12253, 12283), True, 'import numpy as np\n'), ((12761, 12808), 'numpy.arange', 'np.arange', (['(self.model.nobs * self.model.k_endog)'], {}), '(self.model.nobs * self.model.k_endog)\n', (12770, 12808), True, 'import numpy as np\n'), ((12928, 12980), 'numpy.arange', 'np.arange', (['(self.model.nobs * self.model.ssm.k_posdef)'], {}), '(self.model.nobs * self.model.ssm.k_posdef)\n', (12937, 12980), True, 'import numpy as np\n'), ((14331, 14363), 'numpy.dot', 'np.dot', (['T', 'generated_state[:, t]'], {}), '(T, generated_state[:, t])\n', (14337, 14363), True, 'import numpy as np\n'), ((14477, 14509), 'numpy.dot', 'np.dot', (['Z', 'generated_state[:, t]'], {}), '(Z, generated_state[:, t])\n', (14483, 14509), True, 'import numpy as np\n'), ((15820, 15849), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (15828, 15849), True, 'import numpy as np\n'), ((17418, 17454), 'numpy.dot', 'np.dot', (['Z', 'sim.simulated_state[:, t]'], {}), '(Z, sim.simulated_state[:, t])\n', (17424, 17454), True, 'import numpy as np\n'), ((3904, 3937), 'numpy.arange', 'np.arange', (['(n * self.model.k_endog)'], {}), '(n * self.model.k_endog)\n', (3913, 3937), True, 'import numpy as np\n'), ((24326, 24343), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (24337, 24343), True, 'import pandas as pd\n'), ((24783, 24830), 'numpy.log', 'np.log', (["dta[['realgdp', 'realcons', 'realinv']]"], {}), "(dta[['realgdp', 'realcons', 'realinv']])\n", (24789, 24830), True, 'import numpy as np\n'), ((1082, 1129), 'numpy.log', 'np.log', (["dta[['realgdp', 'realcons', 'realinv']]"], {}), "(dta[['realgdp', 'realcons', 'realinv']])\n", (1088, 1129), True, 'import numpy as np\n'), ((21374, 21421), 'numpy.log', 'np.log', (["dta[['realgdp', 'realcons', 'realinv']]"], {}), 
"(dta[['realgdp', 'realcons', 'realinv']])\n", (21380, 21421), True, 'import numpy as np\n'), ((19508, 19522), 'numpy.log', 'np.log', (['levels'], {}), '(levels)\n', (19514, 19522), True, 'import numpy as np\n')] |
import os
import numpy as np
import pytest
from napari import Viewer, layers
from napari._tests.utils import (
add_layer_by_type,
check_view_transform_consistency,
check_viewer_functioning,
layer_test_data,
)
from napari.utils._tests.test_naming import eval_with_filename
def test_viewer(make_test_viewer):
    """Instantiate a viewer and exercise its basic API surface."""
    viewer = make_test_viewer()
    qt_view = viewer.window.qt_viewer

    # Freshly created viewer: default title, empty layer list, 2D dims.
    assert viewer.title == 'napari'
    assert qt_view.viewer == viewer
    assert len(viewer.layers) == 0
    assert qt_view.layers.vbox_layout.count() == 2
    assert viewer.dims.ndim == 2
    assert qt_view.dims.nsliders == viewer.dims.ndim
    assert np.sum(qt_view.dims._displayed_sliders) == 0

    # Round-trip between 3D and 2D rendering modes.
    for ndisplay in (3, 2):
        viewer.dims.ndisplay = ndisplay
        assert viewer.dims.ndisplay == ndisplay

    # Exercise every class-level key binding, skipping the problematic ones.
    for func in viewer.class_keymap.values():
        if func.__name__ == 'play':
            continue
        # fullscreen toggling is only exercised on CI
        if func.__name__ == 'toggle_fullscreen' and not os.getenv("CI"):
            continue
        func(viewer)
@pytest.mark.run(order=1)  # provided by pytest-ordering
def test_no_qt_loop():
    """Test informative error raised when no Qt event loop exists.

    Logically, this test should go at the top of the file. However, that
    resulted in tests passing when only this file was run, but failing when
    other tests involving Qt-bot were run before this file. Putting this
    test second provides a sanity check that pytest-ordering is correctly
    doing its magic.
    """
    with pytest.raises(RuntimeError):
        Viewer()
@pytest.mark.parametrize('layer_class, data, ndim', layer_test_data)
@pytest.mark.parametrize('visible', [True, False])
def test_add_layer(make_test_viewer, layer_class, data, ndim, visible):
    """Adding a layer of each type leaves the viewer fully functional."""
    viewer = make_test_viewer()
    layer = add_layer_by_type(viewer, layer_class, data, visible=visible)
    check_viewer_functioning(viewer, viewer.window.qt_viewer, data, ndim)

    # Every key binding declared on the layer class must be callable.
    for binding in layer.class_keymap.values():
        binding(layer)
@pytest.mark.parametrize('layer_class, a_unique_name, ndim', layer_test_data)
def test_add_layer_magic_name(
    make_test_viewer, layer_class, a_unique_name, ndim
):
    """Test magic_name works when using add_* for layers.

    Regression test for issue #1709: the layer should pick up the name of
    the variable it was assigned to in the calling frame.
    """
    viewer = make_test_viewer()  # noqa: F841
    layer = eval_with_filename(
        "add_layer_by_type(viewer, layer_class, a_unique_name)", "somefile.py",
    )
    assert layer.name == "a_unique_name"
def test_screenshot(make_test_viewer):
    """Screenshots work for both the canvas alone and the whole window."""
    viewer = make_test_viewer()
    np.random.seed(0)

    # Populate the viewer with one layer of each basic type.
    viewer.add_image(np.random.random((10, 15)))
    viewer.add_labels(np.random.randint(20, size=(10, 15)))
    viewer.add_points(20 * np.random.random((10, 2)))
    viewer.add_vectors(20 * np.random.random((10, 2, 2)))
    viewer.add_shapes(20 * np.random.random((10, 4, 2)))

    # Both screenshot flavours must come back as 3D (RGB/RGBA) arrays.
    for canvas_only in (True, False):
        screenshot = viewer.screenshot(canvas_only=canvas_only)
        assert screenshot.ndim == 3
def test_changing_theme(make_test_viewer):
    """Switching themes must repaint essentially the whole window."""
    viewer = make_test_viewer()
    viewer.add_points(data=None)

    assert viewer.palette['folder'] == 'dark'
    shot_dark = viewer.screenshot(canvas_only=False)

    viewer.theme = 'light'
    assert viewer.palette['folder'] == 'light'
    shot_light = viewer.screenshot(canvas_only=False)

    # A pixel counts as unchanged only if every colour channel matches.
    unchanged = (shot_dark == shot_light).min(-1)
    # Fewer than 5% of the pixels may be identical between the two themes.
    assert (np.count_nonzero(unchanged) / unchanged.size) < 0.05, "Themes too similar"

    with pytest.raises(ValueError):
        viewer.theme = 'nonexistent_theme'
@pytest.mark.parametrize('layer_class, data, ndim', layer_test_data)
def test_roll_traspose_update(make_test_viewer, layer_class, data, ndim):
    """Check that transpose and roll preserve correct transform sequence."""
    viewer = make_test_viewer()
    np.random.seed(0)
    layer = add_layer_by_type(viewer, layer_class, data)

    # Random translate/scale, stored as float32 to match the visual layer.
    transf_dict = {
        'translate': np.random.randint(0, 10, ndim).astype(np.float32),
        'scale': np.random.rand(ndim).astype(np.float32),
    }
    for name, value in transf_dict.items():
        setattr(layer, name, value)
    if layer_class in [layers.Image, layers.Labels]:
        transf_dict['translate'] -= transf_dict['scale'] / 2

    # The view transform must stay consistent initially and after each
    # dims manipulation.
    check_view_transform_consistency(layer, viewer, transf_dict)
    viewer.dims._roll()
    check_view_transform_consistency(layer, viewer, transf_dict)
    viewer.dims._transpose()
    check_view_transform_consistency(layer, viewer, transf_dict)
def test_toggling_axes(make_test_viewer):
    """Axes visibility toggles on, survives 3D mode, and toggles off."""
    viewer = make_test_viewer()
    axes = viewer.axes

    # Hidden by default.
    assert not axes.visible
    axes.visible = True
    assert axes.visible
    # Entering 3D rendering must not hide the axes.
    viewer.dims.ndisplay = 3
    assert axes.visible
    axes.visible = False
    assert not axes.visible
def test_toggling_scale_bar(make_test_viewer):
    """Scale bar toggles on, survives 3D mode, and toggles off."""
    viewer = make_test_viewer()
    scale_bar = viewer.scale_bar

    # Hidden by default.
    assert not scale_bar.visible
    scale_bar.visible = True
    assert scale_bar.visible
    # Entering 3D rendering must not hide the scale bar.
    viewer.dims.ndisplay = 3
    assert scale_bar.visible
    scale_bar.visible = False
    assert not scale_bar.visible
| [
"napari.utils._tests.test_naming.eval_with_filename",
"napari.Viewer",
"numpy.random.rand",
"os.getenv",
"pytest.mark.run",
"numpy.random.random",
"napari._tests.utils.check_view_transform_consistency",
"napari._tests.utils.check_viewer_functioning",
"numpy.count_nonzero",
"pytest.mark.parametrize... | [((1213, 1237), 'pytest.mark.run', 'pytest.mark.run', ([], {'order': '(1)'}), '(order=1)\n', (1228, 1237), False, 'import pytest\n'), ((1747, 1814), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""layer_class, data, ndim"""', 'layer_test_data'], {}), "('layer_class, data, ndim', layer_test_data)\n", (1770, 1814), False, 'import pytest\n'), ((1816, 1865), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""visible"""', '[True, False]'], {}), "('visible', [True, False])\n", (1839, 1865), False, 'import pytest\n'), ((2220, 2296), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""layer_class, a_unique_name, ndim"""', 'layer_test_data'], {}), "('layer_class, a_unique_name, ndim', layer_test_data)\n", (2243, 2296), False, 'import pytest\n'), ((4206, 4273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""layer_class, data, ndim"""', 'layer_test_data'], {}), "('layer_class, data, ndim', layer_test_data)\n", (4229, 4273), False, 'import pytest\n'), ((1982, 2043), 'napari._tests.utils.add_layer_by_type', 'add_layer_by_type', (['viewer', 'layer_class', 'data'], {'visible': 'visible'}), '(viewer, layer_class, data, visible=visible)\n', (1999, 2043), False, 'from napari._tests.utils import add_layer_by_type, check_view_transform_consistency, check_viewer_functioning, layer_test_data\n'), ((2048, 2117), 'napari._tests.utils.check_viewer_functioning', 'check_viewer_functioning', (['viewer', 'viewer.window.qt_viewer', 'data', 'ndim'], {}), '(viewer, viewer.window.qt_viewer, data, ndim)\n', (2072, 2117), False, 'from napari._tests.utils import add_layer_by_type, check_view_transform_consistency, check_viewer_functioning, layer_test_data\n'), ((2532, 2626), 'napari.utils._tests.test_naming.eval_with_filename', 'eval_with_filename', (['"""add_layer_by_type(viewer, layer_class, a_unique_name)"""', '"""somefile.py"""'], {}), "('add_layer_by_type(viewer, layer_class, a_unique_name)',\n 'somefile.py')\n", (2550, 
2626), False, 'from napari.utils._tests.test_naming import eval_with_filename\n'), ((2793, 2810), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2807, 2810), True, 'import numpy as np\n'), ((2838, 2864), 'numpy.random.random', 'np.random.random', (['(10, 15)'], {}), '((10, 15))\n', (2854, 2864), True, 'import numpy as np\n'), ((2921, 2957), 'numpy.random.randint', 'np.random.randint', (['(20)'], {'size': '(10, 15)'}), '(20, size=(10, 15))\n', (2938, 2957), True, 'import numpy as np\n'), ((4463, 4480), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4477, 4480), True, 'import numpy as np\n'), ((4494, 4538), 'napari._tests.utils.add_layer_by_type', 'add_layer_by_type', (['viewer', 'layer_class', 'data'], {}), '(viewer, layer_class, data)\n', (4511, 4538), False, 'from napari._tests.utils import add_layer_by_type, check_view_transform_consistency, check_viewer_functioning, layer_test_data\n'), ((4985, 5045), 'napari._tests.utils.check_view_transform_consistency', 'check_view_transform_consistency', (['layer', 'viewer', 'transf_dict'], {}), '(layer, viewer, transf_dict)\n', (5017, 5045), False, 'from napari._tests.utils import add_layer_by_type, check_view_transform_consistency, check_viewer_functioning, layer_test_data\n'), ((5108, 5168), 'napari._tests.utils.check_view_transform_consistency', 'check_view_transform_consistency', (['layer', 'viewer', 'transf_dict'], {}), '(layer, viewer, transf_dict)\n', (5140, 5168), False, 'from napari._tests.utils import add_layer_by_type, check_view_transform_consistency, check_viewer_functioning, layer_test_data\n'), ((5236, 5296), 'napari._tests.utils.check_view_transform_consistency', 'check_view_transform_consistency', (['layer', 'viewer', 'transf_dict'], {}), '(layer, viewer, transf_dict)\n', (5268, 5296), False, 'from napari._tests.utils import add_layer_by_type, check_view_transform_consistency, check_viewer_functioning, layer_test_data\n'), ((680, 716), 'numpy.sum', 'np.sum', 
(['view.dims._displayed_sliders'], {}), '(view.dims._displayed_sliders)\n', (686, 716), True, 'import numpy as np\n'), ((1694, 1721), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1707, 1721), False, 'import pytest\n'), ((1735, 1743), 'napari.Viewer', 'Viewer', ([], {}), '()\n', (1741, 1743), False, 'from napari import Viewer, layers\n'), ((3020, 3045), 'numpy.random.random', 'np.random.random', (['(10, 2)'], {}), '((10, 2))\n', (3036, 3045), True, 'import numpy as np\n'), ((3109, 3137), 'numpy.random.random', 'np.random.random', (['(10, 2, 2)'], {}), '((10, 2, 2))\n', (3125, 3137), True, 'import numpy as np\n'), ((3201, 3229), 'numpy.random.random', 'np.random.random', (['(10, 4, 2)'], {}), '((10, 4, 2))\n', (3217, 3229), True, 'import numpy as np\n'), ((4133, 4158), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4146, 4158), False, 'import pytest\n'), ((4056, 4079), 'numpy.count_nonzero', 'np.count_nonzero', (['equal'], {}), '(equal)\n', (4072, 4079), True, 'import numpy as np\n'), ((1094, 1109), 'os.getenv', 'os.getenv', (['"""CI"""'], {}), "('CI')\n", (1103, 1109), False, 'import os\n'), ((4655, 4685), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'ndim'], {}), '(0, 10, ndim)\n', (4672, 4685), True, 'import numpy as np\n'), ((4723, 4743), 'numpy.random.rand', 'np.random.rand', (['ndim'], {}), '(ndim)\n', (4737, 4743), True, 'import numpy as np\n')] |
'''Module to manage and advanced game state'''
from collections import defaultdict
import numpy as np
from . import constants
from . import characters
from . import utility
class ForwardModel(object):
    """Class for helping with the [forward] modeling of the game state."""
    def run(self,
            num_times,
            board,
            agents,
            bombs,
            items,
            flames,
            is_partially_observable,
            agent_view_size,
            action_space,
            training_agent=None,
            is_communicative=False):
        """Run the forward model.
        Args:
          num_times: The number of times to run it for. This is a maximum and
            it will stop early if we reach a done.
          board: The board state to run it from.
          agents: The agents to use to run it.
          bombs: The starting bombs.
          items: The starting items.
          flames: The starting flames.
          is_partially_observable: Whether the board is partially observable or
            not. Only applies to TeamRadio.
          agent_view_size: If it's partially observable, then the size of the
            square that the agent can view.
          action_space: The actions that each agent can take.
          training_agent: The training agent to pass to done.
          is_communicative: Whether the action depends on communication
            observations as well.
        Returns:
          steps: The list of step results, which are each a dict of "obs",
            "next_obs", "reward", "action".
          board: Updated board.
          agents: Updated agents, same models though.
          bombs: Updated bombs.
          items: Updated items.
          flames: Updated flames.
          done: Whether we completed the game in these steps.
          info: The result of the game if it's completed.
        """
        steps = []
        # NOTE(review): `num_times` is iterated directly, but the docstring
        # says it is a count; `range(num_times)` looks intended — confirm.
        for _ in num_times:
            # NOTE(review): get_observations() below is defined with
            # (flames, game_type, game_env) parameters as well; this
            # 5-argument call does not match that signature — verify.
            obs = self.get_observations(
                board, agents, bombs, is_partially_observable, agent_view_size)
            actions = self.act(
                agents, obs, action_space, is_communicative=is_communicative)
            board, agents, bombs, items, flames = self.step(
                actions, board, agents, bombs, items, flames)
            next_obs = self.get_observations(
                board, agents, bombs, is_partially_observable, agent_view_size)
            # NOTE(review): `game_type`, `step_count` and `max_steps` are not
            # defined anywhere in this scope (NameError if this path runs).
            # Also note get_done() declares (agents, step_count, max_steps,
            # game_type, training_agent) — the argument order here differs.
            reward = self.get_rewards(agents, game_type, step_count, max_steps)
            done = self.get_done(agents, game_type, step_count, max_steps,
                                 training_agent)
            # NOTE(review): `rewards` is undefined here; `reward` computed
            # above appears to be what was intended.
            info = self.get_info(done, rewards, game_type, agents)
            steps.append({
                "obs": obs,
                "next_obs": next_obs,
                "reward": reward,
                "actions": actions,
            })
            if done:
                # Callback to let the agents know that the game has ended.
                for agent in agents:
                    agent.episode_end(reward[agent.agent_id])
                break
        return steps, board, agents, bombs, items, flames, done, info
    @staticmethod
    def act(agents, obs, action_space, is_communicative=False):
        """Returns actions for each agent in this list.
        Args:
          agents: A list of agent objects.
          obs: A list of matching observations per agent.
          action_space: The action space for the environment using this model.
          is_communicative: Whether the action depends on communication
            observations as well.
        Returns a list of actions.
        """
        def act_ex_communication(agent):
            '''Handles agent's move without communication'''
            if agent.is_alive:
                return agent.act(obs[agent.agent_id], action_space=action_space)
            else:
                return constants.Action.Stop.value
        def act_with_communication(agent):
            '''Handles agent's move with communication. Dead agents emit a
            Stop action plus two zeroed message slots.'''
            if agent.is_alive:
                action = agent.act(
                    obs[agent.agent_id], action_space=action_space)
                # A bare int action is padded with two zero message words.
                if type(action) == int:
                    action = [action] + [0, 0]
                assert (type(action) == list)
                return action
            else:
                return [constants.Action.Stop.value, 0, 0]
        ret = []
        for agent in agents:
            if is_communicative:
                ret.append(act_with_communication(agent))
            else:
                ret.append(act_ex_communication(agent))
        return ret
    @staticmethod
    def step(actions,
             curr_board,
             curr_agents,
             curr_bombs,
             curr_items,
             curr_flames,
             max_blast_strength=10):
        """Advance the game state one tick: tick/clear flames, resolve agent
        and bomb movement (including collisions and kicks), explode bombs,
        and apply flame damage. Returns the updated
        (board, agents, bombs, items, flames) tuple; agents/bombs/flames are
        mutated in place as well."""
        board_size = len(curr_board)
        # Tick the flames. Replace any dead ones with passages. If there is an
        # item there, then reveal that item.
        flames = []
        for flame in curr_flames:
            position = flame.position
            if flame.is_dead():
                item_value = curr_items.get(position)
                if item_value:
                    del curr_items[position]
                else:
                    item_value = constants.Item.Passage.value
                curr_board[position] = item_value
            else:
                flame.tick()
                flames.append(flame)
        curr_flames = flames
        # Redraw all current flames
        # Multiple flames may share a position and the map should contain
        # a flame until all flames are dead to avoid issues with bomb
        # movements and explosions.
        for flame in curr_flames:
            curr_board[flame.position] = constants.Item.Flames.value
        # Step the living agents and moving bombs.
        # If two agents try to go to the same spot, they should bounce back to
        # their previous spots. This is complicated with one example being when
        # there are three agents all in a row. If the one in the middle tries
        # to go to the left and bounces with the one on the left, and then the
        # one on the right tried to go to the middle one's position, she should
        # also bounce. A way of doing this is to gather all the new positions
        # before taking any actions. Then, if there are disputes, correct those
        # disputes iteratively.
        # Additionally, if two agents try to switch spots by moving into each
        # other's position, both are reverted (see the crossing logic below).
        # Figure out desired next position for alive agents
        alive_agents = [agent for agent in curr_agents if agent.is_alive]
        desired_agent_positions = [agent.position for agent in alive_agents]
        for num_agent, agent in enumerate(alive_agents):
            position = agent.position
            # We change the curr_board here as a safeguard. We will later
            # update the agent's new position.
            curr_board[position] = constants.Item.Passage.value
            action = actions[agent.agent_id]
            if action == constants.Action.Stop.value:
                pass
            elif action == constants.Action.Bomb.value:
                position = agent.position
                if not utility.position_is_bomb(curr_bombs, position):
                    bomb = agent.maybe_lay_bomb()
                    if bomb:
                        curr_bombs.append(bomb)
            elif utility.is_valid_direction(curr_board, position, action):
                desired_agent_positions[num_agent] = agent.get_next_position(
                    action)
        # Gather desired next positions for moving bombs. Handle kicks later.
        desired_bomb_positions = [bomb.position for bomb in curr_bombs]
        for num_bomb, bomb in enumerate(curr_bombs):
            curr_board[bomb.position] = constants.Item.Passage.value
            if bomb.is_moving():
                desired_position = utility.get_next_position(
                    bomb.position, bomb.moving_direction)
                if utility.position_on_board(curr_board, desired_position) \
                        and not utility.position_is_powerup(curr_board, desired_position) \
                        and not utility.position_is_wall(curr_board, desired_position):
                    desired_bomb_positions[num_bomb] = desired_position
        # Position switches:
        # Agent <-> Agent => revert both to previous position.
        # Bomb <-> Bomb => revert both to previous position.
        # Agent <-> Bomb => revert Bomb to previous position.
        crossings = {}
        def crossing(current, desired):
            '''Checks to see if an agent is crossing paths. Returns a
            canonical key for the edge between the two adjacent cells.'''
            current_x, current_y = current
            desired_x, desired_y = desired
            if current_x != desired_x:
                assert current_y == desired_y
                return ('X', min(current_x, desired_x), current_y)
            assert current_x == desired_x
            return ('Y', current_x, min(current_y, desired_y))
        for num_agent, agent in enumerate(alive_agents):
            if desired_agent_positions[num_agent] != agent.position:
                desired_position = desired_agent_positions[num_agent]
                border = crossing(agent.position, desired_position)
                if border in crossings:
                    # Crossed another agent - revert both to prior positions.
                    desired_agent_positions[num_agent] = agent.position
                    num_agent2, _ = crossings[border]
                    desired_agent_positions[num_agent2] = alive_agents[
                        num_agent2].position
                else:
                    crossings[border] = (num_agent, True)
        for num_bomb, bomb in enumerate(curr_bombs):
            if desired_bomb_positions[num_bomb] != bomb.position:
                desired_position = desired_bomb_positions[num_bomb]
                border = crossing(bomb.position, desired_position)
                if border in crossings:
                    # Crossed - revert to prior position.
                    desired_bomb_positions[num_bomb] = bomb.position
                    num, is_agent = crossings[border]
                    if not is_agent:
                        # Crossed bomb - revert that to prior position as well.
                        desired_bomb_positions[num] = curr_bombs[num].position
                else:
                    crossings[border] = (num_bomb, False)
        # Deal with multiple agents or multiple bomb collisions on desired next
        # position by resetting desired position to current position for
        # everyone involved in the collision.
        agent_occupancy = defaultdict(int)
        bomb_occupancy = defaultdict(int)
        for desired_position in desired_agent_positions:
            agent_occupancy[desired_position] += 1
        for desired_position in desired_bomb_positions:
            bomb_occupancy[desired_position] += 1
        # Resolve >=2 agents or >=2 bombs trying to occupy the same space.
        # Iterate until a fixed point: each revert can create new conflicts.
        change = True
        while change:
            change = False
            for num_agent, agent in enumerate(alive_agents):
                desired_position = desired_agent_positions[num_agent]
                curr_position = agent.position
                # Either another agent is going to this position or more than
                # one bomb is going to this position. In both scenarios, revert
                # to the original position.
                if desired_position != curr_position and \
                        (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1):
                    desired_agent_positions[num_agent] = curr_position
                    agent_occupancy[curr_position] += 1
                    change = True
            for num_bomb, bomb in enumerate(curr_bombs):
                desired_position = desired_bomb_positions[num_bomb]
                curr_position = bomb.position
                if desired_position != curr_position and \
                        (bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1):
                    desired_bomb_positions[num_bomb] = curr_position
                    bomb_occupancy[curr_position] += 1
                    change = True
        # Handle kicks.
        agent_indexed_by_kicked_bomb = {}
        kicked_bomb_indexed_by_agent = {}
        delayed_bomb_updates = []
        delayed_agent_updates = []
        # Loop through all bombs to see if they need a good kicking or cause
        # collisions with an agent.
        for num_bomb, bomb in enumerate(curr_bombs):
            desired_position = desired_bomb_positions[num_bomb]
            if agent_occupancy[desired_position] == 0:
                # There was never an agent around to kick or collide.
                continue
            agent_list = [
                (num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \
                if desired_position == desired_agent_positions[num_agent]]
            if not agent_list:
                # Agents moved from collision.
                continue
            # The agent_list should contain a single element at this point.
            assert (len(agent_list) == 1)
            num_agent, agent = agent_list[0]
            if desired_position == agent.position:
                # Agent did not move
                if desired_position != bomb.position:
                    # Bomb moved, but agent did not. The bomb should revert
                    # and stop.
                    delayed_bomb_updates.append((num_bomb, bomb.position))
                continue
            # NOTE: At this point, we have that the agent in question tried to
            # move into this position.
            if not agent.can_kick:
                # If we move the agent at this point, then we risk having two
                # agents on a square in future iterations of the loop. So we
                # push this change to the next stage instead.
                delayed_bomb_updates.append((num_bomb, bomb.position))
                delayed_agent_updates.append((num_agent, agent.position))
                continue
            # Agent moved and can kick - see if the target for the kick never had anything on it
            direction = constants.Action(actions[agent.agent_id])
            target_position = utility.get_next_position(desired_position,
                                                       direction)
            if utility.position_on_board(curr_board, target_position) and \
                    agent_occupancy[target_position] == 0 and \
                    bomb_occupancy[target_position] == 0 and \
                    not utility.position_is_powerup(curr_board, target_position) and \
                    not utility.position_is_wall(curr_board, target_position):
                # Ok to update bomb desired location as we won't iterate over it again here
                # but we can not update bomb_occupancy on target position and need to check it again
                # However we need to set the bomb count on the current position to zero so
                # that the agent can stay on this position.
                bomb_occupancy[desired_position] = 0
                delayed_bomb_updates.append((num_bomb, target_position))
                agent_indexed_by_kicked_bomb[num_bomb] = num_agent
                kicked_bomb_indexed_by_agent[num_agent] = num_bomb
                bomb.moving_direction = direction
                # Bombs may still collide and we then need to reverse bomb and agent ..
            else:
                delayed_bomb_updates.append((num_bomb, bomb.position))
                delayed_agent_updates.append((num_agent, agent.position))
        for (num_bomb, bomb_position) in delayed_bomb_updates:
            desired_bomb_positions[num_bomb] = bomb_position
            bomb_occupancy[bomb_position] += 1
            change = True
        for (num_agent, agent_position) in delayed_agent_updates:
            desired_agent_positions[num_agent] = agent_position
            agent_occupancy[agent_position] += 1
            change = True
        # Second fixed-point pass: undo kicks that were invalidated by late
        # collisions and resolve any remaining occupancy conflicts.
        while change:
            change = False
            for num_agent, agent in enumerate(alive_agents):
                desired_position = desired_agent_positions[num_agent]
                curr_position = agent.position
                # Agents and bombs can only share a square if they are both in their
                # original position (Agent dropped bomb and has not moved)
                if desired_position != curr_position and \
                        (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0):
                    # Late collisions resulting from failed kicks force this agent to stay at the
                    # original position. Check if this agent successfully kicked a bomb above and undo
                    # the kick.
                    if num_agent in kicked_bomb_indexed_by_agent:
                        num_bomb = kicked_bomb_indexed_by_agent[num_agent]
                        bomb = curr_bombs[num_bomb]
                        desired_bomb_positions[num_bomb] = bomb.position
                        bomb_occupancy[bomb.position] += 1
                        del agent_indexed_by_kicked_bomb[num_bomb]
                        del kicked_bomb_indexed_by_agent[num_agent]
                    desired_agent_positions[num_agent] = curr_position
                    agent_occupancy[curr_position] += 1
                    change = True
            for num_bomb, bomb in enumerate(curr_bombs):
                desired_position = desired_bomb_positions[num_bomb]
                curr_position = bomb.position
                # This bomb may be a boomerang, i.e. it was kicked back to the
                # original location it moved from. If it is blocked now, it
                # can't be kicked and the agent needs to move back to stay
                # consistent with other movements.
                if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb:
                    continue
                bomb_occupancy_ = bomb_occupancy[desired_position]
                agent_occupancy_ = agent_occupancy[desired_position]
                # Agents and bombs can only share a square if they are both in their
                # original position (Agent dropped bomb and has not moved)
                if bomb_occupancy_ > 1 or agent_occupancy_ != 0:
                    desired_bomb_positions[num_bomb] = curr_position
                    bomb_occupancy[curr_position] += 1
                    num_agent = agent_indexed_by_kicked_bomb.get(num_bomb)
                    if num_agent is not None:
                        agent = alive_agents[num_agent]
                        desired_agent_positions[num_agent] = agent.position
                        agent_occupancy[agent.position] += 1
                        del kicked_bomb_indexed_by_agent[num_agent]
                        del agent_indexed_by_kicked_bomb[num_bomb]
                    change = True
        for num_bomb, bomb in enumerate(curr_bombs):
            if desired_bomb_positions[num_bomb] == bomb.position and \
               not num_bomb in agent_indexed_by_kicked_bomb:
                # Bomb was not kicked this turn and its desired position is its
                # current location. Stop it just in case it was moving before.
                bomb.stop()
            else:
                # Move bomb to the new position.
                # NOTE: We already set the moving direction up above.
                bomb.position = desired_bomb_positions[num_bomb]
        for num_agent, agent in enumerate(alive_agents):
            if desired_agent_positions[num_agent] != agent.position:
                agent.move(actions[agent.agent_id])
                if utility.position_is_powerup(curr_board, agent.position):
                    agent.pick_up(
                        constants.Item(curr_board[agent.position]),
                        max_blast_strength=max_blast_strength)
        # Explode bombs.
        exploded_map = np.zeros_like(curr_board)
        has_new_explosions = False
        for bomb in curr_bombs:
            bomb.tick()
            if bomb.exploded():
                has_new_explosions = True
            elif curr_board[bomb.position] == constants.Item.Flames.value:
                bomb.fire()
                has_new_explosions = True
        # Chain the explosions.
        while has_new_explosions:
            next_bombs = []
            has_new_explosions = False
            for bomb in curr_bombs:
                if not bomb.exploded():
                    next_bombs.append(bomb)
                    continue
                bomb.bomber.incr_ammo()
                for _, indices in bomb.explode().items():
                    for r, c in indices:
                        # Blast rays stop at the board edge and at rigid
                        # walls; wood is destroyed but also stops the ray.
                        if not all(
                                [r >= 0, c >= 0, r < board_size, c < board_size]):
                            break
                        if curr_board[r][c] == constants.Item.Rigid.value:
                            break
                        exploded_map[r][c] = 1
                        if curr_board[r][c] == constants.Item.Wood.value:
                            break
            curr_bombs = next_bombs
            for bomb in curr_bombs:
                if bomb.in_range(exploded_map):
                    bomb.fire()
                    has_new_explosions = True
        # Update the board's bombs.
        for bomb in curr_bombs:
            curr_board[bomb.position] = constants.Item.Bomb.value
        # Update the board's flames.
        flame_positions = np.where(exploded_map == 1)
        for row, col in zip(flame_positions[0], flame_positions[1]):
            curr_flames.append(characters.Flame((row, col)))
        for flame in curr_flames:
            curr_board[flame.position] = constants.Item.Flames.value
        # Kill agents on flames. Otherwise, update position on curr_board.
        for agent in alive_agents:
            if curr_board[agent.position] == constants.Item.Flames.value:
                agent.die()
            else:
                curr_board[agent.position] = utility.agent_value(agent.agent_id)
        return curr_board, curr_agents, curr_bombs, curr_items, curr_flames
    def get_observations(self, curr_board, agents, bombs, flames,
                         is_partially_observable, agent_view_size,
                         game_type, game_env):
        """Gets the observations as an np.array of the visible squares.
        The agent gets to choose whether it wants to keep the fogged part in
        memory.
        """
        board_size = len(curr_board)
        def make_bomb_maps(position):
            ''' Makes an array of an agents bombs and the bombs attributes '''
            blast_strengths = np.zeros((board_size, board_size))
            life = np.zeros((board_size, board_size))
            moving_direction = np.zeros((board_size, board_size))
            for bomb in bombs:
                x, y = bomb.position
                if not is_partially_observable \
                        or in_view_range(position, x, y):
                    blast_strengths[(x, y)] = bomb.blast_strength
                    life[(x, y)] = bomb.life
                    if bomb.moving_direction is not None:
                        moving_direction[(x, y)] = bomb.moving_direction.value
            return blast_strengths, life, moving_direction
        def make_flame_map(position):
            ''' Makes an array of an agents flame life'''
            life = np.zeros((board_size, board_size))
            for flame in flames:
                x, y = flame.position
                if not is_partially_observable \
                        or in_view_range(position, x, y):
                    # +1 needed because flame removal check is done
                    # before flame is ticked down, i.e. flame life
                    # in environment is 2 -> 1 -> 0 -> dead
                    life[(x, y)] = flame.life + 1
            return life
        def in_view_range(position, v_row, v_col):
            '''Checks to see if a tile is in an agents viewing area'''
            row, col = position
            return all([
                row >= v_row - agent_view_size, row <= v_row + agent_view_size,
                col >= v_col - agent_view_size, col <= v_col + agent_view_size
            ])
        attrs = [
            'position', 'blast_strength', 'can_kick', 'teammate', 'ammo',
            'enemies'
        ]
        alive_agents = [
            utility.agent_value(agent.agent_id)
            for agent in agents
            if agent.is_alive
        ]
        observations = []
        for agent in agents:
            agent_obs = {'alive': alive_agents}
            board = curr_board.copy()
            if is_partially_observable:
                # Fog out everything outside this agent's view square.
                for row in range(board_size):
                    for col in range(board_size):
                        if not in_view_range(agent.position, row, col):
                            board[row, col] = constants.Item.Fog.value
            agent_obs['board'] = board
            bomb_blast_strengths, bomb_life, bomb_moving_direction = make_bomb_maps(agent.position)
            agent_obs['bomb_blast_strength'] = bomb_blast_strengths
            agent_obs['bomb_life'] = bomb_life
            agent_obs['bomb_moving_direction'] = bomb_moving_direction
            flame_life = make_flame_map(agent.position)
            agent_obs['flame_life'] = flame_life
            agent_obs['game_type'] = game_type.value
            agent_obs['game_env'] = game_env
            for attr in attrs:
                assert hasattr(agent, attr)
                agent_obs[attr] = getattr(agent, attr)
            observations.append(agent_obs)
        return observations
    @staticmethod
    def get_done(agents, step_count, max_steps, game_type, training_agent):
        """Returns True when the episode is over: max steps reached, the
        training agent died (FFA/OneVsOne), at most one agent remains, or a
        full team ([0, 2] or [1, 3]) is the only one left alive."""
        alive = [agent for agent in agents if agent.is_alive]
        alive_ids = sorted([agent.agent_id for agent in alive])
        if step_count >= max_steps:
            return True
        elif game_type == constants.GameType.FFA or game_type == constants.GameType.OneVsOne:
            if training_agent is not None and training_agent not in alive_ids:
                return True
            return len(alive) <= 1
        elif any([
                len(alive_ids) <= 1,
                alive_ids == [0, 2],
                alive_ids == [1, 3],
        ]):
            return True
        return False
    @staticmethod
    def get_info(done, rewards, game_type, agents):
        """Returns a result dict: Win (with 'winners'), Tie, or Incomplete."""
        if game_type == constants.GameType.FFA or game_type == constants.GameType.OneVsOne:
            alive = [agent for agent in agents if agent.is_alive]
            if done:
                if len(alive) != 1:
                    # Either we have more than 1 alive (reached max steps) or
                    # we have 0 alive (last agents died at the same time).
                    return {
                        'result': constants.Result.Tie,
                    }
                else:
                    return {
                        'result': constants.Result.Win,
                        'winners': [num for num, reward in enumerate(rewards) \
                                    if reward == 1]
                    }
            else:
                return {
                    'result': constants.Result.Incomplete,
                }
        elif done:
            # We are playing a team game.
            if rewards == [-1] * 4:
                return {
                    'result': constants.Result.Tie,
                }
            else:
                return {
                    'result': constants.Result.Win,
                    'winners': [num for num, reward in enumerate(rewards) \
                                if reward == 1],
                }
        else:
            return {
                'result': constants.Result.Incomplete,
            }
    @staticmethod
    def get_rewards(agents, game_type, step_count, max_steps):
        """Returns the per-agent reward list for the current state: +1 for
        winners, -1 for losers/ties-by-timeout, 0 (or -1 for the dead in FFA)
        while the game is still running."""
        def any_lst_equal(lst, values):
            '''Checks if list are equal'''
            return any([lst == v for v in values])
        alive_agents = [num for num, agent in enumerate(agents) \
                        if agent.is_alive]
        if game_type == constants.GameType.FFA:
            if len(alive_agents) == 1:
                # An agent won. Give them +1, others -1.
                return [2 * int(agent.is_alive) - 1 for agent in agents]
            elif step_count >= max_steps:
                # Game is over from time. Everyone gets -1.
                return [-1] * 4
            else:
                # Game running: 0 for alive, -1 for dead.
                return [int(agent.is_alive) - 1 for agent in agents]
        elif game_type == constants.GameType.OneVsOne:
            if len(alive_agents) == 1:
                # An agent won. Give them +1, the other -1.
                return [2 * int(agent.is_alive) - 1 for agent in agents]
            elif step_count >= max_steps:
                # Game is over from time. Everyone gets -1.
                return [-1] * 2
            else:
                # Game running
                return [0, 0]
        else:
            # We are playing a team game.
            if any_lst_equal(alive_agents, [[0, 2], [0], [2]]):
                # Team [0, 2] wins.
                return [1, -1, 1, -1]
            elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]):
                # Team [1, 3] wins.
                return [-1, 1, -1, 1]
            elif step_count >= max_steps:
                # Game is over by max_steps. All agents tie.
                return [-1] * 4
            elif len(alive_agents) == 0:
                # Everyone's dead. All agents tie.
                return [-1] * 4
            else:
                # No team has yet won or lost.
                return [0] * 4
| [
"numpy.where",
"numpy.zeros_like",
"collections.defaultdict",
"numpy.zeros"
] | [((10750, 10766), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (10761, 10766), False, 'from collections import defaultdict\n'), ((10792, 10808), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (10803, 10808), False, 'from collections import defaultdict\n'), ((20273, 20298), 'numpy.zeros_like', 'np.zeros_like', (['curr_board'], {}), '(curr_board)\n', (20286, 20298), True, 'import numpy as np\n'), ((21844, 21871), 'numpy.where', 'np.where', (['(exploded_map == 1)'], {}), '(exploded_map == 1)\n', (21852, 21871), True, 'import numpy as np\n'), ((23038, 23072), 'numpy.zeros', 'np.zeros', (['(board_size, board_size)'], {}), '((board_size, board_size))\n', (23046, 23072), True, 'import numpy as np\n'), ((23092, 23126), 'numpy.zeros', 'np.zeros', (['(board_size, board_size)'], {}), '((board_size, board_size))\n', (23100, 23126), True, 'import numpy as np\n'), ((23158, 23192), 'numpy.zeros', 'np.zeros', (['(board_size, board_size)'], {}), '((board_size, board_size))\n', (23166, 23192), True, 'import numpy as np\n'), ((23787, 23821), 'numpy.zeros', 'np.zeros', (['(board_size, board_size)'], {}), '((board_size, board_size))\n', (23795, 23821), True, 'import numpy as np\n')] |
"""
==================================
Faster rendering by using blitting
==================================
*Blitting* is a `standard technique
<https://en.wikipedia.org/wiki/Bit_blit>`__ in raster graphics that,
in the context of Matplotlib, can be used to (drastically) improve
performance of interactive figures. For example, the
:mod:`~.animation` and :mod:`~.widgets` modules use blitting
internally. Here, we demonstrate how to implement your own blitting, outside
of these classes.
Blitting speeds up repetitive drawing by rendering all non-changing
graphic elements into a background image once. Then, for every draw, only the
changing elements need to be drawn onto this background. For example,
if the limits of an Axes have not changed, we can render the empty Axes
including all ticks and labels once, and only draw the changing data later.
The strategy is
- Prepare the constant background:
- Draw the figure, but exclude all artists that you want to animate by
marking them as *animated* (see `.Artist.set_animated`).
- Save a copy of the RGBA buffer.
- Render the individual images:
- Restore the copy of the RGBA buffer.
- Redraw the animated artists using `.Axes.draw_artist` /
`.Figure.draw_artist`.
- Show the resulting image on the screen.
One consequence of this procedure is that your animated artists are always
drawn on top of the static artists.
Not all backends support blitting. You can check if a given canvas does via
the `.FigureCanvasBase.supports_blit` property.
.. warning::
This code does not work with the OSX backend (but does work with other
GUI backends on mac).
Minimal example
---------------
We can use the `.FigureCanvasAgg` methods
`~.FigureCanvasAgg.copy_from_bbox` and
`~.FigureCanvasAgg.restore_region` in conjunction with setting
``animated=True`` on our artist to implement a minimal example that
uses blitting to accelerate rendering
"""
import matplotlib.pyplot as plt
import numpy as np
# --- Minimal blitting example: grab the static background once, then ---
# --- restore + redraw only the animated line on each frame.          ---
x = np.linspace(0, 2 * np.pi, 100)
fig, ax = plt.subplots()
# animated=True tells matplotlib to only draw the artist when we
# explicitly request it
(ln,) = ax.plot(x, np.sin(x), animated=True)
# make sure the window is raised, but the script keeps going
plt.show(block=False)
# stop to admire our empty window axes and ensure it is rendered at
# least once.
#
# We need to fully draw the figure at its final size on the screen
# before we continue on so that :
#  a) we have the correctly sized and drawn background to grab
#  b) we have a cached renderer so that ``ax.draw_artist`` works
# so we spin the event loop to let the backend process any pending operations
plt.pause(0.1)
# get copy of entire figure (everything inside fig.bbox) sans animated artist
bg = fig.canvas.copy_from_bbox(fig.bbox)
# draw the animated artist, this uses a cached renderer
ax.draw_artist(ln)
# show the result to the screen, this pushes the updated RGBA buffer from the
# renderer to the GUI framework so you can see it
fig.canvas.blit(fig.bbox)
for j in range(100):
    # reset the background back in the canvas state, screen unchanged
    fig.canvas.restore_region(bg)
    # update the artist, neither the canvas state nor the screen have changed
    ln.set_ydata(np.sin(x + (j / 100) * np.pi))
    # re-render the artist, updating the canvas state, but not the screen
    ax.draw_artist(ln)
    # copy the image to the GUI state, but screen might not be changed yet
    fig.canvas.blit(fig.bbox)
    # flush any pending GUI events, re-painting the screen if needed
    fig.canvas.flush_events()
    # you can put a pause in if you want to slow things down
    # plt.pause(.1)
###############################################################################
# This example works and shows a simple animation, however because we
# are only grabbing the background once, if the size of the figure in
# pixels changes (due to either the size or dpi of the figure
# changing) , the background will be invalid and result in incorrect
# (but sometimes cool looking!) images. There is also a global
# variable and a fair amount of boiler plate which suggests we should
# wrap this in a class.
#
# Class-based example
# -------------------
#
# We can use a class to encapsulate the boilerplate logic and state of
# restoring the background, drawing the artists, and then blitting the
# result to the screen. Additionally, we can use the ``'draw_event'``
# callback to capture a new background whenever a full re-draw
# happens to handle resizes correctly.
class BlitManager:
    """Manage blitted redrawing of a fixed set of animated artists.

    The manager snapshots the figure background on every full draw and, on
    each :meth:`update`, restores that snapshot, redraws only the managed
    artists, and blits the result to the screen.
    """

    def __init__(self, canvas, animated_artists=()):
        """
        Parameters
        ----------
        canvas : FigureCanvasAgg
            The canvas to work with; must be an Agg-derived canvas providing
            the ``copy_from_bbox`` and ``restore_region`` methods.
        animated_artists : Iterable[Artist]
            Artists whose drawing this manager takes over.
        """
        self.canvas = canvas
        self._bg = None
        self._artists = []
        for artist in animated_artists:
            self.add_artist(artist)
        # Re-capture the background on every full redraw (handles resizes).
        self.cid = canvas.mpl_connect("draw_event", self.on_draw)

    def on_draw(self, event):
        """Callback registered on 'draw_event'; refreshes the background."""
        canvas = self.canvas
        if event is not None and event.canvas != canvas:
            raise RuntimeError
        self._bg = canvas.copy_from_bbox(canvas.figure.bbox)
        self._draw_animated()

    def add_artist(self, art):
        """Register *art* with the manager and mark it animated.

        *art* must belong to the figure of the managed canvas; otherwise a
        ``RuntimeError`` is raised.
        """
        if art.figure != self.canvas.figure:
            raise RuntimeError
        art.set_animated(True)
        self._artists.append(art)

    def _draw_animated(self):
        """Render every managed artist onto the canvas' figure."""
        figure = self.canvas.figure
        for artist in self._artists:
            figure.draw_artist(artist)

    def update(self):
        """Restore the background, redraw the artists, push to the screen."""
        canvas = self.canvas
        # Paranoia: if we never saw a draw event, grab the background now.
        if self._bg is None:
            self.on_draw(None)
        else:
            canvas.restore_region(self._bg)
            self._draw_animated()
        # Push the updated renderer state to the GUI framework.
        canvas.blit(canvas.figure.bbox)
        canvas.flush_events()
###############################################################################
# Here is how we would use our class. This is a slightly more complicated
# example than the first case as we add a text frame counter as well.
# --- Class-based example: same animation driven through BlitManager, ---
# --- with a frame-counter annotation as a second animated artist.    ---
# make a new figure
fig, ax = plt.subplots()
# add a line
(ln,) = ax.plot(x, np.sin(x), animated=True)
# add a frame number
fr_number = ax.annotate(
    "0",
    (0, 1),
    xycoords="axes fraction",
    xytext=(10, -10),
    textcoords="offset points",
    ha="left",
    va="top",
    animated=True,
)
bm = BlitManager(fig.canvas, [ln, fr_number])
# make sure our window is on the screen and drawn
plt.show(block=False)
plt.pause(.1)
for j in range(100):
    # update the artists
    ln.set_ydata(np.sin(x + (j / 100) * np.pi))
    fr_number.set_text("frame: {j}".format(j=j))
    # tell the blitting manager to do its thing
    bm.update()
###############################################################################
# This class does not depend on `.pyplot` and is suitable to embed
# into larger GUI application.
| [
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1984, 2014), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (1995, 2014), True, 'import numpy as np\n'), ((2026, 2040), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2038, 2040), True, 'import matplotlib.pyplot as plt\n'), ((2238, 2259), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2246, 2259), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2666), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (2661, 2666), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7031), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7029, 7031), True, 'import matplotlib.pyplot as plt\n'), ((7387, 7408), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7395, 7408), True, 'import matplotlib.pyplot as plt\n'), ((7409, 7423), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (7418, 7423), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2159), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2156, 2159), True, 'import numpy as np\n'), ((7064, 7073), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (7070, 7073), True, 'import numpy as np\n'), ((3237, 3264), 'numpy.sin', 'np.sin', (['(x + j / 100 * np.pi)'], {}), '(x + j / 100 * np.pi)\n', (3243, 3264), True, 'import numpy as np\n'), ((7487, 7514), 'numpy.sin', 'np.sin', (['(x + j / 100 * np.pi)'], {}), '(x + j / 100 * np.pi)\n', (7493, 7514), True, 'import numpy as np\n')] |
import numpy as np
def create_label_map(num_classes=19):
  """Build a 260-entry lookup table from raw label ids to train ids.

  Index = raw label id, value = train class index in [0, num_classes);
  every raw id without a training class maps to `num_classes` (the
  ignore bucket).
  NOTE(review): the id/name scheme looks like SemanticKITTI -- confirm.
  """
  # Raw dataset label ids keyed by class name.
  name_label_mapping = {
    'unlabeled': 0, 'outlier': 1, 'car': 10, 'bicycle': 11,
    'bus': 13, 'motorcycle': 15, 'on-rails': 16, 'truck': 18,
    'other-vehicle': 20, 'person': 30, 'bicyclist': 31,
    'motorcyclist': 32, 'road': 40, 'parking': 44,
    'sidewalk': 48, 'other-ground': 49, 'building': 50,
    'fence': 51, 'other-structure': 52, 'lane-marking': 60,
    'vegetation': 70, 'trunk': 71, 'terrain': 72, 'pole': 80,
    'traffic-sign': 81, 'other-object': 99, 'moving-car': 252,
    'moving-bicyclist': 253, 'moving-person': 254, 'moving-motorcyclist': 255,
    'moving-on-rails': 256, 'moving-bus': 257, 'moving-truck': 258,
    'moving-other-vehicle': 259
  }
  # Collapse every 'moving-*' class onto its static counterpart's id.
  for k in name_label_mapping:
    name_label_mapping[k] = name_label_mapping[k.replace('moving-', '')]
  # The training class names, indexed by train id.
  train_label_name_mapping = {
      0: 'car', 1: 'bicycle', 2: 'motorcycle', 3: 'truck', 4:
      'other-vehicle', 5: 'person', 6: 'bicyclist', 7: 'motorcyclist',
      8: 'road', 9: 'parking', 10: 'sidewalk', 11: 'other-ground',
      12: 'building', 13: 'fence', 14: 'vegetation', 15: 'trunk',
      16: 'terrain', 17: 'pole', 18: 'traffic-sign'
  }
  # Default every raw id to `num_classes` (ignore), then fill mapped ids.
  label_map = np.zeros(260)+num_classes
  for i in range(num_classes):
    cls_name = train_label_name_mapping[i]
    label_map[name_label_mapping[cls_name]] = min(num_classes,i)
  return label_map.astype(np.int64) | [
"numpy.zeros"
] | [((1284, 1297), 'numpy.zeros', 'np.zeros', (['(260)'], {}), '(260)\n', (1292, 1297), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CIFAR datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
# Shared constants
_CIFAR_IMAGE_SIZE = 32
_CIFAR_IMAGE_SHAPE = (_CIFAR_IMAGE_SIZE, _CIFAR_IMAGE_SIZE, 3)
_CITATION = """\
@TECHREPORT{Krizhevsky09learningmultiple,
author = {<NAME>},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009}
}
"""
class Cifar10(tfds.core.GeneratorBasedBuilder):
  """CIFAR-10.

  60000 32x32 colour images in 10 classes (50000 train / 10000 test),
  read from the original binary batch files.
  """
  VERSION = tfds.core.Version("3.0.1")
  SUPPORTED_VERSIONS = [
      tfds.core.Version(
          "3.0.2", experiments={tfds.core.Experiment.METADATA: True}
      ),
  ]
  def _info(self):
    # Declares the feature schema, supervised keys and citation for tfds.
    return tfds.core.DatasetInfo(
        builder=self,
        description=("The CIFAR-10 dataset consists of 60000 32x32 colour "
                     "images in 10 classes, with 6000 images per class. There "
                     "are 50000 training images and 10000 test images."),
        features=tfds.features.FeaturesDict({
            "id": tfds.features.Text(),
            "image": tfds.features.Image(shape=_CIFAR_IMAGE_SHAPE),
            "label": tfds.features.ClassLabel(num_classes=10),
        }),
        supervised_keys=("image", "label"),
        homepage="https://www.cs.toronto.edu/~kriz/cifar.html",
        citation=_CITATION,
    )
  @property
  def _cifar_info(self):
    # Download layout: five training batches plus one test batch, with a
    # single (10-class) label byte per image record.
    return CifarInfo(
        name=self.name,
        url="https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
        train_files=[
            "data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin",
            "data_batch_4.bin", "data_batch_5.bin"
        ],
        test_files=["test_batch.bin"],
        prefix="cifar-10-batches-bin/",
        label_files=["batches.meta.txt"],
        label_keys=["label"],
    )
  def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    cifar_path = dl_manager.download_and_extract(self._cifar_info.url)
    cifar_info = self._cifar_info
    cifar_path = os.path.join(cifar_path, cifar_info.prefix)
    # Load the label names
    for label_key, label_file in zip(cifar_info.label_keys,
                                     cifar_info.label_files):
      labels_path = os.path.join(cifar_path, label_file)
      with tf.io.gfile.GFile(labels_path) as label_f:
        label_names = [name for name in label_f.read().split("\n") if name]
      self.info.features[label_key].names = label_names
    # Define the splits
    def gen_filenames(filenames):
      # Resolve file names relative to the extracted archive prefix.
      for f in filenames:
        yield os.path.join(cifar_path, f)
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs={
                "split_prefix": "train_",
                "filepaths": gen_filenames(cifar_info.train_files)
            }),
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            gen_kwargs={
                "split_prefix": "test_",
                "filepaths": gen_filenames(cifar_info.test_files)
            }),
    ]
  def _generate_examples(self, split_prefix, filepaths):
    """Generate CIFAR examples as dicts.
    Shared across CIFAR-{10, 100}. Uses self._cifar_info as
    configuration.
    Args:
      split_prefix (str): Prefix that identifies the split (e.g. "tr" or "te").
      filepaths (list[str]): The files to use to generate the data.
    Yields:
      The cifar examples, as defined in the dataset info features.
    """
    label_keys = self._cifar_info.label_keys
    index = 0 # Using index as key since data is always loaded in same order.
    for path in filepaths:
      for labels, np_image in _load_data(path, len(label_keys)):
        # `labels` lines up with `label_keys` (e.g. coarse+fine for CIFAR-100).
        record = dict(zip(label_keys, labels))
        # Note: "id" is only provided for the user convenience. To shuffle the
        # dataset we use `index`, so that the sharding is compatible with
        # earlier versions.
        record["id"] = "{}{:05d}".format(split_prefix, index)
        record["image"] = np_image
        yield index, record
        index += 1
class Cifar100(Cifar10):
  """CIFAR-100 dataset.

  Same image format and generation pipeline as CIFAR-10, but with 100
  fine classes grouped into 20 coarse superclasses; each example carries
  both labels.
  """
  VERSION = tfds.core.Version("3.0.1")
  SUPPORTED_VERSIONS = [
      tfds.core.Version(
          "3.0.2", experiments={tfds.core.Experiment.METADATA: True}
      ),
  ]
  @property
  def _cifar_info(self):
    # Single train/test file pair; two label bytes per record, consumed
    # in `label_keys` order (coarse first, then fine).
    return CifarInfo(
        name=self.name,
        url="https://www.cs.toronto.edu/~kriz/cifar-100-binary.tar.gz",
        train_files=["train.bin"],
        test_files=["test.bin"],
        prefix="cifar-100-binary/",
        label_files=["coarse_label_names.txt", "fine_label_names.txt"],
        label_keys=["coarse_label", "label"],
    )
  def _info(self):
    return tfds.core.DatasetInfo(
        builder=self,
        description=("This dataset is just like the CIFAR-10, except it has "
                     "100 classes containing 600 images each. There are 500 "
                     "training images and 100 testing images per class. The "
                     "100 classes in the CIFAR-100 are grouped into 20 "
                     "superclasses. Each image comes with a \"fine\" label "
                     "(the class to which it belongs) and a \"coarse\" label "
                     "(the superclass to which it belongs)."),
        features=tfds.features.FeaturesDict({
            "id": tfds.features.Text(),
            "image": tfds.features.Image(shape=_CIFAR_IMAGE_SHAPE),
            "label": tfds.features.ClassLabel(num_classes=100),
            "coarse_label": tfds.features.ClassLabel(num_classes=20),
        }),
        supervised_keys=("image", "label"),
        homepage="https://www.cs.toronto.edu/~kriz/cifar.html",
        citation=_CITATION,
    )
class CifarInfo(collections.namedtuple("_CifarInfo", [
    "name",
    "url",
    "prefix",
    "train_files",
    "test_files",
    "label_files",
    "label_keys",
])):
  """Contains the information necessary to generate a CIFAR dataset.

  Immutable value object shared by the Cifar10/Cifar100 builders.

  Attributes:
    name (str): name of dataset.
    url (str): data URL.
    prefix (str): path prefix within the downloaded and extracted file to look
      for `train_files` and `test_files`.
    train_files (list<str>): name of training files within `prefix`.
    test_files (list<str>): name of test files within `prefix`.
    label_files (list<str>): names of the label files in the data.
    label_keys (list<str>): names of the label keys in the data.
  """
def _load_data(path, labels_number=1):
  """Yield (labels, np_image) tuples parsed from one CIFAR binary file.

  Each fixed-size record holds `labels_number` label bytes followed by
  3072 image bytes (3 planes of 32x32); images are yielded channel-last.
  """
  with tf.io.gfile.GFile(path, "rb") as f:
    raw = f.read()
  cursor = 0
  last = len(raw) - 1
  while cursor < last:
    # The label byte(s) come first in every record.
    labels = np.frombuffer(
        raw, dtype=np.uint8, count=labels_number, offset=cursor
    ).reshape((labels_number,))
    cursor += labels_number
    # Then 1024 * 3 = 3072 image bytes, stored planar (CHW) -> HWC.
    flat = np.frombuffer(raw, dtype=np.uint8, count=3072, offset=cursor)
    img = flat.reshape(
        (3, _CIFAR_IMAGE_SIZE, _CIFAR_IMAGE_SIZE)).transpose((1, 2, 0))
    cursor += 3072
    yield labels, img
| [
"tensorflow_datasets.public_api.features.Image",
"numpy.frombuffer",
"collections.namedtuple",
"tensorflow_datasets.public_api.features.Text",
"os.path.join",
"tensorflow_datasets.public_api.features.ClassLabel",
"tensorflow_datasets.public_api.core.Version",
"tensorflow.compat.v2.io.gfile.GFile"
] | [((6490, 6615), 'collections.namedtuple', 'collections.namedtuple', (['"""_CifarInfo"""', "['name', 'url', 'prefix', 'train_files', 'test_files', 'label_files',\n 'label_keys']"], {}), "('_CifarInfo', ['name', 'url', 'prefix',\n 'train_files', 'test_files', 'label_files', 'label_keys'])\n", (6512, 6615), False, 'import collections\n'), ((1279, 1305), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""3.0.1"""'], {}), "('3.0.1')\n", (1296, 1305), True, 'import tensorflow_datasets.public_api as tfds\n'), ((4885, 4911), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""3.0.1"""'], {}), "('3.0.1')\n", (4902, 4911), True, 'import tensorflow_datasets.public_api as tfds\n'), ((1337, 1414), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""3.0.2"""'], {'experiments': '{tfds.core.Experiment.METADATA: True}'}), "('3.0.2', experiments={tfds.core.Experiment.METADATA: True})\n", (1354, 1414), True, 'import tensorflow_datasets.public_api as tfds\n'), ((2785, 2828), 'os.path.join', 'os.path.join', (['cifar_path', 'cifar_info.prefix'], {}), '(cifar_path, cifar_info.prefix)\n', (2797, 2828), False, 'import os\n'), ((4943, 5020), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""3.0.2"""'], {'experiments': '{tfds.core.Experiment.METADATA: True}'}), "('3.0.2', experiments={tfds.core.Experiment.METADATA: True})\n", (4960, 5020), True, 'import tensorflow_datasets.public_api as tfds\n'), ((7269, 7298), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.io.gfile.GFile', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (7286, 7298), True, 'import tensorflow.compat.v2 as tf\n'), ((2999, 3035), 'os.path.join', 'os.path.join', (['cifar_path', 'label_file'], {}), '(cifar_path, label_file)\n', (3011, 3035), False, 'import os\n'), ((3047, 3077), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.io.gfile.GFile', (['labels_path'], {}), '(labels_path)\n', (3064, 3077), True, 'import tensorflow.compat.v2 as tf\n'), 
((7409, 7480), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8', 'count': 'labels_number', 'offset': 'offset'}), '(data, dtype=np.uint8, count=labels_number, offset=offset)\n', (7422, 7480), True, 'import numpy as np\n'), ((3321, 3348), 'os.path.join', 'os.path.join', (['cifar_path', 'f'], {}), '(cifar_path, f)\n', (3333, 3348), False, 'import os\n'), ((1808, 1828), 'tensorflow_datasets.public_api.features.Text', 'tfds.features.Text', ([], {}), '()\n', (1826, 1828), True, 'import tensorflow_datasets.public_api as tfds\n'), ((1851, 1896), 'tensorflow_datasets.public_api.features.Image', 'tfds.features.Image', ([], {'shape': '_CIFAR_IMAGE_SHAPE'}), '(shape=_CIFAR_IMAGE_SHAPE)\n', (1870, 1896), True, 'import tensorflow_datasets.public_api as tfds\n'), ((1919, 1959), 'tensorflow_datasets.public_api.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (1943, 1959), True, 'import tensorflow_datasets.public_api as tfds\n'), ((6094, 6114), 'tensorflow_datasets.public_api.features.Text', 'tfds.features.Text', ([], {}), '()\n', (6112, 6114), True, 'import tensorflow_datasets.public_api as tfds\n'), ((6137, 6182), 'tensorflow_datasets.public_api.features.Image', 'tfds.features.Image', ([], {'shape': '_CIFAR_IMAGE_SHAPE'}), '(shape=_CIFAR_IMAGE_SHAPE)\n', (6156, 6182), True, 'import tensorflow_datasets.public_api as tfds\n'), ((6205, 6246), 'tensorflow_datasets.public_api.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(100)'}), '(num_classes=100)\n', (6229, 6246), True, 'import tensorflow_datasets.public_api as tfds\n'), ((6276, 6316), 'tensorflow_datasets.public_api.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(20)'}), '(num_classes=20)\n', (6300, 6316), True, 'import tensorflow_datasets.public_api as tfds\n'), ((7634, 7696), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8', 'count': '(3072)', 'offset': 'offset'}), '(data, 
dtype=np.uint8, count=3072, offset=offset)\n', (7647, 7696), True, 'import numpy as np\n')] |
# %%
from core.dqn_agent import DQNAgent
from cartpole.cartpole_neural_network import CartPoleNeuralNetwork
from cartpole.cartpole_wrapper import CartPoleWrapper
import gym
import numpy as np
from tqdm import tqdm
import numpy as np
import shutil
from pathlib import Path
import shutil
from utils import *
import plotly.express as px
# %% [markdown]
# Initialize deep Q-learning agent, neural network and metrics
# %%
# Fix the numpy RNG so experiments are repeatable.
seed = 1000
np.random.seed(seed)
agent = DQNAgent(env=CartPoleWrapper(gym.make("CartPole-v1")),
                 nn=CartPoleNeuralNetwork(), replay_memory_max_size=10000, batch_size=30)
#agent.env.seed(0)
#agent.env.action_space.np_random.seed(seed)
# Hyper-parameters for training.
DISCOUNT_FACTOR = 0.99
LEARNING_RATE = 0.0001
STEPS_TO_SYNC_TARGET_NN=10
# Metric accumulators (episode index, reward and steps per evaluation).
n_episodes = []
total_rewards = []
number_steps = []
total_episodes = 0
# %% [markdown]
# Training
# %%
if Path('results/cartpole/saves').exists():
    shutil.rmtree('results/cartpole/saves')
logger = tqdm(range(100))
# Each outer iteration: one greedy evaluation episode (epsilon=0, no
# optimization), then 10 exploratory training episodes.
for _ in logger:
    total_reward, steps = agent.start_episode_and_evaluate(DISCOUNT_FACTOR, LEARNING_RATE, steps_to_sync_target_nn=STEPS_TO_SYNC_TARGET_NN,
                                                           epsilon=0, min_epsilon=0, momentum=0.4, render=False, optimize=False)
    logger.set_description(f'episode: {total_episodes}\tsteps: {steps}\ttotal_reward: {total_reward}')
    n_episodes.append(total_episodes)
    total_rewards.append(total_reward)
    number_steps.append(steps)
    for i in range(10):
        agent.start_episode(DISCOUNT_FACTOR, LEARNING_RATE, steps_to_sync_target_nn=STEPS_TO_SYNC_TARGET_NN,
                            epsilon=1, epsilon_decay=0.99, min_epsilon=0.01, momentum=0.4)
    # i == 9 after the inner loop, so this counts the 10 training episodes.
    total_episodes += i+1
    if total_episodes % 20 == 0:
        agent.save_weights(f'results/cartpole/saves/data{total_episodes}.nn')
# %% [markdown]
# Visualize training metrics
# %%
plot_metrics(n_episodes, total_rewards, number_steps,)
# %% [markdown]
# Evaluation
## Video demos
# %%
if Path('results/cartpole/recording/tmp-videos').exists():
    shutil.rmtree('results/cartpole/recording/tmp-videos')
agent.env = gym.wrappers.Monitor(agent.env, 'results/cartpole/recording/tmp-videos', force=True, video_callable=lambda episode_id: True)
agent.load_weights('results/cartpole/good-results/3best/saves/data320.nn')
# Record 10 greedy episodes to video files.
for i in range(10):
    total_reward, steps = agent.start_episode_and_evaluate(DISCOUNT_FACTOR, LEARNING_RATE, epsilon=0, min_epsilon=0, momentum=0.4, render=True, optimize=False)
    print(f'{i}\t{steps}\t{total_reward}')
agent.env.close()
# Unwrap the Monitor again so later episodes are not recorded.
agent.env = agent.env.env
plot_videos('results/cartpole/recording/tmp-videos', f'results/cartpole/recording/output.mp4')
# %% [markdown]
## Compute total reward distribution
# %%
agent.load_weights('results/cartpole/good-results/3best/saves/data320.nn')
total_rewards = []
number_steps = []
# Monte-Carlo estimate of the greedy policy's total-reward distribution.
for i in range(10000):
    total_reward, steps = agent.start_episode_and_evaluate(DISCOUNT_FACTOR, LEARNING_RATE, epsilon=0, min_epsilon=0, momentum=0.4, render=False, optimize=False)
    if i % 100 == 0: print(f'{i}\t{steps}\t{total_reward}')
    total_rewards.append(total_reward)
    number_steps.append(steps)
# %% [markdown]
## Plot total reward distribution
# %%
x = total_rewards
fig = px.histogram(x=x, nbins=round(len(x)/10))
fig.update_xaxes(title_text='total_rewards')
fig.add_vline(x=np.mean(x), line_width=3, line_dash="dash", line_color="green")
fig.show()
print(f'mean: {np.mean(x)}')
print(f'standard deviation: {np.std(x)}') | [
"numpy.mean",
"pathlib.Path",
"numpy.std",
"cartpole.cartpole_neural_network.CartPoleNeuralNetwork",
"numpy.random.seed",
"shutil.rmtree",
"gym.wrappers.Monitor",
"gym.make"
] | [((431, 451), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (445, 451), True, 'import numpy as np\n'), ((2030, 2158), 'gym.wrappers.Monitor', 'gym.wrappers.Monitor', (['agent.env', '"""results/cartpole/recording/tmp-videos"""'], {'force': '(True)', 'video_callable': '(lambda episode_id: True)'}), "(agent.env, 'results/cartpole/recording/tmp-videos',\n force=True, video_callable=lambda episode_id: True)\n", (2050, 2158), False, 'import gym\n'), ((897, 936), 'shutil.rmtree', 'shutil.rmtree', (['"""results/cartpole/saves"""'], {}), "('results/cartpole/saves')\n", (910, 936), False, 'import shutil\n'), ((1963, 2017), 'shutil.rmtree', 'shutil.rmtree', (['"""results/cartpole/recording/tmp-videos"""'], {}), "('results/cartpole/recording/tmp-videos')\n", (1976, 2017), False, 'import shutil\n'), ((534, 557), 'cartpole.cartpole_neural_network.CartPoleNeuralNetwork', 'CartPoleNeuralNetwork', ([], {}), '()\n', (555, 557), False, 'from cartpole.cartpole_neural_network import CartPoleNeuralNetwork\n'), ((852, 882), 'pathlib.Path', 'Path', (['"""results/cartpole/saves"""'], {}), "('results/cartpole/saves')\n", (856, 882), False, 'from pathlib import Path\n'), ((1906, 1951), 'pathlib.Path', 'Path', (['"""results/cartpole/recording/tmp-videos"""'], {}), "('results/cartpole/recording/tmp-videos')\n", (1910, 1951), False, 'from pathlib import Path\n'), ((3262, 3272), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3269, 3272), True, 'import numpy as np\n'), ((489, 512), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (497, 512), False, 'import gym\n'), ((3352, 3362), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3359, 3362), True, 'import numpy as np\n'), ((3395, 3404), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (3401, 3404), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Some matrix specialization."""
import time
from pygimli.core import _pygimli_ as pg
import numpy as np
# make core matrices (now in pg, later pg.core) known here for tab-completion
# BlockMatrix = pg.BlockMatrix
# IdentityMatrix = pg.IdentityMatrix
class MultLeftMatrix(pg.MatrixBase):
    """Matrix consisting of actual RMatrix and left-side vector.

    Implicitly represents diag(left) * A: results of A are scaled
    element-wise by `left` without forming the product.
    """
    def __init__(self, A, left, verbose=False):
        """Constructor saving matrix and vector.

        Parameters
        ----------
        A : matrix
            wrapped matrix
        left : iterable
            scaling vector, one entry per row of A
        """
        if A.rows() != len(left):
            # Bug fix: `left` is checked against the ROWS of A, so the
            # message must not talk about columns.
            raise Exception("Matrix rows do not fit vector length!")
        self.A = A
        self.left = left
        super().__init__(verbose)  # only in Python 3
        # pg.MatrixBase.__init__(self)  # the Python 2 variant
    def rows(self):
        """Return number of rows (using underlying matrix)."""
        return self.A.rows()
    def cols(self):
        """Return number of columns (using underlying matrix)."""
        return self.A.cols()
    def mult(self, x):
        """Multiplication from right-hand-side (dot product A*x)."""
        return self.A.mult(x) * self.left
    def transMult(self, x):
        """Multiplication from right-hand-side (dot product A.T * x)."""
        return self.A.transMult(x * self.left)
LMultRMatrix = MultLeftMatrix # alias for backward compatibility
class MultRightMatrix(pg.MatrixBase):
    """Matrix A combined with a right-hand scaling vector r (A * diag(r))."""
    def __init__(self, A, r=None):
        super().__init__()
        self.A = A
        # Default scaling is an all-ones vector, i.e. plain A.
        if r is None:
            self.r = pg.RVector(A.cols(), 1.0)
        else:
            self.r = r
    def mult(self, x):
        """Return M*x = A*(r*x)."""
        scaled = x * self.r
        return self.A.mult(scaled)
    def transMult(self, x):
        """Return M.T*x = (A.T*x)*r."""
        back = self.A.transMult(x)
        return back * self.r
    def cols(self):
        """Column count, delegated to the wrapped matrix."""
        return self.A.cols()
    def rows(self):
        """Row count, delegated to the wrapped matrix."""
        return self.A.rows()
RMultRMatrix = MultRightMatrix # alias for backward compatibility
class MultLeftRightMatrix(pg.MatrixBase):
    """Matrix scaled from both sides: diag(left) * A * diag(right)."""
    def __init__(self, A, left, right, verbose=False):
        """Store the matrix and both scaling vectors after sanity checks."""
        if A.cols() != len(right):
            raise Exception("Matrix columns do not fit right vector length!")
        if A.rows() != len(left):
            raise Exception("Matrix rows do not fit left vector length!")
        self.A = A
        self.left = left
        self.right = right
        super().__init__(verbose)
    def rows(self):
        """Row count, delegated to the wrapped matrix."""
        return self.A.rows()
    def cols(self):
        """Column count, delegated to the wrapped matrix."""
        return self.A.cols()
    def mult(self, x):
        """Return left * (A * (right * x)), scalings element-wise."""
        return self.A.mult(x * self.right) * self.left
    def transMult(self, x):
        """Return right * (A.T * (left * x)), scalings element-wise."""
        return self.A.transMult(x * self.left) * self.right
LRMultRMatrix = MultLeftRightMatrix # alias for backward compatibility
class Add2Matrix(pg.MatrixBase):
    """Implicit sum of two equally shaped matrices (A + B)."""
    def __init__(self, A, B):
        super().__init__()
        assert A.rows() == B.rows()
        assert A.cols() == B.cols()
        self.A = A
        self.B = B
    def mult(self, x):
        """Return (A + B) * x."""
        first = self.A.mult(x)
        second = self.B.mult(x)
        return first + second
    def transMult(self, x):
        """Return (A + B).T * x."""
        first = self.A.transMult(x)
        second = self.B.transMult(x)
        return first + second
    def cols(self):
        """Number of columns (shared by A and B)."""
        return self.A.cols()
    def rows(self):
        """Number of rows (shared by A and B)."""
        return self.A.rows()
class Mult2Matrix(pg.MatrixBase):
    """Implicit product of two matrices (A * B), never formed explicitly."""
    def __init__(self, A, B):
        super().__init__()
        assert A.cols() == B.rows()
        self.A = A
        self.B = B
    def mult(self, x):
        """Return (A * B) * x by applying B first, then A."""
        inner = self.B.mult(x)
        return self.A.mult(inner)
    def transMult(self, x):
        """Return (A * B).T * x = B.T * (A.T * x)."""
        inner = self.A.transMult(x)
        return self.B.transMult(inner)
    def cols(self):
        """Number of columns (those of B)."""
        return self.B.cols()
    def rows(self):
        """Number of rows (those of A)."""
        return self.A.rows()
class DiagonalMatrix(pg.MatrixBase):
    """Square matrix defined solely by its main diagonal ``d``."""
    def __init__(self, d):
        super().__init__()
        self.d = d
    def mult(self, x):
        """Return D*x, i.e. the element-wise product with the diagonal."""
        return x * self.d
    def transMult(self, x):
        """Return D.T*x; identical to mult() since D is diagonal."""
        return x * self.d
    def cols(self):
        """Number of columns (length of diagonal)."""
        return len(self.d)
    def rows(self):
        """Number of rows (length of diagonal)."""
        return len(self.d)
class Cm05Matrix(pg.MatrixBase):
    """Matrix implicitly representing the inverse square-root.

    Stores the symmetric eigen-decomposition A = EV * diag(ew) * EV.T
    once, then applies A^(-1/2) * x = EV * diag(1/sqrt(ew)) * (EV.T * x)
    on demand.
    """
    def __init__(self, A, verbose=False):
        """Constructor saving matrix and vector.
        Parameters
        ----------
        A : ndarray
            numpy type (full) matrix
        """
        from scipy.linalg import eigh # , get_blas_funcs
        if A.shape[0] != A.shape[1]: # rows/cols for pg matrix
            raise Exception("Matrix must by square (and symmetric)!")
        self.size = A.shape[0]
        t = time.time()
        # Full eigen-decomposition; O(n^3), done once at construction.
        self.ew, self.EV = eigh(A)
        # 1/sqrt(eigenvalues). NOTE(review): assumes A is positive definite;
        # non-positive eigenvalues yield inf/NaN here -- confirm upstream.
        self.mul = np.sqrt(1./self.ew)
        if verbose:
            pg.info('(C) Time for eigenvalue decomposition:{:.1f} s'.format(
                time.time() - t))
        self.A = A
        super().__init__(verbose) # only in Python 3
    def rows(self):
        """Return number of rows (using underlying matrix)."""
        return self.size
    def cols(self):
        """Return number of columns (using underlying matrix)."""
        return self.size
    def mult(self, x):
        """Multiplication from right-hand side (dot product)."""
        # Project onto the eigenbasis and scale: (EV.T*x) * 1/sqrt(ew),
        # shaped as a column vector ...
        part1 = (np.dot(np.transpose(x), self.EV).T*self.mul).reshape(-1, 1)
        # ... then map back with EV and flatten to 1-D.
        return self.EV.dot(part1).reshape(-1,)
        # return self.EV.dot((x.T.dot(self.EV)*self.mul).T)
    def transMult(self, x):
        """Multiplication from right-hand side (dot product)."""
        return self.mult(x) # matrix is symmetric by definition
| [
"scipy.linalg.eigh",
"numpy.transpose",
"numpy.sqrt",
"time.time"
] | [((5643, 5654), 'time.time', 'time.time', ([], {}), '()\n', (5652, 5654), False, 'import time\n'), ((5682, 5689), 'scipy.linalg.eigh', 'eigh', (['A'], {}), '(A)\n', (5686, 5689), False, 'from scipy.linalg import eigh\n'), ((5709, 5731), 'numpy.sqrt', 'np.sqrt', (['(1.0 / self.ew)'], {}), '(1.0 / self.ew)\n', (5716, 5731), True, 'import numpy as np\n'), ((5842, 5853), 'time.time', 'time.time', ([], {}), '()\n', (5851, 5853), False, 'import time\n'), ((6268, 6283), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (6280, 6283), True, 'import numpy as np\n')] |
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments
"""
This module provides the ``QSimDevice`` and ``QSimhDevice`` from Cirq.
"""
import cirq
import numpy as np
import pennylane as qml
try:
import qsimcirq
except ImportError as e:
raise ImportError(
"qsim Cirq is needed for the qsim devices to work."
"\nIt can be installed using pip:"
"\n\npip install qsimcirq"
) from e
from .simulator_device import SimulatorDevice
from .cirq_device import CirqDevice
class QSimDevice(SimulatorDevice):
    r"""qsim device for PennyLane.
    Args:
        wires (int, Iterable[Number, str]): Number of subsystems represented by the device,
            or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)
            or strings (``['ancilla', 'q1', 'q2']``).
        shots (int): Number of circuit evaluations/random samples used
            to estimate expectation values of observables. Shots need
            to be >= 1. If ``None``, expectation values are calculated analytically.
        qubits (List[cirq.Qubit]): A list of Cirq qubits that are used
            as wires. The wire number corresponds to the index in the list.
            By default, an array of ``cirq.LineQubit`` instances is created.
        qsim_options: (Dict[str, Any]): A dictionary with options for the qsim simulator. See the `qsim
            usage documentation <https://github.com/quantumlib/qsim/blob/master/docs/usage.md>`__
            for further details.
    """
    name = "QSim device for PennyLane"
    short_name = "cirq.qsim"
    def __init__(self, wires, shots=None, qubits=None, qsim_options=None):
        # NOTE(review): `qubits` is accepted but not forwarded to the base
        # class here -- confirm whether that is intended.
        super().__init__(wires, shots)
        # Wrap an (initially empty) Cirq circuit for the qsim simulator.
        self.circuit = qsimcirq.QSimCircuit(cirq_circuit=cirq.Circuit())
        self._simulator = qsimcirq.QSimSimulator(qsim_options=qsim_options or {})
    def reset(self):
        # pylint: disable=missing-function-docstring
        super().reset()
        # Start over from a fresh empty circuit as well.
        self.circuit = qsimcirq.QSimCircuit(cirq_circuit=cirq.Circuit())
    @property
    def operations(self):
        # pylint: disable=missing-function-docstring
        # Remove operations from the map that this backend does not support.
        ops = set(self._operation_map.keys()) - {
            "QubitStateVector",
            "BasisState",
            "CRX",
            "CRY",
            "CRZ",
            "CRot",
        }
        return ops
    @classmethod
    def capabilities(cls):
        # pylint: disable=missing-function-docstring
        capabilities = super().capabilities().copy()
        capabilities.update(
            supports_inverse_operations=False,
        )
        return capabilities
    def expval(self, observable, shot_range=None, bin_size=None):
        is_tensor = isinstance(observable, qml.operation.Tensor)
        # <Identity> (or a tensor of identities only) is exactly 1; skip
        # the simulator in that case.
        if (
            is_tensor and all(obs == "Identity" for obs in observable.name)
        ) or observable.name == "Identity":
            return 1
        return super().expval(observable, shot_range, bin_size)
class QSimhDevice(SimulatorDevice):
    r"""qsimh device for PennyLane.
    Args:
        wires (int, Iterable[Number, str]): Number of subsystems represented by the device,
            or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)
            or strings (``['ancilla', 'q1', 'q2']``).
        qsimh_options (dict): A dictionary with options for the qsimh simulator. See the `qsim
            usage documentation <https://github.com/quantumlib/qsim/blob/master/docs/usage.md>`__
            for further details.
        shots (int): Number of circuit evaluations/random samples used
            to estimate expectation values of observables. Shots need
            to be >= 1. If ``None``, expectation values are calculated analytically.
        qubits (List[cirq.Qubit]): A list of Cirq qubits that are used
            as wires. The wire number corresponds to the index in the list.
            By default, an array of ``cirq.LineQubit`` instances is created.
    """
    name = "qsimh device for PennyLane"
    short_name = "cirq.qsimh"
    def __init__(self, wires, qsimh_options, shots=None, qubits=None):
        super().__init__(wires, shots, qubits)
        # The circuit is (re)built lazily in apply().
        self.circuit = None
        self.qsimh_options = qsimh_options
        self._simulator = qsimcirq.QSimhSimulator(qsimh_options)
    @property
    def operations(self):
        # pylint: disable=missing-function-docstring
        # Remove operations from the map that this backend does not support.
        ops = set(self._operation_map.keys()) - {
            "QubitStateVector",
            "BasisState",
            "CRX",
            "CRY",
            "CRZ",
            "CRot",
        }
        return ops
    @classmethod
    def capabilities(cls):
        # pylint: disable=missing-function-docstring
        capabilities = super().capabilities().copy()
        capabilities.update(
            supports_inverse_operations=False,
        )
        return capabilities
    def expval(self, observable, shot_range=None, bin_size=None):
        # Bypass SimulatorDevice.expval and use the generic QubitDevice path.
        return qml.QubitDevice.expval(self, observable, shot_range, bin_size)
    def apply(self, operations, **kwargs):
        # pylint: disable=missing-function-docstring
        CirqDevice.apply(self, operations, **kwargs)
        # TODO: remove the need for this hack by keeping better track of unused wires
        # We apply identity gates to all wires, otherwise Cirq would ignore
        # wires that are not acted upon
        for qb in self.qubits:
            self.circuit.append(cirq.IdentityGate(1)(qb))
        # qsimh returns amplitudes for explicit bitstrings; requesting all
        # 2^n of them reconstructs the full state vector.
        state = self._simulator.compute_amplitudes(
            program=self.circuit, bitstrings=list(range(2 ** len(self.wires)))
        )
        self._state = np.array(state)
    def generate_samples(self):
        # pylint: disable=missing-function-docstring
        number_of_states = 2 ** self.num_wires
        rotated_prob = self.analytic_probability()
        if rotated_prob is not None:
            # Renormalize to guard against numerical drift before sampling.
            rotated_prob /= np.sum(rotated_prob)
        samples = self.sample_basis_states(number_of_states, rotated_prob)
        return self.states_to_binary(samples, self.num_wires)
| [
"qsimcirq.QSimhSimulator",
"pennylane.QubitDevice.expval",
"qsimcirq.QSimSimulator",
"cirq.Circuit",
"numpy.array",
"numpy.sum",
"cirq.IdentityGate"
] | [((2383, 2438), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': '(qsim_options or {})'}), '(qsim_options=qsim_options or {})\n', (2405, 2438), False, 'import qsimcirq\n'), ((4844, 4882), 'qsimcirq.QSimhSimulator', 'qsimcirq.QSimhSimulator', (['qsimh_options'], {}), '(qsimh_options)\n', (4867, 4882), False, 'import qsimcirq\n'), ((5538, 5600), 'pennylane.QubitDevice.expval', 'qml.QubitDevice.expval', (['self', 'observable', 'shot_range', 'bin_size'], {}), '(self, observable, shot_range, bin_size)\n', (5560, 5600), True, 'import pennylane as qml\n'), ((6208, 6223), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (6216, 6223), True, 'import numpy as np\n'), ((6474, 6494), 'numpy.sum', 'np.sum', (['rotated_prob'], {}), '(rotated_prob)\n', (6480, 6494), True, 'import numpy as np\n'), ((2341, 2355), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (2353, 2355), False, 'import cirq\n'), ((2595, 2609), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (2607, 2609), False, 'import cirq\n'), ((6017, 6037), 'cirq.IdentityGate', 'cirq.IdentityGate', (['(1)'], {}), '(1)\n', (6034, 6037), False, 'import cirq\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ...serialization.serializables import Int64Field, Float64Field
from ..array_utils import array_module
from ..utils import gen_random_seeds
from .core import TensorRandomOperandMixin, TensorSimpleRandomData
class TensorRandint(TensorSimpleRandomData, TensorRandomOperandMixin):
    """Operand implementing tensor ``randint``, with an optional sparse path."""
    _op_type_ = OperandDef.RAND_RANDINT
    # Serialized operand fields; names must match the Field declarations.
    _fields_ = '_low', '_high', '_density', '_size'
    _low = Int64Field('low')
    _high = Int64Field('high')
    _density = Float64Field('density')
    _func_name = 'randint'
    def __init__(self, size=None, dtype=None,
                 low=None, high=None, density=None, **kw):
        dtype = np.dtype(dtype) if dtype is not None else dtype
        super().__init__(_size=size, _low=low, _high=high,
                         _density=density, dtype=dtype, **kw)
    @property
    def low(self):
        """Lower bound (inclusive) of the sampled integers."""
        return self._low
    @property
    def high(self):
        """Upper bound (exclusive) of the sampled integers."""
        return self._high
    @property
    def density(self):
        """Target fraction of non-zero entries for the sparse path."""
        return self._density
    def __call__(self, chunk_size=None):
        # Build the output tensor; shape comes from the operand's `_size`.
        return self.new_tensor(None, None, raw_chunk_size=chunk_size)
    @classmethod
    def execute(cls, ctx, op):
        # Dispatch to the sparse sampler when requested, otherwise reuse
        # the dense random-data execution from the base class.
        if op.sparse:
            cls.execute_sparse(ctx, op)
        else:
            super().execute(ctx, op)
    @classmethod
    def execute_sparse(cls, ctx, op):
        from ...lib.sparse import SparseNDArray
        from ...lib.sparse.core import cps, sps
        xp = array_module(op.gpu)
        if op.seed:
            rs = np.random.RandomState(op.seed)
        else:
            rs = None
        chunk = op.outputs[0]
        # Only 1-D/2-D sparse output is implemented.
        if chunk.ndim > 2:
            raise NotImplementedError
        # NOTE(review): a low of 0 is bumped to 1, presumably so explicit
        # zeros do not end up stored in the sparse matrix -- confirm.
        low = 1 if op.low == 0 else op.low
        rs = rs or xp.random
        # Approximate the number of non-zeros from the requested density.
        size = int(np.ceil(np.prod(chunk.shape) * op.density))
        xps = cps if op.gpu else sps
        # Sample coordinates and values independently; duplicate (i, j)
        # pairs are summed by coo->csr conversion below, so values can
        # exceed high-1 -- they are clipped right after.
        ij = xp.empty((2, size))
        ij[0] = rs.randint(chunk.shape[0], size=size)
        ij[1] = rs.randint(chunk.shape[1], size=size)
        data = rs.randint(low, op.high, size=size).astype(op.dtype)
        m = xps.coo_matrix((data, ij), chunk.shape).tocsr()
        m.data[m.data >= op.high] = op.high - 1
        # scipy.sparse is too slow, we remove the precise version due to the performance
        # m = sps.random(*chunk.shape, density=op.density, format='csr')
        # m.data = (rs or xp.random).randint(low, op.high, size=m.data.size)\
        #     .astype(op.dtype)
        ctx[chunk.key] = SparseNDArray(m)
    @classmethod
    def estimate_size(cls, ctx, op):
        chunk = op.outputs[0]
        if not op.sparse or not getattr(op, '_density', None):
            super().estimate_size(ctx, op)
        else:
            # use density to estimate real memory usage
            nbytes = int(chunk.nbytes * getattr(chunk.op, '_density'))
            ctx[chunk.key] = (nbytes, nbytes)
def randint(random_state, low, high=None, size=None, dtype='l', density=None,
            chunk_size=None, gpu=None):
    """
    Return random integers from `low` (inclusive) to `high` (exclusive).

    Samples are drawn from the "discrete uniform" distribution of the
    specified dtype in the "half-open" interval [`low`, `high`). If `high`
    is None (the default), results are drawn from [0, `low`).

    Parameters
    ----------
    low : int
        Lowest (signed) integer to be drawn from the distribution (unless
        ``high=None``, in which case this parameter is one above the
        *highest* such integer).
    high : int, optional
        If provided, one above the largest (signed) integer to be drawn
        from the distribution (see above for behavior if ``high=None``).
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. Default is None, in which case a
        single value is returned.
    dtype : dtype, optional
        Desired dtype of the result. All dtypes are determined by their
        name, i.e., 'int64', 'int', etc, so byteorder is not available
        and a specific precision may have different C types depending
        on the platform. The default value is 'np.int'.
    density : float, optional
        If given, a sparse tensor with roughly this fraction of explicit
        entries is created instead of a dense one.
    chunk_size : int or tuple of int or tuple of ints, optional
        Desired chunk size on each dimension.
    gpu : bool, optional
        Allocate the tensor on GPU if True, False as default.

    Returns
    -------
    out : int or Tensor of ints
        `size`-shaped tensor of random integers from the appropriate
        distribution, or a single such random int if `size` not provided.

    See Also
    --------
    random.random_integers : similar to `randint`, only for the closed
        interval [`low`, `high`], and 1 is the lowest value if `high` is
        omitted.

    Examples
    --------
    >>> import mars.tensor as mt

    >>> mt.random.randint(2, size=10).execute()
    array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
    >>> mt.random.randint(1, size=10).execute()
    array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    Generate a 2 x 4 tensor of ints between 0 and 4, inclusive:

    >>> mt.random.randint(5, size=(2, 4)).execute()
    array([[4, 0, 2, 1],
           [3, 2, 2, 0]])
    """
    # A non-zero density switches the operand into sparse mode.
    is_sparse = bool(density)
    resolved_size = random_state._handle_size(size)
    seed = gen_random_seeds(1, random_state.to_numpy())[0]
    op = TensorRandint(seed=seed, low=low, high=high, size=resolved_size,
                       dtype=dtype, gpu=gpu, sparse=is_sparse, density=density)
    return op(chunk_size=chunk_size)
| [
"numpy.prod",
"numpy.dtype",
"numpy.random.RandomState"
] | [((1326, 1341), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (1334, 1341), True, 'import numpy as np\n'), ((2180, 2210), 'numpy.random.RandomState', 'np.random.RandomState', (['op.seed'], {}), '(op.seed)\n', (2201, 2210), True, 'import numpy as np\n'), ((2444, 2464), 'numpy.prod', 'np.prod', (['chunk.shape'], {}), '(chunk.shape)\n', (2451, 2464), True, 'import numpy as np\n')] |
"""
plasmapy.classes.plasma
=======================
Defines the core Plasma class used by PlasmaPy to represent plasma properties.
"""
import numpy as np
import astropy.units as u
from astropy.utils.console import ProgressBar
from .simulation import MHDSimulation, dot
from ..constants import mu0
class Plasma:
    """Core class for describing and calculating plasma parameters.

    Attributes
    ----------
    x : `astropy.units.Quantity`
        x-coordinates within the plasma domain. Equal to `domain_x`.
    y : `astropy.units.Quantity`
        y-coordinates within the plasma domain. Equal to `domain_y`.
    z : `astropy.units.Quantity`
        z-coordinates within the plasma domain. Equal to `domain_z`.
    grid : `astropy.units.Quantity`
        (3, x, y, z) array containing the values of each coordinate
        at every point in the domain.
    domain_shape : tuple
        Shape of the plasma domain; axes of length 1 are dropped.
    density : `astropy.units.Quantity`
        (x, y, z) array of mass density at every point in the domain.
    momentum : `astropy.units.Quantity`
        (3, x, y, z) array of the momentum vector at every point in the domain.
    pressure : `astropy.units.Quantity`
        (x, y, z) array of pressure at every point in the domain.
    magnetic_field : `astropy.units.Quantity`
        (3, x, y, z) array of the magnetic field vector at every point in the
        domain.

    Parameters
    ----------
    domain_x : `astropy.units.Quantity`
        1D array of x-coordinates for the plasma domain. Must have units
        convertable to length.
    domain_y : `astropy.units.Quantity`
        1D array of y-coordinates for the plasma domain. Must have units
        convertable to length.
    domain_z : `astropy.units.Quantity`
        1D array of z-coordinates for the plasma domain. Must have units
        convertable to length.
    gamma : float
        Adiabatic index used in the pressure and sound-speed relations.
    """

    @u.quantity_input(domain_x=u.m, domain_y=u.m, domain_z=u.m)
    def __init__(self, domain_x, domain_y, domain_z, gamma=5/3):
        # Define domain sizes
        self.x = domain_x
        self.y = domain_y
        self.z = domain_z
        # Build the coordinate grid from SI values, then re-attach metres.
        x, y, z = self.x.si.value, self.y.si.value, self.z.si.value
        self.grid = np.meshgrid(x, y, z, indexing='ij') * u.m
        self.grid = np.squeeze(self.grid)
        # Keep only non-degenerate axes (length > 1) so that 1D/2D set-ups
        # get 1D/2D field arrays.
        self.domain_shape = []
        for length in (len(x), len(y), len(z)):
            if length > 1:
                self.domain_shape.append(length)
        self.domain_shape = tuple(self.domain_shape)
        self.gamma = gamma
        # Initiate core plasma variables as zero-filled Quantities.
        self._density = np.zeros(self.domain_shape) * u.kg / u.m**3
        self._momentum = np.zeros((3, *self.domain_shape)) * u.kg \
            / (u.m**2 * u.s)
        self._energy = np.zeros(self.domain_shape) * u.J / u.m**3
        self._magnetic_field = np.zeros((3, *self.domain_shape)) * u.T
        # Connect a simulation object for simulating
        self.simulation_physics = MHDSimulation(self)

    # ==== Core variables ====
    # Density
    @property
    def density(self):
        return self._density

    @density.setter
    @u.quantity_input
    def density(self, density: u.kg/u.m**3):
        """Sets the simulation's density profile to the specified array.

        Parameters
        ----------
        density : ndarray
            Array of density values. Shape and size must be equal to those of
            the simulation grid. Must have units of density.
        """
        assert density.shape == self.domain_shape, """
        Specified density array shape {} does not match simulation grid {}.
        """.format(density.shape, self.domain_shape)
        self._density = density

    # Momentum
    @property
    def momentum(self):
        return self._momentum

    @momentum.setter
    @u.quantity_input
    def momentum(self, momentum: u.kg/(u.m**2 * u.s)):
        """Sets the simulation's momentum profile to the specified array.

        Parameters
        ----------
        momentum : ndarray
            Array of momentum vectors. Shape must be (3, x, [y, z]), where x,
            y and z are the dimensions of the simulation grid.
            Note that a full 3D vector is necessary even if the simulation
            has fewer than 3 dimensions.
        """
        # Fixed copy-pasted assertion message: it previously said "density".
        assert momentum.shape == (3, *self.domain_shape), """
        Specified momentum array shape {} does not match simulation grid {}.
        """.format(momentum.shape, (3, *self.domain_shape))
        self._momentum = momentum

    # Internal energy
    @property
    def energy(self):
        return self._energy

    @energy.setter
    @u.quantity_input
    def energy(self, energy: u.J/u.m**3):
        """Sets the simulation's total energy density profile to the specified array.

        Parameters
        ----------
        energy : ndarray
            Array of energy values. Shape must be (x, y, z), where x, y, and z
            are the grid sizes of the simulation in each dimension.
            Must have units of energy density.
        """
        # Fixed copy-pasted assertion message: it previously said "density".
        assert energy.shape == self.domain_shape, """
        Specified energy array shape {} does not match simulation grid {}.
        """.format(energy.shape, self.domain_shape)
        self._energy = energy

    # Magnetic field
    @property
    def magnetic_field(self):
        return self._magnetic_field

    @magnetic_field.setter
    @u.quantity_input
    def magnetic_field(self, magnetic_field: u.Tesla):
        """Sets the simulation's magnetic field profile to the specified array.

        Parameters
        ----------
        magnetic_field : ndarray
            Array of magnetic field values. Shape must be (3, x, [y, z]),
            where x, y, and z are the grid sizes of the simulation.
            Note that a full 3D vector is necessary even if the simulation
            has fewer than 3 dimensions.
        """
        # Fixed copy-pasted assertion message: it previously said "density".
        assert magnetic_field.shape == (3, *self.domain_shape), """
        Specified magnetic field array shape {} does not match simulation grid {}.
        """.format(magnetic_field.shape, (3, *self.domain_shape))
        self._magnetic_field = magnetic_field

    @property
    def core_variables(self):
        """Returns an up-to-date list of the core variables used in the calculations.
        """
        return [self.density, self.momentum, self.energy, self.magnetic_field]

    # ==== Derived variables ====
    # Velocity
    @property
    def velocity(self):
        """Returns the velocity profile of the simulation, as calculated from
        the momentum and total density.
        """
        return self.momentum / self.density

    @velocity.setter
    @u.quantity_input
    def velocity(self, velocity: u.m / u.s):
        """Defines the velocity throughout the simulation, and automatically
        updates the momentum based on the current density values.

        Parameters
        ----------
        velocity : ndarray
            Array of velocity vectors with shape (3, x, [y, z]) where x, y and
            z are the spatial grid sizes of the simulation.
            Note that a full 3D vector is required even if the simulation is
            run for fewer than 3 dimensions. Must have units of velocity.
        """
        assert velocity.shape == (3, *self.domain_shape), """Specified velocity
        array shape does not match simulation grid."""
        self.momentum = velocity * self.density

    @property
    def pressure(self):
        """The kinetic pressure profile, defined as:

        .. math::

            p = (\\gamma - 1) (e_0 - \\frac{\\rho\\textbf{v}^2}{2})
        """
        v = self.velocity
        return (self.gamma - 1) \
            * (self.energy - ((self.density * dot(v, v)) / 2))

    @property
    def sound_speed(self):
        """Sound speed everywhere in the domain, from pressure, density and
        the adiabatic index:

        .. math::

            c_s = \\sqrt{\\frac{\\gamma p}{\\rho}}
        """
        return np.sqrt((self.gamma * self.pressure) / self.density)

    @property
    def magnetic_field_strength(self):
        """Magnitude of the magnetic field vector at every point."""
        return np.sqrt(dot(self.magnetic_field, self.magnetic_field))

    @property
    def alfven_speed(self):
        """Alfven speed, :math:`|B| / \\sqrt{\\mu_0 \\rho}`."""
        return self.magnetic_field_strength / np.sqrt(mu0 * self.density)

    @u.quantity_input(max_time=u.s)
    def simulate(self, max_its=np.inf, max_time=np.inf*u.s):
        """Simulates the plasma as set up, either for the given number of
        iterations or until the simulation reaches the given time.

        Parameters
        ----------
        max_its : int
            Tells the simulation to run for a set number of iterations.
        max_time : astropy.units.Quantity
            Maximum total (in-simulation) time to allow the simulation to run.
            Must have units of time.

        Examples
        --------
        >>> # Run a simulation for exactly one thousand iterations.
        >>> myplasma.simulate(max_its=1000)
        >>> # Run a simulation for up to half an hour of simulation time.
        >>> myplasma.simulate(max_time=30*u.minute)
        """
        if np.isinf(max_its) and np.isinf(max_time.value):
            raise ValueError("Either max_time or max_its must be set.")

        physics = self.simulation_physics
        dt = physics.dt

        # Size the progress bar from whichever limit is finite. The isinf
        # test uses .value for consistency with the check above.
        if np.isinf(max_time.value):
            pb = ProgressBar(max_its)
        else:
            pb = ProgressBar(int(max_time / dt))
        with pb as bar:
            while (physics.current_iteration < max_its
                   and physics.current_time < max_time):
                physics.time_stepper()
                bar.update()
| [
"numpy.sqrt",
"astropy.utils.console.ProgressBar",
"numpy.squeeze",
"numpy.zeros",
"numpy.meshgrid",
"numpy.isinf",
"astropy.units.quantity_input"
] | [((1874, 1932), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'domain_x': 'u.m', 'domain_y': 'u.m', 'domain_z': 'u.m'}), '(domain_x=u.m, domain_y=u.m, domain_z=u.m)\n', (1890, 1932), True, 'import astropy.units as u\n'), ((9229, 9259), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'max_time': 'u.s'}), '(max_time=u.s)\n', (9245, 9259), True, 'import astropy.units as u\n'), ((2257, 2278), 'numpy.squeeze', 'np.squeeze', (['self.grid'], {}), '(self.grid)\n', (2267, 2278), True, 'import numpy as np\n'), ((8879, 8929), 'numpy.sqrt', 'np.sqrt', (['(self.gamma * self.pressure / self.density)'], {}), '(self.gamma * self.pressure / self.density)\n', (8886, 8929), True, 'import numpy as np\n'), ((10251, 10269), 'numpy.isinf', 'np.isinf', (['max_time'], {}), '(max_time)\n', (10259, 10269), True, 'import numpy as np\n'), ((2195, 2230), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {'indexing': '"""ij"""'}), "(x, y, z, indexing='ij')\n", (2206, 2230), True, 'import numpy as np\n'), ((2851, 2884), 'numpy.zeros', 'np.zeros', (['(3, *self.domain_shape)'], {}), '((3, *self.domain_shape))\n', (2859, 2884), True, 'import numpy as np\n'), ((9195, 9222), 'numpy.sqrt', 'np.sqrt', (['(mu0 * self.density)'], {}), '(mu0 * self.density)\n', (9202, 9222), True, 'import numpy as np\n'), ((10052, 10069), 'numpy.isinf', 'np.isinf', (['max_its'], {}), '(max_its)\n', (10060, 10069), True, 'import numpy as np\n'), ((10074, 10098), 'numpy.isinf', 'np.isinf', (['max_time.value'], {}), '(max_time.value)\n', (10082, 10098), True, 'import numpy as np\n'), ((10288, 10308), 'astropy.utils.console.ProgressBar', 'ProgressBar', (['max_its'], {}), '(max_its)\n', (10299, 10308), False, 'from astropy.utils.console import ProgressBar\n'), ((2613, 2640), 'numpy.zeros', 'np.zeros', (['self.domain_shape'], {}), '(self.domain_shape)\n', (2621, 2640), True, 'import numpy as np\n'), ((2682, 2715), 'numpy.zeros', 'np.zeros', (['(3, *self.domain_shape)'], {}), '((3, 
*self.domain_shape))\n', (2690, 2715), True, 'import numpy as np\n'), ((2777, 2804), 'numpy.zeros', 'np.zeros', (['self.domain_shape'], {}), '(self.domain_shape)\n', (2785, 2804), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import torch
from torch import tensor
from sklearn.metrics import roc_curve, auc, accuracy_score, confusion_matrix, classification_report
from .models import RatioModel
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
def evaluate_ratio_model(
    model,
    xs=None,
    run_on_gpu=True,
    double_precision=False,
    return_grad_x=False,
):
    """Evaluate a trained ratio model on a batch of inputs.

    Parameters
    ----------
    model : torch.nn.Module
        Model whose forward pass returns ``(r_hat, s_hat_logits)``.
    xs : sequence
        Input samples; each element is converted to a tensor and stacked.
    run_on_gpu : bool
        Use CUDA when available.
    double_precision : bool
        Evaluate in float64 instead of float32.
    return_grad_x : bool
        Unused placeholder, kept for interface compatibility.

    Returns
    -------
    (r_hat, s_hat) : tuple of 1D numpy arrays
        The raw ratio output and the sigmoid of the logit output.
    """
    # CPU or GPU?
    run_on_gpu = run_on_gpu and torch.cuda.is_available()
    device = torch.device("cuda" if run_on_gpu else "cpu")
    dtype = torch.double if double_precision else torch.float

    # Prepare data (removed the unused ``n_xs`` local).
    xs = torch.stack([tensor(i) for i in xs])

    model = model.to(device, dtype)
    xs = xs.to(device, dtype)

    with torch.no_grad():
        model.eval()

        r_hat, s_hat = model(xs)
        # The network's second output is a logit; map it to a probability.
        # r_hat = (1 - s_hat) / s_hat = p_1(x) / p_0(x) per the model docs.
        s_hat = torch.sigmoid(s_hat)

    # Copy back tensors to CPU
    if run_on_gpu:
        r_hat = r_hat.cpu()
        s_hat = s_hat.cpu()

    # Get data and return
    r_hat = r_hat.detach().numpy().flatten()
    s_hat = s_hat.detach().numpy().flatten()
    return r_hat, s_hat
def evaluate_performance_model(
    model,
    xs,
    ys,
    run_on_gpu=True,
    double_precision=False,
    return_grad_x=False,
):
    """Evaluate classification performance of a trained ratio model.

    Runs the model on ``xs``, thresholds the predicted probabilities at 0.5,
    prints the confusion matrix and classification report against ``ys``,
    and plots the ROC curve.

    Parameters
    ----------
    model : torch.nn.Module
        Model whose forward pass returns ``(r_hat, logits)``.
    xs : sequence
        Input samples; each element is converted to a tensor and stacked.
    ys : sequence
        Ground-truth binary labels aligned with ``xs``.
    run_on_gpu : bool
        Use CUDA when available.
    double_precision : bool
        Evaluate in float64 instead of float32.
    return_grad_x : bool
        Unused placeholder, kept for interface compatibility.
    """
    # CPU or GPU?
    run_on_gpu = run_on_gpu and torch.cuda.is_available()
    device = torch.device("cuda" if run_on_gpu else "cpu")
    dtype = torch.double if double_precision else torch.float

    # Prepare data (removed the unused ``n_xs`` local).
    xs = torch.stack([tensor(i) for i in xs])

    model = model.to(device, dtype)
    xs = xs.to(device, dtype)

    with torch.no_grad():
        model.eval()
        _, logit = model(xs)
        probs = torch.sigmoid(logit)
        y_pred = torch.round(probs)

    # BUG FIX: sklearn needs CPU data; predictions previously stayed on the
    # GPU when run_on_gpu was in effect, breaking the metric calls below.
    if run_on_gpu:
        y_pred = y_pred.cpu()

    print("confusion matrix ", confusion_matrix(ys, y_pred))
    print(classification_report(ys, y_pred))
    fpr, tpr, auc_thresholds = roc_curve(ys, y_pred)

    def plot_roc_curve(fpr, tpr, label=None):
        # Helper: render the ROC curve of the thresholded predictions.
        plt.figure(figsize=(8, 8))
        plt.title('ROC Curve')
        plt.plot(fpr, tpr, linewidth=2, label=label)
        plt.plot([0, 1], [0, 1], 'k--')
        plt.axis([-0.005, 1, 0, 1.005])
        plt.xticks(np.arange(0, 1, 0.05), rotation=90)
        plt.xlabel("False Positive Rate")
        plt.ylabel("True Positive Rate (Recall)")
        plt.legend(loc='best')

    # BUG FIX: the helper was defined but never invoked, so the computed
    # fpr/tpr values were silently discarded.
    plot_roc_curve(fpr, tpr, label="model")
| [
"logging.getLogger",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.sigmoid",
"matplotlib.pyplot.axis",
"torch.no_grad",
"matplotlib.pyplot.figure",
"torch.cud... | [((330, 357), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (347, 357), False, 'import logging\n'), ((575, 620), 'torch.device', 'torch.device', (["('cuda' if run_on_gpu else 'cpu')"], {}), "('cuda' if run_on_gpu else 'cpu')\n", (587, 620), False, 'import torch\n'), ((1724, 1769), 'torch.device', 'torch.device', (["('cuda' if run_on_gpu else 'cpu')"], {}), "('cuda' if run_on_gpu else 'cpu')\n", (1736, 1769), False, 'import torch\n'), ((2353, 2379), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2363, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2405), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (2392, 2405), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2454), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'linewidth': '(2)', 'label': 'label'}), '(fpr, tpr, linewidth=2, label=label)\n', (2418, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2459, 2490), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (2467, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2526), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.005, 1, 0, 1.005]'], {}), '([-0.005, 1, 0, 1.005])\n', (2503, 2526), True, 'import matplotlib.pyplot as plt\n'), ((2581, 2614), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2591, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2660), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate (Recall)"""'], {}), "('True Positive Rate (Recall)')\n", (2629, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2687), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2675, 2687), True, 'import matplotlib.pyplot as plt\n'), ((536, 561), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), 
'()\n', (559, 561), False, 'import torch\n'), ((844, 859), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (857, 859), False, 'import torch\n'), ((1201, 1221), 'torch.sigmoid', 'torch.sigmoid', (['s_hat'], {}), '(s_hat)\n', (1214, 1221), False, 'import torch\n'), ((1685, 1710), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1708, 1710), False, 'import torch\n'), ((1994, 2009), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2007, 2009), False, 'import torch\n'), ((2079, 2099), 'torch.sigmoid', 'torch.sigmoid', (['logit'], {}), '(logit)\n', (2092, 2099), False, 'import torch\n'), ((2117, 2135), 'torch.round', 'torch.round', (['probs'], {}), '(probs)\n', (2128, 2135), False, 'import torch\n'), ((2284, 2305), 'sklearn.metrics.roc_curve', 'roc_curve', (['ys', 'y_pred'], {}), '(ys, y_pred)\n', (2293, 2305), False, 'from sklearn.metrics import roc_curve, auc, accuracy_score, confusion_matrix, classification_report\n'), ((2542, 2563), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.05)'], {}), '(0, 1, 0.05)\n', (2551, 2563), True, 'import numpy as np\n'), ((744, 753), 'torch.tensor', 'tensor', (['i'], {}), '(i)\n', (750, 753), False, 'from torch import tensor\n'), ((1893, 1902), 'torch.tensor', 'tensor', (['i'], {}), '(i)\n', (1899, 1902), False, 'from torch import tensor\n'), ((2170, 2198), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ys', 'y_pred'], {}), '(ys, y_pred)\n', (2186, 2198), False, 'from sklearn.metrics import roc_curve, auc, accuracy_score, confusion_matrix, classification_report\n'), ((2214, 2247), 'sklearn.metrics.classification_report', 'classification_report', (['ys', 'y_pred'], {}), '(ys, y_pred)\n', (2235, 2247), False, 'from sklearn.metrics import roc_curve, auc, accuracy_score, confusion_matrix, classification_report\n')] |
#!/usr/bin/env python
import numpy
def abserror(a, b):
    """Return the element-wise absolute difference ``|a - b|``."""
    difference = a - b
    return numpy.abs(difference)
def relerror(a, b):
    """Return ``|a - b|`` scaled by the larger magnitude of ``a`` and ``b``."""
    return numpy.abs(a - b) / max(numpy.abs(a), numpy.abs(b))
def eq(a, b, e):
    """Return whether ``a`` and ``b`` agree within absolute tolerance ``e``.

    For array inputs every element must satisfy the tolerance; for scalars
    a single comparison is made.
    """
    # isinstance (rather than an exact ``type(a) ==`` check) is the
    # idiomatic test and also accepts ndarray subclasses.
    if isinstance(a, numpy.ndarray):
        return all(abserror(a, b) < e)
    return abserror(a, b) < e


if __name__ == '__main__':
    pass
| [
"numpy.abs"
] | [((69, 85), 'numpy.abs', 'numpy.abs', (['(a - b)'], {}), '(a - b)\n', (78, 85), False, 'import numpy\n'), ((140, 152), 'numpy.abs', 'numpy.abs', (['a'], {}), '(a)\n', (149, 152), False, 'import numpy\n'), ((154, 166), 'numpy.abs', 'numpy.abs', (['b'], {}), '(b)\n', (163, 166), False, 'import numpy\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import windtools.util as util
class Weibull(object):
    """Fit per-direction-bin Weibull distributions to wind data.

    The input frame must provide a wind-speed column and a wind-direction
    column (degrees). Directions are binned into sectors of ``wd_bin_size``
    degrees and a two-parameter Weibull (location fixed at 0) is fitted to
    the speeds of each sector.
    """

    def __init__(self, data, ws_field='ws', wd_field='wd', wd_bin_size=30, ws_bin_size=1, prepare_data=True):
        self.data = pd.DataFrame(data)
        # BUG FIX: DataFrame.rename returns a new frame; the original call
        # discarded the result, so custom field names were never applied.
        self.data = self.data.rename(columns={ws_field: 'ws', wd_field: 'wd'})
        # Fitted parameters per direction bin; populated by fit_distribution.
        self.param = None
        if prepare_data:
            self.prepare_data(wd_bin_size, ws_bin_size)

    @classmethod
    def load_raw_data(cls, fpath, ws_field='ws', wd_field='wd', wd_bin_size=30, **loading_options):
        """Alternate constructor: load a raw data file via windtools.util."""
        field_map = {ws_field: 'ws', wd_field: 'wd'}
        df = util.load_data(fpath=fpath, field_map=field_map, loading_options=loading_options, dropna='any')
        return cls(data=df, wd_bin_size=wd_bin_size)

    def prepare_data(self, wd_bin_size, ws_bin_size):
        """Wrap 360° to 0°, bin direction and speed, drop unbinned rows."""
        max_ws = self.data['ws'].max()
        # BUG FIX: the original used the removed ``.ix`` accessor and zeroed
        # every column of the matched rows; only 'wd' should wrap to 0.
        self.data.loc[self.data['wd'] == 360, 'wd'] = 0
        # include_lowest keeps wd == 0 (and calm ws == 0) in the first bin;
        # previously those rows fell outside the left-open interval and were
        # dropped by dropna below.
        self.data['wd_bin'] = pd.cut(self.data['wd'],
                                      bins=np.arange(0, 360.1, wd_bin_size),
                                      include_lowest=True)
        self.data['ws_bin'] = pd.cut(self.data['ws'],
                                      bins=np.arange(0, max_ws + 0.1, ws_bin_size),
                                      include_lowest=True)
        self.data.dropna(inplace=True)

    def fit_distribution(self):
        """Fit a Weibull (floc=0) per direction bin; returns the parameter frame.

        scipy returns (c, loc, scale) == (k, mu, lam) here, with mu pinned
        to 0 by ``floc=0``.
        """
        result_dict = {}
        # observed=True skips empty categorical bins, which would otherwise
        # crash the fit on modern pandas.
        for bin_name, sub_df in self.data.groupby('wd_bin', observed=True):
            k, mu, lam = stats.weibull_min.fit(sub_df['ws'], floc=0)
            result_dict[bin_name] = {'k': k, 'mu': mu, 'lam': lam}
        self.param = pd.DataFrame(result_dict).T
        return self.param

    def create_plots(self, savefig=False):
        """Plot the fitted Weibull pdf per direction bin (call fit_distribution first)."""
        fig = plt.figure(figsize=(15, 12), dpi=80)
        # BUG FIX: was ``len(self.param.shape[0])`` -- len() of an int.
        sp_n = self.param.shape[0]
        sp_rows = int(np.sqrt(sp_n))
        # BUG FIX: add_subplot needs integer counts; np.ceil returns float.
        sp_cols = int(np.ceil(sp_n / sp_rows))
        lab_fsize = int(-4 / 5 * sp_n + 20)
        for i, (bin_name, sub_df) in enumerate(self.data.groupby('wd_bin', observed=True)):
            ax = fig.add_subplot(sp_rows, sp_cols, i + 1)
            # BUG FIX: ``k, = ...`` tuple-unpacked a scalar and raised.
            k = self.param.loc[bin_name, 'k']
            mu = self.param.loc[bin_name, 'mu']
            lam = self.param.loc[bin_name, 'lam']
            weib_x = np.linspace(0, max(sub_df['ws']), 1000)
            weib_y = stats.weibull_min(k, mu, lam).pdf(weib_x)
            plt.plot(weib_x, weib_y, 'r--', linewidth=2, label="weib fit")
            plt.xlabel('wind speed [m/s]', fontsize=lab_fsize)
            plt.ylabel('frequency', fontsize=lab_fsize)
            plt.title('WD={} A={} k={} u={}'.format(bin_name, round(lam, 2), round(k, 2),
                                                    round(np.mean(sub_df['ws']), 2)), fontsize=lab_fsize)
            plt.legend(fontsize=lab_fsize)
        fig.suptitle('Weibull fit', fontsize=21)
        fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.91, wspace=0.5, hspace=0.5)
        if savefig:
            plt.savefig('weib_fit.png', transparent=True)
        plt.show()
# todo: remove below after done refactoring
def weibull_fit(dfb, plot=False, savefig=False):
    """Fit one Weibull distribution per wind-direction bin of ``dfb``.

    ``dfb`` must carry 'wd_bin' labels of the form "(lo, hi]", wind speeds
    in 'ws' and speed-bin labels in 'ws_bin'. Returns a list of
    ``[bin_label, k, lam]`` triples; optionally draws one histogram/pdf
    panel per bin.
    """
    # Direction-bin labels, sorted by their numeric lower edge.
    bin_labels = sorted(
        (b for b in dfb['wd_bin'].unique() if pd.notnull(b)),
        key=lambda lbl: float(lbl[1:].split(",")[0]),
    )
    n_bins = len(bin_labels)
    n_rows = int(np.sqrt(n_bins))
    n_cols = np.ceil(n_bins / n_rows)
    fontsize = int(-4 / 5 * n_bins + 20)

    fitted = []
    if plot:
        fig = plt.figure(figsize=(15, 12), dpi=80)
    for idx, label in enumerate(bin_labels):
        subset = dfb[dfb['wd_bin'] == label][['ws', 'ws_bin']]
        k, mu, lam = stats.weibull_min.fit(subset['ws'], floc=0)  # weib fitting
        fitted.append([label, k, lam])
        if plot:
            ax = fig.add_subplot(n_rows, n_cols, idx + 1)
            counts = pd.pivot_table(subset, values=['ws'], index=['ws_bin'], aggfunc='count').fillna(0)
            bar_x = [float(name[1:].split(',')[0]) for name in counts.index]
            bar_y = [c / sum(counts['ws']) for c in counts['ws']]
            pdf_x = np.linspace(0, max(subset['ws']), 1000)
            pdf_y = stats.weibull_min(k, mu, lam).pdf(pdf_x)
            plt.bar(bar_x, bar_y, width=1, label="data")
            plt.plot(pdf_x, pdf_y, 'r--', linewidth=2, label="weib fit")
            plt.xlabel('wind speed [m/s]', fontsize=fontsize)
            plt.ylabel('frequency', fontsize=fontsize)
            plt.title('WD={} A={} k={} u={}'.format(label, round(lam, 2), round(k, 2),
                                                    round(np.mean(subset['ws']), 2)), fontsize=fontsize)
            plt.legend(fontsize=fontsize)
    if plot:
        fig.suptitle('Weibull fit', fontsize=21)
        fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.91, wspace=0.5, hspace=0.5)
        if savefig:
            plt.savefig('weib_fit.png', transparent=True)
        plt.show()
    return fitted
if __name__ == "__main__":
    # Demo: fit and plot the bundled sample data set.
    import os

    project_root = os.path.dirname(os.path.dirname(__file__))
    sample_path = os.path.join(project_root, 'tests', 'samples', 'sample_data.csv')
    weib = Weibull.load_raw_data(sample_path)
    weib.fit_distribution()
    weib.create_plots()
    print(weib.data.head())
    # weibull_fit(df, plot=True, savefig=False)
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"pandas.notnull",
"numpy.arange",
"numpy.mean",
"pandas.pivot_table",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pandas.DataFrame",
"scipy.stats.weibull_min.fit",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"os.path.dirname",
"matplotlib... | [((3520, 3543), 'numpy.ceil', 'np.ceil', (['(sp_n / sp_rows)'], {}), '(sp_n / sp_rows)\n', (3527, 3543), True, 'import numpy as np\n'), ((285, 303), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (297, 303), True, 'import pandas as pd\n'), ((663, 763), 'windtools.util.load_data', 'util.load_data', ([], {'fpath': 'fpath', 'field_map': 'field_map', 'loading_options': 'loading_options', 'dropna': '"""any"""'}), "(fpath=fpath, field_map=field_map, loading_options=\n loading_options, dropna='any')\n", (677, 763), True, 'import windtools.util as util\n'), ((1573, 1609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)', 'dpi': '(80)'}), '(figsize=(15, 12), dpi=80)\n', (1583, 1609), True, 'import matplotlib.pyplot as plt\n'), ((1706, 1729), 'numpy.ceil', 'np.ceil', (['(sp_n / sp_rows)'], {}), '(sp_n / sp_rows)\n', (1713, 1729), True, 'import numpy as np\n'), ((3151, 3161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3159, 3161), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3504), 'numpy.sqrt', 'np.sqrt', (['sp_n'], {}), '(sp_n)\n', (3498, 3504), True, 'import numpy as np\n'), ((3623, 3659), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)', 'dpi': '(80)'}), '(figsize=(15, 12), dpi=80)\n', (3633, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3788, 3829), 'scipy.stats.weibull_min.fit', 'stats.weibull_min.fit', (["data['ws']"], {'floc': '(0)'}), "(data['ws'], floc=0)\n", (3809, 3829), True, 'import scipy.stats as stats\n'), ((5030, 5040), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5038, 5040), True, 'import matplotlib.pyplot as plt\n'), ((1329, 1372), 'scipy.stats.weibull_min.fit', 'stats.weibull_min.fit', (["sub_df['ws']"], {'floc': '(0)'}), "(sub_df['ws'], floc=0)\n", (1350, 1372), True, 'import scipy.stats as stats\n'), ((1461, 1486), 'pandas.DataFrame', 'pd.DataFrame', (['result_dict'], {}), '(result_dict)\n', (1473, 1486), True, 'import pandas as pd\n'), 
((1673, 1686), 'numpy.sqrt', 'np.sqrt', (['sp_n'], {}), '(sp_n)\n', (1680, 1686), True, 'import numpy as np\n'), ((2488, 2550), 'matplotlib.pyplot.plot', 'plt.plot', (['weib_x', 'weib_y', '"""r--"""'], {'linewidth': '(2)', 'label': '"""weib fit"""'}), "(weib_x, weib_y, 'r--', linewidth=2, label='weib fit')\n", (2496, 2550), True, 'import matplotlib.pyplot as plt\n'), ((2563, 2613), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wind speed [m/s]"""'], {'fontsize': 'lab_fsize'}), "('wind speed [m/s]', fontsize=lab_fsize)\n", (2573, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2626, 2669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': 'lab_fsize'}), "('frequency', fontsize=lab_fsize)\n", (2636, 2669), True, 'import matplotlib.pyplot as plt\n'), ((2878, 2908), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'lab_fsize'}), '(fontsize=lab_fsize)\n', (2888, 2908), True, 'import matplotlib.pyplot as plt\n'), ((3097, 3142), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""weib_fit.png"""'], {'transparent': '(True)'}), "('weib_fit.png', transparent=True)\n", (3108, 3142), True, 'import matplotlib.pyplot as plt\n'), ((3347, 3360), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (3357, 3360), True, 'import pandas as pd\n'), ((4322, 4366), 'matplotlib.pyplot.bar', 'plt.bar', (['bar_x', 'bar_y'], {'width': '(1)', 'label': '"""data"""'}), "(bar_x, bar_y, width=1, label='data')\n", (4329, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4379, 4441), 'matplotlib.pyplot.plot', 'plt.plot', (['weib_x', 'weib_y', '"""r--"""'], {'linewidth': '(2)', 'label': '"""weib fit"""'}), "(weib_x, weib_y, 'r--', linewidth=2, label='weib fit')\n", (4387, 4441), True, 'import matplotlib.pyplot as plt\n'), ((4454, 4504), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wind speed [m/s]"""'], {'fontsize': 'lab_fsize'}), "('wind speed [m/s]', fontsize=lab_fsize)\n", (4464, 4504), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4560), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': 'lab_fsize'}), "('frequency', fontsize=lab_fsize)\n", (4527, 4560), True, 'import matplotlib.pyplot as plt\n'), ((4764, 4794), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'lab_fsize'}), '(fontsize=lab_fsize)\n', (4774, 4794), True, 'import matplotlib.pyplot as plt\n'), ((4976, 5021), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""weib_fit.png"""'], {'transparent': '(True)'}), "('weib_fit.png', transparent=True)\n", (4987, 5021), True, 'import matplotlib.pyplot as plt\n'), ((5147, 5172), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5162, 5172), False, 'import os\n'), ((1014, 1046), 'numpy.arange', 'np.arange', (['(0)', '(360.1)', 'wd_bin_size'], {}), '(0, 360.1, wd_bin_size)\n', (1023, 1046), True, 'import numpy as np\n'), ((1107, 1146), 'numpy.arange', 'np.arange', (['(0)', '(max_ws + 0.1)', 'ws_bin_size'], {}), '(0, max_ws + 0.1, ws_bin_size)\n', (1116, 1146), True, 'import numpy as np\n'), ((2139, 2168), 'scipy.stats.weibull_min', 'stats.weibull_min', (['k', 'mu', 'lam'], {}), '(k, mu, lam)\n', (2156, 2168), True, 'import scipy.stats as stats\n'), ((3981, 4051), 'pandas.pivot_table', 'pd.pivot_table', (['data'], {'values': "['ws']", 'index': "['ws_bin']", 'aggfunc': '"""count"""'}), "(data, values=['ws'], index=['ws_bin'], aggfunc='count')\n", (3995, 4051), True, 'import pandas as pd\n'), ((4267, 4296), 'scipy.stats.weibull_min', 'stats.weibull_min', (['k', 'mu', 'lam'], {}), '(k, mu, lam)\n', (4284, 4296), True, 'import scipy.stats as stats\n'), ((2818, 2839), 'numpy.mean', 'np.mean', (["sub_df['ws']"], {}), "(sub_df['ws'])\n", (2825, 2839), True, 'import numpy as np\n'), ((4706, 4725), 'numpy.mean', 'np.mean', (["data['ws']"], {}), "(data['ws'])\n", (4713, 4725), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from smoothing_actions import *
# Number of periods shown in the plots below
N_simul = 150
def complete_ss(beta, b0, x0, A, C, S_y, T=12):
    """
    Simulate consumption and debt under complete markets when exogenous
    income follows a linear state space x' = A x + C w, y = S_y x.

    Parameters
    ----------
    beta : discount factor
    b0 : initial debt level
    x0 : initial state vector
    A, C, S_y : linear state-space matrices
    T : number of periods to simulate (default 12)

    Returns
    -------
    (c_hist, b_hist, y_hist, x_hist) : consumption path, debt path,
    income path and state path, each of length T.
    """
    # Linear state space used to draw the income sample path
    # (qe / la are provided by `from smoothing_actions import *`)
    lss = qe.LinearStateSpace(A, C, S_y, mu_0=x0)
    # Resolvent (I - beta*A)^{-1}: discounted present value of future states
    resolvent = la.inv(np.eye(A.shape[0]) - beta * A)
    # With complete markets, consumption is constant over time
    cbar = (1 - beta) * (S_y @ resolvent @ x0 - b0)
    c_hist = np.full(T, cbar)
    # Simulate income and back out the implied debt path
    x_hist, y_hist = lss.simulate(T)
    b_hist = np.squeeze(S_y @ resolvent @ x_hist - cbar / (1 - beta))
    return c_hist, b_hist, np.squeeze(y_hist), x_hist
if __name__ == '__main__':
    # Define parameters of the income process: y' = alpha + rho1*y + rho2*y_lag
    alpha, rho1, rho2 = 10.0, 0.9, 0.0
    sigma = 1.0
    # N_simul = 1
    # T = N_simul
    # State is (1, y, y_lag); the leading 1 carries the constant term
    A = np.array([[1., 0., 0.],
                  [alpha, rho1, rho2],
                  [0., 1., 0.]])
    C = np.array([[0.], [sigma], [0.]])
    S_y = np.array([[1, 1.0, 0.]])
    beta, b0 = 0.95, -10.0
    # Start the state at the stationary mean of the income process
    x0 = np.array([1.0, alpha/(1-rho1), alpha/(1-rho1)])
    # Do simulation for complete markets
    s = np.random.randint(0, 10000)
    np.random.seed(s)  # Seeds get set the same for both economies
    out = complete_ss(beta, b0, x0, A, C, S_y, 150)
    c_hist_com, b_hist_com, y_hist_com, x_hist_com = out
    fig, ax = plt.subplots(1, 2, figsize = (15, 5))
    # Consumption plots
    ax[0].set_title('Cons and income', fontsize = 17)
    ax[0].plot(np.arange(N_simul), c_hist_com, label = 'consumption', lw = 3)
    ax[0].plot(np.arange(N_simul), y_hist_com, label = 'income',
               lw = 2, color = sb.color_palette()[3], alpha = .6, linestyle = '--')
    ax[0].legend(loc = 'best', fontsize = 15)
    ax[0].set_xlabel('Periods', fontsize = 13)
    ax[0].set_ylim([-5.0, 110])
    # Debt plots
    ax[1].set_title('Debt and income', fontsize = 17)
    ax[1].plot(np.arange(N_simul), b_hist_com, label = 'debt', lw = 2)
    ax[1].plot(np.arange(N_simul), y_hist_com, label = 'Income',
               lw = 2, color = sb.color_palette()[3], alpha = .6, linestyle = '--')
    ax[1].legend(loc = 'best', fontsize = 15)
    ax[1].axhline(0, color = 'k', lw = 1)
    ax[1].set_xlabel('Periods', fontsize = 13)
    plt.show()
| [
"numpy.eye",
"numpy.ones",
"seaborn.color_palette",
"numpy.squeeze",
"numpy.array",
"numpy.random.randint",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1195, 1244), 'numpy.squeeze', 'np.squeeze', (['(S_y @ rm @ x_hist - cbar / (1 - beta))'], {}), '(S_y @ rm @ x_hist - cbar / (1 - beta))\n', (1205, 1244), True, 'import numpy as np\n'), ((1454, 1519), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [alpha, rho1, rho2], [0.0, 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [alpha, rho1, rho2], [0.0, 1.0, 0.0]])\n', (1462, 1519), True, 'import numpy as np\n'), ((1558, 1591), 'numpy.array', 'np.array', (['[[0.0], [sigma], [0.0]]'], {}), '([[0.0], [sigma], [0.0]])\n', (1566, 1591), True, 'import numpy as np\n'), ((1600, 1625), 'numpy.array', 'np.array', (['[[1, 1.0, 0.0]]'], {}), '([[1, 1.0, 0.0]])\n', (1608, 1625), True, 'import numpy as np\n'), ((1662, 1717), 'numpy.array', 'np.array', (['[1.0, alpha / (1 - rho1), alpha / (1 - rho1)]'], {}), '([1.0, alpha / (1 - rho1), alpha / (1 - rho1)])\n', (1670, 1717), True, 'import numpy as np\n'), ((1760, 1787), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (1777, 1787), True, 'import numpy as np\n'), ((1792, 1809), 'numpy.random.seed', 'np.random.seed', (['s'], {}), '(s)\n', (1806, 1809), True, 'import numpy as np\n'), ((1980, 2015), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 5)'}), '(1, 2, figsize=(15, 5))\n', (1992, 2015), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2889, 2891), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1127), 'numpy.ones', 'np.ones', (['T'], {}), '(T)\n', (1124, 1127), True, 'import numpy as np\n'), ((1274, 1292), 'numpy.squeeze', 'np.squeeze', (['y_hist'], {}), '(y_hist)\n', (1284, 1292), True, 'import numpy as np\n'), ((2112, 2130), 'numpy.arange', 'np.arange', (['N_simul'], {}), '(N_simul)\n', (2121, 2130), True, 'import numpy as np\n'), ((2190, 2208), 'numpy.arange', 'np.arange', (['N_simul'], {}), '(N_simul)\n', (2199, 2208), True, 'import numpy as np\n'), ((2536, 2554), 'numpy.arange', 
'np.arange', (['N_simul'], {}), '(N_simul)\n', (2545, 2554), True, 'import numpy as np\n'), ((2607, 2625), 'numpy.arange', 'np.arange', (['N_simul'], {}), '(N_simul)\n', (2616, 2625), True, 'import numpy as np\n'), ((995, 1013), 'numpy.eye', 'np.eye', (['A.shape[0]'], {}), '(A.shape[0])\n', (1001, 1013), True, 'import numpy as np\n'), ((2271, 2289), 'seaborn.color_palette', 'sb.color_palette', ([], {}), '()\n', (2287, 2289), True, 'import seaborn as sb\n'), ((2688, 2706), 'seaborn.color_palette', 'sb.color_palette', ([], {}), '()\n', (2704, 2706), True, 'import seaborn as sb\n')] |
from __future__ import print_function
import sys
from coffea import lookup_tools
import uproot
from coffea.util import awkward
from coffea.util import numpy as np
import pytest
from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta
def jetmet_evaluator():
    """Build a lookup evaluator preloaded with the JEC/JUNC/JER/JERSF test payloads."""
    from coffea.lookup_tools import extractor
    payloads = [
        'tests/samples/Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi.jec.txt.gz',
        'tests/samples/Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi.jec.txt.gz',
        'tests/samples/Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi.jec.txt.gz',
        'tests/samples/Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi.jec.txt.gz',
        'tests/samples/Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi.junc.txt.gz',
        'tests/samples/Summer16_23Sep2016V3_MC_Uncertainty_AK4PFPuppi.junc.txt.gz',
        'tests/samples/Fall17_17Nov2017_V6_MC_UncertaintySources_AK4PFchs.junc.txt.gz',
        'tests/samples/Regrouped_Fall17_17Nov2017_V32_MC_UncertaintySources_AK4PFchs.junc.txt',
        'tests/samples/Spring16_25nsV10_MC_PtResolution_AK4PFPuppi.jr.txt.gz',
        'tests/samples/Spring16_25nsV10_MC_SF_AK4PFPuppi.jersf.txt.gz',
        'tests/samples/Autumn18_V7_MC_SF_AK4PFchs.jersf.txt.gz',
    ]
    ext = extractor()
    # '* *' loads every weight set in each file under its native name
    ext.add_weight_sets(['* * ' + path for path in payloads])
    ext.finalize()
    return ext.make_evaluator()
# Module-level evaluator shared by every test below (built once at import time)
evaluator = jetmet_evaluator()
def test_factorized_jet_corrector():
    """Check that the composed JEC runs on flat and jagged inputs and agrees."""
    from coffea.jetmet_tools import FactorizedJetCorrector
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    # Constant pileup density and jet area for every jet
    test_Rho = np.full_like(test_eta, 100.)
    test_A = np.full_like(test_eta, 5.)
    jec_names = ['Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi',
                 'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi',
                 'Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi',
                 'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi']
    corrector = FactorizedJetCorrector(**{name: evaluator[name] for name in jec_names})
    print(corrector)
    pt_copy = np.copy(test_pt)
    corrs = corrector.getCorrection(JetEta=test_eta, Rho=test_Rho, JetPt=test_pt, JetA=test_A)
    # getCorrection must not mutate the input pt array
    assert((np.abs(pt_copy - test_pt) < 1e-6).all())
    # Test for bug #320
    def make_starts_stops(counts, data, padding):
        # Build a JaggedArray whose backing buffer has a gap of `padding`
        # unused entries after the first event with >1 jet, so starts/stops
        # are non-contiguous (the condition that triggered bug #320).
        cumcounts = np.cumsum(counts)
        first_nonempty_event = cumcounts[np.nonzero(counts > 1)[0][0]]
        data_pad = np.empty(data.size + padding)
        data_pad[:first_nonempty_event] = data[:first_nonempty_event]
        data_pad[first_nonempty_event + padding:] = data[first_nonempty_event:]
        starts = np.r_[0, cumcounts[:-1]]
        starts[starts >= first_nonempty_event] += padding
        return awkward.JaggedArray(starts, starts + counts, data_pad)
    test_pt_jag = make_starts_stops(counts, test_pt, 5)
    test_eta_jag = make_starts_stops(counts, test_eta, 3)
    test_Rho_jag = awkward.JaggedArray.fromcounts(counts, test_Rho)
    test_A_jag = awkward.JaggedArray.fromcounts(counts, test_A)
    corrs_jag = corrector.getCorrection(JetEta=test_eta_jag, Rho=test_Rho_jag, JetPt=test_pt_jag, JetA=test_A_jag)
    # Jagged input must be left unmutated and give the same corrections
    assert((np.abs(pt_copy - test_pt_jag.flatten()) < 1e-6).all())
    assert((np.abs(corrs - corrs_jag.flatten()) < 1e-6).all())
def test_jet_resolution():
    """Exercise the pt-resolution lookup on flat dummy jets."""
    from coffea.jetmet_tools import JetResolution
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    test_Rho = np.full_like(test_eta, 100.)
    jer_name = 'Spring16_25nsV10_MC_PtResolution_AK4PFPuppi'
    reso = JetResolution(**{jer_name: evaluator[jer_name]})
    print(reso)
    resos = reso.getResolution(JetEta=test_eta, Rho=test_Rho, JetPt=test_pt)
def test_jet_correction_uncertainty():
    """Exercise the total JEC uncertainty and check one entry per jet."""
    from coffea.jetmet_tools import JetCorrectionUncertainty
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    unc_name = 'Summer16_23Sep2016V3_MC_Uncertainty_AK4PFPuppi'
    junc = JetCorrectionUncertainty(**{unc_name: evaluator[unc_name]})
    print(junc)
    for _level, corrs in junc.getUncertainty(JetEta=test_eta, JetPt=test_pt):
        assert corrs.shape[0] == test_eta.shape[0]
def test_jet_correction_uncertainty_sources():
    """Exercise the split (per-source) JEC uncertainties.

    NOTE(review): a function with this exact name is re-defined later in this
    file; under pytest only the later definition is collected, so this version
    is effectively shadowed dead code.
    """
    from coffea.jetmet_tools import JetCorrectionUncertainty
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    junc_names = []
    levels = []
    # Collect every Summer16 per-source uncertainty registered in the evaluator
    for name in dir(evaluator):
        if 'Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi' in name:
            junc_names.append(name)
            levels.append(name.split('_')[-1])
    junc = JetCorrectionUncertainty(**{name: evaluator[name] for name in junc_names})
    print(junc)
    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)
    for level, corrs in juncs:
        assert(level in levels)
        assert(corrs.shape[0] == test_eta.shape[0])
def test_jet_correction_regrouped_uncertainty_sources():
    """Exercise the 'Regrouped' per-source JEC uncertainties, including
    two-token level names such as 'Absolute_2017'."""
    from coffea.jetmet_tools import JetCorrectionUncertainty
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    junc_names = []
    levels = []
    for name in dir(evaluator):
        if 'Regrouped_Fall17_17Nov2017_V32_MC_UncertaintySources_AK4PFchs' not in name:
            continue
        junc_names.append(name)
        parts = name.split('_')
        # Nine underscore-separated tokens means the level itself contains an
        # underscore (e.g. 'Absolute_2017'); otherwise it is the last token.
        levels.append('_'.join(parts[-2:]) if len(parts) == 9 else parts[-1])
    junc = JetCorrectionUncertainty(**{name: evaluator[name] for name in junc_names})
    print(junc)
    for level, corrs in junc.getUncertainty(JetEta=test_eta, JetPt=test_pt):
        assert level in levels
        assert corrs.shape[0] == test_eta.shape[0]
def test_jet_resolution_sf():
    """Exercise an eta-binned JER scale-factor lookup."""
    from coffea.jetmet_tools import JetResolutionScaleFactor
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    sf_name = 'Spring16_25nsV10_MC_SF_AK4PFPuppi'
    resosf = JetResolutionScaleFactor(**{sf_name: evaluator[sf_name]})
    print(resosf)
    resosfs = resosf.getScaleFactor(JetEta=test_eta)
def test_jet_resolution_sf_2d():
    """Exercise a pt/eta-binned (2D) JER scale-factor lookup."""
    from coffea.jetmet_tools import JetResolutionScaleFactor
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    sf_name = "Autumn18_V7_MC_SF_AK4PFchs"
    resosf = JetResolutionScaleFactor(**{sf_name: evaluator[sf_name]})
    resosfs = resosf.getScaleFactor(JetPt=test_pt, JetEta=test_eta)
def test_jet_transformer():
    """End-to-end check of JetTransformer: apply JEC + JER smearing +
    uncertainties to dummy jets and MET, then verify that every expected
    systematic column was attached to both collections."""
    import numpy as np
    import awkward as ak
    import math
    from coffea.analysis_objects import JaggedCandidateArray as CandArray
    from coffea.jetmet_tools import (FactorizedJetCorrector,
                                     JetResolution,
                                     JetResolutionScaleFactor,
                                     JetCorrectionUncertainty,
                                     JetTransformer)
    counts, test_px, test_py, test_pz, test_e = dummy_four_momenta()
    # Constant rho / area per jet, like in the corrector test above
    test_Rho = np.full(shape=(np.sum(counts),), fill_value=100.)
    test_A = np.full(shape=(np.sum(counts),), fill_value=5.)
    jets = CandArray.candidatesfromcounts(counts, px=test_px, py=test_py, pz=test_pz, energy=test_e)
    # ptRaw/massRaw mark the uncorrected values the transformer starts from
    jets.add_attributes(ptRaw=jets.pt,
                        massRaw=jets.mass,
                        rho=test_Rho,
                        area=test_A)
    # One fake MET vector per event, with small unclustered-energy shifts
    fakemet = np.random.exponential(scale=1.0,size=counts.size)
    metphi = np.random.uniform(low=-math.pi, high=math.pi, size=counts.size)
    syst_up = 0.001*fakemet
    syst_down = -0.001*fakemet
    met = CandArray.candidatesfromcounts(np.ones_like(counts),
                                         pt=fakemet,
                                         eta=np.zeros_like(counts),
                                         phi=metphi,
                                         mass=np.zeros_like(counts),
                                         MetUnclustEnUpDeltaX=syst_up*np.cos(metphi),
                                         MetUnclustEnUpDeltaY=syst_down*np.sin(metphi))
    jec_names = ['Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi',
                 'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi',
                 'Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi',
                 'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi']
    corrector = FactorizedJetCorrector(**{name: evaluator[name] for name in jec_names})
    junc_names = []
    for name in dir(evaluator):
        if 'Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi' in name:
            junc_names.append(name)
    junc = JetCorrectionUncertainty(**{name: evaluator[name] for name in junc_names})
    jer_names = ['Spring16_25nsV10_MC_PtResolution_AK4PFPuppi']
    reso = JetResolution(**{name: evaluator[name] for name in jer_names})
    jersf_names = ['Spring16_25nsV10_MC_SF_AK4PFPuppi']
    resosf = JetResolutionScaleFactor(**{name: evaluator[name] for name in jersf_names})
    xform = JetTransformer(jec=corrector, junc=junc, jer=reso, jersf=resosf)
    print(xform.uncertainties)
    # transform() mutates jets and met in place, adding systematic columns
    xform.transform(jets, met=met)
    print('jets',jets.columns)
    print('met',met.columns)
    # JER smearing variations on the jets
    assert('pt_jer_up' in jets.columns)
    assert('pt_jer_down' in jets.columns)
    assert('mass_jer_up' in jets.columns)
    assert('mass_jer_down' in jets.columns)
    # Unclustered-energy variations on the MET
    assert('pt_UnclustEn_up' in met.columns)
    assert('pt_UnclustEn_down' in met.columns)
    assert('phi_UnclustEn_up' in met.columns)
    assert('phi_UnclustEn_down' in met.columns)
    # Every per-source JEC uncertainty must appear on jets and propagate to MET
    for unc in xform.uncertainties:
        assert('pt_'+unc+'_up' in jets.columns)
        assert('pt_'+unc+'_down' in jets.columns)
        assert('mass_'+unc+'_up' in jets.columns)
        assert('mass_'+unc+'_down' in jets.columns)
        assert('pt_'+unc+'_up' in met.columns)
        assert('phi_'+unc+'_up' in met.columns)
def test_jet_correction_uncertainty_sources():
    """Exercise the split (per-source) JEC uncertainties.

    NOTE(review): this redefines — and therefore shadows — the function of the
    same name earlier in this file; only this version runs under pytest. It
    extends the earlier one with the Fall17 'AbsoluteFlavMap' payload, which
    checks handling of an underscore inside the data-era part of the name.
    """
    from coffea.jetmet_tools import JetCorrectionUncertainty
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    junc_names = []
    levels = []
    for name in dir(evaluator):
        if 'Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi' in name:
            junc_names.append(name)
            levels.append(name.split('_')[-1])
        #test for underscore in dataera
        if 'Fall17_17Nov2017_V6_MC_UncertaintySources_AK4PFchs_AbsoluteFlavMap' in name:
            junc_names.append(name)
            levels.append(name.split('_')[-1])
    junc = JetCorrectionUncertainty(**{name: evaluator[name] for name in junc_names})
    print(junc)
    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)
    for level, corrs in juncs:
        assert(level in levels)
        assert(corrs.shape[0] == test_eta.shape[0])
| [
"coffea.jetmet_tools.JetResolution",
"numpy.random.exponential",
"numpy.sin",
"numpy.full_like",
"dummy_distributions.dummy_four_momenta",
"numpy.empty",
"coffea.util.awkward.JaggedArray",
"numpy.abs",
"coffea.jetmet_tools.FactorizedJetCorrector",
"dummy_distributions.dummy_jagged_eta_pt",
"coff... | [((338, 349), 'coffea.lookup_tools.extractor', 'extractor', ([], {}), '()\n', (347, 349), False, 'from coffea.lookup_tools import extractor\n'), ((1755, 1776), 'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (1774, 1776), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((1793, 1822), 'numpy.full_like', 'np.full_like', (['test_eta', '(100.0)'], {}), '(test_eta, 100.0)\n', (1805, 1822), True, 'import numpy as np\n'), ((1835, 1862), 'numpy.full_like', 'np.full_like', (['test_eta', '(5.0)'], {}), '(test_eta, 5.0)\n', (1847, 1862), True, 'import numpy as np\n'), ((2144, 2215), 'coffea.jetmet_tools.FactorizedJetCorrector', 'FactorizedJetCorrector', ([], {}), '(**{name: evaluator[name] for name in jec_names})\n', (2166, 2215), False, 'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((2253, 2269), 'numpy.copy', 'np.copy', (['test_pt'], {}), '(test_pt)\n', (2260, 2269), True, 'import numpy as np\n'), ((3108, 3156), 'coffea.util.awkward.JaggedArray.fromcounts', 'awkward.JaggedArray.fromcounts', (['counts', 'test_Rho'], {}), '(counts, test_Rho)\n', (3138, 3156), False, 'from coffea.util import awkward\n'), ((3174, 3220), 'coffea.util.awkward.JaggedArray.fromcounts', 'awkward.JaggedArray.fromcounts', (['counts', 'test_A'], {}), '(counts, test_A)\n', (3204, 3220), False, 'from coffea.util import awkward\n'), ((3581, 3602), 'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (3600, 3602), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((3619, 3648), 'numpy.full_like', 'np.full_like', (['test_eta', '(100.0)'], {}), '(test_eta, 100.0)\n', (3631, 3648), True, 'import numpy as np\n'), ((3724, 3786), 'coffea.jetmet_tools.JetResolution', 'JetResolution', ([], {}), '(**{name: evaluator[name] for name in jer_names})\n', (3737, 3786), False, 
'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((4017, 4038), 'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (4036, 4038), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((4119, 4193), 'coffea.jetmet_tools.JetCorrectionUncertainty', 'JetCorrectionUncertainty', ([], {}), '(**{name: evaluator[name] for name in junc_names})\n', (4143, 4193), False, 'from coffea.jetmet_tools import JetCorrectionUncertainty\n'), ((4503, 4524), 'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (4522, 4524), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((4764, 4838), 'coffea.jetmet_tools.JetCorrectionUncertainty', 'JetCorrectionUncertainty', ([], {}), '(**{name: evaluator[name] for name in junc_names})\n', (4788, 4838), False, 'from coffea.jetmet_tools import JetCorrectionUncertainty\n'), ((5190, 5211), 'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (5209, 5211), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((5585, 5659), 'coffea.jetmet_tools.JetCorrectionUncertainty', 'JetCorrectionUncertainty', ([], {}), '(**{name: evaluator[name] for name in junc_names})\n', (5609, 5659), False, 'from coffea.jetmet_tools import JetCorrectionUncertainty\n'), ((5963, 5984), 'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (5982, 5984), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((6055, 6130), 'coffea.jetmet_tools.JetResolutionScaleFactor', 'JetResolutionScaleFactor', ([], {}), '(**{name: evaluator[name] for name in jersf_names})\n', (6079, 6130), False, 'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((6331, 6352), 
'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (6350, 6352), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((6366, 6465), 'coffea.jetmet_tools.JetResolutionScaleFactor', 'JetResolutionScaleFactor', ([], {}), "(**{name: evaluator[name] for name in [\n 'Autumn18_V7_MC_SF_AK4PFchs']})\n", (6390, 6465), False, 'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((7037, 7057), 'dummy_distributions.dummy_four_momenta', 'dummy_four_momenta', ([], {}), '()\n', (7055, 7057), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((7197, 7290), 'coffea.analysis_objects.JaggedCandidateArray.candidatesfromcounts', 'CandArray.candidatesfromcounts', (['counts'], {'px': 'test_px', 'py': 'test_py', 'pz': 'test_pz', 'energy': 'test_e'}), '(counts, px=test_px, py=test_py, pz=test_pz,\n energy=test_e)\n', (7227, 7290), True, 'from coffea.analysis_objects import JaggedCandidateArray as CandArray\n'), ((7459, 7509), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(1.0)', 'size': 'counts.size'}), '(scale=1.0, size=counts.size)\n', (7480, 7509), True, 'import numpy as np\n'), ((7522, 7585), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-math.pi)', 'high': 'math.pi', 'size': 'counts.size'}), '(low=-math.pi, high=math.pi, size=counts.size)\n', (7539, 7585), True, 'import numpy as np\n'), ((8407, 8478), 'coffea.jetmet_tools.FactorizedJetCorrector', 'FactorizedJetCorrector', ([], {}), '(**{name: evaluator[name] for name in jec_names})\n', (8429, 8478), False, 'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((8655, 8729), 'coffea.jetmet_tools.JetCorrectionUncertainty', 'JetCorrectionUncertainty', ([], {}), '(**{name: evaluator[name] for name in junc_names})\n', (8679, 
8729), False, 'from coffea.jetmet_tools import JetCorrectionUncertainty\n'), ((8806, 8868), 'coffea.jetmet_tools.JetResolution', 'JetResolution', ([], {}), '(**{name: evaluator[name] for name in jer_names})\n', (8819, 8868), False, 'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((8939, 9014), 'coffea.jetmet_tools.JetResolutionScaleFactor', 'JetResolutionScaleFactor', ([], {}), '(**{name: evaluator[name] for name in jersf_names})\n', (8963, 9014), False, 'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((9028, 9092), 'coffea.jetmet_tools.JetTransformer', 'JetTransformer', ([], {'jec': 'corrector', 'junc': 'junc', 'jer': 'reso', 'jersf': 'resosf'}), '(jec=corrector, junc=junc, jer=reso, jersf=resosf)\n', (9042, 9092), False, 'from coffea.jetmet_tools import FactorizedJetCorrector, JetResolution, JetResolutionScaleFactor, JetCorrectionUncertainty, JetTransformer\n'), ((10052, 10073), 'dummy_distributions.dummy_jagged_eta_pt', 'dummy_jagged_eta_pt', ([], {}), '()\n', (10071, 10073), False, 'from dummy_distributions import dummy_jagged_eta_pt, dummy_four_momenta\n'), ((10525, 10599), 'coffea.jetmet_tools.JetCorrectionUncertainty', 'JetCorrectionUncertainty', ([], {}), '(**{name: evaluator[name] for name in junc_names})\n', (10549, 10599), False, 'from coffea.jetmet_tools import JetCorrectionUncertainty\n'), ((2515, 2532), 'numpy.cumsum', 'np.cumsum', (['counts'], {}), '(counts)\n', (2524, 2532), True, 'import numpy as np\n'), ((2623, 2652), 'numpy.empty', 'np.empty', (['(data.size + padding)'], {}), '(data.size + padding)\n', (2631, 2652), True, 'import numpy as np\n'), ((2918, 2972), 'coffea.util.awkward.JaggedArray', 'awkward.JaggedArray', (['starts', '(starts + counts)', 'data_pad'], {}), '(starts, starts + counts, data_pad)\n', (2937, 2972), False, 'from coffea.util import 
awkward\n'), ((7686, 7706), 'numpy.ones_like', 'np.ones_like', (['counts'], {}), '(counts)\n', (7698, 7706), True, 'import numpy as np\n'), ((7806, 7827), 'numpy.zeros_like', 'np.zeros_like', (['counts'], {}), '(counts)\n', (7819, 7827), True, 'import numpy as np\n'), ((7928, 7949), 'numpy.zeros_like', 'np.zeros_like', (['counts'], {}), '(counts)\n', (7941, 7949), True, 'import numpy as np\n'), ((2379, 2404), 'numpy.abs', 'np.abs', (['(pt_copy - test_pt)'], {}), '(pt_copy - test_pt)\n', (2385, 2404), True, 'import numpy as np\n'), ((7089, 7103), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (7095, 7103), True, 'import numpy as np\n'), ((7152, 7166), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (7158, 7166), True, 'import numpy as np\n'), ((8021, 8035), 'numpy.cos', 'np.cos', (['metphi'], {}), '(metphi)\n', (8027, 8035), True, 'import numpy as np\n'), ((8109, 8123), 'numpy.sin', 'np.sin', (['metphi'], {}), '(metphi)\n', (8115, 8123), True, 'import numpy as np\n'), ((2574, 2596), 'numpy.nonzero', 'np.nonzero', (['(counts > 1)'], {}), '(counts > 1)\n', (2584, 2596), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import spaces
from habitat import logger
from habitat.tasks.nav.nav import (
EpisodicCompassSensor,
EpisodicGPSSensor,
HeadingSensor,
ImageGoalSensor,
IntegratedPointGoalGPSAndCompassSensor,
PointGoalSensor,
ProximitySensor,
)
from habitat.tasks.nav.object_nav_task import ObjectGoalSensor
from habitat_baselines.common.utils import Flatten, ResizeCenterCropper
from habitat_baselines.rl.ddppo.policy import resnet
from habitat_baselines.rl.ddppo.policy.running_mean_and_var import (
RunningMeanAndVar,
)
from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder
from habitat_baselines.rl.ppo import Net, Policy
class PointNavResNetPolicy(Policy):
    """Actor-critic policy for point-goal navigation: wraps a
    PointNavResNetNet feature extractor in the habitat-baselines Policy
    (which adds the action distribution and critic heads)."""
    def __init__(
        self,
        observation_space,
        action_space,
        hidden_size=512,
        num_recurrent_layers=2,
        rnn_type="LSTM",
        resnet_baseplanes=32,
        backbone="resnet50",
        normalize_visual_inputs=False,
        # NOTE(review): this default is evaluated once at definition time, so
        # all policies built with the default share one transform instance.
        # Left as-is: None is a meaningful value here (disables the transform),
        # so the usual None-sentinel rewrite would change the interface.
        obs_transform=ResizeCenterCropper(size=(256, 256)),
        force_blind_policy=False,
    ):
        # Policy.__init__ takes the network and the size of the (discrete)
        # action space.
        super().__init__(
            PointNavResNetNet(
                observation_space=observation_space,
                action_space=action_space,
                hidden_size=hidden_size,
                num_recurrent_layers=num_recurrent_layers,
                rnn_type=rnn_type,
                backbone=backbone,
                resnet_baseplanes=resnet_baseplanes,
                normalize_visual_inputs=normalize_visual_inputs,
                obs_transform=obs_transform,
                force_blind_policy=force_blind_policy,
            ),
            action_space.n,
        )
class ResNetEncoder(nn.Module):
    """ResNet visual encoder for RGB and/or depth observations.

    Concatenates the available visual modalities channel-wise, halves the
    spatial resolution with average pooling, runs the ResNet backbone, and
    compresses the result to roughly 2048 flattened features.
    """
    def __init__(
        self,
        observation_space,
        baseplanes=32,
        ngroups=32,
        spatial_size=128,
        make_backbone=None,
        normalize_visual_inputs=False,
        # NOTE(review): default evaluated once at definition time, so instances
        # built with the default share one transform object; None is a valid
        # value meaning "no transform", so this cannot use a None sentinel.
        obs_transform=ResizeCenterCropper(size=(256, 256)),
    ):
        super().__init__()
        self.obs_transform = obs_transform
        if self.obs_transform is not None:
            # Let the transform rewrite the observation space (e.g. new H/W)
            # before we read shapes from it.
            observation_space = self.obs_transform.transform_observation_space(
                observation_space
            )
        if "rgb" in observation_space.spaces:
            self._n_input_rgb = observation_space.spaces["rgb"].shape[2]
            # // 2 because forward() average-pools the input by a factor of 2
            spatial_size = observation_space.spaces["rgb"].shape[0] // 2
        else:
            self._n_input_rgb = 0
        if "depth" in observation_space.spaces:
            self._n_input_depth = observation_space.spaces["depth"].shape[2]
            spatial_size = observation_space.spaces["depth"].shape[0] // 2
        else:
            self._n_input_depth = 0
        if normalize_visual_inputs:
            # Online per-channel normalization over the concatenated input
            self.running_mean_and_var = RunningMeanAndVar(
                self._n_input_depth + self._n_input_rgb
            )
        else:
            # Empty Sequential acts as an identity
            self.running_mean_and_var = nn.Sequential()
        if not self.is_blind:
            input_channels = self._n_input_depth + self._n_input_rgb
            self.backbone = make_backbone(input_channels, baseplanes, ngroups)
            # Spatial extent of the backbone output feature map
            final_spatial = int(
                spatial_size * self.backbone.final_spatial_compress
            )
            # Pick a channel count so the flattened output is ~2048 features
            after_compression_flat_size = 2048
            num_compression_channels = int(
                round(after_compression_flat_size / (final_spatial ** 2))
            )
            self.compression = nn.Sequential(
                nn.Conv2d(
                    self.backbone.final_channels,
                    num_compression_channels,
                    kernel_size=3,
                    padding=1,
                    bias=False,
                ),
                nn.GroupNorm(1, num_compression_channels),
                nn.ReLU(True),
            )
            self.output_shape = (
                num_compression_channels,
                final_spatial,
                final_spatial,
            )
    @property
    def is_blind(self):
        # True when neither RGB nor depth is present in the observation space
        return self._n_input_rgb + self._n_input_depth == 0
    def layer_init(self):
        # Kaiming (He) initialization for all conv/linear weights, zero biases
        for layer in self.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(
                    layer.weight, nn.init.calculate_gain("relu")
                )
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, val=0)
    def forward(self, observations):
        """Encode observations to a (N, C, H, W) feature tensor, or None if blind."""
        if self.is_blind:
            return None
        cnn_input = []
        if self._n_input_rgb > 0:
            rgb_observations = observations["rgb"]
            # permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
            rgb_observations = rgb_observations.permute(0, 3, 1, 2)
            rgb_observations = rgb_observations / 255.0  # normalize RGB
            cnn_input.append(rgb_observations)
        if self._n_input_depth > 0:
            depth_observations = observations["depth"]
            # permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
            depth_observations = depth_observations.permute(0, 3, 1, 2)
            cnn_input.append(depth_observations)
        if self.obs_transform:
            cnn_input = [self.obs_transform(inp) for inp in cnn_input]
        x = torch.cat(cnn_input, dim=1)
        # Halve spatial resolution before the backbone (matches the // 2 above)
        x = F.avg_pool2d(x, 2)
        x = self.running_mean_and_var(x)
        x = self.backbone(x)
        x = self.compression(x)
        return x
class PointNavResNetNet(Net):
"""Network which passes the input image through CNN and concatenates
goal vector with CNN's output and passes that through RNN.
"""
    def __init__(
        self,
        observation_space,
        action_space,
        hidden_size,
        num_recurrent_layers,
        rnn_type,
        backbone,
        resnet_baseplanes,
        normalize_visual_inputs,
        # NOTE(review): default evaluated once at definition time (shared
        # instance); None is a valid "no transform" value, so left as-is.
        obs_transform=ResizeCenterCropper(size=(256, 256)),
        force_blind_policy=False,
    ):
        """Build the visual encoder, one 32-dim embedding per available goal
        sensor, and the RNN state encoder; rnn_input_size accumulates the
        width contributed by each enabled input."""
        super().__init__()
        # Previous action embedded into 32 dims; +1 reserves an index for
        # "no previous action" (masked start-of-episode steps).
        self.prev_action_embedding = nn.Embedding(action_space.n + 1, 32)
        self._n_prev_action = 32
        rnn_input_size = self._n_prev_action
        if (
            IntegratedPointGoalGPSAndCompassSensor.cls_uuid
            in observation_space.spaces
        ):
            # +1: forward() expands (rho, phi) to (rho, cos(-phi), sin(-phi))
            n_input_goal = (
                observation_space.spaces[
                    IntegratedPointGoalGPSAndCompassSensor.cls_uuid
                ].shape[0]
                + 1
            )
            # NOTE(review): "tgt_embeding" [sic] — typo kept because forward()
            # references this attribute name.
            self.tgt_embeding = nn.Linear(n_input_goal, 32)
            rnn_input_size += 32
        if ObjectGoalSensor.cls_uuid in observation_space.spaces:
            self._n_object_categories = (
                int(
                    observation_space.spaces[ObjectGoalSensor.cls_uuid].high[0]
                )
                + 1
            )
            self.obj_categories_embedding = nn.Embedding(
                self._n_object_categories, 32
            )
            rnn_input_size += 32
        if EpisodicGPSSensor.cls_uuid in observation_space.spaces:
            input_gps_dim = observation_space.spaces[
                EpisodicGPSSensor.cls_uuid
            ].shape[0]
            self.gps_embedding = nn.Linear(input_gps_dim, 32)
            rnn_input_size += 32
        if PointGoalSensor.cls_uuid in observation_space.spaces:
            input_pointgoal_dim = observation_space.spaces[
                PointGoalSensor.cls_uuid
            ].shape[0]
            self.pointgoal_embedding = nn.Linear(input_pointgoal_dim, 32)
            rnn_input_size += 32
        if HeadingSensor.cls_uuid in observation_space.spaces:
            # +1: heading angle is expanded to (cos, sin) in forward()
            input_heading_dim = (
                observation_space.spaces[HeadingSensor.cls_uuid].shape[0] + 1
            )
            assert input_heading_dim == 2, "Expected heading with 2D rotation."
            self.heading_embedding = nn.Linear(input_heading_dim, 32)
            rnn_input_size += 32
        if ProximitySensor.cls_uuid in observation_space.spaces:
            input_proximity_dim = observation_space.spaces[
                ProximitySensor.cls_uuid
            ].shape[0]
            self.proximity_embedding = nn.Linear(input_proximity_dim, 32)
            rnn_input_size += 32
        if EpisodicCompassSensor.cls_uuid in observation_space.spaces:
            assert (
                observation_space.spaces[EpisodicCompassSensor.cls_uuid].shape[
                    0
                ]
                == 1
            ), "Expected compass with 2D rotation."
            input_compass_dim = 2  # cos and sin of the angle
            self.compass_embedding = nn.Linear(input_compass_dim, 32)
            rnn_input_size += 32
        if ImageGoalSensor.cls_uuid in observation_space.spaces:
            # Image goals get their own ResNet encoder fed the goal as "rgb"
            goal_observation_space = spaces.Dict(
                {"rgb": observation_space.spaces[ImageGoalSensor.cls_uuid]}
            )
            self.goal_visual_encoder = ResNetEncoder(
                goal_observation_space,
                baseplanes=resnet_baseplanes,
                ngroups=resnet_baseplanes // 2,
                make_backbone=getattr(resnet, backbone),
                normalize_visual_inputs=normalize_visual_inputs,
                obs_transform=obs_transform,
            )
            self.goal_visual_fc = nn.Sequential(
                Flatten(),
                nn.Linear(
                    np.prod(self.goal_visual_encoder.output_shape), hidden_size
                ),
                nn.ReLU(True),
            )
            rnn_input_size += hidden_size
        self._hidden_size = hidden_size
        # Main visual encoder; an empty Dict space makes it blind on purpose
        self.visual_encoder = ResNetEncoder(
            observation_space if not force_blind_policy else spaces.Dict({}),
            baseplanes=resnet_baseplanes,
            ngroups=resnet_baseplanes // 2,
            make_backbone=getattr(resnet, backbone),
            normalize_visual_inputs=normalize_visual_inputs,
            obs_transform=obs_transform,
        )
        if not self.visual_encoder.is_blind:
            # Project flattened CNN features down to the RNN hidden size
            self.visual_fc = nn.Sequential(
                Flatten(),
                nn.Linear(
                    np.prod(self.visual_encoder.output_shape), hidden_size
                ),
                nn.ReLU(True),
            )
        self.state_encoder = RNNStateEncoder(
            (0 if self.is_blind else self._hidden_size) + rnn_input_size,
            self._hidden_size,
            rnn_type=rnn_type,
            num_layers=num_recurrent_layers,
        )
        self.train()
    @property
    def output_size(self):
        # Width of the feature vector produced per step (RNN hidden size)
        return self._hidden_size
    @property
    def is_blind(self):
        # Delegates to the visual encoder: True when no RGB/depth inputs exist
        return self.visual_encoder.is_blind
    @property
    def num_recurrent_layers(self):
        # Number of recurrent layers in the underlying RNN state encoder
        return self.state_encoder.num_recurrent_layers
    def forward(self, observations, rnn_hidden_states, prev_actions, masks):
        """Build the per-step feature vector and advance the recurrent encoder.

        Concatenates, in a fixed order, the visual features and the embeddings
        of every goal/pose sensor present in *observations*, plus the
        previous-action embedding, then runs the result through the RNN state
        encoder.

        Returns:
            (x, rnn_hidden_states): RNN output features and updated hidden
            states.
        """
        x = []
        if not self.is_blind:
            # Reuse precomputed visual features when the caller supplies them,
            # avoiding a second pass through the CNN encoder.
            if "visual_features" in observations:
                visual_feats = observations["visual_features"]
            else:
                visual_feats = self.visual_encoder(observations)
            visual_feats = self.visual_fc(visual_feats)
            x.append(visual_feats)
        if IntegratedPointGoalGPSAndCompassSensor.cls_uuid in observations:
            goal_observations = observations[
                IntegratedPointGoalGPSAndCompassSensor.cls_uuid
            ]
            # Re-encode the polar goal (rho, phi) as (rho, cos(-phi), sin(-phi))
            # so the angle is continuous for the linear embedding.
            goal_observations = torch.stack(
                [
                    goal_observations[:, 0],
                    torch.cos(-goal_observations[:, 1]),
                    torch.sin(-goal_observations[:, 1]),
                ],
                -1,
            )
            # ('tgt_embeding' spelling matches the attribute set in __init__.)
            x.append(self.tgt_embeding(goal_observations))
        if PointGoalSensor.cls_uuid in observations:
            goal_observations = observations[PointGoalSensor.cls_uuid]
            x.append(self.pointgoal_embedding(goal_observations))
        if ProximitySensor.cls_uuid in observations:
            sensor_observations = observations[ProximitySensor.cls_uuid]
            x.append(self.proximity_embedding(sensor_observations))
        if HeadingSensor.cls_uuid in observations:
            sensor_observations = observations[HeadingSensor.cls_uuid]
            # Angle -> (cos, sin) pair, avoiding the 0 / 2*pi discontinuity.
            sensor_observations = torch.stack(
                [
                    torch.cos(sensor_observations[0]),
                    torch.sin(sensor_observations[0]),
                ],
                -1,
            )
            x.append(self.heading_embedding(sensor_observations))
        if ObjectGoalSensor.cls_uuid in observations:
            # Categorical object-goal id -> learned embedding.
            object_goal = observations[ObjectGoalSensor.cls_uuid].long()
            x.append(self.obj_categories_embedding(object_goal).squeeze(dim=1))
        if EpisodicCompassSensor.cls_uuid in observations:
            compass_observations = torch.stack(
                [
                    torch.cos(observations[EpisodicCompassSensor.cls_uuid]),
                    torch.sin(observations[EpisodicCompassSensor.cls_uuid]),
                ],
                -1,
            )
            x.append(
                self.compass_embedding(compass_observations.squeeze(dim=1))
            )
        if EpisodicGPSSensor.cls_uuid in observations:
            x.append(
                self.gps_embedding(observations[EpisodicGPSSensor.cls_uuid])
            )
        if ImageGoalSensor.cls_uuid in observations:
            goal_image = observations[ImageGoalSensor.cls_uuid]
            goal_output = self.goal_visual_encoder({"rgb": goal_image})
            x.append(self.goal_visual_fc(goal_output))
        # (prev_action + 1) * masks maps the action to 0 at episode starts
        # (masks == 0), reserving embedding index 0 as a "start" token.
        prev_actions = self.prev_action_embedding(
            ((prev_actions.float() + 1) * masks).long().squeeze(dim=-1)
        )
        x.append(prev_actions)
        x = torch.cat(x, dim=1)
        x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks)
        return x, rnn_hidden_states
| [
"habitat_baselines.common.utils.Flatten",
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"numpy.prod",
"torch.nn.Embedding",
"habitat_baselines.common.utils.ResizeCenterCropper",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"habitat_baselines.rl.models.rnn_state_encoder.RNNStateEncoder",
"torch.sin",... | [((1278, 1314), 'habitat_baselines.common.utils.ResizeCenterCropper', 'ResizeCenterCropper', ([], {'size': '(256, 256)'}), '(size=(256, 256))\n', (1297, 1314), False, 'from habitat_baselines.common.utils import Flatten, ResizeCenterCropper\n'), ((2202, 2238), 'habitat_baselines.common.utils.ResizeCenterCropper', 'ResizeCenterCropper', ([], {'size': '(256, 256)'}), '(size=(256, 256))\n', (2221, 2238), False, 'from habitat_baselines.common.utils import Flatten, ResizeCenterCropper\n'), ((5549, 5576), 'torch.cat', 'torch.cat', (['cnn_input'], {'dim': '(1)'}), '(cnn_input, dim=1)\n', (5558, 5576), False, 'import torch\n'), ((5589, 5607), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (5601, 5607), True, 'import torch.nn.functional as F\n'), ((6155, 6191), 'habitat_baselines.common.utils.ResizeCenterCropper', 'ResizeCenterCropper', ([], {'size': '(256, 256)'}), '(size=(256, 256))\n', (6174, 6191), False, 'from habitat_baselines.common.utils import Flatten, ResizeCenterCropper\n'), ((6299, 6335), 'torch.nn.Embedding', 'nn.Embedding', (['(action_space.n + 1)', '(32)'], {}), '(action_space.n + 1, 32)\n', (6311, 6335), True, 'import torch.nn as nn\n'), ((10541, 10698), 'habitat_baselines.rl.models.rnn_state_encoder.RNNStateEncoder', 'RNNStateEncoder', (['((0 if self.is_blind else self._hidden_size) + rnn_input_size)', 'self._hidden_size'], {'rnn_type': 'rnn_type', 'num_layers': 'num_recurrent_layers'}), '((0 if self.is_blind else self._hidden_size) +\n rnn_input_size, self._hidden_size, rnn_type=rnn_type, num_layers=\n num_recurrent_layers)\n', (10556, 10698), False, 'from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder\n'), ((14030, 14049), 'torch.cat', 'torch.cat', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (14039, 14049), False, 'import torch\n'), ((3058, 3116), 'habitat_baselines.rl.ddppo.policy.running_mean_and_var.RunningMeanAndVar', 'RunningMeanAndVar', (['(self._n_input_depth + 
self._n_input_rgb)'], {}), '(self._n_input_depth + self._n_input_rgb)\n', (3075, 3116), False, 'from habitat_baselines.rl.ddppo.policy.running_mean_and_var import RunningMeanAndVar\n'), ((3201, 3216), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (3214, 3216), True, 'import torch.nn as nn\n'), ((6771, 6798), 'torch.nn.Linear', 'nn.Linear', (['n_input_goal', '(32)'], {}), '(n_input_goal, 32)\n', (6780, 6798), True, 'import torch.nn as nn\n'), ((7138, 7181), 'torch.nn.Embedding', 'nn.Embedding', (['self._n_object_categories', '(32)'], {}), '(self._n_object_categories, 32)\n', (7150, 7181), True, 'import torch.nn as nn\n'), ((7466, 7494), 'torch.nn.Linear', 'nn.Linear', (['input_gps_dim', '(32)'], {}), '(input_gps_dim, 32)\n', (7475, 7494), True, 'import torch.nn as nn\n'), ((7757, 7791), 'torch.nn.Linear', 'nn.Linear', (['input_pointgoal_dim', '(32)'], {}), '(input_pointgoal_dim, 32)\n', (7766, 7791), True, 'import torch.nn as nn\n'), ((8132, 8164), 'torch.nn.Linear', 'nn.Linear', (['input_heading_dim', '(32)'], {}), '(input_heading_dim, 32)\n', (8141, 8164), True, 'import torch.nn as nn\n'), ((8427, 8461), 'torch.nn.Linear', 'nn.Linear', (['input_proximity_dim', '(32)'], {}), '(input_proximity_dim, 32)\n', (8436, 8461), True, 'import torch.nn as nn\n'), ((8880, 8912), 'torch.nn.Linear', 'nn.Linear', (['input_compass_dim', '(32)'], {}), '(input_compass_dim, 32)\n', (8889, 8912), True, 'import torch.nn as nn\n'), ((9049, 9121), 'gym.spaces.Dict', 'spaces.Dict', (["{'rgb': observation_space.spaces[ImageGoalSensor.cls_uuid]}"], {}), "({'rgb': observation_space.spaces[ImageGoalSensor.cls_uuid]})\n", (9060, 9121), False, 'from gym import spaces\n'), ((3753, 3860), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.backbone.final_channels', 'num_compression_channels'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(False)'}), '(self.backbone.final_channels, num_compression_channels,\n kernel_size=3, padding=1, bias=False)\n', (3762, 3860), True, 'import torch.nn as 
nn\n'), ((3993, 4034), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(1)', 'num_compression_channels'], {}), '(1, num_compression_channels)\n', (4005, 4034), True, 'import torch.nn as nn\n'), ((4052, 4065), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (4059, 4065), True, 'import torch.nn as nn\n'), ((9587, 9596), 'habitat_baselines.common.utils.Flatten', 'Flatten', ([], {}), '()\n', (9594, 9596), False, 'from habitat_baselines.common.utils import Flatten, ResizeCenterCropper\n'), ((9740, 9753), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9747, 9753), True, 'import torch.nn as nn\n'), ((9960, 9975), 'gym.spaces.Dict', 'spaces.Dict', (['{}'], {}), '({})\n', (9971, 9975), False, 'from gym import spaces\n'), ((10334, 10343), 'habitat_baselines.common.utils.Flatten', 'Flatten', ([], {}), '()\n', (10341, 10343), False, 'from habitat_baselines.common.utils import Flatten, ResizeCenterCropper\n'), ((10482, 10495), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10489, 10495), True, 'import torch.nn as nn\n'), ((4530, 4560), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (4552, 4560), True, 'import torch.nn as nn\n'), ((4642, 4678), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias'], {'val': '(0)'}), '(layer.bias, val=0)\n', (4659, 4678), True, 'import torch.nn as nn\n'), ((9645, 9691), 'numpy.prod', 'np.prod', (['self.goal_visual_encoder.output_shape'], {}), '(self.goal_visual_encoder.output_shape)\n', (9652, 9691), True, 'import numpy as np\n'), ((10392, 10433), 'numpy.prod', 'np.prod', (['self.visual_encoder.output_shape'], {}), '(self.visual_encoder.output_shape)\n', (10399, 10433), True, 'import numpy as np\n'), ((11775, 11810), 'torch.cos', 'torch.cos', (['(-goal_observations[:, 1])'], {}), '(-goal_observations[:, 1])\n', (11784, 11810), False, 'import torch\n'), ((11832, 11867), 'torch.sin', 'torch.sin', (['(-goal_observations[:, 1])'], {}), '(-goal_observations[:, 1])\n', 
(11841, 11867), False, 'import torch\n'), ((12576, 12609), 'torch.cos', 'torch.cos', (['sensor_observations[0]'], {}), '(sensor_observations[0])\n', (12585, 12609), False, 'import torch\n'), ((12631, 12664), 'torch.sin', 'torch.sin', (['sensor_observations[0]'], {}), '(sensor_observations[0])\n', (12640, 12664), False, 'import torch\n'), ((13139, 13194), 'torch.cos', 'torch.cos', (['observations[EpisodicCompassSensor.cls_uuid]'], {}), '(observations[EpisodicCompassSensor.cls_uuid])\n', (13148, 13194), False, 'import torch\n'), ((13216, 13271), 'torch.sin', 'torch.sin', (['observations[EpisodicCompassSensor.cls_uuid]'], {}), '(observations[EpisodicCompassSensor.cls_uuid])\n', (13225, 13271), False, 'import torch\n')] |
"""
inheritance-diagram:: dfo.optimizer.direct
:parts: 1
"""
from misc.debug import DbgMsgOut, DbgMsg
from .base import BoxConstrainedOptimizer
from numpy import max, min, abs, array
import numpy as np
import heapq
__all__ = ['Cube', 'DIRECT']
class Cube(object):
    """A hyper-rectangle in the DIRECT search space.

    Stores the (normalized) center *x*, the objective value *f* at that
    center, and the per-coordinate trisection *depth* (larger depth means
    a shorter side along that coordinate).
    """

    def __init__(self, x, f, depth):
        self.x = array(x)
        self.f = f
        self.ndim = self.x.shape[0]
        self.depth = depth

    def increase_depth(self, i=None):
        """Trisect coordinate *i* once more, or every coordinate when i is None."""
        if i is None:
            for axis in range(self.ndim):
                self.depth[axis] += 1
        else:
            self.depth[i] += 1
class DIRECT(BoxConstrainedOptimizer):
    """
    DIviding RECTangles (DIRECT) global optimizer over a box-constrained
    space. Works in normalized [0,1]^n coordinates: hyper-rectangles
    ("cubes") are trisected along their longest sides and the most
    promising ones are explored first via a min-heap.
    """
    def __init__(self, function, xlo=None, xhi=None, debug=0, fstop=None, maxiter=None):
        BoxConstrainedOptimizer.__init__(self, function, xlo, xhi, debug, fstop, maxiter, cache=True)
        # Min-heap of (potentiality, insertion counter, Cube). The counter
        # breaks ties so Cube instances are never compared directly.
        self.pq_cache = None
        self.eps = 1e-2
        # Size/value tradeoff constant; with K = 0 cubes are ranked purely
        # by the objective value at their center.
        self.K = 0
        # Maximum number of trisections allowed per coordinate.
        self.max_depth = 5
        self.visited = []
    def check(self):
        """
        Checks the optimization algorithm's settings and raises an exception if
        something is wrong.
        """
        BoxConstrainedOptimizer.check(self)
        # if self.samplesize is None:
        #     raise Exception(DbgMsg("DIRECT", "The sample size should not be None."))
    def reset(self, x0):
        """
        Puts the optimizer in its initial state and sets the initial point to
        be the 1-dimensional array *x0*. The length of the array becomes the
        dimension of the optimization problem (:attr:`ndim` member). The shape
        of *x* must match that of *xlo* and *xhi*.
        """
        BoxConstrainedOptimizer.reset(self, x0)
        # Debug message
        if self.debug:
            DbgMsgOut("DIRECT", "Resetting DIRECT")
    def run(self):
        """
        Run the DIRECT algorithm.

        Starts from the center of the normalized unit cube and repeatedly
        pops the most promising cube from the heap, sampling two points one
        third-length away along every longest coordinate.
        """
        # Debug message
        if self.debug:
            DbgMsgOut("CSOPT", "Starting a coordinate search run at i=" + str(self.niter))
        # Reset stop flag
        self.stop = False
        # Check
        self.check()
        # Initial point: center of the unit cube in normalized coordinates.
        self.x = 0.5 * np.ones(shape=(self.ndim,))
        self.f = self.fun(self.denormalize(self.x))
        # pq: potentiality, f(center), depth
        self.pq_cache = [(self.f - 1.0 / 3.0 * self.K, 1, Cube(self.x, self.f, np.ones(shape=(self.ndim,))))]
        while not self.stop and self.pq_cache:
            # Pop the cube with the lowest potentiality.
            val, it, cube = heapq.heappop(self.pq_cache)
            self.update_cube(cube)
            x, depth = cube.x, cube.depth
            # The longest sides have the smallest trisection depth.
            minimum_depth = min(depth)
            # print("depth: ", depth)
            if self.debug:
                DbgMsgOut("DIRECT", "Cube.f =" + str(cube.f))
            inc_index, better_index, same_index, worse_index = [], [], [], []
            for i in range(self.ndim):
                # try points with length of the maximum side of hyper-rectangle
                if depth[i] == minimum_depth:
                    # Sample one third to the left of the center ...
                    x[i] -= (1 / 3)**depth[i]
                    improved = self.update_potential_rectangle(x, depth, i)
                    if improved == 0:
                        same_index.append(i)
                    elif improved > 0:
                        better_index.append(i)
                    else:
                        worse_index.append(i)
                    # ... and one third to the right.
                    x[i] += 2 * (1 / 3)**depth[i]
                    improved = self.update_potential_rectangle(x, depth, i)
                    if improved == 0:
                        same_index.append(i)
                    elif improved > 0:
                        better_index.append(i)
                    else:
                        worse_index.append(i)
                    # Restore the center coordinate.
                    x[i] -= (1 / 3) ** depth[i]
                    inc_index.append(i)
            if better_index != [] and worse_index != []:
                # Decrease the size of the cube and save it in the cache
                for idx in inc_index:
                    cube.increase_depth(idx)
                self.niter += 1
                # Push the smaller cube into the cache centering at self.x
                # NOTE(review): the size term uses depth[0] rather than the
                # minimum depth -- harmless while K == 0, but confirm if K
                # is ever made nonzero.
                heapq.heappush(self.pq_cache, (cube.f - 0.5**depth[0] * self.K, self.niter, cube))
            if self.debug:
                DbgMsgOut("DIRECT", "Iteration i=" + str(self.niter) + " fbest=" + str(self.f))
    def update_cube(self, cube):
        '''
        Advance the trisection depth of *cube* along every coordinate whose
        neighboring trial points were already evaluated (found in the cache).
        :param cube: class Cube object
        :return: None
        '''
        # print("update cube")
        x = cube.x
        depth = cube.depth
        for i in range(self.ndim):
            if self.cache.isVisited(x + (1.0 / 3.0)**depth[i]) or self.cache.isVisited(x - (1.0 / 3.0)**depth[i]):
                cube.increase_depth(i)
        # print("cube's depth: ", cube.depth)
    def update_potential_rectangle(self, x, depth, i):
        '''
        Evaluate f at the trial point *x* and, when it is still promising
        (within max_depth and not worse than the incumbent), push a child
        cube onto the heap.
        :param x: trial center in normalized coordinates
        :param depth: per-coordinate trisection depths of the parent cube
        :param i: the coordinate that was perturbed
        :return: 1 if the incumbent improved, 0 if f equals it, -1 otherwise
        '''
        # print("x::::::::::", x)
        f = self.fun(self.denormalize(x))
        if depth[i] <= self.max_depth and f <= self.f:
            # build new cube with x_new, depth_new
            x_new = x.copy()
            depth_new = depth.copy()
            depth_new[i] += 1
            cube = Cube(x_new, f, depth_new)
            heapq.heappush(self.pq_cache, (f - self.K * 0.5**depth_new[i], self.niter, cube))
        if f < self.f:
            self.f = f
            self.x = x.copy()
            if self.debug:
                DbgMsgOut("DIRECT", "Better centers found in iteration i=" + str(self.niter) + " fbest=" + str(self.f))
            return 1
        elif f == self.f:
            return 0
        else:
            return -1
| [
"numpy.ones",
"numpy.array",
"heapq.heappop",
"numpy.min",
"heapq.heappush",
"misc.debug.DbgMsgOut"
] | [((327, 335), 'numpy.array', 'array', (['x'], {}), '(x)\n', (332, 335), False, 'from numpy import max, min, abs, array\n'), ((1727, 1766), 'misc.debug.DbgMsgOut', 'DbgMsgOut', (['"""DIRECT"""', '"""Resetting DIRECT"""'], {}), "('DIRECT', 'Resetting DIRECT')\n", (1736, 1766), False, 'from misc.debug import DbgMsgOut, DbgMsg\n'), ((2098, 2125), 'numpy.ones', 'np.ones', ([], {'shape': '(self.ndim,)'}), '(shape=(self.ndim,))\n', (2105, 2125), True, 'import numpy as np\n'), ((2410, 2438), 'heapq.heappop', 'heapq.heappop', (['self.pq_cache'], {}), '(self.pq_cache)\n', (2423, 2438), False, 'import heapq\n'), ((2544, 2554), 'numpy.min', 'min', (['depth'], {}), '(depth)\n', (2547, 2554), False, 'from numpy import max, min, abs, array\n'), ((5280, 5367), 'heapq.heappush', 'heapq.heappush', (['self.pq_cache', '(f - self.K * 0.5 ** depth_new[i], self.niter, cube)'], {}), '(self.pq_cache, (f - self.K * 0.5 ** depth_new[i], self.niter,\n cube))\n', (5294, 5367), False, 'import heapq\n'), ((4082, 4171), 'heapq.heappush', 'heapq.heappush', (['self.pq_cache', '(cube.f - 0.5 ** depth[0] * self.K, self.niter, cube)'], {}), '(self.pq_cache, (cube.f - 0.5 ** depth[0] * self.K, self.\n niter, cube))\n', (4096, 4171), False, 'import heapq\n'), ((2303, 2330), 'numpy.ones', 'np.ones', ([], {'shape': '(self.ndim,)'}), '(shape=(self.ndim,))\n', (2310, 2330), True, 'import numpy as np\n')] |
"""
---------------------------------------------------------------------
-- Author: <NAME>
---------------------------------------------------------------------
Main file to execute the model on the MNIST dataset
"""
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import argparse
import random
import numpy as np
import os
import torch
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch.utils.data
from model.GMVAE import *
#########################################################
## Input Parameters
#########################################################
# Command-line interface: every hyperparameter of the GMVAE run is exposed
# as a flag with a default.
parser = argparse.ArgumentParser(description='PyTorch Implementation of DGM Clustering')
## Used only in notebooks
parser.add_argument('-f', '--file',
                    help='Path for input file. First line should contain number of lines to search in')
## Dataset
parser.add_argument('--dataset', type=str, choices=['mnist'],
                    default='mnist', help='dataset (default: mnist)')
parser.add_argument('--seed', type=int, default=0, help='random seed (default: 0)')
## GPU
parser.add_argument('--cuda', type=int, default=1,
                    help='use of cuda (default: 1)')
parser.add_argument('--gpuID', type=int, default=0,
                    help='set gpu id to use (default: 0)')
## Training
# NOTE(review): the default is 100 but the help text says 200 -- confirm
# which default is intended.
parser.add_argument('--epochs', type=int, default=100,
                    help='number of total epochs to run (default: 200)')
parser.add_argument('--batch_size', default=64, type=int,
                    help='mini-batch size (default: 64)')
parser.add_argument('--batch_size_val', default=200, type=int,
                    help='mini-batch size of validation (default: 200)')
parser.add_argument('--learning_rate', default=1e-3, type=float,
                    help='learning rate (default: 0.001)')
parser.add_argument('--decay_epoch', default=-1, type=int,
                    help='Reduces the learning rate every decay_epoch')
parser.add_argument('--lr_decay', default=0.5, type=float,
                    help='Learning rate decay for training (default: 0.5)')
## Architecture
parser.add_argument('--num_classes', type=int, default=10,
                    help='number of classes (default: 10)')
parser.add_argument('--gaussian_size', default=64, type=int,
                    help='gaussian size (default: 64)')
parser.add_argument('--input_size', default=784, type=int,
                    help='input size (default: 784)')
## Partition parameters
parser.add_argument('--train_proportion', default=1.0, type=float,
                    help='proportion of examples to consider for training only (default: 1.0)')
## Gumbel parameters
parser.add_argument('--init_temp', default=1.0, type=float,
                    help='Initial temperature used in gumbel-softmax (recommended 0.5-1.0, default:1.0)')
parser.add_argument('--decay_temp', default=1, type=int,
                    help='Set 1 to decay gumbel temperature at every epoch (default: 1)')
# NOTE(review): default is 0 here while the help text says 1 -- confirm.
parser.add_argument('--hard_gumbel', default=0, type=int,
                    help='Set 1 to use the hard version of gumbel-softmax (default: 1)')
parser.add_argument('--min_temp', default=0.5, type=float,
                    help='Minimum temperature of gumbel-softmax after annealing (default: 0.5)' )
parser.add_argument('--decay_temp_rate', default=0.013862944, type=float,
                    help='Temperature decay rate at every epoch (default: 0.013862944)')
## Loss function parameters
parser.add_argument('--w_gauss', default=1, type=float,
                    help='weight of gaussian loss (default: 1)')
parser.add_argument('--w_categ', default=1, type=float,
                    help='weight of categorical loss (default: 1)')
parser.add_argument('--w_rec', default=1, type=float,
                    help='weight of reconstruction loss (default: 1)')
parser.add_argument('--rec_type', type=str, choices=['bce', 'mse'],
                    default='bce', help='desired reconstruction loss function (default: bce)')
## Others
parser.add_argument('--verbose', default=0, type=int,
                    help='print extra information at every epoch.(default: 0)')
parser.add_argument('--random_search_it', type=int, default=20,
                    help='iterations of random search (default: 20)')
args = parser.parse_args()
# Restrict the visible devices before any CUDA context is created.
if args.cuda == 1:
  os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpuID)
## Random Seed
# Seed every RNG used (numpy, python, torch, cuda) so runs are reproducible.
SEED = args.seed
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
if args.cuda:
  torch.cuda.manual_seed(SEED)
#########################################################
## Read Data
#########################################################
if args.dataset == "mnist":
print("Loading mnist dataset...")
# Download or load downloaded MNIST dataset
train_dataset = datasets.MNIST('./mnist', train=True, download=True, transform=transforms.ToTensor())
test_dataset = datasets.MNIST('./mnist', train=False, transform=transforms.ToTensor())
#########################################################
## Data Partition
#########################################################
def partition_dataset(n, proportion=0.8):
  """Randomly split indices 0..n-1 into a train and a validation part.

  Returns (train_indices, val_indices) where the train part holds
  int(n * proportion) shuffled indices and the validation part the rest.
  """
  cutoff = int(n * proportion)
  shuffled = np.random.permutation(n)
  return shuffled[:cutoff], shuffled[cutoff:]
if args.train_proportion == 1.0:
  # Full training set is used; validation then reuses the test loader.
  # NOTE(review): validating on the test set leaks test data into model
  # selection -- confirm this is intended.
  train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
  test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size_val, shuffle=False)
  val_loader = test_loader
else:
  # Hold out a fraction of the training set for validation.
  train_indices, val_indices = partition_dataset(len(train_dataset), args.train_proportion)
  # Create data loaders for train, validation and test datasets
  train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=SubsetRandomSampler(train_indices))
  val_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size_val, sampler=SubsetRandomSampler(val_indices))
  test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size_val, shuffle=False)
## Calculate flatten size of each input data
args.input_size = np.prod(train_dataset[0][0].size())
print(args.input_size)
#########################################################
## Train and Test Model
#########################################################
gmvae = GMVAE(args)
## Training Phase
history_loss = gmvae.train(train_loader, val_loader)
## Testing Phase
accuracy, nmi = gmvae.test(test_loader)
print("Testing phase...")
print("Accuracy: %.5lf, NMI: %.5lf" % (accuracy, nmi) )
| [
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.manual_seed",
"argparse.ArgumentParser",
"matplotlib.use",
"random.seed",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed",
"torchvision.transforms.ToTensor",
"numpy.random.permutation"
] | [((238, 259), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (252, 259), False, 'import matplotlib\n'), ((662, 741), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Implementation of DGM Clustering"""'}), "(description='PyTorch Implementation of DGM Clustering')\n", (685, 741), False, 'import argparse\n'), ((4481, 4501), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (4495, 4501), True, 'import numpy as np\n'), ((4502, 4519), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (4513, 4519), False, 'import random\n'), ((4520, 4543), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (4537, 4543), False, 'import torch\n'), ((4560, 4588), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (4582, 4588), False, 'import torch\n'), ((5246, 5270), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (5267, 5270), True, 'import numpy as np\n'), ((5430, 5518), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True)\n', (5457, 5518), False, 'import torch\n'), ((5531, 5623), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size_val', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batch_size_val,\n shuffle=False)\n', (5558, 5623), False, 'import torch\n'), ((6089, 6181), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size_val', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batch_size_val,\n shuffle=False)\n', (6116, 6181), False, 'import torch\n'), ((4910, 4931), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4929, 4931), False, 'from torchvision import datasets, transforms\n'), ((4999, 5020), 
'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5018, 5020), False, 'from torchvision import datasets, transforms\n'), ((5905, 5939), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (5924, 5939), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6039, 6071), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (6058, 6071), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n')] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from Bio import BiopythonWarning
import os
import uuid
import io
import subprocess
import logging
import sys
import csv
import time
import re
import shutil
import platform
import distro
import multiprocessing
import itertools
import hashlib
import math
import gzip
import operator
import textwrap
import errno
import datetime
from natsort import natsorted
import funannotate.resources as resources
from funannotate.interlap import InterLap
from collections import defaultdict
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from itertools import izip as zip
except ImportError:
pass
import warnings
from Bio import SeqIO
# Importing Bio.SearchIO emits deprecation warnings on some Biopython
# versions; silence them for the import only, then ignore all
# BiopythonWarning for the rest of the run.
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    from Bio import SearchIO
warnings.simplefilter('ignore', BiopythonWarning)
# get the working directory, so you can move back into DB folder to find the files you need
# ('global' is redundant at module scope but kept as-is)
global parentdir
parentdir = os.path.join(os.path.dirname(__file__))
GeneMark2GFF = os.path.join(parentdir, 'aux_scripts', 'genemark_gtf2gff3.pl')
class colr:
    """ANSI escape codes for coloured terminal output."""
    GRN = '\033[92m'  # bright green
    END = '\033[0m'  # reset all formatting
    WARN = '\033[93m'  # bright yellow (warnings)
class suppress_stdout_stderr(object):
    '''
    Context manager performing a "deep suppression" of stdout and stderr:
    it redirects file descriptors 1 and 2 to os.devnull, so even output
    written by compiled C/Fortran sub-functions is silenced.

    Raised exceptions still propagate, since they are printed to stderr
    only after the context manager has already exited.
    '''

    def __init__(self):
        # One /dev/null handle each for stdout and stderr.
        self.null_fds = [os.open(os.devnull, os.O_RDWR),
                         os.open(os.devnull, os.O_RDWR)]
        # Duplicates of the real stdout (1) and stderr (2) descriptors,
        # kept so they can be restored on exit.
        self.save_fds = (os.dup(1), os.dup(2))

    def __enter__(self):
        # Point descriptors 1 and 2 at /dev/null.
        os.dup2(self.null_fds[0], 1)
        os.dup2(self.null_fds[1], 2)

    def __exit__(self, *_):
        # Restore the original stdout/stderr descriptors ...
        os.dup2(self.save_fds[0], 1)
        os.dup2(self.save_fds[1], 2)
        # ... and release the /dev/null handles.
        for fd in self.null_fds:
            os.close(fd)
class gzopen(object):
    """Generic opener that decompresses gzipped files
    if needed. Encapsulates an open file or a GzipFile.
    Use the same way you would use 'open()'.

    Note: gzipped files yield bytes (GzipFile), plain files yield text,
    matching open()'s default mode.
    """

    def __init__(self, fname):
        # Bug fix: the magic number must be sniffed in *binary* mode. In
        # Python 3, reading a gzip file in text mode raises
        # UnicodeDecodeError, and a text-mode read(2) returns str, which
        # never equals the bytes literal b'\x1f\x8b'.
        f = open(fname, 'rb')
        # Read magic number (the first 2 bytes) and rewind.
        magic_number = f.read(2)
        f.seek(0)
        # Encapsulated 'self.f' is a file or a GzipFile.
        if magic_number == b'\x1f\x8b':
            self.f = gzip.GzipFile(fileobj=f)
        else:
            # Plain file: reopen in text mode to preserve the historical
            # behavior of returning str from read().
            f.close()
            self.f = open(fname)

    # Define '__enter__' and '__exit__' to use in
    # 'with' blocks. Always close the file and the
    # GzipFile if applicable.
    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        try:
            # Close the underlying binary handle of a GzipFile first.
            self.f.fileobj.close()
        except AttributeError:
            pass
        finally:
            self.f.close()

    # Reproduce the interface of an open file
    # by encapsulation.
    def __getattr__(self, name):
        return getattr(self.f, name)

    def __iter__(self):
        return iter(self.f)

    def __next__(self):
        return next(self.f)
def createdir(name):
    """Create directory *name* (with parents), ignoring 'already exists'."""
    try:
        os.makedirs(name)
    except OSError as exc:
        # Only tolerate EEXIST; any other failure is re-raised.
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
def softwrap(string, every=80):
    """Hard-wrap *string* by inserting a newline every *every* characters."""
    return '\n'.join(string[pos:pos + every]
                     for pos in range(0, len(string), every))
def len_without_format(text):
    """Length of *text* with ANSI formatting codes stripped out."""
    try:
        stripped = remove_formatting(text)
    except TypeError:
        # Non-string input: fall back to the length of its str() form.
        return len(str(text))
    return len(stripped)
def remove_formatting(text):
    """Strip ANSI escape sequences (ESC ... m) from *text*."""
    return re.sub('\x1b.*?m', '', text)
def colour(text, text_colour):
    """Wrap *text* in the ANSI codes named by *text_colour*.

    The colour string may combine a base colour (red/green/yellow/dim)
    with 'bold' and/or 'underline'; unrecognized names leave the text
    unchanged.
    """
    bold_text = 'bold' in text_colour
    text_colour = text_colour.replace('bold', '')
    underline_text = 'underline' in text_colour
    text_colour = text_colour.replace('underline', '')
    # Normalize separators and case before matching colour names.
    for sep in ('_', ' '):
        text_colour = text_colour.replace(sep, '')
    text_colour = text_colour.lower()
    coloured_text = ''
    for name, code in (('red', RED), ('green', GREEN),
                       ('yellow', YELLOW), ('dim', DIM)):
        if name in text_colour:
            coloured_text = code
            break
    if bold_text:
        coloured_text += BOLD
    if underline_text:
        coloured_text += UNDERLINE
    if not coloured_text:
        return text
    return coloured_text + text + END_FORMATTING
# ANSI formatting codes used by the colour helpers below.
END_FORMATTING = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RED = '\033[31m'
GREEN = '\033[32m'
MAGENTA = '\033[35m'
YELLOW = '\033[93m'
DIM = '\033[2m'


def _ansi(codes, text):
    # Surround *text* with the given ANSI codes and a trailing reset.
    return codes + text + END_FORMATTING


def green(text):
    return _ansi(GREEN, text)


def bold_green(text):
    return _ansi(GREEN + BOLD, text)


def red(text):
    return _ansi(RED, text)


def magenta(text):
    return _ansi(MAGENTA, text)


def bold_red(text):
    return _ansi(RED + BOLD, text)


def bold(text):
    return _ansi(BOLD, text)


def bold_underline(text):
    return _ansi(BOLD + UNDERLINE, text)


def underline(text):
    return _ansi(UNDERLINE, text)


def dim(text):
    return _ansi(DIM, text)


def dim_underline(text):
    return _ansi(DIM + UNDERLINE, text)


def bold_yellow(text):
    return _ansi(YELLOW + BOLD, text)


def bold_yellow_underline(text):
    return _ansi(YELLOW + BOLD + UNDERLINE, text)


def bold_red_underline(text):
    return _ansi(RED + BOLD + UNDERLINE, text)
def print_table(table, alignments='', max_col_width=30, col_separation=3, indent=2,
                row_colour=None, sub_colour=None, row_extra_text=None, leading_newline=False,
                subsequent_indent='', return_str=False, header_format='underline',
                hide_header=False, fixed_col_widths=None, left_align_header=True,
                bottom_align_header=True, verbosity=1):
    """
    Print (or return) a text table with wrapping, alignment and colouring.

    Args:
        table: a list of lists of strings (one row is one list, all rows should be the same length)
        alignments: a string of L and R, indicating the alignment for each row
        max_col_width: values longer than this will be wrapped
        col_separation: the number of spaces between columns
        indent: the number of spaces between the table and the left side of the terminal
        row_colour: a dictionary of row indices and their colour names
        sub_colour: a dictionary of values to colour names for which the text colour will be set
        row_extra_text: a dictionary of row indices and extra text to display after the row
        leading_newline: if True, the function will print a blank line above the table
        subsequent_indent: this string will be added to the start of wrapped text lines
        return_str: if True, this function will return a string of the table instead of printing it
        header_format: the formatting (colour, underline, etc) of the header line
        hide_header: if True, the header is not printed
        fixed_col_widths: a list to specify exact column widths (automatic if not used)
        left_align_header: if False, the header will follow the column alignments
        bottom_align_header: if False, the header will align to the top, like other rows
        verbosity: the table will only be logged if the logger verbosity is >= this value
    """
    # this function is written by <NAME> in Unicycler code
    # modified to not support colors
    column_count = len(table[0])
    # Normalize every row to exactly column_count cells (truncate, then pad).
    table = [x[:column_count] for x in table]
    table = [x + [''] * (column_count - len(x)) for x in table]
    if row_colour is None:
        row_colour = {}
    if sub_colour is None:
        sub_colour = {}
    if row_extra_text is None:
        row_extra_text = {}
    if leading_newline:
        print('')
    # Ensure the alignments string is the same length as the column count
    alignments += 'L' * (column_count - len(alignments))
    alignments = alignments[:column_count]
    if fixed_col_widths is not None:
        col_widths = fixed_col_widths
    else:
        # Auto width: the longest (format-stripped) cell, capped at max_col_width.
        col_widths = [0] * column_count
        for row in table:
            col_widths = [min(max(col_widths[i], len_without_format(x)), max_col_width)
                          for i, x in enumerate(row)]
    separator = ' ' * col_separation
    indenter = ' ' * indent
    full_table_str = ''
    for i, row in enumerate(table):
        row = [str(x) for x in row]
        if hide_header and i == 0:
            continue
        # Wrap each cell to its column width; a wrapped cell becomes a list
        # of lines.
        if fixed_col_widths is not None:
            wrapped_row = []
            for col, fixed_width in zip(row, fixed_col_widths):
                wrapper = textwrap.TextWrapper(subsequent_indent=subsequent_indent,
                                             width=fixed_width)
                wrapped_row.append(wrapper.wrap(col))
        else:
            wrapper = textwrap.TextWrapper(
                subsequent_indent=subsequent_indent, width=max_col_width)
            wrapped_row = [wrapper.wrap(x) for x in row]
        # Number of physical lines this logical row occupies.
        row_rows = max(len(x) for x in wrapped_row)
        if i == 0 and bottom_align_header:
            # Pad header cells on top so their text sits on the bottom line.
            wrapped_row = [[''] * (row_rows - len(x)) + x for x in wrapped_row]
        for j in range(row_rows):
            row_line = [x[j] if j < len(x) else '' for x in wrapped_row]
            aligned_row = []
            for value, col_width, alignment in zip(row_line, col_widths, alignments):
                if alignment == 'L' or (i == 0 and left_align_header):
                    aligned_row.append(value.ljust(col_width))
                elif alignment == 'C':
                    aligned_row.append(value.center(col_width))
                else:
                    aligned_row.append(value.rjust(col_width))
            row_str = separator.join(aligned_row)
            if i in row_extra_text:
                row_str += row_extra_text[i]
            if i == 0 and header_format:
                row_str = colour(row_str, header_format)
            if i in row_colour:
                row_str = colour(row_str, row_colour[i])
            for text, colour_name in list(sub_colour.items()):
                row_str = row_str.replace(text, colour(text, colour_name))
            # Only the last physical line of a row keeps the underline code.
            if j < row_rows - 1 and UNDERLINE in row_str:
                row_str = re.sub('\033\[4m', '', row_str)
            if return_str:
                full_table_str += indenter + row_str + '\n'
            else:
                print((indenter + row_str))
    if return_str:
        return full_table_str
def git_version():
    """Return the short git revision of the repo at module-level `parentdir`,
    or False if git is unavailable (OSError)."""
    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        # NOTE(review): relies on module globals `parentdir`, `subprocess`, `os`;
        # the devnull handle opened here is never explicitly closed
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=open(
            os.devnull, 'w'), cwd=parentdir).communicate()[0]
        return out
    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', '--short', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
    except OSError:
        GIT_REVISION = False
    return GIT_REVISION
def Funzip(input, output, cpus):
    '''
    function to unzip as fast as it can, pigz -> bgzip -> gzip
    '''
    # prefer parallel pigz when installed; otherwise fall back to plain gzip
    if which('pigz'):
        cmd = ['pigz', '--decompress', '-c', '-p', str(cpus), input]
    else:
        cmd = ['gzip', '--decompress', '-c', input]
    try:
        runSubprocess2(cmd, '.', log, output)
    except NameError:
        # module-level `log` not configured yet -- run the command directly
        with open(output, 'w') as outfile:
            subprocess.call(cmd, stdout=outfile)
def Fzip(input, output, cpus):
    '''
    function to zip as fast as it can, pigz -> bgzip -> gzip
    '''
    # prefer parallel pigz when installed; otherwise fall back to plain gzip
    if which('pigz'):
        cmd = ['pigz', '-c', '-p', str(cpus), input]
    else:
        cmd = ['gzip', '-c', input]
    try:
        runSubprocess2(cmd, '.', log, output)
    except NameError:
        # module-level `log` not configured yet -- run the command directly
        with open(output, 'w') as outfile:
            subprocess.call(cmd, stdout=outfile)
def Fzip_inplace(input, cpus):
    '''
    function to zip as fast as it can, pigz -> bgzip -> gzip
    '''
    # -f forces overwrite of an existing .gz; compresses *input* in place
    if which('pigz'):
        cmd = ['pigz', '-f', '-p', str(cpus), input]
    else:
        cmd = ['gzip', '-f', input]
    try:
        runSubprocess(cmd, '.', log)
    except NameError:
        # module-level `log` not configured yet -- run the command directly
        subprocess.call(cmd)
# RNA seq mediated modules
def concatenateReads(input, output):
    '''
    Since I can't seem to get the comma separated lists to work with subprocess modules, just
    concatenate FASTQ files in order and use a single file, input should be a list of FASTQ files
    using system cat here so that gzipped files are concatenated correctly
    '''
    # *input* is a list of file paths appended to the `cat` argv
    cmd = ['cat']
    cmd = cmd + input
    runSubprocess2(cmd, '.', log, output)
def which2(program):
    """Locate *program*: if given with a directory component, verify it is
    executable; otherwise search each PATH entry. Returns the path or None."""
    import os

    def _executable(candidate):
        # a real file with the execute bit set
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        # explicit path supplied: accept only if it is an executable file
        if _executable(program):
            return program
    else:
        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory.strip('"'), program)
            if _executable(candidate):
                return candidate
    return None
def open_pipe(command, mode='r', buff=1024*1024):
    """Run *command* through the shell and return a file object: the child's
    stdout for 'r' mode, its stdin for 'w' mode, None otherwise."""
    import subprocess
    import signal
    if 'r' in mode:
        # restore default SIGPIPE in the child so closing our end terminates it cleanly
        return subprocess.Popen(command, shell=True, bufsize=buff,
                                stdout=subprocess.PIPE, universal_newlines=True,
                                preexec_fn=lambda: signal.signal(
                                    signal.SIGPIPE, signal.SIG_DFL)
                                ).stdout
    elif 'w' in mode:
        return subprocess.Popen(command, shell=True, bufsize=buff, universal_newlines=True,
                                stdin=subprocess.PIPE).stdin
    return None
# Back-end selection modes for open_bz2/open_gz/open_xz below.
NORMAL = 0    # use the in-process Python library (bz2/gzip)
PROCESS = 1   # shell out to the single-threaded tool (bzip2/gzip)
PARALLEL = 2  # shell out to the parallel tool (pbzip2/pigz)
WHICH_BZIP2 = which2("bzip2")    # path to bzip2 binary, or None
WHICH_PBZIP2 = which2("pbzip2")  # path to pbzip2 binary, or None
def open_bz2(filename, mode='r', buff=1024*1024, external=PARALLEL):
    """Open a bz2 stream, preferring pbzip2 -> bzip2 -> Python's bz2 module."""
    if external is None or external == NORMAL:
        import bz2
        return bz2.BZ2File(filename, mode, buff)
    if external == PROCESS:
        # no bzip2 binary: fall back to the in-process library
        if not WHICH_BZIP2:
            return open_bz2(filename, mode, buff, NORMAL)
        reader, writer = "bzip2 -dc ", "bzip2 >"
    elif external == PARALLEL:
        # no pbzip2 binary: fall back to single-threaded bzip2
        if not WHICH_PBZIP2:
            return open_bz2(filename, mode, buff, PROCESS)
        reader, writer = "pbzip2 -dc ", "pbzip2 >"
    else:
        return None
    if 'r' in mode:
        return open_pipe(reader + filename, mode, buff)
    if 'w' in mode:
        return open_pipe(writer + filename, mode, buff)
    return None
WHICH_GZIP = which2("gzip")  # path to gzip binary, or None
WHICH_PIGZ = which2("pigz")  # path to pigz binary, or None
def open_gz(filename, mode='r', buff=1024*1024, external=PARALLEL):
    """Open a gzip stream, preferring pigz -> gzip -> Python's gzip module."""
    if external is None or external == NORMAL:
        import gzip
        return gzip.GzipFile(filename, mode, buff)
    if external == PROCESS:
        # no gzip binary: fall back to the in-process library
        if not WHICH_GZIP:
            return open_gz(filename, mode, buff, NORMAL)
        reader, writer = "gzip -dc ", "gzip >"
    elif external == PARALLEL:
        # no pigz binary: fall back to single-threaded gzip
        if not WHICH_PIGZ:
            return open_gz(filename, mode, buff, PROCESS)
        reader, writer = "pigz -dc ", "pigz >"
    else:
        return None
    if 'r' in mode:
        return open_pipe(reader + filename, mode, buff)
    if 'w' in mode:
        return open_pipe(writer + filename, mode, buff)
    return None
WHICH_XZ = which2("xz")  # path to xz binary, or None (no in-process fallback)
def open_xz(filename, mode='r', buff=1024*1024, external=PARALLEL):
    """Open an xz stream via the external xz binary; None if xz is missing
    or the mode is neither read nor write."""
    if not WHICH_XZ:
        return None
    if 'r' in mode:
        return open_pipe("xz -dc " + filename, mode, buff)
    if 'w' in mode:
        return open_pipe("xz >" + filename, mode, buff)
    return None
def zopen(filename, mode='r', buff=1024*1024, external=PARALLEL):
    """
    Open pipe, zipped, or unzipped file automagically
    # external == 0: normal zip libraries
    # external == 1: (zcat, gzip) or (bzcat, bzip2)
    # external == 2: (pigz -dc, pigz) or (pbzip2 -dc, pbzip2)
    """
    # refuse an ambiguous read+write mode
    if 'r' in mode and 'w' in mode:
        return None
    if filename.startswith('!'):
        # a leading '!' means "treat the remainder as a shell command"
        return open_pipe(filename[1:], mode, buff)
    if filename.endswith('.bz2'):
        return open_bz2(filename, mode, buff, external)
    if filename.endswith('.gz'):
        return open_gz(filename, mode, buff, external)
    if filename.endswith('.xz'):
        return open_xz(filename, mode, buff, external)
    # plain, uncompressed file
    # (the original ended with an unreachable `return None` after this
    # exhaustive if/else; that dead statement has been removed)
    return open(filename, mode, buff)
def execute(cmd):
    """Generator: run *cmd*, yield its stdout line by line, and raise
    CalledProcessError if it exits non-zero. STDERR is discarded."""
    # NOTE(review): the devnull handle is never explicitly closed
    DEVNULL = open(os.devnull, 'w')
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             universal_newlines=True, stderr=DEVNULL)
    for stdout_line in iter(popen.stdout.readline, ""):
        yield stdout_line
    popen.stdout.close()
    return_code = popen.wait()
    if return_code:
        raise subprocess.CalledProcessError(return_code, cmd)
def getDiamondVersion():
    """Return the diamond version string, handling both the old style
    (reported on stdout) and the new style (reported on stderr)."""
    vers = subprocess.Popen(['diamond', 'version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
    if vers[1] == '':  # then this is older version and parse the stdout
        vers = vers[0].split('version ')[-1].rstrip()
    else:
        vers = vers[1].split()[1].replace('v', '')
    return vers
def CheckDiamondDB(database):
    """Return True if the installed diamond can read *database*.

    Compares the database format version (from `diamond dbinfo`) against the
    format version the installed diamond release writes/reads.

    Bug fix: version strings were previously compared lexicographically, which
    mis-orders e.g. '0.9.9' vs '0.9.10'; they are now compared numerically.
    """
    def _vtuple(v):
        # numeric-aware version key: '0.9.10' -> (0, 9, 10)
        return tuple(int(x) for x in v.split('.') if x.isdigit())
    diamond_version = getDiamondVersion()
    DBvers = None
    for line in execute(['diamond', 'dbinfo', '-d', database]):
        if 'Database format version' in line:
            DBvers = int(line.strip().split()[-1])
    if not DBvers:
        log.error('Could not determine diamond database version')
        return False
    # map the diamond release to the DB format version it supports
    if _vtuple(diamond_version) < _vtuple('0.9.10'):
        return False
    elif _vtuple(diamond_version) < _vtuple('0.9.25'):
        runVers = 2
    else:
        runVers = 3
    return runVers >= DBvers
def CheckFASTQandFix(forward, reverse, cpus=2):
    """Verify paired FASTQ headers carry mate information (std Illumina
    '... 1...'/'... 2...' fields or trailing '/1' and '/2'); exit(1) when
    they do not. Returns 0 when headers look Trinity-compatible.

    Only the first read pair is inspected (every branch breaks the loop).
    """
    from Bio.SeqIO.QualityIO import FastqGeneralIterator
    # open and check first header, if okay exit, if not fix
    file1 = FastqGeneralIterator(zopen(forward, 'rt'))
    file2 = FastqGeneralIterator(zopen(reverse, 'rt'))
    check = True
    for read1, read2 in zip(file1, file2):
        if ' ' in read1[0] and ' ' in read2[0]:
            # std illumina, exit
            if read1[0].split(' ', 1)[1].startswith('1') and read2[0].split(' ', 1)[1].startswith('2'):
                break
            else:
                log.debug("R1 header: {} and R2 header: {} are not 1 and 2 as expected".format(read1[0],read2[0]))
                check = False
                break
        elif read1[0].endswith('/1') and read2[0].endswith('/2'): # also acceptable
            break
        else: # it is not okay missing paired information
            log.debug("R1 header: {} and R2 header: {} are missing pairing as expected".format(read1[0],read2[0]))
            check = False
            break
    file1.close()
    file2.close()
    if not check:
        log.error('ERROR: FASTQ headers are not properly paired, see logfile and reformat your FASTQ headers')
        sys.exit(1)
        # NOTE(review): the string literal below is disabled repair code kept
        # for reference; it is unreachable after sys.exit(1)
        '''
    # now need to fix these reads
    log.info(
        "PE reads do not conform to Trinity naming convention (need either /1 /2 or std illumina), fixing...")
    # work on forward reads first
    if forward.endswith('.gz'):
        Funzip(forward, forward+'.bak', cpus)
        SafeRemove(forward)
    else:
        os.rename(forward, forward+'.bak')
    # now add ending to reads
    with open(forward+'.fix', 'w') as forwardfix:
        for title, seq, qual in FastqGeneralIterator(open(forward+'.bak')):
            title = title+'/1'
            forwardfix.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
    Fzip(forward+'.fix', forward, cpus)
    SafeRemove(forward+'.bak')
    SafeRemove(forward+'.fix')
    # now work on reverse reads
    if reverse.endswith('.gz'):
        Funzip(reverse, reverse+'.bak', cpus)
    else:
        os.rename(reverse, reverse+'.bak')
    with open(reverse+'.fix', 'w') as reversefix:
        for title, seq, qual in FastqGeneralIterator(open(reverse+'.bak')):
            title = title+'/2'
            reversefix.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
    # zip back up to original file
    Fzip(reverse+'.fix', reverse, cpus)
    SafeRemove(reverse+'.bak')
    SafeRemove(reverse+'.fix')
        '''
    else:
        log.debug('FASTQ headers seem compatible with Trinity')
        return 0
def SafeRemove(input):
    """Delete *input* whether it is a file or a directory tree; silently do
    nothing when it is neither (e.g. it does not exist)."""
    if os.path.isdir(input):
        shutil.rmtree(input)
        return
    if os.path.isfile(input):
        os.remove(input)
def runSubprocess(cmd, dir, logfile):
    """Run *cmd* in *dir* capturing both streams; on failure log the output
    at error level and exit(1), otherwise log it at debug level."""
    logfile.debug(' '.join(cmd))
    proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
        if stdout:
            logfile.error(stdout.decode("utf-8"))
        if stderr:
            logfile.error(stderr.decode("utf-8"))
        sys.exit(1)
    else:
        # success: demote any captured output to debug level
        if stdout:
            logfile.debug(stdout.decode("utf-8"))
        if stderr:
            logfile.debug(stderr.decode("utf-8"))
def runSubprocess2(cmd, dir, logfile, output):
    """Run *cmd* in *dir* writing its STDOUT to the file *output*; STDERR is
    captured and logged, and a non-zero exit aborts with sys.exit(1)."""
    # function where output of cmd is STDOUT, capture STDERR in logfile
    logfile.debug(' '.join(cmd))
    with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=out,
                                stderr=subprocess.PIPE)
        # NOTE(review): communicate() returns a (stdout, stderr) tuple; the
        # whole tuple is bound to `stderr` here (its first element is None
        # because stdout goes to the file) -- the stderr[0] check below
        # accounts for that
        stderr = proc.communicate()
    if proc.returncode != 0:
        logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
        if stderr:
            logfile.error(stderr)
        sys.exit(1)
    else:
        if stderr:
            if stderr[0] is not None:
                logfile.debug(stderr)
def runSubprocess3(cmd, dir, logfile):
    """Run *cmd* in *dir*, discarding STDOUT and logging STDERR at debug level.

    Fixes two defects: the devnull handle was opened and never closed (fd
    leak), and the full (stdout, stderr) tuple from communicate() was logged
    unconditionally because a non-empty tuple is always truthy.
    """
    logfile.debug(' '.join(cmd))
    with open(os.devnull, 'w') as FNULL:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=FNULL,
                                stderr=subprocess.PIPE)
        # take only the stderr element; stdout element is always None here
        stderr = proc.communicate()[1]
    if stderr:
        logfile.debug(stderr)
def runSubprocess4(cmd, dir, logfile):
    """Run *cmd* in *dir* silently; on failure print captured output and exit(1)."""
    # STDOUT and STDERR are captured via pipes and only shown on failure
    logfile.debug(' '.join(cmd))
    proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
        if stdout:
            print(stdout)
        if stderr:
            print(stderr)
        sys.exit(1)
def runSubprocess5(cmd, dir, logfile, input, output):
    """Run *cmd* in *dir* with STDIN fed from file *input* and STDOUT written
    to file *output*; STDERR is logged and a non-zero exit aborts."""
    # function where STDOUT to file, STDIN as input, STDERR pipes to logfile
    logfile.debug(' '.join(cmd))
    with open(input) as infile:
        with open(output, 'w') as out:
            proc = subprocess.Popen(cmd, cwd=dir, stdin=infile, stdout=out,
                                    stderr=subprocess.PIPE)
            # NOTE(review): `stderr` is the whole (stdout, stderr) tuple from
            # communicate(); the stderr[0] check below accounts for that
            stderr = proc.communicate()
    if proc.returncode != 0:
        logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
        if stderr:
            logfile.error(stderr)
        sys.exit(1)
    else:
        if stderr:
            if stderr[0] is not None:
                logfile.debug(stderr)
def runSubprocess6(cmd, dir, logfile, logfile2):
    """Run *cmd* in *dir*; the command line goes to *logfile* while both
    STDOUT and STDERR are written to the file path *logfile2*."""
    # function where cmd captured in logfile, but both stdout and stdin piped to additional logfile
    logfile.debug(' '.join(cmd))
    with open(logfile2, 'w') as logout:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
            if stdout:
                logfile.error(stdout)
            if stderr:
                logfile.error(stderr)
            sys.exit(1)
        else:
            # success: persist both streams to the secondary logfile
            if stdout:
                logout.write(stdout)
            if stderr:
                logout.write(stderr)
def runSubprocess7(cmd, dir, logfile, output):
    """Like runSubprocess2 but APPENDS STDOUT to *output* instead of truncating."""
    # function where output of cmd is STDOUT (append mode), capture STDERR in logfile
    logfile.debug(' '.join(cmd))
    with open(output, 'a') as out:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=out,
                                stderr=subprocess.PIPE)
        # NOTE(review): `stderr` is the whole (stdout, stderr) tuple from
        # communicate(); the stderr[0] check below accounts for that
        stderr = proc.communicate()
    if proc.returncode != 0:
        logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
        if stderr:
            logfile.error(stderr)
        sys.exit(1)
    else:
        if stderr:
            if stderr[0] is not None:
                logfile.debug(stderr)
def runSubprocess8(cmd, dir, logfile, output):
    """Run *cmd* in *dir* writing STDOUT to *output*; unlike runSubprocess2,
    nothing is logged on success."""
    # STDOUT goes to the output file; STDERR is captured and only logged on failure
    logfile.debug(' '.join(cmd))
    with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=out,
                                stderr=subprocess.PIPE)
        stderr = proc.communicate()
    if proc.returncode != 0:
        logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
        if stderr:
            logfile.error(stderr)
        sys.exit(1)
def evmGFFvalidate(input, evmpath, logfile):
    """Validate a gene-prediction GFF3 with EVM's perl validator script.

    Returns True when the validator prints nothing on stderr, else logs the
    stderr and returns False.
    """
    Validator = os.path.join(evmpath, 'EvmUtils', 'gff3_gene_prediction_file_validator.pl')
    cmd = ['perl', Validator, os.path.realpath(input)]
    logfile.debug(' '.join(cmd))
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    if not stderr:
        return True
    else:
        logfile.error(stderr.rstrip())
        return False
def hashfile(afile, hasher, blocksize=65536):
    """Feed *afile* through *hasher* in *blocksize* chunks; return the digest."""
    while True:
        chunk = afile.read(blocksize)
        if not chunk:
            break
        hasher.update(chunk)
    return hasher.digest()
def sha256_check(file1, file2):
    """Return True when the two files have identical SHA-256 digests."""
    digests = [hashfile(open(fname, 'rb'), hashlib.sha256())
               for fname in (file1, file2)]
    return digests[0] == digests[1]
def readBlocks(source, pattern):
    """Yield lists of consecutive lines from *source*, starting a new block
    at every line that begins with *pattern* (bytes lines are decoded)."""
    block = []
    for raw in source:
        try:
            raw = raw.decode('utf-8')  # tolerate bytes input
        except AttributeError:
            pass
        if raw.startswith(pattern):
            if block:
                yield block
            block = [raw]
        else:
            block.append(raw)
    yield block
def readBlocks2(source, startpattern, endpattern):
    """Yield lists of consecutive lines from *source*; a new block starts on
    any line beginning with *startpattern* or ending with *endpattern*."""
    block = []
    for raw in source:
        try:
            raw = raw.decode('utf-8')  # tolerate bytes input
        except AttributeError:
            pass
        if raw.startswith(startpattern) or raw.endswith(endpattern):
            if block:
                yield block
            block = [raw]
        else:
            block.append(raw)
    yield block
def empty_line_sep(line):
    """Predicate: True when *line* is exactly a bare newline."""
    return '\n' == line
def get_parent_dir(directory):
    """Return the parent directory (dirname) of *directory*."""
    parent = os.path.dirname(directory)
    return parent
def getSize(filename):
    """Return the size of *filename* in bytes."""
    return os.stat(filename).st_size
def checkinputs(filename):
    """Validate that *filename* exists and is non-trivially sized; otherwise
    log an error and exit(1)."""
    if not os.path.isfile(filename):
        log.error("%s is not a valid file, exiting" % filename)
        sys.exit(1)
    size = getSize(filename)
    if size < 2:  # this is 1 character...
        log.error("%s appears to be empty, exiting" % filename)
        sys.exit(1)
def make_tarfile(output_filename, source_dir):
    """Create a gzipped tarball of *source_dir*, stored under its basename."""
    import tarfile
    with tarfile.open(output_filename, "w:gz") as archive:
        archive.add(source_dir, arcname=os.path.basename(source_dir))
def multipleReplace(text, wordDict):
    """Apply every key -> value substitution in *wordDict* to *text*."""
    for old, new in wordDict.items():
        text = text.replace(old, new)
    return text
def which_path(file_name):
    """Search each PATH entry for an executable named *file_name*; return its
    full path, or None when nothing matches."""
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, file_name)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
def which(name):
    """Return True if *name* can be launched, False when it is missing (ENOENT).

    Several tools need a specific no-op argument (they hang or exit oddly when
    run bare), so those are special-cased below.
    """
    try:
        with open(os.devnull) as devnull:
            # tools that must NOT be run with a plain '--version'
            diff = ['tbl2asn', 'dustmasker', 'mafft', 'signalp',
                    'proteinortho', 'ete3', 'phyml', 'phobius.pl', 'tantan']
            if not any(name in x for x in diff):
                subprocess.Popen([name], stdout=devnull,
                                 stderr=devnull, universal_newlines=True).communicate()
            else:
                if name == 'signalp':
                    subprocess.Popen([name, '-V'], stdout=devnull,
                                     stderr=devnull, universal_newlines=True).communicate()
                elif name == 'dustmasker':
                    subprocess.Popen(
                        [name, '-version-full'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
                elif name == 'tbl2asn':
                    subprocess.Popen(
                        [name, '--help'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
                elif name == 'raxmlHPC-PTHREADS':
                    subprocess.Popen(
                        [name, '-version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
                elif name == 'ete3':
                    subprocess.Popen(
                        [name, 'version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
                elif name == 'phobius.pl':
                    subprocess.Popen([name, '-h'], stdout=devnull,
                                     stderr=devnull, universal_newlines=True).communicate()
                else:
                    subprocess.Popen(
                        [name, '--version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
    except OSError as e:
        # ENOENT means the executable was not found on PATH
        if e.errno == errno.ENOENT:
            return False
    return True
def vers_tblastn():
    """Return the installed tblastn version string (e.g. '2.9.0')."""
    p1 = subprocess.Popen(['tblastn', '-version'],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    # first stdout line looks like 'tblastn: 2.9.0+'; strip the '+' and label
    vers = p1.communicate()[0].split('+')[0]
    vers = vers.split(' ')[-1]
    return vers
def CheckDependencies(input):
    """Exit(1) with an error listing every program in *input* that `which`
    cannot find; no-op when everything is present."""
    missing = []
    for p in input:
        if which(p) is False:
            missing.append(p)
    if missing != []:
        error = ", ".join(missing)
        try:
            log.error(
                "Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
        except NameError:
            # `log` not configured yet -- fall back to stdout
            print("Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
        sys.exit(1)
def checkannotations(input):
    """True when *input* names a non-empty file (or a symlink); else False."""
    if input and os.path.isfile(input):
        # zero-byte files count as missing annotations
        return int(getSize(input)) >= 1
    if input and os.path.islink(input):
        return True
    return False
def line_count(fname):
    """Count the lines in a text file; an empty file counts as 0."""
    with open(fname) as handle:
        idx = -1
        for idx, _ in enumerate(handle):
            pass
    return idx + 1
def countfasta(input):
    """Count records in a FASTA file (lines beginning with '>')."""
    with open(input, 'r') as handle:
        return sum(1 for line in handle if line.startswith(">"))
def getGeneBasename(fastafile):
    """Collect unique gene-name prefixes from FASTA headers of the form
    '>transcriptID geneID'.

    NOTE(review): when the gene ID contains '_' or '-', the prefix is taken
    from the WHOLE header line rather than the gene field alone -- confirm
    this matches the expected header layout before relying on it.
    """
    bases = []
    with open(fastafile, 'r') as input:
        for line in input:
            line = line.replace('\n', '')
            if line.startswith('>'):
                line = line.replace('>', '')
                # assumes exactly one space separates transcript and gene -- TODO confirm
                transcript, gene = line.split(' ')
                if '_' in gene:
                    Base = line.split('_')[0]+'_'
                elif '-' in gene:
                    Base = line.split('-')[0]
                else:
                    Base = gene
                if not Base in bases:
                    bases.append(Base)
    return bases
def get_version():
    """Return the installed funannotate package version string."""
    from pkg_resources import get_distribution
    __version__ = get_distribution('funannotate').version
    return __version__
def ver_tuple(z):
    """Convert a dotted version string to an int tuple: '1.2.3' -> (1, 2, 3).
    Non-numeric components are dropped."""
    return tuple(int(part) for part in z.split('.') if part.isdigit())
def cmp(a, b):
    """Python-2-style three-way compare: -1 when a < b, 1 when a > b, else 0."""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def ver_cmp(a, b):
    """Three-way compare of two dotted version strings, numerically."""
    left = ver_tuple(a)
    right = ver_tuple(b)
    return cmp(left, right)
def versionCheck(a, b):
    """True when version *a* is greater than or equal to version *b*."""
    return ver_cmp(a, b) != -1
def checkAugustusFunc():
    '''
    function to try to test Augustus installation is working, note segmentation fault still results in a pass
    '''
    # Returns (version_string, functional_bool). Runs augustus on the bundled
    # busco_test.fa with a protein profile and looks for a predicted gene.
    functional = False
    p1 = subprocess.Popen(['augustus', '--version'], stderr=subprocess.STDOUT,
                          stdout=subprocess.PIPE, universal_newlines=True).communicate()
    stdout, stderr = p1
    if isinstance(stdout, str):
        try:
            # Python-2 era shim; on Python 3 str has no decode, so this is a no-op
            stdout = stdout.decode('ascii', 'ignore').encode('ascii')
        except AttributeError:
            pass
    version = stdout.split(' is ')[0]
    model = os.path.join(parentdir, 'config', 'EOG092C0B3U.prfl')
    if not os.path.isfile(model):
        log.error("Testing Augustus Error: installation seems wrong, can't find prfl model")
        sys.exit(1)
    profile = '--proteinprofile='+model
    proc = subprocess.Popen(['augustus', '--species=anidulans', profile, os.path.join(parentdir, 'config', 'busco_test.fa')],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    stderr = stderr.strip()
    if isinstance(stdout, str):
        try:
            stdout = stdout.decode('ascii', 'ignore').encode('ascii')
        except AttributeError:
            pass
    stdout = stdout.strip().split('\n')
    if stderr.startswith('augustus: ERROR'):
        print(stderr)
        return version, functional
    else:
        # a '# start gene g1' line in the output means prediction worked
        for line in stdout:
            line = line.strip()
            if line.startswith('# start gene g1'):
                functional = True
        return version, functional
def maker2evm(inputfile, outputdir):
    """Split a MAKER GFF into the three inputs EVM expects: transcript
    alignments, protein alignments, and gene predictions (tRNA lines dropped)."""
    tr = os.path.join(outputdir, 'transcript_alignments.gff3')
    pr = os.path.join(outputdir, 'protein_alignments.gff3')
    gr = os.path.join(outputdir, 'gene_predictions.gff3')
    with open(tr, 'w') as trout:
        with open(pr, 'w') as prout:
            with open(gr, 'w') as grout:
                with open(inputfile, 'r') as input:
                    for line in input:
                        if line.startswith('#'):
                            continue
                        if 'trnascan' in line:
                            continue
                        cols = line.split('\t')
                        # route each record by its source column (column 2)
                        if 'maker' in cols[1]:
                            grout.write(line)
                        elif 'protein2genome' in cols[1]:
                            if 'match_part' in cols[2]:
                                cols[2] = 'nucleotide_to_protein_match'
                                cols[5] = '.'
                                prout.write('\t'.join(cols))
                        elif 'est2genome' in cols[1]:
                            if 'match_part' in cols[2]:
                                cols[2] = 'EST_match'
                                cols[5] = '.'
                                trout.write('\t'.join(cols))
                        elif 'cdna2genome' in cols[1]:
                            if 'match_part' in cols[2]:
                                cols[2] = 'EST_match'
                                cols[5] = '.'
                                trout.write('\t'.join(cols))
                        elif 'pred_gff' in cols[1]:
                            if 'match_part' in cols[2]:
                                cols[1] = cols[1].replace('pred_gff:', '')
                                cols[2] = 'EST_match'
                                cols[5] = '100.0'
                                trout.write('\t'.join(cols))
def flatten(l):
    """Flatten one nesting level: list elements are expanded in place,
    everything else is kept as-is (not recursive)."""
    out = []
    for item in l:
        if type(item) == list:
            out.extend(item)
        else:
            out.append(item)
    return out
def fmtcols(mylist, cols):
    """Format *mylist* into *cols* left-justified columns (read column-wise),
    each padded two spaces past its widest entry; returns the joined string.

    Bug fix: ``len(mylist) / cols`` is float division on Python 3, which made
    the subsequent slice step and ``range()`` raise TypeError; floor division
    is used now (matching the Python 2 behavior this code was written for).
    """
    padded_columns = []
    for i in range(cols):
        width = max(len(x) for x in mylist[i::cols]) + 2
        padded_columns.append([x.ljust(width) for x in mylist[i::cols]])
    # flatten one level (column-major cell order)
    cells = [cell for column in padded_columns for cell in column]
    num_lines = len(mylist) // cols  # was float division: TypeError on Python 3
    return "\n".join(' '.join(cells[i::num_lines]) for i in range(num_lines))
def list_columns(obj, cols=4, columnwise=True, gap=4):
    """
    Return the items of *obj* formatted in evenly-spaced columns.

    Parameters
    ----------
    obj : list
        The list to be printed.
    cols : int
        The number of columns in which the list should be printed.
    columnwise : bool, default=True
        If True the items are laid out column-wise, otherwise row-wise.
    gap : int
        Spaces separating the longest column item from the next column,
        based on the maximum len() of the list items.
    """
    items = [str(item) for item in obj]
    if cols > len(items):
        cols = len(items)
    widest = max([len(item) for item in items])
    if columnwise:
        # recompute the row width so the items fill columns top-to-bottom
        cols = int(math.ceil(float(len(items)) / float(cols)))
    rows = [items[i: i + cols] for i in range(0, len(items), cols)]
    if columnwise:
        if not len(rows[-1]) == cols:
            rows[-1].extend([''] * (len(items) - len(rows[-1])))
        rows = list(zip(*rows))
    return '\n'.join(
        ''.join(cell.ljust(widest + gap) for cell in row)
        for row in rows)
def roundup(x):
    """Round *x* up to the next multiple of 100; exact multiples stay put."""
    remainder = x % 100
    if remainder == 0:
        return x
    return x + 100 - remainder
def maxabs(a, axis=None):
    """Return slice of a, keeping only those values that are furthest away
    from 0 along axis"""
    import numpy as np
    hi = a.max(axis=axis)
    lo = a.min(axis=axis)
    pos_wins = abs(hi) > abs(lo)   # where the positive extreme dominates
    neg_wins = abs(lo) > abs(hi)   # where the negative extreme dominates
    if axis is None:
        # scalar case: return whichever extreme has larger magnitude
        return hi if pos_wins else lo
    shape = list(a.shape)
    shape.pop(axis)
    out = np.zeros(shape, dtype=a.dtype)
    out[pos_wins] = hi[pos_wins]
    out[neg_wins] = lo[neg_wins]
    return out
def setupLogging(LOGNAME):
    """Configure the module-global `log`: INFO messages to stdout (colorized
    on macOS via `colr`), DEBUG messages to the file *LOGNAME*."""
    global log
    if 'darwin' in sys.platform:
        stdoutformat = logging.Formatter(
            colr.GRN+'%(asctime)s'+colr.END+': %(message)s', datefmt='[%b %d %I:%M %p]')
    else:
        stdoutformat = logging.Formatter(
            '%(asctime)s: %(message)s', datefmt='[%b %d %I:%M %p]')
    fileformat = logging.Formatter(
        '%(asctime)s: %(message)s', datefmt='[%x %H:%M:%S]')
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    # console handler: INFO and above
    sth = logging.StreamHandler()
    sth.setLevel(logging.INFO)
    sth.setFormatter(stdoutformat)
    log.addHandler(sth)
    # file handler: everything (DEBUG and above)
    fhnd = logging.FileHandler(LOGNAME)
    fhnd.setLevel(logging.DEBUG)
    fhnd.setFormatter(fileformat)
    log.addHandler(fhnd)
def renameGFF(input, newname, output):
    """Rewrite the source column (column 2) of a GFF file to *newname*.

    FASTA header lines ('>') are dropped, comment lines pass through, and any
    line without exactly 9 tab-separated columns is silently discarded.
    Returns the set of contig names seen.
    """
    contigs = set()
    with open(output, 'w') as out_handle, open(input, 'r') as in_handle:
        for line in in_handle:
            if line.startswith('>'):  # remove any fasta sequences
                continue
            if line.startswith('#'):
                out_handle.write(line)
                continue
            fields = line.split('\t')
            # only well-formed 9-column GFF records are kept
            if len(fields) == 9:
                contigs.add(fields[0])
                out_handle.write('{}\t{}\t{}'.format(fields[0], newname,
                                                     '\t'.join(fields[2:])))
    return contigs
def countGFFgenes(input):
    """Count 'gene' feature lines in a GFF file; 0 when the file is missing."""
    total = 0
    if os.path.exists(input):
        with open(input, 'r') as handle:
            total = sum(1 for line in handle if "\tgene\t" in line)
    return total
def countEVMpredictions(input):
    """Tally 'gene' features per prediction source in an EVM-style GFF3.

    Returns a dict with a 'total' key plus one count per source (column 2).
    """
    tallies = {'total': 0}
    with open(input, 'r') as handle:
        for line in handle:
            if line.startswith('\n') or line.startswith('#'):
                continue
            line = line.strip()
            # strict 9-column unpack: malformed lines raise ValueError
            contig, source, feature, start, end, blank, strand, score, info = line.split('\t')
            if feature != 'gene':
                continue
            tallies['total'] += 1
            tallies[source] = tallies.get(source, 0) + 1
    return tallies
def countGMAPtranscripts(input):
    """Count '###' separator lines (one per transcript) in a GMAP GFF3."""
    with open(input, 'r') as handle:
        return sum(1 for line in handle if line.startswith('###'))
def runMultiProgress(function, inputList, cpus, progress=True):
    """Apply *function* to every item of *inputList* with a multiprocessing
    pool of *cpus* workers, optionally printing a progress percentage.

    NOTE(review): *function* and the items must be picklable; results are
    not collected/returned -- assumes *function* works by side effect.
    """
    # setup pool
    p = multiprocessing.Pool(cpus)
    # setup results and split over cpus
    tasks = len(inputList)
    results = []
    for i in inputList:
        results.append(p.apply_async(function, [i]))
    # refresh pbar every 5 seconds
    if progress:
        while True:
            incomplete_count = sum(1 for x in results if not x.ready())
            if incomplete_count == 0:
                break
            sys.stdout.write("     Progress: %.2f%% \r" %
                             (float(tasks - incomplete_count) / tasks * 100))
            sys.stdout.flush()
            time.sleep(1)
    p.close()
    p.join()
def runMultiNoProgress(function, inputList, cpus):
    """Like runMultiProgress but silent: fan *function* over *inputList*
    on a pool of *cpus* workers and wait for completion."""
    # setup pool
    p = multiprocessing.Pool(cpus)
    # setup results and split over cpus
    results = []
    for i in inputList:
        results.append(p.apply_async(function, [i]))
    p.close()
    p.join()
def cleanProteins(inputList, output):
    """Combine protein FASTAs into *output*: drop sequences shorter than 50
    aa, sanitize IDs (swissprot/jgi pipes, punctuation), and uniquify
    duplicate IDs with an auto-incremented numeric suffix."""
    # expecting a list of protein fasta files for combining/cleaning headers
    # make sure you aren't duplicated sequences names
    # dropping proteins less than 50 amino acids
    seen = set()
    with open(output, 'w') as out:
        for x in inputList:
            with open(x, 'r') as input:
                for rec in SeqIO.parse(input, 'fasta'):
                    if len(rec.seq) < 50:
                        continue
                    # explicitly check for swissprot and jgi
                    if rec.id.startswith('sp|') or rec.id.startswith('jgi|'):
                        ID = rec.id.split('|')[-1]
                    else:
                        ID = rec.id
                    # now clean up the shit
                    badshit = [':', ';', '/', '\\', '.', ',', '%']
                    for i in badshit:
                        if i in ID:
                            ID = ID.replace(i, '_')
                    if not ID in seen:
                        seen.add(ID)
                    else:
                        # means that ID has already been used, so add a number to it, auto increment
                        counter = 1
                        while ID in seen:
                            oldnum = counter-1
                            ID = ID.replace('_'+str(oldnum),
                                            '') + '_'+str(counter)
                            counter += 1
                        seen.add(ID)
                    out.write('>%s\n%s\n' % (ID, rec.seq))
def genemark2busco(genemark, bedfile, output):
    """Write GeneMark gene models that overlap BUSCO intervals (loaded from
    *bedfile*) to *output*; return the number of models kept."""
    #function to load coords into Interlap from bedfile, then pull out
    #genemark EVM gff3 format
    counter = 0
    inter = bed2interlap(bedfile)
    with open(output, 'w') as outfile:
        with open(genemark, 'r') as infile:
            # gene models are blank-line-separated blocks in the GFF
            for gene_model in readBlocks(infile, '\n'):
                if len(gene_model) < 2:
                    continue
                # the first data line may be preceded by the blank separator
                if gene_model[0] == '\n':
                    cols = gene_model[1].split('\t')
                else:
                    cols = gene_model[0].split('\t')
                coords = [int(cols[3]), int(cols[4])]
                chr = cols[0]
                if interlapIntersect(coords, chr, inter):
                    counter += 1
                    outfile.write('{}'.format(''.join(gene_model)))
    return counter
def evidence2busco(evidence, bedfile, output):
    """Write evidence alignment blocks overlapping BUSCO intervals (from
    *bedfile*) to *output*; return the number of blocks kept."""
    counter = 0
    inter = bed2interlap(bedfile)
    with open(output, 'w') as outfile:
        with open(evidence, 'r') as infile:
            for hit in readBlocks(infile, '\n'):
                hit = [x for x in hit if x != '\n']
                if len(hit) == 1:
                    # single-exon hit: coords straight from the one line
                    start = int(hit[0].split('\t')[3])
                    end = int(hit[0].split('\t')[4])
                    coords = [start, end]
                    chr = hit[0].split('\t')[0]
                elif len(hit) > 1:
                    # multi-line hit: span from first start to last end,
                    # normalized so coords are [low, high]
                    start = int(hit[0].split('\t')[3])
                    end = int(hit[-1].split('\t')[4])
                    chr = hit[0].split('\t')[0]
                    if start < end:
                        coords = [start, end]
                    else:
                        coords = [end, start]
                else:
                    continue
                if interlapIntersect(coords, chr, inter):
                    counter += 1
                    outfile.write('{}\n'.format(''.join(hit)))
    return counter
def fix_busco_naming(busco_infile, genome, augustus, gffout, ploidy=1,
                     proteins=False):
    """Rename Augustus BUSCO gene models to their BUSCO IDs and renumber
    genes, writing proper GFF3 to *gffout* (and proteins when requested).

    Models whose coordinates match a Complete (or, for ploidy > 1,
    Duplicated) BUSCO entry get that BUSCO name; others become
    'unknown_model'.
    """
    def group_separator(line):
        return line == '\n'
    # parse the busco table into dictionary format
    busco_complete = {}
    passing = ['Complete']
    if ploidy > 1:
        passing.append('Duplicated')
    with open(busco_infile, 'r') as buscoinput:
        for line in buscoinput:
            if line.startswith('#'):
                continue
            cols = line.split('\t')
            if cols[1] in passing:
                if not cols[0] in busco_complete:
                    busco_complete[cols[0]] = cols[2]+':'+cols[3]+'-'+cols[4]
    # now parse the augustus input file where gene numbers are likely repeated.
    results = []
    with open(augustus) as f:
        for key, group in itertools.groupby(f, group_separator):
            if not key:
                results.append(list(group))
    # loop through each gene model, lookup the BUSCO name, and then replace the name with counter based and busco model name
    tmpOut = augustus+'.intermediate'
    counter = 0
    inverse_busco = {v: k for k, v in list(busco_complete.items())}
    with open(tmpOut, 'w') as output:
        for i in results:
            counter += 1
            cols = i[0].split('\t')
            # location key must match the busco table's contig:start-end format
            lookup = cols[0]+':'+cols[3]+'-'+cols[4]
            if lookup in inverse_busco:
                name = inverse_busco.get(lookup)
            else:
                name = 'unknown_model'
            ID = cols[8].split(';')[0]
            ID = ID.replace('ID=', '')
            newID = 'gene'+str(counter)
            newblock = ''.join(i)
            newblock = newblock.replace('Augustus%20prediction', name)
            newblock = newblock.replace(ID, newID)
            output.write(newblock+'\n')
    #write to GFF3 properly and fix CDS
    Genes = {}
    Genes = gff2dict(tmpOut, genome, Genes)
    dict2gff3(Genes, gffout)
    if proteins:
        dict2proteins(Genes, proteins)
def gb2output(input, output1, output2, output3):
    """Extract proteins (*output1*), transcripts (*output2*), and scaffold
    sequences (*output3*) as FASTA from a GenBank file *input*."""
    with open(output1, 'w') as proteins:
        with open(output2, 'w') as transcripts:
            with open(output3, 'w') as scaffolds:
                with open(input, 'r') as gbk:
                    SeqRecords = SeqIO.parse(gbk, 'genbank')
                    for record in SeqRecords:
                        scaffolds.write(">%s\n%s\n" % (record.id, record.seq))
                        for f in record.features:
                            if f.type == "CDS":
                                # translation qualifier, stop codon stripped
                                proteins.write(">%s\n%s\n" % (f.qualifiers['locus_tag'][0], softwrap(
                                    f.qualifiers['translation'][0].rstrip('*'))))
                            if f.type == "mRNA":
                                feature_seq = f.extract(record.seq)
                                transcripts.write(">%s\n%s\n" % (
                                    f.qualifiers['locus_tag'][0], softwrap(feature_seq)))
def sortGFF(input, output, order):
    """Sort GFF *input* into *output* with `bedtools sort` using the contig
    order file *order*; exits(1) when bedtools reports an unreferenced
    scaffold on stderr."""
    cmd = ['bedtools', 'sort', '-header', '-faidx', order, '-i', input]
    with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)
        # communicate() returns (stdout, stderr); stdout element is None
        # because stdout goes to the output file
        stderr = proc.communicate()
        if stderr:
            if stderr[0] is None:
                if stderr[1] != '':
                    log.error(
                        "Sort GFF failed, unreferenced scaffold present in gene predictions, check logfile")
                    sys.exit(1)
def sortBedproper(input, output):
    """Sort a BED file the same way GFF3 files are sorted: natural order on
    contig name, then numeric order on start coordinate."""
    records = []
    with open(input, 'r') as in_handle:
        for line in in_handle:
            if line.startswith('\n'):
                continue
            records.append(line.rstrip().split('\t'))
    # natural sort on contig, numeric on start
    ordered = natsorted(records, key=lambda rec: (rec[0], int(rec[1])))
    with open(output, 'w') as out_handle:
        for rec in ordered:
            out_handle.write('{}\n'.format('\t'.join(rec)))
def sortGFFproper(input, output):
    """Sort a GFF3 file by contig and start while keeping the canonical
    gene -> mRNA -> exon -> CDS feature ordering; comment lines are kept
    at the top of the output."""
    records = []
    feature_types = set()
    header_lines = []
    with open(input, 'r') as in_handle:
        for line in in_handle:
            if line.startswith('\n'):
                continue
            if line.startswith('#'):
                header_lines.append(line)
                continue
            fields = line.rstrip().split('\t')
            records.append(fields)
            feature_types.add(fields[2])
    # canonical rank of feature types within a locus
    rank = {'gene': 0, 'mRNA': 1, 'transcript': 2, 'tRNA': 3, 'ncRNA': 4,
            'rRNA': 5, 'pseudogene': 6, 'five_prime_utr': 7,
            'five_prime_UTR': 8, 'exon': 9, 'CDS': 10,
            'three_prime_utr': 11, 'three_prime_UTR': 12}
    for feature in feature_types:
        if feature not in rank:
            # unknown feature types sort after every known one
            rank[feature] = len(rank)
    ordered = natsorted(records,
                        key=lambda rec: (rec[0], int(rec[3]), rank[rec[2]]))
    with open(output, 'w') as out_handle:
        for comment in header_lines:
            out_handle.write(comment)
        for rec in ordered:
            out_handle.write('{}\n'.format('\t'.join(rec)))
def checkGenBank(input):
    """Return True when the GenBank file contains at least one CDS feature."""
    cds_total = 0
    with open(input, 'r') as gbk:
        for record in SeqIO.parse(gbk, 'genbank'):
            cds_total += sum(1 for f in record.features if f.type == 'CDS')
    return cds_total > 0
def countGenBank(input):
    """Return (record_count, cds_count, trna_count) for a GenBank file."""
    cds = 0
    trna = 0
    dnas = 0
    with open(input, 'r') as gbk:
        for record in SeqIO.parse(gbk, 'genbank'):
            dnas += 1
            for feature in record.features:
                if feature.type == 'CDS':
                    cds += 1
                elif feature.type == 'tRNA':
                    trna += 1
    return dnas, cds, trna
def checkFastaHeaders(input, limit):
    """Check FASTA header lengths against *limit*.

    Returns (ok, ids): ok is False when any header (minus the '>') exceeds
    int(limit) characters; ids is the list of stripped header names.
    """
    longest = 0
    ids = []
    with open(input, 'r') as fasta:
        for line in fasta:
            if not line.startswith('>'):
                continue
            line = line.replace('\n', '')
            ids.append(line.replace('>', '').strip())
            # length excludes the leading '>' character
            header_len = len(line) - 1
            if header_len > longest:
                longest = header_len
    return (longest <= int(limit), ids)
def analyzeAssembly(input, header_max=16):
    """Scan an assembly FASTA for over-long headers, non-IUPAC nucleotide
    characters, and low-complexity contigs (fewer than 4 distinct characters).

    Returns (bad_names, nuc_errors, suspect) where nuc_errors maps header ->
    [(char, count), ...] and suspect maps header -> per-character tallies.
    """
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    IUPAC = {'A', 'C', 'G', 'T', 'R', 'Y',
             'S', 'W', 'K', 'M', 'B', 'D',
             'H', 'V', 'N'}
    long_headers = []
    non_iupac = {}
    low_complexity = {}
    with open(input, 'r') as handle:
        for header, sequence in SimpleFastaParser(handle):
            if len(header) > header_max:
                long_headers.append(header)
            # tally each (upper-cased) character in the contig
            tally = {}
            for base in sequence:
                base = base.upper()
                tally[base] = tally.get(base, 0) + 1
            # anything outside the IUPAC alphabet is an error
            bad = [(base, count) for base, count in tally.items()
                   if base not in IUPAC]
            if len(bad) > 0:
                non_iupac[header] = bad
            # fewer than 4 distinct characters -> suspect contig
            if len(tally) < 4:
                low_complexity[header] = tally
    return long_headers, non_iupac, low_complexity
def BamHeaderTest(genome, mapping):
    """Verify every reference name in a BAM file exists in the genome FASTA.

    Runs ``samtools idxstats`` on *mapping* and compares its reference
    names to the record IDs of *genome*.  Returns True when all BAM
    references are present; otherwise logs the missing names at debug
    level and returns False.  Requires samtools on PATH and an indexed BAM.
    """
    # get list of fasta headers from genome
    genome_headers = []
    with open(genome, 'r') as input:
        for rec in SeqIO.parse(input, 'fasta'):
            if rec.id not in genome_headers:
                genome_headers.append(rec.id)
    # get list of fasta headers from BAM
    bam_headers = []
    cmd = ['samtools', 'idxstats', os.path.realpath(mapping)]
    for line in execute(cmd):
        line = line.rstrip()
        chr, length, mapped, unmapped = line.split('\t')[:4]
        if chr != '*':  # the '*' bucket is unmapped reads, not a reference
            bam_headers.append(chr)
    # now compare lists, basically if BAM headers not in genome headers, then output bad names to logfile and return FALSE
    genome_headers = set(genome_headers)
    diffs = [x for x in bam_headers if x not in genome_headers]
    if len(diffs) > 0:
        log.debug(
            "ERROR: These BAM headers not found in genome FASTA headers\n%s" % ','.join(diffs))
        return False
    else:
        return True
def mapCount(input, location_dict, output):
    """Write per-transcript alignment counts from a BAM file to a TSV.

    *location_dict* maps mRNA-ID -> (gene-ID, location); transcripts with
    no alignments are written with a count of 0.  Requires samtools.

    NOTE(review): the output column is labeled 'TPM' but the value written
    is the raw alignment count, not a normalized TPM — confirm with callers.
    """
    Counts = {}
    # tally alignments per reference name (column 3 of SAM output)
    for aln in execute(['samtools', 'view', os.path.realpath(input)]):
        cols = aln.split('\t')
        if not cols[2] in Counts:
            Counts[cols[2]] = 1
        else:
            Counts[cols[2]] += 1
    with open(output, 'w') as outfile:
        outfile.write("#mRNA-ID\tgene-ID\tLocation\tTPM\n")
        for k, v in natsorted(list(location_dict.items())):
            if k in Counts:
                tpm = Counts.get(k)
            else:
                tpm = 0
            geneID = v[0]
            location = v[1]
            outfile.write('{:}\t{:}\t{:}\t{:.2f}\n'.format(
                k, geneID, location, float(tpm)))
def tokenizeString(aString, separators):
    """Split *aString* into an alternating list of text and separator tokens.

    Separators are kept in the output as their own list elements; text
    between (or around) separators is accumulated into string elements.
    When several separators match at the same position the longest one
    wins.  Adjacent separators produce empty-string elements between them.

    Parameters
    ----------
    aString : str
        The string to tokenize.
    separators : list[str]
        Separator tokens; the caller's list is NOT modified.

    Returns
    -------
    list[str]
        Tokens in original order, e.g. tokenizeString("a:b", [":"])
        -> ["a", ":", "b"].
    """
    # BUGFIX: previously sorted the caller's list in place, mutating the
    # argument.  Sort a copy instead; ascending length means the last
    # match found below is the longest one (longest-match wins).
    seps = sorted(separators, key=len)
    listToReturn = []
    i = 0
    while i < len(aString):
        theSeparator = ""
        for current in seps:
            if current == aString[i:i+len(current)]:
                theSeparator = current
        if theSeparator != "":
            listToReturn += [theSeparator]
            i = i + len(theSeparator)
        else:
            if listToReturn == []:
                listToReturn = [""]
            if(listToReturn[-1] in seps):
                listToReturn += [""]
            listToReturn[-1] += aString[i]
            i += 1
    return listToReturn
def bam2gff3(input, output):
    """Convert minimap2 cDNA alignments (BAM with cs:Z: tags) to GFF3.

    Parses the cs difference string of each primary alignment (FLAG 0 or
    16) to reconstruct exon blocks, keeps only canonically spliced
    alignments with >= 80% identity, and writes one ``cDNA_match`` line
    per exon to *output*.  Returns the number of alignments written.
    Requires samtools on PATH.
    """
    count = 0
    with open(output, 'w') as gffout:
        gffout.write('##gff-version 3\n')
        for aln in execute(['samtools', 'view', os.path.realpath(input)]):
            cols = aln.split('\t')
            # FLAG '0' = forward, '16' = reverse; anything else
            # (secondary/supplementary/unmapped) is skipped
            if cols[1] == '0':
                strand = '+'
            elif cols[1] == '16':
                strand = '-'
            else:
                continue
            cs = None
            nm = None
            tags = cols[11:]
            if not tags:
                continue
            for x in tags:
                if x.startswith('cs:'):
                    cs = x.replace('cs:Z:', '')
                if x.startswith('NM:'):
                    nm = int(x.split(':')[-1])
            if nm is None or cs is None:
                continue
            matches = 0
            ProperSplice = True
            splitter = []
            exons = [int(cols[3])]      # reference coords, 1-based POS
            position = int(cols[3])
            query = [1]                 # query coords, 1-based
            querypos = 0
            num_exons = 1
            gaps = 0
            # cs string alternates op chars (:*+-~) and their payloads
            splitter = tokenizeString(cs, [':', '*', '+', '-', '~'])
            for i, x in enumerate(splitter):
                if x == ':':
                    matches += int(splitter[i+1])
                    position += int(splitter[i+1])
                    querypos += int(splitter[i+1])
                elif x == '-':
                    gaps += 1
                elif x == '+':
                    gaps += 1
                    querypos += len(splitter[i+1])
                elif x == '~':
                    # intron: payload is e.g. 'gt12ag' -> check canonical
                    # donor/acceptor pairs per strand
                    if cols[1] == '0':
                        if splitter[i+1].startswith('gt') and splitter[i+1].endswith('ag'):
                            ProperSplice = True
                        elif splitter[i+1].startswith('at') and splitter[i+1].endswith('ac'):
                            ProperSplice = True
                        else:
                            ProperSplice = False
                    elif cols[1] == '16':
                        if splitter[i+1].startswith('ct') and splitter[i+1].endswith('ac'):
                            ProperSplice = True
                        elif splitter[i+1].startswith('gt') and splitter[i+1].endswith('at'):
                            ProperSplice = True
                        else:
                            ProperSplice = False
                    num_exons += 1
                    exons.append(position)
                    query.append(querypos)
                    query.append(querypos+1)
                    intronLen = int(splitter[i+1][2:-2])
                    position += intronLen
                    exons.append(position)
            # add last Position
            exons.append(position)
            query.append(len(cols[9]))
            # convert exon list into list of exon tuples
            exons = list(zip(exons[0::2], exons[1::2]))
            queries = list(zip(query[0::2], query[1::2]))
            if ProperSplice:
                mismatches = nm - gaps
                pident = 100 * (matches / (matches + mismatches))
                if pident < 80:
                    continue
                count += 1
                for i, exon in enumerate(exons):
                    start = exon[0]
                    end = exon[1]-1
                    if strand == '+':
                        qstart = queries[i][0]
                        qend = queries[i][1]
                    else:
                        # minus strand: flip query coordinates
                        qstart = len(cols[9]) - queries[i][1] + 1
                        qend = len(cols[9]) - queries[i][0] + 1
                    gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID={:};Target={:} {:} {:}\n'.format(
                        cols[2], 'genome', 'cDNA_match', start, end, pident, strand, '.', cols[0], cols[0], qstart, qend))
    return count
def bam2ExonsHints(input, gff3, hints):
    """Convert minimap2 alignments (BAM with cs:Z: tags) to GFF3 + Augustus hints.

    Like bam2gff3 but additionally writes an Augustus extrinsic-hints file:
    'ep' hints for terminal exons, 'exon' for internal exons, 'intron' for
    each intron.  Only canonically spliced alignments with >= 80% identity
    are kept; >= 95% identity is labeled 'cDNA_match', otherwise
    'EST_match'.  Returns the number of alignments written.
    Requires samtools on PATH.
    """
    count = 0
    with open(gff3, 'w') as gffout:
        gffout.write('##gff-version 3\n')
        with open(hints, 'w') as hintsout:
            num = -1
            for aln in execute(['samtools', 'view', os.path.realpath(input)]):
                num += 1
                cols = aln.split('\t')
                # FLAG '0' = forward, '16' = reverse; skip everything else
                if cols[1] == '0':
                    strand = '+'
                elif cols[1] == '16':
                    strand = '-'
                else:
                    continue
                cs = None
                nm = None
                tags = cols[11:]
                for x in tags:
                    if x.startswith('cs:'):
                        cs = x.replace('cs:Z:', '')
                    if x.startswith('NM:'):
                        nm = int(x.split(':')[-1])
                if nm is None or cs is None:
                    continue
                matches = 0
                ProperSplice = True
                splitter = []
                exons = [int(cols[3])]      # reference coords, 1-based POS
                position = int(cols[3])
                query = [1]                 # query coords, 1-based
                querypos = 0
                num_exons = 1
                gaps = 0
                splitter = tokenizeString(cs, [':', '*', '+', '-', '~'])
                for i, x in enumerate(splitter):
                    if x == ':':
                        matches += int(splitter[i+1])
                        position += int(splitter[i+1])
                        querypos += int(splitter[i+1])
                    elif x == '-':
                        gaps += 1
                    elif x == '+':
                        gaps += 1
                        querypos += len(splitter[i+1])
                    elif x == '~':
                        # BUGFIX: FLAG comes from aln.split('\t') and is a
                        # string; the original compared cols[1] == 0 / 16
                        # (ints), which was always False, so splice sites
                        # were never actually validated here.  Compare
                        # against '0'/'16' as bam2gff3 does.
                        if cols[1] == '0':
                            if splitter[i+1].startswith('gt') and splitter[i+1].endswith('ag'):
                                ProperSplice = True
                            elif splitter[i+1].startswith('at') and splitter[i+1].endswith('ac'):
                                ProperSplice = True
                            else:
                                ProperSplice = False
                                break
                        elif cols[1] == '16':
                            if splitter[i+1].startswith('ct') and splitter[i+1].endswith('ac'):
                                ProperSplice = True
                            elif splitter[i+1].startswith('gt') and splitter[i+1].endswith('at'):
                                ProperSplice = True
                            else:
                                ProperSplice = False
                                break
                        num_exons += 1
                        exons.append(position)
                        query.append(querypos)
                        query.append(querypos+1)
                        intronLen = int(splitter[i+1][2:-2])
                        position += intronLen
                        exons.append(position)
                # add last Position
                exons.append(position)
                query.append(len(cols[9]))
                # convert exon list into list of exon tuples
                exons = list(zip(exons[0::2], exons[1::2]))
                queries = list(zip(query[0::2], query[1::2]))
                introns = []
                if len(exons) > 1:
                    for x, y in enumerate(exons):
                        try:
                            introns.append((y[1], exons[x+1][0]-1))
                        except IndexError:
                            pass
                if ProperSplice:
                    mismatches = nm - gaps
                    pident = 100 * (matches / (matches + mismatches))
                    if pident < 80:
                        continue
                    feature = 'EST_match'
                    if pident > 95:
                        feature = 'cDNA_match'
                    count += 1
                    for i, exon in enumerate(exons):
                        start = exon[0]
                        end = exon[1]-1
                        qstart = queries[i][0]
                        qend = queries[i][1]
                        # terminal exons get 'ep' hints, internal get 'exon'
                        if i == 0 or i == len(exons)-1:
                            gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID=minimap2_{:};Target={:} {:} {:} {:}\n'.format(
                                cols[2], 'genome', feature, start, end, pident, strand, '.', num+1, cols[0], qstart, qend, strand))
                            hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(
                                cols[2], 'b2h', 'ep', start, end, 0, strand, '.', num+1, cols[0]))
                        else:
                            gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID=minimap2_{:};Target={:} {:} {:} {:}\n'.format(
                                cols[2], 'genome', feature, start, end, pident, strand, '.', num+1, cols[0], qstart, qend, strand))
                            hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(
                                cols[2], 'b2h', 'exon', start, end, 0, strand, '.', num+1, cols[0]))
                    if len(introns) > 0:
                        for z in introns:
                            hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(
                                cols[2], 'b2h', 'intron', z[0], z[1], 1, strand, '.', num+1, cols[0]))
    return count
def combineTranscripts(minimap, gmap, output):
    '''
    function to combine minimap GFF3 and gmap GFF3 files
    need to rename GMAP as you loop through and GFF3 from gmap is kind of messed up.
    Copies the minimap file verbatim (or writes a bare GFF3 header when
    minimap is falsy), then appends each gmap alignment block with its
    ID rewritten to gmap_<N> while keeping the Target attribute.
    '''
    with open(output, 'w') as out:
        if minimap:
            with open(minimap, 'r') as mini:
                for line in mini:
                    out.write(line)
        else:
            out.write('##gff-version 3\n')
        with open(gmap, 'r') as gmap_in:
            # readBlocks yields one '###'-delimited alignment block at a time
            for i, aln in enumerate(readBlocks(gmap_in, '###')):
                for x in aln:
                    if not x.startswith('#'):
                        contig, source, feature, start, end, score, strand, phase, attributes = x.split(
                            '\t')
                        info = attributes.split(';')
                        # NOTE(review): assumes every feature line carries a
                        # Target= attribute; a line without one would reuse
                        # the previous Target (or NameError on the first) —
                        # confirm gmap output always includes it
                        for y in info:
                            if y.startswith('Target='):
                                Target = y
                        out.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID=gmap_{:};{:}\n'.format(
                            contig, source, feature, start, end, score, strand, phase, i+1, Target))
def RevComp(s):
    """Return the reverse complement of DNA string *s* (IUPAC-aware).

    Input is upper-cased first; characters outside the IUPAC table
    raise KeyError.
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A', 'M': 'K', 'R': 'Y', 'W': 'W',
                  'S': 'S', 'Y': 'R', 'K': 'M', 'V': 'B', 'H': 'D', 'D': 'H', 'B': 'V', 'X': 'X', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(s.upper()))
def translate(cDNA, strand, phase):
    '''
    Translate a cDNA string into a protein sequence.

    strand may be '+'/'-' (or 1/-1); minus-strand input is reverse
    complemented before translation.  phase trims that many leading
    bases to set the reading frame.  Codons absent from the table
    translate to 'X'; a trailing partial codon is ignored.
    '''
    codon_table = {'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
                   'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
                   'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
                   'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
                   'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
                   'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
                   'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
                   'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
                   'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
                   'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
                   'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
                   'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
                   'GGG': 'G', 'TAA': '*', 'TAG': '*', 'TGA': '*'}
    # orient the sequence, then apply the reading-frame offset
    if strand == '-' or strand == -1:
        seq = RevComp(cDNA)
    else:
        seq = cDNA
    seq = seq[phase:]
    protein = []
    for pos in range(0, len(seq), 3):
        codon = seq[pos:pos + 3].upper()
        if len(codon) == 3:
            protein.append(codon_table.get(codon, 'X'))
    return ''.join(protein)
def extend2stop(seqDict, header, coordinates, strand, phase, protLen):
    '''
    try to extend a CDS lacking a stop to find a stop codon
    it will extend a CDS up to 20 codons (60 bp) from the current
    frame to find a stop codon, if none is found it will return
    the original coordinates

    Returns (True, new_coordinates) when a stop was found within the
    extension window, else (False, coordinates) unchanged.
    '''
    sorted_coordinates = sorted(coordinates, key=lambda tup: tup[0])
    if strand == '+':
        # extend the last exon 60 bp rightwards, clamped to contig end
        newStop = sorted_coordinates[-1][1]+60
        if newStop > len(seqDict[header]):
            newStop = len(seqDict[header])
        lastTup = (sorted_coordinates[-1][0], newStop)
        if len(sorted_coordinates) > 1:
            newCoords = sorted_coordinates[:-1]
            newCoords.append(lastTup)
        else:
            newCoords = [lastTup]
        updateCDS = getSeqRegions(seqDict, header, newCoords)
        updateProt = translate(updateCDS, strand, phase)
        if '*' in updateProt:
            # trim the extension back so it ends exactly at the stop codon
            num = (updateProt.find('*') - protLen + 1) * 3
            finalTup = (sorted_coordinates[-1][0],
                        sorted_coordinates[-1][1]+num)
            if len(sorted_coordinates) > 1:
                finalCoords = sorted_coordinates[:-1]
                finalCoords.append(finalTup)
            else:
                finalCoords = [finalTup]
            return True, finalCoords
        else:
            return False, coordinates
    else:
        # minus strand: extend the first exon 60 bp leftwards, clamped to 1
        newStop = sorted_coordinates[0][0]-60
        if newStop < 1:
            newStop = 1
        lastTup = (newStop, sorted_coordinates[0][1])
        newCoords = [lastTup]
        if len(sorted_coordinates) > 1:
            newCoords += sorted_coordinates[1:]
        updateCDS = getSeqRegions(seqDict, header, newCoords)
        updateProt = translate(updateCDS, strand, phase)
        if '*' in updateProt:
            num = (updateProt.find('*') - protLen + 1) * 3
            finalTup = (sorted_coordinates[0][0]-num, sorted_coordinates[0][1])
            finalCoords = [finalTup]
            if len(sorted_coordinates) > 1:
                finalCoords += sorted_coordinates[1:]
            # minus strand coordinates are returned descending
            finalSort = sorted(
                finalCoords, key=lambda tup: tup[0], reverse=True)
            return True, finalSort
        else:
            return False, coordinates
def getSeqRegions(SeqRecordDict, header, coordinates):
    """Concatenate sequence slices from a SeqRecord dictionary or index.

    *coordinates* is a list of 1-based inclusive tuples, e.g.
    [(1, 10), (20, 30)]; slices are taken in ascending start order and
    joined into a single sequence string.
    """
    pieces = []
    for begin, stop in sorted(coordinates, key=lambda tup: tup[0]):
        # 1-based inclusive -> 0-based half-open slice
        pieces.append(str(SeqRecordDict[header][begin - 1:stop].seq))
    return ''.join(pieces)
def convertgff2tbl(gff, prefix, fasta, prots, trans, tblout, external=False):
    '''
    function to convert directly from gff to tbl

    Loads GFF3 annotation into the funannotate gene dictionary, optionally
    renames locus tags as <prefix>_NNNNNN, writes an NCBI tbl file plus
    protein and transcript FASTAs, and returns (num_genes, geneDB) where
    geneDB maps transcript IDs to gene IDs.
    '''
    from collections import OrderedDict
    def _sortDict(d):
        # sort genes by contig then by start coordinate
        return (d[1]['contig'], d[1]['location'][0])
    # load GFF annotations into funannotate dictionary
    Genes = {}
    Genes = gff2dict(gff, fasta, Genes)
    # get scaffold names/lengths
    scaffLen = {}
    with open(fasta, 'r') as seqin:
        for record in SeqIO.parse(seqin, 'fasta'):
            if not record.id in scaffLen:
                scaffLen[record.id] = len(record.seq)
    # get partialStart/stop info and load scaffold dictionary with coordinates of Genes
    sGenes = sorted(iter(Genes.items()), key=_sortDict)
    sortedGenes = OrderedDict(sGenes)
    renamedGenes = {}
    scaff2genes = {}
    counter = 1
    for k, v in list(sortedGenes.items()):
        if not prefix:
            locusTag = k
        else:
            locusTag = prefix+'_'+str(counter).zfill(6)
        if not locusTag in renamedGenes:
            renamedGenes[locusTag] = v
            if not v['contig'] in scaff2genes:
                scaff2genes[v['contig']] = [locusTag]
            else:
                scaff2genes[v['contig']].append(locusTag)
        counter += 1
    if external:
        log.info('Found {:,} gene models from GFF3 annotation'.format(len(sortedGenes)))
    dicts2tbl(renamedGenes, scaff2genes, scaffLen, 'CFMR', '12345', [],
              tblout, external=external)
    # transcript to geneID dictionary
    geneDB = {}
    for k, v in list(renamedGenes.items()):
        for x in v['ids']:
            if not x in geneDB:
                geneDB[x] = k
    # write to protein and transcripts
    with open(prots, 'w') as protout:
        with open(trans, 'w') as tranout:
            for k, v in natsorted(list(Genes.items())):
                if v['pseudo'] and v['pseudo'] is True:
                    continue
                for i, x in enumerate(v['ids']):
                    try:
                        Transcript = str(v['transcript'][i])
                    except IndexError:
                        # NOTE(review): on IndexError the previous loop's
                        # Transcript is reused below — confirm intended
                        print((k, v))
                    if v['strand'] == '-':
                        Transcript = RevComp(Transcript)
                    tranout.write('>%s %s\n%s\n' % (x, k, softwrap(Transcript)))
                    if v['type'] == 'mRNA':
                        Prot = v['protein'][i]
                        if Prot.endswith('*'):
                            Prot = Prot.rstrip('*')
                        protout.write('>%s %s\n%s\n' % (x, k, softwrap(Prot)))
    return len(Genes), geneDB
def tblfilter(input, remove, output):
    '''
    function to take an NCBI tbl file and drop gene models present in remove file

    *remove* is a text file with one locus tag per line ('#' comments and
    blank lines ignored).  Gene blocks whose locus_tag appears in that
    list are omitted from *output*; everything else is copied through.
    '''
    # get items to remove list
    removeModels = []
    with open(remove, 'r') as file:
        for line in file:
            if line.startswith('#') or line.startswith('\n'):
                continue
            line = line.strip()
            if not line in removeModels:
                removeModels.append(line)
    # now loop through tbl file and get line positions of gene models
    found = []
    with open(output, 'w') as outfile:
        with open(input, 'r') as infile:
            # readBlocks2 yields the contig header block, then one block
            # per gene feature
            for gene in readBlocks2(infile, '>Feature', '\tgene\n'):
                if gene[0].startswith('>Feature'):
                    outfile.write(''.join(gene))
                else:
                    locusTag = None
                    for x in gene:
                        if x.startswith('\t\t\tlocus_tag\t'):
                            locusTag = x.split('\t')[-1].rstrip()
                    if locusTag and not locusTag in removeModels:
                        outfile.write(''.join(gene))
                    else:
                        if not locusTag:
                            # keep-nothing fallback: block had no locus_tag
                            log.debug(
                                'LocusTag not found parsing NCBI Tbl file (this should not happen)')
                            print(gene)
                        else:
                            found.append(locusTag)
    log.debug("Removed %i out of %i gene models from annotation" %
              (len(found), len(removeModels)))
    s = set(found)
    diff = [x for x in removeModels if x not in s]
    if len(diff) > 0:
        log.debug('Could not find %i gene models:\n %s' %
                  (len(diff), ','.join(diff)))
def annotations2dict(input, geneDB={}, custom=False):
    """Parse a 3-column annotation file into {geneID: {refDB: [descriptions]}}.

    Each line of *input* is ID<TAB>refDB<TAB>description.  'name' and
    'product' entries are re-keyed from transcript IDs to gene IDs —
    via *geneDB* (transcript->gene map) when provided, otherwise by
    stripping a '-T' suffix.  If *custom* is a filename, its annotations
    are merged afterwards and may override 'name'/'product', demoting
    any prior names to gene_synonym.  Synonym lists are de-duplicated
    and never repeat the current gene name.
    """
    Annotations = {}
    with open(input, 'r') as all_annots:
        for line in all_annots:
            line = line.replace('\n', '')
            ID, refDB, description = line.split('\t')
            if description == '':  # there is nothing here, so skip
                continue
            if refDB == 'name' or refDB == 'product':
                if len(geneDB) == 0:
                    if '-T' in ID:
                        geneID = ID.split('-T')[0]
                    else:
                        geneID = ID
                else:
                    if ID in geneDB:
                        geneID = geneDB[ID]
                    else:
                        geneID = ID
            else:
                geneID = ID
            if not geneID in Annotations:
                Annotations[geneID] = {refDB: [description]}
            else:
                if not refDB in Annotations[geneID]:
                    Annotations[geneID][refDB] = [description]
                else:
                    Annotations[geneID][refDB].append(description)
    if custom:
        log.info("Parsing custom annotations from {:}".format(custom))
        with open(custom, 'r') as custom_annots:
            for line in custom_annots:
                line = line.rstrip()
                try:
                    if line.count('\t') != 2:
                        continue
                except UnicodeDecodeError:
                    log.error('Error parsing the custom annotations:')
                    print(line)
                    sys.exit(1)
                ID, refDB, description = line.split('\t')
                if description == '':
                    continue
                if refDB in ['name', 'product', 'gene_synonym']:
                    if len(geneDB) == 0:
                        if '-T' in ID:
                            geneID = ID.split('-T')[0]
                        else:
                            geneID = ID
                    else:
                        if ID in geneDB:
                            geneID = geneDB[ID]
                        else:
                            geneID = ID
                else:
                    geneID = ID
                if not geneID in Annotations:
                    Annotations[geneID] = {refDB: [description]}
                else:
                    if not refDB in Annotations[geneID]:
                        Annotations[geneID][refDB] = [description]
                    elif refDB == 'name':
                        # custom name overrides; demote previous names to synonyms
                        previousNames = Annotations[geneID][refDB]
                        if not 'gene_synonym' in Annotations[geneID]:
                            Annotations[geneID]['gene_synonym'] = previousNames
                        else:
                            Annotations[geneID]['gene_synonym'] += previousNames
                        Annotations[geneID][refDB] = [description]
                    elif refDB == 'product':
                        Annotations[geneID][refDB] = [description]
                    else:
                        Annotations[geneID][refDB].append(description)
    # make sure no synonyms are repeated; iteration order is irrelevant here
    # since every entry is cleaned independently, so plain items() suffices
    for k, v in Annotations.items():
        if 'gene_synonym' in v and 'name' in v:
            synonym_set = set(v['gene_synonym'])
            # also drop synonyms that duplicate the current gene name
            cleaned = [x for x in synonym_set if x not in v['name']]
            Annotations[k]['gene_synonym'] = cleaned
        elif 'gene_synonym' in v:
            # BUGFIX: this branch previously tested the misspelled key
            # 'gene_synonm', so synonyms without a name were never deduped
            synonym_set = set(v['gene_synonym'])
            Annotations[k]['gene_synonym'] = list(synonym_set)
    return Annotations
def updateTBL(input, annotDict, output, prefix=False, newtag=False):
    '''
    general function to parse ncbi tbl format and add functional annotation

    annotDict maps locus tags / transcript IDs to {refDB: [values]} as
    produced by annotations2dict.  Gene names, synonyms, products and
    per-transcript qualifiers are injected into the copied tbl.  When
    *newtag* is given, every occurrence of *prefix* in locus_tag /
    transcript_id / protein_id lines is replaced by *newtag*.
    '''
    log.debug('Parsing tbl file: {:}'.format(os.path.abspath(input)))
    tmpoutput = output+'.tmp'
    with open(input, 'r') as infile:
        with open(tmpoutput, 'w') as outfile:
            # readBlocks2 yields the contig header block, then one block per gene
            for gene in readBlocks2(infile, '>Feature', '\tgene\n'):
                transcriptsSeen = []
                # transcriptNum = 0
                if gene[0].startswith('>Feature'):
                    outfile.write(''.join(gene))
                else:
                    # NOTE(review): LocusType is unpacked but never used;
                    # the lowercase locusType below is the live variable
                    locusTag, locusTagIndex, LocusType, geneAnnot, transcriptAnnot = (
                        None,)*5
                    for i, x in enumerate(gene):
                        if x.startswith('\t\t\tlocus_tag\t'):
                            locusTag = x.split('\t')[-1].rstrip()
                            locusTagIndex = i
                    if not locusTagIndex:
                        outfile.write(''.join(gene))
                        continue
                    try:
                        # the feature type follows the locus_tag line
                        locusType = gene[locusTagIndex+1].split('\t')[-1].rstrip()
                    except IndexError:
                        print(gene)
                    except TypeError:
                        print(gene)
                    if locusType in ['tRNA', 'ncRNA', 'rRNA']:
                        outfile.write(''.join(gene))
                    elif locusType == 'mRNA':
                        if locusTag in annotDict:
                            geneAnnot = annotDict.get(locusTag)
                        else:
                            geneAnnot = {}
                        for line in gene:
                            if line.startswith('\t\t\tlocus_tag\t'):
                                # emit gene name + synonyms just before locus_tag
                                if 'name' in geneAnnot:
                                    outfile.write('\t\t\tgene\t%s\n' %
                                                  geneAnnot['name'][0])
                                    if 'gene_synonym' in geneAnnot:
                                        for z in set(geneAnnot['gene_synonym']):
                                            outfile.write('\t\t\tgene_synonym\t%s\n' % z)
                                outfile.write(line)
                            elif line.startswith('\t\t\tproduct\t'):
                                # keep original product only if no replacement
                                if not 'product' in geneAnnot:
                                    outfile.write(line)
                            elif line.startswith('\t\t\ttranscript_id\t'):
                                ID = line.split('|')[-1]
                                ID = ID.split('_mrna')[0]
                                if not ID in transcriptsSeen:
                                    transcriptsSeen.append(ID)
                                transcriptNum = len(transcriptsSeen)
                                if ID in annotDict:
                                    transcriptAnnot = annotDict.get(ID)
                                if 'product' in geneAnnot:
                                    Description = geneAnnot['product'][0]
                                    if transcriptNum > 1:
                                        Description = Description + ', variant {:}'.format(transcriptNum)
                                    outfile.write('\t\t\tproduct\t%s\n' % Description)
                                outfile.write(line)
                            elif line.startswith('\t\t\tcodon_start\t'):
                                outfile.write(line)
                                # per-transcript qualifiers go after codon_start
                                if transcriptAnnot:
                                    for item in transcriptAnnot:
                                        if item in ['name', 'product', 'gene_synonym']:
                                            continue
                                        for x in set(transcriptAnnot[item]):
                                            outfile.write('\t\t\t%s\t%s\n' % (item, x))
                            else:
                                outfile.write(line)
    if newtag:
        with open(output, 'w') as outfile:
            with open(tmpoutput, 'r') as infile:
                for line in infile:
                    if line.startswith('\t\t\tlocus_tag\t'):
                        line = line.replace('\t'+prefix, '\t'+newtag)
                    elif line.startswith('\t\t\ttranscript_id\t') or line.startswith('\t\t\tprotein_id\t'):
                        line = line.replace('|'+prefix, '|'+newtag)
                    outfile.write(line)
        os.remove(tmpoutput)
    else:
        os.rename(tmpoutput, output)
def bed2gff3(input, output):
    '''
    convert repeats bed file into GFF3 format
    Contig245   36  69  Repeat_1
    Contig245   265 288 Repeat_2
    Contig245   477 493 Repeat_3
    Contig245   780 797 Repeat_4
    Contig245   997 1016    Repeat_5

    Each 4-column BED line becomes a RepeatMasker dispersed_repeat
    feature; BED starts are 0-based so 1 is added for GFF.
    '''
    with open(output, 'w') as outfile:
        outfile.write("##gff-version 3\n")
        with open(input, 'r') as bedfile:
            for line in bedfile:
                line = line.strip()
                # BUGFIX: the old check was line.startswith('\n') AFTER
                # strip(), which never matched, so a blank line crashed
                # the 4-way unpack below
                if not line:
                    continue
                contig, start, end, name = line.split('\t')
                start = int(start) + 1  # bed is 0-based, gff 1-based
                outfile.write(
                    '{:}\tRepeatMasker\tdispersed_repeat\t{:}\t{:}\t.\t+\t.\tID={:}\n'.format(contig, start, end, name))
def findUTRs(cds, mrna, strand):
    """Derive 5' and 3' UTR intervals from CDS and mRNA exon lists.

    *cds* and *mrna* are lists of (start, end) tuples.  Exons entirely
    outside the CDS span become whole UTRs; exons that overlap a CDS
    boundary are split at the boundary.  Returns (FiveUTR, ThreeUTR)
    lists of tuples; both empty when cds == mrna.
    """
    import numpy
    FiveUTR = []
    ThreeUTR = []
    if cds != mrna:
        inter = InterLap()
        inter.add(cds)
        for i, x in enumerate(mrna):
            if not x in inter:
                # exon does not overlap the CDS span at all
                loc = (list(inter)[0][0], list(inter)[-1][1])
                diff = numpy.subtract(x, loc)
                if diff[0] < 0 and diff[1] < 0:
                    # exon entirely left of the CDS
                    if strand == '+':
                        FiveUTR.append(x)
                    else:
                        ThreeUTR.append(x)
                elif diff[0] > 0 and diff[1] > 0:
                    # exon entirely right of the CDS
                    if strand == '+':
                        ThreeUTR.append(x)
                    else:
                        FiveUTR.append(x)
            else:
                hit = list(inter.find(x))
                if x == hit[0]:
                    # exon is exactly a CDS segment: no UTR portion
                    continue
                else:
                    # exon extends past the CDS segment on one/both sides
                    diff = numpy.subtract(x, hit[0])
                    if strand == '+':
                        if int(diff[0]) < 1 and int(diff[1]) == 0:
                            FiveUTR.append((x[0], hit[0][0]-1))
                        elif int(diff[1]) > 1 and int(diff[0]) == 0:
                            ThreeUTR.append((hit[0][1]+1, x[1]))
                        elif int(diff[0]) < 1 and int(diff[1]) > 1:
                            FiveUTR.append((x[0], hit[0][0]-1))
                            ThreeUTR.append((hit[0][1]+1, x[1]))
                    else:
                        if diff[0] == 0 and diff[1] > 0:
                            FiveUTR.append((hit[0][1]+1, x[1]))
                        elif diff[0] < 0 and diff[1] == 0:
                            ThreeUTR.append((x[0], hit[0][0]-1))
                        elif diff[0] < 0 and diff[1] > 0:
                            FiveUTR.append((hit[0][1]+1, x[1]))
                            ThreeUTR.append((x[0], hit[0][0]-1))
    return FiveUTR, ThreeUTR
def dict2nucleotides2(input, prots, trans, cdstrans):
    '''
    function to generate protein and transcripts from dictionary

    Writes three FASTA files from a funannotate gene dictionary:
    proteins (*prots*), spliced transcripts (*trans*) and CDS-only
    transcripts (*cdstrans*).  Pseudo genes and mRNAs with missing or
    inconsistent CDS data are skipped.
    '''
    # write to protein and transcripts
    with open(prots, 'w') as protout:
        with open(trans, 'w') as tranout:
            with open(cdstrans, 'w') as cdsout:
                for k, v in natsorted(list(input.items())):
                    if 'pseudo' in v:
                        if v['pseudo']:
                            continue
                    if v['type'] == 'mRNA' and not v['CDS']:
                        continue
                    # skip models whose ids/mRNA/CDS lists are out of sync
                    if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
                        continue
                    for i, x in enumerate(v['ids']):
                        try:
                            Transcript = str(v['transcript'][i])
                            if v['strand'] == '-':
                                Transcript = RevComp(Transcript)
                            tranout.write('>{:} {:}\n{:}\n'.format(
                                x, k, softwrap(Transcript)))
                        except IndexError:
                            pass
                        try:
                            CDStranscript = str(v['cds_transcript'][i])
                            if v['strand'] == '-':
                                CDStranscript = RevComp(CDStranscript)
                            cdsout.write('>{:} {:}\n{:}\n'.format(
                                x, k, softwrap(CDStranscript)))
                        except IndexError:
                            pass
                        if v['type'] == 'mRNA':
                            try:
                                Prot = v['protein'][i]
                            except IndexError:
                                print(('ERROR', k, v))
                                sys.exit(1)
                            if Prot.endswith('*'):
                                Prot = Prot[:-1]  # drop trailing stop
                            protout.write('>{:} {:}\n{:}\n'.format(
                                x, k, softwrap(Prot)))
def simpleFastaStats(fasta):
    """Compute basic assembly statistics from a FASTA file.

    Returns a tuple
    (numContigs, totalLen, pctGC, avg_length, n50, l50, n90, l90)
    where avg_length is rounded to two decimals.
    """
    from Bio.SeqUtils import GC
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    seqs = []
    with open(fasta, 'r') as infile:
        for _, seq in SimpleFastaParser(infile):
            seqs.append(seq)
    # longest contig first for N50/L50 style statistics
    seqs.sort(key=len, reverse=True)
    lengths = [len(s) for s in seqs]
    pctGC = round(GC(''.join(seqs)), 2)
    total = sum(lengths)
    n50_target = total * .50
    n90_target = total * .90
    n50 = None
    n90 = None
    running = 0
    for contig_len in lengths:
        running += contig_len
        if not n50 and running >= n50_target:
            n50 = contig_len
        if not n90 and running >= n90_target:
            n90 = contig_len
    l50 = lengths.index(n50) + 1
    l90 = lengths.index(n90) + 1
    avg_length = '{:.2f}'.format(total / float(len(seqs)))
    return len(seqs), total, pctGC, float(avg_length), n50, l50, n90, l90
def databases2json(FUNDB):
    """Load funannotate database metadata as a dictionary.

    Reads 'funannotate-db-info.txt' from the *FUNDB* directory and maps
    each database name to its type, version, date and record count.
    Returns an empty dict when the info file does not exist.
    """
    resources = {}
    dbfile = os.path.join(FUNDB, 'funannotate-db-info.txt')
    if not os.path.isfile(dbfile):
        return resources
    with open(dbfile, 'r') as infile:
        for raw in infile:
            cols = raw.rstrip().split('\t')
            resources[cols[0]] = {
                'type': cols[1],
                'version': cols[3],
                'date': cols[4],
                'num-records': cols[5]
            }
    return resources
def annotation_summary(fasta, output, gff=False, tbl=False, pct=0.90,
                       transcripts=False, proteins=False, previous=False,
                       database='.', command='', organism=''):
    '''
    function to output annotation stats from GFF3 or TBL files

    Writes a JSON report to *output* combining assembly statistics from
    *fasta* with gene/transcript/functional-annotation counts from either
    *tbl* or *gff*.  When *transcripts*/*proteins* evidence files are
    given, exon-overlap percentages are computed with bedtools; *previous*
    lets those percentages be carried over from an earlier report.
    '''
    import json
    stats = {
        'format': 'annotation',
        'command': command,
        'organism': organism,
        'software':{
            'name': 'funannotate',
            'version': get_version(),
            'date': datetime.datetime.today().strftime('%Y-%m-%d'),
            'resources': databases2json(database)
        },
        'assembly': {
            'num_contigs': 0,
            'length': 0,
            'mean_length': 0,
            'N50': 0,
            'L50': 0,
            'N90': 0,
            'L90': 0,
            'GC_content': 0,
        },
        'annotation': {
            'genes': 0,
            'common_name': 0,
            'mRNA': 0,
            'tRNA': 0,
            'ncRNA': 0,
            'rRNA': 0,
            'avg_gene_length': 0,
            'transcript-level': {
                'CDS_transcripts': 0,
                'CDS_five_utr': 0,
                'CDS_three_utr': 0,
                'CDS_no_utr': 0,
                'CDS_five_three_utr': 0,
                'CDS_complete': 0,
                'CDS_no-start': 0,
                'CDS_no-stop': 0,
                'CDS_no-start_no-stop': 0,
                'total_exons': 0,
                'total_cds_exons': 0,
                'multiple_exon_transcript': 0,
                'single_exon_transcript': 0,
                'avg_exon_length': 0,
                'avg_protein_length': 0,
                'functional': {
                    'go_terms': 0,
                    'interproscan': 0,
                    'eggnog': 0,
                    'pfam': 0,
                    'cazyme': 0,
                    'merops': 0,
                    'busco': 0,
                    'secretion': 0
                }
            }
        }
    }
    if previous:  # load some stats that cant calculate from annotation
        with open(previous, 'r') as infile:
            previousStats = json.load(infile)
        try:
            stats['annotation']['transcript-level']['pct_exon_overlap_protein_evidence'] = previousStats['annotation']['transcript-level']['pct_exon_overlap_protein_evidence']
        except KeyError:
            pass
        try:
            stats['annotation']['transcript-level']['pct_exon_overlap_transcript_evidence'] = previousStats['annotation']['transcript-level']['pct_exon_overlap_transcript_evidence']
        except KeyError:
            pass
    num, tot, gc, avg, n50, l50, n90, l90 = simpleFastaStats(fasta)
    stats['assembly']['num_contigs'] = num
    stats['assembly']['length'] = tot
    stats['assembly']['GC_content'] = gc
    stats['assembly']['mean_length'] = avg
    stats['assembly']['N50'] = n50
    stats['assembly']['L50'] = l50
    stats['assembly']['N90'] = n90
    stats['assembly']['L90'] = l90
    Genes = {}
    if tbl:
        Genes = tbl2dict(tbl, fasta, Genes)
    elif gff:
        Genes = gff2dict(gff, fasta, Genes)
    if len(Genes) > 0:
        protLengths = []
        geneLengths = []
        exonLengths = []
        for k, v in Genes.items():
            stats['annotation']['genes'] += 1
            gLength = v['location'][1] - v['location'][0]
            geneLengths.append(gLength)
            if v['type'] == 'tRNA':
                stats['annotation']['tRNA'] += 1
            elif v['type'] == 'rRNA':
                stats['annotation']['rRNA'] += 1
            elif v['type'] == 'ncRNA':
                stats['annotation']['ncRNA'] += 1
            if v['name']:
                stats['annotation']['common_name'] += 1
            for i in range(0, len(v['ids'])):
                if v['type'] == 'mRNA':
                    stats['annotation']['mRNA'] += 1
                    stats['annotation']['transcript-level']['CDS_transcripts'] += 1
                    pLen = len(v['protein'][i])
                    if v['protein'][i].endswith('*'):
                        pLen -= 1  # don't count the stop in protein length
                    protLengths.append(pLen)
                    if len(v['mRNA'][i]) > 1:
                        stats['annotation']['transcript-level']['multiple_exon_transcript'] += 1
                        for y in v['mRNA'][i]:
                            exon_length = y[1] - y[0]
                            exonLengths.append(exon_length)
                    else:
                        stats['annotation']['transcript-level']['single_exon_transcript'] += 1
                    stats['annotation']['transcript-level']['total_exons'] += len(v['mRNA'][i])
                    stats['annotation']['transcript-level']['total_exons'] += len(v['5UTR'][i])
                    stats['annotation']['transcript-level']['total_exons'] += len(v['3UTR'][i])
                    stats['annotation']['transcript-level']['total_cds_exons'] += len(v['CDS'][i])
                    if v['partialStart'][i] and v['partialStop'][i]:
                        stats['annotation']['transcript-level']['CDS_no-start_no-stop'] += 1
                    elif v['partialStart'][i]:
                        stats['annotation']['transcript-level']['CDS_no-start'] += 1
                    elif v['partialStop'][i]:
                        stats['annotation']['transcript-level']['CDS_no-stop'] += 1
                    else:
                        stats['annotation']['transcript-level']['CDS_complete'] += 1
                    if len(v['5UTR'][i]) > 0 and len(v['3UTR'][i]) > 0:
                        stats['annotation']['transcript-level']['CDS_five_three_utr'] += 1
                    elif len(v['3UTR'][i]) > 0:
                        stats['annotation']['transcript-level']['CDS_three_utr'] += 1
                    elif len(v['5UTR'][i]) > 0:
                        # BUGFIX: previously incremented CDS_three_utr here,
                        # so CDS_five_utr was always 0 and 5'-only UTRs were
                        # double-counted as 3'-only
                        stats['annotation']['transcript-level']['CDS_five_utr'] += 1
                    else:
                        stats['annotation']['transcript-level']['CDS_no_utr'] += 1
                if v['go_terms'][i]:
                    stats['annotation']['transcript-level']['functional']['go_terms'] += 1
                if any(s.startswith('PFAM:') for s in v['db_xref'][i]):
                    stats['annotation']['transcript-level']['functional']['pfam'] += 1
                if any(s.startswith('InterPro:') for s in v['db_xref'][i]):
                    stats['annotation']['transcript-level']['functional']['interproscan'] += 1
                if any(s.startswith('EggNog:') for s in v['note'][i]):
                    stats['annotation']['transcript-level']['functional']['eggnog'] += 1
                if any(s.startswith('CAZy:') for s in v['note'][i]):
                    stats['annotation']['transcript-level']['functional']['cazyme'] += 1
                if any(s.startswith('MEROPS:') for s in v['note'][i]):
                    stats['annotation']['transcript-level']['functional']['merops'] += 1
                if any(s.startswith('BUSCO:') for s in v['note'][i]):
                    stats['annotation']['transcript-level']['functional']['busco'] += 1
                if any(s.startswith('SECRETED:') for s in v['note'][i]):
                    stats['annotation']['transcript-level']['functional']['secretion'] += 1
        stats['annotation']['avg_gene_length'] = round(sum(geneLengths) / float(len(geneLengths)), 2)
        stats['annotation']['transcript-level']['avg_protein_length'] = round(sum(protLengths) / float(len(protLengths)), 2)
        stats['annotation']['transcript-level']['avg_exon_length'] = round(sum(exonLengths) / float(len(exonLengths)), 2)
    exonBED = 'tmp.exon.{}.bed'.format(os.getpid())
    if transcripts or proteins:
        exonCount = 0
        bedtools_cmd = ['bedtools', 'intersect', '-a', exonBED,
                        '-u', '-f', str(pct), '-s', '-b']
        # dump all mRNA exons to a BED file for overlap calculations
        with open(exonBED, 'w') as outfile:
            for k, v in Genes.items():
                for i in range(0, len(v['ids'])):
                    for z, x in enumerate(v['mRNA'][i]):
                        exonCount += 1
                        outfile.write('{}\t{}\t{}\t{}.exon{}\t.\t{}\n'.format(
                            v['contig'], x[0]-1, x[1], v['ids'][i], z+1, v['strand']))
        if transcripts:  # calculate exons covered by transcripts
            cmd = bedtools_cmd + [transcripts]
            overlapCount = 0
            for line in execute(cmd):
                overlapCount += 1
            pctOverlap = '{:.2f}'.format(overlapCount/exonCount*100)
            stats['annotation']['transcript-level']['pct_exon_overlap_transcript_evidence'] = float(pctOverlap)
        if proteins:  # calculate exons covered by proteins
            cmd = bedtools_cmd + [proteins]
            overlapCount = 0
            for line in execute(cmd):
                overlapCount += 1
            pctOverlap = '{:.2f}'.format(overlapCount/exonCount*100)
            stats['annotation']['transcript-level']['pct_exon_overlap_protein_evidence'] = float(pctOverlap)
        if os.path.isfile(exonBED):
            os.remove(exonBED)
    # write to json format
    with open(output, 'w') as outfile:
        json.dump(stats, outfile, indent=4)
def tbl2allout(input, fasta, GFF, Proteins, Transcripts, cdsTranscripts, DNA):
    '''
    function to convert NCBI tbl format directly to other formats; this will be a replacement
    for Genbank derived output files and correctly parse/print the transcript/proteins

    Produces GFF3 (*GFF*), protein/transcript/CDS FASTAs and a copy of
    the genome FASTA (*DNA*) from the tbl annotation *input*.
    '''
    Genes = {}
    Genes = tbl2dict(input, fasta, Genes)
    # write GFF
    dict2gff3(Genes, GFF)
    # write to protein and transcripts
    dict2nucleotides2(Genes, Proteins, Transcripts, cdsTranscripts)
    # copy over DNA fasta file
    shutil.copyfile(fasta, DNA)
def tbl2dict(input, fasta, Genes):
    '''
    need a method to convert directly from NCBI tbl format to several output formats
    to avoid conversion problems with GBK files that have mutliple transcripts
    if can load funannotate dictionary directly from tbl format, then can write the other
    formats directly

    Parses the tbl file block-by-block (one contig header block, then one
    block per gene model), accumulates results into the passed-in `Genes`
    dictionary (funannotate standard gene-model dict keyed by locus tag),
    then uses the `fasta` assembly to fill in transcript/CDS/protein
    sequences and UTRs. Returns the populated `Genes` dictionary.
    '''
    with open(input, 'r') as infile:
        contig = ''
        # readBlocks2 yields either a '>Feature' contig header block or a
        # single gene-model block (delimited by '\tgene\n' lines)
        for item in readBlocks2(infile, '>Feature', '\tgene\n'):
            if item[0].startswith('>Feature'):  # this will be contig header block
                contig = item[0].rstrip().split(' ')[-1]
            else:  # these are all gene model blocks
                # per-gene state, reset for every block
                geneID, Name, type, start, end, fivepartial, threepartial, strand, location = (
                    None,)*9
                codon_start = []
                transcriptID = []
                proteinID = []
                synonyms = []
                product = []
                first, firstpartial, second, secondpartial = (False,)*4
                # `position` tracks which feature type we are currently
                # reading so continuation coordinate lines can be routed
                position = None
                # check number of transcripts
                tNum = 0
                for z in item:
                    if z.startswith('\t\t\ttranscript_id'):
                        tNum += 1
                # transcript_id appears to be listed once under the mRNA and
                # once under the CDS feature, hence the halving — TODO confirm
                if tNum > 0:
                    tNum = int(tNum / 2)
                else:
                    tNum = 1
                # setup lists for transcripts
                mRNA = [[] for y in range(tNum)]
                CDS = [[] for y in range(tNum)]
                note = [[] for y in range(tNum)]
                dbxref = [[] for y in range(tNum)]
                ECnum = [[] for y in range(tNum)]
                go_terms = [[] for y in range(tNum)]
                fivepartial = [False, ]*tNum
                threepartial = [False, ]*tNum
                # index of the transcript currently being populated
                currentNum = 0
                for x in item:
                    exonF, exonR, cdsF, cdsR, cols = (None,)*5
                    if x.endswith('\tgene\n') and not position:
                        # gene line: '<' / '>' prefixes mark partial ends;
                        # coordinate order determines the strand
                        cols = x.strip().split('\t')
                        position = 'gene'
                        if cols[0].startswith('<'):
                            first = int(cols[0].split('<')[-1])
                        else:
                            first = int(cols[0])
                        if cols[1].startswith('>'):
                            second = int(cols[1].split('>')[-1])
                        else:
                            second = int(cols[1])
                        if first < second:
                            start = first
                            end = second
                            strand = '+'
                        else:
                            start = second
                            end = first
                            strand = '-'
                        location = (start, end)
                    elif x.startswith('\t\t\tgene\t'):
                        Name = x.strip().split('\t')[-1]
                    elif x.startswith('\t\t\tlocus_tag\t'):
                        geneID = x.strip().split('\t')[-1]
                    elif x.endswith('\ttRNA\n') and x.count('\t') == 2 and position == 'gene':
                        # first exon line of a tRNA feature
                        type = 'tRNA'
                        position = 'tRNA'
                        cols = x.strip().split('\t')
                        exonF = int(cols[0].replace('<', ''))
                        exonR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            mRNA[currentNum].append((exonF, exonR))
                        else:
                            mRNA[currentNum].append((exonR, exonF))
                    elif x.endswith('\tncRNA\n') and x.count('\t') == 2 and position == 'gene':
                        type = 'ncRNA'
                        position = 'ncRNA'
                        cols = x.strip().split('\t')
                        exonF = int(cols[0].replace('<', ''))
                        exonR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            mRNA[currentNum].append((exonF, exonR))
                        else:
                            mRNA[currentNum].append((exonR, exonF))
                    elif x.endswith('\trRNA\n') and x.count('\t') == 2 and position == 'gene':
                        type = 'rRNA'
                        position = 'rRNA'
                        cols = x.strip().split('\t')
                        exonF = int(cols[0].replace('<', ''))
                        exonR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            mRNA[currentNum].append((exonF, exonR))
                        else:
                            mRNA[currentNum].append((exonR, exonF))
                    elif x.endswith('\tmRNA\n') and x.count('\t') == 2:
                        # an mRNA line directly after a CDS starts the next
                        # transcript of this locus
                        if position == 'CDS':
                            currentNum += 1
                        elif position == 'gene':
                            type = 'mRNA'
                        position = 'mRNA'
                        cols = x.strip().split('\t')
                        exonF = int(cols[0].replace('<', ''))
                        exonR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            mRNA[currentNum].append((exonF, exonR))
                        else:
                            mRNA[currentNum].append((exonR, exonF))
                    elif x.endswith('\tCDS\n') and x.count('\t') == 2:
                        position = 'CDS'
                        cols = x.strip().split('\t')
                        cdsF = int(cols[0].replace('<', ''))
                        cdsR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            CDS[currentNum].append((cdsF, cdsR))
                        else:
                            CDS[currentNum].append((cdsR, cdsF))
                    elif x.startswith('\t\t\tcodon_start\t'):
                        cNum = int(x.strip().split('\t')[-1])
                        codon_start.append(cNum)
                    elif x.startswith('\t\t\tproduct\t') and position != 'mRNA':
                        product.append(x.strip().split('\t')[-1])
                    elif x.startswith('\t\t\ttranscript_id\t'):
                        # tbl2asn appends '_mrna' to transcript ids; strip it
                        tID = x.strip().split('|')[-1]
                        if '_mrna' in tID:
                            tID = tID.replace('_mrna', '')
                        if not tID in transcriptID:
                            transcriptID.append(tID)
                    elif x.startswith('\t\t\tprotein_id\t'):
                        pID = x.strip().split('|')[-1]
                        if not pID in proteinID:
                            proteinID.append(pID)
                    elif x.startswith('\t\t\tgene_synonym\t'):
                        synonyms.append(x.strip().split('\t')[-1])
                    elif x.startswith('\t\t\tgo_'): # go terms
                        go_terms[currentNum].append(
                            'GO:{:}'.format(x.strip().split('|')[1]))
                    elif x.startswith('\t\t\tnote\t'):
                        note[currentNum].append(x.strip().split('\t')[-1])
                    elif x.startswith('\t\t\tdb_xref\t'):
                        dbxref[currentNum].append(x.strip().split('\t')[-1])
                    elif x.startswith('\t\t\tEC_number\t'):
                        ECnum[currentNum].append(x.strip().split('\t')[-1])
                    elif position == 'mRNA' and x.count('\t') == 1:
                        # continuation exon line for the current mRNA
                        cols = x.strip().split('\t')
                        exonF = int(cols[0].replace('<', ''))
                        exonR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            mRNA[currentNum].append((exonF, exonR))
                        else:
                            mRNA[currentNum].append((exonR, exonF))
                    elif position in ['tRNA', 'ncRNA', 'rRNA'] and x.count('\t') == 1:
                        # continuation exon line for the current RNA feature
                        cols = x.strip().split('\t')
                        exonF = int(cols[0].replace('<', ''))
                        exonR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            mRNA[currentNum].append((exonF, exonR))
                        else:
                            mRNA[currentNum].append((exonR, exonF))
                    elif position == 'CDS' and x.count('\t') == 1:
                        # continuation CDS segment line
                        cols = x.strip().split('\t')
                        cdsF = int(cols[0].replace('<', ''))
                        cdsR = int(cols[1].replace('>', ''))
                        if strand == '+':
                            CDS[currentNum].append((cdsF, cdsR))
                        else:
                            CDS[currentNum].append((cdsR, cdsF))
                if not geneID in Genes:
                    if type in ['tRNA', 'ncRNA', 'rRNA']:
                        # non-coding models: partial flags default to True;
                        # sequences filled in below from the assembly
                        Genes[geneID] = {'name': Name, 'type': type,
                                         'transcript': [],
                                         'cds_transcript': [],
                                         'protein': [], '5UTR': [[]],
                                         '3UTR': [[]],
                                         'codon_start': codon_start,
                                         'ids': [geneID+'-T1'], 'CDS': CDS,
                                         'mRNA': mRNA, 'strand': strand,
                                         'gene_synonym': synonyms,
                                         'location': location,
                                         'contig': contig,
                                         'product': product,
                                         'source': 'funannotate', 'phase': [],
                                         'db_xref': dbxref,
                                         'go_terms': go_terms,
                                         'EC_number': ECnum, 'note': note,
                                         'partialStart': [True],
                                         'partialStop': [True],
                                         'pseudo': False
                                         }
                    else:
                        # coding models: ids come from the protein_id lines
                        Genes[geneID] = {'name': Name, 'type': type,
                                         'transcript': [], 'cds_transcript': [],
                                         'protein': [], '5UTR': [], '3UTR': [],
                                         'codon_start': codon_start,
                                         'ids': proteinID, 'CDS': CDS,
                                         'mRNA': mRNA, 'strand': strand,
                                         'gene_synonym': synonyms,
                                         'location': location,
                                         'contig': contig, 'product': product,
                                         'source': 'funannotate', 'phase': [],
                                         'db_xref': dbxref,
                                         'go_terms': go_terms,
                                         'EC_number': ECnum, 'note': note,
                                         'partialStart': fivepartial,
                                         'partialStop': threepartial,
                                         'pseudo': False
                                         }
    # now we need to sort coordinates, get protein/transcript sequences and capture UTRs
    SeqRecords = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
    for k, v in list(Genes.items()):
        # @nextgenusfs we should clarify or rename this variable to indicate
        # i is the i-th transcript, right??
        for i in range(0, len(v['ids'])):
            if v['type'] in ['mRNA', 'tRNA', 'ncRNA']:
                # exons sorted 5'->3' relative to the strand
                if v['strand'] == '+':
                    sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
                else:
                    sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0],
                                         reverse=True)
                Genes[k]['mRNA'][i] = sortedExons
                mrnaSeq = getSeqRegions(SeqRecords, v['contig'], sortedExons)
                Genes[k]['transcript'].append(mrnaSeq)
            if v['type'] == 'mRNA':
                if v['strand'] == '+':
                    sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
                else:
                    sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0],
                                       reverse=True)
                cdsSeq = getSeqRegions(SeqRecords, v['contig'], sortedCDS)
                # If the translation starts in middle of a codon,
                # we need to truncate the CDS seq either at start or end
                # depending on strand.
                if v['codon_start'][i] > 1:
                    if v['strand'] == "+":
                        # drop first N bases based on codon_start
                        # to reflect the translation frame
                        cdsSeq = cdsSeq[v['codon_start'][i]-1:]
                    elif v['strand'] == "-":
                        # drop last N bases based on codon_start
                        # to reflect the translation frame (because this is
                        # is reverse strand gene)
                        endTrunc = len(cdsSeq) - (v['codon_start'][i] - 1)
                        cdsSeq = cdsSeq[0:endTrunc]
                    else:
                        # could trigger more of a warning/error
                        print("ERROR strand (%s) is nonsensical for %s"%(v['strand'],k))
                Genes[k]['cds_transcript'].append(cdsSeq)
                Genes[k]['CDS'][i] = sortedCDS
                protSeq = translate(cdsSeq, v['strand'],0)
                if protSeq:
                    Genes[k]['protein'].append(protSeq)
                    # stop codon present -> complete 3' end; start Met with
                    # codon_start 1 -> complete 5' end
                    if protSeq.endswith('*'):
                        Genes[k]['partialStop'][i] = False
                    else:
                        Genes[k]['partialStop'][i] = True
                    if v['codon_start'][i] == 1 and protSeq.startswith('M'):
                        Genes[k]['partialStart'][i] = False
                    else:
                        Genes[k]['partialStart'][i] = True
                # get UTRs
                try:
                    FiveUTR, ThreeUTR = findUTRs(sortedCDS, sortedExons,
                                                 v['strand'])
                    Genes[k]['5UTR'].append(FiveUTR)
                    Genes[k]['3UTR'].append(ThreeUTR)
                except ValueError:
                    print(('ERROR', k, v))
    return Genes
def dicts2tbl(genesDict, scaff2genes, scaffLen, SeqCenter, SeqRefNum, skipList,
              output, annotations=False, external=False):
    '''
    function to take funannotate annotation dictionaries and convert to NCBI tbl output

    genesDict   -- funannotate gene-model dictionary keyed by locus tag
    scaff2genes -- dict mapping scaffold name -> ordered list of locus tags
    scaffLen    -- dict mapping scaffold name -> scaffold length
    SeqCenter, SeqRefNum -- written into the REFERENCE lines of the tbl
    skipList    -- locus tags to omit from the output
    output      -- path of the tbl file to write
    annotations -- when True, also write functional annotation (GO, EC,
                   db_xref, notes); requires goatools and FUNANNOTATE_DB
    external    -- when True, keep the ids stored in the dict instead of
                   renaming transcripts to <locus>-T<n>
    '''
    # counters for skipped models, summarized at the end
    duplicates = 0
    pseudo = 0
    nocds = 0
    # to parse annotations, will need to have access to GO OBO dictionary
    goDict = {}
    if annotations:
        from goatools import obo_parser
        # location of go.obo
        for item in obo_parser.OBOReader(os.path.join(os.environ["FUNANNOTATE_DB"], 'go.obo')):
            goDict[item.id] = {'name': item.name, 'namespace': item.namespace}
    def _goFormat(id, goDict=goDict):
        # reformat a GO id into a tbl annotation line, e.g.:
        # go_function serine-type endopeptidase activity|0004252||IEA
        # go_process proteolysis|0006508||IEA
        # go_component nucleus|0005634||IEA
        # returns False when the id is not in the OBO dictionary
        if id in goDict:
            if goDict[id]['namespace'] == 'biological_process':
                base = 'go_process'
            elif goDict[id]['namespace'] == 'molecular_function':
                base = 'go_function'
            elif goDict[id]['namespace'] == 'cellular_component':
                base = 'go_component'
            # NOTE(review): if the namespace is none of the three above,
            # `base` is unbound and this raises UnboundLocalError — confirm
            # go.obo only ever uses these three namespaces
            reformatted = '\t\t\t{:}\t{:}|{:}||IEA'.format(
                base, goDict[id]['name'], id.replace('GO:', ''))
            return reformatted
        else:
            return False
    with open(output, 'w') as tbl:
        for k, v in natsorted(list(scaff2genes.items())):
            # per-scaffold header
            tbl.write('>Feature %s\n' % k)
            tbl.write('1\t%s\tREFERENCE\n' % scaffLen.get(k))
            tbl.write('\t\t\t%s\t%s\n' % (SeqCenter, SeqRefNum))
            for genes in v:  # now loop through each gene on the scaffold
                if genes in skipList:
                    continue
                # single funannotate standard dictionary
                geneInfo = genesDict.get(genes)
                # NOTE(review): `log` is resolved lazily; the NameError
                # fallbacks below cover running without a configured logger
                if 'pseudo' in geneInfo:
                    if geneInfo['pseudo']:
                        try:
                            log.debug('{:} is pseudo, skipping'.format(genes))
                        except NameError:
                            print(('{:} is pseudo, skipping'.format(genes)))
                        pseudo += 1
                        continue
                if geneInfo['type'] == 'mRNA' and not geneInfo['CDS']:
                    try:
                        log.debug(
                            'Skipping {:} because no CDS found.'.format(genes))
                    except NameError:
                        print((
                            'Skipping {:} because no CDS found.'.format(genes)))
                    pseudo += 1
                    continue
                if geneInfo['type'] == 'mRNA' and not len(geneInfo['ids']) == len(geneInfo['mRNA']) == len(geneInfo['CDS']):
                    try:
                        log.debug('Incompatible annotation found: {:}\n{:}'.format(
                            genes, geneInfo))
                    except NameError:
                        print(('Incompatible annotation found: {:}\n{:}'.format(
                            genes, geneInfo)))
                    duplicates += 1
                    continue
                if geneInfo['type'] == 'mRNA' and len(geneInfo['CDS']) == 0:
                    nocds += 1
                    continue
                if geneInfo['type'] is None:
                    continue
                # check for partial models
                if True in geneInfo['partialStart']:
                    ps = '<'
                else:
                    ps = ''
                if True in geneInfo['partialStop']:
                    pss = '>'
                else:
                    pss = ''
                # now write gene model
                if geneInfo['strand'] == '+':
                    tbl.write('%s%i\t%s%i\tgene\n' % (
                        ps, geneInfo['location'][0], pss, geneInfo['location'][1]))
                    if annotations:
                        if geneInfo['name']:
                            tbl.write('\t\t\tgene\t%s\n' % geneInfo['name'])
                        if geneInfo['gene_synonym']:
                            for alias in geneInfo['gene_synonym']:
                                tbl.write('\t\t\tgene_synonym\t%s\n' % alias)
                    tbl.write('\t\t\tlocus_tag\t%s\n' % genes)
                else:
                    # minus strand: coordinates written end-first
                    tbl.write('%s%i\t%s%i\tgene\n' % (
                        ps, geneInfo['location'][1], pss, geneInfo['location'][0]))
                    if annotations:
                        if geneInfo['name']:
                            tbl.write('\t\t\tgene\t%s\n' % geneInfo['name'])
                        if geneInfo['gene_synonym']:
                            for alias in geneInfo['gene_synonym']:
                                tbl.write('\t\t\tgene_synonym\t%s\n' % alias)
                    tbl.write('\t\t\tlocus_tag\t%s\n' % genes)
                # now will output the gene models with -T1, -T2, -T3 annotations based on expression values
                # means need to get the order
                order = []
                # multiple transcripts, so get order of highest TPM
                if len(geneInfo['ids']) > 1:
                    tpms = []
                    for num, tpm in enumerate(geneInfo['note']):
                        for item in tpm:
                            if item.startswith('TPM:'):
                                value = float(item.split(':')[-1])
                                tpms.append((value, num))
                    if len(tpms) > 0:
                        for x in sorted(tpms, reverse=True):
                            order.append(x[1])
                    else:
                        order = list(range(0, len(geneInfo['ids'])))
                else:
                    order.append(0)
                for num, i in enumerate(order):  # now write mRNA and CDS features
                    # if geneInfo['ids'][i].startswith('evm.model'): #if from predict, rename to match locus_tag
                    #    protein_id = genes+'-T'+str(num+1)
                    # else:
                    #    protein_id = geneInfo['ids'][i]
                    if external:
                        protein_id = geneInfo['ids'][i]
                    else:
                        protein_id = genes+'-T'+str(num+1)
                    if geneInfo['type'] == 'mRNA':
                        # per-transcript partial markers
                        if geneInfo['partialStart'][i] is False:
                            ps = ''
                        else:
                            ps = '<'
                        if geneInfo['partialStop'][i] is False:
                            pss = ''
                        else:
                            pss = '>'
                        if geneInfo['strand'] == '+':
                            for num, exon in enumerate(geneInfo['mRNA'][i]):
                                # single exon, so slightly differnt method
                                if num == 0 and num == len(geneInfo['mRNA'][i]) - 1:
                                    tbl.write('%s%s\t%s%s\tmRNA\n' %
                                              (ps, exon[0], pss, exon[1]))
                                elif num == 0:
                                    tbl.write('%s%s\t%s\tmRNA\n' %
                                              (ps, exon[0], exon[1]))
                                # this is last one
                                elif num == len(geneInfo['mRNA'][i]) - 1:
                                    tbl.write('%s\t%s%s\n' %
                                              (exon[0], pss, exon[1]))
                                else:
                                    tbl.write('%s\t%s\n' % (exon[0], exon[1]))
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
                            tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
                            tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
                                      (protein_id))
                            for num, cds in enumerate(geneInfo['CDS'][i]):
                                # single exon, so slightly differnt method
                                if num == 0 and num == len(geneInfo['CDS'][i]) - 1:
                                    tbl.write('%s%s\t%s%s\tCDS\n' %
                                              (ps, cds[0], pss, cds[1]))
                                elif num == 0:
                                    tbl.write('%s%s\t%s\tCDS\n' %
                                              (ps, cds[0], cds[1]))
                                # this is last one
                                elif num == len(geneInfo['CDS'][i]) - 1:
                                    tbl.write('%s\t%s%s\n' %
                                              (cds[0], pss, cds[1]))
                                else:
                                    tbl.write('%s\t%s\n' % (cds[0], cds[1]))
                            tbl.write('\t\t\tcodon_start\t%i\n' %
                                      geneInfo['codon_start'][i])
                            if annotations:  # write functional annotation
                                if geneInfo['EC_number'][i]:
                                    for EC in geneInfo['EC_number'][i]:
                                        tbl.write('\t\t\tEC_number\t%s\n' % EC)
                                if geneInfo['db_xref'][i]:
                                    for xref in geneInfo['db_xref'][i]:
                                        tbl.write('\t\t\tdb_xref\t%s\n' % xref)
                                if geneInfo['go_terms'][i]:
                                    for go in geneInfo['go_terms'][i]:
                                        goLine = _goFormat(go)
                                        if goLine:
                                            tbl.write('{:}\n'.format(goLine))
                                if geneInfo['note'][i]:
                                    for item in geneInfo['note'][i]:
                                        tbl.write('\t\t\tnote\t%s\n' % item)
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
                            tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
                            tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
                                      (protein_id))
                        else:  # means this is on crick strand
                            for num, exon in enumerate(geneInfo['mRNA'][i]):
                                # single exon, so slightly differnt method
                                if num == 0 and num == len(geneInfo['mRNA'][i]) - 1:
                                    tbl.write('%s%s\t%s%s\tmRNA\n' %
                                              (ps, exon[1], pss, exon[0]))
                                elif num == 0:
                                    tbl.write('%s%s\t%s\tmRNA\n' %
                                              (ps, exon[1], exon[0]))
                                # this is last one
                                elif num == len(geneInfo['mRNA'][i]) - 1:
                                    tbl.write('%s\t%s%s\n' %
                                              (exon[1], pss, exon[0]))
                                else:
                                    tbl.write('%s\t%s\n' % (exon[1], exon[0]))
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
                            tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
                            tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
                                      (protein_id))
                            for num, cds in enumerate(geneInfo['CDS'][i]):
                                # single exon, so slightly differnt method
                                if num == 0 and num == len(geneInfo['CDS'][i]) - 1:
                                    tbl.write('%s%s\t%s%s\tCDS\n' %
                                              (ps, cds[1], pss, cds[0]))
                                elif num == 0:
                                    tbl.write('%s%s\t%s\tCDS\n' %
                                              (ps, cds[1], cds[0]))
                                # this is last one
                                elif num == (len(geneInfo['CDS'][i]) - 1):
                                    tbl.write('%s\t%s%s\n' %
                                              (cds[1], pss, cds[0]))
                                else:
                                    tbl.write('%s\t%s\n' % (cds[1], cds[0]))
                            tbl.write('\t\t\tcodon_start\t%i\n' %
                                      geneInfo['codon_start'][i])
                            if annotations:  # write functional annotation
                                if geneInfo['EC_number'][i]:
                                    for EC in geneInfo['EC_number'][i]:
                                        tbl.write('\t\t\tEC_number\t%s\n' % EC)
                                if geneInfo['db_xref'][i]:
                                    for xref in geneInfo['db_xref'][i]:
                                        tbl.write('\t\t\tdb_xref\t%s\n' % xref)
                                if geneInfo['go_terms'][i]:
                                    for go in geneInfo['go_terms'][i]:
                                        goLine = _goFormat(go)
                                        if goLine:
                                            tbl.write('{:}\n'.format(goLine))
                                if geneInfo['note'][i]:
                                    for item in geneInfo['note'][i]:
                                        tbl.write('\t\t\tnote\t%s\n' % item)
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
                            tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
                            tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
                                      (protein_id))
                    elif geneInfo['type'] == 'tRNA':
                        if geneInfo['strand'] == '+':
                            for num, exon in enumerate(geneInfo['mRNA'][i]):
                                if num == 0:
                                    tbl.write('%s\t%s\t%s\n' % (
                                        exon[0], exon[1], geneInfo['type']))
                                else:
                                    tbl.write('%s\t%s\n' % (exon[0], exon[1]))
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
                            # undetermined anticodon -> pseudo per NCBI convention
                            if geneInfo['product'] == 'tRNA-Xxx':
                                tbl.write('\t\t\tpseudo\n')
                        else:
                            for num, exon in enumerate(geneInfo['mRNA'][i]):
                                if num == 0:
                                    tbl.write('%s\t%s\t%s\n' % (
                                        exon[1], exon[0], geneInfo['type']))
                                else:
                                    tbl.write('%s\t%s\n' % (exon[1], exon[0]))
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
                            if geneInfo['product'] == 'tRNA-Xxx':
                                tbl.write('\t\t\tpseudo\n')
                    elif geneInfo['type'] in ['rRNA', 'ncRNA']:
                        if geneInfo['strand'] == '+':
                            tbl.write('%s\t%s\t%s\n' % (
                                geneInfo['location'][0], geneInfo['location'][1], geneInfo['type']))
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
                        else:
                            tbl.write('%s\t%s\t%s\n' % (
                                geneInfo['location'][1], geneInfo['location'][0], geneInfo['type']))
                            tbl.write('\t\t\tproduct\t%s\n' %
                                      geneInfo['product'][i])
    if any(i > 0 for i in [duplicates, pseudo, nocds]):
        # NOTE(review): both branches print the same message; the try
        # branch presumably was meant to use log.info — confirm
        try:
            print(('Skipped {:,} annotations: {:,} pseudo genes; {:,} no CDS; {:,} duplicated features'.format(
                sum([pseudo, nocds, duplicates]), pseudo, nocds, duplicates)))
        except NameError:
            print(('Skipped {:,} annotations: {:,} pseudo genes; {:,} no CDS; {:,} duplicated features'.format(
                sum([pseudo, nocds, duplicates]), pseudo, nocds, duplicates)))
def GFF2tbl(evm, trnascan, fasta, scaffLen, prefix, Numbering, SeqCenter,
            SeqRefNum, tblout):
    '''
    Take EVM protein models and the tRNAscan GFF and produce a GBK tbl
    file. When *prefix* is given, gene models are renamed to
    <prefix>_NNNNNN in genomic order starting at *Numbering*.
    '''
    from collections import OrderedDict

    def _sortDict(d):
        # sort key: (contig name, gene end coordinate)
        return (d[1]['contig'], d[1]['location'][1])

    # load both GFF inputs into a single funannotate gene-model dictionary
    Genes = {}
    Genes = gff2dict(evm, fasta, Genes)
    Genes = gff2dict(trnascan, fasta, Genes)
    # sort by contig and location so renaming follows genomic order
    ordered = OrderedDict(natsorted(iter(Genes.items()), key=_sortDict))
    renamedGenes = {}
    scaff2genes = {}
    counter = Numbering
    for gene_id, model in list(ordered.items()):
        if prefix:
            locusTag = prefix + '_' + str(counter).zfill(6)
        else:
            locusTag = gene_id
        renamedGenes[locusTag] = model
        # track the per-scaffold gene order for the tbl writer
        scaff2genes.setdefault(model['contig'], []).append(locusTag)
        counter += 1
    # write tbl outputfile
    dicts2tbl(renamedGenes, scaff2genes, scaffLen,
              SeqCenter, SeqRefNum, [], tblout)
def checkRefSeq(input):
    '''Return True if any GenBank record in *input* carries the RefSeq keyword.'''
    with open(input, 'r') as infile:
        for rec in SeqIO.parse(infile, 'genbank'):
            if 'RefSeq' in rec.annotations['keywords']:
                return True
    return False
def getGBKinfo(input):
    '''
    Pull organism metadata out of a GenBank file.

    Returns a 7-tuple: (organism, strain, isolate, accession,
    WGS_accession, gb_gi, version); any field absent from the file
    comes back as None. Later records overwrite earlier values.
    '''
    organism = None
    strain = None
    isolate = None
    accession = None
    WGS_accession = None
    gb_gi = None
    version = None
    with open(input, 'r') as infile:
        for record in SeqIO.parse(infile, 'genbank'):
            annot = record.annotations
            if 'contig' in annot:
                # first four characters of the CONTIG master id, e.g. 'WGS:ABCD'
                WGS_accession = 'WGS:' + \
                    annot['contig'].split(':')[0].replace('join(', '')[:4]
            if 'accessions' in annot:
                accession = annot['accessions'][0]
            if 'organism' in annot:
                organism = annot['organism'].replace('Unclassified.', '').rstrip()
            if 'gi' in annot:
                gb_gi = annot['gi']
            if 'sequence_version' in annot:
                version = annot['sequence_version']
            # strain/isolate live on the first 'source' feature of the record
            for feat in record.features:
                if feat.type == "source":
                    isolate = feat.qualifiers.get("isolate", [None])[0]
                    strain = feat.qualifiers.get("strain", [None])[0]
                    break
    return organism, strain, isolate, accession, WGS_accession, gb_gi, version
def getGBKLocusTag(input):
    '''
    Scan a GenBank file for gene locus_tags and return (tag, count,
    justify): the prefix and numeric suffix of the naturally-last tag,
    plus the zero-pad width of that suffix. Exits if the last tag has
    no underscore separator.
    '''
    seen = []
    with open(input, 'r') as infile:
        for record in SeqIO.parse(infile, 'genbank'):
            for feat in record.features:
                if feat.type != 'gene':
                    continue
                current = feat.qualifiers['locus_tag'][0]
                if current not in seen:
                    seen.append(current)
    lastTag = natsorted(seen)[-1]
    if '_' not in lastTag:
        print('ERROR: underscore "_" not found in locus_tag, exiting.')
        sys.exit(1)
    tag, count = lastTag.rsplit('_', 1)
    return tag, count, len(count)
def gb2dna(input, output):
    '''Extract contig sequences from a GenBank file into a FASTA file.'''
    with open(output, 'w') as outfile, open(input, 'r') as infile:
        for rec in SeqIO.parse(infile, 'genbank'):
            outfile.write(">%s\n%s\n" % (rec.id, softwrap(str(rec.seq))))
def getID(input, type):
    '''
    Extract identifiers from a genbank feature.

    Returns (locusTag, ID, Parent) where locusTag comes from the
    locus_tag qualifier (falling back to gene, then the feature's own
    transcript_id/protein_id), ID is the feature-specific identifier
    (transcript_id / protein_id / standard_name), and Parent is the
    owning locus. Returns None for anything that cannot be resolved.
    '''
    def _first(key):
        # first value of a qualifier, or None when the qualifier is absent
        try:
            vals = input.qualifiers[key]
        except KeyError:
            return None
        return vals[0]

    locusTag = None
    ID = None
    Parent = None
    if type == 'gene':
        tag = _first('locus_tag')
        if tag is not None:
            locusTag = tag
        if not locusTag:
            fallback = _first('gene')
            if fallback is not None:
                locusTag = fallback
        else:
            # gene name doubles as the ID when a locus_tag exists
            ID = _first('gene')
        return locusTag, ID, locusTag
    elif type in ['mRNA', 'tRNA', 'ncRNA', 'rRNA', 'misc_RNA', 'exon']:
        tag = _first('locus_tag')
        if tag is not None:
            locusTag = tag
            Parent = tag
        if not locusTag:
            fallback = _first('gene')
            if fallback is not None:
                locusTag = fallback
            if locusTag:
                Parent = locusTag
                ID = _first('transcript_id')
            else:
                tid = _first('transcript_id')
                if tid is not None:
                    locusTag = tid
                    Parent = tid
        else:
            ID = _first('transcript_id')
        if ID:
            # drop any 'db:' style prefix from the identifier
            if ':' in ID:
                ID = ID.split(':')[-1]
        else:
            alt = _first('standard_name')
            if alt is not None:
                ID = alt
        return locusTag, ID, Parent
    elif type == 'CDS':
        tag = _first('locus_tag')
        if tag is not None:
            locusTag = tag
            Parent = tag
        if not locusTag:
            fallback = _first('gene')
            if fallback is not None:
                locusTag = fallback
            if locusTag:
                Parent = locusTag
                pid = _first('protein_id')
                if pid is not None:
                    ID = pid
            else:
                pid = _first('protein_id')
                if pid is not None:
                    locusTag = pid
                    Parent = pid
        else:
            pid = _first('protein_id')
            # unlike the mRNA branch, a missing protein_id falls back to
            # the locus tag itself
            ID = pid if pid is not None else locusTag
        if ID:
            if ':' in ID:
                ID = ID.split(':')[-1]
        else:
            alt = _first('standard_name')
            if alt is not None:
                ID = alt
        return locusTag, ID, Parent
def gb2nucleotides(input, prots, trans, dna):
    '''
    Generate protein, transcript, and contig FASTA files from a GenBank
    file. Returns the number of gene models parsed.
    '''
    gene_models = {}
    with open(dna, 'w') as dnaout:
        with open(input, 'r') as filein:
            for rec in SeqIO.parse(filein, 'genbank'):
                # contigs go straight to the DNA FASTA
                dnaout.write(">%s\n%s\n" %
                             (rec.id, softwrap(str(rec.seq))))
                # features accumulate into the funannotate dictionary
                for feat in rec.features:
                    gb_feature_add2dict(feat, rec, gene_models)
    # proteins and transcripts are derived from the accumulated models
    dict2nucleotides(gene_models, prots, trans)
    return len(gene_models)
def dict2proteins(input, prots):
    '''Write a protein FASTA from a funannotate gene-model dictionary.'''
    with open(prots, 'w') as protout:
        for locus, model in natsorted(list(input.items())):
            # skip pseudogenes and malformed mRNA models
            if 'pseudo' in model and model['pseudo']:
                continue
            if model['type'] == 'mRNA' and not model['CDS']:
                continue
            if model['type'] == 'mRNA' and not len(model['ids']) == len(model['mRNA']) == len(model['CDS']):
                continue
            for i, tid in enumerate(model['ids']):
                # only coding models carry translations
                if model['type'] == 'mRNA':
                    protout.write('>{:} {:}\n{:}\n'.format(
                        tid, locus, softwrap(model['protein'][i])))
def dict2nucleotides(input, prots, trans):
    '''
    Write protein and transcript FASTA files from a funannotate
    gene-model dictionary.
    '''
    with open(prots, 'w') as protout, open(trans, 'w') as tranout:
        for locus, model in natsorted(list(input.items())):
            # skip pseudogenes and malformed mRNA models
            if 'pseudo' in model and model['pseudo']:
                continue
            if model['type'] == 'mRNA' and not model['CDS']:
                continue
            if model['type'] == 'mRNA' and not len(model['ids']) == len(model['mRNA']) == len(model['CDS']):
                continue
            for i, tid in enumerate(model['ids']):
                try:
                    tranout.write('>{:} {:}\n{:}\n'.format(
                        tid, locus, softwrap(str(model['transcript'][i]))))
                except IndexError:
                    # some models lack a transcript entry; skip silently
                    pass
                if model['type'] == 'mRNA':
                    protout.write('>{:} {:}\n{:}\n'.format(
                        tid, locus, softwrap(model['protein'][i])))
def gb2gffnuc(input, gff, prots, trans, dna):
    '''
    Generate GFF3, protein, transcript, and contig files from a GenBank
    file. Returns the number of gene models parsed.
    '''
    gene_models = {}
    with open(dna, 'w') as dnaout:
        with open(input, 'r') as filein:
            for rec in SeqIO.parse(filein, 'genbank'):
                dnaout.write(">{:}\n{:}\n".format(
                    rec.id, softwrap(str(rec.seq))))
                for feat in rec.features:
                    gb_feature_add2dict(feat, rec, gene_models)
    # write gff3 output
    dict2gff3(gene_models, gff)
    # write to protein and transcripts
    dict2nucleotides(gene_models, prots, trans)
    return len(gene_models)
def gb2parts(input, tbl, gff, prots, trans, dna):
    '''
    Parse every gene model from a GenBank file (handles multiple
    transcripts per locus) and write tbl, GFF3, protein, transcript,
    and contig outputs. Returns the number of gene models.
    '''
    gene_models = {}
    scaff2genes = {}
    scaffLen = {}
    with open(dna, 'w') as dnaout:
        with open(input, 'r') as filein:
            for rec in SeqIO.parse(filein, 'genbank'):
                dnaout.write(">{:}\n{:}\n".format(
                    rec.id, softwrap(str(rec.seq))))
                contig_id = rec.id
                if contig_id not in scaffLen:
                    scaffLen[contig_id] = len(rec.seq)
                for feat in rec.features:
                    # record gene order per scaffold for the tbl writer
                    if feat.type == 'gene':
                        locusTag, ID, Parent = getID(feat, feat.type)
                        scaff2genes.setdefault(contig_id, []).append(locusTag)
                    gb_feature_add2dict(feat, rec, gene_models)
    # write tbl output
    dicts2tbl(gene_models, scaff2genes, scaffLen, 'CFMR', '12345', [], tbl)
    # write gff3 output
    dict2gff3_old(gene_models, gff)
    # write to protein and transcripts
    dict2nucleotides(gene_models, prots, trans)
    return len(gene_models)
def gb_feature_add2dict(f, record, genes):
'''
general function to take a genbank feature from flat file and add to funannotate standardized dictionary
locustag: {
'contig': contigName
'type': mRNA/rRNA/tRNA/ncRNA
'location': (start, end) #integer tuple
'strand': +/-
'ids': [transcript/protein IDs] #list
'mRNA':[[(ex1,ex1),(ex2,ex2)]] #list of lists of tuples (start, end)
'CDS':[[(cds1,cds1),(cds2,cds2)]] #list of lists of tuples (start, end)
'transcript': [seq1, seq2] #list of mRNA trnascripts
'cds_transcript': [seq1, seq2] list of mRNA (no UTRs)
'protein': [protseq1,protseq2] #list of CDS translations
'protein_id': [id,id] #from NCBI
'codon_start': [1,1] #codon start for translations
'note': [[first note, second note], [first, second, etc]] #list of lists
'name': genename
'product': [hypothetical protein, velvet complex] #list of product definitions
'go_terms': [[GO:0000001,GO:0000002]] #list of lists
'db_xref': [[InterPro:IPR0001,PFAM:004384]] #list of lists
'partialStart': True/False
'partialStop': True/False
'source': annotation source
'pseudo': True/False
}
'''
# get info from features, if there is no locusTag then exit
if f.type and f.type in ['gene', 'mRNA', 'CDS', 'tRNA', 'rRNA', 'ncRNA', 'exon', 'misc_RNA']:
try:
locusTag, ID, Parent = getID(f, f.type)
except TypeError:
print('ERROR parsing GBK record')
print(f)
sys.exit(1)
if not locusTag:
return genes
else:
return genes
# check for mismatching funannotate ID locus tag basename
if ID and '-T' in ID: # then this is from funannotate, okay to modify - this is to capture apparent tbl2asn local error
# there is a problem, update locusTag with basename of ID
if ID.split('-T')[0] != locusTag:
locusTag = ID.split('-T')[0]
# standard information from every feature
strand = f.location.strand
if strand == 1:
strand = '+'
elif strand == -1:
strand = '-'
start = f.location.nofuzzy_start + 1
end = f.location.nofuzzy_end
chr = record.id
num_parts = len(f.location.parts)
name, Product = (None,)*2
Fivepartial, Threepartial = (False,)*2
DBxref = []
Note = []
GO = []
EC = []
synonyms = []
pseudo = False
if 'pseudo' in f.qualifiers:
pseudo = True
# parse each type somewhat differently
if f.type == 'gene':
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
if 'gene_synonym' in f.qualifiers:
for z in f.qualifiers['gene_synonym']:
synonyms.append(z)
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': None, 'transcript': [],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [], 'CDS': [],
'mRNA': [], 'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [],
'gene_synonym': synonyms, 'EC_number': [],
'db_xref': [], 'go_terms': [], 'note': [],
'partialStart': [], 'partialStop': [],
'protein_id': [], 'pseudo': pseudo}
else:
genes[locusTag]['location'] = (int(start), int(end))
genes[locusTag]['strand'] = strand
genes[locusTag]['gene_synonym'] = synonyms
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type in ['tRNA', 'rRNA', 'ncRNA', 'misc_RNA']:
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
try:
Product = f.qualifiers['product'][0]
if Product == 'tRNA-OTHER':
Product = 'tRNA-Xxx'
except KeyError:
Product = None
exonTuples = []
if num_parts < 2: # only single exon
exonTuples.append((int(start), int(end)))
else: # more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start), int(ex_end)))
# now we want to sort the positions I think...
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if str(f.location.start).startswith('<'):
Fivepartial = True
if str(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(
exonTuples, key=lambda tup: tup[0], reverse=True)
if str(f.location.start).startswith('<'):
Threepartial = True
if str(f.location.end).startswith('>'):
Fivepartial = True
# update positions
if f.type == 'misc_RNA':
feature = 'ncRNA'
else:
feature = f.type
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': feature,
'transcript': [feature_seq],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [locusTag+'-T1'],
'CDS': [], 'mRNA': [sortedExons],
'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [Product],
'protein_id': [], 'pseudo': pseudo,
'gene_synonym': synonyms, 'EC_number': [EC],
'db_xref': [DBxref], 'go_terms': [GO],
'note': [Note], 'partialStart': [Fivepartial],
'partialStop': [Threepartial]}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['type'] = feature
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['cds_transcript'].append(None)
genes[locusTag]['protein'].append(None)
genes[locusTag]['ids'].append(
locusTag+'-T'+str(len(genes[locusTag]['ids'])+1))
genes[locusTag]['db_xref'].append(DBxref)
genes[locusTag]['note'].append(Note)
genes[locusTag]['go_terms'].append(GO)
genes[locusTag]['EC_number'].append(EC)
genes[locusTag]['product'].append(Product)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type == 'mRNA':
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
exonTuples = []
if num_parts < 2: # only single exon
exonTuples.append((int(start), int(end)))
else: # more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start), int(ex_end)))
# now we want to sort the positions I think...
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if str(f.location.start).startswith('<'):
Fivepartial = True
if str(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(
exonTuples, key=lambda tup: tup[0], reverse=True)
if str(f.location.start).startswith('<'):
Threepartial = True
if str(f.location.end).startswith('>'):
Fivepartial = True
# update positions
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': f.type,
'transcript': [feature_seq],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [], 'CDS': [],
'mRNA': [sortedExons], 'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [], 'protein_id': [],
'pseudo': pseudo, 'gene_synonym': synonyms,
'EC_number': [],
'db_xref': [], 'go_terms': [],
'note': [], 'partialStart': [Fivepartial],
'partialStop': [Threepartial]}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['type'] = f.type
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type == 'exon': # assuming need to overwrite mRNA feature then?
if len(genes[locusTag]['mRNA']) == 0:
genes[locusTag]['mRNA'] = []
genes[locusTag]['transcript'] = []
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
exonTuples = []
if num_parts < 2: # only single exon
exonTuples.append((int(start), int(end)))
else: # more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start), int(ex_end)))
# now we want to sort the positions I think...
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if str(f.location.start).startswith('<'):
Fivepartial = True
if str(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(
exonTuples, key=lambda tup: tup[0], reverse=True)
if str(f.location.start).startswith('<'):
Threepartial = True
if str(f.location.end).startswith('>'):
Fivepartial = True
# update positions
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': f.type,
'transcript': [feature_seq],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [], 'CDS': [],
'mRNA': [sortedExons], 'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [], 'protein_id': [],
'db_xref': [], 'go_terms': [], 'note': [],
'gene_synonym': synonyms, 'EC_number': [],
'partialStart': [Fivepartial],
'partialStop': [Threepartial], 'pseudo': pseudo}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
elif f.type == 'CDS' and 'codon_start' in f.qualifiers:
feature_seq = f.extract(record.seq)
if not ID:
try:
log.info("putative transcript from %s has no ID\n(%s %s %s)" % (
locusTag, locusTag, ID, Parent))
except NameError:
print(("putative transcript from %s has no ID\n(%s %s %s)" %
(locusTag, locusTag, ID, Parent)))
return genes
try:
protSeq = f.qualifiers['translation'][0]
except KeyError:
try:
log.debug("%s has no translation" % ID)
except NameError:
print(("%s has no translation" % ID))
protSeq = ''
cdsTuples = []
phase = int(f.qualifiers['codon_start'][0])
if num_parts < 2: # only single CDS
cdsTuples.append((int(start), int(end)))
else:
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
cdsTuples.append((int(ex_start), int(ex_end)))
if strand == '+':
sortedCDS = sorted(cdsTuples, key=lambda tup: tup[0])
else:
sortedCDS = sorted(cdsTuples, key=lambda tup: tup[0], reverse=True)
# check for annotations
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = 'hypothetical protein'
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
# note and dbxref are in a dictionary
for key, value in list(f.qualifiers.items()):
if key == 'note':
notes = value[0].split('; ')
for n in notes:
if n.startswith('GO'):
GO.append(n)
else:
Note.append(n)
elif key == 'db_xref':
for ref in value:
DBxref.append(ref)
elif key == 'EC_number':
for x in value:
EC.append(x)
# update dictionary
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': 'mRNA',
'transcript': [], '5UTR': [[]], '3UTR': [[]],
'cds_transcript': [feature_seq],
'protein': [], 'source': 'GenBank',
'codon_start': [phase], 'ids': [locusTag+'-T1'],
'CDS': [sortedCDS], 'mRNA': [],
'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [Product],
'gene_synonym': synonyms, 'EC_number': [EC],
'protein_id': [ID],
'db_xref': [DBxref], 'go_terms': [GO],
'note': [Note], 'partialStart': [],
'partialStop': [], 'pseudo': pseudo}
else:
genes[locusTag]['protein_id'].append(ID)
genes[locusTag]['ids'].append(
locusTag+'-T'+str(len(genes[locusTag]['ids'])+1))
genes[locusTag]['CDS'].append(sortedCDS)
genes[locusTag]['5UTR'].append([])
genes[locusTag]['3UTR'].append([])
genes[locusTag]['product'].append(Product)
genes[locusTag]['protein'].append(protSeq)
genes[locusTag]['cds_transcript'].append(feature_seq)
genes[locusTag]['codon_start'].append(phase)
genes[locusTag]['db_xref'].append(DBxref)
genes[locusTag]['note'].append(Note)
genes[locusTag]['go_terms'].append(GO)
genes[locusTag]['EC_number'].append(EC)
if not genes[locusTag]['type']:
genes[locusTag]['type'] = 'mRNA'
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
return genes
def bed2interlapNames(bedfile):
    """Load a BED file into a per-contig InterLap of (start, end, name) tuples.

    Only the first four BED columns (chrom, start, end, name) are read;
    any extra columns are ignored.
    """
    inter = defaultdict(InterLap)
    with open(bedfile, 'r') as infile:
        for raw in infile:
            chrom, begin, finish, label = raw.strip().split('\t')[:4]
            inter[chrom].add((int(begin), int(finish), label))
    return inter
def bed2interlap(bedfile):
    """Load a BED file into a per-contig InterLap of (start, end) tuples.

    Only the first three BED columns (chrom, start, end) are read.
    """
    inter = defaultdict(InterLap)
    with open(bedfile, 'r') as infile:
        for raw in infile:
            chrom, begin, finish = raw.strip().split('\t')[:3]
            inter[chrom].add((int(begin), int(finish)))
    return inter
def interlapIntersect(coords, contig, interObj):
    """Return True if *coords* intersects any interval on *contig* in *interObj*.

    interObj maps contig name -> InterLap; InterLap's ``__contains__``
    implements the interval-overlap test.
    """
    # `in` already evaluates to a bool — no need for an explicit
    # if/else returning True/False.
    return coords in interObj[contig]
def gff2interlap(input, fasta):
    '''
    Parse a GFF3 file into the funannotate standard annotation dictionary and
    build a contig -> InterLap lookup of (start, end, geneID) spans.
    '''
    annotations = gff2dict(input, fasta, {})
    inter = defaultdict(InterLap)
    for gene_id, info in natsorted(list(annotations.items())):
        begin, finish = info['location']
        inter[info['contig']].add((begin, finish, gene_id))
    return inter, annotations
def gff2interlapDict(input, fasta, inter, Dict):
    '''
    Parse a GFF3 file (with gap filtering) into a funannotate annotation
    dictionary, add each gene span to the contig InterLap as
    (start, end, strand, geneID), and merge the parsed genes into Dict.
    '''
    parsed = gff2dict(input, fasta, {}, gap_filter=True)
    for gene_id, info in natsorted(list(parsed.items())):
        begin, finish = info['location']
        inter[info['contig']].add((begin, finish, info['strand'], gene_id))
    # merge the newly parsed genes into the running dictionary and return
    return inter, merge_dicts(Dict, parsed)
def merge_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.

    Entries from *y* win on key collisions, matching ``dict.update``.
    """
    return {**x, **y}
def exonerate2hints(file, outfile):
    # mimic exonerate2hints from GFF3 exonerate file
    # CDSpart trimmed 15 bp inward from each match edge
    # intron as is
    '''
    #gff3 via EVM
    scaffold_20 exonerate nucleotide_to_protein_match 225035 225823 82.13 + . ID=match.11677.2;Target=VC83_07547 1 96
    scaffold_20 exonerate nucleotide_to_protein_match 53957 54342 92.93 + . ID=match.11677.3;Target=VC83_02595 1 129
    scaffold_20 exonerate nucleotide_to_protein_match 54397 54904 92.93 + . ID=match.11677.3;Target=VC83_02595 130 299
    scaffold_107 exonerate nucleotide_to_protein_match 77634 78119 89.95 - . ID=match.11677.5;Target=VC83_08471 1 163
    scaffold_107 exonerate nucleotide_to_protein_match 77501 77546 89.95 - . ID=match.11677.5;Target=VC83_08471 163 178
    scaffold_107 exonerate nucleotide_to_protein_match 77385 77422 89.95 - . ID=match.11677.5;Target=VC83_08471 179 191
    #corresponding exonerate2hints
    scaffold_20 xnt2h CDSpart 225050 225808 . + . src=XNT;grp=VC83_07547;pri=4
    scaffold_20 xnt2h CDSpart 53972 54327 . + . src=XNT;grp=VC83_02595;pri=4
    scaffold_20 xnt2h intron 54343 54396 . + . src=XNT;grp=VC83_02595;pri=4
    scaffold_20 xnt2h CDSpart 54412 54889 . + . src=XNT;grp=VC83_02595;pri=4
    scaffold_107 xnt2h CDSpart 77649 78104 . - . src=XNT;grp=VC83_08471;pri=4
    scaffold_107 xnt2h intron 77547 77633 . - . src=XNT;grp=VC83_08471;pri=4
    scaffold_107 xnt2h CDSpart 77516 77531 . - . src=XNT;grp=VC83_08471;pri=4
    scaffold_107 xnt2h intron 77423 77500 . - . src=XNT;grp=VC83_08471;pri=4
    scaffold_107 xnt2h CDSpart 77400 77407 . - . src=XNT;grp=VC83_08471;pri=4
    '''
    # First pass: collect all match segments per alignment ID.
    Genes = {}
    with open(file, 'r') as input:
        for line in input:
            if line.startswith('\n') or line.startswith('#'):
                continue
            line = line.rstrip()
            contig, source, feature, start, end, score, strand, phase, attributes = line.split(
                '\t')
            start = int(start)
            end = int(end)
            ID, Target = (None,)*2
            info = attributes.split(';')
            for x in info:
                if x.startswith('ID='):
                    ID = x.replace('ID=', '')
                elif x.startswith('Target='):
                    # keep only the accession, drop the match coordinates
                    Target = x.replace('Target=', '').split(' ')[0]
            if not ID in Genes:
                Genes[ID] = {'id': ID, 'target': Target, 'loc': [
                    (start, end)], 'strand': strand, 'contig': contig}
            else:
                Genes[ID]['loc'].append((start, end))
    # now lets sort through and write hints file
    with open(outfile, 'w') as output:
        for k, v in natsorted(list(Genes.items())):
            if v['strand'] == '+':
                sortedCDS = sorted(v['loc'], key=lambda tup: tup[0])
                for i, x in enumerate(sortedCDS):  # loop through tuples
                    # BUGFIX: CDSpart must be trimmed 15 bp INWARD from each
                    # match edge (x[0]+15, x[1]-15), matching the minus-strand
                    # branch below and the documented example above
                    # (225035 -> 225050, 225823 -> 225808); previously this
                    # widened the match by 15 bp instead.
                    output.write('{:}\txnt2h\tCDSpart\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
                        v['contig'], x[0]+15, x[1]-15, v['strand'], v['target']))
                    if len(sortedCDS) > 1:
                        try:
                            # intron spans the gap between consecutive matches;
                            # IndexError on the last segment is expected
                            output.write('{:}\txnt2h\tintron\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
                                v['contig'], x[1]+1, sortedCDS[i+1][0]-1, v['strand'], v['target']))
                        except IndexError:
                            pass
            else:
                sortedCDS = sorted(
                    v['loc'], key=lambda tup: tup[0], reverse=True)
                for i, x in enumerate(sortedCDS):  # loop through tuples
                    output.write('{:}\txnt2h\tCDSpart\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
                        v['contig'], x[0]+15, x[1]-15, v['strand'], v['target']))
                    if len(sortedCDS) > 1:
                        try:
                            output.write('{:}\txnt2h\tintron\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
                                v['contig'], sortedCDS[i+1][1]+1, x[0]-1, v['strand'], v['target']))
                        except IndexError:
                            pass
def alignments2dict(input, Genes):
    '''
    Parse a transcript_alignments GFF3 file and fold each alignment into the
    Genes dictionary (keyed by alignment ID). Each entry accumulates its
    exon spans ('mRNA'), per-exon scores ('pident'), the Target remainder
    ('extra'), and a running overall span ('location'). Alignments whose ID
    maps to more than one contig or strand are logged and skipped.
    Returns the (mutated) Genes dictionary.
    '''
    with open(input, 'r') as infile:
        for line in infile:
            # skip blank lines and comments
            if line.startswith('\n') or line.startswith('#'):
                continue
            line = line.rstrip()
            contig, source, feature, start, end, score, strand, phase, attributes = line.split(
                '\t')
            start = int(start)
            end = int(end)
            ID, Target, Extra = (None,)*3
            # pull ID= and Target= out of the column-9 attributes
            for x in attributes.split(';'):
                if x.startswith('ID='):
                    ID = x.replace('ID=', '')
                elif x.startswith('Target='):
                    # Extra keeps everything after the first space
                    # (the target's match coordinates)
                    Target, Extra = x.split(' ', 1)
                    Target = Target.replace('Target=', '')
            if not ID:
                continue
            if not ID in Genes:
                Genes[ID] = {'mRNA': [(start, end)], 'strand': strand, 'pident': [score],
                             'location': (start, end), 'contig': contig, 'extra': [Extra]}
            else:
                # sanity-check that all segments of one alignment agree
                # on contig and strand before appending
                if contig != Genes[ID]['contig']:
                    log.debug('ERROR: {:} mapped to multiple contigs: {:} and {:}'.format(ID, contig, Genes[ID]['contig']))
                    continue
                elif strand != Genes[ID]['strand']:
                    log.debug('ERROR: {:} mapped has different strands'.format(ID))
                    continue
                else:
                    Genes[ID]['mRNA'].append((start, end))
                    Genes[ID]['pident'].append(score)
                    Genes[ID]['extra'].append(Extra)
                    # double check mRNA features are contained in gene coordinates
                    if start < Genes[ID]['location'][0]:
                        Genes[ID]['location'] = (
                            start, Genes[ID]['location'][1])
                    if end > Genes[ID]['location'][1]:
                        Genes[ID]['location'] = (
                            Genes[ID]['location'][0], end)
    return Genes
def introns_from_exons(input):
    """Derive intron coordinates from an ordered list of exon (start, end) tuples.

    Each intron runs from one exon's end + 1 to the next exon's start - 1.
    Returns an empty list when there are fewer than two exons.
    """
    # Pair each exon with its successor instead of indexing input[x+1]
    # and catching IndexError on the final element.
    return [(left[1] + 1, right[0] - 1) for left, right in zip(input, input[1:])]
def dict2hints(input, hints):
    from collections import OrderedDict
    '''
    Write an Augustus hints file from a simple alignments dictionary.
    Terminal exons are written as 'ep' (exon-part) hints with score 0,
    internal exons as 'exon' hints, and inferred introns with score 1.
    Records are ordered by contig, then start coordinate.
    '''
    template = '{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp={:};pri=4;src=E\n'
    # order the alignments by contig and start location
    ordered = OrderedDict(
        natsorted(iter(input.items()),
                  key=lambda d: (d[1]['contig'], d[1]['location'][0])))
    with open(hints, 'w') as hintsout:
        for name, rec in list(ordered.items()):
            exons = sorted(rec['mRNA'], key=lambda tup: tup[0])
            introns = introns_from_exons(exons)
            last = len(exons) - 1
            for idx, exon in enumerate(exons):
                # first and last exons are open-ended, so emit 'ep' hints
                feature = 'ep' if idx == 0 or idx == last else 'exon'
                hintsout.write(template.format(
                    rec['contig'], 'b2h', feature, exon[0], exon[1], 0,
                    rec['strand'], '.', name))
            for gap in introns:
                hintsout.write(template.format(
                    rec['contig'], 'b2h', 'intron', gap[0], gap[1], 1,
                    rec['strand'], '.', name))
def dict2transcriptgff3(input, output):
    from collections import OrderedDict
    '''
    Write a GFF3 file of cDNA_match features from a simple alignments
    dictionary, ordered by contig then start coordinate.
    '''
    template = '{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Target={:} {:}\n'
    # order the alignments by contig and start location
    ordered = OrderedDict(
        natsorted(iter(input.items()),
                  key=lambda d: (d[1]['contig'], d[1]['location'][0])))
    with open(output, 'w') as outfile:
        outfile.write('##gff-version 3\n')
        for name, rec in list(ordered.items()):
            for idx, exon in enumerate(rec['mRNA']):
                outfile.write(template.format(
                    rec['contig'], 'genome', 'cDNA_match', exon[0], exon[1],
                    rec['pident'][idx], rec['strand'], '.', name, name,
                    rec['extra'][idx]))
def harmonize_transcripts(genome, alignments, gfffile, hintsfile, evidence=None, tmpdir='.', cpus=1, maxintron=3000):
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    '''
    function to check if evidence transcripts are missing from existing alignments and/or
    write the augustus hints file
    '''
    # parse the existing transcript alignments into a simple dict keyed by ID
    Genes = {}
    Genes = alignments2dict(alignments, Genes)
    log.info('Parsed {:,} transcript alignments from: {:}'.format(len(Genes), alignments))
    if evidence:  # if nothing here then just move on
        uniqueTranscripts = os.path.join(tmpdir, 'transcript_evidence_unique.fasta')
        seqcount = 0
        # collect evidence sequences whose IDs are not already aligned
        # (note: `file` and `id` shadow builtins here)
        with open(uniqueTranscripts, 'w') as fasta_outfile:
            for file in evidence:
                with open(file, 'r') as fasta_infile:
                    for title, seq in SimpleFastaParser(fasta_infile):
                        # FASTA ID is the header up to the first space
                        if ' ' in title:
                            id = title.split(' ')[0]
                        else:
                            id = title
                        if not id in Genes:
                            fasta_outfile.write('>{:}\n{:}\n'.format(title, softwrap(seq)))
                            seqcount += 1
        if seqcount > 0:
            # align the missing transcripts with minimap2 and, if any map,
            # merge their alignments into the dictionary
            log.info('Aligning {:,} unique transcripts [not found in exising alignments] with minimap2'.format(seqcount))
            minimapBAM = os.path.join(tmpdir, 'transcript_evidence_unique.bam')
            minimapGFF = os.path.join(tmpdir, 'transcript_evidence_unique.gff3')
            minimap2Align(uniqueTranscripts, genome, cpus, maxintron, minimapBAM)
            mappedReads = bam2gff3(str(minimapBAM), minimapGFF)
            if mappedReads > 0:
                log.info('Mapped {:,} of these transcripts to the genome, adding to alignments'.format(mappedReads))
                Genes = alignments2dict(minimapGFF, Genes)
            else:
                log.info('Mapped 0 of these transcripts to the genome')
    # emit both outputs: EVM-style alignment GFF3 and Augustus hints
    log.info('Creating transcript EVM alignments and Augustus transcripts hintsfile')
    dict2transcriptgff3(Genes, gfffile)
    dict2hints(Genes, hintsfile)
def gff2dict(file, fasta, Genes, debug=False, gap_filter=False):
    '''
    general function to take a GFF3 file and return a funannotate standardized dictionary
    locustag: {
        'contig': contigName
        'type': mRNA/rRNA/tRNA/ncRNA
        'location': (start, end) #integer tuple
        'strand': +/-
        'ids': [transcript/protein IDs] #list
        'mRNA':[[(ex1,ex1),(ex2,ex2)]] #list of lists of tuples (start, end)
        'CDS':[[(cds1,cds1),(cds2,cds2)]] #list of lists of tuples (start, end)
        'transcript': [seq1, seq2] #list of mRNA transcripts
        'cds_transcript': [seq1, seq2] #list of mRNA transcripts (no UTRs)
        'protein': [protseq1,protseq2] #list of CDS translations
        'codon_start': [1,1] #codon start for translations
        'note': [[first note, second note], [first, second, etc]] #list of lists
        'name': genename
        'product': [hypothetical protein, velvet complex] #list of product definitions
        'gene_synonym': Aliases
        'EC_number': [[ec number]]
        'go_terms': [[GO:0000001,GO:0000002]] #list of lists
        'db_xref': [[InterPro:IPR0001,PFAM:004384]] #list of lists
        'partialStart': True/False
        'partialStop': True/False
        'source': annotation source
        'phase': [[0,2,1]] list of lists
        '5UTR': [[(),()]] #list of lists of tuples (start, end)
        '3UTR': [[(),()]] #list of lists of tuples (start, end)
    }
    '''
    # idParent maps transcript ID -> parent gene ID so that exon/CDS/UTR rows
    # (whose Parent is the transcript) can be attached to the right gene
    idParent = {}
    # NOTE: loads the entire FASTA into memory
    SeqRecords = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
    with open(file, 'r') as input:
        for line in input:
            if line.startswith('\n') or line.startswith('#'):
                continue
            line = line.rstrip()
            contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
            # only the feature types below are parsed; everything else is skipped
            if feature not in ['gene', 'mRNA', 'exon', 'CDS', 'tRNA',
                               'ncRNA', 'rRNA', 'pseudogene', 'five_prime_UTR',
                               'five_prime_utr', 'three_prime_UTR',
                               'three_prime_utr', 'transcript']:
                continue
            if not contig in SeqRecords:
                continue
            start = int(start)
            end = int(end)
            ID, Parent, Name, Product, GeneFeature, gbkey = (None,)*6
            # NOTE(review): ([],)*5 binds all five names to ONE shared list;
            # this is safe only because each name is rebound (never mutated
            # in place) in the attribute loop below — confirm before editing.
            Note, DBxref, GO, synonyms, ECnum = ([],)*5
            # parse the column-9 attributes we care about
            info = attributes.split(';')
            for x in info:
                if x.startswith('ID='):
                    ID = x.replace('ID=', '')
                elif x.startswith('Parent='):
                    Parent = x.replace('Parent=', '')
                elif x.startswith('Name='):
                    Name = x.replace('Name=', '')
                elif x.startswith('Note=') or x.startswith('note='):
                    Note = x.split('ote=')[-1]
                    if ',' in Note:
                        Note = Note.split(',')
                    else:
                        Note = [Note]
                elif x.startswith('Dbxref='):
                    DBxref = x.replace('Dbxref=', '')
                    if ',' in DBxref:
                        DBxref = DBxref.split(',')
                    else:
                        DBxref = [DBxref]
                elif x.startswith('Ontology_term='):
                    GO = x.replace('Ontology_term=', '')
                    if ',' in GO:
                        GO = GO.split(',')
                    else:
                        GO = [GO]
                elif x.startswith('EC_number='):
                    ECnum = x.split('=',1)[-1]
                    if ',' in ECnum:
                        ECnum = ECnum.split(',')
                    else:
                        ECnum = [ECnum]
                elif x.startswith('Product=') or x.startswith('product='):
                    Product = unquote(x.split('roduct=')[-1])
                elif x.startswith('description='):
                    Product = unquote(x.replace('description=', ''))
                elif x.startswith('Alias='):
                    synonyms = x.replace('Alias=', '')
                    synonyms = synonyms.split(',')
                elif x.startswith('gbkey='):  # genbank uses
                    gbkey = x.split('=', 1)[-1]
            # --- gene/pseudogene rows create (or widen) the gene record ---
            if feature == 'gene' or feature == 'pseudogene':
                if not ID in Genes:
                    if feature == 'pseudogene':
                        pseudoFlag = True
                    else:
                        pseudoFlag = False
                    Genes[ID] = {'name': Name, 'type': None, 'transcript': [],
                                 'cds_transcript': [], 'protein': [], '5UTR': [],
                                 '3UTR': [], 'gene_synonym': synonyms,
                                 'codon_start': [], 'ids': [], 'CDS': [],
                                 'mRNA': [], 'strand': strand,
                                 'EC_number': [],
                                 'location': (start, end), 'contig': contig,
                                 'product': [], 'source': source, 'phase': [],
                                 'db_xref': [], 'go_terms': [], 'note': [],
                                 'partialStart': [], 'partialStop': [],
                                 'pseudo': pseudoFlag}
                else:
                    # gene seen before (child came first): just widen the span
                    if start < Genes[ID]['location'][0]:
                        Genes[ID]['location'] = (
                            start, Genes[ID]['location'][1])
                    if end > Genes[ID]['location'][1]:
                        Genes[ID]['location'] = (Genes[ID]['location'][0], end)
            else:
                # every non-gene feature must carry both ID and Parent
                if not ID or not Parent:
                    sys.stderr.write("Error, can't find ID or Parent. Malformed GFF file.\n")
                    sys.stderr.write(line)
                    sys.exit(1)
                # --- transcript-level rows attach to their parent gene ---
                if feature in ['mRNA', 'transcript', 'tRNA', 'ncRNA', 'rRNA']:
                    if gbkey and gbkey == 'misc_RNA':
                        feature = 'ncRNA'
                    if not Product:
                        if feature in ['mRNA', 'transcript']:
                            Product = 'hypothetical protein'
                    if not Parent in Genes:
                        Genes[Parent] = {'name': Name, 'type': feature,
                                         'transcript': [], 'cds_transcript': [],
                                         'protein': [], '5UTR': [[]], '3UTR': [[]],
                                         'codon_start': [[]], 'ids': [ID],
                                         'CDS': [[]], 'mRNA': [[]], 'strand': strand,
                                         'location': (start, end), 'contig': contig,
                                         'product': [Product], 'source': source,
                                         'phase': [[]], 'gene_synonym': synonyms,
                                         'db_xref': [DBxref], 'go_terms': [GO],
                                         'EC_number': [ECnum],
                                         'note': [Note], 'partialStart': [False],
                                         'partialStop': [False], 'pseudo': False}
                    else:
                        # additional transcript of an existing gene: open a new
                        # slot in every per-transcript list
                        Genes[Parent]['ids'].append(ID)
                        Genes[Parent]['mRNA'].append([])
                        Genes[Parent]['CDS'].append([])
                        Genes[Parent]['phase'].append([])
                        Genes[Parent]['5UTR'].append([])
                        Genes[Parent]['3UTR'].append([])
                        Genes[Parent]['codon_start'].append([])
                        Genes[Parent]['partialStart'].append(False)
                        Genes[Parent]['partialStop'].append(False)
                        Genes[Parent]['product'].append(Product)
                        Genes[Parent]['db_xref'].append(DBxref)
                        Genes[Parent]['EC_number'].append(ECnum)
                        Genes[Parent]['gene_synonym'] += synonyms
                        Genes[Parent]['go_terms'].append(GO)
                        Genes[Parent]['note'].append(Note)
                        Genes[Parent]['type'] = feature
                        # double check mRNA features are contained in gene coordinates
                        if start < Genes[Parent]['location'][0]:
                            # print('{:} update start: {:} to {:}'.format(Parent, Genes[Parent]['location'][0],start))
                            Genes[Parent]['location'] = (
                                start, Genes[Parent]['location'][1])
                        if end > Genes[Parent]['location'][1]:
                            # print('{:} update stop: {:} to {:}'.format(Parent, Genes[Parent]['location'][1],end))
                            Genes[Parent]['location'] = (
                                Genes[Parent]['location'][0], end)
                    if not ID in idParent:
                        idParent[ID] = Parent
                # --- exon rows: appended to the owning transcript's mRNA list ---
                elif feature == 'exon':
                    # an exon may belong to several transcripts
                    if ',' in Parent:
                        parents = Parent.split(',')
                    else:
                        parents = [Parent]
                    for p in parents:
                        if p in idParent:
                            GeneFeature = idParent.get(p)
                        if GeneFeature:
                            if not GeneFeature in Genes:
                                Genes[GeneFeature] = {'name': Name, 'type': None,
                                                      'transcript': [], 'cds_transcript': [],
                                                      'protein': [], '5UTR': [[]], '3UTR': [[]],
                                                      'codon_start': [[]], 'ids': [p],
                                                      'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
                                                      'location': None, 'contig': contig,
                                                      'product': [], 'source': source, 'phase': [[]],
                                                      'db_xref': [], 'go_terms': [],
                                                      'EC_number': [],
                                                      'note': [], 'partialStart': [False],
                                                      'partialStop': [False], 'pseudo': False,
                                                      'gene_synonym': synonyms}
                            else:
                                # determine which transcript this is get index from id
                                i = Genes[GeneFeature]['ids'].index(p)
                                Genes[GeneFeature]['mRNA'][i].append(
                                    (start, end))
                # --- CDS rows: coordinates plus phase per transcript ---
                elif feature == 'CDS':
                    if ',' in Parent:
                        parents = Parent.split(',')
                    else:
                        parents = [Parent]
                    for p in parents:
                        if p in idParent:
                            GeneFeature = idParent.get(p)
                        if GeneFeature:
                            if not GeneFeature in Genes:
                                Genes[GeneFeature] = {'name': Name, 'type': None,
                                                      'transcript': [], 'cds_transcript': [],
                                                      'protein': [], '5UTR': [[]], '3UTR': [[]],
                                                      'codon_start': [[]], 'ids': [p],
                                                      'CDS': [[(start, end)]], 'mRNA': [], 'strand': strand,
                                                      'location': None, 'contig': contig,
                                                      'product': [], 'source': source, 'phase': [[]],
                                                      'db_xref': [], 'go_terms': [],
                                                      'EC_number': [],
                                                      'note': [], 'partialStart': [False],
                                                      'partialStop': [False], 'pseudo': False,
                                                      'gene_synonym': synonyms}
                            else:
                                # determine which transcript this is get index from id
                                i = Genes[GeneFeature]['ids'].index(p)
                                Genes[GeneFeature]['CDS'][i].append(
                                    (start, end))
                                # add phase ('?' marks a malformed/unknown phase,
                                # resolved by trial translation in the second pass)
                                try:
                                    Genes[GeneFeature]['phase'][i].append(int(phase))
                                except ValueError:
                                    Genes[GeneFeature]['phase'][i].append('?')
                # --- UTR rows ---
                elif feature == 'five_prime_UTR' or feature == 'five_prime_utr':
                    if ',' in Parent:
                        parents = Parent.split(',')
                    else:
                        parents = [Parent]
                    for p in parents:
                        if p in idParent:
                            GeneFeature = idParent.get(p)
                        if GeneFeature:
                            if not GeneFeature in Genes:
                                Genes[GeneFeature] = {'name': Name, 'type': None,
                                                      'transcript': [], 'cds_transcript': [],
                                                      'protein': [], '5UTR': [[(start, end)]], '3UTR': [[]],
                                                      'codon_start': [[]], 'ids': [p],
                                                      'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
                                                      'location': None, 'contig': contig,
                                                      'product': [], 'source': source, 'phase': [[]],
                                                      'db_xref': [], 'go_terms': [],
                                                      'EC_number': [],
                                                      'note': [], 'partialStart': [False],
                                                      'partialStop': [False], 'pseudo': False,
                                                      'gene_synonym': synonyms,}
                            else:
                                # determine which transcript this is get index from id
                                i = Genes[GeneFeature]['ids'].index(p)
                                Genes[GeneFeature]['5UTR'][i].append(
                                    (start, end))
                elif feature == 'three_prime_UTR' or feature == 'three_prime_utr':
                    if ',' in Parent:
                        parents = Parent.split(',')
                    else:
                        parents = [Parent]
                    for p in parents:
                        if p in idParent:
                            GeneFeature = idParent.get(p)
                        if GeneFeature:
                            if not GeneFeature in Genes:
                                Genes[GeneFeature] = {'name': Name, 'type': None,
                                                      'transcript': [], 'cds_transcript': [],
                                                      'protein': [], '5UTR': [[]], '3UTR': [[(start, end)]],
                                                      'codon_start': [[]], 'ids': [p],
                                                      'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
                                                      'location': None, 'contig': contig,
                                                      'product': [], 'source': source, 'phase': [[]],
                                                      'db_xref': [], 'go_terms': [],
                                                      'EC_number': [],
                                                      'note': [], 'partialStart': [False],
                                                      'partialStop': [False], 'pseudo': False,
                                                      'gene_synonym': synonyms}
                            else:
                                # determine which transcript this is get index from id
                                i = Genes[GeneFeature]['ids'].index(p)
                                Genes[GeneFeature]['3UTR'][i].append(
                                    (start, end))
    # loop through and make sure CDS and exons are properly sorted and codon_start is correct, translate to protein space
    for k, v in list(Genes.items()):
        for i in range(0, len(v['ids'])):
            if v['type'] in ['mRNA', 'tRNA', 'ncRNA', 'rRNA']:
                # exons sorted ascending for +, descending for - strand
                if v['strand'] == '+':
                    sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
                else:
                    sortedExons = sorted(
                        v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
                Genes[k]['mRNA'][i] = sortedExons
                mrnaSeq = getSeqRegions(SeqRecords, v['contig'], sortedExons)
                if gap_filter:
                    mrnaSeq, Genes[k]['mRNA'][i] = start_end_gap(mrnaSeq, Genes[k]['mRNA'][i])
                v['transcript'].append(mrnaSeq)
            if v['type'] == 'mRNA':
                if not v['CDS'][i]:
                    # NOTE(review): after `del Genes[k]` this `continue` stays in
                    # the inner transcript loop, so a later index would hit a
                    # KeyError on Genes[k] — only safe for single-transcript
                    # genes; confirm whether multi-transcript input can reach it.
                    sys.stderr.write('ERROR: ID={:} has no CDS features, removing gene model\n'.format(k))
                    del Genes[k]
                    continue
                if v['strand'] == '+':
                    sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
                else:
                    sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0], reverse=True)
                #get the codon_start by getting first CDS phase + 1
                indexStart = [x for x, y in enumerate(v['CDS'][i]) if y[0] == sortedCDS[0][0]]
                cdsSeq = getSeqRegions(SeqRecords, v['contig'], sortedCDS)
                if gap_filter:
                    cdsSeq, v['CDS'][i] = start_end_gap(cdsSeq, v['CDS'][i])
                protSeq, codon_start = (None,)*2
                try:
                    currentphase = v['phase'][i]
                except IndexError:
                    pass
                if '?' in v['phase'][i]: #dont know the phase -- malformed GFF3, try to find best CDS
                    # try all three frames and keep the one with fewest
                    # internal stop codons
                    translateResults = []
                    for y in [1,2,3]:
                        protSeq = translate(cdsSeq, v['strand'], y-1)
                        if not protSeq:
                            log.debug('Translation of {:} using {:} phase failed'.format(v['ids'][i], y-1))
                            continue
                        numStops = protSeq.count('*')
                        if protSeq[-1] == '*':
                            numStops -= 1
                        translateResults.append((y, numStops, protSeq))
                    sortedResults = sorted(translateResults, key=lambda tup: tup[1])
                    codon_start = sortedResults[0][0]
                    protSeq = sortedResults[0][2]
                else:
                    try:
                        codon_start = int(v['phase'][i][indexStart[0]]) + 1
                    except IndexError:
                        pass
                    #translate and get protein sequence
                    protSeq = translate(cdsSeq, v['strand'], codon_start-1)
                Genes[k]['codon_start'][i] = codon_start
                if codon_start > 1:
                    # trim the cds_transcript to the actual reading frame
                    if v['strand'] == '+':
                        cdsSeq = cdsSeq[codon_start - 1:]
                    elif v['strand'] == '-':
                        endTrunc = len(cdsSeq) - codon_start -1
                        cdsSeq = cdsSeq[0:endTrunc]
                    else:
                        # NOTE(review): this %-format has two placeholders but a
                        # single list argument — it would raise TypeError if
                        # ever reached; strand here can only be +/- in practice.
                        print("ERROR nonsensical strand (%s) for gene %s"%([v['strand'],k]))
                Genes[k]['cds_transcript'].append(cdsSeq)
                Genes[k]['CDS'][i] = sortedCDS
                v['protein'].append(protSeq)
                if protSeq:
                    # a trailing '*' means a complete stop codon; a leading 'M'
                    # with codon_start 1 means a complete start
                    if protSeq.endswith('*'):
                        v['partialStop'][i] = False
                    else:
                        v['partialStop'][i] = True
                    if v['codon_start'][i] == 1 and v['protein'][i].startswith('M'):
                        v['partialStart'][i] = False
                    else:
                        v['partialStart'][i] = True
        # since its possible updated the mRNA/CDS fields, double check that gene coordinates are ok
        if k not in Genes:
            continue
        all_mRNA_coords = [item for sublist in v['mRNA'] for item in sublist]
        try:
            Genes[k]['location'] = (min(all_mRNA_coords, key=lambda item: item[0])[0], max(all_mRNA_coords, key=lambda item: item[1])[1])
        except ValueError:
            continue
        # clean up any repeated synonym
        if len(v['gene_synonym']) > 1:
            uniqueSynonyms = set(v['gene_synonym'])
            Genes[k]['gene_synonym'] = list(uniqueSynonyms)
    return Genes
def start_end_gap(seq, coords):
    """Trim leading/trailing runs of 'N' from *seq* and shift *coords* to match.

    coords is a list of (start, end) tuples covering seq: the first tuple's
    start moves right by the number of N's removed from the front, and the
    last tuple's end moves left by the number removed from the back.
    Returns the trimmed sequence and the (mutated in place) coords list.
    """
    trimmed = seq.lstrip('N')
    removed = len(seq) - len(trimmed)
    if removed:
        coords[0] = (coords[0][0] + removed, coords[0][1])
        seq = trimmed
    trimmed = seq.rstrip('N')
    removed = len(seq) - len(trimmed)
    if removed:
        coords[-1] = (coords[-1][0], coords[-1][1] - removed)
        seq = trimmed
    return seq, coords
def simplifyGO(inputList):
    """Reduce a list of GO annotations to bare GO identifiers.

    Entries already starting with 'GO:' are kept (whitespace-stripped);
    entries like 'function GO:0000001' keep only their second token;
    anything else is dropped.
    """
    cleaned = []
    for entry in inputList:
        if entry.startswith('GO:'):
            cleaned.append(entry.strip())
        elif ' ' in entry:
            cleaned.append(entry.split(' ')[1])
    return cleaned
def dict2gff3(input, output, debug=False):
    from collections import OrderedDict
    '''
    function to convert funannotate gene dictionary to gff3 output
    '''
    def _sortDict(d):
        # sort key: contig name, then gene start coordinate
        return (d[1]['contig'], d[1]['location'][0])
    # sort the annotations by contig and start location
    sGenes = natsorted(iter(input.items()), key=_sortDict)
    sortedGenes = OrderedDict(sGenes)
    # then loop through and write GFF3 format
    with open(output, 'w') as gffout:
        gffout.write("##gff-version 3\n")
        for k, v in list(sortedGenes.items()):
            # skip pseudogenes and malformed mRNA models (missing CDS or
            # mismatched per-transcript list lengths)
            if 'pseudo' in v:
                if v['pseudo']:
                    continue
            if v['type'] == 'mRNA' and not v['CDS']:
                continue
            if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
                continue
            if v['type'] == 'mRNA' and len(v['CDS']) == 0:
                continue
            if v['type'] is None:
                continue
            # gene line, with Name/Alias attributes when available
            if v['name']:
                if 'gene_synonym' in v and len(v['gene_synonym']) > 0:
                    gffout.write(
                        "{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};Alias={:};\n".format(
                            v['contig'], v['source'],v['location'][0],
                            v['location'][1], v['strand'], k, v['name'],
                            ','.join(v['gene_synonym'])))
                else:
                    gffout.write(
                        "{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(
                            v['contig'], v['source'], v['location'][0],
                            v['location'][1], v['strand'], k, v['name']))
            else:
                if 'gene_synonym' in v and len(v['gene_synonym']) > 0:
                    gffout.write(
                        "{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Alias={:};\n".format(
                            v['contig'], v['source'], v['location'][0],
                            v['location'][1], v['strand'], k,
                            ','.join(v['gene_synonym'])))
                else:
                    gffout.write(
                        "{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
                            v['contig'], v['source'], v['location'][0],
                            v['location'][1], v['strand'], k))
            # one block per transcript
            for i in range(0, len(v['ids'])):
                # make sure coordinates are sorted
                # (sortedFive/sortedThree are only bound when the matching UTR
                # exists; the write sites below are guarded by the same test)
                if v['strand'] == '+':
                    sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
                    sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
                    if '5UTR' in v and v['5UTR'][i]:
                        sortedFive = sorted(
                            v['5UTR'][i], key=lambda tup: tup[0])
                    if '3UTR' in v and v['3UTR'][i]:
                        sortedThree = sorted(
                            v['3UTR'][i], key=lambda tup: tup[0])
                else:
                    sortedExons = sorted(
                        v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
                    sortedCDS = sorted(
                        v['CDS'][i], key=lambda tup: tup[0], reverse=True)
                    if '5UTR' in v and v['5UTR'][i]:
                        sortedFive = sorted(
                            v['5UTR'][i], key=lambda tup: tup[0], reverse=True)
                    if '3UTR' in v and v['3UTR'][i]:
                        sortedThree = sorted(
                            v['3UTR'][i], key=lambda tup: tup[0], reverse=True)
                # build extra annotations for each transcript if applicable
                extraAnnotations = ''
                if 'gene_synonym' in v and len(v['gene_synonym']) > 0:
                    extraAnnotations = extraAnnotations + \
                        'Alias={:};'.format(','.join(v['gene_synonym']))
                if len(v['go_terms'][i]) > 0:
                    go_annotations = simplifyGO(v['go_terms'][i])
                    extraAnnotations = extraAnnotations + \
                        'Ontology_term={:};'.format(','.join(go_annotations))
                if len(v['db_xref'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'Dbxref={:};'.format(','.join(v['db_xref'][i]))
                if 'EC_number' in v and len(v['EC_number'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'EC_number={:};'.format(','.join(v['EC_number'][i]))
                if len(v['note'][i]) > 0:
                    CleanedNote = []  # need to make sure no commas or semi-colons in these data else will cause problems in parsing GFF3 output downstream
                    for x in v['note'][i]:
                        if ';' in x:
                            x = x.replace(';', '.')
                        if ':' in x:
                            base, values = x.split(':', 1)
                            if not ',' in values:
                                CleanedNote.append(base+':'+values)
                            else:
                                # split comma-separated values into one
                                # note per value
                                for y in values.split(','):
                                    CleanedNote.append(base+':'+y)
                        else:
                            CleanedNote.append(x.replace(',', ''))
                    extraAnnotations = extraAnnotations + \
                        'note={:};'.format(','.join(CleanedNote))
                # now write mRNA feature
                gffout.write(
                    "{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(
                        v['contig'], v['source'], v['type'], v['location'][0],
                        v['location'][1], v['strand'], v['ids'][i], k,
                        v['product'][i], extraAnnotations))
                if v['type'] in ['mRNA', 'tRNA', 'ncRNA']:
                    if '5UTR' in v and v['5UTR'][i]:
                        # if 5'UTR then write those first
                        num_5utrs = len(v['5UTR'][i])
                        if num_5utrs > 0:
                            for z in range(0, num_5utrs):
                                u_num = z + 1
                                gffout.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
                                    v['contig'], v['source'], sortedFive[z][0], sortedFive[z][1], v['strand'], v['ids'][i],
                                    u_num, v['ids'][i]))
                    # write the exons
                    num_exons = len(v['mRNA'][i])
                    for x in range(0, num_exons):
                        ex_num = x + 1
                        gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
                            v['contig'], v['source'], sortedExons[x][0], sortedExons[x][1], v['strand'],
                            v['ids'][i], ex_num, v['ids'][i]))
                    # if 3'UTR then write
                    if '3UTR' in v and v['3UTR'][i]:
                        num_3utrs = len(v['3UTR'][i])
                        if num_3utrs > 0:
                            for z in range(0, num_3utrs):
                                u_num = z + 1
                                gffout.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
                                    v['contig'], v['source'], sortedThree[z][0], sortedThree[z][1], v['strand'],
                                    v['ids'][i], u_num, v['ids'][i]))
                if v['type'] == 'mRNA':
                    num_cds = len(v['CDS'][i])
                    # GFF3 phase is 1 less than flat file
                    current_phase = v['codon_start'][i] - 1
                    for y in range(0, num_cds):
                        gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
                            v['contig'], v['source'], sortedCDS[y][0], sortedCDS[y][1], v['strand'],
                            current_phase, v['ids'][i], v['ids'][i]))
                        # carry the phase forward across CDS segments
                        current_phase = (
                            current_phase - (int(sortedCDS[y][1]) - int(sortedCDS[y][0]) + 1)) % 3
                        if current_phase == 3:
                            current_phase = 0
def dict2gff3_old(input, output):
    """Write a funannotate gene-model dictionary to *output* as GFF3.

    Legacy writer (used by gb2allout for GenBank round-trips); exon/CDS
    coordinates are written exactly as stored in the dictionary.
    Pseudogenes and malformed mRNA models are skipped.
    """
    from collections import OrderedDict
    '''
    function to convert funannotate gene dictionary to gff3 output
    '''
    def _sortDict(d):
        # sort key: (contig name, gene start coordinate)
        return (d[1]['contig'], d[1]['location'][0])
    # sort the annotations by contig and start location
    sGenes = sorted(iter(input.items()), key=_sortDict)
    sortedGenes = OrderedDict(sGenes)
    # then loop through and write GFF3 format
    with open(output, 'w') as gffout:
        gffout.write("##gff-version 3\n")
        for k, v in list(sortedGenes.items()):
            # skip pseudogenes and mRNA models that are missing CDS data or
            # have mismatched ids/mRNA/CDS list lengths
            if 'pseudo' in v:
                if v['pseudo']:
                    continue
            if v['type'] == 'mRNA' and not v['CDS']:
                continue
            if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
                continue
            if v['type'] == 'mRNA' and len(v['CDS']) == 0:
                continue
            if v['type'] is None:
                continue
            # gene feature; include Name attribute only when one exists
            if v['name']:
                gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(
                    v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k, v['name']))
            else:
                gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
                    v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
            for i in range(0, len(v['ids'])):
                # build extra annotations for each transcript if applicable
                extraAnnotations = ''
                if len(v['go_terms'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'Ontology_term={:};'.format(','.join(v['go_terms'][i]))
                if len(v['db_xref'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'Dbxref={:};'.format(','.join(v['db_xref'][i]))
                if len(v['note'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'note={:};'.format(','.join(v['note'][i]))
                # now write mRNA feature
                gffout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(
                    v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['product'][i], extraAnnotations))
                if v['type'] == 'mRNA' or v['type'] == 'tRNA':
                    if '5UTR' in v:
                        # if 5'UTR then write those first
                        num_5utrs = len(v['5UTR'][i])
                        if num_5utrs > 0:
                            for z in range(0, num_5utrs):
                                u_num = z + 1
                                gffout.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
                                    v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
                    # write the exons
                    num_exons = len(v['mRNA'][i])
                    for x in range(0, num_exons):
                        ex_num = x + 1
                        gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
                            v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
                    # if 3'UTR then write
                    if '3UTR' in v:
                        num_3utrs = len(v['3UTR'][i])
                        if num_3utrs > 0:
                            for z in range(0, num_3utrs):
                                u_num = z + 1
                                gffout.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
                                    v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
                if v['type'] == 'mRNA':
                    num_cds = len(v['CDS'][i])
                    # GFF3 phase is 1 less than flat file
                    current_phase = v['codon_start'][i] - 1
                    for y in range(0, num_cds):
                        gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
                            v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
                        # carry the phase forward by this CDS segment's length
                        current_phase = (
                            current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
                        if current_phase == 3:
                            current_phase = 0
def dict2gff3noUTRs(input, output):
    """Write a funannotate gene-model dictionary as GFF3 without UTR features.

    tRNA transcripts get exon features only; mRNA transcripts get paired
    exon + CDS features, with the CDS phase column advanced per segment
    from codon_start.
    """
    from collections import OrderedDict
    '''
    function to convert funannotate gene dictionary to gff3 output, no UTRs!
    '''
    def _sortDict(d):
        # sort key: (contig name, gene start coordinate)
        return (d[1]['contig'], d[1]['location'][0])
    # sort the annotations by contig and start location
    sGenes = sorted(iter(input.items()), key=_sortDict)
    sortedGenes = OrderedDict(sGenes)
    # then loop through and write GFF3 format
    with open(output, 'w') as gffout:
        gffout.write("##gff-version 3\n")
        for k, v in list(sortedGenes.items()):
            # gene feature; include Name attribute only when one exists
            if v['name']:
                gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(
                    v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k, v['name']))
            else:
                gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
                    v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
            for i in range(0, len(v['ids'])):
                # build extra annotations for each transcript if applicable
                extraAnnotations = ''
                if len(v['go_terms'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'Ontology_term={:};'.format(','.join(v['go_terms'][i]))
                if len(v['db_xref'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'Dbxref={:};'.format(','.join(v['db_xref'][i]))
                if len(v['note'][i]) > 0:
                    extraAnnotations = extraAnnotations + \
                        'note={:};'.format(','.join(v['note'][i]))
                # now write mRNA feature
                gffout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(
                    v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['product'][i], extraAnnotations))
                if v['type'] == 'tRNA':
                    # write the exons and CDS features
                    num_exons = len(v['mRNA'][i])
                    for x in range(0, num_exons):
                        ex_num = x + 1
                        gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
                            v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
                elif v['type'] == 'mRNA':
                    num_cds = len(v['CDS'][i])
                    # GFF3 phase is 1 less than flat file
                    current_phase = v['codon_start'][i] - 1
                    for y in range(0, num_cds):
                        ex_num = y + 1
                        # each CDS segment doubles as an exon (no UTRs here)
                        gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
                            v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
                        gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
                            v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
                        # carry the phase forward by this CDS segment's length
                        current_phase = (
                            current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
                        if current_phase == 3:
                            current_phase = 0
def gtf2dict(input):
    """Parse a StringTie GTF file into a funannotate-style gene dictionary.

    Returns {gene_id: model} where each model records transcript ids,
    per-transcript exon ('mRNA') and 'CDS' coordinate lists, strand,
    overall gene location, contig, source and TPM values. Coordinate
    lists are sorted ascending on '+' strand, descending on '-'.
    """
    Genes = {}
    with open(input, 'r') as inFile:
        for line in inFile:
            if line.startswith('\n') or line.startswith('#'):
                continue
            line = line.rstrip()
            # CM002242 StringTie transcript 4198460 4199001 1000 + . gene_id "STRG.18087"; transcript_id "STRG.18087.2"; cov "5.905163"; FPKM "3.279455"; TPM "9.789504";
            # CM002242 StringTie exon 4198460 4198609 1000 + . gene_id "STRG.18087"; transcript_id "STRG.18087.2"; exon_number "1"; cov "6.999466";
            contig, source, feature, start, end, score, strand, phase, attributes = line.split(
                '\t')
            start = int(start)
            end = int(end)
            # pull gene_id / transcript_id / TPM out of the attribute column
            ID, transcriptID, TPM = (None,)*3
            info = attributes.split(';')
            for x in info:
                x = x.strip()
                x = x.replace('"', '')
                if x.startswith('gene_id '):
                    ID = x.replace('gene_id ', '')
                elif x.startswith('transcript_id '):
                    transcriptID = x.replace('transcript_id ', '')
                elif x.startswith('TPM '):
                    TPM = x.replace('TPM ', '')
            if feature == 'transcript':
                if not ID in Genes:
                    Genes[ID] = {'type': 'mRNA', 'codon_start': [1], 'ids': [transcriptID], 'CDS': [[]], 'mRNA': [[]], 'strand': strand,
                                 'location': (start, end), 'contig': contig, 'source': source, 'tpm': [TPM]}
                else:
                    # known gene: widen its span and append a new transcript slot
                    if start < Genes[ID]['location'][0]:
                        Genes[ID]['location'] = (
                            start, Genes[ID]['location'][1])
                    if end > Genes[ID]['location'][1]:
                        Genes[ID]['location'] = (Genes[ID]['location'][0], end)
                    Genes[ID]['ids'].append(transcriptID)
                    Genes[ID]['mRNA'].append([])
                    Genes[ID]['CDS'].append([])
                    Genes[ID]['codon_start'].append(1)
                    Genes[ID]['tpm'].append(TPM)
            else:
                if not ID or not transcriptID:
                    print(
                        "Error, can't find geneID or transcriptID. Malformed GTF file.")
                    print(line)
                    sys.exit(1)
                if feature == 'exon':
                    if not ID in Genes:
                        # exon seen before its transcript line: seed the entry
                        Genes[ID] = {'type': 'mRNA', 'codon_start': [1], 'ids': [transcriptID], 'CDS': [[(start, end)]], 'mRNA': [[(start, end)]], 'strand': strand,
                                     'location': (start, end), 'contig': contig, 'source': source, 'tpm': []}
                    else:
                        if transcriptID in Genes[ID]['ids']:  # then add exon
                            i = Genes[ID]['ids'].index(transcriptID)
                            Genes[ID]['mRNA'][i].append((start, end))
                            Genes[ID]['CDS'][i].append((start, end))
    # loop through dictionary and make sure properly sorted exons
    for k, v in list(Genes.items()):
        for i in range(0, len(v['ids'])):
            if v['strand'] == '+':
                sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
                sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
            else:
                sortedExons = sorted(
                    v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
                sortedCDS = sorted(
                    v['CDS'][i], key=lambda tup: tup[0], reverse=True)
            Genes[k]['mRNA'][i] = sortedExons
            Genes[k]['CDS'][i] = sortedCDS
    return Genes
def Stringtie_dict2gff3(input, output):
    """Write a StringTie-derived gene dictionary (see gtf2dict) as GFF3.

    Like dict2gff3_old but emits a TPM attribute on each transcript
    instead of functional annotation attributes.
    """
    from collections import OrderedDict
    '''
    function to convert funannotate gene dictionary to gff3 output
    '''
    def _sortDict(d):
        # sort key: (contig name, gene start coordinate)
        return (d[1]['contig'], d[1]['location'][0])
    # sort the annotations by contig and start location
    sGenes = sorted(iter(input.items()), key=_sortDict)
    sortedGenes = OrderedDict(sGenes)
    # then loop through and write GFF3 format
    with open(output, 'w') as outfile:
        outfile.write("##gff-version 3\n")
        for k, v in list(sortedGenes.items()):
            outfile.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
                v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
            for i in range(0, len(v['ids'])):
                # build extra annotations for each transcript if applicable
                # now write mRNA feature
                outfile.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};TPM={:}\n".format(
                    v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['tpm'][i]))
                if v['type'] == 'mRNA':
                    if '5UTR' in v:
                        # if 5'UTR then write those first
                        num_5utrs = len(v['5UTR'][i])
                        if num_5utrs > 0:
                            for z in range(0, num_5utrs):
                                u_num = z + 1
                                outfile.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
                                    v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
                    # write the exons
                    num_exons = len(v['mRNA'][i])
                    for x in range(0, num_exons):
                        ex_num = x + 1
                        outfile.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
                            v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
                    # if 3'UTR then write
                    if '3UTR' in v:
                        num_3utrs = len(v['3UTR'][i])
                        if num_3utrs > 0:
                            for z in range(0, num_3utrs):
                                u_num = z + 1
                                outfile.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
                                    v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
                if v['type'] == 'mRNA':
                    num_cds = len(v['CDS'][i])
                    # GFF3 phase is 1 less than flat file
                    current_phase = v['codon_start'][i] - 1
                    for y in range(0, num_cds):
                        outfile.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
                            v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
                        # carry the phase forward by this CDS segment's length
                        current_phase = (
                            current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
                        if current_phase == 3:
                            current_phase = 0
def Quarry2GFF3(input, output):
    """Reformat CodingQuarry PredictedPass.gff3 into funannotate-style GFF3.

    Features are renamed to sequential gene_N / transcript_N-T1 IDs (the
    original CodingQuarry IDs are preserved as Alias=), and a paired exon
    feature is emitted for every CDS line. Relies on each gene line
    preceding its CDS lines, since transID carries over between lines.
    """
    with open(output, 'w') as outfile:
        outfile.write(("##gff-version 3\n"))
        exonCounts = {}
        GeneCount = 1
        with open(input, 'r') as infile:
            for line in infile:
                line = line.strip()
                contig, source, feature, start, end, score, strand, phase, attributes = line.split(
                    '\t')
                source = 'CodingQuarry'
                # extract ID= and Parent= from the attribute column
                ID, Parent, Name = (None,)*3
                info = attributes.split(';')
                for x in info:
                    if x.startswith('ID='):
                        ID = x.replace('ID=', '')
                    elif x.startswith('Parent='):
                        Parent = x.replace('Parent=', '')
                # IDs can carry trailing text after a space; keep first word
                if ID and ' ' in ID:
                    ID = ID.split(' ')[0]
                if Parent and ' ' in Parent:
                    Parent = Parent.split(' ')[0]
                if feature == 'gene':
                    geneID = 'gene_'+str(GeneCount)
                    transID = 'transcript_'+str(GeneCount)+'-T1'
                    outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Name={:};Alias={:};\n'.format(
                        contig, source, feature, start, end, score, strand, phase, geneID, geneID, ID))
                    outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Parent={:};Alias={:};\n'.format(
                        contig, source, 'mRNA', start, end, '.', strand, '.', transID, geneID, ID))
                    GeneCount += 1
                elif feature == 'CDS':
                    # per-transcript exon numbering
                    if not transID in exonCounts:
                        exonCounts[transID] = 1
                    else:
                        exonCounts[transID] += 1
                    num = exonCounts.get(transID)
                    outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.exon{:};Parent={:};\n'.format(
                        contig, source, 'exon', start, end, '.', strand, '.', transID, num, transID))
                    outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.cds;Parent={:};\n'.format(
                        contig, source, feature, start, end, score, strand, phase, transID, transID))
def runStringtie(bamfile, cpus, output):
    '''
    Assemble transcripts from a BAM file with stringtie.
    A BAM alone carries no strandedness information, so stringtie is run
    unstranded; its stdout (GTF) is captured into *output*.
    '''
    bam_path = os.path.realpath(bamfile)
    out_path = os.path.abspath(output)
    stringtie_cmd = ['stringtie', '-p', str(cpus), bam_path]
    runSubprocess2(stringtie_cmd, '.', log, out_path)
def runCodingQuarry(genome, stringtie, cpus, output):
    '''
    Run CodingQuarry gene prediction guided by a StringTie GTF.
    The GTF is first converted to GFF3; results are reformatted into
    *output*. Returns True on success, False when CodingQuarry produced
    no usable annotations.
    '''
    # first get basename directory as need to create tmp CodingQuarry dir
    basedir = os.path.dirname(genome)
    tmpdir = os.path.join(basedir, 'CodingQuarry')
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    # convert GTF to GFF3 file
    stringtieGFF3 = os.path.join(basedir, 'stringtie.gff3')
    Genes = gtf2dict(stringtie)
    Stringtie_dict2gff3(Genes, stringtieGFF3)
    # now setup command and run from tmpdir folder
    cmd = ['CodingQuarry', '-p',
           str(cpus), '-f', os.path.realpath(genome), '-t', os.path.realpath(stringtieGFF3)]
    runSubprocess(cmd, tmpdir, log)
    # capture results and reformat to proper GFF3
    result = os.path.join(tmpdir, 'out', 'PredictedPass.gff3')
    if not checkannotations(result):
        log.error('CodingQuarry failed, moving on without result, check logfile')
        return False
    else:
        Quarry2GFF3(result, output)
        return True
def runCodingQuarryTrained(genome, species, tmpdir, cpus, output):
    """Run CodingQuarry with previously trained *species* parameters.

    Executes CodingQuarry in *tmpdir* with QUARRY_PATH removed from its
    environment, then reformats out/PredictedPass.gff3 into *output*.
    Returns True on success, False when no usable annotations were made.
    """
    # now setup command and run from tmpdir folder
    log.info(
        'CodingQuarry prediction is running using {:} paremeters'.format(species))
    cmd = ['CodingQuarry', '-p',
           str(cpus), '-f', os.path.realpath(genome), '-s', species]
    log.debug(' '.join(cmd))
    # fix: os.environ is the live process environment; the previous
    # "myENV = os.environ" aliased it, so deleting QUARRY_PATH removed it
    # from this process permanently. Work on a copy instead.
    myENV = os.environ.copy()
    if 'QUARRY_PATH' in myENV:
        del myENV['QUARRY_PATH']
    # fix: close the /dev/null handle instead of leaking it
    with open(os.devnull, 'w') as FNULL:
        p1 = subprocess.Popen(cmd, stdout=FNULL, stderr=FNULL,
                              cwd=tmpdir, env=dict(myENV))
        p1.communicate()
    # capture results and reformat to proper GFF3
    result = os.path.join(tmpdir, 'out', 'PredictedPass.gff3')
    if not checkannotations(result):
        log.error('CodingQuarry failed, moving on without result, check logfile')
        return False
    else:
        Quarry2GFF3(result, output)
        return True
def dict2gtf(input, output):
    """Write coding (mRNA) gene models from a funannotate dictionary as GTF.

    Pseudogenes and models with missing/mismatched CDS lists are skipped.
    start/stop codons are only written when the model is not partial at
    that end; the stop codon is carved out of the final CDS segment.
    """
    from collections import OrderedDict

    def _sortDict(d):
        # sort key: (contig name, gene start coordinate)
        return (d[1]['contig'], d[1]['location'][0])
    # sort the annotations by contig and start location
    sGenes = sorted(iter(input.items()), key=_sortDict)
    sortedGenes = OrderedDict(sGenes)
    with open(output, 'w') as gtfout:
        for k, v in list(sortedGenes.items()):
            # only complete coding models are emitted
            if v['type'] != 'mRNA':
                continue
            if 'pseudo' in v:
                if v['pseudo']:
                    continue
            if v['type'] == 'mRNA' and not v['CDS']:
                continue
            if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
                continue
            for i in range(0, len(v['ids'])):
                # create attributes string
                attributes = 'gene_id "{:}"; transcript_id "{:}";'.format(
                    k, v['ids'][i])
                # if v['name']:
                # attributes = attributes + ' Name "{:}";'.format(v['name'])
                if len(v['5UTR'][i]) > 0:
                    for utr in v['5UTR'][i]:
                        gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                            v['contig'], v['source'], '5UTR', utr[0], utr[1], 0, v['strand'], 0, attributes))
                # start codon only for models with an intact 5' end
                if not v['partialStart'][i]:
                    if v['strand'] == '+':
                        startCodon = (v['CDS'][i][0][0], v['CDS'][i][0][0]+2)
                    else:
                        startCodon = (v['CDS'][i][0][1]-2, v['CDS'][i][0][1])
                    gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                        v['contig'], v['source'], 'start_codon', startCodon[0], startCodon[1], 0, v['strand'], 0, attributes))
                for x, cds in enumerate(v['CDS'][i]):
                    if v['partialStop'][i]:  # then just write the whole CDS as no reason to move codon back
                        gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                            v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
                    else:
                        # intact 3' end: trim 3 bp off the last CDS segment
                        # and write it as a separate stop_codon feature
                        if v['strand'] == '+':
                            if x == len(v['CDS'][i])-1:  # this is last one
                                gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                                    v['contig'], v['source'], 'CDS', cds[0], cds[1]-3, 0, v['strand'], v['phase'][i][x], attributes))
                                gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                                    v['contig'], v['source'], 'stop_codon', cds[1]-2, cds[1], 0, v['strand'], 0, attributes))
                            else:
                                gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                                    v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
                        else:
                            if x == len(v['CDS'][i])-1:  # this is last one
                                gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                                    v['contig'], v['source'], 'CDS', cds[0]+3, cds[1], 0, v['strand'], v['phase'][i][x], attributes))
                                gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                                    v['contig'], v['source'], 'stop_codon', cds[0], cds[0]+2, 0, v['strand'], 0, attributes))
                            else:
                                gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                                    v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
                if len(v['3UTR'][i]) > 0:
                    for utr in v['3UTR'][i]:
                        gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
                            v['contig'], v['source'], '3UTR', utr[0], utr[1], 0, v['strand'], 0, attributes))
            gtfout.write('\n')
def gff3_to_gtf(input, genome, output):
    """Convert a GFF3 annotation file to GTF via the gene-model dictionary."""
    gene_models = gff2dict(input, genome, {})
    dict2gtf(gene_models, output)
def gb2allout(input, GFF, Proteins, Transcripts, DNA):
    '''
    function to split GBK file into parts, need to be able to deal with multiple transcripts and get naming correct
    assumption is that the mRNA and CDS features from multiple transcripts are in order, i.e. the first mRNA feature
    you see corresponds to first CDS feature, etc. **hopefully this is an okay assumption**
    '''
    # idea is to populate the dictionary first, then write GFF, proteins, transcripts, can write DNA on first pass
    genes = {}
    with open(DNA, 'w') as scaffolds:
        with open(input, 'r') as gbk:
            for record in SeqIO.parse(gbk, 'genbank'):
                # write each contig sequence (soft-wrapped) while collecting
                # its features into the gene dictionary
                scaffolds.write(">{:}\n{:}\n".format(
                    record.id, softwrap(str(record.seq))))
                for f in record.features:
                    gb_feature_add2dict(f, record, genes)
    # write GFF
    dict2gff3_old(genes, GFF)
    # write to protein and transcripts
    dict2nucleotides(genes, Proteins, Transcripts)
def minimap2Align(transcripts, genome, cpus, intron, output):
    '''
    function to align transcripts to genome using minimap2
    huge speed increase over gmap + blat
    '''
    # half the threads go to samtools sort, capped at 4
    bamthreads = int(round(int(cpus) / 2))
    if bamthreads > 4:
        bamthreads = 4
    minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
                    str(cpus), '--cs', '-u', 'b', '-G', str(intron), genome,
                    transcripts]
    samtools_cmd = ['samtools', 'sort', '--reference', genome,
                    '-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
    # fix: close the /dev/null handle when done instead of leaking it
    with open(os.devnull, 'w') as FNULL:
        # pipe minimap2 SAM output straight into samtools sort
        p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
        p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL,
                              stdin=p1.stdout)
        p1.stdout.close()  # let minimap2 get SIGPIPE if samtools exits early
        p2.communicate()
def iso_seq_minimap2(transcripts, genome, cpus, intron, output):
    '''
    function to align PB iso-seq data
    '''
    # half the threads go to samtools sort, capped at 4
    bamthreads = int(round(int(cpus) / 2))
    if bamthreads > 4:
        bamthreads = 4
    minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
                    str(cpus), '--cs', '-uf', '-C5', '-G', str(intron), genome,
                    transcripts]
    samtools_cmd = ['samtools', 'sort', '--reference', genome,
                    '-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
    # fix: close the /dev/null handle when done instead of leaking it
    with open(os.devnull, 'w') as FNULL:
        # pipe minimap2 SAM output straight into samtools sort
        p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
        p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL,
                              stdin=p1.stdout)
        p1.stdout.close()  # let minimap2 get SIGPIPE if samtools exits early
        p2.communicate()
def nanopore_cDNA_minimap2(transcripts, genome, cpus, intron, output):
    '''
    function to nanopore 2d cDNA
    '''
    # half the threads go to samtools sort, capped at 4
    bamthreads = int(round(int(cpus) / 2))
    if bamthreads > 4:
        bamthreads = 4
    minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
                    str(cpus), '--cs', '-G', str(intron), genome, transcripts]
    samtools_cmd = ['samtools', 'sort', '--reference', genome,
                    '-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
    # fix: close the /dev/null handle when done instead of leaking it
    with open(os.devnull, 'w') as FNULL:
        # pipe minimap2 SAM output straight into samtools sort
        p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
        p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL,
                              stdin=p1.stdout)
        p1.stdout.close()  # let minimap2 get SIGPIPE if samtools exits early
        p2.communicate()
def nanopore_mRNA_minimap2(transcripts, genome, cpus, intron, output):
    '''
    function to nanopore direct mRNA reads
    '''
    # half the threads go to samtools sort, capped at 4
    bamthreads = int(round(int(cpus) / 2))
    if bamthreads > 4:
        bamthreads = 4
    minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
                    str(cpus), '--cs', '-uf', '-k14', '-G', str(intron),
                    genome, transcripts]
    samtools_cmd = ['samtools', 'sort', '--reference', genome,
                    '-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
    # fix: close the /dev/null handle when done instead of leaking it
    with open(os.devnull, 'w') as FNULL:
        # pipe minimap2 SAM output straight into samtools sort
        p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
        p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL,
                              stdin=p1.stdout)
        p1.stdout.close()  # let minimap2 get SIGPIPE if samtools exits early
        p2.communicate()
def mergeBAMs(*args, **kwargs):
    """Merge the given BAM files into kwargs['output'] via samtools merge,
    using kwargs['cpus'] threads."""
    merge_cmd = ['samtools', 'merge', '-@', str(kwargs['cpus']), kwargs['output']]
    merge_cmd.extend(args)
    runSubprocess(merge_cmd, '.', log)
def catFiles(*args, **kwargs):
    """Concatenate the given files into kwargs['output'] using cat."""
    cat_cmd = ['cat']
    cat_cmd.extend(args)
    runSubprocess2(cat_cmd, '.', log, kwargs['output'])
def runGMAP(transcripts, genome, cpus, intron, tmpdir, output):
    """Build a GMAP index for *genome* under *tmpdir*, then map *transcripts*
    to GFF3 gene format in *output*; tool logs go to tmpdir/gmap-*.log."""
    # first build genome database
    build_log = os.path.join(tmpdir, 'gmap-build.log')
    build_cmd = ['gmap_build', '-D', tmpdir, '-d', 'genome',
                 '-k', '13', genome]
    with open(build_log, 'w') as logfile:
        subprocess.call(build_cmd, stdout=logfile, stderr=logfile)
    # now map transcripts
    map_log = os.path.join(tmpdir, 'gmap-map.log')
    map_cmd = ['gmap', '--cross-species', '-f', '3', '-K', str(intron),
               '-n', '1', '-t', str(cpus), '-B', '5', '-D', tmpdir,
               '-d', 'genome', transcripts]
    with open(map_log, 'w') as logfile, open(output, 'w') as out:
        subprocess.call(map_cmd, stdout=out, stderr=logfile)
def runBUSCO(input, Database, cpus, tmpdir, output):
    """Run the bundled BUSCO2 helper script in protein mode.

    Parses the resulting full table and writes Complete/Duplicated hits
    to *output* as funannotate note annotations (gene, note, BUSCO:id).
    """
    # run busco in protein mapping mode
    # pick the py2 or py3 variant of the bundled script
    if (sys.version_info > (3, 0)):
        BUSCO = os.path.join(parentdir,
                             'aux_scripts', 'funannotate-BUSCO2.py')
    else:
        BUSCO = os.path.join(parentdir,
                             'aux_scripts', 'funannotate-BUSCO2-py2.py')
    cmd = [BUSCO, '-i', input, '-m', 'proteins', '-l',
           Database, '-o', 'busco', '-c', str(cpus), '-f']
    runSubprocess(cmd, tmpdir, log)
    # now parse output and write to annotation file
    with open(output, 'w') as out:
        with open(os.path.join(tmpdir, 'run_busco', 'full_table_busco.tsv'), 'r') as busco:
            for line in busco:
                if line.startswith('#'):
                    continue
                col = line.split('\t')
                # if diploid these should show up, but problematic for drawing trees....
                if col[1] == 'Complete' or col[1] == 'Duplicated':
                    out.write("%s\tnote\tBUSCO:%s\n" % (col[2], col[0]))
def dupBUSCO2gff(ID, base_folder, locationID):
    """Extract the Augustus gene model for one duplicated BUSCO hit.

    Scans the BUSCO hmmer_output files for *ID* to find which predicted
    gene corresponds to *locationID* (a contig:start-end string), then
    copies that gene's non-comment GFF lines from the matching Augustus
    prediction file into augustus_output/gffs/<ID>.gff.
    """
    hmmerfolder = os.path.join(base_folder, 'hmmer_output')
    geneID = ''
    AugFile = ''
    GFFfile = os.path.join(base_folder, 'augustus_output', 'gffs', ID+'.gff')
    if geneID == '':
        for file in os.listdir(hmmerfolder):
            if file.startswith(ID):
                with open(os.path.join(hmmerfolder, file), 'r') as hmmer:
                    for line in hmmer:
                        if not line.startswith('#'):
                            # target names are formatted geneID[location];
                            # strip the brackets and compare the location part
                            longID = line.split()[0]
                            longID = longID.replace(']', '')
                            partsID = longID.split('[')
                            if locationID == partsID[1]:
                                geneID = partsID[0]
                                AugFile = os.path.join(
                                    base_folder, 'augustus_output', 'predicted_genes', file)
                                break
    # so now should have gene name, get the GFF from augustus
    with open(GFFfile, 'w') as gffout:
        with open(AugFile, 'r') as augustus:
            # read the Augustus output one '# start gene' block at a time
            for pred in readBlocks(augustus, '# start gene'):
                if pred[0].startswith('# This output'):
                    continue
                if pred[0].startswith('##gff-version 3'):
                    continue
                if pred[0].startswith('# Please cite'):
                    continue
                if geneID in pred[0]:
                    for x in pred:
                        if not x.startswith('#'):
                            gffout.write(x)
def getCompleteBuscos(input, ploidy=1):
    """Parse a BUSCO full-table file into {gene_id: busco_id} for passing hits.

    'Complete' hits always pass; 'Duplicated' hits also pass when
    ploidy > 1. The first BUSCO seen for a gene wins; comment lines are
    ignored.
    """
    results = {}
    accepted = {'Complete'}
    if ploidy > 1:
        accepted.add('Duplicated')
    with open(input, 'r') as handle:
        for raw in handle:
            raw = raw.rstrip()
            if raw.startswith('#'):
                continue
            fields = raw.split('\t')
            if fields[1] not in accepted:
                continue
            busco, status, gene, score, length = fields
            if gene not in results:
                results[gene] = busco
    return results
def filterGFF3(keepDict, genome, gff3, output):
    """Write to *output* only those gene models from *gff3* whose first
    transcript ID appears in *keepDict*."""
    # load the annotation into the funannotate gene-model dictionary
    annotation = gff2dict(gff3, genome, {})
    kept = {gene: model for gene, model in annotation.items()
            if model['ids'][0] in keepDict}
    dict2gff3(kept, output)
def parseBUSCO2genome(input, ploidy, ContigSizes, output):
    """Parse a genome-mode BUSCO full table into a BED file of hit regions.

    input: BUSCO full-table path. ploidy: int; when > 1, Duplicated hits
    are also accepted, keeping the best-scoring copy and extracting its
    Augustus model via dupBUSCO2gff. ContigSizes: {contig: length} used to
    clamp coordinates. output: BED file path; intervals are padded by
    100 bp on each side. Returns {busco_id: "contig:start-end"}.
    """
    busco_complete = {}
    hits = {}
    with open(output, 'w') as bedfile:
        with open(input, 'r') as buscoinput:
            for line in buscoinput:
                line = line.replace('\n', '')
                if line.startswith('#'):
                    continue
                cols = line.split('\t')
                if cols[1] == 'Complete' or cols[1] == 'Duplicated':
                    contig = cols[2]
                    start = cols[3]
                    end = cols[4]
                    score = cols[5]
                    length = cols[6]
                    ID = contig+':'+start+'-'+end
                    if cols[1] == 'Complete':
                        if not cols[0] in hits:
                            hits[cols[0]] = (
                                ID, score, contig, start, end, length)
                    if ploidy > 1:
                        if cols[1] == 'Duplicated':
                            if not cols[0] in hits:
                                hits[cols[0]] = (
                                    ID, score, contig, start, end, length)
                                dupBUSCO2gff(
                                    cols[0], os.path.dirname(input), ID)
                            else:
                                # keep only the higher-scoring duplicate
                                oldscore = float(hits.get(cols[0])[1])
                                if float(score) > oldscore:
                                    hits[cols[0]] = (
                                        ID, score, contig, start, end, length)
                                    dupBUSCO2gff(
                                        cols[0], os.path.dirname(input), ID)
        for k, v in natsorted(list(hits.items())):
            # hits values are (ID, score, contig, start, end, length)
            # fix: use this hit's own contig; previously the loop reused the
            # 'contig' variable left over from the parsing loop above, so
            # every BED line and length clamp used the file's last contig
            contig = v[2]
            # validate locations for bedfile, move 100 bp in each direction for bedfile
            start = int(v[3]) - 100
            if start < 1:  # negative no good
                start = 1
            end = int(v[4]) + 100
            # check it doesn't go past contig length
            if end > ContigSizes.get(contig):
                end = ContigSizes.get(contig)
            bedfile.write('%s\t%i\t%i\t%s\n' % (contig, start, end, k))
            busco_complete[k] = v[0]
    return busco_complete
def RepeatBlast(input, cpus, evalue, DataBase, tmpdir, output, diamond=True):
    """Search proteins against the repeat database and summarize best hits.

    Runs diamond blastp (default) or NCBI blastp with XML output, then
    writes one tab-delimited line per query that had a hit:
    query_id, hit_id, identity_fraction, evalue.
    """
    # run blastp against repeats
    blast_tmp = os.path.join(tmpdir, 'repeats.xml')
    if diamond:
        blastdb = os.path.join(DataBase, 'repeats.dmnd')
        cmd = ['diamond', 'blastp', '--sensitive', '--query', input, '--threads', str(cpus),
               '--out', blast_tmp, '--db', blastdb, '--evalue', str(evalue), '--max-target-seqs', '1', '--outfmt', '5']
    else:
        blastdb = os.path.join(DataBase, 'REPEATS')
        cmd = ['blastp', '-db', blastdb, '-outfmt', '5', '-out', blast_tmp, '-num_threads', str(cpus),
               '-max_target_seqs', '1', '-evalue', str(evalue), '-query', input]
    runSubprocess4(cmd, '.', log)
    # parse results
    with open(output, 'w') as out:
        with open(blast_tmp, 'r') as results:
            for qresult in SearchIO.parse(results, "blast-xml"):
                hits = qresult.hits
                ID = qresult.id
                num_hits = len(hits)
                if num_hits > 0:
                    # identity of the first HSP over the summed alignment
                    # spans of all HSPs of the best hit
                    length = 0
                    for i in range(0, len(hits[0].hsps)):
                        length += hits[0].hsps[i].aln_span
                    pident = hits[0].hsps[0].ident_num / float(length)
                    out.write("%s\t%s\t%f\t%s\n" %
                              (ID, hits[0].id, pident, hits[0].hsps[0].evalue))
def eggnog2dict(annotations):
    """Load an EggNog annotations TSV into a dict mapping the second
    column (ID) to the sixth column (description)."""
    lookup = {}
    with open(annotations, 'r') as handle:
        for row in csv.reader(handle, delimiter='\t'):
            lookup[row[1]] = row[5]
    return lookup
def number_present(s):
    """Return True if any character of *s* is a digit."""
    return any(map(str.isdigit, s))
def capfirst(x):
    """Return *x* with only its first character upper-cased.

    Unlike str.capitalize(), the rest of the string is left untouched.
    Fixed to return the empty string unchanged instead of raising
    IndexError on x[0].
    """
    if not x:
        return x
    return x[0].upper() + x[1:]
def item2index(inputList, item):
    """Return an index for the last entry of *inputList* containing *item*.

    Matches the historical behavior: for each matching entry the index of
    its first occurrence (list.index) is recorded, and the last recorded
    value wins. Returns None when nothing matches.
    """
    matches = [inputList.index(entry) for entry in inputList if item in entry]
    return matches[-1] if matches else None
def getEggNogHeaders(input):
    """Find the column indexes of interest in an eggnog-mapper output file.

    Returns (IDi, DBi, OGi, Genei, COGi, Desci): index positions of the
    query name, annotation tax scope, OGs, predicted gene name, COG
    category and eggNOG description columns. Each is None when the
    '#query_name' header line is absent.
    """
    IDi = DBi = OGi = Genei = COGi = Desci = None
    with open(input, 'r') as handle:
        for raw in handle:
            raw = raw.replace('\n', '')
            if not raw.startswith('#query_name'):
                continue
            # header line found: resolve each wanted column to its index
            header = raw.split('\t')
            IDi = item2index(header, 'query_name')
            Genei = item2index(header, 'predicted_gene_name')
            DBi = item2index(header, 'Annotation_tax_scope')
            OGi = item2index(header, 'OGs')
            COGi = item2index(header, 'COG cat')
            Desci = item2index(header, 'eggNOG annot')
            break
    return IDi, DBi, OGi, Genei, COGi, Desci
def parseEggNoggMapper(input, output):
    """Convert eggnog-mapper annotations into funannotate 3-column format.

    Writes note/name/product annotation lines per query to *output* and
    returns a dict mapping each emitted EggNog OG (ENOG41xxxxx) to its
    description.
    """
    Definitions = {}
    # indexes from header file
    IDi, DBi, OGi, Genei, COGi, Desci = getEggNogHeaders(input)
    # take annotations file from eggnog-mapper and create annotations
    with open(output, 'w') as out:
        with open(input, 'r') as infile:
            for line in infile:
                line = line.replace('\n', '')
                if line.startswith('#'):
                    continue
                cols = line.split('\t')
                ID = cols[IDi]
                DB = cols[DBi].split('[')[0]
                OGs = cols[OGi].split(',')
                NOG = ''
                # pick the OG whose entry matches the annotation tax scope
                for x in OGs:
                    if DB in x:
                        NOG = 'ENOG41' + x.split('@')[0]
                Gene = ''
                # only accept simple gene symbols (no '_' or '.') that
                # contain at least one digit
                if cols[Genei] != '':
                    if not '_' in cols[Genei] and not '.' in cols[Genei] and number_present(cols[Genei]):
                        Gene = cols[Genei]
                Description = cols[Desci]
                if NOG == '':
                    continue
                if not NOG in Definitions:
                    Definitions[NOG] = Description
                out.write("%s\tnote\tEggNog:%s\n" % (ID, NOG))
                if cols[COGi] != '':
                    out.write("%s\tnote\tCOG:%s\n" %
                              (ID, cols[COGi].replace(' ', '')))
                if Gene != '':
                    # derive a product name like Xyz1p from the gene symbol
                    product = Gene.lower()+'p'
                    product = capfirst(product)
                    # name goes on the gene ID (transcript suffix stripped)
                    out.write("%s\tname\t%s\n" % (ID.split('-T')[0], Gene))
                    out.write("%s\tproduct\t%s\n" % (ID, product))
                    if Description != '':
                        out.write("%s\tnote\t%s\n" % (ID, Description))
    return Definitions
def batch_iterator(iterator, batch_size):
    """Yield successive lists of up to *batch_size* items from *iterator*.

    Fixes two defects in the previous implementation:
    - a falsy item (0, '', [] ...) landing at the end of a full batch made
      the outer ``while entry`` loop exit, silently dropping the rest of
      the stream
    - a literal None item was indistinguishable from end-of-iteration
    """
    sentinel = object()  # unique end-of-stream marker
    while True:
        batch = []
        while len(batch) < batch_size:
            entry = next(iterator, sentinel)
            if entry is sentinel:
                break
            batch.append(entry)
        if not batch:
            return
        yield batch
def fasta2chunks(input, chunks, tmpdir, output):
    """Split a fasta file into roughly *chunks* files under tmpdir/output.

    The chunk folder is recreated from scratch on every call; files are
    named chunk_1.fa, chunk_2.fa, ...
    """
    # split the input fasta file into chunks to process
    with open(input, 'r') as seqs:
        SeqCount = countfasta(input)
        SeqRecords = SeqIO.parse(seqs, 'fasta')
        # fix: under py3 '/' yields a float batch size; use integer
        # division, with a floor of 1 so small inputs still make batches
        chunks = max(1, SeqCount // int(chunks))
        # divide into chunks, store in tmp file
        folder = os.path.join(tmpdir, output)
        if not os.path.exists(folder):
            os.makedirs(folder)
        else:
            shutil.rmtree(folder)
            os.makedirs(folder)
        for i, batch in enumerate(batch_iterator(SeqRecords, chunks)):
            filename = "chunk_%i.fa" % (i+1)
            tmpout = os.path.join(folder, filename)
            # fix: context manager guarantees the handle closes on error
            with open(tmpout, "w") as handle:
                SeqIO.write(batch, handle, "fasta")
def signalP(input, tmpdir, output):
    """Run signalp over *input* proteins in chunks and merge the results.

    Splits the fasta into 40 chunk files under tmpdir/signalp_tmp, runs
    signalp on each (flags depend on the installed major version),
    concatenates the short-format outputs into *output*, then removes the
    working directory.
    """
    # split input file into chunks, 20 should mean < 200 proteins per chunk
    from funannotate.check import check_version7
    version = check_version7('signalp')
    if '.' in version:
        version = int(version.split('.')[0])
    # signalp >= 5 uses a different command-line interface
    if version > 4:
        cmd = ['signalp', '-stdout', '-org', 'euk', '-format', 'short', '-fasta']
    else:
        cmd = ['signalp', '-t', 'euk', '-f', 'short']
    fasta2chunks(input, 40, tmpdir, 'signalp_tmp')
    for file in os.listdir(os.path.join(tmpdir, 'signalp_tmp')):
        if file.startswith('chunk'):
            file = os.path.join(tmpdir, 'signalp_tmp', file)
            tmp_out = re.sub(r'\.fa$','.signalp.out',file)
            cmd1 = cmd + [file]
            runSubprocess2(cmd1, '.', log, tmp_out)
    # now concatenate all outputs
    if os.path.isfile(output):
        os.remove(output)
    with open(output, 'a') as finalout:
        for file in os.listdir(os.path.join(tmpdir, 'signalp_tmp')):
            if file.endswith('.signalp.out'):
                file = os.path.join(tmpdir, 'signalp_tmp', file)
                with open(file) as infile:
                    finalout.write(infile.read())
    # cleanup tmp directory
    shutil.rmtree(os.path.join(tmpdir, 'signalp_tmp'))
def parseSignalP(sigP, secretome_annot):
    """Parse SignalP short-format output (v4, v5, or v6) into an annotation file.

    The SignalP version is detected from the '#' header comments. For each
    protein with a predicted signal peptide the cleavage position (plus
    cut-site residues and probability where the format provides them) is
    collected, then one SECRETED note line per hit is written to
    *secretome_annot*.
    """
    sigpDict = {}
    version = 4  # assume legacy v4 layout unless a header says otherwise
    with open(sigP, 'r') as results:
        for line in results:
            line = line.rstrip()
            if line.startswith('#'):
                # the version is declared in the comment header
                if line.startswith('# SignalP-5'):
                    version = 5
                elif line.startswith('# SignalP-6'):
                    version = 6
                continue
            if version < 5:
                # v4 output is space-delimited with variable-width columns
                fields = [token for token in line.split(' ') if token]
                if fields[9] == 'Y':  # signal peptide predicted
                    sigpDict[fields[0]] = [int(fields[2]) - 1, '', '']
            elif version == 5:
                # v5 output is tab-delimited; anything but OTHER is a hit
                if '\t' not in line:
                    continue
                parts = line.split('\t')
                if parts[1] != 'OTHER':
                    ID, prediction, score1, score2, position = parts[:5]
                    pieces = position.split()
                    cutpos = pieces[2].split('-')[0]
                    sigpDict[ID] = [cutpos, pieces[3].replace('.', ''), pieces[-1]]
            else:
                # v6 output is tab-delimited; signal peptides are labelled 'SP'
                if '\t' not in line:
                    continue
                parts = line.split('\t')
                if parts[1] == 'SP':
                    try:
                        ID, prediction, score1, score2, position = parts[:5]
                    except ValueError:
                        sys.stderr.write('signalP parse error: {}\n'.format(line))
                        continue
                    pieces = position.split()
                    cutpos = pieces[2].split('-')[0]
                    sigpDict[ID] = [cutpos, '', pieces[-1]]
    with open(secretome_annot, 'w') as secout:
        for k, v in natsorted(list(sigpDict.items())):
            if v[1] != '':
                secout.write("{:}\tnote\tSECRETED:SignalP(1-{:},cutsite={:},prob={:})\n".format(k, v[0], v[1], v[2]))
            else:
                secout.write("{:}\tnote\tSECRETED:SignalP(1-{:})\n".format(k, v[0]))
def parsePhobiusSignalP(phobius, sigP, membrane_annot, secretome_annot):
    # give directory of annotate_misc, first get phobius results
    '''
    Combine Phobius and SignalP results into membrane/secretome annotations.

    This is what phobius results look like
    ID  TM  SP  Prediction
    VE00_00001  0  0  o
    VE00_00002  2  0  i198-219o283-301i
    VE00_00003  0  0  o
    VE00_00004  0  Y  n8-18c23/24o
    VE00_00005  12  0  i49-69o89-107i119-138o144-167i179-200o212-234i280-299o319-341i348-366o378-398i410-430o442-465i

    NOTE(review): the SignalP parsing below duplicates parseSignalP() but
    lacks its v6 ('SP') branch -- confirm that is intentional.
    '''
    pSecDict = {}   # Phobius signal-peptide hits (fallback if no SignalP data)
    pTMDict = {}    # Phobius transmembrane-domain annotations
    sigpDict = {}   # SignalP signal-peptide hits
    # parsing short format phobius
    with open(phobius, 'r') as input1:
        for line in input1:
            line = line.rstrip()
            if line.startswith('ID') or line.startswith('SEQ'):
                continue
            if '\t' in line:
                cols = line.split('\t')
            else:
                cols = line.split()
            geneID = cols[0]
            if int(cols[1]) > 0: # then found TM domain
                annot = cols[3]
                if not geneID in pTMDict:
                    pTMDict[geneID] = 'TransMembrane:'+cols[1]+' ('+annot+')'
            if cols[2] == 'Y': # then sig pep discovered
                # cleavage site is the part after 'c' in e.g. n8-18c23/24o
                location = cols[3].split('/')[0]
                clevage = location.split('c')[-1]
                if not geneID in pSecDict:
                    pSecDict[geneID] = [clevage, '', '']
    if sigP: # will be passed FALSE if signalP data missing
        # parse signalp output and turn into annotation file
        version = 4
        with open(sigP, 'r') as results:
            for line in results:
                line = line.rstrip()
                if line.startswith('#'):
                    if line.startswith('# SignalP-5'):
                        version = 5
                    continue
                if version < 5:
                    col = line.split(' ') # not tab delimited
                    col = [_f for _f in col if _f] # clean up empty spaces
                    if col[9] == 'Y': # then there is signal peptide
                        ID = col[0]
                        end = int(col[2]) - 1
                        sigpDict[ID] = [end, '', '']
                else: # version 5 has different format and tab delimited hooray!
                    if '\t' in line:
                        cols = line.split('\t')
                        if cols[1] != 'OTHER': # then signal peptide
                            ID, prediction, score1, score2, position = cols[:5]
                            components = position.split()
                            pos = components[2].split('-')[0]
                            prob = components[-1]
                            aa = components[3].replace('.', '')
                            sigpDict[ID] = [pos, aa, prob]
    else:
        # no SignalP data: fall back to Phobius signal-peptide calls
        sigpDict = pSecDict
    # write annotation files
    with open(membrane_annot, 'w') as memout:
        for k, v in natsorted(list(pTMDict.items())):
            memout.write("%s\tnote\t%s\n" % (k, v))
    with open(secretome_annot, 'w') as secout:
        for k, v in natsorted(list(sigpDict.items())):
            if v[1] != '':
                secout.write("{:}\tnote\tSECRETED:SignalP(1-{:},cutsite={:},prob={:})\n".format(k, v[0], v[1], v[2]))
            else:
                secout.write("{:}\tnote\tSECRETED:SignalP(1-{:})\n".format(k, v[0]))
def n_lower_chars(string):
    """Return the number of lowercase characters in *string*."""
    # bools sum as 0/1, so this counts the lowercase positions
    return sum(ch.islower() for ch in string)
def CheckAugustusSpecies(input):
    """Return True if *input* is a species known to the local Augustus install."""
    # species are subdirectories of $AUGUSTUS_CONFIG_PATH/species;
    # hidden entries (leading '.') are ignored
    species_dir = os.path.join(os.environ["AUGUSTUS_CONFIG_PATH"], 'species')
    known = {entry for entry in os.listdir(species_dir)
             if not entry.startswith('.')}
    return input in known
def CheckFunannotateSpecies(input, db):
    """Return True if *input* is a trained species in the funannotate DB *db*."""
    # trained species mirror the Augustus layout under <db>/trained_species;
    # hidden entries (leading '.') are ignored
    trained_dir = os.path.join(db, 'trained_species')
    known = {entry for entry in os.listdir(trained_dir)
             if not entry.startswith('.')}
    return input in known
def SortRenameHeaders(input, output):
    """Sort FASTA records by descending length and rename them scaffold_N.

    Args:
        input: path to the FASTA file to read.
        output: path where the renamed, sorted FASTA is written.
    """
    # sort records and write temp file
    with open(output, 'w') as out:
        with open(input, 'r') as infile:
            records = list(SeqIO.parse(infile, 'fasta'))
            # Bug fix: list.sort(cmp=...) and the cmp() builtin were removed
            # in Python 3; sort descending by sequence length with key/reverse
            # (stable sort preserves input order for equal lengths, as before).
            records.sort(key=lambda rec: len(rec.seq), reverse=True)
            for counter, rec in enumerate(records, 1):
                rec.name = ''
                rec.description = ''
                rec.id = 'scaffold_' + str(counter)
            SeqIO.write(records, out, 'fasta')
def validate_tRNA(input, genes, gaps, output):
    """Keep only tRNA features that overlap neither gene models nor gaps.

    Sorts *input*, *genes* and (optionally) *gaps* with `bedtools sort`,
    removes entries of *input* intersecting the others via
    `bedtools intersect -v`, then writes a properly sorted GFF3 to *output*.
    """
    # run bedtools intersect to keep only input that dont intersect with either genes or gaps
    sortedInput = os.path.abspath(input)+'.sorted.gff3'
    #sortGFFproper(input, sortedInput)
    cmd1 = ['bedtools', 'sort', '-i', input]
    with open(sortedInput, 'w') as outfile:
        subprocess.call(cmd1, stdout=outfile)
    sortedGenes = os.path.abspath(genes)+'.sorted.gff3'
    #sortGFFproper(genes, sortedGenes)
    cmd2 = ['bedtools', 'sort', '-i', genes]
    with open(sortedGenes, 'w') as outfile:
        subprocess.call(cmd2, stdout=outfile)
    # gaps file is optional
    if gaps:
        sortedGaps = os.path.abspath(gaps)+'.sorted.gff3'
        #sortGFFproper(gaps, sortedGaps)
        cmd3 = ['bedtools', 'sort', '-i', gaps]
        with open(sortedGaps, 'w') as outfile:
            subprocess.call(cmd3, stdout=outfile)
    # -v keeps records of -a with NO overlap in -b
    cmd = ['bedtools', 'intersect', '-sorted', '-v', '-a', sortedInput, '-b', sortedGenes]
    if gaps:
        cmd.append(sortedGaps)
    tmpOut = os.path.abspath(output)+'.tmp'
    runSubprocess2(cmd, '.', log, tmpOut)
    # now sort properly
    sortGFFproper(tmpOut, output)
    os.remove(tmpOut)
# via https://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
def list2groups(L):
    """Yield (start, end) tuples for runs of consecutive integers in *L*.

    Example: [1, 2, 3, 7, 8] -> (1, 3), (7, 8). An empty input yields nothing.
    """
    if not L:
        return
    run_start = run_end = L[0]
    for value in L[1:]:
        if value == run_end + 1:
            # still inside the current consecutive run
            run_end = value
            continue
        # run broken: emit it and start a new one at this value
        yield run_start, run_end
        run_start = run_end = value
    # emit the final run
    yield run_start, run_end
def checkMask(genome, bedfile):
    """Collect softmasking statistics for *genome* and write repeats to *bedfile*.

    Masked positions are the lowercase (softmasked) bases; contiguous runs
    are merged and written as tab-delimited BED-like rows. Returns
    (ContigSizes, GenomeLength, maskedSize, percentMask); if nothing is
    masked, an empty bedfile is written and percentMask is 0.0.
    """
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    # load contig names and sizes into dictionary, get masked repeat stats
    GenomeLength = 0
    maskedSize = 0
    masked = {}        # contig ID -> list of 0-based masked positions
    ContigSizes = {}   # contig ID -> sequence length
    with open(genome, 'r') as input:
        for header, Seq in SimpleFastaParser(input):
            # contig ID is the first whitespace-delimited token of the header
            if ' ' in header:
                ID = header.split(' ')[0]
            else:
                ID = header
            if not ID in masked:
                masked[ID] = []
            if not ID in ContigSizes:
                ContigSizes[ID] = len(Seq)
            GenomeLength += len(Seq)
            maskedSize += n_lower_chars(Seq)
            for i, c in enumerate(Seq):
                if c.islower():
                    masked[ID].append(i)  # 0 based
    if maskedSize == 0:  # not softmasked, return False
        with open(bedfile, 'w') as bedout:
            bedout.write('')
        return ContigSizes, GenomeLength, maskedSize, 0.0
    else:
        counter = 1
        with open(bedfile, 'w') as bedout:
            for k, v in natsorted(list(masked.items())):
                # collapse individual positions into contiguous (start, end) runs
                repeats = list(list2groups(v))
                for item in repeats:
                    # list2groups always yields 2-tuples; guard kept for safety
                    if len(item) == 2:
                        bedout.write('{:}\t{:}\t{:}\tRepeat_{:}\n'.format(
                            k, item[0], item[1], counter))
                        counter += 1
        percentMask = maskedSize / float(GenomeLength)
        return ContigSizes, GenomeLength, maskedSize, percentMask
def maskingstats2bed(input, counter, alock):
    """Worker: scan one FASTA chunk for masked bases and assembly gaps.

    Writes <input>.bed with merged softmasked/N runs and <input>.gaps with
    merged N runs, then adds this file's masked-base count to the shared
    *counter* under *alock*. Intended to be run in a multiprocessing pool
    by checkMasklowMem() (via mask_safe_run).
    """
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    masked = []        # 0-based positions that are lowercase or N/n
    gaps = []          # 0-based positions that are N/n only
    maskedSize = 0
    bedfilename = input.replace('.fasta', '.bed')
    gapfilename = input.replace('.fasta', '.gaps')
    with open(input, 'r') as infile:
        for header, Seq in SimpleFastaParser(infile):
            if ' ' in header:
                ID = header.split(' ')[0]
            else:
                ID = header
            for i, c in enumerate(Seq):
                # N/n counts both as masked sequence and as an assembly gap
                if c == 'N' or c == 'n':
                    masked.append(i)
                    maskedSize += 1
                    gaps.append(i)
                elif c.islower():
                    masked.append(i)  # 0 based
                    maskedSize += 1
    # NOTE(review): *ID* below is whatever the LAST header in the file set;
    # this is only correct because checkMasklowMem writes exactly one record
    # per chunk file -- confirm before reusing on multi-record FASTA input.
    if maskedSize > 0:  # not softmasked, return False
        with open(bedfilename, 'w') as bedout:
            repeats = list(list2groups(masked))
            for item in repeats:
                if len(item) == 2:
                    bedout.write('{:}\t{:}\t{:}\tRepeat_\n'.format(
                        ID, item[0], item[1]))
    if len(gaps) > 0:
        with open(gapfilename, 'w') as gapout:
            bedGaps = list(list2groups(gaps))
            for item in bedGaps:
                if len(item) == 2:
                    gapout.write(
                        '{:}\t{:}\t{:}\tassembly-gap_\n'.format(ID, item[0], item[1]))
    with alock:
        # accumulate the global masked-base total across worker processes
        counter.value += maskedSize
def mask_safe_run(*args, **kwargs):
    """Invoke maskingstats2bed(), reporting exceptions instead of raising.

    Used as the multiprocessing pool target so a single failed chunk does
    not kill the whole pool; any error is printed with its arguments.
    """
    try:
        maskingstats2bed(*args, **kwargs)
    except Exception as exc:
        print(("error: %s run(*%r, **%r)" % (exc, args, kwargs)))
def checkMasklowMem(genome, bedfile, gapsfile, cpus):
    """Memory-light masked-repeat/gap scan: one worker process per contig.

    Splits *genome* into per-contig FASTA files under a unique temp dir,
    runs maskingstats2bed() on each via a multiprocessing pool, then merges
    the per-contig .bed/.gaps files (renumbering Repeat_/assembly-gap_ IDs
    sequentially) into *bedfile* and *gapsfile*. Returns
    (ContigSizes, GenomeLength, totalMaskedBases, percentMask).
    """
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    # load contig names and sizes into dictionary, get masked repeat stats
    ContigSizes = {}
    tmpdir = os.path.join(os.path.dirname(genome), 'mask_'+str(uuid.uuid4()))
    os.makedirs(tmpdir)
    file_list = []
    with open(genome, 'r') as input:
        for header, Seq in SimpleFastaParser(input):
            if ' ' in header:
                ID = header.split(' ')[0]
            else:
                ID = header
            if not ID in ContigSizes:
                ContigSizes[ID] = len(Seq)
            # one FASTA file per contig so workers can run independently
            with open(os.path.join(tmpdir, ID+'.fasta'), 'w') as fastaout:
                fastaout.write('>{:}\n{:}\n'.format(ID, Seq))
            file_list.append(os.path.join(tmpdir, ID+'.fasta'))
    # num = 1
    p = multiprocessing.Pool(processes=cpus)
    # shared masked-base counter, updated by each worker under a shared lock
    TotalMask = multiprocessing.Manager().Value('i', 0)
    lock = multiprocessing.Manager().Lock()
    result = []
    for i in file_list:
        result.append(p.apply_async(mask_safe_run, [i, TotalMask, lock]))
    p.close()
    p.join()
    repeatNum = 1
    gapNum = 1
    with open(bedfile, 'w') as bedout:
        for file in natsorted(os.listdir(tmpdir)):
            if file.endswith('.bed'):
                with open(os.path.join(tmpdir, file), 'r') as infile:
                    for line in infile:
                        # workers wrote bare 'Repeat_' tags; assign unique IDs
                        line = line.replace(
                            'Repeat_', 'Repeat_'+str(repeatNum))
                        bedout.write(line)
                        repeatNum += 1
    with open(gapsfile, 'w') as gapout:
        for file in natsorted(os.listdir(tmpdir)):
            if file.endswith('.gaps'):
                with open(os.path.join(tmpdir, file), 'r') as infile:
                    for line in infile:
                        # same renumbering for assembly gaps
                        line = line.replace(
                            'assembly-gap_', 'assembly-gap_'+str(gapNum))
                        gapout.write(line)
                        gapNum += 1
    SafeRemove(tmpdir)
    GenomeLength = sum(ContigSizes.values())
    percentMask = TotalMask.value / float(GenomeLength)
    return ContigSizes, GenomeLength, TotalMask.value, percentMask
def RunGeneMarkES(command, input, ini, maxintron, softmask, cpus, tmpdir, output, fungus):
    """Self-train GeneMark-ES on the assembly and convert predictions to GFF3.

    Runs *command* inside tmpdir/genemark, moves the trained gmhmm.mod up
    into *tmpdir*, and pipes the resulting genemark.gtf through the
    GeneMark2GFF converter into *output*.
    """
    # make directory to run script from
    outdir = os.path.join(tmpdir, 'genemark')
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    # GeneMark caps usable cores
    if cpus > 64:
        cpus = 64
    contigs = os.path.abspath(input)
    log.info("Running GeneMark-ES on assembly")
    cmd = [command, '--ES', '--max_intron', str(maxintron), '--soft_mask', str(
        softmask), '--cores', str(cpus), '--sequence', contigs]
    if fungus == 'fungus':
        cmd = cmd + ['--fungus']
    if ini:
        cmd = cmd + ['--ini_mod', os.path.abspath(ini)]
    runSubprocess3(cmd, outdir, log)
    # rename results and grab mod file
    try:
        os.rename(os.path.join(outdir, 'output', 'gmhmm.mod'),
                  os.path.join(tmpdir, 'gmhmm.mod'))
    except OSError:
        log.error("GeneMark-ES failed: {:} file missing, please check logfiles.".format(
            os.path.join(outdir, 'output', 'gmhmm.mod')))
    # convert genemark gtf to gff3 so GAG can interpret it
    gm_gtf = os.path.join(outdir, 'genemark.gtf')
    if checkannotations(gm_gtf):
        # log.info("Converting GeneMark GTF file to GFF3")
        with open(output, 'w') as out:
            subprocess.call([GeneMark2GFF, gm_gtf], stdout=out)
def RunGeneMarkET(command, input, ini, evidence, maxintron, softmask, cpus, tmpdir, output, fungus):
    """Run GeneMark-ET using intron hints from *evidence* and convert to GFF3.

    Extracts b2h intron hints (forcing their score to 500), trains/predicts
    with GeneMark-ET inside tmpdir/genemark, saves the trained gmhmm.mod
    into *tmpdir*, and converts the genemark.gtf predictions to GFF3 in
    *output*.
    """
    # make directory to run script from
    outdir = os.path.join(tmpdir, 'genemark')
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    # GeneMark caps usable cores
    if cpus > 64:
        cpus = 64
    contigs = os.path.abspath(input)
    # get only intron information from evidence
    hintsfile = os.path.join(tmpdir, 'genemark.intron-hints.gff')
    with open(hintsfile, 'w') as hints:
        with open(evidence, 'r') as evid:
            for line in evid:
                if '\tintron\t' in line and '\tb2h\t' in line:
                    tmprow = line.split("\t")
                    tmprow[5] = "500"  # for intron hint score to be 500
                    hints.write("\t".join(tmprow))
    log.info("Running GeneMark-ET on assembly")
    cmd = [command, '--ET', os.path.abspath(hintsfile), '--max_intron', str(
        maxintron), '--soft_mask', str(softmask), '--cores', str(cpus), '--sequence', contigs]
    if fungus == 'fungus':
        cmd = cmd + ['--fungus']
    if ini:
        cmd = cmd + ['--ini_mod', os.path.abspath(ini)]
    runSubprocess3(cmd, outdir, log)
    # rename results and grab mod file
    try:
        os.rename(os.path.join(outdir, 'output', 'gmhmm.mod'),
                  os.path.join(tmpdir, 'gmhmm.mod'))
    except OSError:
        log.error("GeneMark-ET failed: {:} file missing, please check logfiles.".format(
            os.path.join(outdir, 'output', 'gmhmm.mod')))
    # convert genemark gtf to gff3 so GAG can interpret it
    gm_gtf = os.path.join(outdir, 'genemark.gtf')
    if checkannotations(gm_gtf):
        # log.info("Converting GeneMark GTF file to GFF3")
        with open(output, 'w') as out:
            subprocess.call([GeneMark2GFF, gm_gtf], stdout=out)
def dict2glimmer(input, output):
    """Write CDS coordinates from a funannotate gene dict as GlimmerHMM training exons.

    For each transcript, one 'contig start end' line is emitted per CDS
    segment (coordinates swapped on the minus strand), with a blank line
    separating genes.
    """
    with open(output, 'w') as outfile:
        for geneID, model in input.items():
            for idx in range(len(model['ids'])):
                for segment in model['CDS'][idx]:
                    # plus strand keeps (start, end); minus strand swaps them
                    if model['strand'] == '+':
                        left, right = segment[0], segment[1]
                    else:
                        left, right = segment[1], segment[0]
                    outfile.write('{:} {:} {:}\n'.format(
                        model['contig'], left, right))
            # blank line terminates the gene record
            outfile.write('\n')
def glimmer2gff3(input, output):
    '''
    Convert raw GlimmerHMM GFF output into spec-compliant GFF3 with renamed
    sequential gene/transcript IDs, dropping mRNAs shorter than 150 bp.

    Example raw GlimmerHMM input:
    scaffold_39     GlimmerHMM      mRNA    23692   25015   .       +       .       ID=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12
    scaffold_39     GlimmerHMM      CDS     23692   23886   .       +       0       ID=scaffold_39.cds12.1;Parent=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12;Note=initial-exon
    scaffold_39     GlimmerHMM      CDS     24282   24624   .       +       0       ID=scaffold_39.cds12.2;Parent=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12;Note=internal-exon
    scaffold_39     GlimmerHMM      CDS     24711   25015   .       +       2       ID=scaffold_39.cds12.3;Parent=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12;Note=final-exon
    scaffold_39     GlimmerHMM      mRNA    25874   27899   .       -       .       ID=scaffold_39.path1.gene13;Name=scaffold_39.path1.gene13
    scaffold_39     GlimmerHMM      CDS     25874   26973   .       -       2       ID=scaffold_39.cds13.1;Parent=scaffold_39.path1.gene13;Name=scaffold_39.path1.gene13;Note=final-exon
    scaffold_39     GlimmerHMM      CDS     27257   27899   .       -       0       ID=scaffold_39.cds13.2;Parent=scaffold_39.path1.gene13;Name=scaffold_39.path1.gene13;Note=initial-exon
    '''
    with open(output, 'w') as outfile:
        outfile.write(("##gff-version 3\n"))
        exonCounts = {}   # transcript ID -> number of exons written so far
        GeneCount = 1     # sequential counter for renamed gene/transcript IDs
        skipList = []     # raw mRNA IDs rejected as too short; their CDS rows are dropped
        idsSeen = {}      # raw glimmer ID -> (new geneID, new transID)
        with open(input, 'r') as infile:
            for i, line in enumerate(infile):
                # glimmer restarts its IDs at every sequence region
                if line.startswith('##sequence-region'):
                    idsSeen = {}
                if line.startswith('#') or line.startswith('\n'):
                    continue
                line = line.strip()
                if line.count('\t') < 8:
                    print('ERROR parsing GlimmerHMM Raw output in line {}:\n {}'.format(i+1, line))
                    continue
                contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
                ID, Parent, Name = (None,)*3
                info = attributes.split(';')
                for x in info:
                    if x.startswith('ID='):
                        ID = x.replace('ID=', '')
                    elif x.startswith('Parent='):
                        Parent = x.replace('Parent=', '')
                # skip CDS children of mRNAs rejected below
                if Parent and Parent in skipList:
                    continue
                if feature == 'mRNA':
                    # reject models shorter than 150 bp
                    genelen = int(end) - int(start)
                    if genelen < 150:
                        if not ID in skipList:
                            skipList.append(ID)
                        continue
                    # emit a gene/mRNA pair with sequential renamed IDs,
                    # keeping the original glimmer ID as an Alias
                    geneID = 'glimmerG_'+str(GeneCount)
                    transID = 'glimmerT_'+str(GeneCount)+'-T1'
                    idsSeen[ID] = (geneID, transID)
                    outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Alias={:};\n'.format(
                        contig, source, 'gene', start, end, score, strand, phase, geneID, ID))
                    outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Parent={:};Alias={:};\n'.format(
                        contig, source, 'mRNA', start, end, '.', strand, '.', transID, geneID, ID))
                    GeneCount += 1
                elif feature == 'CDS':
                    if Parent in idsSeen:
                        geneID, transID = idsSeen.get(Parent)
                        # number exons sequentially within the transcript
                        if not transID in exonCounts:
                            exonCounts[transID] = 1
                        else:
                            exonCounts[transID] += 1
                        num = exonCounts.get(transID)
                        # each CDS row produces a paired exon + CDS feature
                        outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.exon{:};Parent={:};\n'.format(
                            contig, source, 'exon', start, end, '.', strand, '.', transID, num, transID))
                        outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.cds;Parent={:};\n'.format(
                            contig, source, feature, start, end, score, strand, phase, transID, transID))
                    else:
                        print('ERROR parsing GlimmerHMM Raw output in line {}:\n {}'.format(i+1, line))
def runGlimmerHMM(fasta, gff3, dir, output):
    '''
    wrapper to run GlimmerHMM training followed by prediction
    input is GFF3 format high quality models, i.e. from PASA/transdecoder
    output is standard GFF3 format
    returns the absolute path of the training directory so it can be reused
    '''
    # generate training directory ontop of the dir that is passed
    tmpdir = os.path.join(dir, 'glimmerhmm')
    if os.path.isdir(tmpdir):
        SafeRemove(tmpdir)
    # generate glimmer training input
    # load gff3 into dictionary
    Genes = {}
    Genes = gff2dict(os.path.abspath(gff3), os.path.abspath(fasta), Genes)
    glimmExons = os.path.join(dir, 'glimmer.exons')
    dict2glimmer(Genes, glimmExons)
    # now run trainGlimmerHMM
    cmd = ['trainGlimmerHMM', os.path.abspath(
        fasta), os.path.abspath(glimmExons), '-d', tmpdir]
    runSubprocess4(cmd, '.', log)  # runSubproces4 --> stdout/stderr to devnull
    # now run GlimmerHMM prediction
    # glimmhmm.pl <glimmerhmm_program> <fasta_file> <train_dir> <options>
    glimmerRaw = os.path.abspath(os.path.join(dir, 'glimmerHMM.output.raw'))
    cmd = ['perl', which_path('glimmhmm.pl'), which_path(
        'glimmerhmm'), os.path.abspath(fasta), os.path.abspath(tmpdir), '-g']
    runSubprocess2(cmd, dir, log, glimmerRaw)
    # now convert to proper GFF3 format
    glimmer2gff3(glimmerRaw, output)
    return os.path.abspath(tmpdir)
def runGlimmerHMMTrained(fasta, training, dir, output):
    """Predict genes with an existing GlimmerHMM training directory and emit GFF3."""
    rawPredictions = os.path.abspath(os.path.join(dir, 'glimmerHMM.output.raw'))
    # glimmhmm.pl <glimmerhmm_program> <fasta_file> <train_dir> <options>
    cmd = ['perl', which_path('glimmhmm.pl'), which_path(
        'glimmerhmm'), os.path.abspath(fasta), os.path.abspath(training), '-g']
    runSubprocess2(cmd, dir, log, rawPredictions)
    # convert the raw GlimmerHMM output into spec-compliant GFF3
    glimmer2gff3(rawPredictions, output)
def glimmer_run_check(Result, training, weights):
    """Return True if GlimmerHMM should be run; log the reason and return False otherwise."""
    # reuse previous results if they exist
    if checkannotations(Result):
        log.info('Using existing GlimmerHMM results: {:}'.format(Result))
        return False
    # an empty training set means training failed upstream
    if not checkannotations(training):
        log.info(
            'GlimmerHMM training failed, empty training set: {:}'.format(training))
        return False
    # a weight below 1 disables this predictor
    if weights < 1:
        log.info(
            'Skipping GlimmerHMM prediction as weight set to {:}'.format(weights))
        return False
    # all three GlimmerHMM executables must be on $PATH
    for tool in ['trainGlimmerHMM', 'glimmerhmm', 'glimmhmm.pl']:
        if not which_path(tool):
            log.info(
                'GlimmerHMM failed, dependency not in $PATH: {:}'.format(tool))
            return False
    return True
def dict2zff(scaffoldDict, GeneDict, output):
    """Write genes grouped by scaffold in SNAP ZFF training format.

    Each scaffold gets a '>name' header followed by one line per CDS
    segment labelled Einit (first), Eterm (last) or Exon (internal), with
    coordinates swapped on the minus strand.
    """
    with open(output, 'w') as outfile:
        for scaffold, geneList in natsorted(list(scaffoldDict.items())):
            outfile.write('>{:}\n'.format(scaffold))
            for geneName in geneList:
                model = GeneDict.get(geneName)
                for idx in range(0, len(model['ids'])):
                    segments = model['CDS'][idx]
                    lastIdx = len(segments) - 1
                    for num, coords in enumerate(segments):
                        # minus strand swaps the coordinate order
                        if model['strand'] == '+':
                            start, end = coords[0], coords[1]
                        else:
                            start, end = coords[1], coords[0]
                        # label by position within the transcript
                        if num == 0:
                            label = 'Einit'
                        elif num == lastIdx:
                            label = 'Eterm'
                        else:
                            label = 'Exon'
                        outfile.write('{:}\t{:}\t{:}\t{:}\n'.format(
                            label, start, end, model['ids'][idx]))
def zff2gff3(input, fasta, output):
    '''
    Convert SNAP ZFF predictions to GFF3 via a funannotate gene dictionary.

    Example ZFF input:
    >scaffold_40
    Einit    7104    7391    -    14.809    0    0    1    scaffold_40-snap.1
    Eterm    6728    7039    -    1.974    0    0    2    scaffold_40-snap.1
    Einit    8935    9070    +    9.578    0    1    0    scaffold_40-snap.2
    Exon    9119    9206    +    10.413    2    2    0    scaffold_40-snap.2
    Exon    9254    9389    +    21.529    1    0    2    scaffold_40-snap.2
    Eterm    9439    10128    +    42.769    0    0    0    scaffold_40-snap.2
    Einit    11784    12139    -    38.847    0    2    2    scaffold_40-snap.3
    Eterm    11185    11761    -    72.324    1    0    0    scaffold_40-snap.3
    Einit    13191    13250    -    7.662    0    0    1    scaffold_40-snap.4
    Eterm    12498    13019    -    63.296    0    0    1    scaffold_40-snap.4
    Einit    16359    16608    +    41.592    0    1    2    scaffold_40-snap.5
    Exon    16628    16712    +    13.780    2    2    0    scaffold_40-snap.5
    Exon    16795    17012    +    26.393    1    1    1    scaffold_40-snap.5
    Eterm    17224    17381    +    8.331    2    0    2    scaffold_40-snap.5
    >scaffold_41
    Exon    65    951    -    169.146    1    1    0    scaffold_41-snap.1
    '''
    # need to load/generate a funannotate dictionary, then output to gff3 format
    Genes = {}
    contig = ''
    with open(input, 'r') as infile:
        for line in infile:
            line = line.strip()
            if line.startswith('#') or line.startswith('\n'):
                continue
            elif line.startswith('>'):
                # '>' lines set the current contig for subsequent features
                contig = line[1:]
            else:
                feature, start, end, strand, score, fiveo, threeo, phase, ID = line.split(
                    '\t')
                start = int(start)
                end = int(end)
                # phase = int(phase)
                phase = '?' # phase in GFF3 doesn't seem to be accurate, so guess it by translation of all 3 frames
                if not ID in Genes:
                    # first segment of a new model: create the full gene record
                    Genes[ID] = {'name': None, 'type': 'mRNA', 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
                                 'codon_start': [[]], 'ids': [ID+'-T1'], 'CDS': [[(start, end)]], 'mRNA': [[(start, end)]], 'strand': strand,
                                 'location': (start, end), 'contig': contig, 'product': [[]], 'source': 'snap', 'phase': [[phase]],
                                 'db_xref': [[]], 'EC_number': [[]], 'gene_synonym': [], 'go_terms': [[]], 'note': [[]], 'partialStart': [[]], 'partialStop': [[]], 'pseudo': False}
                else:
                    # additional segment: extend coordinates and gene span
                    Genes[ID]['CDS'][0].append((start, end))
                    Genes[ID]['mRNA'][0].append((start, end))
                    Genes[ID]['phase'][0].append(phase)
                    if start < Genes[ID]['location'][0]:
                        Genes[ID]['location'] = (
                            start, Genes[ID]['location'][1])
                    if end > Genes[ID]['location'][1]:
                        Genes[ID]['location'] = (Genes[ID]['location'][0], end)
    # translate, check partial, etc
    SeqRecords = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
    for k, v in list(Genes.items()):
        i = 0
        # order exons/CDS 5'->3' relative to the strand
        if v['strand'] == '+':
            sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
            sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
        else:
            sortedExons = sorted(
                v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
            sortedCDS = sorted(
                v['CDS'][i], key=lambda tup: tup[0], reverse=True)
        Genes[k]['mRNA'][i] = sortedExons
        mrnaSeq = getSeqRegions(SeqRecords, v['contig'], sortedExons)
        Genes[k]['transcript'].append(mrnaSeq)
        # get the codon_start by getting first CDS phase + 1
        indexStart = [x for x, y in enumerate(
            v['CDS'][i]) if y[0] == sortedCDS[0][0]]
        cdsSeq = getSeqRegions(SeqRecords, v['contig'], sortedCDS)
        # this seems to be missing removal of codon_start overhang?
        Genes[k]['CDS'][i] = sortedCDS
        protSeq,codon_start = (None,)*2
        if '?' in v['phase'][i]: # dont know the phase -- malformed GFF3, try to find best CDS
            # try all three frames and keep the one with the fewest internal stops
            translateResults = []
            for y in [1, 2, 3]:
                protSeq = translate(cdsSeq, v['strand'], y-1)
                numStops = protSeq.count('*')
                if protSeq[-1] == '*':
                    # a terminal stop is legitimate, do not count it
                    numStops -= 1
                translateResults.append((y, numStops, protSeq))
            sortedResults = sorted(translateResults, key=lambda tup: tup[1])
            codon_start = sortedResults[0][0]
            protSeq = sortedResults[0][2]
        else:
            codon_start = int(v['phase'][i][indexStart[0]]) + 1
            # translate and get protein sequence
            # NOTE(review): cdsSeq is trimmed by the frame offset here AND
            # translate() receives the same offset again -- looks like a
            # double frame shift; confirm translate()'s offset semantics.
            cdsSeq = cdsSeq[codon_start-1:]
            protSeq = translate(cdsSeq, v['strand'], codon_start-1)
        Genes[k]['codon_start'][i] = codon_start
        if codon_start > 1:
            # trim the codon_start overhang from the stored CDS sequence
            if v['strand'] == '+':
                cdsSeq = cdsSeq[codon_start - 1:]
            elif v['strand'] == '-':
                # NOTE(review): expected len(cdsSeq) - (codon_start - 1); as
                # written this trims one extra base -- confirm intent.
                endTrunc = len(cdsSeq) - codon_start -1
                cdsSeq = cdsSeq[0:endTrunc]
            else:
                print("ERROR nonsensical strand (%s) for gene %s"%([v['strand'],k]))
        Genes[k]['cds_transcript'].append(cdsSeq)
        if protSeq:
            Genes[k]['protein'].append(protSeq)
            # a trailing '*' means translation reached a stop codon
            if protSeq.endswith('*'):
                Genes[k]['partialStop'][i] = False
            else:
                Genes[k]['partialStop'][i] = True
            # a complete start requires frame 1 and a leading methionine
            if codon_start == 1 and protSeq.startswith('M'):
                Genes[k]['partialStart'][i] = False
            else:
                Genes[k]['partialStart'][i] = True
    # now write to GFF3
    dict2gff3(Genes, output)
def cq_run_check(cqResult, bam, stringtie, weight):
    """Return True if CodingQuarry should be run; log the reason and return False otherwise."""
    # reuse previous results if they exist
    if checkannotations(cqResult):
        log.info('Using existing CodingQuarry results: {:}'.format(cqResult))
        return False
    # a weight below 1 disables this predictor
    if weight < 1:
        log.info(
            'Skipping CodingQuarry prediction as weight set to {:}'.format(weight))
        return False
    # CodingQuarry needs RNA-seq evidence of some kind
    if not bam and not stringtie:
        log.info('Skipping CodingQuarry as there are no RNA-seq data')
        return False
    # check if dependencies installed: a usable stringtie GTF only needs
    # CodingQuarry itself, otherwise stringtie must run on the BAM first
    if stringtie and checkannotations(stringtie):
        required = ['CodingQuarry']
    elif bam and checkannotations(bam):
        required = ['stringtie', 'CodingQuarry']
    else:
        required = []
    for tool in required:
        if not which_path(tool):
            log.info(
                'CodingQuarry failed, dependency not in $PATH: {:}'.format(tool))
            return False
    # if you get here should be good
    return True
def snap_run_check(snapResult, training, weight):
    """Return True if SNAP should be run; log the reason and return False otherwise."""
    # reuse previous results if they exist
    if checkannotations(snapResult):
        log.info('Using existing snap results: {:}'.format(snapResult))
        return False
    # an empty training set means training failed upstream
    if not checkannotations(training):
        log.info(
            'Snap training failed, empty training set: {:}'.format(training))
        return False
    # a weight below 1 disables this predictor
    if weight < 1:
        log.info(
            'Skipping snap prediction as weight set to {:}'.format(weight))
        return False
    # all SNAP executables must be on $PATH
    for tool in ['fathom', 'snap', 'forge', 'hmm-assembler.pl']:
        if not which_path(tool):
            log.info('Snap failed, dependency not in $PATH: {:}'.format(tool))
            return False
    return True
def runSnap(fasta, gff3, minintron, maxintron, dir, output):
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    '''
    wrapper to run Snap training followed by prediction
    input is GFF3 format high quality models, i.e. from PASA/transdecoder
    output is standard GFF3 format
    '''
    # NOTE: the string above follows an import, so it is not a real docstring.
    # Trains SNAP (fathom/forge/hmm-assembler.pl) on the gff3 models unless a
    # previous prediction exists, predicts on *fasta*, converts the ZFF to
    # GFF3 in *output*, and returns the path to the trained HMM.
    from collections import OrderedDict
    snapHMM = os.path.join(dir, 'snap-trained.hmm')
    snapRaw = os.path.join(dir, 'snap-prediction.zff')
    if not checkannotations(snapRaw):
        # generate training directory ontop of the dir that is passed
        tmpdir = os.path.join(dir, 'snaptrain')
        if os.path.isdir(tmpdir):
            SafeRemove(tmpdir)
        os.makedirs(tmpdir)
        # load gff3 into dictionary
        Genes = {}
        Genes = gff2dict(os.path.abspath(gff3), os.path.abspath(fasta), Genes)
        scaff2genes = {}
        # sort the dictionary by contig, then by start coordinate
        def _sortDict(d):
            return (d[1]['contig'], d[1]['location'][0])
        sGenes = sorted(iter(Genes.items()), key=_sortDict)
        sortedGenes = OrderedDict(sGenes)
        scaff2genes = {}
        for k, v in list(sortedGenes.items()):
            if not v['contig'] in scaff2genes:
                scaff2genes[v['contig']] = [k]
            else:
                scaff2genes[v['contig']].append(k)
        # get only scaffolds that have gene models for training
        log.debug('{:} gene models to train snap on {:} scaffolds'.format(
            len(sGenes), len(scaff2genes)))
        trainingFasta = os.path.join(dir, 'snap-training.scaffolds.fasta')
        with open(trainingFasta, 'w') as outfile:
            with open(os.path.abspath(fasta), 'r') as infile:
                for title, seq in SimpleFastaParser(infile):
                    if title in list(scaff2genes.keys()):
                        outfile.write('>{:}\n{:}\n'.format(
                            title, softwrap(seq)))
        # convert to ZFF format
        origzff = os.path.join(dir, 'snap.training.zff')
        dict2zff(scaff2genes, Genes, origzff)
        # now run SNAP training
        cmd = ['fathom', os.path.abspath(origzff), os.path.abspath(
            trainingFasta), '-categorize', '1000', '-min-intron', str(minintron), '-max-intron', str(maxintron)]
        runSubprocess(cmd, tmpdir, log)
        cmd = ['fathom', 'uni.ann', 'uni.dna', '-export', '1000', '-plus']
        runSubprocess(cmd, tmpdir, log)
        cmd = ['forge', 'export.ann', 'export.dna']
        runSubprocess(cmd, tmpdir, log)
        cmd = ['perl', which_path('hmm-assembler.pl'), 'snap-trained', tmpdir]
        runSubprocess2(cmd, '.', log, snapHMM)
    # now run SNAP prediction
    cmd = ['snap', os.path.abspath(snapHMM), os.path.abspath(fasta)]
    runSubprocess2(cmd, '.', log, snapRaw)
    # convert zff to proper gff3
    zff2gff3(snapRaw, fasta, output)
    return os.path.abspath(snapHMM)
def runSnapTrained(fasta, hmm, dir, output):
    """Predict genes on *fasta* with a pre-trained SNAP HMM and emit GFF3."""
    rawZFF = os.path.join(dir, 'snap-prediction.zff')
    # SNAP writes ZFF predictions to stdout; runSubprocess2 captures it
    cmd = ['snap', os.path.abspath(hmm), os.path.abspath(fasta)]
    runSubprocess2(cmd, '.', log, rawZFF)
    # convert the raw ZFF output into proper GFF3
    zff2gff3(rawZFF, fasta, output)
def MemoryCheck():
    """Return total system RAM in GB (rounded; 1 GB taken as 1024000000 bytes)."""
    import psutil
    total_bytes = int(psutil.virtual_memory().total)
    return round(total_bytes / 1024000000)
def systemOS():
    """Return a human-readable operating-system description string."""
    if sys.platform == 'darwin':
        return 'MacOSX ' + platform.mac_ver()[0]
    if sys.platform == 'linux':
        # distro.linux_distribution() -> (name, version, codename)
        linux_version = distro.linux_distribution()
        return linux_version[0] + ' '+linux_version[1]
    # fall back to the raw platform identifier
    return sys.platform
def SystemInfo():
    """Log the OS description, CPU count, approximate RAM and Python version."""
    system_os = systemOS()
    python_vers = '.'.join(str(part) for part in sys.version_info[:3])
    log.info("OS: %s, %i cores, ~ %i GB RAM. Python: %s" %
             (system_os, multiprocessing.cpu_count(), MemoryCheck(), python_vers))
def runtRNAscan(input, tmpdir, output, cpus=1, precalc=False):
    """Run tRNAscan-SE (or reuse precalculated results) and emit GFF3.

    Hits are length-filtered to 50-150 bp per NCBI rules, then converted
    with aux_scripts/trnascan2gff3.pl into *output*. Returns True on
    success, False if tRNAscan produced no usable annotations.
    *precalc*, if given, is a path to an existing tRNAscan output file.
    """
    tRNAout = os.path.join(tmpdir, 'tRNAscan.out')
    tRNAlenOut = os.path.join(tmpdir, 'tRNAscan.len-filtered.out')
    if not precalc:
        if os.path.isfile(tRNAout):  # tRNAscan can't overwrite file, so check
            os.remove(tRNAout)
        cmd = ['tRNAscan-SE', '-o', tRNAout, '--thread', str(cpus), input]
        log.debug(' '.join(cmd))
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            # surface the failed command and its captured output in the log
            log.error('CMD ERROR: {}'.format(' '.join(cmd)))
            if stdout:
                log.debug(stdout.decode("utf-8"))
            if stderr:
                log.debug(stderr.decode("utf-8"))
    else:
        shutil.copyfile(precalc, tRNAout)
    if not checkannotations(tRNAout):
        log.info('tRNAscan-SE seems to have failed, check logfile for error. You can pass precalculated results to --trnascan')
        return False
    # enforce NCBI length rules
    with open(tRNAlenOut, 'w') as lenOut:
        with open(tRNAout, 'r') as infile:
            for line in infile:
                # pass the three header lines through unchanged
                if line.startswith('Sequence') or line.startswith('Name') or line.startswith('--------'):
                    lenOut.write('%s' % line)
                else:
                    cols = line.split('\t')
                    start = cols[2]
                    end = cols[3]
                    # coordinates may run either direction; use absolute span
                    if int(start) < int(end):
                        length = abs(int(end) - int(start))
                    else:
                        length = abs(int(start) - int(end))
                    # drop tRNAs outside the 50-150 bp window
                    if length < 50 or length > 150:
                        continue
                    else:
                        lenOut.write('%s' % line)
    # now convert to GFF3
    trna2gff = os.path.join(parentdir, 'aux_scripts', 'trnascan2gff3.pl')
    with open(output, 'w') as out:
        subprocess.call(['perl', trna2gff, '--input', tRNAlenOut], stdout=out)
    return True
def runtbl2asn(folder, template, discrepency, organism, isolate, strain, parameters, version):
    '''
    function to run NCBI tbl2asn

    Builds the source-modifier string (-j) from organism/isolate/strain,
    runs tbl2asn on *folder* with the given submission *template* and
    discrepancy report path, appends any space-separated custom
    *parameters*, and returns the command line that was run. Exits the
    program if *folder* is not a directory or *organism* is missing.
    '''
    # get funannotate version
    fun_version = get_version()
    # input should be a folder
    if not os.path.isdir(folder):
        log.error("tbl2asn error: %s is not a directory, exiting" % folder)
        sys.exit(1)
    # based on organism, isolate, strain, construct meta info for -j flag
    if not organism:
        log.error("tbl2asn error: organism not specified")
        sys.exit(1)
    meta = "[organism=" + organism + "]"
    if isolate:
        isolate_meta = "[isolate=" + isolate + "]"
        meta = meta + " " + isolate_meta
    if strain:
        strain_meta = "[strain=" + strain + "]"
        meta = meta + " " + strain_meta
    cmd = ['tbl2asn', '-y', '"Annotated using '+fun_version+'"', '-N',
           str(version), '-p', folder, '-t', template, '-M', 'n', '-Z', discrepency, '-j', '"'+meta+'"', '-V', 'b', '-c', 'fx', '-T', '-a', 'r10u']
    # check for custom parameters
    if parameters:
        params = parameters.split(' ')
        cmd = cmd + params
    runSubprocess(cmd, '.', log)
    return ' '.join(cmd)
def gb2smurf(input, prot_out, smurf_out):
    """Convert a GenBank file into SMURF inputs: protein FASTA + gene table.

    Every CDS translation (terminal stop stripped) is written to *prot_out*;
    a tab-delimited locus_tag/scaffold-number/start/end/product table goes
    to *smurf_out*, with coordinates swapped on the minus strand so that
    start < end.
    """
    with open(smurf_out, 'w') as smurf:
        with open(prot_out, 'w') as proteins:
            with open(input, 'r') as gbk:
                SeqRecords = SeqIO.parse(gbk, 'genbank')
                for record in SeqRecords:
                    for f in record.features:
                        # keep only the numeric part of the record name
                        # (used as the scaffold number, leading zeros stripped)
                        name = re.sub('[^0-9]', '', record.name)
                        if f.type == "CDS":
                            proteins.write(">%s\n%s\n" % (f.qualifiers['locus_tag'][0], softwrap(
                                f.qualifiers['translation'][0].rstrip('*'))))
                            locus_tag = f.qualifiers.get(
                                "locus_tag", ["No ID"])[0]
                            product_name = f.qualifiers.get(
                                "product", ["No Description"])[0]
                            mystart = f.location.start
                            myend = f.location.end
                            strand = f.location.strand
                            if strand == 1:
                                smurf.write("%s\t%s\t%s\t%s\t%s\n" % (locus_tag, name.lstrip(
                                    "0"), int(mystart), int(myend), product_name))
                            else:
                                # minus strand: swap so start < end
                                smurf.write("%s\t%s\t%s\t%s\t%s\n" % (locus_tag, name.lstrip(
                                    "0"), int(myend), int(mystart), product_name))
def GAGprotClean(input, output):
    '''
    gag.py v1 had headers like:
    >>evm.model.Contig100.1 protein
    gag.py v2 has headers like:
    >protein|evm.model.scaffold_1.169 ID=evm.model.scaffold_1.169|Parent=evm.TU.scaffold_1.169|Name=EVM%20prediction%20scaffold_1.169

    Rewrite the FASTA so each record keeps only its bare model ID.
    '''
    with open(output, 'w') as outfile:
        # Bug fix: the old mode 'ru' relied on the universal-newline 'U'
        # flag, which was removed in Python 3.11 (ValueError); plain 'r'
        # already performs universal newline translation.
        with open(input, 'r') as infile:
            for rec in SeqIO.parse(infile, 'fasta'):
                # strip the gag.py v2 'protein|' prefix and any description
                if rec.id.startswith('protein|'):
                    ID = rec.id.replace('protein|', '').split(' ')[0]
                else:
                    ID = rec.id.split(' ')[0]
                rec.id = ID
                rec.name = ''
                rec.description = ''
                SeqIO.write(rec, outfile, 'fasta')
def OldRemoveBadModels(proteins, gff, length, repeats, BlastResults, tmpdir, output):
    """Legacy bad-gene-model filter (superseded by RemoveBadModels).

    Flags gene models that (a) overlap >= 90% with repeat regions,
    (b) hit transposons in a BlastP search, (c) translate shorter than
    `length`, or (d) span assembly gaps ('XX' in protein). Matching
    models are diverted to tmpdir/bad_models.gff with a remove_reason
    tag; everything else is written to `output` with start/stop codon
    lines dropped and Name attributes stripped.
    """
    # first run bedtools to intersect models where 90% of gene overlaps with repeatmasker region
    repeat_temp = os.path.join(tmpdir, 'genome.repeats.to.remove.gff')
    cmd = ['bedtools', 'intersect', '-f', '0.9', '-a', gff, '-b', repeats]
    runSubprocess2(cmd, '.', log, repeat_temp)
    # now remove those proteins that do not have valid starts, less then certain length, and have internal stops
    remove = []
    reason = {}
    # parse the results from bedtools and add to remove list
    with open(repeat_temp, 'r') as input:
        for line in input:
            if "\tgene\t" in line:
                ninth = line.split('ID=')[-1]
                ID = ninth.split(";")[0]
                remove.append(ID)
                if not ID in reason:
                    reason[ID] = 'remove_reason=repeat_overlap;'
    # parse the results from BlastP search of transposons
    with open(BlastResults, 'r') as input:
        for line in input:
            col = line.split('\t')
            remove.append(col[0])
            # NOTE(review): membership is tested on the raw col[0] but keys are
            # stored under the evm.TU.-rewritten ID, so the else branch rarely
            # fires ('repeat_overalap' typo is preserved from the original).
            if not col[0] in reason:
                ID = col[0].replace('evm.model.', 'evm.TU.')
                reason[ID] = 'remove_reason=repeat_match;'
            else:
                ID = col[0].replace('evm.model.', 'evm.TU.')
                reason[ID] = 'remove_reason=repeat_overalap|repeat_match;'
    # I'm only seeing these models with GAG protein translations, so maybe that is a problem? skip enforcing start with M
    with open(proteins, 'r') as input:
        SeqRecords = SeqIO.parse(input, 'fasta')
        for rec in SeqRecords:
            Seq = str(rec.seq)[:-1]
            ID = rec.id.replace('evm.model.', 'evm.TU.')
            if len(Seq) < int(length):
                remove.append(ID)
                if not ID in reason:
                    reason[ID] = 'remove_reason=seq_too_short;'
            if 'XX' in Seq:
                remove.append(ID)
                if not rec.id in reason:
                    reason[ID] = 'remove_reason=model_span_gap;'
    remove = [w.replace('evm.TU.', '') for w in remove]
    remove = [w.replace('evm.model.', '') for w in remove]
    remove = set(remove)
    if len(remove) > 0:
        # BUGFIX: the original pattern started with r'\b\evm' -- the unknown
        # escape '\e' raises re.error ("bad escape \e") on Python >= 3.7, so
        # this function crashed whenever there was anything to remove. 'evm'
        # is meant literally.
        remove_match = re.compile(r'\bevm.(.*?:%s)[\.;]\b' % '|'.join(remove))
        with open(output, 'w') as out:
            with open(os.path.join(tmpdir, 'bad_models.gff'), 'w') as out2:
                with open(gff, 'r') as GFF:
                    for line in GFF:
                        if '\tstart_codon\t' in line:
                            continue
                        if '\tstop_codon\t' in line:
                            continue
                        matchLine = remove_match.search(line)
                        if not matchLine:
                            # remove the Name attribute as it sticks around in GBK file
                            line = re.sub(';Name=.*$', ';', line)
                            out.write(line)
                        else:
                            # bad model: tag gene lines with the removal reason
                            if "\tgene\t" in line:
                                bad_ninth = line.split('ID=')[-1]
                                bad_ID = bad_ninth.split(";")[0]
                                bad_reason = reason.get(bad_ID)
                                if bad_reason:
                                    line = line.replace(
                                        '\n', ';'+bad_reason+'\n')
                                else:
                                    log.debug(
                                        "%s was removed in removeBadModels function for unknown reason, please check manually" % bad_ID)
                                    line = line.replace(
                                        '\n', ';remove_reason=unknown;\n')
                            out2.write(line)
    else:  # if nothing to remove, just print out GFF
        with open(output, 'w') as out:
            with open(gff, 'r') as GFF:
                for line in GFF:
                    if '\tstart_codon\t' in line:
                        continue
                    if '\tstop_codon\t' in line:
                        continue
                    # remove the Name attribute as it sticks around in GBK file
                    line = re.sub(';Name=.*$', ';', line)
                    out.write(line)
def RemoveBadModels(proteins, gff, length, repeats, BlastResults, tmpdir, methods, output):
    """Filter bad gene models out of a GFF3 file.

    methods may include 'overlap' (drop models with >= 90% repeat overlap,
    via bedtools) and 'blast' (drop transposon BlastP hits). Independently
    of methods, models with a translation shorter than `length` or spanning
    an assembly gap ('XX' in protein) are always flagged. Kept models go to
    `output` (with ;Name= attributes stripped); flagged models go to
    tmpdir/bad_models.gff annotated with their removal reason.
    """
    reason = {}
    tooShort = 0
    repeat = 0
    gapspan = 0
    if 'overlap' in methods:
        # first run bedtools to intersect models where 90% of gene overlaps with repeatmasker region
        repeat_temp = os.path.join(tmpdir, 'genome.repeats.to.remove.gff')
        gffSorted = os.path.abspath(gff)+'.sorted.gff'
        bedSorted = os.path.abspath(repeats)+'.sorted.bed'
        #sortBedproper(repeats, bedSorted)
        cmd1 = ['bedtools', 'sort', '-i', repeats]
        with open(bedSorted, 'w') as bedout:
            subprocess.call(cmd1, stdout=bedout)
        #sortGFFproper(gff, gffSorted)
        cmd2 = ['bedtools', 'sort', '-i', gff]
        with open(gffSorted, 'w') as gffout:
            subprocess.call(cmd2, stdout=gffout)
        cmd = ['bedtools', 'intersect', '-sorted', '-f', '0.9', '-a', gffSorted, '-b', bedSorted]
        runSubprocess2(cmd, '.', log, repeat_temp)
        # parse the results from bedtools and add to remove list
        with open(repeat_temp, 'r') as input:
            for line in input:
                if "\tgene\t" in line:
                    ninth = line.split('ID=')[-1]
                    ID = ninth.split(";")[0]
                    if not ID in reason:
                        reason[ID] = 'remove_reason=repeat_overlap;'
    if 'blast' in methods:
        # parse the results from BlastP search of transposons
        with open(BlastResults, 'r') as input:
            for line in input:
                col = line.split('\t')
                # NOTE(review): membership is tested on raw col[0] but keys are
                # stored under the evm.TU.-rewritten ID, so the else branch only
                # fires when col[0] itself is already a key -- verify intent.
                if not col[0] in reason:
                    ID = col[0].replace('evm.model.', 'evm.TU.')
                    reason[ID] = 'remove_reason=repeat_match;'
                else:
                    ID = col[0].replace('evm.model.', 'evm.TU.')
                    reason[ID] = 'remove_reason=repeat_overlap|repeat_match;'
    # always do these checks
    # Look for models that are too short
    with open(proteins, 'r') as input:
        SeqRecords = SeqIO.parse(input, 'fasta')
        for rec in SeqRecords:
            # drop the trailing stop character from the translation
            Seq = str(rec.seq)[:-1]
            ID = rec.id.replace('evm.model.', 'evm.TU.')
            if len(Seq) < int(length):
                if not ID in reason:
                    reason[ID] = 'remove_reason=seq_too_short;'
            if 'XX' in Seq:
                if not rec.id in reason:
                    reason[ID] = 'remove_reason=model_span_gap;'
    # now read the EVM gene models in Blocks so you can parse gene ID
    numTotal = len(reason)
    # tally per-category counts for the log message
    for k, v in reason.items():
        if 'model_span_gap' in v:
            gapspan += 1
        elif 'seq_too_short' in v:
            tooShort += 1
        else:
            repeat += 1
    if numTotal > 0:
        log.info("Found {:,} gene models to remove: {:,} too short; {:,} span gaps; {:,} transposable elements".format(
            numTotal, tooShort, gapspan, repeat))
        with open(output, 'w') as out:
            with open(os.path.join(tmpdir, 'bad_models.gff'), 'w') as out2:
                with open(gff, 'r') as GFF:
                    # readBlocks yields blank-line-separated gene model chunks
                    for gene_model in readBlocks(GFF, '\n'):
                        if len(gene_model) > 1:
                            if gene_model[0].startswith('\n'):
                                ID = gene_model[1].split(
                                    'ID=')[-1].split(';')[0]
                            else:
                                ID = gene_model[0].split(
                                    'ID=')[-1].split(';')[0]
                            if ID in reason:
                                out2.write('#%s removed; %s\n' %
                                           (ID, reason.get(ID)))
                                for line in gene_model:
                                    if not line.startswith('\n'):
                                        out2.write('%s' % (line))
                            else:
                                for line in gene_model:
                                    # remove the Name attribute as it sticks around in GBK file
                                    line = re.sub(';Name=.*$', ';', line)
                                    out.write('%s' % (line))
    else:  # if nothing to remove, just print out GFF
        with open(output, 'w') as out:
            with open(gff, 'r') as GFF:
                for line in GFF:
                    if '\tstart_codon\t' in line:
                        continue
                    if '\tstop_codon\t' in line:
                        continue
                    # remove the Name attribute as it sticks around in GBK file
                    line = re.sub(';Name=.*$', ';', line)
                    out.write(line)
def CleantRNAtbl(GFF, TBL, output):
    """Clean up the GenBank .tbl file produced by GAG.

    Reads tRNA IDs/products from the GFF, then rewrites TBL so that
    tRNA product lines are followed by a 'pseudo' flag where the product
    is tRNA-Xxx, and the two lines following a matched tRNA product are
    skipped (GAG emits redundant qualifiers there).
    """
    # clean up genbank tbl file from gag output
    # try to read through GFF file, make dictionary of tRNA genes and products
    TRNA = {}
    matches = []
    with open(GFF, 'r') as gff:
        for line in gff:
            if line.startswith('#'):
                continue
            line = line.replace('\n', '')
            scaffold, source, feature, start, end, score, orientation, phase, info = line.split(
                '\t')
            if feature == 'tRNA':
                ID = info.split(';')[0].replace('ID=', '')
                ID = ID.replace('-T1', '')
                product = info.split('product=')[-1]
                TRNA[ID] = product
                matches.append(product)
    matches = set(matches)
    # alternation over every observed tRNA product name
    # NOTE(review): product names are not re.escape()d -- fine for plain
    # 'tRNA-Ala'-style names, would misfire on regex metacharacters.
    tRNAmatch = re.compile(r'\t\t\tproduct\t%s\n' % '|'.join(matches))
    with open(output, 'w') as out:
        with open(TBL, 'r') as input:
            for line in input:
                if line.startswith('\t\t\tlocus_tag\t'):
                    out.write(line)
                    geneID = line.split('locus_tag\t')[-1].replace('\n', '')
                    if geneID in TRNA:
                        CurrentProduct = TRNA.get(geneID)
                        if 'tRNA-Xxx' == CurrentProduct:
                            out.write("\t\t\tpseudo\n")
                elif line.startswith("\t\t\tproduct\ttRNA-Xxx"):
                    out.write(line)
                    out.write("\t\t\tpseudo\n")
                    # skip the next two qualifier lines emitted by GAG
                    next(input)
                    next(input)
                elif tRNAmatch.search(line):
                    out.write(line)
                    next(input)
                    next(input)
                else:  # otherwise just write line
                    out.write(line)
def getFailedProductNames(input, GeneDict):
    """Parse an NCBI tbl2asn discrepancy report for suspect product names.

    Returns a dict keyed by the first element of the GeneDict entry
    (presumably the locus tag -- verify against callers), with values
    (product, gene, [reasons]).
    """
    # input is NCBI tbl2asn discrepency report, parse to get suspect product names
    failed = {}
    with open(input, 'r') as discrep:
        # readBlocks splits the report at each 'DiscRep_' header
        for block in readBlocks(discrep, 'DiscRep_'):
            if 'DiscRep_SUB:SUSPECT_PRODUCT_NAMES::' in block[0]:
                reason = []
                for item in block:
                    if item.startswith('DiscRep_SUB:'):
                        bad = item.split('::')[-1].rstrip()
                        if 'features' in bad.lower():
                            bad = bad.split('features ')[-1]
                        reason.append(bad)
                    elif item.startswith('genome:'):
                        gene = item.split('\t')[-1].strip()
                        if gene.startswith('DiscRep'):
                            continue
                        if gene in GeneDict:
                            hit = GeneDict.get(gene)
                            if not hit[0] in failed:
                                failed[hit[0]] = (hit[1], gene, reason)
    return failed
def ParseErrorReport(input, Errsummary, val, Discrep, output, keep_stops):
    """Drop gene models flagged by tbl2asn validation from a GFF3 file.

    Collects error codes from the Errsummary file (some codes are
    whitelisted; NoStop errors are kept when keep_stops is True), finds
    overlapping tRNA models in the discrepancy report, maps errors to
    model IDs via the validation file `val`, and writes `input` minus the
    offending models (and all start/stop_codon lines) to `output`.
    """
    errors = []
    gapErrors = []
    remove = []
    with open(Errsummary) as summary:
        for line in summary:
            if 'ERROR' in line:
                # there are probably other errors you are unaware of....
                if 'SEQ_DESCR.OrganismIsUndefinedSpecies' in line or 'SEQ_DESCR.BadOrgMod' in line or 'SEQ_FEAT.MissingTrnaAA' in line or 'SEQ_INST.TerminalNs' in line:
                    pass
                elif 'SEQ_FEAT.NoStop' in line:
                    if keep_stops:
                        pass
                    else:
                        err = line.split(" ")[-1].rstrip()
                        errors.append(err)
                elif 'SEQ_FEAT.FeatureBeginsOrEndsInGap' in line:
                    err = line.split(" ")[-1].rstrip()
                    gapErrors.append(err)
                else:
                    err = line.split(" ")[-1].rstrip()
                    errors.append(err)
    # parse the discrepency report and look for overlapping genes, so far, all have been tRNA's in introns, so just get those for now.
    with open(Discrep, 'r') as discrep:
        # process discrepency report into blocks, then look for block headers where overlapping genes are, remove only tRNA models right now
        for block in readBlocks(discrep, 'DiscRep_'):
            if 'DiscRep_ALL:OVERLAPPING_GENES::' in block[0] or 'DiscRep_SUB:RNA_CDS_OVERLAP::' in block[0]:
                for item in block:
                    if item.startswith('genome:tRNA'):
                        gene = item.split('\t')[-1].replace('\n', '')
                        if gene.startswith('DiscRep'):
                            continue
                        # also remove the GAG-style child feature IDs
                        tRNA = gene + '_tRNA'
                        exon = gene + '_exon'
                        remove.append(gene)
                        remove.append(tRNA)
                        remove.append(exon)
            if 'DiscRep_ALL:FIND_OVERLAPPED_GENES::' in block[0]:
                for item in block:
                    gene = item.split('\t')[-1].replace('\n', '')
                    if gene.startswith('DiscRep'):
                        continue
                    tRNA = gene + '_tRNA'
                    exon = gene + '_exon'
                    remove.append(gene)
                    remove.append(tRNA)
                    remove.append(exon)
    # there are no errors, then just remove stop/start codons and move on
    if len(errors) < 1 and len(remove) < 1:
        with open(output, 'w') as out:
            with open(input, 'r') as GFF:
                for line in GFF:
                    if '\tstart_codon\t' in line:
                        continue
                    if '\tstop_codon\t' in line:
                        continue
                    out.write(line)
    else:
        with open(val) as validate:
            for line in validate:
                # map error codes back to mRNA/gene/exon IDs via the
                # validation report's 'ncbi|<id>]' token
                if any(x in line for x in errors):
                    mRNA = line.split("ncbi|")[-1].replace(']', '').rstrip()
                    gene = mRNA.replace('evm.model', 'evm.TU')
                    exon = mRNA + '.exon'
                    mRNA = mRNA + ';'
                    remove.append(mRNA)
                    remove.append(gene)
                    remove.append(exon)
                # this is only picking up tRNAs right now, which "probably" is all that it needs to.....but u never know
                if any(x in line for x in gapErrors):
                    cols = line.split(' ')
                    if 'Gene:' in cols:
                        gene = line.split('Gene: ')[-1]
                        gene = gene.split(' ')[0]
                        tRNA = gene + '_tRNA'
                        exon = gene + '_exon'
                        remove.append(gene)
                        remove.append(tRNA)
                        remove.append(exon)
        # make sure no empty strings
        remove = list([_f for _f in remove if _f])
        remove = set(remove)
        # NOTE(review): IDs are not re.escape()d; dots in model IDs match
        # any character here -- harmless in practice but worth confirming.
        remove_match = re.compile(r'\b(?:%s)+\b' % '|'.join(remove))
        with open(output, 'w') as out:
            with open(input, 'r') as GFF:
                for line in GFF:
                    if '\tstart_codon\t' in line:
                        continue
                    if '\tstop_codon\t' in line:
                        continue
                    if not remove_match.search(line):
                        if '\tgene\t' in line:
                            line = line.replace('Name=;', '')
                        out.write(line)
def antismash_version(input):
    """Detect the antiSMASH major version from a results GenBank file.

    Inspects the first record carrying a structured_comment annotation;
    falls back to 4 when no version information is present (antiSMASH v4
    did not emit the antiSMASH-Data comment block).
    """
    detected = 4
    with open(input, 'r') as handle:
        for record in SeqIO.parse(handle, 'genbank'):
            structured = record.annotations.get('structured_comment')
            if structured is None:
                continue
            data = structured.get('antiSMASH-Data')
            if data is not None:
                detected = int(data['Version'].split('.')[0])
            # stop at the first record that has a structured comment,
            # whether or not it contained antiSMASH-Data
            break
    return detected
def ParseAntiSmash(input, tmpdir, output, annotations):
    """Parse antiSMASH (v4/v5/v6) GenBank output.

    Writes cluster locations as BED to `output`, writes product/note
    annotations (tab-delimited: ID, key, value) to `annotations`, and
    returns (bbDomains, bbSubType, BackBone) dictionaries keyed by CDS ID.
    """
    smash_version = antismash_version(input)
    log.info("Now parsing antiSMASH v{:} results, finding SM clusters".format(smash_version))
    BackBone = {}
    SMCOGs = {}
    bbSubType = {}
    bbDomains = {}
    smProducts = {}
    backboneCount = 0
    clusterCount = 0
    cogCount = 0
    # parse antismash genbank to get clusters in bed format and slice the record for each cluster prediction
    with open(output, 'w') as antibed:
        with open(input, 'r') as input:
            SeqRecords = SeqIO.parse(input, 'genbank')
            for rec_num,record in enumerate(SeqRecords):
                for f in record.features:
                    locusTag, ID, Parent = (None,)*3
                    if smash_version < 6:
                        # derive 'Cluster_<n>' from the numeric suffix of the record id
                        baseName = 'Cluster'
                        if '_' in record.id:
                            try:
                                numericalContig = '{}_{}'.format(baseName, int(record.id.rsplit('_', 1)[-1]))
                            except ValueError:
                                if '.' in record.id:
                                    numericalContig = '{}_{}'.format(baseName, int(record.id.rsplit('.', 1)[0].rsplit('_', 1)[-1]))
                                else:  # just get the numbers
                                    numericalContig = '{}_{}'.format(baseName, int(''.join(filter(str.isdigit, record.id))))
                        else:
                            numericalContig = 'Cluster'
                    # parse v4 differently than version 5
                    if smash_version == 4:
                        if f.type == "cluster":
                            clusterCount += 1
                            chr = record.id
                            start = f.location.nofuzzy_start
                            end = f.location.nofuzzy_end
                            clusternum = f.qualifiers.get(
                                "note")[0].replace("Cluster number: ", "")
                            antibed.write("%s\t%s\t%s\tCluster_%s\t0\t+\n" %
                                          (chr, start, end, clusternum))
                        Domains = []
                        if f.type == "CDS":
                            locusTag, ID, Parent = getID(f, f.type)
                            if not ID:
                                continue
                            ID = ID.replace('ncbi_', '')
                            if f.qualifiers.get('sec_met'):
                                for k, v in list(f.qualifiers.items()):
                                    if k == 'sec_met':
                                        for i in v:
                                            if i.startswith('Type:'):
                                                type = i.replace('Type: ', '')
                                                backboneCount += 1
                                                BackBone[ID] = type
                                            if i.startswith('NRPS/PKS subtype:'):
                                                subtype = i.replace(
                                                    'NRPS/PKS subtype: ', '')
                                                bbSubType[ID] = subtype
                                            if i.startswith('NRPS/PKS Domain:'):
                                                doms = i.replace(
                                                    'NRPS/PKS Domain: ', '')
                                                doms = doms.split('. ')[0]
                                                Domains.append(doms)
                                        bbDomains[ID] = Domains
                            for k, v in list(f.qualifiers.items()):
                                if k == 'note':
                                    for i in v:
                                        if i.startswith('smCOG:'):
                                            COG = i.replace('smCOG: ', '')
                                            COG = COG.split(' (')[0]
                                            SMCOGs[ID] = COG
                                            cogCount += 1
                                        elif not i.startswith('smCOG tree'):
                                            notes = i
                                            smProducts[ID] = notes
                    elif smash_version >= 5:
                        if f.type == "protocluster":
                            clusterCount += 1
                            chr = record.id
                            start = f.location.nofuzzy_start
                            # if '<' in start:
                            #    start = start.replace('<', '')
                            end = f.location.nofuzzy_end
                            # if '>' in end:
                            #    end = end.replace('>', '')
                            clusternum = int(f.qualifiers.get(
                                "protocluster_number")[0])
                            # v6 names clusters Contig_N, v5 uses Contig.N
                            if smash_version >= 6:
                                antibed.write("{:}\t{:}\t{:}\t{:}_{:}\t0\t+\n".format(
                                    chr, start, end, numericalContig, clusternum))
                            else:
                                antibed.write("{:}\t{:}\t{:}\t{:}.{:}\t0\t+\n".format(
                                    chr, start, end, numericalContig, clusternum))
                        Domains = []
                        if f.type == "CDS":
                            locusTag, ID, Parent = getID(f, f.type)
                            if not ID:
                                continue
                            ID = ID.replace('ncbi_', '')
                            if f.qualifiers.get('NRPS_PKS'):
                                for k, v in list(f.qualifiers.items()):
                                    if k == 'NRPS_PKS':
                                        for i in v:
                                            if i.startswith('type:'):
                                                type = i.replace('type: ', '')
                                                backboneCount += 1
                                                BackBone[ID] = type
                                            if i.startswith('NRPS_PKS subtype:'):
                                                subtype = i.replace(
                                                    'NRPS_PKS subtype: ', '')
                                                bbSubType[ID] = subtype
                                            if i.startswith('Domain:'):
                                                doms = i.replace(
                                                    'Domain: ', '')
                                                doms = doms.split('. ')[0]
                                                Domains.append(doms)
                                        bbDomains[ID] = Domains
                            for k, v in list(f.qualifiers.items()):
                                if k == 'gene_functions':
                                    for i in v:
                                        if '(smcogs)' in i:
                                            COG = i.split(
                                                '(smcogs)')[-1].strip()
                                            COG = COG.split(' (')[0]
                                            SMCOGs[ID] = COG
                                            cogCount += 1
                                elif k == 'gene_kind':
                                    if 'biosynthetic' in v:
                                        backboneCount += 1
    # if smash_version == 4:
    log.info("Found %i clusters, %i biosynthetic enyzmes, and %i smCOGs predicted by antiSMASH" % (
        clusterCount, backboneCount, cogCount))
    # now generate the annotations to add to genome
    with open(annotations, 'w') as out:
        # add product annotations - use bbSubType --> BackBone
        for k, v in natsorted(list(BackBone.items())):
            ID = k
            if k in bbSubType:
                hit = bbSubType.get(k)
                if hit == 'NRPS':
                    hit = 'Nonribosomal Peptide Synthase (NRPS)'
                if hit == 'Type I Iterative PKS':
                    hit = 'Type I Iterative Polyketide synthase (PKS)'
            else:
                hit = v
            # map raw antiSMASH cluster types to human-readable products
            if hit == 'terpene':
                hit = 'terpene cyclase'
            elif hit == 'other':
                hit = 'putative secondary metabolism biosynthetic enzyme'
            elif hit == 'indole':
                hit = 'aromatic prenyltransferase (DMATS family)'
            elif hit == 'alkaloid' or hit == 'lignan' or hit == 'saccharide' or hit == 'polyketide':
                hit = 'putative ' + hit + ' biosynthetic cluster'
            elif hit == 'putative':
                hit = 'putative uncategorized biosynthetic cluster'
            elif '-' in hit:
                hit = 'putative ' + hit + ' biosynthetic cluster'
            if hit != 'none':
                out.write("%s\tproduct\t%s\n" % (ID, hit))
        # add annots from smProducts
        for k, v in list(smProducts.items()):
            ID = k
            # NOTE(review): this writes to stdout rather than the annotations
            # file `out` like the surrounding loops -- looks unintentional,
            # confirm before changing.
            if v != 'none' and not 'BLAST' in v:
                sys.stdout.write("%s\tproduct\t%s\n" % (ID, v))
        # add smCOGs into note section
        for k, v in list(SMCOGs.items()):
            ID = k
            if v != 'none':
                out.write("%s\tnote\t%s\n" % (ID, v))
    return bbDomains, bbSubType, BackBone
def GetClusterGenes(input, GFF, genome, annotations):
    """Assign mRNA gene models to antiSMASH clusters by location overlap.

    input is the cluster BED file; GFF/genome feed gff2dict. Writes
    'ID<TAB>note<TAB>antiSMASH:<cluster>' lines to `annotations` and
    returns {clusterName: [transcript ids]}.
    """
    # load clusters into InterLap
    interClust = bed2interlapNames(input)
    # load GFF3 into Dictionary
    Genes = {}
    Genes = gff2dict(GFF, genome, Genes)
    # loop through genes and check if in Clusters
    dictClusters = {}
    for k, v in natsorted(Genes.items()):
        if v['type'] == 'mRNA':
            if v['location'] in interClust[v['contig']]:
                # take the first overlapping cluster only
                best_hit = list(interClust[v['contig']].find(v['location']))[0]
                clusterName = best_hit[2]
                if not clusterName in dictClusters:
                    dictClusters[clusterName] = v['ids']
                else:
                    dictClusters[clusterName] += v['ids']
    # write the output file
    with open(annotations, 'w') as annotout:
        for k, v in list(dictClusters.items()):
            for i in v:
                annotout.write("%s\tnote\tantiSMASH:%s\n" % (i, k))
    return dictClusters
def splitFASTA(input, outputdir):
    """Split a multi-record FASTA into one <record id>.fa file per record
    inside outputdir (created if missing)."""
    if not os.path.isdir(outputdir):
        os.makedirs(outputdir)
    with open(input, 'r') as fasta_handle:
        for rec in SeqIO.parse(fasta_handle, 'fasta'):
            target = os.path.join(outputdir, str(rec.id) + '.fa')
            with open(target, 'w') as single_out:
                SeqIO.write(rec, single_out, 'fasta')
def genomeStats(input):
    """Compute summary statistics for an annotated GenBank genome.

    Returns a list of display-formatted strings:
    [organism, unique isolate/strain, locus_tag prefix, genome size,
     largest contig, average contig, contig count, N50, %GC,
     gene count, protein count, tRNA count].
    """
    from Bio.SeqUtils import GC
    lengths = []
    GeeCee = []
    Genes = 0
    tRNA = 0
    Prots = 0
    locus_tag = ''
    organism = None
    isolate = None
    strain = None
    uniqueIso = None
    with open(input, 'r') as gbk:
        SeqRecords = SeqIO.parse(gbk, 'genbank')
        for record in SeqRecords:
            lengths.append(len(record.seq))
            GeeCee.append(str(record.seq))
            organism = record.annotations['organism'].replace(
                ' Unclassified.', '')
            for f in record.features:
                if f.type == "source":
                    isolate = f.qualifiers.get("isolate", [None])[0]
                    strain = f.qualifiers.get("strain", [None])[0]
                if f.type == "CDS":
                    Prots += 1
                if f.type == "gene":
                    Genes += 1
                    if Genes == 1:
                        # locus_tag prefix taken from the first gene only
                        locus_tag = f.qualifiers.get("locus_tag")[
                            0].split('_')[0]
                if f.type == "tRNA":
                    tRNA += 1
    if strain:
        log.info("working on %s %s" % (organism, strain))
        uniqueIso = strain.replace(' ', '')
    elif isolate:
        log.info("working on %s %s" % (organism, isolate))
        uniqueIso = isolate.replace(' ', '')
    else:
        log.info("working on %s" % organism)
    GenomeSize = sum(lengths)
    LargestContig = max(lengths)
    ContigNum = len(lengths)
    AvgContig = int(round(GenomeSize / ContigNum))
    pctGC = round(GC("".join(GeeCee)), 2)
    # now get N50
    # N50 computed as the median of a list where each contig length is
    # repeated length-many times (memory-heavy for large genomes)
    lengths.sort()
    nlist = []
    for x in lengths:
        nlist += [x]*x
    if len(nlist) % 2 == 0:
        medianpos = int(len(nlist) / 2)
        N50 = int((nlist[medianpos] + nlist[medianpos-1]) / 2)
    else:
        medianpos = int(len(nlist) / 2)
        N50 = int(nlist[medianpos])
    # return values in a list
    return [organism, uniqueIso, locus_tag, "{0:,}".format(GenomeSize)+' bp', "{0:,}".format(LargestContig)+' bp', "{0:,}".format(AvgContig)+' bp', "{0:,}".format(ContigNum), "{0:,}".format(N50)+' bp', "{:.2f}".format(pctGC)+'%', "{0:,}".format(Genes), "{0:,}".format(Prots), "{0:,}".format(tRNA)]
def MEROPS2dict(input):
    """Map MEROPS protease accessions to family names.

    Parses the formatted MEROPS FASTA file, whose headers look like
    '>MER0001 S08.001' (accession, space, family). Returns
    {accession: family}. Sequence lines are ignored.
    """
    # use a real name instead of shadowing the builtin 'dict'
    families = {}
    with open(input, 'r') as fasta:
        for line in fasta:
            if line.startswith('>'):
                cols = line.split(' ')
                ID = cols[0].replace('>', '')
                family = cols[1].replace('\n', '')
                families[ID] = family
    return families
def getEggNogfromNote(input):
    """Extract EggNog hits from the note qualifiers of a GenBank file.

    Returns {locus_tag: eggnog_id}; only the first EggNog note per CDS
    is kept.
    """
    dict = {}
    with open(input, 'r') as gbk:
        SeqRecords = SeqIO.parse(gbk, 'genbank')
        for record in SeqRecords:
            for f in record.features:
                if f.type == 'CDS':
                    try:
                        ID = f.qualifiers['locus_tag'][0]
                    except KeyError:
                        # NOTE(review): the '%s' placeholder is never filled in,
                        # so the log line prints literally
                        log.debug("%s has no locus_tag, skipping")
                        continue
                    for k, v in list(f.qualifiers.items()):
                        if k == 'note':
                            notes = v[0].split('; ')
                            for i in notes:
                                if i.startswith('EggNog:'):
                                    hit = i.replace('EggNog:', '')
                                    if not ID in dict:
                                        dict[ID] = hit
    return dict
def getStatsfromNote(input, word, Database):
    """Collect '<word>:' note annotations from a GenBank file.

    Returns {annotation: [locus_tags]}. MEROPS hits (values starting
    with 'MER') are translated to family names via the MEROPS database
    in `Database`.
    """
    dict = {}
    meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
    with open(input, 'r') as gbk:
        SeqRecords = SeqIO.parse(gbk, 'genbank')
        for record in SeqRecords:
            for f in record.features:
                if f.type == 'CDS':
                    try:
                        ID = f.qualifiers['locus_tag'][0]
                    except KeyError:
                        # NOTE(review): '%s' placeholder never filled in
                        log.debug("%s has no locus_tag, skipping")
                        continue
                    for k, v in list(f.qualifiers.items()):
                        if k == 'note':
                            notes = v[0].split('; ')
                            for i in notes:
                                if i.startswith(word+':'):
                                    hit = i.replace(word+':', '')
                                    if hit.startswith('MER'):  # change to family name
                                        # NOTE(review): .get() may return None if the
                                        # accession is missing from the database
                                        hit = meropsDict.get(hit)
                                    if not hit in dict:
                                        dict[hit] = [ID]
                                    else:
                                        dict[hit].append(ID)
    return dict
def getSMBackbones(input):
    """Count secondary-metabolite backbone enzymes in a GenBank file.

    Classifies CDS product names into NRPS, PKS, or Hybrid and returns
    {'NRPS': n, 'PKS': n, 'Hybrid': n}.
    """
    dict = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
    with open(input, 'r') as gbk:
        for record in SeqIO.parse(gbk, 'genbank'):
            for f in record.features:
                if f.type == 'CDS':
                    product = f.qualifiers['product'][0]
                    if not product == 'hypothetical protein':
                        if product == "Hybrid PKS-NRPS":
                            dict['Hybrid'] += 1
                        if product == "Nonribosomal Peptide Synthase (NRPS)":
                            dict['NRPS'] += 1
                        if 'Polyketide synthase (PKS)' in product:
                            dict['PKS'] += 1
    return dict
def parseGOterms(input, folder, genome):
    """Extract GO terms per CDS for GO-enrichment input files.

    Appends 'locus_tag<TAB>GO1;GO2;...' lines to folder/associations.txt
    (append mode: accumulates across genomes) and writes the population
    file folder/<genome>.txt containing one locus_tag per line for genes
    that have at least one GO note.
    """
    with open(os.path.join(folder, 'associations.txt'), 'a') as assoc:
        with open(os.path.join(folder, genome+'.txt'), 'w') as terms:
            with open(input, 'r') as gbk:
                SeqRecords = SeqIO.parse(gbk, 'genbank')
                for record in SeqRecords:
                    for f in record.features:
                        if f.type == 'CDS':
                            try:
                                ID = f.qualifiers['locus_tag'][0]
                            except KeyError:
                                # NOTE(review): '%s' placeholder never filled in
                                log.debug("%s has no locus_tag, skipping")
                                continue
                            GOS = []
                            for k, v in list(f.qualifiers.items()):
                                if k == 'note':
                                    notes = v[0].split('; ')
                                    for i in notes:
                                        if i.startswith('GO'):
                                            # note format: 'GO_function: GO:0005515 - ...'
                                            go_term = i.split(' ')[1]
                                            GOS.append(go_term)
                            if GOS:
                                assoc.write("%s\t%s\n" % (ID, ";".join(GOS)))
                                terms.write("%s\n" % ID)
def getStatsfromDbxref(input, word):
    """Collect '<word>:' db_xref annotations from a GenBank file.

    Returns {xref_value: [locus_tags]} for every db_xref qualifier whose
    value starts with '<word>:'.
    """
    dict = {}
    with open(input, 'r') as gbk:
        SeqRecords = SeqIO.parse(gbk, 'genbank')
        for record in SeqRecords:
            for f in record.features:
                if f.type == 'CDS':
                    try:
                        ID = f.qualifiers['locus_tag'][0]
                    except KeyError:
                        # NOTE(review): '%s' placeholder never filled in
                        log.debug("%s has no locus_tag, skipping")
                        continue
                    for k, v in list(f.qualifiers.items()):
                        if k == 'db_xref':
                            for i in v:
                                if i.startswith(word+':'):
                                    hit = i.replace(word+':', '')
                                    if not hit in dict:
                                        dict[hit] = [ID]
                                    else:
                                        dict[hit].append(ID)
    return dict
def getGBKannotation(input, Database):
    '''
    Function will loop through GBK file pulling out funannotate functional annotation
    and returning a list of dictionaries for each annotation class

    Return order: [pfams, iprs, nogs, buscos, merops, cazys, cogs,
    secreted, membrane, secmet, SMs]. All except nogs (ID -> hit) and
    SMs (backbone counters) map annotation -> [CDS IDs].
    '''
    # convert merops on the fly, need database
    meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
    SMs = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
    pfams = {}
    iprs = {}
    nogs = {}
    cogs = {}
    merops = {}
    cazys = {}
    secreted = {}
    membrane = {}
    buscos = {}
    secmet = {}
    with open(input, 'r') as infile:
        for record in SeqIO.parse(infile, 'genbank'):
            for f in record.features:
                locusTag, ID, Parent = (None,)*3
                if f.type == 'CDS':
                    locusTag, ID, Parent = getID(f, f.type)
                    if not ID:
                        continue
                    product = f.qualifiers['product'][0]
                    # backbone SM enzymes are recognized by their product names
                    if product == "Hybrid PKS-NRPS":
                        SMs['Hybrid'] += 1
                    if product == "Nonribosomal Peptide Synthase (NRPS)":
                        SMs['NRPS'] += 1
                    if 'Polyketide synthase (PKS)' in product:
                        SMs['PKS'] += 1
                    for k, v in list(f.qualifiers.items()):
                        if k == 'db_xref':
                            for i in v:
                                if i.startswith('PFAM:'):
                                    hit = i.replace('PFAM:', '')
                                    if not hit in pfams:
                                        pfams[hit] = [ID]
                                    else:
                                        pfams[hit].append(ID)
                                elif i.startswith('InterPro:'):
                                    hit = i.replace('InterPro:', '')
                                    if not hit in iprs:
                                        iprs[hit] = [ID]
                                    else:
                                        iprs[hit].append(ID)
                        if k == 'note':
                            # funannotate packs annotations into one ';'-joined note
                            notes = v[0].split('; ')
                            for i in notes:
                                if i.startswith('EggNog:'):
                                    hit = i.replace('EggNog:', '')
                                    if not ID in nogs:
                                        nogs[ID] = hit
                                elif i.startswith('BUSCO:'):
                                    hit = i.replace('BUSCO:', '')
                                    if not hit in buscos:
                                        buscos[hit] = [ID]
                                    else:
                                        buscos[hit].append(ID)
                                elif i.startswith('MEROPS:'):  # change to family name
                                    hit = i.replace('MEROPS:', '')
                                    # NOTE(review): .get() may return None for an
                                    # unknown accession; None then becomes a key
                                    hit = meropsDict.get(hit)
                                    if not hit in merops:
                                        merops[hit] = [ID]
                                    else:
                                        merops[hit].append(ID)
                                elif i.startswith('CAZy:'):
                                    hit = i.replace('CAZy:', '')
                                    if not hit in cazys:
                                        cazys[hit] = [ID]
                                    else:
                                        cazys[hit].append(ID)
                                elif i.startswith('COG:'):
                                    hit = i.replace('COG:', '')
                                    hits = hit.split(',')
                                    for x in hits:
                                        if not x in cogs:
                                            cogs[x] = [ID]
                                        else:
                                            cogs[x].append(ID)
                                elif i.startswith('SECRETED:'):
                                    hit = i.replace('SECRETED:', '')
                                    if not hit in secreted:
                                        secreted[hit] = [ID]
                                    else:
                                        secreted[hit].append(ID)
                                elif i.startswith('TransMembrane:'):
                                    hit = i.replace('TransMembrane:', '')
                                    if not hit in membrane:
                                        membrane[hit] = [ID]
                                    else:
                                        membrane[hit].append(ID)
                                elif i.startswith('antiSMASH:'):
                                    hit = i.replace('antiSMASH:', '')
                                    if not hit in secmet:
                                        secmet[hit] = [ID]
                                    else:
                                        secmet[hit].append(ID)
    return [pfams, iprs, nogs, buscos, merops, cazys, cogs, secreted, membrane, secmet, SMs]
def annotationtable(input, Database, HeaderNames, InterProDict, output):
    from collections import OrderedDict
    '''
    Function will create a tsv annotation table from GenBank file
    trying to capture all annotation in a parsable tsv file or
    something that could be imported into excel

    One row per transcript, sorted by contig/start. HeaderNames entries
    outside the standard annotation classes become extra columns.
    '''
    def _sortDict(d):
        # sort key for (gene_id, gene_dict) items: contig then start coordinate
        return (d[1]['contig'], d[1]['location'][0])
    # convert merops on the fly, need database
    meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
    # get note new/unique note names
    uniqueNotes = OrderedDict()
    for x in HeaderNames:
        if not x in ['BUSCO', 'CAZy', 'COG', 'EggNog', 'SECRETED', 'GO', 'MEROPS', 'TransMembrane']:
            uniqueNotes[x] = []
    # load genbank into funannotate dictionary (required as we need transcript/cds/etc)
    Genes = {}
    with open(input, 'r') as gbk:
        for record in SeqIO.parse(gbk, 'genbank'):
            for f in record.features:
                gb_feature_add2dict(f, record, Genes)
    SeqRecords = SeqIO.to_dict(SeqIO.parse(input, 'genbank'))
    sGenes = natsorted(Genes.items(), key=_sortDict)
    sortedGenes = OrderedDict(sGenes)
    # input should be fully annotation GBK file from funannotate
    with open(output, 'w') as outfile:
        header = ['GeneID', 'TranscriptID', 'Feature', 'Contig', 'Start',
                  'Stop', 'Strand', 'Name', 'Product', 'Alias/Synonyms', 'EC_number',
                  'BUSCO', 'PFAM', 'InterPro', 'EggNog', 'COG', 'GO Terms',
                  'Secreted', 'Membrane', 'Protease', 'CAZyme']
        header += uniqueNotes.keys()
        header += ['Notes', 'gDNA', 'mRNA', 'CDS-transcript', 'Translation']
        outfile.write('%s\n' % '\t'.join(header))
        for k,v in sortedGenes.items():
            for i in range(0,len(v['ids'])):
                # for each new feature, start with empty lists
                pfams = []
                iprs = []
                GOS = v['go_terms'][i]
                nogs = []
                cogs = []
                merops = []
                cazys = []
                secreted = []
                membrane = []
                therest = []
                buscos = []
                ecnum = []
                alias = []
                for key,value in uniqueNotes.items():
                    uniqueNotes[key] = []
                # now grab the data
                for y in v['db_xref'][i]:
                    if y.startswith('PFAM:'):
                        hit = y.replace('PFAM:', '')
                        pfams.append(hit)
                    elif y.startswith('InterPro:'):
                        hit = y.replace('InterPro:', '')
                        # look up description in dictionary
                        desc = InterProDict.get(hit)
                        iprs.append('{:} {:}'.format(hit, desc))
                for y in v['gene_synonym']:
                    alias.append(y)
                for y in v['EC_number'][i]:
                    ecnum.append(y)
                for y in v['note'][i]:
                    if y.startswith('EggNog:'):
                        hit = y.replace('EggNog:', '')
                        nogs.append(hit)
                    elif y.startswith('BUSCO:'):
                        hit = y.replace('BUSCO:', '')
                        buscos.append(hit)
                    elif y.startswith('MEROPS:'):  # change to family name
                        hit = y.replace('MEROPS:', '')
                        if hit in meropsDict:
                            hit = meropsDict.get(hit)
                            merops.append(hit)
                        else:
                            log.error("MEROPS database inconsistency: %s not found" % hit)
                    elif y.startswith('CAZy:'):
                        hit = y.replace('CAZy:', '')
                        cazys.append(hit)
                    elif y.startswith('COG:'):
                        hit = y.replace('COG:', '')
                        hits = hit.split(',')
                        for x in hits:
                            # NOTE(review): COGS.get(x) returning None would make
                            # this concatenation raise TypeError -- assumes all
                            # COG codes are known
                            desc = x + ':'+ resources.COGS.get(x)
                            cogs.append(desc)
                    elif y.startswith('SECRETED:'):
                        hit = y.replace('SECRETED:', '')
                        secreted.append(hit)
                    elif y.startswith('TransMembrane:'):
                        hit = y.replace('TransMembrane:', '')
                        membrane.append(hit)
                    elif y.startswith(tuple(uniqueNotes.keys())):
                        try:
                            n = y.split(':')[0]
                            hit = y.split(':', 1)[1]
                            uniqueNotes[n].append(hit)
                        except IndexError:
                            hit = y
                            therest.append(hit)
                    else:  # capture everything else
                        hit = y
                        therest.append(hit)
                # bring together output
                result = [k, v['ids'][i], v['type'], v['contig'],
                          str(v['location'][0]), str(v['location'][1]),
                          v['strand'], v['name'],
                          v['product'][i],';'.join(alias),
                          ';'.join(ecnum),';'.join(buscos),
                          ';'.join(pfams),';'.join(iprs),
                          ';'.join(nogs),';'.join(cogs),
                          ';'.join(GOS),
                          ';'.join(secreted),
                          ';'.join(membrane),
                          ';'.join(merops),
                          ';'.join(cazys)
                          ]
                for key,value in uniqueNotes.items():
                    result.append(';'.join(value))
                gDNA = getSeqRegions(SeqRecords, v['contig'], [v['location']])
                try:
                    Transcript = str(v['transcript'][i])
                except IndexError:
                    if v['cds_transcript'][i]:
                        Transcript = str(v['cds_transcript'][i])
                    else:
                        # NOTE(review): Transcript stays unbound here; the later
                        # use in `result +=` would raise NameError/UnboundLocalError
                        # for such models -- confirm this case cannot occur
                        print('{:} has no mrna or cds transcript'.format(k))
                        pass
                if v['type'] == 'mRNA':
                    CDSTranscript = str(v['cds_transcript'][i])
                    try:
                        Protein = v['protein'][i]
                    except IndexError:
                        Protein = ''
                        print('ERROR: No amino acid sequence exists for {}'.format(v['ids'][i]))
                else:
                    CDSTranscript = ''
                    Protein = ''
                if v['strand'] == '-':
                    # sequences are stored in genome orientation; flip for minus strand
                    gDNA = RevComp(gDNA)
                    Transcript = RevComp(Transcript)
                    CDSTranscript = RevComp(CDSTranscript)
                result += [';'.join(therest), gDNA, Transcript,
                           CDSTranscript, Protein]
                # convert any None's to empty string
                result = ['' if x is None else x for x in result]
                # write to file
                outfile.write('%s\n' % '\t'.join(result))
def annotationtableOld(input, Database, output):
    '''
    Function will create a tsv annotation table from GenBank file
    trying to capture all annotation in a parsable tsv file or
    something that could be imported into excel

    Legacy version (superseded by annotationtable): one row per
    tRNA/ncRNA/rRNA/CDS feature, no transcript sequences.
    '''
    # convert merops on the fly, need database
    meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
    # input should be fully annotation GBK file from funannotate
    with open(output, 'w') as outfile:
        header = ['GeneID', 'Feature', 'Contig', 'Start', 'Stop', 'Strand', 'Name', 'Product', 'BUSCO', 'PFAM',
                  'InterPro', 'EggNog', 'COG', 'GO Terms', 'Secreted', 'Membrane', 'Protease', 'CAZyme', 'Notes', 'Translation']
        outfile.write('%s\n' % '\t'.join(header))
        for record in SeqIO.parse(input, 'genbank'):
            Contig = record.id
            for f in record.features:
                if f.type in ['tRNA', 'ncRNA', 'rRNA']:
                    ID = f.qualifiers['locus_tag'][0]
                    Start = f.location.nofuzzy_start
                    End = f.location.nofuzzy_end
                    strand = f.location.strand
                    # NOTE(review): Strand is unbound if strand is neither 1
                    # nor -1 (e.g. None for mixed-strand locations) -- assumes
                    # all features are stranded
                    if strand == 1:
                        Strand = '+'
                    elif strand == -1:
                        Strand = '-'
                    try:
                        Product = f.qualifiers['product'][0]
                    except KeyError:
                        Product = "None"
                    result = [ID, f.type, Contig, str(Start), str(
                        End), Strand, '', Product, '', '', '', '', '', '', '', '', '', '', '', '']
                    outfile.write('%s\n' % '\t'.join(result))
                if f.type == 'CDS':
                    ID = f.qualifiers['locus_tag'][0]
                    Start = f.location.nofuzzy_start
                    End = f.location.nofuzzy_end
                    strand = f.location.strand
                    if strand == 1:
                        Strand = '+'
                    elif strand == -1:
                        Strand = '-'
                    try:
                        Product = f.qualifiers['product'][0]
                    except KeyError:
                        Product = 'hypothetical protein'
                    try:
                        Name = f.qualifiers['gene'][0]
                    except KeyError:
                        Name = ''
                    try:
                        Translation = f.qualifiers['translation'][0]
                    except KeyError:
                        Translation = ''
                    pfams = []
                    iprs = []
                    GOS = []
                    nogs = []
                    cogs = []
                    merops = []
                    cazys = []
                    secreted = []
                    membrane = []
                    therest = []
                    buscos = []
                    for k, v in list(f.qualifiers.items()):
                        if k == 'db_xref':
                            for i in v:
                                if i.startswith('PFAM:'):
                                    hit = i.replace('PFAM:', '')
                                    pfams.append(hit)
                                elif i.startswith('InterPro:'):
                                    hit = i.replace('InterPro:', '')
                                    iprs.append(hit)
                        elif k == 'note':
                            notes = v[0].split('; ')
                            for i in notes:
                                if i.startswith('GO'):
                                    go_term = i.split(' ')[1]
                                    GOS.append(go_term)
                                elif i.startswith('EggNog:'):
                                    hit = i.replace('EggNog:', '')
                                    nogs.append(hit)
                                elif i.startswith('BUSCO:'):
                                    hit = i.replace('BUSCO:', '')
                                    buscos.append(hit)
                                elif i.startswith('MEROPS:'):  # change to family name
                                    hit = i.replace('MEROPS:', '')
                                    if hit in meropsDict:
                                        hit = meropsDict.get(hit)
                                        merops.append(hit)
                                    else:
                                        log.error(
                                            "MEROPS database inconsistency: %s not found" % hit)
                                elif i.startswith('CAZy:'):
                                    hit = i.replace('CAZy:', '')
                                    cazys.append(hit)
                                elif i.startswith('COG:'):
                                    hit = i.replace('COG:', '')
                                    hits = hit.split(',')
                                    for x in hits:
                                        desc = x + ':' + resources.COGS.get(x)
                                        cogs.append(desc)
                                elif i.startswith('SECRETED:'):
                                    hit = i.replace('SECRETED:', '')
                                    secreted.append(hit)
                                elif i.startswith('TransMembrane:'):
                                    hit = i.replace('TransMembrane:', '')
                                    membrane.append(hit)
                                else:  # capture everything else
                                    hit = i
                                    therest.append(hit)
                    result = [ID, 'CDS', Contig, str(Start), str(End), Strand, Name, Product, ';'.join(buscos), ';'.join(pfams), ';'.join(iprs), ';'.join(
                        nogs), ';'.join(cogs), ';'.join(GOS), ';'.join(secreted), ';'.join(membrane), ';'.join(merops), ';'.join(cazys), ';'.join(therest), Translation]
                    outfile.write('%s\n' % '\t'.join(result))
def ncbiCheckErrors(error, validation, genename, fixOut):
    """Summarize NCBI tbl2asn errors tied to gene models.

    Counts ERROR entries in the *error* summary file; when any exist,
    scans the *validation* report for ERROR lines mentioning *genename*,
    writes a tab-delimited GeneID/reason table to *fixOut*, and prints it.
    Returns the number of gene-model errors found.
    """
    ncbi_error = 0
    actual_error = 0
    with open(error, 'r') as errors:
        for line in errors:
            line = line.strip()
            if 'ERROR' in line:
                # summary rows look like "<count> ERROR ..."
                ncbi_error += int(line.split(' ')[0])
    # only parse the validation report when the summary shows errors
    if ncbi_error > 0:
        # collect the gene models that need to be fixed
        needFixing = {}
        with open(validation, 'r') as validationFile:
            for line in validationFile:
                line = line.strip()
                if not (line.startswith('ERROR') and genename in line):
                    continue
                actual_error += 1
                for token in line.split(' '):
                    if genename in token:
                        ID = token.split('|')[-1]
                        if '-' in ID:
                            ID = ID.split('-')[0]
                reason = line.split(' FEATURE:')[0]
                reason = reason.split('] ')[-1]
                if ID not in needFixing:
                    needFixing[ID] = reason
        if actual_error > 0:
            log.info("There are %i gene models that need to be fixed." %
                     actual_error)
            print('-------------------------------------------------------')
            with open(fixOut, 'w') as fix:
                fix.write('#GeneID\tError Message\n')
                for k, v in natsorted(list(needFixing.items())):
                    fix.write('%s\t%s\n' % (k, v))
                    print('%s\t%s' % (k, v))
    return actual_error
def convert2counts(input):
    """Convert a list of {key: list-of-members} dicts into a count DataFrame.

    Each input dict becomes one row; each key becomes a column holding
    len(members). Identifiers missing from a row are filled with 0.

    Fixes: no longer shadows the ``dict`` builtin; builds rows with a
    comprehension instead of a manual accumulation loop.
    """
    import pandas as pd
    # one {identifier: member-count} row per input dictionary
    counts = [{k: len(v) for k, v in d.items()} for d in input]
    df = pd.DataFrame(counts)
    df.fillna(0, inplace=True)  # fill in zeros for missing data
    return df
def gb2proteinortho(input, folder, name):
    """Convert a GenBank file into proteinortho input files.

    Writes three files into *folder*, all named after *name*: a GFF with
    one CDS line per transcript (<name>.gff), a protein FASTA (<name>.faa)
    and a CDS transcript FASTA (<name>.transcripts.fa).
    """
    gffOut = os.path.join(folder, name+'.gff')
    FastaOut = os.path.join(folder, name+'.faa')
    Transcripts = os.path.join(folder, name+'.transcripts.fa')
    genes = {}
    with open(input, 'r') as gbk:
        for record in SeqIO.parse(gbk, 'genbank'):
            for f in record.features:
                # accumulate features into the funannotate gene dictionary
                gb_feature_add2dict(f, record, genes)
    # now output the files you need
    with open(gffOut, 'w') as gff:
        with open(FastaOut, 'w') as fasta:
            with open(Transcripts, 'w') as transcripts:
                # one entry per transcript id of every mRNA gene model
                for k, v in natsorted(list(genes.items())):
                    if v['type'] == 'mRNA':
                        for i, item in enumerate(v['ids']):
                            transcripts.write(">{:} {:} codon_start={:} strand={:}\n{:}\n".format(
                                item, k, v['codon_start'][i], v['strand'], v['cds_transcript'][i]))
                            fasta.write(">%s %s\n%s\n" %
                                        (item, k, v['protein'][i]))
                            gff.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};\n".format(
                                v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], item, k, v['product'][i]))
def drawStackedBar(panda, type, labels, ymax, output, colors=False):
    """Draw a stacked bar graph of *panda* (one bar per genome) to a PDF.

    *type* is used for the title/axis label, *labels* maps column names to
    descriptions for the legend, *ymax* sets the y-axis limit, and *colors*
    optionally overrides the seaborn-generated palette.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.patches as mpatches
        import seaborn as sns
        import numpy as np
        from funannotate.stackedBarGraph import StackedBarGrapher as StackedBarGrapher
        # stackedbargraph from summary data
        SBG = StackedBarGrapher()
        # labels
        d_labels = panda.index.values
        # y-ticks: six evenly spaced integer ticks from 0 to ymax
        ticks = np.linspace(0, ymax, 6)
        ticks = list(ticks)
        nums = [int(x) for x in ticks]
        vals = [str(x) for x in nums]
        yticks = [nums, vals]
        # colors
        if not colors:
            color_palette = sns.hls_palette(
                len(panda.columns), l=.4, s=.8).as_hex()
            color_palette = [str(x).upper() for x in color_palette]
        else:
            color_palette = colors
        # set up plot
        sns.set_style('darkgrid')
        sns.set_context('paper')
        fig = plt.figure()
        ax = fig.add_subplot(111)
        YLabel = "Number of "+type
        SBG.stackedBarPlot(ax, panda, color_palette, xLabels=panda.index.values,
                           endGaps=True, gap=0.25, xlabel="Genomes", ylabel=YLabel, yTicks=yticks)
        plt.title(type+" summary")
        # get the legend: one patch per column, labelled "<col>: <description>"
        legends = []
        i = 0
        for column in panda.columns:
            legends.append(mpatches.Patch(
                color=color_palette[i], label=panda.columns.values[i] + ": " + labels.get(panda.columns.values[i])))
            i += 1
        lgd = ax.legend(handles=legends, fontsize=6, loc='upper left',
                        bbox_to_anchor=(1.02, 1), borderaxespad=0)
        plt.ylim([0, ymax])
        # set the font size - i wish I knew how to do this proportionately.....but setting to something reasonable.
        for item in ax.get_xticklabels():
            item.set_fontsize(8)
        # setup the plot
        fig.subplots_adjust(bottom=0.4)
        fig.savefig(output, format='pdf', bbox_extra_artists=(
            lgd,), bbox_inches='tight')
        plt.close(fig)
def drawHeatmap(df, color, output, labelsize, annotate):
    """Render *df* as a seaborn heatmap and save it to *output* as a PDF.

    *color* is the colormap, *labelsize* sets the y tick-label font size,
    and *annotate* toggles per-cell integer annotations.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import seaborn as sns
        # get size of table
        width = len(df.columns) / 2
        height = len(df.index) / 4
        fig, ax = plt.subplots(figsize=(width, height))
        # NOTE: removed `cbar_ax = fig.add_axes(shrink=0.4)` — it passed no
        # rect, its result was never used, and on matplotlib >= 3.3 calling
        # add_axes() without a rect raises TypeError.
        if annotate:
            sns.heatmap(df, linewidths=0.5, cmap=color, ax=ax,
                        fmt="d", annot_kws={"size": 4}, annot=True)
        else:
            sns.heatmap(df, linewidths=0.5, cmap=color, ax=ax, annot=False)
        plt.yticks(rotation=0)
        plt.xticks(rotation=90)
        # keep tick labels legible regardless of table size
        for item in ax.get_xticklabels():
            item.set_fontsize(8)
        for item in ax.get_yticklabels():
            item.set_fontsize(int(labelsize))
        fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
        plt.close(fig)
def donutplot(df, LongName, output, colors=False):
    """Draw one donut (pie-with-center-hole) chart per genome in *df*.

    *LongName* maps column names to legend descriptions; *colors*
    optionally overrides the default palette.

    Bug fix: each subplot now plots its own row ``data[k]``; previously
    ``data[0]`` was used, so every genome showed the first row's values.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.patches as mpatches
        import seaborn as sns
        # create data: legend labels fall back to the raw column name
        longnames = []
        for x in df.columns.tolist():
            if x in LongName:
                longnames.append(LongName.get(x))
            else:
                longnames.append(x)
        names = df.columns.tolist()
        data = df.values.tolist()
        species = df.index.values
        # get size of table: two charts per row of subplots
        total = len(df.index)
        Rows = total // 2
        Rows += total % 2
        Position = list(range(1, total+1))
        # get colors figured out
        if not colors:
            color_palette = resources.pref_colors
        else:
            color_palette = colors
        # draw figure; smaller canvas when only a couple of genomes
        if len(species) < 3:
            fig = plt.figure(1, figsize=(8, 4))
        else:
            fig = plt.figure(1, figsize=(8, 8))
        for k in range(total):
            ax = fig.add_subplot(Rows, 2, Position[k])
            # Create a circle for the center of the plot
            my_circle = plt.Circle((0, 0), 0.7, color='white')
            # plot this genome's row (was data[0], which repeated row 0)
            plt.pie(data[k], labels=names, colors=color_palette)
            p = plt.gcf()
            p.gca().add_artist(my_circle)
            plt.title(species[k])
        patches = [mpatches.Patch(color=color_palette[i], label="{:s}".format(
            longnames[i])) for i in range(len(longnames))]
        plt.legend(handles=patches, bbox_to_anchor=(1, 0.5),
                   bbox_transform=fig.transFigure, loc="center left", ncol=1)
        fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
        plt.close(fig)
def drawbarplot(df, output):
    """Draw a bar plot of secreted-protein counts per genome to a PDF."""
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        import matplotlib.pyplot as plt
        import seaborn as sns
        # num = len(df.columns) + 1
        sns.set(style="darkgrid")
        fig = plt.figure()
        # colors: generate a palette when there are more genomes than
        # preferred colors, otherwise slice the preferred list
        if len(df) > len(resources.pref_colors):
            colorplot = sns.husl_palette(len(df), l=.5).as_hex()
            colorplot = [str(x).upper() for x in colorplot]
        else:
            colorplot = resources.pref_colors[:len(df)]
        ax = sns.barplot(data=df, palette=colorplot)
        plt.xlabel('Genomes')
        plt.ylabel('Secreted Proteins')
        plt.xticks(rotation=90)
        fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
        plt.close(fig)
def distance2mds(df, distance, type, output):
    """Run NMDS on a count matrix and plot the 2-D embedding to a PDF.

    *distance* is any metric accepted by sklearn pairwise_distances;
    *type* names the domain class for the plot title.
    """
    import numpy as np
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        from sklearn.metrics.pairwise import pairwise_distances
        from sklearn.manifold import MDS
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import seaborn as sns
        # run distance metric on matrix and then plot using NMDS
        num = len(df.index)
        data = np.array(df).astype(int)
        bc_dm = pairwise_distances(data, metric=distance)
        # metric=False -> non-metric MDS on the precomputed distances
        mds = MDS(n_components=2, metric=False, max_iter=999,
                  dissimilarity='precomputed', n_init=10, verbose=0)
        result = mds.fit(bc_dm)
        coords = result.embedding_
        stress = 'stress=' + '{0:.4f}'.format(result.stress_)
        # get axis information and make square plus some padding
        xcoords = abs(maxabs(coords[:, 0])) + 0.1
        ycoords = abs(maxabs(coords[:, 1])) + 0.1
        # setup plot
        fig = plt.figure()
        # colors
        if len(df) > len(resources.pref_colors):
            colorplot = sns.husl_palette(len(df), l=.5).as_hex()
            colorplot = [str(x).upper() for x in colorplot]
        else:
            colorplot = resources.pref_colors[:len(df)]
        # one labelled point per genome
        for i in range(0, num):
            plt.plot(coords[i, 0], coords[i, 1], 'o', markersize=9,
                     color=colorplot[i], label=df.index.values[i])
        plt.xlabel('NMDS axis 1')
        plt.ylabel('NMDS axis 2')
        plt.ylim(-ycoords, ycoords)
        plt.xlim(-xcoords, xcoords)
        '''
        if num < 13: #if number too large, don't plot
        '''
        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        plt.title('NMDS analysis of '+type+' domains')
        plt.annotate(stress, xy=(1, 0), xycoords='axes fraction',
                     fontsize=12, ha='right', va='bottom')
        fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
        plt.close(fig)
def ReciprocalBlast(filelist, protortho, cpus):
    '''
    Run reciprocal diamond blastp searches to generate proteinortho input.

    For every protein FASTA in filelist, a diamond database is built in
    the protortho directory, then for each ordered pair (query, target)
    four searches are run: query-vs-target, target-vs-query, and both
    self-vs-self searches. Each step is skipped when its output already
    exists (checkannotations), so reruns resume where they left off.
    '''
    # generate dmnd databases for each input
    for x in filelist:
        base = os.path.basename(x)
        cmd = ['diamond', 'makedb', '--in', x, '--db', base+'.dmnd']
        if not checkannotations(os.path.join(protortho, base+'.dmnd')):
            runSubprocess(cmd, protortho, log)
    for p in itertools.permutations(filelist, 2):
        query = p[0]
        target = p[1]
        # query proteins vs the target database
        db = os.path.basename(target)+'.dmnd'
        outname = target+'.vs.'+query+'.bla'
        cmd = ['diamond', 'blastp', '--query', query, '--db', db, '--outfmt', '6',
               '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
        if not checkannotations(os.path.join(protortho, outname)):
            runSubprocess4(cmd, protortho, log)
        # target proteins vs the query database (the reciprocal search)
        db = os.path.basename(query)+'.dmnd'
        outname = query+'.vs.'+target+'.bla'
        cmd = ['diamond', 'blastp', '--query', target, '--db', db, '--outfmt', '6',
               '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
        if not checkannotations(os.path.join(protortho, outname)):
            runSubprocess4(cmd, protortho, log)
        # target self-vs-self
        db = os.path.basename(target)+'.dmnd'
        outname = target+'.vs.'+target+'.bla'
        cmd = ['diamond', 'blastp', '--query', target, '--db', db, '--outfmt', '6',
               '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
        if not checkannotations(os.path.join(protortho, outname)):
            runSubprocess4(cmd, protortho, log)
        # query self-vs-self
        db = os.path.basename(query)+'.dmnd'
        outname = query+'.vs.'+query+'.bla'
        cmd = ['diamond', 'blastp', '--query', query, '--db', db, '--outfmt', '6',
               '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
        if not checkannotations(os.path.join(protortho, outname)):
            runSubprocess4(cmd, protortho, log)
def singletons(poff, name):
    """Count singleton genes for species *name* in a proteinortho .poff table.

    A singleton is a row whose species-count column (column 0) is exactly
    '1' while this species' own column is not '*', i.e. the gene occurs
    only in this species. Spaces in *name* are converted to underscores
    to match the header. The file-handle variable no longer shadows the
    ``input`` builtin.

    Raises ValueError if *name* is not present in the header.
    """
    count = 0
    with open(poff, 'r') as infile:
        for line in infile:
            line = line.replace('\n', '')
            if line.startswith('#'):
                # header: first three columns are counts/metadata, the
                # species columns start at index 3
                species = line.split('\t')[3:]
                i = species.index(name.replace(' ', '_')) + 3
                continue
            col = line.split('\t')
            if col[0] == '1' and col[i] != '*':
                count += 1
    return count
def orthologs(poff, name):
    """Count ortholog genes for species *name* in a proteinortho .poff table.

    Counts rows where the species-count column (column 0) is not '1' and
    this species' own column is not '*', i.e. the gene is shared with at
    least one other species. Spaces in *name* are converted to underscores
    to match the header. The file-handle variable no longer shadows the
    ``input`` builtin.

    Raises ValueError if *name* is not present in the header.
    """
    count = 0
    with open(poff, 'r') as infile:
        for line in infile:
            line = line.replace('\n', '')
            if line.startswith('#'):
                # header: species columns start at index 3
                species = line.split('\t')[3:]
                i = species.index(name.replace(' ', '_')) + 3
                continue
            col = line.split('\t')
            if col[0] != '1' and col[i] != '*':
                count += 1
    return count
def iprTSV2dict(file, terms):
    """Load an InterPro entry.list TSV into {entry_accession: entry_name}.

    Skips the 'ENTRY_AC' header row and blank lines; the first occurrence
    of an accession wins. *terms* is accepted for interface compatibility
    but not consulted.
    """
    iprDict = {}
    with io.open(file, 'r', encoding="utf-8") as infile:
        for raw in infile:
            # skip the column header and empty lines
            if raw.startswith('ENTRY_AC') or raw.startswith('\n'):
                continue
            entry, entry_type, name = raw.rstrip().split('\t')
            iprDict.setdefault(entry, name)
    return iprDict
def iprxml2dict(xmlfile, terms):
    """Parse interpro.xml into {IPR id: name} for the ids listed in *terms*.

    Fixes for Python 3.9+: ``xml.etree.cElementTree`` and
    ``Element.getchildren()`` were removed, so the plain ElementTree
    module is used and the element is iterated directly. Also, elements
    are now cleared only after a complete <interpro> entry has been
    processed — the old unconditional ``elem.clear()`` wiped the child
    <name> text before its parent was visited, storing None descriptions.
    """
    import xml.etree.ElementTree as cElementTree
    iprDict = {}
    # iterparse (end events) keeps memory flat on the large InterPro XML
    for event, elem in cElementTree.iterparse(xmlfile):
        if elem.tag == 'interpro':
            ID = elem.attrib['id']
            if ID in terms:
                for x in elem:
                    if x.tag == 'name':
                        iprDict[ID] = x.text
            elem.clear()
    return iprDict
def pfam2dict(file):
    """Parse a PFAM data file into {accession: description}.

    Expects tab-delimited rows whose first column is the accession
    (starting with 'PF') and whose fifth column is the description;
    any other rows are ignored.
    """
    pfamDict = {}
    with open(file, 'r') as infile:
        for row in infile:
            try:
                row = row.decode('utf-8').rstrip()
            except AttributeError:
                # already a str (text mode / Python 3)
                row = row.rstrip()
            if not row.startswith('PF'):  # just check to be sure
                continue
            cols = row.split('\t')
            pfamDict[cols[0]] = cols[4]
    return pfamDict
def flipKeyValues(input):
    """Invert a {key: iterable-of-values} mapping into {value: key}.

    When a value appears under several keys, the first key encountered
    (dict insertion order) wins.
    """
    flipped = {}
    for key, values in input.items():
        for value in values:
            flipped.setdefault(value, key)
    return flipped
def dictFlip(input):
    """Flip a list of {annotation: [gene_ids]} dicts into {gene_id: [annotations]}.

    Annotations are appended in natural-sort order of their keys within
    each input dictionary.
    """
    outDict = {}
    for mapping in input:
        for annot, gene_ids in natsorted(iter(mapping.items())):
            for gid in gene_ids:
                outDict.setdefault(gid, []).append(annot)
    return outDict
def busco_dictFlip(input):
    """Flip each {busco_id: [gene_ids]} dict into {gene_id: [busco_ids]}.

    Returns one flipped dictionary per input dictionary, preserving the
    input order; keys are visited in natural-sort order.
    """
    output = []
    for mapping in input:
        flipped = {}
        for busco_id, gene_ids in natsorted(iter(mapping.items())):
            for gid in gene_ids:
                flipped.setdefault(gid, []).append(busco_id)
        output.append(flipped)
    return output
def dictFlipLookup(input, lookup):
    """Flip a list of {term: [gene_ids]} dicts to {gene_id: ['term: description', ...]}.

    Descriptions come from *lookup*; terms missing there are labelled
    'No description'.

    Fix: removed the Python 2 leftover ``str(result.encode('utf-8'))``,
    which on Python 3 wraps every entry in a bytes repr (``"b'...'"``),
    corrupting the output strings.
    """
    outDict = {}
    for mapping in input:
        for term, gene_ids in natsorted(iter(mapping.items())):
            # look up a human readable description for the term
            if lookup.get(term) is not None:
                result = term+': '+lookup.get(term)
            else:
                result = term+': No description'
            for gid in gene_ids:
                outDict.setdefault(gid, []).append(result)
    return outDict
def copyDirectory(src, dest, overwrite=False):
    """Recursively copy *src* to *dest*.

    When *overwrite* is True an existing *dest* directory is removed
    first. Copy failures are logged at debug level rather than raised.
    """
    import shutil
    if overwrite and os.path.isdir(dest):
        shutil.rmtree(dest)
    try:
        shutil.copytree(src, dest)
    except shutil.Error as e:
        # directories are the same
        log.debug('Directory not copied. Error: %s' % e)
    except OSError as e:
        # any error saying that the directory doesn't exist
        log.debug('Directory not copied. Error: %s' % e)
def download_buscos(name, Database):
    """Download and install a BUSCO lineage database into Database/<name>.

    *name* must be a key of resources.busco_links; the tarball is fetched
    with wget, extracted, copied into place and cleaned up. Exits the
    program with an error for unknown database names.
    """
    if name in resources.busco_links:
        log.info("Downloading %s busco models" % name)
        address = resources.busco_links.get(name)
        filename = address.split('/')[-1]
        # the fungiv1 tarball extracts to a folder named 'fungi'
        if name == 'fungiv1':
            foldername = 'fungi'
        else:
            foldername = filename.split('.')[0]
        cmd = ['wget', '-c', '--tries=0', '--read-timeout=20', address]
        runSubprocess(cmd, '.', log)
        cmd = ['tar', '-zxf', filename]
        runSubprocess(cmd, '.', log)
        copyDirectory(os.path.abspath(foldername),
                      os.path.join(Database, name))
        # remove the extracted folder and the downloaded tarball
        shutil.rmtree(foldername)
        os.remove(filename)
    else:
        log.error("%s not a valid BUSCO database" % name)
        validBusco = list(resources.busco_links.keys())
        log.error("Valid BUSCO DBs: %s" % (', '.join(validBusco)))
        sys.exit(1)
def fasta2dict(Fasta):
    """Read a FASTA file into {record_id: sequence_string}.

    Duplicate record ids trigger a warning and keep the first sequence.
    """
    records = dict()
    with open(Fasta, 'r') as gbk:
        for rec in SeqIO.parse(gbk, 'fasta'):
            if rec.id in records:
                print("WARNING - duplicate key!")
                continue
            records[rec.id] = str(rec.seq)
    return records
def ortho2phylogeny(folder, df, num, dict, cpus, bootstrap, tmpdir, outgroup, sp_file, name, sc_buscos, ml_method):
    """Infer a maximum-likelihood phylogeny from single-copy BUSCO orthologs.

    Subsamples up to *num* single-copy orthologs from *df* (one column per
    species), concatenates the proteins (optionally prefixed with an
    outgroup from *sp_file*), aligns with mafft, trims with trimal, and
    builds a tree with raxml or iqtree depending on *ml_method*, saving
    ML.phylogeny.pdf into *tmpdir*.
    """
    import pylab
    from Bio import Phylo
    from Bio.Phylo.Consensus import get_support
    if outgroup:
        # load species fasta ids into dictionary
        OutGroup = {}
        with open(sp_file, 'r') as sp:
            for rec in SeqIO.parse(sp, 'fasta'):
                OutGroup[rec.id] = rec.seq
    # single copy orthologs are in a dataframe, count and then randomly select
    num_species = len(df.columns)
    species = df.columns.values
    if len(df) == 0:
        log.error("0 single copy BUSCO orthologs found, skipping phylogeny")
        return
    if len(df) < int(num):
        number = len(df)
        log.info(
            "Found %i single copy BUSCO orthologs, will use all to infer phylogeny" % (len(df)))
        subsampled = df
    else:
        number = int(num)
        log.info("Found %i single copy BUSCO orthologs, will randomly select %i to infer phylogeny" % (
            len(df), number))
        subsampled = df.sample(n=number)
    if outgroup: # passed a list to extract from parent script
        busco_list = sc_buscos
    # since you checked for BUSCO id across all previously, loop through first set and print BUSCOs to file
    with open(os.path.join(tmpdir, 'phylogeny.buscos.used.txt'), 'w') as busco_out:
        with open(os.path.join(tmpdir, 'phylogeny.concat.fa'), 'w') as proteinout:
            if outgroup:
                # the outgroup becomes the first concatenated record
                proteinout.write(">%s\n" % name)
                for y in busco_list:
                    proteinout.write("%s" % (OutGroup.get(y)))
                proteinout.write('\n')
            for i in range(0, num_species):
                proteinout.write(">%s\n" % species[i])
                proteins = fasta2dict(os.path.join(folder, species[i]+'.faa'))
                for row in subsampled[species[i]].items():
                    proteinout.write("%s" % proteins.get(row[1]))
                    busco_out.write("%s\t%s\n" % (dict[i].get(row[1]), row[1]))
                proteinout.write('\n')
    cmd = ['mafft', '--anysymbol', '--quiet', os.path.join(tmpdir, 'phylogeny.concat.fa')]
    runSubprocess2(cmd, '.', log, os.path.join(tmpdir, 'phylogeny.mafft.fa'))
    cmd = ['trimal', '-in', os.path.join(tmpdir, 'phylogeny.mafft.fa'), '-out', os.path.join(
        tmpdir, 'phylogeny.trimal.phylip'), '-automated1', '-phylip']
    runSubprocess(cmd, '.', log)
    if ml_method == 'raxml':
        cmd = ['raxmlHPC-PTHREADS', '-T', str(cpus), '-f', 'a', '-m', 'PROTGAMMAAUTO', '-p', '12345',
               '-x', '12345', '-#', str(bootstrap), '-s', 'phylogeny.trimal.phylip', '-n', 'nwk']
        if outgroup:
            cmd = cmd + ['-o', name]
        treefile = os.path.join(tmpdir, 'RAxML_bootstrap.nwk')
        runSubprocess(cmd, tmpdir, log)
        # parse with biopython and draw
        trees = list(Phylo.parse(treefile, 'newick'))
        best = Phylo.read(os.path.join(tmpdir, 'RAxML_bestTree.nwk'), 'newick')
        support_tree = get_support(best, trees)
        Phylo.draw(support_tree, do_show=False)
        pylab.axis('off')
        pylab.savefig(os.path.join(tmpdir, 'ML.phylogeny.pdf'),
                      format='pdf', bbox_inches='tight', dpi=1000)
    else: # run iqtree as faster and better than raxml in initial testing
        cmd = ['iqtree', '-s', 'phylogeny.trimal.phylip', '-nt', 'AUTO',
               '-ntmax', str(cpus), '-seed', '12345', '-bb', '1000']
        if outgroup:
            cmd = cmd + ['-o', name]
        runSubprocess(cmd, tmpdir, log)
        treefile = os.path.join(tmpdir, 'phylogeny.trimal.phylip.treefile')
        best = Phylo.read(treefile, 'newick')
        Phylo.draw(best, do_show=False)
        pylab.axis('off')
        pylab.savefig(os.path.join(tmpdir, 'ML.phylogeny.pdf'),
                      format='pdf', bbox_inches='tight', dpi=1000)
def getTrainResults(input):
    """Parse an Augustus evaluation report into six accuracy floats.

    Returns (nucleotide[1], nucleotide[2], exon[6], exon[7], gene[6],
    gene[7]) taken from the '|'-delimited summary rows after stripping
    all spaces. Raises NameError if a summary row is missing.
    """
    with open(input, 'r') as train:
        for raw in train:
            try:
                raw = raw.decode('utf-8')
            except AttributeError:
                pass  # already str on Python 3
            raw = raw.rstrip()
            if raw.startswith('nucleotide level'):
                values1 = raw.replace(' ', '').split('|')  # cols [1] and [2]
            if raw.startswith('exon level'):
                values2 = raw.replace(' ', '').split('|')  # cols [6] and [7]
            if raw.startswith('gene level'):
                values3 = raw.replace(' ', '').split('|')  # cols [6] and [7]
    return (float(values1[1]), float(values1[2]),
            float(values2[6]), float(values2[7]),
            float(values3[6]), float(values3[7]))
def count_multi_CDS_genes(input, filterlist):
    """Count multi-CDS genes in a funannotate annotation dictionary.

    Returns a 4-tuple: (total genes, genes with >1 CDS, len(filterlist),
    multi-CDS genes that are also in filterlist).
    """
    multi = 0
    multi_in_list = 0
    for gene_id, model in natsorted(list(input.items())):
        if len(model['CDS'][0]) > 1:
            multi += 1
            if gene_id in filterlist:
                multi_in_list += 1
    return len(input), multi, len(filterlist), multi_in_list
def selectTrainingModels(input, fasta, genemark_gtf, output):
    """Filter PASA gene models into a non-overlapping Augustus training set.

    Loads models from the GFF3 *input* / *fasta* pair, optionally keeps
    only models confirmed by the GeneMark GTF and/or multi-CDS models
    (each filter is enabled only when >=200 models would survive it),
    removes models that are >=80% identical by a pairwise diamond search,
    resolves overlapping loci by keeping the model with the most CDS,
    renumbers the survivors as g_1..g_N and writes them with
    dict2gff3noUTRs to *output*. Returns the number of models written.
    """
    from collections import OrderedDict
    '''
    function to take a GFF3 file and filter the gene models so they are non-overalpping
    also sort the models by number of exons, the more the better.
    '''
    def _sortDict(d):
        # sort key: number of CDS (exons) in the first transcript
        return (len(d[1]['CDS'][0]))
    # load gene models into funannotate structured dictionary
    gene_inter = defaultdict(InterLap)
    Genes = {}
    Genes = gff2dict(input, fasta, Genes)
    # add to InterLap output proteins
    proteins = 'augustus.training.proteins.fa'
    ignoreList = []
    keeperList = getGenesGTF(genemark_gtf)
    # check number of multi-cds genes
    countGenes, countGenesCDS, countKeeper, countKeeperCDS = count_multi_CDS_genes(
        Genes, keeperList)
    log.debug('{:,} PASA genes; {:,} have multi-CDS; {:,} from filterGeneMark; {:,} have multi-CDS'.format(
        countGenes, countGenesCDS, countKeeper, countKeeperCDS))
    multiCDScheck, keeperCheck = (False,)*2
    # only apply each filter if enough models would remain (>= 200)
    if countKeeper >= 200:
        keeperCheck = True
    if keeperCheck:
        if countKeeperCDS >= 200:
            multiCDScheck = True
    else:
        if countGenesCDS >= 200:
            multiCDScheck = True
    log.debug('filterGeneMark GTF filter set to {:}; require genes with multiple CDS set to {:}'.format(
        keeperCheck, multiCDScheck))
    with open(proteins, 'w') as protout:
        for k, v in natsorted(list(Genes.items())):
            if keeperCheck and not k in keeperList:
                ignoreList.append(k)
                continue
            if multiCDScheck and len(v['CDS'][0]) < 2:
                ignoreList.append(k)
                continue
            # add to interlap object and write protein out
            gene_inter[v['contig']].add(
                (v['location'][0], v['location'][1], v['strand'], k, len(v['CDS'][0])))
            protout.write('>%s___%i\n%s\n' %
                          (k, len(v['CDS'][0]), v['protein'][0]))
    # make sure gene models are unique, so do pairwise diamond search @ 80% identity
    cmd = ['diamond', 'makedb', '--in',
           'augustus.training.proteins.fa', '--db', 'aug_training.dmnd']
    runSubprocess4(cmd, '.', log)
    cmd = ['diamond', 'blastp', '--query', 'augustus.training.proteins.fa', '--db', 'aug_training.dmnd', '--more-sensitive', '-o',
           'aug.blast.txt', '-f', '6', 'qseqid', 'sseqid', 'pident', '--query-cover', '80', '--subject-cover', '80', '--id', '80', '--no-self-hits']
    runSubprocess4(cmd, '.', log)
    blast_results = []
    with open('aug.blast.txt', 'r') as blast:
        for line in blast:
            line = line.rstrip()
            # '___' separates gene id from its CDS count in the fasta headers
            line = line.replace('___', '\t')
            blast_results.append(line.split('\t'))
    # prefer models with more CDS when discarding near-identical pairs
    sortedBlast = natsorted(
        blast_results, key=lambda x: int(x[1]), reverse=True)
    blastignore = []
    for hit in sortedBlast:
        if hit[0] in blastignore or hit[2] in blastignore:
            continue
        if int(hit[1]) >= int(hit[3]):
            if not hit[2] in blastignore:
                blastignore.append(hit[2])
        else:
            if not hit[0] in blastignore:
                blastignore.append(hit[0])
    log.debug('{:,} models fail blast identity threshold'.format(
        len(blastignore)))
    SafeRemove('augustus.training.proteins.fa')
    SafeRemove('aug_training.dmnd')
    SafeRemove('aug.blast.txt')
    # now return cleaned genemark GTF file
    finalIgnoreList = []
    for x in ignoreList:
        if not x in finalIgnoreList:
            finalIgnoreList.append(x)
    for y in blastignore:
        if not y in finalIgnoreList:
            finalIgnoreList.append(y)
    log.debug('{:,} models will be ignored for training Augustus'.format(
        len(finalIgnoreList)))
    GenesPass = {}
    for k, v in natsorted(list(Genes.items())):
        if not k in finalIgnoreList and not k in GenesPass:
            loc = sorted([v['location'][0], v['location'][1]])
            if loc in gene_inter[v['contig']]:
                # overlapping locus: keep the overlapping model with the
                # highest CDS count that is not itself ignored
                hits = list(gene_inter[v['contig']].find(loc))
                sortedHits = sorted(
                    hits, key=lambda x: int(x[4]), reverse=True)
                validHits = []
                for y in sortedHits:
                    if not y[3] in finalIgnoreList and y[3] != k:
                        validHits.append(y)
                if len(validHits) > 0:
                    if not validHits[0][3] in GenesPass:
                        GenesPass[validHits[0][3]] = Genes.get(validHits[0][3])
                else:
                    GenesPass[k] = v
    # now sort dictionary number of exons
    sGenes = sorted(iter(GenesPass.items()), key=_sortDict, reverse=True)
    sortedGenes = OrderedDict(sGenes)
    log.info("{:,} of {:,} models pass training parameters".format(
        len(sortedGenes), len(Genes)))
    # x = dict(itertools.islice(sortedGenes.items(), 0, 2500))
    final = {}
    for i, (k, v) in enumerate(natsorted(list(sortedGenes.items()))):
        v['ids'] = ['g_'+str(i+1)+'-T1']
        final['g_'+str(i+1)] = v
    dict2gff3noUTRs(final, output)
    return len(final)
def getGenesGTF(input):
    """Return the unique gene_id values from a GTF file, in file order.

    Fix: the original guard ``if not line.startswith('\\n') or not
    line.startswith('#')`` was always True (a line cannot start with
    both), so blank and comment lines were never skipped; the intended
    skip is implemented with an early continue.
    """
    genes = []
    with open(input, 'r') as infile:
        for line in infile:
            # skip blank and comment lines
            if line.startswith('\n') or line.startswith('#'):
                continue
            line = line.rstrip()
            # attribute column is the last tab-delimited field
            info = line.split('\t')[-1]
            ID = None
            for attribute in info.split(';'):
                if attribute.startswith('gene_id'):
                    ID = attribute.replace('gene_id ', '').replace('"', '')
            if ID and ID not in genes:
                genes.append(ID)
    return genes
def trainAugustus(AUGUSTUS_BASE, train_species, trainingset,
                  genome, outdir, cpus, num_training, optimize,
                  config_path):
    """Train an Augustus species model from a curated training set.

    Creates the species profile if missing, runs etraining, withholds
    *num_training* models for evaluation, reports initial accuracy, and
    optionally runs optimize_augustus.pl followed by re-training and a
    final evaluation. Exits the program if the test set was not created.
    """
    # prefer the scripts on PATH, falling back to the AUGUSTUS install
    if which('randomSplit.pl'):
        RANDOMSPLIT = 'randomSplit.pl'
    else:
        RANDOMSPLIT = os.path.join(AUGUSTUS_BASE, 'scripts', 'randomSplit.pl')
    if which('optimize_augustus.pl'):
        OPTIMIZE = 'optimize_augustus.pl'
    else:
        OPTIMIZE = os.path.join(
            AUGUSTUS_BASE, 'scripts', 'optimize_augustus.pl')
    if which('new_species.pl'):
        NEW_SPECIES = 'new_species.pl'
    else:
        NEW_SPECIES = os.path.join(AUGUSTUS_BASE, 'scripts', 'new_species.pl')
    aug_cpus = '--cpus='+str(cpus)
    species = '--species='+train_species
    aug_log = os.path.join(outdir, 'logfiles', 'augustus_training.log')
    TrainSet = os.path.abspath(trainingset)
    onlytrain = '--onlytrain='+TrainSet+'.train'
    testtrain = TrainSet+'.test'
    trainingdir = os.path.join(
        outdir, 'predict_misc', 'tmp_opt_'+train_species)
    # NOTE: os.environ is a live mapping, so this also mutates the
    # current process environment, not just the child processes'
    myENV = os.environ
    myENV['AUGUSTUS_CONFIG_PATH'] = config_path
    with open(aug_log, 'w') as logfile:
        if not CheckAugustusSpecies(train_species):
            subprocess.call([NEW_SPECIES, '--AUGUSTUS_CONFIG_PATH={:}'.format(
                config_path), species], stdout=logfile, stderr=logfile)
        # run etraining again to only use best models from EVM for training
        p1 = subprocess.Popen(['etraining', species, TrainSet],
                              cwd=os.path.join(outdir, 'predict_misc'),
                              stderr=logfile, stdout=logfile, env=dict(myENV))
        p1.communicate()
        # split off num_training models for testing purposes
        subprocess.call([RANDOMSPLIT, TrainSet, str(num_training)],
                        cwd=os.path.join(outdir, 'predict_misc'))
        if os.path.isfile(os.path.join(outdir, 'predict_misc', TrainSet+'.train')):
            with open(os.path.join(outdir, 'predict_misc', 'augustus.initial.training.txt'), 'w') as initialtraining:
                subprocess.call(['augustus', '--AUGUSTUS_CONFIG_PATH={:}'.format(
                    config_path), species, TrainSet+'.test'], stdout=initialtraining, cwd=os.path.join(outdir, 'predict_misc'))
            train_results = getTrainResults(os.path.join(
                outdir, 'predict_misc', 'augustus.initial.training.txt'))
            trainTable = [['Feature', 'Specificity', 'Sensitivity'],
                          ['nucleotides', '{:.1%}'.format(
                              train_results[0]), '{:.1%}'.format(train_results[1])],
                          ['exons', '{:.1%}'.format(
                              train_results[2]), '{:.1%}'.format(train_results[3])],
                          ['genes', '{:.1%}'.format(
                              train_results[4]), '{:.1%}'.format(train_results[5])]
                          ]
            log.info('Augustus initial training results:')
            train_table = print_table(trainTable, return_str=True)
            sys.stderr.write(train_table)
            if optimize:
                # now run optimization
                subprocess.call([OPTIMIZE, '--AUGUSTUS_CONFIG_PATH={:}'.format(config_path), species, aug_cpus,
                                 onlytrain, testtrain], cwd=os.path.join(outdir, 'predict_misc'), stderr=logfile, stdout=logfile)
                # run etraining again
                p2 = subprocess.Popen(['etraining', species, TrainSet], cwd=os.path.join(
                    outdir, 'predict_misc'), stderr=logfile, stdout=logfile, env=dict(myENV))
                p2.communicate()
                with open(os.path.join(outdir, 'predict_misc', 'augustus.final.training.txt'), 'w') as finaltraining:
                    subprocess.call(['augustus', '--AUGUSTUS_CONFIG_PATH={:}'.format(
                        config_path), species, TrainSet+'.test'], stdout=finaltraining, cwd=os.path.join(outdir, 'predict_misc'))
                train_results = getTrainResults(os.path.join(
                    outdir, 'predict_misc', 'augustus.final.training.txt'))
                trainTable = [['Feature', 'Specificity', 'Sensitivity'],
                              ['nucleotides', '{:.1%}'.format(
                                  train_results[0]), '{:.1%}'.format(train_results[1])],
                              ['exons', '{:.1%}'.format(
                                  train_results[2]), '{:.1%}'.format(train_results[3])],
                              ['genes', '{:.1%}'.format(
                                  train_results[4]), '{:.1%}'.format(train_results[5])]
                              ]
                log.info('Augustus optimized training results:')
                train_table = print_table(trainTable, return_str=True)
                sys.stderr.write(train_table)
                # clean up tmp folder
                shutil.rmtree(trainingdir)
            else:
                if train_results[4] < 0.50:
                    log.info(
                        "Accuracy seems low, you can try to improve by passing the --optimize_augustus option.")
        else:
            log.error("AUGUSTUS training failed, check logfiles")
            sys.exit(1)
def sortList(input, col):
    """Natural-sort a list of rows by the value in column *col*."""
    return natsorted(input, key=lambda row: row[col])
def sortHints(input, output):
    """Sort an Augustus hints file and write the result to *output*.

    Replicates the shell pipeline
    ``sort -n -k 4,4 | sort -s -n -k 5,5 | sort -s -n -k 3,3 | sort -s -k 1,1``
    via successive stable natural sorts on columns 3, 4, 2 and 0.
    """
    rows = []
    with open(input, 'r') as infile:
        for line in infile:
            rows.append(line.rstrip().split('\t'))
    # successive stable sorts: each later key dominates, earlier keys break ties
    for column in (3, 4, 2, 0):
        rows = sortList(rows, column)
    with open(output, 'w') as sort_out:
        for row in rows:
            sort_out.write('%s\n' % '\t'.join(row))
def checkgoatools(input):
    """Inspect a goatools enrichment output file.

    Returns (has_results, header_index): has_results is True if any line
    starts with 'GO:', and header_index is the 0-based index of the last
    header line (one starting with 'GO\\tNS' or '#').
    """
    result = False
    headercount = 0
    with open(input, 'r') as goatools:
        for idx, line in enumerate(goatools):
            if line.startswith('GO\tNS') or line.startswith('#'):
                headercount = idx
            if line.startswith('GO:'):
                result = True
    return (result, headercount)
def translatemRNA(input, output):
    """Translate mRNA FASTA records to protein, honoring codon_start tags.

    Reads FASTA from *input*; a ``codon_start=N`` token in a header sets
    the reading-frame offset (default 1). Writes protein FASTA to *output*.
    """
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    with open(output, 'w') as outfile:
        with open(input, 'r') as fasta:
            for header, seq in SimpleFastaParser(fasta):
                # default frame unless the header carries codon_start=N
                codon_start = 1
                for token in header.split(' '):
                    if token.startswith('codon_start='):
                        codon_start = int(
                            token.replace('codon_start=', '').rstrip())
                # transcripts should already be in proper orientation
                protSeq = translate(seq, '+', codon_start-1)
                outfile.write('>{:}\n{:}\n'.format(header, protSeq))
def alignMAFFT(input, output):
    """Align a FASTA file with mafft, writing the alignment to *output*.

    mafft's stderr is discarded; stdout (the alignment) goes to *output*.

    Fix: the devnull handle was previously opened and never closed
    (resource leak); it is now managed with a context manager.
    """
    with open(os.devnull, 'w') as FNULL:
        with open(output, 'w') as outfile:
            subprocess.call(['mafft', '--anysymbol', '--quiet', input],
                            stderr=FNULL, stdout=outfile)
def align2Codon(alignment, transcripts, output):
    """Convert a protein alignment to a codon alignment with pal2nal.pl.

    On failure (empty output file) the output is removed and a debug
    message is logged.

    Fix: the devnull handle was previously opened and never closed
    (resource leak); it is now managed with a context manager.
    """
    with open(os.devnull, 'w') as FNULL:
        with open(output, 'w') as outfile:
            subprocess.call(['perl', os.path.join(parentdir, 'aux_scripts', 'pal2nal.pl'),
                             alignment, transcripts, '-output', 'fasta'], stderr=FNULL, stdout=outfile)
    if getSize(output) < 1:
        os.remove(output)
        log.debug('dNdS Error: pal2nal failed for %s' % alignment)
def counttaxa(input):
    """Count taxa in a newick tree file: commas on the first line, plus one."""
    with open(input, 'r') as tree:
        first_line = tree.readline()
    return first_line.count(',') + 1
def getMatchFileName(pattern, directory):
    """Return the full path of a file in *directory* whose name contains *pattern*.

    When several names match, the last one yielded by os.listdir wins;
    returns None if nothing matches.
    """
    result = None
    for entry in os.listdir(directory):
        if pattern in entry:
            result = os.path.join(directory, entry)
    return result
def drawPhyMLtree(fasta, tree):
    """Infer a phylogenetic tree from a codon alignment with phyml.

    Converts *fasta* to phylip with trimal, runs phyml, verifies the tree
    holds the same number of taxa as the input (retrying once if not),
    renames the result to *tree* and removes intermediate files.

    Fixes: the devnull handle is now closed via a context manager
    (previously leaked), and the local variable no longer shadows the
    ``dir`` builtin.
    """
    fc = countfasta(fasta)
    # need to convert to phylip format
    base = os.path.basename(fasta).split('.')[0]
    workdir = os.path.dirname(fasta)
    tmp1 = os.path.join(workdir, base+'.draw2tree.phylip')
    with open(os.devnull, 'w') as FNULL:
        subprocess.call(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])
        # draw tree
        subprocess.call(['phyml', '-i', tmp1], stdout=FNULL, stderr=FNULL)
        tmp2 = getMatchFileName(base+'.draw2tree.phylip_phyml_tree', workdir)
        # check that num taxa in tree = input
        tc = counttaxa(tmp2)
        if tc != fc:  # something failed...
            log.debug('dNdS Error: phyml tree failed for %s' % fasta)
            # retry
            subprocess.call(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])
            subprocess.call(['phyml', '-i', tmp1], stdout=FNULL, stderr=FNULL)
    # rename and clean
    os.rename(tmp2, tree)
    SafeRemove(tmp1)
    stats = getMatchFileName(base+'.draw2tree.phylip_phyml_stats', workdir)
    SafeRemove(stats)
def simplestTreeEver(fasta, tree):
    """Write a trivial two-taxon newick tree from the first two FASTA ids."""
    with open(tree, 'w') as outfile:
        with open(fasta, 'r') as handle:
            ids = [rec.id for rec in SeqIO.parse(handle, 'fasta')]
        outfile.write('(%s,%s);' % (ids[0], ids[1]))
def rundNdSexhaustive(folder):
    """Run the exhaustive dN/dS pipeline for one ortholog-group folder.

    Translates transcripts, aligns with mafft, converts to a codon
    alignment, builds a tree (phyml, or a trivial tree for 2 sequences),
    then runs codeml via ete3 with models M0, M1, M2, M7 and M8. Skips
    work whose final log already exists and moves intermediates into the
    group folder when done.
    """
    # setup intermediate files
    tmpdir = os.path.dirname(folder)
    name = os.path.basename(folder)
    transcripts = os.path.join(tmpdir, name+'.transcripts.fa')
    prots = os.path.join(tmpdir, name+'.proteins.fa')
    aln = os.path.join(tmpdir, name+'.aln')
    codon = os.path.join(tmpdir, name+'.codon.aln')
    tree = os.path.join(tmpdir, name+'.tree')
    # NOTE: this local shadows the module-level `log` logger inside this function
    log = os.path.join(tmpdir, name+'.log')
    finallog = os.path.join(tmpdir, name, name+'.log')
    if not checkannotations(finallog):
        num_seqs = countfasta(transcripts)
        # Translate to protein space
        translatemRNA(transcripts, prots)
        # align protein sequences
        alignMAFFT(prots, aln)
        # convert to codon alignment
        align2Codon(aln, transcripts, codon)
        if checkannotations(codon):
            if num_seqs > 2:
                # now generate a tree using phyml
                drawPhyMLtree(codon, tree)
            else:
                simplestTreeEver(transcripts, tree)
            # now run codeml through ete3
            etecmd = ['ete3', 'evol', '--alg', os.path.abspath(codon), '-t', os.path.abspath(
                tree), '--models', 'M0', 'M1', 'M2', 'M7', 'M8', '-o', name, '--clear_all', '--codeml_param', 'cleandata,1']
            with open(log, 'w') as logfile:
                logfile.write('\n%s\n' % ' '.join(etecmd))
                subprocess.call(etecmd, cwd=tmpdir,
                                stdout=logfile, stderr=logfile)
    # clean up: move this group's intermediates into its own folder
    for file in os.listdir(tmpdir):
        if file.startswith(name+'.'):
            os.rename(os.path.join(tmpdir, file),
                      os.path.join(tmpdir, name, file))
def rundNdSestimate(folder):
    """Estimate dN/dS (omega) for one ortholog cluster via ete3/codeml M0.

    folder -- per-cluster output directory; intermediates are written next
    to it (in the parent dir), prefixed with the cluster name, and swept
    into the cluster folder at the end.  Side effects only.
    """
    # derive the per-cluster intermediate file paths
    workdir = os.path.dirname(folder)
    cluster = os.path.basename(folder)
    transcripts = os.path.join(workdir, cluster+'.transcripts.fa')
    prots = os.path.join(workdir, cluster+'.proteins.fa')
    aln = os.path.join(workdir, cluster+'.aln')
    codon = os.path.join(workdir, cluster+'.codon.aln')
    tree = os.path.join(workdir, cluster+'.tree')
    runlog = os.path.join(workdir, cluster+'.log')
    finallog = os.path.join(workdir, cluster, cluster+'.log')
    if not checkannotations(finallog):  # skip clusters that already finished
        num_seqs = countfasta(transcripts)
        translatemRNA(transcripts, prots)     # transcripts -> protein space
        alignMAFFT(prots, aln)                # align the proteins
        align2Codon(aln, transcripts, codon)  # back-translate to codon alignment
        if checkannotations(codon):
            # need a tree: phyml for >2 taxa, otherwise the trivial 2-taxon one
            if num_seqs > 2:
                drawPhyMLtree(codon, tree)
            else:
                simplestTreeEver(transcripts, tree)
            # run codeml (model M0 only) through ete3
            etecmd = ['ete3', 'evol', '--alg', os.path.abspath(codon),
                      '-t', os.path.abspath(tree), '--models', 'M0',
                      '-o', cluster, '--clear_all',
                      '--codeml_param', 'cleandata,1']
            with open(runlog, 'w') as logfile:
                logfile.write('\n%s\n' % ' '.join(etecmd))
                subprocess.call(etecmd, cwd=workdir,
                                stdout=logfile, stderr=logfile)
    # always sweep cluster-prefixed intermediates into the cluster folder
    for entry in os.listdir(workdir):
        if entry.startswith(cluster+'.'):
            os.rename(os.path.join(workdir, entry),
                      os.path.join(workdir, cluster, entry))
def get_subdirs(a_dir):
    """Return the full paths of the immediate subdirectories of *a_dir*."""
    subdirs = []
    for entry in os.listdir(a_dir):
        full = os.path.join(a_dir, entry)
        if os.path.isdir(full):
            subdirs.append(full)
    return subdirs
def get_subdirs2(a_dir):
    """Return the names (not paths) of the immediate subdirectories of *a_dir*."""
    names = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            names.append(entry)
    return names
def parsedNdS(folder):
    """Collect codeml results for every cluster subfolder of *folder*.

    Returns {cluster: (dNdS, M1vM2_pvalue, M7vM8_pvalue)}; each field is
    'NA' when the corresponding value is not found in the cluster log.
    """
    results = {}
    for cluster in get_subdirs2(folder):
        clusterlog = os.path.join(folder, cluster, cluster+'.log')
        # defaults when the log is missing or a value never appears
        dnds, m1m2p, m7m8p = 'NA', 'NA', 'NA'
        if os.path.isfile(clusterlog):
            with open(clusterlog, 'r') as fh:
                for line in fh:
                    line = line.strip()
                    if 'M7' in line and 'M8' in line and '|' in line:
                        raw = line.split('|')[-1].strip().replace('*', '')
                        m7m8p = '{0:.5f}'.format(float(raw))
                    elif 'M1' in line and 'M2' in line and '|' in line:
                        raw = line.split('|')[-1].lstrip().replace('*', '')
                        m1m2p = '{0:.5f}'.format(float(raw))
                    elif line.startswith('- Model M0'):
                        # omega is reported on the line after the M0 header
                        dnds = next(fh).split('tree: ')[1].rstrip()
        results[cluster] = (dnds, m1m2p, m7m8p)
    return results
def chunkIt(seq, num):
    """Split *seq* into *num* consecutive, roughly equal-sized slices.

    Slice boundaries come from truncating a float cursor, so sizes may
    differ by one; an empty *seq* yields an empty list.
    """
    step = len(seq) / float(num)
    pieces = []
    cursor = 0.0
    while cursor < len(seq):
        pieces.append(seq[int(cursor):int(cursor + step)])
        cursor += step
    return pieces
def getBlastDBinfo(input):
    '''
    function to return a tuple of info using blastdbcmd
    tuple: (name, date, #sequences); a field is None when not parsed
    '''
    cmd = ['blastdbcmd', '-info', '-db', input]
    # universal_newlines=True makes stdout/stderr text (str), matching the
    # other subprocess.Popen calls in this module; without it Python 3
    # returns bytes and the str splits below raise TypeError.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    if stderr:
        # surface only the first line of any blastdbcmd error
        print(stderr.split('\n')[0])
    # blastdbcmd separates info sections with blank lines
    results = stdout.split('\n\n')
    results = [x for x in results if x]
    # parse results which are now in list, look for starts with Database and then Date
    Name, Date, NumSeqs = (None,)*3
    for x in results:
        if x.startswith('Database:'):
            hit = x.split('\n\t')
            Name = hit[0].replace('Database: ', '')
            NumSeqs = hit[1].split(' sequences;')[0].replace(',', '')
        if x.startswith('Date:'):
            Date = x.split('\t')[0].replace('Date: ', '')
    return (Name, Date, NumSeqs)
# Bootstrap HTML page opening plus the full top navigation bar, shared by
# the funannotate compare report pages (closed by FOOTER further below).
HEADER = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
<meta name="funannotate comparative genomics output" content="">
<meta name="<NAME>" content="">
<title>Funannotate</title>
<!-- Bootstrap core CSS -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="css/starter-template.css" rel="stylesheet">
<script src="js/ie-emulation-modes-warning.js"></script>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
</button>
<a class="navbar-brand" href="index.html">Funannotate</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="stats.html">Stats</a></li>
<li><a href="phylogeny.html">Phylogeny</a></li>
<li><a href="orthologs.html">Orthologs</a></li>
<li><a href="interpro.html">InterPro</a></li>
<li><a href="pfam.html">PFAM</a></li>
<li><a href="merops.html">Merops</a></li>
<li><a href="cazy.html">CAZymes</a></li>
<li><a href="cogs.html">COGs</a></li>
<li><a href="signalp.html">SignalP</a></li>
<li><a href="tf.html">TFs</a></li>
<li><a href="secmet.html">SecMet</a></li>
<li><a href="go.html">GO</a></li>
<li><a href="citation.html">Cite</a></li>
</ul>
</div><!--/.nav-collapse -->
</div>
</nav>
'''
# --- Per-page HTML body fragments -------------------------------------------
# Each constant below opens the body of one static result page; the page is
# assembled as HEADER/HEADER2 + fragment + table/figure content + FOOTER.

# Orthologous protein groups table page.
ORTHOLOGS = '''
<div class="container">
<div class="table">
<h2 class="sub-header">Orthologous protein groups</h2>
<div class="table-responsive">
'''
# Landing page: links to every result page.
INDEX = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Funannotate Results</h2>
<br>
<p><a href='stats.html'>Genome Summary Stats</a></p>
<p><a href='phylogeny.html'>Maximum likelihood Phylogeny (RAxML)</a></p>
<p><a href='merops.html'>MEROPS Protease Stats</a></p>
<p><a href='cazy.html'>CAZyme carbohydrate activating enzyme Stats</a></p>
<p><a href='cogs.html'>COGs Stats</a></p>
<p><a href='signalp.html'>Secreted proteins (SignalP)</a></p>
<p><a href='interpro.html'>InterProScan Domain Stats</a></p>
<p><a href='tf.html'>Transcription Factor Summary</a></p>
<p><a href='secmet.html'>Secondary Metabolism Cluster Summary</a></p>
<p><a href='pfam.html'>PFAM Domain Stats</a></p>
<p><a href='go.html'>Gene Ontology Enrichment Analysis</a></p>
<p><a href='orthologs.html'>Orthologous proteins</a></p>
<br>
'''
# Genome summary statistics table page.
SUMMARY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Genome Summary Stats</h2>
<div class="table-responsive">
'''
# RAxML phylogeny page (embeds the rendered tree PDF).
PHYLOGENY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">RAxML Maximum Likelihood Phylogeny</h2>
<a href='phylogeny/ML.phylogeny.pdf'><img src="phylogeny/ML.phylogeny.pdf" height="500" /></a></div>
'''
# Placeholder shown when too few species exist to build a phylogeny.
NOPHYLOGENY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Number of species too low to generate phylogeny</h2>
'''
# MEROPS protease results page (bar graph + heatmap).
MEROPS = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">MEROPS Protease Families per Genome Results</h2>
<div class='row'>
<div class="col-sm-7"><a href='merops/MEROPS.graph.pdf'><img src="merops/MEROPS.graph.pdf" height="350" /></a></div>
<div class="col-sm-5"><a href='merops/MEROPS.heatmap.pdf'><img src="merops/MEROPS.heatmap.pdf" height="500" /></a></div>
</div>
<div class="table-responsive">
'''
# InterProScan domain results page (NMDS plot).
INTERPRO = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">InterProScan Domains per Genome Results</h2>
<div class='row'>
<a href='interpro/InterProScan.nmds.pdf'><img src="interpro/InterProScan.nmds.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
# PFAM domain results page (NMDS plot).
PFAM = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">PFAM Domains per Genome Results</h2>
<div class='row'>
<a href='pfam/PFAM.nmds.pdf'><img src="pfam/PFAM.nmds.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
# SignalP secreted-protein results page.
SIGNALP = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Secreted Proteins per Genome Results</h2>
<div class='row'>
<a href='signalp/signalp.pdf'><img src="signalp/signalp.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
# Transcription factor results page (heatmap).
TF = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Fungal Transcription Factors per Genome Results</h2>
<div class='row'>
<a href='tfs/TF.heatmap.pdf'><img src="tfs/TF.heatmap.pdf" height="800" /></a></div>
<div class="table-responsive">
'''
# Secondary metabolism cluster results page.
SECMET = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Secondary Metabolism Clusters per Genome Results</h2>
<div class='row'>
<a href='secmet/SM.graph.pdf'><img src="secmet/SM.graph.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
# CAZyme results page (bar graph + heatmap).
CAZY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">CAZyme Families per Genome Results</h2>
<div class='row'>
<div class="col-sm-7"><a href='cazy/CAZy.graph.pdf'><img src="cazy/CAZy.graph.pdf" height="350" /></a></div>
<div class="col-sm-5"><a href='cazy/CAZy.heatmap.pdf'><img src="cazy/CAZy.heatmap.pdf" height="600" /></a></div>
</div>
<div class="table-responsive">
'''
# COG results page.
COG = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Clusters of Orthologous Groups (COGs) per Genome Results</h2>
<div class='row'>
<a href='cogs/COGS.graph.pdf'><img src="cogs/COGS.graph.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
# GO enrichment results page.
GO = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">GO ontology enrichment Results</h2>
<div class='row'>
'''
# Placeholder for annotation data that is absent.
MISSING = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">These data are missing from annotation.</h2>
'''
# Citation page body.
CITATION = '''
<div class="container">
<div class="starter-template">
<h3 class="sub-header">If you found Funannotate useful please cite:</h3>
<p>Palmer JM. 2016. Funannotate: a fungal genome annotation and comparative genomics pipeline. <a href="https://github.com/nextgenusfs/funannotate">https://github.com/nextgenusfs/funannotate</a>.</p>
'''
# Common page footer: closes the containers and loads jQuery/Bootstrap JS.
FOOTER = '''
</div>
</div>
</div><!-- /.container -->
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="js/jquery.min.js"><\/script>')</script>
<script src="js/bootstrap.min.js"></script>
<!-- IE10 viewport hack for Surface/desktop Windows 8 bug -->
<script src="js/ie10-viewport-bug-workaround.js"></script>
</body>
</html>
'''
# Alternate page header used by DataTables-backed pages: same Bootstrap setup
# as HEADER, plus the DataTables CSS/JS and a per-genome dropdown menu whose
# <li> entries are appended after this fragment (hence the open <ul> at the end).
HEADER2 = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="funannotate comparative genomics output" content="">
<meta name="<NAME>" content="">
<title>Funannotate</title>
<link href="css/bootstrap.min.css" rel="stylesheet">
<link href="css/starter-template.css" rel="stylesheet">
<script src="js/ie-emulation-modes-warning.js"></script>
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/t/bs/dt-1.10.11/datatables.min.css"/>
<script type="text/javascript" src="https://cdn.datatables.net/t/bs/dt-1.10.11/datatables.min.js"></script>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container-fluid">
<div class="navbar-header">
<span class="sr-only">Toggle navigation</span>
<a class="navbar-brand" href="index.html">Funannotate</a>
</div>
<div class="navbar-header">
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li class="active"><a href="stats.html">Stats</a></li>
<li><a href="orthologs.html">Orthologs</a></li>
<li><a href="interpro.html">InterProScan</a></li>
<li><a href="pfam.html">PFAM</a></li>
<li><a href="merops.html">Merops</a></li>
<li><a href="cazy.html">CAZymes</a></li>
<li><a href="signalp.html">SignalP</a></li>
<li><a href="go.html">GO ontology</a></li>
<li><a href="citation.html">Citation</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">Genomes <span class="caret"></span></a>
<ul class="dropdown-menu">
'''
| [
"Bio.SeqIO.FastaIO.SimpleFastaParser",
"tarfile.open",
"io.open",
"seaborn.set_style",
"itertools.izip",
"sys.exit",
"operator.itemgetter",
"subprocess.Popen",
"matplotlib.pyplot.xlabel",
"logging.FileHandler",
"subprocess.call",
"pkg_resources.get_distribution",
"matplotlib.pyplot.xticks",
... | [((909, 958), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'BiopythonWarning'], {}), "('ignore', BiopythonWarning)\n", (930, 958), False, 'import warnings\n'), ((1137, 1199), 'os.path.join', 'os.path.join', (['parentdir', '"""aux_scripts"""', '"""genemark_gtf2gff3.pl"""'], {}), "(parentdir, 'aux_scripts', 'genemark_gtf2gff3.pl')\n", (1149, 1199), False, 'import os\n'), ((817, 842), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (840, 842), False, 'import warnings\n'), ((848, 879), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (869, 879), False, 'import warnings\n'), ((1095, 1120), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1110, 1120), False, 'import os\n'), ((4066, 4094), 're.sub', 're.sub', (['"""\x1b.*?m"""', '""""""', 'text'], {}), "('\\x1b.*?m', '', text)\n", (4072, 4094), False, 'import re\n'), ((13215, 13237), 'os.path.split', 'os.path.split', (['program'], {}), '(program)\n', (13228, 13237), False, 'import os\n'), ((17057, 17147), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)', 'stderr': 'DEVNULL'}), '(cmd, stdout=subprocess.PIPE, universal_newlines=True,\n stderr=DEVNULL)\n', (17073, 17147), False, 'import subprocess\n'), ((18679, 18696), 'itertools.izip', 'zip', (['file1', 'file2'], {}), '(file1, file2)\n', (18682, 18696), True, 'from itertools import izip as zip\n'), ((21097, 21117), 'os.path.isdir', 'os.path.isdir', (['input'], {}), '(input)\n', (21110, 21117), False, 'import os\n'), ((21314, 21392), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (21330, 21392), False, 'import subprocess\n'), ((22649, 22717), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdout': 'FNULL', 'stderr': 
'subprocess.PIPE'}), '(cmd, cwd=dir, stdout=FNULL, stderr=subprocess.PIPE)\n', (22665, 22717), False, 'import subprocess\n'), ((22934, 23012), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (22950, 23012), False, 'import subprocess\n'), ((25969, 26044), 'os.path.join', 'os.path.join', (['evmpath', '"""EvmUtils"""', '"""gff3_gene_prediction_file_validator.pl"""'], {}), "(evmpath, 'EvmUtils', 'gff3_gene_prediction_file_validator.pl')\n", (25981, 26044), False, 'import os\n'), ((26144, 26242), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n', (26160, 26242), False, 'import subprocess\n'), ((27736, 27762), 'os.path.dirname', 'os.path.dirname', (['directory'], {}), '(directory)\n', (27751, 27762), False, 'import os\n'), ((27797, 27814), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (27804, 27814), False, 'import os\n'), ((30626, 30745), 'subprocess.Popen', 'subprocess.Popen', (["['tblastn', '-version']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), "(['tblastn', '-version'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, universal_newlines=True)\n", (30642, 30745), False, 'import subprocess\n'), ((33580, 33633), 'os.path.join', 'os.path.join', (['parentdir', '"""config"""', '"""EOG092C0B3U.prfl"""'], {}), "(parentdir, 'config', 'EOG092C0B3U.prfl')\n", (33592, 33633), False, 'import os\n'), ((34655, 34708), 'os.path.join', 'os.path.join', (['outputdir', '"""transcript_alignments.gff3"""'], {}), "(outputdir, 'transcript_alignments.gff3')\n", (34667, 34708), False, 'import os\n'), ((34718, 34768), 'os.path.join', 'os.path.join', (['outputdir', '"""protein_alignments.gff3"""'], {}), 
"(outputdir, 'protein_alignments.gff3')\n", (34730, 34768), False, 'import os\n'), ((34778, 34826), 'os.path.join', 'os.path.join', (['outputdir', '"""gene_predictions.gff3"""'], {}), "(outputdir, 'gene_predictions.gff3')\n", (34790, 34826), False, 'import os\n'), ((39076, 39106), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'a.dtype'}), '(shape, dtype=a.dtype)\n', (39084, 39106), True, 'import numpy as np\n'), ((39509, 39579), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(message)s"""'], {'datefmt': '"""[%x %H:%M:%S]"""'}), "('%(asctime)s: %(message)s', datefmt='[%x %H:%M:%S]')\n", (39526, 39579), False, 'import logging\n'), ((39599, 39626), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (39616, 39626), False, 'import logging\n'), ((39669, 39692), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (39690, 39692), False, 'import logging\n'), ((39794, 39822), 'logging.FileHandler', 'logging.FileHandler', (['LOGNAME'], {}), '(LOGNAME)\n', (39813, 39822), False, 'import logging\n'), ((40708, 40729), 'os.path.exists', 'os.path.exists', (['input'], {}), '(input)\n', (40722, 40729), False, 'import os\n'), ((41736, 41762), 'multiprocessing.Pool', 'multiprocessing.Pool', (['cpus'], {}), '(cpus)\n', (41756, 41762), False, 'import multiprocessing\n'), ((42425, 42451), 'multiprocessing.Pool', 'multiprocessing.Pool', (['cpus'], {}), '(cpus)\n', (42445, 42451), False, 'import multiprocessing\n'), ((72366, 72385), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (72377, 72385), False, 'from collections import OrderedDict\n'), ((90102, 90148), 'os.path.join', 'os.path.join', (['FUNDB', '"""funannotate-db-info.txt"""'], {}), "(FUNDB, 'funannotate-db-info.txt')\n", (90114, 90148), False, 'import os\n'), ((100499, 100526), 'shutil.copyfile', 'shutil.copyfile', (['fasta', 'DNA'], {}), '(fasta, DNA)\n', (100514, 100526), False, 'import shutil\n'), ((132879, 132898), 'collections.OrderedDict', 
'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (132890, 132898), False, 'from collections import OrderedDict\n'), ((159831, 159852), 'collections.defaultdict', 'defaultdict', (['InterLap'], {}), '(InterLap)\n', (159842, 159852), False, 'from collections import defaultdict\n'), ((160167, 160188), 'collections.defaultdict', 'defaultdict', (['InterLap'], {}), '(InterLap)\n', (160178, 160188), False, 'from collections import defaultdict\n'), ((160778, 160799), 'collections.defaultdict', 'defaultdict', (['InterLap'], {}), '(InterLap)\n', (160789, 160799), False, 'from collections import defaultdict\n'), ((168767, 168786), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (168778, 168786), False, 'from collections import OrderedDict\n'), ((170189, 170208), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (170200, 170208), False, 'from collections import OrderedDict\n'), ((195030, 195049), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (195041, 195049), False, 'from collections import OrderedDict\n'), ((203489, 203508), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (203500, 203508), False, 'from collections import OrderedDict\n'), ((208302, 208321), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (208313, 208321), False, 'from collections import OrderedDict\n'), ((215550, 215569), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (215561, 215569), False, 'from collections import OrderedDict\n'), ((221464, 221487), 'os.path.dirname', 'os.path.dirname', (['genome'], {}), '(genome)\n', (221479, 221487), False, 'import os\n'), ((221501, 221538), 'os.path.join', 'os.path.join', (['basedir', '"""CodingQuarry"""'], {}), "(basedir, 'CodingQuarry')\n", (221513, 221538), False, 'import os\n'), ((221652, 221691), 'os.path.join', 'os.path.join', (['basedir', '"""stringtie.gff3"""'], {}), "(basedir, 'stringtie.gff3')\n", (221664, 
221691), False, 'import os\n'), ((222046, 222095), 'os.path.join', 'os.path.join', (['tmpdir', '"""out"""', '"""PredictedPass.gff3"""'], {}), "(tmpdir, 'out', 'PredictedPass.gff3')\n", (222058, 222095), False, 'import os\n'), ((222970, 223019), 'os.path.join', 'os.path.join', (['tmpdir', '"""out"""', '"""PredictedPass.gff3"""'], {}), "(tmpdir, 'out', 'PredictedPass.gff3')\n", (222982, 223019), False, 'import os\n'), ((223503, 223522), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (223514, 223522), False, 'from collections import OrderedDict\n'), ((229277, 229345), 'subprocess.Popen', 'subprocess.Popen', (['minimap2_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL'}), '(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)\n', (229293, 229345), False, 'import subprocess\n'), ((229355, 229445), 'subprocess.Popen', 'subprocess.Popen', (['samtools_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL', 'stdin': 'p1.stdout'}), '(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=\n p1.stdout)\n', (229371, 229445), False, 'import subprocess\n'), ((230111, 230179), 'subprocess.Popen', 'subprocess.Popen', (['minimap2_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL'}), '(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)\n', (230127, 230179), False, 'import subprocess\n'), ((230189, 230279), 'subprocess.Popen', 'subprocess.Popen', (['samtools_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL', 'stdin': 'p1.stdout'}), '(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=\n p1.stdout)\n', (230205, 230279), False, 'import subprocess\n'), ((230912, 230980), 'subprocess.Popen', 'subprocess.Popen', (['minimap2_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL'}), '(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)\n', (230928, 230980), False, 'import subprocess\n'), ((230990, 231080), 'subprocess.Popen', 'subprocess.Popen', (['samtools_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL', 'stdin': 'p1.stdout'}), 
'(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=\n p1.stdout)\n', (231006, 231080), False, 'import subprocess\n'), ((231758, 231826), 'subprocess.Popen', 'subprocess.Popen', (['minimap2_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL'}), '(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)\n', (231774, 231826), False, 'import subprocess\n'), ((231836, 231926), 'subprocess.Popen', 'subprocess.Popen', (['samtools_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'FNULL', 'stdin': 'p1.stdout'}), '(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=\n p1.stdout)\n', (231852, 231926), False, 'import subprocess\n'), ((232382, 232420), 'os.path.join', 'os.path.join', (['tmpdir', '"""gmap-build.log"""'], {}), "(tmpdir, 'gmap-build.log')\n", (232394, 232420), False, 'import os\n'), ((232650, 232686), 'os.path.join', 'os.path.join', (['tmpdir', '"""gmap-map.log"""'], {}), "(tmpdir, 'gmap-map.log')\n", (232662, 232686), False, 'import os\n'), ((234108, 234149), 'os.path.join', 'os.path.join', (['base_folder', '"""hmmer_output"""'], {}), "(base_folder, 'hmmer_output')\n", (234120, 234149), False, 'import os\n'), ((234197, 234262), 'os.path.join', 'os.path.join', (['base_folder', '"""augustus_output"""', '"""gffs"""', "(ID + '.gff')"], {}), "(base_folder, 'augustus_output', 'gffs', ID + '.gff')\n", (234209, 234262), False, 'import os\n'), ((239039, 239074), 'os.path.join', 'os.path.join', (['tmpdir', '"""repeats.xml"""'], {}), "(tmpdir, 'repeats.xml')\n", (239051, 239074), False, 'import os\n'), ((244825, 244850), 'funannotate.check.check_version7', 'check_version7', (['"""signalp"""'], {}), "('signalp')\n", (244839, 244850), False, 'from funannotate.check import check_version7\n'), ((245483, 245505), 'os.path.isfile', 'os.path.isfile', (['output'], {}), '(output)\n', (245497, 245505), False, 'import os\n'), ((254128, 254145), 'os.remove', 'os.remove', (['tmpOut'], {}), '(tmpOut)\n', (254137, 254145), False, 'import os\n'), ((258089, 258108), 
'os.makedirs', 'os.makedirs', (['tmpdir'], {}), '(tmpdir)\n', (258100, 258108), False, 'import os\n'), ((258640, 258676), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cpus'}), '(processes=cpus)\n', (258660, 258676), False, 'import multiprocessing\n'), ((260157, 260189), 'os.path.join', 'os.path.join', (['tmpdir', '"""genemark"""'], {}), "(tmpdir, 'genemark')\n", (260169, 260189), False, 'import os\n'), ((260302, 260324), 'os.path.abspath', 'os.path.abspath', (['input'], {}), '(input)\n', (260317, 260324), False, 'import os\n'), ((261085, 261121), 'os.path.join', 'os.path.join', (['outdir', '"""genemark.gtf"""'], {}), "(outdir, 'genemark.gtf')\n", (261097, 261121), False, 'import os\n'), ((261473, 261505), 'os.path.join', 'os.path.join', (['tmpdir', '"""genemark"""'], {}), "(tmpdir, 'genemark')\n", (261485, 261505), False, 'import os\n'), ((261618, 261640), 'os.path.abspath', 'os.path.abspath', (['input'], {}), '(input)\n', (261633, 261640), False, 'import os\n'), ((261705, 261754), 'os.path.join', 'os.path.join', (['tmpdir', '"""genemark.intron-hints.gff"""'], {}), "(tmpdir, 'genemark.intron-hints.gff')\n", (261717, 261754), False, 'import os\n'), ((262888, 262924), 'os.path.join', 'os.path.join', (['outdir', '"""genemark.gtf"""'], {}), "(outdir, 'genemark.gtf')\n", (262900, 262924), False, 'import os\n'), ((268297, 268328), 'os.path.join', 'os.path.join', (['dir', '"""glimmerhmm"""'], {}), "(dir, 'glimmerhmm')\n", (268309, 268328), False, 'import os\n'), ((268336, 268357), 'os.path.isdir', 'os.path.isdir', (['tmpdir'], {}), '(tmpdir)\n', (268349, 268357), False, 'import os\n'), ((268564, 268598), 'os.path.join', 'os.path.join', (['dir', '"""glimmer.exons"""'], {}), "(dir, 'glimmer.exons')\n", (268576, 268598), False, 'import os\n'), ((269312, 269335), 'os.path.abspath', 'os.path.abspath', (['tmpdir'], {}), '(tmpdir)\n', (269327, 269335), False, 'import os\n'), ((279699, 279736), 'os.path.join', 'os.path.join', (['dir', 
'"""snap-trained.hmm"""'], {}), "(dir, 'snap-trained.hmm')\n", (279711, 279736), False, 'import os\n'), ((279751, 279791), 'os.path.join', 'os.path.join', (['dir', '"""snap-prediction.zff"""'], {}), "(dir, 'snap-prediction.zff')\n", (279763, 279791), False, 'import os\n'), ((282217, 282241), 'os.path.abspath', 'os.path.abspath', (['snapHMM'], {}), '(snapHMM)\n', (282232, 282241), False, 'import os\n'), ((282303, 282343), 'os.path.join', 'os.path.join', (['dir', '"""snap-prediction.zff"""'], {}), "(dir, 'snap-prediction.zff')\n", (282315, 282343), False, 'import os\n'), ((282601, 282624), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (282622, 282624), False, 'import psutil\n'), ((283378, 283414), 'os.path.join', 'os.path.join', (['tmpdir', '"""tRNAscan.out"""'], {}), "(tmpdir, 'tRNAscan.out')\n", (283390, 283414), False, 'import os\n'), ((283432, 283481), 'os.path.join', 'os.path.join', (['tmpdir', '"""tRNAscan.len-filtered.out"""'], {}), "(tmpdir, 'tRNAscan.len-filtered.out')\n", (283444, 283481), False, 'import os\n'), ((285176, 285234), 'os.path.join', 'os.path.join', (['parentdir', '"""aux_scripts"""', '"""trnascan2gff3.pl"""'], {}), "(parentdir, 'aux_scripts', 'trnascan2gff3.pl')\n", (285188, 285234), False, 'import os\n'), ((288951, 289003), 'os.path.join', 'os.path.join', (['tmpdir', '"""genome.repeats.to.remove.gff"""'], {}), "(tmpdir, 'genome.repeats.to.remove.gff')\n", (288963, 289003), False, 'import os\n'), ((330466, 330479), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (330477, 330479), False, 'from collections import OrderedDict\n'), ((331052, 331071), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (331063, 331071), False, 'from collections import OrderedDict\n'), ((345154, 345174), 'pandas.DataFrame', 'pd.DataFrame', (['Counts'], {}), '(Counts)\n', (345166, 345174), True, 'import pandas as pd\n'), ((345311, 345346), 'os.path.join', 'os.path.join', (['folder', "(name + '.gff')"], {}), 
"(folder, name + '.gff')\n", (345323, 345346), False, 'import os\n'), ((345360, 345395), 'os.path.join', 'os.path.join', (['folder', "(name + '.faa')"], {}), "(folder, name + '.faa')\n", (345372, 345395), False, 'import os\n'), ((345412, 345458), 'os.path.join', 'os.path.join', (['folder', "(name + '.transcripts.fa')"], {}), "(folder, name + '.transcripts.fa')\n", (345424, 345458), False, 'import os\n'), ((347029, 347048), 'funannotate.stackedBarGraph.StackedBarGrapher', 'StackedBarGrapher', ([], {}), '()\n', (347046, 347048), True, 'from funannotate.stackedBarGraph import StackedBarGrapher as StackedBarGrapher\n'), ((347122, 347145), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(6)'], {}), '(0, ymax, 6)\n', (347133, 347145), True, 'import numpy as np\n'), ((347518, 347543), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (347531, 347543), True, 'import seaborn as sns\n'), ((347548, 347572), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (347563, 347572), True, 'import seaborn as sns\n'), ((347583, 347595), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (347593, 347595), True, 'import matplotlib.pyplot as plt\n'), ((347833, 347861), 'matplotlib.pyplot.title', 'plt.title', (["(type + ' summary')"], {}), "(type + ' summary')\n", (347842, 347861), True, 'import matplotlib.pyplot as plt\n'), ((348242, 348261), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, ymax]'], {}), '([0, ymax])\n', (348250, 348261), True, 'import matplotlib.pyplot as plt\n'), ((348597, 348611), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (348606, 348611), True, 'import matplotlib.pyplot as plt\n'), ((348974, 349011), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (348986, 349011), True, 'import matplotlib.pyplot as plt\n'), ((349277, 349299), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'rotation': '(0)'}), 
'(rotation=0)\n', (349287, 349299), True, 'import matplotlib.pyplot as plt\n'), ((349304, 349327), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (349314, 349327), True, 'import matplotlib.pyplot as plt\n'), ((349548, 349562), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (349557, 349562), True, 'import matplotlib.pyplot as plt\n'), ((351062, 351178), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'patches', 'bbox_to_anchor': '(1, 0.5)', 'bbox_transform': 'fig.transFigure', 'loc': '"""center left"""', 'ncol': '(1)'}), "(handles=patches, bbox_to_anchor=(1, 0.5), bbox_transform=fig.\n transFigure, loc='center left', ncol=1)\n", (351072, 351178), True, 'import matplotlib.pyplot as plt\n'), ((351262, 351276), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (351271, 351276), True, 'import matplotlib.pyplot as plt\n'), ((351490, 351515), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (351497, 351515), True, 'import seaborn as sns\n'), ((351526, 351538), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (351536, 351538), True, 'import matplotlib.pyplot as plt\n'), ((351785, 351824), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'df', 'palette': 'colorplot'}), '(data=df, palette=colorplot)\n', (351796, 351824), True, 'import seaborn as sns\n'), ((351829, 351850), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Genomes"""'], {}), "('Genomes')\n", (351839, 351850), True, 'import matplotlib.pyplot as plt\n'), ((351855, 351886), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Secreted Proteins"""'], {}), "('Secreted Proteins')\n", (351865, 351886), True, 'import matplotlib.pyplot as plt\n'), ((351891, 351914), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (351901, 351914), True, 'import matplotlib.pyplot as plt\n'), ((351988, 352002), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), 
'(fig)\n', (351997, 352002), True, 'import matplotlib.pyplot as plt\n'), ((352514, 352555), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['data'], {'metric': 'distance'}), '(data, metric=distance)\n', (352532, 352555), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((352566, 352668), 'sklearn.manifold.MDS', 'MDS', ([], {'n_components': '(2)', 'metric': '(False)', 'max_iter': '(999)', 'dissimilarity': '"""precomputed"""', 'n_init': '(10)', 'verbose': '(0)'}), "(n_components=2, metric=False, max_iter=999, dissimilarity='precomputed',\n n_init=10, verbose=0)\n", (352569, 352668), False, 'from sklearn.manifold import MDS\n'), ((352976, 352988), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (352986, 352988), True, 'import matplotlib.pyplot as plt\n'), ((353385, 353410), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""NMDS axis 1"""'], {}), "('NMDS axis 1')\n", (353395, 353410), True, 'import matplotlib.pyplot as plt\n'), ((353415, 353440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""NMDS axis 2"""'], {}), "('NMDS axis 2')\n", (353425, 353440), True, 'import matplotlib.pyplot as plt\n'), ((353445, 353472), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-ycoords)', 'ycoords'], {}), '(-ycoords, ycoords)\n', (353453, 353472), True, 'import matplotlib.pyplot as plt\n'), ((353477, 353504), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-xcoords)', 'xcoords'], {}), '(-xcoords, xcoords)\n', (353485, 353504), True, 'import matplotlib.pyplot as plt\n'), ((353575, 353637), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)\n', (353585, 353637), True, 'import matplotlib.pyplot as plt\n'), ((353641, 353691), 'matplotlib.pyplot.title', 'plt.title', (["('NMDS analysis of ' + type + ' domains')"], {}), "('NMDS analysis of ' + type + ' domains')\n", (353650, 353691), True, 'import matplotlib.pyplot as plt\n'), 
((353692, 353792), 'matplotlib.pyplot.annotate', 'plt.annotate', (['stress'], {'xy': '(1, 0)', 'xycoords': '"""axes fraction"""', 'fontsize': '(12)', 'ha': '"""right"""', 'va': '"""bottom"""'}), "(stress, xy=(1, 0), xycoords='axes fraction', fontsize=12, ha=\n 'right', va='bottom')\n", (353704, 353792), True, 'import matplotlib.pyplot as plt\n'), ((353878, 353892), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (353887, 353892), True, 'import matplotlib.pyplot as plt\n'), ((354342, 354377), 'itertools.permutations', 'itertools.permutations', (['filelist', '(2)'], {}), '(filelist, 2)\n', (354364, 354377), False, 'import itertools\n'), ((357466, 357497), 'xml.etree.cElementTree.iterparse', 'cElementTree.iterparse', (['xmlfile'], {}), '(xmlfile)\n', (357488, 357497), True, 'import xml.etree.cElementTree as cElementTree\n'), ((367051, 367072), 'collections.defaultdict', 'defaultdict', (['InterLap'], {}), '(InterLap)\n', (367062, 367072), False, 'from collections import defaultdict\n'), ((371374, 371393), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (371385, 371393), False, 'from collections import OrderedDict\n'), ((373174, 373231), 'os.path.join', 'os.path.join', (['outdir', '"""logfiles"""', '"""augustus_training.log"""'], {}), "(outdir, 'logfiles', 'augustus_training.log')\n", (373186, 373231), False, 'import os\n'), ((373247, 373275), 'os.path.abspath', 'os.path.abspath', (['trainingset'], {}), '(trainingset)\n', (373262, 373275), False, 'import os\n'), ((373376, 373440), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""', "('tmp_opt_' + train_species)"], {}), "(outdir, 'predict_misc', 'tmp_opt_' + train_species)\n", (373388, 373440), False, 'import os\n'), ((380198, 380219), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (380208, 380219), False, 'import os\n'), ((380505, 380527), 'os.path.dirname', 'os.path.dirname', (['fasta'], {}), '(fasta)\n', (380520, 380527), False, 'import 
os\n'), ((380539, 380584), 'os.path.join', 'os.path.join', (['dir', "(base + '.draw2tree.phylip')"], {}), "(dir, base + '.draw2tree.phylip')\n", (380551, 380584), False, 'import os\n'), ((380587, 380653), 'subprocess.call', 'subprocess.call', (["['trimal', '-in', fasta, '-out', tmp1, '-phylip']"], {}), "(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])\n", (380602, 380653), False, 'import subprocess\n'), ((380674, 380740), 'subprocess.call', 'subprocess.call', (["['phyml', '-i', tmp1]"], {'stdout': 'FNULL', 'stderr': 'FNULL'}), "(['phyml', '-i', tmp1], stdout=FNULL, stderr=FNULL)\n", (380689, 380740), False, 'import subprocess\n'), ((381177, 381198), 'os.rename', 'os.rename', (['tmp2', 'tree'], {}), '(tmp2, tree)\n', (381186, 381198), False, 'import os\n'), ((381670, 381693), 'os.path.dirname', 'os.path.dirname', (['folder'], {}), '(folder)\n', (381685, 381693), False, 'import os\n'), ((381705, 381729), 'os.path.basename', 'os.path.basename', (['folder'], {}), '(folder)\n', (381721, 381729), False, 'import os\n'), ((381748, 381794), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.transcripts.fa')"], {}), "(tmpdir, name + '.transcripts.fa')\n", (381760, 381794), False, 'import os\n'), ((381805, 381848), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.proteins.fa')"], {}), "(tmpdir, name + '.proteins.fa')\n", (381817, 381848), False, 'import os\n'), ((381857, 381892), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.aln')"], {}), "(tmpdir, name + '.aln')\n", (381869, 381892), False, 'import os\n'), ((381903, 381944), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.codon.aln')"], {}), "(tmpdir, name + '.codon.aln')\n", (381915, 381944), False, 'import os\n'), ((381954, 381990), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.tree')"], {}), "(tmpdir, name + '.tree')\n", (381966, 381990), False, 'import os\n'), ((381999, 382034), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.log')"], {}), "(tmpdir, name + '.log')\n", (382011, 
382034), False, 'import os\n'), ((382048, 382089), 'os.path.join', 'os.path.join', (['tmpdir', 'name', "(name + '.log')"], {}), "(tmpdir, name, name + '.log')\n", (382060, 382089), False, 'import os\n'), ((383135, 383153), 'os.listdir', 'os.listdir', (['tmpdir'], {}), '(tmpdir)\n', (383145, 383153), False, 'import os\n'), ((383374, 383397), 'os.path.dirname', 'os.path.dirname', (['folder'], {}), '(folder)\n', (383389, 383397), False, 'import os\n'), ((383409, 383433), 'os.path.basename', 'os.path.basename', (['folder'], {}), '(folder)\n', (383425, 383433), False, 'import os\n'), ((383452, 383498), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.transcripts.fa')"], {}), "(tmpdir, name + '.transcripts.fa')\n", (383464, 383498), False, 'import os\n'), ((383509, 383552), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.proteins.fa')"], {}), "(tmpdir, name + '.proteins.fa')\n", (383521, 383552), False, 'import os\n'), ((383561, 383596), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.aln')"], {}), "(tmpdir, name + '.aln')\n", (383573, 383596), False, 'import os\n'), ((383607, 383648), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.codon.aln')"], {}), "(tmpdir, name + '.codon.aln')\n", (383619, 383648), False, 'import os\n'), ((383658, 383694), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.tree')"], {}), "(tmpdir, name + '.tree')\n", (383670, 383694), False, 'import os\n'), ((383703, 383738), 'os.path.join', 'os.path.join', (['tmpdir', "(name + '.log')"], {}), "(tmpdir, name + '.log')\n", (383715, 383738), False, 'import os\n'), ((383752, 383793), 'os.path.join', 'os.path.join', (['tmpdir', 'name', "(name + '.log')"], {}), "(tmpdir, name, name + '.log')\n", (383764, 383793), False, 'import os\n'), ((384815, 384833), 'os.listdir', 'os.listdir', (['tmpdir'], {}), '(tmpdir)\n', (384825, 384833), False, 'import os\n'), ((386790, 386859), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 
'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (386806, 386859), False, 'import subprocess\n'), ((2093, 2121), 'os.dup2', 'os.dup2', (['self.null_fds[0]', '(1)'], {}), '(self.null_fds[0], 1)\n', (2100, 2121), False, 'import os\n'), ((2130, 2158), 'os.dup2', 'os.dup2', (['self.null_fds[1]', '(2)'], {}), '(self.null_fds[1], 2)\n', (2137, 2158), False, 'import os\n'), ((2259, 2287), 'os.dup2', 'os.dup2', (['self.save_fds[0]', '(1)'], {}), '(self.save_fds[0], 1)\n', (2266, 2287), False, 'import os\n'), ((2296, 2324), 'os.dup2', 'os.dup2', (['self.save_fds[1]', '(2)'], {}), '(self.save_fds[1], 2)\n', (2303, 2324), False, 'import os\n'), ((2364, 2390), 'os.close', 'os.close', (['self.null_fds[0]'], {}), '(self.null_fds[0])\n', (2372, 2390), False, 'import os\n'), ((2399, 2425), 'os.close', 'os.close', (['self.null_fds[1]'], {}), '(self.null_fds[1])\n', (2407, 2425), False, 'import os\n'), ((3613, 3630), 'os.makedirs', 'os.makedirs', (['name'], {}), '(name)\n', (3624, 3630), False, 'import os\n'), ((14421, 14454), 'bz2.BZ2File', 'bz2.BZ2File', (['filename', 'mode', 'buff'], {}), '(filename, mode, buff)\n', (14432, 14454), False, 'import bz2\n'), ((15276, 15311), 'gzip.GzipFile', 'gzip.GzipFile', (['filename', 'mode', 'buff'], {}), '(filename, mode, buff)\n', (15289, 15311), False, 'import gzip\n'), ((17345, 17392), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', (['return_code', 'cmd'], {}), '(return_code, cmd)\n', (17374, 17392), False, 'import subprocess\n'), ((19584, 19595), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (19592, 19595), False, 'import sys\n'), ((21127, 21147), 'shutil.rmtree', 'shutil.rmtree', (['input'], {}), '(input)\n', (21140, 21147), False, 'import shutil\n'), ((21157, 21178), 'os.path.isfile', 'os.path.isfile', (['input'], {}), '(input)\n', (21171, 21178), False, 'import os\n'), ((21697, 21708), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21705, 21708), False, 'import sys\n'), ((22061, 
22127), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdout': 'out', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=dir, stdout=out, stderr=subprocess.PIPE)\n', (22077, 22127), False, 'import subprocess\n'), ((22343, 22354), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (22351, 22354), False, 'import sys\n'), ((23269, 23280), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (23277, 23280), False, 'import sys\n'), ((23837, 23848), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (23845, 23848), False, 'import sys\n'), ((24193, 24301), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.\n PIPE, universal_newlines=True)\n', (24209, 24301), False, 'import subprocess\n'), ((24987, 25053), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdout': 'out', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=dir, stdout=out, stderr=subprocess.PIPE)\n', (25003, 25053), False, 'import subprocess\n'), ((25269, 25280), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (25277, 25280), False, 'import sys\n'), ((25588, 25654), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdout': 'out', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=dir, stdout=out, stderr=subprocess.PIPE)\n', (25604, 25654), False, 'import subprocess\n'), ((26075, 26098), 'os.path.realpath', 'os.path.realpath', (['input'], {}), '(input)\n', (26091, 26098), False, 'import os\n'), ((27877, 27901), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (27891, 27901), False, 'import os\n'), ((27975, 27986), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (27983, 27986), False, 'import sys\n'), ((28131, 28142), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (28139, 28142), False, 'import sys\n'), ((28220, 28257), 'tarfile.open', 'tarfile.open', (['output_filename', '"""w:gz"""'], {}), "(output_filename, 
'w:gz')\n", (28232, 28257), False, 'import tarfile\n'), ((28563, 28592), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (28575, 28592), False, 'import os\n'), ((31337, 31348), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (31345, 31348), False, 'import sys\n'), ((31397, 31418), 'os.path.isfile', 'os.path.isfile', (['input'], {}), '(input)\n', (31411, 31418), False, 'import os\n'), ((32637, 32668), 'pkg_resources.get_distribution', 'get_distribution', (['"""funannotate"""'], {}), "('funannotate')\n", (32653, 32668), False, 'from pkg_resources import get_distribution\n'), ((33645, 33666), 'os.path.isfile', 'os.path.isfile', (['model'], {}), '(model)\n', (33659, 33666), False, 'import os\n'), ((33769, 33780), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (33777, 33780), False, 'import sys\n'), ((39264, 39368), 'logging.Formatter', 'logging.Formatter', (["(colr.GRN + '%(asctime)s' + colr.END + ': %(message)s')"], {'datefmt': '"""[%b %d %I:%M %p]"""'}), "(colr.GRN + '%(asctime)s' + colr.END + ': %(message)s',\n datefmt='[%b %d %I:%M %p]')\n", (39281, 39368), False, 'import logging\n'), ((39405, 39478), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(message)s"""'], {'datefmt': '"""[%b %d %I:%M %p]"""'}), "('%(asctime)s: %(message)s', datefmt='[%b %d %I:%M %p]')\n", (39422, 39478), False, 'import logging\n'), ((46912, 46949), 'itertools.groupby', 'itertools.groupby', (['f', 'group_separator'], {}), '(f, group_separator)\n', (46929, 46949), False, 'import itertools\n'), ((49224, 49281), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'out', 'stderr': 'subprocess.PIPE'}), '(cmd, stdout=out, stderr=subprocess.PIPE)\n', (49240, 49281), False, 'import subprocess\n'), ((51505, 51532), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (51516, 51532), False, 'from Bio import SeqIO\n'), ((51830, 51857), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 
'genbank')\n", (51841, 51857), False, 'from Bio import SeqIO\n'), ((52978, 53003), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['infile'], {}), '(infile)\n', (52995, 53003), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((53985, 54012), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""fasta"""'], {}), "(input, 'fasta')\n", (53996, 54012), False, 'from Bio import SeqIO\n'), ((54202, 54227), 'os.path.realpath', 'os.path.realpath', (['mapping'], {}), '(mapping)\n', (54218, 54227), False, 'import os\n'), ((72079, 72106), 'Bio.SeqIO.parse', 'SeqIO.parse', (['seqin', '"""fasta"""'], {}), "(seqin, 'fasta')\n", (72090, 72106), False, 'from Bio import SeqIO\n'), ((84169, 84189), 'os.remove', 'os.remove', (['tmpoutput'], {}), '(tmpoutput)\n', (84178, 84189), False, 'import os\n'), ((84208, 84236), 'os.rename', 'os.rename', (['tmpoutput', 'output'], {}), '(tmpoutput, output)\n', (84217, 84236), False, 'import os\n'), ((85168, 85178), 'funannotate.interlap.InterLap', 'InterLap', ([], {}), '()\n', (85176, 85178), False, 'from funannotate.interlap import InterLap\n'), ((89262, 89287), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['infile'], {}), '(infile)\n', (89279, 89287), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((90160, 90182), 'os.path.isfile', 'os.path.isfile', (['dbfile'], {}), '(dbfile)\n', (90174, 90182), False, 'import os\n'), ((99814, 99837), 'os.path.isfile', 'os.path.isfile', (['exonBED'], {}), '(exonBED)\n', (99828, 99837), False, 'import os\n'), ((99944, 99979), 'json.dump', 'json.dump', (['stats', 'outfile'], {'indent': '(4)'}), '(stats, outfile, indent=4)\n', (99953, 99979), False, 'import json\n'), ((112089, 112116), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta', '"""fasta"""'], {}), "(fasta, 'fasta')\n", (112100, 112116), False, 'from Bio import SeqIO\n'), ((133565, 133595), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""genbank"""'], {}), "(infile, 'genbank')\n", (133576, 133595), False, 
'from Bio import SeqIO\n'), ((133945, 133975), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""genbank"""'], {}), "(infile, 'genbank')\n", (133956, 133975), False, 'from Bio import SeqIO\n'), ((135180, 135210), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""genbank"""'], {}), "(infile, 'genbank')\n", (135191, 135210), False, 'from Bio import SeqIO\n'), ((135444, 135464), 'natsort.natsorted', 'natsorted', (['LocusTags'], {}), '(LocusTags)\n', (135453, 135464), False, 'from natsort import natsorted\n'), ((135576, 135587), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (135584, 135587), False, 'import sys\n'), ((171189, 171245), 'os.path.join', 'os.path.join', (['tmpdir', '"""transcript_evidence_unique.fasta"""'], {}), "(tmpdir, 'transcript_evidence_unique.fasta')\n", (171201, 171245), False, 'import os\n'), ((174147, 174174), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta', '"""fasta"""'], {}), "(fasta, 'fasta')\n", (174158, 174174), False, 'from Bio import SeqIO\n'), ((221167, 221192), 'os.path.realpath', 'os.path.realpath', (['bamfile'], {}), '(bamfile)\n', (221183, 221192), False, 'import os\n'), ((221228, 221251), 'os.path.abspath', 'os.path.abspath', (['output'], {}), '(output)\n', (221243, 221251), False, 'import os\n'), ((221550, 221571), 'os.path.isdir', 'os.path.isdir', (['tmpdir'], {}), '(tmpdir)\n', (221563, 221571), False, 'import os\n'), ((221581, 221600), 'os.makedirs', 'os.makedirs', (['tmpdir'], {}), '(tmpdir)\n', (221592, 221600), False, 'import os\n'), ((221882, 221906), 'os.path.realpath', 'os.path.realpath', (['genome'], {}), '(genome)\n', (221898, 221906), False, 'import os\n'), ((221914, 221945), 'os.path.realpath', 'os.path.realpath', (['stringtieGFF3'], {}), '(stringtieGFF3)\n', (221930, 221945), False, 'import os\n'), ((222580, 222604), 'os.path.realpath', 'os.path.realpath', (['genome'], {}), '(genome)\n', (222596, 222604), False, 'import os\n'), ((232471, 232588), 'subprocess.call', 'subprocess.call', (["['gmap_build', '-D', tmpdir, '-d', 
'genome', '-k', '13', genome]"], {'stdout': 'logfile', 'stderr': 'logfile'}), "(['gmap_build', '-D', tmpdir, '-d', 'genome', '-k', '13',\n genome], stdout=logfile, stderr=logfile)\n", (232486, 232588), False, 'import subprocess\n'), ((233127, 233190), 'os.path.join', 'os.path.join', (['parentdir', '"""aux_scripts"""', '"""funannotate-BUSCO2.py"""'], {}), "(parentdir, 'aux_scripts', 'funannotate-BUSCO2.py')\n", (233139, 233190), False, 'import os\n'), ((233246, 233313), 'os.path.join', 'os.path.join', (['parentdir', '"""aux_scripts"""', '"""funannotate-BUSCO2-py2.py"""'], {}), "(parentdir, 'aux_scripts', 'funannotate-BUSCO2-py2.py')\n", (233258, 233313), False, 'import os\n'), ((234302, 234325), 'os.listdir', 'os.listdir', (['hmmerfolder'], {}), '(hmmerfolder)\n', (234312, 234325), False, 'import os\n'), ((239109, 239147), 'os.path.join', 'os.path.join', (['DataBase', '"""repeats.dmnd"""'], {}), "(DataBase, 'repeats.dmnd')\n", (239121, 239147), False, 'import os\n'), ((239389, 239422), 'os.path.join', 'os.path.join', (['DataBase', '"""REPEATS"""'], {}), "(DataBase, 'REPEATS')\n", (239401, 239422), False, 'import os\n'), ((240438, 240471), 'csv.reader', 'csv.reader', (['input'], {'delimiter': '"""\t"""'}), "(input, delimiter='\\t')\n", (240448, 240471), False, 'import csv\n'), ((244054, 244080), 'Bio.SeqIO.parse', 'SeqIO.parse', (['seqs', '"""fasta"""'], {}), "(seqs, 'fasta')\n", (244065, 244080), False, 'from Bio import SeqIO\n'), ((244186, 244214), 'os.path.join', 'os.path.join', (['tmpdir', 'output'], {}), '(tmpdir, output)\n', (244198, 244214), False, 'import os\n'), ((245163, 245198), 'os.path.join', 'os.path.join', (['tmpdir', '"""signalp_tmp"""'], {}), "(tmpdir, 'signalp_tmp')\n", (245175, 245198), False, 'import os\n'), ((245515, 245532), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (245524, 245532), False, 'import os\n'), ((245892, 245927), 'os.path.join', 'os.path.join', (['tmpdir', '"""signalp_tmp"""'], {}), "(tmpdir, 'signalp_tmp')\n", 
(245904, 245927), False, 'import os\n'), ((251809, 251868), 'os.path.join', 'os.path.join', (["os.environ['AUGUSTUS_CONFIG_PATH']", '"""species"""'], {}), "(os.environ['AUGUSTUS_CONFIG_PATH'], 'species')\n", (251821, 251868), False, 'import os\n'), ((252236, 252271), 'os.path.join', 'os.path.join', (['db', '"""trained_species"""'], {}), "(db, 'trained_species')\n", (252248, 252271), False, 'import os\n'), ((253146, 253168), 'os.path.abspath', 'os.path.abspath', (['input'], {}), '(input)\n', (253161, 253168), False, 'import os\n'), ((253320, 253357), 'subprocess.call', 'subprocess.call', (['cmd1'], {'stdout': 'outfile'}), '(cmd1, stdout=outfile)\n', (253335, 253357), False, 'import subprocess\n'), ((253376, 253398), 'os.path.abspath', 'os.path.abspath', (['genes'], {}), '(genes)\n', (253391, 253398), False, 'import os\n'), ((253550, 253587), 'subprocess.call', 'subprocess.call', (['cmd2'], {'stdout': 'outfile'}), '(cmd2, stdout=outfile)\n', (253565, 253587), False, 'import subprocess\n'), ((253993, 254016), 'os.path.abspath', 'os.path.abspath', (['output'], {}), '(output)\n', (254008, 254016), False, 'import os\n'), ((254909, 254933), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['input'], {}), '(input)\n', (254926, 254933), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((256441, 256466), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['infile'], {}), '(infile)\n', (256458, 256466), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((258033, 258056), 'os.path.dirname', 'os.path.dirname', (['genome'], {}), '(genome)\n', (258048, 258056), False, 'import os\n'), ((258192, 258216), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['input'], {}), '(input)\n', (258209, 258216), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((260201, 260222), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (260214, 260222), False, 'import os\n'), ((260232, 260251), 'os.makedirs', 
'os.makedirs', (['outdir'], {}), '(outdir)\n', (260243, 260251), False, 'import os\n'), ((261517, 261538), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (261530, 261538), False, 'import os\n'), ((261548, 261567), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (261559, 261567), False, 'import os\n'), ((262176, 262202), 'os.path.abspath', 'os.path.abspath', (['hintsfile'], {}), '(hintsfile)\n', (262191, 262202), False, 'import os\n'), ((268493, 268514), 'os.path.abspath', 'os.path.abspath', (['gff3'], {}), '(gff3)\n', (268508, 268514), False, 'import os\n'), ((268516, 268538), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (268531, 268538), False, 'import os\n'), ((268696, 268718), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (268711, 268718), False, 'import os\n'), ((268729, 268756), 'os.path.abspath', 'os.path.abspath', (['glimmExons'], {}), '(glimmExons)\n', (268744, 268756), False, 'import os\n'), ((268996, 269038), 'os.path.join', 'os.path.join', (['dir', '"""glimmerHMM.output.raw"""'], {}), "(dir, 'glimmerHMM.output.raw')\n", (269008, 269038), False, 'import os\n'), ((269121, 269143), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (269136, 269143), False, 'import os\n'), ((269145, 269168), 'os.path.abspath', 'os.path.abspath', (['tmpdir'], {}), '(tmpdir)\n', (269160, 269168), False, 'import os\n'), ((269427, 269469), 'os.path.join', 'os.path.join', (['dir', '"""glimmerHMM.output.raw"""'], {}), "(dir, 'glimmerHMM.output.raw')\n", (269439, 269469), False, 'import os\n'), ((269552, 269574), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (269567, 269574), False, 'import os\n'), ((269576, 269601), 'os.path.abspath', 'os.path.abspath', (['training'], {}), '(training)\n', (269591, 269601), False, 'import os\n'), ((274997, 275024), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta', '"""fasta"""'], {}), "(fasta, 'fasta')\n", (275008, 275024), False, 'from 
Bio import SeqIO\n'), ((279917, 279947), 'os.path.join', 'os.path.join', (['dir', '"""snaptrain"""'], {}), "(dir, 'snaptrain')\n", (279929, 279947), False, 'import os\n'), ((279959, 279980), 'os.path.isdir', 'os.path.isdir', (['tmpdir'], {}), '(tmpdir)\n', (279972, 279980), False, 'import os\n'), ((280021, 280040), 'os.makedirs', 'os.makedirs', (['tmpdir'], {}), '(tmpdir)\n', (280032, 280040), False, 'import os\n'), ((280397, 280416), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (280408, 280416), False, 'from collections import OrderedDict\n'), ((280860, 280910), 'os.path.join', 'os.path.join', (['dir', '"""snap-training.scaffolds.fasta"""'], {}), "(dir, 'snap-training.scaffolds.fasta')\n", (280872, 280910), False, 'import os\n'), ((281304, 281342), 'os.path.join', 'os.path.join', (['dir', '"""snap.training.zff"""'], {}), "(dir, 'snap.training.zff')\n", (281316, 281342), False, 'import os\n'), ((282393, 282413), 'os.path.abspath', 'os.path.abspath', (['hmm'], {}), '(hmm)\n', (282408, 282413), False, 'import os\n'), ((282415, 282437), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (282430, 282437), False, 'import os\n'), ((283513, 283536), 'os.path.isfile', 'os.path.isfile', (['tRNAout'], {}), '(tRNAout)\n', (283527, 283536), False, 'import os\n'), ((283735, 283804), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (283751, 283804), False, 'import subprocess\n'), ((284123, 284156), 'shutil.copyfile', 'shutil.copyfile', (['precalc', 'tRNAout'], {}), '(precalc, tRNAout)\n', (284138, 284156), False, 'import shutil\n'), ((285278, 285348), 'subprocess.call', 'subprocess.call', (["['perl', trna2gff, '--input', tRNAlenOut]"], {'stdout': 'out'}), "(['perl', trna2gff, '--input', tRNAlenOut], stdout=out)\n", (285293, 285348), False, 'import subprocess\n'), ((285615, 285636), 'os.path.isdir', 'os.path.isdir', 
(['folder'], {}), '(folder)\n', (285628, 285636), False, 'import os\n'), ((285722, 285733), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (285730, 285733), False, 'import sys\n'), ((285896, 285907), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (285904, 285907), False, 'import sys\n'), ((290350, 290377), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""fasta"""'], {}), "(input, 'fasta')\n", (290361, 290377), False, 'from Bio import SeqIO\n'), ((293631, 293683), 'os.path.join', 'os.path.join', (['tmpdir', '"""genome.repeats.to.remove.gff"""'], {}), "(tmpdir, 'genome.repeats.to.remove.gff')\n", (293643, 293683), False, 'import os\n'), ((295371, 295398), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""fasta"""'], {}), "(input, 'fasta')\n", (295382, 295398), False, 'from Bio import SeqIO\n'), ((305641, 305671), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""genbank"""'], {}), "(infile, 'genbank')\n", (305652, 305671), False, 'from Bio import SeqIO\n'), ((316610, 316634), 'os.path.isdir', 'os.path.isdir', (['outputdir'], {}), '(outputdir)\n', (316623, 316634), False, 'import os\n'), ((316644, 316666), 'os.makedirs', 'os.makedirs', (['outputdir'], {}), '(outputdir)\n', (316655, 316666), False, 'import os\n'), ((316729, 316761), 'Bio.SeqIO.parse', 'SeqIO.parse', (['InputFasta', '"""fasta"""'], {}), "(InputFasta, 'fasta')\n", (316740, 316761), False, 'from Bio import SeqIO\n'), ((317278, 317305), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (317289, 317305), False, 'from Bio import SeqIO\n'), ((319639, 319666), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (319650, 319666), False, 'from Bio import SeqIO\n'), ((320535, 320580), 'os.path.join', 'os.path.join', (['Database', '"""merops.formatted.fa"""'], {}), "(Database, 'merops.formatted.fa')\n", (320547, 320580), False, 'import os\n'), ((320637, 320664), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 
'genbank')\n", (320648, 320664), False, 'from Bio import SeqIO\n'), ((321831, 321858), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (321842, 321858), False, 'from Bio import SeqIO\n'), ((323827, 323854), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (323838, 323854), False, 'from Bio import SeqIO\n'), ((324969, 325014), 'os.path.join', 'os.path.join', (['Database', '"""merops.formatted.fa"""'], {}), "(Database, 'merops.formatted.fa')\n", (324981, 325014), False, 'import os\n'), ((325276, 325306), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""genbank"""'], {}), "(infile, 'genbank')\n", (325287, 325306), False, 'from Bio import SeqIO\n'), ((330364, 330409), 'os.path.join', 'os.path.join', (['Database', '"""merops.formatted.fa"""'], {}), "(Database, 'merops.formatted.fa')\n", (330376, 330409), False, 'import os\n'), ((330798, 330825), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (330809, 330825), False, 'from Bio import SeqIO\n'), ((330950, 330979), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""genbank"""'], {}), "(input, 'genbank')\n", (330961, 330979), False, 'from Bio import SeqIO\n'), ((337426, 337471), 'os.path.join', 'os.path.join', (['Database', '"""merops.formatted.fa"""'], {}), "(Database, 'merops.formatted.fa')\n", (337438, 337471), False, 'import os\n'), ((337890, 337919), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""genbank"""'], {}), "(input, 'genbank')\n", (337901, 337919), False, 'from Bio import SeqIO\n'), ((345528, 345555), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (345539, 345555), False, 'from Bio import SeqIO\n'), ((346638, 346663), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (346661, 346663), False, 'import warnings\n'), ((346673, 346704), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (346694, 
346704), False, 'import warnings\n'), ((346739, 346760), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (346753, 346760), False, 'import matplotlib\n'), ((348680, 348705), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (348703, 348705), False, 'import warnings\n'), ((348715, 348746), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (348736, 348746), False, 'import warnings\n'), ((348781, 348802), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (348795, 348802), False, 'import matplotlib\n'), ((349076, 349175), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'linewidths': '(0.5)', 'cmap': 'color', 'ax': 'ax', 'fmt': '"""d"""', 'annot_kws': "{'size': 4}", 'annot': '(True)'}), "(df, linewidths=0.5, cmap=color, ax=ax, fmt='d', annot_kws={\n 'size': 4}, annot=True)\n", (349087, 349175), True, 'import seaborn as sns\n'), ((349209, 349272), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'linewidths': '(0.5)', 'cmap': 'color', 'ax': 'ax', 'annot': '(False)'}), '(df, linewidths=0.5, cmap=color, ax=ax, annot=False)\n', (349220, 349272), True, 'import seaborn as sns\n'), ((349625, 349650), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (349648, 349650), False, 'import warnings\n'), ((349660, 349691), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (349681, 349691), False, 'import warnings\n'), ((349726, 349747), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (349740, 349747), False, 'import matplotlib\n'), ((350503, 350532), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 4)'}), '(1, figsize=(8, 4))\n', (350513, 350532), True, 'import matplotlib.pyplot as plt\n'), ((350557, 350586), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 8)'}), '(1, figsize=(8, 8))\n', (350567, 350586), True, 'import matplotlib.pyplot as plt\n'), ((350738, 350776), 
'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(0.7)'], {'color': '"""white"""'}), "((0, 0), 0.7, color='white')\n", (350748, 350776), True, 'import matplotlib.pyplot as plt\n'), ((350785, 350837), 'matplotlib.pyplot.pie', 'plt.pie', (['data[0]'], {'labels': 'names', 'colors': 'color_palette'}), '(data[0], labels=names, colors=color_palette)\n', (350792, 350837), True, 'import matplotlib.pyplot as plt\n'), ((350850, 350859), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (350857, 350859), True, 'import matplotlib.pyplot as plt\n'), ((350906, 350927), 'matplotlib.pyplot.title', 'plt.title', (['species[k]'], {}), '(species[k])\n', (350915, 350927), True, 'import matplotlib.pyplot as plt\n'), ((351317, 351342), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (351340, 351342), False, 'import warnings\n'), ((351352, 351383), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (351373, 351383), False, 'import warnings\n'), ((352083, 352108), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (352106, 352108), False, 'import warnings\n'), ((352118, 352149), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (352139, 352149), False, 'import warnings\n'), ((352289, 352310), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (352303, 352310), False, 'import matplotlib\n'), ((353262, 353367), 'matplotlib.pyplot.plot', 'plt.plot', (['coords[i, 0]', 'coords[i, 1]', '"""o"""'], {'markersize': '(9)', 'color': 'colorplot[i]', 'label': 'df.index.values[i]'}), "(coords[i, 0], coords[i, 1], 'o', markersize=9, color=colorplot[i],\n label=df.index.values[i])\n", (353270, 353367), True, 'import matplotlib.pyplot as plt\n'), ((354121, 354140), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (354137, 354140), False, 'import os\n'), ((356995, 357031), 'io.open', 'io.open', (['file', '"""r"""'], {'encoding': 
'"""utf-8"""'}), "(file, 'r', encoding='utf-8')\n", (357002, 357031), False, 'import io\n'), ((359874, 359893), 'os.path.isdir', 'os.path.isdir', (['dest'], {}), '(dest)\n', (359887, 359893), False, 'import os\n'), ((359944, 359970), 'shutil.copytree', 'shutil.copytree', (['src', 'dest'], {}), '(src, dest)\n', (359959, 359970), False, 'import shutil\n'), ((360377, 360408), 'funannotate.resources.busco_links.get', 'resources.busco_links.get', (['name'], {}), '(name)\n', (360402, 360408), True, 'import funannotate.resources as resources\n'), ((360873, 360898), 'shutil.rmtree', 'shutil.rmtree', (['foldername'], {}), '(foldername)\n', (360886, 360898), False, 'import shutil\n'), ((360907, 360926), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (360916, 360926), False, 'import os\n'), ((361126, 361137), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (361134, 361137), False, 'import sys\n'), ((361238, 361263), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""fasta"""'], {}), "(gbk, 'fasta')\n", (361249, 361263), False, 'from Bio import SeqIO\n'), ((363613, 363656), 'os.path.join', 'os.path.join', (['tmpdir', '"""phylogeny.concat.fa"""'], {}), "(tmpdir, 'phylogeny.concat.fa')\n", (363625, 363656), False, 'import os\n'), ((363692, 363734), 'os.path.join', 'os.path.join', (['tmpdir', '"""phylogeny.mafft.fa"""'], {}), "(tmpdir, 'phylogeny.mafft.fa')\n", (363704, 363734), False, 'import os\n'), ((363764, 363806), 'os.path.join', 'os.path.join', (['tmpdir', '"""phylogeny.mafft.fa"""'], {}), "(tmpdir, 'phylogeny.mafft.fa')\n", (363776, 363806), False, 'import os\n'), ((363816, 363863), 'os.path.join', 'os.path.join', (['tmpdir', '"""phylogeny.trimal.phylip"""'], {}), "(tmpdir, 'phylogeny.trimal.phylip')\n", (363828, 363863), False, 'import os\n'), ((364239, 364282), 'os.path.join', 'os.path.join', (['tmpdir', '"""RAxML_bootstrap.nwk"""'], {}), "(tmpdir, 'RAxML_bootstrap.nwk')\n", (364251, 364282), False, 'import os\n'), ((364520, 364544), 
'Bio.Phylo.Consensus.get_support', 'get_support', (['best', 'trees'], {}), '(best, trees)\n', (364531, 364544), False, 'from Bio.Phylo.Consensus import get_support\n'), ((364553, 364592), 'Bio.Phylo.draw', 'Phylo.draw', (['support_tree'], {'do_show': '(False)'}), '(support_tree, do_show=False)\n', (364563, 364592), False, 'from Bio import Phylo\n'), ((364601, 364618), 'pylab.axis', 'pylab.axis', (['"""off"""'], {}), "('off')\n", (364611, 364618), False, 'import pylab\n'), ((365084, 365140), 'os.path.join', 'os.path.join', (['tmpdir', '"""phylogeny.trimal.phylip.treefile"""'], {}), "(tmpdir, 'phylogeny.trimal.phylip.treefile')\n", (365096, 365140), False, 'import os\n'), ((365156, 365186), 'Bio.Phylo.read', 'Phylo.read', (['treefile', '"""newick"""'], {}), "(treefile, 'newick')\n", (365166, 365186), False, 'from Bio import Phylo\n'), ((365195, 365226), 'Bio.Phylo.draw', 'Phylo.draw', (['best'], {'do_show': '(False)'}), '(best, do_show=False)\n', (365205, 365226), False, 'from Bio import Phylo\n'), ((365235, 365252), 'pylab.axis', 'pylab.axis', (['"""off"""'], {}), "('off')\n", (365245, 365252), False, 'import pylab\n'), ((372682, 372738), 'os.path.join', 'os.path.join', (['AUGUSTUS_BASE', '"""scripts"""', '"""randomSplit.pl"""'], {}), "(AUGUSTUS_BASE, 'scripts', 'randomSplit.pl')\n", (372694, 372738), False, 'import os\n'), ((372848, 372910), 'os.path.join', 'os.path.join', (['AUGUSTUS_BASE', '"""scripts"""', '"""optimize_augustus.pl"""'], {}), "(AUGUSTUS_BASE, 'scripts', 'optimize_augustus.pl')\n", (372860, 372910), False, 'import os\n'), ((373027, 373083), 'os.path.join', 'os.path.join', (['AUGUSTUS_BASE', '"""scripts"""', '"""new_species.pl"""'], {}), "(AUGUSTUS_BASE, 'scripts', 'new_species.pl')\n", (373039, 373083), False, 'import os\n'), ((379431, 379524), 'subprocess.call', 'subprocess.call', (["['mafft', '--anysymbol', '--quiet', input]"], {'stderr': 'FNULL', 'stdout': 'outfile'}), "(['mafft', '--anysymbol', '--quiet', input], stderr=FNULL,\n 
stdout=outfile)\n", (379446, 379524), False, 'import subprocess\n'), ((379892, 379909), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (379901, 379909), False, 'import os\n'), ((381008, 381074), 'subprocess.call', 'subprocess.call', (["['trimal', '-in', fasta, '-out', tmp1, '-phylip']"], {}), "(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])\n", (381023, 381074), False, 'import subprocess\n'), ((381083, 381149), 'subprocess.call', 'subprocess.call', (["['phyml', '-i', tmp1]"], {'stdout': 'FNULL', 'stderr': 'FNULL'}), "(['phyml', '-i', tmp1], stdout=FNULL, stderr=FNULL)\n", (381098, 381149), False, 'import subprocess\n'), ((385017, 385042), 'os.path.join', 'os.path.join', (['a_dir', 'name'], {}), '(a_dir, name)\n', (385029, 385042), False, 'import os\n'), ((385373, 385408), 'os.path.join', 'os.path.join', (['folder', 'x', "(x + '.log')"], {}), "(folder, x, x + '.log')\n", (385385, 385408), False, 'import os\n'), ((385517, 385541), 'os.path.isfile', 'os.path.isfile', (['finallog'], {}), '(finallog)\n', (385531, 385541), False, 'import os\n'), ((1835, 1865), 'os.open', 'os.open', (['os.devnull', 'os.O_RDWR'], {}), '(os.devnull, os.O_RDWR)\n', (1842, 1865), False, 'import os\n'), ((1980, 1989), 'os.dup', 'os.dup', (['(1)'], {}), '(1)\n', (1986, 1989), False, 'import os\n'), ((1991, 2000), 'os.dup', 'os.dup', (['(2)'], {}), '(2)\n', (1997, 2000), False, 'import os\n'), ((2898, 2922), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'f'}), '(fileobj=f)\n', (2911, 2922), False, 'import gzip\n'), ((9109, 9135), 'itertools.izip', 'zip', (['row', 'fixed_col_widths'], {}), '(row, fixed_col_widths)\n', (9112, 9135), True, 'from itertools import izip as zip\n'), ((9377, 9455), 'textwrap.TextWrapper', 'textwrap.TextWrapper', ([], {'subsequent_indent': 'subsequent_indent', 'width': 'max_col_width'}), '(subsequent_indent=subsequent_indent, width=max_col_width)\n', (9397, 9455), False, 'import textwrap\n'), ((9890, 9927), 'itertools.izip', 'zip', (['row_line', 
'col_widths', 'alignments'], {}), '(row_line, col_widths, alignments)\n', (9893, 9927), True, 'from itertools import izip as zip\n'), ((12615, 12635), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (12630, 12635), False, 'import subprocess\n'), ((13144, 13165), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (13158, 13165), False, 'import os\n'), ((13170, 13195), 'os.access', 'os.access', (['fpath', 'os.X_OK'], {}), '(fpath, os.X_OK)\n', (13179, 13195), False, 'import os\n'), ((13433, 13460), 'os.path.join', 'os.path.join', (['path', 'program'], {}), '(path, program)\n', (13445, 13460), False, 'import os\n'), ((17431, 17549), 'subprocess.Popen', 'subprocess.Popen', (["['diamond', 'version']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), "(['diamond', 'version'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, universal_newlines=True)\n", (17447, 17549), False, 'import subprocess\n'), ((21188, 21204), 'os.remove', 'os.remove', (['input'], {}), '(input)\n', (21197, 21204), False, 'import os\n'), ((23537, 23622), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'stdin': 'infile', 'stdout': 'out', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=dir, stdin=infile, stdout=out, stderr=subprocess.PIPE\n )\n', (23553, 23622), False, 'import subprocess\n'), ((24637, 24648), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (24645, 24648), False, 'import sys\n'), ((25894, 25905), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (25902, 25905), False, 'import sys\n'), ((28604, 28629), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (28618, 28629), False, 'import os\n'), ((28634, 28663), 'os.access', 'os.access', (['full_path', 'os.X_OK'], {}), '(full_path, os.X_OK)\n', (28643, 28663), False, 'import os\n'), ((31566, 31587), 'os.path.islink', 'os.path.islink', (['input'], {}), '(input)\n', (31580, 31587), False, 'import os\n'), ((33184, 33306), 
'subprocess.Popen', 'subprocess.Popen', (["['augustus', '--version']"], {'stderr': 'subprocess.STDOUT', 'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), "(['augustus', '--version'], stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE, universal_newlines=True)\n", (33200, 33306), False, 'import subprocess\n'), ((33894, 33944), 'os.path.join', 'os.path.join', (['parentdir', '"""config"""', '"""busco_test.fa"""'], {}), "(parentdir, 'config', 'busco_test.fa')\n", (33906, 33944), False, 'import os\n'), ((38368, 38379), 'itertools.izip', 'zip', (['*plist'], {}), '(*plist)\n', (38371, 38379), True, 'from itertools import izip as zip\n'), ((42275, 42293), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (42291, 42293), False, 'import sys\n'), ((42306, 42319), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (42316, 42319), False, 'import time\n'), ((54931, 54954), 'os.path.realpath', 'os.path.realpath', (['input'], {}), '(input)\n', (54947, 54954), False, 'import os\n'), ((79837, 79859), 'os.path.abspath', 'os.path.abspath', (['input'], {}), '(input)\n', (79852, 79859), False, 'import os\n'), ((92783, 92800), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (92792, 92800), False, 'import json\n'), ((98397, 98408), 'os.getpid', 'os.getpid', ([], {}), '()\n', (98406, 98408), False, 'import os\n'), ((99851, 99869), 'os.remove', 'os.remove', (['exonBED'], {}), '(exonBED)\n', (99860, 99869), False, 'import os\n'), ((115760, 115812), 'os.path.join', 'os.path.join', (["os.environ['FUNANNOTATE_DB']", '"""go.obo"""'], {}), "(os.environ['FUNANNOTATE_DB'], 'go.obo')\n", (115772, 115812), False, 'import os\n'), ((135819, 135849), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""genbank"""'], {}), "(infile, 'genbank')\n", (135830, 135849), False, 'from Bio import SeqIO\n'), ((139072, 139102), 'Bio.SeqIO.parse', 'SeqIO.parse', (['filein', '"""genbank"""'], {}), "(filein, 'genbank')\n", (139083, 139102), False, 'from Bio import SeqIO\n'), ((141470, 
141500), 'Bio.SeqIO.parse', 'SeqIO.parse', (['filein', '"""genbank"""'], {}), "(filein, 'genbank')\n", (141481, 141500), False, 'from Bio import SeqIO\n'), ((142227, 142257), 'Bio.SeqIO.parse', 'SeqIO.parse', (['filein', '"""genbank"""'], {}), "(filein, 'genbank')\n", (142238, 142257), False, 'from Bio import SeqIO\n'), ((171999, 172053), 'os.path.join', 'os.path.join', (['tmpdir', '"""transcript_evidence_unique.bam"""'], {}), "(tmpdir, 'transcript_evidence_unique.bam')\n", (172011, 172053), False, 'import os\n'), ((172079, 172134), 'os.path.join', 'os.path.join', (['tmpdir', '"""transcript_evidence_unique.gff3"""'], {}), "(tmpdir, 'transcript_evidence_unique.gff3')\n", (172091, 172134), False, 'import os\n'), ((228216, 228243), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (228227, 228243), False, 'from Bio import SeqIO\n'), ((239769, 239805), 'Bio.SearchIO.parse', 'SearchIO.parse', (['results', '"""blast-xml"""'], {}), "(results, 'blast-xml')\n", (239783, 239805), False, 'from Bio import SearchIO\n'), ((244230, 244252), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (244244, 244252), False, 'import os\n'), ((244266, 244285), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (244277, 244285), False, 'import os\n'), ((244312, 244333), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {}), '(folder)\n', (244325, 244333), False, 'import shutil\n'), ((244346, 244365), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (244357, 244365), False, 'import os\n'), ((244503, 244533), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (244515, 244533), False, 'import os\n'), ((244585, 244620), 'Bio.SeqIO.write', 'SeqIO.write', (['batch', 'handle', '"""fasta"""'], {}), "(batch, handle, 'fasta')\n", (244596, 244620), False, 'from Bio import SeqIO\n'), ((245257, 245298), 'os.path.join', 'os.path.join', (['tmpdir', '"""signalp_tmp"""', 'file'], {}), "(tmpdir, 
'signalp_tmp', file)\n", (245269, 245298), False, 'import os\n'), ((245321, 245359), 're.sub', 're.sub', (['"""\\\\.fa$"""', '""".signalp.out"""', 'file'], {}), "('\\\\.fa$', '.signalp.out', file)\n", (245327, 245359), False, 'import re\n'), ((245604, 245639), 'os.path.join', 'os.path.join', (['tmpdir', '"""signalp_tmp"""'], {}), "(tmpdir, 'signalp_tmp')\n", (245616, 245639), False, 'import os\n'), ((252950, 252984), 'Bio.SeqIO.write', 'SeqIO.write', (['records', 'out', '"""fasta"""'], {}), "(records, out, 'fasta')\n", (252961, 252984), False, 'from Bio import SeqIO\n'), ((253622, 253643), 'os.path.abspath', 'os.path.abspath', (['gaps'], {}), '(gaps)\n', (253637, 253643), False, 'import os\n'), ((253807, 253844), 'subprocess.call', 'subprocess.call', (['cmd3'], {'stdout': 'outfile'}), '(cmd3, stdout=outfile)\n', (253822, 253844), False, 'import subprocess\n'), ((258693, 258718), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (258716, 258718), False, 'import multiprocessing\n'), ((258744, 258769), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (258767, 258769), False, 'import multiprocessing\n'), ((259020, 259038), 'os.listdir', 'os.listdir', (['tmpdir'], {}), '(tmpdir)\n', (259030, 259038), False, 'import os\n'), ((259451, 259469), 'os.listdir', 'os.listdir', (['tmpdir'], {}), '(tmpdir)\n', (259461, 259469), False, 'import os\n'), ((260748, 260791), 'os.path.join', 'os.path.join', (['outdir', '"""output"""', '"""gmhmm.mod"""'], {}), "(outdir, 'output', 'gmhmm.mod')\n", (260760, 260791), False, 'import os\n'), ((260811, 260844), 'os.path.join', 'os.path.join', (['tmpdir', '"""gmhmm.mod"""'], {}), "(tmpdir, 'gmhmm.mod')\n", (260823, 260844), False, 'import os\n'), ((261265, 261316), 'subprocess.call', 'subprocess.call', (['[GeneMark2GFF, gm_gtf]'], {'stdout': 'out'}), '([GeneMark2GFF, gm_gtf], stdout=out)\n', (261280, 261316), False, 'import subprocess\n'), ((262551, 262594), 'os.path.join', 'os.path.join', 
(['outdir', '"""output"""', '"""gmhmm.mod"""'], {}), "(outdir, 'output', 'gmhmm.mod')\n", (262563, 262594), False, 'import os\n'), ((262614, 262647), 'os.path.join', 'os.path.join', (['tmpdir', '"""gmhmm.mod"""'], {}), "(tmpdir, 'gmhmm.mod')\n", (262626, 262647), False, 'import os\n'), ((263068, 263119), 'subprocess.call', 'subprocess.call', (['[GeneMark2GFF, gm_gtf]'], {'stdout': 'out'}), '([GeneMark2GFF, gm_gtf], stdout=out)\n', (263083, 263119), False, 'import subprocess\n'), ((280122, 280143), 'os.path.abspath', 'os.path.abspath', (['gff3'], {}), '(gff3)\n', (280137, 280143), False, 'import os\n'), ((280145, 280167), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (280160, 280167), False, 'import os\n'), ((281447, 281471), 'os.path.abspath', 'os.path.abspath', (['origzff'], {}), '(origzff)\n', (281462, 281471), False, 'import os\n'), ((281473, 281503), 'os.path.abspath', 'os.path.abspath', (['trainingFasta'], {}), '(trainingFasta)\n', (281488, 281503), False, 'import os\n'), ((282037, 282061), 'os.path.abspath', 'os.path.abspath', (['snapHMM'], {}), '(snapHMM)\n', (282052, 282061), False, 'import os\n'), ((282063, 282085), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (282078, 282085), False, 'import os\n'), ((282848, 282875), 'distro.linux_distribution', 'distro.linux_distribution', ([], {}), '()\n', (282873, 282875), False, 'import distro\n'), ((283593, 283611), 'os.remove', 'os.remove', (['tRNAout'], {}), '(tRNAout)\n', (283602, 283611), False, 'import os\n'), ((288384, 288412), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""fasta"""'], {}), "(infile, 'fasta')\n", (288395, 288412), False, 'from Bio import SeqIO\n'), ((293704, 293724), 'os.path.abspath', 'os.path.abspath', (['gff'], {}), '(gff)\n', (293719, 293724), False, 'import os\n'), ((293759, 293783), 'os.path.abspath', 'os.path.abspath', (['repeats'], {}), '(repeats)\n', (293774, 293783), False, 'import os\n'), ((293949, 293985), 'subprocess.call', 
'subprocess.call', (['cmd1'], {'stdout': 'bedout'}), '(cmd1, stdout=bedout)\n', (293964, 293985), False, 'import subprocess\n'), ((294129, 294165), 'subprocess.call', 'subprocess.call', (['cmd2'], {'stdout': 'gffout'}), '(cmd2, stdout=gffout)\n', (294144, 294165), False, 'import subprocess\n'), ((306547, 306576), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""genbank"""'], {}), "(input, 'genbank')\n", (306558, 306576), False, 'from Bio import SeqIO\n'), ((316855, 316892), 'os.path.join', 'os.path.join', (['outputdir', "(name + '.fa')"], {}), "(outputdir, name + '.fa')\n", (316867, 316892), False, 'import os\n'), ((322467, 322507), 'os.path.join', 'os.path.join', (['folder', '"""associations.txt"""'], {}), "(folder, 'associations.txt')\n", (322479, 322507), False, 'import os\n'), ((352477, 352489), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (352485, 352489), True, 'import numpy as np\n'), ((354435, 354459), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (354451, 354459), False, 'import os\n'), ((354821, 354844), 'os.path.basename', 'os.path.basename', (['query'], {}), '(query)\n', (354837, 354844), False, 'import os\n'), ((355207, 355231), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (355223, 355231), False, 'import os\n'), ((355595, 355618), 'os.path.basename', 'os.path.basename', (['query'], {}), '(query)\n', (355611, 355618), False, 'import os\n'), ((359907, 359926), 'shutil.rmtree', 'shutil.rmtree', (['dest'], {}), '(dest)\n', (359920, 359926), False, 'import shutil\n'), ((360784, 360811), 'os.path.abspath', 'os.path.abspath', (['foldername'], {}), '(foldername)\n', (360799, 360811), False, 'import os\n'), ((360835, 360863), 'os.path.join', 'os.path.join', (['Database', 'name'], {}), '(Database, name)\n', (360847, 360863), False, 'import os\n'), ((361021, 361049), 'funannotate.resources.busco_links.keys', 'resources.busco_links.keys', ([], {}), '()\n', (361047, 361049), True, 'import 
funannotate.resources as resources\n'), ((361831, 361855), 'Bio.SeqIO.parse', 'SeqIO.parse', (['sp', '"""fasta"""'], {}), "(sp, 'fasta')\n", (361842, 361855), False, 'from Bio import SeqIO\n'), ((362779, 362828), 'os.path.join', 'os.path.join', (['tmpdir', '"""phylogeny.buscos.used.txt"""'], {}), "(tmpdir, 'phylogeny.buscos.used.txt')\n", (362791, 362828), False, 'import os\n'), ((364384, 364415), 'Bio.Phylo.parse', 'Phylo.parse', (['treefile', '"""newick"""'], {}), "(treefile, 'newick')\n", (364395, 364415), False, 'from Bio import Phylo\n'), ((364443, 364485), 'os.path.join', 'os.path.join', (['tmpdir', '"""RAxML_bestTree.nwk"""'], {}), "(tmpdir, 'RAxML_bestTree.nwk')\n", (364455, 364485), False, 'import os\n'), ((364641, 364681), 'os.path.join', 'os.path.join', (['tmpdir', '"""ML.phylogeny.pdf"""'], {}), "(tmpdir, 'ML.phylogeny.pdf')\n", (364653, 364681), False, 'import os\n'), ((365275, 365315), 'os.path.join', 'os.path.join', (['tmpdir', '"""ML.phylogeny.pdf"""'], {}), "(tmpdir, 'ML.phylogeny.pdf')\n", (365287, 365315), False, 'import os\n'), ((374299, 374356), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""', "(TrainSet + '.train')"], {}), "(outdir, 'predict_misc', TrainSet + '.train')\n", (374311, 374356), False, 'import os\n'), ((375471, 375500), 'sys.stderr.write', 'sys.stderr.write', (['train_table'], {}), '(train_table)\n', (375487, 375500), False, 'import sys\n'), ((377642, 377653), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (377650, 377653), False, 'import sys\n'), ((377714, 377738), 'operator.itemgetter', 'operator.itemgetter', (['col'], {}), '(col)\n', (377733, 377738), False, 'import operator\n'), ((378851, 378875), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['fasta'], {}), '(fasta)\n', (378868, 378875), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((380267, 380293), 'os.path.join', 'os.path.join', (['directory', 'f'], {}), '(directory, f)\n', (380279, 380293), False, 'import os\n'), ((381472, 
381499), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""fasta"""'], {}), "(input, 'fasta')\n", (381483, 381499), False, 'from Bio import SeqIO\n'), ((385055, 385072), 'os.listdir', 'os.listdir', (['a_dir'], {}), '(a_dir)\n', (385065, 385072), False, 'import os\n'), ((385186, 385203), 'os.listdir', 'os.listdir', (['a_dir'], {}), '(a_dir)\n', (385196, 385203), False, 'import os\n'), ((9163, 9239), 'textwrap.TextWrapper', 'textwrap.TextWrapper', ([], {'subsequent_indent': 'subsequent_indent', 'width': 'fixed_width'}), '(subsequent_indent=subsequent_indent, width=fixed_width)\n', (9183, 9239), False, 'import textwrap\n'), ((10791, 10823), 're.sub', 're.sub', (['"""\x1b\\\\[4m"""', '""""""', 'row_str'], {}), "('\\x1b\\\\[4m', '', row_str)\n", (10797, 10823), False, 'import re\n'), ((11871, 11907), 'subprocess.call', 'subprocess.call', (['cmd'], {'stdout': 'outfile'}), '(cmd, stdout=outfile)\n', (11886, 11907), False, 'import subprocess\n'), ((12271, 12307), 'subprocess.call', 'subprocess.call', (['cmd'], {'stdout': 'outfile'}), '(cmd, stdout=outfile)\n', (12286, 12307), False, 'import subprocess\n'), ((14014, 14117), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'bufsize': 'buff', 'universal_newlines': '(True)', 'stdin': 'subprocess.PIPE'}), '(command, shell=True, bufsize=buff, universal_newlines=True,\n stdin=subprocess.PIPE)\n', (14030, 14117), False, 'import subprocess\n'), ((26721, 26737), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (26735, 26737), False, 'import hashlib\n'), ((28302, 28330), 'os.path.basename', 'os.path.basename', (['source_dir'], {}), '(source_dir)\n', (28318, 28330), False, 'import os\n'), ((42980, 43007), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""fasta"""'], {}), "(input, 'fasta')\n", (42991, 43007), False, 'from Bio import SeqIO\n'), ((49539, 49550), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (49547, 49550), False, 'import sys\n'), ((56494, 56517), 'os.path.realpath', 'os.path.realpath', 
(['input'], {}), '(input)\n', (56510, 56517), False, 'import os\n'), ((59160, 59189), 'itertools.izip', 'zip', (['exons[0::2]', 'exons[1::2]'], {}), '(exons[0::2], exons[1::2])\n', (59163, 59189), True, 'from itertools import izip as zip\n'), ((59218, 59247), 'itertools.izip', 'zip', (['query[0::2]', 'query[1::2]'], {}), '(query[0::2], query[1::2])\n', (59221, 59247), True, 'from itertools import izip as zip\n'), ((85355, 85377), 'numpy.subtract', 'numpy.subtract', (['x', 'loc'], {}), '(x, loc)\n', (85369, 85377), False, 'import numpy\n'), ((144690, 144701), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (144698, 144701), False, 'import sys\n'), ((233598, 233655), 'os.path.join', 'os.path.join', (['tmpdir', '"""run_busco"""', '"""full_table_busco.tsv"""'], {}), "(tmpdir, 'run_busco', 'full_table_busco.tsv')\n", (233610, 233655), False, 'import os\n'), ((245711, 245752), 'os.path.join', 'os.path.join', (['tmpdir', '"""signalp_tmp"""', 'file'], {}), "(tmpdir, 'signalp_tmp', file)\n", (245723, 245752), False, 'import os\n'), ((252642, 252669), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input', '"""fasta"""'], {}), "(input, 'fasta')\n", (252653, 252669), False, 'from Bio import SeqIO\n'), ((258070, 258082), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (258080, 258082), False, 'import uuid\n'), ((258583, 258618), 'os.path.join', 'os.path.join', (['tmpdir', "(ID + '.fasta')"], {}), "(tmpdir, ID + '.fasta')\n", (258595, 258618), False, 'import os\n'), ((260623, 260643), 'os.path.abspath', 'os.path.abspath', (['ini'], {}), '(ini)\n', (260638, 260643), False, 'import os\n'), ((262426, 262446), 'os.path.abspath', 'os.path.abspath', (['ini'], {}), '(ini)\n', (262441, 262446), False, 'import os\n'), ((281057, 281082), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['infile'], {}), '(infile)\n', (281074, 281082), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((282768, 282786), 'platform.mac_ver', 'platform.mac_ver', ([], {}), '()\n', (282784, 282786), 
False, 'import platform\n'), ((283241, 283268), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (283266, 283268), False, 'import multiprocessing\n'), ((286757, 286784), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (286768, 286784), False, 'from Bio import SeqIO\n'), ((288713, 288747), 'Bio.SeqIO.write', 'SeqIO.write', (['rec', 'outfile', '"""fasta"""'], {}), "(rec, outfile, 'fasta')\n", (288724, 288747), False, 'from Bio import SeqIO\n'), ((315316, 315363), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\tproduct\\t%s\\n' % (ID, v))"], {}), "('%s\\tproduct\\t%s\\n' % (ID, v))\n", (315332, 315363), False, 'import sys\n'), ((316957, 316993), 'Bio.SeqIO.write', 'SeqIO.write', (['record', 'output', '"""fasta"""'], {}), "(record, output, 'fasta')\n", (316968, 316993), False, 'from Bio import SeqIO\n'), ((322542, 322579), 'os.path.join', 'os.path.join', (['folder', "(genome + '.txt')"], {}), "(folder, genome + '.txt')\n", (322554, 322579), False, 'import os\n'), ((322665, 322692), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (322676, 322692), False, 'from Bio import SeqIO\n'), ((354242, 354281), 'os.path.join', 'os.path.join', (['protortho', "(base + '.dmnd')"], {}), "(protortho, base + '.dmnd')\n", (354254, 354281), False, 'import os\n'), ((354725, 354757), 'os.path.join', 'os.path.join', (['protortho', 'outname'], {}), '(protortho, outname)\n', (354737, 354757), False, 'import os\n'), ((355111, 355143), 'os.path.join', 'os.path.join', (['protortho', 'outname'], {}), '(protortho, outname)\n', (355123, 355143), False, 'import os\n'), ((355499, 355531), 'os.path.join', 'os.path.join', (['protortho', 'outname'], {}), '(protortho, outname)\n', (355511, 355531), False, 'import os\n'), ((355883, 355915), 'os.path.join', 'os.path.join', (['protortho', 'outname'], {}), '(protortho, outname)\n', (355895, 355915), False, 'import os\n'), ((362867, 362910), 
'os.path.join', 'os.path.join', (['tmpdir', '"""phylogeny.concat.fa"""'], {}), "(tmpdir, 'phylogeny.concat.fa')\n", (362879, 362910), False, 'import os\n'), ((373936, 373972), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""'], {}), "(outdir, 'predict_misc')\n", (373948, 373972), False, 'import os\n'), ((374235, 374271), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""'], {}), "(outdir, 'predict_misc')\n", (374247, 374271), False, 'import os\n'), ((374729, 374798), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""', '"""augustus.initial.training.txt"""'], {}), "(outdir, 'predict_misc', 'augustus.initial.training.txt')\n", (374741, 374798), False, 'import os\n'), ((377234, 377263), 'sys.stderr.write', 'sys.stderr.write', (['train_table'], {}), '(train_table)\n', (377250, 377263), False, 'import sys\n'), ((377318, 377344), 'shutil.rmtree', 'shutil.rmtree', (['trainingdir'], {}), '(trainingdir)\n', (377331, 377344), False, 'import shutil\n'), ((379702, 379754), 'os.path.join', 'os.path.join', (['parentdir', '"""aux_scripts"""', '"""pal2nal.pl"""'], {}), "(parentdir, 'aux_scripts', 'pal2nal.pl')\n", (379714, 379754), False, 'import os\n'), ((380457, 380480), 'os.path.basename', 'os.path.basename', (['fasta'], {}), '(fasta)\n', (380473, 380480), False, 'import os\n'), ((382713, 382735), 'os.path.abspath', 'os.path.abspath', (['codon'], {}), '(codon)\n', (382728, 382735), False, 'import os\n'), ((382743, 382764), 'os.path.abspath', 'os.path.abspath', (['tree'], {}), '(tree)\n', (382758, 382764), False, 'import os\n'), ((383004, 383071), 'subprocess.call', 'subprocess.call', (['etecmd'], {'cwd': 'tmpdir', 'stdout': 'logfile', 'stderr': 'logfile'}), '(etecmd, cwd=tmpdir, stdout=logfile, stderr=logfile)\n', (383019, 383071), False, 'import subprocess\n'), ((383215, 383241), 'os.path.join', 'os.path.join', (['tmpdir', 'file'], {}), '(tmpdir, file)\n', (383227, 383241), False, 'import os\n'), ((383265, 383297), 'os.path.join', 
'os.path.join', (['tmpdir', 'name', 'file'], {}), '(tmpdir, name, file)\n', (383277, 383297), False, 'import os\n'), ((384417, 384439), 'os.path.abspath', 'os.path.abspath', (['codon'], {}), '(codon)\n', (384432, 384439), False, 'import os\n'), ((384447, 384468), 'os.path.abspath', 'os.path.abspath', (['tree'], {}), '(tree)\n', (384462, 384468), False, 'import os\n'), ((384684, 384751), 'subprocess.call', 'subprocess.call', (['etecmd'], {'cwd': 'tmpdir', 'stdout': 'logfile', 'stderr': 'logfile'}), '(etecmd, cwd=tmpdir, stdout=logfile, stderr=logfile)\n', (384699, 384751), False, 'import subprocess\n'), ((384895, 384921), 'os.path.join', 'os.path.join', (['tmpdir', 'file'], {}), '(tmpdir, file)\n', (384907, 384921), False, 'import os\n'), ((384945, 384977), 'os.path.join', 'os.path.join', (['tmpdir', 'name', 'file'], {}), '(tmpdir, name, file)\n', (384957, 384977), False, 'import os\n'), ((385102, 385127), 'os.path.join', 'os.path.join', (['a_dir', 'name'], {}), '(a_dir, name)\n', (385114, 385127), False, 'import os\n'), ((385233, 385258), 'os.path.join', 'os.path.join', (['a_dir', 'name'], {}), '(a_dir, name)\n', (385245, 385258), False, 'import os\n'), ((48357, 48384), 'Bio.SeqIO.parse', 'SeqIO.parse', (['gbk', '"""genbank"""'], {}), "(gbk, 'genbank')\n", (48368, 48384), False, 'from Bio import SeqIO\n'), ((60382, 60405), 'os.path.realpath', 'os.path.realpath', (['input'], {}), '(input)\n', (60398, 60405), False, 'import os\n'), ((63352, 63381), 'itertools.izip', 'zip', (['exons[0::2]', 'exons[1::2]'], {}), '(exons[0::2], exons[1::2])\n', (63355, 63381), True, 'from itertools import izip as zip\n'), ((63414, 63443), 'itertools.izip', 'zip', (['query[0::2]', 'query[1::2]'], {}), '(query[0::2], query[1::2])\n', (63417, 63443), True, 'from itertools import izip as zip\n'), ((85944, 85969), 'numpy.subtract', 'numpy.subtract', (['x', 'hit[0]'], {}), '(x, hit[0])\n', (85958, 85969), False, 'import numpy\n'), ((91131, 91156), 'datetime.datetime.today', 
'datetime.datetime.today', ([], {}), '()\n', (91154, 91156), False, 'import datetime\n'), ((171453, 171484), 'Bio.SeqIO.FastaIO.SimpleFastaParser', 'SimpleFastaParser', (['fasta_infile'], {}), '(fasta_infile)\n', (171470, 171484), False, 'from Bio.SeqIO.FastaIO import SimpleFastaParser\n'), ((178335, 178408), 'sys.stderr.write', 'sys.stderr.write', (['"""Error, can\'t find ID or Parent. Malformed GFF file.\n"""'], {}), '("Error, can\'t find ID or Parent. Malformed GFF file.\\n")\n', (178351, 178408), False, 'import sys\n'), ((178429, 178451), 'sys.stderr.write', 'sys.stderr.write', (['line'], {}), '(line)\n', (178445, 178451), False, 'import sys\n'), ((178472, 178483), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (178480, 178483), False, 'import sys\n'), ((213835, 213846), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (213843, 213846), False, 'import sys\n'), ((258439, 258474), 'os.path.join', 'os.path.join', (['tmpdir', "(ID + '.fasta')"], {}), "(tmpdir, ID + '.fasta')\n", (258451, 258474), False, 'import os\n'), ((260967, 261010), 'os.path.join', 'os.path.join', (['outdir', '"""output"""', '"""gmhmm.mod"""'], {}), "(outdir, 'output', 'gmhmm.mod')\n", (260979, 261010), False, 'import os\n'), ((262770, 262813), 'os.path.join', 'os.path.join', (['outdir', '"""output"""', '"""gmhmm.mod"""'], {}), "(outdir, 'output', 'gmhmm.mod')\n", (262782, 262813), False, 'import os\n'), ((280983, 281005), 'os.path.abspath', 'os.path.abspath', (['fasta'], {}), '(fasta)\n', (280998, 281005), False, 'import os\n'), ((291149, 291187), 'os.path.join', 'os.path.join', (['tmpdir', '"""bad_models.gff"""'], {}), "(tmpdir, 'bad_models.gff')\n", (291161, 291187), False, 'import os\n'), ((293254, 293284), 're.sub', 're.sub', (['""";Name=.*$"""', '""";"""', 'line'], {}), "(';Name=.*$', ';', line)\n", (293260, 293284), False, 'import re\n'), ((296336, 296374), 'os.path.join', 'os.path.join', (['tmpdir', '"""bad_models.gff"""'], {}), "(tmpdir, 'bad_models.gff')\n", (296348, 296374), False, 
'import os\n'), ((298000, 298030), 're.sub', 're.sub', (['""";Name=.*$"""', '""";"""', 'line'], {}), "(';Name=.*$', ';', line)\n", (298006, 298030), False, 'import re\n'), ((363282, 363323), 'os.path.join', 'os.path.join', (['folder', "(species[i] + '.faa')"], {}), "(folder, species[i] + '.faa')\n", (363294, 363323), False, 'import os\n'), ((374379, 374448), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""', '"""augustus.initial.training.txt"""'], {}), "(outdir, 'predict_misc', 'augustus.initial.training.txt')\n", (374391, 374448), False, 'import os\n'), ((376444, 376511), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""', '"""augustus.final.training.txt"""'], {}), "(outdir, 'predict_misc', 'augustus.final.training.txt')\n", (376456, 376511), False, 'import os\n'), ((13853, 13898), 'signal.signal', 'signal.signal', (['signal.SIGPIPE', 'signal.SIG_DFL'], {}), '(signal.SIGPIPE, signal.SIG_DFL)\n', (13866, 13898), False, 'import signal\n'), ((28987, 29073), 'subprocess.Popen', 'subprocess.Popen', (['[name]'], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), '([name], stdout=devnull, stderr=devnull, universal_newlines\n =True)\n', (29003, 29073), False, 'import subprocess\n'), ((77604, 77615), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (77612, 77615), False, 'import sys\n'), ((234389, 234420), 'os.path.join', 'os.path.join', (['hmmerfolder', 'file'], {}), '(hmmerfolder, file)\n', (234401, 234420), False, 'import os\n'), ((259105, 259131), 'os.path.join', 'os.path.join', (['tmpdir', 'file'], {}), '(tmpdir, file)\n', (259117, 259131), False, 'import os\n'), ((259537, 259563), 'os.path.join', 'os.path.join', (['tmpdir', 'file'], {}), '(tmpdir, file)\n', (259549, 259563), False, 'import os\n'), ((286904, 286937), 're.sub', 're.sub', (['"""[^0-9]"""', '""""""', 'record.name'], {}), "('[^0-9]', '', record.name)\n", (286910, 286937), False, 'import re\n'), ((374647, 374683), 'os.path.join', 'os.path.join', 
(['outdir', '"""predict_misc"""'], {}), "(outdir, 'predict_misc')\n", (374659, 374683), False, 'import os\n'), ((375737, 375773), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""'], {}), "(outdir, 'predict_misc')\n", (375749, 375773), False, 'import os\n'), ((375921, 375957), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""'], {}), "(outdir, 'predict_misc')\n", (375933, 375957), False, 'import os\n'), ((376088, 376155), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""', '"""augustus.final.training.txt"""'], {}), "(outdir, 'predict_misc', 'augustus.final.training.txt')\n", (376100, 376155), False, 'import os\n'), ((29192, 29283), 'subprocess.Popen', 'subprocess.Popen', (["[name, '-V']"], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), "([name, '-V'], stdout=devnull, stderr=devnull,\n universal_newlines=True)\n", (29208, 29283), False, 'import subprocess\n'), ((291692, 291722), 're.sub', 're.sub', (['""";Name=.*$"""', '""";"""', 'line'], {}), "(';Name=.*$', ';', line)\n", (291698, 291722), False, 'import re\n'), ((376358, 376394), 'os.path.join', 'os.path.join', (['outdir', '"""predict_misc"""'], {}), "(outdir, 'predict_misc')\n", (376370, 376394), False, 'import os\n'), ((29394, 29496), 'subprocess.Popen', 'subprocess.Popen', (["[name, '-version-full']"], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), "([name, '-version-full'], stdout=devnull, stderr=devnull,\n universal_newlines=True)\n", (29410, 29496), False, 'import subprocess\n'), ((234850, 234919), 'os.path.join', 'os.path.join', (['base_folder', '"""augustus_output"""', '"""predicted_genes"""', 'file'], {}), "(base_folder, 'augustus_output', 'predicted_genes', file)\n", (234862, 234919), False, 'import os\n'), ((29592, 29687), 'subprocess.Popen', 'subprocess.Popen', (["[name, '--help']"], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), "([name, '--help'], stdout=devnull, 
stderr=devnull,\n universal_newlines=True)\n", (29608, 29687), False, 'import subprocess\n'), ((88832, 88843), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (88840, 88843), False, 'import sys\n'), ((237852, 237874), 'os.path.dirname', 'os.path.dirname', (['input'], {}), '(input)\n', (237867, 237874), False, 'import os\n'), ((297470, 297500), 're.sub', 're.sub', (['""";Name=.*$"""', '""";"""', 'line'], {}), "(';Name=.*$', ';', line)\n", (297476, 297500), False, 'import re\n'), ((29793, 29890), 'subprocess.Popen', 'subprocess.Popen', (["[name, '-version']"], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), "([name, '-version'], stdout=devnull, stderr=devnull,\n universal_newlines=True)\n", (29809, 29890), False, 'import subprocess\n'), ((238277, 238299), 'os.path.dirname', 'os.path.dirname', (['input'], {}), '(input)\n', (238292, 238299), False, 'import os\n'), ((29983, 30079), 'subprocess.Popen', 'subprocess.Popen', (["[name, 'version']"], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), "([name, 'version'], stdout=devnull, stderr=devnull,\n universal_newlines=True)\n", (29999, 30079), False, 'import subprocess\n'), ((30178, 30269), 'subprocess.Popen', 'subprocess.Popen', (["[name, '-h']"], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), "([name, '-h'], stdout=devnull, stderr=devnull,\n universal_newlines=True)\n", (30194, 30269), False, 'import subprocess\n'), ((30359, 30457), 'subprocess.Popen', 'subprocess.Popen', (["[name, '--version']"], {'stdout': 'devnull', 'stderr': 'devnull', 'universal_newlines': '(True)'}), "([name, '--version'], stdout=devnull, stderr=devnull,\n universal_newlines=True)\n", (30375, 30457), False, 'import subprocess\n'), ((334000, 334021), 'funannotate.resources.COGS.get', 'resources.COGS.get', (['x'], {}), '(x)\n', (334018, 334021), True, 'import funannotate.resources as resources\n'), ((342204, 342225), 'funannotate.resources.COGS.get', 
'resources.COGS.get', (['x'], {}), '(x)\n', (342222, 342225), True, 'import funannotate.resources as resources\n')] |
import time
import cv2
import numpy as np
from PIL import ImageGrab
import win32gui
import win32con
import win32api
import win32com, win32com.client
import re
from playsound import playsound
import jstyleson as json
from os import path
import sys
from collections import deque
import builtins as __builtin__
from datetime import datetime
def print(*args, **kwargs):
    """Shadow builtins.print, prefixing every message with a timestamp.

    Pass timestamp=False to suppress the prefix.  The 'timestamp' keyword is
    always removed before delegating so builtins.print never sees it.
    """
    timestamp = datetime.now()
    # pop() removes the key unconditionally; the original only deleted it when
    # the value was falsy, so print(..., timestamp=True) raised TypeError
    # because builtins.print received an unexpected 'timestamp' keyword.
    if not kwargs.pop('timestamp', True):
        timestamp = ""
    # Trailing spaces overwrite leftovers from "\r"-style status lines.
    return __builtin__.print(timestamp, *args, " " * 20, **kwargs)
import pytesseract
from pytesseract import TesseractNotFoundError
# Rolling window of the last 100 loop durations, used for the average loop time.
q = deque(maxlen=100)
root = None
if hasattr(sys, '_MEIPASS'):
    root = sys._MEIPASS
# NOTE(review): root from sys._MEIPASS above is unconditionally overwritten
# below, so the PyInstaller branch appears to have no effect — confirm intent.
base = getattr(sys.modules['__main__'], "__file__", sys.executable) if hasattr(sys, 'frozen') else __file__
root = path.dirname(path.realpath(path.abspath(base)))
# Bundled alert sound played during the deployment countdown.
path_to_audio = path.abspath(path.join(root, 'assets', 'audio.wav'))
# Load user configuration (jstyleson tolerates comments inside the JSON).
with open("config.json", "r") as file:
    cfg = json.load(file)
def capture_sub_window_percentage(hwnd, x_left, x_right, y_top, y_bottom):
    """Grab a screenshot of a sub-region of the given window.

    The region is expressed as fractional margins (0.0-1.0) of the window's
    width/height to trim from the left, right, top and bottom edges.
    """
    left, top, right, bottom = win32gui.GetWindowRect(hwnd)
    width = right - left
    height = bottom - top
    # Shrink the full window rectangle by the requested fractional gutters.
    region = (
        left + int(width * x_left),
        top + int(height * y_top),
        right - int(width * x_right),
        bottom - int(height * y_bottom)
    )
    return ImageGrab.grab(region)
def convert_image_to_text(img):
    """OCR a PIL image: grayscale it, run tesseract, return lowercased text."""
    gray = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2GRAY)
    return pytesseract.image_to_string(gray).lower()
def auto_accept_invite(hwnd):
    """Scan the configured screen region for a group invite and accept it.

    Accepts by sending the 'Y' key when the inviter's name is in the
    configured allowed_names list (an empty list accepts from anyone).
    """
    img = capture_sub_window_percentage(
        hwnd,
        cfg["auto_accept"]["bounding_box"]["left"],
        cfg["auto_accept"]["bounding_box"]["right"],
        cfg["auto_accept"]["bounding_box"]["top"],
        cfg["auto_accept"]["bounding_box"]["bottom"])
    text = convert_image_to_text(img)
    matches = re.finditer(r"(.+) wants to invite you", text, re.IGNORECASE | re.MULTILINE)
    for matchNum, match in enumerate(matches, start=1):
        person = match.groups()
        person = person[0]
        print("Invite from `{}`".format(person))
        # Case-insensitive whitelist; empty list means accept everyone.
        allowed = [x.lower() for x in cfg["auto_accept"]["allowed_names"]]
        if len(allowed) == 0 or person.lower() in allowed:
            # Fixed: original called "...".format(person) with no placeholder,
            # which silently discarded the argument.
            print("\tInvite allowed!")
            try:
                # Tarkov doesn't accept inputs unless it's in the foreground.
                # Sending ALT ('%') first works around SetForegroundWindow
                # restrictions; see https://stackoverflow.com/a/46092
                shell = win32com.client.Dispatch("WScript.Shell")
                shell.SendKeys('%')
                win32gui.ShowWindow(hwnd, 9)
                win32gui.SetForegroundWindow(hwnd)
                # Press and release 'Y' (0x59) to accept the invite.
                win32api.SendMessage(hwnd, win32con.WM_KEYDOWN, 0x59, 0)
                time.sleep(0.5)
                win32api.SendMessage(hwnd, win32con.WM_KEYUP, 0x59, 0)
            except Exception as ex:
                print("Failed to bring window to foreground, err: {}".format(ex))
def deployment_warning(hwnd):
    """Watch the configured screen region for the raid deployment countdown
    and play an audio cue roughly once per remaining second."""
    img = capture_sub_window_percentage(
        hwnd,
        cfg["deploy_warning"]["bounding_box"]["left"],
        cfg["deploy_warning"]["bounding_box"]["right"],
        cfg["deploy_warning"]["bounding_box"]["top"],
        cfg["deploy_warning"]["bounding_box"]["bottom"])
    text = convert_image_to_text(img)
    # Timestamp right after OCR so the compensation below accounts for the
    # OCR latency as well.
    after_grab = time.time()
    if "get ready" in text or "deploying in" in text:
        print("Found deployment text")
        text = text[text.find('deploying in:'):]
        # Strip all whitespace so the regex sees one compact "mm:ss.ff" token.
        text = "".join(text.split())
        matches = re.finditer(r"(\d+):(\d+).(\d+)", text)
        for matchNum, match in enumerate(matches, start=1):
            # NOTE(review): 'min' shadows the builtin here, and the minutes
            # value is never used in the countdown below — confirm intent.
            min, sec, frac = match.groups()
            print("\t{}:{}.{}".format(min, sec, frac))
            wholeSec = float('{}.{}'.format(sec, frac))
            # Subtract the processing time elapsed since the screenshot, then
            # sleep the fractional part so the loop below ticks on whole seconds.
            elapsed = time.time() - after_grab
            print('\tTook: {0}'.format(elapsed))
            newSec = wholeSec - elapsed
            print('\tCurrent spot {}'.format(newSec))
            sleep = newSec - int(newSec)
            print('\tSleep {}'.format(sleep))
            time.sleep(sleep)
            for x in reversed(range(int(newSec))):
                print("\t\t{}".format(x))
                # In staggered mode, skip the cue (just wait) on alternate
                # seconds while more than 5 seconds remain.
                if cfg["deploy_warning"]["staggered"] and x >= 5 and x % 2 == 1:
                    time.sleep(1)
                else:
                    playsound(path_to_audio)
            # Only the first countdown match matters.
            break
        time.sleep(5)
# Spinner frames cycled through in the status line while waiting between loops.
loading_symbols = ['|', '/', '-', '\\']
def runningMeanFast(x, N):
    """Running mean of x over a window of N samples, via convolution.

    Note: the trailing N-1 outputs come from windows that extend past the end
    of x (implicit zero padding), so they taper toward zero.
    """
    kernel = np.full(N, 1.0 / N)
    return np.convolve(x, kernel)[N - 1:]
try:
    i = 0
    while True:
        # Enumerate all top-level windows, collecting (handle, title) pairs.
        toplist, winlist = [], []
        def enum_cb(hwnd, results):
            # Closure appends into winlist; the 'results' parameter is unused.
            winlist.append((hwnd, win32gui.GetWindowText(hwnd)))
        win32gui.EnumWindows(enum_cb, toplist)
        # Find the game window by (case-insensitive) title substring.
        tarkov = [(hwnd, title) for hwnd, title in winlist if cfg["window_name"].lower() in title.lower()]
        if len(tarkov) == 0:
            print("Cannot find {} window.".format(cfg["window_name"]))
            time.sleep(5)
            continue
        tarkov = tarkov[0]
        hwnd = tarkov[0]
        limiter = 0.5 # min seconds to wait between loops
        # NOTE(review): this inner loop never exits, so the window handle is
        # looked up only once; a closed/reopened game window is not re-found.
        while True:
            start_time = time.time()
            # auto-accept invites
            if cfg["auto_accept"]["enabled"]:
                auto_accept_invite(hwnd)
            # deployment warning
            if cfg["deploy_warning"]["enabled"]:
                deployment_warning(hwnd)
            # One screenshot per second
            elapsed = time.time() - start_time
            q.append(elapsed)
            print('Waiting {0} Avg loop time: {1:.3f}s'.format(loading_symbols[i % 4], sum(q) / len(q)), end="\r", timestamp=False)
            i += 1
            remaining = limiter - elapsed
            if remaining > 0:
                time.sleep(remaining)
except TesseractNotFoundError:
    print(
        "tesseract is not installed or it's not in your PATH. Please find the Windows "
        "binaries for download here: https://github.com/UB-Mannheim/tesseract/wiki")
    input()
sys.exit(1) | [
"win32gui.GetWindowRect",
"PIL.ImageGrab.grab",
"playsound.playsound",
"time.sleep",
"sys.exit",
"win32api.SendMessage",
"win32gui.SetForegroundWindow",
"collections.deque",
"numpy.asarray",
"win32gui.ShowWindow",
"re.finditer",
"win32com.client.Dispatch",
"numpy.ones",
"win32gui.GetWindow... | [((663, 680), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (668, 680), False, 'from collections import deque\n'), ((384, 398), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (396, 398), False, 'from datetime import datetime\n'), ((535, 590), 'builtins.print', '__builtin__.print', (['timestamp', '*args', "(' ' * 20)"], {}), "(timestamp, *args, ' ' * 20, **kwargs)\n", (552, 590), True, 'import builtins as __builtin__\n'), ((940, 978), 'os.path.join', 'path.join', (['root', '"""assets"""', '"""audio.wav"""'], {}), "(root, 'assets', 'audio.wav')\n", (949, 978), False, 'from os import path\n'), ((1030, 1045), 'jstyleson.load', 'json.load', (['file'], {}), '(file)\n', (1039, 1045), True, 'import jstyleson as json\n'), ((1133, 1161), 'win32gui.GetWindowRect', 'win32gui.GetWindowRect', (['hwnd'], {}), '(hwnd)\n', (1155, 1161), False, 'import win32gui\n'), ((1613, 1636), 'PIL.ImageGrab.grab', 'ImageGrab.grab', (['new_box'], {}), '(new_box)\n', (1627, 1636), False, 'from PIL import ImageGrab\n'), ((1694, 1709), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1704, 1709), True, 'import numpy as np\n'), ((1719, 1755), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (1731, 1755), False, 'import cv2\n'), ((2171, 2246), 're.finditer', 're.finditer', (['"""(.+) wants to invite you"""', 'text', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(.+) wants to invite you', text, re.IGNORECASE | re.MULTILINE)\n", (2182, 2246), False, 'import re\n'), ((3602, 3613), 'time.time', 'time.time', ([], {}), '()\n', (3611, 3613), False, 'import time\n'), ((889, 907), 'os.path.abspath', 'path.abspath', (['base'], {}), '(base)\n', (901, 907), False, 'from os import path\n'), ((3811, 3852), 're.finditer', 're.finditer', (['"""(\\\\d+):(\\\\d+).(\\\\d+)"""', 'text'], {}), "('(\\\\d+):(\\\\d+).(\\\\d+)', text)\n", (3822, 3852), False, 'import re\n'), ((4677, 4690), 'time.sleep', 
'time.sleep', (['(5)'], {}), '(5)\n', (4687, 4690), False, 'import time\n'), ((4988, 5026), 'win32gui.EnumWindows', 'win32gui.EnumWindows', (['enum_cb', 'toplist'], {}), '(enum_cb, toplist)\n', (5008, 5026), False, 'import win32gui\n'), ((6306, 6317), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6314, 6317), False, 'import sys\n'), ((1767, 1798), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['im'], {}), '(im)\n', (1794, 1798), False, 'import pytesseract\n'), ((4356, 4373), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (4366, 4373), False, 'import time\n'), ((5246, 5259), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5256, 5259), False, 'import time\n'), ((5438, 5449), 'time.time', 'time.time', ([], {}), '()\n', (5447, 5449), False, 'import time\n'), ((2719, 2760), 'win32com.client.Dispatch', 'win32com.client.Dispatch', (['"""WScript.Shell"""'], {}), "('WScript.Shell')\n", (2743, 2760), False, 'import win32com, win32com.client\n'), ((2865, 2893), 'win32gui.ShowWindow', 'win32gui.ShowWindow', (['hwnd', '(9)'], {}), '(hwnd, 9)\n', (2884, 2893), False, 'import win32gui\n'), ((2910, 2944), 'win32gui.SetForegroundWindow', 'win32gui.SetForegroundWindow', (['hwnd'], {}), '(hwnd)\n', (2938, 2944), False, 'import win32gui\n'), ((2961, 3015), 'win32api.SendMessage', 'win32api.SendMessage', (['hwnd', 'win32con.WM_KEYDOWN', '(89)', '(0)'], {}), '(hwnd, win32con.WM_KEYDOWN, 89, 0)\n', (2981, 3015), False, 'import win32api\n'), ((3034, 3049), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3044, 3049), False, 'import time\n'), ((3066, 3118), 'win32api.SendMessage', 'win32api.SendMessage', (['hwnd', 'win32con.WM_KEYUP', '(89)', '(0)'], {}), '(hwnd, win32con.WM_KEYUP, 89, 0)\n', (3086, 3118), False, 'import win32api\n'), ((4089, 4100), 'time.time', 'time.time', ([], {}), '()\n', (4098, 4100), False, 'import time\n'), ((4786, 4799), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (4793, 4799), True, 'import numpy as np\n'), 
((5758, 5769), 'time.time', 'time.time', ([], {}), '()\n', (5767, 5769), False, 'import time\n'), ((6052, 6073), 'time.sleep', 'time.sleep', (['remaining'], {}), '(remaining)\n', (6062, 6073), False, 'import time\n'), ((4569, 4582), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4579, 4582), False, 'import time\n'), ((4625, 4649), 'playsound.playsound', 'playsound', (['path_to_audio'], {}), '(path_to_audio)\n', (4634, 4649), False, 'from playsound import playsound\n'), ((4948, 4976), 'win32gui.GetWindowText', 'win32gui.GetWindowText', (['hwnd'], {}), '(hwnd)\n', (4970, 4976), False, 'import win32gui\n')] |
import numpy as np
def R2_nom_denom(y, yhat):
""" Calculates the nominator and denomitor for calculating R-squared
Args:
y (array): data
yhat (array): predicted data data
Returns:
nominator (float or array), denominator (float or array)
"""
y, yhat = np.array(y), np.array(yhat)
with np.errstate(divide="ignore", invalid="ignore"):
nom = np.sum((y - yhat) ** 2, axis=0)
denom = np.sum(y ** 2, axis=0) # Kendricks denominator
return nom, denom
| [
"numpy.sum",
"numpy.array",
"numpy.errstate"
] | [((299, 310), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (307, 310), True, 'import numpy as np\n'), ((312, 326), 'numpy.array', 'np.array', (['yhat'], {}), '(yhat)\n', (320, 326), True, 'import numpy as np\n'), ((336, 382), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (347, 382), True, 'import numpy as np\n'), ((398, 429), 'numpy.sum', 'np.sum', (['((y - yhat) ** 2)'], {'axis': '(0)'}), '((y - yhat) ** 2, axis=0)\n', (404, 429), True, 'import numpy as np\n'), ((446, 468), 'numpy.sum', 'np.sum', (['(y ** 2)'], {'axis': '(0)'}), '(y ** 2, axis=0)\n', (452, 468), True, 'import numpy as np\n')] |
import sys
import cv2
import numpy as np
from line_boundary_check import *
# ----------------------------------------------------------------------------
# Last known cursor position inside the preview window, updated by onMouse.
g_mouse_pos = [0 ,0]
# Mouse event handler
def onMouse(event, x, y, flags, param):
    """OpenCV mouse callback: record the cursor position on every event."""
    global g_mouse_pos
    g_mouse_pos = [x, y]
# ----------------------------------------------------------------------------
class boundaryLine:
    """A directed counting line with per-direction crossing tallies."""
    def __init__(self, line=(0,0,0,0)):
        """line: flat sequence (x0, y0, x1, y1) giving the two endpoints."""
        # Endpoints of the segment.
        self.p0 = (line[0], line[1])
        self.p1 = (line[2], line[3])
        # Crossing tallies, one per travel direction.
        self.count1 = 0
        self.count2 = 0
        # Rendering style.
        self.color = (0,255,255)
        self.lineThinkness = 4
        self.textColor = (0,255,255)
        self.textSize = 4
        self.textThinkness = 2
# Draw single boundary line
def drawBoundaryLine(img, line):
    """Render one boundary line: the segment, the per-direction counters at
    each endpoint, and markers distinguishing start (triangle) from end
    (tilted cross)."""
    start, end = line.p0, line.p1
    cv2.line(img, start, end, line.color, line.lineThinkness)
    cv2.putText(img, str(line.count1), start, cv2.FONT_HERSHEY_PLAIN, line.textSize, line.textColor, line.textThinkness)
    cv2.putText(img, str(line.count2), end, cv2.FONT_HERSHEY_PLAIN, line.textSize, line.textColor, line.textThinkness)
    cv2.drawMarker(img, start, line.color, cv2.MARKER_TRIANGLE_UP, 16, 4)
    cv2.drawMarker(img, end, line.color, cv2.MARKER_TILTED_CROSS, 16, 4)
# Draw multiple boundary lines
def drawBoundaryLines(img, boundaryLines):
    """Render every boundary line in the collection onto img."""
    for bline in boundaryLines:
        drawBoundaryLine(img, bline)
# in: boundary_line = boundaryLine class object
#     trajectory_line = pair of points ((x0, y0), (x1, y1))
def checkLineCross(boundary_line, trajectory_line):
    """Update boundary_line's counters if trajectory_line crosses it.

    The crossing direction (vector angle < 180 degrees or not) determines
    which of the two counters is incremented.
    """
    # Removed 'global audio_enable_flag / sound_welcome / sound_thankyou':
    # those names are never defined or used in this module.
    traj_p0 = trajectory_line[0]                                       # Trajectory of an object
    traj_p1 = trajectory_line[1]
    bLine_p0 = (boundary_line.p0[0], boundary_line.p0[1])              # Boundary line
    bLine_p1 = (boundary_line.p1[0], boundary_line.p1[1])
    intersect = checkIntersect(traj_p0, traj_p1, bLine_p0, bLine_p1)   # Check if intersect or not
    if intersect:
        # Angle between the trajectory and the boundary line decides direction.
        angle = calcVectorAngle(traj_p0, traj_p1, bLine_p0, bLine_p1)
        if angle < 180:
            boundary_line.count1 += 1
        else:
            boundary_line.count2 += 1
        #cx, cy = calcIntersectPoint(traj_p0, traj_p1, bLine_p0, bLine_p1) # Calculate the intersect coordination
#------------------------------------
# Area intrusion detection
class area:
    """A polygonal region of interest with an intrusion counter."""
    def __init__(self, contour):
        """contour: sequence of [x, y] vertices defining the polygon."""
        self.count = 0
        # int32 vertices, as expected by the OpenCV drawing/测试-free geometry helpers.
        self.contour = np.array(contour, dtype=np.int32)
# Draw areas (polygons)
def drawAreas(img, areas):
    """Render each area polygon — red when occupied (count > 0), blue when
    empty — with the occupant count printed at the first vertex."""
    for zone in areas:
        color = (0, 0, 255) if zone.count > 0 else (255, 0, 0)
        cv2.polylines(img, [zone.contour], True, color, 4)
        anchor = (zone.contour[0][0], zone.contour[0][1])
        cv2.putText(img, str(zone.count), anchor, cv2.FONT_HERSHEY_PLAIN, 4, color, 2)
# Area intrusion check
def checkAreaIntrusion(area, points):
    """Recount how many of the given points fall inside the area's polygon.

    Resets and rewrites area.count, so the value always reflects only the
    points passed in this call.
    """
    # Removed 'global audio_enable_flag / sound_warning': those names are
    # never defined or used in this module.
    area.count = 0
    for pt in points:
        if pointPolygonTest(area.contour, pt):
            area.count += 1
# ----------------------------------------------------------------------------
# boundary lines
# Demo configuration: two diagonal counting lines.
boundaryLines = [
    boundaryLine([ 300, 40, 20, 400 ]),
    boundaryLine([ 440, 40, 700, 400 ])
]
# Areas
# Demo configuration: one concave intrusion-detection polygon.
areas = [
    area([ [200,200], [500,180], [600,400], [300,300], [100,360] ])
]
def main():
    """Interactive demo: the mouse cursor plays the tracked object.

    Each frame counts mouse-trajectory crossings of the boundary lines and
    intrusions into the areas, draws everything, and exits on ESC.
    """
    cv2.namedWindow('test')
    cv2.setMouseCallback('test', onMouse)
    prev_mouse_pos = [0, 0]
    trace = []
    # Number of recent mouse positions kept for the trajectory polyline.
    trace_length = 25
    key = -1
    while key != 27: # ESC key
        img = np.zeros((600, 800, 3), dtype=np.uint8)
        # Movement since the previous frame is treated as a trajectory segment.
        for line in boundaryLines:
            checkLineCross(line, (prev_mouse_pos, g_mouse_pos))
        drawBoundaryLines(img, boundaryLines)
        for area in areas:
            checkAreaIntrusion(area, (g_mouse_pos,))
        drawAreas(img, areas)
        trace.append(g_mouse_pos)
        if len(trace)>trace_length:
            trace = trace[-trace_length:]
        # Draw the recent mouse trail as a thin cyan polyline.
        cv2.polylines(img, np.array([trace], dtype=np.int32), False, (255,255,0), 1, cv2.LINE_AA)
        prev_mouse_pos = g_mouse_pos
        cv2.imshow('test', img)
        key = cv2.waitKey(50)
    return 0
if __name__ == '__main__':
    sys.exit(main())
| [
"cv2.setMouseCallback",
"cv2.polylines",
"cv2.line",
"cv2.imshow",
"numpy.array",
"numpy.zeros",
"cv2.waitKey",
"cv2.namedWindow",
"cv2.drawMarker"
] | [((824, 889), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', 'line.color', 'line.lineThinkness'], {}), '(img, (x1, y1), (x2, y2), line.color, line.lineThinkness)\n', (832, 889), False, 'import cv2\n'), ((1142, 1214), 'cv2.drawMarker', 'cv2.drawMarker', (['img', '(x1, y1)', 'line.color', 'cv2.MARKER_TRIANGLE_UP', '(16)', '(4)'], {}), '(img, (x1, y1), line.color, cv2.MARKER_TRIANGLE_UP, 16, 4)\n', (1156, 1214), False, 'import cv2\n'), ((1218, 1291), 'cv2.drawMarker', 'cv2.drawMarker', (['img', '(x2, y2)', 'line.color', 'cv2.MARKER_TILTED_CROSS', '(16)', '(4)'], {}), '(img, (x2, y2), line.color, cv2.MARKER_TILTED_CROSS, 16, 4)\n', (1232, 1291), False, 'import cv2\n'), ((3487, 3510), 'cv2.namedWindow', 'cv2.namedWindow', (['"""test"""'], {}), "('test')\n", (3502, 3510), False, 'import cv2\n'), ((3515, 3552), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""test"""', 'onMouse'], {}), "('test', onMouse)\n", (3535, 3552), False, 'import cv2\n'), ((2534, 2567), 'numpy.array', 'np.array', (['contour'], {'dtype': 'np.int32'}), '(contour, dtype=np.int32)\n', (2542, 2567), True, 'import numpy as np\n'), ((2772, 2822), 'cv2.polylines', 'cv2.polylines', (['img', '[area.contour]', '(True)', 'color', '(4)'], {}), '(img, [area.contour], True, color, 4)\n', (2785, 2822), False, 'import cv2\n'), ((3686, 3725), 'numpy.zeros', 'np.zeros', (['(600, 800, 3)'], {'dtype': 'np.uint8'}), '((600, 800, 3), dtype=np.uint8)\n', (3694, 3725), True, 'import numpy as np\n'), ((4236, 4259), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'img'], {}), "('test', img)\n", (4246, 4259), False, 'import cv2\n'), ((4274, 4289), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (4285, 4289), False, 'import cv2\n'), ((4120, 4153), 'numpy.array', 'np.array', (['[trace]'], {'dtype': 'np.int32'}), '([trace], dtype=np.int32)\n', (4128, 4153), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is to find lowest eigenvalues with Davidson algorithm."""
import logging
import warnings
import numpy
import numpy.linalg
import scipy
import scipy.linalg
import scipy.sparse
import scipy.sparse.linalg
from openfermion.utils._sparse_tools import get_linear_qubit_operator_diagonal
from openfermion.utils._linear_qubit_operator import \
generate_linear_qubit_operator
class DavidsonError(Exception):
    """Base exception type raised by the Davidson solver."""
class DavidsonOptions(object):
    """Davidson algorithm iteration options."""
    def __init__(self, max_subspace=100, max_iterations=300, eps=1e-6,
                 real_only=False):
        """
        Args:
            max_subspace(int): Max number of vectors in the auxiliary subspace.
            max_iterations(int): Max number of iterations.
            eps(float): The max error for eigen vector error's elements during
                iterations: linear_operator * v - v * lambda.
            real_only(bool): Desired eigenvectors are real only or not. When one
                specifies the real_only to be true but it only has complex ones,
                no matter it converges or not, the returned vectors will be
                complex.

        Raises:
            ValueError: If max_subspace <= 2, max_iterations <= 0 or eps <= 0.
        """
        if max_subspace <= 2 or max_iterations <= 0 or eps <= 0:
            # Fixed: original message contained the typo 'and/ or'.
            raise ValueError('Invalid values for max_subspace, max_iterations '
                             'and/or eps: ({}, {}, {}).'.format(
                                 max_subspace, max_iterations, eps))
        self.max_subspace = max_subspace
        self.max_iterations = max_iterations
        self.eps = eps
        self.real_only = real_only
    def set_dimension(self, dimension):
        """Caps max_subspace to dimension + 1 once the matrix size is known.

        Args:
            dimension(int): Dimension of the matrix, which sets a upper limit on
                the work space.

        Raises:
            ValueError: If dimension is not positive.
        """
        if dimension <= 0:
            # Fixed: original message read 'Invalid dimension: {}).' with a
            # stray closing parenthesis.
            raise ValueError('Invalid dimension: {}.'.format(dimension))
        self.max_subspace = min(self.max_subspace, dimension + 1)
class Davidson(object):
    """Davidson algorithm to get the n states with smallest eigenvalues."""
    def __init__(self, linear_operator, linear_operator_diagonal, options=None):
        """
        Args:
            linear_operator(scipy.sparse.linalg.LinearOperator): The linear
                operator which defines a dot function when applying on a vector.
            linear_operator_diagonal(numpy.ndarray): The linear operator's
                diagonal elements.
            options(DavidsonOptions): Iteration options.

        Raises:
            ValueError: If linear_operator is neither a scipy LinearOperator
                nor a scipy sparse matrix.
        """
        if options is None:
            options = DavidsonOptions()
        if not isinstance(linear_operator,
                          (scipy.sparse.linalg.LinearOperator,
                           scipy.sparse.spmatrix)):
            raise ValueError(
                'linear_operator is not a LinearOperator: {}.'.format(type(
                    linear_operator)))
        self.linear_operator = linear_operator
        self.linear_operator_diagonal = linear_operator_diagonal
        self.options = options
        # The subspace can never usefully exceed the operator dimension + 1.
        self.options.set_dimension(len(linear_operator_diagonal))
    def get_lowest_n(self, n_lowest=1, initial_guess=None, max_iterations=None):
        """
        Returns `n` smallest eigenvalues and corresponding eigenvectors for
            linear_operator.

        Args:
            n(int):
                The number of states corresponding to the smallest eigenvalues
                and associated eigenvectors for the linear_operator.
            initial_guess(numpy.ndarray[complex]): Initial guess of eigenvectors
                associated with the `n` smallest eigenvalues.
            max_iterations(int): Max number of iterations when not converging.

        Returns:
            success(bool): Indicates whether it converged, i.e. max elementwise
                error is smaller than eps.
            eigen_values(numpy.ndarray[complex]): The smallest n eigenvalues.
            eigen_vectors(numpy.ndarray[complex]): The smallest n eigenvectors
                corresponding with those eigen values.
        """
        # Goes through a few checks and preprocessing before iterative
        # diagonalization.
        # 1. Checks for number of states desired, should be in the range of
        # [0, max_subspace).
        if n_lowest <= 0 or n_lowest >= self.options.max_subspace:
            raise ValueError('n_lowest {} is supposed to be in [1, {}).'.format(
                n_lowest, self.options.max_subspace))
        # 2. Checks for initial guess vectors' dimension is the same to that of
        # the operator.
        if initial_guess is None:
            initial_guess = generate_random_vectors(
                len(self.linear_operator_diagonal), n_lowest,
                real_only=self.options.real_only)
        if initial_guess.shape[0] != len(self.linear_operator_diagonal):
            raise ValueError('Guess vectors have a different dimension with '
                             'linear opearator diagonal elements: {} != {}.'
                             .format(initial_guess.shape[1],
                                     len(self.linear_operator_diagonal)))
        # 3. Makes sure real guess vector if real_only is specified.
        if self.options.real_only:
            if not numpy.allclose(numpy.real(initial_guess), initial_guess):
                warnings.warn('Initial guess is not real only!', RuntimeWarning)
                initial_guess = numpy.real(initial_guess)
        # 4. Checks for non-trivial (non-zero) initial guesses.
        if numpy.max(numpy.abs(initial_guess)) < self.options.eps:
            raise ValueError('Guess vectors are all zero! {}'.format(
                initial_guess.shape))
        initial_guess = scipy.linalg.orth(initial_guess)
        # 5. Makes sure number of initial guess vector is at least n_lowest.
        if initial_guess.shape[1] < n_lowest:
            initial_guess = append_random_vectors(
                initial_guess, n_lowest - initial_guess.shape[1],
                real_only=self.options.real_only)
        success = False
        num_iterations = 0
        guess_v = initial_guess
        # guess_mv caches linear_operator.dot(guess_v); None forces a
        # recomputation inside _iterate.
        guess_mv = None
        max_iterations = max_iterations or self.options.max_iterations
        while (num_iterations < max_iterations and not success):
            (eigen_values, eigen_vectors, mat_eigen_vectors, max_trial_error,
             guess_v, guess_mv) = self._iterate(n_lowest, guess_v, guess_mv)
            logging.info("Eigenvalues for iteration %d: %s, error is %f.",
                         num_iterations, eigen_values, max_trial_error)
            if max_trial_error < self.options.eps:
                success = True
                break
            # Make sure it keeps real components only.
            if self.options.real_only:
                guess_v = numpy.real(guess_v)
            # Deals with new directions to make sure they're orthonormal.
            # Also makes sure there're new directions added for the next
            # iteration, if not, add n_lowest random vectors.
            count_mvs = guess_mv.shape[1]
            guess_v = orthonormalize(guess_v, count_mvs, self.options.eps)
            if guess_v.shape[1] <= count_mvs:
                guess_v = append_random_vectors(
                    guess_v, n_lowest, real_only=self.options.real_only)
            # Limits number of vectors to self.options.max_subspace, in this
            # case, keep the following:
            # 1) first n_lowest eigen_vectors;
            # 2) first n_lowest matrix multiplication result for eigen_vectors;
            #
            # 3) new search directions which will be used for improvement for
            # the next iteration.
            if guess_v.shape[1] >= self.options.max_subspace:
                guess_v = numpy.hstack([
                    eigen_vectors,
                    guess_v[:, count_mvs:],
                ])
                guess_mv = mat_eigen_vectors
                if self.options.real_only:
                    if (not numpy.allclose(numpy.real(guess_v), guess_v) or
                            not numpy.allclose(numpy.real(guess_mv), guess_mv)):
                        # Forces recalculation for matrix multiplication with
                        # vectors.
                        guess_mv = None
            num_iterations += 1
        if (self.options.real_only and
                not numpy.allclose(numpy.real(eigen_vectors), eigen_vectors)):
            warnings.warn('Unable to get real only eigenvectors, return '
                          'complex vectors instead with success state {}.'
                          .format(success), RuntimeWarning)
        return success, eigen_values, eigen_vectors
    def _iterate(self, n_lowest, guess_v, guess_mv=None):
        """One iteration with guess vectors.

        Args:
            n_lowest(int): The first n_lowest number of eigenvalues and
                eigenvectors one is interested in.
            guess_v(numpy.ndarray(complex)): Guess eigenvectors associated with
                the smallest eigenvalues.
            guess_mv(numpy.ndarray(complex)): Matrix applied on guess_v,
                therefore they should have the same dimension.

        Returns:
            trial_lambda(numpy.ndarray(float)): The minimal eigenvalues based on
                guess eigenvectors.
            trial_v(numpy.ndarray(complex)): New guess eigenvectors.
            trial_mv(numpy.ndarray(complex)): New guess eigenvectors' matrix
                multiplication result.
            max_trial_error(float): The max elementwise error for all guess
                vectors.
            guess_v(numpy.ndarray(complex)): Cached guess eigenvectors to avoid
                recalculation for the next iterations.
            guess_mv(numpy.ndarray(complex)): Cached guess vectors which is the
                matrix product of linear_operator with guess_v.
        """
        if guess_mv is None:
            guess_mv = self.linear_operator.dot(guess_v)
        dimension = guess_v.shape[1]
        # Note that getting guess_mv is the most expensive step; only the
        # columns missing from the cache are multiplied here.
        if guess_mv.shape[1] < dimension:
            guess_mv = numpy.hstack([guess_mv, self.linear_operator.dot(
                guess_v[:, guess_mv.shape[1] : dimension])])
        guess_vmv = numpy.dot(guess_v.conj().T, guess_mv)
        # Gets new set of eigenvalues and eigenvectors in the vmv space, with a
        # smaller dimension which is the number of vectors in guess_v.
        #
        # Note that we don't get the eigenvectors directly, instead we only get
        # a transformation based on the raw vectors, so that mv don't need to be
        # recalculated.
        trial_lambda, trial_transformation = numpy.linalg.eigh(guess_vmv)
        # Sorts eigenvalues in ascending order.
        # NOTE(review): reversed(argsort()[::-1]) is equivalent to a plain
        # ascending argsort.
        sorted_index = list(reversed(trial_lambda.argsort()[::-1]))
        trial_lambda = trial_lambda[sorted_index]
        trial_transformation = trial_transformation[:, sorted_index]
        if len(trial_lambda) > n_lowest:
            trial_lambda = trial_lambda[:n_lowest]
            trial_transformation = trial_transformation[:, :n_lowest]
        # Estimates errors based on diagonalization in the smaller space.
        trial_v = numpy.dot(guess_v, trial_transformation)
        trial_mv = numpy.dot(guess_mv, trial_transformation)
        trial_error = trial_mv - trial_v * trial_lambda
        new_directions, max_trial_error = self._get_new_directions(
            trial_error, trial_lambda, trial_v)
        if new_directions:
            guess_v = numpy.hstack([guess_v, numpy.stack(new_directions).T])
        return (trial_lambda, trial_v, trial_mv, max_trial_error,
                guess_v, guess_mv)
    def _get_new_directions(self, error_v, trial_lambda, trial_v):
        """Gets new directions from error vectors.

        Args:
            error_v(numpy.ndarray(complex)): Error vectors from the guess
                eigenvalues and associated eigenvectors.
            trial_lambda(numpy.ndarray(float)): The n_lowest minimal guess
                eigenvalues.
            trial_v(numpy.ndarray(complex)): Guess eigenvectors associated with
                trial_lambda.

        Returns:
            new_directions(numpy.ndarray(complex)): New directions for searching
                for real eigenvalues and eigenvectors.
            max_trial_error(float): The max elementwise error for all guess
                vectors.
        """
        n_lowest = error_v.shape[1]
        max_trial_error = 0
        # Adds new guess vectors for the next iteration for the first n_lowest
        # directions.
        origonal_dimension = error_v.shape[0]
        new_directions = []
        for i in range(n_lowest):
            current_error_v = error_v[:, i]
            if numpy.max(numpy.abs(current_error_v)) < self.options.eps:
                # Already converged for this eigenvector, no contribution to
                # search for new directions.
                continue
            max_trial_error = max(max_trial_error,
                                  numpy.linalg.norm(current_error_v))
            # Diagonal (Jacobi) preconditioner: 1 / (D_jj - lambda_i), clamped
            # by eps to avoid division by (near) zero.
            diagonal_inverse = numpy.ones(origonal_dimension)
            for j in range(origonal_dimension):
                # Makes sure error vectors are bounded.
                diff_lambda = self.linear_operator_diagonal[j] - trial_lambda[i]
                if numpy.abs(diff_lambda) > self.options.eps:
                    diagonal_inverse[j] /= diff_lambda
                else:
                    diagonal_inverse[j] /= self.options.eps
            diagonal_inverse_error = diagonal_inverse * current_error_v
            diagonal_inverse_trial = diagonal_inverse * trial_v[:, i]
            new_direction = -current_error_v + (trial_v[:, i] * numpy.dot(
                trial_v[:, i].conj(), diagonal_inverse_error) / numpy.dot(
                    trial_v[:, i].conj(), diagonal_inverse_trial))
            new_directions.append(new_direction)
        return new_directions, max_trial_error
class QubitDavidson(Davidson):
    """Davidson solver specialized for QubitOperator eigenproblems."""

    def __init__(self, qubit_operator, n_qubits=None, options=None):
        """
        Args:
            qubit_operator(QubitOperator): The qubit operator whose lowest
                eigenpairs are sought; it also behaves as a linear operator.
            n_qubits(int): Number of qubits.
            options(DavidsonOptions): Iteration options.
        """
        # Wrap the qubit operator as a matrix-free linear operator and
        # extract its diagonal for the Davidson preconditioner.
        linear_operator = generate_linear_qubit_operator(
            qubit_operator, n_qubits, options)
        diagonal = get_linear_qubit_operator_diagonal(qubit_operator,
                                                      n_qubits)
        super(QubitDavidson, self).__init__(linear_operator, diagonal,
                                            options=options)
class SparseDavidson(Davidson):
    """Davidson solver for a scipy sparse matrix."""

    def __init__(self, sparse_matrix, options=None):
        """
        Args:
            sparse_matrix(scipy.sparse.spmatrix): The sparse matrix whose
                lowest eigenpairs are sought.
            options(DavidsonOptions): Iteration options.
        """
        # The matrix diagonal serves as the Davidson preconditioner.
        diagonal = sparse_matrix.diagonal()
        super(SparseDavidson, self).__init__(sparse_matrix, diagonal,
                                             options=options)
def generate_random_vectors(row, col, real_only=False):
    """Build a row x col matrix whose columns are orthonormal random vectors.

    Args:
        row(int): Number of rows for the vectors.
        col(int): Number of columns for the vectors.
        real_only(bool): When True the vectors are real; otherwise complex.

    Returns:
        random_vectors(numpy.ndarray(complex)): Orthonormal random vectors.
    """
    samples = numpy.random.rand(row, col)
    if not real_only:
        # Add an independent imaginary part so the vectors span the
        # complex space as well.
        samples = samples + 1.0j * numpy.random.rand(row, col)
    return scipy.linalg.orth(samples)
def append_random_vectors(vectors, col, max_trial=3, real_only=False):
    """Extend an orthonormal basis with exactly col additional random columns.

    Assumes the columns of vectors are already orthonormal.

    Args:
        vectors(numpy.ndarray(complex)): Orthonormal original vectors to be
            appended.
        col(int): Number of columns to be appended.
        max_trial(int): Maximum number of consecutive failed attempts before
            giving up.
        real_only(bool): Real vectors or complex ones.

    Returns:
        vectors(numpy.ndarray(complex)): Orthonormal vectors with the
            requested number of columns (fewer if generation kept failing).
    """
    if col <= 0:
        return vectors

    current = vectors.shape[1]
    target = min(current + col, vectors.shape[0] + 1)
    attempts = 0
    while current < target:
        attempts += 1
        candidates = generate_random_vectors(
            vectors.shape[0], target - current, real_only)
        vectors = orthonormalize(numpy.hstack([vectors, candidates]), current)
        if vectors.shape[1] == current:
            # Nothing new survived orthonormalization this round.
            if attempts > max_trial:
                warnings.warn('Unable to generate specified number of random '
                              'vectors {}: returning {} in total.'.format(
                                  col, current), RuntimeWarning)
                break
        else:
            # Progress was made, so reset the failure counter.
            attempts = 1
            current = vectors.shape[1]
    return vectors
def orthonormalize(vectors, num_orthonormals=1, eps=1e-6):
    """Orthonormalize vectors so that they are all normalized and orthogonal.

    The first column is kept as-is, while column i is made orthogonal to
    every column j with j < i. Note that the input array is modified in
    place; the returned array is a view into it.

    Args:
        vectors(numpy.ndarray(complex)): Input vectors to be orthonormalized.
        num_orthonormals(int): First `num_orthonormals` columns are already
            orthonormal and are left untouched.
        eps(float): Max-absolute-value threshold below which a candidate is
            treated as a zero vector and dropped.

    Returns:
        ortho_normals(numpy.ndarray(complex)): Output orthonormal vectors.
    """
    output = vectors
    kept = num_orthonormals
    # Columns before num_orthonormals are assumed orthonormal already.
    for col in range(num_orthonormals, vectors.shape[1]):
        candidate = vectors[:, col]
        # Project out every previously processed direction.
        for prior in range(col):
            basis = output[:, prior]
            candidate -= basis * numpy.dot(basis.conj(), candidate)
        # Drop candidates that collapsed to (numerically) zero.
        if numpy.max(numpy.abs(candidate)) < eps:
            continue
        output[:, kept] = candidate / numpy.linalg.norm(candidate)
        kept += 1
    return output[:, :kept]
| [
"numpy.abs",
"numpy.random.rand",
"numpy.ones",
"openfermion.utils._sparse_tools.get_linear_qubit_operator_diagonal",
"numpy.hstack",
"scipy.linalg.orth",
"numpy.real",
"numpy.dot",
"numpy.stack",
"numpy.linalg.norm",
"numpy.linalg.eigh",
"warnings.warn",
"openfermion.utils._linear_qubit_ope... | [((16137, 16164), 'numpy.random.rand', 'numpy.random.rand', (['row', 'col'], {}), '(row, col)\n', (16154, 16164), False, 'import numpy\n'), ((16285, 16318), 'scipy.linalg.orth', 'scipy.linalg.orth', (['random_vectors'], {}), '(random_vectors)\n', (16302, 16318), False, 'import scipy\n'), ((6328, 6360), 'scipy.linalg.orth', 'scipy.linalg.orth', (['initial_guess'], {}), '(initial_guess)\n', (6345, 6360), False, 'import scipy\n'), ((11354, 11382), 'numpy.linalg.eigh', 'numpy.linalg.eigh', (['guess_vmv'], {}), '(guess_vmv)\n', (11371, 11382), False, 'import numpy\n'), ((11875, 11915), 'numpy.dot', 'numpy.dot', (['guess_v', 'trial_transformation'], {}), '(guess_v, trial_transformation)\n', (11884, 11915), False, 'import numpy\n'), ((11935, 11976), 'numpy.dot', 'numpy.dot', (['guess_mv', 'trial_transformation'], {}), '(guess_mv, trial_transformation)\n', (11944, 11976), False, 'import numpy\n'), ((7063, 7176), 'logging.info', 'logging.info', (['"""Eigenvalues for iteration %d: %s, error is %f."""', 'num_iterations', 'eigen_values', 'max_trial_error'], {}), "('Eigenvalues for iteration %d: %s, error is %f.',\n num_iterations, eigen_values, max_trial_error)\n", (7075, 7176), False, 'import logging\n'), ((13794, 13824), 'numpy.ones', 'numpy.ones', (['origonal_dimension'], {}), '(origonal_dimension)\n', (13804, 13824), False, 'import numpy\n'), ((15134, 15199), 'openfermion.utils._linear_qubit_operator.generate_linear_qubit_operator', 'generate_linear_qubit_operator', (['qubit_operator', 'n_qubits', 'options'], {}), '(qubit_operator, n_qubits, options)\n', (15164, 15199), False, 'from openfermion.utils._linear_qubit_operator import generate_linear_qubit_operator\n'), ((15213, 15273), 'openfermion.utils._sparse_tools.get_linear_qubit_operator_diagonal', 'get_linear_qubit_operator_diagonal', (['qubit_operator', 'n_qubits'], {}), '(qubit_operator, n_qubits)\n', (15247, 15273), False, 'from openfermion.utils._sparse_tools import 
get_linear_qubit_operator_diagonal\n'), ((19078, 19105), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vector_i'], {}), '(vector_i)\n', (19095, 19105), False, 'import numpy\n'), ((5941, 6005), 'warnings.warn', 'warnings.warn', (['"""Initial guess is not real only!"""', 'RuntimeWarning'], {}), "('Initial guess is not real only!', RuntimeWarning)\n", (5954, 6005), False, 'import warnings\n'), ((6038, 6063), 'numpy.real', 'numpy.real', (['initial_guess'], {}), '(initial_guess)\n', (6048, 6063), False, 'import numpy\n'), ((6150, 6174), 'numpy.abs', 'numpy.abs', (['initial_guess'], {}), '(initial_guess)\n', (6159, 6174), False, 'import numpy\n'), ((7424, 7443), 'numpy.real', 'numpy.real', (['guess_v'], {}), '(guess_v)\n', (7434, 7443), False, 'import numpy\n'), ((8402, 8455), 'numpy.hstack', 'numpy.hstack', (['[eigen_vectors, guess_v[:, count_mvs:]]'], {}), '([eigen_vectors, guess_v[:, count_mvs:]])\n', (8414, 8455), False, 'import numpy\n'), ((13727, 13761), 'numpy.linalg.norm', 'numpy.linalg.norm', (['current_error_v'], {}), '(current_error_v)\n', (13744, 13761), False, 'import numpy\n'), ((16229, 16256), 'numpy.random.rand', 'numpy.random.rand', (['row', 'col'], {}), '(row, col)\n', (16246, 16256), False, 'import numpy\n'), ((18922, 18941), 'numpy.abs', 'numpy.abs', (['vector_i'], {}), '(vector_i)\n', (18931, 18941), False, 'import numpy\n'), ((5882, 5907), 'numpy.real', 'numpy.real', (['initial_guess'], {}), '(initial_guess)\n', (5892, 5907), False, 'import numpy\n'), ((9022, 9047), 'numpy.real', 'numpy.real', (['eigen_vectors'], {}), '(eigen_vectors)\n', (9032, 9047), False, 'import numpy\n'), ((13446, 13472), 'numpy.abs', 'numpy.abs', (['current_error_v'], {}), '(current_error_v)\n', (13455, 13472), False, 'import numpy\n'), ((14029, 14051), 'numpy.abs', 'numpy.abs', (['diff_lambda'], {}), '(diff_lambda)\n', (14038, 14051), False, 'import numpy\n'), ((12222, 12249), 'numpy.stack', 'numpy.stack', (['new_directions'], {}), '(new_directions)\n', (12233, 12249), False, 
'import numpy\n'), ((8647, 8666), 'numpy.real', 'numpy.real', (['guess_v'], {}), '(guess_v)\n', (8657, 8666), False, 'import numpy\n'), ((8727, 8747), 'numpy.real', 'numpy.real', (['guess_mv'], {}), '(guess_mv)\n', (8737, 8747), False, 'import numpy\n')] |
# coding=utf-8
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import warnings
import numpy as np
from deslib.base import BaseDS
from deslib.util.aggregation import majority_voting_rule
from deslib.util.diversity import Q_statistic, ratio_errors, \
negative_double_fault, compute_pairwise_diversity
from sklearn import metrics
from sklearn.base import ClusterMixin
from sklearn.cluster import KMeans
class DESClustering(BaseDS):
    """Dynamic ensemble selection-Clustering (DES-Clustering).

    This method selects an ensemble of classifiers taking into account the
    accuracy and diversity of the base classifiers. The K-means algorithm is
    used to define the region of competence. For each cluster, the N most
    accurate classifiers are first selected. Then, the J more diverse
    classifiers from the N most accurate classifiers are selected to
    compose the ensemble.

    Parameters
    ----------
    pool_classifiers : list of classifiers (Default = None)
        The generated_pool of classifiers trained for the corresponding
        classification problem. Each base classifiers should support the
        method "predict". If None, then the pool of classifiers is a bagging
        classifier.

    clustering : sklearn.cluster (Default = None)
        The clustering model used to estimate the region of competence.
        If None, a KMeans with K = 5 is used.

    pct_accuracy : float (Default = 0.5)
        Percentage of base classifiers selected based on accuracy

    pct_diversity : float (Default = 0.33)
        Percentage of base classifiers selected based on diversity

    more_diverse : Boolean (Default = True)
        Whether we select the most or the least diverse classifiers
        to add to the pre-selected ensemble

    metric_diversity : String (Default = 'df')
        Metric used to estimate the diversity of the base classifiers. Can be
        either the double fault (df), Q-statistics (Q), or error correlation.

    metric_performance : String (Default = 'accuracy_score')
        Metric used to estimate the performance of a base classifier on a
        cluster. Can be either any metric from sklearn.metrics.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    DSEL_perc : float (Default = 0.5)
        Percentage of the input data used to fit DSEL.
        Note: This parameter is only used if the pool of classifier is None
        or unfitted.

    n_jobs : int, default=-1
        The number of parallel jobs to run. None means 1 unless in
        a joblib.parallel_backend context. -1 means using all processors.
        Doesn't affect fit method.

    References
    ----------
    <NAME>., <NAME>., <NAME>., & <NAME>.
    "Using accuracy and more_diverse to select classifiers to build
    ensembles." International Joint Conference on Neural Networks (IJCNN).,
    2006.

    <NAME>., <NAME>, and <NAME>. "Dynamic selection
    of classifiers—a comprehensive review."
    Pattern Recognition 47.11 (2014): 3665-3680.

    <NAME>, <NAME>, and <NAME>, "Dynamic classifier
    selection: Recent advances and perspectives,"
    Information Fusion, vol. 41, pp. 195 – 216, 2018.
    """

    def __init__(self, pool_classifiers=None, clustering=None, with_IH=False,
                 safe_k=None, IH_rate=0.30, pct_accuracy=0.5,
                 pct_diversity=0.33, more_diverse=True, metric_diversity='DF',
                 metric_performance='accuracy_score', n_clusters=5,
                 random_state=None, DSEL_perc=0.5, n_jobs=-1):

        super(DESClustering, self).__init__(pool_classifiers=pool_classifiers,
                                            with_IH=with_IH,
                                            safe_k=safe_k,
                                            IH_rate=IH_rate,
                                            random_state=random_state,
                                            DSEL_perc=DSEL_perc,
                                            n_jobs=n_jobs)

        self.metric_diversity = metric_diversity
        self.metric_performance = metric_performance
        self.clustering = clustering
        self.pct_accuracy = pct_accuracy
        self.pct_diversity = pct_diversity
        self.more_diverse = more_diverse
        self.n_clusters = n_clusters

    def fit(self, X, y):
        """ Train the DS model by setting the Clustering algorithm and
        pre-processing the information required to apply the DS
        methods.

        First the data is divided into K clusters. Then, for each cluster,
        the N most accurate classifiers are first selected. Then, the J more
        diverse classifiers from the N most accurate classifiers are selected
        to compose the ensemble of the corresponding cluster. An ensemble of
        classifiers is assigned to each of the K clusters.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Data used to fit the model.

        y : array of shape (n_samples)
            class labels of each example in X.

        Returns
        -------
        self
        """
        super(DESClustering, self).fit(X, y)

        # N_ = number of classifiers pre-selected by accuracy;
        # J_ = final ensemble size selected by diversity.
        self.N_ = int(self.n_classifiers_ * self.pct_accuracy)
        self.J_ = int(np.ceil(self.n_classifiers_ * self.pct_diversity))
        self._check_parameters()

        self.metric_classifier_ = getattr(metrics, self.metric_performance)

        if self.clustering is None:
            if self.n_samples_ >= self.n_clusters:
                self.clustering_ = KMeans(n_clusters=self.n_clusters,
                                          random_state=self.random_state,
                                          n_jobs=self.n_jobs)
            else:
                # Not enough DSEL samples for the requested number of
                # clusters; fall back to one cluster per sample.
                warnings.warn("n_clusters is bigger than DSEL size. "
                              "Using All DSEL examples as cluster centroids.",
                              category=RuntimeWarning)
                self.clustering_ = KMeans(n_clusters=self.n_samples_,
                                          random_state=self.random_state)

            self.clustering_.fit(self.DSEL_data_)
        else:
            self.clustering_ = self.clustering.fit(self.DSEL_data_)

        # set the diversity metric used
        self._set_diversity_func()

        # Since the clusters are fixed, we can pre-compute the accuracy and
        # diversity of each cluster as well as the # selected classifiers
        # (indices) for each one. These pre-computed information will be kept
        # on those three variables:
        self.performance_cluster_ = np.zeros(
            (self.clustering_.n_clusters, self.n_classifiers_))
        self.diversity_cluster_ = np.zeros(
            (self.clustering_.n_clusters, self.n_classifiers_))
        self.indices_ = np.zeros((self.clustering_.n_clusters, self.J_),
                                 dtype=int)

        self._preprocess_clusters()
        return self

    def _preprocess_clusters(self):
        """Preprocess the competence as well as the average diversity of each
        base classifier for each specific cluster.

        This process makes the test routines faster, since the ensemble of
        classifiers of each cluster is already predefined.

        The class attributes Accuracy_cluster_ and diversity_cluster_ stores
        the accuracy and diversity information respectively of each base
        classifier for each cluster. The attribute indices_ stores the
        pre-selected base classifiers for each cluster.
        """
        labels = self.clustering_.predict(self.DSEL_data_)

        for cluster_index in range(self.clustering_.n_clusters):

            # Get the indices_ of the samples in the corresponding cluster.
            sample_indices = np.where(labels == cluster_index)[0]

            # Compute performance metric of each classifier in this cluster
            score_classifier = self.get_scores_(sample_indices)

            self.performance_cluster_[cluster_index, :] = score_classifier

            # Get the N_ most accurate classifiers in the cluster
            performance_indices = np.argsort(score_classifier)[::-1][0:self.N_]

            # Get the target labels for the samples in the corresponding
            # cluster for the diversity calculation.
            targets = self.DSEL_target_[sample_indices]
            self.diversity_cluster_[cluster_index, :] = \
                compute_pairwise_diversity(targets,
                                           self.BKS_DSEL_[sample_indices, :],
                                           self.diversity_func_)

            diversity_of_selected = self.diversity_cluster_[
                cluster_index, performance_indices]

            # Keep the J_ most (or least) diverse of the accurate ones.
            if self.more_diverse:
                diversity_indices = np.argsort(diversity_of_selected)[::-1][
                                    0:self.J_]
            else:
                diversity_indices = np.argsort(diversity_of_selected)[
                                    0:self.J_]

            self.indices_[cluster_index, :] = performance_indices[
                diversity_indices]

    def estimate_competence(self, query, predictions=None):
        """Get the competence estimates of each base classifier :math:`c_{i}`
        for the classification of the query sample.

        In this case, the competences were already pre-calculated for each
        cluster. So this method computes the nearest cluster and get the
        pre-calculated competences of the base classifiers for the
        corresponding cluster.

        Parameters
        ----------
        query : array of shape (n_samples, n_features)
            The query sample.

        predictions : array of shape (n_samples, n_classifiers)
            Predictions of the base classifiers for all test examples.

        Returns
        -------
        competences : array = [n_samples, n_classifiers]
            The competence level estimated for each base classifier.
        """
        cluster_index = self.clustering_.predict(query)
        competences = self.performance_cluster_[cluster_index][:]
        return competences

    def select(self, query):
        """Select an ensemble with the most accurate and most diverse
        classifier for the classification of the query.

        The ensemble for each cluster was already pre-calculated in the fit
        method. So, this method calculates the closest cluster, and returns
        the ensemble associated to this cluster.

        Parameters
        ----------
        query : array of shape (n_samples, n_features)
            The test examples.

        Returns
        -------
        selected_classifiers : array of shape = [n_samples, self.k]
            Indices of the selected base classifier for each test example.
        """
        cluster_index = self.clustering_.predict(query)
        selected_classifiers = self.indices_[cluster_index, :]
        return selected_classifiers

    def classify_with_ds(self, query, predictions, probabilities=None,
                         neighbors=None, distances=None, DFP_mask=None):
        """Predicts the label of the corresponding query sample.

        Parameters
        ----------
        query : array of shape = [n_features]
            The test sample.

        predictions : array of shape (n_samples, n_classifiers)
            Predictions of the base classifiers for all test examples.

        probabilities : array of shape (n_samples, n_classifiers, n_classes)
            Probabilities estimates of each base classifier for all test
            examples.

        neighbors : array of shape (n_samples, n_neighbors)
            Indices of the k nearest neighbors according for each test sample.

        distances : array of shape (n_samples, n_neighbors)
            Distances of the k nearest neighbors according for each test
            sample.

        DFP_mask : array of shape (n_samples, n_classifiers)
            Mask containing 1 for the selected base classifier and 0
            otherwise.

        Returns
        -------
        predicted_label : array of shape (n_samples)
            Predicted class label for each test example.
        """
        if query.ndim < 2:
            query = query.reshape(1, -1)

        if predictions.ndim < 2:
            predictions = predictions.reshape(1, -1)

        if query.shape[0] != predictions.shape[0]:
            raise ValueError(
                'The arrays query and predictions must have the same number'
                ' of samples. query.shape is {}'
                ' and predictions.shape is {}'.format(query.shape,
                                                      predictions.shape))

        selected_classifiers = self.select(query)
        # Gather, per sample, the votes of its selected classifiers only.
        votes = predictions[
            np.arange(predictions.shape[0])[:, None], selected_classifiers]
        predicted_label = majority_voting_rule(votes)

        return predicted_label

    def predict_proba_with_ds(self, query, predictions, probabilities,
                              neighbors=None, distances=None, DFP_mask=None):
        """Predicts the posterior probabilities of the corresponding query
        sample.

        Parameters
        ----------
        query : array of shape (n_samples, n_features)
            The test examples.

        predictions : array of shape (n_samples, n_classifiers)
            Predictions of the base classifiers for all test examples.

        probabilities : array of shape (n_samples, n_classifiers, n_classes)
            Probabilities estimates of each base classifier for all test
            examples.

        neighbors : array of shape (n_samples, n_neighbors)
            Indices of the k nearest neighbors according for each test sample.

        distances : array of shape (n_samples, n_neighbors)
            Distances of the k nearest neighbors according for each test
            sample

        DFP_mask : array of shape (n_samples, n_classifiers)
            Mask containing 1 for the selected base classifier and 0
            otherwise.

        Returns
        -------
        predicted_proba : array of shape (n_samples, n_classes)
            Posterior probabilities estimates for each test example.
        """
        # Fixed: the message used to reference `predictions` although the
        # check (and the computation below) use `probabilities`.
        if query.shape[0] != probabilities.shape[0]:
            raise ValueError(
                'The arrays query and probabilities must have the same number'
                ' of samples. query.shape is {}'
                ' and probabilities.shape is {}'.format(query.shape,
                                                        probabilities.shape))

        selected_classifiers = self.select(query)
        # Average the probability estimates of the selected classifiers.
        ensemble_proba = probabilities[
            np.arange(probabilities.shape[0])[:, None],
            selected_classifiers, :]
        predicted_proba = np.mean(ensemble_proba, axis=1)

        return predicted_proba

    def _check_parameters(self):
        """Check if the parameters passed as argument are correct.

        Raises
        ------
        ValueError
            If the hyper-parameters are incorrect.
        """
        if self.metric_diversity not in ['DF', 'Q', 'ratio']:
            raise ValueError(
                'Diversity metric must be one of the following values:'
                ' "DF", "Q" or "Ratio"')

        try:
            getattr(metrics, self.metric_performance)
        except AttributeError:
            raise ValueError(
                "Parameter metric_performance must be a sklearn metrics")

        if self.N_ <= 0 or self.J_ <= 0:
            raise ValueError("The values of N_ and J_ should be higher than 0"
                             "N_ = {}, J_= {} ".format(self.N_, self.J_))
        if self.N_ < self.J_:
            raise ValueError(
                "The value of N_ should be greater or equals than J_"
                "N_ = {}, J_= {} ".format(self.N_, self.J_))

        if self.clustering is not None:
            if not isinstance(self.clustering, ClusterMixin):
                raise ValueError(
                    "Parameter clustering must be a sklearn"
                    " cluster estimator.")

    def get_scores_(self, sample_indices):
        # Score every base classifier on the given cluster samples using the
        # configured sklearn metric (applied column-wise over BKS_DSEL_).
        def precision_function(label_predicted):
            targets = self.DSEL_target_[sample_indices]
            return self.metric_classifier_(targets, label_predicted)

        label_predicted = self.BKS_DSEL_[sample_indices, :]
        score_classifier = np.apply_along_axis(
            precision_function, 0, label_predicted)
        return score_classifier

    def _set_diversity_func(self):
        """Set the diversity function to be used according to the
        hyper-parameter metric_diversity

        The diversity_func_ can be either the Double Fault, Q-Statistics
        or Ratio of errors.
        """
        if self.metric_diversity == 'DF':
            self.diversity_func_ = negative_double_fault
        elif self.metric_diversity == 'Q':
            self.diversity_func_ = Q_statistic
        else:
            self.diversity_func_ = ratio_errors
| [
"sklearn.cluster.KMeans",
"numpy.mean",
"numpy.ceil",
"numpy.where",
"deslib.util.diversity.compute_pairwise_diversity",
"numpy.argsort",
"numpy.zeros",
"numpy.apply_along_axis",
"warnings.warn",
"deslib.util.aggregation.majority_voting_rule",
"numpy.arange"
] | [((6885, 6945), 'numpy.zeros', 'np.zeros', (['(self.clustering_.n_clusters, self.n_classifiers_)'], {}), '((self.clustering_.n_clusters, self.n_classifiers_))\n', (6893, 6945), True, 'import numpy as np\n'), ((6993, 7053), 'numpy.zeros', 'np.zeros', (['(self.clustering_.n_clusters, self.n_classifiers_)'], {}), '((self.clustering_.n_clusters, self.n_classifiers_))\n', (7001, 7053), True, 'import numpy as np\n'), ((7091, 7150), 'numpy.zeros', 'np.zeros', (['(self.clustering_.n_clusters, self.J_)'], {'dtype': 'int'}), '((self.clustering_.n_clusters, self.J_), dtype=int)\n', (7099, 7150), True, 'import numpy as np\n'), ((13171, 13198), 'deslib.util.aggregation.majority_voting_rule', 'majority_voting_rule', (['votes'], {}), '(votes)\n', (13191, 13198), False, 'from deslib.util.aggregation import majority_voting_rule\n'), ((15043, 15074), 'numpy.mean', 'np.mean', (['ensemble_proba'], {'axis': '(1)'}), '(ensemble_proba, axis=1)\n', (15050, 15074), True, 'import numpy as np\n'), ((16663, 16722), 'numpy.apply_along_axis', 'np.apply_along_axis', (['precision_function', '(0)', 'label_predicted'], {}), '(precision_function, 0, label_predicted)\n', (16682, 16722), True, 'import numpy as np\n'), ((5553, 5602), 'numpy.ceil', 'np.ceil', (['(self.n_classifiers_ * self.pct_diversity)'], {}), '(self.n_classifiers_ * self.pct_diversity)\n', (5560, 5602), True, 'import numpy as np\n'), ((8723, 8820), 'deslib.util.diversity.compute_pairwise_diversity', 'compute_pairwise_diversity', (['targets', 'self.BKS_DSEL_[sample_indices, :]', 'self.diversity_func_'], {}), '(targets, self.BKS_DSEL_[sample_indices, :], self\n .diversity_func_)\n', (8749, 8820), False, 'from deslib.util.diversity import Q_statistic, ratio_errors, negative_double_fault, compute_pairwise_diversity\n'), ((5838, 5929), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.n_clusters', 'random_state': 'self.random_state', 'n_jobs': 'self.n_jobs'}), '(n_clusters=self.n_clusters, random_state=self.random_state, 
n_jobs=\n self.n_jobs)\n', (5844, 5929), False, 'from sklearn.cluster import KMeans\n'), ((6043, 6177), 'warnings.warn', 'warnings.warn', (['"""n_clusters is bigger than DSEL size. Using All DSEL examples as cluster centroids."""'], {'category': 'RuntimeWarning'}), "(\n 'n_clusters is bigger than DSEL size. Using All DSEL examples as cluster centroids.'\n , category=RuntimeWarning)\n", (6056, 6177), False, 'import warnings\n'), ((6266, 6332), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.n_samples_', 'random_state': 'self.random_state'}), '(n_clusters=self.n_samples_, random_state=self.random_state)\n', (6272, 6332), False, 'from sklearn.cluster import KMeans\n'), ((8063, 8096), 'numpy.where', 'np.where', (['(labels == cluster_index)'], {}), '(labels == cluster_index)\n', (8071, 8096), True, 'import numpy as np\n'), ((8418, 8446), 'numpy.argsort', 'np.argsort', (['score_classifier'], {}), '(score_classifier)\n', (8428, 8446), True, 'import numpy as np\n'), ((9213, 9246), 'numpy.argsort', 'np.argsort', (['diversity_of_selected'], {}), '(diversity_of_selected)\n', (9223, 9246), True, 'import numpy as np\n'), ((13081, 13112), 'numpy.arange', 'np.arange', (['predictions.shape[0]'], {}), '(predictions.shape[0])\n', (13090, 13112), True, 'import numpy as np\n'), ((14936, 14969), 'numpy.arange', 'np.arange', (['probabilities.shape[0]'], {}), '(probabilities.shape[0])\n', (14945, 14969), True, 'import numpy as np\n'), ((9087, 9120), 'numpy.argsort', 'np.argsort', (['diversity_of_selected'], {}), '(diversity_of_selected)\n', (9097, 9120), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import model
from data_reader import load_data, DataReader
flags = tf.flags

# Data locations.
flags.DEFINE_string('data_dir', 'data', 'data directory. Should contain train.txt/valid.txt/test.txt with input data')
flags.DEFINE_string('train_dir', 'cv', 'training directory (models and summaries are saved there periodically)')
flags.DEFINE_string('load_model', None, '(optional) filename of the model to load. Useful for re-starting training from a checkpoint')

# Model hyper-parameters.
flags.DEFINE_integer('rnn_size', 650, 'size of LSTM internal state')
flags.DEFINE_integer('highway_layers', 2, 'number of highway layers')
flags.DEFINE_integer('char_embed_size', 15, 'dimensionality of character embeddings')
flags.DEFINE_string('kernels', '[1,2,3,4,5,6,7]', 'CNN kernel widths')
flags.DEFINE_string('kernel_features', '[50,100,150,200,200,200,200]', 'number of features in the CNN kernel')
flags.DEFINE_integer('rnn_layers', 2, 'number of layers in the LSTM')
flags.DEFINE_float('dropout', 0.5, 'dropout. 0 = no dropout')

# Optimization settings.
flags.DEFINE_float('learning_rate_decay', 0.5, 'learning rate decay')
flags.DEFINE_float('learning_rate', 1.0, 'starting learning rate')
flags.DEFINE_float('decay_when', 1.0, 'decay if validation perplexity does not improve by more than this much')
flags.DEFINE_float('param_init', 0.05, 'initialize parameters at')
flags.DEFINE_integer('num_unroll_steps', 35, 'number of timesteps to unroll for')
flags.DEFINE_integer('batch_size', 20, 'number of sequences to train on in parallel')
flags.DEFINE_integer('max_epochs', 25, 'number of full passes through the training data')
flags.DEFINE_float('max_grad_norm', 5.0, 'normalize gradients at')
flags.DEFINE_integer('max_word_length', 65, 'maximum word length')

# Bookkeeping.
flags.DEFINE_integer('seed', 3435, 'random number generator seed')
flags.DEFINE_integer('print_every', 5, 'how often to print current loss')
flags.DEFINE_string('EOS', '+', '<EOS> symbol. should be a single unused character (like +) for PTB and blank for others')

FLAGS = flags.FLAGS
def run_test(session, m, data, batch_size, num_steps):
    """Runs the model on the given data.

    Accumulates the model's cost over every batch produced by the dataset
    iterator and returns the mean cost per batch, threading the recurrent
    state from one batch into the next.

    Args:
        session: Active TensorFlow session used to evaluate the graph.
        m: Model object exposing `cost`, `final_state`, `initial_state`,
            `input_data` and `targets` tensors.
        data: Dataset to iterate over.
        batch_size: Number of sequences evaluated in parallel per batch.
        num_steps: Number of unrolled timesteps per batch.

    Returns:
        Average cost (loss) per batch over the whole dataset.
    """
    costs = 0.0
    iters = 0
    # Fetch the model's zero state once; it is fed back in each step so the
    # LSTM state persists across consecutive batches.
    state = session.run(m.initial_state)
    # NOTE(review): `reader` is not defined in this module — the file imports
    # `load_data` and `DataReader` but never binds a name `reader`, so this
    # call raises NameError as written. Presumably a DataReader instance (or
    # a `reader` module) was intended — TODO confirm against the caller.
    for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)):
        cost, state = session.run([m.cost, m.final_state], {
            m.input_data: x,
            m.targets: y,
            m.initial_state: state
        })
        costs += cost
        iters += 1

    return costs / iters
def main(_):
    ''' Trains model from data '''
    # Create the checkpoint/summary output directory if needed.
    if not os.path.exists(FLAGS.train_dir):
        os.mkdir(FLAGS.train_dir)
        print('Created training directory', FLAGS.train_dir)
    # load_data builds word/char vocabularies and integer tensors for the
    # train/valid/test splits of the corpus.
    word_vocab, char_vocab, word_tensors, char_tensors, max_word_length = \
        load_data(FLAGS.data_dir, FLAGS.max_word_length, eos=FLAGS.EOS)
    train_reader = DataReader(word_tensors['train'], char_tensors['train'],
                              FLAGS.batch_size, FLAGS.num_unroll_steps)
    valid_reader = DataReader(word_tensors['valid'], char_tensors['valid'],
                              FLAGS.batch_size, FLAGS.num_unroll_steps)
    test_reader = DataReader(word_tensors['test'], char_tensors['test'],
                              FLAGS.batch_size, FLAGS.num_unroll_steps)
    print('initialized all dataset readers')
    with tf.Graph().as_default(), tf.Session() as session:
        # tensorflow seed must be inside graph
        tf.set_random_seed(FLAGS.seed)
        np.random.seed(seed=FLAGS.seed)
        ''' build training graph '''
        initializer = tf.random_uniform_initializer(-FLAGS.param_init, FLAGS.param_init)
        with tf.variable_scope("Model", initializer=initializer):
            # NOTE: FLAGS.kernels / FLAGS.kernel_features are string-encoded
            # lists supplied on the CLI; eval() parses them here.
            train_model = model.inference_graph(
                    char_vocab_size=char_vocab.size,
                    word_vocab_size=word_vocab.size,
                    char_embed_size=FLAGS.char_embed_size,
                    batch_size=FLAGS.batch_size,
                    num_highway_layers=FLAGS.highway_layers,
                    num_rnn_layers=FLAGS.rnn_layers,
                    rnn_size=FLAGS.rnn_size,
                    max_word_length=max_word_length,
                    kernels=eval(FLAGS.kernels),
                    kernel_features=eval(FLAGS.kernel_features),
                    num_unroll_steps=FLAGS.num_unroll_steps,
                    dropout=FLAGS.dropout)
            train_model.update(model.loss_graph(train_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps))
            # scaling loss by FLAGS.num_unroll_steps effectively scales gradients by the same factor.
            # we need it to reproduce how the original Torch code optimizes. Without this, our gradients will be
            # much smaller (i.e. 35 times smaller) and to get system to learn we'd have to scale learning rate and max_grad_norm appropriately.
            # Thus, scaling gradients so that this trainer is exactly compatible with the original
            train_model.update(model.training_graph(train_model.loss * FLAGS.num_unroll_steps,
                    FLAGS.learning_rate, FLAGS.max_grad_norm))
        # create saver before creating more graph nodes, so that we do not save any vars defined below
        saver = tf.train.Saver(max_to_keep=50)
        ''' build graph for validation and testing (shares parameters with the training graph!) '''
        with tf.variable_scope("Model", reuse=True):
            # Same architecture as train_model but dropout disabled for eval.
            valid_model = model.inference_graph(
                    char_vocab_size=char_vocab.size,
                    word_vocab_size=word_vocab.size,
                    char_embed_size=FLAGS.char_embed_size,
                    batch_size=FLAGS.batch_size,
                    num_highway_layers=FLAGS.highway_layers,
                    num_rnn_layers=FLAGS.rnn_layers,
                    rnn_size=FLAGS.rnn_size,
                    max_word_length=max_word_length,
                    kernels=eval(FLAGS.kernels),
                    kernel_features=eval(FLAGS.kernel_features),
                    num_unroll_steps=FLAGS.num_unroll_steps,
                    dropout=0.0)
            valid_model.update(model.loss_graph(valid_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps))
        # Either resume from a checkpoint or initialize fresh variables.
        if FLAGS.load_model:
            saver.restore(session, FLAGS.load_model)
            print('Loaded model from', FLAGS.load_model, 'saved at global step', train_model.global_step.eval())
        else:
            tf.global_variables_initializer().run()
            session.run(train_model.clear_char_embedding_padding)
            print('Created and initialized fresh model. Size:', model.model_size())
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=session.graph)
        ''' take learning rate from CLI, not from saved graph '''
        session.run(
            tf.assign(train_model.learning_rate, FLAGS.learning_rate),
        )
        ''' training starts here '''
        best_valid_loss = None
        rnn_state = session.run(train_model.initial_rnn_state)
        for epoch in range(FLAGS.max_epochs):
            epoch_start_time = time.time()
            avg_train_loss = 0.0
            count = 0
            for x, y in train_reader.iter():
                count += 1
                start_time = time.time()
                # One optimizer step; also fetches diagnostics and re-zeroes
                # the char-embedding padding row after the update.
                loss, _, rnn_state, gradient_norm, step, _ = session.run([
                    train_model.loss,
                    train_model.train_op,
                    train_model.final_rnn_state,
                    train_model.global_norm,
                    train_model.global_step,
                    train_model.clear_char_embedding_padding
                ], {
                    train_model.input : x,
                    train_model.targets: y,
                    train_model.initial_rnn_state: rnn_state
                })
                # Exponential moving average of the training loss (alpha=0.05).
                avg_train_loss += 0.05 * (loss - avg_train_loss)
                time_elapsed = time.time() - start_time
                if count % FLAGS.print_every == 0:
                    print('%6d: %d [%5d/%5d], train_loss/perplexity = %6.8f/%6.7f secs/batch = %.4fs, grad.norm=%6.8f' % (step,
                                                            epoch, count,
                                                            train_reader.length,
                                                            loss, np.exp(loss),
                                                            time_elapsed,
                                                            gradient_norm))
            print('Epoch training time:', time.time()-epoch_start_time)
            # epoch done: time to evaluate
            avg_valid_loss = 0.0
            count = 0
            rnn_state = session.run(valid_model.initial_rnn_state)
            for x, y in valid_reader.iter():
                count += 1
                start_time = time.time()
                loss, rnn_state = session.run([
                    valid_model.loss,
                    valid_model.final_rnn_state
                ], {
                    valid_model.input : x,
                    valid_model.targets: y,
                    valid_model.initial_rnn_state: rnn_state,
                })
                if count % FLAGS.print_every == 0:
                    print("\t> validation loss = %6.8f, perplexity = %6.8f" % (loss, np.exp(loss)))
                avg_valid_loss += loss / valid_reader.length
            print("at the end of epoch:", epoch)
            print("train loss = %6.8f, perplexity = %6.8f" % (avg_train_loss, np.exp(avg_train_loss)))
            print("validation loss = %6.8f, perplexity = %6.8f" % (avg_valid_loss, np.exp(avg_valid_loss)))
            # Checkpoint once per epoch, embedding the validation loss in the name.
            save_as = '%s/epoch%03d_%.4f.model' % (FLAGS.train_dir, epoch, avg_valid_loss)
            saver.save(session, save_as)
            print('Saved model', save_as)
            ''' write out summary events '''
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="train_loss", simple_value=avg_train_loss),
                tf.Summary.Value(tag="valid_loss", simple_value=avg_valid_loss)
            ])
            summary_writer.add_summary(summary, step)
            ''' decide if need to decay learning rate '''
            # Decay when validation perplexity fails to improve by at least
            # FLAGS.decay_when; stop entirely once the LR gets tiny.
            if best_valid_loss is not None and np.exp(avg_valid_loss) > np.exp(best_valid_loss) - FLAGS.decay_when:
                print('validation perplexity did not improve enough, decay learning rate')
                current_learning_rate = session.run(train_model.learning_rate)
                print('learning rate was:', current_learning_rate)
                current_learning_rate *= FLAGS.learning_rate_decay
                if current_learning_rate < 1.e-5:
                    print('learning rate too small - stopping now')
                    break
                session.run(train_model.learning_rate.assign(current_learning_rate))
                print('new learning rate is:', current_learning_rate)
            else:
                best_valid_loss = avg_valid_loss
# tf.app.run() parses flags and invokes main(_) defined above.
if __name__ == "__main__":
    tf.app.run()
| [
"model.model_size",
"tensorflow.Summary.Value",
"tensorflow.set_random_seed",
"tensorflow.app.run",
"os.path.exists",
"tensorflow.Graph",
"data_reader.load_data",
"tensorflow.Session",
"numpy.exp",
"tensorflow.assign",
"os.mkdir",
"numpy.random.seed",
"model.training_graph",
"tensorflow.va... | [((3284, 3347), 'data_reader.load_data', 'load_data', (['FLAGS.data_dir', 'FLAGS.max_word_length'], {'eos': 'FLAGS.EOS'}), '(FLAGS.data_dir, FLAGS.max_word_length, eos=FLAGS.EOS)\n', (3293, 3347), False, 'from data_reader import load_data, DataReader\n'), ((3368, 3470), 'data_reader.DataReader', 'DataReader', (["word_tensors['train']", "char_tensors['train']", 'FLAGS.batch_size', 'FLAGS.num_unroll_steps'], {}), "(word_tensors['train'], char_tensors['train'], FLAGS.batch_size,\n FLAGS.num_unroll_steps)\n", (3378, 3470), False, 'from data_reader import load_data, DataReader\n'), ((3517, 3619), 'data_reader.DataReader', 'DataReader', (["word_tensors['valid']", "char_tensors['valid']", 'FLAGS.batch_size', 'FLAGS.num_unroll_steps'], {}), "(word_tensors['valid'], char_tensors['valid'], FLAGS.batch_size,\n FLAGS.num_unroll_steps)\n", (3527, 3619), False, 'from data_reader import load_data, DataReader\n'), ((3665, 3765), 'data_reader.DataReader', 'DataReader', (["word_tensors['test']", "char_tensors['test']", 'FLAGS.batch_size', 'FLAGS.num_unroll_steps'], {}), "(word_tensors['test'], char_tensors['test'], FLAGS.batch_size,\n FLAGS.num_unroll_steps)\n", (3675, 3765), False, 'from data_reader import load_data, DataReader\n'), ((11550, 11562), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (11560, 11562), True, 'import tensorflow as tf\n'), ((3071, 3102), 'os.path.exists', 'os.path.exists', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (3085, 3102), False, 'import os\n'), ((3112, 3137), 'os.mkdir', 'os.mkdir', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (3120, 3137), False, 'import os\n'), ((3873, 3885), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3883, 3885), True, 'import tensorflow as tf\n'), ((3954, 3984), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (3972, 3984), True, 'import tensorflow as tf\n'), ((3993, 4024), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 
'FLAGS.seed'}), '(seed=FLAGS.seed)\n', (4007, 4024), True, 'import numpy as np\n'), ((4085, 4151), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-FLAGS.param_init)', 'FLAGS.param_init'], {}), '(-FLAGS.param_init, FLAGS.param_init)\n', (4114, 4151), True, 'import tensorflow as tf\n'), ((5759, 5789), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(50)'}), '(max_to_keep=50)\n', (5773, 5789), True, 'import tensorflow as tf\n'), ((7176, 7235), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.train_dir'], {'graph': 'session.graph'}), '(FLAGS.train_dir, graph=session.graph)\n', (7197, 7235), True, 'import tensorflow as tf\n'), ((4165, 4216), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'initializer': 'initializer'}), "('Model', initializer=initializer)\n", (4182, 4216), True, 'import tensorflow as tf\n'), ((5904, 5942), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': '(True)'}), "('Model', reuse=True)\n", (5921, 5942), True, 'import tensorflow as tf\n'), ((7336, 7393), 'tensorflow.assign', 'tf.assign', (['train_model.learning_rate', 'FLAGS.learning_rate'], {}), '(train_model.learning_rate, FLAGS.learning_rate)\n', (7345, 7393), True, 'import tensorflow as tf\n'), ((7615, 7626), 'time.time', 'time.time', ([], {}), '()\n', (7624, 7626), False, 'import time\n'), ((3848, 3858), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3856, 3858), True, 'import tensorflow as tf\n'), ((4942, 5020), 'model.loss_graph', 'model.loss_graph', (['train_model.logits', 'FLAGS.batch_size', 'FLAGS.num_unroll_steps'], {}), '(train_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps)\n', (4958, 5020), False, 'import model\n'), ((5512, 5622), 'model.training_graph', 'model.training_graph', (['(train_model.loss * FLAGS.num_unroll_steps)', 'FLAGS.learning_rate', 'FLAGS.max_grad_norm'], {}), '(train_model.loss * FLAGS.num_unroll_steps, FLAGS.\n learning_rate, 
FLAGS.max_grad_norm)\n', (5532, 5622), False, 'import model\n'), ((6658, 6736), 'model.loss_graph', 'model.loss_graph', (['valid_model.logits', 'FLAGS.batch_size', 'FLAGS.num_unroll_steps'], {}), '(valid_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps)\n', (6674, 6736), False, 'import model\n'), ((7130, 7148), 'model.model_size', 'model.model_size', ([], {}), '()\n', (7146, 7148), False, 'import model\n'), ((7783, 7794), 'time.time', 'time.time', ([], {}), '()\n', (7792, 7794), False, 'import time\n'), ((9368, 9379), 'time.time', 'time.time', ([], {}), '()\n', (9377, 9379), False, 'import time\n'), ((6960, 6993), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6991, 6993), True, 'import tensorflow as tf\n'), ((8438, 8449), 'time.time', 'time.time', ([], {}), '()\n', (8447, 8449), False, 'import time\n'), ((9071, 9082), 'time.time', 'time.time', ([], {}), '()\n', (9080, 9082), False, 'import time\n'), ((10777, 10799), 'numpy.exp', 'np.exp', (['avg_valid_loss'], {}), '(avg_valid_loss)\n', (10783, 10799), True, 'import numpy as np\n'), ((10046, 10068), 'numpy.exp', 'np.exp', (['avg_train_loss'], {}), '(avg_train_loss)\n', (10052, 10068), True, 'import numpy as np\n'), ((10154, 10176), 'numpy.exp', 'np.exp', (['avg_valid_loss'], {}), '(avg_valid_loss)\n', (10160, 10176), True, 'import numpy as np\n'), ((10457, 10520), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""train_loss"""', 'simple_value': 'avg_train_loss'}), "(tag='train_loss', simple_value=avg_train_loss)\n", (10473, 10520), True, 'import tensorflow as tf\n'), ((10538, 10601), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""valid_loss"""', 'simple_value': 'avg_valid_loss'}), "(tag='valid_loss', simple_value=avg_valid_loss)\n", (10554, 10601), True, 'import tensorflow as tf\n'), ((10802, 10825), 'numpy.exp', 'np.exp', (['best_valid_loss'], {}), '(best_valid_loss)\n', (10808, 10825), True, 'import numpy as np\n'), ((8864, 
8876), 'numpy.exp', 'np.exp', (['loss'], {}), '(loss)\n', (8870, 8876), True, 'import numpy as np\n'), ((9842, 9854), 'numpy.exp', 'np.exp', (['loss'], {}), '(loss)\n', (9848, 9854), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Code to load an expert policy and generate roll-out data for behavioral cloning.
Example usage:
python run_expert_pytorch.py experts/Humanoid-v1.pkl Humanoid-v2 --render \
--num_rollouts 20
Author of this script and included expert policies: <NAME> (<EMAIL>)
"""
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
import matplotlib.pyplot as plt
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import *
def generate_rollout(env, expert_policy_file, max_timesteps, num_rollouts, render, envname):
    """Roll out the expert policy in `env` and collect (observation, action) pairs.

    Prints per-rollout progress and return statistics, and appends the expert's
    mean/std return to result/result_<envname>.txt. Returns (observations, actions).
    """
    step_limit = max_timesteps or env.spec.timestep_limit
    policy_fn = load_policy.load_policy(expert_policy_file)
    with tf.Session():
        tf_util.initialize()
        returns, observations, actions = [], [], []
        for rollout_idx in range(num_rollouts):
            print('iter', rollout_idx)
            observation = env.reset()
            done = False
            episode_return = 0.
            step_count = 0
            while not done:
                action = policy_fn(observation[None, :])
                observations.append(observation)
                actions.append(action)
                observation, reward, done, _ = env.step(action)
                episode_return += reward
                step_count += 1
                if render:
                    env.render()
                if step_count % 100 == 0:
                    print("%i/%i" % (step_count, step_limit))
                if step_count >= step_limit:
                    break
            returns.append(episode_return)
        print('returns', returns)
        print('mean return', np.mean(returns))
        print('std of return', np.std(returns))
        # record the expert's performance as the "before" baseline
        with open('result/result_%s.txt' % (envname), "w") as result_file:
            result_file.write("##### before setting #####\n")
            result_file.write("mean return: %.4f \n" % np.mean(returns))
            result_file.write("std of return: %.4f \n" % np.std(returns))
        return observations, actions
def variablesFromPair(pair, args):
    """Convert one (observation, action) pair to a pair of torch Variables.

    Each element is reshaped to a (1, -1) row vector. Fixed: the original
    wrote the reshaped arrays back into `pair`, mutating the caller's data;
    we now build local copies instead. `args` is accepted for interface
    compatibility but unused.
    """
    input_arr = np.reshape(pair[0], (1, -1))
    target_arr = np.reshape(pair[1], (1, -1))
    input_variable = Variable(torch.FloatTensor(input_arr))
    target_variable = Variable(torch.FloatTensor(target_arr))
    return (input_variable, target_variable)
def makePairs(obs, acts):
    """Zip parallel observation/action lists into a list of [obs, act] pairs."""
    return [[o, a] for o, a in zip(obs, acts)]
def train(input_var, target_var, net, net_optimizer, criterion, args):
    """Run one optimization step on `net` for a single (input, target) pair.

    Returns the scalar loss as a Python float. Fixed: `loss.data[0]` raises
    IndexError on 0-dim loss tensors in PyTorch >= 0.4; `loss.item()` is the
    portable replacement. `args` is accepted for interface compatibility but
    unused.
    """
    net_optimizer.zero_grad()
    net_output = net(input_var)
    loss = criterion(net_output, target_var)
    loss.backward()
    net_optimizer.step()
    return loss.item()
def trainEpoch(net, pairs, args, test_pairs):
    """Train `net` with Adam/MSE for args.epoch epochs over `pairs`.

    Every 500 steps the running average loss and test-pair accuracy are
    printed; the loss curve is saved to result/<envname>.pdf.
    Fixes: the step counter no longer shadows the builtin `iter`, and the
    unused locals `n_epochs`/`learning_rate` were removed.
    """
    net_optimizer = optim.Adam(net.parameters(), lr=args.lr)
    criterion = nn.MSELoss()
    plot_losses = []
    plot_loss_total = 0
    step = 0  # was `iter`, which shadowed the builtin
    for epoch in range(1, args.epoch + 1):
        # reshuffle each epoch so training order varies
        random.shuffle(pairs)
        # converting pairs into variables
        training_pairs = [variablesFromPair(pair, args) for pair in pairs]
        for input_var, target_var in training_pairs:
            step += 1
            loss = train(input_var, target_var, net, net_optimizer, criterion, args)
            plot_loss_total += loss
            if step % 500 == 0:
                plot_loss_avg = plot_loss_total / 500
                plot_losses.append(plot_loss_avg)
                plot_loss_total = 0
                print("epoch: %d, loss: %.6f, acc on test pairs: %.3f" % (epoch, plot_loss_avg, validate(net, test_pairs, args)))
    f = plt.figure()
    plt.plot(plot_losses)
    plt.ylabel('Loss')
    plt.xlabel('Iteration')
    f.savefig("result/%s.pdf" % args.envname, bbox_inches='tight')
def validate(net, pairs, args):
    """Fraction of `pairs` on which the network's argmax action component
    matches the target's argmax component.

    BUG FIX: the original computed `target_ind` from `output` (not from
    `target_var`), so target_ind always equaled output_ind and the reported
    accuracy was a constant 1.0. The target index must come from the target.
    """
    valid_pairs = [variablesFromPair(pair, args) for pair in pairs]
    correct = 0
    for input_var, target_var in valid_pairs:
        output = net(input_var)
        _, target_ind = torch.max(target_var, 1)  # was torch.max(output, 1)
        _, output_ind = torch.max(output, 1)
        if torch.equal(target_ind.data, output_ind.data):
            correct += 1
    return (correct / len(pairs))
def test(env, expert_policy_file, net, max_timesteps, num_rollouts, render):
    # Evaluate the cloned network `net` by rolling it out in `env`, printing
    # return statistics. The expert policy is loaded only so its action could
    # be compared against the net's (comparison prints are commented out).
    # Returns the list of per-rollout returns.
    max_steps = max_timesteps or env.spec.timestep_limit
    policy_fn = load_policy.load_policy(expert_policy_file)
    with tf.Session() as sess:
        tf_util.initialize()
        returns = []
        for i in range(num_rollouts):
            observations = []
            actions = []
            obs = env.reset()
            done = False
            totalr = 0.
            steps = 0
            while not done:
                print("at step", steps)
                expected_action = policy_fn(obs[None,:])
                # The learned policy chooses the action; reshape to the
                # (1, act_dim) layout env.step expects from the expert.
                action = net(Variable(torch.FloatTensor(obs)))
                action = action.data.numpy()
                action = np.reshape(action, (1,-1))
                #print("expected action: ", expected_action)
                #print("predicted action: ", action)
                observations.append(obs)
                actions.append(action)
                obs, r, done, _ = env.step(action)
                totalr += r
                steps += 1
                if render:
                    env.render()
                if steps % 100 == 0: print("%i/%i"%(steps, max_steps))
                if steps >= max_steps:
                    break
            returns.append(totalr)
        print('returns', returns)
        print('mean return', np.mean(returns))
        print('std of return', np.std(returns))
    return returns
def main():
    # End-to-end behavioral cloning: collect expert rollouts, train a
    # feed-forward net on 80% of the pairs, evaluate on the environment,
    # and append the run's settings/results to result/result_<envname>.txt.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument('envname', type=str)
    parser.add_argument('--render', action='store_true')
    parser.add_argument("--max_timesteps", type=int)
    parser.add_argument('--num_rollouts', type=int, default=5,
                        help='Number of expert roll outs')
    parser.add_argument('--hidden_size', type=int, default=64)
    parser.add_argument('--epoch', type=int, default=30)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    args = parser.parse_args()
    env = gym.make(args.envname)
    # Expert demonstration data.
    obs, acts = generate_rollout(env, args.expert_policy_file, args.max_timesteps, \
                   args.num_rollouts, args.render, args.envname)
    num_pairs = len(obs)
    pairs = makePairs(obs, acts)
    # 80/20 train/test split (no shuffling before the split).
    train_pairs = pairs[:int(0.8 * num_pairs)]
    test_pairs = pairs[int(0.8 * num_pairs):]
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    net = FFNet(obs_dim, act_dim, args.hidden_size)
    #print("obs_dim", obs_dim)
    #print("act_dim", act_dim)
    trainEpoch(net, train_pairs, args, test_pairs)
    #obs1 = train_pairs[0][0]
    #act1 = net(Variable(torch.FloatTensor(obs1)))
    #expected_act1 = train_pairs[0][1]
    #print("target act1: ", expected_act1)
    #print("predicted act1: ", act1)
    #validate(net, test_pairs, args)
    print("####### After training #######")
    #print("acc on training pairs: %.3f" % validate(net, training_pairs, args))
    returns = test(env, args.expert_policy_file, net, args.max_timesteps, args.num_rollouts, args.render)
    # Append ('a') the training configuration and cloned-policy results after
    # the "before" baseline that generate_rollout wrote.
    result_file = open('result/result_%s.txt' % (args.envname), "a")
    result_file.write("##### training setting #####\n")
    result_file.write("num of rollouts: %d \n" % args.num_rollouts)
    result_file.write("num of epochs: %d \n" % args.epoch)
    result_file.write("NN hidden size: %d \n" % args.hidden_size)
    result_file.write("learning rate: " + str(args.lr) + " \n" )
    result_file.write("mean return: %.4f \n" % np.mean(returns))
    result_file.write("std of return: %.4f \n" % np.std(returns))
    result_file.close()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.mean",
"numpy.reshape",
"random.shuffle",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.Session",
"load_policy.load_policy",
"tf_util.initialize",
"torch.FloatTensor",
"torch.max",
"torch.nn.MSELoss",
"matplo... | [((782, 825), 'load_policy.load_policy', 'load_policy.load_policy', (['expert_policy_file'], {}), '(expert_policy_file)\n', (805, 825), False, 'import load_policy\n'), ((2125, 2153), 'numpy.reshape', 'np.reshape', (['pair[0]', '(1, -1)'], {}), '(pair[0], (1, -1))\n', (2135, 2153), True, 'import numpy as np\n'), ((2165, 2193), 'numpy.reshape', 'np.reshape', (['pair[1]', '(1, -1)'], {}), '(pair[1], (1, -1))\n', (2175, 2193), True, 'import numpy as np\n'), ((3035, 3047), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3045, 3047), True, 'import torch.nn as nn\n'), ((3745, 3757), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3755, 3757), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3780), 'matplotlib.pyplot.plot', 'plt.plot', (['plot_losses'], {}), '(plot_losses)\n', (3767, 3780), True, 'import matplotlib.pyplot as plt\n'), ((3782, 3800), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3792, 3800), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3825), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (3812, 3825), True, 'import matplotlib.pyplot as plt\n'), ((4485, 4528), 'load_policy.load_policy', 'load_policy.load_policy', (['expert_policy_file'], {}), '(expert_policy_file)\n', (4508, 4528), False, 'import load_policy\n'), ((5494, 5519), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5517, 5519), False, 'import argparse\n'), ((6196, 6218), 'gym.make', 'gym.make', (['args.envname'], {}), '(args.envname)\n', (6204, 6218), False, 'import gym\n'), ((835, 847), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (845, 847), True, 'import tensorflow as tf\n'), ((865, 885), 'tf_util.initialize', 'tf_util.initialize', ([], {}), '()\n', (883, 885), False, 'import tf_util\n'), ((1708, 1724), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (1715, 1724), True, 'import numpy as np\n'), ((1753, 1768), 'numpy.std', 'np.std', 
(['returns'], {}), '(returns)\n', (1759, 1768), True, 'import numpy as np\n'), ((2281, 2307), 'torch.FloatTensor', 'torch.FloatTensor', (['pair[0]'], {}), '(pair[0])\n', (2298, 2307), False, 'import torch\n'), ((2337, 2363), 'torch.FloatTensor', 'torch.FloatTensor', (['pair[1]'], {}), '(pair[1])\n', (2354, 2363), False, 'import torch\n'), ((3129, 3150), 'random.shuffle', 'random.shuffle', (['pairs'], {}), '(pairs)\n', (3143, 3150), False, 'import random\n'), ((4156, 4176), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (4165, 4176), False, 'import torch\n'), ((4195, 4215), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (4204, 4215), False, 'import torch\n'), ((4243, 4288), 'torch.equal', 'torch.equal', (['target_ind.data', 'output_ind.data'], {}), '(target_ind.data, output_ind.data)\n', (4254, 4288), False, 'import torch\n'), ((4535, 4547), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4545, 4547), True, 'import tensorflow as tf\n'), ((4559, 4579), 'tf_util.initialize', 'tf_util.initialize', ([], {}), '()\n', (4577, 4579), False, 'import tf_util\n'), ((1936, 1952), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (1943, 1952), True, 'import numpy as np\n'), ((2003, 2018), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (2009, 2018), True, 'import numpy as np\n'), ((5370, 5386), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (5377, 5386), True, 'import numpy as np\n'), ((5413, 5428), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (5419, 5428), True, 'import numpy as np\n'), ((7668, 7684), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (7675, 7684), True, 'import numpy as np\n'), ((7735, 7750), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (7741, 7750), True, 'import numpy as np\n'), ((4919, 4946), 'numpy.reshape', 'np.reshape', (['action', '(1, -1)'], {}), '(action, (1, -1))\n', (4929, 4946), True, 'import numpy as np\n'), ((4848, 4870), 
'torch.FloatTensor', 'torch.FloatTensor', (['obs'], {}), '(obs)\n', (4865, 4870), False, 'import torch\n')] |
import re
import copy
import pickle
import numpy as np
from collections import OrderedDict
import torch
from torch.autograd import Variable
import global_variables as g
def save_checkpoint(state, filename='./checkpoints/checkpoint.pth.tar'):
    """Persist a training-state object to `filename` via torch.save."""
    print('save model!', filename)
    torch.save(state, filename)
def save_pickle(d, path):
    """Serialize object `d` to `path` with pickle."""
    print('save pickle to', path)
    with open(path, 'wb') as out_file:
        pickle.dump(d, out_file)
def load_pickle(path):
    """Deserialize and return the object stored at `path`."""
    print('load', path)
    with open(path, 'rb') as in_file:
        return pickle.load(in_file)
def get_entities(fpath):
    """Parse the knowledge-base file at `fpath` and collect the distinct values
    seen for each tracked slot type (cuisine / location / price / number).

    Each KB line looks like: "<id> <restaurant> <slot_type>\\t<slot_value>".
    Unknown slot types are ignored; values keep first-seen order.
    """
    entities = OrderedDict({'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []})
    with open(fpath, 'r') as kb_file:
        for line in kb_file.readlines():
            fields = line.rstrip().split(' ')[2].split('\t')
            slot_type = fields[0]  # ex) R_price
            slot_val = fields[1]   # ex) cheap
            if slot_type in entities and slot_val not in entities[slot_type]:
                entities[slot_type].append(slot_val)
    return entities
def load_embd_weights(word2vec, vocab_size, embd_size, w2i):
    """Build a (vocab_size x embd_size) FloatTensor of pretrained vectors.

    Rows for words present in `word2vec.wv` are copied over; words not found
    in the embedding index keep all-zero rows.
    """
    weights = np.zeros((vocab_size, embd_size))
    print('embed_matrix.shape', weights.shape)
    found_ct = 0
    for word, idx in w2i.items():
        if word in word2vec.wv:
            weights[idx] = word2vec.wv[word]
            found_ct += 1
    print(found_ct, 'words are found in word2vec. vocab_size is', vocab_size)
    return torch.from_numpy(weights).type(torch.FloatTensor)
def preload(fpath, vocab, system_acts):
    """Scan a dialog file once, growing `vocab` (in place) with user-utterance
    words and `system_acts` (in place) with normalized system responses.

    Only lines that contain both a user and a system utterance (tab-separated)
    contribute. Restaurant-specific tokens (resto_*) are stripped and every
    api_call variant is collapsed to the single act 'api_call'.
    Returns sorted copies of (vocab, system_acts).
    """
    with open(fpath, 'r') as dialog_file:
        for line in dialog_file.readlines():
            line = line.rstrip()
            if line == '':
                continue
            parts = line.split("\t")
            turn_and_uttr = parts[0].split(' ', 1)
            uttr_words = turn_and_uttr[1].split(' ')
            if len(parts) == 2:  # includes user and system utterance
                for word in uttr_words:
                    if word not in vocab:
                        vocab.append(word)
                sys_act = parts[1]
                sys_act = re.sub(r'resto_\S+', '', sys_act)
                if sys_act.startswith('api_call'):
                    sys_act = 'api_call'
                if sys_act not in system_acts:
                    system_acts.append(sys_act)
    return sorted(vocab), sorted(system_acts)
def load_data(fpath, entities, w2i, system_acts):
    '''
    store data as dialog (multi turns)
    '''
    # Each dialog becomes one tuple of six parallel per-turn lists; a blank
    # line in the file marks the end of a dialog.
    data = []
    with open(fpath, 'r') as f:
        lines = f.readlines()
        # x: user uttr, y: sys act, c: context, b: BoW, p: previous sys act, f: action filter
        # NOTE: `f` below shadows the file handle — safe only because all
        # lines were already read above.
        x, y, c, b, p, f = [], [], [], [], [], []
        context = [0] * len(entities.keys())
        for idx, l in enumerate(lines):
            l = l.rstrip()
            if l == '':
                data.append((x, y, c, b, p, f))
                # reset
                x, y, c, b, p, f = [], [], [], [], [], []
                context = [0] * len(entities.keys())
            else:
                ls = l.split("\t")
                t_u = ls[0].split(' ', 1)
                # turn = t_u[0]
                uttr = t_u[1].split(' ')
                # mark entity types mentioned so far in this dialog
                update_context(context, uttr, entities)
                act_filter = generate_act_filter(len(system_acts), context)
                bow = get_bow(uttr, w2i)
                sys_act = g.SILENT
                if len(ls) == 2: # includes user and system utterance
                    sys_act = ls[1]
                    sys_act = re.sub(r'resto_\S+', '', sys_act)
                    if sys_act.startswith('api_call'): sys_act = 'api_call'
                else:
                    continue # TODO
                x.append(uttr)
                # previous system act: SILENT for the first turn of a dialog
                if len(y) == 0:
                    p.append(g.SILENT)
                else:
                    p.append(y[-1])
                y.append(sys_act)
                # deepcopy: `context` keeps mutating across turns
                c.append(copy.deepcopy(context))
                b.append(bow)
                f.append(act_filter)
    return data, system_acts
def update_context(context, sentence, entities):
    """Flip context[i] to 1 for every entity type i whose known values appear
    anywhere in `sentence` (mutates `context` in place)."""
    for slot_idx, (slot_key, slot_vals) in enumerate(entities.items()):
        if any(word in slot_vals for word in sentence):
            context[slot_idx] = 1
def generate_act_filter(action_size, context):
    """Return a 0/1 mask over system actions allowed given the slot context.

    Hard-coded for this task's 16 actions:
     0 <SILENT>                          8 is there anything i can help you with
     1 any preference on a type of cuisine   9 ok let me look into some options for you
     2 api_call                         10 sure is there anything else to update
     3 great let me do the reservation  11 sure let me find an other option for you
     4 hello what can i help you with today  12 what do you think of this option:
     5 here it is                       13 where should it be
     6 how many people would be in your party 14 which price range are looking for
     7 i'm on it                        15 you're welcome
    context: [R_cuisine, R_location, R_price, R_number] presence flags.
    """
    mask = [0] * action_size
    # always-available acts
    for act_idx in (0, 7, 8):
        mask[act_idx] = 1
    if context == [0, 0, 0, 0]:
        mask[4] = 1  # nothing known yet: greet
    if context == [1, 1, 1, 1]:
        # all slots filled: booking / option / wrap-up acts become legal
        for act_idx in (2, 3, 5, 8, 9, 10, 11, 12, 15):
            mask[act_idx] = 1
    if context[0] == 0:  # R_cuisine still unknown
        mask[1] = 1
    if context[1] == 0:  # R_location still unknown
        mask[13] = 1
    if context[2] == 0:  # R_price still unknown
        mask[14] = 1
    if context[3] == 0:  # R_number still unknown
        mask[6] = 1
    return mask
def get_bow(sentence, w2i):
    """Bag-of-words count vector over the vocabulary for one utterance;
    out-of-vocabulary words are silently ignored."""
    counts = [0] * len(w2i)
    for token in sentence:
        idx = w2i.get(token)
        if idx is not None:
            counts[idx] += 1
    return counts
def add_padding(data, seq_len):
    """Return `data` right-padded with zeros (or truncated) to exactly
    `seq_len` elements.

    Fixed: the original extended the caller's list in place via `+=` before
    truncating; we now build a new list and leave the argument untouched.
    """
    padded = list(data) + [0] * max(0, seq_len - len(data))
    return padded[:seq_len]
def make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen):
    # Convert a batch of dialogs (lists of tokenized utterances) into a single
    # LongTensor variable of shape (batch, dialog_maxlen, uttr_maxlen).
    # Unknown words map to w2i[g.UNK]; utterances are zero-padded/truncated to
    # uttr_maxlen and dialogs to dialog_maxlen turns.
    dialog_list = []
    for uttrs in uttrs_list:
        dialog = []
        for sentence in uttrs:
            sent_vec = [w2i[w] if w in w2i else w2i[g.UNK] for w in sentence]
            sent_vec = add_padding(sent_vec, uttr_maxlen)
            dialog.append(sent_vec)
        # pad short dialogs with all-zero utterances
        for _ in range(dialog_maxlen - len(dialog)):
            dialog.append([0] * uttr_maxlen)
        dialog = torch.LongTensor(dialog[:dialog_maxlen])
        dialog_list.append(dialog)
    return to_var(torch.stack(dialog_list, 0))
def to_var(x):
    """Wrap a tensor in a Variable, moving it to GPU when CUDA is available."""
    return Variable(x.cuda() if torch.cuda.is_available() else x)
def padding(data, default_val, maxlen, pad_seq_len):
    """Pad every dialog in `data` (in place) up to `maxlen` turns with rows of
    `default_val`, then return the batch as a FloatTensor variable."""
    for dialog in data:
        while len(dialog) < maxlen:
            # append a fresh row each time so rows are independent lists
            dialog.append([default_val] * pad_seq_len)
    return to_var(torch.FloatTensor(data))
def get_data_from_batch(batch, w2i, act2i):
    # Convert a batch of dialog tuples (x, y, c, b, p, f) — as produced by
    # load_data — into padded tensor inputs for the model. Padding lengths are
    # derived from the longest dialog / utterance in this batch.
    uttrs_list = [d[0] for d in batch]
    dialog_maxlen = max([len(uttrs) for uttrs in uttrs_list])
    uttr_maxlen = max([len(u) for uttrs in uttrs_list for u in uttrs])
    uttr_var = make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen)
    # system-act labels, padded with the SILENT act index
    batch_labels = [d[1] for d in batch]
    labels_var = []
    for labels in batch_labels:
        vec_labels = [act2i[l] for l in labels]
        pad_len = dialog_maxlen - len(labels)
        for _ in range(pad_len):
            vec_labels.append(act2i[g.SILENT])
        labels_var.append(torch.LongTensor(vec_labels))
    labels_var = to_var(torch.stack(labels_var, 0))
    # previous system act as a one-hot row per turn, zero rows for padding
    batch_prev_acts = [d[4] for d in batch]
    prev_var = []
    for prev_acts in batch_prev_acts:
        vec_prev_acts = []
        for act in prev_acts:
            tmp = [0] * len(act2i)
            tmp[act2i[act]] = 1
            vec_prev_acts.append(tmp)
        pad_len = dialog_maxlen - len(prev_acts)
        for _ in range(pad_len):
            vec_prev_acts.append([0] * len(act2i))
        prev_var.append(torch.FloatTensor(vec_prev_acts))
    prev_var = to_var(torch.stack(prev_var, 0))
    # deepcopy before padding() because padding mutates the nested lists;
    # context pads with 1, bag-of-words and action filter pad with 0
    context = copy.deepcopy([d[2] for d in batch])
    context = padding(context, 1, dialog_maxlen, len(context[0][0]))
    bow = copy.deepcopy([d[3] for d in batch])
    bow = padding(bow, 0, dialog_maxlen, len(bow[0][0]))
    act_filter = copy.deepcopy([d[5] for d in batch])
    act_filter = padding(act_filter, 0, dialog_maxlen, len(act_filter[0][0]))
    return uttr_var, labels_var, context, bow, prev_var, act_filter
| [
"collections.OrderedDict",
"pickle.dump",
"torch.LongTensor",
"torch.stack",
"pickle.load",
"torch.from_numpy",
"numpy.zeros",
"torch.cuda.is_available",
"torch.save",
"copy.deepcopy",
"re.sub",
"torch.autograd.Variable",
"torch.FloatTensor"
] | [((283, 310), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (293, 310), False, 'import torch\n'), ((594, 673), 'collections.OrderedDict', 'OrderedDict', (["{'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []}"], {}), "({'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []})\n", (605, 673), False, 'from collections import OrderedDict\n'), ((1252, 1285), 'numpy.zeros', 'np.zeros', (['(vocab_size, embd_size)'], {}), '((vocab_size, embd_size))\n', (1260, 1285), True, 'import numpy as np\n'), ((6777, 6802), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6800, 6802), False, 'import torch\n'), ((6836, 6847), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (6844, 6847), False, 'from torch.autograd import Variable\n'), ((8290, 8326), 'copy.deepcopy', 'copy.deepcopy', (['[d[2] for d in batch]'], {}), '([d[2] for d in batch])\n', (8303, 8326), False, 'import copy\n'), ((8407, 8443), 'copy.deepcopy', 'copy.deepcopy', (['[d[3] for d in batch]'], {}), '([d[3] for d in batch])\n', (8420, 8443), False, 'import copy\n'), ((8519, 8555), 'copy.deepcopy', 'copy.deepcopy', (['[d[5] for d in batch]'], {}), '([d[5] for d in batch])\n', (8532, 8555), False, 'import copy\n'), ((418, 435), 'pickle.dump', 'pickle.dump', (['d', 'f'], {}), '(d, f)\n', (429, 435), False, 'import pickle\n'), ((537, 551), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (548, 551), False, 'import pickle\n'), ((6630, 6670), 'torch.LongTensor', 'torch.LongTensor', (['dialog[:dialog_maxlen]'], {}), '(dialog[:dialog_maxlen])\n', (6646, 6670), False, 'import torch\n'), ((6724, 6751), 'torch.stack', 'torch.stack', (['dialog_list', '(0)'], {}), '(dialog_list, 0)\n', (6735, 6751), False, 'import torch\n'), ((7077, 7100), 'torch.FloatTensor', 'torch.FloatTensor', (['data'], {}), '(data)\n', (7094, 7100), False, 'import torch\n'), ((7745, 7771), 'torch.stack', 'torch.stack', (['labels_var', '(0)'], {}), '(labels_var, 
0)\n', (7756, 7771), False, 'import torch\n'), ((8249, 8273), 'torch.stack', 'torch.stack', (['prev_var', '(0)'], {}), '(prev_var, 0)\n', (8260, 8273), False, 'import torch\n'), ((1658, 1692), 'torch.from_numpy', 'torch.from_numpy', (['embedding_matrix'], {}), '(embedding_matrix)\n', (1674, 1692), False, 'import torch\n'), ((7691, 7719), 'torch.LongTensor', 'torch.LongTensor', (['vec_labels'], {}), '(vec_labels)\n', (7707, 7719), False, 'import torch\n'), ((8193, 8225), 'torch.FloatTensor', 'torch.FloatTensor', (['vec_prev_acts'], {}), '(vec_prev_acts)\n', (8210, 8225), False, 'import torch\n'), ((2390, 2423), 're.sub', 're.sub', (['"""resto_\\\\S+"""', '""""""', 'sys_act'], {}), "('resto_\\\\S+', '', sys_act)\n", (2396, 2423), False, 'import re\n'), ((3834, 3867), 're.sub', 're.sub', (['"""resto_\\\\S+"""', '""""""', 'sys_act'], {}), "('resto_\\\\S+', '', sys_act)\n", (3840, 3867), False, 'import re\n'), ((4222, 4244), 'copy.deepcopy', 'copy.deepcopy', (['context'], {}), '(context)\n', (4235, 4244), False, 'import copy\n')] |
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import scipy.stats
# NOTE: all parameter 'a' is assumed as array-like
def max(a):
    """Largest value in array-like *a*."""
    return np.asarray(a).max()
def min(a):
    """Smallest value in array-like *a*."""
    return np.asarray(a).min()
def range(a): return np.max(a) - np.min(a)
def sum(a):
    """Total of all values in *a*."""
    return np.asarray(a).sum()
def mean(a):
    """Arithmetic mean of *a*."""
    return np.asarray(a).mean()
def var(a):
    """Population variance of *a* (ddof=0)."""
    return np.asarray(a).var()
def var_samp(a):
    """Sample variance of *a* (ddof=1)."""
    return np.var(a, ddof=1)
def std(a):
    """Population standard deviation of *a*."""
    return np.asarray(a).std()
def skewness(a):
    """Skewness of *a* (scipy.stats.skew)."""
    return scipy.stats.skew(a)
def kurtosis(a):
    """Kurtosis of *a* (scipy.stats.kurtosis, default Fisher definition)."""
    return scipy.stats.kurtosis(a)
def median(a):
    """Median of *a*."""
    return np.median(a)
def percentile(a, q):
    """q-th percentile of *a*, with q in [0, 100]."""
    return np.percentile(a, q)
def trimmed_mean(a, proportiontocut):
    """Mean of *a* after cutting *proportiontocut* from both tails."""
    return scipy.stats.trim_mean(a, proportiontocut)
def iqr(a):
    """Interquartile range of *a* (scipy.stats.iqr)."""
    return scipy.stats.iqr(a)
def q1(a):
    """First quartile (25th percentile) of *a*."""
    return np.percentile(a, 25)
def q3(a):
    """Third quartile (75th percentile) of *a*."""
    return np.percentile(a, 75)
def mode(a):
    """Most frequent value(s) of *a*, ignoring null entries (pd.isnull).

    Returns a numpy array containing every value that achieves the maximum
    count (ties are all returned, in np.unique's sorted order).

    Robustness fix: an empty array is returned when *a* has no non-null
    values; previously np.max over an empty count array raised ValueError.
    """
    a = np.array(a)
    a = a[np.where(~pd.isnull(a))]
    vals, cnts = np.unique(a, return_counts=True)
    if cnts.size == 0:  # empty or all-null input: no mode to report
        return vals
    return vals[np.where(cnts == np.max(cnts))]
def num_row(a): return len(a)  # total number of entries, nulls included
def num_value(a): return np.count_nonzero(~pd.isnull(a))  # entries that are not null (per pd.isnull)
def num_nan(a): return np.count_nonzero([x is np.nan for x in a])
def num_nullonly(a): return np.count_nonzero([x is None for x in a])  # entries that are exactly None
def num_null(a): return np.count_nonzero(pd.isnull(a))  # entries flagged null by pd.isnull (None, NaN, NaT)
def num_distinct(a): return np.count_nonzero(np.unique(a))
| [
"numpy.mean",
"pandas.isnull",
"numpy.median",
"numpy.unique",
"numpy.std",
"numpy.max",
"numpy.count_nonzero",
"numpy.sum",
"numpy.array",
"numpy.min",
"numpy.percentile",
"numpy.var"
] | [((768, 777), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (774, 777), True, 'import numpy as np\n'), ((802, 811), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (808, 811), True, 'import numpy as np\n'), ((884, 893), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (890, 893), True, 'import numpy as np\n'), ((919, 929), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (926, 929), True, 'import numpy as np\n'), ((954, 963), 'numpy.var', 'np.var', (['a'], {}), '(a)\n', (960, 963), True, 'import numpy as np\n'), ((993, 1010), 'numpy.var', 'np.var', (['a'], {'ddof': '(1)'}), '(a, ddof=1)\n', (999, 1010), True, 'import numpy as np\n'), ((1035, 1044), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (1041, 1044), True, 'import numpy as np\n'), ((1174, 1186), 'numpy.median', 'np.median', (['a'], {}), '(a)\n', (1183, 1186), True, 'import numpy as np\n'), ((1221, 1240), 'numpy.percentile', 'np.percentile', (['a', 'q'], {}), '(a, q)\n', (1234, 1240), True, 'import numpy as np\n'), ((1399, 1419), 'numpy.percentile', 'np.percentile', (['a', '(25)'], {}), '(a, 25)\n', (1412, 1419), True, 'import numpy as np\n'), ((1443, 1463), 'numpy.percentile', 'np.percentile', (['a', '(75)'], {}), '(a, 75)\n', (1456, 1463), True, 'import numpy as np\n'), ((1491, 1502), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1499, 1502), True, 'import numpy as np\n'), ((1557, 1589), 'numpy.unique', 'np.unique', (['a'], {'return_counts': '(True)'}), '(a, return_counts=True)\n', (1566, 1589), True, 'import numpy as np\n'), ((1762, 1806), 'numpy.count_nonzero', 'np.count_nonzero', (['[(x is np.nan) for x in a]'], {}), '([(x is np.nan) for x in a])\n', (1778, 1806), True, 'import numpy as np\n'), ((1838, 1880), 'numpy.count_nonzero', 'np.count_nonzero', (['[(x is None) for x in a]'], {}), '([(x is None) for x in a])\n', (1854, 1880), True, 'import numpy as np\n'), ((838, 847), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (844, 847), True, 'import numpy as np\n'), ((850, 859), 'numpy.min', 'np.min', (['a'], 
{}), '(a)\n', (856, 859), True, 'import numpy as np\n'), ((1925, 1937), 'pandas.isnull', 'pd.isnull', (['a'], {}), '(a)\n', (1934, 1937), True, 'import pandas as pd\n'), ((1989, 2001), 'numpy.unique', 'np.unique', (['a'], {}), '(a)\n', (1998, 2001), True, 'import numpy as np\n'), ((1720, 1732), 'pandas.isnull', 'pd.isnull', (['a'], {}), '(a)\n', (1729, 1732), True, 'import pandas as pd\n'), ((1524, 1536), 'pandas.isnull', 'pd.isnull', (['a'], {}), '(a)\n', (1533, 1536), True, 'import pandas as pd\n'), ((1622, 1634), 'numpy.max', 'np.max', (['cnts'], {}), '(cnts)\n', (1628, 1634), True, 'import numpy as np\n')] |
"""
Analyze spike shapes - pulled out of IVCurve 2/6/2016 pbm.
Allows routine to be used to analyze spike trains independent of acq4's data models.
Create instance, then call setup to define the "Clamps" object and the spike threshold.
The Clamps object must have the following variables defined:
commandLevels (current injection levels, list)
time_base (np.array of times corresponding to traces)
    data_mode (string, indicating current or voltage clamp)
tstart (time for start of looking at spikes; ms)
tend (time to stop looking at spikes; ms)
trace (the data trace itself, numpy array records x points)
sample_interval (time between samples, sec)
values (command waveforms; why it is called this in acq4 is a mystery)
Note that most of the results from this module are accessed either
as class variables, or through the class variable analysis_summary,
a dictionary with key analysis results.
IVCurve uses the analysis_summary to post results to an sql database.
<NAME>, Ph.D. 2016-2019
for Acq4 (and beyond)
"""
from collections import OrderedDict
import os
import os.path
from pathlib import Path
import inspect
import sys
import itertools
import functools
import numpy as np
import scipy
from . import Utility # pbm's utilities...
from . import Fitting # pbm's fitting stuff...
import pprint
import time
this_source_file = 'ephysanalysis.SpikeAnalysisrc'
class SpikeAnalysis():
def __init__(self):
pass
self.threshold = 0.
self.Clamps = None
self.analysis_summary = {}
self.verbose = False
self.FIGrowth = 1 # use function FIGrowth1 (can use simpler version FIGrowth 2 also)
self.analysis_summary['FI_Growth'] = [] # permit analysis of multiple growth functions.
self.detector = 'argrelmax'
def setup(self, clamps=None, threshold=None, refractory:float=0.0007, peakwidth:float=0.001,
verify=False, interpolate=True, verbose=False, mode='peak', min_halfwidth=0.010,
data_time_units:str = 's', data_volt_units:str='V'):
"""
configure the inputs to the SpikeAnalysis class
Parameters
---------
clamps : class (default: None)
PatchEphys clamp data holding/accessing all ephys data for this analysis
threshold : float (default: None)
Voltage threshold for spike detection
refractory : float (default 0.0007)
Minimum time between detected spikes, in seconds (or units of the clamp
time base)
peakwidth : float (default: 0.001)
When using "peak" as method in findspikes, this is the peak width maximum in sec
min_halfwidth : float (default: 0.010)
minimum spike half width in seconds. Default value is deliberately large...
verify : boolean (default: False)
interpolate : boolean (default: True)
Use interpolation to get spike threshold time and half-widths
mode : string (default: 'peak')
if using detector "peak", this is mode passed to findspikes
verbose : boolean (default: False)
Set true to get lots of print out while running - used
mostly for debugging.
"""
if clamps is None or threshold is None:
raise ValueError("Spike Analysis requires defined clamps and threshold")
self.Clamps = clamps
assert data_time_units in ['s', 'ms']
assert data_volt_units in ['V', 'mV']
self.time_units = data_time_units
self.volt_units = data_volt_units # needed by spike detector for data conversion
self.threshold = threshold
self.refractory = refractory
self.interpolate = interpolate # use interpolation on spike thresholds...
self.peakwidth = peakwidth
self.min_halfwidth = min_halfwidth
self.verify = verify
self.verbose = verbose
self.mode = mode
self.ar_window = 0.1
self.ar_lastspike = 0.075
self.min_peaktotrough = 0.010 # change in V on falling phase to be considered a spike
self.max_spike_look = 0.010 # msec over which to measure spike widths
def set_detector(self, detector:str='argrelmax'):
assert detector in ['argrelmax', 'threshold', 'Kalluri']
self.detector = detector
    def analyzeSpikes(self, reset=True):
        """
        analyzeSpikes: Using the threshold set in the control panel, count the
        number of spikes in the stimulation window (self.Clamps.tstart, self.Clamps.tend)
        Updates the spike plot(s).

        The following class variables are modified upon successful analysis and return:
        self.spikecount: a 1-D numpy array of spike counts, aligned with the
            current (command)
        self.adapt_ratio: the adaptation ratio of the spike train
        self.fsl: a numpy array of first spike latency for each command level (ms)
        self.fisi: a numpy array of first interspike intervals for each
            command level (ms)
        self.nospk: the indices of command levels where no spike was detected
        self.spk: the indices of command levels where at least one spike
            was detected
        self.analysis_summary : Dictionary of results.

        Parameters
        ----------
        reset : bool (default: True)
            When True, clears any previously accumulated FI growth fits.

        Returns
        -------
        Nothing, but see the list of class variables that are modified
        """
        if reset:
            self.analysis_summary['FI_Growth'] = [] # permit analysis of multiple growth functions.
        twin = self.Clamps.tend - self.Clamps.tstart # measurements window in seconds
        maxspkrate = 50 # max rate to count in adaptation is 50 spikes/second
        minspk = 4  # minimum number of spikes needed for the adaptation-ratio measurement
        maxspk = int(maxspkrate*twin) # scale max count by range of spike counts
        # NOTE(review): maxspk is computed but not used below - TODO confirm intent.
        #print('max spike rate: ', maxspk)
        ntr = len(self.Clamps.traces)
        self.spikecount = np.zeros(ntr)
        self.fsl = np.zeros(ntr)   # first-spike latency per trace (ms)
        self.fisi = np.zeros(ntr)  # first interspike interval per trace (ms)
        ar = np.zeros(ntr)         # per-trace adaptation ratio (0 where not measurable)
        self.allisi = []
        self.spikes = [[] for i in range(ntr)]        # spike times (same units as time_base) per trace
        self.spikeIndices = [[] for i in range(ntr)]  # sample indices of detected spikes per trace
        #print 'clamp start/end: ', self.Clamps.tstart, self.Clamps.tend
        lastspikecount = 0
        U = Utility.Utility()
        for i in range(ntr): # this is where we should parallelize the analysis for spikes
            spikes = U.findspikes(self.Clamps.time_base, np.array(self.Clamps.traces[i]),
                                  self.threshold, t0=self.Clamps.tstart,
                                  t1=self.Clamps.tend,
                                  dt=self.Clamps.sample_interval,
                                  mode=self.mode,  # mode to use for finding spikes
                                  interpolate=self.interpolate,
                                  detector=self.detector,
                                  mindip = 1e-2,
                                  refract=self.refractory,
                                  peakwidth=self.peakwidth,
                                  data_time_units=self.time_units,
                                  data_volt_units=self.volt_units,
                                  verify=self.verify,
                                  debug=False)
            # print (ntr, i, self.Clamps.values[i], len(spikes))
            if len(spikes) == 0:
                # print ('no spikes found')
                continue
            spikes = np.array(spikes)
            self.spikes[i] = spikes
            # print 'found %d spikes in trace %d' % (len(spikes), i)
            # map each spike time back to the nearest sample index
            self.spikeIndices[i] = [np.argmin(np.fabs(self.Clamps.time_base-t)) for t in spikes]
            self.spikecount[i] = len(spikes)
            self.fsl[i] = (spikes[0] - self.Clamps.tstart)*1e3
            if len(spikes) > 1:
                self.fisi[i] = (spikes[1] - spikes[0])*1e3 # first ISI
                self.allisi.append(np.diff(spikes)*1e3)
            # for Adaptation ratio analysis: limit spike rate, and also only on monotonic increase in rate
            # 8/2018:
            #   AR needs to be tethered to time into stimulus
            #   Here we return a standardized ar measured during the first 100 msec
            #   (standard ar)
            if (minspk <= len(spikes)) and (self.spikecount[i] > lastspikecount):
                spx = spikes[np.where(spikes-self.Clamps.tstart < self.ar_window)] # default is 100 msec
                if len(spx) >= 4: # at least 4 spikes
                    if spx[-1] > self.ar_lastspike+self.Clamps.tstart: # default 75 msec
                        misi = np.mean(np.diff(spx[-2:]))*1e3 # last ISIs in the interval
                        ar[i] = misi / self.fisi[i]  # AR = late ISI / first ISI
                lastspikecount = self.spikecount[i] # update rate (sets max rate)
        iAR = np.where(ar > 0) # valid AR and monotonically rising
        self.adapt_ratio = np.nan
        if len(ar[iAR]) > 0:
            self.adapt_ratio = np.mean(ar[iAR]) # only where we made the measurement
        self.ar = ar # stores all the ar values
        self.analysis_summary['AdaptRatio'] = self.adapt_ratio # only the valid values
        self.nospk = np.where(self.spikecount == 0)
        self.spk = np.where(self.spikecount > 0)[0]
        self.analysis_summary['FI_Curve'] = np.array([self.Clamps.values, self.spikecount])
        self.analysis_summary['FiringRate'] = np.max(self.spikecount)/(self.Clamps.tend - self.Clamps.tstart)
        self.spikes_counted = True
        # self.update_SpikePlots()
def analyzeSpikes_brief(self, mode='baseline'):
"""
analyzeSpikes_brief: Using the threshold set in the control panel, count the
number of spikes in a window and fill out ana analysis summary dict with
the spike latencies in that window (from 0 time)
Parameters
----------
mode: str (default : baseline)
baseline: from 0 to self.Clamps.tstart
poststimulus : from self.Clamps.tend to end of trace
evoked : from self.Clamps.start to self.Clamps.end
Returns:
-------
Nothing, but see the list of class variables that are modified
Class variable modified is the
self.analysis_summary : Dictionary of spike times. Key is
'spikes_baseline'
'spikes_poststimulus'
'spikes_evoked'
according to the mode in the call
"""
if mode == 'baseline':
twin = [0., self.Clamps.tstart]
elif mode == 'evoked':
twin = [self.Clamps.tstart,self.Clamps.tend]
elif mode == 'poststimulus':
twin = [self.Clamps.tend, np.max(self.Clamps.time_base)]
else:
raise ValueError(f'{thissourcefile:s}:: analyzeSpikes_brief requires mode to be "baseline", "evoked", or "poststimulus"')
ntr = len(self.Clamps.traces)
allspikes = [[] for i in range(ntr)]
spikeIndices = [[] for i in range(ntr)]
U = Utility.Utility()
for i in range(ntr):
spikes = U.findspikes(self.Clamps.time_base, np.array(self.Clamps.traces[i]),
self.threshold, t0=twin[0],
t1=twin[1],
dt=self.Clamps.sample_interval,
mode=self.mode, # mode to use for finding spikes
interpolate=self.interpolate,
detector=self.detector,
refract=self.refractory,
peakwidth=self.peakwidth,
verify=self.verify,
debug=False)
if len(spikes) == 0:
#print 'no spikes found'
continue
allspikes[i] = spikes
self.analysis_summary[mode+'_spikes'] = allspikes
def _timeindex(self, t):
"""
Find the index into the time_base of the Clamps structure that
corresponds to the time closest to t
Parameters
----------
t : float (time, no default)
Returns
-------
index : int (index to the closest time)
"""
return np.argmin(self.Clamps.time_base-t)
def _initialize_summarymeasures(self):
self.analysis_summary['AP1_Latency'] = np.inf
self.analysis_summary['AP1_HalfWidth'] = np.inf
self.analysis_summary['AP1_HalfWidth_interpolated'] = np.inf
self.analysis_summary['AP2_Latency'] = np.inf
self.analysis_summary['AP2_HalfWidth'] = np.inf
self.analysis_summary['AP2_HalfWidth_interpolated'] = np.inf
self.analysis_summary['FiringRate_1p5T'] = np.inf
self.analysis_summary['AHP_Depth'] = np.inf # convert to mV
    def analyzeSpikeShape(self, printSpikeInfo=False, begin_dV=12.0):
        """analyze the spike shape.
        Does analysis of ONE protocol, all traces.
        Based on the analysis from Druckman et al. Cerebral Cortex, 2013

        The results of the analysis are stored in the SpikeAnalysis object
        as SpikeAnalysis.analysis_summary, a dictionary with specific keys.
        Also available are the raw spike measures, in the 'spikes' dictionary
        of the analysis_summary (spike shape dict, with keys by trace number,
        each trace with a dict of values)

        Every spike is measured, and a number of points on the waveform
        are defined for each spike, including the peak, the half-width
        on the rising phase, half-width on the falling phase, the
        peak of the AHP, the peak-trough time (AP peak to AHP peak),
        and a beginning, based on the slope (set in begin_dV)

        Parameters
        ----------
        printSpikeInfo : Boolean (default: False)
            Flag; when set prints arrays, etc, for debugging purposes
        begin_dV : float (default: 12 mV/ms)
            Slope used to define onset of the spike. The default value
            is from Druckmann et al; change this at your own peril!

        Returns
        -------
        Nothing (but see doc notes above)
        """
        self._initialize_summarymeasures()
        self.madeplot = False
        ntr = len(self.Clamps.traces)
        # print 'analyzespikeshape, self.spk: ', self.spk
        self.spikeShape = OrderedDict()
        rmps = np.zeros(ntr)        # NOTE(review): resting potentials are measured but not stored on self
        self.iHold_i = np.zeros(ntr)  # per-trace holding current (baseline of command wave)
        U = Utility.Utility()
        for i in range(ntr):
            # print('rec nspk: ', i, len(self.spikes[i]))
            if len(self.spikes[i]) == 0:
                continue  # skip traces with no detected spikes
            if printSpikeInfo:
                print(f'{this_source_file:s}:: spikes: ', self.spikes[i])
                print((np.array(self.Clamps.values)))
                print((len(self.Clamps.traces)))
            # baseline membrane potential and holding current over [0, tstart]
            (rmps[i], r2) = U.measure('mean', self.Clamps.time_base, self.Clamps.traces[i],
                                      0.0, self.Clamps.tstart)
            (self.iHold_i[i], r2) = U.measure('mean', self.Clamps.time_base, self.Clamps.cmd_wave[i],
                                              0.0, self.Clamps.tstart)
            trspikes = OrderedDict()
            for j in range(len(self.spikes[i])):
                # print('i,j,etc: ', i, j, begin_dV)
                thisspike = self.analyze_one_spike(i, j, begin_dV)
                if thisspike is not None:
                    trspikes[j] = thisspike
            self.spikeShape[i] = trspikes
        self.iHold = np.mean(self.iHold_i)
        self.analysis_summary['spikes'] = self.spikeShape # save in the summary dictionary too
        self.analysis_summary['iHold'] = self.iHold
        self.analysis_summary['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart
        if len(self.spikeShape.keys()) > 0: # only try to classify if there are spikes
            self.getClassifyingInfo() # build analysis summary here as well.
        if printSpikeInfo:
            pp = pprint.PrettyPrinter(indent=4)
            for m in sorted(self.spikeShape.keys()):
                print(('----\nTrace: %d has %d APs' % (m, len(list(self.spikeShape[m].keys())))))
                for n in sorted(self.spikeShape[m].keys()):
                    pp.pprint(self.spikeShape[m][n])
    def analyze_one_spike(self, i, j, begin_dV):
        """Measure waveform features of spike j in trace i.

        Builds a dictionary for one action potential: peak time/voltage,
        onset (threshold) point found from the dV/dt slope criterion
        begin_dV, AHP trough, peak-to-trough time, and half-widths computed
        both from the closest raw samples and by linear interpolation.

        Parameters
        ----------
        i : int
            Trace (record) index.
        j : int
            Spike index within trace i (into self.spikeIndices[i]).
        begin_dV : float
            dV/dt value used to mark the spike onset.

        Returns
        -------
        dict
            The per-spike measurement dict; fields that could not be
            measured remain None. May return early with a partially
            filled dict when the spike runs off the end of the trace.
        """
        thisspike = {'trace': i, 'AP_number': j, 'AP_beginIndex': None, 'AP_endIndex': None,
                     'AP_peakIndex': None, 'peak_T': None, 'peak_V': None, 'AP_Latency': None,
                     'AP_beginV': None, 'halfwidth': None, 'halfwidth_interpolated': None,
                     'trough_T': None, 'trough_V': None, 'peaktotrough': None,
                     'current': None, 'iHold': None,
                     'pulseDuration': None, 'tstart': self.Clamps.tstart} # initialize the structure
        thisspike['current'] = self.Clamps.values[i] - self.iHold_i[i]  # command relative to holding
        thisspike['iHold'] = self.iHold_i[i]
        thisspike['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart # in seconds
        thisspike['AP_peakIndex'] = self.spikeIndices[i][j]
        thisspike['peak_T'] = self.Clamps.time_base[thisspike['AP_peakIndex']]
        thisspike['peak_V'] = self.Clamps.traces[i][thisspike['AP_peakIndex']] # max voltage of spike
        thisspike['tstart'] = self.Clamps.tstart
        # find the minimum going forward - that is AHP min
        dt = (self.Clamps.time_base[1]-self.Clamps.time_base[0])
        dv = np.diff(self.Clamps.traces[i])/dt  # first derivative of the voltage trace
        # find end of spike (either top of next spike, or end of trace)
        k = self.spikeIndices[i][j] + 1 # point to next spike
        if j < self.spikecount[i] - 1:  # another spike follows: search only up to it
            kend = self.spikeIndices[i][j+1]
        else:
            # last spike in the trace: look max_spike_look seconds past the peak
            kend = int(self.spikeIndices[i][j]+self.max_spike_look/dt)
        if kend >= dv.shape[0]:
            return(thisspike) # end of spike would be past end of trace
        else:
            if kend < k:
                kend = k + 1
            km = np.argmin(dv[k:kend]) + k  # steepest repolarization after the peak
        # Find trough after spike and calculate peak to trough
        kmin = np.argmin(self.Clamps.traces[i][km:kend])+km
        thisspike['AP_endIndex'] = kmin
        thisspike['trough_T'] = self.Clamps.time_base[thisspike['AP_endIndex']]
        thisspike['trough_V'] = self.Clamps.traces[i][kmin]
        if thisspike['AP_endIndex'] is not None:
            thisspike['peaktotrough'] = thisspike['trough_T'] - thisspike['peak_T']
        # find points on spike waveform
        # because index is to peak, we look for previous spike
        k = self.spikeIndices[i][j]
        # print('i, j, spikeindices: ', i, j, self.spikeIndices[i][j])
        # print('k: dt: ', k, dt)
        if j > 0:
            kbegin = self.spikeIndices[i][j-1] # index to previous spike start
        else:
            kbegin = k - int(0.002/dt) # for first spike - 2 msec prior only
        if k <= kbegin:
            k = kbegin + 2  # guarantee a non-empty search window
        if k > len(dv): # end of block of data, so can not measure
            return(thisspike)
        # print('kbegin, k: ', kbegin, k)
        try:
            km = np.argmax(dv[kbegin:k]) + kbegin  # point of fastest rise before the peak
        except:
            print(f'{this_source_file:s}:: kbdgin, k: ', kbegin, k)
            print(len(dv))
            raise
        if ((km - kbegin) < 1):
            km = kbegin + int((k - kbegin)/2.) + 1  # fall back to the window midpoint
        kthresh = np.argmin(np.fabs(dv[kbegin:km] - begin_dV)) + kbegin # point where slope is closest to begin
        # print('kthresh, kbegin: ', kthresh, kbegin)
        # save values in dict here
        thisspike['AP_beginIndex'] = kthresh
        thisspike['AP_Latency'] = self.Clamps.time_base[kthresh]
        thisspike['AP_beginV'] = self.Clamps.traces[i][thisspike['AP_beginIndex']]
        # if successful in defining spike start/end, calculate half widths in two ways:
        # closest points in raw data, and by interpolation
        if (
            (thisspike['AP_beginIndex'] is not None) and
            (thisspike['AP_beginIndex'] > 0) and
            (thisspike['AP_endIndex'] is not None) and
            (thisspike['AP_beginIndex'] < thisspike['AP_peakIndex']) and
            (thisspike['AP_peakIndex'] < thisspike['AP_endIndex'])
            ):
            halfv = 0.5*(thisspike['peak_V'] + thisspike['AP_beginV'])  # half-amplitude level
            tr = np.array(self.Clamps.traces[i])
            xr = self.Clamps.time_base
            # nearest-sample crossings of the half-amplitude level
            kup = np.argmin(np.fabs(tr[thisspike['AP_beginIndex']:thisspike['AP_peakIndex']] - halfv))
            kup += thisspike['AP_beginIndex']
            kdown = np.argmin(np.fabs(tr[thisspike['AP_peakIndex']:thisspike['AP_endIndex']] - halfv))
            kdown += thisspike['AP_peakIndex']
            if kup is not None and kdown is not None:
                thisspike['halfwidth'] = xr[kdown] - xr[kup]
                thisspike['hw_up'] = xr[kup] - xr[thisspike['AP_peakIndex']]
                thisspike['hw_down'] = xr[thisspike['AP_peakIndex']] - xr[kdown]
                thisspike['hw_v'] = halfv
                # interpolated spike hwup, down and width
                pkt = xr[thisspike['AP_peakIndex']]  # NOTE(review): pkt is unused below
                # pick the pair of samples that bracket the rising half-level crossing
                if tr[kup] <= halfv:
                    vi = tr[kup-1:kup+1]
                    xi = xr[kup-1:kup+1]
                else:
                    vi = tr[kup:kup+2]
                    xi = xr[kup:kup+2]
                m1 = (vi[1]-vi[0])/(xi[1]-xi[0])  # slope of the rising-phase segment
                b1 = vi[1] - m1*xi[1]
                if m1 == 0.0 or np.std(tr) == 0.0:
                    # flat segment (or flat trace): interpolation impossible
                    # print('a: ', vi[1], vi[0], kup, tr[kup:kup+2], tr[kup-1:kup+1], tr[kup], halfv)
                    return(thisspike)
                t_hwup = (halfv-b1)/m1  # interpolated rising-phase crossing time
                # pick the pair of samples that bracket the falling half-level crossing
                if tr[kdown] <= halfv:
                    vi = tr[kdown:kdown+2]
                    xi = xr[kdown:kdown+2]
                    u='a'  # NOTE(review): u is a debugging leftover, unused
                else:
                    vi = tr[kdown-1:kdown+1]
                    xi = xr[kdown-1:kdown+1]
                    u='b'
                m2 = (vi[1]-vi[0])/(xi[1]-xi[0])  # slope of the falling-phase segment
                b2 = vi[1] - m2*xi[1]
                if m2 == 0.0 or np.std(tr) == 0.0:
                    # print('b: ', vi[1], vi[0], kup , tr[kdown-1:kdown+1], tr[kdown:kdown+2], tr[kdown], halfv)
                    return(thisspike)
                t_hwdown = (halfv-b2)/m2  # interpolated falling-phase crossing time
                thisspike['halfwidth'] = t_hwdown-t_hwup
                # import matplotlib.pyplot as mpl
                # fig, ax = mpl.subplots(1,1)
                # ax.plot(xr[kup-10:kdown+10], tr[kup-10:kdown+10])
                # ax.plot(t_hwdown, halfv, 'ro')
                # ax.plot(t_hwup, halfv, 'bx')
                # mpl.show()
                if thisspike['halfwidth'] > self.min_halfwidth: # too broad to be acceptable
                    if self.verbose:
                        print(f'{this_source_file:s}::\n   spikes > min half width', thisspike['halfwidth'])
                        print('   halfv: ', halfv, thisspike['peak_V'], thisspike['AP_beginV'])
                    thisspike['halfwidth'] = None
                    thisspike['halfwidth_interpolated'] = None
                else:
                    thisspike['halfwidth_interpolated'] = t_hwdown - t_hwup
                # NOTE(review): the following sanity check is computed but its
                # result (pktrap) is never used - debugging leftover.
                pkvI = tr[thisspike['AP_peakIndex']]
                pkvM = np.max(tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']])
                pkvMa = np.argmax(tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']])
                if pkvI != pkvM:
                    pktrap = True
        return(thisspike)
    def getIVCurrentThresholds(self):
        """ figure out "threshold" for spike, get 150% and 300% points.

        Parameters
        ----------
        None

        Returns
        -------
        tuple: (int, int)
            The tuple contains the index to command threshold for spikes, and 150% of that threshold
            The indices are computed to be as close to the command step values
            that are actually used (so, the threshold is absolute; the 150%
            value will be the closest estimate given the step sizes used to
            collect the data)
        """
        icmd = [] # list of command currents that resulted in spikes.
        for m in sorted(self.spikeShape.keys()):
            n = len(list(self.spikeShape[m].keys())) # number of spikes in the trace
            # NOTE(review): 'n' above is immediately shadowed by the loop
            # variable; it only matters for the diagnostic prints in the
            # except clause below.
            for n in list(self.spikeShape[m].keys()):
                icmd.append(self.spikeShape[m][n]['current'])
        icmd = np.array(icmd)
        try:
            iamin = np.argmin(icmd)  # NOTE(review): result unused; serves to raise on an empty icmd
        except:
            print(f'{this_source_file:s}: Problem with command: ')
            print('self.spikeShape.keys(): ', self.spikeShape.keys())
            print('   m = ', m)
            print('   n = ', n)
            print('   current? ', self.spikeShape[m][n]['current'])
            raise ValueError(f'{this_source_file:s}:getIVCurrentThresholds - icmd seems to be ? : ', icmd)
        imin = np.min(icmd)  # smallest current that produced a spike (absolute threshold)
        ia150 = np.argmin(np.abs(1.5*imin-icmd))  # spiking level closest to 150% of threshold
        # map both currents back onto the actual command-step values
        iacmdthr = np.argmin(np.abs(imin-self.Clamps.values))
        ia150cmdthr = np.argmin(np.abs(icmd[ia150] - self.Clamps.values))
        return (iacmdthr, ia150cmdthr) # return threshold indices into self.Clamps.values array at threshold and 150% point
    def getClassifyingInfo(self):
        """
        Adds the classifying information according to Druckmann et al., Cerebral Cortex, 2013
        to the analysis summary

        Parameters
        ----------
        None

        Returns
        -------
        Nothing

        Modifies the class analysis_summary dictionary to contain a number of results
        regarding the AP train, including the first and second spike latency,
        the first and second spike halfwidths, the firing rate at 150% of threshold,
        and the depth of the AHP
        """
        (jthr, j150) = self.getIVCurrentThresholds()  # get the indices for the traces we need to pull data from
        jthr = int(jthr)
        j150 = int(j150)
        if j150 not in list(self.spikeShape.keys()):
            return  # no spike-shape data at the 1.5T level; nothing to classify
        if jthr == j150 and self.verbose:
            #print '\n%s:' % self.filename
            print('Threshold current T and 1.5T the same: using next up value for j150')
            print('jthr, j150, len(spikeShape): ', jthr, j150, len(self.spikeShape))
            print('1 ', self.spikeShape[jthr][0]['current']*1e12)
            print('2 ', self.spikeShape[j150+1][0]['current']*1e12)
            print(' >> Threshold current: %8.3f   1.5T current: %8.3f, next up: %8.3f' % (self.spikeShape[jthr][0]['current']*1e12,
                  self.spikeShape[j150][0]['current']*1e12, self.spikeShape[j150+1][0]['current']*1e12))
            # NOTE(review): j150 is bumped only when verbose is True, so the
            # verbose flag changes the analysis result - TODO confirm intent.
            j150 = jthr + 1
        spikesfound = False
        # first AP at the 1.5T level (requires a measurable halfwidth)
        if len(self.spikeShape[j150]) >= 1 and (0 in list(self.spikeShape[j150].keys())) and self.spikeShape[j150][0]['halfwidth'] is not None:
            self.analysis_summary['AP1_Latency'] = (self.spikeShape[j150][0]['AP_Latency'] - self.spikeShape[j150][0]['tstart'])*1e3
            self.analysis_summary['AP1_HalfWidth'] = self.spikeShape[j150][0]['halfwidth']*1e3
            if self.spikeShape[j150][0]['halfwidth_interpolated'] is not None:
                self.analysis_summary['AP1_HalfWidth_interpolated'] = self.spikeShape[j150][0]['halfwidth_interpolated']*1e3
            else:
                self.analysis_summary['AP1_HalfWidth_interpolated'] = np.nan
            spikesfound = True
        # second AP at the 1.5T level
        if len(self.spikeShape[j150]) >= 2 and (1 in list(self.spikeShape[j150].keys())) and self.spikeShape[j150][1]['halfwidth'] is not None:
            self.analysis_summary['AP2_Latency'] = (self.spikeShape[j150][1]['AP_Latency'] - self.spikeShape[j150][1]['tstart'])*1e3
            self.analysis_summary['AP2_HalfWidth'] = self.spikeShape[j150][1]['halfwidth']*1e3
            if self.spikeShape[j150][1]['halfwidth_interpolated'] is not None:
                self.analysis_summary['AP2_HalfWidth_interpolated'] = self.spikeShape[j150][1]['halfwidth_interpolated']*1e3
            else:
                self.analysis_summary['AP2_HalfWidth_interpolated'] = np.nan
        if spikesfound:
            rate = len(self.spikeShape[j150])/self.spikeShape[j150][0]['pulseDuration'] # spikes per second, normalized for pulse duration
            AHPDepth = self.spikeShape[j150][0]['AP_beginV'] - self.spikeShape[j150][0]['trough_V'] # from first spike # first AHP depth
            print(f"AHP: Begin = {self.spikeShape[j150][0]['AP_beginV']*1e3:.2f} mV")
            print(f"     Trough = {self.spikeShape[j150][0]['trough_V']*1e3:.2f} mV")
            print(f"     Depth = {AHPDepth*1e3:.2f} mV")
            self.analysis_summary['FiringRate_1p5T'] = rate
            self.analysis_summary['AHP_Depth'] = AHPDepth*1e3 # convert to mV
def fitOne(self, x=None, yd=None, info='', function=None, fixNonMonotonic=True, excludeNonMonotonic=False):
"""Fit the FI plot to an equation that is piecewise linear up to the threshold
called Ibreak, then (1-exp(F/Frate)) for higher currents
Parameters
----------
x : numpy array (no default)
The x data to fit (typically an array of current levels)
yd : numpy array (no default)
The y data to fit (typically an array of spike counts)
if x and yd are none, we extrace from the 'FI_Curve' for this cell.
info : string (default: '')
information to add to a fitted plot
fixNonMonotonic : Boolean (default: True)
If True, only use data up to the maximal firing rate,
discarding the remainder of the steps under the assumption
that the cell is entering depolarization block.
excludeNonMonotonic : Boolean (default: False)
if True, does not even try to fit, and returns None
Returns
-------
None if there is no fitting to be done (excluding non-monotonic or no spikes)
tuple of (fpar, xf, yf, names, error, f, func)
These are the fit parameters
"""
# print('fitone called')
if function is not None:
self.FIGrowth = function
if x is None: # use class data
x = self.analysis_summary['FI_Curve'][0]*1e9
yd = self.analysis_summary['FI_Curve'][1]/self.analysis_summary['pulseDuration'] # convert to rate in spikes/second
if self.FIGrowth == 'fitOneOriginal':
ymax = np.max(yd)
ymax_a = 0.8*ymax
if ymax <= 0.:
return(None)
nonmono = 0
if fixNonMonotonic: # clip at max firing rate
ydiff = np.gradient(yd, x)
xnm = np.where(ydiff < 0.)[0]
if len(xnm) > 0:
imax = xnm[0]+1
else:
imax = len(yd)
# imaxs = [i for i, y in enumerate(yd) if y >= ymax_a] # handle duplicate firing rates
# imax = max(imaxs) # find highest index
dypos = range(0, imax)
x = x[dypos]
yd = yd[dypos]
ymax = np.max(yd)
if np.max(x) < 0.: # skip if max rate is < 0 current
return(None)
ymin = 5.
if ymax < ymin:
ymin = 0.
if ymax > yd[-1] and excludeNonMonotonic:
nonmono += 1
return(None)
# fpnt = np.where(yd > 0) # find first point where cell fires
fire_points = np.where((yd[:-1] > 0) & (yd[1:] > 0))[0] # limit to positive current injections with successive spikes
if len(fire_points) == 0:
return(None)
fbr = fire_points[0]
ibreak0 = x[fbr-1] # use point before first spike as the initial break point
dx = np.abs(np.mean(np.diff(x))) # get current steps
xp = x[fire_points]
xp = xp - ibreak0 - dx
yp = yd[fire_points] # save data with responses
testMethod = "simplex" # 'SLSQP' # L-BFGS-B simplex, SLSQP, 'TNC', 'COBYLA'
if fbr-2 >= 0:
x0 = fbr-2
else:
x0 = 0
if fbr < len(x):
x1 = fbr
else:
x1 = len(x)-1
res = []
err = []
fitter = Fitting.Fitting() # make sure we always work with the same instance
for i in range(-4, 4): # allow breakpoint to move
if fbr + i + 1 > len(x)-1:
continue
x0 = fbr+i
for j in range(0,4): # then vary the width of the linear region
x1 = x0 + j
if x1 >= len(x):
continue
bounds = ((0., 0.), np.sort([x[x0], x[x1]]),
(0., 2.*yp[0]), (0., ymax*10.0), (1e-5, 1e5))
# parameters for FIGrowth 1: ['Fzero', 'Ibreak', 'F1amp', 'F2amp', 'Irate']
# if i == -4 and j == 0:
fitbreak0 = ibreak0
initpars = [0., np.min(bounds[1]),
0., np.mean(bounds[3]), np.mean(bounds[4])]
func = 'FIGrowthExpBreak'
f = fitter.fitfuncmap[func]
(fpar, xf, yf, names) = fitter.FitRegion(np.array([1]), 0, x, yd, t0=fitbreak0, t1=np.max(x),
fitFunc=func, fitPars=initpars, bounds=bounds,
fixedPars=None, method=testMethod)
error = fitter.getFitErr()
res.append({'fpar': fpar, 'xf': xf, 'yf': yf, 'names': names, 'error': error})
err.append(error)
minerr = np.argmin(err)
fpar = res[minerr]['fpar']
xf = res[minerr]['xf']
yf = res[minerr]['yf']
names = res[minerr]['names']
error = res[minerr]['error']
else: # recompute some stuff
# estimate initial parameters and set region of IV curve to be used for fitting
ymax = np.max(yd) # maximum spike rate (not count; see above)
if ymax == 0:
return None
ymax_nm = 0.8*np.max(yd) # maximum spike rate (not count; see above)
dypos = range(len(x))
if fixNonMonotonic and ymax_nm > yd[-1]: # fix non-monotinic firing - clip fitting to current that generates the max firing rate
imaxs = [i for i, y in enumerate(yd) if y >= ymax_nm] # handle duplicate firing rates
imax = max(imaxs) # find highest index
dypos = list(range(0, imax+1))
x = x[dypos] # restrict current and response range to those currents
yd = yd[dypos]
ymax = np.max(yd)
if np.max(x) < 0.: # skip if max rate occurs at negative current level
return None
ymin = 5
if ymax < ymin:
ymin = 0.
if ymax > yd[-1] and excludeNonMonotonic:
nonmono += 1
return None
# Now find first point where cell fires and next step also has cell firing
fire_points = np.where((yd[:-1] > 0) & (yd[1:] > 0))[0] # limit to positive current injections with successive spikes
fbr = fire_points[0]
testMethod = 'SLSQP' # 'SLSQP' # L-BFGS-B simplex, SLSQP, 'TNC', 'COBYLA'
if fbr - 1 >= 0: # set start and end of linear fit
x0 = fbr - 1 # x0 is last point (in current) with no spikes
else:
x0 = 0
if fbr < len(x): # x1 is the next point, which has a spike
x1 = fbr
else:
x1 = len(x) - 1
ibreak0 = x[x0] # use point before first spike as the initial break point
if self.FIGrowth == 'FIGrowthExpBreak':
# print('Exponential model fit')
ixb = fbr # np.argwhere(yd > 0)[0][0]
cons = ( {'type': 'eq', 'fun': lambda xc: xc[0]}, # lock F0 at >= 0
{'type': 'ineq', 'fun': lambda xc: xc[1] - x[ixb-1]}, # ibreak between last no spike and first spiking level
{'type': 'ineq', 'fun': lambda xc: x[ixb] - xc[1]}, # ibreak between last no spike and first spiking level
{'type': 'eq', 'fun': lambda xc: xc[2]}, # F1amp >= 0
{'type': 'ineq', 'fun': lambda xc: xc[3] - xc[2]}, # F2amp > F1amp (must be!)
{'type': 'ineq', 'fun': lambda xc: xc[4]},
)
bounds = ((0., yd[fbr-1]+5), np.sort([x[x0], x[x1]]),
(0., 2*yd[fbr]), (0., ymax*10.0), (0, 1e5))
# # parameters for FIGrowth 1: ['Fzero', 'Ibreak', 'F1amp', 'F2amp', 'Irate']
initpars = [0., ibreak0, yd[fbr], ymax*2, 0.01*np.max(np.diff(yd)/np.diff(x))]
func = 'FIGrowthExpBreak'
fitbreak0 = x[fbr]
f = Fitting.Fitting().fitfuncmap[func]
# now fit the full data set
(fpar, xf, yf, names) = Fitting.Fitting().FitRegion(np.array([1]), 0, x, yd, t0=fitbreak0, t1=x[dypos[-1]],
fitFunc=func, fitPars=initpars, bounds=bounds, constraints=cons, weights=None, #np.sqrt,
fixedPars=None, method=testMethod)
error = Fitting.Fitting().getFitErr()
self.FIKeys = f[6]
elif self.FIGrowth == 'FIGrowthExp': # FIGrowth is 2, Exponential from 0 rate
bounds = (np.sort([x[x0], x[x1]]),
(0., ymax*5.0), (0.0001, 1000.))
# # parameters for FIGrowth 2: [''Ibreak', 'F2amp', 'Irate']
fitbreak0 = ibreak0
if fitbreak0 > 0.:
fitbreak0 = 0.
initpars = [ibreak0, ymax/2., 0.001]
func = 'FIGrowthExp'
f = Fitting.Fitting().fitfuncmap[func]
# now fit the full data set
(fpar, xf, yf, names) = Fitting.Fitting().FitRegion(np.array([1]), 0, x, yd, t0=fitbreak0, t1=np.max(x),
fitFunc=func, fitPars=initpars, bounds=bounds,
fixedPars=None, method=testMethod)
error = Fitting.Fitting().getFitErr()
self.FIKeys = f[6]
imap = [-1, 0, -1, 1, 2]
elif self.FIGrowth == 'piecewiselinear3':
fitbreak0 = ibreak0
# print('ibreak0: ', ibreak0)
if fitbreak0 > 0.:
fitbreak0 = 0.
x1 = np.argwhere(yd > 0.)
initpars = (x[x1[0]-1], 0. , x[x1[0]+1], 1., 20., 100.)
if x[fbr] > 0:
xn = x[fbr]
else:
xn = 0
bounds = ((0., xn), # Ibreak forced to first spike level almost
(0., 20.), # Rate0 (y0)
(0., np.max(x)), # Ibreak1 (x1) # spread it out?
(0., 100.), # IRate1 (k1, k2, k3)
(0., 1000.), #IRate2
(0., 1000.), # Irate3
)
cons = ( {'type': 'ineq', 'fun': lambda x: x[0]},
{'type': 'ineq', 'fun': lambda x: x[1]},
{'type': 'ineq', 'fun': lambda x: x[2] - (x[0] + 0.05 + ibreak0)}, # ibreak1 > 50pA + ibreak0
{'type': 'ineq', 'fun': lambda x: x[3]},
{'type': 'ineq', 'fun': lambda x: x[4] - x[3]},
{'type': 'ineq', 'fun': lambda x: x[5] - x[4]/2.0},
)
func = 'piecewiselinear3'
f = Fitting.Fitting().fitfuncmap[func]
# now fit the full data set
(fpar, xf, yf, names) = Fitting.Fitting().FitRegion(np.array([1]), 0, x, yd, t0=fitbreak0, t1=np.max(x),
fitFunc=func, fitPars=initpars, bounds=bounds, constraints=cons,
fixedPars=None, method=testMethod)
error = Fitting.Fitting().getFitErr()
self.FIKeys = f[6]
elif self.FIGrowth == 'piecewiselinear3_ugh': # use piecewise linear, 3 segment fit
# parameters for pwl3 (piecewise linear...): ['Ibreak', 'Rate0', 'Ibreak1', 'Irate1', 'Irate2', 'Irate3']
fitbreak0 = ibreak0
if fitbreak0 > 0.:
fitbreak0 = 0.
x1 = np.argwhere(yd > 0.)
initpars = (x[x1[0]-1], 0. , x[x1[0]+1], 1., 20., 100.)
if x[fbr] > 0:
xn = x[fbr]
else:
xn = 0
bounds = ((0., xn), # Ibreak forced to first spike level almost
(0., 20.), # Rate0 (y0)
(0., np.max(x)), # Ibreak1 (x1) # spread it out?
(0., 100.), # IRate1 (k1, k2, k3)
(0., 1000.), #IRate2
(0., 1000.), # Irate3
)
cons = ( {'type': 'ineq', 'fun': lambda x: x[0]},
{'type': 'ineq', 'fun': lambda x: x[1]},
{'type': 'ineq', 'fun': lambda x: x[2] - (x[0] + 0.05 + ibreak0)}, # ibreak1 > 50pA + ibreak0
{'type': 'ineq', 'fun': lambda x: x[3]},
{'type': 'ineq', 'fun': lambda x: x[4] - x[3]},
{'type': 'ineq', 'fun': lambda x: x[5] - x[4]/2.0},
)
func = 'piecewiselinear3'
f = Fitting.Fitting().fitfuncmap[func]
# now fit the full data set
(fpar, xf, yf, names) = Fitting.Fitting().FitRegion(np.array([1]), 0, x, yd, t0=fitbreak0, t1=np.max(x),
fitFunc=func, fitPars=initpars, bounds=bounds, constraints=cons,
fixedPars=None, method=testMethod)
error = Fitting.Fitting().getFitErr()
self.FIKeys = f[6]
elif self.FIGrowth == 'FIGrowthPower':
# parameters for power (piecewise linear...): [c, s, 'd']
# data are only fit for the range over which the cell fires
fitbreak0 = ibreak0*1e9
if fitbreak0 > 0.:
fitbreak0 = 0.
ix1 = np.argwhere(yd > 0.) # find first point with spikes
xna = x*1e9
x1 = xna[ix1[0]][0]
initpars = (x1, 3., 0.5) #
bds = [(0., 500.), (0.01, 100.), (0.01, 100)]
# cons = ( {'type': 'ineq', 'fun': lambda x: x[0]},
# {'type': 'ineq', 'fun': lambda x: x[1]},
# {'type': 'ineq', 'fun': lambda x: x[2] - [x[0] + 50.]}, # ibreak1 > 100pA + ibreak0
# {'type': 'ineq', 'fun': lambda x: x[3]},
# {'type': 'ineq', 'fun': lambda x: x[4] - x[3]},
# {'type': 'ineq', 'fun': lambda x: x[4]*0.5 - x[5]},
# )
#
func = 'FIGrowthPower'
f = Fitting.Fitting().fitfuncmap[func]
# now fit the full data set
(fpar, xf, yf, names) = Fitting.Fitting().FitRegion(np.array([1]), 0, xna, yd, t0=fitbreak0, t1=np.max(xna[fpnt]),
fitFunc=func, fitPars=initpars, bounds=bds, constraints=None,
fixedPars=None, method=testMethod)
error = Fitting.Fitting().getFitErr()
self.FIKeys = f[6]
elif self.FIGrowth == 'fitOneOriginal':
pass
else:
raise ValueError('SpikeAnalysis: FIGrowth function %s is not known' % self.FIGrowth)
self.analysis_summary['FI_Growth'].append({'FunctionName': self.FIGrowth, 'function': func,
'names': names, 'error': error, 'parameters': fpar, 'fit': [np.array(xf)*1e-9, yf]})
| [
"numpy.mean",
"collections.OrderedDict",
"numpy.abs",
"numpy.fabs",
"numpy.where",
"numpy.sort",
"numpy.std",
"numpy.diff",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.argwhere",
"pprint.PrettyPrinter",
"numpy.min",
"numpy.argmin",
"numpy.gradient"
] | [((6089, 6102), 'numpy.zeros', 'np.zeros', (['ntr'], {}), '(ntr)\n', (6097, 6102), True, 'import numpy as np\n'), ((6122, 6135), 'numpy.zeros', 'np.zeros', (['ntr'], {}), '(ntr)\n', (6130, 6135), True, 'import numpy as np\n'), ((6156, 6169), 'numpy.zeros', 'np.zeros', (['ntr'], {}), '(ntr)\n', (6164, 6169), True, 'import numpy as np\n'), ((6183, 6196), 'numpy.zeros', 'np.zeros', (['ntr'], {}), '(ntr)\n', (6191, 6196), True, 'import numpy as np\n'), ((9152, 9168), 'numpy.where', 'np.where', (['(ar > 0)'], {}), '(ar > 0)\n', (9160, 9168), True, 'import numpy as np\n'), ((9513, 9543), 'numpy.where', 'np.where', (['(self.spikecount == 0)'], {}), '(self.spikecount == 0)\n', (9521, 9543), True, 'import numpy as np\n'), ((9640, 9687), 'numpy.array', 'np.array', (['[self.Clamps.values, self.spikecount]'], {}), '([self.Clamps.values, self.spikecount])\n', (9648, 9687), True, 'import numpy as np\n'), ((12769, 12805), 'numpy.argmin', 'np.argmin', (['(self.Clamps.time_base - t)'], {}), '(self.Clamps.time_base - t)\n', (12778, 12805), True, 'import numpy as np\n'), ((14944, 14957), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14955, 14957), False, 'from collections import OrderedDict\n'), ((14973, 14986), 'numpy.zeros', 'np.zeros', (['ntr'], {}), '(ntr)\n', (14981, 14986), True, 'import numpy as np\n'), ((15010, 15023), 'numpy.zeros', 'np.zeros', (['ntr'], {}), '(ntr)\n', (15018, 15023), True, 'import numpy as np\n'), ((16115, 16136), 'numpy.mean', 'np.mean', (['self.iHold_i'], {}), '(self.iHold_i)\n', (16122, 16136), True, 'import numpy as np\n'), ((25079, 25093), 'numpy.array', 'np.array', (['icmd'], {}), '(icmd)\n', (25087, 25093), True, 'import numpy as np\n'), ((25563, 25575), 'numpy.min', 'np.min', (['icmd'], {}), '(icmd)\n', (25569, 25575), True, 'import numpy as np\n'), ((7780, 7796), 'numpy.array', 'np.array', (['spikes'], {}), '(spikes)\n', (7788, 7796), True, 'import numpy as np\n'), ((9300, 9316), 'numpy.mean', 'np.mean', (['ar[iAR]'], {}), 
'(ar[iAR])\n', (9307, 9316), True, 'import numpy as np\n'), ((9563, 9592), 'numpy.where', 'np.where', (['(self.spikecount > 0)'], {}), '(self.spikecount > 0)\n', (9571, 9592), True, 'import numpy as np\n'), ((9734, 9757), 'numpy.max', 'np.max', (['self.spikecount'], {}), '(self.spikecount)\n', (9740, 9757), True, 'import numpy as np\n'), ((15782, 15795), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15793, 15795), False, 'from collections import OrderedDict\n'), ((16589, 16619), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (16609, 16619), False, 'import pprint\n'), ((18083, 18113), 'numpy.diff', 'np.diff', (['self.Clamps.traces[i]'], {}), '(self.Clamps.traces[i])\n', (18090, 18113), True, 'import numpy as np\n'), ((18721, 18762), 'numpy.argmin', 'np.argmin', (['self.Clamps.traces[i][km:kend]'], {}), '(self.Clamps.traces[i][km:kend])\n', (18730, 18762), True, 'import numpy as np\n'), ((20960, 20991), 'numpy.array', 'np.array', (['self.Clamps.traces[i]'], {}), '(self.Clamps.traces[i])\n', (20968, 20991), True, 'import numpy as np\n'), ((25127, 25142), 'numpy.argmin', 'np.argmin', (['icmd'], {}), '(icmd)\n', (25136, 25142), True, 'import numpy as np\n'), ((25602, 25627), 'numpy.abs', 'np.abs', (['(1.5 * imin - icmd)'], {}), '(1.5 * imin - icmd)\n', (25608, 25627), True, 'import numpy as np\n'), ((25654, 25687), 'numpy.abs', 'np.abs', (['(imin - self.Clamps.values)'], {}), '(imin - self.Clamps.values)\n', (25660, 25687), True, 'import numpy as np\n'), ((25719, 25759), 'numpy.abs', 'np.abs', (['(icmd[ia150] - self.Clamps.values)'], {}), '(icmd[ia150] - self.Clamps.values)\n', (25725, 25759), True, 'import numpy as np\n'), ((31261, 31271), 'numpy.max', 'np.max', (['yd'], {}), '(yd)\n', (31267, 31271), True, 'import numpy as np\n'), ((34609, 34623), 'numpy.argmin', 'np.argmin', (['err'], {}), '(err)\n', (34618, 34623), True, 'import numpy as np\n'), ((34978, 34988), 'numpy.max', 'np.max', (['yd'], {}), '(yd)\n', 
(34984, 34988), True, 'import numpy as np\n'), ((6601, 6632), 'numpy.array', 'np.array', (['self.Clamps.traces[i]'], {}), '(self.Clamps.traces[i])\n', (6609, 6632), True, 'import numpy as np\n'), ((11466, 11497), 'numpy.array', 'np.array', (['self.Clamps.traces[i]'], {}), '(self.Clamps.traces[i])\n', (11474, 11497), True, 'import numpy as np\n'), ((18615, 18636), 'numpy.argmin', 'np.argmin', (['dv[k:kend]'], {}), '(dv[k:kend])\n', (18624, 18636), True, 'import numpy as np\n'), ((19736, 19759), 'numpy.argmax', 'np.argmax', (['dv[kbegin:k]'], {}), '(dv[kbegin:k])\n', (19745, 19759), True, 'import numpy as np\n'), ((20009, 20042), 'numpy.fabs', 'np.fabs', (['(dv[kbegin:km] - begin_dV)'], {}), '(dv[kbegin:km] - begin_dV)\n', (20016, 20042), True, 'import numpy as np\n'), ((21059, 21132), 'numpy.fabs', 'np.fabs', (["(tr[thisspike['AP_beginIndex']:thisspike['AP_peakIndex']] - halfv)"], {}), "(tr[thisspike['AP_beginIndex']:thisspike['AP_peakIndex']] - halfv)\n", (21066, 21132), True, 'import numpy as np\n'), ((21210, 21281), 'numpy.fabs', 'np.fabs', (["(tr[thisspike['AP_peakIndex']:thisspike['AP_endIndex']] - halfv)"], {}), "(tr[thisspike['AP_peakIndex']:thisspike['AP_endIndex']] - halfv)\n", (21217, 21281), True, 'import numpy as np\n'), ((23882, 23945), 'numpy.max', 'np.max', (["tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']]"], {}), "(tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']])\n", (23888, 23945), True, 'import numpy as np\n'), ((23970, 24036), 'numpy.argmax', 'np.argmax', (["tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']]"], {}), "(tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']])\n", (23979, 24036), True, 'import numpy as np\n'), ((31464, 31482), 'numpy.gradient', 'np.gradient', (['yd', 'x'], {}), '(yd, x)\n', (31475, 31482), True, 'import numpy as np\n'), ((31939, 31949), 'numpy.max', 'np.max', (['yd'], {}), '(yd)\n', (31945, 31949), True, 'import numpy as np\n'), ((31965, 31974), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', 
(31971, 31974), True, 'import numpy as np\n'), ((32333, 32371), 'numpy.where', 'np.where', (['((yd[:-1] > 0) & (yd[1:] > 0))'], {}), '((yd[:-1] > 0) & (yd[1:] > 0))\n', (32341, 32371), True, 'import numpy as np\n'), ((35114, 35124), 'numpy.max', 'np.max', (['yd'], {}), '(yd)\n', (35120, 35124), True, 'import numpy as np\n'), ((35692, 35702), 'numpy.max', 'np.max', (['yd'], {}), '(yd)\n', (35698, 35702), True, 'import numpy as np\n'), ((35718, 35727), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (35724, 35727), True, 'import numpy as np\n'), ((36126, 36164), 'numpy.where', 'np.where', (['((yd[:-1] > 0) & (yd[1:] > 0))'], {}), '((yd[:-1] > 0) & (yd[1:] > 0))\n', (36134, 36164), True, 'import numpy as np\n'), ((37562, 37585), 'numpy.sort', 'np.sort', (['[x[x0], x[x1]]'], {}), '([x[x0], x[x1]])\n', (37569, 37585), True, 'import numpy as np\n'), ((38049, 38062), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (38057, 38062), True, 'import numpy as np\n'), ((7947, 7981), 'numpy.fabs', 'np.fabs', (['(self.Clamps.time_base - t)'], {}), '(self.Clamps.time_base - t)\n', (7954, 7981), True, 'import numpy as np\n'), ((8682, 8736), 'numpy.where', 'np.where', (['(spikes - self.Clamps.tstart < self.ar_window)'], {}), '(spikes - self.Clamps.tstart < self.ar_window)\n', (8690, 8736), True, 'import numpy as np\n'), ((15334, 15362), 'numpy.array', 'np.array', (['self.Clamps.values'], {}), '(self.Clamps.values)\n', (15342, 15362), True, 'import numpy as np\n'), ((31505, 31526), 'numpy.where', 'np.where', (['(ydiff < 0.0)'], {}), '(ydiff < 0.0)\n', (31513, 31526), True, 'import numpy as np\n'), ((32660, 32670), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (32667, 32670), True, 'import numpy as np\n'), ((38492, 38515), 'numpy.sort', 'np.sort', (['[x[x0], x[x1]]'], {}), '([x[x0], x[x1]])\n', (38499, 38515), True, 'import numpy as np\n'), ((38968, 38981), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (38976, 38981), True, 'import numpy as np\n'), ((39510, 39531), 
'numpy.argwhere', 'np.argwhere', (['(yd > 0.0)'], {}), '(yd > 0.0)\n', (39521, 39531), True, 'import numpy as np\n'), ((8245, 8260), 'numpy.diff', 'np.diff', (['spikes'], {}), '(spikes)\n', (8252, 8260), True, 'import numpy as np\n'), ((11039, 11068), 'numpy.max', 'np.max', (['self.Clamps.time_base'], {}), '(self.Clamps.time_base)\n', (11045, 11068), True, 'import numpy as np\n'), ((22094, 22104), 'numpy.std', 'np.std', (['tr'], {}), '(tr)\n', (22100, 22104), True, 'import numpy as np\n'), ((22701, 22711), 'numpy.std', 'np.std', (['tr'], {}), '(tr)\n', (22707, 22711), True, 'import numpy as np\n'), ((33626, 33649), 'numpy.sort', 'np.sort', (['[x[x0], x[x1]]'], {}), '([x[x0], x[x1]])\n', (33633, 33649), True, 'import numpy as np\n'), ((33939, 33956), 'numpy.min', 'np.min', (['bounds[1]'], {}), '(bounds[1])\n', (33945, 33956), True, 'import numpy as np\n'), ((33986, 34004), 'numpy.mean', 'np.mean', (['bounds[3]'], {}), '(bounds[3])\n', (33993, 34004), True, 'import numpy as np\n'), ((34006, 34024), 'numpy.mean', 'np.mean', (['bounds[4]'], {}), '(bounds[4])\n', (34013, 34024), True, 'import numpy as np\n'), ((34181, 34194), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (34189, 34194), True, 'import numpy as np\n'), ((39010, 39019), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (39016, 39019), True, 'import numpy as np\n'), ((40740, 40753), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (40748, 40753), True, 'import numpy as np\n'), ((41380, 41401), 'numpy.argwhere', 'np.argwhere', (['(yd > 0.0)'], {}), '(yd > 0.0)\n', (41391, 41401), True, 'import numpy as np\n'), ((44785, 44797), 'numpy.array', 'np.array', (['xf'], {}), '(xf)\n', (44793, 44797), True, 'import numpy as np\n'), ((34223, 34232), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (34229, 34232), True, 'import numpy as np\n'), ((37800, 37811), 'numpy.diff', 'np.diff', (['yd'], {}), '(yd)\n', (37807, 37811), True, 'import numpy as np\n'), ((37812, 37822), 'numpy.diff', 'np.diff', (['x'], {}), 
'(x)\n', (37819, 37822), True, 'import numpy as np\n'), ((39847, 39856), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (39853, 39856), True, 'import numpy as np\n'), ((40782, 40791), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (40788, 40791), True, 'import numpy as np\n'), ((42610, 42623), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (42618, 42623), True, 'import numpy as np\n'), ((43222, 43243), 'numpy.argwhere', 'np.argwhere', (['(yd > 0.0)'], {}), '(yd > 0.0)\n', (43233, 43243), True, 'import numpy as np\n'), ((8942, 8959), 'numpy.diff', 'np.diff', (['spx[-2:]'], {}), '(spx[-2:])\n', (8949, 8959), True, 'import numpy as np\n'), ((41717, 41726), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (41723, 41726), True, 'import numpy as np\n'), ((42652, 42661), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (42658, 42661), True, 'import numpy as np\n'), ((44120, 44133), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (44128, 44133), True, 'import numpy as np\n'), ((44164, 44181), 'numpy.max', 'np.max', (['xna[fpnt]'], {}), '(xna[fpnt])\n', (44170, 44181), True, 'import numpy as np\n')] |
from pathlib import Path
import PIL.Image as PImage
import tensorflow as tf
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array
from collections import Counter
class CNN_Model:
    """Directory-trained CNN image classifier built on Keras/TensorFlow.

    Typical workflow: create_ds() -> train_model() -> save_weights(),
    then predict_img() for single-image inference.
    """

    def __init__(self):
        # Latched to True once model_config() has built the network, so
        # repeated create_ds() calls do not rebuild it.
        self.configSet = False

    def create_ds(self, directory):
        """Create train/validation generators from `directory`.

        Expects `<directory>/train` and `<directory>/test` sub-folders,
        each containing one sub-directory per class. Builds and compiles
        the network on the first call.
        """
        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           vertical_flip=False,
                                           horizontal_flip=True)
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        self.train_gen = train_datagen.flow_from_directory('{}/train'.format(directory),
                                                            target_size=(150, 150),
                                                            batch_size=32,
                                                            class_mode='categorical')
        self.val_gen = test_datagen.flow_from_directory('{}/test'.format(directory),
                                                        target_size=(150, 150),
                                                        batch_size=32,
                                                        class_mode='categorical')
        self.classes = list(self.train_gen.class_indices.keys())
        # Per-class sample counts, keyed by class index.
        self.train_num = Counter(self.train_gen.classes)
        self.test_num = Counter(self.val_gen.classes)
        # BUG FIX: the original only assigned configSet = True inside an
        # else-branch that could never run while the flag was False, so the
        # flag stayed False forever and the network (plus its saved weights)
        # was rebuilt on every call. Configure once, then latch the flag.
        if not self.configSet:
            self.model_config()
            self.configSet = True

    def model_config(self):
        """Build and compile the CNN; resume from model.h5 if present.

        Must run after create_ds(): the output layer width is taken from
        self.train_gen.num_classes.
        """
        # Initialising the CNN
        self.cnn = tf.keras.models.Sequential()
        # Two convolution + max-pooling stages
        self.cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[150, 150, 3]))
        self.cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
        self.cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
        self.cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
        # Flatten, then a fully connected hidden layer
        self.cnn.add(tf.keras.layers.Flatten())
        self.cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))
        # Output layer: one softmax unit per class
        self.cnn.add(tf.keras.layers.Dense(units=self.train_gen.num_classes, activation='softmax'))
        # Loading model weights saved by a previous run, if any
        if Path('model.h5').exists():
            self.cnn.load_weights('model.h5')
        # Compiling the CNN
        self.cnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    def train_model(self):
        """Fit the network for one epoch on the prepared generators."""
        self.info = self.cnn.fit(self.train_gen, validation_data=self.val_gen, epochs=1,
                                 workers=4)

    def save_weights(self):
        """Persist the current network weights to model.h5."""
        self.cnn.save_weights('model.h5')

    def predict_img(self, img):
        """Classify a single PIL image.

        Returns (class_name, confidence_percent).
        """
        # BUG FIX: PImage.ANTIALIAS was removed in Pillow 10; LANCZOS is the
        # same filter and has been available since Pillow 2.7.
        img_tensor = img.resize((150, 150), PImage.LANCZOS)
        img_tensor = img_to_array(img_tensor)
        img_tensor = np.expand_dims(img_tensor, axis=0)
        img_tensor /= 255.
        prediction = self.cnn.predict(img_tensor)
        return self.classes[np.argmax(prediction)], 100 * np.max(prediction)
| [
"keras.preprocessing.image.img_to_array",
"tensorflow.keras.layers.Conv2D",
"pathlib.Path",
"numpy.argmax",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.max",
"collections.Counter",
"tensorflow.keras.layers.Dense",
"numpy.expand_dims",
"tensorflow.keras.layers.Flatten",
"tensorflow.ker... | [((370, 487), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'vertical_flip': '(False)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n vertical_flip=False, horizontal_flip=True)\n', (388, 487), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((679, 716), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (697, 716), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1432, 1463), 'collections.Counter', 'Counter', (['self.train_gen.classes'], {}), '(self.train_gen.classes)\n', (1439, 1463), False, 'from collections import Counter\n'), ((1488, 1517), 'collections.Counter', 'Counter', (['self.val_gen.classes'], {}), '(self.val_gen.classes)\n', (1495, 1517), False, 'from collections import Counter\n'), ((1709, 1737), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (1735, 1737), True, 'import tensorflow as tf\n'), ((3103, 3127), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img_tensor'], {}), '(img_tensor)\n', (3115, 3127), False, 'from keras.preprocessing.image import img_to_array\n'), ((3149, 3183), 'numpy.expand_dims', 'np.expand_dims', (['img_tensor'], {'axis': '(0)'}), '(img_tensor, axis=0)\n', (3163, 3183), True, 'import numpy as np\n'), ((1791, 1890), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'input_shape': '[150, 150, 3]'}), "(filters=32, kernel_size=3, activation='relu',\n input_shape=[150, 150, 3])\n", (1813, 1890), True, 'import tensorflow as tf\n'), ((1937, 1986), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (1962, 1986), True, 'import 
tensorflow as tf\n'), ((2047, 2115), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, activation='relu')\n", (2069, 2115), True, 'import tensorflow as tf\n'), ((2138, 2187), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (2163, 2187), True, 'import tensorflow as tf\n'), ((2241, 2266), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (2264, 2266), True, 'import tensorflow as tf\n'), ((2325, 2376), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(128)', 'activation': '"""relu"""'}), "(units=128, activation='relu')\n", (2346, 2376), True, 'import tensorflow as tf\n'), ((2432, 2509), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.train_gen.num_classes', 'activation': '"""softmax"""'}), "(units=self.train_gen.num_classes, activation='softmax')\n", (2453, 2509), True, 'import tensorflow as tf\n'), ((2555, 2571), 'pathlib.Path', 'Path', (['"""model.h5"""'], {}), "('model.h5')\n", (2559, 2571), False, 'from pathlib import Path\n'), ((3290, 3311), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (3299, 3311), True, 'import numpy as np\n'), ((3320, 3338), 'numpy.max', 'np.max', (['prediction'], {}), '(prediction)\n', (3326, 3338), True, 'import numpy as np\n')] |
"""
Mask R-CNN
Train on the nuclei segmentation dataset from the
Kaggle 2018 Data Science Bowl
https://www.kaggle.com/c/data-science-bowl-2018/
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from ImageNet weights
python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=imagenet
# Train a new model starting from specific weights file
python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=/path/to/weights.h5
# Resume training a model that you had trained earlier
python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=last
# Generate submission file
python3 nucleus.py detect --dataset=/path/to/dataset --subset=train --weights=<last or /path/to/weights.h5>
"""
# Set matplotlib backend
# This has to be done before other importa that might
# set it, but only if we're running in script mode
# rather than being imported.
# if __name__ == '__main__':
# import matplotlib
# # Agg backend runs without a display
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
import os
import sys
import json
import datetime
import numpy as np
import skimage.io
from imgaug import augmenters as iaa
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import time
import concurrent.futures
# Root directory of the project
ROOT_DIR = os.getcwd()
if ROOT_DIR.endswith("samples/nucleus"):
    # Go up two levels to the repo root
    ROOT_DIR = os.path.dirname(os.path.dirname(ROOT_DIR))
print (ROOT_DIR)
# NOTE(review): machine-specific absolute path — adjust for your environment.
DATASET_DIR = '/home/mary/AI/data/nucleus/data-science-bowl-2018'
# Import Mask RCNN (local copy living at the repo root)
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
from mrcnn import model as modellib
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.model import log
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
# Results directory
# Save submission files here
RESULTS_DIR = os.path.join(ROOT_DIR, "results/nucleus/")
# The dataset doesn't have a standard train/val split, so I picked
# a variety of images to serve as a validation set.
# NOTE(review): entries reading "<KEY>" are redaction placeholders and will
# never match a real image directory — restore the original image IDs.
VAL_IMAGE_IDS = [
    "0c2550a23b8a0f29a7575de8c61690d3c31bc897dd5ba66caec201d201a278c2",
    "92f31f591929a30e4309ab75185c96ff4314ce0a7ead2ed2c2171897ad1da0c7",
    "1e488c42eb1a54a3e8412b1f12cde530f950f238d71078f2ede6a85a02168e1f",
    "c901794d1a421d52e5734500c0a2a8ca84651fb93b19cec2f411855e70cae339",
    "8e507d58f4c27cd2a82bee79fe27b069befd62a46fdaed20970a95a2ba819c7b",
    "<KEY>",
    "da5f98f2b8a64eee735a398de48ed42cd31bf17a6063db46a9e0783ac13cd844",
    "<KEY>",
    "<KEY>",
    "97126a9791f0c1176e4563ad679a301dac27c59011f579e808bbd6e9f4cd1034",
    "e81c758e1ca177b0942ecad62cf8d321ffc315376135bcbed3df932a6e5b40c0",
    "<KEY>",
    "<KEY>",
    "3ab9cab6212fabd723a2c5a1949c2ded19980398b56e6080978e796f45cbbc90",
    "ebc18868864ad075548cc1784f4f9a237bb98335f9645ee727dac8332a3e3716",
    "bb61fc17daf8bdd4e16fdcf50137a8d7762bec486ede9249d92e511fcb693676",
    "e1bcb583985325d0ef5f3ef52957d0371c96d4af767b13e48102bca9d5351a9b",
    "<KEY>",
    "<KEY>",
    "f4c4db3df4ff0de90f44b027fc2e28c16bf7e5c75ea75b0a9762bbb7ac86e7a3",
    "<KEY>",
    "<KEY>",
    "a4c44fc5f5bf213e2be6091ccaed49d8bf039d78f6fbd9c4d7b7428cfcb2eda4",
    "cab4875269f44a701c5e58190a1d2f6fcb577ea79d842522dcab20ccb39b7ad2",
    "8ecdb93582b2d5270457b36651b62776256ade3aaa2d7432ae65c14f07432d49",
]
############################################################
# Configurations
############################################################
class NucleusConfig(Config):
    """Configuration for training on the nucleus segmentation dataset."""
    # Give the configuration a recognizable name
    NAME = "nucleus"
    # Adjust depending on your GPU memory
    IMAGES_PER_GPU = 6
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # Background + nucleus
    # Number of training and validation steps per epoch
    # NOTE(review): 657 is presumably the stage1_train image count — confirm.
    STEPS_PER_EPOCH = (657 - len(VAL_IMAGE_IDS)) // IMAGES_PER_GPU
    VALIDATION_STEPS = max(1, len(VAL_IMAGE_IDS) // IMAGES_PER_GPU)
    # Don't exclude based on confidence. Since we have two classes
    # then 0.5 is the minimum anyway as it picks between nucleus and BG
    DETECTION_MIN_CONFIDENCE = 0
    # Backbone network architecture
    # Supported values are: resnet50, resnet101
    BACKBONE = "resnet50"
    # Input image resizing
    # Random crops of size 512x512
    IMAGE_RESIZE_MODE = "crop"
    IMAGE_MIN_DIM = 512
    IMAGE_MAX_DIM = 512
    IMAGE_MIN_SCALE = 2.0
    # Length of square anchor side in pixels
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
    # ROIs kept after non-maximum suppression (training and inference)
    POST_NMS_ROIS_TRAINING = 1000
    POST_NMS_ROIS_INFERENCE = 2000
    # Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
    RPN_NMS_THRESHOLD = 0.9
    # How many anchors per image to use for RPN training
    RPN_TRAIN_ANCHORS_PER_IMAGE = 64
    # Image mean (RGB)
    MEAN_PIXEL = np.array([43.53, 39.56, 48.22])
    # If enabled, resizes instance masks to a smaller size to reduce
    # memory load. Recommended when using high-resolution images.
    USE_MINI_MASK = True
    MINI_MASK_SHAPE = (56, 56)  # (height, width) of the mini-mask
    # Number of ROIs per image to feed to classifier/mask heads
    # The Mask RCNN paper uses 512 but often the RPN doesn't generate
    # enough positive proposals to fill this and keep a positive:negative
    # ratio of 1:3. You can increase the number of proposals by adjusting
    # the RPN NMS threshold.
    TRAIN_ROIS_PER_IMAGE = 128
    # Maximum number of ground truth instances to use in one image
    MAX_GT_INSTANCES = 200
    # Max number of final detections per image
    DETECTION_MAX_INSTANCES = 400
class NucleusInferenceConfig(NucleusConfig):
    """Inference configuration: one image per batch, images padded not cropped."""
    # Set batch size to 1 to run one image at a time
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Don't crop-resize images for inferencing; pad to a multiple of 64 instead
    IMAGE_RESIZE_MODE = "pad64"
    # Non-max suppression threshold to filter RPN proposals.
    # Looser than the training value (0.9) to pass more proposals through.
    RPN_NMS_THRESHOLD = 0.7
#=================================2
class NoResizeConfig(NucleusConfig):
    """Config variant that feeds images at their original size (no resizing)."""
    IMAGE_RESIZE_MODE = "none"
############################################################
# Dataset
############################################################
class NucleusDataset(utils.Dataset):
    """Dataset wrapper for the Kaggle 2018 Data Science Bowl nuclei images."""

    def load_nucleus(self, dataset_dir, subset):
        """Load a subset of the nuclei dataset.
        dataset_dir: Root directory of the dataset
        subset: Subset to load. Either the name of the sub-directory,
                such as stage1_train, stage1_test, ...etc. or, one of:
                * train: stage1_train excluding validation images
                * val: validation images from VAL_IMAGE_IDS
        """
        # Add classes. We have one class.
        # Naming the dataset nucleus, and the class nucleus
        self.add_class("nucleus", 1, "nucleus")

        # Which subset?
        # "val": use hard-coded list above
        # "train": use data from stage1_train minus the hard-coded list above
        # else: use the data from the specified sub-directory
        assert subset in ["train", "val", "stage1_train", "stage1_test", "stage2_test"]
        subset_dir = "stage1_train" if subset in ["train", "val"] else subset
        dataset_dir = os.path.join(dataset_dir, subset_dir)
        if subset == "val":
            image_ids = VAL_IMAGE_IDS
        else:
            # Get image ids from directory names
            image_ids = next(os.walk(dataset_dir))[1]
            if subset == "train":
                image_ids = list(set(image_ids) - set(VAL_IMAGE_IDS))

        # Add images
        for image_id in image_ids:
            self.add_image(
                "nucleus",
                image_id=image_id,
                path=os.path.join(dataset_dir, image_id, "images/{}.png".format(image_id)))

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
         masks: A bool array of shape [height, width, instance count] with
             one mask per instance.
         class_ids: a 1D array of class IDs of the instance masks.
        """
        info = self.image_info[image_id]
        # Get mask directory from image path; each instance has its own
        # single-channel .png under <image dir>/masks/
        mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), "masks")

        # Read mask files from .png image
        mask = []
        for f in next(os.walk(mask_dir))[2]:
            if f.endswith(".png"):
                # BUG FIX: np.bool was removed in NumPy 1.24; the builtin
                # bool is the equivalent dtype.
                m = skimage.io.imread(os.path.join(mask_dir, f)).astype(bool)
                mask.append(m)
        mask = np.stack(mask, axis=-1)
        # Return mask, and array of class IDs of each instance. Since we have
        # one class ID, we return an array of ones
        return mask, np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "nucleus":
            return info["id"]
        else:
            # BUG FIX: super(self.__class__, ...) recurses forever if this
            # class is ever subclassed, and the original dropped the base
            # class's return value. Delegate explicitly and return it.
            return super(NucleusDataset, self).image_reference(image_id)
############################################################
# Training
############################################################
def train(model, dataset_dir, subset):
    """Train `model` on the nucleus dataset.

    model: a Mask R-CNN model in training mode.
    dataset_dir: root directory of the dataset.
    subset: sub-directory / split name to train on.
    """
    # Training split.
    train_set = NucleusDataset()
    train_set.load_nucleus(dataset_dir, subset)
    train_set.prepare()

    # Validation split (the hard-coded VAL_IMAGE_IDS).
    val_set = NucleusDataset()
    val_set.load_nucleus(dataset_dir, "val")
    val_set.prepare()

    # Randomly apply up to two of these augmenters per image.
    # http://imgaug.readthedocs.io/en/latest/source/augmenters.html
    right_angle_rotation = iaa.OneOf([iaa.Affine(rotate=90),
                                      iaa.Affine(rotate=180),
                                      iaa.Affine(rotate=270)])
    augmentation = iaa.SomeOf((0, 2), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        right_angle_rotation,
        iaa.Multiply((0.8, 1.5)),
        iaa.GaussianBlur(sigma=(0.0, 5.0))
    ])

    # *** This training schedule is an example. Update to your needs ***
    # Two-stage schedule: warm up the randomly-initialized heads first,
    # then fine-tune every layer.
    print("Train network heads")
    model.train(train_set, val_set,
                learning_rate=config.LEARNING_RATE,
                epochs=20,
                augmentation=augmentation,
                layers='heads')

    print("Train all layers")
    model.train(train_set, val_set,
                learning_rate=config.LEARNING_RATE,
                epochs=40,
                augmentation=augmentation,
                layers='all')
############################################################
# RLE Encoding
############################################################
def rle_encode(mask):
    """Encodes a mask in Run Length Encoding (RLE).

    Returns a string of space-separated (start, length) pairs with
    1-based starts, using column-major (Fortran) pixel order.
    """
    assert mask.ndim == 2, "Mask must be of shape [Height, Width]"
    # Column-major flattening matches the submission format.
    flat = mask.T.flatten()
    # Zero-pad both ends so runs touching the borders are detected.
    padded = np.concatenate([[0], flat, [0]])
    # Transition points: diff is +1 entering a run, -1 leaving it.
    # +1 converts to 1-based indices.
    edges = np.flatnonzero(np.diff(padded)) + 1
    pairs = edges.reshape(-1, 2)
    # Replace each run's end index with its length.
    pairs[:, 1] -= pairs[:, 0]
    return " ".join(str(v) for v in pairs.flatten())
def rle_decode(rle, shape):
    """Decodes an RLE encoded list of space separated numbers.

    Inverse of rle_encode(): `rle` holds 1-based (start, length) pairs in
    column-major order; returns a boolean mask of `shape` = [height, width].
    """
    rle = list(map(int, rle.split()))
    rle = np.array(rle, dtype=np.int32).reshape([-1, 2])
    # (start, length) -> (start, end), then shift to 0-based indices.
    rle[:, 1] += rle[:, 0]
    rle -= 1
    # BUG FIX: the `np.bool` alias was deprecated in NumPy 1.20 and removed
    # in 1.24; the builtin `bool` is the documented replacement.
    mask = np.zeros([shape[0] * shape[1]], dtype=bool)
    for s, e in rle:
        assert 0 <= s < mask.shape[0]
        assert 1 <= e <= mask.shape[0], "shape: {} s {} e {}".format(shape, s, e)
        mask[s:e] = 1
    # Undo the column-major flattening: reshape as [W, H] then transpose.
    mask = mask.reshape([shape[1], shape[0]]).T
    return mask
def mask_to_rle(image_id, mask, scores):
    "Encodes instance masks to submission format."
    assert mask.ndim == 3, "Mask must be [H, W, count]"
    # An image with no detections becomes a line with the image ID only.
    if mask.shape[-1] == 0:
        return "{},".format(image_id)
    # Resolve overlaps: give each instance a 1-based rank by descending
    # score, then keep only the highest rank at every pixel.
    order = np.argsort(scores)[::-1] + 1  # 1-based descending
    mask = np.max(mask * np.reshape(order, [1, 1, -1]), -1)
    # Emit one RLE line per surviving instance.
    lines = []
    for label in order:
        instance = np.where(mask == label, 1, 0)
        if instance.sum() == 0.0:
            # Instance fully occluded by higher-scoring ones.
            continue
        lines.append("{}, {}".format(image_id, rle_encode(instance)))
    return "\n".join(lines)
############################################################
# Detection
############################################################
def detect(model, dataset_dir, subset):
    """Run detection on images in the given directory.

    Writes a submission CSV plus one prediction image per input into a
    timestamped sub-directory of RESULTS_DIR.
    """
    print("Running on {}".format(dataset_dir))
    # Each run gets a fresh timestamped results directory.
    if not os.path.exists(RESULTS_DIR):
        os.makedirs(RESULTS_DIR)
    submit_dir = os.path.join(
        RESULTS_DIR, "submit_{:%Y%m%dT%H%M%S}".format(datetime.datetime.now()))
    os.makedirs(submit_dir)
    # Read dataset
    dataset = NucleusDataset()
    dataset.load_nucleus(dataset_dir, subset)
    dataset.prepare()
    # One multi-line RLE block per image.
    submission = []
    for image_id in dataset.image_ids:
        image = dataset.load_image(image_id)
        # Single-image batch; keep the first (only) result.
        result = model.detect([image], verbose=0)[0]
        source_id = dataset.image_info[image_id]["id"]
        submission.append(mask_to_rle(source_id, result["masks"], result["scores"]))
        # Save a visualization of the predictions next to the CSV.
        visualize.display_instances(
            image, result['rois'], result['masks'], result['class_ids'],
            dataset.class_names, result['scores'],
            show_bbox=False, show_mask=False,
            title="Predictions")
        plt.savefig("{}/{}.png".format(submit_dir, dataset.image_info[image_id]["id"]))
    # Save to csv file
    submission = "ImageId,EncodedPixels\n" + "\n".join(submission)
    file_path = os.path.join(submit_dir, "submit.csv")
    with open(file_path, "w") as f:
        f.write(submission)
    print("Saved to ", submit_dir)
def show_image_size(stats):
    """Print height/width/color statistics and plot size histograms.

    `stats` is a list of dicts as produced by image_stats (keys 'shape'
    and 'color').
    """
    shapes = np.array([s['shape'] for s in stats])
    colors = np.array([s['color'] for s in stats])
    heights, widths = shapes[:, 0], shapes[:, 1]
    print("Image Count: ", shapes.shape[0])
    print("Height mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f}".format(
        np.mean(heights), np.median(heights), np.min(heights), np.max(heights)))
    print("Width mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f}".format(
        np.mean(widths), np.median(widths), np.min(widths), np.max(widths)))
    print("Color mean (RGB): {:.2f} {:.2f} {:.2f}".format(*np.mean(colors, axis=0)))
    # Histograms: each dimension separately, then a joint 2D histogram.
    fig, ax = plt.subplots(1, 3, figsize=(16, 4))
    ax[0].set_title("Height")
    ax[0].hist(heights, bins=20)
    ax[1].set_title("Width")
    ax[1].hist(widths, bins=20)
    ax[2].set_title("Height & Width")
    ax[2].hist2d(widths, heights, bins=10, cmap="Blues")
def dataset():
    """Explore the dataset: list classes, display sample masks, and plot
    per-image and per-nucleus size statistics.

    NOTE(review): the source's indentation was lost; the nesting of
    image_stats and the statistics code inside this function is
    reconstructed from their use of the local `dataset` — confirm
    against the original file.
    """
    config = NoResizeConfig()
    # Load dataset
    dataset = NucleusDataset()
    # The subset is the name of the sub-directory, such as stage1_train,
    # stage1_test, ...etc. You can also use these special values:
    # train: loads stage1_train but excludes validation images
    # val: loads validation images from stage1_train. For a list
    # of validation images see nucleus.py
    dataset.load_nucleus(DATASET_DIR, subset="train")
    # Must call before using the dataset
    dataset.prepare()
    print("Image Count: {}".format(len(dataset.image_ids)))
    print("Class Count: {}".format(dataset.num_classes))
    for i, info in enumerate(dataset.class_info):
        print("{:3}. {:50}".format(i, info['name']))
    # Display the top masks of a few random images.
    image_ids = np.random.choice(dataset.image_ids, 4)
    for image_id in image_ids:
        image = dataset.load_image(image_id)
        mask, class_ids = dataset.load_mask(image_id)
        visualize.display_top_masks(image, mask, class_ids, dataset.class_names, limit=1)
    # Example of loading image by id------------------------------------------------
    source_id = "ed5be4b63e9506ad64660dd92a098ffcc0325195298c13c815a73773f1efc279"
    # Map source ID to Dataset image_id
    # Notice the nucleus prefix: it's the name given to the dataset in NucleusDataset
    image_id = dataset.image_from_source_map["nucleus.{}".format(source_id)]
    # Load and display
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, use_mini_mask=False)
    log("molded_image", image)
    log("mask", mask)
    visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names,
                                show_bbox=True)

    def image_stats(image_id):
        """Returns a dict of stats for one image."""
        image = dataset.load_image(image_id)
        mask, _ = dataset.load_mask(image_id)
        bbox = utils.extract_bboxes(mask)
        # Sanity check
        assert mask.shape[:2] == image.shape[:2]
        # Return stats dict
        return {
            "id": image_id,
            "shape": list(image.shape),
            "bbox": [[b[2] - b[0], b[3] - b[1]]
                     for b in bbox
                     # Uncomment to exclude nuclei with 1 pixel width
                     # or height (often on edges)
                     # if b[2] - b[0] > 1 and b[3] - b[1] > 1
                     ],
            "color": np.mean(image, axis=(0, 1)),
        }

    # load data time -----------------------------------------------------------
    # Stats collection is I/O bound, so a thread pool speeds it up.
    t_start = time.time()
    with concurrent.futures.ThreadPoolExecutor() as e:
        stats = list(e.map(image_stats, dataset.image_ids))
    t_total = time.time() - t_start
    print("Total time: {:.1f} seconds".format(t_total))
    show_image_size(stats)
    # Bucket images into three area bands for the histograms below.
    image_area_bins = [256 ** 2, 600 ** 2, 1300 ** 2]
    # get image area----------------------------------------------------
    print("Nuclei/Image")
    fig, ax = plt.subplots(1, len(image_area_bins), figsize=(16, 4))
    area_threshold = 0
    for i, image_area in enumerate(image_area_bins):
        # Count nuclei only for images whose area falls in this band.
        nuclei_per_image = np.array([len(s['bbox'])
                                     for s in stats
                                     if area_threshold < (s['shape'][0] * s['shape'][1]) <= image_area])
        area_threshold = image_area
        if len(nuclei_per_image) == 0:
            print("Image area <= {:4}**2: None".format(np.sqrt(image_area)))
            continue
        print("Image area <= {:4.0f}**2: mean: {:.1f} median: {:.1f} min: {:.1f} max: {:.1f}".format(
            np.sqrt(image_area), nuclei_per_image.mean(), np.median(nuclei_per_image),
            nuclei_per_image.min(), nuclei_per_image.max()))
        ax[i].set_title("Image Area <= {:4}**2".format(np.sqrt(image_area)))
        _ = ax[i].hist(nuclei_per_image, bins=10)
    # get nucleus area
    fig, ax = plt.subplots(1, len(image_area_bins), figsize=(16, 4))
    area_threshold = 0
    for i, image_area in enumerate(image_area_bins):
        # Collect the (height, width) of every nucleus in this area band.
        nucleus_shape = np.array([
            b
            for s in stats if area_threshold < (s['shape'][0] * s['shape'][1]) <= image_area
            for b in s['bbox']])
        nucleus_area = nucleus_shape[:, 0] * nucleus_shape[:, 1]
        area_threshold = image_area
        print("\nImage Area <= {:.0f}**2".format(np.sqrt(image_area)))
        print(" Total Nuclei: ", nucleus_shape.shape[0])
        print(" Nucleus Height. mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f}".format(
            np.mean(nucleus_shape[:, 0]), np.median(nucleus_shape[:, 0]),
            np.min(nucleus_shape[:, 0]), np.max(nucleus_shape[:, 0])))
        print(" Nucleus Width. mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f}".format(
            np.mean(nucleus_shape[:, 1]), np.median(nucleus_shape[:, 1]),
            np.min(nucleus_shape[:, 1]), np.max(nucleus_shape[:, 1])))
        print(" Nucleus Area. mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f}".format(
            np.mean(nucleus_area), np.median(nucleus_area),
            np.min(nucleus_area), np.max(nucleus_area)))
        # Show 2D histogram
        _ = ax[i].hist2d(nucleus_shape[:, 1], nucleus_shape[:, 0], bins=20, cmap="Blues")
def mini_mask():
    """Demo of mask loading/resizing: show a random image with its full
    masks, then with augmented/mini masks and their restored versions."""
    config = NoResizeConfig()  # not resize original image
    # Load dataset
    dataset = NucleusDataset()
    dataset.load_nucleus(DATASET_DIR, subset="train")
    # Must call before using the dataset
    dataset.prepare()
    # Load random image and mask.
    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    # Here actually no resize was done since NoResizeConfig(), just display original image
    image, window, scale, padding, _ = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding)
    # Compute Bounding box
    bbox = utils.extract_bboxes(mask)
    # Display image and additional stats
    print("image_id: ", image_id, dataset.image_reference(image_id))
    print("Original shape: ", original_shape)
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances
    visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
    # image_id = np.random.choice(dataset.image_ids, 1)[0]
    # Reload the same image through the full GT pipeline (no mini masks).
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, use_mini_mask=False)
    log("image", image)
    log("image_meta", image_meta)
    log("class_ids", class_ids)
    log("bbox", bbox)
    log("mask", mask)
    # Show the image next to (up to) its first 7 instance masks.
    display_images([image] + [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])
    # Add augmentation and mask resizing.
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, augment=True, use_mini_mask=True)
    log("mask", mask)
    display_images([image] + [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])
    # Display augmented image with restored mask
    mask = utils.expand_mask(bbox, mask, image.shape)
    visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
def visualize_anchors():
    """Demo: summarize the RPN anchor pyramid and draw the anchors of the
    center cell of each pyramid level on a random image."""
    class RandomCropConfig(NucleusConfig):
        # Inspect anchors on random 256x256 crops.
        IMAGE_RESIZE_MODE = "crop"
        IMAGE_MIN_DIM = 256
        IMAGE_MAX_DIM = 256
    config = NoResizeConfig()
    crop_config = RandomCropConfig()
    dataset = NucleusDataset()
    dataset.load_nucleus(DATASET_DIR, subset="train")
    # Must call before using the dataset
    dataset.prepare()
    ## Visualize anchors of one cell at the center of the feature map
    # Load and display random image
    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image, image_meta, _, _, _ = modellib.load_image_gt(dataset, crop_config, image_id)
    # Generate Anchors
    backbone_shapes = modellib.compute_backbone_shapes(config, image.shape)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)
    # Print summary of anchors
    num_levels = len(backbone_shapes)
    anchors_per_cell = len(config.RPN_ANCHOR_RATIOS)
    print("Count: ", anchors.shape[0])
    print("Scales: ", config.RPN_ANCHOR_SCALES)
    print("ratios: ", config.RPN_ANCHOR_RATIOS)
    print("Anchors per Cell: ", anchors_per_cell)
    print("Levels: ", num_levels)
    anchors_per_level = []
    for l in range(num_levels):
        # Anchors per level = feature-map cells * anchors per cell,
        # thinned by the squared anchor stride.
        num_cells = backbone_shapes[l][0] * backbone_shapes[l][1]
        anchors_per_level.append(anchors_per_cell * num_cells // config.RPN_ANCHOR_STRIDE ** 2)
        print("Anchors in Level {}: {}".format(l, anchors_per_level[l]))
    # Display
    fig, ax = plt.subplots(1, figsize=(10, 10))
    ax.imshow(image)
    levels = len(backbone_shapes)
    for level in range(levels):
        colors = visualize.random_colors(levels)
        # Compute the index of the anchors at the center of the image
        level_start = sum(anchors_per_level[:level])  # sum of anchors of previous levels
        level_anchors = anchors[level_start:level_start + anchors_per_level[level]]
        print("Level {}. Anchors: {:6} Feature map Shape: {}".format(level, level_anchors.shape[0],
                                                                     backbone_shapes[level]))
        center_cell = backbone_shapes[level] // 2
        center_cell_index = (center_cell[0] * backbone_shapes[level][1] + center_cell[1])
        # NOTE(review): this level_center is immediately overwritten below;
        # the stride-adjusted center_anchor computation is the one used.
        level_center = center_cell_index * anchors_per_cell
        center_anchor = anchors_per_cell * (
            (center_cell[0] * backbone_shapes[level][1] / config.RPN_ANCHOR_STRIDE ** 2) \
            + center_cell[1] / config.RPN_ANCHOR_STRIDE)
        level_center = int(center_anchor)
        # Draw anchors. Brightness show the order in the array, dark to bright.
        for i, rect in enumerate(level_anchors[level_center:level_center + anchors_per_cell]):
            y1, x1, y2, x2 = rect
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, facecolor='none',
                                  edgecolor=(i + 1) * np.array(colors[level]) / anchors_per_cell)
            ax.add_patch(p)
    plt.show()
def Data_Generator():
    """Demo of the training data generator: pull one batch and log its
    contents (with and without ROI targets)."""
    class RandomCropConfig(NucleusConfig):
        # Random 256x256 crops keep batch memory bounded.
        IMAGE_RESIZE_MODE = "crop"
        IMAGE_MIN_DIM = 256
        IMAGE_MAX_DIM = 256
    config = NoResizeConfig()
    crop_config = RandomCropConfig()
    dataset = NucleusDataset()
    dataset.load_nucleus(DATASET_DIR, subset="train")
    dataset.prepare()
    image_id = np.random.choice(dataset.image_ids, 1)[0]
    random_rois = 2000
    g = modellib.data_generator(
        dataset, crop_config, shuffle=True, random_rois=random_rois,
        batch_size=4,
        detection_targets=True)
    # Get Next Image
    if random_rois:
        [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks, rpn_rois, rois], \
        [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)
        log("rois", rois)
        log("mrcnn_class_ids", mrcnn_class_ids)
        log("mrcnn_bbox", mrcnn_bbox)
        log("mrcnn_mask", mrcnn_mask)
    else:
        # BUG FIX: the generator yields gt_class_ids in this mode too;
        # omitting it from the unpack caused a NameError in the logging
        # below whenever random_rois was 0.
        [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks], _ = next(g)
    log("gt_class_ids", gt_class_ids)
    log("gt_boxes", gt_boxes)
    log("gt_masks", gt_masks)
    log("rpn_match", rpn_match, )
    log("rpn_bbox", rpn_bbox)
    image_id = modellib.parse_image_meta(image_meta)["image_id"][0]
    print("image_id: ", image_id, dataset.image_reference(image_id))
    if random_rois:
        # BUG FIX: mrcnn_class_ids only exists when random_rois is set,
        # so guard the trim accordingly.
        # Remove the last dim in mrcnn_class_ids. It's only added
        # to satisfy Keras restriction on target shape.
        mrcnn_class_ids = mrcnn_class_ids[:, :, 0]
############################################################
# Command Line
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Mask R-CNN for nuclei counting and segmentation')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'detect'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/dataset",
                        default="/home/mary/AI/data/nucleus/data-science-bowl-2018",
                        help='Root directory of the dataset')
    parser.add_argument('--weights', required=True,
                        metavar='/path/to/h5',
                        default=COCO_WEIGHTS_PATH,
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--subset', required=False,
                        default='train',
                        metavar="Dataset sub-directory",
                        help="Subset of dataset to run prediction on")
    args = parser.parse_args()
    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "detect":
        assert args.subset, "Provide --subset to run prediction on"
    # print("Weights: ", args.weights)
    # print("Dataset: ", args.dataset)
    # if args.subset:
    # print("Subset: ", args.subset)
    # print("Logs: ", args.logs)
    # Configurations: full config for training, a lighter inference
    # config otherwise.
    if args.command == "train":
        config = NucleusConfig()
    else:
        config = NucleusInferenceConfig()
    config.display()
    # ===========================================================================
    # Optional dataset-inspection demos (leave commented for normal runs):
    # dataset()
    # mini_mask()
    # visualize_anchors()
    # Data_Generator()
    # ===========================================================================
    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Select weights file to load: 'coco' (downloaded if missing),
    # 'last' (latest checkpoint), 'imagenet', or an explicit path.
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    print(args.dataset)
    print(args.subset)
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Train or evaluate
    if args.command == "train":
        train(model, args.dataset, args.subset)
    elif args.command == "detect":
        detect(model, args.dataset, args.subset)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'detect'".format(args.command))
| [
"mrcnn.model.MaskRCNN",
"numpy.sqrt",
"mrcnn.utils.download_trained_weights",
"imgaug.augmenters.GaussianBlur",
"mrcnn.model.compute_backbone_shapes",
"mrcnn.model.parse_image_meta",
"numpy.argsort",
"numpy.array",
"mrcnn.visualize.display_instances",
"mrcnn.model.log",
"imgaug.augmenters.Fliplr... | [((1591, 1602), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1600, 1602), False, 'import os\n'), ((1846, 1871), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (1861, 1871), False, 'import sys\n'), ((2155, 2198), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (2167, 2198), False, 'import os\n'), ((2326, 2356), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (2338, 2356), False, 'import os\n'), ((2421, 2463), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""results/nucleus/"""'], {}), "(ROOT_DIR, 'results/nucleus/')\n", (2433, 2463), False, 'import os\n'), ((5523, 5554), 'numpy.array', 'np.array', (['[43.53, 39.56, 48.22]'], {}), '([43.53, 39.56, 48.22])\n', (5531, 5554), True, 'import numpy as np\n'), ((12251, 12291), 'numpy.zeros', 'np.zeros', (['[shape[0] * shape[1]]', 'np.bool'], {}), '([shape[0] * shape[1]], np.bool)\n', (12259, 12291), True, 'import numpy as np\n'), ((13834, 13871), 'os.path.join', 'os.path.join', (['RESULTS_DIR', 'submit_dir'], {}), '(RESULTS_DIR, submit_dir)\n', (13846, 13871), False, 'import os\n'), ((13876, 13899), 'os.makedirs', 'os.makedirs', (['submit_dir'], {}), '(submit_dir)\n', (13887, 13899), False, 'import os\n'), ((14919, 14957), 'os.path.join', 'os.path.join', (['submit_dir', '"""submit.csv"""'], {}), "(submit_dir, 'submit.csv')\n", (14931, 14957), False, 'import os\n'), ((15123, 15160), 'numpy.array', 'np.array', (["[s['shape'] for s in stats]"], {}), "([s['shape'] for s in stats])\n", (15131, 15160), True, 'import numpy as np\n'), ((15179, 15216), 'numpy.array', 'np.array', (["[s['color'] for s in stats]"], {}), "([s['color'] for s in stats])\n", (15187, 15216), True, 'import numpy as np\n'), ((15814, 15849), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(16, 4)'}), '(1, 3, figsize=(16, 4))\n', (15826, 15849), True, 'import 
matplotlib.pyplot as plt\n'), ((16902, 16940), 'numpy.random.choice', 'np.random.choice', (['dataset.image_ids', '(4)'], {}), '(dataset.image_ids, 4)\n', (16918, 16940), True, 'import numpy as np\n'), ((17603, 17673), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset', 'config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset, config, image_id, use_mini_mask=False)\n', (17625, 17673), True, 'from mrcnn import model as modellib\n'), ((17687, 17713), 'mrcnn.model.log', 'log', (['"""molded_image"""', 'image'], {}), "('molded_image', image)\n", (17690, 17713), False, 'from mrcnn.model import log\n'), ((17718, 17735), 'mrcnn.model.log', 'log', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (17721, 17735), False, 'from mrcnn.model import log\n'), ((17740, 17839), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['image', 'bbox', 'mask', 'class_ids', 'dataset.class_names'], {'show_bbox': '(True)'}), '(image, bbox, mask, class_ids, dataset.\n class_names, show_bbox=True)\n', (17767, 17839), False, 'from mrcnn import visualize\n'), ((18710, 18721), 'time.time', 'time.time', ([], {}), '()\n', (18719, 18721), False, 'import time\n'), ((22010, 22131), 'mrcnn.utils.resize_image', 'utils.resize_image', (['image'], {'min_dim': 'config.IMAGE_MIN_DIM', 'max_dim': 'config.IMAGE_MAX_DIM', 'mode': 'config.IMAGE_RESIZE_MODE'}), '(image, min_dim=config.IMAGE_MIN_DIM, max_dim=config.\n IMAGE_MAX_DIM, mode=config.IMAGE_RESIZE_MODE)\n', (22028, 22131), False, 'from mrcnn import utils\n'), ((22171, 22210), 'mrcnn.utils.resize_mask', 'utils.resize_mask', (['mask', 'scale', 'padding'], {}), '(mask, scale, padding)\n', (22188, 22210), False, 'from mrcnn import utils\n'), ((22249, 22275), 'mrcnn.utils.extract_bboxes', 'utils.extract_bboxes', (['mask'], {}), '(mask)\n', (22269, 22275), False, 'from mrcnn import utils\n'), ((22437, 22456), 'mrcnn.model.log', 'log', (['"""image"""', 'image'], {}), "('image', image)\n", (22440, 22456), False, 'from 
mrcnn.model import log\n'), ((22461, 22478), 'mrcnn.model.log', 'log', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (22464, 22478), False, 'from mrcnn.model import log\n'), ((22483, 22510), 'mrcnn.model.log', 'log', (['"""class_ids"""', 'class_ids'], {}), "('class_ids', class_ids)\n", (22486, 22510), False, 'from mrcnn.model import log\n'), ((22515, 22532), 'mrcnn.model.log', 'log', (['"""bbox"""', 'bbox'], {}), "('bbox', bbox)\n", (22518, 22532), False, 'from mrcnn.model import log\n'), ((22571, 22649), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['image', 'bbox', 'mask', 'class_ids', 'dataset.class_names'], {}), '(image, bbox, mask, class_ids, dataset.class_names)\n', (22598, 22649), False, 'from mrcnn import visualize\n'), ((22756, 22826), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset', 'config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset, config, image_id, use_mini_mask=False)\n', (22778, 22826), True, 'from mrcnn import model as modellib\n'), ((22841, 22860), 'mrcnn.model.log', 'log', (['"""image"""', 'image'], {}), "('image', image)\n", (22844, 22860), False, 'from mrcnn.model import log\n'), ((22865, 22894), 'mrcnn.model.log', 'log', (['"""image_meta"""', 'image_meta'], {}), "('image_meta', image_meta)\n", (22868, 22894), False, 'from mrcnn.model import log\n'), ((22899, 22926), 'mrcnn.model.log', 'log', (['"""class_ids"""', 'class_ids'], {}), "('class_ids', class_ids)\n", (22902, 22926), False, 'from mrcnn.model import log\n'), ((22931, 22948), 'mrcnn.model.log', 'log', (['"""bbox"""', 'bbox'], {}), "('bbox', bbox)\n", (22934, 22948), False, 'from mrcnn.model import log\n'), ((22953, 22970), 'mrcnn.model.log', 'log', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (22956, 22970), False, 'from mrcnn.model import log\n'), ((23147, 23234), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset', 'config', 'image_id'], {'augment': '(True)', 'use_mini_mask': '(True)'}), '(dataset, config, 
image_id, augment=True,\n use_mini_mask=True)\n', (23169, 23234), True, 'from mrcnn import model as modellib\n'), ((23244, 23261), 'mrcnn.model.log', 'log', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (23247, 23261), False, 'from mrcnn.model import log\n'), ((23408, 23450), 'mrcnn.utils.expand_mask', 'utils.expand_mask', (['bbox', 'mask', 'image.shape'], {}), '(bbox, mask, image.shape)\n', (23425, 23450), False, 'from mrcnn import utils\n'), ((23455, 23533), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['image', 'bbox', 'mask', 'class_ids', 'dataset.class_names'], {}), '(image, bbox, mask, class_ids, dataset.class_names)\n', (23482, 23533), False, 'from mrcnn import visualize\n'), ((24109, 24163), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset', 'crop_config', 'image_id'], {}), '(dataset, crop_config, image_id)\n', (24131, 24163), True, 'from mrcnn import model as modellib\n'), ((24210, 24263), 'mrcnn.model.compute_backbone_shapes', 'modellib.compute_backbone_shapes', (['config', 'image.shape'], {}), '(config, image.shape)\n', (24242, 24263), True, 'from mrcnn import model as modellib\n'), ((24278, 24438), 'mrcnn.utils.generate_pyramid_anchors', 'utils.generate_pyramid_anchors', (['config.RPN_ANCHOR_SCALES', 'config.RPN_ANCHOR_RATIOS', 'backbone_shapes', 'config.BACKBONE_STRIDES', 'config.RPN_ANCHOR_STRIDE'], {}), '(config.RPN_ANCHOR_SCALES, config.\n RPN_ANCHOR_RATIOS, backbone_shapes, config.BACKBONE_STRIDES, config.\n RPN_ANCHOR_STRIDE)\n', (24308, 24438), False, 'from mrcnn import utils\n'), ((25274, 25307), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(10, 10)'}), '(1, figsize=(10, 10))\n', (25286, 25307), True, 'import matplotlib.pyplot as plt\n'), ((26762, 26772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26770, 26772), True, 'import matplotlib.pyplot as plt\n'), ((27194, 27321), 'mrcnn.model.data_generator', 'modellib.data_generator', (['dataset', 'crop_config'], 
{'shuffle': '(True)', 'random_rois': 'random_rois', 'batch_size': '(4)', 'detection_targets': '(True)'}), '(dataset, crop_config, shuffle=True, random_rois=\n random_rois, batch_size=4, detection_targets=True)\n', (27217, 27321), True, 'from mrcnn import model as modellib\n'), ((27817, 27850), 'mrcnn.model.log', 'log', (['"""gt_class_ids"""', 'gt_class_ids'], {}), "('gt_class_ids', gt_class_ids)\n", (27820, 27850), False, 'from mrcnn.model import log\n'), ((27855, 27880), 'mrcnn.model.log', 'log', (['"""gt_boxes"""', 'gt_boxes'], {}), "('gt_boxes', gt_boxes)\n", (27858, 27880), False, 'from mrcnn.model import log\n'), ((27885, 27910), 'mrcnn.model.log', 'log', (['"""gt_masks"""', 'gt_masks'], {}), "('gt_masks', gt_masks)\n", (27888, 27910), False, 'from mrcnn.model import log\n'), ((27915, 27942), 'mrcnn.model.log', 'log', (['"""rpn_match"""', 'rpn_match'], {}), "('rpn_match', rpn_match)\n", (27918, 27942), False, 'from mrcnn.model import log\n'), ((27949, 27974), 'mrcnn.model.log', 'log', (['"""rpn_bbox"""', 'rpn_bbox'], {}), "('rpn_bbox', rpn_bbox)\n", (27952, 27974), False, 'from mrcnn.model import log\n'), ((28513, 28604), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mask R-CNN for nuclei counting and segmentation"""'}), "(description=\n 'Mask R-CNN for nuclei counting and segmentation')\n", (28536, 28604), False, 'import argparse\n'), ((1715, 1740), 'os.path.dirname', 'os.path.dirname', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (1730, 1740), False, 'import os\n'), ((7930, 7967), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset_dir'], {}), '(dataset_dir, subset_dir)\n', (7942, 7967), False, 'import os\n'), ((9225, 9248), 'numpy.stack', 'np.stack', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (9233, 9248), True, 'import numpy as np\n'), ((11694, 11723), 'numpy.concatenate', 'np.concatenate', (['[[0], m, [0]]'], {}), '([[0], m, [0]])\n', (11708, 11723), True, 'import numpy as np\n'), ((13155, 13180), 'numpy.where', 
'np.where', (['(mask == o)', '(1)', '(0)'], {}), '(mask == o, 1, 0)\n', (13163, 13180), True, 'import numpy as np\n'), ((13680, 13707), 'os.path.exists', 'os.path.exists', (['RESULTS_DIR'], {}), '(RESULTS_DIR)\n', (13694, 13707), False, 'import os\n'), ((13717, 13741), 'os.makedirs', 'os.makedirs', (['RESULTS_DIR'], {}), '(RESULTS_DIR)\n', (13728, 13741), False, 'import os\n'), ((13792, 13815), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13813, 13815), False, 'import datetime\n'), ((14512, 14682), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['image', "r['rois']", "r['masks']", "r['class_ids']", 'dataset.class_names', "r['scores']"], {'show_bbox': '(False)', 'show_mask': '(False)', 'title': '"""Predictions"""'}), "(image, r['rois'], r['masks'], r['class_ids'],\n dataset.class_names, r['scores'], show_bbox=False, show_mask=False,\n title='Predictions')\n", (14539, 14682), False, 'from mrcnn import visualize\n'), ((17079, 17164), 'mrcnn.visualize.display_top_masks', 'visualize.display_top_masks', (['image', 'mask', 'class_ids', 'dataset.class_names'], {'limit': '(1)'}), '(image, mask, class_ids, dataset.class_names,\n limit=1)\n', (17106, 17164), False, 'from mrcnn import visualize\n'), ((18054, 18080), 'mrcnn.utils.extract_bboxes', 'utils.extract_bboxes', (['mask'], {}), '(mask)\n', (18074, 18080), False, 'from mrcnn import utils\n'), ((20217, 20333), 'numpy.array', 'np.array', (["[b for s in stats if area_threshold < s['shape'][0] * s['shape'][1] <=\n image_area for b in s['bbox']]"], {}), "([b for s in stats if area_threshold < s['shape'][0] * s['shape'][1\n ] <= image_area for b in s['bbox']])\n", (20225, 20333), True, 'import numpy as np\n'), ((21712, 21750), 'numpy.random.choice', 'np.random.choice', (['dataset.image_ids', '(1)'], {}), '(dataset.image_ids, 1)\n', (21728, 21750), True, 'import numpy as np\n'), ((24034, 24072), 'numpy.random.choice', 'np.random.choice', (['dataset.image_ids', '(1)'], {}), 
'(dataset.image_ids, 1)\n', (24050, 24072), True, 'import numpy as np\n'), ((25414, 25445), 'mrcnn.visualize.random_colors', 'visualize.random_colors', (['levels'], {}), '(levels)\n', (25437, 25445), False, 'from mrcnn import visualize\n'), ((27121, 27159), 'numpy.random.choice', 'np.random.choice', (['dataset.image_ids', '(1)'], {}), '(dataset.image_ids, 1)\n', (27137, 27159), True, 'import numpy as np\n'), ((27566, 27583), 'mrcnn.model.log', 'log', (['"""rois"""', 'rois'], {}), "('rois', rois)\n", (27569, 27583), False, 'from mrcnn.model import log\n'), ((27592, 27631), 'mrcnn.model.log', 'log', (['"""mrcnn_class_ids"""', 'mrcnn_class_ids'], {}), "('mrcnn_class_ids', mrcnn_class_ids)\n", (27595, 27631), False, 'from mrcnn.model import log\n'), ((27640, 27669), 'mrcnn.model.log', 'log', (['"""mrcnn_bbox"""', 'mrcnn_bbox'], {}), "('mrcnn_bbox', mrcnn_bbox)\n", (27643, 27669), False, 'from mrcnn.model import log\n'), ((27678, 27707), 'mrcnn.model.log', 'log', (['"""mrcnn_mask"""', 'mrcnn_mask'], {}), "('mrcnn_mask', mrcnn_mask)\n", (27681, 27707), False, 'from mrcnn.model import log\n'), ((30567, 30637), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='training', config=config, model_dir=args.logs)\n", (30584, 30637), True, 'from mrcnn import model as modellib\n'), ((30698, 30769), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='inference', config=config, model_dir=args.logs)\n", (30715, 30769), True, 'from mrcnn import model as modellib\n'), ((9399, 9440), 'numpy.ones', 'np.ones', (['[mask.shape[-1]]'], {'dtype': 'np.int32'}), '([mask.shape[-1]], dtype=np.int32)\n', (9406, 9440), True, 'import numpy as np\n'), ((10340, 10355), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['(0.5)'], {}), '(0.5)\n', (10350, 10355), True, 'from imgaug import augmenters as iaa\n'), ((10365, 10380), 
'imgaug.augmenters.Flipud', 'iaa.Flipud', (['(0.5)'], {}), '(0.5)\n', (10375, 10380), True, 'from imgaug import augmenters as iaa\n'), ((10520, 10544), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.8, 1.5)'], {}), '((0.8, 1.5))\n', (10532, 10544), True, 'from imgaug import augmenters as iaa\n'), ((10554, 10588), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ([], {'sigma': '(0.0, 5.0)'}), '(sigma=(0.0, 5.0))\n', (10570, 10588), True, 'from imgaug import augmenters as iaa\n'), ((12153, 12182), 'numpy.array', 'np.array', (['rle'], {'dtype': 'np.int32'}), '(rle, dtype=np.int32)\n', (12161, 12182), True, 'import numpy as np\n'), ((12966, 12984), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (12976, 12984), True, 'import numpy as np\n'), ((13042, 13071), 'numpy.reshape', 'np.reshape', (['order', '[1, 1, -1]'], {}), '(order, [1, 1, -1])\n', (13052, 13071), True, 'import numpy as np\n'), ((15357, 15383), 'numpy.mean', 'np.mean', (['image_shape[:, 0]'], {}), '(image_shape[:, 0])\n', (15364, 15383), True, 'import numpy as np\n'), ((15385, 15413), 'numpy.median', 'np.median', (['image_shape[:, 0]'], {}), '(image_shape[:, 0])\n', (15394, 15413), True, 'import numpy as np\n'), ((15423, 15448), 'numpy.min', 'np.min', (['image_shape[:, 0]'], {}), '(image_shape[:, 0])\n', (15429, 15448), True, 'import numpy as np\n'), ((15450, 15475), 'numpy.max', 'np.max', (['image_shape[:, 0]'], {}), '(image_shape[:, 0])\n', (15456, 15475), True, 'import numpy as np\n'), ((15569, 15595), 'numpy.mean', 'np.mean', (['image_shape[:, 1]'], {}), '(image_shape[:, 1])\n', (15576, 15595), True, 'import numpy as np\n'), ((15597, 15625), 'numpy.median', 'np.median', (['image_shape[:, 1]'], {}), '(image_shape[:, 1])\n', (15606, 15625), True, 'import numpy as np\n'), ((15635, 15660), 'numpy.min', 'np.min', (['image_shape[:, 1]'], {}), '(image_shape[:, 1])\n', (15641, 15660), True, 'import numpy as np\n'), ((15662, 15687), 'numpy.max', 'np.max', (['image_shape[:, 1]'], {}), 
'(image_shape[:, 1])\n', (15668, 15687), True, 'import numpy as np\n'), ((18576, 18603), 'numpy.mean', 'np.mean', (['image'], {'axis': '(0, 1)'}), '(image, axis=(0, 1))\n', (18583, 18603), True, 'import numpy as np\n'), ((18856, 18867), 'time.time', 'time.time', ([], {}), '()\n', (18865, 18867), False, 'import time\n'), ((27990, 28027), 'mrcnn.model.parse_image_meta', 'modellib.parse_image_meta', (['image_meta'], {}), '(image_meta)\n', (28015, 28027), True, 'from mrcnn import model as modellib\n'), ((30966, 30994), 'os.path.exists', 'os.path.exists', (['weights_path'], {}), '(weights_path)\n', (30980, 30994), False, 'import os\n'), ((31008, 31052), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['weights_path'], {}), '(weights_path)\n', (31038, 31052), False, 'from mrcnn import utils\n'), ((8916, 8945), 'os.path.dirname', 'os.path.dirname', (["info['path']"], {}), "(info['path'])\n", (8931, 8945), False, 'import os\n'), ((9040, 9057), 'os.walk', 'os.walk', (['mask_dir'], {}), '(mask_dir)\n', (9047, 9057), False, 'import os\n'), ((15751, 15779), 'numpy.mean', 'np.mean', (['image_color'], {'axis': '(0)'}), '(image_color, axis=0)\n', (15758, 15779), True, 'import numpy as np\n'), ((19761, 19780), 'numpy.sqrt', 'np.sqrt', (['image_area'], {}), '(image_area)\n', (19768, 19780), True, 'import numpy as np\n'), ((19807, 19834), 'numpy.median', 'np.median', (['nuclei_per_image'], {}), '(nuclei_per_image)\n', (19816, 19834), True, 'import numpy as np\n'), ((19952, 19971), 'numpy.sqrt', 'np.sqrt', (['image_area'], {}), '(image_area)\n', (19959, 19971), True, 'import numpy as np\n'), ((20519, 20538), 'numpy.sqrt', 'np.sqrt', (['image_area'], {}), '(image_area)\n', (20526, 20538), True, 'import numpy as np\n'), ((20708, 20736), 'numpy.mean', 'np.mean', (['nucleus_shape[:, 0]'], {}), '(nucleus_shape[:, 0])\n', (20715, 20736), True, 'import numpy as np\n'), ((20738, 20768), 'numpy.median', 'np.median', (['nucleus_shape[:, 0]'], {}), '(nucleus_shape[:, 
0])\n', (20747, 20768), True, 'import numpy as np\n'), ((20782, 20809), 'numpy.min', 'np.min', (['nucleus_shape[:, 0]'], {}), '(nucleus_shape[:, 0])\n', (20788, 20809), True, 'import numpy as np\n'), ((20811, 20838), 'numpy.max', 'np.max', (['nucleus_shape[:, 0]'], {}), '(nucleus_shape[:, 0])\n', (20817, 20838), True, 'import numpy as np\n'), ((20950, 20978), 'numpy.mean', 'np.mean', (['nucleus_shape[:, 1]'], {}), '(nucleus_shape[:, 1])\n', (20957, 20978), True, 'import numpy as np\n'), ((20980, 21010), 'numpy.median', 'np.median', (['nucleus_shape[:, 1]'], {}), '(nucleus_shape[:, 1])\n', (20989, 21010), True, 'import numpy as np\n'), ((21024, 21051), 'numpy.min', 'np.min', (['nucleus_shape[:, 1]'], {}), '(nucleus_shape[:, 1])\n', (21030, 21051), True, 'import numpy as np\n'), ((21053, 21080), 'numpy.max', 'np.max', (['nucleus_shape[:, 1]'], {}), '(nucleus_shape[:, 1])\n', (21059, 21080), True, 'import numpy as np\n'), ((21192, 21213), 'numpy.mean', 'np.mean', (['nucleus_area'], {}), '(nucleus_area)\n', (21199, 21213), True, 'import numpy as np\n'), ((21215, 21238), 'numpy.median', 'np.median', (['nucleus_area'], {}), '(nucleus_area)\n', (21224, 21238), True, 'import numpy as np\n'), ((21252, 21272), 'numpy.min', 'np.min', (['nucleus_area'], {}), '(nucleus_area)\n', (21258, 21272), True, 'import numpy as np\n'), ((21274, 21294), 'numpy.max', 'np.max', (['nucleus_area'], {}), '(nucleus_area)\n', (21280, 21294), True, 'import numpy as np\n'), ((8126, 8146), 'os.walk', 'os.walk', (['dataset_dir'], {}), '(dataset_dir)\n', (8133, 8146), False, 'import os\n'), ((10401, 10422), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'rotate': '(90)'}), '(rotate=90)\n', (10411, 10422), True, 'from imgaug import augmenters as iaa\n'), ((10443, 10465), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'rotate': '(180)'}), '(rotate=180)\n', (10453, 10465), True, 'from imgaug import augmenters as iaa\n'), ((10486, 10508), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'rotate': 
'(270)'}), '(rotate=270)\n', (10496, 10508), True, 'from imgaug import augmenters as iaa\n'), ((11806, 11822), 'numpy.where', 'np.where', (['(g != 0)'], {}), '(g != 0)\n', (11814, 11822), True, 'import numpy as np\n'), ((19600, 19619), 'numpy.sqrt', 'np.sqrt', (['image_area'], {}), '(image_area)\n', (19607, 19619), True, 'import numpy as np\n'), ((9136, 9161), 'os.path.join', 'os.path.join', (['mask_dir', 'f'], {}), '(mask_dir, f)\n', (9148, 9161), False, 'import os\n'), ((26686, 26709), 'numpy.array', 'np.array', (['colors[level]'], {}), '(colors[level])\n', (26694, 26709), True, 'import numpy as np\n')] |
# Exploratory plotting script for Drop-seq read-cluster / cell-group sizes.
# The commented-out sections are earlier one-off runs kept for reference.
import matplotlib as mpl
mpl.use('Agg')  # headless backend; must be selected before pylab is imported
# import matplotlib.pyplot as plt
import numpy as np
import math
import pylab as plt
from matplotlib.ticker import ScalarFormatter
# path = '/home/zgy_ucla_cs/data/dropSeq/'
path = '/home/zgy_ucla_cs/Research/DropSeq/data/reads_barcode/'
read_cluster_size = []
# Earlier pass: counted '@' occurrences per line of the raw fastq cluster files
# and saved the resulting size arrays with np.save (now loaded from .npy below).
# with open(path + "E31_REP1_HKT73BGX2_cluster_other.fastq") as infile:
# with open(path + "E31_REP2_HHN7NBGX3_cluster_other.fastq") as infile:
# with open(path + "E31_REP3_HHNKFBGX3_cluster_other.fastq") as infile:
#     l = 10000
#     for line in infile:
#         # l-=1
#         # print line.count('@')
#         size = line.count('@')
#         if size <= 100:
#             read_cluster_size.append(size)
# np.save(path + 'arr2_100', np.array(read_cluster_size))
# np.save(path + 'All_cluster_size', np.array(read_cluster_size))
# Load the precomputed cell-group sizes and keep only groups of exactly 50 reads.
read_cluster_size = np.load(path + 'E31_REP3_cell_group_size.npy')
read_cluster_size = read_cluster_size[np.where( read_cluster_size == 50)]
# read_cluster_size = read_cluster_size[np.where( read_cluster_size <= 60)]
print("total clusters:", len(read_cluster_size))
# Histogram plotting (disabled in this run):
# print(min(read_cluster_size), max(read_cluster_size))
# bins = np.linspace(math.ceil(min(read_cluster_size)),
# math.floor(max(read_cluster_size)),
# 200) # fixed number of bins
# plt.hist(read_cluster_size, bins = bins) # arguments are passed to np.histogram
# plt.title("Cluster size distritbuion, y log scale")
# plt.xlabel("reads counts")
# # plt.gca().set_xscale("log")
# # tk = np.linspace(math.ceil(min(read_cluster_size)),
# # math.floor(max(read_cluster_size)),
# # 20)
# # plt.gca().set_xticks(tk)
# plt.gca().set_yscale("log")
# plt.ylabel("number of cells")
# # for axis in [plt.gca().xaxis, plt.gca().yaxis]:
# # axis.set_major_formatter(ScalarFormatter())
# plt.savefig(path + "E31_REP3_cell_group_size_2_more_100_less.png")
| [
"matplotlib.use",
"numpy.load",
"numpy.where"
] | [((25, 39), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), True, 'import matplotlib as mpl\n'), ((825, 871), 'numpy.load', 'np.load', (["(path + 'E31_REP3_cell_group_size.npy')"], {}), "(path + 'E31_REP3_cell_group_size.npy')\n", (832, 871), True, 'import numpy as np\n'), ((910, 943), 'numpy.where', 'np.where', (['(read_cluster_size == 50)'], {}), '(read_cluster_size == 50)\n', (918, 943), True, 'import numpy as np\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import op_test
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.backward import append_backward
from paddle.distributed.models.moe import utils
from paddle.fluid.framework import _test_eager_guard
def random_routing(topk_idx, topk_value, prob, topk=2):
    """NumPy reference implementation of random routing for MoE top-2 gating.

    For every token, the second-choice expert is dropped (its index is set to
    -1) when twice its gate value falls below the token's random threshold.

    Args:
        topk_idx: int array of shape (N, 2) with the chosen expert indices.
        topk_value: array of shape (N, 2) with the corresponding gate values.
        prob: array of shape (N,) with one random threshold per token.
        topk: only ``topk == 2`` is supported.

    Returns:
        A new index array of the same shape; the input array is not modified.

    Raises:
        RuntimeError: if ``topk != 2``.
    """
    if topk != 2:
        raise RuntimeError("only topk=2 is supported now")
    new_topk_idx = np.copy(topk_idx)
    # Vectorized form of the original per-row loop:
    # drop the second expert wherever 2 * gate_value < threshold.
    drop = np.asarray(topk_value)[:, 1] * 2 < np.asarray(prob)
    new_topk_idx[drop, 1] = -1
    return new_topk_idx
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestNumberCountAPIFp32(unittest.TestCase):
    # Verifies that the fused op `utils._random_routing` matches the NumPy
    # reference `random_routing` defined above, using float32 gate values.
    def setUp(self):
        self.dtype = "float32"
        self.init()
    def init(self):
        # Random top-2 expert indices in [-1, upper_range);
        # -1 marks an expert that was already dropped.
        self.upper_range = 8
        self.x = np.random.randint(-1, self.upper_range,
                                   size=(200, 2)).astype('int64')
        # Per-token random threshold and per-expert gate values.
        self.prob = np.random.random((self.x.shape[0], )).astype(self.dtype)
        self.topk_value = np.random.random(self.x.shape).astype(self.dtype)
        # Expected output computed with the NumPy reference implementation.
        self.out = random_routing(self.x, self.topk_value,
                                  self.prob).astype(self.dtype)
        self.place = paddle.CUDAPlace(0)
    def func_api_dygraph(self):
        # Run the op in dygraph mode and compare against the reference output.
        paddle.disable_static()
        x = paddle.to_tensor(self.x)
        value = paddle.to_tensor(self.topk_value)
        prob = paddle.to_tensor(self.prob)
        out = utils._random_routing(x, value, prob)
        assert np.allclose(out.numpy(), self.out)
    def test_api_dygraph(self):
        # Exercise both the eager mode and the legacy dygraph path.
        with _test_eager_guard():
            self.func_api_dygraph()
        self.func_api_dygraph()
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestNumberCountAPIFp16(unittest.TestCase):
    # Same checks as TestNumberCountAPIFp32, but with float16 data.
    def setUp(self):
        self.dtype = "float16"
        self.init()
if __name__ == '__main__':
    # Default to static graph mode; tests that need dygraph switch modes
    # themselves via paddle.disable_static().
    paddle.enable_static()
    unittest.main()
| [
"numpy.copy",
"paddle.fluid.framework._test_eager_guard",
"numpy.random.random",
"paddle.CUDAPlace",
"paddle.enable_static",
"paddle.distributed.models.moe.utils._random_routing",
"paddle.disable_static",
"paddle.to_tensor",
"numpy.random.randint",
"unittest.main",
"paddle.fluid.core.is_compiled... | [((2798, 2820), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (2818, 2820), False, 'import paddle\n'), ((2825, 2840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2838, 2840), False, 'import unittest\n'), ((1123, 1140), 'numpy.copy', 'np.copy', (['topk_idx'], {}), '(topk_idx)\n', (1130, 1140), True, 'import numpy as np\n'), ((2082, 2101), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (2098, 2101), False, 'import paddle\n'), ((2143, 2166), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (2164, 2166), False, 'import paddle\n'), ((2179, 2203), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (2195, 2203), False, 'import paddle\n'), ((2220, 2253), 'paddle.to_tensor', 'paddle.to_tensor', (['self.topk_value'], {}), '(self.topk_value)\n', (2236, 2253), False, 'import paddle\n'), ((2269, 2296), 'paddle.to_tensor', 'paddle.to_tensor', (['self.prob'], {}), '(self.prob)\n', (2285, 2296), False, 'import paddle\n'), ((2311, 2348), 'paddle.distributed.models.moe.utils._random_routing', 'utils._random_routing', (['x', 'value', 'prob'], {}), '(x, value, prob)\n', (2332, 2348), False, 'from paddle.distributed.models.moe import utils\n'), ((1409, 1437), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (1435, 1437), True, 'import paddle.fluid.core as core\n'), ((2557, 2585), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (2583, 2585), True, 'import paddle.fluid.core as core\n'), ((2445, 2464), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (2462, 2464), False, 'from paddle.fluid.framework import _test_eager_guard\n'), ((1679, 1733), 'numpy.random.randint', 'np.random.randint', (['(-1)', 'self.upper_range'], {'size': '(200, 2)'}), '(-1, self.upper_range, size=(200, 2))\n', (1696, 1733), True, 'import numpy as np\n'), ((1805, 1841), 
'numpy.random.random', 'np.random.random', (['(self.x.shape[0],)'], {}), '((self.x.shape[0],))\n', (1821, 1841), True, 'import numpy as np\n'), ((1888, 1918), 'numpy.random.random', 'np.random.random', (['self.x.shape'], {}), '(self.x.shape)\n', (1904, 1918), True, 'import numpy as np\n')] |
__author__ = '<NAME>'
import logging
import cv2
import numpy
import sys
from combining_classifications import combine_majority_vote, combine_mean_rule, combine_minimum_rule
from loading_images import load_face_vectors_from_disk, extract_color_channels
from pca import PCA
from plotting import plot_results
def main():
    """Entry point: load the face dataset and evaluate classifier-combination rules."""
    logging.basicConfig(format='%(levelname)7s: %(message)s', level=logging.INFO)

    persons = 13            # number of distinct persons in the dataset
    samples = 10            # face samples available per person
    image_size = (50, 50)   # every face image is resized to this shape

    # (combination function, human-readable name) pairs to evaluate
    rules = [
        (combine_majority_vote, "Majority Voting"),
        (combine_minimum_rule, "Minimum Rule"),
        (combine_mean_rule, "Mean Rule")
    ]

    image_numbers = generate_all_image_numbers(persons, samples)
    face_vectors = load_face_vectors_from_disk(image_numbers, image_size, load_channels_bgrhs=True)
    channels = extract_color_channels(face_vectors)

    test_different_training(channels, persons, samples, rules)
def test_different_training(color_channels, no_of_persons, samples_person, combining_functions):
    """Evaluate every combining rule while varying the training-set size.

    For each repetition and each number of training samples the classifiers
    are retrained and re-tested; the recognition rates are collected and
    finally plotted.
    """
    x_min, x_max = 1, samples_person - 1
    repetitions = 15
    n_rules = 3
    n_sizes = x_max - x_min + 1

    sample_counts = []
    rates = numpy.empty((n_rules, repetitions * n_sizes))

    col = 0
    for rep in range(repetitions):
        # crude textual progress indicator
        sys.stdout.write("\r%d%%" % (rep * 100 // repetitions))
        sys.stdout.flush()
        for n_training in range(x_min, x_max + 1):
            rates[:, col] = train_and_test(
                color_channels, no_of_persons, samples_person, n_training, combining_functions)
            sample_counts.append(n_training)
            col += 1
    print()

    # Plot results:
    plot_results(
        x_axis=sample_counts,
        y_axis=rates,
        x_min=x_min,
        x_max=x_max,
        labels=[name for _, name in combining_functions]
    )
def train_and_test(color_channels, no_of_persons, samples_person, samples_training, combining_functions):
    """Train one PCA classifier per colour channel, classify the held-out
    samples, and return the recognition rate of every combining rule."""
    # randomly split each person's samples into a training and a testing part
    all_testing_idx, all_training_idx = randomly_split_classes(
        no_of_classes=no_of_persons,
        samples_per_class=samples_person,
        training_samples_per_class=samples_training
    )

    classifiers = train_classifiers(all_training_idx, samples_training, color_channels)

    samples_testing = samples_person - samples_training
    predictions = classify_testing_samples(classifiers, all_testing_idx, color_channels,
                                            samples_testing * no_of_persons)

    # ground-truth class of every testing sample, in grid order
    test_classes = [cls for cls in range(no_of_persons) for _ in range(samples_testing)]

    rates = []
    for combine, rule_name in combining_functions:
        combined = combine(predictions)
        hits = numpy.equal(combined, test_classes)
        rate = numpy.count_nonzero(hits) / hits.size
        logging.debug(
            "{}: Prediciton rate:{:.2%} Predictions:{}".format(rule_name, rate, combined))
        rates.append(rate)
    return rates
def show_vectors_as_images(vectors: numpy.ndarray, image_size, wait_time=None):
    """Display the first channel of every vector in its own OpenCV window.

    If *wait_time* is given, cv2.waitKey(wait_time) is called after each image.
    """
    for index, vector in enumerate(vectors):
        image = vector[0].reshape(image_size)
        cv2.imshow("channel-{}".format(index), image)
        if wait_time is not None:
            cv2.waitKey(wait_time)
def randomly_split_classes(no_of_classes, samples_per_class, training_samples_per_class):
    """Randomly split every class's samples into training and testing indices.

    Returns (all_testing_idx, all_training_idx): two flat integer arrays of
    global sample indices (class_no * samples_per_class + local index).
    """
    training_parts = []
    testing_parts = []
    for class_no in range(no_of_classes):
        offset = class_no * samples_per_class
        # shuffle the class's local sample indices, then cut off the training part
        shuffled = numpy.random.permutation(samples_per_class)
        training_parts.append(shuffled[:training_samples_per_class] + offset)
        testing_parts.append(shuffled[training_samples_per_class:] + offset)
    if not training_parts:
        return numpy.empty(0, dtype=int), numpy.empty(0, dtype=int)
    all_training_idx = numpy.concatenate(training_parts)
    all_testing_idx = numpy.concatenate(testing_parts)
    return all_testing_idx, all_training_idx
def train_classifiers(all_training_idx, samples_training, color_channels):
    """Create and train one PCA classifier for every colour channel.

    Each classifier only ever sees the training subset of its own channel.
    """
    logging.debug("Training classifiers..")

    def _fit(channel):
        # one PCA model per channel, fitted on the training samples
        pca = PCA(samples_training)
        pca.train(channel[all_training_idx])
        return pca

    return [_fit(channel) for channel in color_channels]
def classify_testing_samples(classifiers, all_testing_idx, color_channels, total_testing_samples):
    """Classify every testing sample with each channel's classifier.

    Returns an int array of shape (n_classifiers, total_testing_samples, 2)
    holding each classifier's per-sample prediction pairs.
    """
    logging.debug("Testing classifiers..")
    predictions = numpy.empty((len(classifiers), total_testing_samples, 2), dtype=int)
    for index, (channel, classifier) in enumerate(zip(color_channels, classifiers)):
        assert isinstance(classifier, PCA)
        predictions[index] = classifier.classify_samples(channel[all_testing_idx])
    return predictions
def generate_all_image_numbers(no_of_persons, samples_person):
    """
    Generates and returns every (person, sample) image-number pair.

    :param no_of_persons: number of persons
    :param samples_person: number of samples used per person
    :return: int array of shape (no_of_persons * samples_person, 2), ordered
             person by person and, within a person, sample by sample
    """
    pairs = [(person, sample)
             for person in range(1, no_of_persons + 1)
             for sample in range(1, samples_person + 1)]
    return numpy.array(pairs, dtype=int).reshape(-1, 2)
if __name__ == '__main__':
main() | [
"logging.basicConfig",
"logging.debug",
"loading_images.load_face_vectors_from_disk",
"numpy.random.permutation",
"pca.PCA",
"numpy.equal",
"loading_images.extract_color_channels",
"numpy.count_nonzero",
"numpy.empty",
"plotting.plot_results",
"sys.stdout.flush",
"cv2.waitKey",
"sys.stdout.w... | [((327, 404), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)7s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)7s: %(message)s', level=logging.INFO)\n", (346, 404), False, 'import logging\n'), ((857, 945), 'loading_images.load_face_vectors_from_disk', 'load_face_vectors_from_disk', (['all_image_numbers', 'image_size'], {'load_channels_bgrhs': '(True)'}), '(all_image_numbers, image_size,\n load_channels_bgrhs=True)\n', (884, 945), False, 'from loading_images import load_face_vectors_from_disk, extract_color_channels\n'), ((963, 1003), 'loading_images.extract_color_channels', 'extract_color_channels', (['all_face_vectors'], {}), '(all_face_vectors)\n', (985, 1003), False, 'from loading_images import load_face_vectors_from_disk, extract_color_channels\n'), ((1414, 1490), 'numpy.empty', 'numpy.empty', (['(number_of_results, number_of_tests * number_of_diff_trainings)'], {}), '((number_of_results, number_of_tests * number_of_diff_trainings))\n', (1425, 1490), False, 'import numpy\n'), ((2029, 2199), 'plotting.plot_results', 'plot_results', ([], {'x_axis': 'plot_number_of_training_samples', 'y_axis': 'plot_recognition_rate', 'x_min': 'x_min', 'x_max': 'x_max', 'labels': '[name for func, name in combining_functions]'}), '(x_axis=plot_number_of_training_samples, y_axis=\n plot_recognition_rate, x_min=x_min, x_max=x_max, labels=[name for func,\n name in combining_functions])\n', (2041, 2199), False, 'from plotting import plot_results\n'), ((4124, 4170), 'numpy.empty', 'numpy.empty', (['training_samples_total'], {'dtype': 'int'}), '(training_samples_total, dtype=int)\n', (4135, 4170), False, 'import numpy\n'), ((4193, 4238), 'numpy.empty', 'numpy.empty', (['testing_samples_total'], {'dtype': 'int'}), '(testing_samples_total, dtype=int)\n', (4204, 4238), False, 'import numpy\n'), ((5081, 5120), 'logging.debug', 'logging.debug', (['"""Training classifiers.."""'], {}), "('Training classifiers..')\n", (5094, 
5120), False, 'import logging\n'), ((5439, 5477), 'logging.debug', 'logging.debug', (['"""Testing classifiers.."""'], {}), "('Testing classifiers..')\n", (5452, 5477), False, 'import logging\n'), ((1556, 1619), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%d%%' % (test_no * 100 // number_of_tests))"], {}), "('\\r%d%%' % (test_no * 100 // number_of_tests))\n", (1572, 1619), False, 'import sys\n'), ((1628, 1646), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1644, 1646), False, 'import sys\n'), ((3151, 3198), 'numpy.equal', 'numpy.equal', (['combined_predictions', 'test_classes'], {}), '(combined_predictions, test_classes)\n', (3162, 3198), False, 'import numpy\n'), ((3763, 3785), 'cv2.waitKey', 'cv2.waitKey', (['wait_time'], {}), '(wait_time)\n', (3774, 3785), False, 'import cv2\n'), ((4382, 4425), 'numpy.random.permutation', 'numpy.random.permutation', (['samples_per_class'], {}), '(samples_per_class)\n', (4406, 4425), False, 'import numpy\n'), ((5198, 5219), 'pca.PCA', 'PCA', (['samples_training'], {}), '(samples_training)\n', (5201, 5219), False, 'from pca import PCA\n'), ((3225, 3266), 'numpy.count_nonzero', 'numpy.count_nonzero', (['right_classification'], {}), '(right_classification)\n', (3244, 3266), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat May 22 18:46:12 2021

@author: nkele

Lane-following demo: detects two yellow lane markings in each video frame,
computes the midpoint of their centroids and prints a steering suggestion
(left/right/centre) relative to the frame centre.
"""
import cv2
import numpy as np
import imutils

cap =cv2.VideoCapture("lane_red3.mp4")
cap.set(3,640)   # requested capture width
cap.set(4,480)   # requested capture height

while(True):
    try:
        ret, frame= cap.read()
        cv2.imshow("original", frame)
        # NOTE(review): frame.shape is (rows, cols, channels), so shape[1] is
        # actually the image width and shape[0] the height — the variable
        # names below are swapped, but they are used consistently.
        height = frame.shape[1]
        width = frame.shape[0]
        # keep only the bottom half of the frame (the road region)
        frame = frame[int(width/2) : width, 0: height]
        hsv= cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # HSV range for the yellow lane markings
        lower_yellow = np.array([0,70,70], np.uint8)
        upper_yellow = np.array([50,255,255], np.uint8)
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        # dilate the mask to close small gaps in the detected markings
        kernal = np.ones((5,5), "uint8")
        mask = cv2.dilate(mask, kernal)
        #res_yellow = cv2.bitwise_and(frame, frame, mask = mask)
        cnts= cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts=imutils.grab_contours(cnts)
        print(len(cnts))
        crd_list = []
        # collect the centroid x-coordinate of every sufficiently large contour
        for c in cnts:
            area = cv2.contourArea(c)
            if area>1000:
                cv2.drawContours(frame,[c],-1,(0,255,0),3)
                M=cv2.moments(c)
                cx= int(M["m10"]/M["m00"])
                cy= int(M["m01"]/M["m00"])
                print("centroid is at: ",cx,cy)
                crd_list.append(cx)
                cv2.circle(frame,(cx,cy),7,(255,255,255),-1)
                #cv2.imshow("frame",frame)
        print(crd_list)
        # midpoint between the first two lane contours (IndexError if < 2 found,
        # which is caught by the bare except below and ends the loop)
        crd_avg = int((crd_list[0] + crd_list[1])/2)
        frame_centre = int(height/2)
        print(crd_avg)
        print(frame_centre)
        # red marker: lane midpoint; blue marker: frame centre
        frame = cv2.circle(frame, (crd_avg, int(1*(width/4))), 10, (0,0,255), 2)
        frame = cv2.circle(frame, (int(height/2), int(1*(width/4))), 10, (255,0,0), 2)
        cv2.imshow("frame",frame)
        #print("centroids")
        #print("centroid is at: ",cx,cy)
        # steer based on the offset between lane midpoint and frame centre
        # (5 px dead zone)
        if (frame_centre - crd_avg) > 5:
            print("DRIVE RIGHT!!")
        elif (crd_avg - frame_centre) > 5:
            print("DRIVE LEFT!!")
        elif (((frame_centre - crd_avg) <= 5) and ((crd_avg - frame_centre) <= 5)):
            print("MOVE ALONG CENTRE!!")
        if(cv2.waitKey(40) & 0xFF == ord('q')):
            break
    except:
        # deliberate catch-all: exits the loop when the video is exhausted
        # (frame is None) or fewer than two lane contours were found
        break
cap.release()
cv2.destroyAllWindows() | [
"cv2.drawContours",
"numpy.ones",
"cv2.inRange",
"cv2.imshow",
"cv2.contourArea",
"numpy.array",
"cv2.circle",
"imutils.grab_contours",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"cv2.dilate",
"cv2.waitKey"
] | [((147, 180), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""lane_red3.mp4"""'], {}), "('lane_red3.mp4')\n", (163, 180), False, 'import cv2\n'), ((2595, 2618), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2616, 2618), False, 'import cv2\n'), ((302, 331), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'frame'], {}), "('original', frame)\n", (312, 331), False, 'import cv2\n'), ((517, 555), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (529, 555), False, 'import cv2\n'), ((590, 621), 'numpy.array', 'np.array', (['[0, 70, 70]', 'np.uint8'], {}), '([0, 70, 70], np.uint8)\n', (598, 621), True, 'import numpy as np\n'), ((644, 678), 'numpy.array', 'np.array', (['[50, 255, 255]', 'np.uint8'], {}), '([50, 255, 255], np.uint8)\n', (652, 678), True, 'import numpy as np\n'), ((703, 747), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_yellow', 'upper_yellow'], {}), '(hsv, lower_yellow, upper_yellow)\n', (714, 747), False, 'import cv2\n'), ((778, 802), 'numpy.ones', 'np.ones', (['(5, 5)', '"""uint8"""'], {}), "((5, 5), 'uint8')\n", (785, 802), True, 'import numpy as np\n'), ((828, 852), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernal'], {}), '(mask, kernal)\n', (838, 852), False, 'import cv2\n'), ((947, 1009), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (963, 1009), False, 'import cv2\n'), ((1024, 1051), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (1045, 1051), False, 'import imutils\n'), ((2046, 2072), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (2056, 2072), False, 'import cv2\n'), ((1165, 1183), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (1180, 1183), False, 'import cv2\n'), ((1228, 1276), 'cv2.drawContours', 'cv2.drawContours', (['frame', '[c]', '(-1)', '(0, 255, 0)', '(3)'], {}), '(frame, [c], -1, (0, 
255, 0), 3)\n', (1244, 1276), False, 'import cv2\n'), ((1290, 1304), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (1301, 1304), False, 'import cv2\n'), ((1526, 1577), 'cv2.circle', 'cv2.circle', (['frame', '(cx, cy)', '(7)', '(255, 255, 255)', '(-1)'], {}), '(frame, (cx, cy), 7, (255, 255, 255), -1)\n', (1536, 1577), False, 'import cv2\n'), ((2475, 2490), 'cv2.waitKey', 'cv2.waitKey', (['(40)'], {}), '(40)\n', (2486, 2490), False, 'import cv2\n')] |
#/usr/bin/env python3
import h5py
import numpy as np
import pybind_isce3 as isce
from iscetest import data as test_data_dir
from pathlib import Path
import json
from ...focus.backproject import load_h5
from pybind_nisar.workflows.point_target_info import (analyze_point_target,
tofloatvals)
c = isce.core.speed_of_light
def test_backproject():
    """End-to-end check of the CUDA backprojection focuser.

    Focuses a chip around a simulated range-compressed point target and
    verifies the geolocation accuracy and the impulse-response width in both
    range and azimuth.
    """
    # load point target simulation data
    filename = Path(test_data_dir) / "point-target-sim-rc.h5"
    d = load_h5(filename)
    # unpack the simulation products (see load_h5 for the field definitions)
    signal_data = d["signal_data"]
    radar_grid = d["radar_grid"]
    orbit = d["orbit"]
    doppler = d["doppler"]
    center_frequency = d["center_frequency"]
    range_sampling_rate = d["range_sampling_rate"]
    dem = d["dem"]
    dry_tropo_model = d["dry_tropo_model"]
    target_azimuth = d["target_azimuth"]
    target_range = d["target_range"]
    # range bandwidth (Hz)
    B = 20e6
    # desired azimuth resolution (m)
    azimuth_res = 6.
    # output chip size (pixels per side)
    nchip = 129
    # how much to upsample the output for point target analysis
    upsample_factor = 128
    # create 9-point Knab kernel for range interpolation;
    # use tabulated kernel for performance
    kernel = isce.core.KnabKernel(9., B / range_sampling_rate)
    kernel = isce.core.TabulatedKernelF32(kernel, 2048)
    # create output radar grid centered on the target
    dt = radar_grid.az_time_interval
    dr = radar_grid.range_pixel_spacing
    t0 = target_azimuth - 0.5 * (nchip - 1) * dt
    r0 = target_range - 0.5 * (nchip - 1) * dr
    out_grid = isce.product.RadarGridParameters(
        t0, radar_grid.wavelength, radar_grid.prf, r0, dr,
        radar_grid.lookside, nchip, nchip, orbit.reference_epoch)
    # init output buffer
    out = np.empty((nchip, nchip), np.complex64)
    # collect input & output radar_grid, orbit, and Doppler
    in_geometry = isce.container.RadarGeometry(radar_grid, orbit, doppler)
    out_geometry = isce.container.RadarGeometry(out_grid, orbit, doppler)
    # focus to output grid (runs on the GPU)
    isce.cuda.focus.backproject(out, out_geometry, signal_data, in_geometry,
                                dem, center_frequency, azimuth_res, kernel, dry_tropo_model)
    # remove range carrier so the impulse response can be analyzed directly
    kr = 4. * np.pi / out_grid.wavelength
    r = np.array(out_geometry.slant_range)
    out *= np.exp(-1j * kr * r)
    info = analyze_point_target(out, nchip//2, nchip//2, nov=upsample_factor,
                                chipsize=nchip//2)
    tofloatvals(info)
    # print point target info
    print(json.dumps(info, indent=2))
    # theoretical range resolution (m)
    range_res = c / (2. * B)
    # range position error & -3 dB main lobe width (m)
    range_err = dr * info["range"]["offset"]
    range_width = dr * info["range"]["resolution"]
    # azimuth position error & -3 dB main lobe width (m),
    # converted from pixels to meters via the platform speed
    _, vel = orbit.interpolate(target_azimuth)
    azimuth_err = dt * info["azimuth"]["offset"] * np.linalg.norm(vel)
    azimuth_width = dt * info["azimuth"]["resolution"] * np.linalg.norm(vel)
    # require positioning error < resolution/128
    assert(range_err < range_res / 128.)
    assert(azimuth_err < azimuth_res / 128.)
    # require 3dB width in range to be <= range resolution
    assert(range_width <= range_res)
    # azimuth response is spread slightly by the antenna pattern so the
    # threshold is slightly higher - see
    # https://github.jpl.nasa.gov/bhawkins/nisar-notebooks/blob/master/Azimuth%20Resolution.ipynb
    assert(azimuth_width <= 6.62)
| [
"pybind_nisar.workflows.point_target_info.analyze_point_target",
"pybind_isce3.container.RadarGeometry",
"pathlib.Path",
"json.dumps",
"numpy.exp",
"numpy.array",
"numpy.empty",
"numpy.linalg.norm",
"pybind_isce3.product.RadarGridParameters",
"pybind_isce3.core.KnabKernel",
"pybind_isce3.cuda.fo... | [((1222, 1272), 'pybind_isce3.core.KnabKernel', 'isce.core.KnabKernel', (['(9.0)', '(B / range_sampling_rate)'], {}), '(9.0, B / range_sampling_rate)\n', (1242, 1272), True, 'import pybind_isce3 as isce\n'), ((1285, 1327), 'pybind_isce3.core.TabulatedKernelF32', 'isce.core.TabulatedKernelF32', (['kernel', '(2048)'], {}), '(kernel, 2048)\n', (1313, 1327), True, 'import pybind_isce3 as isce\n'), ((1571, 1716), 'pybind_isce3.product.RadarGridParameters', 'isce.product.RadarGridParameters', (['t0', 'radar_grid.wavelength', 'radar_grid.prf', 'r0', 'dr', 'radar_grid.lookside', 'nchip', 'nchip', 'orbit.reference_epoch'], {}), '(t0, radar_grid.wavelength, radar_grid.prf,\n r0, dr, radar_grid.lookside, nchip, nchip, orbit.reference_epoch)\n', (1603, 1716), True, 'import pybind_isce3 as isce\n'), ((1774, 1812), 'numpy.empty', 'np.empty', (['(nchip, nchip)', 'np.complex64'], {}), '((nchip, nchip), np.complex64)\n', (1782, 1812), True, 'import numpy as np\n'), ((1892, 1948), 'pybind_isce3.container.RadarGeometry', 'isce.container.RadarGeometry', (['radar_grid', 'orbit', 'doppler'], {}), '(radar_grid, orbit, doppler)\n', (1920, 1948), True, 'import pybind_isce3 as isce\n'), ((1968, 2022), 'pybind_isce3.container.RadarGeometry', 'isce.container.RadarGeometry', (['out_grid', 'orbit', 'doppler'], {}), '(out_grid, orbit, doppler)\n', (1996, 2022), True, 'import pybind_isce3 as isce\n'), ((2055, 2192), 'pybind_isce3.cuda.focus.backproject', 'isce.cuda.focus.backproject', (['out', 'out_geometry', 'signal_data', 'in_geometry', 'dem', 'center_frequency', 'azimuth_res', 'kernel', 'dry_tropo_model'], {}), '(out, out_geometry, signal_data, in_geometry,\n dem, center_frequency, azimuth_res, kernel, dry_tropo_model)\n', (2082, 2192), True, 'import pybind_isce3 as isce\n'), ((2279, 2313), 'numpy.array', 'np.array', (['out_geometry.slant_range'], {}), '(out_geometry.slant_range)\n', (2287, 2313), True, 'import numpy as np\n'), ((2325, 2347), 'numpy.exp', 'np.exp', 
(['(-1.0j * kr * r)'], {}), '(-1.0j * kr * r)\n', (2331, 2347), True, 'import numpy as np\n'), ((2358, 2453), 'pybind_nisar.workflows.point_target_info.analyze_point_target', 'analyze_point_target', (['out', '(nchip // 2)', '(nchip // 2)'], {'nov': 'upsample_factor', 'chipsize': '(nchip // 2)'}), '(out, nchip // 2, nchip // 2, nov=upsample_factor,\n chipsize=nchip // 2)\n', (2378, 2453), False, 'from pybind_nisar.workflows.point_target_info import analyze_point_target, tofloatvals\n'), ((2460, 2477), 'pybind_nisar.workflows.point_target_info.tofloatvals', 'tofloatvals', (['info'], {}), '(info)\n', (2471, 2477), False, 'from pybind_nisar.workflows.point_target_info import analyze_point_target, tofloatvals\n'), ((457, 476), 'pathlib.Path', 'Path', (['test_data_dir'], {}), '(test_data_dir)\n', (461, 476), False, 'from pathlib import Path\n'), ((2519, 2545), 'json.dumps', 'json.dumps', (['info'], {'indent': '(2)'}), '(info, indent=2)\n', (2529, 2545), False, 'import json\n'), ((2912, 2931), 'numpy.linalg.norm', 'np.linalg.norm', (['vel'], {}), '(vel)\n', (2926, 2931), True, 'import numpy as np\n'), ((2989, 3008), 'numpy.linalg.norm', 'np.linalg.norm', (['vel'], {}), '(vel)\n', (3003, 3008), True, 'import numpy as np\n')] |
"""
This script reproduces Fig. 1 of Izhikevich (2004).
Original implementation references:
Izhikevich E.M. (2004) Which Model to Use for Cortical Spiking Neurons?
IEEE Transactions on Neural Networks, 15:1063-1070 (special issue on temporal coding)
Izhikevich E.M. (2003) Simple Model of Spiking Neurons.
IEEE Transactions on Neural Networks, 14:1569- 1572
http://www.izhikevich.org/publications/whichmod.htm
http://izhikevich.org/publications/figure1.m
See http://www.opensourcebrain.org/projects/izhikevichmodel/wiki for info on issues with the current implementation.
Usage: python izhikevich2004.py <simulator>
where <simulator> is neuron, nest, brian, or another PyNN backend simulator
Requirements: PyNN 0.8 and one or more PyNN-supported simulators
Units: all times are in milliseconds, voltages in millivolts and currents in nanoamps
Version 0.1 - original script written by <NAME> during Google Summer of Code 2013
Version 0.2 - script condensed and updated to use latest development version of PyNN by <NAME>, February and September 2014
:copyright: Copyright 2013-2014 <NAME>, <NAME> and <NAME>
:license: Modified BSD, see LICENSE for details.
"""
from __future__ import division
import os
import numpy as np
import matplotlib
# Select the non-interactive Agg backend so the script can run headless;
# this must happen before pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from pyNN.utility import get_simulator, normalized_filename
# Integration time step in ms, shared by all simulations and waveform builders.
global_time_step = 0.01
# Compact plot styling for the 5x4 panel grid.
plt.rcParams.update({
    'lines.linewidth': 0.5,
    'legend.fontsize': 'small',
    'axes.titlesize': 'small',
    'font.size': 6,
    'savefig.dpi': 200,
})
def run_simulation(time_step=global_time_step, a=0.02, b=0.2, c=-65.0, d=6.0,
                   u_init=None, v_init=-70.0, waveform=None, t_stop=100.0,
                   title="", scalebar_level=0, label_scalebar=False,
                   save_data=False):
    """
    Run a simulation of a single Izhikevich neuron and draw the result into
    the next free panel of the global figure grid.

    Arguments:
    time_step - time step used in solving the differential equations
    a - time scale of the recovery variable u
    b - sensitivity of u to the subthreshold fluctuations of the membrane potential v
    c - after-spike reset value of v
    d - after-spike reset of u
    u_init - initial value of u (defaults to b * v_init)
    v_init - initial value of v
    waveform - a tuple of two NumPy arrays, containing time and amplitude data for the injected current
    t_stop - duration of the simulation
    title - a title to be added to the figure panel for this simulation
    scalebar_level - a value between 0 and 1, controlling the vertical placement of the scalebar
    label_scalebar - True or False, whether to add a label to the scalebar
    save_data - if True, dump the recorded membrane potential to results/<title>_<simulator>.dat

    Side effects: advances the global panel counter ``j`` and draws into the
    global figure ``fig`` via gridspec ``gs``.
    """
    global j, fig, gs
    # create a neuron and current source
    sim.setup(timestep=time_step)
    if u_init is None:
        u_init = b * v_init
    initialValues = {'u': u_init, 'v': v_init}
    cell_type = sim.Izhikevich(a=a, b=b, c=c, d=d, i_offset=0.0)
    neuron = sim.create(cell_type)
    neuron.initialize(**initialValues)
    neuron.record('v')
    times, amps = waveform
    injectedCurrent = sim.StepCurrentSource(times=times, amplitudes=amps)
    injectedCurrent.inject_into(neuron)
    # run the simulation and retrieve the recorded data
    sim.run(t_stop)
    data = neuron.get_data().segments[0]
    # plot the membrane potential and injected current
    # Panels fill the 5x4 grid row-major; each panel is split 8:1 into a
    # voltage trace (top) and the injected-current trace (bottom).
    gs1 = gridspec.GridSpecFromSubplotSpec(2, 1,
                                           subplot_spec=gs[j//4, j%4],
                                           height_ratios=[8, 1],
                                           hspace=0.0)
    ax1 = plt.subplot(gs1[0])
    ax2 = plt.subplot(gs1[1])
    j += 1
    for ax in (ax1, ax2):
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.spines['left'].set_color('None')
        ax.spines['right'].set_color('None')
        ax.spines['bottom'].set_color('None')
        ax.spines['top'].set_color('None')
        ax.set_xlim(0.0, t_stop)
    ax1.set_title(title)
    vm = data.filter(name='v')[0]
    i_times, i_vars = stepify(times, amps)
    ax1.plot(vm.times, vm)
    ax1.set_ylim(-90, 30)
    ax2.plot(i_times, i_vars, 'g')
    ymin, ymax = amps.min(), amps.max()
    padding = (ymax - ymin)/10
    ax2.set_ylim(ymin - padding, ymax + padding)
    # scale bar
    scalebar_y = ymin + (ymax - ymin) * scalebar_level
    ax2.plot([t_stop - 20, t_stop], [scalebar_y, scalebar_y],
             color='k', linestyle='-', linewidth=1)
    if label_scalebar:
        # NOTE(review): ``ax`` here is the loop variable left over from the
        # styling loop above (i.e. ax2); using ax2 explicitly would be clearer.
        ax.text(t_stop, ymin + padding, "20 ms", fontsize=4, horizontalalignment='right')
    plt.show(block=False)
    fig.canvas.draw()
    if save_data:
        datfilename = "results/%s_%s.dat" % (title.replace("(","").replace(")","").replace(" ","_"),options.simulator)
        datfile = open(datfilename,'w')
        for i in range(len(vm)):
            datfile.write('%s\t%s\n'%(vm.times[i].magnitude,vm[i][0].magnitude))
        datfile.close()
        print(' Saved data to %s'%datfilename)
def step(amplitude, t_stop):
    """
    Build the (times, amplitudes) waveform for a current step.

    The current is zero from t=0 and jumps to *amplitude* at t_stop/10,
    staying at that level until t_stop.
    """
    onset = t_stop / 10
    step_times = np.array([0, onset, t_stop])
    step_amps = np.array([0, amplitude, amplitude])
    return step_times, step_amps
def pulse(amplitude, onsets, width, t_stop, baseline=0.0):
    """
    Build the (times, amplitudes) waveform for a train of rectangular pulses.

    Arguments:
        amplitude - current value during each pulse
        onsets - iterable of times at which pulses begin
        width - duration of each pulse
        t_stop - total duration of the waveform
        baseline - current value before, between and after the pulses
    """
    t_points = [0]
    i_points = [baseline]
    for start in onsets:
        t_points.extend([start, start + width])
        i_points.extend([amplitude, baseline])
    t_points.append(t_stop)
    i_points.append(baseline)
    return np.array(t_points), np.array(i_points)
def ramp(gradient, onset, t_stop, baseline=0.0, time_step=global_time_step, t_start=0.0):
    """
    Build the (times, amplitudes) waveform for a current that sits at
    *baseline* until *onset* and then grows linearly with slope *gradient*.

    Arguments:
        gradient - slope of the ramp
        onset - time at which the ramp begins
        t_stop - total duration of the waveform
        baseline - current value before the ramp
        time_step - interval between successive samples along the ramp
        t_start - time at which the waveform begins (used to construct
                  waveforms containing multiple ramps)
    """
    if onset > t_start:
        # Two samples cover the flat stretch; the ramp itself is sampled
        # every time_step from just after onset through t_stop.
        flat_part = np.array((t_start, onset))
        ramp_part = np.arange(onset + time_step, t_stop + time_step, time_step)
        times = np.hstack((flat_part, ramp_part))
    else:
        times = np.arange(t_start, t_stop + time_step, time_step)
    amps = baseline + gradient * (times - onset) * (times > onset)
    return times, amps
def stepify(times, values):
    """
    Convert a sampled time series into an explicitly stepped (staircase)
    version, suitable for plotting piecewise-constant signals.
    """
    n = times.size
    stepped_t = np.empty((2 * n - 1,))
    stepped_v = np.empty_like(stepped_t)
    # Duplicate every interior time point so each value is held flat until
    # the next sample.
    stepped_t[0::2] = times
    stepped_t[1::2] = times[1:]
    stepped_v[0::2] = values
    stepped_v[1::2] = values[:-1]
    return stepped_t, stepped_v
# == Get command-line options, import simulator backend =====================
sim, options = get_simulator()
# == Initialize figure ======================================================
# ``j`` indexes the current panel of the 5x4 grid; run_simulation() reads and
# increments it as a global.
j = 0
plt.ion()
fig = plt.figure(1, facecolor='white', figsize=(6, 6))
gs = gridspec.GridSpec(5, 4)
gs.update(hspace=0.5, wspace=0.4)
# == Sub-plot A: Tonic spiking ==============================================
t_stop = 100.0
run_simulation(a=0.02, b=0.2, c=-65.0, d=6.0, v_init=-70.0,
               waveform=step(0.014, t_stop),
               t_stop=t_stop, title='(A) Tonic spiking',
               label_scalebar=True, save_data=True)
# == Sub-plot B: Phasic spiking =============================================
t_stop = 200.0
run_simulation(a=0.02, b=0.25, c=-65.0, d=6.0, v_init=-64.0,
               waveform=step(0.0005, t_stop),
               t_stop=t_stop, title='(B) Phasic spiking')
# == Sub-plot C: Tonic bursting =============================================
# Bug fix: this line previously read "_stop = 220.0", which left t_stop at the
# 200.0 value from sub-plot B, so panel C simulated the wrong duration.
# Izhikevich's reference figure1.m uses 220 ms for tonic bursting.
t_stop = 220.0
run_simulation(a=0.02, b=0.2, c=-50.0, d=2.0, v_init=-70.0,
               waveform=step(0.015, t_stop),
               t_stop=t_stop, title='(C) Tonic bursting', save_data=True)
# == Sub-plot D: Phasic bursting ============================================
t_stop = 200.0
run_simulation(a=0.02, b=0.25, c=-55.0, d=0.05, v_init=-64.0,
               waveform=step(0.0006, t_stop),
               t_stop=t_stop, title='(D) Phasic bursting')
# == Sub-plot E: Mixed mode =================================================
t_stop = 160.0
run_simulation(a=0.02, b=0.2, c=-55.0, d=4.0, v_init=-70.0,
               waveform=step(0.01, t_stop),
               t_stop=t_stop, title='(E) Mixed mode')
# == Sub-plot F: Spike Frequency Adaptation (SFA) ===========================
t_stop = 85.0
run_simulation(a=0.01, b=0.2, c=-65.0, d=8.0, v_init=-70.0,
               waveform=step(0.03, t_stop),
               t_stop=t_stop, title='(F) SFA')
# == Sub-plot G: Class 1 excitable ==========================================
'''
Note: This simulation is supposed to use a different parameterization of the
model, i.e.
V' = tau*(0.04*V^2 + 4.1*V + 108 -u + I)
as opposed to
V' = tau*(0.04*V^2 + 5*V + 140 - u + I)
The alternative parameterization is not currently available in PyNN, therefore
the results of this simulation are not expected to match the original figure.
'''
t_stop = 300.0
run_simulation(a=0.02, b=0.2, c=-65.0, d=6.0, v_init=-70.0,
               waveform=ramp(0.000075, 30.0, t_stop),
               t_stop=t_stop, title='(G) Class 1 excitable')
# == Sub-plot H: Class 2 excitable ==========================================
t_stop = 300.0
run_simulation(a=0.2, b=0.26, c=-65.0, d=0.0, v_init=-64.0,
               waveform=ramp(0.000015, 30.0, t_stop, baseline=-0.0005),
               t_stop=t_stop, title='(H) Class 2 excitable')
# == Sub-plot I: Spike latency ==============================================
t_stop = 100.0
run_simulation(a=0.02, b=0.2, c=-65.0, d=6.0, v_init=-70.0,
               waveform=pulse(0.00671, # 0.00704 in original
                              [10], 3, t_stop),
               t_stop=t_stop, title='(I) Spike latency',
               scalebar_level=0.5)
# == Sub-plot J: Subthreshold oscillation ===================================
t_stop = 200.0
run_simulation(a=0.05, b=0.26, c=-60.0, d=0.0, v_init=-62.0,
               waveform=pulse(0.002, [20], 5, t_stop),
               t_stop=t_stop, title='(J) Subthreshold oscillation',
               scalebar_level=0.5)
# == Sub-plot K: Resonator ==================================================
t_stop = 400.0
# Four brief pulses: a closely spaced pair early in the run and a more widely
# spaced pair later.
T1 = t_stop / 10
T2 = T1 + 20
T3 = 0.7 * t_stop
T4 = T3 + 40
run_simulation(a=0.1, b=0.26, c=-60.0, d=-1.0, v_init=-62.0,
               waveform=pulse(0.00065, [T1, T2, T3, T4], 4, t_stop),
               t_stop=t_stop, title='(K) Resonator',
               scalebar_level=0.5)
# == Sub-plot L: Integrator =================================================
'''
Note: This simulation is supposed to use a different parameterization of the
model, i.e.
V' = tau*(0.04*V^2 + 4.1*V + 108 -u + I)
as opposed to
V' = tau*(0.04*V^2 + 5*V + 140 - u + I)
The alternative parameterization is not currently available in PyNN, therefore
the results of this simulation are not expected to match the original figure.
'''
t_stop = 100.0
T1 = t_stop / 11
T2 = T1 + 5
T3 = 0.7 * t_stop
T4 = T3 + 10
run_simulation(a=0.02, b=-0.1, c=-55.0, d=6.0, v_init=-60.0,
               waveform=pulse(0.009, [T1, T2, T3, T4], 2, t_stop),
               t_stop=t_stop, title='(L) Integrator',
               scalebar_level=0.5)
# == Sub-plot M: Rebound spike ==============================================
t_stop = 200.0
run_simulation(a=0.03, b=0.25, c=-60.0, d=4.0, v_init=-64.0,
               waveform=pulse(-0.015, [20], 5, t_stop),
               t_stop=t_stop, title='(M) Rebound spike')
# == Sub-plot N: Rebound burst ==============================================
t_stop = 200.0
run_simulation(a=0.03, b=0.25, c=-52.0, d=0.0, v_init=-64.0,
               waveform=pulse(-0.015, [20], 5, t_stop),
               t_stop=t_stop, title='(N) Rebound burst')
# == Sub-plot O: Threshold variability ======================================
t_stop = 100.0
# Hand-built waveform: two brief excitatory blips separated by an inhibitory dip.
times = np.array([0, 10, 15, 70, 75, 80, 85, t_stop])
amps = np.array([0, 0.001, 0, -0.006, 0, 0.001, 0, 0])
run_simulation(a=0.03, b=0.25, c=-60.0, d=4.0, v_init=-64.0,
               waveform=(times, amps),
               t_stop=t_stop, title='(O) Threshold variability')
# == Sub-plot P: Bistability ================================================
t_stop = 300.0
T1 = t_stop/8
T2 = 208 # 216.0 in original
run_simulation(a=0.1, b=0.26, c=-60.0, d=0.0, v_init=-61.0,
               waveform=pulse(0.00124, [T1, T2], 5, t_stop, baseline=0.00024),
               t_stop=t_stop, title='(P) Bistability',
               scalebar_level=0.5)
# == Sub-plot Q: Depolarizing after-potential ===============================
t_stop = 50.0
run_simulation(a=1.0, b=0.18, # 0.2 in original
               c=-60.0, d=-21.0, v_init=-70.0,
               waveform=pulse(0.02, [9], 2, t_stop),
               t_stop=t_stop, title='(Q) DAP',
               scalebar_level=0.5)
# == Sub-plot R: Accomodation ===============================================
'''
Note: This simulation is supposed to use a different parameterization of the
model, i.e.
u' = tau*a*(b*(V + 65))
as opposed to
u' = tau*a*(b*V - u)
The alternative parameterization is not currently available in PyNN, therefore
the results of this simulation are not expected to match the original figure.
'''
t_stop = 400.0
# Piecewise waveform: slow ramp, flat zero stretch, fast short ramp, then zero.
parts = (ramp(0.00004, 0.0, 200.0),
         (np.array([200.0 + global_time_step, 300.0 - global_time_step]), np.array([0.0, 0.0])),
         ramp(0.00032, 300.0, 312.5, t_start=300.0),
         (np.array([312.5 + global_time_step, t_stop]), np.array([0.0, 0.0])))
totalTimes, totalAmps = np.hstack(parts)
run_simulation(a=0.02, b=1.0, c=-55.0, d=4.0, v_init=-65.0, u_init=-16.0,
               waveform=(totalTimes, totalAmps),
               t_stop=t_stop, title='(R) Accomodation',
               scalebar_level=0.5)
# == Sub-plot S: Inhibition-induced spiking =================================
t_stop = 350.0
run_simulation(a=-0.02, b=-1.0, c=-60.0, d=8.0, v_init=-63.8,
               waveform=pulse(0.075, [50], 170, # 200 in original
                              t_stop, baseline=0.08),
               t_stop=t_stop, title='(S) Inhibition-induced spiking')
# == Sub-plot T: Inhibition-induced bursting ================================
'''
Modifying parameter d from -2.0 to -0.7 in order to reproduce Fig. 1
'''
t_stop = 350.0
run_simulation(a=-0.026, b=-1.0, c=-45.0, d=-0.7, v_init=-63.8,
               waveform=pulse(0.075, [50], 200, t_stop, baseline=0.08),
               t_stop=t_stop, title='(T) Inhibition-induced bursting')
# == Export figure in PNG format ============================================
filename = normalized_filename("results", "izhikevich2004", "png", options.simulator)
try:
    # ignore the OSError raised when, e.g., the results directory already exists
    os.makedirs(os.path.dirname(filename))
except OSError:
    pass
fig.savefig(filename)
print("\n Simulation complete. Results can be seen in figure at %s\n"%(filename))
| [
"matplotlib.pyplot.show",
"numpy.hstack",
"matplotlib.use",
"pyNN.utility.normalized_filename",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.array",
"numpy.empty",
"numpy.empty_like",
"os.path.dirname",
"pyNN.utility.get_simulator",
... | [((1351, 1372), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1365, 1372), False, 'import matplotlib\n'), ((1539, 1679), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'lines.linewidth': 0.5, 'legend.fontsize': 'small', 'axes.titlesize':\n 'small', 'font.size': 6, 'savefig.dpi': 200}"], {}), "({'lines.linewidth': 0.5, 'legend.fontsize': 'small',\n 'axes.titlesize': 'small', 'font.size': 6, 'savefig.dpi': 200})\n", (1558, 1679), True, 'import matplotlib.pyplot as plt\n'), ((7690, 7705), 'pyNN.utility.get_simulator', 'get_simulator', ([], {}), '()\n', (7703, 7705), False, 'from pyNN.utility import get_simulator, normalized_filename\n'), ((7797, 7806), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (7804, 7806), True, 'import matplotlib.pyplot as plt\n'), ((7814, 7862), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'facecolor': '"""white"""', 'figsize': '(6, 6)'}), "(1, facecolor='white', figsize=(6, 6))\n", (7824, 7862), True, 'import matplotlib.pyplot as plt\n'), ((7869, 7892), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(5)', '(4)'], {}), '(5, 4)\n', (7886, 7892), True, 'import matplotlib.gridspec as gridspec\n'), ((13055, 13100), 'numpy.array', 'np.array', (['[0, 10, 15, 70, 75, 80, 85, t_stop]'], {}), '([0, 10, 15, 70, 75, 80, 85, t_stop])\n', (13063, 13100), True, 'import numpy as np\n'), ((13109, 13156), 'numpy.array', 'np.array', (['[0, 0.001, 0, -0.006, 0, 0.001, 0, 0]'], {}), '([0, 0.001, 0, -0.006, 0, 0.001, 0, 0])\n', (13117, 13156), True, 'import numpy as np\n'), ((14792, 14808), 'numpy.hstack', 'np.hstack', (['parts'], {}), '(parts)\n', (14801, 14808), True, 'import numpy as np\n'), ((15873, 15947), 'pyNN.utility.normalized_filename', 'normalized_filename', (['"""results"""', '"""izhikevich2004"""', '"""png"""', 'options.simulator'], {}), "('results', 'izhikevich2004', 'png', options.simulator)\n", (15892, 15947), False, 'from pyNN.utility import get_simulator, 
normalized_filename\n'), ((3549, 3657), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(1)'], {'subplot_spec': 'gs[j // 4, j % 4]', 'height_ratios': '[8, 1]', 'hspace': '(0.0)'}), '(2, 1, subplot_spec=gs[j // 4, j % 4],\n height_ratios=[8, 1], hspace=0.0)\n', (3581, 3657), True, 'import matplotlib.gridspec as gridspec\n'), ((3793, 3812), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0]'], {}), '(gs1[0])\n', (3804, 3812), True, 'import matplotlib.pyplot as plt\n'), ((3824, 3843), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[1]'], {}), '(gs1[1])\n', (3835, 3843), True, 'import matplotlib.pyplot as plt\n'), ((4827, 4848), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4835, 4848), True, 'import matplotlib.pyplot as plt\n'), ((5442, 5476), 'numpy.array', 'np.array', (['[0, t_stop / 10, t_stop]'], {}), '([0, t_stop / 10, t_stop])\n', (5450, 5476), True, 'import numpy as np\n'), ((5487, 5522), 'numpy.array', 'np.array', (['[0, amplitude, amplitude]'], {}), '([0, amplitude, amplitude])\n', (5495, 5522), True, 'import numpy as np\n'), ((7355, 7386), 'numpy.empty', 'np.empty', (['(2 * times.size - 1,)'], {}), '((2 * times.size - 1,))\n', (7363, 7386), True, 'import numpy as np\n'), ((7403, 7427), 'numpy.empty_like', 'np.empty_like', (['new_times'], {}), '(new_times)\n', (7416, 7427), True, 'import numpy as np\n'), ((6199, 6214), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (6207, 6214), True, 'import numpy as np\n'), ((6216, 6230), 'numpy.array', 'np.array', (['amps'], {}), '(amps)\n', (6224, 6230), True, 'import numpy as np\n'), ((7085, 7134), 'numpy.arange', 'np.arange', (['t_start', '(t_stop + time_step)', 'time_step'], {}), '(t_start, t_stop + time_step, time_step)\n', (7094, 7134), True, 'import numpy as np\n'), ((14546, 14608), 'numpy.array', 'np.array', (['[200.0 + global_time_step, 300.0 - global_time_step]'], {}), '([200.0 + global_time_step, 300.0 - 
global_time_step])\n', (14554, 14608), True, 'import numpy as np\n'), ((14610, 14630), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (14618, 14630), True, 'import numpy as np\n'), ((14698, 14742), 'numpy.array', 'np.array', (['[312.5 + global_time_step, t_stop]'], {}), '([312.5 + global_time_step, t_stop])\n', (14706, 14742), True, 'import numpy as np\n'), ((14744, 14764), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (14752, 14764), True, 'import numpy as np\n'), ((15971, 15996), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (15986, 15996), False, 'import os\n'), ((6913, 6939), 'numpy.array', 'np.array', (['(t_start, onset)'], {}), '((t_start, onset))\n', (6921, 6939), True, 'import numpy as np\n'), ((6982, 7041), 'numpy.arange', 'np.arange', (['(onset + time_step)', '(t_stop + time_step)', 'time_step'], {}), '(onset + time_step, t_stop + time_step, time_step)\n', (6991, 7041), True, 'import numpy as np\n')] |
import requests
import re
import time
import datetime
import numpy as np
"""
Prerequisite: Create access tokens
You need private access token to have full access to Github search
API.
Generate your access token in [here](https://github.com/settings/tokens)
you don't need to tick on any access permission because you are not
modifying your private repositories.
"""
# input your token here
token = "e6a9b0b2c3598c64aa84add48e13ab94c43c978c"
def extract_repo(result):
    """Yield the 'url' field of every item in a Github search API response."""
    items = result.json()["items"]
    return (entry["url"] for entry in items)
def query_backoff(*args, **argv):
    """
    Issue ``requests.get`` with exponential backoff.

    Tries up to ``max_tries`` times, doubling the wait between attempts, and
    returns the first response whose HTTP status is 200.

    Raises:
        RuntimeError: if every attempt fails.  (The original code fell off
        the end and implicitly returned None here, which made callers fail
        later with an opaque AttributeError.)
    """
    max_tries = 5
    wait = 120
    for _ in range(max_tries):
        r = requests.get(*args, **argv)
        if r.status_code == 200:
            return r
        print("Query failed. Wait %d secs and try again: %s" % (wait, r.content))
        time.sleep(wait)
        wait *= 2
    raise RuntimeError("Query failed after %d attempts" % max_tries)
def retrieve_matched_repo(query, num, from_year, to_year=None, n_per_query=5):
    """
    Sample roughly *num* Github repositories matching *query*.

    Draws num // n_per_query random dates between Jan 1 of *from_year* and
    *to_year* (today when None), searches for repositories created in the
    week ending on each date, and keeps at most *n_per_query* results per
    search.  Returns a list of repository API URLs.
    """
    headers = {'Authorization': 'token %s' % token}
    base_url = 'https://api.github.com/'
    from_date = datetime.date(from_year, 1, 1)
    if to_year is None:
        to_date = datetime.date.today()
    else:
        to_date = datetime.date(to_year, 1, 1)
    date_diff = (to_date - from_date).days
    date_list = [from_date + datetime.timedelta(days=d) for d in np.random.choice(date_diff, size=(num // n_per_query, ))]
    repos = []
    for date in date_list:
        yestarday = date - datetime.timedelta(days=7)
        payload = {
            'q': query +
            " sort:updated" +
            " created:%d-%02d-%02d..%d-%02d-%02d" % (yestarday.year, yestarday.month, yestarday.day, date.year, date.month, date.day)}
        # github block similar queries, so take extra intervals
        time.sleep(20)
        # NOTE(review): this bypasses the query_backoff() helper defined above,
        # so a rate-limited or failed request is not retried here.
        r = requests.get(base_url + 'search/repositories', params=payload, headers=headers)
        repos.extend(list(extract_repo(r))[:n_per_query])
    return repos
# Sample ~200 repositories per framework and dump one repository API URL per
# line into a text file.
result = retrieve_matched_repo('tensorflow language:python', 200, 2015)
with open("found_all_tensorflow.txt", 'w') as fout:
    for r in result:
        fout.write(r + '\n')
result = retrieve_matched_repo('pytorch language:python', 200, 2017)
with open("found_all_pytorch.txt", 'w') as fout:
    for r in result:
        fout.write(r + '\n')
| [
"numpy.random.choice",
"time.sleep",
"requests.get",
"datetime.timedelta",
"datetime.date",
"datetime.date.today"
] | [((1038, 1068), 'datetime.date', 'datetime.date', (['from_year', '(1)', '(1)'], {}), '(from_year, 1, 1)\n', (1051, 1068), False, 'import datetime\n'), ((641, 668), 'requests.get', 'requests.get', (['*args'], {}), '(*args, **argv)\n', (653, 668), False, 'import requests\n'), ((813, 829), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (823, 829), False, 'import time\n'), ((1111, 1132), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1130, 1132), False, 'import datetime\n'), ((1161, 1189), 'datetime.date', 'datetime.date', (['to_year', '(1)', '(1)'], {}), '(to_year, 1, 1)\n', (1174, 1189), False, 'import datetime\n'), ((1735, 1749), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (1745, 1749), False, 'import time\n'), ((1762, 1841), 'requests.get', 'requests.get', (["(base_url + 'search/repositories')"], {'params': 'payload', 'headers': 'headers'}), "(base_url + 'search/repositories', params=payload, headers=headers)\n", (1774, 1841), False, 'import requests\n'), ((1262, 1288), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'd'}), '(days=d)\n', (1280, 1288), False, 'import datetime\n'), ((1298, 1353), 'numpy.random.choice', 'np.random.choice', (['date_diff'], {'size': '(num // n_per_query,)'}), '(date_diff, size=(num // n_per_query,))\n', (1314, 1353), True, 'import numpy as np\n'), ((1425, 1451), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (1443, 1451), False, 'import datetime\n')] |
# The following parts are originally part of scikit-bio, with copyright notice
# as reproduced below. The original COPYING.txt file can be found under
# licenses/scikit-bio.txt.
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from composition_stats import (closure, multiplicative_replacement, perturb,
perturb_inv, power, inner, clr, clr_inv, ilr,
ilr_inv, alr, alr_inv, sbp_basis,
_gram_schmidt_basis, center, centralize)
class CompositionTests(TestCase):
def setUp(self):
# Compositional data
self.cdata1 = np.array([[2, 2, 6],
[4, 4, 2]])
self.cdata2 = np.array([2, 2, 6])
self.cdata3 = np.array([[1, 2, 3, 0, 5],
[1, 0, 0, 4, 5],
[1, 2, 3, 4, 5]])
self.cdata4 = np.array([1, 2, 3, 0, 5])
self.cdata5 = [[2, 2, 6], [4, 4, 2]]
self.cdata6 = [[1, 2, 3, 0, 5],
[1, 0, 0, 4, 5],
[1, 2, 3, 4, 5]]
self.cdata7 = [np.exp(1), 1, 1]
self.cdata8 = [np.exp(1), 1, 1, 1]
# Simplicial orthonormal basis obtained from Gram-Schmidt
self.ortho1 = [[0.44858053, 0.10905743, 0.22118102, 0.22118102],
[0.3379924, 0.3379924, 0.0993132, 0.22470201],
[0.3016453, 0.3016453, 0.3016453, 0.09506409]]
# Real data
self.rdata1 = [[0.70710678, -0.70710678, 0., 0.],
[0.40824829, 0.40824829, -0.81649658, 0.],
[0.28867513, 0.28867513, 0.28867513, -0.8660254]]
# Bad datasets
# negative count
self.bad1 = np.array([1, 2, -1])
# zero count
self.bad2 = np.array([[[1, 2, 3, 0, 5]]])
def test_closure(self):
npt.assert_allclose(closure(self.cdata1),
np.array([[.2, .2, .6],
[.4, .4, .2]]))
npt.assert_allclose(closure(self.cdata2),
np.array([.2, .2, .6]))
npt.assert_allclose(closure(self.cdata5),
np.array([[.2, .2, .6],
[.4, .4, .2]]))
with self.assertRaises(ValueError):
closure(self.bad1)
with self.assertRaises(ValueError):
closure(self.bad2)
# make sure that inplace modification is not occurring
closure(self.cdata2)
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_closure_warning(self):
with self.assertRaises(ValueError):
closure([0., 0., 0.])
with self.assertRaises(ValueError):
closure([[0., 0., 0.],
[0., 5., 5.]])
def test_perturb(self):
pmat = perturb(closure(self.cdata1),
closure(np.array([1, 1, 1])))
npt.assert_allclose(pmat,
np.array([[.2, .2, .6],
[.4, .4, .2]]))
pmat = perturb(closure(self.cdata1),
closure(np.array([10, 10, 20])))
npt.assert_allclose(pmat,
np.array([[.125, .125, .75],
[1./3, 1./3, 1./3]]))
pmat = perturb(closure(self.cdata1),
closure(np.array([10, 10, 20])))
npt.assert_allclose(pmat,
np.array([[.125, .125, .75],
[1./3, 1./3, 1./3]]))
pmat = perturb(closure(self.cdata2),
closure([1, 2, 1]))
npt.assert_allclose(pmat, np.array([1./6, 2./6, 3./6]))
pmat = perturb(closure(self.cdata5),
closure(np.array([1, 1, 1])))
npt.assert_allclose(pmat,
np.array([[.2, .2, .6],
[.4, .4, .2]]))
# make sure that inplace modification is not occurring
perturb(closure(self.cdata2), closure([1, 2, 3]))
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_power(self):
pmat = power(closure(self.cdata1), 2)
npt.assert_allclose(pmat,
np.array([[.04/.44, .04/.44, .36/.44],
[.16/.36, .16/.36, .04/.36]]))
pmat = power(closure(self.cdata2), 2)
npt.assert_allclose(pmat, np.array([.04, .04, .36])/.44)
pmat = power(closure(self.cdata5), 2)
npt.assert_allclose(pmat,
np.array([[.04/.44, .04/.44, .36/.44],
[.16/.36, .16/.36, .04/.36]]))
# make sure that inplace modification is not occurring
power(closure(self.cdata2), 4)
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_perturb_inv(self):
pmat = perturb_inv(closure(self.cdata1),
closure([.1, .1, .1]))
imat = perturb(closure(self.cdata1),
closure([10, 10, 10]))
npt.assert_allclose(pmat, imat)
pmat = perturb_inv(closure(self.cdata1),
closure([1, 1, 1]))
npt.assert_allclose(pmat,
closure([[.2, .2, .6],
[.4, .4, .2]]))
pmat = perturb_inv(closure(self.cdata5),
closure([.1, .1, .1]))
imat = perturb(closure(self.cdata1), closure([10, 10, 10]))
npt.assert_allclose(pmat, imat)
# make sure that inplace modification is not occurring
perturb_inv(closure(self.cdata2), closure([1, 2, 3]))
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_inner(self):
a = inner(closure(self.cdata5), closure(self.cdata5))
npt.assert_allclose(a, np.array([[0.80463264, -0.50766667],
[-0.50766667, 0.32030201]]))
b = inner(closure(self.cdata7), closure(self.cdata7))
npt.assert_allclose(b, 0.66666666666666663)
# Make sure that orthogonality holds
npt.assert_allclose(inner(closure(self.ortho1), closure(self.ortho1)),
np.identity(3), rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
inner(closure(self.cdata1), closure(self.cdata8))
# make sure that inplace modification is not occurring
inner(closure(self.cdata1), closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_multiplicative_replacement(self):
amat = multiplicative_replacement(closure(self.cdata3))
npt.assert_allclose(amat,
np.array([[0.087273, 0.174545, 0.261818,
0.04, 0.436364],
[0.092, 0.04, 0.04, 0.368, 0.46],
[0.066667, 0.133333, 0.2,
0.266667, 0.333333]]),
rtol=1e-5, atol=1e-5)
amat = multiplicative_replacement(closure(self.cdata4))
npt.assert_allclose(amat,
np.array([0.087273, 0.174545, 0.261818,
0.04, 0.436364]),
rtol=1e-5, atol=1e-5)
amat = multiplicative_replacement(closure(self.cdata6))
npt.assert_allclose(amat,
np.array([[0.087273, 0.174545, 0.261818,
0.04, 0.436364],
[0.092, 0.04, 0.04, 0.368, 0.46],
[0.066667, 0.133333, 0.2,
0.266667, 0.333333]]),
rtol=1e-5, atol=1e-5)
# make sure that inplace modification is not occurring
multiplicative_replacement(closure(self.cdata4))
npt.assert_allclose(self.cdata4, np.array([1, 2, 3, 0, 5]))
def multiplicative_replacement_warning(self):
with self.assertRaises(ValueError):
multiplicative_replacement(closure([0, 1, 2]), delta=1)
def test_clr(self):
cmat = clr(closure(self.cdata1))
A = np.array([.2, .2, .6])
B = np.array([.4, .4, .2])
npt.assert_allclose(cmat,
[np.log(A / np.exp(np.log(A).mean())),
np.log(B / np.exp(np.log(B).mean()))])
cmat = clr(closure(self.cdata2))
A = np.array([.2, .2, .6])
npt.assert_allclose(cmat,
np.log(A / np.exp(np.log(A).mean())))
cmat = clr(closure(self.cdata5))
A = np.array([.2, .2, .6])
B = np.array([.4, .4, .2])
npt.assert_allclose(cmat,
[np.log(A / np.exp(np.log(A).mean())),
np.log(B / np.exp(np.log(B).mean()))])
# make sure that inplace modification is not occurring
clr(closure(self.cdata2))
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_clr_inv(self):
npt.assert_allclose(clr_inv(self.rdata1), self.ortho1)
npt.assert_array_almost_equal(clr(clr_inv(self.rdata1)), self.rdata1)
# make sure that inplace modification is not occurring
clr_inv(self.rdata1)
npt.assert_allclose(self.rdata1,
np.array([[0.70710678, -0.70710678, 0., 0.],
[0.40824829, 0.40824829,
-0.81649658, 0.],
[0.28867513, 0.28867513,
0.28867513, -0.8660254]]))
def test_center(self):
cmat = center(closure(self.cdata1))
npt.assert_allclose(cmat,
np.array([0.31010205, 0.31010205, 0.37979590]))
cmat = center(closure(self.cdata5))
npt.assert_allclose(cmat,
np.array([0.31010205, 0.31010205, 0.37979590]))
# make sure that inplace modification is not occurring
center(closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_centralize(self):
cmat = centralize(closure(self.cdata1))
npt.assert_allclose(cmat,
np.array([[0.22474487, 0.22474487, 0.55051026],
[0.41523958, 0.41523958, 0.16952085]]))
cmat = centralize(closure(self.cdata5))
npt.assert_allclose(cmat,
np.array([[0.22474487, 0.22474487, 0.55051026],
[0.41523958, 0.41523958, 0.16952085]]))
# make sure that inplace modification is not occurring
centralize(closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr(self):
mat = closure(self.cdata7)
npt.assert_array_almost_equal(ilr(mat),
np.array([0.70710678, 0.40824829]))
# Should give same result as inner
npt.assert_allclose(ilr(closure(self.ortho1)), np.identity(3),
rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
ilr(closure(self.cdata1), basis=self.cdata1)
# make sure that inplace modification is not occurring
ilr(closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr_basis(self):
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
basis = np.array([[0.80442968, 0.19557032]])
res = ilr(closure(table), basis=basis)
exp = np.array([np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)])
npt.assert_allclose(res, exp)
def test_ilr_basis_one_dimension_error(self):
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
basis = np.array([0.80442968, 0.19557032])
with self.assertRaises(ValueError):
ilr(closure(table), basis=basis)
def test_ilr_inv(self):
mat = closure(self.cdata7)
npt.assert_array_almost_equal(ilr_inv(ilr(mat)), mat)
npt.assert_allclose(ilr_inv(np.identity(3)), self.ortho1,
rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
ilr_inv(self.cdata1, basis=self.cdata1)
# make sure that inplace modification is not occurring
ilr_inv(self.cdata1)
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr_basis_isomorphism(self):
# tests to make sure that the isomorphism holds
# with the introduction of the basis.
basis = np.array([[0.80442968, 0.19557032]])
table = np.array([[np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)]]).T
res = ilr(ilr_inv(table, basis=basis), basis=basis)
npt.assert_allclose(res, table.squeeze())
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
res = ilr_inv(np.atleast_2d(ilr(closure(table), basis=basis)).T,
basis=basis)
npt.assert_allclose(res, closure(table.squeeze()))
def test_ilr_inv_basis(self):
exp = closure(np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]]))
basis = np.array([[0.80442968, 0.19557032]])
table = np.array([[np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)]]).T
res = ilr_inv(table, basis=basis)
npt.assert_allclose(res, exp)
def test_ilr_inv_basis_one_dimension_error(self):
basis = clr(np.array([[0.80442968, 0.19557032]]))
table = np.array([[np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)]]).T
with self.assertRaises(ValueError):
ilr_inv(table, basis=basis)
def test_alr(self):
# 2d-composition
comp1 = closure(self.cdata1)
alr2d_byhand = np.array([np.log(comp1[:, 0]/comp1[:, 1]),
np.log(comp1[:, 2]/comp1[:, 1])]).T
alr2d_method = alr(comp1, denominator_idx=1)
npt.assert_allclose(alr2d_byhand, alr2d_method)
# 1d-composition
comp2 = closure(self.cdata2)
alr1d_byhand = np.array([np.log(comp2[0]/comp2[1]),
np.log(comp2[2]/comp2[1])]).T
alr1d_method = alr(comp2, denominator_idx=1)
npt.assert_allclose(alr1d_byhand, alr1d_method)
# make sure that inplace modification is not occurring
alr(closure(self.cdata2))
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_alr_inv(self):
# 2d-composition
comp1 = closure(self.cdata1)
alr2d_byhand = np.array([np.log(comp1[:, 0]/comp1[:, 1]),
np.log(comp1[:, 2]/comp1[:, 1])]).T
alr2d_method = alr(comp1, denominator_idx=1)
B = 1/(1 + np.exp(alr2d_byhand[:, 0]) + np.exp(alr2d_byhand[:, 1]))
A = B * np.exp(alr2d_byhand[:, 0])
C = B * np.exp(alr2d_byhand[:, 1])
alrinv2d_byhand = np.column_stack((A, B, C))
alrinv2d_method = alr_inv(alr2d_method, denominator_idx=1)
npt.assert_allclose(alrinv2d_byhand, alrinv2d_method)
# 1d-composition
comp2 = closure(self.cdata2)
alr1d_byhand = np.array([np.log(comp2[0]/comp2[1]),
np.log(comp2[2]/comp2[1])]).T
alr1d_method = alr(comp2, denominator_idx=1)
B = 1/(1 + np.exp(alr1d_byhand[0]) + np.exp(alr1d_byhand[1]))
A = B * np.exp(alr1d_byhand[0])
C = B * np.exp(alr1d_byhand[1])
alrinv1d_byhand = np.column_stack((A, B, C))[0, :]
alrinv1d_method = alr_inv(alr1d_method, denominator_idx=1)
npt.assert_allclose(alrinv1d_byhand, alrinv1d_method)
# make sure that inplace modification is not occurring
alr_inv(self.rdata1)
npt.assert_allclose(self.rdata1,
np.array([[0.70710678, -0.70710678, 0., 0.],
[0.40824829, 0.40824829,
-0.81649658, 0.],
[0.28867513, 0.28867513,
0.28867513, -0.8660254]]))
def test_sbp_basis_gram_schmidt(self):
gsbasis = clr_inv(_gram_schmidt_basis(5))
sbp = np.array([[1, -1, 0, 0, 0],
[1, 1, -1, 0, 0],
[1, 1, 1, -1, 0],
[1, 1, 1, 1, -1]])
sbpbasis = sbp_basis(sbp)
npt.assert_allclose(gsbasis, sbpbasis)
def test_sbp_basis_elementwise(self):
sbp = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1],
[1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 0],
[1, 1, 1, 1, 1, 1, -1, 0, 0, 0, 0, 0],
[1, 1, -1, -1, -1, 1, 0, 0, 0, 0, 0, 0],
[1, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, -1, -1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0]])
sbpbasis = sbp_basis(sbp)
# by hand, element-wise
r = np.apply_along_axis(func1d=lambda x: np.sum(x > 0),
axis=1, arr=sbp)
s = np.apply_along_axis(func1d=lambda x: np.sum(x < 0),
axis=1, arr=sbp)
psi = np.zeros(sbp.shape)
for i in range(0, sbp.shape[0]):
for j in range(0, sbp.shape[1]):
if sbp[i, j] == 1:
psi[i, j] = np.sqrt(s[i]/(r[i]*(r[i]+s[i])))
elif sbp[i, j] == -1:
psi[i, j] = -np.sqrt(r[i]/(s[i]*(r[i]+s[i])))
basis_byhand = clr_inv(psi)
npt.assert_allclose(basis_byhand, sbpbasis)
| [
"numpy.identity",
"numpy.sqrt",
"composition_stats.ilr_inv",
"composition_stats.alr_inv",
"numpy.testing.assert_allclose",
"numpy.log",
"numpy.column_stack",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"composition_stats._gram_schmidt_basis",
"numpy.sum",
"composition_stats.ilr",
"compositi... | [((1010, 1042), 'numpy.array', 'np.array', (['[[2, 2, 6], [4, 4, 2]]'], {}), '([[2, 2, 6], [4, 4, 2]])\n', (1018, 1042), True, 'import numpy as np\n'), ((1097, 1116), 'numpy.array', 'np.array', (['[2, 2, 6]'], {}), '([2, 2, 6])\n', (1105, 1116), True, 'import numpy as np\n'), ((1140, 1201), 'numpy.array', 'np.array', (['[[1, 2, 3, 0, 5], [1, 0, 0, 4, 5], [1, 2, 3, 4, 5]]'], {}), '([[1, 2, 3, 0, 5], [1, 0, 0, 4, 5], [1, 2, 3, 4, 5]])\n', (1148, 1201), True, 'import numpy as np\n'), ((1288, 1313), 'numpy.array', 'np.array', (['[1, 2, 3, 0, 5]'], {}), '([1, 2, 3, 0, 5])\n', (1296, 1313), True, 'import numpy as np\n'), ((2129, 2149), 'numpy.array', 'np.array', (['[1, 2, -1]'], {}), '([1, 2, -1])\n', (2137, 2149), True, 'import numpy as np\n'), ((2191, 2220), 'numpy.array', 'np.array', (['[[[1, 2, 3, 0, 5]]]'], {}), '([[[1, 2, 3, 0, 5]]])\n', (2199, 2220), True, 'import numpy as np\n'), ((2888, 2908), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (2895, 2908), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5519, 5550), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['pmat', 'imat'], {}), '(pmat, imat)\n', (5538, 5550), True, 'import numpy.testing as npt\n'), ((5960, 5991), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['pmat', 'imat'], {}), '(pmat, imat)\n', (5979, 5991), True, 'import numpy.testing as npt\n'), ((6478, 6520), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['b', '(0.6666666666666666)'], {}), '(b, 0.6666666666666666)\n', (6497, 6520), True, 'import numpy.testing as npt\n'), ((8779, 8804), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.6]'], {}), '([0.2, 0.2, 0.6])\n', (8787, 8804), True, 'import numpy as np\n'), ((8814, 8839), 'numpy.array', 'np.array', (['[0.4, 0.4, 0.2]'], {}), '([0.4, 0.4, 0.2])\n', (8822, 8839), 
True, 'import numpy as np\n'), ((9060, 9085), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.6]'], {}), '([0.2, 0.2, 0.6])\n', (9068, 9085), True, 'import numpy as np\n'), ((9237, 9262), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.6]'], {}), '([0.2, 0.2, 0.6])\n', (9245, 9262), True, 'import numpy as np\n'), ((9272, 9297), 'numpy.array', 'np.array', (['[0.4, 0.4, 0.2]'], {}), '([0.4, 0.4, 0.2])\n', (9280, 9297), True, 'import numpy as np\n'), ((9867, 9887), 'composition_stats.clr_inv', 'clr_inv', (['self.rdata1'], {}), '(self.rdata1)\n', (9874, 9887), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((11618, 11638), 'composition_stats.closure', 'closure', (['self.cdata7'], {}), '(self.cdata7)\n', (11625, 11638), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((12316, 12447), 'numpy.array', 'np.array', (['[[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 9.81818182], [\n 1.42424242, 9.72727273], [1.56565657, 9.63636364]]'], {}), '([[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 9.81818182],\n [1.42424242, 9.72727273], [1.56565657, 9.63636364]])\n', (12324, 12447), True, 'import numpy as np\n'), ((12562, 12598), 'numpy.array', 'np.array', (['[[0.80442968, 0.19557032]]'], {}), '([[0.80442968, 0.19557032]])\n', (12570, 12598), True, 'import numpy as np\n'), ((12987, 13016), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res', 'exp'], {}), '(res, exp)\n', (13006, 13016), True, 'import numpy.testing as npt\n'), ((13084, 13215), 'numpy.array', 'np.array', (['[[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 9.81818182], [\n 1.42424242, 9.72727273], [1.56565657, 9.63636364]]'], {}), '([[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 
9.81818182],\n [1.42424242, 9.72727273], [1.56565657, 9.63636364]])\n', (13092, 13215), True, 'import numpy as np\n'), ((13330, 13364), 'numpy.array', 'np.array', (['[0.80442968, 0.19557032]'], {}), '([0.80442968, 0.19557032])\n', (13338, 13364), True, 'import numpy as np\n'), ((13497, 13517), 'composition_stats.closure', 'closure', (['self.cdata7'], {}), '(self.cdata7)\n', (13504, 13517), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((13868, 13888), 'composition_stats.ilr_inv', 'ilr_inv', (['self.cdata1'], {}), '(self.cdata1)\n', (13875, 13888), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((14191, 14227), 'numpy.array', 'np.array', (['[[0.80442968, 0.19557032]]'], {}), '([[0.80442968, 0.19557032]])\n', (14199, 14227), True, 'import numpy as np\n'), ((14705, 14836), 'numpy.array', 'np.array', (['[[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 9.81818182], [\n 1.42424242, 9.72727273], [1.56565657, 9.63636364]]'], {}), '([[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 9.81818182],\n [1.42424242, 9.72727273], [1.56565657, 9.63636364]])\n', (14713, 14836), True, 'import numpy as np\n'), ((15431, 15467), 'numpy.array', 'np.array', (['[[0.80442968, 0.19557032]]'], {}), '([[0.80442968, 0.19557032]])\n', (15439, 15467), True, 'import numpy as np\n'), ((15832, 15859), 'composition_stats.ilr_inv', 'ilr_inv', (['table'], {'basis': 'basis'}), '(table, basis=basis)\n', (15839, 15859), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((15868, 15897), 'numpy.testing.assert_allclose', 
'npt.assert_allclose', (['res', 'exp'], {}), '(res, exp)\n', (15887, 15897), True, 'import numpy.testing as npt\n'), ((16511, 16531), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (16518, 16531), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((16690, 16719), 'composition_stats.alr', 'alr', (['comp1'], {'denominator_idx': '(1)'}), '(comp1, denominator_idx=1)\n', (16693, 16719), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((16728, 16775), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['alr2d_byhand', 'alr2d_method'], {}), '(alr2d_byhand, alr2d_method)\n', (16747, 16775), True, 'import numpy.testing as npt\n'), ((16818, 16838), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (16825, 16838), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((16985, 17014), 'composition_stats.alr', 'alr', (['comp2'], {'denominator_idx': '(1)'}), '(comp2, denominator_idx=1)\n', (16988, 17014), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((17023, 17070), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['alr1d_byhand', 'alr1d_method'], {}), '(alr1d_byhand, alr1d_method)\n', (17042, 17070), True, 'import numpy.testing as npt\n'), ((17301, 17321), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (17308, 17321), False, 'from 
composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((17480, 17509), 'composition_stats.alr', 'alr', (['comp1'], {'denominator_idx': '(1)'}), '(comp1, denominator_idx=1)\n', (17483, 17509), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((17698, 17724), 'numpy.column_stack', 'np.column_stack', (['(A, B, C)'], {}), '((A, B, C))\n', (17713, 17724), True, 'import numpy as np\n'), ((17751, 17791), 'composition_stats.alr_inv', 'alr_inv', (['alr2d_method'], {'denominator_idx': '(1)'}), '(alr2d_method, denominator_idx=1)\n', (17758, 17791), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((17800, 17853), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['alrinv2d_byhand', 'alrinv2d_method'], {}), '(alrinv2d_byhand, alrinv2d_method)\n', (17819, 17853), True, 'import numpy.testing as npt\n'), ((17896, 17916), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (17903, 17916), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((18063, 18092), 'composition_stats.alr', 'alr', (['comp2'], {'denominator_idx': '(1)'}), '(comp2, denominator_idx=1)\n', (18066, 18092), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((18328, 18368), 'composition_stats.alr_inv', 'alr_inv', 
(['alr1d_method'], {'denominator_idx': '(1)'}), '(alr1d_method, denominator_idx=1)\n', (18335, 18368), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((18377, 18430), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['alrinv1d_byhand', 'alrinv1d_method'], {}), '(alrinv1d_byhand, alrinv1d_method)\n', (18396, 18430), True, 'import numpy.testing as npt\n'), ((18503, 18523), 'composition_stats.alr_inv', 'alr_inv', (['self.rdata1'], {}), '(self.rdata1)\n', (18510, 18523), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((18995, 19081), 'numpy.array', 'np.array', (['[[1, -1, 0, 0, 0], [1, 1, -1, 0, 0], [1, 1, 1, -1, 0], [1, 1, 1, 1, -1]]'], {}), '([[1, -1, 0, 0, 0], [1, 1, -1, 0, 0], [1, 1, 1, -1, 0], [1, 1, 1, 1,\n -1]])\n', (19003, 19081), True, 'import numpy as np\n'), ((19169, 19183), 'composition_stats.sbp_basis', 'sbp_basis', (['sbp'], {}), '(sbp)\n', (19178, 19183), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((19192, 19230), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['gsbasis', 'sbpbasis'], {}), '(gsbasis, sbpbasis)\n', (19211, 19230), True, 'import numpy.testing as npt\n'), ((19288, 19762), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1], [1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -\n 1, 0], [1, 1, 1, 1, 1, 1, -1, 0, 0, 0, 0, 0], [1, 1, -1, -1, -1, 1, 0, \n 0, 0, 0, 0, 0], [1, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, \n -1, 0, 0, 0, 0, 0, 0], [0, 0, 1, -1, -1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0,\n 1, -1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 
0, 0, 1, -1, -1, 1, 0], [0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1], [1, 1, 1, 1, 1, 1, 1, -1, \n -1, -1, -1, 0], [1, 1, 1, 1, 1, 1, -1, 0, 0, 0, 0, 0], [1, 1, -1, -1, -\n 1, 1, 0, 0, 0, 0, 0, 0], [1, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [1, 0, \n 0, 0, 0, -1, 0, 0, 0, 0, 0, 0], [0, 0, 1, -1, -1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, -1, -1,\n 1, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 1, -1, 0, 0]])\n', (19296, 19762), True, 'import numpy as np\n'), ((19994, 20008), 'composition_stats.sbp_basis', 'sbp_basis', (['sbp'], {}), '(sbp)\n', (20003, 20008), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((20281, 20300), 'numpy.zeros', 'np.zeros', (['sbp.shape'], {}), '(sbp.shape)\n', (20289, 20300), True, 'import numpy as np\n'), ((20614, 20626), 'composition_stats.clr_inv', 'clr_inv', (['psi'], {}), '(psi)\n', (20621, 20626), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((20635, 20678), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['basis_byhand', 'sbpbasis'], {}), '(basis_byhand, sbpbasis)\n', (20654, 20678), True, 'import numpy.testing as npt\n'), ((1502, 1511), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (1508, 1511), True, 'import numpy as np\n'), ((1542, 1551), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (1548, 1551), True, 'import numpy as np\n'), ((2279, 2299), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (2286, 2299), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, 
clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((2329, 2373), 'numpy.array', 'np.array', (['[[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]]'], {}), '([[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]])\n', (2337, 2373), True, 'import numpy as np\n'), ((2435, 2455), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (2442, 2455), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((2485, 2510), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.6]'], {}), '([0.2, 0.2, 0.6])\n', (2493, 2510), True, 'import numpy as np\n'), ((2537, 2557), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (2544, 2557), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((2587, 2631), 'numpy.array', 'np.array', (['[[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]]'], {}), '([[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]])\n', (2595, 2631), True, 'import numpy as np\n'), ((2721, 2739), 'composition_stats.closure', 'closure', (['self.bad1'], {}), '(self.bad1)\n', (2728, 2739), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((2797, 2815), 'composition_stats.closure', 'closure', (['self.bad2'], {}), '(self.bad2)\n', (2804, 2815), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((2950, 2969), 'numpy.array', 'np.array', (['[2, 2, 6]'], {}), '([2, 2, 6])\n', (2958, 2969), True, 'import numpy as np\n'), ((3064, 3088), 
'composition_stats.closure', 'closure', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3071, 3088), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((3143, 3186), 'composition_stats.closure', 'closure', (['[[0.0, 0.0, 0.0], [0.0, 5.0, 5.0]]'], {}), '([[0.0, 0.0, 0.0], [0.0, 5.0, 5.0]])\n', (3150, 3186), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((3254, 3274), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (3261, 3274), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((3391, 3435), 'numpy.array', 'np.array', (['[[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]]'], {}), '([[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]])\n', (3399, 3435), True, 'import numpy as np\n'), ((3493, 3513), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (3500, 3513), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((3633, 3694), 'numpy.array', 'np.array', (['[[0.125, 0.125, 0.75], [1.0 / 3, 1.0 / 3, 1.0 / 3]]'], {}), '([[0.125, 0.125, 0.75], [1.0 / 3, 1.0 / 3, 1.0 / 3]])\n', (3641, 3694), True, 'import numpy as np\n'), ((3746, 3766), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (3753, 3766), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, 
center, centralize\n'), ((3886, 3947), 'numpy.array', 'np.array', (['[[0.125, 0.125, 0.75], [1.0 / 3, 1.0 / 3, 1.0 / 3]]'], {}), '([[0.125, 0.125, 0.75], [1.0 / 3, 1.0 / 3, 1.0 / 3]])\n', (3894, 3947), True, 'import numpy as np\n'), ((3999, 4019), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (4006, 4019), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((4044, 4062), 'composition_stats.closure', 'closure', (['[1, 2, 1]'], {}), '([1, 2, 1])\n', (4051, 4062), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((4098, 4135), 'numpy.array', 'np.array', (['[1.0 / 6, 2.0 / 6, 3.0 / 6]'], {}), '([1.0 / 6, 2.0 / 6, 3.0 / 6])\n', (4106, 4135), True, 'import numpy as np\n'), ((4152, 4172), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (4159, 4172), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((4289, 4333), 'numpy.array', 'np.array', (['[[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]]'], {}), '([[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]])\n', (4297, 4333), True, 'import numpy as np\n'), ((4447, 4467), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (4454, 4467), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((4469, 4487), 'composition_stats.closure', 'closure', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4476, 4487), False, 'from composition_stats import closure, 
multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((4530, 4549), 'numpy.array', 'np.array', (['[2, 2, 6]'], {}), '([2, 2, 6])\n', (4538, 4549), True, 'import numpy as np\n'), ((4599, 4619), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (4606, 4619), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((4686, 4783), 'numpy.array', 'np.array', (['[[0.04 / 0.44, 0.04 / 0.44, 0.36 / 0.44], [0.16 / 0.36, 0.16 / 0.36, 0.04 /\n 0.36]]'], {}), '([[0.04 / 0.44, 0.04 / 0.44, 0.36 / 0.44], [0.16 / 0.36, 0.16 / \n 0.36, 0.04 / 0.36]])\n', (4694, 4783), True, 'import numpy as np\n'), ((4816, 4836), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (4823, 4836), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((4928, 4948), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (4935, 4948), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5015, 5112), 'numpy.array', 'np.array', (['[[0.04 / 0.44, 0.04 / 0.44, 0.36 / 0.44], [0.16 / 0.36, 0.16 / 0.36, 0.04 /\n 0.36]]'], {}), '([[0.04 / 0.44, 0.04 / 0.44, 0.36 / 0.44], [0.16 / 0.36, 0.16 / \n 0.36, 0.04 / 0.36]])\n', (5023, 5112), True, 'import numpy as np\n'), ((5201, 5221), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (5208, 5221), False, 'from composition_stats import closure, multiplicative_replacement, perturb, 
perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5267, 5286), 'numpy.array', 'np.array', (['[2, 2, 6]'], {}), '([2, 2, 6])\n', (5275, 5286), True, 'import numpy as np\n'), ((5348, 5368), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (5355, 5368), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5397, 5421), 'composition_stats.closure', 'closure', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (5404, 5421), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5443, 5463), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (5450, 5463), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5488, 5509), 'composition_stats.closure', 'closure', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (5495, 5509), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5578, 5598), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (5585, 5598), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5627, 5645), 'composition_stats.closure', 'closure', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5634, 5645), False, 'from composition_stats 
import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5709, 5752), 'composition_stats.closure', 'closure', (['[[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]]'], {}), '([[0.2, 0.2, 0.6], [0.4, 0.4, 0.2]])\n', (5716, 5752), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5812, 5832), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (5819, 5832), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5861, 5885), 'composition_stats.closure', 'closure', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (5868, 5885), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5907, 5927), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (5914, 5927), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((5929, 5950), 'composition_stats.closure', 'closure', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (5936, 5950), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6076, 6096), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (6083, 6096), False, 'from composition_stats import closure, 
multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6098, 6116), 'composition_stats.closure', 'closure', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (6105, 6116), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6159, 6178), 'numpy.array', 'np.array', (['[2, 2, 6]'], {}), '([2, 2, 6])\n', (6167, 6178), True, 'import numpy as np\n'), ((6225, 6245), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (6232, 6245), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6247, 6267), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (6254, 6267), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6300, 6364), 'numpy.array', 'np.array', (['[[0.80463264, -0.50766667], [-0.50766667, 0.32030201]]'], {}), '([[0.80463264, -0.50766667], [-0.50766667, 0.32030201]])\n', (6308, 6364), True, 'import numpy as np\n'), ((6426, 6446), 'composition_stats.closure', 'closure', (['self.cdata7'], {}), '(self.cdata7)\n', (6433, 6446), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6448, 6468), 'composition_stats.closure', 'closure', (['self.cdata7'], {}), '(self.cdata7)\n', (6455, 6468), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, 
clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6675, 6689), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (6686, 6689), True, 'import numpy as np\n'), ((6900, 6920), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (6907, 6920), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6922, 6942), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (6929, 6942), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((7013, 7045), 'numpy.array', 'np.array', (['[[2, 2, 6], [4, 4, 2]]'], {}), '([[2, 2, 6], [4, 4, 2]])\n', (7021, 7045), True, 'import numpy as np\n'), ((7175, 7195), 'composition_stats.closure', 'closure', (['self.cdata3'], {}), '(self.cdata3)\n', (7182, 7195), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((7259, 7403), 'numpy.array', 'np.array', (['[[0.087273, 0.174545, 0.261818, 0.04, 0.436364], [0.092, 0.04, 0.04, 0.368,\n 0.46], [0.066667, 0.133333, 0.2, 0.266667, 0.333333]]'], {}), '([[0.087273, 0.174545, 0.261818, 0.04, 0.436364], [0.092, 0.04, \n 0.04, 0.368, 0.46], [0.066667, 0.133333, 0.2, 0.266667, 0.333333]])\n', (7267, 7403), True, 'import numpy as np\n'), ((7647, 7667), 'composition_stats.closure', 'closure', (['self.cdata4'], {}), '(self.cdata4)\n', (7654, 7667), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, 
centralize\n'), ((7731, 7787), 'numpy.array', 'np.array', (['[0.087273, 0.174545, 0.261818, 0.04, 0.436364]'], {}), '([0.087273, 0.174545, 0.261818, 0.04, 0.436364])\n', (7739, 7787), True, 'import numpy as np\n'), ((7920, 7940), 'composition_stats.closure', 'closure', (['self.cdata6'], {}), '(self.cdata6)\n', (7927, 7940), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((8004, 8148), 'numpy.array', 'np.array', (['[[0.087273, 0.174545, 0.261818, 0.04, 0.436364], [0.092, 0.04, 0.04, 0.368,\n 0.46], [0.066667, 0.133333, 0.2, 0.266667, 0.333333]]'], {}), '([[0.087273, 0.174545, 0.261818, 0.04, 0.436364], [0.092, 0.04, \n 0.04, 0.368, 0.46], [0.066667, 0.133333, 0.2, 0.266667, 0.333333]])\n', (8012, 8148), True, 'import numpy as np\n'), ((8448, 8468), 'composition_stats.closure', 'closure', (['self.cdata4'], {}), '(self.cdata4)\n', (8455, 8468), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((8511, 8536), 'numpy.array', 'np.array', (['[1, 2, 3, 0, 5]'], {}), '([1, 2, 3, 0, 5])\n', (8519, 8536), True, 'import numpy as np\n'), ((8745, 8765), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (8752, 8765), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((9026, 9046), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (9033, 9046), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, 
centralize\n'), ((9203, 9223), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (9210, 9223), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((9541, 9561), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (9548, 9561), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((9604, 9623), 'numpy.array', 'np.array', (['[2, 2, 6]'], {}), '([2, 2, 6])\n', (9612, 9623), True, 'import numpy as np\n'), ((9682, 9702), 'composition_stats.clr_inv', 'clr_inv', (['self.rdata1'], {}), '(self.rdata1)\n', (9689, 9702), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((9957, 10103), 'numpy.array', 'np.array', (['[[0.70710678, -0.70710678, 0.0, 0.0], [0.40824829, 0.40824829, -0.81649658,\n 0.0], [0.28867513, 0.28867513, 0.28867513, -0.8660254]]'], {}), '([[0.70710678, -0.70710678, 0.0, 0.0], [0.40824829, 0.40824829, -\n 0.81649658, 0.0], [0.28867513, 0.28867513, 0.28867513, -0.8660254]])\n', (9965, 10103), True, 'import numpy as np\n'), ((10301, 10321), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (10308, 10321), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((10385, 10430), 'numpy.array', 'np.array', (['[0.31010205, 0.31010205, 0.3797959]'], {}), '([0.31010205, 0.31010205, 0.3797959])\n', (10393, 10430), True, 'import numpy as np\n'), ((10455, 10475), 
'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (10462, 10475), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((10539, 10584), 'numpy.array', 'np.array', (['[0.31010205, 0.31010205, 0.3797959]'], {}), '([0.31010205, 0.31010205, 0.3797959])\n', (10547, 10584), True, 'import numpy as np\n'), ((10666, 10686), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (10673, 10686), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((10757, 10789), 'numpy.array', 'np.array', (['[[2, 2, 6], [4, 4, 2]]'], {}), '([[2, 2, 6], [4, 4, 2]])\n', (10765, 10789), True, 'import numpy as np\n'), ((10887, 10907), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (10894, 10907), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((10971, 11062), 'numpy.array', 'np.array', (['[[0.22474487, 0.22474487, 0.55051026], [0.41523958, 0.41523958, 0.16952085]]'], {}), '([[0.22474487, 0.22474487, 0.55051026], [0.41523958, 0.41523958, \n 0.16952085]])\n', (10979, 11062), True, 'import numpy as np\n'), ((11123, 11143), 'composition_stats.closure', 'closure', (['self.cdata5'], {}), '(self.cdata5)\n', (11130, 11143), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((11207, 11298), 'numpy.array', 'np.array', (['[[0.22474487, 0.22474487, 0.55051026], [0.41523958, 0.41523958, 
0.16952085]]'], {}), '([[0.22474487, 0.22474487, 0.55051026], [0.41523958, 0.41523958, \n 0.16952085]])\n', (11215, 11298), True, 'import numpy as np\n'), ((11416, 11436), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (11423, 11436), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((11507, 11539), 'numpy.array', 'np.array', (['[[2, 2, 6], [4, 4, 2]]'], {}), '([[2, 2, 6], [4, 4, 2]])\n', (11515, 11539), True, 'import numpy as np\n'), ((11677, 11685), 'composition_stats.ilr', 'ilr', (['mat'], {}), '(mat)\n', (11680, 11685), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((11725, 11759), 'numpy.array', 'np.array', (['[0.70710678, 0.40824829]'], {}), '([0.70710678, 0.40824829])\n', (11733, 11759), True, 'import numpy as np\n'), ((11860, 11874), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (11871, 11874), True, 'import numpy as np\n'), ((12106, 12126), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (12113, 12126), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((12197, 12229), 'numpy.array', 'np.array', (['[[2, 2, 6], [4, 4, 2]]'], {}), '([[2, 2, 6], [4, 4, 2]])\n', (12205, 12229), True, 'import numpy as np\n'), ((12617, 12631), 'composition_stats.closure', 'closure', (['table'], {}), '(table)\n', (12624, 12631), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, 
centralize\n'), ((13756, 13795), 'composition_stats.ilr_inv', 'ilr_inv', (['self.cdata1'], {'basis': 'self.cdata1'}), '(self.cdata1, basis=self.cdata1)\n', (13763, 13795), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((13958, 13990), 'numpy.array', 'np.array', (['[[2, 2, 6], [4, 4, 2]]'], {}), '([[2, 2, 6], [4, 4, 2]])\n', (13966, 13990), True, 'import numpy as np\n'), ((14596, 14623), 'composition_stats.ilr_inv', 'ilr_inv', (['table'], {'basis': 'basis'}), '(table, basis=basis)\n', (14603, 14623), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((15160, 15291), 'numpy.array', 'np.array', (['[[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 9.81818182], [\n 1.42424242, 9.72727273], [1.56565657, 9.63636364]]'], {}), '([[1.0, 10.0], [1.14141414, 9.90909091], [1.28282828, 9.81818182],\n [1.42424242, 9.72727273], [1.56565657, 9.63636364]])\n', (15168, 15291), True, 'import numpy as np\n'), ((15973, 16009), 'numpy.array', 'np.array', (['[[0.80442968, 0.19557032]]'], {}), '([[0.80442968, 0.19557032]])\n', (15981, 16009), True, 'import numpy as np\n'), ((16417, 16444), 'composition_stats.ilr_inv', 'ilr_inv', (['table'], {'basis': 'basis'}), '(table, basis=basis)\n', (16424, 16444), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((17147, 17167), 'composition_stats.closure', 'closure', (['self.cdata2'], {}), '(self.cdata2)\n', (17154, 17167), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, 
sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((17210, 17229), 'numpy.array', 'np.array', (['[2, 2, 6]'], {}), '([2, 2, 6])\n', (17218, 17229), True, 'import numpy as np\n'), ((17602, 17628), 'numpy.exp', 'np.exp', (['alr2d_byhand[:, 0]'], {}), '(alr2d_byhand[:, 0])\n', (17608, 17628), True, 'import numpy as np\n'), ((17645, 17671), 'numpy.exp', 'np.exp', (['alr2d_byhand[:, 1]'], {}), '(alr2d_byhand[:, 1])\n', (17651, 17671), True, 'import numpy as np\n'), ((18179, 18202), 'numpy.exp', 'np.exp', (['alr1d_byhand[0]'], {}), '(alr1d_byhand[0])\n', (18185, 18202), True, 'import numpy as np\n'), ((18219, 18242), 'numpy.exp', 'np.exp', (['alr1d_byhand[1]'], {}), '(alr1d_byhand[1])\n', (18225, 18242), True, 'import numpy as np\n'), ((18269, 18295), 'numpy.column_stack', 'np.column_stack', (['(A, B, C)'], {}), '((A, B, C))\n', (18284, 18295), True, 'import numpy as np\n'), ((18593, 18739), 'numpy.array', 'np.array', (['[[0.70710678, -0.70710678, 0.0, 0.0], [0.40824829, 0.40824829, -0.81649658,\n 0.0], [0.28867513, 0.28867513, 0.28867513, -0.8660254]]'], {}), '([[0.70710678, -0.70710678, 0.0, 0.0], [0.40824829, 0.40824829, -\n 0.81649658, 0.0], [0.28867513, 0.28867513, 0.28867513, -0.8660254]])\n', (18601, 18739), True, 'import numpy as np\n'), ((18957, 18979), 'composition_stats._gram_schmidt_basis', '_gram_schmidt_basis', (['(5)'], {}), '(5)\n', (18976, 18979), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((3307, 3326), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (3315, 3326), True, 'import numpy as np\n'), ((3546, 3568), 'numpy.array', 'np.array', (['[10, 10, 20]'], {}), '([10, 10, 20])\n', (3554, 3568), True, 'import numpy as np\n'), ((3799, 3821), 'numpy.array', 'np.array', (['[10, 10, 20]'], {}), '([10, 10, 20])\n', (3807, 3821), True, 'import numpy as np\n'), ((4205, 4224), 
'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (4213, 4224), True, 'import numpy as np\n'), ((4875, 4903), 'numpy.array', 'np.array', (['[0.04, 0.04, 0.36]'], {}), '([0.04, 0.04, 0.36])\n', (4883, 4903), True, 'import numpy as np\n'), ((6602, 6622), 'composition_stats.closure', 'closure', (['self.ortho1'], {}), '(self.ortho1)\n', (6609, 6622), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6624, 6644), 'composition_stats.closure', 'closure', (['self.ortho1'], {}), '(self.ortho1)\n', (6631, 6644), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6778, 6798), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (6785, 6798), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((6800, 6820), 'composition_stats.closure', 'closure', (['self.cdata8'], {}), '(self.cdata8)\n', (6807, 6820), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((8672, 8690), 'composition_stats.closure', 'closure', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (8679, 8690), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((9759, 9779), 'composition_stats.clr_inv', 'clr_inv', (['self.rdata1'], {}), '(self.rdata1)\n', (9766, 9779), False, 'from composition_stats import 
closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((11837, 11857), 'composition_stats.closure', 'closure', (['self.ortho1'], {}), '(self.ortho1)\n', (11844, 11857), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((11989, 12009), 'composition_stats.closure', 'closure', (['self.cdata1'], {}), '(self.cdata1)\n', (11996, 12009), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((13425, 13439), 'composition_stats.closure', 'closure', (['table'], {}), '(table)\n', (13432, 13439), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((13564, 13572), 'composition_stats.ilr', 'ilr', (['mat'], {}), '(mat)\n', (13567, 13572), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((13617, 13631), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (13628, 13631), True, 'import numpy as np\n'), ((17558, 17584), 'numpy.exp', 'np.exp', (['alr2d_byhand[:, 1]'], {}), '(alr2d_byhand[:, 1])\n', (17564, 17584), True, 'import numpy as np\n'), ((18138, 18161), 'numpy.exp', 'np.exp', (['alr1d_byhand[1]'], {}), '(alr1d_byhand[1])\n', (18144, 18161), True, 'import numpy as np\n'), ((12670, 12684), 'numpy.log', 'np.log', (['(1 / 10)'], {}), '(1 / 10)\n', (12676, 12684), True, 'import numpy as np\n'), ((12683, 12697), 'numpy.sqrt', 'np.sqrt', 
(['(1 / 2)'], {}), '(1 / 2)\n', (12690, 12697), True, 'import numpy as np\n'), ((12721, 12752), 'numpy.log', 'np.log', (['(1.14141414 / 9.90909091)'], {}), '(1.14141414 / 9.90909091)\n', (12727, 12752), True, 'import numpy as np\n'), ((12753, 12767), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (12760, 12767), True, 'import numpy as np\n'), ((12791, 12822), 'numpy.log', 'np.log', (['(1.28282828 / 9.81818182)'], {}), '(1.28282828 / 9.81818182)\n', (12797, 12822), True, 'import numpy as np\n'), ((12823, 12837), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (12830, 12837), True, 'import numpy as np\n'), ((12861, 12892), 'numpy.log', 'np.log', (['(1.42424242 / 9.72727273)'], {}), '(1.42424242 / 9.72727273)\n', (12867, 12892), True, 'import numpy as np\n'), ((12893, 12907), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (12900, 12907), True, 'import numpy as np\n'), ((12931, 12962), 'numpy.log', 'np.log', (['(1.56565657 / 9.63636364)'], {}), '(1.56565657 / 9.63636364)\n', (12937, 12962), True, 'import numpy as np\n'), ((12963, 12977), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (12970, 12977), True, 'import numpy as np\n'), ((16565, 16598), 'numpy.log', 'np.log', (['(comp1[:, 0] / comp1[:, 1])'], {}), '(comp1[:, 0] / comp1[:, 1])\n', (16571, 16598), True, 'import numpy as np\n'), ((16631, 16664), 'numpy.log', 'np.log', (['(comp1[:, 2] / comp1[:, 1])'], {}), '(comp1[:, 2] / comp1[:, 1])\n', (16637, 16664), True, 'import numpy as np\n'), ((16872, 16899), 'numpy.log', 'np.log', (['(comp2[0] / comp2[1])'], {}), '(comp2[0] / comp2[1])\n', (16878, 16899), True, 'import numpy as np\n'), ((16932, 16959), 'numpy.log', 'np.log', (['(comp2[2] / comp2[1])'], {}), '(comp2[2] / comp2[1])\n', (16938, 16959), True, 'import numpy as np\n'), ((17355, 17388), 'numpy.log', 'np.log', (['(comp1[:, 0] / comp1[:, 1])'], {}), '(comp1[:, 0] / comp1[:, 1])\n', (17361, 17388), True, 'import numpy as np\n'), ((17421, 17454), 'numpy.log', 'np.log', 
(['(comp1[:, 2] / comp1[:, 1])'], {}), '(comp1[:, 2] / comp1[:, 1])\n', (17427, 17454), True, 'import numpy as np\n'), ((17529, 17555), 'numpy.exp', 'np.exp', (['alr2d_byhand[:, 0]'], {}), '(alr2d_byhand[:, 0])\n', (17535, 17555), True, 'import numpy as np\n'), ((17950, 17977), 'numpy.log', 'np.log', (['(comp2[0] / comp2[1])'], {}), '(comp2[0] / comp2[1])\n', (17956, 17977), True, 'import numpy as np\n'), ((18010, 18037), 'numpy.log', 'np.log', (['(comp2[2] / comp2[1])'], {}), '(comp2[2] / comp2[1])\n', (18016, 18037), True, 'import numpy as np\n'), ((18112, 18135), 'numpy.exp', 'np.exp', (['alr1d_byhand[0]'], {}), '(alr1d_byhand[0])\n', (18118, 18135), True, 'import numpy as np\n'), ((20090, 20103), 'numpy.sum', 'np.sum', (['(x > 0)'], {}), '(x > 0)\n', (20096, 20103), True, 'import numpy as np\n'), ((20203, 20216), 'numpy.sum', 'np.sum', (['(x < 0)'], {}), '(x < 0)\n', (20209, 20216), True, 'import numpy as np\n'), ((20454, 20492), 'numpy.sqrt', 'np.sqrt', (['(s[i] / (r[i] * (r[i] + s[i])))'], {}), '(s[i] / (r[i] * (r[i] + s[i])))\n', (20461, 20492), True, 'import numpy as np\n'), ((14976, 14990), 'composition_stats.closure', 'closure', (['table'], {}), '(table)\n', (14983, 14990), False, 'from composition_stats import closure, multiplicative_replacement, perturb, perturb_inv, power, inner, clr, clr_inv, ilr, ilr_inv, alr, alr_inv, sbp_basis, _gram_schmidt_basis, center, centralize\n'), ((14255, 14269), 'numpy.log', 'np.log', (['(1 / 10)'], {}), '(1 / 10)\n', (14261, 14269), True, 'import numpy as np\n'), ((14268, 14282), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (14275, 14282), True, 'import numpy as np\n'), ((14309, 14340), 'numpy.log', 'np.log', (['(1.14141414 / 9.90909091)'], {}), '(1.14141414 / 9.90909091)\n', (14315, 14340), True, 'import numpy as np\n'), ((14341, 14355), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (14348, 14355), True, 'import numpy as np\n'), ((14382, 14413), 'numpy.log', 'np.log', (['(1.28282828 / 
9.81818182)'], {}), '(1.28282828 / 9.81818182)\n', (14388, 14413), True, 'import numpy as np\n'), ((14414, 14428), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (14421, 14428), True, 'import numpy as np\n'), ((14455, 14486), 'numpy.log', 'np.log', (['(1.42424242 / 9.72727273)'], {}), '(1.42424242 / 9.72727273)\n', (14461, 14486), True, 'import numpy as np\n'), ((14487, 14501), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (14494, 14501), True, 'import numpy as np\n'), ((14528, 14559), 'numpy.log', 'np.log', (['(1.56565657 / 9.63636364)'], {}), '(1.56565657 / 9.63636364)\n', (14534, 14559), True, 'import numpy as np\n'), ((14560, 14574), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (14567, 14574), True, 'import numpy as np\n'), ((15495, 15509), 'numpy.log', 'np.log', (['(1 / 10)'], {}), '(1 / 10)\n', (15501, 15509), True, 'import numpy as np\n'), ((15508, 15522), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (15515, 15522), True, 'import numpy as np\n'), ((15549, 15580), 'numpy.log', 'np.log', (['(1.14141414 / 9.90909091)'], {}), '(1.14141414 / 9.90909091)\n', (15555, 15580), True, 'import numpy as np\n'), ((15581, 15595), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (15588, 15595), True, 'import numpy as np\n'), ((15622, 15653), 'numpy.log', 'np.log', (['(1.28282828 / 9.81818182)'], {}), '(1.28282828 / 9.81818182)\n', (15628, 15653), True, 'import numpy as np\n'), ((15654, 15668), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (15661, 15668), True, 'import numpy as np\n'), ((15695, 15726), 'numpy.log', 'np.log', (['(1.42424242 / 9.72727273)'], {}), '(1.42424242 / 9.72727273)\n', (15701, 15726), True, 'import numpy as np\n'), ((15727, 15741), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (15734, 15741), True, 'import numpy as np\n'), ((15768, 15799), 'numpy.log', 'np.log', (['(1.56565657 / 9.63636364)'], {}), '(1.56565657 / 9.63636364)\n', (15774, 15799), True, 'import numpy as np\n'), 
((15800, 15814), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (15807, 15814), True, 'import numpy as np\n'), ((16038, 16052), 'numpy.log', 'np.log', (['(1 / 10)'], {}), '(1 / 10)\n', (16044, 16052), True, 'import numpy as np\n'), ((16051, 16065), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (16058, 16065), True, 'import numpy as np\n'), ((16092, 16123), 'numpy.log', 'np.log', (['(1.14141414 / 9.90909091)'], {}), '(1.14141414 / 9.90909091)\n', (16098, 16123), True, 'import numpy as np\n'), ((16124, 16138), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (16131, 16138), True, 'import numpy as np\n'), ((16165, 16196), 'numpy.log', 'np.log', (['(1.28282828 / 9.81818182)'], {}), '(1.28282828 / 9.81818182)\n', (16171, 16196), True, 'import numpy as np\n'), ((16197, 16211), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (16204, 16211), True, 'import numpy as np\n'), ((16238, 16269), 'numpy.log', 'np.log', (['(1.42424242 / 9.72727273)'], {}), '(1.42424242 / 9.72727273)\n', (16244, 16269), True, 'import numpy as np\n'), ((16270, 16284), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (16277, 16284), True, 'import numpy as np\n'), ((16311, 16342), 'numpy.log', 'np.log', (['(1.56565657 / 9.63636364)'], {}), '(1.56565657 / 9.63636364)\n', (16317, 16342), True, 'import numpy as np\n'), ((16343, 16357), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (16350, 16357), True, 'import numpy as np\n'), ((20558, 20596), 'numpy.sqrt', 'np.sqrt', (['(r[i] / (s[i] * (r[i] + s[i])))'], {}), '(r[i] / (s[i] * (r[i] + s[i])))\n', (20565, 20596), True, 'import numpy as np\n'), ((9163, 9172), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (9169, 9172), True, 'import numpy as np\n'), ((8919, 8928), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (8925, 8928), True, 'import numpy as np\n'), ((8986, 8995), 'numpy.log', 'np.log', (['B'], {}), '(B)\n', (8992, 8995), True, 'import numpy as np\n'), ((9377, 9386), 'numpy.log', 'np.log', (['A'], 
{}), '(A)\n', (9383, 9386), True, 'import numpy as np\n'), ((9444, 9453), 'numpy.log', 'np.log', (['B'], {}), '(B)\n', (9450, 9453), True, 'import numpy as np\n')] |
# Train a relevance-vector regression (EMRVR) model on pre-extracted features.
#
# Command line:
#   argv[1]  directory containing extracted feature files (*.h5) and the
#            matching metadata files (*.csv)
#   argv[2]  output directory for the fitted model (EMRVR.joblib)
import sys, os, fnmatch, csv
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.model_selection import train_test_split
from sklearn_rvm import EMRVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import make_scorer, mean_absolute_error
from sklearn.utils import shuffle
sys.path.insert(0, os.path.dirname(os.getcwd()))
# Input and output folders
PATH_DATA_PROCESSED_ML = sys.argv[1]
PATH_OUTPUT = sys.argv[2]
# Step 1: Get all the files in the input folder
file_names = os.listdir(PATH_DATA_PROCESSED_ML)
# Step 2: Get the full paths of the files (without extensions)
files = [os.path.splitext(os.path.join(PATH_DATA_PROCESSED_ML, file_name))[0]
         for file_name in fnmatch.filter(file_names, "*.h5")]
# Step 3: Load the features, one DataFrame per recording
frames = []
for idx, feature_file in enumerate(files):
    df_features = pd.read_hdf(feature_file + ".h5")
    df_metadata = pd.read_csv(feature_file.replace("extracted_features_", "processed_data_") + ".csv")
    # Step 4: Assign the label (age in months; one value per file)
    df_features['label'] = df_metadata['age_months'][0]
    # Step 5: Assign the subject code
    df_features['code'] = df_metadata['code'][0]
    frames.append(df_features)
df = pd.concat(frames)
# Step 6: Split the unique subject IDs 70/15/15 so that no subject
# appears in more than one of train/validation/test
subject_ids = sorted(list(set(df["code"].tolist())))
IDs_train, IDs_temp = train_test_split(subject_ids, test_size=0.3, random_state=42)
IDs_test, IDs_val = train_test_split(IDs_temp, test_size=0.5, random_state=42)
# Step 7: Split the DataFrames into train, validation and test
df_train = df[df['code'].isin(IDs_train)]
df_val = df[df['code'].isin(IDs_val)]
df_test = df[df['code'].isin(IDs_test)]
feature_names = df.columns.values
X_train = df_train.drop(['label', 'code'], axis=1).reset_index(drop=True)
y_train = df_train['label'].reset_index(drop=True)
codes_train = df_train['code'].reset_index(drop=True)
X_val = df_val.drop(['label', 'code'], axis=1).reset_index(drop=True)
y_val = df_val['label'].reset_index(drop=True)
codes_val = df_val['code'].reset_index(drop=True)
X_test = df_test.drop(['label', 'code'], axis=1).reset_index(drop=True)
y_test = df_test['label'].reset_index(drop=True)
codes_test = df_test['code'].reset_index(drop=True)
scaler = StandardScaler()
# NOTE(review): each split is standardized with its own fit_transform, so the
# three splits use different scalings (and the pipeline below standardizes
# again) -- confirm whether fitting on train only and calling transform() on
# val/test was intended.
# MARK: reducing from 64 bit float to 32 bit float, to reduce memory usage
X_train = pd.DataFrame(scaler.fit_transform(X_train)).astype('float32')
X_val = pd.DataFrame(scaler.fit_transform(X_val)).astype('float32')
X_test = pd.DataFrame(scaler.fit_transform(X_test)).astype('float32')
X_train_val = pd.concat([X_train, X_val])
y_train_val = pd.concat([y_train, y_val])
codes_train_val = pd.concat([codes_train, codes_val])
# Free intermediates to keep peak memory usage down
del(file_names, files, df, frames, df_features, df_metadata, df_train, df_test, df_val, IDs_train, IDs_val, IDs_test, IDs_temp)
del(X_train, y_train, codes_train, X_val, y_val, codes_val)
# Shuffle data before using
X_train_val, y_train_val, codes_train_val = shuffle(X_train_val, y_train_val, codes_train_val, random_state=42)
# Split into 100 chunks; only the first chunk is used for fitting below
chunked_X_train = np.array_split(X_train_val, 100)
chunked_y_train = np.array_split(y_train_val, 100)
# NOTE: a RandomizedSearchCV over kernel/degree/epsilon/gamma was previously
# explored here; the hyperparameters below came out of that search.
# BUGFIX: the fitted pipeline used to be assigned to the name `EMRVR`, which
# shadowed the imported EMRVR estimator class; use a distinct variable name.
rvr_model = make_pipeline(StandardScaler(),
                          EMRVR(kernel='rbf', epsilon=1.5, gamma=(1/450)))
rvr_model.fit(chunked_X_train[0], chunked_y_train[0])
output_file = os.path.join(PATH_OUTPUT, 'EMRVR.joblib')
dump(rvr_model, output_file)
"os.listdir",
"sklearn.model_selection.train_test_split",
"sklearn.utils.shuffle",
"sklearn_rvm.EMRVR.fit",
"joblib.dump",
"os.path.join",
"os.getcwd",
"sklearn.preprocessing.StandardScaler",
"numpy.array_split",
"fnmatch.filter",
"sklearn_rvm.EMRVR",
"pandas.concat",
"pandas.read_hdf"
] | [((660, 694), 'os.listdir', 'os.listdir', (['PATH_DATA_PROCESSED_ML'], {}), '(PATH_DATA_PROCESSED_ML)\n', (670, 694), False, 'import sys, os, fnmatch, csv\n'), ((1344, 1361), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (1353, 1361), True, 'import pandas as pd\n'), ((1482, 1543), 'sklearn.model_selection.train_test_split', 'train_test_split', (['subject_ids'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(subject_ids, test_size=0.3, random_state=42)\n', (1498, 1543), False, 'from sklearn.model_selection import train_test_split\n'), ((1564, 1622), 'sklearn.model_selection.train_test_split', 'train_test_split', (['IDs_temp'], {'test_size': '(0.5)', 'random_state': '(42)'}), '(IDs_temp, test_size=0.5, random_state=42)\n', (1580, 1622), False, 'from sklearn.model_selection import train_test_split\n'), ((2374, 2390), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2388, 2390), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2812, 2839), 'pandas.concat', 'pd.concat', (['[X_train, X_val]'], {}), '([X_train, X_val])\n', (2821, 2839), True, 'import pandas as pd\n'), ((2854, 2881), 'pandas.concat', 'pd.concat', (['[y_train, y_val]'], {}), '([y_train, y_val])\n', (2863, 2881), True, 'import pandas as pd\n'), ((2900, 2935), 'pandas.concat', 'pd.concat', (['[codes_train, codes_val]'], {}), '([codes_train, codes_val])\n', (2909, 2935), True, 'import pandas as pd\n'), ((3198, 3265), 'sklearn.utils.shuffle', 'shuffle', (['X_train_val', 'y_train_val', 'codes_train_val'], {'random_state': '(42)'}), '(X_train_val, y_train_val, codes_train_val, random_state=42)\n', (3205, 3265), False, 'from sklearn.utils import shuffle\n'), ((3285, 3317), 'numpy.array_split', 'np.array_split', (['X_train_val', '(100)'], {}), '(X_train_val, 100)\n', (3299, 3317), True, 'import numpy as np\n'), ((3340, 3372), 'numpy.array_split', 'np.array_split', (['y_train_val', '(100)'], {}), '(y_train_val, 100)\n', (3354, 3372), True, 'import 
numpy as np\n'), ((4617, 4666), 'sklearn_rvm.EMRVR.fit', 'EMRVR.fit', (['chunked_X_train[0]', 'chunked_y_train[0]'], {}), '(chunked_X_train[0], chunked_y_train[0])\n', (4626, 4666), False, 'from sklearn_rvm import EMRVR\n'), ((4682, 4723), 'os.path.join', 'os.path.join', (['PATH_OUTPUT', '"""EMRVR.joblib"""'], {}), "(PATH_OUTPUT, 'EMRVR.joblib')\n", (4694, 4723), False, 'import sys, os, fnmatch, csv\n'), ((4724, 4748), 'joblib.dump', 'dump', (['EMRVR', 'output_file'], {}), '(EMRVR, output_file)\n', (4728, 4748), False, 'from joblib import dump, load\n'), ((993, 1026), 'pandas.read_hdf', 'pd.read_hdf', (["(feature_file + '.h5')"], {}), "(feature_file + '.h5')\n", (1004, 1026), True, 'import pandas as pd\n'), ((4527, 4543), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4541, 4543), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4567, 4614), 'sklearn_rvm.EMRVR', 'EMRVR', ([], {'kernel': '"""rbf"""', 'epsilon': '(1.5)', 'gamma': '(1 / 450)'}), "(kernel='rbf', epsilon=1.5, gamma=1 / 450)\n", (4572, 4614), False, 'from sklearn_rvm import EMRVR\n'), ((493, 504), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (502, 504), False, 'import sys, os, fnmatch, csv\n'), ((854, 888), 'fnmatch.filter', 'fnmatch.filter', (['file_names', '"""*.h5"""'], {}), "(file_names, '*.h5')\n", (868, 888), False, 'import sys, os, fnmatch, csv\n'), ((785, 832), 'os.path.join', 'os.path.join', (['PATH_DATA_PROCESSED_ML', 'file_name'], {}), '(PATH_DATA_PROCESSED_ML, file_name)\n', (797, 832), False, 'import sys, os, fnmatch, csv\n')] |
import sys
import numpy
from PyQt5.QtWidgets import QApplication, QMessageBox
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui, congruence
from oasys.widgets.exchange import DataExchangeObject
from orangecontrib.xoppy.util.xoppy_xraylib_util import cross_calc, cross_calc_mix, cross_calc_nist
from orangecontrib.xoppy.util.xoppy_xraylib_util import nist_compound_list, density_element, density_nist
from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget
# import xraylib
class OWxcrosssec(XoppyWidget):
    """XOPPY widget that computes X-ray matter cross sections (total,
    photoelectric, Rayleigh, Compton) for an element, a compound formula or a
    NIST-tabulated compound, over a standard, user-defined or single-value
    photon-energy grid.
    """
    name = "CrossSec"
    id = "orange.widgets.dataxcrosssec"
    description = "X-ray Matter Cross Sections"
    icon = "icons/xoppy_xcrosssec.png"
    priority = 19
    category = ""
    keywords = ["xoppy", "xcrosssec"]

    # Persisted widget settings
    MAT_FLAG = Setting(2)        # 0: element, 1: compound (formula), 2: compound (NIST table)
    DESCRIPTOR = Setting("Si")   # chemical symbol/formula (used when MAT_FLAG is 0 or 1)
    MAT_LIST = Setting(177)      # index into nist_compound_list() (used when MAT_FLAG is 2)
    DENSITY = Setting("?")       # density in g/cm3; non-numeric means "look it up"
    CALCULATE = Setting(1)       # which cross section (see combo items in build_gui)
    GRID = Setting(0)            # 0: standard, 1: user defined, 2: single value
    GRIDSTART = Setting(100.0)   # eV
    GRIDEND = Setting(10000.0)   # eV
    GRIDN = Setting(200)
    UNIT = Setting(0)            # output unit (see combo items in build_gui)
    DUMP_TO_FILE = Setting(0)    # No
    FILE_NAME = Setting("CrossSec.dat")

    # Axis titles of the last calculation (None when no plottable data)
    xtitle = None
    ytitle = None

    def build_gui(self):
        """Build the control-area widgets; indices follow unitLabels()/unitFlags()."""
        box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters", orientation="vertical", width=self.CONTROL_AREA_WIDTH - 5)

        idx = -1

        # widget index 1: material kind
        idx += 1
        box1 = gui.widgetBox(box)
        gui.comboBox(box1, self, "MAT_FLAG",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=['Element(formula)', 'Compound(formula)', 'Compound(table)'],
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 2: NIST compound table
        idx += 1
        box1 = gui.widgetBox(box)
        items = nist_compound_list()
        gui.comboBox(box1, self, "MAT_LIST",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=items,
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 3: element/compound formula
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "DESCRIPTOR",
                          label=self.unitLabels()[idx], addSpace=False, orientation="horizontal")
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 4: density
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "DENSITY",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=str, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 5: cross-section kind
        idx += 1
        box1 = gui.widgetBox(box)
        gui.comboBox(box1, self, "CALCULATE",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=['Total', 'PhotoElectric', 'Rayleigh', 'Compton', 'Total-Rayleigh'],
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 6: energy grid kind
        idx += 1
        box1 = gui.widgetBox(box)
        gui.comboBox(box1, self, "GRID",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=['Standard', 'User defined', 'Single Value'],
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 7: grid start energy
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "GRIDSTART",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 8: grid end energy
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "GRIDEND",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 9: number of grid points
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "GRIDN",
                          label=self.unitLabels()[idx], addSpace=False,
                          valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 10: output unit
        idx += 1
        box1 = gui.widgetBox(box)
        gui.comboBox(box1, self, "UNIT",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=['barn/atom [Cross Section] *see help*', 'cm^2 [Cross Section] *see help*', 'cm^2/g [Mass abs coef]', 'cm^-1 [Linear abs coef]'],
                     valueType=int, orientation="horizontal", labelWidth=130)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 11: dump-to-file flag
        idx += 1
        box1 = gui.widgetBox(box)
        gui.comboBox(box1, self, "DUMP_TO_FILE",
                     label=self.unitLabels()[idx], addSpace=True,
                     items=["No", "Yes"],
                     orientation="horizontal")
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 12: dump file name
        idx += 1
        box1 = gui.widgetBox(box)
        gui.lineEdit(box1, self, "FILE_NAME",
                     label=self.unitLabels()[idx], addSpace=True)
        self.show_at(self.unitFlags()[idx], box1)

        gui.rubber(self.controlArea)

    def unitLabels(self):
        """Labels shown next to each input widget (index-aligned with build_gui)."""
        return ['material','table','formula','density',
                'Cross section','Energy [eV] grid:',
                'Starting Energy [eV]: ','To: ','Number of points','Units',
                'Dump to file','File name']

    def unitFlags(self):
        """Visibility condition (an eval-able string) for each widget."""
        return ['True','self.MAT_FLAG == 2','self.MAT_FLAG <= 1 ','True',
                'True','True',
                'self.GRID != 0','self.GRID == 1','self.GRID == 1','True',
                'True','self.DUMP_TO_FILE == 1']

    def get_help_name(self):
        """Name of the help page for this widget."""
        return 'crosssec'

    def check_fields(self):
        """Validate user input; the congruence helpers raise on invalid values."""
        self.DESCRIPTOR = congruence.checkEmptyString(self.DESCRIPTOR, "formula")
        if self.GRID > 0:
            self.GRIDSTART = congruence.checkPositiveNumber(self.GRIDSTART, "Starting Energy")
            if self.GRID == 1:
                self.GRIDEND = congruence.checkStrictlyPositiveNumber(self.GRIDEND, "Energy to")
                congruence.checkLessThan(self.GRIDSTART, self.GRIDEND, "Starting Energy", "Energy to")
                self.GRIDN = congruence.checkStrictlyPositiveNumber(self.GRIDN, "Number of points")

    def do_xoppy_calculation(self):
        """Run the calculation and package the result as a DataExchangeObject."""
        out_dict = self.xoppy_calc_xcrosssec()
        if "info" in out_dict.keys():
            print(out_dict["info"])
        # send exchange
        calculated_data = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name())
        try:
            calculated_data.add_content("xoppy_data", out_dict["data"].T)
            calculated_data.add_content("plot_x_col", 0)
            calculated_data.add_content("plot_y_col", -1)
        except Exception:
            # single-value results carry no "data" array
            pass
        try:
            calculated_data.add_content("labels", out_dict["labels"])
        except Exception:
            pass
        try:
            calculated_data.add_content("info", out_dict["info"])
        except Exception:
            pass
        return calculated_data

    def extract_data_from_xoppy_output(self, calculation_output):
        """Pull axis titles from the output; pop up the info text when absent."""
        try:
            calculation_output.get_content("xoppy_data")
            labels = calculation_output.get_content("labels")
            self.xtitle = labels[0]
            self.ytitle = labels[1]
        except Exception:
            QMessageBox.information(self,
                                    "Calculation Result",
                                    "Calculation Result:\n" + calculation_output.get_content("info"),
                                    QMessageBox.Ok)
            self.xtitle = None
            self.ytitle = None
        return calculation_output

    def plot_results(self, calculated_data, progressBarValue=80):
        """Plot array results; fall back to a text tab for single-value results."""
        self.initializeTabs()
        try:
            calculated_data.get_content("xoppy_data")
            super().plot_results(calculated_data, progressBarValue)
        except Exception:
            self.plot_info(calculated_data.get_content("info") + "\n", progressBarValue, 0, 0)

    def get_data_exchange_widget_name(self):
        """Identifier used when exchanging data with other XOPPY widgets."""
        return "XCROSSSEC"

    def getTitles(self):
        return ["Calculation Result"]

    def getXTitles(self):
        return [""] if self.xtitle is None else [self.xtitle]

    def getYTitles(self):
        return [""] if self.ytitle is None else [self.ytitle]

    # BUGFIX: getLogPlot used to be defined twice (the second definition
    # silently shadowed the first); a single definition is kept.
    def getLogPlot(self):
        return [(True, True)]

    def getVariablesToPlot(self):
        return [(0, 1)]

    def xoppy_calc_xcrosssec(self):
        """Compute the selected cross section on the selected energy grid.

        Returns a dict with keys "application"/"name" plus either
        "data"+"labels" (multi-point grid) or "info" (single energy value).
        Optionally dumps all units to FILE_NAME in SPEC-like format.
        """
        MAT_FLAG = self.MAT_FLAG
        CALCULATE = self.CALCULATE
        GRID = self.GRID
        GRIDSTART = self.GRIDSTART
        GRIDEND = self.GRIDEND
        GRIDN = self.GRIDN
        UNIT = self.UNIT

        # Resolve the material descriptor and its density (look the density
        # up only when the user did not enter a numeric value).
        if MAT_FLAG == 0:  # element
            descriptor = self.DESCRIPTOR
            try:
                density = float(self.DENSITY)
            except Exception:
                density = density_element(self.DESCRIPTOR, verbose=True)
        elif MAT_FLAG == 1:  # compound
            descriptor = self.DESCRIPTOR
            try:
                density = float(self.DENSITY)
            except Exception:
                raise Exception("Density must be entered.")
        elif MAT_FLAG == 2:  # nist list
            descriptor = nist_compound_list()[self.MAT_LIST]
            try:
                density = float(self.DENSITY)
            except Exception:
                density = density_nist(descriptor, verbose=True)

        print("xoppy_calc_xcrosssec: using density = %g g/cm3"%density)

        # Build the photon energy grid [eV]
        if GRID == 0:
            # standard grid: 500 points, logarithmic starting at 10 eV
            energy = numpy.arange(0,500)
            elefactor = numpy.log10(10000.0 / 30.0) / 300.0
            energy = 10.0 * 10**(energy * elefactor)
        elif GRID == 1:
            if GRIDN == 1:
                energy = numpy.array([GRIDSTART])
            else:
                energy = numpy.linspace(GRIDSTART,GRIDEND,GRIDN)
        elif GRID == 2:
            energy = numpy.array([GRIDSTART])

        # Dispatch to the matching xraylib-based calculator
        if MAT_FLAG == 0:  # element
            out = cross_calc(descriptor,energy,calculate=CALCULATE,density=density)
        elif MAT_FLAG == 1:  # compound parse
            out = cross_calc_mix(descriptor,energy,calculate=CALCULATE,density=density)
        elif MAT_FLAG == 2:  # NIST compound
            out = cross_calc_nist(descriptor,energy,calculate=CALCULATE,density=density)

        calculate_items = ['Total','PhotoElectric','Rayleigh','Compton','Total minus Rayleigh']
        unit_items = ['barn/atom','cm^2','cm^2/g','cm^-1']

        if energy.size > 1:
            # Multi-point grid: return (energy, selected-unit) as plottable data.
            tmp_x = out[0,:].copy()
            tmp_y = out[UNIT+1,:].copy()
            tmp = numpy.vstack((tmp_x,tmp_y))
            labels = ["Photon energy [eV]","%s cross section [%s]"%(calculate_items[CALCULATE],unit_items[UNIT])]
            to_return = {"application":"xoppy","name":"xcrosssec","data":tmp,"labels":labels}
        else:
            # Single energy: return the value as an info string only.
            tmp = None
            txt = "xoppy_calc_xcrosssec: Calculated %s cross section: %g %s"%(calculate_items[CALCULATE],out[UNIT+1,0],unit_items[UNIT])
            print(txt)
            to_return = {"application":"xoppy","name":"xcrosssec","info":txt}

        if self.DUMP_TO_FILE:
            with open(self.FILE_NAME, "w") as file:
                try:
                    file.write("#F %s\n"%self.FILE_NAME)
                    file.write("\n#S 1 xoppy CrossSec results\n")
                    file.write("#N 5\n")
                    tmp = "#L Photon energy [eV]"
                    for unit_item in unit_items:
                        tmp += " %s [%s]"%(calculate_items[CALCULATE],unit_item)
                    tmp += "\n"
                    file.write(tmp)
                    for j in range(out.shape[1]):
                        file.write(("%19.12e "*out.shape[0]+"\n")%tuple(out[i,j] for i in range(out.shape[0])))
                    # the context manager closes the file; no explicit close needed
                    print("File written to disk: %s \n"%self.FILE_NAME)
                except Exception:
                    raise Exception("CrossSec: The data could not be dumped onto the specified file!\n")
        return to_return
if __name__ == "__main__":
    # Stand-alone launcher: run the widget inside its own Qt event loop,
    # saving its settings once the application window is closed.
    application = QApplication(sys.argv)
    widget = OWxcrosssec()
    widget.show()
    application.exec()
    widget.saveSettings()
| [
"oasys.widgets.gui.widgetBox",
"numpy.log10",
"oasys.widgets.congruence.checkEmptyString",
"orangecontrib.xoppy.util.xoppy_xraylib_util.cross_calc_mix",
"orangecontrib.xoppy.util.xoppy_xraylib_util.cross_calc_nist",
"orangecontrib.xoppy.util.xoppy_xraylib_util.density_element",
"oasys.widgets.congruence... | [((828, 838), 'orangewidget.settings.Setting', 'Setting', (['(2)'], {}), '(2)\n', (835, 838), False, 'from orangewidget.settings import Setting\n'), ((856, 869), 'orangewidget.settings.Setting', 'Setting', (['"""Si"""'], {}), "('Si')\n", (863, 869), False, 'from orangewidget.settings import Setting\n'), ((885, 897), 'orangewidget.settings.Setting', 'Setting', (['(177)'], {}), '(177)\n', (892, 897), False, 'from orangewidget.settings import Setting\n'), ((912, 924), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (919, 924), False, 'from orangewidget.settings import Setting\n'), ((941, 951), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (948, 951), False, 'from orangewidget.settings import Setting\n'), ((963, 973), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (970, 973), False, 'from orangewidget.settings import Setting\n'), ((990, 1004), 'orangewidget.settings.Setting', 'Setting', (['(100.0)'], {}), '(100.0)\n', (997, 1004), False, 'from orangewidget.settings import Setting\n'), ((1019, 1035), 'orangewidget.settings.Setting', 'Setting', (['(10000.0)'], {}), '(10000.0)\n', (1026, 1035), False, 'from orangewidget.settings import Setting\n'), ((1048, 1060), 'orangewidget.settings.Setting', 'Setting', (['(200)'], {}), '(200)\n', (1055, 1060), False, 'from orangewidget.settings import Setting\n'), ((1072, 1082), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (1079, 1082), False, 'from orangewidget.settings import Setting\n'), ((1102, 1112), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (1109, 1112), False, 'from orangewidget.settings import Setting\n'), ((1135, 1158), 'orangewidget.settings.Setting', 'Setting', (['"""CrossSec.dat"""'], {}), "('CrossSec.dat')\n", (1142, 1158), False, 'from orangewidget.settings import Setting\n'), ((13031, 13053), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), 
'(sys.argv)\n', (13043, 13053), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox\n'), ((1237, 1369), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['self.controlArea', "(self.name + ' Input Parameters')"], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 5)'}), "(self.controlArea, self.name + ' Input Parameters',\n orientation='vertical', width=self.CONTROL_AREA_WIDTH - 5)\n", (1255, 1369), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((1449, 1467), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (1462, 1467), False, 'from orangewidget import gui\n'), ((1864, 1882), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (1877, 1882), False, 'from orangewidget import gui\n'), ((1899, 1919), 'orangecontrib.xoppy.util.xoppy_xraylib_util.nist_compound_list', 'nist_compound_list', ([], {}), '()\n', (1917, 1919), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import nist_compound_list, density_element, density_nist\n'), ((2262, 2280), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2275, 2280), False, 'from orangewidget import gui\n'), ((2545, 2563), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2558, 2563), False, 'from orangewidget import gui\n'), ((2876, 2894), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2889, 2894), False, 'from orangewidget import gui\n'), ((3296, 3314), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3309, 3314), False, 'from orangewidget import gui\n'), ((3691, 3709), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3704, 3709), False, 'from orangewidget import gui\n'), ((4026, 4044), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4039, 4044), False, 'from orangewidget import gui\n'), ((4359, 4377), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', 
(4372, 4377), False, 'from orangewidget import gui\n'), ((4689, 4707), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4702, 4707), False, 'from orangewidget import gui\n'), ((5160, 5178), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5173, 5178), False, 'from orangewidget import gui\n'), ((5492, 5510), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5505, 5510), False, 'from orangewidget import gui\n'), ((5685, 5713), 'orangewidget.gui.rubber', 'gui.rubber', (['self.controlArea'], {}), '(self.controlArea)\n', (5695, 5713), False, 'from orangewidget import gui\n'), ((6354, 6409), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.DESCRIPTOR', '"""formula"""'], {}), "(self.DESCRIPTOR, 'formula')\n", (6381, 6409), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((6466, 6531), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.GRIDSTART', '"""Starting Energy"""'], {}), "(self.GRIDSTART, 'Starting Energy')\n", (6496, 6531), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((10424, 10444), 'numpy.arange', 'numpy.arange', (['(0)', '(500)'], {}), '(0, 500)\n', (10436, 10444), False, 'import numpy\n'), ((10867, 10935), 'orangecontrib.xoppy.util.xoppy_xraylib_util.cross_calc', 'cross_calc', (['descriptor', 'energy'], {'calculate': 'CALCULATE', 'density': 'density'}), '(descriptor, energy, calculate=CALCULATE, density=density)\n', (10877, 10935), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import cross_calc, cross_calc_mix, cross_calc_nist\n'), ((11480, 11508), 'numpy.vstack', 'numpy.vstack', (['(tmp_x, tmp_y)'], {}), '((tmp_x, tmp_y))\n', (11492, 11508), False, 'import numpy\n'), ((6595, 6660), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.GRIDEND', '"""Energy to"""'], {}), "(self.GRIDEND, 'Energy to')\n", (6633, 
6660), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((6677, 6767), 'oasys.widgets.congruence.checkLessThan', 'congruence.checkLessThan', (['self.GRIDSTART', 'self.GRIDEND', '"""Starting Energy"""', '"""Energy to"""'], {}), "(self.GRIDSTART, self.GRIDEND, 'Starting Energy',\n 'Energy to')\n", (6701, 6767), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((6793, 6863), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.GRIDN', '"""Number of points"""'], {}), "(self.GRIDN, 'Number of points')\n", (6831, 6863), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((10468, 10495), 'numpy.log10', 'numpy.log10', (['(10000.0 / 30.0)'], {}), '(10000.0 / 30.0)\n', (10479, 10495), False, 'import numpy\n'), ((10997, 11069), 'orangecontrib.xoppy.util.xoppy_xraylib_util.cross_calc_mix', 'cross_calc_mix', (['descriptor', 'energy'], {'calculate': 'CALCULATE', 'density': 'density'}), '(descriptor, energy, calculate=CALCULATE, density=density)\n', (11011, 11069), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import cross_calc, cross_calc_mix, cross_calc_nist\n'), ((9790, 9836), 'orangecontrib.xoppy.util.xoppy_xraylib_util.density_element', 'density_element', (['self.DESCRIPTOR'], {'verbose': '(True)'}), '(self.DESCRIPTOR, verbose=True)\n', (9805, 9836), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import nist_compound_list, density_element, density_nist\n'), ((10633, 10657), 'numpy.array', 'numpy.array', (['[GRIDSTART]'], {}), '([GRIDSTART])\n', (10644, 10657), False, 'import numpy\n'), ((10701, 10742), 'numpy.linspace', 'numpy.linspace', (['GRIDSTART', 'GRIDEND', 'GRIDN'], {}), '(GRIDSTART, GRIDEND, GRIDN)\n', (10715, 10742), False, 'import numpy\n'), ((10786, 10810), 'numpy.array', 'numpy.array', (['[GRIDSTART]'], {}), '([GRIDSTART])\n', (10797, 10810), False, 'import numpy\n'), ((11130, 11203), 
'orangecontrib.xoppy.util.xoppy_xraylib_util.cross_calc_nist', 'cross_calc_nist', (['descriptor', 'energy'], {'calculate': 'CALCULATE', 'density': 'density'}), '(descriptor, energy, calculate=CALCULATE, density=density)\n', (11145, 11203), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import cross_calc, cross_calc_mix, cross_calc_nist\n'), ((10124, 10144), 'orangecontrib.xoppy.util.xoppy_xraylib_util.nist_compound_list', 'nist_compound_list', ([], {}), '()\n', (10142, 10144), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import nist_compound_list, density_element, density_nist\n'), ((10269, 10307), 'orangecontrib.xoppy.util.xoppy_xraylib_util.density_nist', 'density_nist', (['descriptor'], {'verbose': '(True)'}), '(descriptor, verbose=True)\n', (10281, 10307), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import nist_compound_list, density_element, density_nist\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: hanshengjiang
"""
import numpy as np
#--------------
# auxiliary function
def KLD(p,q):
    """Kullback-Leibler divergence sum_i p[i] * log(p[i] / q[i]).

    Both inputs are flattened before use and must contain the same number
    of strictly positive entries.  Accepts any array_like (not only
    np.ndarray), a backward-compatible generalization of the original.

    Parameters
    ----------
    p, q : array_like
        The (unnormalized) distributions to compare.

    Returns
    -------
    float
        The KL divergence of p from q.
    """
    p = np.ravel(p)
    q = np.ravel(q)
    # Vectorized one-pass replacement of the original element-wise Python loop.
    return float(np.sum(p * np.log(p / q)))
#-------------- | [
"numpy.log"
] | [((254, 273), 'numpy.log', 'np.log', (['(p[i] / q[i])'], {}), '(p[i] / q[i])\n', (260, 273), True, 'import numpy as np\n')] |
import mmcv
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from .recall import eval_recalls
def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
    """Evaluate detection results in COCO json format and print the summary.

    Args:
        result_files (str | dict): Path to the json result file, or a dict
            mapping each result type to its own file.
        result_types (list[str]): Metrics to evaluate; each must be one of
            'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'.
        coco (str | COCO): Annotation file path or an initialized COCO object.
        max_dets (tuple[int]): Proposal numbers used for recall evaluation.
    """
    for res_type in result_types:
        assert res_type in [
            'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
        ]

    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    # 'proposal_fast' bypasses COCOeval entirely and only prints AR numbers.
    if result_types == ['proposal_fast']:
        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return

    for res_type in result_types:
        if isinstance(result_files, str):
            result_file = result_files
        elif isinstance(result_files, dict):
            result_file = result_files[res_type]
        else:
            # BUGFIX: this used to be `assert TypeError(...)`, which always
            # passes because an exception instance is truthy; raise instead.
            raise TypeError('result_files must be a str or dict')
        assert result_file.endswith('.json')

        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # 'proposal' is evaluated with the bbox metric, class-agnostic.
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
def fast_eval_recall(results,
                     coco,
                     max_dets,
                     iou_thrs=np.arange(0.5, 0.96, 0.05)):
    """Evaluate proposal recall directly, without going through COCOeval.

    Args:
        results: list of per-image proposal arrays, or path to a .pkl file
            containing such a list.
        coco: an initialized pycocotools COCO object with the ground truth.
        max_dets: proposal counts at which recall is evaluated.
        iou_thrs: IoU thresholds.  The default array is created once at
            definition time and is shared across calls; it is never mutated
            here, so that sharing is safe.

    Returns:
        1-D array: recalls averaged over axis 1, one value per max_dets entry.
    """
    if mmcv.is_str(results):
        assert results.endswith('.pkl')
        results = mmcv.load(results)
    elif not isinstance(results, list):
        raise TypeError(
            'results must be a list of numpy arrays or a filename, not {}'.
            format(type(results)))

    # Collect ground-truth boxes per image, aligned with coco.getImgIds().
    gt_bboxes = []
    img_ids = coco.getImgIds()
    for i in range(len(img_ids)):
        ann_ids = coco.getAnnIds(imgIds=img_ids[i])
        ann_info = coco.loadAnns(ann_ids)
        if len(ann_info) == 0:
            # Image without annotations: keep an empty (0, 4) placeholder so
            # indices stay aligned with img_ids.
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        bboxes = []
        for ann in ann_info:
            # Skip ignored and crowd annotations.
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            # Convert COCO [x, y, w, h] to inclusive [x1, y1, x2, y2] corners.
            bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
        bboxes = np.array(bboxes, dtype=np.float32)
        if bboxes.shape[0] == 0:
            # All annotations were skipped: normalize to an empty (0, 4) array.
            bboxes = np.zeros((0, 4))
        gt_bboxes.append(bboxes)

    recalls = eval_recalls(
        gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
    ar = recalls.mean(axis=1)
    return ar
def xyxy2xywh(bbox):
    """Convert an [x1, y1, x2, y2, ...] box to COCO-style [x, y, w, h].

    Coordinates are treated as inclusive pixel indices, hence the +1 when
    computing width and height.  Extra trailing entries (e.g. a score) are
    ignored.
    """
    coords = bbox.tolist()
    width = coords[2] - coords[0] + 1
    height = coords[3] - coords[1] + 1
    return [coords[0], coords[1], width, height]
def proposal2json(dataset, results):
    """Convert per-image proposal arrays into COCO-style json dicts.

    Every proposal gets category_id 1; the 5th column of each row is used
    as the score.
    """
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        for bbox in results[idx]:
            json_results.append({
                'image_id': img_id,
                'bbox': xyxy2xywh(bbox),
                'score': float(bbox[4]),
                'category_id': 1,
            })
    return json_results
def det2json(dataset, results):
    """Convert per-class detection results into COCO-style json dicts.

    results[idx] is a list of bbox arrays, one per class label; labels are
    mapped to COCO category ids through dataset.cat_ids.
    """
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        for label, bboxes in enumerate(results[idx]):
            for bbox in bboxes:
                json_results.append({
                    'image_id': img_id,
                    'bbox': xyxy2xywh(bbox),
                    'score': float(bbox[4]),
                    'category_id': dataset.cat_ids[label],
                })
    return json_results
def segm2json(dataset, results):
    """Convert (det, seg) result pairs into COCO-style bbox and segm dicts.

    Args:
        dataset: dataset providing img_ids and cat_ids mappings.
        results: per-image (det, seg) pairs; det is a list of per-class bbox
            arrays, seg is either a list of per-class segmentations or a
            (segms, mask_scores) tuple for detectors that score masks
            separately.

    Returns:
        (bbox_json_results, segm_json_results): two lists of json-ready dicts.
    """
    bbox_json_results = []
    segm_json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        det, seg = results[idx]
        for label in range(len(det)):
            # bbox results
            bboxes = det[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = dataset.cat_ids[label]
                bbox_json_results.append(data)
            # segm results
            # some detectors use different score for det and segm
            if isinstance(seg, tuple):
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                segms = seg[label]
                # fall back to the bbox scores for the masks
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['score'] = float(mask_score[i])
                data['category_id'] = dataset.cat_ids[label]
                # RLE 'counts' may be bytes; decode in place so the dict is
                # json-serializable (note: this mutates the input segms).
                if isinstance(segms[i]['counts'], bytes):
                    segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                segm_json_results.append(data)
    return bbox_json_results, segm_json_results
def results2json(dataset, results, out_file):
    """Dump detection/segmentation/proposal results as COCO json files.

    The type of ``results[0]`` selects the conversion: ``list`` means
    per-class detections, ``tuple`` means detections plus masks, and
    ``np.ndarray`` means class-agnostic proposals.

    Args:
        dataset: dataset with ``img_ids`` (and ``cat_ids`` where needed).
        results: per-image results in one of the three formats above.
        out_file (str): filename prefix for the dumped json files.

    Returns:
        dict: mapping of result type (``'bbox'``/``'proposal'``/``'segm'``)
        to the written filename.

    Raises:
        TypeError: if ``results[0]`` has none of the supported types.
    """
    def out_name(result_type):
        # Preserve the historical naming scheme '<out_file>.<type>.json'.
        return '{}.{}.json'.format(out_file, result_type)

    result_files = dict()
    first = results[0]
    if isinstance(first, list):
        json_results = det2json(dataset, results)
        # 'proposal' deliberately points at the same bbox file.
        result_files['bbox'] = out_name('bbox')
        result_files['proposal'] = out_name('bbox')
        mmcv.dump(json_results, result_files['bbox'])
    elif isinstance(first, tuple):
        json_results = segm2json(dataset, results)
        result_files['bbox'] = out_name('bbox')
        result_files['proposal'] = out_name('bbox')
        result_files['segm'] = out_name('segm')
        mmcv.dump(json_results[0], result_files['bbox'])
        mmcv.dump(json_results[1], result_files['segm'])
    elif isinstance(first, np.ndarray):
        json_results = proposal2json(dataset, results)
        result_files['proposal'] = out_name('proposal')
        mmcv.dump(json_results, result_files['proposal'])
    else:
        raise TypeError('invalid type of results')
    return result_files
| [
"mmcv.is_str",
"pycocotools.cocoeval.COCOeval",
"pycocotools.coco.COCO",
"mmcv.dump",
"numpy.array",
"numpy.zeros",
"mmcv.load",
"numpy.arange"
] | [((383, 400), 'mmcv.is_str', 'mmcv.is_str', (['coco'], {}), '(coco)\n', (394, 400), False, 'import mmcv\n'), ((1639, 1665), 'numpy.arange', 'np.arange', (['(0.5)', '(0.96)', '(0.05)'], {}), '(0.5, 0.96, 0.05)\n', (1648, 1665), True, 'import numpy as np\n'), ((1676, 1696), 'mmcv.is_str', 'mmcv.is_str', (['results'], {}), '(results)\n', (1687, 1696), False, 'import mmcv\n'), ((418, 428), 'pycocotools.coco.COCO', 'COCO', (['coco'], {}), '(coco)\n', (422, 428), False, 'from pycocotools.coco import COCO\n'), ((1214, 1249), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['coco', 'coco_dets', 'iou_type'], {}), '(coco, coco_dets, iou_type)\n', (1222, 1249), False, 'from pycocotools.cocoeval import COCOeval\n'), ((1758, 1776), 'mmcv.load', 'mmcv.load', (['results'], {}), '(results)\n', (1767, 1776), False, 'import mmcv\n'), ((2500, 2534), 'numpy.array', 'np.array', (['bboxes'], {'dtype': 'np.float32'}), '(bboxes, dtype=np.float32)\n', (2508, 2534), True, 'import numpy as np\n'), ((5853, 5898), 'mmcv.dump', 'mmcv.dump', (['json_results', "result_files['bbox']"], {}), "(json_results, result_files['bbox'])\n", (5862, 5898), False, 'import mmcv\n'), ((560, 578), 'numpy.array', 'np.array', (['max_dets'], {}), '(max_dets)\n', (568, 578), True, 'import numpy as np\n'), ((2591, 2607), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (2599, 2607), True, 'import numpy as np\n'), ((6215, 6263), 'mmcv.dump', 'mmcv.dump', (['json_results[0]', "result_files['bbox']"], {}), "(json_results[0], result_files['bbox'])\n", (6224, 6263), False, 'import mmcv\n'), ((6273, 6321), 'mmcv.dump', 'mmcv.dump', (['json_results[1]', "result_files['segm']"], {}), "(json_results[1], result_files['segm'])\n", (6282, 6321), False, 'import mmcv\n'), ((2204, 2220), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (2212, 2220), True, 'import numpy as np\n'), ((6511, 6560), 'mmcv.dump', 'mmcv.dump', (['json_results', "result_files['proposal']"], {}), "(json_results, 
result_files['proposal'])\n", (6520, 6560), False, 'import mmcv\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.