| arxiv_id (string, length 0–16) | text (string, length 10–1.65M) |
|---|---|
#!/usr/bin/env python
"""
Copyright 2020 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import sys
import os
import argparse
import time
import logging
import numpy as np
import torch
import torch.nn as nn
from hyperion.hyp_defs import config_logger, set_float_cpu
from hyperion.torch.utils import open_device
from hyperion.torch.helpers import OptimizerFactory as OF
from hyperion.torch.lr_schedulers import LRSchedulerFactory as LRSF
from hyperion.torch.narchs import ResNet2dEncoder as Encoder
from hyperion.torch.narchs import ResNet2dDecoder as Decoder
from hyperion.torch.models import VQVAE as VAE
from hyperion.torch.trainers import VQVAETrainer as Trainer
from hyperion.torch.data import SeqDataset as SD
from hyperion.torch.data import ClassWeightedSeqSampler as Sampler
def train_vae(
data_rspec, train_list, val_list, num_gpus, resume, num_workers, **kwargs
):
set_float_cpu("float32")
logging.info("initializing devices num_gpus={}".format(num_gpus))
device = open_device(num_gpus=num_gpus)
sd_args = SD.filter_args(**kwargs)
sampler_args = Sampler.filter_args(**kwargs)
enc_args = Encoder.filter_args(prefix="enc", **kwargs)
dec_args = Decoder.filter_args(prefix="dec", **kwargs)
vae_args = VAE.filter_args(**kwargs)
opt_args = OF.filter_args(prefix="opt", **kwargs)
lrsch_args = LRSF.filter_args(prefix="lrsch", **kwargs)
trn_args = Trainer.filter_args(**kwargs)
logging.info("seq dataset args={}".format(sd_args))
logging.info("sampler args={}".format(sampler_args))
logging.info("encoder args={}".format(enc_args))
logging.info("decoder args={}".format(dec_args))
logging.info("vae args={}".format(vae_args))
logging.info("optimizer args={}".format(opt_args))
logging.info("lr scheduler args={}".format(lrsch_args))
logging.info("trainer args={}".format(trn_args))
logging.info("init datasets")
train_data = SD(data_rspec, train_list, return_class=False, **sd_args)
val_data = SD(data_rspec, val_list, return_class=False, is_val=True, **sd_args)
logging.info("init samplers")
train_sampler = Sampler(train_data, **sampler_args)
val_sampler = Sampler(val_data, **sampler_args)
largs = {"num_workers": num_workers, "pin_memory": True} if num_gpus > 0 else {}
train_loader = torch.utils.data.DataLoader(
train_data, batch_sampler=train_sampler, **largs
)
test_loader = torch.utils.data.DataLoader(
val_data, batch_sampler=val_sampler, **largs
)
encoder = Encoder(**enc_args)
decoder = Decoder(**dec_args)
model = VAE(encoder, decoder, **vae_args)
logging.info(str(model))
optimizer = OF.create(model.parameters(), **opt_args)
lr_sch = LRSF.create(optimizer, **lrsch_args)
metrics = {"mse": nn.MSELoss(), "L1": nn.L1Loss()}
trainer = Trainer(
model,
optimizer,
device=device,
metrics=metrics,
lr_scheduler=lr_sch,
data_parallel=(num_gpus > 1),
**trn_args
)
if resume:
trainer.load_last_checkpoint()
trainer.fit(train_loader, test_loader)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
fromfile_prefix_chars="@",
description="Train VQ-VAE with ResNet2d Encoder-Decoder",
)
parser.add_argument("--data-rspec", required=True)
parser.add_argument("--train-list", required=True)
parser.add_argument("--val-list", required=True)
SD.add_argparse_args(parser)
Sampler.add_argparse_args(parser)
parser.add_argument(
"--num-workers", type=int, default=5, help="num_workers of data loader"
)
Encoder.add_argparse_args(parser, prefix="enc")
Decoder.add_argparse_args(parser, prefix="dec")
VAE.add_argparse_args(parser)
OF.add_argparse_args(parser, prefix="opt")
LRSF.add_argparse_args(parser, prefix="lrsch")
parser.add_argument(
"--num-gpus", type=int, default=1, help="number of gpus, if 0 it uses cpu"
)
parser.add_argument("--seed", type=int, default=1123581321, help="random seed")
parser.add_argument(
"--resume",
action="store_true",
default=False,
help="resume training from checkpoint",
)
parser.add_argument(
"-v", "--verbose", dest="verbose", default=1, choices=[0, 1, 2, 3], type=int
)
args = parser.parse_args()
config_logger(args.verbose)
del args.verbose
logging.debug(args)
torch.manual_seed(args.seed)
del args.seed
train_vae(**vars(args))
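# Example invocation (a sketch; the script name and paths below are placeholders, while the
# flags are the ones registered by the argparse setup above; encoder/decoder, optimizer and
# lr-scheduler options come from the respective add_argparse_args() calls):
#
#   train_vqvae.py \
#       --data-rspec scp:data/train/feats.scp \
#       --train-list data/train/lists/train.scp \
#       --val-list data/train/lists/val.scp \
#       --num-gpus 1 --num-workers 4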
|
|
"""classicML的核函数."""
import numpy as np
__version__ = 'backend.python.kernels.0.10.b0'
class Kernel(object):
"""核函数的基类.
Attributes:
name: str, default='kernel',
核函数名称.
Raises:
NotImplementedError: __call__方法需要用户实现.
"""
def __init__(self, name='kernel'):
"""
Arguments:
name: str, default='kernel',
核函数名称.
"""
self.name = name
def __call__(self, x_i, x_j):
raise NotImplementedError
class Linear(Kernel):
"""线性核函数.
"""
def __init__(self, name='linear'):
super(Linear, self).__init__(name=name)
def __call__(self, x_i, x_j):
"""函数实现.
Arguments:
x_i: numpy.ndarray, 第一组特征向量.
x_j: numpy.ndarray, 第二组特征向量.
Returns:
核函数映射后的特征向量.
"""
kappa = np.matmul(x_j, x_i.T)
return np.asmatrix(kappa)
class Polynomial(Kernel):
"""多项式核函数.
Attributes:
name: str, default='poly',
核函数名称.
gamma: float, default=1.0,
核函数系数.
degree: int, default=3,
多项式的次数.
"""
def __init__(self, name='poly', gamma=1.0, degree=3):
super(Polynomial, self).__init__(name=name)
self.gamma = gamma
self.degree = degree
def __call__(self, x_i, x_j):
"""函数实现.
Arguments:
x_i: numpy.ndarray, 第一组特征向量.
x_j: numpy.ndarray, 第二组特征向量.
Returns:
核函数映射后的特征向量.
"""
kappa = self.gamma * np.power(np.matmul(x_j, x_i.T), self.degree)
return np.asmatrix(kappa)
class RBF(Kernel):
"""径向基核函数.
Attributes:
name: str, default='rbf',
核函数名称.
gamma: float, default=1.0,
核函数系数.
"""
def __init__(self, name='rbf', gamma=1.0):
super(RBF, self).__init__(name=name)
self.gamma = gamma
def __call__(self, x_i, x_j):
"""函数实现.
Arguments:
x_i: numpy.ndarray, 第一组特征向量.
x_j: numpy.ndarray, 第二组特征向量.
Returns:
核函数映射后的特征向量.
"""
kappa = np.exp(self.gamma * -np.sum(np.power(x_j - x_i, 2), axis=1))
return np.asmatrix(kappa)
class Gaussian(RBF):
"""高斯核函数.
具体实现参看径向基核函数.
"""
def __init__(self, name='gaussian', gamma=1.0):
super(Gaussian, self).__init__(name=name, gamma=gamma)
class Sigmoid(Kernel):
"""Sigmoid核函数.
Attributes:
name: str, default='sigmoid',
核函数名称.
gamma: float, default=1.0,
核函数系数.
beta: float, default=1.0,
核函数参数.
theta: float, default=-1.0,
核函数参数.
"""
def __init__(self, name='sigmoid', gamma=1.0, beta=1.0, theta=-1.0):
super(Sigmoid, self).__init__(name=name)
self.gamma = gamma
self.beta = beta
self.theta = theta
def __call__(self, x_i, x_j):
"""函数实现.
Arguments:
x_i: numpy.ndarray, 第一组特征向量.
x_j: numpy.ndarray, 第二组特征向量.
Returns:
核函数映射后的特征向量.
"""
kappa = self.gamma * np.tanh(self.beta * np.matmul(x_j, x_i.T) + self.theta)
return np.asmatrix(kappa)
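# Usage sketch (illustrative only, not part of the library): each kernel maps a batch of
# vectors x_j against a single sample x_i and returns a 1 x N numpy matrix.
#
#   import numpy as np
#   X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])   # three 2-d samples
#   x_i = X[0]
#   Linear()(x_i, X)                 # inner product of every row of X with x_i
#   RBF(gamma=0.5)(x_i, X)           # exp(-0.5 * ||x_j - x_i||^2) for every row
#   Polynomial(gamma=1.0, degree=2)(x_i, X)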
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import Dataset
import numpy as np
class MiniBatches(Dataset):
"""
Convert data into mini-batches.
"""
def __init__(self, dataset, batch_size=20, cache=True):
self.origin = dataset
self.size = batch_size
self._cached_train_set = None
self._cached_valid_set = None
self._cached_test_set = None
self.cache = cache
    def _yield_data(self, subset):
        if not isinstance(subset, list):
            subset = list(subset)
        for i in range(0, len(subset), self.size):
            yield [np.array(x) for x in zip(*subset[i:i + self.size])]
def train_set(self):
if self.cache and self._cached_train_set is not None:
return self._cached_train_set
data_generator = self._yield_data(self.origin.train_set())
if data_generator is None:
return None
if self.cache:
self._cached_train_set = list(data_generator)
return self._cached_train_set
else:
return data_generator
def test_set(self):
if not self.origin.test_set():
return None
if self.cache and self._cached_test_set is not None:
return self._cached_test_set
data_generator = self._yield_data(self.origin.test_set())
if data_generator is None:
return None
if self.cache:
self._cached_test_set = list(data_generator)
return self._cached_test_set
else:
return data_generator
def valid_set(self):
if not self.origin.valid_set():
return None
if self.cache and self._cached_valid_set is not None:
return self._cached_valid_set
data_generator = self._yield_data(self.origin.valid_set())
if data_generator is None:
return None
if self.cache:
self._cached_valid_set = list(data_generator)
return self._cached_valid_set
else:
return data_generator
    def train_size(self):
        train_size = self.origin.train_size()
        if train_size is None:
            train_size = len(list(self.origin.train_set()))
        # number of mini-batches; the final partial batch is included
        return int(np.ceil(float(train_size) / self.size))
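# Usage sketch (illustrative; assumes `dataset` is an object exposing train_set() /
# valid_set() / test_set() as iterables of (input, target) pairs):
#
#   batches = MiniBatches(dataset, batch_size=32)
#   for x_batch, y_batch in batches.train_set():
#       pass  # x_batch, y_batch are numpy arrays holding up to 32 samples each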
|
|
import os, sys
import numpy as np
import shutil
from tqdm import tqdm
from data_index import cat_id_to_desc, cat_desc_to_id, get_example_ids
sys.path.append('..')
from render_utils import render_obj_grid, render_obj_with_view
def render_example(example_id, render_dir, input_dir, output_dir, texture_dir, csv_file, shape, views):
example_in_dir = os.path.join(input_dir, example_id)
example_out_dir = os.path.join(output_dir, example_id)
example_render_dir = os.path.join(render_dir, example_id)
    # Set obj file path; skip examples that have no normalized model
    obj = os.path.join(example_in_dir, 'models', 'model_normalized.obj')
    if not os.path.isfile(obj):
        return False
if os.path.isdir(example_out_dir):
if len(os.listdir(example_out_dir)) == views:
return False
else:
shutil.rmtree(example_out_dir)
if not os.path.isdir(example_out_dir):
os.makedirs(example_out_dir)
if not os.path.isdir(example_render_dir):
os.makedirs(example_render_dir)
# Set texture images path
if os.path.isdir(texture_dir):
textures = [name for name in os.listdir(texture_dir)]
textures.sort()
else:
raise ValueError('Invalid texture directory !')
# redirect output to log file
logfile = 'render.log'
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
texture = textures[np.random.randint(0, len(textures))]
texture_img = os.path.join(texture_dir, texture)
# generate the synthetic training images and its multi-view reference images
render_obj_grid(obj, example_render_dir, [512, 512], 30, 5, 1, 2, False, None, None)
render_obj_with_view(obj, example_out_dir, csv_file, texture_img, views, shape)
# disable output redirection
os.close(1)
os.dup(old)
os.close(old)
return True
def render_cat(input_dir, render_dir, output_dir, texture_dir, csv_file, cat_id, shape, views):
example_ids = get_example_ids(input_dir, cat_id)
if len(example_ids) > 200: example_ids = example_ids[:200]
cat_in_dir = os.path.join(input_dir, cat_id)
cat_out_dir = os.path.join(output_dir, cat_id)
cat_render_dir = os.path.join(render_dir, cat_id)
for example_id in tqdm(example_ids):
render_example(example_id, cat_render_dir, cat_in_dir, cat_out_dir, texture_dir, csv_file, shape, views)
model_dir = 'ShapeNetCore.v2'
output_dir = 'Synthetic_training_images'
render_dir = 'Renders_semi_sphere'
texture_dir = 'textures'
cats = ['airplane', 'bag', 'bathtub', 'bed', 'birdhouse', 'bookshelf', 'bus', 'cabinet', 'camera', 'car',
'chair', 'clock', 'dishwasher', 'display', 'faucet', 'lamp', 'laptop', 'speaker', 'mailbox', 'microwave',
'motorcycle', 'piano', 'pistol', 'printer', 'rifle', 'sofa', 'table', 'train', 'watercraft', 'washer']
csv_file = 'synthetic_annotation.txt'
if not os.path.exists(csv_file):
with open(csv_file, 'a') as f:
f.write('image_path,cat_id,example_id,azimuth,elevation\n')
for cat in tqdm(cats):
cat_id = cat_desc_to_id(cat)
render_cat(model_dir, render_dir, output_dir, texture_dir, csv_file, cat_id, [512, 512], 20)
os.system("rm render.log")
|
|
import unittest
from cupy import _core
from cupy import testing
@testing.gpu
class TestArrayOwndata(unittest.TestCase):
def setUp(self):
self.a = _core.ndarray(())
def test_original_array(self):
assert self.a.flags.owndata is True
def test_view_array(self):
v = self.a.view()
assert v.flags.owndata is False
def test_reshaped_array(self):
r = self.a.reshape(())
assert r.flags.owndata is False
|
|
import os
os.system('pip install -q efficientnet --quiet')
import tensorflow as tf
import pandas as pd
import numpy as np
import cv2
import itertools
from tensorflow.keras.applications.imagenet_utils import preprocess_input
class DataGenerator(tf.keras.utils.Sequence):
def __init__(self, dataset, batch_size, image_size, cropper, preprocess):
#batch size and image size
self.batch_size = batch_size
self.image_size = image_size
#list of landmark id in train
self.landmark_ids = dataset["landmark_id"].unique()
#list of id of every image in train
self.ids = dataset["id"].unique()
#dictionary to pass from landmark to images
self.dict_landmark_to_images_mapping = dataset.groupby("landmark_id")["id"].apply(list).to_dict()
#path of each image
self.path_dict = dataset.set_index("id")["train_path"].to_dict()
#shuffle index
self.on_epoch_begin()
self.cropper = cropper
self.preprocess = preprocess
pass
#number of landmark divided batch_size
def __len__(self,):
return int(np.floor(len(self.landmark_ids) / self.batch_size))
    #shuffle landmark ids (note: Keras Sequence only calls on_epoch_end automatically; this method is invoked manually in __init__)
def on_epoch_begin(self):
self.indexes = self.landmark_ids.copy()
np.random.shuffle(self.indexes)
def __getitem__(self, index):
#get landmark id after shuffle
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
#get anchors from landmark id
anchors = [self.get_anchor(x) for x in indexes]
#get positive from anchor
positives = [self.get_positives(anchors[x], indexes[x]) for x in range(len(indexes))]
#get negative from anchor
negatives = [self.get_negatives(anchors[x], indexes[x]) for x in range(len(indexes))]
#define list of list of path to images
X = [anchors,positives,negatives]
#get images
X = self.get_images(X)
y = indexes
return X, y
#Get anchor from landmark
def get_anchor(self, landmark_id):
#all image of landmark
all_landmark_image = self.dict_landmark_to_images_mapping[landmark_id]
#select one image
return np.random.choice(all_landmark_image)
#get positive from anchor inside landmark_id group
def get_positives(self, anchor, landmark_id):
#get all image relative to landmark id group
all_positive_images = self.dict_landmark_to_images_mapping[landmark_id]
#find positive image not considering anchor one.
all_positive_images = [t for t in all_positive_images if t != anchor]
#return random choice from positive images
return np.random.choice(all_positive_images)
#get negative from anchor outside landmark_id group
def get_negatives(self, anchor, landmark_id):
#choose random landmark_id (not the anchor one)
random_negative_landmarkid = np.random.choice([t for t in self.landmark_ids if t != landmark_id]) ##CAN INSERT LOGIC INSIDE RANDOM !
#choose random image inside this group
random_negative_image = self.dict_landmark_to_images_mapping[random_negative_landmarkid]
return np.random.choice(random_negative_image)
#pre process
def eff_net_preprocess(self, image):
#center and crop
image = self.cropper(image, image_size = self.image_size)
#pre process
image = self.preprocess(image)
return image
#function used to get images from list of id of images
def get_images(self, X):
X_=[]
anchors = X[0]
positives = X[1]
negatives = X[2]
anchors_ = []
positives_ = []
negatives_ = []
#get anchor images : Convert ids --> Path of image --> Download locally image --> Pre process --> Append
for a in anchors:
a = self.path_dict[a]
a = cv2.imread(a)
a = self.eff_net_preprocess(a)
anchors_.append(a)
for p in positives:
p = self.path_dict[p]
p = cv2.imread(p)
p = self.eff_net_preprocess(p)
positives_.append(p)
for n in negatives:
n = self.path_dict[n]
n = cv2.imread(n)
n = self.eff_net_preprocess(n)
negatives_.append(n)
X_ = [np.array(anchors_), np.array(positives_), np.array(negatives_)]
return X_
class DataGenerator_mining(tf.keras.utils.Sequence):
def __init__(self, dataset, batch_size, image_size, number_of_image = 10):
self.number_of_image = number_of_image
#batch size and image size
self.batch_size = batch_size // self.number_of_image
self.image_size = image_size
#list of landmark id in train
self.landmark_ids = dataset["landmark_id"].unique()
#list of id of every image in train
self.ids = dataset["id"].unique()
#dictionary to pass from landmark to images
self.dict_landmark_to_images_mapping = dataset.groupby("landmark_id")["id"].apply(list).to_dict()
#path of each image
self.path_dict = dataset.set_index("id")["train_path"].to_dict()
#shuffle index
self.on_epoch_begin()
pass
#number of landmark divided batch_size
def __len__(self,):
return int(np.floor(len(self.landmark_ids) / self.batch_size))
#at begin of train shuffle landmark id
def on_epoch_begin(self):
self.indexes = self.landmark_ids.copy()
np.random.shuffle(self.indexes)
def __getitem__(self, index):
#get landmark id after shuffle
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        #get image ids from landmark_id
images_info = [self.get_id(x) for x in indexes]
images_info_flatted = list(itertools.chain.from_iterable(images_info))
#get images
X, y = self.get_images(images_info_flatted)
return X, y
#Get anchor from landmark
def get_id(self, landmark_id):
#all image of landmark
all_landmark_image = self.dict_landmark_to_images_mapping[landmark_id]
#select k images
images_id = np.random.choice(all_landmark_image, self.number_of_image, replace = False)
images_landmark = np.ones(self.number_of_image) * landmark_id
zipped_ = list(zip(list(images_id), list(images_landmark)))
return zipped_
#pre process
def eff_net_preprocess(self, image):
#center and crop
image = tf.image.resize(image, (self.image_size, self.image_size))
#pre process
image = tf.keras.applications.imagenet_utils.preprocess_input(image, mode = 'torch')
image = image.numpy()
return image
#function used to get images from list of id of images
def get_images(self, X):
X_=[]
y_ = []
for id_, target in X:
path = self.path_dict[id_]
images = cv2.imread(path)
images = self.eff_net_preprocess(images)
X_.append(images)
y_.append(target)
X_, y_ = np.array(X_), np.array(y_)
return X_, y_
class DataGenerator_mining_cluster(tf.keras.utils.Sequence):
def __init__(self, dataset, batch_size, image_size, cropper, preprocess, number_of_image = 10):
self.number_of_image = number_of_image
#batch size and image size
self.batch_size = batch_size // self.number_of_image
self.image_size = image_size
#list of landmark id in train
self.landmark_group = dataset[["landmark_id", "cluster_id"]].drop_duplicates().reset_index(drop = True)
self.indexes = self.landmark_group.landmark_id
#list of id of every image in train
self.ids = dataset["id"].unique()
#dictionary to pass from landmark to images
self.dict_landmark_to_images_mapping = dataset.groupby("landmark_id")["id"].apply(list).to_dict()
#path of each image
self.path_dict = dataset.set_index("id")["train_path"].to_dict()
self.cropper = cropper
self.preprocess = preprocess
pass
#number of landmark divided batch_size
def __len__(self,):
return int(np.floor(len(self.indexes) / self.batch_size))
#at begin of train shuffle landmark id
def on_epoch_begin(self):
self.landmark_group = self.landmark_group.groupby('cluster_id').apply(lambda x: x.sample(frac = 1, replace = False)).reset_index(drop = True)
self.indexes = self.landmark_group.landmark_id
def __getitem__(self, index):
#get landmark id after shuffle
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        #get image ids from landmark_id
images_info = [self.get_id(x) for x in indexes]
images_info_flatted = list(itertools.chain.from_iterable(images_info))
#get images
X, y = self.get_images(images_info_flatted)
return X, y
#Get anchor from landmark
def get_id(self, landmark_id):
#all image of landmark
all_landmark_image = self.dict_landmark_to_images_mapping[landmark_id]
#select k images
images_id = np.random.choice(all_landmark_image, self.number_of_image, replace = False)
images_landmark = np.ones(self.number_of_image) * landmark_id
zipped_ = list(zip(list(images_id), list(images_landmark)))
return zipped_
#pre process
def eff_net_preprocess(self, image):
#center and crop
image = self.cropper(image, image_size = self.image_size)
#pre process
image = self.preprocess(image)
return image
#function used to get images from list of id of images
def get_images(self, X):
X_=[]
y_ = []
for id_, target in X:
path = self.path_dict[id_]
images = cv2.imread(path)
images = self.eff_net_preprocess(images)
X_.append(images)
y_.append(target)
X_, y_ = np.array(X_), np.array(y_)
return X_, y_
class predictDataset(tf.keras.utils.Sequence):
def __init__(self, path, batch_size, image_size):
self.paths = path
self.batch_size = batch_size
self.image_size = image_size
self.indexes = np.arange(len(self.paths))
self.len_index = len(self.indexes)
def __len__(self,):
if np.mod(self.len_index, self.batch_size) == 0:
self.last_index = int(self.len_index/self.batch_size) - 1
self.truncate = False
return self.last_index + 1
else:
self.last_index = int(np.floor(len(self.paths) / self.batch_size))
self.truncate = True
return self.last_index + 1
def __getitem__(self, index):
if (index == self.last_index) & self.truncate:
indexes = self.indexes[index*self.batch_size:]
else:
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
images=[self.paths[t] for t in indexes]
images=[cv2.imread(t) for t in images]
images=[self.eff_net_preprocess(t) for t in images]
return np.array(images)
def eff_net_preprocess(self, image):
#center and crop
image = tf.image.resize(image, (self.image_size, self.image_size))
#pre process
image = tf.keras.applications.imagenet_utils.preprocess_input(image)
image = image.numpy()
return image
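# Usage sketch (illustrative; `train_df`, `my_cropper` and `my_preprocess` are placeholder
# names for a DataFrame with "id", "landmark_id" and "train_path" columns and the two
# callables expected by DataGenerator):
#
#   triplet_gen = DataGenerator(train_df, batch_size=16, image_size=256,
#                               cropper=my_cropper, preprocess=my_preprocess)
#   (anchors, positives, negatives), landmark_ids = triplet_gen[0]
#
#   mining_gen = DataGenerator_mining(train_df, batch_size=80, image_size=256, number_of_image=10)
#   images, labels = mining_gen[0]   # 80 images: 10 per landmark from 8 landmarks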
|
|
# -*- coding:UTF-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import numpy as np
from math import floor
from .spp import *
from params import Args
import sys
import platform
if platform.python_version().split('.')[0] == '2':
sys.path.append('./')
from center_loss import CenterLoss
else:
from Model.center_loss import CenterLoss
class Bottleneck(nn.Module):
'''
Inverted Residual Block
'''
def __init__(self, in_channels, out_channels, expansion_factor=6, kernel_size=3, stride=2):
super(Bottleneck, self).__init__()
if stride != 1 and stride != 2:
raise ValueError('Stride should be 1 or 2')
# Inverted Residual Block
self.block = nn.Sequential(
nn.Conv2d(in_channels, in_channels * expansion_factor, 1, bias=False),
nn.BatchNorm2d(in_channels * expansion_factor),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels * expansion_factor, in_channels * expansion_factor,
kernel_size, stride, padding=int((kernel_size-1)/2),
groups=in_channels * expansion_factor, bias=False),
nn.BatchNorm2d(in_channels * expansion_factor),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels * expansion_factor, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
            # Linear bottleneck: no ReLU6 after the projection layer
# nn.ReLU6(inplace=True)
)
# Assumption based on previous ResNet papers: If the number of filters doesn't match,
# there should be a conv1x1 operation.
        self.if_match_bypass = True if in_channels != out_channels else False
        # src ERROR: a bypass convolution is applied regardless of whether the input and
        # output channels match (the flag above is unconditionally overridden)??
        self.if_match_bypass = True
        if self.if_match_bypass:
self.bypass_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, stride, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
output = self.block(x)
# print(output.shape)
# print(x.shape)
        if self.if_match_bypass:
return output + self.bypass_conv(x)
else:
return output + x
def conv_bn(input, output, stride):
    '''
    Plain convolution block (conv + bn + relu)
    :param input: number of input channels
    :param output: number of output channels
    :param stride: stride
    :return: plain convolution block
    '''
return nn.Sequential(
nn.Conv2d(input, output, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(output),
        # inplace defaults to False (a new tensor is created and modified); set True to modify the input tensor directly
nn.LeakyReLU(inplace=True)
)
def conv_bottleneck(input, output, stride):
return Bottleneck(in_channels=input, out_channels=output, stride=stride)
class MobileNet_v2(nn.Module):
'''
    MobileNet v2 network
'''
def __init__(self, num_classes=107):
'''
        Constructor
        :param num_classes: total number of classes
'''
super(MobileNet_v2, self).__init__()
self.spp = SpatialPyramidPool2D([1,2,4])
self.num_classes = num_classes
self.conv_bn_1 = conv_bn(3, 32, 1)
self.pool_1 = nn.MaxPool2d(kernel_size=2)
self.conv_bottleneck_2 = conv_bottleneck(32, 64, 1)
self.pool_2 = nn.MaxPool2d(kernel_size=2)
self.conv_bottleneck_3 = conv_bottleneck(64, 128, 1)
self.pool_3 = nn.MaxPool2d(kernel_size=2)
self.drop_3 = nn.Dropout(Args.keep_prob)
self.conv_bottleneck_4 = conv_bottleneck(128, 256, 1)
self.pool_4 = nn.MaxPool2d(kernel_size=1)
self.drop_4 = nn.Dropout(Args.keep_prob)
# self.conv_bottleneck_5 = conv_bottleneck(128, 256, 1)
# self.pool_5 = nn.MaxPool2d(kernel_size=2)
# self.drop_5 = nn.Dropout(Args.keep_prob)
self.fc_5 = nn.Linear(5376, 256)
self.drop_5 = nn.Dropout(Args.keep_prob)
self.fc_6 = nn.Linear(256, self.num_classes)
# self.drop_6 = nn.Dropout(Args.keep_prob)
# initialize model parameters
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv_bn_1(x)
x = self.pool_1(x)
x = self.conv_bottleneck_2(x)
x = self.pool_2(x)
x = self.conv_bottleneck_3(x)
x = self.pool_3(x)
# print(x.shape)
x = self.conv_bottleneck_4(x)
x = self.pool_4(x)
# x = self.conv_bottleneck_5(x)
# x = self.pool_5(x)
# x = self.drop_4(x)
# print(x.shape)
# exit()
x = self.spp(x)
# print(x.shape)
# exit()
x = x.view(-1, 5376)
# x = x.reshape(-1, 5 * 17 * 128)
# x = x.resize(x.shape[0],5*17*128)
# x = x.view(x.shape[0], 5 * 17 * 128)
feature = self.fc_5(x)
x = self.drop_5(feature)
x = self.fc_6(x)
# x = self.drop_6(x)
x = F.log_softmax(x, dim=-1)
# x = F.softmax(x, dim=-1)
return x, feature
class MobileNet_v2_feature(nn.Module):
'''
    MobileNet v2 network (feature-extraction variant)
'''
def __init__(self):
'''
        Constructor (takes no arguments; the classification head is omitted)
'''
super(MobileNet_v2_feature, self).__init__()
self.spp = SpatialPyramidPool2D([1,2,4])
self.conv_bn_1 = conv_bn(3, 32, 1)
self.pool_1 = nn.MaxPool2d(kernel_size=2)
self.conv_bottleneck_2 = conv_bottleneck(32, 64, 1)
self.pool_2 = nn.MaxPool2d(kernel_size=2)
self.conv_bottleneck_3 = conv_bottleneck(64, 128, 1)
self.pool_3 = nn.MaxPool2d(kernel_size=2)
self.drop_3 = nn.Dropout(Args.keep_prob)
self.conv_bottleneck_4 = conv_bottleneck(128, 256, 1)
self.pool_4 = nn.MaxPool2d(kernel_size=1)
self.drop_4 = nn.Dropout(Args.keep_prob)
# self.conv_bottleneck_5 = conv_bottleneck(128, 256, 1)
# self.pool_5 = nn.MaxPool2d(kernel_size=2)
# self.drop_5 = nn.Dropout(Args.keep_prob)
self.fc_5 = nn.Linear(5376, 256)
self.drop_5 = nn.Dropout(Args.keep_prob)
# self.fc_6 = nn.Linear(256, self.num_classes)
# self.drop_6 = nn.Dropout(Args.keep_prob)
# initialize model parameters
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv_bn_1(x)
x = self.pool_1(x)
x = self.conv_bottleneck_2(x)
x = self.pool_2(x)
x = self.conv_bottleneck_3(x)
x = self.pool_3(x)
# print(x.shape)
x = self.conv_bottleneck_4(x)
x = self.pool_4(x)
# x = self.conv_bottleneck_5(x)
# x = self.pool_5(x)
# x = self.drop_4(x)
# print(x.shape)
# exit()
x = self.spp(x)
# print(x.shape)
# exit()
x = x.view(-1, 5376)
# x = x.reshape(-1, 5 * 17 * 128)
# x = x.resize(x.shape[0],5*17*128)
# x = x.view(x.shape[0], 5 * 17 * 128)
feature = self.fc_5(x)
return feature
class MobileNet_v2_class(nn.Module):
'''
    MobileNet v2 network (classification head only)
'''
def __init__(self ,num_classes=107):
'''
        Constructor
        :param num_classes: total number of classes
'''
super(MobileNet_v2_class, self).__init__()
self.num_classes = num_classes
self.drop_5 = nn.Dropout(Args.keep_prob)
self.fc_6 = nn.Linear(256, self.num_classes)
# initialize model parameters
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.drop_5(x)
x = self.fc_6(x)
x = F.log_softmax(x, dim=-1)
return x
if __name__ == '__main__':
net = MobileNet_v2(num_classes=107)
summary(net, (3, 360, 360))
# data = torch.rand((8, 3, 360, 360))
# output, embed = net(data)
# print('input: {}'.format(data.shape))
# print('output: {}'.format(output.shape))
# # print(output)
#
# # embed = net.get_embedding(data)
# print('embedding: {}'.format(embed.shape))
#
# loss = CenterLoss(num_classes=107, feat_dim=256)
# labels = torch.Tensor(np.random.randint(low=0, high=107, size=8)).long()
# print(labels.shape)
# loss_out = loss(embed, labels)
# print(loss_out)
|
|
#!/usr/bin/python3
import os
import copy
import mmh3
import numpy as np
import math
from random import shuffle
from pathlib import Path
import json
from predictor.utility import msg2log
class BF():
"""
Bloom filter
For simplicity, bit array is replaced by bool array
"""
def __init__(self, filter_size:int=None, fp_prob:float=1e-5,repository:str=None, f:object=None):
self.size=filter_size
self.fp_prob=fp_prob
self.bit_arr=np.zeros((self.size),dtype=bool)
self.max_items=self.max_number_items()
self.item_count=0
self.hash_number = self.get_hash_number(self.size,self.max_items)
self.f=f
def __str__(self):
message= f"""
{self.__class__.__name__}
Bit Array size: {self.size}
Expected number of inserted items: {self.max_items}
Hash function count: {self.hash_number}
False-positive probability: {self.fp_prob}
Already inserted items: {self.item_count}
"""
return message
def max_number_items(self):
n=-self.size * (math.log(2.0) * math.log(2.0))/math.log(self.fp_prob)
return int(n)
@classmethod
def get_hash_number(cls, m,n):
"""
Returns the number hash functions calculated by formula
k =(m/n) * ln(2)
:param m:
:param n:
:return:
"""
k= (float(m)/float(n)) * math.log(2.0)
return int(k)
def add_item(self, item):
"""
Add item to Bloom Filter
:param item:
:return:
"""
digests=[]
for i in range(self.hash_number):
digest= mmh3.hash(item,i,signed=False) % self.size
digests.append(digest)
self.bit_arr[digest]=True
message = "item: {} Fired bits are {}".format(item,''.join('{} '.format(i) for i in digests))
msg2log("add_item",message,self.f)
self.item_count+=1
        if self.item_count > self.max_items:
            # estimated false-positive rate p ~ 0.6185 ** (m/n)
            p1 = math.pow(0.6185, float(self.size) / float(self.item_count))
            message = "\nNumber of inserted items {} is greater than the maximum {} allowed for the given false-positive probability {}".format(
                self.item_count, self.max_items, self.fp_prob)
            msg2log("add_item", message, self.f)
            message = "Updated false-positive probability estimate: {}".format(p1)
            msg2log("add_item", message, self.f)
def check_item(self,item):
"""
Check for existence of an item in the filter
:param item:
:return:
"""
for i in range(self.hash_number):
digest=mmh3.hash(item,i,signed=False) % self.size
if self.bit_arr[digest]==False:
return False
return True
def save(self,file_name:str=None):
if file_name is None:
return
with open(file_name,'w') as f:
json.dump({
"size":self.size,
"fp_prob": self.fp_prob,
"max_items": self.max_items,
"item_count": self.item_count,
"hash_number":self.hash_number,
"bit_arr": self.bit_arr.tolist(),
},f,sort_keys=True, indent=4)
pass
def load(self,file_name:str=None):
if file_name is None:
return
if not Path(file_name).exists():
message="{} is not found".format(file_name)
msg2log("load",message, self.f)
return
with open(file_name,'r') as ff:
bf_data=json.load(ff)
msg2log("load",bf_data,self.f)
self.size = bf_data['size']
self.fp_prob = bf_data['fp_prob']
self.max_items = bf_data['max_items']
self.item_count = bf_data['item_count']
self.hash_number = bf_data['hash_number']
self.bit_arr = np.array(bf_data['bit_arr'])
msg2log("load","After deserialize:\n,{}".format(self))
if __name__=="__main__":
pass
# # words to be added
# word_present = ['abound', 'abounds', 'abundance', 'abundant', 'accessable',
# 'bloom', 'blossom', 'bolster', 'bonny', 'bonus', 'bonuses',
# 'coherent', 'cohesive', 'colorful', 'comely', 'comfort',
# 'gems', 'generosity', 'generous', 'generously', 'genial']
#
# # word not added
# word_absent = ['bluff', 'cheater', 'hate', 'war', 'humanity',
# 'racism', 'hurt', 'nuke', 'gloomy', 'facebook',
# 'geeksforgeeks', 'twitter']
#
# with open("a.txt",'w') as ff:
# bf=BF(filter_size=1024,fp_prob=0.05,f=ff)
# print(bf)
# for item in word_present:
# bf.add_item(item)
#
# shuffle(word_present)
# shuffle(word_absent)
# test_words = word_present[:10] + word_absent
# shuffle(test_words)
# for word in test_words:
# if bf.check_item(word):
# if word in word_absent:
# print("'{}' is a false positive!".format(word))
# else:
# print("'{}' is probably present!".format(word))
# else:
# print("'{}' is definitely not present!".format(word))
# print(bf)
#
# bf.save("bf.json")
# del bf
# bf1=BF(filter_size=100,fp_prob=0.01,f=ff)
# bf1.load("bf.json")
# msg2log(None,bf1,ff)
# for word in test_words:
# if bf1.check_item(word):
# if word in word_absent:
# print("'{}' is a false positive!".format(word))
# else:
# print("'{}' is probably present!".format(word))
# else:
# print("'{}' is definitely not present!".format(word))
#
#
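# Worked example of the sizing formulas above (a sketch, not executed):
# with a bit array of m = 1024 bits and a target false-positive probability p = 0.05,
#   max items       n = -m * (ln 2)^2 / ln(p) = 1024 * 0.4805 / 2.9957 ~= 164
#   hash functions  k = (m/n) * ln(2)         = (1024 / 164) * 0.6931 ~= 4
# which is what BF(filter_size=1024, fp_prob=0.05) in the commented demo above computes.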
|
|
# Copyright 2021 cms.rendner (Daniel Schmidt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# source: https://github.com/pandas-dev/pandas/blob/v1.3.0/pandas/io/formats/style.py#L1826-L1978
# source: https://github.com/pandas-dev/pandas/blob/v1.3.0/pandas/io/formats/style.py#L2722-L2786
from plugin_code.apply_args import ApplyArgs
from plugin_code.base_apply_patcher import BaseApplyPatcher
# == copy after here ==
import numpy as np
import pandas as pd
from pandas.io.formats.style import _validate_apply_axis_arg
class BackgroundGradientPatch(BaseApplyPatcher):
def __init__(self, data: pd.DataFrame, apply_args: ApplyArgs, func_kwargs: dict):
BaseApplyPatcher.__init__(self, data, apply_args, func_kwargs)
def _exec_patched_func(self, chunk: pd.DataFrame):
# "gmap":
#
# Gradient map for determining the background colors.
# If not supplied will use the underlying data from rows, columns or frame.
# If given as an ndarray or list-like must be an identical shape to the underlying data considering
# axis and subset. If given as DataFrame or Series must have same index and column labels considering
# axis and subset. If supplied, vmin and vmax should be given relative to this gradient map.
vmin = self._func_kwargs.get("vmin", None)
vmax = self._func_kwargs.get("vmax", None)
gmap = self._func_kwargs.get("gmap", None)
chunk_parent = self._get_parent(chunk)
if gmap is None:
gmap = chunk_parent.to_numpy(dtype=float)
else:
gmap = _validate_apply_axis_arg(gmap, "gmap", float, chunk_parent)
if vmin is None or vmax is None:
if vmin is None:
vmin = np.nanmin(gmap)
if vmax is None:
vmax = np.nanmax(gmap)
# adjust shape of gmap to match shape of chunk
if chunk.empty:
gmap = chunk
# Note:
# "get_indexer_for" has to be used.
# Using "first_row, first_column, last_row, last_column" which were used to create the chunk, like:
#
# (gmap is a DataFrame)
# "gmap = gmap.iloc[self._first_row:self._last_row, self._first_column:self._last_column]"
#
# will not work, because a user could have specified a subset. The coordinates of the subset
# have to be taken into account to adjust the gmap correctly. This is all done automatically
# by using "get_indexer_for" without the need to access the "first_row, first_column, last_row, last_column".
elif isinstance(chunk, pd.Series):
gmap = gmap[chunk_parent.index.get_indexer_for(chunk.index)]
elif isinstance(chunk, pd.DataFrame) and self._apply_args.axis() is None:
ri = chunk_parent.index.get_indexer_for(chunk.index)
ci = chunk_parent.columns.get_indexer_for(chunk.columns)
if isinstance(gmap, pd.DataFrame):
gmap = gmap.iloc[(ri, ci)]
elif isinstance(gmap, np.ndarray):
gmap = pd.DataFrame(data=gmap, index=chunk_parent.index, columns=chunk_parent.columns)
gmap = gmap.iloc[(ri, ci)]
return self._apply_args.func()(chunk, **dict(self._func_kwargs, vmin=vmin, vmax=vmax, gmap=gmap))
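# Illustrative sketch of the "get_indexer_for" note above (not part of the patcher):
# positions in the parent frame are looked up by label, so a chunk built from an arbitrary
# subset is still mapped to the right rows/columns of gmap.
#
#   parent = pd.DataFrame(np.arange(12).reshape(3, 4),
#                         index=["r0", "r1", "r2"], columns=["a", "b", "c", "d"])
#   chunk = parent.loc[["r2", "r0"], ["d", "b"]]
#   ri = parent.index.get_indexer_for(chunk.index)       # array([2, 0])
#   ci = parent.columns.get_indexer_for(chunk.columns)   # array([3, 1])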
|
|
"""
Tests for workspace module
"""
import os
import shutil
import tempfile
from six import StringIO
import numpy as np
import pytest
from fsl.data.image import Image
from oxasl import Workspace, AslImage
from oxasl.workspace import text_to_matrix
def test_default_attr():
""" Check attributes are None by default """
wsp = Workspace()
assert(wsp.wibble is None)
def test_set_attr():
""" Check attributes can bet set """
wsp = Workspace()
assert(wsp.wibble is None)
wsp.wibble = 7
assert(wsp.wibble == 7)
def test_ctor_attributes():
""" Check attributes specified in constructor """
wsp = Workspace(wobble="hi")
assert(wsp.wobble == "hi")
def test_log():
""" Check that the log is picked up """
log = StringIO()
wsp = Workspace(log=log)
wsp.log.write("hello")
assert(log.getvalue() == "hello")
def test_ifnone():
wsp = Workspace(wibble=11)
assert(wsp.ifnone("wibble", 12) == 11)
assert(wsp.ifnone("wobble", 12) == 12)
def test_sub():
""" Test sub-workspaces """
wsp = Workspace()
wsp.sub("child")
assert(isinstance(wsp.child, Workspace))
assert(wsp.child.wibble is None)
assert(wsp.child.log == wsp.log)
def test_sub_kwargs():
""" Test creating a sub workspace with kwargs """
wsp = Workspace()
wsp.sub("child", wibble="squid", pudding=4)
assert(isinstance(wsp.child, Workspace))
assert(wsp.child.wibble == "squid")
assert(wsp.child.pudding == 4)
def test_sub_inherit():
""" Test sub workspaces can inherit values from their parent """
wsp = Workspace()
wsp.wibble = 7
wsp.wobble = 6
wsp.sub("child")
wsp.child.wobble = 5
assert(wsp.child.wibble == 7)
assert(wsp.child.wobble == 5)
def test_sub_inherit_wsp():
""" Test sub workspaces can inherit sub-workspaces from their parent """
wsp = Workspace()
wsp.sub("child1")
wsp.child1.wibble = 7
wsp.sub("child2")
assert(wsp.child2.child1 is not None)
assert(wsp.child2.child1.wibble == 7)
def test_input_wsp():
""" Test putting constructor attributes in a default sub workspaces """
wsp = Workspace(input_wsp="cakes", flapjack=4, fruit=3, defaults=[])
assert(wsp.cakes is not None)
assert(wsp.cakes.flapjack == 4)
assert(wsp.cakes.fruit == 3)
assert(wsp.flapjack is None)
assert(wsp.fruit is None)
def test_default_wsp():
""" Test default sub-workspaces for search """
wsp = Workspace(defaults=["cars"])
assert(wsp.cars is None)
wsp.ferrari = 9
wsp.merc = 8
wsp.sub("cars")
wsp.cars.porsche = 6
wsp.cars.ferrari = 4
assert(wsp.cars is not None)
assert(wsp.ferrari == 9)
assert(wsp.porsche == 6)
assert(wsp.merc == 8)
assert(wsp.cars.porsche == 6)
assert(wsp.cars.ferrari == 4)
assert(wsp.cars.merc is None)
def test_default_wsp_multiple():
""" Test multiple default sub-workspaces for search """
wsp = Workspace(defaults=["plants", "trees"])
wsp.daffodil = 9
wsp.larch = 1
wsp.sub("trees")
wsp.trees.oak = 3
wsp.trees.larch = 2
wsp.trees.apple = 7
assert(wsp.daffodil == 9)
assert(wsp.larch == 1)
assert(wsp.oak == 3)
assert(wsp.apple == 7)
assert(wsp.trees.larch == 2)
assert(wsp.trees.oak == 3)
assert(wsp.trees.daffodil is None)
assert(wsp.trees.apple == 7)
wsp.sub("plants")
wsp.plants.lily = 4
wsp.plants.oak = 5
assert(wsp.daffodil == 9)
assert(wsp.larch == 1)
assert(wsp.lily == 4)
assert(wsp.oak == 5)
assert(wsp.apple == 7)
assert(wsp.trees.oak == 3)
assert(wsp.trees.lily is None)
assert(wsp.plants.daffodil is None)
assert(wsp.plants.lily == 4)
assert(wsp.plants.oak == 5)
def test_savedir_created():
""" Test save dirs are created if they don't already exist """
tempdir = tempfile.mktemp("_oxasl")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
        assert(wsp.savedir == tempdir)
assert(os.path.isdir(tempdir))
assert("WARNING" not in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_savedir_created_multilevel():
""" Test multi-level save dirs are created if they don't already exist """
tempdir = os.path.join(tempfile.mktemp("_oxasl"), "extra", "levels")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
        assert(wsp.savedir == tempdir)
assert(os.path.isdir(tempdir))
assert("WARNING" not in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_savedir_sub():
""" Test sub-workspace have subdirs created """
tempdir = tempfile.mktemp("_oxasl")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
wsp.sub("quark")
path = os.path.join(tempdir, "quark")
assert(wsp.quark.savedir == path)
assert(os.path.isdir(path))
assert("WARNING" not in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_image_save():
"""
Test images are saved in the savedir
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
img = Image(np.random.rand(5, 5, 5))
wsp.testimg = img
path = os.path.join(tempdir, "testimg.nii.gz")
assert(os.path.isfile(path))
otherimg = Image(path)
assert(np.all(img.data == wsp.testimg.data))
assert(np.all(img.data == otherimg.data))
finally:
shutil.rmtree(tempdir)
def test_image_nosave():
"""
Test setting an image without saving
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
img = Image(np.random.rand(5, 5, 5))
wsp.set_item("testimg", img, save=False)
path = os.path.join(tempdir, "testimg.nii.gz")
assert(not os.path.exists(path))
assert(np.all(img.data == wsp.testimg.data))
finally:
shutil.rmtree(tempdir)
def test_image_save_name():
"""
Test images are saved in the savedir with the specified name
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
img = Image(np.random.rand(5, 5, 5))
wsp.set_item("testimg", img, save_name="pumpkin")
path = os.path.join(tempdir, "testimg.nii.gz")
assert(not os.path.exists(path))
path = os.path.join(tempdir, "pumpkin.nii.gz")
assert(os.path.isfile(path))
otherimg = Image(path)
assert(np.all(img.data == wsp.testimg.data))
assert(np.all(img.data == otherimg.data))
finally:
shutil.rmtree(tempdir)
def test_matrix_save():
"""
Test 2D matrices are saved in the savedir
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.testmat = mat
path = os.path.join(tempdir, "testmat.mat")
assert(os.path.isfile(path))
with open(path) as matfile:
othermat = text_to_matrix(matfile.read())
assert(np.all(mat == wsp.testmat))
assert(np.all(mat == othermat))
finally:
shutil.rmtree(tempdir)
def test_matrix_nosave():
"""
    Test setting a matrix without saving
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save=False)
path = os.path.join(tempdir, "testmat.mat")
assert(not os.path.exists(path))
assert(np.all(mat == wsp.testmat))
finally:
shutil.rmtree(tempdir)
def test_matrix_save_name():
"""
Test matrices are saved in the savedir with the specified name
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_name="parsnip")
path = os.path.join(tempdir, "testmat.mat")
assert(not os.path.exists(path))
path = os.path.join(tempdir, "parsnip.mat")
assert(os.path.isfile(path))
with open(path) as matfile:
othermat = text_to_matrix(matfile.read())
assert(np.all(mat == wsp.testmat))
assert(np.all(mat == othermat))
finally:
shutil.rmtree(tempdir)
def _custom_save(mat):
return "Custom Save"
def test_custom_save():
"""
    Test items are saved in the savedir using a custom save function
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_fn=_custom_save)
path = os.path.join(tempdir, "testmat")
assert(os.path.exists(path))
with open(path) as sfile:
assert("Custom Save" == sfile.read())
finally:
shutil.rmtree(tempdir)
def test_custom_save_name():
"""
    Test the output of a custom save function is saved under the specified name
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_name="potato", save_fn=_custom_save)
path = os.path.join(tempdir, "testmat")
assert(not os.path.exists(path))
path = os.path.join(tempdir, "potato")
assert(os.path.exists(path))
with open(path) as sfile:
assert("Custom Save" == sfile.read())
finally:
shutil.rmtree(tempdir)
def test_custom_save_nosave():
"""
    Test the custom save function is not called when save=False
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_fn=_custom_save, save=False)
path = os.path.join(tempdir, "testmat")
assert(not os.path.exists(path))
finally:
shutil.rmtree(tempdir)
def test_savedir_already_exists():
"""
Test warning when save dir already exists
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
assert("WARNING" in log.getvalue())
assert("already exists" in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_fsllog_default():
"""
Test the FSL logging context created
"""
log = StringIO()
wsp = Workspace(log=log)
assert(isinstance(wsp.fsllog, dict))
assert(wsp.fsllog.get("stdout", None) is None)
assert(wsp.fsllog.get("stderr", None) == log)
assert(wsp.fsllog.get("cmd", None) is None)
def test_fsllog_debug():
"""
Test the FSL logging context created in debug mode
"""
log = StringIO()
wsp = Workspace(debug=True, log=log)
assert(isinstance(wsp.fsllog, dict))
assert(wsp.fsllog.get("stdout", None) == log)
assert(wsp.fsllog.get("stderr", None) == log)
assert(wsp.fsllog.get("cmd", None) == log)
def test_aslimage():
kwargs = {
"asldata" : np.random.rand(5, 5, 5, 8),
"tis" : [1, 2],
"iaf" : "tc",
"ibf" : "rpt",
}
wsp = Workspace(auto_asldata=True, **kwargs)
assert(isinstance(wsp.asldata, AslImage))
assert(wsp.asldata.tis == [1, 2])
assert(wsp.asldata.iaf == "tc")
assert(wsp.asldata.order == "ltr")
assert(wsp.asldata.rpts == [2, 2])
def test_aslimage_missing():
with pytest.raises(ValueError):
Workspace(auto_asldata=True)
with pytest.raises(ValueError):
Workspace(auto_asldata=True, asldata=None)
def test_text_to_matrix_spaces():
"""
Check that text_to_matrix works with space separated data
"""
text = "1 2 3\n4 5 6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_comma():
"""
Check that text_to_matrix works with comma separated data
"""
text = "1, 2, 3\n4,5,6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_tabs():
"""
Check that text_to_matrix works with tab separated data
"""
text = "1\t2\t3\n4\t 5\t 6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_mixed():
"""
Check that text_to_matrix works with mixed separators
"""
text = "1\t2 3\n4 , 5, \t 6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_not_matrix():
text = "1 2 3\n4 5\n"
with pytest.raises(ValueError):
mat = text_to_matrix(text)
def test_text_to_matrix_not_numbers():
text = "1 x 3\n4 5 6\n"
with pytest.raises(ValueError):
mat = text_to_matrix(text)
|
|
# Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# E. van den Berg and Kristan Temme, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import cl
import numpy as np
import time
np.random.seed(int(time.time()))
for i in [1] : # range(1000) :
np.random.seed(i)
P = cl.random_commuting_matrix(3,6,True,False)
print(P)
print(cl.matrix_to_pauli(P))
(basis,coef,extra) = cl.factorize(P)
print("\nBasis(%+d)" % extra)
print(basis)
print(cl.matrix_to_pauli(basis))
print("\nCoefficients")
print(coef)
Pbar = cl.reconstruct(basis,coef)
print(cl.matrix_to_pauli(cl.reconstruct(basis[:-1,:],coef[:,:-1])))
s = np.sum(np.abs(Pbar - P))
print((extra, s))
if (s != 0) or (extra != 2):
print(i)
break
|
|
# Save a file with 2*(n-1) columns containing the (n-1) independent variables and the (n-1) gradients of the trained NN with respect to these variables
import matplotlib.pyplot as plt
import numpy as np
import copy
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
is_cuda = torch.cuda.is_available()
def evaluate_derivatives(pathdir,filename,model,og_pathdir=''):
try:
        data = np.loadtxt(pathdir + filename)[:, 0:-1]
        pts = torch.tensor(data).clone().detach()
is_cuda = torch.cuda.is_available()
grad_weights = torch.ones(pts.shape[0], 1)
if is_cuda:
pts = pts.float().cuda()
model = model.cuda()
grad_weights = grad_weights.cuda()
pts.requires_grad_(True)
outs = model(pts)
grad = torch.autograd.grad(outs, pts, grad_outputs=grad_weights, create_graph=True)[0]
save_grads = grad.detach().data.cpu().numpy()
save_data = np.column_stack((data,save_grads))
print('saving',save_data,'to results/gradients_comp_%s.txt' %filename)
np.savetxt(og_pathdir+"results/gradients_comp_%s.txt" %filename,save_data)
return 1
    except Exception:
return 0
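# Usage sketch (hypothetical names; assumes a whitespace-delimited data file whose last
# column is the target and whose remaining columns are the model inputs):
#
#   import torch.nn as nn
#   net = nn.Sequential(nn.Linear(2, 16), nn.Tanh(), nn.Linear(16, 1))  # expects a 3-column file
#   ok = evaluate_derivatives('./data/', 'example.txt', net)
#   # on success this writes results/gradients_comp_example.txt with the inputs and gradients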
|
|
import torch
import numpy as np
import random
import collections
from sklearn.cluster import KMeans
from sklearn import metrics
import argparse
from toolbox import load_pickle, LR_classifier, shift_operator, eigenvalues, global_ratio
from toolbox import sample_case, l2_norm, compute_confidence_interval, diffused
def get_features(model):
assert model in ['wideresnet', 'densenet-t']
out_dict = load_pickle(f'./features/{model}/test.pkl')
return out_dict
def semi_supervised_dataset(out_dict, n_shot, n_way, n_query):
# Pick a random run
train, test, trainLabels, testLabels = sample_case(out_dict, n_shot, n_way, n_query)
# Normalization
train = l2_norm(train)
test = l2_norm(test)
# Permutation
permuted_indexes = np.random.permutation(len(train))
train = train[permuted_indexes]
trainLabels = trainLabels[permuted_indexes]
permuted_indexes = np.random.permutation(len(test))
test = test[permuted_indexes]
testLabels = testLabels[permuted_indexes]
# Semi-supervised setting:
semiSupLabels = torch.cat((trainLabels.clone(), testLabels))
semiSup = torch.cat((train.clone(), test))
return train, semiSup, trainLabels, semiSupLabels
def supervised_dataset(out_dict, n_shot, n_way, n_query):
# Pick a random run
train, test, trainLabels, testLabels = sample_case(out_dict, n_shot, n_way, n_query)
# Normalization
train = l2_norm(train)
test = l2_norm(test)
# Permutation
permuted_indexes = np.random.permutation(len(train))
train = train[permuted_indexes]
trainLabels = trainLabels[permuted_indexes]
permuted_indexes = np.random.permutation(len(test))
test = test[permuted_indexes]
testLabels = testLabels[permuted_indexes]
return train, test, trainLabels, testLabels
def generate_unbalanced(data, labels, n_way, n_shot, n_query, p):
# Retrieve supervised data
supData = data[:n_way*n_shot]
supLabels = labels[:n_way*n_shot]
# Generate unbalance
unbalancedData = torch.FloatTensor()
unbalancedLabels = torch.LongTensor()
# Total number of samples
nos = n_query * n_way
# Number of samples in the unbalanced class
nosMain = int(p*nos)
# Equal number of samples in the other classes
nosOther = int((nos - nosMain)/(n_way - 1))
nos = {}
nos[0] = nosMain
for w in range(1, n_way):
nos[w] = nosOther
for w in range(n_way):
newData = data[n_way*n_shot:][labels[n_way*n_shot:]==w][:nos[w]]
newLabels = labels[n_way*n_shot:][labels[n_way*n_shot:]==w][:nos[w]]
unbalancedData = torch.cat((unbalancedData, newData), dim=0)
unbalancedLabels = torch.cat((unbalancedLabels, newLabels), dim=0)
# Permute the unsupervised samples
permuted_indexes = np.random.permutation(len(unbalancedData))
unbalancedData = unbalancedData[permuted_indexes]
unbalancedLabels = unbalancedLabels[permuted_indexes]
# Add the supervised samples
unbalancedData = torch.cat((supData, unbalancedData), dim=0)
unbalancedLabels = torch.cat((supLabels, unbalancedLabels), dim=0)
return unbalancedData, unbalancedLabels
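# Worked example of the unbalanced split above (a sketch): with n_way=5, n_shot=1, n_query=15
# and p=0.4, nos = 75 query samples in total, nosMain = int(0.4*75) = 30 for the first class and
# nosOther = int((75-30)/4) = 11 for each of the remaining classes, i.e. 30 + 4*11 = 74 unlabeled
# samples (integer rounding can drop a few) on top of the 5 labeled support samples.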
def get_correlation_unsup(n_way, n_shot, n_query, n_neighbor, n_run, p):
    # In this setting we consider n_shot + n_query unlabeled samples per class. The only reason for
    # doing this is to make the correlations directly comparable with those obtained in a semi-supervised setting.
stat = collections.defaultdict(list)
for run in range(n_run):
print(f'Run {run}', end='\r')
# Pick a random run, balanced or unbalanced
if p is not None: # unbalanced dataset
## We consider a dataset with n_query * n_way query samples.
## In order to generate an unbalanced dataset, we need to generate n_query * n_way per class.
_, semiSup, _, semiSupLabels = semi_supervised_dataset(out_dict, n_shot, n_way, n_query*n_way)
semiSup, semiSupLabels = generate_unbalanced(semiSup, semiSupLabels, n_way, n_shot, n_query, p)
else: # balanced dataset
_, semiSup, _, semiSupLabels = semi_supervised_dataset(out_dict, n_shot, n_way, n_query)
# Compute metrics and generalization performance
## Features are first diffused on a cosine similarity graph whose vertices are labeled and unlabeled samples.
graph = shift_operator(semiSup, removeSelfConnections=True, laplacian=False,
nNeighbor=min(n_way*(n_shot+n_query), n_neighbor))
diffusedTrain = diffused(semiSup, graph, alpha=0.75, kappa=1)
egv = eigenvalues(diffusedTrain, 'cosine', min(n_way*(n_shot+n_query), n_neighbor))
km = KMeans(n_clusters=n_way)
kmLabels = km.fit_predict(diffusedTrain.numpy())
DBScore = metrics.davies_bouldin_score(diffusedTrain.numpy(), kmLabels)
ARI = metrics.adjusted_rand_score(semiSupLabels, kmLabels)
# Store the results
stat['ARI'].append(ARI)
stat['DBScore'].append(DBScore)
stat[f'egv'].append(egv[n_way-1].item())
# Remind the setting
print(f'Unsupervised setting -- {n_way}-way {n_shot}-shot {n_query}-query tasks')
if p is not None:
        print(f'\t Unbalanced number of unlabeled samples per class: p = {p}')
# Print the average ARI and 95% interval
mean, std = compute_confidence_interval(stat['ARI'])
print(f'ARI on the query samples: {np.round(mean, 2)} +- {np.round(std, 2)}')
# Print the correlations
print('Correlations between metrics on the unlabeled samples and ARI:')
## DBScore
corr = np.abs(np.round(np.corrcoef(stat['DBScore'], stat['ARI'])[0, 1], 2))
print(f'DBScore: {corr}')
## egv
corr = np.abs(np.round(np.corrcoef(stat['egv'], stat['ARI'])[0, 1], 2))
print(f'{n_way}-th eigenvalue: {corr}')
def get_correlation_semi(n_way, n_shot, n_query, n_neighbor, n_run, p):
stat = collections.defaultdict(list)
for run in range(n_run):
print(f'Run {run}', end='\r')
# Pick a random run, balanced or unbalanced
if p is not None: # unbalanced dataset
## We consider a dataset with n_query * n_way query samples.
## In order to generate an unbalanced dataset, we need to generate n_query * n_way per class.
train, semiSup, trainLabels, semiSupLabels = semi_supervised_dataset(out_dict, n_shot, n_way, n_query*n_way)
semiSup, semiSupLabels = generate_unbalanced(semiSup, semiSupLabels, n_way, n_shot, n_query, p)
else: # balanced dataset
train, semiSup, trainLabels, semiSupLabels = semi_supervised_dataset(out_dict, n_shot, n_way, n_query)
# Compute metrics and generalization performance
## Features are first diffused on a cosine similarity graph whose vertices are labeled and unlabeled samples
graph = shift_operator(semiSup, removeSelfConnections=True, laplacian=False,
nNeighbor=min(n_way*(n_shot+n_query), n_neighbor))
diffusedTrain = diffused(semiSup, graph, alpha=0.75, kappa=1)
_, _, LRTrainLoss, _, LRTestAcc, _, LRTestConfidence = LR_classifier(
diffusedTrain[:n_way*n_shot], trainLabels, diffusedTrain[n_way*n_shot:],
semiSupLabels[n_way*n_shot:], n_way)
similarity = global_ratio(diffusedTrain[:n_way*n_shot], trainLabels, 'cosine')
egv = eigenvalues(diffusedTrain, 'cosine', min(n_way*(n_shot+n_query), n_neighbor))
km = KMeans(n_clusters=n_way)
kmLabels = km.fit_predict(diffusedTrain.numpy())
DBScore = metrics.davies_bouldin_score(diffusedTrain.numpy(), kmLabels)
# Store the results
stat['LRTestAcc'].append(LRTestAcc)
stat['LRTrainLoss'].append(LRTrainLoss)
stat['LRTestConfidence'].append(LRTestConfidence)
stat['similarity'].append(similarity)
stat['DBScore'].append(DBScore)
stat[f'egv'].append(egv[n_way-1].item())
    # Recall the setting
    print(f'Semi-supervised setting -- {n_way}-way {n_shot}-shot {n_query}-query tasks')
    if p is not None:
        print(f'\t Unbalanced number of unlabeled samples per class: p = {p}')
# Print the average accuracy and 95% interval
mean, std = compute_confidence_interval(stat['LRTestAcc'])
print(f'Average LR accuracy on the query samples: {np.round(mean, 2)} +- {np.round(std, 2)}')
# Print the correlations
print('Correlations between metrics on the training samples (labeled and unlabeled) and LR accuracy:')
## LRTrainLoss
corr = np.abs(np.round(np.corrcoef(stat['LRTrainLoss'], stat['LRTestAcc'])[0, 1], 2))
print(f'LR loss on the training samples: {corr}')
## similarity
corr = np.abs(np.round(np.corrcoef(stat['similarity'], stat['LRTestAcc'])[0, 1], 2))
print(f'Similarity: {corr}')
## DBScore
corr = np.abs(np.round(np.corrcoef(stat['DBScore'], stat['LRTestAcc'])[0, 1], 2))
print(f'DBScore: {corr}')
## egv
corr = np.abs(np.round(np.corrcoef(stat['egv'], stat['LRTestAcc'])[0, 1], 2))
print(f'{n_way}-th eigenvalue: {corr}')
## LRTestConfidence
corr = np.abs(np.round(np.corrcoef(stat['LRTestConfidence'], stat['LRTestAcc'])[0, 1], 2))
print(f'LR confidence on the query samples: {corr}')
def get_correlation_super(n_way, n_shot, n_query, n_neighbor, n_run):
stat = collections.defaultdict(list)
for run in range(n_run):
print(f'Run {run}', end='\r')
# Pick a random run
train, test, trainLabels, testLabels = supervised_dataset(out_dict, n_shot, n_way, n_query)
# Compute metrics and generalization performance
_, _, LRTrainLoss, _, LRTestAcc, _, _ = LR_classifier(train, trainLabels, test, testLabels, n_way)
similarity = global_ratio(train, trainLabels, 'cosine')
egv = eigenvalues(train, 'cosine', min(n_way*n_shot, n_neighbor))
km = KMeans(n_clusters=n_way)
kmLabels = km.fit_predict(train.numpy())
if n_shot != 1:
DBScore = metrics.davies_bouldin_score(train.numpy(), kmLabels)
else:
DBScore = None
# Store the results
stat['LRTestAcc'].append(LRTestAcc)
stat['LRTrainLoss'].append(LRTrainLoss)
stat['similarity'].append(similarity)
stat['DBScore'].append(DBScore)
        stat['egv'].append(egv[n_way-1].item())
    # Recall the setting
print(f'Supervised setting -- {n_way}-way {n_shot}-shot {n_query}-query tasks')
# Print the average accuracy and 95% interval
mean, std = compute_confidence_interval(stat['LRTestAcc'])
print(f'Average LR accuracy on the query samples: {np.round(mean, 2)}% +- {np.round(std, 2)}%\n')
# Print the correlations
print('Correlations between metrics on the training samples and LR accuracy')
## LRTrainLoss
corr = np.abs(np.round(np.corrcoef(stat['LRTrainLoss'], stat['LRTestAcc'])[0, 1], 2))
print(f'LR loss on the training samples: {corr}')
## similarity
corr = np.abs(np.round(np.corrcoef(stat['similarity'], stat['LRTestAcc'])[0, 1], 2))
print(f'Similarity: {corr}')
## DBScore
if n_shot != 1:
corr = np.abs(np.round(np.corrcoef(stat['DBScore'], stat['LRTestAcc'])[0, 1], 2))
print(f'DBScore: {corr}')
else:
        print('DBScore: not applicable')
## egv
corr = np.abs(np.round(np.corrcoef(stat['egv'], stat['LRTestAcc'])[0, 1], 2))
print(f'{n_way}-th eigenvalue: {corr}')
def parse_args():
parser = argparse.ArgumentParser(description='Evaluating correlations between metrics and generalization performances.')
parser.add_argument('--model', default='wideresnet', type=str, help='backbone: wideresnet or densenet-t')
parser.add_argument('--setting', default='supervised', type=str, help='supervised, semi-supervised or unsupervised')
parser.add_argument('--n_way', default=5, type=int, help='number of classes')
parser.add_argument('--n_shot', default=5, type=int, help='number of training samples')
parser.add_argument('--n_query', default=5, type=int, help='number of test examples, which is also the number of additional unlabeled samples in some settings.')
    parser.add_argument('--n_neighbor', default=15, type=int, help='number of nearest neighbors kept in the cosine similarity graph')
    parser.add_argument('--p', type=float, help='imbalance of the unlabeled samples per class: None (balanced) or a float between 0 and 1 giving the proportion of samples in one class relative to the other classes.')
parser.add_argument('--n_run', default=1000, type=int, help='number of tasks')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
    np.random.seed(0)
    random.seed(0)
    torch.cuda.manual_seed_all(0)
# Retrieve features
out_dict = get_features(args.model)
# Print the correlations
assert args.setting in ['supervised', 'semi-supervised', 'unsupervised']
if args.setting == 'supervised':
get_correlation_super(args.n_way, args.n_shot, args.n_query, args.n_neighbor, args.n_run)
elif args.setting == 'semi-supervised':
get_correlation_semi(args.n_way, args.n_shot, args.n_query, args.n_neighbor, args.n_run, args.p)
else:
get_correlation_unsup(args.n_way, args.n_shot, args.n_query, args.n_neighbor, args.n_run, args.p)
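# Example invocation (illustrative; the script filename and the feature files it loads are assumptions):
#   python correlations.py --model wideresnet --setting semi-supervised \
#       --n_way 5 --n_shot 1 --n_query 15 --n_neighbor 15 --n_run 1000 --p 0.5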
|
|
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is part of a multithreading tool to speed up brute force optimization
#Import modules
from numba import jit
#Decorator - the pandas-heavy body cannot compile in nopython mode, so object mode is forced
@jit(forceobj=True)
#Define function
def multithreadADXStratOpt():
#Import modules
import pandas as pd
from pandas_datareader import data
import numpy as np
import time as t
import random as rand
#Assign ticker
ticker = '^GSPC'
#Request data
s = data.DataReader(ticker, 'yahoo', start='01/01/2016', end='01/01/2050')
#Number of iterations
iterations = range(0,800)
#Empty data structures
counter = 0
empty = []
dataset = pd.DataFrame()
#Start timer
start = t.time()
#Calculate log returns
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
#Calculate ATR
s['UpMove'] = s['High'] - s['High'].shift(1)
s['DownMove'] = s['Low'] - s['Low'].shift(1)
s['Method1'] = s['High'] - s['Low']
s['Method2'] = abs((s['High'] - s['Adj Close'].shift(1)))
s['Method3'] = abs((s['Low'] - s['Adj Close'].shift(1)))
s['Method1'] = s['Method1'].fillna(0)
s['Method2'] = s['Method2'].fillna(0)
s['Method3'] = s['Method3'].fillna(0)
s['TrueRange'] = s[['Method1','Method2','Method3']].max(axis = 1)
#Calculate ADX
s['PDM'] = (s['High'] - s['High'].shift(1))
s['MDM'] = (s['Low'].shift(1) - s['Low'])
s['PDM'] = s['PDM'][s['PDM'] > 0]
s['MDM'] = s['MDM'][s['MDM'] > 0]
s['PDM'] = s['PDM'].fillna(0)
s['MDM'] = s['MDM'].fillna(0)
#For number of iterations
for x in iterations:
#Iteration tracking
counter = counter + 1
#Generate random params
a = rand.randint(1,30)
b = 100 - rand.random() * 200
c = 100 - rand.random() * 200
d = 100 - rand.random() * 200
e = 100 - rand.random() * 200
window = a
#Calculate ATR
s['AverageTrueRange'] = s['TrueRange'].rolling(window = window,
center=False).sum()
s['AverageTrueRange'] = ((s['AverageTrueRange'].shift(1)*(window-1
) + s['TrueRange']) / window)
#Calculate ADX
s['SmoothPDM'] = s['PDM'].rolling(window = window,
center=False).sum()
s['SmoothPDM'] = ((s['SmoothPDM'].shift(1)*(window-1
) + s['PDM']) / window)
s['SmoothMDM'] = s['MDM'].rolling(window = window,
center=False).sum()
s['SmoothMDM'] = ((s['SmoothMDM'].shift(1)*(window-1
) + s['MDM']) / window)
s['PDI'] = (100*(s['SmoothPDM']/s['AverageTrueRange']))
s['MDI'] = (100*(s['SmoothMDM']/s['AverageTrueRange']))
s['DIdiff'] = abs(s['PDI'] - s['MDI'])
s['DIdivergence'] = s['PDI'] - s['MDI']
s['DIsum'] = s['PDI'] + s['MDI']
s['DX'] = (100 * (s['DIdiff']/s['DIsum']))
s['DX'] = s['DX'].fillna(0)
s['ADX'] = s['DX'].rolling(window = window, center = False).mean()
s['ADXmean'] = s['ADX'].mean()
#Directional methodology
        s['Touch'] = np.where(s['DIdivergence'] < b, 1,0) #long entry signal
        s['Touch'] = np.where(s['DIdivergence'] > c, -1, s['Touch']) #short entry signal
        s['Sustain'] = np.where(s['Touch'].shift(1) == 1, 1, 0) #carry the long signal forward
        s['Sustain'] = np.where(s['Sustain'].shift(1) == 1, 1,
                                s['Sustain'])
        s['Sustain'] = np.where(s['Touch'].shift(1) == -1, -1,
                                s['Sustain']) #carry the short signal forward without discarding the long sustain
        s['Sustain'] = np.where(s['Sustain'].shift(1) == -1, -1,
                                s['Sustain'])
        s['Sustain'] = np.where(s['DIdivergence'] > d, 0, s['Sustain']) #exit when divergence exceeds the upper threshold d
        s['Sustain'] = np.where(s['DIdivergence'] < e, 0, s['Sustain']) #exit when divergence falls below the lower threshold e
s['Regime'] = s['Touch'] + s['Sustain']
#Apply position to returns
s['Strategy'] = (s['Regime']).shift(1)*s['LogRet']
s['Strategy'] = s['Strategy'].fillna(0)
#Constraint
if s['Strategy'].std() == 0:
continue
#Performance metric
s['sharpe'] = (s['Strategy'].mean()-s['LogRet'].mean())/s['Strategy'].std()
#Constraints
if s['sharpe'][-1] < 0.01:
continue
if s['LogRet'].cumsum().apply(np.exp)[-1] > s['Strategy'].cumsum(
).apply(np.exp)[-1]:
continue
#Save params and metric to dataframe
print(counter)
empty.append(a)
empty.append(b)
empty.append(c)
empty.append(d)
empty.append(e)
empty.append(s['sharpe'][-1])
#List to Series
emptyseries = pd.Series(empty)
#Series to dataframe
        dataset[x] = emptyseries.values
#Clear list
empty[:] = []
#Metric of choice
z1 = dataset.iloc[5]
#Threshold
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top params
DS1W = pd.DataFrame() #this variable stores your params for specific dataset
#For all metrics
for h in z1:
#If greater than threshold
if h > w1:
#Add to list
v1.append(h)
#For top metrics
for j in v1:
#Column ID of metric
r = dataset.columns[(dataset == j).iloc[5]]
#Add param set to dataframe
DS1W = pd.concat([DS1W,dataset[r]], axis = 1)
#Top metric
y = max(z1)
#Column ID of top param set
k = dataset.columns[(dataset == y).iloc[5]]
#End timer
end = t.time()
#Timer stats
print(end-start, 'seconds later')
#Output top param set
return dataset[k]
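#Usage sketch (illustrative): calling the optimizer runs the random parameter search
#and returns the column holding the best-scoring parameter set
#topParams = multithreadADXStratOpt()
#print(topParams)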
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from datetime import timedelta
from .geo import shoot, gc_distance
# STORM TRACK LIBRARY
# TODO: deprecate?
def track_from_parameters(
pmin, vmean, delta, gamma,
x0, y0, x1, R,
date_ini, hours,
great_circle=False):
'''
Calculates storm track variables from storm track parameters
pmin, vmean, delta, gamma - storm track parameters
x0, y0 - site coordinates (longitude, latitude)
x1 - enter point in computational grid
R - radius (º)
date_ini - initial date 'yyyy-mm-dd HH:SS'
hours - number of hours to generate
great_circle - True for using great circle lon,lat calculation
'''
RE = 6378.135 # earth radius
# generation of storm track
xc = x0 + R * np.sin(delta * np.pi/180) # enter point in the smaller radius
yc = y0 + R * np.cos(delta * np.pi/180)
d = (x1 - xc) / np.sin(gamma * np.pi/180)
y1 = yc + d * np.cos(gamma * np.pi/180)
# time array for SWAN input
time_input = pd.date_range(date_ini, periods=hours, freq='H')
# storm track (pd.DataFrame)
st = pd.DataFrame(index=time_input, columns=['move', 'vf', 'pn', 'p0', 'lon', 'lat'])
st['move'] = gamma
st['vf'] = vmean
st['pn'] = 1013
st['p0'] = pmin
# calculate lon and lat
if not great_circle:
st['lon'] = x1 - (st['vf']*180/(RE*np.pi)) * np.sin(gamma*np.pi/180) * list(range(len(st)))
st['lat'] = y1 - (st['vf']*180/(RE*np.pi)) * np.cos(gamma*np.pi/180) * list(range(len(st)))
else:
x2 = x1 - (st['vf']*180/(RE*np.pi)) * np.sin(gamma*np.pi/180) * list(range(len(st)))
y2 = y1 - (st['vf']*180/(RE*np.pi)) * np.cos(gamma*np.pi/180) * list(range(len(st)))
xt, yt = [], []
for i in list(range(0,hours)):
glon, glat, baz = shoot(x1, y1, gamma+180, vmean * i)
xt.append(glon)
yt.append(glat)
st['lon'] = xt
st['lat'] = yt
# add some metadata
st.x0 = x0
st.y0 = y0
st.R = R
return st
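# Minimal usage sketch (illustrative parameter values, not from any real storm):
#   st = track_from_parameters(
#       pmin=950, vmean=20, delta=45, gamma=130,
#       x0=167.5, y0=9.5, x1=168.0, R=4,
#       date_ini='2000-01-01 00:00', hours=48)
#   st[['lon', 'lat', 'p0']].head()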
def get_category(ycpres):
'Defines storm category according to minimum pressure centers'
categ = []
for i in range(len(ycpres)):
if (ycpres[i] == 0) or (np.isnan(ycpres[i])):
categ.append(6)
elif ycpres[i] < 920: categ.append(5)
elif ycpres[i] < 944: categ.append(4)
elif ycpres[i] < 964: categ.append(3)
elif ycpres[i] < 979: categ.append(2)
elif ycpres[i] < 1000: categ.append(1)
elif ycpres[i] >= 1000: categ.append(0)
return categ
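# Example (values follow the thresholds above):
#   get_category([915, 950, 999, 1005, np.nan]) -> [5, 3, 1, 0, 6]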
def historic_track_preprocessing(xds, d_vns):
'''
    Preprocesses a historic track: removes NaN data, applies the longitude
    sign convention, changes the time format and defines the storm category
xds: historic track dataset (storm dimension)
d_vns: dictionary to set longitude, latitude, time, pressure and wind varnames
'''
# get names of vars
nm_lon = d_vns['longitude']
nm_lat = d_vns['latitude']
nm_prs = d_vns['pressure']
nm_tim = d_vns['time']
nm_win = d_vns['maxwinds']
# get var time
ytime = xds[nm_tim].values # dates format: datetime64
# remove time nans
ycpres = xds[nm_prs].values[~np.isnat(ytime)] # minimum pressure
ylat_tc = xds[nm_lat].values[~np.isnat(ytime)] # latitude
ylon_tc = xds[nm_lon].values[~np.isnat(ytime)] # longitude
ywind = xds[nm_win].values[~np.isnat(ytime)] # wind speed [kt]
ytime = ytime[~np.isnat(ytime)]
# remove pmin nans
ylat_tc = ylat_tc[~np.isnan(ycpres)]
ylon_tc = ylon_tc[~np.isnan(ycpres)]
ywind = ywind[~np.isnan(ycpres)]
ytime = ytime[~np.isnan(ycpres)]
ycpres = ycpres[~np.isnan(ycpres)]
# sign convention: [0º,360º]
ylon_tc[ylon_tc<0] = ylon_tc[ylon_tc<0] + 360
# round dates to hour
st_time = []
for i in range(len(ytime)):
round_to = 3600
# each gauge has different time format
dt = ytime[i].astype('datetime64[s]').tolist()
seconds = (dt - dt.min).seconds
rounding = (seconds+round_to/2) // round_to * round_to
out = dt + timedelta(0,rounding-seconds,-dt.microsecond)
st_time.append(out)
st_time = np.asarray(st_time)
# time step data [hours]
ts = (st_time[1:] - st_time[:-1])
ts = [ts[i].total_seconds() / 3600 for i in range(ts.size)]
# storm category centers
categ = get_category(ycpres)
# calculate Vmean
RE = 6378.135 # earth radius [km]
vmean = []
for i in range(0, len(st_time)-1):
# track pair of successive coordinates
lon1 = ylon_tc[i]
lat1 = ylat_tc[i]
lon2 = ylon_tc[i+1]
lat2 = ylat_tc[i+1]
# translation speed
arcl_h, gamma_h = gc_distance(lat2, lon2, lat1, lon1)
r = arcl_h * np.pi / 180.0 * RE # distance between consecutive track points (km)
vmean.append(r / ts[i] / 1.852) # translation speed (km/h to kt)
# mean value
vmean = np.mean(vmean) # [kt]
return st_time, ylat_tc, ylon_tc, ycpres, ywind, ts, categ, vmean
def ibtrac_basin_fitting(x0, y0):
'''
Assigns cubic polynomial fitting curve coefficient for each basin of
historical TCs data (IBTrACS)
'''
# determination of the location basin
if y0 < 0: basin = 5
elif (y0 > 0) & (x0 > 0): basin = 3
    else: raise ValueError('Basin not defined')
# cubic polynomial fitting curve for Ibtracs and each basin
# TODO: obtain all basin fitting coefficients
if basin == 3: # West Pacific
p1 = -7.77328602747578e-06
p2 = 0.0190830514629838
p3 = -15.9630945598490
p4 = 4687.76462404360
elif basin == 5: # South Pacific
p1 = -4.70481986864773e-05
p2 = 0.131052968357409
p3 = -122.487981649828
p4 = 38509.7575283218
return p1, p2, p3, p4
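# Usage sketch (illustrative): the coefficients give an approximate Vmax [kt] from Pmin [mbar],
# e.g. for a West Pacific point (x0 > 0, y0 > 0) with pmin = 950:
#   p1, p2, p3, p4 = ibtrac_basin_fitting(140, 15)
#   vmax = p1 * 950**3 + p2 * 950**2 + p3 * 950 + p4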
def historic_track_interpolation(st_time, ylon_tc, ylat_tc, ycpres, ywind, y0, x0,
lat00, lon00, lat01, lon01, ts, dt_comp,
wind=None, great_circle=False, fit=False):
'''
Calculates storm track variables from storm track parameters and interpolates
track points in between historical data (for "dt_comp" time step)
st_time - time track
lat, lon - track coordinates (longitude, latitude)
pmin, ywind - storm track parameters
x0, y0 - target coordinates (longitude, latitude)
lat0, lon0, lat1, lon1 - numerical domain bound limits
ts - track time step data [hours]
dt_comp - simulation computation time step [minutes]
    wind - None to use the vmax approximation equation instead of observed winds
great_circle - True for using great circle lon,lat calculation
fit - True for fitting vmax when ywind=0 (start of storm)
'''
RE = 6378.135 # earth radius [km]
# cubic polynomial fitting curve for each IBTrACS basin
p1, p2, p3, p4 = ibtrac_basin_fitting(x0, y0)
# generate lists
time_storm = list(st_time) # datetime format
pmin = list(ycpres)
lat = list(ylat_tc)
lon = list(ylon_tc)
    if wind is not None:
mwind = wind
if fit:
wind_fitting = p1 * np.power(pmin,3) + p2 * np.power(pmin,2) + p3 * np.power(pmin,1) + p4
pos = np.where(mwind==0)
mwind[pos] = wind_fitting[pos]
# number of time steps between consecutive interpolated track points in order
# to match SWAN computational time step
ts = np.asarray(ts) * 60 / dt_comp
# initialize
move, vmean, pn, p0, lon_t, lat_t, vmax = [], [], [], [], [], [], []
vu, vy = [], []
time_input = np.empty((0,),dtype='datetime64[ns]')
for i in range(0, len(time_storm)-1):
# time array for SWAN input
date_ini = time_storm[i]
time_input0 = pd.date_range(
date_ini, periods=int(ts[i]), freq='{0}MIN'.format(dt_comp))
time_input = np.append(np.array(time_input), np.array(time_input0))
# track pair of successive coordinates
lon1 = lon[i]
lat1 = lat[i]
lon2 = lon[i+1]
lat2 = lat[i+1]
# translation speed
arcl_h, gamma_h = gc_distance(lat2, lon2, lat1, lon1)
r = arcl_h * np.pi / 180.0 * RE # distance between consecutive track points (km)
dx = r / ts[i] # interpolation distance
vx = float(dx) /3.6 # translation speed (m/s)
vx = vx /0.52 # translation speed (kt)
for j in range(int(ts[i])):
# append track parameters
move.append(gamma_h)
vmean.append(vx)
vu.append(vx * np.sin((gamma_h+180)*np.pi/180))
vy.append(vx * np.cos((gamma_h+180)*np.pi/180))
pn.append(1013)
p0.append(pmin[i] + j* (pmin[i+1]-pmin[i])/ts[i])
            if wind is not None:
vmax.append(mwind[i] + j* (mwind[i+1]-mwind[i])/ts[i]) #[kt]
# calculate lon, lat
if not great_circle:
lon_h = lon1 - (dx*180/(RE*np.pi)) * np.sin(gamma_h*np.pi/180) * j
lat_h = lat1 - (dx*180/(RE*np.pi)) * np.cos(gamma_h*np.pi/180) * j
else:
xt, yt = [], []
glon, glat, baz = shoot(lon1, lat1, gamma_h + 180, float(dx) * j)
xt = np.append(xt,glon)
yt = np.append(yt,glat)
lon_h = xt
lat_h = yt
lon_t.append(lon_h)
lat_t.append(lat_h)
# to array
move = np.array(move)
vmean = np.array(vmean)
vu = np.array(vu)
vy = np.array(vy)
p0 = np.array(p0)
vmax = np.array(vmax)
lon_t = np.array(lon_t)
lat_t = np.array(lat_t)
# longitude sign convention --> (0º,360º)
lon_t[lon_t<0]= lon_t[lon_t<0] + 360
# select interpolation data within the target domain area
loc = []
for i, (lo,la) in enumerate(zip(lon_t, lat_t)):
if (lo<=lon01) & (lo>=lon00) & (la<=lat01) & (la>=lat00):
loc.append(i)
# storm track (pd.DataFrame)
st = pd.DataFrame(index=time_input[loc],
columns=['move','vf','vfx','vfy','pn','p0','lon','lat','vmax'])
st['move'] = move[loc]
st['vf'] = vmean[loc]
st['vfx'] = vu[loc]
st['vfy'] = vy[loc]
st['pn'] = 1013
st['p0'] = p0[loc]
st['lon'] = lon_t[loc]
st['lat'] = lat_t[loc]
# vmax is calculated from Pmin-Vmax basin-fitting when the value is not given
    if wind is not None:
st['vmax'] = vmax[loc]
else:
st['vmax'] = p1 * np.power(p0[loc],3) + p2 * np.power(p0[loc],2) + p3 * np.power(p0[loc],1) + p4 # [kt]
# add some metadata
    # TODO: move to st.attrs (this metadata gets lost in any operation on st)
st.x0 = x0
st.y0 = y0
st.R = 4
return st, time_input[loc]
def entrance_coords(delta, gamma, x0, y0, R, lon0, lon1, lat0, lat1):
'''
Calculates storm track first coordinates
delta, gamma - storm track parameters
x0, y0 - site coordinates (longitude, latitude)
R - radius (º)
lon0, lon1, lat0, lat1 - computational coordinates (outer grid)
'''
# enter point in the radius
xc = x0 + R * np.sin(delta * np.pi/180)
yc = y0 + R * np.cos(delta * np.pi/180)
# calculate angles that determine the storm boundary entrance [degrees]
ang_1 = np.arctan((lon1-xc)/(lat1-yc)) *180/np.pi # upper right corner
ang_2 = np.arctan((lon1-xc)/(lat0-yc)) *180/np.pi +180 # lower right
ang_3 = np.arctan((lon0-xc)/(lat0-yc)) *180/np.pi +180 # lower left
ang_4 = np.arctan((lon0-xc)/(lat1-yc)) *180/np.pi +360 # upper left
if (gamma > ang_1) & (gamma < ang_2):
x1 = lon1
d = (x1 - xc) / np.sin(gamma * np.pi/180)
y1 = yc + d * np.cos(gamma * np.pi/180)
elif (gamma > ang_2) & (gamma < ang_3):
y1 = lat0
d = (y1 - yc) / np.cos(gamma * np.pi/180)
x1 = xc + d * np.sin(gamma * np.pi/180)
elif (gamma > ang_3) & (gamma < ang_4):
x1 = lon0
d = (x1 - xc) / np.sin(gamma * np.pi/180)
y1 = yc + d * np.cos(gamma * np.pi/180)
elif (gamma > ang_4) | (gamma < ang_1):
y1 = lat1
d = (y1 - yc) / np.cos(gamma * np.pi/180)
x1 = xc + d * np.sin(gamma * np.pi/180)
return x1, y1
def track_site_parameters(step, pmin, vmean, delta, gamma,
x0, y0, lon0, lon1, lat0, lat1, R, date_ini):
'''
Calculates storm track variables from storm track parameters within the study area
(uses great circle)
step - computational time step (in minutes)
pmin, vmean, delta, gamma - storm track parameters (NOTE: vmean in [kt])
x0, y0 - site coordinates (longitude, latitude)
lon0, lon1, lat0, lat1 - enter point in computational grid
R - radius (º)
date_ini - initial date 'yyyy-mm-dd HH:SS'
great_circle - default option
'''
# cubic polynomial fitting curve for each IBTrACS basin
# to calculate vmax from relation Pmin-Vmax of the specific basin
p1, p2, p3, p4 = ibtrac_basin_fitting(x0, y0)
# storm boundary entrance coordinates
x1, y1 = entrance_coords(delta, gamma, x0, y0, R, lon0, lon1, lat0, lat1)
# calculate lon,lat storm coordinates
xt, yt = [x1], [y1]
i = 1
glon, glat, baz = shoot(x1, y1, gamma+180, vmean*1.852 * i*step/60) # velocity in [km/h]
if glon < 0: glon += 360
while (glon < lon1) & (glon > lon0) & (glat < lat1) & (glat > lat0):
xt.append(glon)
yt.append(glat)
i += 1
glon, glat, baz = shoot(x1, y1, gamma+180, vmean*1.852 * i*step/60) # velocity in [km/h]
if glon < 0: glon += 360
frec = len(xt)
# time array for SWAN input
time_input = pd.date_range(date_ini, periods=frec, freq='{0}min'.format(step))
# storm track (pd.DataFrame)
st = pd.DataFrame(index=time_input,
columns=['move','vf','vfx','vfy','pn','p0','lon','lat','vmax'])
st['move'] = gamma
st['vf'] = vmean # [kt]
st['pn'] = 1013
st['p0'] = pmin
st['vfx'] = vmean * np.sin((gamma+180) * np.pi/180) # [kt]
st['vfy'] = vmean * np.cos((gamma+180) * np.pi/180) # [kt]
st['vmax'] = p1 * np.power(pmin,3) + p2 * np.power(pmin,2) + p3 * np.power(pmin,1) + p4 # [kt]
st['lon'] = xt
st['lat'] = yt
# add some metadata
st.x0 = x0
st.y0 = y0
st.R = R
return st
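# Usage sketch (illustrative parameter values only):
#   st = track_site_parameters(step=30, pmin=950, vmean=20, delta=45, gamma=130,
#                              x0=167.5, y0=9.5, lon0=160, lon1=175, lat0=2, lat1=16,
#                              R=4, date_ini='2000-01-01 00:00')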
# VORTEX LIBRARY
# TODO
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 08:20:07 2018
@author: Andrija Master
"""
import time
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from sklearn.metrics import roc_auc_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from src.models.Nestrukturni import Nestrukturni_fun
from src.models.utils.Nestrukturni_GCRF import Nestrukturni_fun_GCRF
from Struktura import Struktura_fun
from Struktura_GCRF import Struktura_fun_GCRF
from GCRFCNB import GCRFCNB
from GCRFC import GCRFC
from GCRFC_fast import GCRFC_fast
from GCRF import GCRF
from src.preprocess.Putevi_dataset import output,atribute
from sklearn.model_selection import train_test_split
""" Racunanje """
No_class = 18
NoGraph = 4
ModelUNNo = 4
testsize2 = 0.2
broj_fold = 10
iteracija = 400
file = open("rezultati2.txt", "w")
AUCNB = np.zeros(broj_fold)
AUCB = np.zeros(broj_fold)
AUCBF = np.zeros(broj_fold)
R2GCRF = np.zeros(broj_fold)
MSEGCRF = np.zeros(broj_fold)
logProbNB = np.zeros(broj_fold)
logProbB = np.zeros(broj_fold)
logProbBF = np.zeros(broj_fold)
timeNB = np.zeros(broj_fold)
timeB = np.zeros(broj_fold)
timeBF = np.zeros(broj_fold)
timeGCRF = np.zeros(broj_fold)
Skor_com_AUC = np.zeros([broj_fold,ModelUNNo])
Skor_com_AUC2 = np.zeros([broj_fold,ModelUNNo])
Skor_com_R2 = np.zeros([broj_fold,ModelUNNo])
Skor_R2mean = np.zeros([ModelUNNo])
Skor_com_R22 = np.zeros([broj_fold,ModelUNNo])
Skor_R22mean = np.zeros([ModelUNNo])
Skor_com_MSE = np.zeros([broj_fold,ModelUNNo])
Skor_MSEmean = np.zeros([ModelUNNo])
skf = KFold(n_splits = broj_fold)
skf.get_n_splits(atribute, output)
output1 = output.iloc[:,:18]
i = 0
x_train_com, x_test, y_train_com, Y_test1 = train_test_split(atribute, output, test_size = 0.2, random_state =31)
x_train_un, x_train_st, y_train_un1, Y_train1 = train_test_split(x_train_com, y_train_com, test_size=testsize2, random_state=31)
Y_test = Y_test1.iloc[:,:18]
Y_train = Y_train1.iloc[:,:18]
y_train_un = y_train_un1.iloc[:,:18]
Y_test_reg = Y_test1.iloc[:,18:]
Y_train_reg = Y_train1.iloc[:,18:]
y_train_un_reg = y_train_un1.iloc[:,18:]
Skor_com_AUC[i,:], Skor_com_AUC2[i,:], R_train, R_test, R2, Noinst_train, Noinst_test = Nestrukturni_fun(x_train_un, y_train_un, x_train_st, Y_train, x_test, Y_test, No_class)
Se_train, Se_test = Struktura_fun(No_class,NoGraph, R2 , y_train_com, Noinst_train, Noinst_test)
""" Model GCRFC """
Y_train = Y_train.values
Y_test = Y_test.values
start_time = time.time()
mod1 = GCRFCNB()
mod1.fit(R_train, Se_train, Y_train, learn = 'TNC', learnrate = 6e-4, maxiter = iteracija)
#mod1.alfa = np.array([1-10, 1e-10, 1e-10, 3000])
#mod1.beta = np.array([1.0000000e-10, 1.0000000e-10, 1e-10, 1e-10])
probNB, YNB = mod1.predict(R_test,Se_test)
timeNB[i] = time.time() - start_time
start_time = time.time()
mod2 = GCRFC()
#x0 = np.load('mod2.npy')
mod2.fit(R_train, Se_train, Y_train, learn = 'TNC', learnrate = 3e-4, learnratec = 0.5, maxiter = iteracija)
np.save('mod2',mod2.x)
#mod1.alfa = np.array([7.67362291, 4.7631527 , 9.79830104])
#mod1.beta = np.array([ 7.01829973, 16.59090051, 18.9508093 , 5.79445323])
probB, YB, VarB = mod2.predict(R_test,Se_test)
timeB[i] = time.time() - start_time
start_time = time.time()
mod3 = GCRFC_fast()
#x0 = np.load('mod3.npy')
mod3.fit(R_train, Se_train, Y_train, learn = 'TNC', learnrate = 3e-4, learnratec = 0.5, maxiter = iteracija, method_clus = 'KMeans', clus_no = 50)
np.save('mod3',mod3.x)
#mod1.alfa = np.array([0.1043126 , 0.06905401, 0.08689079])
#mod1.beta = np.array([1.00008728e-08, 2.88191498e+02, 1.00000563e-08, 1.00000000e-08, 8.74943190e+01, 3.48984028e-03])
probBF, YBF, VarBF = mod3.predict(R_test,Se_test)
timeBF[i] = time.time() - start_time
""" Model GCRF """
Skor_com_MSE[i,:], Skor_com_R2[i,:], Skor_com_R22[i,:], R_train_reg, R_test_reg, R2_reg, Noinst_train, Noinst_test, Y_train_reg, Y_test_reg = Nestrukturni_fun_GCRF(x_train_un, y_train_un_reg, x_train_st, Y_train_reg, x_test, Y_test_reg, No_class)
Se_train_reg, Se_test_reg = Struktura_fun_GCRF(No_class,NoGraph, R2 , y_train_com, Noinst_train, Noinst_test, Y_train, Y_test)
start_time = time.time()
mod4 = GCRF()
#x0 = np.load('mod4.npy')
mod4.fit(R_train_reg, Se_train_reg, Y_train_reg, learn = 'TNC', maxiter = 5000)
# np.save('mod4',mod4.x)
#mod1.alfa = np.array([0.1043126 , 0.06905401, 0.08689079])
#mod1.beta = np.array([1.00008728e-08, 2.88191498e+02, 1.00000563e-08, 1.00000000e-08, 8.74943190e+01, 3.48984028e-03])
YGCRF = mod4.predict(R_test_reg, Se_test_reg)
timeGCRF[i] = time.time() - start_time
Y_test = Y_test.reshape([Y_test.shape[0]*Y_test.shape[1]])
Y_test_reg1 = Y_test_reg.copy()
Y_test_reg = Y_test_reg.reshape([Y_test_reg.shape[0]*Y_test_reg.shape[1]])
YGCRF1 = YGCRF.copy()
YGCRF = YGCRF.reshape([YGCRF.shape[0]*YGCRF.shape[1]])
YNB = YNB.reshape([YNB.shape[0]*YNB.shape[1]])
probNB = probNB.reshape([probNB.shape[0]*probNB.shape[1]])
YB = YB.reshape([YB.shape[0]*YB.shape[1]])
probB = probB.reshape([probB.shape[0]*probB.shape[1]])
YBF = YBF.reshape([YBF.shape[0]*YBF.shape[1]])
probBF = probBF.reshape([probBF.shape[0]*probBF.shape[1]])
probNB[Y_test==0] = 1 - probNB[Y_test==0]
probB[Y_test==0] = 1 - probB[Y_test==0]
probBF[Y_test==0] = 1 - probBF[Y_test==0]
AUCNB[i] = roc_auc_score(Y_test,probNB)
AUCB[i] = roc_auc_score(Y_test,probB)
AUCBF[i] = roc_auc_score(Y_test,probBF)
R2GCRF[i] = r2_score(Y_test_reg,YGCRF)
MSEGCRF[i] = mean_squared_error(Y_test_reg,YGCRF)
logProbNB[i] = np.sum(np.log(probNB))
logProbB[i] = np.sum(np.log(probB))
logProbBF[i] = np.sum(np.log(probBF))
file.write('AUC of the GCRFCNB predictor is {}'.format(AUCNB[i]) + "\n")
file.write('AUC of the GCRFCB predictor is {}'.format(AUCB[i]) + "\n")
file.write('AUC of the GCRFCB_fast predictor is {}'.format(AUCBF[i]) + "\n")
file.write('R2 of the GCRF predictor is {}'.format(R2GCRF[i]) + "\n")
file.write('MSE of the GCRF predictor is {}'.format(MSEGCRF[i]) + "\n")
file.write('AUC of the unstructured predictors is {}'.format(Skor_com_AUC[i,:]) + "\n")
file.write('AUC2 of the unstructured predictors is {}'.format(Skor_com_AUC2[i,:]) + "\n")
file.write('R2 of the unstructured predictors is {}'.format(Skor_com_R2[i,:]) + "\n")
file.write('R22 of the unstructured predictors is {}'.format(Skor_com_R22[i,:]) + "\n")
file.write('MSE of the unstructured predictors is {}'.format(Skor_com_MSE[i,:]) + "\n")
file.write('Logprob of GCRFCNB is {}'.format(logProbNB[i]) + "\n")
file.write('Logprob of GCRFCB is {}'.format(logProbB[i]) + "\n")
file.write('Logprob of GCRFCB_fast is {}'.format(logProbBF[i]) + "\n")
file.write("--- %s seconds --- GCRFCNB" % (timeNB[i]) + "\n")
file.write("--- %s seconds --- GCRFCB" % (timeB[i]) + "\n")
file.write("--- %s seconds --- GCRFCB_fast" % (timeBF[i]) + "\n")
file.write("--- %s seconds --- GCRF" % (timeGCRF[i]) + "\n")
file.close()
np.save('Y_test', Y_test_reg1)
np.save('Y_GCRF', YGCRF1)
|
|
import os
import cv2
import tensorflow as tf
slim = tf.contrib.slim
import sys
sys.path.append('slim')
import matplotlib.pyplot as plt
import numpy as np
from nets import inception
import tensorflow.contrib.slim.nets as nets
from preprocessing import inception_preprocessing
from lime import lime_image
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
plt.switch_backend('agg')
image_size = inception.inception_v3.default_image_size
def transform_img_fn(path_list):
out = []
for f in path_list:
image_raw = tf.image.decode_jpeg(open(f, 'rb').read(), channels=3)
image = inception_preprocessing.preprocess_image(image_raw, image_size, image_size, is_training=False)
out.append(image)
return sess.run([out])[0]
def get_names():
filename = 'imagenet_lsvrc_2015_synsets.txt'
synset_list = [s.strip() for s in open(filename).readlines()]
filename = 'imagenet_metadata.txt'
synset_to_human_list = open(filename).readlines()
synset_to_human = {}
for s in synset_to_human_list:
parts = s.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
label_index = 1
labels_to_names = {0: 'background'}
for synset in synset_list:
name = synset_to_human[synset]
labels_to_names[label_index] = name
label_index += 1
return labels_to_names
names = get_names()
X = tf.placeholder(tf.float32, [None, 299, 299, 3])
Y = tf.placeholder(tf.int32, [None])
def inceptionv3(image, reuse=tf.AUTO_REUSE):
arg_scope = nets.inception.inception_v3_arg_scope(weight_decay=0.0)
with slim.arg_scope(arg_scope):
logits, end_point = nets.inception.inception_v3(image, 1001, is_training=False, reuse=reuse)
logits = logits[:, 1:] # ignore background class
probs = tf.nn.softmax(logits) # probabilities
return logits, probs, end_point
def predict_rar(images):
return sess.run(rar_probs, feed_dict={X: images})
def predict_adv(images):
return sess.run(adv_probs, feed_dict={X: images})
def step_target_class_adversarial_images(x, eps, one_hot_target_class):
logits, prob, end_points = inceptionv3(x)
cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class,
logits,
label_smoothing=0.1,
weights=1.0)
x_adv = x - eps * tf.sign(tf.gradients(cross_entropy, x)[0])
x_adv = tf.clip_by_value(x_adv, -1.0, 1.0)
return tf.stop_gradient(x_adv)
def stepll_adversarial_images(x, eps):
logits, prob, end_points = inceptionv3(x)
least_likely_class = tf.argmin(logits, 1)
one_hot_ll_class = tf.one_hot(least_likely_class, 1000)
return step_target_class_adversarial_images(x, eps, one_hot_ll_class)
def stepllnoise_adversarial_images(x, eps):
logits, prob, end_points = inceptionv3(x)
least_likely_class = tf.argmin(logits, 1)
    one_hot_ll_class = tf.one_hot(least_likely_class, 1000)  # 1000 ImageNet classes, matching stepll_adversarial_images
x_noise = x + eps / 2 * tf.sign(tf.random_normal(x.shape))
return step_target_class_adversarial_images(x_noise, eps / 2, one_hot_ll_class)
def get_ground_truth(label_txt):
fp = open(label_txt)
ground_truth = np.zeros((299, 299))
label = 0
for p in fp:
if '<size>' in p:
width = int(next(fp).split('>')[1].split('<')[0])
height = int(next(fp).split('>')[1].split('<')[0])
if '<object>' in p:
label = next(fp).split('>')[1].split('<')[0]
if '<bndbox>' in p:
xmin = int(next(fp).split('>')[1].split('<')[0])
ymin = int(next(fp).split('>')[1].split('<')[0])
xmax = int(next(fp).split('>')[1].split('<')[0])
ymax = int(next(fp).split('>')[1].split('<')[0])
matrix = [int(xmin / width * 299), int(ymin / height * 299), int(xmax / width * 299),
int(ymax / height * 299)]
ground_truth[matrix[1]:matrix[3], matrix[0]:matrix[2]] = 1
return ground_truth
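# Note: the parser above walks what looks like PASCAL-VOC style XML annotations (an assumption
# based on the <size>/<object>/<bndbox> tags) and rasterises every bounding box into a
# 299x299 binary mask with 1 inside the annotated objects.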
fixed_adv_sample_get_op = stepll_adversarial_images(X, 0.15)
rar_logits, rar_probs, rar_end_point = inceptionv3(X)
adv_logits, adv_probs, adv_end_point = inceptionv3(fixed_adv_sample_get_op)
is_defense = tf.equal(tf.argmax(rar_probs, 1), (tf.argmax(adv_probs, 1)))
#
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, "inception_v3.ckpt")
tmp = time.time()
def show_img(img, rar_mask, adv_mask, rar_label, adv_label, j):
plt.figure()
rar_mask[rar_mask != 2] = 0
rar_mask = cv2.applyColorMap(np.uint8(255 * rar_mask), cv2.COLORMAP_JET)
rar_mask = cv2.cvtColor(rar_mask, cv2.COLOR_BGR2RGB)
img = img.astype(float)
img /= img.max()
alpha = 2
rar_img = img + alpha * rar_mask
adv_mask[adv_mask != 2] = 0
adv_mask = cv2.applyColorMap(np.uint8(255 * adv_mask), cv2.COLORMAP_JET)
adv_mask = cv2.cvtColor(adv_mask, cv2.COLOR_BGR2RGB)
adv_img = img + alpha * adv_mask
plt.subplot(1, 5, 1)
plt.imshow(img)
plt.subplot(1, 5, 2)
plt.imshow(rar_mask)
# plt.imshow(mark_boundaries(images[j] / 2 + 0.5, rar_mask))
# np.save('a.npy', mark_boundaries(images[j] / 2 + 0.5, rar_mask))
plt.subplot(1, 5, 3)
plt.imshow(adv_img)
# plt.imshow(mark_boundaries(images[j] / 2 + 0.5, adv_mask))
plt.subplot(1, 5, 4)
plt.imshow(rar_img)
plt.subplot(1, 5, 5)
plt.imshow(adv_img)
plt.savefig(
"image/" + str(rar_label) + "_" + str(adv_label) + "_" + str(num_features) + "_" + str(
j) + '.png')
if __name__ == '__main__':
num_features = 10
offset = 332
labels_file = 'imagenet_labels.txt'
npz_dir = 'npz_'+str(offset)+'_'+str(offset+100)+'/'
results_file = 'result_lime' + str(num_features) + '.txt'
if os.path.isfile(results_file):
os.remove(results_file)
if not os.path.isdir(npz_dir):
os.makedirs(npz_dir)
defense_iou = 0
defense_count = 0
attack_iou = 0
attack_count = 0
rar_ground_iou_sum = 0
adv_ground_iou_sum = 0
label_paths = []
    with open(labels_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
for label_index, line in enumerate(lines[offset:offset+100]):
imgs = []
true_labels = []
label_letter = line.split(' ')
ground_truths = []
label_letter = label_letter[0]
label_index += offset
dir_name = 'img_val/' + str(label_letter)
for root, dirs, files in os.walk(dir_name):
for j,file in enumerate(files):
img_path = dir_name + '/' + file
label_path = 'val/' + str(file)[:-4] + 'xml'
imgs.append(img_path)
true_labels.append(label_index)
                    ground_truths.append(get_ground_truth(label_path))
label_paths.append(img_path)
images = transform_img_fn(imgs)
adv_imgs = sess.run(fixed_adv_sample_get_op, feed_dict={X: images})
with open(results_file, 'a', encoding='utf-8') as f_w:
try:
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(images[j], predict_rar, top_labels=1, hide_color=0,
num_samples=1000)
for key in (explanation.local_exp.keys()):
print(key)
rar_label = key
break
if rar_label == true_labels[j]:
rar_temp, rar_mask = explanation.get_image_and_mask(rar_label, positive_only=False,
num_features=num_features,
hide_rest=False)
explanation = explainer.explain_instance(adv_imgs[j], predict_adv, top_labels=1,
hide_color=0,
num_samples=1000)
for key in (explanation.local_exp.keys()):
print(key)
adv_label = key
break
adv_temp, adv_mask = explanation.get_image_and_mask(adv_label, positive_only=False,
num_features=num_features,
hide_rest=False)
np.savez(
npz_dir + str(rar_label) + "_" + str(adv_label) + "_" + str(num_features) + "_" + str(
j), images[j], rar_mask, adv_mask, ground_truths[j])
except Exception as e:
print(e)
                    with open(results_file, 'a') as f:
                        f.write(str(e))
f.write(str(label_paths[j]))
|
|
""""""
"""
Copyright (c) 2021 Olivier Sprangers as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import time
import numpy as np
import torch.utils.data as torchdata
from torch.distributions import StudentT
from lib.utils import calc_metrics
#%% Training loop
def loop(model, data, optimizer, batch_size, id_samples, train, metrics, scaling):
""" Loop to calculate output of one epoch"""
# Run model in train mode if train, otherwise in evaluation mode
model = model.train() if train else model.eval()
device = next(model.parameters()).device
data_subset = torchdata.Subset(data, id_samples)
num_samples = len(id_samples)
data_generator = torchdata.DataLoader(data_subset, batch_size)
# Quantile forecasting
quantiles = torch.arange(1, 10, dtype=torch.float32, device=device) / 10
num_forecasts = len(quantiles)
# Initiate dimensions and book-keeping variables
dim_input, dim_output, dim_inputseqlen, dim_outputseqlen, window, dim_lag, dim_emb, dim_cov = data.dim_input, data.dim_output, data.dim_inputseqlen, data.dim_outputseqlen, data.window, data.d_lag, data.d_emb, data.d_cov
yhat_tot = np.zeros((num_forecasts, data.dim_outputseqlen, num_samples, dim_output), dtype='float32')
y_tot = np.zeros((dim_outputseqlen, num_samples, dim_output), dtype='float32')
x_tot = np.zeros((window, num_samples, dim_input), dtype='float32')
loss = 0
# Student's t distribution settings
v = 3
factor = (v / (v - 2))
n_samples_dist = 1000
# Datamax
data_max = 1.0 if data.name == 'uci_traffic' else 1e12
data_min = 0.0
# Loop
start = time.time()
for i, (X, Y) in enumerate(data_generator):
# Batch
j = np.min(((i + 1) * batch_size, len(id_samples)))
# Permute to [seqlen x batch x feature] and transfer to device
X, Y = X.permute(1, 0, 2), Y.permute(1, 0, 2)
# Fill bookkeeping variables
y_tot[:, i*batch_size:j] = Y.detach().numpy()
x_tot[:, i*batch_size:j] = X[:window].detach().numpy()
# Create lags and covariate tensors
if scaling:
scaleY = 1 + X[:dim_inputseqlen, :, -dim_lag:].mean(dim = 0)
X[:, :, -dim_lag:] /= scaleY
Y /= scaleY
else:
scaleY = torch.tensor([1.0])
# Create three inputs: (i) time series index, (ii) covariates, (iii) lags
X_idx = X[:, :, 0:dim_emb].long()
X_cov = X[:, :, dim_emb:dim_emb + dim_cov]
X_lag = X[:window, :, -dim_lag:]
# Send to device
X_lag, X_cov, X_idx, Y = X_lag.to(device), X_cov.to(device), X_idx.to(device), Y.to(device)
scaleY = scaleY.to(device)
if train:
# Set gradients to zero of optimizer
optimizer.zero_grad()
# Calculate loc and scale parameters of output distribution
mean, variance = model(X_lag, X_cov, X_idx, dim_outputseqlen)
scale = (variance / factor).sqrt()
loc = mean
distr = StudentT(v, loc, scale)
loss_batch = -distr.log_prob(Y).mean()
# Backward pass
loss_batch.backward()
# Update parameters
optimizer.step()
else:
with torch.no_grad():
mean_prev = X_lag[dim_inputseqlen, :, [-1]].clone().detach()
for t in range(dim_outputseqlen):
X_lag[dim_inputseqlen + t, :, [-1]] = mean_prev
mean, variance = model(X_lag[:dim_inputseqlen + t + 1], X_cov, X_idx, t + 1)
mean_prev = mean[-1].clone().detach().clamp(data_min, data_max)
# Calculate loss
scale = (variance / factor).sqrt()
loc = mean
distr = StudentT(v, loc, scale)
loss_batch = -distr.log_prob(Y).mean()
# Append loss, calculate quantiles
loss += loss_batch.item()
yhat = distr.sample([n_samples_dist])
yhat *= scaleY
yhat_q = torch.quantile(yhat, quantiles, dim=0)
yhat_tot[:, :, i*batch_size:j, :] = yhat_q.detach().cpu().numpy()
end = time.time()
print(f'{" Train" if train else " Validation/Test"} loss: {loss/len(data_generator):.4f} Time: {end-start:.2f}s')
yhat_tot = np.clip(yhat_tot, 0, 1e9)
    df = None
    if metrics:
        output = 0
        y, yhat = y_tot[:, :, output], yhat_tot[:, :, :, output]
        df = calc_metrics(yhat, y, quantiles.cpu().numpy())
    return model, loss, yhat_tot, y_tot, x_tot, df
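#%% Usage sketch (illustrative; 'model', 'dataset' and 'optimizer' are assumed to be defined elsewhere)
# model, loss, yhat, y, x, df = loop(model, dataset, optimizer, batch_size=256,
#                                    id_samples=np.arange(len(dataset)), train=True,
#                                    metrics=False, scaling=True)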
|
|
import os
import random
from typing import Optional
import numpy
import torch
from scipy.stats import qmc
def set_seed(seed: Optional[int] = None):
# ref: https://www.kaggle.com/lars123/neural-tangent-kernel-2
if seed is None:
return
random.seed(seed)
os.environ["PYTHONASSEED"] = str(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class RandomEngine(qmc.QMCEngine):
def __init__(self, d, seed=None):
super().__init__(d=d, seed=seed)
def random(self, n=1):
self.num_generated += n
return self.rng.random((n, self.d))
def reset(self):
super().__init__(d=self.d, seed=self.rng_seed)
return self
def fast_forward(self, n):
self.random(n)
return self
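# Minimal usage sketch (illustrative):
#   set_seed(0)                        # seeds python, numpy and torch RNGs
#   engine = RandomEngine(d=2, seed=0)
#   samples = engine.random(4)         # (4, 2) array of uniform pseudo-random points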
|
|
import numpy as np
import spams
import sys, getopt
import pandas as pd
from dl_simulation import *
from signature_genes import get_signature_genes, build_signature_model
from analyze_predictions import *
# from union_of_transforms import random_submatrix, double_sparse_nmf, smaf
from union_of_transforms import random_submatrix, smaf
def compare_results(A, B):
results = list(correlations(A, B, 0))[:-1]
results += list(compare_distances(A, B))
results += list(compare_distances(A.T, B.T))
# results += [compare_clusters(A,B)]
# results += [compare_clusters(A.T,B.T)]
return results
THREADS = 10
# Results will consist of the following for each method (in both training and testing):
# overall Pearson, overall Spearman, gene Pearson, sample Pearson, sample dist Pearson, sample dist Spearman, gene dist Pearson, gene dist Spearman, sample cluster MI, gene cluster MI
if __name__ == "__main__":
biased_training = 0.
composition_noise = 0.
subset_size = 0
# opts, args = getopt.getopt(sys.argv[1:], 'i:m:s:d:t:g:n:r:b:a:', [])
filename = {"data1": "GSE71858.npy", #
"data2": "GSE60361.npy", #
"data3": "GSE62270.npy", #
"data4": "GSE48968.npy", #
"data5": "GSE52529.npy", #
"data6": "GSE77564.npy",
"data7": "GSE78779.npy", #
"data8": "GSE10247.npy", #
"data9": "GSE69405.npy"}
# data_path,measurements,sparsity,dictionary_size,training_dictionary_fraction,max_genes,max_samples,SNR,biased_training
# = 'GTEx/data.commonGenes.npy', 100, 15, 0.5, 0.05, 5000, 10000, 2., 0.
opts = {
'-i': "./Data/GSE78779.npy",
'-m': '50',
'-s': '10',
'-d': '0.5',
'-t': '0.05',
'-g': '5000',
'-n': '200',
'-r': '2.',
'-b': '0.',
# '-a': '',
# '-z': ''
}
for opt, arg in opts.items():
if opt == '-i':
data_path = arg
elif opt == '-m':
measurements = int(arg)
elif opt == '-s':
sparsity = int(arg)
elif opt == '-d':
dictionary_size = float(arg)
elif opt == '-t':
training_dictionary_fraction = float(arg)
elif opt == '-g':
max_genes = int(arg)
elif opt == '-n':
max_samples = int(arg)
elif opt == '-r':
SNR = float(arg)
elif opt == '-b':
biased_training = float(arg)
elif opt == '-a':
composition_noise = float(arg)
elif opt == '-z':
subset_size = int(arg)
# data_path,measurements,sparsity,dictionary_size,training_dictionary_fraction,max_genes,max_samples,SNR,biased_training = 'GTEx/data.commonGenes.npy',100,15,0.5,0.05,5000,10000,2.,0.
X = np.load(data_path)
print(X.shape)
#X0, xo, Xobs = random_submatrix(X, max_genes, max_samples, 0)
X0 = X
#np.save("./Data/linedata/GSE71858_X0.npy", X0)
#np.save("./Data/linedata/GSE77564_X0.npy", X0)
# train bases
training_dictionary_size = max(int(training_dictionary_fraction * X0.shape[1]), 5)
if dictionary_size < 1:
dictionary_size = dictionary_size * training_dictionary_size
dictionary_size = int(dictionary_size)
    xi = np.zeros(X0.shape[1], dtype=bool)
if biased_training > 0:
i = np.random.randint(len(xi))
dist = distance.cdist([X0[:, i]], X0.T, 'correlation')[0]
didx = np.argsort(dist)[1:int(biased_training * training_dictionary_size) + 1]
else:
didx = []
xi[didx] = True
if biased_training < 1:
remaining_idx = np.setdiff1d(range(len(xi)), didx)
xi[np.random.choice(remaining_idx, training_dictionary_size - xi.sum(), replace=False)] = True
xa = X0[:, xi]
xb = X0[:, np.invert(xi)]
print('data: %s measurements: %d, sparsity: %d, dictionary size: %d, training fraction: %.2f, genes: %d, samples: %d, SNR: %.1f, bias: %.1f, composition_noise: %.2f, subset_size: %d' % (
data_path,
measurements, sparsity, dictionary_size, training_dictionary_fraction, X0.shape[0], X0.shape[1], SNR,
biased_training, composition_noise, subset_size))
Results = {}
np.save("./Data/linedata/GSE78779_xa.npy", xa)
np.save("./Data/linedata/GSE78779_xb.npy", xb)
ua, sa, vta = np.linalg.svd(xa, full_matrices=False)
ua = ua[:, :min(dictionary_size, xa.shape[1])]
x1a, phi, y, w, d, psi = recover_system_knownBasis(xa, measurements, sparsity, Psi=ua, snr=SNR, use_ridge=False)
Results['SVD (training)'] = compare_results(xa, x1a)
x1b, phi, y, w, d, psi = recover_system_knownBasis(xb, measurements, sparsity, Psi=ua, snr=SNR, use_ridge=False)
Results['SVD (testing)'] = compare_results(xb, x1b)
np.save("./Data/linedata/GSE78779_svd.npy", x1b)
ua, va = spams.nmf(np.asfortranarray(xa), return_lasso=True, K=dictionary_size, clean=True, numThreads=THREADS)
x2a, phi, y, w, d, psi = recover_system_knownBasis(xa, measurements, sparsity, Psi=ua, snr=SNR, use_ridge=False)
Results['sparse NMF (training)'] = compare_results(xa, x2a)
x2b, phi, y, w, d, psi = recover_system_knownBasis(xb, measurements, sparsity, Psi=ua, snr=SNR, use_ridge=False)
Results['sparse NMF (testing)'] = compare_results(xb, x2b)
np.save("./Data/linedata/GSE78779_snmf.npy", x2b)
k = min(int(xa.shape[1] * 3), 150)
#k = 220
UW = (np.random.random((xa.shape[0], k)), np.random.random((k, xa.shape[1])))
ua, va = smaf(xa, k, 5, 0.0005, maxItr=10, use_chol=True, activity_lower=0., module_lower=xa.shape[0] / 10, UW=UW,
donorm=True, mode=1, mink=3.)
x2a, phi, y, w, d, psi = recover_system_knownBasis(xa, measurements, sparsity, Psi=ua, snr=SNR, use_ridge=False)
Results['SMAF (training)'] = compare_results(xa, x2a)
x2b, phi, y, w, d, psi = recover_system_knownBasis(xb, measurements, sparsity, Psi=ua, snr=SNR, use_ridge=False,
nsr_pool=composition_noise, subset_size=subset_size)
Results['SMAF (testing)'] = compare_results(xb, x2b)
np.save("./Data/linedata/GSE78779_smaf.npy", x2b)
print (ua.shape, X0.shape, xa.shape, xb.shape)
#print w.shape, w
print(y.shape)
for k, v in sorted(Results.items()):
print('\t'.join([k] + [str(x) for x in v]))
|
|
"""
Functions for evaluating forecasts.
"""
import numpy as np
import xarray as xr
#import properscoring as ps
import xskillscore as xs
from tqdm import tqdm
def load_test_data(path, var, years=slice('2017', '2018'), cmip=False):
"""
Args:
path: Path to nc files
var: variable. Geopotential = 'z', Temperature = 't'
years: slice for time window
Returns:
dataset: Concatenated dataset for 2017 and 2018
"""
assert var in ['z', 't'], 'Test data only for Z500 and T850'
ds = xr.open_mfdataset(f'{path}/*.nc', combine='by_coords')[var]
if cmip:
ds['plev'] /= 100
ds = ds.rename({'plev': 'level'})
try:
ds = ds.sel(level=500 if var == 'z' else 850).drop('level')
except ValueError:
pass
return ds.sel(time=years)
def compute_weighted_rmse(da_fc, da_true, mean_dims=xr.ALL_DIMS):
"""
Compute the RMSE with latitude weighting from two xr.DataArrays.
Args:
da_fc (xr.DataArray): Forecast. Time coordinate must be validation time.
da_true (xr.DataArray): Truth.
Returns:
rmse: Latitude weighted root mean squared error
"""
error = da_fc - da_true
weights_lat = np.cos(np.deg2rad(error.lat))
weights_lat /= weights_lat.mean()
rmse = np.sqrt(((error)**2 * weights_lat).mean(mean_dims))
return rmse
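# Minimal usage sketch (illustrative; synthetic DataArrays, not real forecasts):
#   lats = np.linspace(-87.2, 87.2, 32)
#   fc = xr.DataArray(np.random.rand(10, 32, 64), dims=['time', 'lat', 'lon'], coords={'lat': lats})
#   obs = xr.DataArray(np.random.rand(10, 32, 64), dims=['time', 'lat', 'lon'], coords={'lat': lats})
#   rmse = compute_weighted_rmse(fc, obs)  # latitude-weighted RMSE over all dims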
def evaluate_iterative_forecast(da_fc, da_valid, func, mean_dims=xr.ALL_DIMS):
rmses = []
for f in da_fc.lead_time:
fc = da_fc.sel(lead_time=f)
fc['time'] = fc.time + np.timedelta64(int(f), 'h')
rmses.append(func(fc, da_valid, mean_dims))
return xr.concat(rmses, 'lead_time')
def compute_weighted_acc(da_fc, da_true, centered=True):
clim = da_true.mean('time')
t = np.intersect1d(da_fc.time, da_true.time)
fa = da_fc.sel(time=t) - clim
a = da_true.sel(time=t) - clim
weights_lat = np.cos(np.deg2rad(da_fc.lat))
weights_lat /= weights_lat.mean()
w = weights_lat
if centered:
fa_prime = fa - fa.mean()
a_prime = a - a.mean()
else:
fa_prime = fa
a_prime = a
acc = (
np.sum(w * fa_prime * a_prime) /
np.sqrt(
np.sum(w * fa_prime**2) * np.sum(w * a_prime**2)
)
)
return acc
def compute_weighted_meanspread(da_fc,mean_dims=xr.ALL_DIMS):
"""
prediction: xarray. Coordinates: time, forecast_number, lat, lon. Variables: z500, t850
time: Let there be I initial conditions
forecast_number: For each initial condition, let there be N forecasts
#mean variance
#1. for each input i, for each gridpoint, find variance among all N forecasts for that single input i
#2. for each input i, find latitude-weighted average of all the lat*lon points
#3. find average of all I inputs. take square root
"""
var1=da_fc.var('member')
weights_lat = np.cos(np.deg2rad(var1.lat))
weights_lat /= weights_lat.mean()
mean_spread= np.sqrt((var1*weights_lat).mean(mean_dims))
return mean_spread
def compute_weighted_crps(da_fc, da_true, mean_dims=xr.ALL_DIMS):
da_true=da_true.sel(time=da_fc.time)
    assert (da_true.time == da_fc.time).all()  # check that the time coordinates are aligned
weights_lat = np.cos(np.deg2rad(da_fc.lat))
weights_lat /= weights_lat.mean()
crps = xs.crps_ensemble(da_true, da_fc)
crps = (crps * weights_lat).mean(mean_dims)
return crps
# def crps_score(da_fc,da_true,member_axis,mean_dims=xr.ALL_DIMS):
# #check size
# da_true=da_true.sel(time=da_fc.time)
# assert (da_true.time==da_fc.time).all
# #import properscoring as ps
# obs = np.asarray(da_true.to_array(), dtype=np.float32).squeeze();
# #shape: (variable,time, lat, lon)
# pred=np.asarray(da_fc.to_array(), dtype=np.float32).squeeze();
# #shape: (variable, member, time, lat, lon)
# member_axis=member_axis+1 #Weird but have to do since the above line changes position of member_axis
# if pred.ndim==4: #for single ensemble member. #ToDo: make it general
# pred=np.expand_dims(pred,axis=member_axis)
# crps=ps.crps_ensemble(obs,pred, weights=None, issorted=False,axis=member_axis)
# #crps.shape #(variable, time, lat, lon)
# # if crps.ndim==3: #for single input.#ToDo: make it general
# # crps=np.expand_dims(crps,axis=member_axis)
# #Converting back to xarray
# crps_score = xr.Dataset({
# 'z500': xr.DataArray(
# crps[0,...],
# dims=['time', 'lat', 'lon'],
# coords={'time': da_true.time, 'lat': da_true.lat, 'lon': da_true.lon,
# },
# ),
# 't850': xr.DataArray(
# crps[1,...],
# dims=['time', 'lat', 'lon'],
# coords={'time': da_true.time, 'lat': da_true.lat, 'lon': da_true.lon,
# },
# )
# })
# #averaging to get single valye
# weights_lat = np.cos(np.deg2rad(crps_score.lat))
# weights_lat /= weights_lat.mean()
# crps_score = (crps_score* weights_lat).mean(mean_dims)
# return crps_score
def compute_weighted_mae(da_fc, da_true, mean_dims=xr.ALL_DIMS):
"""
Compute the MAE with latitude weighting from two xr.DataArrays.
Args:
da_fc (xr.DataArray): Forecast. Time coordinate must be validation time.
da_true (xr.DataArray): Truth.
Returns:
        mae: Latitude weighted mean absolute error
"""
error = da_fc - da_true
weights_lat = np.cos(np.deg2rad(error.lat))
weights_lat /= weights_lat.mean()
mae = (np.abs(error) * weights_lat).mean(mean_dims)
return mae
def compute_bin_crps(obs, preds, bin_edges):
"""
Last axis must be bin axis
obs: [...]
preds: [..., n_bins]
"""
# pdb.set_trace()
obs = obs.values
preds = preds.values
# Convert observation
a = np.minimum(bin_edges[1:], obs[..., None])
# b = bin_edges[:-1] * (bin_edges[0:-1] > obs[..., None])
b = np.where(bin_edges[:-1] > obs[..., None], bin_edges[:-1], -np.inf)
y = np.maximum(a, b)
# print('a =', a)
# print('b =', b)
# print('y =', y)
# Convert predictions to cumulative predictions with a zero at the beginning
cum_preds = np.cumsum(preds, -1)
cum_preds_zero = np.concatenate([np.zeros((*cum_preds.shape[:-1], 1)), cum_preds], -1)
xmin = bin_edges[..., :-1]
xmax = bin_edges[..., 1:]
lmass = cum_preds_zero[..., :-1]
umass = 1 - cum_preds_zero[..., 1:]
# y = np.atleast_1d(y)
# xmin, xmax = np.atleast_1d(xmin), np.atleast_1d(xmax)
# lmass, lmass = np.atleast_1d(lmass), np.atleast_1d(lmass)
scale = xmax - xmin
# print('scale =', scale)
y_scale = (y - xmin) / scale
# print('y_scale = ', y_scale)
z = y_scale.copy()
z[z < 0] = 0
z[z > 1] = 1
# print('z =', z)
a = 1 - (lmass + umass)
# print('a =', a)
crps = (
np.abs(y_scale - z) + z**2 * a - z * (1 - 2*lmass) +
a**2 / 3 + (1 - lmass) * umass
)
return np.sum(scale * crps, -1)
def compute_bin_crps_da(da_true, da_fc, batch=100):
n = int(np.ceil(len(da_fc.time) / batch))
result = []
for i in tqdm(range(n)):
sl = slice(i*batch, (i+1)*batch)
r = compute_bin_crps(da_true.isel(time=sl), da_fc.isel(time=sl), da_fc.bin_edges)
result.append(r)
return np.concatenate(result)
def compute_weighted_bin_crps(da_fc, da_true, mean_dims=xr.ALL_DIMS):
"""
"""
t = np.intersect1d(da_fc.time, da_true.time)
da_fc, da_true = da_fc.sel(time=t), da_true.sel(time=t)
weights_lat = np.cos(np.deg2rad(da_true.lat))
weights_lat /= weights_lat.mean()
dims = ['time', 'lat', 'lon']
if type(da_true) is xr.Dataset:
das = []
for var in da_true:
result = compute_bin_crps_da(da_true[var], da_fc[var])
# result = compute_bin_crps(da_true[var], da_fc[var], da_fc[var].bin_edges)
das.append(xr.DataArray(
result, dims=dims, coords=dict(da_true.coords), name=var
))
crps = xr.merge(das)
else:
# result = compute_bin_crps(da_true, da_fc, da_fc.bin_edges)
result = compute_bin_crps_da(da_true, da_fc)
crps = xr.DataArray(
result, dims=dims, coords=dict(da_true.coords), name=da_fc.name
)
crps = (crps * weights_lat).mean(mean_dims)
return crps
|
|
import torch
import numpy as np
from . import Kernel, Parameter, config
class LinearKernel(Kernel):
def __init__(self, input_dims=None, active_dims=None, name="Linear"):
super().__init__(input_dims, active_dims, name)
constant = torch.rand(1)
self.constant = Parameter(constant, lower=0.0)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
if X2 is None:
X2 = X1
return X1.mm(X2.T) + self.constant()
class PolynomialKernel(Kernel):
def __init__(self, degree, input_dims=None, active_dims=None, name="Polynomial"):
super().__init__(input_dims, active_dims, name)
offset = torch.rand(1)
self.degree = degree
self.offset = Parameter(offset, lower=0.0)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
if X2 is None:
X2 = X1
return (X1.mm(X2.T) + self.offset())**self.degree
class PhiKernel(Kernel):
def __init__(self, phi, input_dims, active_dims=None, name="Phi"):
super().__init__(input_dims, active_dims, name)
        feature_dims = phi(torch.ones(1, input_dims)).shape[1]  # phi maps (data_points,input_dims) -> (data_points,feature_dims)
variance = torch.ones(feature_dims)
self.phi = phi
self.variance = Parameter(variance, lower=config.positive_minimum)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
if X2 is None:
X = self.phi(X1)
return X.mm(self.variance().diagflat().mm(X.T))
else:
return self.phi(X1).mm(self.variance().diagflat().mm(self.phi(X2).T))
class SquaredExponentialKernel(Kernel):
def __init__(self, input_dims, active_dims=None, name="SE"):
super().__init__(input_dims, active_dims, name)
l = torch.rand(input_dims)
sigma = torch.rand(1)
self.l = Parameter(l, lower=config.positive_minimum)
self.sigma = Parameter(sigma, lower=config.positive_minimum)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
sqdist = self.squared_distance(X1,X2) # NxMxD
exp = torch.exp(-0.5*torch.tensordot(sqdist, 1.0/self.l()**2, dims=1)) # NxM
return self.sigma()**2 * exp
class RationalQuadraticKernel(Kernel):
def __init__(self, alpha, input_dims, active_dims=None, name="RQ"):
super().__init__(input_dims, active_dims, name)
l = torch.rand(input_dims)
sigma = torch.rand(1)
self.alpha = alpha
self.l = Parameter(l, lower=config.positive_minimum)
self.sigma = Parameter(sigma, lower=config.positive_minimum)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
sqdist = self.squared_distance(X1,X2) # NxMxD
power = 1.0+0.5*torch.tensordot(sqdist, 1.0/self.l()**2, dims=1)/self.alpha # NxM
return self.sigma()**2 * torch.pow(power,-self.alpha)
class PeriodicKernel(Kernel):
def __init__(self, input_dims, active_dims=None, name="Periodic"):
super().__init__(input_dims, active_dims, name)
l = torch.rand(input_dims)
p = torch.rand(1)
sigma = torch.rand(1)
self.l = Parameter(l, lower=config.positive_minimum)
self.p = Parameter(p, lower=config.positive_minimum)
self.sigma = Parameter(sigma, lower=config.positive_minimum)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
sin = torch.sin(np.pi * self.distance(X1,X2) / self.p()) # NxMxD
exp = torch.exp(-2.0 * torch.tensordot(sin**2, self.l()**2, dims=1)) # NxM
return self.sigma()**2 * exp
class CosineKernel(Kernel):
def __init__(self, input_dims, active_dims=None, name="Cosine"):
super().__init__(input_dims, active_dims, name)
l = torch.rand(input_dims)
sigma = torch.rand(1)
self.l = Parameter(l, lower=config.positive_minimum)
self.sigma = Parameter(sigma, lower=config.positive_minimum)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
cos = 2.0*np.pi * torch.tensordot(self.distance(X1,X2), 1.0/self.l(), dims=1) # NxMxD
return self.sigma()**2 * torch.cos(cos)
class SpectralKernel(Kernel):
def __init__(self, input_dims, active_dims=None, name="SM"):
super().__init__(input_dims, active_dims, name)
weight = torch.rand(1)
mean = torch.rand(input_dims)
variance = torch.ones(input_dims)
self.weight = Parameter(weight, lower=config.positive_minimum)
self.mean = Parameter(mean, lower=config.positive_minimum)
self.variance = Parameter(variance, lower=config.positive_minimum)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
tau = self.distance(X1,X2) # NxMxD
exp = torch.exp(-2.0*np.pi**2 * tau**2 * self.variance().reshape(1,1,-1)) # NxMxD
cos = torch.cos(2.0*np.pi * tau * self.mean().reshape(1,1,-1)) # NxMxD
return self.weight() * torch.prod(exp * cos, dim=2)
class MaternKernel(Kernel):
def __init__(self, nu=0.5, input_dims=None, active_dims=None, name="Matérn"):
super().__init__(input_dims, active_dims, name)
if nu not in [0.5, 1.5, 2.5]:
raise ValueError("nu parameter must be 0.5, 1.5, or 2.5")
l = torch.rand(input_dims)
sigma = torch.rand(1)
self.nu = nu
self.l = Parameter(l, lower=1e-6)
self.sigma = Parameter(sigma, lower=1e-6)
def K(self, X1, X2=None):
# X has shape (data_points,input_dims)
if X2 is None:
X2 = X1
dist = torch.abs(torch.tensordot(self.distance(X1,X2), 1.0/self.l(), dims=1))
if self.nu == 0.5:
constant = 1.0
elif self.nu == 1.5:
constant = 1.0 + np.sqrt(3.0)*dist
elif self.nu == 2.5:
constant = 1.0 + np.sqrt(5.0)*dist + 5.0/3.0*dist**2
return self.sigma()**2 * constant * torch.exp(-np.sqrt(self.nu*2.0)*dist)
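# --- Illustrative sketch (not part of the kernel library above) ---
# Self-contained demonstration of the Gram matrix SquaredExponentialKernel.K
# computes, written in plain torch so it does not need the package-relative
# Kernel/Parameter/config imports. The lengthscales and signal scale below are
# arbitrary example values, not defaults used by the library.
def _se_gram_demo():
    l = torch.ones(2)          # example lengthscales, one per input dimension
    sigma = torch.tensor(1.0)  # example signal scale
    X = torch.randn(5, 2)      # 5 data points with 2 input dimensions
    sqdist = (X[:, None, :] - X[None, :, :]) ** 2                              # N x N x D
    return sigma**2 * torch.exp(-0.5 * torch.tensordot(sqdist, 1.0 / l**2, dims=1))  # N x N
if __name__ == '__main__':
    print(_se_gram_demo())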
|
|
import sys
#sys.path.append('/export/zimmerman/khoidang/pyGSM')
sys.path.insert(0,'/home/caldaz/module/pyGSM')
from dlc import *
from pytc import *
from de_gsm import *
import numpy as np
states = [(1,0),(1,1)]
charge=0
filepath1 = 'scratch/tw_pyr_meci.xyz'
filepath2 = 'scratch/et_meci.xyz'
nocc=7
nactive=2
mol1 = next(pb.readfile('xyz',filepath1))
mol2 = next(pb.readfile('xyz',filepath2))
lot = PyTC.from_options(states=states,nocc=nocc,nactive=nactive,basis='6-31gs',from_template=True,do_coupling=True)
# Fixed orbitals for MOM
dat = np.load('Cmom.npz')
Cocc_mom = ls.Tensor.array(dat['Cocc'])
Cact_mom = ls.Tensor.array(dat['Cact'])
lot.casci_from_file(filepath1,Cocc=Cocc_mom,Cact=Cact_mom)
#
pes1 = PES.from_options(lot=lot,ad_idx=states[0][1],multiplicity=states[0][0])
pes2 = PES.from_options(lot=lot,ad_idx=states[1][1],multiplicity=states[1][0])
pes = Avg_PES(pes1,pes2,lot)
print(' IC1 ')
ic1 = DLC.from_options(mol=mol1,PES=pes,print_level=1,resetopt=False)
ic2 = DLC.from_options(mol=mol2,PES=pes,print_level=1,resetopt=False)
print(' Starting GSM ')
gsm = GSM.from_options(ICoord1=ic1,ICoord2=ic2,nnodes=7,growth_direction=1,ADD_NODE_TOL=0.1)
gsm.go_gsm(opt_steps=3,rtype=0)
|
|
from unittest import TestCase
import numpy as np
from aspire.utils.coor_trans import grid_2d, grid_3d
from aspire.utils.matrix import roll_dim, unroll_dim, im_to_vec, vec_to_im, vol_to_vec, vec_to_vol, \
vecmat_to_volmat, volmat_to_vecmat, mat_to_vec, symmat_to_vec_iso, vec_to_symmat, vec_to_symmat_iso
import os.path
DATA_DIR = os.path.join(os.path.dirname(__file__), 'saved_test_data')
class UtilsTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGrid2d(self):
grid2d = grid_2d(8)
self.assertTrue(np.allclose(grid2d['x'], np.load(os.path.join(DATA_DIR, 'grid2d_8_x.npy'))))
self.assertTrue(np.allclose(grid2d['y'], np.load(os.path.join(DATA_DIR, 'grid2d_8_y.npy'))))
self.assertTrue(np.allclose(grid2d['r'], np.load(os.path.join(DATA_DIR, 'grid2d_8_r.npy'))))
self.assertTrue(np.allclose(grid2d['phi'], np.load(os.path.join(DATA_DIR, 'grid2d_8_phi.npy'))))
def testGrid3d(self):
grid3d = grid_3d(8)
self.assertTrue(np.allclose(grid3d['x'], np.load(os.path.join(DATA_DIR, 'grid3d_8_x.npy'))))
self.assertTrue(np.allclose(grid3d['y'], np.load(os.path.join(DATA_DIR, 'grid3d_8_y.npy'))))
self.assertTrue(np.allclose(grid3d['z'], np.load(os.path.join(DATA_DIR, 'grid3d_8_z.npy'))))
self.assertTrue(np.allclose(grid3d['r'], np.load(os.path.join(DATA_DIR, 'grid3d_8_r.npy'))))
self.assertTrue(np.allclose(grid3d['phi'], np.load(os.path.join(DATA_DIR, 'grid3d_8_phi.npy'))))
self.assertTrue(np.allclose(grid3d['theta'], np.load(os.path.join(DATA_DIR, 'grid3d_8_theta.npy'))))
def testUnrollDims(self):
m = np.arange(1, 1201).reshape((5, 2, 10, 3, 4), order='F')
m2, sz = unroll_dim(m, 2) # second argument is 1-indexed - all dims including and after this are unrolled
# m2 will now have shape (5, (2x10x3x4)) = (5, 240)
self.assertEqual(m2.shape, (5, 240))
# The values should still be filled in with the first axis values changing fastest
self.assertTrue(np.allclose(
m2[:, 0],
np.array([1, 2, 3, 4, 5])
))
# sz are the dimensions that were unrolled
self.assertEqual(sz, (2, 10, 3, 4))
def testRollDims(self):
m = np.arange(1, 1201).reshape((5, 2, 120), order='F')
m2 = roll_dim(m, (10, 3, 4))
# m2 will now have shape (5, 2, 10, 3, 4)
self.assertEqual(m2.shape, (5, 2, 10, 3, 4))
# The values should still be filled in with the first axis values changing fastest
self.assertTrue(np.allclose(
m2[:, 0, 0, 0, 0],
np.array([1, 2, 3, 4, 5])
))
def testImToVec1(self):
m = np.empty((3, 3, 10))
m2 = im_to_vec(m)
self.assertEqual(m2.shape, (9, 10))
def testImToVec2(self):
m = np.empty((3, 3))
m2 = im_to_vec(m)
self.assertEqual(m2.shape, (9,))
def testVecToIm1(self):
m = np.empty((25, 10))
m2 = vec_to_im(m)
self.assertEqual(m2.shape, (5, 5, 10))
def testVecToIm2(self):
m = np.empty((16,))
m2 = vec_to_im(m)
self.assertEqual(m2.shape, (4, 4))
def testVolToVec1(self):
m = np.empty((3, 3, 3, 10))
m2 = vol_to_vec(m)
self.assertEqual(m2.shape, (27, 10))
def testVolToVec2(self):
m = np.empty((3, 3, 3))
m2 = vol_to_vec(m)
self.assertEqual(m2.shape, (27,))
def testVecToVol1(self):
m = np.empty((27, 10))
m2 = vec_to_vol(m)
self.assertEqual(m2.shape, (3, 3, 3, 10))
def testVecToVol2(self):
m = np.empty((27,))
m2 = vec_to_vol(m)
self.assertEqual(m2.shape, (3, 3, 3))
def testVecmatToVolmat(self):
m = np.empty((8, 27, 10))
m2 = vecmat_to_volmat(m)
self.assertEqual(m2.shape, (2, 2, 2, 3, 3, 3, 10))
def testVolmatToVecmat(self):
m = np.empty((3, 3, 3, 2, 2, 2, 5))
m2 = volmat_to_vecmat(m)
self.assertEqual(m2.shape, (27, 8, 5))
def testMatToVec1(self):
m = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
v = mat_to_vec(m)
self.assertTrue(
np.allclose(
v,
np.array([1, 4, 7, 2, 5, 8, 3, 6, 9])
)
)
def testMatToVec2(self):
m = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
# Make 2 copies depthwise
m = np.dstack((m, m))
v = mat_to_vec(m)
self.assertTrue(
np.allclose(
v,
np.array([
[1, 1],
[4, 4],
[7, 7],
[2, 2],
[5, 5],
[8, 8],
[3, 3],
[6, 6],
[9, 9],
])
)
)
def testMatToVecSymm1(self):
# We create an unsymmetric matrix and pass it to the functions as a symmetric matrix,
# just so we can closely inspect the returned values without confusion
m = np.array([
[0, 4, 8, 12],
[1, 5, 9, 13],
[2, 6, 10, 14],
[3, 7, 11, 15]
])
v = mat_to_vec(m, is_symmat=True)
# Notice the order of the elements in symmetric matrix - axis 0 first, then axis 1
self.assertTrue(
np.allclose(
v,
np.array([0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
)
)
def testMatToVecSymm2(self):
# We create an unsymmetric matrix and pass it to the functions as a symmetric matrix,
# just so we can closely inspect the returned values without confusion
m = np.array([
[0, 4, 8, 12],
[1, 5, 9, 13],
[2, 6, 10, 14],
[3, 7, 11, 15]
])
# Make 2 copies depthwise
m = np.dstack((m, m))
v = mat_to_vec(m, is_symmat=True)
# Notice the order of the elements in symmetric matrix - axis 0 first, then axis 1
self.assertTrue(
np.allclose(
v,
np.array([
[0, 0],
[1, 1],
[2, 2],
[3, 3],
[5, 5],
[6, 6],
[7, 7],
[10, 10],
[11, 11],
[15, 15]
])
)
)
def testMatToVecSymmIso(self):
# Very similar to the case above, except that the resulting matrix is reweighted.
# We create an unsymmetric matrix and pass it to the functions as a symmetric matrix,
# just so we can closely inspect the returned values without confusion
m = np.array([
[0, 4, 8, 12],
[1, 5, 9, 13],
[2, 6, 10, 14],
[3, 7, 11, 15]
], dtype='float64')
# Make 2 copies depthwise
m = np.dstack((m, m))
v = symmat_to_vec_iso(m)
# Notice the order of the elements in symmetric matrix - axis 0 first, then axis 1
self.assertTrue(
np.allclose(
v,
np.array([
[0, 0],
[1.4142, 1.4142],
[2.8284, 2.8284],
[4.2426, 4.2426],
[5, 5],
[8.4853, 8.4853],
[9.8995, 9.8995],
[10, 10],
[15.5563, 15.5563],
[15, 15]
])
)
)
def testVecToMatSymm1(self):
v = np.array([
[0, 0],
[1, 1],
[2, 2],
[3, 3],
[5, 5],
[6, 6],
[7, 7],
[10, 10],
[11, 11],
[15, 15]
])
m = vec_to_symmat(v)
self.assertTrue(
np.allclose(
m[:, :, 0],
np.array([
[0, 1, 2, 3],
[1, 5, 6, 7],
[2, 6, 10, 11],
[3, 7, 11, 15]
])
)
)
self.assertTrue(
np.allclose(
m[:, :, 1],
np.array([
[0, 1, 2, 3],
[1, 5, 6, 7],
[2, 6, 10, 11],
[3, 7, 11, 15]
])
)
)
def testVecToMatSymm2(self):
v = np.array([0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
m = vec_to_symmat(v)
self.assertTrue(
np.allclose(
m[:, :],
np.array([
[0, 1, 2, 3],
[1, 5, 6, 7],
[2, 6, 10, 11],
[3, 7, 11, 15]
])
)
)
def testVecToMatSymmIso(self):
# Very similar to the case above, except that the resulting matrix is reweighted.
v = np.array([
[0, 0],
[1, 1],
[2, 2],
[3, 3],
[5, 5],
[6, 6],
[7, 7],
[10, 10],
[11, 11],
[15, 15]
], dtype='float64')
m = vec_to_symmat_iso(v)
self.assertTrue(
np.allclose(
m[:, :, 0],
np.array([
[0, 0.70710678, 1.41421356, 2.12132034],
[0.70710678, 5, 4.24264069, 4.94974747],
[1.41421356, 4.24264069, 10, 7.77817459],
[2.12132034, 4.94974747, 7.77817459, 15]
])
)
)
self.assertTrue(
np.allclose(
m[:, :, 1],
np.array([
[0, 0.70710678, 1.41421356, 2.12132034],
[0.70710678, 5, 4.24264069, 4.94974747],
[1.41421356, 4.24264069, 10, 7.77817459],
[2.12132034, 4.94974747, 7.77817459, 15]
])
)
)
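# --- Illustrative sketch (plain numpy, not an aspire API call) ---
# Demonstrates the packing order asserted by the symmetric-matrix tests above:
# the lower triangle is read column by column with the row index (axis 0)
# varying fastest; symmat_to_vec_iso additionally scales the off-diagonal
# entries by sqrt(2).
if __name__ == '__main__':
    m = np.arange(16).reshape(4, 4, order='F')   # same matrix as in the tests above
    packed = np.array([m[i, j] for j in range(4) for i in range(j, 4)])
    assert np.array_equal(packed, np.array([0, 1, 2, 3, 5, 6, 7, 10, 11, 15]))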
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# convert transitscore txt grid file - transit_score_israelyyyymmdd.txt - to an array of transitscore from 1-100
# then convert the array to a raster
# output ts_rendered_israelyyyymmdd.png
#
print('----------------- generate raster from grid file--------------------------')
print('convert transitscore txt grid file - transit_score_all_israel.txt - to an array of transitscore from 1-100 ')
print(' then convert the array to a raster')
print('output ts_rendered_israelyyyymmdd.png')
#---------------------------------------------------------------------------------------------------
# array2raster with gdal from - https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
#
from osgeo import gdal
from osgeo import osr
import os
import numpy as np
import time
import csv
from pathlib import Path
cwd = Path.cwd()
#
print("Local current time :", time.asctime( time.localtime(time.time()) ))
#
def main(gtfsdate, gtfsdirbase, processedpath):
parent_path = cwd.parent / processedpath
gtfsdir = gtfsdirbase+gtfsdate
tsfilein = 'transit_score_'+gtfsdir+'.txt'
tsfileout = 'ts_unproj.tif'
ilminlat = 29.490000 # Israel min lat
ilminlon = 34.280000 # Israel min lon
lat100 = 0.0009000 # grid step of 100m
lon100 = 0.0010500 # grid step of 100m
#
# load file
#
# >>> load transitscore file
transitscore_list = []
max_grid_lat = 0
max_grid_lon = 0
maxts = 0
with open(parent_path / tsfilein, newline='', encoding="utf8") as ts_f:
readerts = csv.reader(ts_f)
headerts = next(readerts)
print(headerts)
for row in readerts:
#print row
grid_lat = int(row[0])
grid_lon = int(row[1])
ts = int(row[2])
max_grid_lat = max(max_grid_lat, grid_lat)
max_grid_lon = max(max_grid_lon, grid_lon)
maxts = max(maxts, ts)
transitscore_list.append([grid_lat, grid_lon, ts])
#print transitscore_list
print('transitscore_list loaded. ts count ', len(transitscore_list))
print('max_grid_lat, max_grid_lon : ', max_grid_lat, max_grid_lon)
print('max_lat, max_lon : ', ilminlat+(1+max_grid_lat)*lat100, ilminlon+(1+max_grid_lon)*lon100)
print('maxts:', maxts)
n = max_grid_lat+1
m = max_grid_lon+1
ts_grid = [0] * n
for i in range(n):
ts_grid[i] = [0] * m
for [grid_lat, grid_lon, ts] in transitscore_list :
ts_grid[grid_lat][grid_lon] = ts
#print ts_grid[:4][:4]
#---------------------------------------------------------------------------------------------
# convert array to raster and output file ts.tif
#
max_lat = ilminlat+(1+max_grid_lat)*lat100
xsize = lon100
ysize = -lat100
newRasterfn = str(parent_path / tsfileout)
array = np.array(ts_grid)
print('converting array - ')
print('newRasterfn,len(ts_grid),len(ts_grid[1]) : ',newRasterfn,len(ts_grid),len(ts_grid[1]))
reversed_arr = array[::-1] # reverse array so the tif looks like the array
cols = reversed_arr.shape[1]
rows = reversed_arr.shape[0]
originX = ilminlon
originY = max_lat
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Byte) # default GDT_Byte
geotransform = (originX, xsize, 0, originY, 0, ysize)
outRaster.SetGeoTransform(geotransform)
outband = outRaster.GetRasterBand(1)
outband.WriteArray(reversed_arr)
outRasterSRS = osr.SpatialReference()
outRasterSRS.SetWellKnownGeogCS('WGS84')
#outRasterSRS.ImportFromEPSG(4326)
geoproj = outRasterSRS.ExportToWkt()
outRaster.SetProjection(geoproj)
outband.FlushCache()
outRaster = None
print('geotransform, geoproj, xsize, ysize :')
print(geotransform)
print(geoproj)
print(xsize, ysize)
    print("Saving file: ", parent_path / tsfileout, " ...")
    print("Saved file: ", tsfileout)
#------------------------------------------------------------------
# get some info on file created ts_unproj.tif
import sys
print('----------------------------------os.system("gdalinfo ts_unproj.tif ")')
#os.system("gdalinfo ts_unproj.tif ")
#------------------------------------------------------------------------
# translate in order to scale back to 0-100
old_ds = gdal.Open(str(parent_path / 'ts_unproj.tif'))
if old_ds is None:
print('Unable to open INPUT.tif')
sys.exit(1)
print("[ RASTER BAND COUNT old_ds]: ", old_ds.RasterCount)
for band in range( old_ds.RasterCount ):
band += 1
print("[ GETTING BAND ]: ", band)
srcband = old_ds.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics( True, True )
if stats is None:
continue
print("[ STATS ] = Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
minvalue = str(int(stats[0]))
maxvalue = str(int(stats[1]))
old_ds = None
print('----------------------------------os.system("gdal_translate ts_unproj.tif ts_scaled.tif -scale 0 88 0 100")')
os.system("gdal_translate "+str(parent_path / "ts_unproj.tif")+" "+str(parent_path / "ts_scaled.tif")+" "+"-scale "+minvalue+" "+maxvalue+" 0 100") # scale
#----------------------------------------------------------------------------
# project ts_scaled.tif to 3857 and create ts.tif
srs = osr.SpatialReference()
#srs.SetWellKnownGeogCS('WGS84') # from sample in book
srs.ImportFromEPSG(3857)
old_ds = gdal.Open(str(parent_path / 'ts_scaled.tif'))
if old_ds is None:
print('Unable to open INPUT.tif')
sys.exit(1)
print("[ RASTER BAND COUNT old_ds]: ", old_ds.RasterCount)
for band in range( old_ds.RasterCount ):
band += 1
print("[ GETTING BAND ]: ", band)
srcband = old_ds.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics( True, True )
if stats is None:
continue
print("[ STATS ] = Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
vrt_ds = gdal.AutoCreateWarpedVRT(old_ds, None, srs.ExportToWkt(), gdal.GRA_Bilinear)
#vrt_ds = gdal.AutoCreateWarpedVRT(old_ds, None, srs.ExportToWkt(), gdal.GRA_NearestNeighbour)
print("[ RASTER BAND COUNT vrt_ds]: ", vrt_ds.RasterCount)
for band in range( vrt_ds.RasterCount ):
band += 1
print("[ GETTING BAND ]: ", band)
srcband = vrt_ds.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics( True, True )
if stats is None:
continue
print("[ STATS ] = Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
dst_ds = gdal.GetDriverByName('GTiff').CreateCopy(str(parent_path / 'ts.tif'), vrt_ds)
#Properly close the datasets to flush to disk
old_ds = None
vrt_ds = None
dst_ds = None
print('----------------------------------os.system("gdalinfo ts.tif ")')
#os.system("gdalinfo ts.tif ")
#---------------------------------------------------------------------------
# use GDAL CreateCopy to convert ts.tif >>> to >>> ts.png
#
print('use GDAL CreateCopy to convert ts.tif >>> to >>> ts.png')
#Open existing dataset
src_ds = gdal.Open( str(parent_path / "ts.tif") )
if src_ds is None:
print('Unable to open INPUT.tif')
sys.exit(1)
#Open output format driver, see gdal_translate --formats for list
format = "PNG"
driver = gdal.GetDriverByName( format )
#Output to new format
dst_ds = driver.CreateCopy( str(parent_path / "ts.png"), src_ds, 0 )
print(dst_ds.GetMetadata())
#Properly close the datasets to flush to disk
src_ds = None
dst_ds = None
print('----------------------------------os.system("gdalinfo ts.png")')
#os.system("gdalinfo ts.png")
#---------------------------------------------------------------------------
# use GDAL gdaldem to convert ts.tif >>> to >>> ts_rendered.tif
#
print('use GDAL gdaldem to convert ts.tif >>> to >>> ts_rendered.tif')
os.system("gdaldem color-relief "+str(parent_path / "ts.tif")+" "+str(parent_path / "rgb_color.txt")+" "+str(parent_path / "ts_rendered.tif"))
print('----------------------------------os.system("gdalinfo ts_rendered.tif")')
#os.system("gdalinfo ts_rendered.tif")
#---------------------------------------------------------------------------
# use GDAL CreateCopy to convert ts_rendered.tif >>> to >>> ts_rendered.png
#
print('use GDAL CreateCopy to convert ts_rendered.tif >>> to >>> ts_rendered.png')
#Open existing dataset
src_ds = gdal.Open( str(parent_path / "ts_rendered.tif") )
if src_ds is None:
print('Unable to open INPUT.tif')
sys.exit(1)
#Open output format driver, see gdal_translate --formats for list
format = "PNG"
driver = gdal.GetDriverByName( format )
#Output to new format
pngfileout = 'ts_rendered_'+gtfsdir+'.png'
dst_ds = driver.CreateCopy( str(parent_path / pngfileout), src_ds, 0 )
#Properly close the datasets to flush to disk
dst_ds = None
src_ds = None
print('----------------------------------os.system("gdalinfo ts_rendered.png")')
os.system('gdalinfo '+str(parent_path / pngfileout))
print('done')
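# --- Illustrative usage (hypothetical arguments) ---
# The GTFS date, directory prefix and processed-data folder below are
# placeholders; substitute the values used by the surrounding pipeline.
# if __name__ == '__main__':
#     main(gtfsdate='20190101', gtfsdirbase='israel', processedpath='processed')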
|
|
import numpy as np
import matplotlib.pyplot as plt
from transforms3d.euler import mat2euler
from scipy.linalg import expm
def load_data(file_name):
'''
function to read visual features, IMU measurements and calibration parameters
Input:
file_name: the input data file. Should look like "XXX_sync_KLT.npz"
Output:
t: time stamp
with shape 1 * N_t
features: visual feature point coordinates in stereo images,
with shape 4 * M * N_t, where M is number of features
linear_velocity: IMU measurements in IMU frame
with shape 3 * N_t
rotational_velocity: IMU measurements in IMU frame
with shape 3 * N_t
K: (left)camera intrinsic matrix
[fx 0 cx
0 fy cy
0 0 1]
with shape 3*3
b: stereo camera baseline
with shape 1
cam_T_imu: extrinsic matrix from IMU to (left)camera, in SE(3).
close to
[ 0 -1 0 t1
0 0 -1 t2
1 0 0 t3
0 0 0 1]
with shape 4*4
'''
with np.load(file_name) as data:
t = data["time_stamps"] # time_stamps
features = data["features"] # 4 x num_features : pixel coordinates of features
linear_velocity = data["linear_velocity"] # linear velocity measured in the body frame
rotational_velocity = data["rotational_velocity"] # rotational velocity measured in the body frame
K = data["K"] # intrinsic calibration matrix
b = data["b"] # baseline
cam_T_imu = data["cam_T_imu"] # Transformation from imu to camera frame
return t, features, linear_velocity, rotational_velocity, K, b, cam_T_imu
def visualize_trajectory_2d(pose, landmarks, better_pose, better_landmarks, timestamp, path_name="Unknown", show_ori=False, show_grid=False, savefig=False):
'''
function to visualize the trajectory in 2D
Input:
pose: 4*4*N matrix representing the camera pose,
where N is the number of pose, and each
4*4 matrix is in SE(3)
'''
fig,ax = plt.subplots(figsize=(5, 5))
n_pose = pose.shape[2]
ax.plot(landmarks[0, :], landmarks[1, :], 'g.', markersize=1.5, label='landmarks')
ax.plot(better_landmarks[0, :], better_landmarks[1, :], 'c.', markersize=1.5, label='landmarks_VI')
ax.plot(pose[0, 3, :], pose[1, 3, :], 'r-', markersize=6, label=path_name)
ax.plot(better_pose[0, 3, :], better_pose[1, 3, :], 'b-', markersize=6, label=path_name + "_VI")
ax.scatter(pose[0, 3, 0], pose[1, 3, 0], marker='s', label="start")
ax.scatter(pose[0, 3, -1], pose[1, 3, -1], marker='o', label="end")
if show_ori:
select_ori_index = list(range(0, n_pose, max(int(n_pose / 50), 1)))
yaw_list = []
for i in select_ori_index:
_, _, yaw = mat2euler(pose[:3, :3, i])
yaw_list.append(yaw)
dx = np.cos(yaw_list)
dy = np.sin(yaw_list)
dx,dy = [dx, dy] / np.sqrt(dx**2 + dy**2)
ax.quiver(pose[0, 3, select_ori_index], pose[1, 3, select_ori_index], dx, dy,\
color="b", units="xy", width=1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('timestamp ' + timestamp)
ax.axis('equal')
ax.grid(show_grid)
ax.legend()
if savefig:
fig.savefig("d" + path_name + "t" + timestamp, dpi = 300)
plt.show(block=True)
return fig, ax
# form the skew-symmetric matrix from a given vector x
def hat_map_3(x):
hat_map = np.array([[ 0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]])
return hat_map
def hat_map_6(u):
    # u stacks the translational part p and the rotational part theta; theta is kept
    # 1-D so hat_map_3 returns a proper 3x3 block and np.block yields a 4x4 matrix
    theta = u[3:]
    p = u[:3].reshape(3, 1)
    hat_map = np.block([[hat_map_3(theta), -p],
                        [np.zeros((1, 4))]])
    return hat_map
def projection(q):
return q / q[2]
def projection_derivative(q):
derivative = np.array([[1, 0, -q[0]/q[2], 0],
[0, 1, -q[1]/q[2], 0],
[0, 0, 0, 0],
[0, 0, -q[3]/q[2], 1]])
return derivative / q[2]
# K is the calibration matrix, b is the baseline
def stereo_camera_model(K, b):
M = np.array([[K[0, 0], 0, K[0, 2], 0],
[ 0, K[1, 1], K[1, 2], 0],
[K[0, 0], 0, K[0, 2], -K[0, 0] * b],
[ 0, K[1, 1], K[1, 2], 0]])
return M
# Converts car's current inverse pose (U) to world-frame
def world_T_imu(mean_pose):
R_T = np.transpose(mean_pose[:3, :3])
p = mean_pose[:3, 3].reshape(3, 1)
U_inv = np.vstack((np.hstack((R_T, -np.dot(R_T, p))), np.array([0, 0, 0, 1])))
return U_inv
def EKF_inertial_prediction(Car, v, omega, tau, weight_v = 0.00001, weight_omega = 0.0001):
# covariance for movement noise
W = np.block([[weight_v * np.eye(3), np.zeros((3,3))],
[ np.zeros((3, 3)), weight_omega * np.eye(3)]])
tau = -(tau)
u_hat = np.vstack((np.hstack((hat_map_3(omega), v.reshape(3, 1))), np.zeros((1, 4))))
u_curlyhat = np.block([[ hat_map_3(omega), hat_map_3(v)],
[ np.zeros((3, 3)), hat_map_3(omega)]])
Car['mean'] = expm(tau * u_hat) @ Car['mean']
Car['covariance'] = expm(tau * u_curlyhat) @ Car['covariance'] @ np.transpose(expm(tau * u_curlyhat)) + W
def EKF_visual_update(Car, Landmarks, curr_features, K, b, cam_T_imu, weight = 3500):
# covariance for measurement noise
V = weight * np.eye(4)
P = np.eye(3, 4)
M = stereo_camera_model(K, b)
for i in range(curr_features.shape[1]):
z = curr_features[:, i][:]
# only operate for landmarks present in current timestep
if (np.all(z == -1)):
continue
# else if we make it here, that means the current landmark is present in the camera frame.
# if, in the previous timestep, the landmark wasn't present, initialize the landmark now
# using the car's pose
if (np.all(np.isnan(Landmarks['mean'][:, i]))):
d = (z[0] - z[2])
Z_0 = (K[0, 0] * b) / d
world_T_cam = world_T_imu(Car['mean']) @ np.linalg.inv(cam_T_imu)
camera_frame_coords = np.hstack((Z_0 * np.linalg.inv(K) @ np.hstack((z[:2], 1)), 1))
Landmarks['mean'][:, i] = world_T_cam @ camera_frame_coords
continue
# else if landmark is present in the current timestamp, and has been seen before
# create predicted z_tilde from previous z (in camera-frame coordinates)
cam_T_world = cam_T_imu @ Car['mean']
curr_landmark = cam_T_world @ Landmarks['mean'][:, i]
z_tilde = M @ projection(curr_landmark) # remove depth information via projection, and project to pixels
# form H; the Jacobian of z_tilde w.r.t. current feature m evaluated at car's current position
H = M @ projection_derivative(curr_landmark) @ cam_T_world @ P.T
# perform the EKF update
KG = Landmarks['covariance'][:, :, i] @ H.T @ np.linalg.inv(H @ Landmarks['covariance'][:, :, i] @ H.T + V)
Landmarks['mean'][:, i] = Landmarks['mean'][:, i] + P.T @ KG @ (z - z_tilde)
Landmarks['covariance'][:, :, i] = (np.eye(3) - KG @ H) @ Landmarks['covariance'][:, :, i]
def EKF_visual_inertial_prediction(Car, v, omega, tau, weight_v = 0.00001, weight_omega = 0.0001):
# covariance for movement noise
W = np.block([[weight_v * np.eye(3), np.zeros((3,3))],
[ np.zeros((3, 3)), weight_omega * np.eye(3)]])
tau = -(tau)
u_hat = np.vstack((np.hstack((hat_map_3(omega), v.reshape(3, 1))), np.zeros((1, 4))))
u_curlyhat = np.block([[hat_map_3(omega), hat_map_3(v)],
[np.zeros((3, 3)), hat_map_3(omega)]])
Car['mean_vi'] = expm(tau * u_hat) @ Car['mean_vi']
Car['covariance_vi'] = expm(tau * u_curlyhat) @ Car['covariance_vi'] @ np.transpose(expm(tau * u_curlyhat)) + W
def EKF_visual_inertial_update(Car, Landmarks, curr_features, K, b, cam_T_imu, weight = 3500):
# covariance for measurement noise
V = weight * np.eye(4)
P = np.eye(3, 4)
M = stereo_camera_model(K, b)
for i in range(curr_features.shape[1]):
z = curr_features[:, i][:]
# only operate for landmarks present in current timestep
if (np.all(z == -1)):
continue
# else if we make it here, that means the current landmark is present in the camera frame.
# if, in the previous timestep, the landmark wasn't present, initialize the landmark now
# using the car's pose
if (np.all(np.isnan(Landmarks['mean_vi'][:, i]))):
d = (z[0] - z[2])
Z_0 = (K[0, 0] * b) / d
world_T_cam = world_T_imu(Car['mean_vi']) @ np.linalg.inv(cam_T_imu)
camera_frame_coords = np.hstack((Z_0 * np.linalg.inv(K) @ np.hstack((z[:2], 1)), 1))
Landmarks['mean_vi'][:, i] = world_T_cam @ camera_frame_coords
continue
# else if landmark is present in the current timestamp, and has been seen before
# create predicted z_tilde from previous z (in camera-frame coordinates)
cam_T_world = cam_T_imu @ Car['mean_vi']
curr_landmark = cam_T_world @ Landmarks['mean_vi'][:, i]
z_tilde = M @ projection(curr_landmark) # remove depth information via projection, and project to pixels
# form H; the Jacobian of z_tilde w.r.t. current feature m evaluated at car's current position
H = M @ projection_derivative(curr_landmark) @ cam_T_world @ P.T
# perform the visual EKF update
KG = Landmarks['covariance_vi'][:, :, i] @ H.T @ np.linalg.inv(H @ Landmarks['covariance_vi'][:, :, i] @ H.T + V)
Landmarks['mean_vi'][:, i] = Landmarks['mean_vi'][:, i] + P.T @ KG @ (z - z_tilde)
Landmarks['covariance_vi'][:, :, i] = (np.eye(3) - KG @ H) @ Landmarks['covariance_vi'][:, :, i]
curr_landmark = Car['mean_vi'] @ Landmarks['mean_vi'][:, i]
# form H; the Jacobian of z_tilde w.r.t. current car's inverse pose evaluated at car's current position
H = M @ projection_derivative(cam_T_imu @ curr_landmark) @ cam_T_imu @ np.block([[np.eye(3), -hat_map_3(curr_landmark[:3])],
[np.zeros((1, 6))]])
# perform the inertial EKF update
KG = Car['covariance_vi'] @ H.T @ np.linalg.inv(H @ Car['covariance_vi'] @ H.T + V)
Car['mean_vi'] = expm(hat_map_6(KG @ (z - z_tilde))) @ Car['mean_vi']
Car['covariance_vi'] = (np.eye(6) - KG @ H) @ Car['covariance_vi']
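# --- Illustrative sketch (hypothetical calibration values) ---
# Small self-check of the stereo observation model used in the EKF updates
# above: a homogeneous point in the left-camera frame is mapped through
# projection() and stereo_camera_model() to the pixel observation
# z = [u_left, v_left, u_right, v_right]. K and b below are made-up values,
# not the calibration shipped with any dataset.
if __name__ == "__main__":
    K_demo = np.array([[500.0, 0.0, 320.0],
                       [0.0, 500.0, 240.0],
                       [0.0, 0.0, 1.0]])
    b_demo = 0.5
    point_cam = np.array([1.0, 0.5, 10.0, 1.0])   # homogeneous point 10 m in front of the camera
    z_demo = stereo_camera_model(K_demo, b_demo) @ projection(point_cam)
    print("predicted stereo observation [uL, vL, uR, vR]:", z_demo)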
|
|
from pymysql.converters import encoders as convertors
import numpy as np
""" These functions extend the pymysql encoders with numpy data types """
def convert_numpy_int(value, mapping=None):
return str(value)
def convert_numpy_float(value, mapping=None):
s = repr(value)
if s in ('inf', 'nan'):
raise ValueError("%s can not be used with MySQL" % s)
if 'e' not in s:
s += 'e0'
return s
convertors[np.int8] = convert_numpy_int
convertors[np.int16] = convert_numpy_int
convertors[np.int32] = convert_numpy_int
convertors[np.int64] = convert_numpy_int
convertors[np.uint8] = convert_numpy_int
convertors[np.uint16] = convert_numpy_int
convertors[np.uint32] = convert_numpy_int
convertors[np.uint64] = convert_numpy_int
convertors[np.float16] = convert_numpy_float
convertors[np.float32] = convert_numpy_float
convertors[np.float64] = convert_numpy_float
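# --- Illustrative usage (assumption: behaviour depends on the pymysql version) ---
# The module-level `encoders` dict may be copied into a connection at connect()
# time; if the entries registered above are not picked up automatically, they
# can be passed explicitly through the `conv` argument. Host/user/database
# values below are placeholders.
# import pymysql
# conn = pymysql.connect(host="localhost", user="user", password="pw", database="db",
#                        conv={**pymysql.converters.conversions, **convertors})
# conn.cursor().execute("INSERT INTO t (x) VALUES (%s)", (np.int64(5),))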
|
|
import cv2,selectivesearch
import numpy as np
import Intersection as union
# run selective search once up front; the display loop below only redraws the result
image = cv2.imread('image/dog.337.jpg')
img_lbl, regions = selectivesearch.selective_search(image)
candidates = set()
for r in regions:
    candidates.add(r['rect'])
while True:
    frame = image.copy()
    for x, y, w, h in candidates:
        iou = union.get_union([71, 63, 189, 199], [x, y, x + w, y + h])
        if iou > .7:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 233, 12), 1)
            cv2.putText(frame, 'Dog', (x - 2, y - 2), 1, 1, (1, 22, 121), 1)
    cv2.imshow('', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
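# --- Illustrative sketch (not the local Intersection module used above) ---
# A standard intersection-over-union computation for boxes given as
# [x1, y1, x2, y2], shown only to make the iou > .7 threshold above concrete;
# Intersection.get_union may differ in details.
def iou_example(box_a, box_b):
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / float(area_a + area_b - inter)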
|
|
#!/usr/bin/env python
import argparse
import os
import scipy.constants as sc
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("dir", help="the file name to read")
args = parser.parse_args()
band_raw_dir = os.getenv("HOME") + "/mlp-Fe/input/Fe/band_data/raw_data/"
with open(band_raw_dir + args.dir) as f:
s = [list(map(float, line.split())) for line in f]
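    # convert the second column from meV to THz: E[meV] * 1e-3 * e gives joules, dividing by h gives Hz, and /1e12 gives THz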
s_y = [[line[0], line[1] * 1e-3 * sc.e / (sc.h * 1e12)] for line in s]
if args.dir == "gamma_x.txt":
s_x = [[line[0] * 0.28569166, line[1]] for line in s_y]
elif args.dir == "x_gamma.txt":
s_x = [[0.40402902 - line[0] * 0.40402902, line[1]] for line in s_y]
elif args.dir == "gamma_l.txt":
s_x = [
[0.40402902 + line[0] * 2 * (0.65144526 - 0.40402902), line[1]]
for line in s_y
]
    else:
        # none of the supported band files matched; s_x would be undefined below, so exit with an error
        parser.error("not supported file format: " + args.dir)
for line in s_x:
print(line[0], line[1])
|
|
import numpy as np
import codecs, json
from json import JSONEncoder
class Numpy2JSONEncoder(json.JSONEncoder):
    '''
    Converts numpy-format TensorFlow model weights into JSON so they can be
    sent to the server for federated averaging.
    '''
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
        else:
            return super(Numpy2JSONEncoder, self).default(obj)
def json2NumpyWeights(data):
    '''
    Decodes JSON-format model weights back into the numpy format TensorFlow
    expects, so that model.set_weights(...) can be used to set the model weights.
    '''
decodedGlobalWeights = list()
decodedGlobalWeights = json.loads(data)
FinalWeight= list()
for i in range(len(decodedGlobalWeights)):
FinalWeight.append(np.array(decodedGlobalWeights[i]))
return FinalWeight
def EagerTensor2Numpy(data):
'''
This Function is to convert Eager Tensor to Numpy ndarray
After that this is used for JSON serializable to send it to the Global and local Server
'''
npWeightsMat = list()
for i in range(len(data)):
val = data[i].numpy()
npWeightsMat.append(val)
return npWeightsMat
def global_weights_mul_lr(data, learning_rate):
    '''
    Multiplies the averaged weights by the learning rate after FedAvg of the local models' weights.
    '''
tmp_global_weights = list()
for i in range(len(data)):
val = data[i]*learning_rate
tmp_global_weights.append(val)
return tmp_global_weights
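# --- Illustrative usage (sketch; `model` is assumed to be a compiled tf.keras model defined elsewhere) ---
# payload = json.dumps(EagerTensor2Numpy(model.weights), cls=Numpy2JSONEncoder)
# model.set_weights(json2NumpyWeights(payload))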
|
|
import numpy as N
import os
def edog_abc_ext2(
p,
channel_dim,
patch_dim,
x):
xc = x % channel_dim
px = xc % patch_dim
    py = xc // patch_dim  # integer division keeps py an index, matching the original Python 2 behaviour
'''params: 0: cmu_x 1: cmu_y
2: csigma_x 3: csigma_y 4: ctheta
5: ccdir_a 6: ccdir_b 7: ccdir_c
8: smu_x 9: smu_y
10: ssigma_x 11: ssigma_y 12: stheta
13: scdir_a 14: scdir_b 15: scdir_c
16: k_s (k_c is implicitly fixed as 1)
'''
def gauss2d(mux, muy, sigx, sigy, theta, scale=1):
sigma_x = sigx**2*scale
sigma_y = sigy**2*scale
a = N.cos(theta)**2/2/sigma_x + N.sin(theta)**2/2/sigma_y
b = -N.sin(2*theta)/4/sigma_x + N.sin(2*theta)/4/sigma_y
c = N.sin(theta)**2/2/sigma_x + N.cos(theta)**2/2/sigma_y
return N.exp( - ( a *(px-mux)**2 + 2*b*(px-mux)*(py-muy) + c*(py-muy)**2 ) )
dog_c = gauss2d(p[0], p[1], p[2], p[3], p[4])
dog_s = p[16] * gauss2d(p[8], p[9], p[10], p[11], p[12])
ret_A = p[5] * dog_c[0:channel_dim] - p[13] * dog_s[0:channel_dim]
ret_B = p[6] * dog_c[channel_dim:2*channel_dim] - p[14] * dog_s[channel_dim:2*channel_dim]
ret_C = p[7] * dog_c[2*channel_dim:3*channel_dim] - p[15] * dog_s[2*channel_dim:3*channel_dim]
ret = N.concatenate([ ret_A, ret_B, ret_C ])
return ret
def reconstruct_ext2(
p,
channel_dim,
patch_dim,
shape):
return edog_abc_ext2(
p,
channel_dim,
patch_dim,
*N.indices(shape))
def permutate_mu_ext2(
p,
patch_dim,
channel_dim,
rf
):
from . import arg_absmax_rfvector
center_point = N.mean(
arg_absmax_rfvector(rf, patch_dim, channel_dim),
axis=0)
'''RF center point noise'''
noise_c = (N.random.random_sample(2)-.5)*2 * patch_dim/8
point_c = center_point + noise_c
noise_s = (N.random.random_sample(2)-.5)*2 * patch_dim/8
point_s = center_point + noise_s
p[0] = point_c[0]
p[1] = point_c[1]
p[8] = point_s[0]
p[9] = point_s[1]
return p
def bestfit_ext2(
rf,
channel_dim,
patch_dim,
num_attempts=10,
maxiter=10000,
num_pool_threads=4,
min_error_accept=.5
):
'''params: 0: cmu_x 1: cmu_y
2: csigma_x 3: csigma_y 4: ctheta
5: ccdir_a 6: ccdir_b 7: ccdir_c
8: smu_x 9: smu_y
10: ssigma_x 11: ssigma_y 12: stheta
13: scdir_a 14: scdir_b 15: scdir_c
16: k_s (k_c is implicitly fixed as 1)
'''
constraints = (
{'type': 'ineq', 'fun': lambda x: x[0]}, # cx > 0
{'type': 'ineq', 'fun': lambda x: patch_dim-1 - x[0]}, # cx < patch_dim-1
{'type': 'ineq', 'fun': lambda x: x[1]}, # cy > 0
{'type': 'ineq', 'fun': lambda x: patch_dim-1 - x[1]}, # cy < patch_dim-1
{'type': 'ineq', 'fun': lambda x: x[8]}, # cx > 0
{'type': 'ineq', 'fun': lambda x: patch_dim-1 - x[8]}, # cx < patch_dim-1
{'type': 'ineq', 'fun': lambda x: x[9]}, # cy > 0
{'type': 'ineq', 'fun': lambda x: patch_dim-1 - x[9]}, # cy < patch_dim-1
{'type': 'ineq', 'fun': lambda x: x[2]/x[3] + .6},
{'type': 'ineq', 'fun': lambda x: 1.4 - x[2]/x[3]},
{'type': 'ineq', 'fun': lambda x: x[3]/x[2] + .6},
{'type': 'ineq', 'fun': lambda x: 1.4 - x[3]/x[2]},
{'type': 'ineq', 'fun': lambda x: x[10]/x[11] + .6},
{'type': 'ineq', 'fun': lambda x: 1.4 - x[10]/x[11]},
{'type': 'ineq', 'fun': lambda x: x[11]/x[10] + .6},
{'type': 'ineq', 'fun': lambda x: 1.4 - x[11]/x[10]},
{'type': 'ineq', 'fun': lambda x: .25 - abs(x[0] - x[8])},
{'type': 'ineq', 'fun': lambda x: .25 - abs(x[1] - x[9])},
{'type': 'ineq', 'fun': lambda x: x[2]-.17},
{'type': 'ineq', 'fun': lambda x: x[3]-.17},
{'type': 'ineq', 'fun': lambda x: 1.5-abs(x[2]-x[10])},
{'type': 'ineq', 'fun': lambda x: 1.5-abs(x[3]-x[11])},
{'type': 'ineq', 'fun': lambda x: x[10]-x[2]-.03},
{'type': 'ineq', 'fun': lambda x: x[11]-x[3]-.03},
)
bounds_p = []
k_s = -.5 if N.abs(N.min(rf)) > N.abs(N.max(rf)) else .5
init_p = [patch_dim/2, patch_dim/2]
init_p += [patch_dim/2, patch_dim/2, N.pi/4.] + [0,0,0]
init_p += [patch_dim/2, patch_dim/2]
init_p += [patch_dim/2, patch_dim/2, N.pi/4.] + [0,0,0]
init_p += [k_s]
def __single_run(init_p):
min_y, min_p = fit_slsqp(
edog_abc_ext2,
rf,
init_p,
bounds_p,
channel_dim,
patch_dim,
maxiter=maxiter,
constraints=constraints)
return (min_y, min_p)
'''create list of inital model param'''
runs_init_p = [permutate_mu_ext2(
init_p, patch_dim, channel_dim, rf) for x in range(num_attempts)]
'''run the list in parallel'''
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(num_pool_threads)
runs = pool.map(__single_run, runs_init_p)
pool.close()
pool.join()
'''choose best (min error) run'''
import sys
min_run = (sys.float_info.max,[])
for run in runs:
''' - has to be smallest error yet
- error must be below 'min_error_accept'!
- mu xy must be inside the patch!'''
if run[0] < min_run[0] and\
run[0] < min_error_accept and\
run[1][0] >= 0 and run[1][0] < patch_dim and\
run[1][1] >= 0 and run[1][1] < patch_dim:
min_run = run
return min_run
def print_params_ext2(
ext2_params
):
p_names = ['cmu_x','cmu_y',
'csigma_x','csigma_y','ctheta',
'ccdir_a','ccdir_b','ccdir_c',
'smu_x','smu_y',
'ssigma_x','ssigma_y','stheta',
'scdir_a','scdir_b','scdir_c',
'k_s']
    print('ext2_params:')
    for idx, p in enumerate(ext2_params):
        print(p_names[idx], ' \t', N.round(p, 2))
    print()
def best_fit_wrapper(
rf,
channel_dim,
patch_dim,
num_fit_attempts=10,
maxiter=1000,
num_pool_threads=3,
debug_idx=None,
min_error_accept=.5
):
from . import wrapper_value_of_rfvector_at
from . import transpose_color_zero_to_one
from scae import normalize_color
    print('__fit #rf', debug_idx)
result = []
num_attempts_succ_run = 6
while num_attempts_succ_run > 0:
result = bestfit_ext2(
rf,
channel_dim,
patch_dim,
num_attempts=num_fit_attempts,
maxiter=maxiter,
num_pool_threads=num_pool_threads,
min_error_accept=min_error_accept
)
'''if a successful fit has been made...'''
if result[1] != []:
break
else:
            print('starting over', num_attempts_succ_run, 'tries left.')
            min_error_accept += .02
            print('raising min error to', min_error_accept)
num_attempts_succ_run -= 1
'''check if after n attempts resulted in successful fit'''
if result[1] != []:
        print('__done #rf', debug_idx, 'e', N.round(result[0], 2))
        print_params_ext2(result[1])
'''retrieve center color'''
rf_c_val = wrapper_value_of_rfvector_at(
rf,
result[1][0], result[1][1],
patch_dim,
channel_dim)
return result[1], rf_c_val
else:
        print('__NOT done #rf')
return [], []
def f_to_min(
p,
f,
rf,
channel_dim,
patch_dim
):
sqerr = (rf - f(p, channel_dim, patch_dim, *N.indices(rf.shape)))**2
return N.sum(sqerr)
from scipy import optimize
''' SLSQP Sequential Least Squares Programming
http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html'''
def fit_slsqp(
f,
rf,
init_p,
bounds_p,
channel_dim,
patch_dim,
maxiter=10000,
constraints=()
):
res = optimize.minimize(f_to_min, init_p,
args=(f, rf, channel_dim, patch_dim),
method='SLSQP', options={'maxiter':maxiter},
bounds=bounds_p, constraints=constraints)
return res.fun, res.x
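# --- Illustrative sketch (toy problem, not part of the RF fitting pipeline) ---
# Minimal demonstration of the fit_slsqp wrapper above on a one-parameter toy
# target: fit the height of a flat profile to a constant signal. The
# channel_dim/patch_dim arguments are passed through but unused by the toy model.
if __name__ == '__main__':
    def toy_model(p, channel_dim, patch_dim, x):
        return p[0] * N.ones_like(x, dtype=float)
    toy_rf = N.full(8, 3.0)
    err, params = fit_slsqp(toy_model, toy_rf, [0.0], [(-10.0, 10.0)], 1, 8)
    print('toy fit: error', N.round(err, 4), 'fitted height', N.round(params[0], 2))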
|
|
import numpy as np
import numpy.random as npr
def OPV(S0, K, r, T, option_type):
M = 50
I = 10000
sigma = 0.25
def standard_normal_dist(M, I, anti_paths=True, mo_match=True):
if anti_paths is True:
sn = npr.standard_normal((M + 1, int(I / 2)))
sn = np.concatenate((sn, -sn), axis=1)
else:
sn = npr.standard_normal((M + 1, I))
if mo_match is True:
sn = (sn - sn.mean()) / sn.std()
return sn
def monte_carlo_brownian_motion(S0, K, r, T, option_type):
try:
S0 = float(S0)
except:
return 'Error: Initial Price needs to be a number!'
try:
K = float(K)
except:
return 'Error: Strike Price needs to be a number!'
try:
if '%' in r:
r = float(r[:r.index('%')]) / 100
else:
r = float(r)
except:
return 'Error: Risk Free Interest Rate needs to be a number!'
try:
T = float(T)
except:
return 'Error: Time Horizon needs to be a number!'
if option_type == 'Call Option':
option_type = 'Call'
else:
option_type = 'Put'
dt = T / M
S = np.zeros((M + 1, I))
S[0] = S0
sn = standard_normal_dist(M, I)
for t in range(1, M + 1):
S[t] = S[t - 1] * np.exp((r - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * sn[t])
if option_type == 'Call':
hT = np.maximum(S[-1] - K, 0)
else:
hT = np.maximum(K - S[-1], 0)
C0 = np.exp(-r * T) * np.mean(hT)
Title = 'Option Valuation with the following parameters:\n\n'
S0_desc = 'Initial Price: {}\n'.format(str(S0))
K_desc = 'Strike Price: {}\n'.format(str(K))
r_desc = 'Risk Free Interest Rate: {}\n'.format(str(format(r, '.2%')))
T_desc = 'Time Horizon: {} year\n'.format(str(T))
option_type_desc = 'Option Type: {} option \n\n'.format(option_type)
valuation_desc = 'Stochastic differential equation:\ndS(t) = rS(t)dt + σS(t)dZ(t)\n\n'
distribution_desc = 'Distribution method:\nStandard Normal Distribution\n\n'
valuation_res = '{} option value: {}'.format(option_type, str(C0.round(2)))
return '{}{}{}{}{}{}{}{}{}'.format(Title, S0_desc, K_desc, r_desc, T_desc, option_type_desc,
valuation_desc, distribution_desc, valuation_res)
return monte_carlo_brownian_motion(S0, K, r, T, option_type)
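# --- Illustrative usage (hypothetical inputs) ---
# Example invocation with made-up parameters: spot 100, strike 105, 5% risk-free
# rate, one-year horizon, European call. Left commented out so importing this
# module has no side effects.
# print(OPV(100, 105, '5%', 1, 'Call Option'))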
|
|
import os
import sqlite3
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from flask import Flask
from flask import render_template
from flask import Response
import io
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
con = sqlite3.connect("task.sqlite")
cursor = con.cursor()
cursor.execute("drop table if exists works")
cursor.execute("create table works("
"ID integer primary key AUTOINCREMENT,"
"salary integer,"
"educationType text,"
"jobTitle text,"
"qualification text,"
"gender text,"
"dateModify text,"
"skills text,"
"otherInfo text"
");")
con.commit()
data = pd.read_csv("works.csv")
data.to_sql('works', con, if_exists="append", index = None)
con.commit()
a = cursor.execute("select substring(dateModify, 1, 4) as 'year', count(*) as 'c' from works group by year").fetchall()
labels = list(map(lambda x: x[0], a))
values = list(map(lambda x: x[1], a))
mensCount = cursor.execute("select count(*) from works where gender = 'Мужской'").fetchall()[0][0]
womensCount = cursor.execute("select count(*) from works where gender = 'Женский'").fetchall()[0][0]
# 1st plot (kept commented out below)
'''cursor.execute("select salary from works where gender = 'Мужской'")
men = list(map(lambda row: row[0],cursor.fetchall()))
cursor.execute("select salary from works where gender = 'Женский'")
women = [row[0] for row in cursor.fetchall()]
pers = np.linspace(0.1, 1, 10)
a = np.quantile(men, pers)
b = np.quantile(women, pers)
plt.plot(pers, a, color="b")
plt.plot(pers, b, color="r")
plt.xlabel("Перцентили")
plt.ylabel("Зарплата")
plt.show()'''
# 2nd plot (kept commented out below)
'''cursor.execute("select educationType, avg(salary) as 'av' from works where gender='Мужской' group by educationType")
men = list(map(lambda row: row[1],cursor.fetchall()))[1:5]
print(men)
cursor.execute("select educationType, avg(salary) as 'av' from works where gender='Женский' group by educationType")
women = list(map(lambda row: row[1],cursor.fetchall()))[1:5]
print(women)
index = np.arange(4)
bw = 0.3
plt.bar(index, men, bw, color='b')
plt.bar(index+bw, women, bw, color='r')
plt.xticks(index+0.5*bw,['Высшее','Неоконченное высшее','Среднее','Проффессиональное'])
plt.show()'''
app = Flask(__name__, static_folder="C:\\Users\\Иван\\Documents\\pRep\\web")
@app.route("/")
def cv_index():
return render_template('myPage.html', count=sum(values), mensCount=mensCount, womensCount=womensCount)
@app.route("/dashboard")
def dashboard():
return render_template('d3.html',
cvs=get_cv(), labels=labels[1:4], values=values[1:4],
)
def dict_factory(cursor, row):
    # row-factory wrapper that converts a fetched row into a dict
    # (taken from the sqlite3 documentation)
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def get_cv():
con = sqlite3.connect('task.sqlite')
con.row_factory = dict_factory
res = list(con.execute('select * from works limit 20'))
con.close()
return res
@app.route('/plot.png')
def plot_png():
fig = create_figure()
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
def create_figure():
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
xs = [2000, 2001, 2002]
ys = [300, 50, 70]
axis.plot(xs, ys)
return fig
app.run()
|
|
"""
Adapted from OpenAI Baselines
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
from collections import deque
import numpy as np
import gym
import copy
import cv2
cv2.ocl.setUseOpenCL(False)
def make_env(env, stack_frames = True, episodic_life = True, clip_rewards = False, scale = False):
if episodic_life:
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max = 30)
env = MaxAndSkipEnv(env, skip = 4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WrapFrame(env)
if stack_frames:
env = FrameStack(env, 4)
if clip_rewards:
env = ClipRewardEnv(env)
return env
class RewardScaler(gym.RewardWrapper):
def reward(self, reward):
return reward * 0.1
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
        It exists purely to optimize memory usage which can be huge for DQN's 1M-frame replay buffers.
This object should only be converted to numpy array before being passed to the model."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis = 2)
self._frames = None
return self._out
def __array__(self, dtype = None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
""" Stack k last frames.
Returns lazy array, which is much more memory efficient. """
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen = k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(low = 0, high = 255, shape = (shp[0], shp[1], shp[2] * k),
dtype = env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class WrapFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Wrap frames to 84 x 84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = gym.spaces.Box(low = 0, high = 255,
shape = (self.height, self.width, 1), dtype = np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation = cv2.INTER_AREA)
return frame[:, :, None]
class FireResetEnv(gym.Wrapper):
def __init__(self, env = None):
""" For environments where the user need to press FIRE for the game to start."""
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def step(self, action):
return self.env.step(action)
def reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env = None):
""" Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
super(EpisodicLifeEnv, self).__init__(env)
self.lives = 0
self.was_real_done = True
self.was_real_reset = False
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self):
""" Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
self.was_real_reset = True
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.was_real_reset = False
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env = None, skip = 4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen = 2)
self._skip = skip
def step(self, action):
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis = 0)
return max_frame, total_reward, done, info
def reset(self):
""" Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class NoopResetEnv(gym.Wrapper):
def __init__(self, env = None, noop_max = 30):
""" Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
super(NoopResetEnv, self).__init__(env)
self.noop_max = noop_max
self.override_num_noops = None
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def step(self, action):
return self.env.step(action)
def reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = np.random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(0)
if done:
obs = self.env.reset()
return obs
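# --- Illustrative usage (the environment id is an assumption) ---
# Typical wrapping of an ALE environment with the helper above; "PongNoFrameskip-v4"
# assumes the gym Atari extras are installed.
# env = make_env(gym.make("PongNoFrameskip-v4"))
# obs = env.reset()   # LazyFrames wrapping a stack of four 84x84 grayscale frames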
|
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
df1=pd.read_csv('stopwords')
b=[]
def func(df1):
for i in df1['words']:
b.append(i)
func(df1)
import string
df = pd.read_csv('emails.csv')
def func(a):
    # keep only the text after the first colon and strip leading whitespace
    return a.split(':')[1].lstrip()
df['message']=df['text'].apply(func)
df.drop('text',axis=1,inplace=True)
def text_process(mess):
"""
Takes in a string of text, then performs the following:
1. Remove all punctuation
2. Remove all stopwords
3. Returns a list of the cleaned text
"""
# Check characters to see if they are in punctuation
nopunc = [char for char in mess if char not in string.punctuation]
# Join the characters again to form the string.
nopunc = ''.join(nopunc)
# Now just remove any stopwords
return [word for word in nopunc.split() if word.lower() not in b]
#messages['message'].head(5).apply(text_process)
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
param_grid = {'C': [0.1,1, 10, 100, 1000], 'gamma': [1,0.1,0.01,0.001,0.0001], 'kernel': ['rbf']}
from sklearn.model_selection import GridSearchCV
pipeline = Pipeline([
('bow', CountVectorizer(analyzer=text_process)), # strings to token integer counts
('tfidf', TfidfTransformer()), # integer counts to weighted TF-IDF scores
('classifier', MultinomialNB()), # train on TF-IDF vectors w/ Naive Bayes classifier
])
model = Pipeline([
('bow', CountVectorizer(analyzer=text_process)),
('tfidf', TfidfTransformer()),
('classifier',GridSearchCV(SVC(),param_grid,refit=True,verbose=3)),
])
msg_train,msg_test,label_train,label_test=train_test_split(df['message'],
df['spam'],
test_size=0.3)
pipeline.fit(msg_train,label_train)
model.fit(msg_train,label_train)
predictions = pipeline.predict(msg_test)
import pickle
pickle.dump(pipeline,open('model.pkl','wb'))
pickle.dump(model,open('model1.pkl','wb'))
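# --- Illustrative usage (sketch; the message text is made up) ---
# Unpickling the saved pipeline requires text_process to be importable from the
# loading script as well.
# loaded = pickle.load(open('model.pkl', 'rb'))
# print(loaded.predict(['free entry to win a prize, reply now']))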
|
|
################################ LIBRARY IMPORTS #####################################################################
import io
import sys
import time
from typing import List
import uvicorn
from fastapi import FastAPI, File, HTTPException, UploadFile
from PIL import Image
# Each machine-learning model is loaded from tensorflow.keras; for every model the matching
# preprocess_input and decode_predictions functions are needed to preprocess the images and decode the results
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input as preprocessResnet50
from tensorflow.keras.applications.resnet50 import decode_predictions as decodeResnet50
import numpy as np
from tensorflow.python.keras.applications.efficientnet import EfficientNetB0, EfficientNetB7
from tensorflow.python.keras.applications.efficientnet import preprocess_input as preprocessEfficientB0
from tensorflow.python.keras.applications.efficientnet import decode_predictions as decodeEfficientB0
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.nasnet import NASNetLarge
from tensorflow.python.keras.applications.nasnet import preprocess_input as preprocessNasLarge
from tensorflow.python.keras.applications.nasnet import decode_predictions as decodeNasLarge
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.vgg16 import preprocess_input as preprocessVGG16
from tensorflow.python.keras.applications.vgg16 import decode_predictions as decodeVGG16
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.applications.inception_resnet_v2 import preprocess_input as preprocessInRes
from tensorflow.python.keras.applications.inception_resnet_v2 import decode_predictions as decodeInRes
from tensorflow.python.keras.applications.xception import Xception
from tensorflow.python.keras.applications.xception import preprocess_input as preprocessXception
from tensorflow.python.keras.applications.xception import decode_predictions as decodeXception
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.inception_v3 import preprocess_input as preprocesInception
from tensorflow.python.keras.applications.inception_v3 import decode_predictions as decodeInception
from helpers import classify_image, read_labels, set_input_tensor
import tensorflow as tf
###############################################################################################################
# Declarations
app = FastAPI()
# Machine-learning model declarations and their corresponding settings
modelRes = ResNet50(weights='imagenet')
modelVGG = VGG16(weights='imagenet', include_top=True)
modelEfficient = EfficientNetB0(include_top=True, weights='imagenet')
modelEfficientB7 = EfficientNetB7(include_top=True, weights='imagenet')
modelNasNet = NASNetLarge(include_top=True, weights='imagenet')
modelMobileNet = MobileNetV2(weights="imagenet", include_top=True)
modelXception = Xception(include_top=True,weights="imagenet")
modelInception = InceptionV3(include_top=True, weights="imagenet", classifier_activation="softmax")
modelInRes = InceptionResNetV2(weights="imagenet", include_top=True)
# Settings
MIN_CONFIDENCE = 0.1  # the minimum confidence accepted from the models
# The URLs each model responds to
IMAGE_URL2 = "/v1/vision/resNet"
IMAGE_URL3 = "/v1/vision/vggNet"
IMAGE_URL4 = "/v1/vision/efficientNet"
IMAGE_URL5 = "/v1/vision/nasNet"
IMAGE_URL6 = "/v1/vision/mobileNet2"
IMAGE_URL7 = "/v1/vision/xceptionNet"
IMAGE_URL8 = "/v1/vision/efficientNetB7"
# At the root path "/" (the server host's landing page) the message in the return below is shown
@app.get("/")
async def info():
return """tflite-server docs at ip:port/docs"""
# Each model is then served by the functions below, reached through its corresponding URL
@app.post(IMAGE_URL2)
async def predict_scene(image: List[UploadFile] = File(...)):  # `image` receives a list of one or more images from the Android app's POST request
try:
        print("Welcome...")  # a welcome message
start = time.time()
all_data = []
all_predictions = []
data = {}
objects = []
single_object = {}
        for i in tf.range(len(image)):  # for every image in the list
            contents = await image[i].read()  # await the read so we do not continue before the file contents are available
            image2 = Image.open(io.BytesIO(contents))  # open the image file with PIL
            resized_image = image2.resize((224, 224), Image.ANTIALIAS)  # resize to the model input size (with an antialiasing correction)
            input_data = np.expand_dims(resized_image, axis=0)  # build the input array (adds a batch dimension)
            x = preprocessResnet50(input_data)  # preprocess the array as expected by the corresponding model
            all_data.append(x)  # collect the preprocessed array of every image
reading_end = time.time()
print("reading data.... " + (str(reading_end - start)))
inference_time_start = time.time()
        dedomena = np.vstack(all_data)  # stack all preprocessed image arrays vertically, one row per image
        megethos = len(all_data)  # batch size
        preds = modelRes.predict(dedomena, batch_size=megethos)  # run inference with the selected model
        predictions = decodeResnet50(preds, top=1)  # decode the inference results with the model-specific decoder
inference_time_end = time.time()
print(inference_time_end - inference_time_start)
labels = []
time_spent = []
time_spent.append(reading_end - start)
time_spent.append(inference_time_end - inference_time_start)
print(predictions[i][0][1])
        for i in range(len(all_data)): # Add the classification result of every image to one list
            labels.append(predictions[i][0][1])
        data["labels"] = labels # Add the labels to the dictionary
        data["times"] = time_spent # Add the processing times to the dictionary
        return data # Return the dictionary as the response to the POST request
    except:
        e = sys.exc_info()[1]
        raise HTTPException(status_code=500, detail=str(e)) # On error, return a 500 response
# Each of the models below consists of the same steps
@app.post(IMAGE_URL3)
async def predict_scene(image: List[UploadFile] = File(...)):
try:
print("Welcome...")
start = time.time()
all_data = []
all_predictions = []
data = {}
objects = []
single_object = {}
for i in tf.range(len(image)):
contents = await image[i].read()
image2 = Image.open(io.BytesIO(contents))
resized_image = image2.resize((224, 224), Image.ANTIALIAS)
input_data = np.expand_dims(resized_image, axis=0)
x = preprocessVGG16(input_data)
all_data.append(x)
reading_end = time.time()
print("reading data.... " + (str(reading_end - start)))
inference_time_start = time.time()
dedomena = np.vstack(all_data)
megethos = len(all_data)
preds = modelVGG.predict(dedomena, batch_size=megethos)
predictions = decodeVGG16(preds, top=1)
inference_time_end = time.time()
print(inference_time_end - inference_time_start)
labels = []
time_spent = []
time_spent.append(reading_end - start)
time_spent.append(inference_time_end - inference_time_start)
print(predictions[i][0][1])
for i in range(len(all_data)):
labels.append(predictions[i][0][1])
data["labels"] = labels
data["times"] = time_spent
return data
except:
e = sys.exc_info()[1]
raise HTTPException(status_code=500, detail=str(e))
@app.post(IMAGE_URL4)
async def predict_scene(image: List[UploadFile] = File(...)):
try:
print("Welcome...")
start = time.time()
all_data = []
all_predictions = []
data = {}
objects = []
single_object = {}
for i in tf.range(len(image)):
contents = await image[i].read()
image2 = Image.open(io.BytesIO(contents))
resized_image = image2.resize((224, 224), Image.ANTIALIAS)
input_data = np.expand_dims(resized_image, axis=0)
x = preprocessEfficientB0(input_data)
all_data.append(x)
reading_end = time.time()
print("reading data.... " + (str(reading_end - start)))
inference_time_start = time.time()
dedomena = np.vstack(all_data)
megethos = len(all_data)
preds = modelEfficient.predict(dedomena, batch_size=megethos)
# for i in range(len(all_data)):
predictions = decodeEfficientB0(preds, top=1)
inference_time_end = time.time()
print(inference_time_end - inference_time_start)
labels = []
time_spent = []
time_spent.append(reading_end - start)
time_spent.append(inference_time_end - inference_time_start)
print(predictions[i][0][1])
for i in range(len(all_data)):
labels.append(predictions[i][0][1])
data["labels"] = labels
data["times"] = time_spent
return data
except:
e = sys.exc_info()[1]
raise HTTPException(status_code=500, detail=str(e))
@app.post(IMAGE_URL5)
async def predict_scene(image: List[UploadFile] = File(...)):
try:
print("Welcome...")
start = time.time()
all_data = []
all_predictions = []
data = {}
objects = []
single_object = {}
for i in tf.range(len(image)):
contents = await image[i].read()
image2 = Image.open(io.BytesIO(contents))
resized_image = image2.resize((331, 331), Image.ANTIALIAS)
input_data = np.expand_dims(resized_image, axis=0)
x = preprocessNasLarge(input_data)
all_data.append(x)
reading_end = time.time()
print("reading data.... " + (str(reading_end - start)))
inference_time_start = time.time()
dedomena = np.vstack(all_data)
megethos = len(all_data)
preds = modelNasNet.predict(dedomena, batch_size=megethos)
predictions = decodeNasLarge(preds, top=1)
inference_time_end = time.time()
print(inference_time_end - inference_time_start)
labels = []
time_spent = []
time_spent.append(reading_end - start)
time_spent.append(inference_time_end - inference_time_start)
print(predictions[i][0][1])
for i in range(len(all_data)):
labels.append(predictions[i][0][1])
data["labels"] = labels
data["times"] = time_spent
return data
except:
e = sys.exc_info()[1]
raise HTTPException(status_code=500, detail=str(e))
@app.post(IMAGE_URL6)
async def predict_scene(image: List[UploadFile] = File(...)):
try:
print("Welcome...")
start = time.time()
all_data=[]
all_predictions = []
data = {}
objects = []
single_object = {}
for i in tf.range(len(image)):
contents = await image[i].read()
image2 = Image.open(io.BytesIO(contents))
resized_image = image2.resize((299, 299), Image.ANTIALIAS)
input_data = np.expand_dims(resized_image, axis=0)
x = preprocessInRes(input_data)
all_data.append(x)
reading_end = time.time()
print("reading data.... "+ (str(reading_end- start )))
#print(len(all_data))
inference_time_start = time.time()
dedomena = np.vstack(all_data)
megethos = len(all_data)
preds = modelInRes.predict(dedomena, batch_size=megethos)
#for i in range(len(all_data)):
predictions = decodeInRes(preds, top=1)
inference_time_end = time.time()
print(inference_time_end-inference_time_start)
#print(len(data['predictions']))
#return data
labels = []
time_spent = []
time_spent.append(reading_end-start)
time_spent.append(inference_time_end-inference_time_start)
print(predictions[i][0][1])
for i in range(len(all_data)):
labels.append(predictions[i][0][1])
data["labels"] = labels
data["times"] = time_spent
return data
except:
e = sys.exc_info()[1]
raise HTTPException(status_code=500, detail=str(e))
@app.post(IMAGE_URL7)
async def predict_scene(image: List[UploadFile] = File(...)):
try:
print("Welcome...")
start = time.time()
all_data = []
all_predictions = []
data = {}
objects = []
single_object = {}
for i in tf.range(len(image)):
contents = await image[i].read()
image2 = Image.open(io.BytesIO(contents))
resized_image = image2.resize((299, 299), Image.ANTIALIAS)
input_data = np.expand_dims(resized_image, axis=0)
x = preprocessXception(input_data)
all_data.append(x)
reading_end = time.time()
print("reading data.... " + (str(reading_end - start)))
inference_time_start = time.time()
dedomena = np.vstack(all_data)
megethos = len(all_data)
preds = modelXception.predict(dedomena, batch_size= megethos)
# for i in range(len(all_data)):
predictions = decodeXception(preds, top=1)
inference_time_end = time.time()
print(inference_time_end - inference_time_start)
labels = []
time_spent = []
time_spent.append(reading_end - start)
time_spent.append(inference_time_end - inference_time_start)
print(predictions[i][0][1])
for i in range(len(all_data)):
labels.append(predictions[i][0][1])
data["labels"] = labels
data["times"] = time_spent
return data
except:
e = sys.exc_info()[1]
raise HTTPException(status_code=500, detail=str(e))
@app.post(IMAGE_URL8)
async def predict_scene(image: List[UploadFile] = File(...)):
try:
print("Welcome...")
start = time.time()
all_data = []
all_predictions = []
data = {}
objects = []
single_object = {}
for i in tf.range(len(image)):
contents = await image[i].read()
image2 = Image.open(io.BytesIO(contents))
resized_image = image2.resize((600, 600), Image.ANTIALIAS)
input_data = np.expand_dims(resized_image, axis=0)
x = preprocessEfficientB0(input_data)
all_data.append(x)
reading_end = time.time()
print("reading data.... " + (str(reading_end - start)))
# print(len(all_data))
inference_time_start = time.time()
dedomena = np.vstack(all_data)
megethos = len(all_data)
preds = modelEfficientB7.predict(dedomena, batch_size= megethos)
predictions = decodeEfficientB0(preds, top=1)
inference_time_end = time.time()
print(inference_time_end - inference_time_start)
labels = []
time_spent = []
time_spent.append(reading_end - start)
time_spent.append(inference_time_end - inference_time_start)
print(predictions[i][0][1])
for i in range(len(all_data)):
labels.append(predictions[i][0][1])
data["labels"] = labels
data["times"] = time_spent
return data
except:
e = sys.exc_info()[1]
raise HTTPException(status_code=500, detail=str(e))
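# ------------------------------------------------------------------------------
# A minimal client-side sketch (not part of the server above): it reproduces the
# multipart POST that the Android app sends, using the `requests` library. The
# base URL and the image filenames are illustrative assumptions only.
def _example_client_request(base_url="http://localhost:8000", paths=("img1.jpg", "img2.jpg")):
    import requests  # assumed to be installed in the client environment
    # Repeat the "image" form field once per file, matching List[UploadFile] = File(...)
    files = [("image", (p, open(p, "rb"), "image/jpeg")) for p in paths]
    resp = requests.post(base_url + IMAGE_URL2, files=files)
    # Expected response shape: {"labels": [...], "times": [read_seconds, inference_seconds]}
    return resp.json()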
|
|
from tflearn.data_augmentation import ImageAugmentation
from tflearn.data_preprocessing import ImagePreprocessing
# import glob
# from sklearn import svm
from sklearn.ensemble import BaggingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
# from itertools import compress
import numpy as np
import os.path
import datasets
import time
def onehot_labels(labels):
return np.eye(10)[labels]
def unpickle(file):
# import cPickle
import pickle as cPickle
fo = open(file, 'rb')
# dict = cPickle.load(fo)
dict = cPickle.load(fo, encoding='bytes')
fo.close()
return dict
def get_proper_images(raw):
raw_float = np.array(raw, dtype=float)
images = raw_float.reshape([-1, 3, 32, 32])
images = images.transpose([0, 2, 3, 1])
return images
def get_data():
data_norm = True
data_augmentation = True
data1 = unpickle('../cifar-10-batches-py/data_batch_1')
data2 = unpickle('../cifar-10-batches-py/data_batch_2')
data3 = unpickle('../cifar-10-batches-py/data_batch_3')
data4 = unpickle('../cifar-10-batches-py/data_batch_4')
data5 = unpickle('../cifar-10-batches-py/data_batch_5')
# print(list(data1.keys()))
# X = np.concatenate((get_proper_images(data1['data']),
# get_proper_images(data2['data']),
# get_proper_images(data3['data']),
# get_proper_images(data4['data']),
# get_proper_images(data5['data'])))
X = np.concatenate((get_proper_images(data1[b'data']),
get_proper_images(data2[b'data']),
get_proper_images(data3[b'data']),
get_proper_images(data4[b'data']),
get_proper_images(data5[b'data'])))
# Y = np.concatenate((onehot_labels(data1['labels']),
# onehot_labels(data2['labels']),
# onehot_labels(data3['labels']),
# onehot_labels(data4['labels']),
# onehot_labels(data5['labels'])))
Y = np.concatenate((onehot_labels(data1[b'labels']),
onehot_labels(data2[b'labels']),
onehot_labels(data3[b'labels']),
onehot_labels(data4[b'labels']),
onehot_labels(data5[b'labels'])))
# X_test = get_proper_images(unpickle('../cifar-10-batches-py/test_batch')['data'])
# Y_test = onehot_labels(unpickle('../cifar-10-batches-py/test_batch')['labels'])
X_test = get_proper_images(unpickle('../cifar-10-batches-py/test_batch')[b'data'])
Y_test = onehot_labels(unpickle('../cifar-10-batches-py/test_batch')[b'labels'])
img_prep = ImagePreprocessing()
if data_norm:
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
img_aug = ImageAugmentation()
if data_augmentation:
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=30.)
img_aug.add_random_crop((32, 32), 6)
return X, Y, X_test, Y_test, img_prep, img_aug
def main():
a = time.time()
# x, y, x_test, y_test, img_prep, img_aug = get_data()
#
# svm_y = np.zeros((y.shape[0], ), dtype=int)
# svm_y_test = np.zeros((y_test.shape[0]), dtype=int)
# for i in range(y.shape[0]):
# # print(y[i, :] == 1)
# mask = y[i, :] == 1
# meh = list(compress(range(len(mask)), mask))
# svm_y[i] = int(meh[0])
# for i in range(y_test.shape[0]):
# mask = y_test[i, :] == 1
# meh = list(compress(range(len(mask)), mask))
# svm_y_test[i] = int(meh[0])
# runs = ['sigmoid_sigmoid_256',
# 'sigmoid_sigmoid_crossentropy_256',
# 'sigmoid_sigmoid_gaussiannoise_256',
# 'sigmoid_tanh_512',
# 'relu_relu_256']
# runs = ['sigmoid_sigmoid_snp_0.1_512',
# 'sigmoid_sigmoid_snp_0.2_512',
# 'sigmoid_sigmoid_snp_0.3_512',
# 'sigmoid_sigmoid_snp_0.4_512',
# 'sigmoid_sigmoid_snp_0.5_512']
# runs = ['sigmoid_sigmoid_mask_0.1_512',
# 'sigmoid_sigmoid_mask_0.2_512',
# 'sigmoid_sigmoid_mask_0.3_512',
# 'sigmoid_sigmoid_mask_0.4_512',
# 'sigmoid_sigmoid_mask_0.5_512',
# 'relu_relu_snp_0.4_512']
# runs = ['sigmoid_sigmoid_gaussian_0.4_512']
runs = ['forcnn_sigmoid_sigmoid_snp_0.4_675']
print('time required to fix the answers {}'.format(time.time() - a))
# feature_generator = DNN(features, session=network.session)
# if len(glob.glob('./data/dae/*train.npy')) != 1:
# svm_features = np.zeros((0, 512))
# for i in range(x.shape[0]):
# if i % 1000 == 0:
# print(i, svm_features.shape)
# chuckmein = x[i, :, :].reshape((1, x.shape[1], x.shape[2], x.shape[3]))
# svm_features = np.vstack((svm_features, feature_generator.predict(chuckmein)))
# np.save('./dae_svm_features.npy', svm_features)
# else:
# svm_features = np.load('./dae_svm_features.npy')
model_directory = './data/dae/'
encode_w_suffix = '-encw.npy'
encode_b_suffix = '-encbh.npy'
# decode_w = '-decw.npy'
# decode_b = '-decb.npy'
# train_suffix = '-forcnn_sigmoid_sigmoid_snp_0.4_675.npy'
train_suffix_answer = '-train-answers.npy'
# test_suffix = '-test.npy'
test_suffix_answer = '-test-answers.npy'
# validation_suffix = '-validate.npy'
x, y, x_test, y_test = datasets.load_cifar10_dataset('./cifar-10-batches-py', mode='supervised')
# y = onehot_labels(y)
# y_test = onehot_labels(y_test)
for item in runs:
# svm_features = np.load(os.path.join(model_directory, item + train_suffix))
# svm_features_test = np.load(os.path.join(model_directory, item + test_suffix))
encode_w = np.load(os.path.join(model_directory, item + encode_w_suffix))
encode_b = np.load(os.path.join(model_directory, item + encode_b_suffix))
encode = np.add(np.dot(x, encode_w), encode_b)
# svm_features = encode.reshape(x.shape[0], 3, 15, 15).transpose(0, 2, 3, 1)
svm_features = encode
encode = np.add(np.dot(x_test, encode_w), encode_b)
svm_features_test = encode
# svm_features_test = encode.reshape(x_test.shape[0], 3, 15, 15).transpose(0, 2, 3, 1)
# print(svm_features.shape, svm_features_test.shape, y.shape, y_test.shape)
# stop
n_estimators = 10
n_jobs = 4
print('training svm')
start = time.time()
clf = OneVsRestClassifier(BaggingClassifier(
SVC(kernel='linear', probability=True, class_weight=None),
max_samples=1.0 / n_estimators, n_estimators=n_estimators, n_jobs=n_jobs))
clf.fit(svm_features, y)
end = time.time()
print("Bagging SVC", end - start, clf.score(svm_features_test, y_test))
return
main()
|
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
project_name = "reco-tut-mlh"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
# In[2]:
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "recotut@recohut.com"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[31]:
get_ipython().system(u'git status')
# In[33]:
get_ipython().system(u'git pull --rebase origin main')
# In[34]:
get_ipython().system(u"git add . && git commit -m 'commit' && git push origin main")
# ---
# # Light Graph Convolution Network (LightGCN)
#
# This is a TensorFlow implementation of LightGCN with a custom training loop.
#
# The LightGCN is adapted from Neural Graph Collaborative Filtering (NGCF) — a state-of-the-art GCN-based recommender model. As its name suggests, LightGCN is a simplified version of a typical GCN, where feature transformation and nonlinear activation are dropped in favor of keeping only the essential component: neighborhood aggregation.
#
# # Imports
# In[3]:
import math
import numpy as np
import os
import pandas as pd
import random
import requests
import scipy.sparse as sp
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from tensorflow.keras.utils import Progbar
from tqdm import tqdm
# # Prepare data
#
# This LightGCN implementation takes an adjacency matrix in a sparse tensor format as input.
#
# In preparation of the data for LightGCN, we must:
#
#
# * Download the data
# * Stratified train test split
# * Create a normalized adjacency matrix
# * Convert to tensor
#
#
# ## Load data
#
# The data we use is the benchmark MovieLens 100K Dataset, with 100k ratings, 1000 users, and 1700 movies.
# In[4]:
fp = os.path.join('./data/bronze', 'u.data')
raw_data = pd.read_csv(fp, sep='\t', names=['userId', 'movieId', 'rating', 'timestamp'])
print(f'Shape: {raw_data.shape}')
raw_data.sample(10, random_state=123)
# In[5]:
# Load movie titles.
fp = os.path.join('./data/bronze', 'u.item')
movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1')
print(f'Shape: {movie_titles.shape}')
movie_titles.sample(10, random_state=123)
# ## Train test split
#
# We split the data using a stratified split so the users in the training set are also the same users in the test set. LightGCN is not able to generate recommendations for users not yet seen in the training set.
#
# Here we will have a training size of 75%
# In[6]:
# Split each user's reviews by % for training.
splits = []
train_size = 0.75
for _, group in raw_data.groupby('userId'):
group = group.sample(frac=1, random_state=123)
group_splits = np.split(group, [round(train_size * len(group))])
# Label the train and test sets.
for i in range(2):
group_splits[i]["split_index"] = i
splits.append(group_splits[i])
# Concatenate splits for all the groups together.
splits_all = pd.concat(splits)
# Take train and test split using split_index.
train = splits_all[splits_all["split_index"] == 0].drop("split_index", axis=1)
test = splits_all[splits_all["split_index"] == 1].drop("split_index", axis=1)
print(f'Train Shape: {train.shape}')
print(f'Test Shape: {test.shape}')
print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}')
# ## Reindex
#
# Reset the index of users and movies from 0-n for both the training and test data. This is to allow better tracking of users and movies. Dictionaries are created so we can easily translate back and forth from the old index to the new index.
#
# We would also normally remove users with no ratings, but in this case, all entries have a user and a rating between 1-5.
#
#
# In[7]:
combined = train.append(test)
n_users = combined['userId'].nunique()
print('Number of users:', n_users)
n_movies = combined['movieId'].nunique()
print('Number of movies:', n_movies)
# In[8]:
# Create DataFrame with reset index of 0-n_movies.
movie_new = combined[['movieId']].drop_duplicates()
movie_new['movieId_new'] = np.arange(len(movie_new))
train_reindex = pd.merge(train, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
train_reindex['userId_new'] = train_reindex['userId'] - 1
train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']]
test_reindex = pd.merge(test, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
test_reindex['userId_new'] = test_reindex['userId'] - 1
test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']]
# Create dictionaries so we can convert to and from indexes
item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new']))
id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId']))
user2id = dict(zip(train['userId'], train_reindex['userId_new']))
id2user = dict(zip(train_reindex['userId_new'], train['userId']))
# In[9]:
# Keep track of which movies each user has reviewed.
# To be used later in training the LightGCN.
interacted = (
train_reindex.groupby("userId_new")["movieId_new"]
.apply(set)
.reset_index()
.rename(columns={"movieId_new": "movie_interacted"})
)
# ## Adjacency matrix
#
# The adjacency matrix is a data structure that represents a graph by encoding the connections between nodes. In our case, the nodes are both users and movies. Rows and columns consist of ALL the nodes, and for every connection (a reviewed movie) there is the value 1.
#
# To create the adjacency matrix, we first create a user-item graph where, similar to the adjacency matrix, connected users and movies are represented as 1 in a sparse array. Unlike the adjacency matrix, the user-item graph has only users along one axis and items along the other, whereas the adjacency matrix has both users and items concatenated as rows and columns.
#
#
# In this case, because the graph is undirected (meaning the connections between nodes do not have a specified direction)
# the adjacency matrix is symmetric. We use this to our advantage by transposing the user-item graph to create the adjacency matrix.
#
# Our adjacency matrix will not include self-connections where each node is connected to itself.
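#
# Concretely, a sketch of the construction the next cells perform: with $R$ the $|U| \times |I|$ user-item matrix,
#
# $A = \begin{pmatrix} 0 & R \\ R^T & 0 \end{pmatrix}$
#
# so the first $|U|$ rows/columns index users, the remaining $|I|$ index movies, and the two $R$ blocks are what the slice assignments below fill in.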
# ### Create adjacency matrix
# In[10]:
# Create user-item graph (sparse matrix where users are rows and movies are columns.
# 1 if a user reviewed that movie, 0 if they didn't).
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32)
R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1
# Create the adjacency matrix with the user-item graph.
adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32)
# Convert to list-of-lists format for efficient assignment.
adj_mat = adj_mat.tolil()
R = R.tolil()
# Put together adjacency matrix. Movies and users are nodes/vertices.
# 1 if the movie and user are connected.
adj_mat[:n_users, n_users:] = R
adj_mat[n_users:, :n_users] = R.T
adj_mat
# ### Normalize adjacency matrix
#
# This helps numerically stabilize values when repeating graph convolution operations, avoiding the scale of the embeddings increasing or decreasing.
#
# $\tilde{A} = D^{-\frac{1}{2}}AD^{-\frac{1}{2}}$
#
# $D$ is the degree/diagonal matrix, which is zero everywhere except its diagonal. Each diagonal entry holds the neighborhood size of the corresponding node (how many other nodes that node connects to).
#
#
# $D^{-\frac{1}{2}}$ on the left scales $A$ by the neighborhood size of the source node, while $D^{-\frac{1}{2}}$ on the right scales by the neighborhood size of the destination node rather than the source node.
#
#
#
# In[11]:
# Calculate degree matrix D (for every row count the number of nonzero entries)
D_values = np.array(adj_mat.sum(1))
# Square root and inverse.
D_inv_values = np.power(D_values + 1e-9, -0.5).flatten()
D_inv_values[np.isinf(D_inv_values)] = 0.0
# Create sparse matrix with the values of D^(-0.5) are the diagonals.
D_inv_sq_root = sp.diags(D_inv_values)
# Eval (D^-0.5 * A * D^-0.5).
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root)
# ### Convert to tensor
# In[12]:
# to COOrdinate format first ((row, column), data)
coo = norm_adj_mat.tocoo().astype(np.float32)
# create an index that will tell SparseTensor where the non-zero points are
indices = np.mat([coo.row, coo.col]).transpose()
# convert to sparse tensor
A_tilde = tf.SparseTensor(indices, coo.data, coo.shape)
A_tilde
# # LightGCN
#
# LightGCN keeps neighbor aggregation while removing self-connections, feature transformation, and nonlinear activation, simplifying as well as improving performance.
#
# Neighbor aggregation is done through graph convolutions to learn embeddings that represent nodes. The embedding size can be set to any value; in this notebook, we set the embedding dimension to 64.
#
# In matrix form, graph convolution can be thought of as matrix multiplication. In the implementation we create a graph convolution layer that performs exactly this, allowing us to stack as many graph convolutions as we want. The number of layers is set by N_LAYERS further below.
#
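# Concretely, the propagation implemented by the GraphConv layers and the mean-pooling in `call()` below can be written as
#
# $E^{(k+1)} = \tilde{A} E^{(k)}, \qquad E = \frac{1}{K+1} \sum_{k=0}^{K} E^{(k)}$
#
# where $E^{(0)}$ is the concatenation of the initial user and item embeddings and $K$ is the number of layers. Each layer is a single sparse matrix multiplication with the normalized adjacency matrix, and the final embeddings are the mean over all layers, including the initial one.
#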
# In[13]:
class GraphConv(tf.keras.layers.Layer):
def __init__(self, adj_mat):
super(GraphConv, self).__init__()
self.adj_mat = adj_mat
def call(self, ego_embeddings):
return tf.sparse.sparse_dense_matmul(self.adj_mat, ego_embeddings)
# In[14]:
class LightGCN(tf.keras.Model):
def __init__(self, adj_mat, n_users, n_items, n_layers=3, emb_dim=64, decay=0.0001):
super(LightGCN, self).__init__()
self.adj_mat = adj_mat
self.R = tf.sparse.to_dense(adj_mat)[:n_users, n_users:]
self.n_users = n_users
self.n_items = n_items
self.n_layers = n_layers
self.emb_dim = emb_dim
self.decay = decay
# Initialize user and item embeddings.
initializer = tf.keras.initializers.GlorotNormal()
self.user_embedding = tf.Variable(
initializer([self.n_users, self.emb_dim]), name='user_embedding'
)
self.item_embedding = tf.Variable(
initializer([self.n_items, self.emb_dim]), name='item_embedding'
)
# Stack light graph convolutional layers.
self.gcn = [GraphConv(adj_mat) for layer in range(n_layers)]
def call(self, inputs):
user_emb, item_emb = inputs
output_embeddings = tf.concat([user_emb, item_emb], axis=0)
all_embeddings = [output_embeddings]
# Graph convolutions.
for i in range(0, self.n_layers):
output_embeddings = self.gcn[i](output_embeddings)
all_embeddings += [output_embeddings]
# Compute the mean of all layers
all_embeddings = tf.stack(all_embeddings, axis=1)
all_embeddings = tf.reduce_mean(all_embeddings, axis=1, keepdims=False)
# Split into users and items embeddings
new_user_embeddings, new_item_embeddings = tf.split(
all_embeddings, [self.n_users, self.n_items], axis=0
)
return new_user_embeddings, new_item_embeddings
def recommend(self, users, k):
# Calculate the scores.
new_user_embed, new_item_embed = self((self.user_embedding, self.item_embedding))
user_embed = tf.nn.embedding_lookup(new_user_embed, users)
test_scores = tf.matmul(user_embed, new_item_embed, transpose_a=False, transpose_b=True)
test_scores = np.array(test_scores)
# Remove movies already seen.
test_scores += sp.csr_matrix(self.R)[users, :] * -np.inf
# Get top movies.
test_user_idx = np.arange(test_scores.shape[0])[:, None]
top_items = np.argpartition(test_scores, -k, axis=1)[:, -k:]
top_scores = test_scores[test_user_idx, top_items]
sort_ind = np.argsort(-top_scores)
top_items = top_items[test_user_idx, sort_ind]
top_scores = top_scores[test_user_idx, sort_ind]
top_items, top_scores = np.array(top_items), np.array(top_scores)
# Create Dataframe with recommended movies.
topk_scores = pd.DataFrame(
{
'userId': np.repeat(users, top_items.shape[1]),
'movieId': top_items.flatten(),
'prediction': top_scores.flatten(),
}
)
return topk_scores
# ## Custom training
#
# For training, we batch a number of users from the training set and sample a single positive item (movie that has been reviewed) and a single negative item (movie that has not been reviewed) for each user.
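#
# The loss used in the loop below corresponds to the pairwise BPR-style objective: for a batch $B$ of (user, positive item, negative item) triples,
#
# $L = \frac{1}{|B|} \sum_{(u,i,j) \in B} \ln\left(1 + e^{-(\hat{y}_{ui} - \hat{y}_{uj})}\right) + \frac{\lambda}{|B|} \left\lVert E^{(0)}_B \right\rVert^2$
#
# where $\hat{y}_{ui}$ is the dot product of the propagated user and item embeddings and the regularizer is the squared norm of the layer-0 embeddings of the sampled users and items (with the factor of 1/2 that `tf.nn.l2_loss` includes), scaled by the decay $\lambda$.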
# In[15]:
N_LAYERS = 10
EMBED_DIM = 64
DECAY = 0.0001
EPOCHS = 200
BATCH_SIZE = 1024
LEARNING_RATE = 1e-2
# We expect this # of parameters in our model.
print(f'Parameters: {EMBED_DIM * (n_users + n_movies)}')
# In[16]:
# Initialize model.
optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model = LightGCN(A_tilde,
n_users = n_users,
n_items = n_movies,
n_layers = N_LAYERS,
emb_dim = EMBED_DIM,
decay = DECAY)
# In[18]:
get_ipython().run_cell_magic(u'time', u'', u"# Custom training loop from scratch.\nfor epoch in range(1, EPOCHS + 1):\n print('Epoch %d/%d' % (epoch, EPOCHS))\n n_batch = train_reindex.shape[0] // BATCH_SIZE + 1\n bar = Progbar(n_batch, stateful_metrics='training loss')\n for idx in range(1, n_batch + 1):\n # Sample batch_size number of users with positive and negative items.\n indices = range(n_users)\n if n_users < BATCH_SIZE:\n users = np.array([random.choice(indices) for _ in range(BATCH_SIZE)])\n else:\n users = np.array(random.sample(indices, BATCH_SIZE))\n\n def sample_neg(x):\n while True:\n neg_id = random.randint(0, n_movies - 1)\n if neg_id not in x:\n return neg_id\n\n # Sample a single movie for each user that the user did and did not review.\n interact = interacted.iloc[users]\n pos_items = interact['movie_interacted'].apply(lambda x: random.choice(list(x)))\n neg_items = interact['movie_interacted'].apply(lambda x: sample_neg(x))\n\n users, pos_items, neg_items = users, np.array(pos_items), np.array(neg_items)\n\n with tf.GradientTape() as tape:\n # Call LightGCN with user and item embeddings.\n new_user_embeddings, new_item_embeddings = model(\n (model.user_embedding, model.item_embedding)\n )\n\n # Embeddings after convolutions.\n user_embeddings = tf.nn.embedding_lookup(new_user_embeddings, users)\n pos_item_embeddings = tf.nn.embedding_lookup(new_item_embeddings, pos_items)\n neg_item_embeddings = tf.nn.embedding_lookup(new_item_embeddings, neg_items)\n\n # Initial embeddings before convolutions.\n old_user_embeddings = tf.nn.embedding_lookup(\n model.user_embedding, users\n )\n old_pos_item_embeddings = tf.nn.embedding_lookup(\n model.item_embedding, pos_items\n )\n old_neg_item_embeddings = tf.nn.embedding_lookup(\n model.item_embedding, neg_items\n )\n\n # Calculate loss.\n pos_scores = tf.reduce_sum(\n tf.multiply(user_embeddings, pos_item_embeddings), axis=1\n )\n neg_scores = tf.reduce_sum(\n tf.multiply(user_embeddings, neg_item_embeddings), axis=1\n )\n regularizer = (\n tf.nn.l2_loss(old_user_embeddings)\n + tf.nn.l2_loss(old_pos_item_embeddings)\n + tf.nn.l2_loss(old_neg_item_embeddings)\n )\n regularizer = regularizer / BATCH_SIZE\n mf_loss = tf.reduce_mean(tf.nn.softplus(-(pos_scores - neg_scores)))\n emb_loss = DECAY * regularizer\n loss = mf_loss + emb_loss\n\n # Retreive and apply gradients.\n grads = tape.gradient(loss, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n\n bar.add(1, values=[('training loss', float(loss))])\n\n# Serialize model.\nmodel.save('./artifacts/models/lightgcn')")
# # Recommend
# In[20]:
# Convert test user ids to the new ids
users = np.array([user2id[x] for x in test['userId'].unique()])
recommendations = model.recommend(users, k=10)
recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item})
recommendations = recommendations.merge(
movie_titles, how='left', on='movieId'
)[['userId', 'movieId', 'title', 'prediction']]
recommendations.head(15)
# # Evaluation Metrics
#
# The performance of our model is evaluated using the test set, which consists of the exact same users in the training set but with movies the users have reviewed that the model has not seen before.
#
# A good model will recommend movies that the user has also reviewed in the test set.
# ## Prep for evaluation metrics
# In[21]:
# Create column with the predicted movie's rank for each user
top_k = recommendations.copy()
top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1
# For each user, only include recommended movies that are also in the test set
# Movies that a user has not reviewed will not be included
df_relevant = pd.merge(top_k, test, on=['userId', 'movieId'])[['userId', 'movieId', 'rank']]
# ## Precision@k
#
# Out of the movies that are recommended, what proportion is relevant. Relevant in this case is if the user has reviewed the movie.
#
# A precision@10 of about 0.41 means that about 41% of the recommendations from LightGCN are relevant to the user. In other words, out of the 10 recommendations made, on average a user will have 4 movies that are actually relevant.
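#
# Written out, the next cell computes
#
# $\text{Precision@}k = \frac{1}{|U|} \sum_{u \in U} \frac{|\text{relevant items of } u \text{ in its top-}k|}{k}$
#
# with $k = 10$ and users without any hit contributing zero.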
# In[22]:
# Out of the # of movies a user has reviewed in the test set, how many are actually recommended
df_relevant_count = df_relevant.groupby('userId', as_index=False)['userId'].agg({'relevant': 'count'})
test_count = test.groupby('userId', as_index=False)['userId'].agg({'actual': 'count'})
relevant_ratio = pd.merge(df_relevant_count, test_count, on='userId')
# Calculate precision @ k
precision_at_k = ((relevant_ratio['relevant'] / 10) / len(test['userId'].unique())).sum()
precision_at_k
# ## Recall@k
#
# Out of all the relevant movies (in the test set), how many are recommended.
#
# A recall@10 of about 0.22 means that about 22% of the relevant movies were recommended by LightGCN. By definition you can see how even if all the recommendations made were relevant, recall@k is capped by k. A higher k means that more relevant movies can be recommended.
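#
# Written out, the next cell computes
#
# $\text{Recall@}k = \frac{1}{|U|} \sum_{u \in U} \frac{|\text{relevant items of } u \text{ in its top-}k|}{|\text{relevant items of } u \text{ in the test set}|}$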
# In[23]:
recall_at_k = ((relevant_ratio['relevant'] / relevant_ratio['actual']) / len(test['userId'].unique())).sum()
recall_at_k
# ## Mean Average Precision (MAP)
#
# Calculate the average precision for each user and average all the average precisions over all users. Penalizes incorrect rankings of movies.
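#
# Written out, the two cells below compute, for a user $u$ whose $j$-th hit appears at rank $\mathrm{rank}_j$,
#
# $\text{AP}(u) = \frac{1}{|\mathrm{rel}_u|} \sum_{j} \frac{j}{\mathrm{rank}_j}, \qquad \text{MAP} = \frac{1}{|U|} \sum_{u \in U} \text{AP}(u)$
#
# where $|\mathrm{rel}_u|$ is the number of movies $u$ has reviewed in the test set.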
# In[24]:
# Calculate precision@k for each recommended movie with their corresponding rank.
relevant_ordered = df_relevant.copy()
relevant_ordered['precision@k'] = (relevant_ordered.groupby('userId').cumcount() + 1 )/ relevant_ordered['rank']
# Calculate average precision for each user.
relevant_ordered = relevant_ordered.groupby('userId').agg({'precision@k': 'sum'}).reset_index()
merged = pd.merge(relevant_ordered, relevant_ratio, on='userId')
merged['avg_precision'] = merged['precision@k'] / merged['actual']
# Calculate mean average precision
mean_average_precision = (merged['avg_precision'].sum() / len(test['userId'].unique()))
mean_average_precision
# ## Normalized Discounted Cumulative Gain (NDCG)
#
# Looks at both which movies are relevant and the ranking order of the relevant movies.
# Each user's DCG is normalized by the ideal DCG, and the result is averaged over the total number of users.
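#
# Written out, the next cell computes (with a natural-log discount and $k = 10$)
#
# $\text{DCG}_u = \sum_{\text{hits } m} \frac{1}{\ln(1 + \mathrm{rank}_m)}, \qquad \text{IDCG}_u = \sum_{m=1}^{\min(|\mathrm{rel}_u|,\, k)} \frac{1}{\ln(1 + m)}, \qquad \text{NDCG} = \frac{1}{|U|} \sum_{u \in U} \frac{\text{DCG}_u}{\text{IDCG}_u}$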
# In[25]:
dcg = df_relevant.copy()
dcg['dcg'] = 1 / np.log1p(dcg['rank'])
dcg = dcg.groupby('userId', as_index=False, sort=False).agg({'dcg': 'sum'})
ndcg = pd.merge(dcg, relevant_ratio, on='userId')
ndcg['idcg'] = ndcg['actual'].apply(lambda x: sum(1/ np.log1p(range(1, min(x, 10) + 1))))
ndcg = (ndcg['dcg'] / ndcg['idcg']).sum() / len(test['userId'].unique())
ndcg
# ## Results
# In[26]:
print(f'Precision: {precision_at_k:.6f}',
f'Recall: {recall_at_k:.6f}',
f'MAP: {mean_average_precision:.6f} ',
f'NDCG: {ndcg:.6f}', sep='\n')
# # Exploring movie embeddings
#
# In this section, we examine how embeddings of movies relate to each other and if movies have similar movies near them in the embedding space. We will find the 6 closest movies to each movie. Remember that the closest movie should automatically be the same movie. Effectively we are finding the 5 closest films.
#
# Here we find the movies that are closest to the movie 'Star Wars' (movieId = 50). The closest movies are space-themed, which makes complete sense and tells us that our movie embeddings behave as intended. We also see this when looking at the closest movies for the kids' movie 'Lion King'.
# In[27]:
# Get the movie embeddings
_, new_item_embed = model((model.user_embedding, model.item_embedding))
# In[28]:
k = 6
nbrs = NearestNeighbors(n_neighbors=k).fit(new_item_embed)
distances, indices = nbrs.kneighbors(new_item_embed)
closest_movies = pd.DataFrame({
'movie': np.repeat(np.arange(indices.shape[0])[:, None], k),
'movieId': indices.flatten(),
'distance': distances.flatten()
}).replace({'movie': id2item,'movieId': id2item}).merge(movie_titles, how='left', on='movieId')
closest_movies
# In[29]:
id = 50
closest_movies[closest_movies.movie == id]
# In[30]:
id = 71
closest_movies[closest_movies.movie == id]
|
|
import numpy as np
##from sklearn import hmm
from scipy.stats import norm
import nwalign as nw
from collections import defaultdict
import itertools
import time
from model_tools import get_stored_model, read_model_f5, read_model_tsv
onemers = [''.join(e) for e in itertools.product("ACGT")]
dimers = [''.join(e) for e in itertools.product("ACGT","ACGT")]
trimers = [''.join(e) for e in itertools.product("ACGT","ACGT","ACGT")]
fourmers = [''.join(e) for e in itertools.product("ACGT","ACGT","ACGT","ACGT")]
fivemers = [''.join(e) for e in itertools.product("ACGT","ACGT","ACGT","ACGT","ACGT")]
i=np.matrix([0.25,0.25,0.25,0.25])
e=np.matrix([[10,40,70,100],[np.sqrt(5),np.sqrt(5),np.sqrt(5),np.sqrt(5)]])
t=np.matrix([[0.1,0.2,0.3,0.4],[0.4,0.3,0.2,0.1],[0.25,0.25,0.15,0.35], [0.3,0.2,0.3,0.2]])
## transition estimation using bayesian updating
## make a program that adjusts the transition probabilities according to trusted data
## e.g. goes through illumina reads starting with uniform priors, updates transitions after seeing data
## it could update BOTH 1 move and 2 move (and K move) transitions
## 0 move transitions would have to be Baum-Welched...
## do long sequences faster
## break sequences into chunks, calculate the viterbi matrix on each chunk in parallel
## deal with multiple matrices in way that results in correct answer
## is this possible?
class HMM(object):
pass
def generate_statepath(tran_probs, initial_probs, states, length=10):
## t, e, and i are np.matrix objects
numstates = len(states)
statenums = range(numstates)
current = np.random.choice(statenums, p=initial_probs)
statePath = [current]
length -= 1
while length > 0:
upcoming = np.random.choice(statenums, p=tran_probs[current])
current = upcoming
statePath.append(current)
length -= 1
return statePath
def generate_emissions_from_statepath(emission_probs, statepath):
means = emission_probs[0,statepath]
stdevs = emission_probs[1,statepath]
emits = np.random.normal(means, stdevs)
return emits
def generate_statepath_and_emissions(emission_probs, tran_probs, initial_probs, states, length=10):
statenums = range(len(states))
current = int(np.random.choice(statenums, size=1, p=initial_probs))
statepath = [current]
## print type(statepath)
emitted_data = [np.random.normal(emission_probs[0,current], emission_probs[1,current])]
length = length-1
while length > 0:
upcoming = int(np.random.choice(statenums, size=1, p=tran_probs[current,:]))
current = upcoming
## print current, upcoming
statepath.append(current)
emitted_data.append(np.random.normal(emission_probs[0,current], emission_probs[1,current]))
length = length-1
return statepath, emitted_data
def generate_emissions_twoemits():
pass
def forward(emission_probs, tran_probs, initial_probs, states, emitted_data, num_states = None, num_emits=None):
## t, e, and i are np.matrix objects
if num_states == None:
num_states = len(states)
if num_emits == None:
num_emits = len(emitted_data)
ep = norm(emission_probs[0,:], emission_probs[1,:])
Forward = np.zeros([num_states,num_emits])
scalefactors = np.zeros([2,num_emits])
#initial
Forward[:, 0] = np.multiply(initial_probs,ep.pdf(emitted_data[0]))
## scale to prevent underflow -- keep track of scaling
scalefactors[0,0] = sum(Forward[:,0])
scalefactors[1,0] = np.log(scalefactors[0,0])
Forward[:,0] = Forward[:,0]/scalefactors[0,0]
## iterate
for k in range(1, num_emits):
emit = ep.pdf(emitted_data[k])
Forward[:,k] = np.multiply(emit, np.dot(Forward[:,k-1],tran_probs))
scalefactors[0,k] = sum(Forward[:,k])
scalefactors[1,k] = np.log(scalefactors[0,k]) + scalefactors[1,k-1]
Forward[:,k] = Forward[:,k]/scalefactors[0,k]
return Forward, scalefactors
def backward(emission_probs, tran_probs, initial_probs, states, emitted_data, num_states = None, num_emits=None):
## t, e, and i are np.matrix objects
if num_states == None:
num_states = len(states)
if num_emits == None:
num_emits = len(emitted_data)
ep = norm(emission_probs[0,:], emission_probs[1,:])
Backward = np.zeros([num_states,num_emits])
scalefactors = np.zeros([2,num_emits])
end = num_emits - 1
#initial
Backward[:, end] = 1
## scale to prevent underflow -- keep track of scaling
scalefactors[0,end] = sum(Backward[:,end])
scalefactors[1,end] = np.log(scalefactors[0,end])
Backward[:,end] = Backward[:,end]/scalefactors[0,end]
## iterate
for k in range(end-1, -1, -1):
emit = ep.pdf(emitted_data[k+1])
a = np.multiply(Backward[:,k+1], emit).transpose()
Backward [:,k] = np.dot(tran_probs, a).transpose()
scalefactors[0,k] = sum(Backward[:,k])
scalefactors[1,k] = np.log(scalefactors[0,k]) + scalefactors[1,k+1]
Backward[:,k] = Backward[:,k]/scalefactors[0,k]
return Backward, scalefactors
def posterior_decoding(Forward, F_scales, Backward, B_scales, states):#, getseqfxn=get.sequence):
##F and B are scaled long seq matrices -- the scales are scalefactors that come with them out of long fxns
num_states = len(states)
num_emits = np.shape(Forward)[1]
posterior_path = np.zeros([num_emits], dtype=int)
## logprobs = np.zeros([1,num_emits])
for i in range(num_emits):
fb = Forward[:,i]*Backward[:,i]
max_state = int(fb.argmax())
posterior_path[i] = max_state
# logprobs[i] = np.exp(F_scales[i])*np.exp(B_scales[i])*max(fb)
return posterior_path #, logprobs
def max_and_index(x):
i=x.argmax()
m=x[i]
return i,m
##MAKE FASTER
def viterbi(emission_probs, tran_probs, initial_probs, states, emitted_data, num_states = None, num_emits=None, logprobs=False):
np.seterr(divide='ignore')
if not num_states:
num_states = len(states)
if not num_emits:
num_emits = len(emitted_data)
if not logprobs:
initial_probs = np.log(initial_probs)
tran_probs = np.log(tran_probs)
ep = norm(emission_probs[0,:], emission_probs[1,:])
pointer = np.zeros([num_emits, num_states])
Viterbi = np.zeros([num_states, num_emits])
## need to add log_probs instead of multiply probs to prevent underflow
Viterbi[:,0] = initial_probs + ep.logpdf(emitted_data[0])
pointer[0,:] = 1
for j in range(1,num_emits):
selection = Viterbi[:,j-1] + tran_probs.transpose()
maxstates = np.apply_along_axis(max_and_index, 1, selection)
Viterbi[:,j] = ep.logpdf(emitted_data[j]) + maxstates[:,1]
pointer[j,:] = maxstates[:,0]
end = num_emits - 1
#path init
viterbi_path = np.zeros(num_emits).astype(int)
viterbi_path[end] = Viterbi[:,end].argmax()
#prob
viterbi_prob = Viterbi[viterbi_path[end], end]
#path iter
for j in range(end,0,-1):
viterbi_path[j-1] = pointer[j,viterbi_path[j]]
return viterbi_path, viterbi_prob
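## A small self-contained usage sketch (not part of the original module): it wires
## generate_statepath_and_emissions() and viterbi() together on the toy 4-state model
## defined by the i/e/t matrices near the top of this file. Converting them to plain
## np.array objects first is an assumption made here so that np.random.choice and the
## broadcasting inside viterbi() behave as expected.
def _toy_viterbi_demo(length=25):
    states = range(4)
    init = np.asarray(i).ravel()    ## [0.25, 0.25, 0.25, 0.25]
    trans = np.asarray(t)           ## 4x4 transition matrix
    emit = np.asarray(e)            ## row 0 = state means, row 1 = state stdevs
    ## simulate a hidden state path and its Gaussian emissions
    statepath, emitted = generate_statepath_and_emissions(emit, trans, init, states, length)
    ## decode the most likely state path from the emissions alone
    vpath, vprob = viterbi(emit, trans, init, states, emitted)
    ## compare_statepath returns (edit distance, identical positions, percent identity)
    return compare_statepath(np.array(statepath), vpath)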
##############################################################################
'''two emits viterbi'''
##############################################################################
def viterbi2(emission_probs, emission_probs2, tran_probs, initial_probs, states, emitted_data, emitted_data2, num_states = None, num_emits=None, logprobs=False):
np.seterr(divide='ignore')
if not num_states:
num_states = len(states)
if not num_emits:
num_emits = len(emitted_data)
if not logprobs:
initial_probs = np.log(initial_probs)
tran_probs = np.log(tran_probs)
ep1 = norm(emission_probs[0,:], emission_probs[1,:])
ep2 = norm(emission_probs2[0,:], emission_probs2[1,:])
pointer = np.zeros([num_emits, num_states])
Viterbi = np.zeros([num_states, num_emits])
## need to add log_probs instead of multiply probs to prevent underflow
Viterbi[:,0] = initial_probs + ep1.logpdf(emitted_data[0]) + ep2.logpdf(emitted_data2[0])
pointer[0,:] = 1
for j in range(1,num_emits):
selection = Viterbi[:,j-1] + tran_probs.transpose()
maxstates = np.apply_along_axis(max_and_index, 1, selection)
Viterbi[:,j] = ep1.logpdf(emitted_data[j]) + ep2.logpdf(emitted_data2[j]) + maxstates[:,1]
pointer[j,:] = maxstates[:,0]
end = num_emits - 1
#path init
viterbi_path = np.zeros(num_emits).astype(int)
viterbi_path[end] = Viterbi[:,end].argmax()
#prob
viterbi_prob = Viterbi[viterbi_path[end], end]
#path iter
for j in range(end,0,-1):
viterbi_path[j-1] = pointer[j,viterbi_path[j]]
return viterbi_path, viterbi_prob
##############################################################################
def baumwelch():
pass
def prob_data(Forward, scalefactors, num_emits=None):
if num_emits == None:
end = np.shape(Forward)[1]-1
else:
end = num_emits-1
return sum(Forward[:,end])*np.exp(scalefactors[1,end])
def compare_statepath(sp1,sp2):
try:
ident = sum(sp1 == sp2)
total = len(sp1)
except:
return
edit_dist = total - ident
return edit_dist, ident, 100.0*ident/total
def nwalign(s1,s2):
return nw.global_align(s1,s2)
def edit_dist(s1,s2,length=None):
'''Assumes length s1 == length s2 '''
if length == None:
length = len(s1)
dist = 0
for i in range(length):
dist += s1[i] != s2[i]
return dist
def pct_id(length,distance):
'''Assumes dist <= length '''
return 100.0*(length-distance)/length
def compare_seq_nwa(s1,s2):
s1, s2 = nwalign(s1,s2)
length = len(s1)
dist = edit_dist(s1,s2,length)
return dist, pct_id(length,dist)
def combine_2_seq(s1,s2, length=None):
'''Assumes length s1 == length s2 '''
s1,s2 = nwalign(s1,s2)
if length == None:
length = len(s1)
editdist = 0
combinedseq = ''
for i in range(length):
if s1[i] == s2[i]:
combinedseq += s1[i]
elif s1[i] == "-":
editdist += 1
combinedseq += s2[i]
elif s2[i] == "-":
editdist += 1
combinedseq += s1[i]
else: ## mismatch -- arbitrarily go with complement
editdist += 1
combinedseq += s1[i]
return combinedseq, editdist
def complement(DNAstring):
DNAstring = DNAstring.upper()
compString = ''
complement = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'N':'N'}
for base in DNAstring:
compString = compString + complement[base]
return compString
def reverseComplement(DNAstring):
return complement(DNAstring[-1::-1])
def reverse_seq(DNAstring):
return DNAstring[-1::-1]
def get_2D_seq(t,c):
c = complement(c)
return combine_2_seq(t,c)
def generate_kmer_initial_probs(states,uniform=True):
num_states = len(states)
if uniform:
initial = np.array([1.0/num_states]*num_states)
else:
initial = np.random.poisson(lam=10.0, size=num_states)
initial = initial/sum(initial)
return initial
def generate_random_kmer_transition_probs(states, allow_gaps=True, unif=False):
## if allow_gaps = False, assumes each k-mer has another kmer overlapped by k-1
## can set nonzero.trans to any vector -- for DNA length 4
k = len(states[0])
if k == 1:
pass
elif k == 2:
pass
else:
num_states = len(states)
tran_probs = np.zeros([num_states,num_states])
# make prefix-suffix dict -- overlaps of k-1 and k-2
prefix = defaultdict(list)
for i in range(num_states):
pref = states[i][:k-1]
prefix[pref].append(i)
pref = states[i][:k-2]
prefix[pref].append(i)
## create transition probs -- can soft code the sampling parameters later if want
for i in range(num_states):
## overlap by k-1 (move = 1)
current_suffix = states[i][1:k]
if unif:
trans = np.array([365,365,365,365])
else:
trans = np.random.poisson(lam=365.0,size=4)
t = 0
for j in prefix[current_suffix]:
tran_probs[i,j] = trans[t]
t += 1
if allow_gaps:
## overlap by k-2 (move = 2) -- add additional counts
current_suffix = states[i][2:]
if unif:
trans = np.array([1]*16)
else:
trans = np.random.poisson(lam=4.0, size=16)
t = 0
for j in prefix[current_suffix]:
tran_probs[i,j] = tran_probs[i,j] + trans[t]
t += 1
## stay in place: add additional probability to staying in place (move = 0)
current_suffix = states[i]
if unif:
trans = np.array([3])
else:
trans = np.random.poisson(lam=20.0, size=1)
tran_probs[i,i] = tran_probs[i,i] + trans
## normalize all counts by sum to create probs that sum to 1
tran_probs[i,:] = tran_probs[i,:]/sum(tran_probs[i,:])
return tran_probs
### Allow higher gaps
##def generate_random_kmer_transition_probs(states, unif=False):
## ## if allow_gaps = False, assumes each k-mer has another kmer overlapped by k-1
## ## can set nonzero.trans to any vector -- for DNA length 4
## k = len(states[0])
## if k == 1:
## pass
## elif k == 2:
## pass
## else:
## num_states = len(states)
## tran_probs = np.zeros([num_states,num_states])
##
## # make prefix-suffix dict -- overlaps of k-1 and k-2
## prefix = defaultdict(list)
## for i in range(num_states):
## pref = states[i][:k-1]
## prefix[pref].append(i)
## pref = states[i][:k-2]
## prefix[pref].append(i)
## pref = states[i][:k-3]
## prefix[pref].append(i)
## pref = states[i][:k-4]
## prefix[pref].append(i)
##
## ## create transition probs -- can soft code the sampling parameters later if want
## for i in range(num_states):
## ## overlap by k-1 (move = 1)
## current_suffix = states[i][1:k]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 365
## else:
## trans = np.random.poisson(lam=365.0,size=4)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += trans[t]
## t += 1
##
## ## overlap by k-2 (move = 2) -- add additional counts
## current_suffix = states[i][2:]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 1
## else:
## trans = np.random.poisson(lam=4.0, size=16)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += tran_probs[i,j] + trans[t]
## t += 1
##
## ## overlap by k-3 (move = 3)
## current_suffix = states[i][3:]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 0.5
## else:
## trans = np.random.poisson(lam=2.0, size=64)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += tran_probs[i,j] + trans[t]
## t += 1
##
## ## overlap by k-4 (move = 3)
## current_suffix = states[i][4:]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 0.25
## else:
## trans = np.random.poisson(lam=4.0, size=256)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += tran_probs[i,j] + trans[t]
## t += 1
##
## ## no overlap (move = 5)
## tran_probs[i] += 0.1
##
## ## stay in place: add additional probability to staying in place (move = 0)
## current_suffix = states[i]
## if unif:
## tran_probs[i,i] += 3
## else:
## tran_probs[i,i] = tran_probs[i,i] + np.random.poisson(lam=20.0, size=1)
##
## ## normalize all counts by sum to create probs that sum to 1
## tran_probs[i,:] = tran_probs[i,:]/sum(tran_probs[i,:])
##
## return tran_probs
def generate_kmer_emission_probs(states, level=True):
## generates either level emissions or sd emissions
# mu.mean and sigma.mean are the mean and std dev of the r7.3 state level means to be used to generate emission means
# mu.sd, sigma.sd -- same for std devs of signals
if level:
mu_mean = 65.57454
sigma_mean = 6.497453
mu_sd = 1.163836
sigma_sd = 0.4116285
else: ## sd emission
mu_mean = 1.37316
sigma_mean = 0.3144043
mu_sd = 0.1761904
sigma_sd = 0.06263217
num_states = len(states)
emissions = np.zeros([2,num_states])
for i in range(num_states):
emissions[0,i] = np.random.normal(mu_mean, sigma_mean)
emissions[1,i] = abs(np.random.normal(mu_sd,sigma_sd))
return emissions
##def get_emiss_probs_from_model(model, twoemits=False):
## ''' model is object returned from get_stored_model() in model_tools '''
## states = sorted(model[1].keys())
## num_states = len(states)
## t_emissions = np.zeros([2,num_states])
## c_emissions = np.zeros([2,num_states])
## for i in range(num_states):
## t_emissions[0,i] = model[1][states[i]][0]
## t_emissions[1,i] = model[1][states[i]][1]
## c_emissions[0,i] = model[2][states[i]][0]
## c_emissions[1,i] = model[2][states[i]][1]
## return t_emissions, c_emissions
def get_emiss_probs_from_model(model, twoemits=False):
''' model is object returned from get_stored_model() in model_tools '''
states = sorted(model[1].keys())
num_states = len(states)
t_emissions = np.zeros([2,num_states])
c_emissions = np.zeros([2,num_states])
if twoemits:
t_emissions2 = np.zeros([2,num_states])
c_emissions2 = np.zeros([2,num_states])
for i in range(num_states):
t_emissions[0,i] = model[1][states[i]][0]
t_emissions[1,i] = model[1][states[i]][1]
c_emissions[0,i] = model[2][states[i]][0]
c_emissions[1,i] = model[2][states[i]][1]
if twoemits:
t_emissions2[0,i] = model[1][states[i]][2]
t_emissions2[1,i] = model[1][states[i]][3]
c_emissions2[0,i] = model[2][states[i]][2]
c_emissions2[1,i] = model[2][states[i]][3]
if twoemits:
return t_emissions, c_emissions, t_emissions2, c_emissions2
return t_emissions, c_emissions
def generate_kmer_transition_probs_withgaps():
pass
def get_sequence():
pass
##def get_sequence_withgaps(states, statepath, checkoverlap=True, posterior_decoded=False):
## ## states are some type of kmer
## ## statepath is vector of numbers (indexes)
## path_length = len(statepath)
## moves = [0]*path_length ## first move is 0
## k = len(states[0])
## end = k-1
## if k == 1 or k == 2:
## return "This currently only works with 3-mers as smallest kmer."
## else:
## #init
## seq = states[statepath[0]]
## moves[0] = 0
## #iter
## for i in range(1,path_length):
## lastSuffix = states[statepath[i-1]][1:]
## currentPrefix = states[statepath[i]][:k-1]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end]
## moves[i] = 1
## else:
## lastSuffix = states[statepath[i-1]][2:]
## currentPrefix = states[statepath[i]][:k-2]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-1:]
## moves[i] = 2
## elif statepath[i-1] == statepath[i]:
## ## by checking same state last, only heteropolymers affected
## ## homopolymers would be caught in first condition
## moves[i] = 0
## ## nothing is added to sequence
## ## could make another fxn that just spits out events and states line by line like 'template events' in f5
## ## ELSE::: do what? ... in other one just added centroid seq regardless...
## elif posterior_decoded:
## seq += states[statepath[i]][end]
## moves[i] = -1
## ## -1 means it was an "illegal" move (move to a kmer that does not overlap by k-1 or k-2)
## ## it turns out that adding the base from the illegal move does not hurt the seq overall much
## return seq, moves
## reduce homo-5mer polmerization...
def get_sequence_withgaps(states, statepath, checkoverlap=True, posterior_decoded=False):
## states are some type of kmer
## statepath is vector of numbers (indexes)
path_length = len(statepath)
moves = [0]*path_length ## first move is 0
k = len(states[0])
end = k-1
if k == 1 or k == 2:
return "This currently only works with 3-mers as smallest kmer."
else:
#init
seq = states[statepath[0]]
moves[0] = 0
#iter
for i in range(1,path_length):
lastSuffix = states[statepath[i-1]][1:]
currentPrefix = states[statepath[i]][:k-1]
if statepath[i-1] == statepath[i]:
moves[i] = 0
elif lastSuffix == currentPrefix:
seq += states[statepath[i]][end]
moves[i] = 1
else:
lastSuffix = states[statepath[i-1]][2:]
currentPrefix = states[statepath[i]][:k-2]
if lastSuffix == currentPrefix:
seq += states[statepath[i]][end-1:]
moves[i] = 2
elif posterior_decoded:
seq += states[statepath[i]][end]
moves[i] = -1
## -1 means it was an "illegal" move (move to a kmer that does not overlap by k-1 or k-2)
## it turns out that adding the base from the illegal move does not hurt the seq overall much
return seq, moves
### ALLOW higher gaps 3, 4, 5
##def get_sequence_withgaps(states, statepath, checkoverlap=True, posterior_decoded=False):
## ## states are some type of kmer
## ## statepath is vector of numbers (indexes)
## path_length = len(statepath)
## moves = [0]*path_length ## first move is 0
## k = len(states[0])
## end = k-1
## if k == 1 or k == 2:
## return "This currently only works with 3-mers as smallest kmer."
## else:
## #init
## seq = states[statepath[0]]
## moves[0] = 0
## #iter
## for i in range(1,path_length):
## lastSuffix = states[statepath[i-1]][1:]
## currentPrefix = states[statepath[i]][:k-1]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end]
## moves[i] = 1
## elif statepath[i-1] == statepath[i]:
## ## by checking same state last, only heteropolymers affected
## ## homopolymers would be caught in first condition
## moves[i] = 0
## ## nothing is added to sequence
## ## could make another fxn that just spits out events and states line by line like 'template events' in f5
##
## else:
## lastSuffix = states[statepath[i-1]][2:]
## currentPrefix = states[statepath[i]][:k-2]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-1:]
## moves[i] = 2
## else:
## lastSuffix = states[statepath[i-1]][3:]
## currentPrefix = states[statepath[i]][:k-3]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-2:]
## moves[i] = 3
## else:
## lastSuffix = states[statepath[i-1]][4:]
## currentPrefix = states[statepath[i]][:k-4]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-3:]
## moves[i] = 4
## else:
## ## skip 5
## seq += states[statepath[i]][end-4:]
## moves[i] = 5
## ## ELSE::: do what? ... in other one just added centroid seq regardless...
#### elif posterior_decoded:
#### seq += states[statepath[i]][end]
#### moves[i] = -1
#### ## -1 means it was an "illegal" move (move to a kmer that does not overlap by k-1 or k-2)
## ## it turns out that adding the base from the illegal move does not hurt the seq overall much
## return seq, moves
def update_table_dict(d,l,keys, length=None):
'''d is dict to update, l is list of values for k, the keys
assumes l and d are of same length
assumes k and l are paired by shared index'''
if not length:
length = len(l)
for i in range(length):
d[keys[i]].append(l[i])
return d
def read_table(fh, keys, types=None):
'''fh is a file path to a tsv file. keys are column names.
    lengths of types and keys = number of columns in the table
both keys and types should appear in same order as columns'''
length = len(keys)
if not types:
types = [str]*length
    print(types)
data = open(fh).readlines()
table = defaultdict(list)
for i in range(len(data)):
line = data[i].strip().split("\t")
line = [types[j](line[j]) for j in range(len(line))]
table = update_table_dict(table,line,keys, length)
return table
def read_model_file(model_file, variantcolumn=False):
if variantcolumn:
keys = ["kmer","variant","level_mean","level_stdv","sd_mean","sd_stdv","weight"]
types = [str] + [float]*6
else:
keys = ["kmer","level_mean","level_stdv","sd_mean","sd_stdv","weight"]
types = [str] + [float]*5
return read_table(model_file, keys, types)
def read_events_file(events_file, input_events=False):
''' file may contain input, template, or complement events '''
if input_events:
keys = ["mean", "stddev", "start", "length"]
types = [float]*4
else:
keys = ["mean", "stddev", "start", "length", "model_state", "model_level", "move", "p_model_state", "mp_state", "p_mp_state", "p_A", "p_C", "p_G", "p_T"]
types = [float]*4 + [str] + [float]*3 + [str] + [float]*5
return read_table(events_file, keys, types)
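## Hedged usage sketch (not from the original script; the file names below are
## hypothetical placeholders). read_model_file() and read_events_file() just pass
## the appropriate column keys and types to read_table(), which returns a
## defaultdict(list) keyed by column name.
##model = read_model_file("template_model.tsv")    ## e.g. model["kmer"][0], model["level_mean"][0]
##events = read_events_file("template_events.tsv") ## parsed basecalled events
##inputs = read_events_file("input_events.tsv", input_events=True)  ## only mean/stddev/start/length columns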
def lead_hmm(first_50_events):
## 15 states: 0:14, states 0:13 part of lead profile, state14 is end/template state
emit_probs = np.zeros([2,15])
## means
emit_probs[0,:] = [43.93368, 51.82074, 66.3531, 76.30256, 84.15992, 89.97542, 96.22626, 100.97302, 107.33552, 100.54961, 75.71837, 46.63833, 57.33411, 43.53527, 60.0]
## stdevs
emit_probs[1,:] = [2.097209, 3.526526, 2.809502, 1.954605, 1.857928, 1.793586, 1.163202, 1.120078, 2.364349, 2.866541, 13.945599, 1.991525, 16.866727, 2.678975, 5.0]
## initial probs - can start anywhere in profile, but mostly first 3 states
init_probs = np.array([0.4,0.3,0.2,0,0,0,0,0,0,0,0,0,0,0,0])+0.001
init_probs = init_probs/sum(init_probs)
## trans probs -- mostly trans to next state, but can skip states, also somewhat likely to stay in same state
tran_probs = np.zeros([15,15])
tran_probs[14,14] = 1.0
for i in range(14): tran_probs[i,i] = 0.3
for i in range(13): tran_probs[i,i+1] = 0.35
for i in range(12): tran_probs[i,i+2] = 0.2
for i in range(11): tran_probs[i,i+3] = 0.1
for i in range(10): tran_probs[i,i+4] = 0.001
for i in range(9): tran_probs[i,i+5] = 0.001
for i in range(8): tran_probs[i,i+6] = 0.001
for i in range(7): tran_probs[i,i+7] = 0.001
## for now only last 3 states transition to end state
tran_probs[11,14] = 0.05
tran_probs[12,14] = 0.1
tran_probs[13,14] = 0.2
## normalize all rows to 1
for i in range(14): tran_probs[i,:] = tran_probs[i,:]/sum(tran_probs[i,:])
## get viterbi path for lead adapter coordinates
vpath, vprob = viterbi(emission_probs = emit_probs, tran_probs = tran_probs, initial_probs = init_probs, states = range(15), emitted_data = first_50_events)
template_start = 0
try:
while vpath[template_start] != 14:
template_start += 1
except IndexError: ## if profile HMM does not find template start in 1st 50, then assume start is at 50
template_start = 50
return template_start, vpath
def hp_hmm(events,trim=5):
## state B,1,2,3,4,5,E = 7 states
emit_probs = np.zeros([2,7])
emit_probs[0,] = [65.0, 93.78638, 117.49618, 100.67429, 60.19801, 46.50402, 65.0]
emit_probs[1,] = [6.0, 6.787453, 8.665963, 4.354063, 6.305904, 1.931336, 6.0]
init_probs = np.array([0.7,0.2,0.1,0,0,0,0])
init_probs = init_probs/sum(init_probs)
tran_probs = np.zeros([7,7])
tran_probs[6,6] = 1.0
for i in range(7): tran_probs[i,i] = 0.3
for i in range(6): tran_probs[i,i+1] = 0.35
for i in range(5): tran_probs[i,i+2] = 0.2
for i in range(4): tran_probs[i,i+3] = 0.1
for i in range(3): tran_probs[i,i+4] = 0.001
for i in range(2): tran_probs[i,i+5] = 0.001
tran_probs[3,6] = 0.05
tran_probs[4,6] = 0.1
tran_probs[4,5] = 0.7 ## state 4 usually goes directly to 5 (occasionally 2 events, but have not seen more -- all other states tend to stay in-state longer)
for i in range(7): tran_probs[i,] = tran_probs[i,:]/sum(tran_probs[i,:])
vpath, vprob = viterbi(emission_probs = emit_probs, tran_probs = tran_probs, initial_probs = init_probs, states = range(7), emitted_data = events)
hpstart = 0
while vpath[hpstart] != 1:
hpstart += 1
hpend = len(vpath)-1
while vpath[hpend] != 5:
hpend -= 1
return hpstart-trim, hpend+trim, vpath
## To help with writing ####################################################################
#### This will help you learn how the functions are used.
##emis=generate_kmer_emission_probs(trimers)
##tran=generate_random_kmer_transition_probs(trimers)
##init=generate_kmer_initial_probs(trimers)
##sp=generate_statepath(tran,init,trimers)
##em=generate_emissions_from_statepath(emis,sp)
##f,fs=forward(emis, tran, init, trimers, em)
##b,bs=backward(emis, tran, init, trimers, em)
##vpath,vprob=viterbi(emis,tran,init,trimers,em)
##postpath=posterior_decoding(f,fs,b,bs,trimers)
##print "comparison, edit_dist, ident, pctID"
##print "posterior decoded path:", compare_statepath(sp,postpath)
##print "viterbi decoded path:", compare_statepath(sp,vpath)
##print "posterior vs. viterbi:", compare_statepath(postpath,vpath)
##aseq,amoves=get_sequence_withgaps(trimers,sp)
##vseq,vmoves=get_sequence_withgaps(trimers,vpath)
##pseq,pmoves=get_sequence_withgaps(trimers,postpath,posterior_decoded=True)
####print "ans-vs-p: edit dist, pctID =", compare_seq_nwa(vseq,sp)
####print "ans-vs-v: edit dist, pctID =", compare_seq_nwa(pseq,sp)
##print "a-vs-p: edit dist, pctID =", compare_seq_nwa(vseq,aseq)
##print "a-vs-v: edit dist, pctID =", compare_seq_nwa(pseq,aseq)
##print "v-vs-p: edit dist, pctID =", compare_seq_nwa(vseq,pseq)
##print "amoves:", amoves
##print "vmoves:", vmoves
##print "pmoves:", pmoves
### More realistic example -- read in...
def test_viterbi(states=trimers, length=10):
emis=generate_kmer_emission_probs(states)
tran=generate_random_kmer_transition_probs(states)
init=generate_kmer_initial_probs(states)
sp=generate_statepath(tran,init,states,length=length)
em=generate_emissions_from_statepath(emis,sp)
return viterbi(emis,tran,init,states,em)
def simulate(states=fivemers, length=10):
emis=generate_kmer_emission_probs(states)
tran=generate_random_kmer_transition_probs(states)
init=generate_kmer_initial_probs(states)
sp=generate_statepath(tran,init,states,length=length)
em=generate_emissions_from_statepath(emis,sp)
print "forward..."
start=time.time()
f,fs=forward(emis,tran,init,states,em)
end=time.time()
f1=end-start
print "...operation took ", f1, " seconds...."
print "backward..."
start=time.time()
b,bs=backward(emis,tran,init,states,em)
end=time.time()
b1=end-start
print "...operation took ", b1, " seconds...."
print "post..."
start=time.time()
postpath=posterior_decoding(f,fs,b,bs,states)
end=time.time()
print "...operation took ", end-start, " seconds...."
print "viterbi..."
start=time.time()
vpath,vprob=viterbi(emis,tran,init,states,em)
end=time.time()
v1=end-start
print "...operation took ", v1, " seconds...."
print ("").join([str(e) for e in ["...viterbi is ", v1/f1, "x and ", v1/b1, "x slower than F and B respectively"]])
print "posterior path vs known:", compare_statepath(sp,postpath)
print "viterbi path vs known:", compare_statepath(sp,vpath)
print "posterior vs viterbi:", compare_statepath(postpath,vpath)
def simulate_delete_me(states=trimers, length=10):
emis=generate_kmer_emission_probs(states)
tran=generate_random_kmer_transition_probs(states)
init=generate_kmer_initial_probs(states)
sp=generate_statepath(tran,init,states,length=length)
em=generate_emissions_from_statepath(emis,sp)
print "forward..."
start=time.time()
f,fs=forward(emis,tran,init,states,em)
end=time.time()
f1=end-start
print "...operation took ", f1, " seconds...."
print "backward..."
start=time.time()
b,bs=backward(emis,tran,init,states,em)
end=time.time()
b1=end-start
print "...operation took ", b1, " seconds...."
print "post..."
start=time.time()
postpath=posterior_decoding(f,fs,b,bs,states)
end=time.time()
print "...operation took ", end-start, " seconds...."
print "viterbi..."
start=time.time()
vpath,vprob=viterbi(emis,tran,init,states,em)
end=time.time()
v1=end-start
print "...operation took ", v1, " seconds...."
print "viterbi_fast..."
start=time.time()
v2path,v2pr=viterbi_fast(emis,tran,init,states,em)
end=time.time()
v2=end-start
print "...operation took ", v2, " seconds...."
print "...new viterbi ", v1/v2, "x faster than old one..."
print "...new viterbi is ", v2/f1, " x and ", v2/b1, "x slower than F and B respectively"
print "posterior path vs known:", compare_statepath(sp,postpath)
print "viterbi path vs known:", compare_statepath(sp,vpath)
print "viterbi_fast path vs known:", compare_statepath(sp,v2path)
print "posterior vs viterbi:", compare_statepath(postpath,vpath)
print "viterbi path vs viterbi fast path:", compare_statepath(vpath,v2path)
print "posterior vs viterbi fast:", compare_statepath(postpath,v2path)
|
|
# -*- coding: utf-8 -*-
# Author: Tonio Teran <tonio@stateoftheart.ai>
# Author: Hugo Ochoa <hugo@stateoftheart.ai>
# Copyright: Stateoftheart AI PBC 2021.
'''Unit testing the Keras wrapper.'''
import os
import unittest
import numpy as np
import inspect
from tensorflow.python.keras.engine.functional import Functional
from sotaai.cv import load_dataset, load_model, keras_wrapper, utils, model_to_dataset
from sotaai.cv.abstractions import CvDataset, CvModel
from sotaai.cv import metadata
#
# @author HO
# Just to prevent the Keras library from printing warnings and extra logging data...
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class TestKerasWrapper(unittest.TestCase):
'''Test the wrapped Keras module.
  For Keras, we test against all datasets and models since there are only a few
  and they fit in memory (CI server)
'''
# @unittest.SkipTest
def test_load_dataset(self):
'''Make sure `dict`s are returned, with correct keywords for splits.
'''
for task in keras_wrapper.DATASETS:
datasets = keras_wrapper.DATASETS[task]
for dataset_name in datasets:
dataset = keras_wrapper.load_dataset(dataset_name)
self.assertEqual(type(dataset), dict)
for split in dataset:
self.assertEqual(tuple, type(dataset[split]))
self.assertEqual(len(dataset[split]), 2)
self.assertEqual(np.ndarray, type(dataset[split][0]))
self.assertEqual(np.ndarray, type(dataset[split][1]))
# @unittest.SkipTest
def test_load_model(self):
'''Make sure that we can load every model from the Keras module.'''
for task in keras_wrapper.MODELS:
for model_name in keras_wrapper.MODELS[task]:
model = keras_wrapper.load_model(model_name)
#
# @author HO
# Test the returned model against tf.Keras.Model functional as
# documented in
# https://www.tensorflow.org/api_docs/python/tf/keras/Model#top_of_page
#
self.assertIsInstance(model, Functional)
self.assertEqual(inspect.ismethod(model.compile), True)
self.assertEqual(inspect.ismethod(model.fit), True)
self.assertEqual(inspect.ismethod(model.predict), True)
self.assertEqual(inspect.ismethod(model.summary), True)
self.assertEqual(inspect.ismethod(model.save), True)
# @unittest.SkipTest
def test_abstract_dataset(self):
'''Make sure we can create an abstract dataset using
Keras datasets.
'''
for task in keras_wrapper.DATASETS:
datasets = keras_wrapper.DATASETS[task]
for dataset_name in datasets:
dso = load_dataset(dataset_name)
for split_name in dso:
cv_dataset = dso[split_name]
self.assertEqual(CvDataset, type(cv_dataset))
self.assertEqual(cv_dataset.source, 'keras')
iterable_dataset = iter(cv_dataset)
datapoint = next(iterable_dataset)
dataset_metadata = metadata.get('datasets', name=dataset_name)
self.assertEqual(np.ndarray, type(datapoint['image']))
self.assertEqual('label' in datapoint, True)
self.assertEqual(
utils.compare_shapes(dataset_metadata['metadata']['image'],
datapoint['image'].shape), True)
self.assertEqual(
utils.compare_shapes(dataset_metadata['metadata']['label'],
datapoint['label'].shape), True)
# @unittest.SkipTest
def test_abstract_model(self):
'''Make sure we can create an abstract model using
    Keras models.
'''
for task in keras_wrapper.MODELS:
for model_name in keras_wrapper.MODELS[task]:
cv_model = load_model(model_name, 'keras')
self.assertEqual(CvModel, type(cv_model))
self.assertEqual(cv_model.source, 'keras')
self.assertEqual(cv_model.original_input_type, 'numpy.ndarray')
def test_model_to_dataset(self):
'''Make sure model_to_dataset is working properly for those models whose
source is Keras.
'''
def single_test(model_name, dataset_name):
      '''This is an inner function that tests model_to_dataset for a single case,
      i.e. a single model against a single dataset
'''
print('\n---')
cv_model = load_model(model_name, 'keras')
dataset_splits = load_dataset(dataset_name)
split_name = next(iter(dataset_splits.keys()))
cv_dataset = dataset_splits[split_name]
cv_model, cv_dataset = model_to_dataset(cv_model, cv_dataset)
      # Assert the model channels (expected to be 3 for all Keras models), that
      # the model input shape and dataset shape are now compatible, and that the
      # model output shape is now compatible with the dataset classes
self.assertEqual(cv_dataset.shape[-1], 3)
self.assertEqual(
utils.compare_shapes(cv_model.original_input_shape, cv_dataset.shape),
True)
self.assertEqual(cv_model.original_output_shape, cv_dataset.classes_shape)
# For some image samples, assert dataset sample shapes matched the
# cv_dataset.shape, and then assert predictions shape (model output)
# matches the expected classes
print('Testing model_to_dataset predictions...')
# TODO(Hugo)
      # Test with batches of more than 1 image. If dataset images have different
      # sizes, we have to preprocess all of them to the same size so that they
      # can be passed to the model, e.g. caltech_birds2010 does not work for
      # batches of n=3 since its images have different sizes.
n = 1
sample = []
for i, item in enumerate(cv_dataset):
if i == n:
break
self.assertEqual(
utils.compare_shapes(cv_dataset.shape, item['image'].shape), True,
'Dataset shape {} is not equal to item shape {}'.format(
cv_dataset.shape, item['image'].shape))
image_sample = item['image']
sample.append(image_sample)
sample = np.array(sample)
print(' => Making predictions with batch {}'.format(sample.shape))
predictions = cv_model(sample)
expected_predictions_shape = (n,) + cv_dataset.classes_shape
self.assertEqual(
utils.compare_shapes(expected_predictions_shape, predictions.shape),
True, 'Expected shape {} is not equal to prediction shape {}'.format(
expected_predictions_shape, predictions.shape))
# Test all Keras models against all Keras datasets and a set of
# Tensorflow datasets (beans and omniglot as of now)
dataset_names = []
for task in keras_wrapper.DATASETS:
for dataset_name in keras_wrapper.DATASETS[task]:
dataset_names.append(dataset_name)
# TODO(Hugo)
# Manually test all Tensorflow datasets (the issue here is that TF datasets
# need to fit in memory). Test dataset by dataset and delete them as they
    # pass tests... or think about how to better test all Tensorflow datasets
tensorflow_datasets_names = [
'beans',
'omniglot',
'binary_alpha_digits',
'caltech_birds2010',
'caltech_birds2011',
'cars196',
'cats_vs_dogs',
'cmaterdb',
'colorectal_histology',
        # 'colorectal_histology_large', # unknown classes shape
'cycle_gan',
# 'diabetic_retinopathy_detection', # manual download
# 'downsampled_imagenet', # Error source
'dtd',
'emnist',
'eurosat',
'food101',
]
dataset_names = dataset_names + tensorflow_datasets_names
# TODO(Hugo)
    # If a model_to_dataset case takes longer than expected to fix, it is
# logged here so it can be skipped and fixed in the near future:
current_issues = {
'NASNetMobile': ['caltech_birds2010', 'caltech_birds2011', 'cars196'],
'NASNetLarge': ['caltech_birds2010', 'caltech_birds2011', 'cars196']
}
for task in keras_wrapper.MODELS:
for model_name in keras_wrapper.MODELS[task]:
model_current_issues = current_issues[
model_name] if model_name in current_issues else []
for dataset_name in dataset_names:
if dataset_name in model_current_issues:
            print('Skipping due to current issue {} vs {}'.format(
model_name, dataset_name))
continue
single_test(model_name, dataset_name)
# Uncomment the next line to test a particular case of model_to_dataset:
# single_test('InceptionResNetV2', 'colorectal_histology_large')
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
"""
Skeleton code for k-means clustering mini-project.
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
### plot each cluster with a different color--add more colors for
### drawing more than five clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
feature_3 = "total_payments"
poi = "poi"
features_list = [poi, feature_1, feature_2, feature_3]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, the line below assumes 2 features)
for f1, f2, _ in finance_features:
plt.scatter( f1, f2 )
plt.show()
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
from sklearn.cluster import KMeans
features_list = ["poi", feature_1, feature_2, feature_3]
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2)
pred = clf.fit_predict( finance_features )
Draw(pred, finance_features, poi, name="clusters_before_scaling.png", f1_name=feature_1, f2_name=feature_2)
### rename the "name" parameter when you change the number of features
### so that the figure gets saved to a different file
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print "no predictions object named pred found, no clusters to plot"
from sklearn.preprocessing import MinMaxScaler
stock = []
for i in data_dict:
if (data_dict[i]["exercised_stock_options"]=='NaN'):
#stock.append(0.0)
pass
else:
stock.append(float(data_dict[i]["exercised_stock_options"]))
ma = max(stock)
mi = min(stock)
print "Exercised stock options maximum: ", ma, " minimum: ", mi
print "Rescaled Stock Options Value: ", float(1000000-mi)/(ma-mi)
salary = []
for i in data_dict:
if (data_dict[i][feature_1]=='NaN'):
# salary.append(0.0)
pass
else:
salary.append(float(data_dict[i][feature_1]))
ma= max(salary)
mi=min(salary)
print "Exercised salary options maximum: ", ma, " minimum: ", mi
print "Rescaled Salary Value: ", float(200000-mi)/(ma-mi)
|
|
"""
Stores utilities for use with lg.py and methods.py
"""
import csv
import numpy as np
import random
from tabulate import tabulate
import matplotlib.pyplot as plt
def testresultsfiletotable(testDataFile, transitionMatrixFile='', csvName=True):
"""
Takes a CSV file name as input and returns a usable Python dictionary of
testing results, in addition to lists of the outlet names and importer names,
depending on whether tracked or untracked data was entered.
INPUTS
------
    testDataFile: CSV file name string (if csvName=True) or Python list (if csvName=False)
CSV file must be located within the current working directory when
testresultsfiletotable() is called. There should not be a header row.
Each row of the file should signify a single sample point.
For tracked data, each row should have three columns, as follows:
column 1: string; Name of outlet/lower echelon entity
column 2: string; Name of importer/upper echelon entity
column 3: integer; 0 or 1, where 1 signifies aberration detection
For untracked data, each row should have two columns, as follows:
column 1: string; Name of outlet/lower echelon entity
column 2: integer; 0 or 1, where 1 signifies aberration detection
    transitionMatrixFile: CSV file name string (if csvName=True) or Python list (if csvName=False)
If using tracked data, leave transitionMatrixFile=''.
CSV file must be located within the current working directory when
testresultsfiletotable() is called. Columns and rows should be named,
with rows correspodning to the outlets (lower echelon), and columns
corresponding to the importers (upper echelon). It will be checked
that no entity occurring in testDataFile is not accounted for in
transitionMatrixFile. Each outlet's row should correspond to the
likelihood of procurement from the corresponding importer, and should
sum to 1. No negative values are permitted.
csvName: Boolean indicating whether the inputs are CSV file names (True) or Python lists (False)
OUTPUTS
-------
Returns dataTblDict with the following keys:
dataTbl: Python list of testing results, with each entry organized as
[OUTLETNAME, IMPORTERNAME, TESTRESULT] (for tracked data) or
[OUTLETNAME, TESTRESULT] (for untracked data)
type: 'Tracked' or 'Untracked'
        transMat: Numpy matrix of the transition likelihoods between outlets and importers (all zeros for Tracked data)
outletNames: Sorted list of unique outlet names
importerNames: Sorted list of unique importer names
"""
dataTblDict = {}
if csvName == True:
dataTbl = [] # Initialize list for raw data
try:
with open(testDataFile, newline='') as file:
reader = csv.reader(file)
for row in reader:
row[-1] = int(row[-1]) # Convert results to integers
dataTbl.append(row)
except FileNotFoundError:
print('Unable to locate file ' + str(testDataFile) + ' in the current directory.' + \
' Make sure the directory is set to the location of the CSV file.')
return
except ValueError:
print('There seems to be something wrong with your data. Check that' + \
' your CSV file is correctly formatted, with each row having' + \
' entries [OUTLETNAME,IMPORTERNAME,TESTRESULT], and that the' + \
' test results are all either 0 or 1.')
return
else: # csvName is False
dataTbl = testDataFile
# Grab list of unique outlet and importer names
outletNames = []
importerNames = []
for row in dataTbl:
if row[0] not in outletNames:
outletNames.append(row[0])
if transitionMatrixFile == '':
if row[1] not in importerNames:
importerNames.append(row[1])
outletNames.sort()
importerNames.sort()
if not transitionMatrixFile == '':
if csvName == True:
dataTblDict['type'] = 'Untracked'
try:
with open(transitionMatrixFile, newline='') as file:
reader = csv.reader(file)
counter = 0
for row in reader:
if counter == 0:
importerNames = row[1:]
transitionMatrix = np.zeros(shape=(len(outletNames), len(importerNames)))
else:
transitionMatrix[counter - 1] = np.array([float(row[i]) \
for i in range(1, len(importerNames) + 1)])
counter += 1
dataTblDict['transMat'] = transitionMatrix
except FileNotFoundError:
                print('Unable to locate file ' + str(transitionMatrixFile) + ' in the current directory.' + \
' Make sure the directory is set to the location of the CSV file.')
return
except ValueError:
print('There seems to be something wrong with your transition matrix. Check that' + \
' your CSV file is correctly formatted, with only values between' + \
' 0 and 1 included.')
return
else: # csvName is False
transitionMatrix = transitionMatrixFile
dataTblDict['transMat'] = transitionMatrix
else:
dataTblDict['type'] = 'Tracked'
dataTblDict['transMat'] = np.zeros(shape=(len(outletNames), len(importerNames)))
dataTblDict['dataTbl'] = dataTbl
dataTblDict['outletNames'] = outletNames
dataTblDict['importerNames'] = importerNames
# Generate necessary Tracked/Untracked matrices necessary for different methods
dataTblDict = GetVectorForms(dataTblDict)
return dataTblDict
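# Hedged usage sketch (illustrative only, not part of the original module):
# passing an in-memory list with csvName=False skips the CSV-parsing branch.
# The rows below are made-up Tracked results of the form
# [OUTLETNAME, IMPORTERNAME, TESTRESULT].
# exampleTbl = [['Outlet 1', 'Importer 1', 1],
#               ['Outlet 1', 'Importer 2', 0],
#               ['Outlet 2', 'Importer 1', 0]]
# exampleDict = testresultsfiletotable(exampleTbl, csvName=False)
# exampleDict['type']                 # 'Tracked', since no transition matrix file was given
# exampleDict['N'], exampleDict['Y']  # 2x2 count matrices added by GetVectorForms()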
def GetVectorForms(dataTblDict):
"""
Takes a dictionary that has a list of testing results and appends the N,Y
matrices/vectors necessary for the Tracked/Untracked methods.
For Tracked, element (i,j) of N/Y signifies the number of samples/aberrations
collected from each (outlet i, importer j) track.
For Untracked, element i of N/Y signifies the number of samples/aberrations
collected from each outlet i.
INPUTS
------
Takes dataTblDict with the following keys:
type: string
'Tracked' or 'Untracked'
dataTbl: list
If Tracked, each list entry should have three elements, as follows:
Element 1: string; Name of outlet/lower echelon entity
Element 2: string; Name of importer/upper echelon entity
Element 3: integer; 0 or 1, where 1 signifies aberration detection
If Untracked, each list entry should have two elements, as follows:
Element 1: string; Name of outlet/lower echelon entity
Element 2: integer; 0 or 1, where 1 signifies aberration detection
outletNames/importerNames: list of strings
OUTPUTS
-------
Appends the following keys to dataTblDict:
N: Numpy matrix/vector where element (i,j)/i corresponds to the number
of tests done from the (outlet i, importer j) path/from outlet i,
for Tracked/Untracked
Y: Numpy matrix/vector where element (i,j)/i corresponds to the number
of test positives from the (outlet i, importer j) path/from outlet i,
for Tracked/Untracked
"""
if not all(key in dataTblDict for key in ['type', 'dataTbl', 'outletNames',
'importerNames']):
print('The input dictionary does not contain all required information.' +
' Please check and try again.')
return {}
outletNames = dataTblDict['outletNames']
importerNames = dataTblDict['importerNames']
dataTbl = dataTblDict['dataTbl']
# Initialize N and Y
if dataTblDict['type'] == 'Tracked':
N = np.zeros(shape=(len(outletNames), len(importerNames)))
Y = np.zeros(shape=(len(outletNames), len(importerNames)))
for row in dataTbl:
N[outletNames.index(row[0]), importerNames.index(row[1])] += 1
Y[outletNames.index(row[0]), importerNames.index(row[1])] += row[2]
elif dataTblDict['type'] == 'Untracked':
N = np.zeros(shape=(len(outletNames)))
Y = np.zeros(shape=(len(outletNames)))
for row in dataTbl:
N[outletNames.index(row[0])] += 1
Y[outletNames.index(row[0])] += row[1]
dataTblDict.update({'N': N, 'Y': Y})
return dataTblDict
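# Hedged sketch (illustrative only): for Untracked data each row is
# [OUTLETNAME, TESTRESULT], and GetVectorForms() collapses the counts into
# per-outlet vectors rather than outlet-by-importer matrices.
# untrackedDict = {'type': 'Untracked',
#                  'dataTbl': [['Outlet 1', 1], ['Outlet 1', 0], ['Outlet 2', 1]],
#                  'outletNames': ['Outlet 1', 'Outlet 2'],
#                  'importerNames': ['Importer 1']}
# untrackedDict = GetVectorForms(untrackedDict)
# untrackedDict['N']  # array([2., 1.]);  untrackedDict['Y']  # array([1., 1.])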
def generateRandSystem(numImp=20, numOut=100, sourcingMatLambda=1.1, randSeed=-1,trueRates=[]):
'''
Randomly generates a two-echelon system with the entered characteristics.
INPUTS
------
Takes the following arguments:
numImp, numOut: integer
Number of importers and outlets
        sourcingMatLambda: float
The parameter for the Pareto distribution that generates the sourcing matrix
randSeed: integer
        trueRates: list
            Vector of true SFP manifestation rates to use; generated randomly from
            a beta(1,7) distribution otherwise
OUTPUTS
-------
Returns systemDict dictionary with the following keys:
outletNames/importerNames: list of strings
sourcingMat: Numpy matrix
Matrix of sourcing probabilities between importers and outlets
trueRates: list
List of true SFP manifestation rates, in [importers, outlets] form
'''
systemDict = {}
impNames = ['Importer ' + str(i + 1) for i in range(numImp)]
outNames = ['Outlet ' + str(i + 1) for i in range(numOut)]
# Generate random true SFP rates
if trueRates == []:
trueRates = np.zeros(numImp + numOut) # importers first, outlets second
if randSeed >= 0:
random.seed(randSeed)
trueRates[:numImp] = [random.betavariate(1, 7) for i in range(numImp)]
trueRates[numImp:] = [random.betavariate(1, 7) for i in range(numOut)]
# Generate random transition matrix
sourcingMat = np.zeros(shape=(numOut, numImp))
if randSeed >= 0:
random.seed(randSeed + 1) # Distinguish this seed from the one generating the true SFP rates
for outInd in range(numOut):
rowRands = [random.paretovariate(sourcingMatLambda) for i in range(numImp)]
if numImp > 10: # Only keep 10 randomly chosen importers, if numImp > 10
rowRands[10:] = [0.0 for i in range(numImp - 10)]
random.shuffle(rowRands)
normalizedRands = [rowRands[i] / sum(rowRands) for i in range(numImp)]
# only keep transition probabilities above 2%
# normalizedRands = [normalizedRands[i] if normalizedRands[i]>0.02 else 0.0 for i in range(numImp)]
# normalizedRands = [normalizedRands[i] / sum(normalizedRands) for i in range(numImp)]
sourcingMat[outInd, :] = normalizedRands
# Update dictionary before returning
systemDict.update({'outletNames': outNames, 'importerNames': impNames,
'sourcingMat': sourcingMat, 'trueRates': trueRates})
return systemDict
def generateRandDataDict(numImp=5, numOut=50, diagSens=0.90,
diagSpec=0.99, numSamples=50 * 20,
dataType='Tracked', transMatLambda=1.1,
randSeed=-1,trueRates=[]):
"""
Randomly generates an example input data dictionary for the entered inputs.
    SFP rates are generated according to a beta(1,9) distribution, while
transition rates are distributed according to a scaled Pareto(1.1) distribution.
INPUTS
------
Takes the following arguments:
numImp, numOut: integer
Number of importers and outlets
diagSens, diagSpec: float
Diagnostic sensitivity, specificity
numSamples: integer
Total number of data points to generate
dataType: string
'Tracked' or 'Untracked'
OUTPUTS
-------
Returns dataTblDict dictionary with the following keys:
dataTbl: list
If Tracked, each list entry should have three elements, as follows:
Element 1: string; Name of outlet/lower echelon entity
Element 2: string; Name of importer/upper echelon entity
Element 3: integer; 0 or 1, where 1 signifies aberration detection
If Untracked, each list entry should have two elements, as follows:
Element 1: string; Name of outlet/lower echelon entity
Element 2: integer; 0 or 1, where 1 signifies aberration detection
outletNames/importerNames: list of strings
transMat: Numpy matrix
Matrix of transition probabilities between importers and outlets
diagSens, diagSpec, type
From inputs, where 'type' = 'dataType'
"""
dataTblDict = {}
impNames = ['Importer ' + str(i + 1) for i in range(numImp)]
outNames = ['Outlet ' + str(i + 1) for i in range(numOut)]
# Generate random true SFP rates
if trueRates == []:
trueRates = np.zeros(numImp + numOut) # importers first, outlets second
if randSeed >= 0:
random.seed(randSeed)
trueRates[:numImp] = [random.betavariate(1, 9) for i in range(numImp)]
trueRates[numImp:] = [random.betavariate(1, 9) for i in range(numOut)]
# Generate random transition matrix
transMat = np.zeros(shape=(numOut, numImp))
if randSeed >= 0:
random.seed(randSeed + 1)
for outInd in range(numOut):
rowRands = [random.paretovariate(transMatLambda) for i in range(numImp)]
if numImp > 10: # Only keep 10 randomly chosen importers, if numImp > 10
rowRands[10:] = [0.0 for i in range(numImp - 10)]
random.shuffle(rowRands)
normalizedRands = [rowRands[i] / sum(rowRands) for i in range(numImp)]
# only keep transition probabilities above 2%
# normalizedRands = [normalizedRands[i] if normalizedRands[i]>0.02 else 0.0 for i in range(numImp)]
# normalizedRands = [normalizedRands[i] / sum(normalizedRands) for i in range(numImp)]
transMat[outInd, :] = normalizedRands
# np.linalg.det(transMat.T @ transMat) / numOut
# 1.297 for n=50
# Generate testing data
testingDataList = []
if dataType == 'Tracked':
if randSeed >= 0:
random.seed(randSeed + 2)
np.random.seed(randSeed)
for currSamp in range(numSamples):
currOutlet = random.sample(outNames, 1)[0]
currImporter = random.choices(impNames, weights=transMat[outNames.index(currOutlet)], k=1)[0]
currOutRate = trueRates[numImp + outNames.index(currOutlet)]
currImpRate = trueRates[impNames.index(currImporter)]
realRate = currOutRate + currImpRate - currOutRate * currImpRate
realResult = np.random.binomial(1, p=realRate)
if realResult == 1:
result = np.random.binomial(1, p=diagSens)
if realResult == 0:
result = np.random.binomial(1, p=1 - diagSpec)
testingDataList.append([currOutlet, currImporter, result])
elif dataType == 'Untracked':
if randSeed >= 0:
random.seed(randSeed + 3)
np.random.seed(randSeed)
for currSamp in range(numSamples):
currOutlet = random.sample(outNames, 1)[0]
currImporter = random.choices(impNames, weights=transMat[outNames.index(currOutlet)], k=1)[0]
currOutRate = trueRates[numImp + outNames.index(currOutlet)]
currImpRate = trueRates[impNames.index(currImporter)]
realRate = currOutRate + currImpRate - currOutRate * currImpRate
realResult = np.random.binomial(1, p=realRate)
if realResult == 1:
result = np.random.binomial(1, p=diagSens)
if realResult == 0:
result = np.random.binomial(1, p=1 - diagSpec)
testingDataList.append([currOutlet, result])
dataTblDict.update({'outletNames': outNames, 'importerNames': impNames,
                        'diagSens': diagSens, 'diagSpec': diagSpec, 'type': dataType,
'dataTbl': testingDataList, 'transMat': transMat,
'trueRates': trueRates})
return dataTblDict
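# Hedged sketch (illustrative only): generateRandDataDict() builds a synthetic
# logistigate-style dictionary; GetVectorForms() then adds the N/Y count
# matrices, and Summarize() (defined further below) prints an overview.
# randDict = generateRandDataDict(numImp=5, numOut=20, numSamples=200, randSeed=3)
# randDict = GetVectorForms(randDict)
# randDict['N'].shape  # (20, 5) for Tracked data
# Summarize(randDict)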
def scorePostSamplesIntervals(logistigateDict):
"""
Checks if posterior aberration rate sample intervals contain the underlying
generative aberration rates
INPUTS
------
logistigateDict with the following keys:
postSamples: List of posterior sample lists, with importer values entered first.
trueRates: List of true underlying poor-quality rates
OUTPUTS
-------
    logistigateDict with the following keys added:
        numInInt90, numInInt95, numInInt99: Number of true rates contained in
            the 90%, 95%, and 99% posterior intervals
        gnLoss_90, gnLoss_95, gnLoss_99: Gneiting interval scores for the 90%,
            95%, and 99% intervals
"""
if not all(key in logistigateDict for key in ['trueRates', 'postSamples']):
print('The input dictionary does not contain all required information.' +
' Please check and try again.')
return {}
trueRates = logistigateDict['trueRates']
samples = logistigateDict['postSamples']
# trueRates and samples need to be ordered with importers first
numInInt90 = 0
numInInt95 = 0
numInInt99 = 0
gnLoss_90 = 0 # Gneiting loss
gnLoss_95 = 0
gnLoss_99 = 0
for entityInd in range(len(trueRates)):
currInt90 = [np.quantile(samples[:, entityInd], 0.05),
np.quantile(samples[:, entityInd], 0.95)]
currInt95 = [np.quantile(samples[:, entityInd], 0.025),
np.quantile(samples[:, entityInd], 0.975)]
currInt99 = [np.quantile(samples[:, entityInd], 0.005),
np.quantile(samples[:, entityInd], 0.995)]
currTR = trueRates[entityInd]
if currTR >= currInt90[0] and currTR <= currInt90[1]:
numInInt90 += 1
gnLoss_90 += (currInt90[1] - currInt90[0])
else:
gnLoss_90 += (currInt90[1] - currInt90[0]) + (2 / 0.1) * \
min(np.abs(currTR - currInt90[1]), np.abs(currTR - currInt90[0]))
if currTR >= currInt95[0] and currTR <= currInt95[1]:
numInInt95 += 1
gnLoss_95 += (currInt95[1] - currInt95[0])
else:
            gnLoss_95 += (currInt95[1] - currInt95[0]) + (2 / 0.05) * \
min(np.abs(currTR - currInt95[1]), np.abs(currTR - currInt95[0]))
if currTR >= currInt99[0] and currTR <= currInt99[1]:
numInInt99 += 1
gnLoss_99 += (currInt99[1] - currInt99[0])
else:
            gnLoss_99 += (currInt99[1] - currInt99[0]) + (2 / 0.01) * \
min(np.abs(currTR - currInt99[1]), np.abs(currTR - currInt99[0]))
logistigateDict.update({'numInInt90': numInInt90, 'numInInt95': numInInt95,
'numInInt99': numInInt99, 'gnLoss_90': gnLoss_90,
'gnLoss_95': gnLoss_95, 'gnLoss_99': gnLoss_99})
return logistigateDict
def plotPostSamples(logistigateDict, plotType='hist', importerIndsSubset=[],
outletIndsSubset=[], subTitleStr=['',''], sortBy = 'midpoint'):
'''
Plots the distribution of posterior aberration rate samples, with importer
and outlet distributions plotted distinctly.
INPUTS
------
logistigateDict with the following keys:
postSamples: List of posterior sample lists, with importer values entered first.
numImp: Number of importers/upper echelon entities
numOut: Number of outlets/lower echelon entities
plotType string from the following options:
'hist': histograms for importer entities and outlet entities
        'int90'/'int95'/'int99': plot of 90%/95%/99% credible intervals for the importer and outlet entities
importerIndsSubset, outletIndsSubset:
List of a subset of entities to be plotted
subTitleStr:
List of strings to be added to plot titles for importers, outlets
respectively
sortBy:
        'lower'/'upper'/'midpoint': Whether to sort interval plots by their lower, upper, or midpoint interval values
OUTPUTS
-------
No values are returned
'''
if not all(key in logistigateDict for key in ['importerNum', 'outletNum',
'postSamples' ]):
print('The input dictionary does not contain all required information.' +
' Please check and try again.')
return {}
numImp, numOut = logistigateDict['importerNum'], logistigateDict['outletNum']
if plotType == 'hist': # Plot histograms
if importerIndsSubset == []:
importerIndsSubset = range(numImp)
for i in importerIndsSubset:
plt.hist(logistigateDict['postSamples'][:, i], alpha=0.2)
plt.xlim([0,1])
plt.title('Importers'+subTitleStr[0], fontdict={'fontsize': 18})
plt.xlabel('SFP rate',fontdict={'fontsize': 14})
plt.ylabel('Posterior distribution frequency',fontdict={'fontsize': 14})
plt.show()
plt.close()
if outletIndsSubset == []:
outletIndsSubset = range(numOut)
for i in outletIndsSubset:
plt.hist(logistigateDict['postSamples'][:, numImp + i], alpha=0.2)
plt.xlim([0,1])
plt.title('Outlets'+subTitleStr[1], fontdict={'fontsize': 18})
plt.xlabel('SFP rate',fontdict={'fontsize': 14})
plt.ylabel('Posterior distribution frequency',fontdict={'fontsize': 14})
plt.show()
plt.close()
elif plotType == 'int90' or plotType == 'int95' or plotType == 'int99': # Plot 90%/95%/99% credible intervals, as well as the prior for comparison
if plotType == 'int90':
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
elif plotType == 'int95':
lowerQuant, upperQuant = 0.025, 0.975
intStr = '95'
elif plotType == 'int99':
lowerQuant, upperQuant = 0.005, 0.995
intStr = '99'
priorSamps = logistigateDict['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps,lowerQuant), np.quantile(priorSamps,upperQuant)
if importerIndsSubset == []:
importerIndsSubset = range(numImp)
impNames = [logistigateDict['importerNames'][i] for i in importerIndsSubset]
else:
impNames = [logistigateDict['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict['postSamples'][:, l], lowerQuant) for l in importerIndsSubset]
impUppers = [np.quantile(logistigateDict['postSamples'][:, l], upperQuant) for l in importerIndsSubset]
if sortBy == 'lower':
zippedList = zip(impLowers, impUppers, impNames)
elif sortBy == 'upper':
zippedList = zip(impUppers, impLowers, impNames)
elif sortBy == 'midpoint':
midpoints = [impUppers[i] - (impUppers[i]-impLowers[i])/2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
        impNamesSorted = [tup[-1] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
if sortBy == 'lower':
sorted_pairs.append((np.nan, np.nan, ' ')) # for spacing
for lower, upper, name in sorted_pairs:
plt.plot((name,name),(lower,upper),'o-',color='red')
elif sortBy == 'upper':
sorted_pairs.append((np.nan, np.nan, ' ')) # for spacing
for upper, lower, name in sorted_pairs:
plt.plot((name,name),(lower,upper),'o-',color='red')
elif sortBy == 'midpoint':
sorted_pairs.append((np.nan,np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0,1])
plt.xticks(range(len(impNamesSorted)),impNamesSorted,rotation=90)
plt.title('Importers - ' + intStr + '% Intervals'+subTitleStr[0], fontdict={'fontsize': 18, 'fontname':'Trebuchet MS'})
plt.xlabel('Importer Name', fontdict={'fontsize': 14,'fontname':'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname':'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(10)
fig.tight_layout()
plt.show()
plt.close()
if outletIndsSubset == []:
outletIndsSubset = range(numOut)
outNames = [logistigateDict['outletNames'][i] for i in outletIndsSubset]
else:
outNames = [logistigateDict['outletNames'][i] for i in outletIndsSubset]
outLowers = [np.quantile(logistigateDict['postSamples'][:, numImp + l], lowerQuant) for l in outletIndsSubset]
outUppers = [np.quantile(logistigateDict['postSamples'][:, numImp + l], upperQuant) for l in outletIndsSubset]
if sortBy == 'lower':
            zippedList = zip(outLowers, outUppers, outNames)
elif sortBy == 'upper':
            zippedList = zip(outUppers, outLowers, outNames)
elif sortBy == 'midpoint':
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
        outNamesSorted = [tup[-1] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
if sortBy == 'lower':
sorted_pairs.append((np.nan, np.nan, ' ')) # for spacing
for lower, upper, name in sorted_pairs:
plt.plot((name,name),(lower,upper), 'o-', color='purple')
elif sortBy == 'upper':
sorted_pairs.append((np.nan, np.nan, ' ')) # for spacing
for upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
elif sortBy == 'midpoint':
sorted_pairs.append((np.nan,np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name),(lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0,1])
plt.xticks(range(len(outNamesSorted)),outNamesSorted,rotation=90)
plt.title('Outlets - ' + intStr + '% Intervals'+subTitleStr[1], fontdict={'fontsize': 18, 'fontname':'Trebuchet MS'})
plt.xlabel('Outlet Name', fontdict={'fontsize': 14,'fontname':'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname':'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(10)
fig.tight_layout()
plt.show()
plt.close()
return
def printEstimates(logistigateDict,
importerIndsSubset=[], outletIndsSubset=[]):
'''
Prints a formatted table of an estimate dictionary.
INPUTS
------
    logistigateDict: Logistigate-type dictionary; must contain 'importerNames',
        'outletNames', and an 'estDict' of estimates and interval bounds
    importerIndsSubset, outletIndsSubset: Lists of entity indices to include in
        the printed tables (all entities are included if left empty)
OUTPUTS
-------
No values are returned; the contents of the estimate dictionary are printed
in a legible format.
'''
if not all(key in logistigateDict for key in ['outletNames', 'importerNames',
'estDict' ]):
print('The input dictionary does not contain all required information.' +
' Please check and try again.')
return {}
outNames, impNames = logistigateDict['outletNames'], logistigateDict['importerNames']
estDict = logistigateDict['estDict']
impMLE = np.ndarray.tolist(estDict['impEst'])
if importerIndsSubset==[]:
importerIndsSubset = range(len(impMLE))
imp99lower = np.ndarray.tolist(estDict['99lower_imp'])
imp95lower = np.ndarray.tolist(estDict['95lower_imp'])
imp90lower = np.ndarray.tolist(estDict['90lower_imp'])
imp99upper = np.ndarray.tolist(estDict['99upper_imp'])
imp95upper = np.ndarray.tolist(estDict['95upper_imp'])
imp90upper = np.ndarray.tolist(estDict['90upper_imp'])
impReport = [[impNames[i]] + ["{0:.1%}".format(impMLE[i])] +
["{0:.1%}".format(imp99lower[i])] + ["{0:.1%}".format(imp95lower[i])] +
["{0:.1%}".format(imp90lower[i])] + ["{0:.1%}".format(imp90upper[i])] +
["{0:.1%}".format(imp95upper[i])] + ["{0:.1%}".format(imp99upper[i])]
for i in importerIndsSubset]
outMLE = np.ndarray.tolist(estDict['outEst'])
if outletIndsSubset == []:
outletIndsSubset = range(len(outMLE))
out99lower = np.ndarray.tolist(estDict['99lower_out'])
out95lower = np.ndarray.tolist(estDict['95lower_out'])
out90lower = np.ndarray.tolist(estDict['90lower_out'])
out99upper = np.ndarray.tolist(estDict['99upper_out'])
out95upper = np.ndarray.tolist(estDict['95upper_out'])
out90upper = np.ndarray.tolist(estDict['90upper_out'])
outReport = [[outNames[i]] + ["{0:.1%}".format(outMLE[i])] +
["{0:.1%}".format(out99lower[i])] + ["{0:.1%}".format(out95lower[i])] +
["{0:.1%}".format(out90lower[i])] + ["{0:.1%}".format(out90upper[i])] +
["{0:.1%}".format(out95upper[i])] + ["{0:.1%}".format(out99upper[i])]
for i in outletIndsSubset]
print('*' * 120)
print('ESTIMATE DICTIONARY VALUES')
print('*' * 120)
print(tabulate(impReport, headers=['Importer Name', 'Max. Est.',
'99% Lower', '95% Lower', '90% Lower',
'90% Upper', '95% Upper', '99% Upper']))
print('*' * 120)
print('*' * 120)
print(tabulate(outReport, headers=['Outlet Name', 'Max. Est.',
'99% Lower', '95% Lower', '90% Lower',
'90% Upper', '95% Upper', '99% Upper']))
return
def Summarize(inputDict):
'''
This method prints a summary of the contents of a Logistigate-type dictionary
'''
if not all(key in inputDict for key in ['outletNames', 'importerNames',
'type', 'diagSens', 'diagSpec',
'dataTbl']):
print('The input dictionary does not contain the minimal required information' +
' to be considered a logistigate dictionary. Please check and try again.')
return {}
print('The ' + str(len(inputDict['dataTbl'])) + ' ' + str(inputDict['type']) +\
' data points within this Logistigate dictionary\nconsist of ' +\
str(len(inputDict['outletNames'])) + ' outlets and ' +\
str(len(inputDict['importerNames'])) + ' importers.')
print('These data were generated by a diagnostic tool with a sensitivity\nof ' +\
str(inputDict['diagSens']) + ' and a specificity of ' + str(inputDict['diagSpec']) + '.')
return
#### Necessary NUTS functions ####
"""
This package implements the No-U-Turn Sampler (NUTS) algorithm 6 from the NUTS
paper (Hoffman & Gelman, 2011).
Content
-------
The package mainly contains:
nuts6 return samples using the NUTS
test_nuts6 example usage of this package
and subroutines of nuts6:
build_tree the main recursion in NUTS
find_reasonable_epsilon Heuristic for choosing an initial value of epsilon
leapfrog Perform a leapfrog jump in the Hamiltonian space
stop_criterion Compute the stop condition in the main loop
A few words about NUTS
----------------------
Hamiltonian Monte Carlo or Hybrid Monte Carlo (HMC) is a Markov chain Monte
Carlo (MCMC) algorithm that avoids the random walk behavior and sensitivity to
correlated parameters, biggest weakness of many MCMC methods. Instead, it takes
a series of steps informed by first-order gradient information.
This feature allows it to converge much more quickly to high-dimensional target
distributions compared to simpler methods such as Metropolis, Gibbs sampling
(and derivatives).
However, HMC's performance is highly sensitive to two user-specified
parameters: a step size, and a desired number of steps. In particular, if the
number of steps is too small then the algorithm will just exhibit random walk
behavior, whereas if it is too large it will waste computations.
Hoffman & Gelman introduced NUTS or the No-U-Turn Sampler, an extension to HMC
that eliminates the need to set a number of steps. NUTS uses a recursive
algorithm to find likely candidate points that automatically stops when it
starts to double back and retrace its steps. Empirically, NUTS performs at
least as efficiently as, and sometimes more efficiently than, a well-tuned standard
HMC method, without requiring user intervention or costly tuning runs.
Moreover, Hoffman & Gelman derived a method for adapting the step size
parameter on the fly based on primal-dual averaging. NUTS can thus be used
with no hand-tuning at all.
In practice, the implementation still requires a number of steps, a burn-in
period and a step size. However, the step size will be optimized during the
burn-in period, and the final values of all the user-defined values will be
revised by the algorithm.
reference: arXiv:1111.4246
"The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte
Carlo", Matthew D. Hoffman & Andrew Gelman
"""
from numpy import log, exp, sqrt
def leapfrog(theta, r, grad, epsilon, f):
""" Perfom a leapfrog jump in the Hamiltonian space
INPUTS
------
theta: ndarray[float, ndim=1]
initial parameter position
r: ndarray[float, ndim=1]
initial momentum
grad: float
initial gradient value
epsilon: float
step size
f: callable
it should return the log probability and gradient evaluated at theta
logp, grad = f(theta)
OUTPUTS
-------
thetaprime: ndarray[float, ndim=1]
new parameter position
rprime: ndarray[float, ndim=1]
new momentum
gradprime: float
new gradient
logpprime: float
new lnp
"""
# make half step in r
rprime = r + 0.5 * epsilon * grad
# make new step in theta
thetaprime = theta + epsilon * rprime
# compute new gradient
logpprime, gradprime = f(thetaprime)
# make half step in r again
rprime = rprime + 0.5 * epsilon * gradprime
return thetaprime, rprime, gradprime, logpprime
def find_reasonable_epsilon(theta0, grad0, logp0, f, epsilonLB=0.005, epsilonUB=0.5):
""" Heuristic for choosing an initial value of epsilon """
    epsilon = 1.
r0 = np.random.normal(0., 1., len(theta0))
# Figure out what direction we should be moving epsilon.
_, rprime, gradprime, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)
    # brutal! This trick makes sure the step is not so huge that it leads to infinite
# values of the likelihood. This could also help to make sure theta stays
# within the prior domain (if any)
k = 1.
while np.isinf(logpprime) or np.isinf(gradprime).any():
k *= 0.5
_, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon * k, f)
epsilon = np.minimum(np.maximum(0.5 * k * epsilon, 2. * epsilonLB), epsilonUB / (2.))
# acceptprob = np.exp(logpprime - logp0 - 0.5 * (np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))
# a = 2. * float((acceptprob > 0.5)) - 1.
logacceptprob = logpprime - logp0 - 0.5 * (np.dot(rprime, rprime) - np.dot(r0, r0))
a = 1. if logacceptprob > np.log(0.5) else -1.
# Keep moving epsilon in that direction until acceptprob crosses 0.5.
# while ( (acceptprob ** a) > (2. ** (-a))):
while a * logacceptprob > -a * np.log(2):
epsilon = epsilon * (1.5 ** a)
if epsilon < epsilonLB or epsilon > epsilonUB:
break
_, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)
# acceptprob = np.exp(logpprime - logp0 - 0.5 * ( np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))
logacceptprob = logpprime - logp0 - 0.5 * (np.dot(rprime, rprime) - np.dot(r0, r0))
# print("find_reasonable_epsilon=", epsilon) EOW commented out
return epsilon
def stop_criterion(thetaminus, thetaplus, rminus, rplus):
""" Compute the stop condition in the main loop
    dot(dtheta, rminus) >= 0 & dot(dtheta, rplus) >= 0
INPUTS
------
thetaminus, thetaplus: ndarray[float, ndim=1]
under and above position
rminus, rplus: ndarray[float, ndim=1]
under and above momentum
OUTPUTS
-------
criterion: bool
return if the condition is valid
"""
dtheta = thetaplus - thetaminus
return (np.dot(dtheta, rminus.T) >= 0) & (np.dot(dtheta, rplus.T) >= 0)
def build_tree(theta, r, grad, logu, v, j, epsilon, f, joint0):
"""The main recursion."""
if (j == 0):
# Base case: Take a single leapfrog step in the direction v.
thetaprime, rprime, gradprime, logpprime = leapfrog(theta, r, grad, v * epsilon, f)
joint = logpprime - 0.5 * np.dot(rprime, rprime.T)
# Is the new point in the slice?
nprime = int(logu < joint)
# Is the simulation wildly inaccurate?
sprime = int((logu - 1000.) < joint)
# Set the return values---minus=plus for all things here, since the
# "tree" is of depth 0.
thetaminus = thetaprime[:]
thetaplus = thetaprime[:]
rminus = rprime[:]
rplus = rprime[:]
gradminus = gradprime[:]
gradplus = gradprime[:]
# Compute the acceptance probability.
alphaprime = min(1., np.exp(joint - joint0))
# alphaprime = min(1., np.exp(logpprime - 0.5 * np.dot(rprime, rprime.T) - joint0))
nalphaprime = 1
else:
# Recursion: Implicitly build the height j-1 left and right subtrees.
thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alphaprime, nalphaprime = build_tree(
theta, r, grad, logu, v, j - 1, epsilon, f, joint0)
# No need to keep going if the stopping criteria were met in the first subtree.
if (sprime == 1):
if (v == -1):
thetaminus, rminus, gradminus, _, _, _, thetaprime2, gradprime2, logpprime2, nprime2, sprime2, alphaprime2, nalphaprime2 = build_tree(
thetaminus, rminus, gradminus, logu, v, j - 1, epsilon, f, joint0)
else:
_, _, _, thetaplus, rplus, gradplus, thetaprime2, gradprime2, logpprime2, nprime2, sprime2, alphaprime2, nalphaprime2 = build_tree(
thetaplus, rplus, gradplus, logu, v, j - 1, epsilon, f, joint0)
# Choose which subtree to propagate a sample up from.
if (np.random.uniform() < (float(nprime2) / max(float(int(nprime) + int(nprime2)), 1.))):
thetaprime = thetaprime2[:]
gradprime = gradprime2[:]
logpprime = logpprime2
# Update the number of valid points.
nprime = int(nprime) + int(nprime2)
# Update the stopping criterion.
sprime = int(sprime and sprime2 and stop_criterion(thetaminus, thetaplus, rminus, rplus))
# Update the acceptance probability statistics.
alphaprime = alphaprime + alphaprime2
nalphaprime = nalphaprime + nalphaprime2
return thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alphaprime, nalphaprime
def nuts6(f, M, Madapt, theta0, delta=0.25):
"""
    Implements the No-U-Turn Sampler (NUTS) algorithm 6 from the NUTS
paper (Hoffman & Gelman, 2011).
Runs Madapt steps of burn-in, during which it adapts the step size
parameter epsilon, then starts generating samples to return.
Note the initial step size is tricky and not exactly the one from the
initial paper. In fact the initial step size could be given by the user in
order to avoid potential problems
INPUTS
------
f: callable
it should return the log probability and gradient evaluated at theta
logp, grad = f(theta)
M: int
number of samples to generate.
Madapt: int
the number of steps of burn-in/how long to run the dual averaging
algorithm to fit the step size epsilon.
theta0: ndarray[float, ndim=1]
initial guess of the parameters.
KEYWORDS
--------
delta: float
targeted acceptance fraction
OUTPUTS
-------
samples: ndarray[float, ndim=2]
M x D matrix of samples generated by NUTS.
note: samples[0, :] = theta0
"""
if len(np.shape(theta0)) > 1:
raise ValueError('theta0 is expected to be a 1-D array')
D = len(theta0)
samples = np.empty((M + Madapt, D), dtype=float)
lnprob = np.empty(M + Madapt, dtype=float)
logp, grad = f(theta0)
samples[0, :] = theta0
lnprob[0] = logp
# Choose a reasonable first epsilon by a simple heuristic.
epsilon = find_reasonable_epsilon(theta0, grad, logp, f)
# Parameters to the dual averaging algorithm.
gamma = 0.05
t0 = 10
kappa = 0.75
mu = log(10. * epsilon)
# Initialize dual averaging algorithm.
epsilonbar = 1
Hbar = 0
for m in range(1, M + Madapt):
# Resample momenta.
r0 = np.random.normal(0, 1, D)
# joint lnp of theta and momentum r
joint = logp - 0.5 * np.dot(r0, r0.T)
# Resample u ~ uniform([0, exp(joint)]).
# Equivalent to (log(u) - joint) ~ exponential(1).
logu = float(joint - np.random.exponential(1, size=1))
# if all fails, the next sample will be the previous one
samples[m, :] = samples[m - 1, :]
lnprob[m] = lnprob[m - 1]
# initialize the tree
thetaminus = samples[m - 1, :]
thetaplus = samples[m - 1, :]
rminus = r0[:]
rplus = r0[:]
gradminus = grad[:]
gradplus = grad[:]
        j = 0  # initial height j = 0
n = 1 # Initially the only valid point is the initial point.
s = 1 # Main loop: will keep going until s == 0.
while (s == 1):
# Choose a direction. -1 = backwards, 1 = forwards.
v = int(2 * (np.random.uniform() < 0.5) - 1)
# Double the size of the tree.
if (v == -1):
thetaminus, rminus, gradminus, _, _, _, thetaprime, gradprime, logpprime, nprime, sprime, alpha, nalpha = build_tree(
thetaminus, rminus, gradminus, logu, v, j, epsilon, f, joint)
else:
_, _, _, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alpha, nalpha = build_tree(
thetaplus, rplus, gradplus, logu, v, j, epsilon, f, joint)
# Use Metropolis-Hastings to decide whether or not to move to a
# point from the half-tree we just generated.
_tmp = min(1, float(nprime) / float(n))
if (sprime == 1) and (np.random.uniform() < _tmp):
samples[m, :] = thetaprime[:]
lnprob[m] = logpprime
logp = logpprime
grad = gradprime[:]
# Update number of valid points we've seen.
n += nprime
# Decide if it's time to stop.
s = sprime and stop_criterion(thetaminus, thetaplus, rminus, rplus) and (n < 50) # (n<50) EOW EDIT
# Increment depth.
j += 1
# Do adaptation of epsilon if we're still doing burn-in.
eta = 1. / float(m + t0)
Hbar = (1. - eta) * Hbar + eta * (delta - alpha / float(nalpha))
if (m <= Madapt):
epsilon = exp(mu - sqrt(m) / gamma * Hbar)
epsilon = np.minimum(np.maximum(epsilon, 0.001), 1)
eta = m ** -kappa
epsilonbar = exp((1. - eta) * log(epsilonbar) + eta * log(epsilon))
else:
epsilon = epsilonbar
samples = samples[Madapt:, :]
lnprob = lnprob[Madapt:]
return samples, lnprob, epsilon
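if __name__ == '__main__':
    # Hedged demo (adapted from the kind of example shipped with standalone NUTS
    # implementations; not part of the logistigate workflow). The 2-D correlated
    # Gaussian below is a made-up target used only to exercise nuts6().
    def correlated_normal(theta):
        """Log-density and gradient of a zero-mean 2-D Gaussian with precision matrix A."""
        A = np.array([[50.251256, -24.874372],
                      [-24.874372, 12.562814]])
        grad = -np.dot(theta, A)          # gradient of -0.5 * theta' A theta
        logp = 0.5 * np.dot(grad, theta)
        return logp, grad

    M, Madapt = 1000, 500
    theta0 = np.random.normal(0., 1., 2)
    samples, lnprob, eps = nuts6(correlated_normal, M, Madapt, theta0)
    print('Adapted step size: ' + str(eps))
    print('Posterior mean estimate: ' + str(np.mean(samples, axis=0)))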
|
|
# MINLP written by GAMS Convert at 05/15/20 00:50:47
#
# Equation counts
# Total E G L N X C B
# 43 7 8 28 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 43 19 24 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 155 151 4 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,97),initialize=0)
m.x9 = Var(within=Reals,bounds=(3,13.3333333333333),initialize=3)
m.x10 = Var(within=Reals,bounds=(3,16.6666666666667),initialize=3)
m.x11 = Var(within=Reals,bounds=(3,20),initialize=3)
m.x12 = Var(within=Reals,bounds=(3,11.6666666666667),initialize=3)
m.x13 = Var(within=Reals,bounds=(3,13.3333333333333),initialize=3)
m.x14 = Var(within=Reals,bounds=(3,16.6666666666667),initialize=3)
m.x15 = Var(within=Reals,bounds=(3,20),initialize=3)
m.x16 = Var(within=Reals,bounds=(3,11.6666666666667),initialize=3)
m.x17 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,100),initialize=0)
m.b19 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b20 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b21 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b22 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b23 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b24 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b25 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b26 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b27 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b28 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b29 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b30 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b31 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b32 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b33 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b34 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b35 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b36 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b37 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b38 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b39 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b40 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b41 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b42 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= 2*m.x17 + 2*m.x18, sense=minimize)
m.c2 = Constraint(expr= - m.x1 - m.x9 + m.x17 >= 0)
m.c3 = Constraint(expr= - m.x2 - m.x10 + m.x17 >= 0)
m.c4 = Constraint(expr= - m.x3 - m.x11 + m.x17 >= 0)
m.c5 = Constraint(expr= - m.x4 - m.x12 + m.x17 >= 0)
m.c6 = Constraint(expr= - m.x5 - m.x13 + m.x18 >= 0)
m.c7 = Constraint(expr= - m.x6 - m.x14 + m.x18 >= 0)
m.c8 = Constraint(expr= - m.x7 - m.x15 + m.x18 >= 0)
m.c9 = Constraint(expr= - m.x8 - m.x16 + m.x18 >= 0)
m.c10 = Constraint(expr=40/m.x13 - m.x9 <= 0)
m.c11 = Constraint(expr=50/m.x14 - m.x10 <= 0)
m.c12 = Constraint(expr=60/m.x15 - m.x11 <= 0)
m.c13 = Constraint(expr=35/m.x16 - m.x12 <= 0)
m.c14 = Constraint(expr= m.x1 - m.x2 + m.x9 + 110.333333333333*m.b19 <= 110.333333333333)
m.c15 = Constraint(expr= m.x1 - m.x3 + m.x9 + 110.333333333333*m.b20 <= 110.333333333333)
m.c16 = Constraint(expr= m.x1 - m.x4 + m.x9 + 110.333333333333*m.b21 <= 110.333333333333)
m.c17 = Constraint(expr= m.x2 - m.x3 + m.x10 + 113.666666666667*m.b22 <= 113.666666666667)
m.c18 = Constraint(expr= m.x2 - m.x4 + m.x10 + 113.666666666667*m.b23 <= 113.666666666667)
m.c19 = Constraint(expr= m.x3 - m.x4 + m.x11 + 117*m.b24 <= 117)
m.c20 = Constraint(expr= - m.x1 + m.x2 + m.x10 + 113.666666666667*m.b25 <= 113.666666666667)
m.c21 = Constraint(expr= - m.x1 + m.x3 + m.x11 + 117*m.b26 <= 117)
m.c22 = Constraint(expr= - m.x1 + m.x4 + m.x12 + 108.666666666667*m.b27 <= 108.666666666667)
m.c23 = Constraint(expr= - m.x2 + m.x3 + m.x11 + 117*m.b28 <= 117)
m.c24 = Constraint(expr= - m.x2 + m.x4 + m.x12 + 108.666666666667*m.b29 <= 108.666666666667)
m.c25 = Constraint(expr= - m.x3 + m.x4 + m.x12 + 108.666666666667*m.b30 <= 108.666666666667)
m.c26 = Constraint(expr= m.x5 - m.x6 + m.x13 + 110.333333333333*m.b31 <= 110.333333333333)
m.c27 = Constraint(expr= m.x5 - m.x7 + m.x13 + 110.333333333333*m.b32 <= 110.333333333333)
m.c28 = Constraint(expr= m.x5 - m.x8 + m.x13 + 110.333333333333*m.b33 <= 110.333333333333)
m.c29 = Constraint(expr= m.x6 - m.x7 + m.x14 + 113.666666666667*m.b34 <= 113.666666666667)
m.c30 = Constraint(expr= m.x6 - m.x8 + m.x14 + 113.666666666667*m.b35 <= 113.666666666667)
m.c31 = Constraint(expr= m.x7 - m.x8 + m.x15 + 117*m.b36 <= 117)
m.c32 = Constraint(expr= - m.x5 + m.x6 + m.x14 + 113.666666666667*m.b37 <= 113.666666666667)
m.c33 = Constraint(expr= - m.x5 + m.x7 + m.x15 + 117*m.b38 <= 117)
m.c34 = Constraint(expr= - m.x5 + m.x8 + m.x16 + 108.666666666667*m.b39 <= 108.666666666667)
m.c35 = Constraint(expr= - m.x6 + m.x7 + m.x15 + 117*m.b40 <= 117)
m.c36 = Constraint(expr= - m.x6 + m.x8 + m.x16 + 108.666666666667*m.b41 <= 108.666666666667)
m.c37 = Constraint(expr= - m.x7 + m.x8 + m.x16 + 108.666666666667*m.b42 <= 108.666666666667)
m.c38 = Constraint(expr= m.b19 + m.b25 + m.b31 + m.b37 == 1)
m.c39 = Constraint(expr= m.b20 + m.b26 + m.b32 + m.b38 == 1)
m.c40 = Constraint(expr= m.b21 + m.b27 + m.b33 + m.b39 == 1)
m.c41 = Constraint(expr= m.b22 + m.b28 + m.b34 + m.b40 == 1)
m.c42 = Constraint(expr= m.b23 + m.b29 + m.b35 + m.b41 == 1)
m.c43 = Constraint(expr= m.b24 + m.b30 + m.b36 + m.b42 == 1)
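# Hedged usage sketch, not part of the GAMS-converted file above: solving this MINLP
# requires a solver that handles the nonconvex constraints c10-c13. 'couenne' below is
# only an example and must be installed separately; swap in whatever MINLP solver is
# available on the system.
if __name__ == '__main__':
    opt = SolverFactory('couenne')
    results = opt.solve(m, tee=False)
    print('objective =', value(m.obj))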
|
|
import cv2
import numpy as np
def preprocess(img, input_size, swap=(2, 0, 1)):
if len(img.shape) == 3:
padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
else:
padded_img = np.ones(input_size, dtype=np.uint8) * 114
r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.uint8)
padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
padded_img = padded_img.transpose(swap)
padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
return padded_img, r
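# Hedged usage sketch: letterbox a single image into a 640x640 network input. The file
# name is a placeholder; preprocess() pads with the constant 114, returns the image in
# CHW float32 layout plus the resize ratio r needed to map detections back to the
# original resolution.
if __name__ == '__main__':
    img = cv2.imread('sample.jpg')  # hypothetical input image
    if img is not None:
        tensor, r = preprocess(img, (640, 640))
        print(tensor.shape, r)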
|
|
from camas_gym.envs.camas_zoo_masking import MOVES, CamasZooEnv
import numpy as np
def update_batch_pre(env, done): # buffer may not be the correct terminology
    """Creates the pre-transition buffer data.
    Only one agent may act at a time; the other agents either carry out their current action again
    or choose None (idx = num_actions) if they are at their goal.
    Args:
        env (_type_): _description_
        done (_type_): _description_
    """
obs, avail_acts = np.array([]), np.array([])
for agent in env.possible_agents:
observation = env.observe(agent)
obs = np.append(obs, observation["observation"] )
if agent == env.agent_selection and done:
one_hot = np.zeros(5)
one_hot[-1] = 1
avail_acts = np.append(avail_acts, one_hot)
elif agent == env.agent_selection:
avail_acts = np.append(avail_acts, observation["action_mask"])
avail_acts = np.append(avail_acts, np.array([0.0]))
else:
one_hot = np.zeros(5) # NOTE find a better way!
if agent in env.agents:
agent_act = env.agent_action(agent)
else: # terminal agent
agent_act = None
if agent_act is None: agent_act = -1
one_hot[agent_act] = 1
avail_acts = np.append(avail_acts, one_hot)
#print('- pre tran created')
obs.resize((3,len(observation["observation"])))
avail_acts.resize((3, 5))
pre_transition_data = {
"state": [env.state()],
"obs": [obs],
"avail_actions": [avail_acts]
}
return pre_transition_data
def quadratic_makespan_reward(x):
return ((x-100)**2)/100
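# Quick sanity check of the reward shaping above (a minimal sketch): the quadratic is 0
# at a makespan of 100 and grows symmetrically on both sides, e.g. 100 at x=0 or x=200
# and 25 at x=50 or x=150.
if __name__ == '__main__':
    for x in (0, 50, 100, 150, 200):
        print(x, quadratic_makespan_reward(x))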
|
|
import os
import argparse
import cv2
import numpy as np
import glob
import math
from objloader_simple import *
dir_markers = os.path.join(os.pardir,'markers')
dir_chess = os.path.join(os.pardir,'chessboards')
dir_objects = os.path.join(os.pardir,'objects')
MIN_MATCHES = 30
def capture_boards():
vd = cv2.VideoCapture(0)
img_counter = 1
while(True):
ret,frame = vd.read()
cv2.imshow('Capture chess',frame)
k = cv2.waitKey(10)
        if k != -1:
print(k)
# print(k)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256==32:
# SPACE pressed
print("Space hitting")
img_name = os.path.join(dir_chess,'{}.png'.format(img_counter))
print(img_name)
cv2.imwrite(img_name, frame)
img_counter += 1
vd.release()
# capture_boards()
def getGrayImage(fname,shape):
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = cv2.resize(gray, shape)
return gray
def getK():
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((7*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:7].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('CalibrationImages/*.jpg')
Shape = None
for fname in images:
gray = getGrayImage(fname,(1008,756))
Shape = gray.shape[::-1]
ret, corners = cv2.findChessboardCorners(gray, (7,7),None)
if ret == True:
print(fname)
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(gray, (7,7), corners2, ret )
cv2.imshow('img',gray)
cv2.waitKey(2000)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, Shape, None,None)
if ret:
print("Camera Matrix")
print(mtx)
else:
print("No Solution Found")
cv2.destroyAllWindows()
def main():
"""
This functions loads the target surface image,
"""
homography = None
camera_parameters = np.array([[517, 0, 309], [0, 379.5, 178], [0, 0, 1]],dtype=np.float32)
# camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])
# camera_parameters = np.array([[1, 0, 2], [0, 2, 1], [0, 0, 1]],dtype=np.float32)
# camera_parameters = np.array([])
# create ORB keypoint detector
orb = cv2.ORB_create()
# create BFMatcher object based on hamming distance
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# load the reference surface that will be searched in the video stream
# dir_name = os.getcwd()
model = cv2.imread('../markers/marker1/VisualMarker.png', 0)
# cv2.resize(model,(640,352))
# Compute model keypoints and its descriptors
kp_model, des_model = orb.detectAndCompute(model, None)
# Load 3D model from OBJ file
obj = OBJ('../models/fox.obj', swapyz=True)
# init video capture
cap = cv2.VideoCapture('../video1.mp4')
# cap = cv2.VideoCapture(0)
while True:
# read the current frame
ret, frame = cap.read()
        if not ret:
            print("Unable to capture video")
            return
        print(frame.shape)
# find and draw the keypoints of the frame
kp_frame, des_frame = orb.detectAndCompute(frame, None)
# match frame descriptors with model descriptors
matches = bf.match(des_model, des_frame)
# sort them in the order of their distance
# the lower the distance, the better the match
matches = sorted(matches, key=lambda x: x.distance)
# print(matches)
# compute Homography if enough matches are found
if len(matches) > MIN_MATCHES:
            # differentiate between source points and destination points
src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
# compute Homography
homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
if args.rectangle:
# Draw a rectangle that marks the found model in the frame
h, w = model.shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
# project corners into frame
dst = cv2.perspectiveTransform(pts, homography)
# connect them with lines
frame = cv2.polylines(frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
# if a valid homography matrix was found render cube on model plane
if homography is not None:
# try:
# obtain 3D projection matrix from homography matrix and camera parameters
projection = projection_matrix(camera_parameters, homography)
# project cube or model
frame = render(frame, obj, projection, model, False)
#frame = render(frame, model, projection)
# except:
# print('cannot render object')
# draw first 10 matches.
if args.matches:
frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)
# show result
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
print("Not enough matches found - %d/%d" % (len(matches), MIN_MATCHES))
cap.release()
cv2.destroyAllWindows()
return 0
def render(img, obj, projection, model, color=False):
"""
Render a loaded obj model into the current video frame
"""
vertices = obj.vertices
scale_matrix = np.eye(3) * 3
h, w = model.shape
for face in obj.faces:
face_vertices = face[0]
points = np.array([vertices[vertex - 1] for vertex in face_vertices])
points = np.dot(points, scale_matrix)
# render model in the middle of the reference surface. To do so,
# model points must be displaced
points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])
dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
imgpts = np.int32(dst)
if color is False:
cv2.fillConvexPoly(img, imgpts, (137, 27, 211))
else:
color = hex_to_rgb(face[-1])
color = color[::-1] # reverse
cv2.fillConvexPoly(img, imgpts, color)
return img
def projection_matrix(camera_parameters, homography):
"""
From the camera calibration matrix and the estimated homography
compute the 3D projection matrix
"""
# Compute rotation along the x and y axis as well as the translation
homography = homography * (-1)
rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)
col_1 = rot_and_transl[:, 0]
col_2 = rot_and_transl[:, 1]
col_3 = rot_and_transl[:, 2]
# normalise vectors
l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))
rot_1 = col_1 / l
rot_2 = col_2 / l
translation = col_3 / l
# compute the orthonormal basis
c = rot_1 + rot_2
p = np.cross(rot_1, rot_2)
d = np.cross(c, p)
rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
rot_3 = np.cross(rot_1, rot_2)
# finally, compute the 3D projection matrix from the model to the current frame
projection = np.stack((rot_1, rot_2, rot_3, translation)).T
# print(projection)
return np.dot(camera_parameters, projection)
def hex_to_rgb(hex_color):
"""
Helper function to convert hex strings to RGB
"""
hex_color = hex_color.lstrip('#')
h_len = len(hex_color)
return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))
parser = argparse.ArgumentParser(description='Augmented reality application')
parser.add_argument('-r','--rectangle', help = 'draw rectangle delimiting target surface on frame', action = 'store_true')
parser.add_argument('-mk','--model_keypoints', help = 'draw model keypoints', action = 'store_true')
parser.add_argument('-fk','--frame_keypoints', help = 'draw frame keypoints', action = 'store_true')
parser.add_argument('-ma','--matches', help = 'draw matches between keypoints', action = 'store_true')
args = parser.parse_args()
if __name__=='__main__':
main()
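# Hedged usage note: assuming this file is saved as ar_main.py next to the markers/,
# models/ and video1.mp4 assets referenced above, a typical invocation would be
#   python ar_main.py --rectangle --matches
# which overlays the detected marker outline and the first 10 ORB matches on each frame.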
|
|
import neuromodulation.selection_functions as sf
import numpy as np
'''
Functions used to select the models which pass the criteria.
New functions can be added; each function should accept criteria and voltages and should return a dictionary
that contains at least a "boolean" key indicating whether the criteria pass (True) or fail (False).
'''
def number_AP_decrease(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.number_action_potentials(voltages[0],parameters)
modulated = sf.number_action_potentials(voltages[1],parameters)
zscore = abs(selection["mean"] - abs(modulated - control))/selection["std"]
diff = modulated - control
boolean = zscore < threshold and diff < 0
result = { "boolean" : boolean, "zscore" : zscore, "diff" : diff, "controlAP" : control, "modulatedAP" : modulated}
return result
def number_AP_increase(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.number_action_potentials(voltages[0],parameters)
modulated = sf.number_action_potentials(voltages[1],parameters)
zscore = abs(selection["mean"] - abs(modulated - control))/selection["std"]
diff = modulated - control
boolean = zscore < threshold and diff > 0
result = { "boolean" : boolean, "zscore" : zscore, "diff" : diff, "controlAP" : control, "modulatedAP" : modulated}
return result
def frequency_change(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.mean_frequency(voltages[0],parameters)
modulated = sf.mean_frequency(voltages[1],parameters)
zscore = abs(selection["mean"] - abs(modulated.take(0) - control.take(0)))/selection["std"]
boolean = zscore < threshold
diff = modulated.take(0) - control.take(0)
result = { "boolean" : boolean, "zscore" : zscore, "diff" : diff, "controlHz" : control, "modulatedHz" : modulated}
return result
def frequency_change_increase(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.mean_frequency(voltages[0],parameters)
modulated = sf.mean_frequency(voltages[1],parameters)
zscore = abs(selection["mean"] - abs(modulated.take(0) - control.take(0)))/selection["std"]
diff = modulated.take(0) - control.take(0)
boolean = zscore < threshold and diff > 0
result = { "boolean" : boolean, "zscore" : zscore, "diff" : diff, "controlHz" : control, "modulatedHz" : modulated}
return result
def frequency_change_decrease(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.mean_frequency(voltages[0],parameters)
modulated = sf.mean_frequency(voltages[1],parameters)
zscore = abs(selection["mean"] - abs(modulated.take(0) - control.take(0)))/selection["std"]
diff = modulated.take(0) - control.take(0)
boolean = zscore < threshold and diff < 0
result = { "boolean" : boolean, "zscore" : zscore, "diff" : diff, "controlHz" : control, "modulatedHz" : modulated}
return result
def cv_change(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.cv(voltages[0],parameters)
modulated = sf.cv(voltages[1],parameters)
zscore = abs(selection["mean"] - abs(control.take(0) - modulated.take(0)))/selection["std"]
diff = modulated.take(0) - control.take(0)
boolean = zscore < threshold
result = { "boolean" : boolean, "zscore" : zscore, "diff" : diff, "controlcv" : control, "modulatedcv" : modulated}
return result
def membrane_amplitude_increase(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.membrane_amplitude(voltages[0],parameters)
modulated = sf.membrane_amplitude(voltages[1],parameters)
zscore = abs(selection["mean"] - abs(control.take(0) - modulated.take(0)))/selection["std"]
diff = modulated.take(0) - control.take(0)
boolean = zscore < threshold and diff > 0
result = { "boolean" : boolean, "zscore" : zscore, "diff" : diff, "controlcv" : control, "modulatedcv" : modulated}
return result
def membrane_amplitude_increase_percentage(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.membrane_amplitude(voltages[0],parameters)
modulated = sf.membrane_amplitude(voltages[1],parameters)
percentage = ((modulated.take(0) -control.take(0))/control.take(0))*100 + 100
zscore = abs(selection["mean"] - abs(percentage))/selection["std"]
boolean = zscore < threshold and percentage > 100
result = { "boolean" : boolean, "zscore" : zscore, "percentage" : percentage, "controlcv" : control, "modulatedcv" : modulated}
return result
def membrane_amplitude_decrease_percentage(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.membrane_amplitude(voltages[0],parameters)
modulated = sf.membrane_amplitude(voltages[1],parameters)
percentage = ((modulated.take(0) -control.take(0))/control.take(0))*100 + 100
zscore = abs(selection["mean"] - abs(percentage))/selection["std"]
boolean = zscore < threshold and percentage < 100
result = { "boolean" : boolean, "zscore" : zscore, "percentage" : percentage, "controlcv" : control, "modulatedcv" : modulated}
return result
def synaptic_amplitude_decrease_percentage(criteria,voltages):
parameters = criteria["parameters"]
selection = criteria["selection"]
threshold = criteria["selection"]["threshold"]
control = sf.synaptic_amplitude(voltages[0],parameters)
modulated = sf.synaptic_amplitude(voltages[1],parameters)
percentage = ((modulated.take(0) -control.take(0))/control.take(0))*100 + 100
zscore = abs(selection["mean"] - abs(percentage))/selection["std"]
boolean = zscore < threshold and percentage < 100
result = { "boolean" : boolean, "zscore" : zscore, "percentage" : percentage, "controlcv" : control, "modulatedcv" : modulated}
return result
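# Hedged illustration of the expected inputs: every selection function above expects a
# `criteria` dict shaped like the one below and `voltages` as a pair
# [control_trace, modulated_trace]. The "parameters" content is passed straight through
# to neuromodulation.selection_functions, whose exact keys are not shown here, so it is
# left as a placeholder.
EXAMPLE_CRITERIA = {
    "parameters": {},
    "selection": {"mean": 5.0, "std": 2.0, "threshold": 2.0},
}
# result = number_AP_increase(EXAMPLE_CRITERIA, [control_voltage, modulated_voltage])
# result["boolean"] says whether the model passes; result["zscore"] says how close it is.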
|
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Install pandas
# pip install Pandas
# Run the test suite
# hypothesis and pytest need to be installed before running
import pandas as pd
# pd.test()
# In[5]:
# Object creation
# Create a Series by passing a list of values; pandas automatically creates a default integer index.
import pandas as pd
import numpy as np
import pprint
s = pd.Series([1,3,5,np.nan,6,8])
print(s)
print('-'*30)
# Create a DataFrame by passing a NumPy array with a datetime index and labeled columns
dates = pd.date_range('20190815',periods=6)
pprint.pprint(dates)
# In[8]:
df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD'))
# df.to_excel('./output.xlsx')
pprint.pprint(df)
# In[9]:
# Create a DataFrame from a dict of objects that can be converted into Series
df2 = pd.DataFrame({'A': 1.,
'B': pd.Timestamp('20190820'),
'C': pd.Series(1,index=list(range(4)),dtype='float32'),
'D': np.array([3] * 4,dtype='int32'),
'E': pd.Categorical(["test","train","test1","train2"]),
'F': 'foo'})
print(df2)
# The columns of the DataFrame have different dtypes
print(df2.dtypes)
# In[10]:
### Viewing data
# View the top rows of the DataFrame
print(df.head(3))
print('+='*30)
# View the bottom rows of the DataFrame
print(df.tail(3))
print('--+'*30)
# Display the index, columns and underlying NumPy data.
print(df.index)
print('-='*30)
print(df.columns)
print('-|'*30)
# DataFrame.to_numpy() gives a NumPy object. The output does not include the row or column index.
print(df.to_numpy())
# In[11]:
# The describe() method shows a quick statistical summary of the data
print(df.describe())
print('--'*30)
# Transpose the data
print(df.T)
print('=='*30)
# Sort by an axis
print(df.sort_index(axis=1,ascending=False))
print('-='*30)
# Sort by values
print(df.sort_values(by='B'))
# In[12]:
### Getting
print(df['A'])
# Slice the rows
print(df[0:])
print(df[0:2])
print('-=='*30)
print(df['20190816':'20190818'])
# In[13]:
### Selection by label
# Get a row of data by label
print(df.loc[dates[0]])
print(df.loc[dates[1]])
print('=='*30)
# Select data on multiple axes by label
print('Select data on multiple axes by label')
print(df.loc[:,['A','B']])
print('--'*30)
print(df.loc[:,['C']])
# In[14]:
# Slice on both axes at the same time by label
print('Slice on both axes at the same time by label')
print(df.loc['20190817':'20190819',['A','B']])
# In[15]:
# Reduce the size of the returned object
print(df.loc['20190820',['A','B']])
# In[16]:
# Get a scalar value
print(df.loc[dates[0],'A'])
# In[17]:
# Fast access to a scalar
print(df.at[dates[0],'A'])
# In[18]:
### Boolean indexing
# Use a single column's values to select data
print(df[df.A > 0]) # prints only the rows where the condition is True.
print(df.A > 0) # prints both the True and the False entries.
# In[19]:
# Select values from a DataFrame where a boolean condition is met:
print(df[df > 0])
# In[20]:
# Filter with the isin() method
df3 = df.copy()
# print(df3)
# df3['E'] = ['one','one','two','three','four','three']
df3['E'] = ['one','two','three','four','five','six']
# print(df3)
print('-='*30)
print(df3[df3['E'].isin(['two','four'])])
# In[21]:
### Setting
# Adding a new column automatically aligns the data by index.
s1 = pd.Series([1,2,3,4,5,6],index=pd.date_range('20190818',periods=6))
print(s1)
df3['F'] = s1
print(df3['F'])
# In[22]:
# Setting values by label
df3.at[dates[0],'A'] = 0
print(df3)
# In[27]:
### Setting values by position
df.iat[0,1] = 0
print(df)
# In[29]:
# Setting by assigning a NumPy array
df3.loc[:,'D'] = np.array([5] * len(df))
print(df3) # result of the preceding series of assignment operations.
# In[41]:
# A where operation with setting.
df2 = df.copy()
df2[df2 > 0] = -df2
print(df2)
# In[42]:
### Missing data
# pandas primarily uses the value np.nan to represent missing data.
# Reindexing lets you change/add/delete the index on a specified axis. This returns a copy of the data.
df5 = df.reindex(index=dates[0:4],columns=list(df.columns) + ['E'])
df5.loc[dates[0]:dates[1],'E'] = 1
print(df5)
# In[45]:
# Drop any rows that have missing data
print(df5.dropna(how='any'))
# In[46]:
# Fill in missing data
print(df5.fillna(value=5))
# In[47]:
# Get the boolean mask where values are nan
print(pd.isna(df5))
# In[49]:
### Stats
# Perform a descriptive statistic
print(df5.mean())
print('-='*30)
# The same operation on the other axis:
print(df5.mean(1))
# In[51]:
# Operate with objects of different dimensionality that need alignment. pandas automatically broadcasts along the specified dimension.
s = pd.Series([1,3,5,np.nan,6,8],index=dates).shift(2)
print(s)
print(df5.sub(s,axis='index'))
# In[53]:
### Apply
# Apply functions to the data
print(df5.apply(np.cumsum))
print(df5.apply(lambda x: x.max() - x.min()))
# In[56]:
### Histogramming
s1 = pd.Series(np.random.randint(0,7,size=10))
print(s1)
print('=+'*30)
print(s1.value_counts())
# In[57]:
### String methods
# Series has a set of string processing methods in the str attribute that operate on each element of the array.
s2 = pd.Series(['A','B','C','Aaba','Baca',np.nan,'CABA','dog','cat'])
print(s2.str.lower())
# In[60]:
## Merge
### Concat
# Concatenate pandas objects with concat().
df6 = pd.DataFrame(np.random.randn(10,4))
print(df6)
print('---'*30)
pieces = [df6[:3],df6[3:7],df6[7:]]
print(pd.concat(pieces))
# In[62]:
### Join
# SQL style merges
left = pd.DataFrame({'key': ['foo','foo'],'lval':[1,2]})
right = pd.DataFrame({'key': ['foo','foo'],'rval': [4,5]})
print(left)
print('='*30)
print(right)
print('='*30)
print(pd.merge(left,right,on='key'))
# In[63]:
# Another example
left = pd.DataFrame({'key': ['foo','bar'],'lval': [1,2]})
right = pd.DataFrame({'key': ['foo','bar'],'rval':[4,5]})
print(left)
print('-'*30)
print(right)
print(pd.merge(left,right,on='key'))
# In[66]:
### Append
df7 = pd.DataFrame(np.random.randn(8,4),columns=['A','B','C','D'])
print(df7)
print('=='*30)
s3 = df7.iloc[3]
print(df7.append(s3,ignore_index=True))
# In[68]:
### Grouping
"""
"group by" involves:
Splitting: breaking the data into groups based on some criteria.
Applying: applying a function to each group independently.
Combining: combining the results into a data structure.
"""
df8 = pd.DataFrame({'A': ['foo','bar','foo','bar',
'foo','bar','foo','foo'],
'B': ['one','one','two','three',
'two','two','one','three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
print(df8)
# Group and then apply the sum() function to the resulting groups.
print(df8.groupby('A').sum())
print('=-'*30)
# Grouping by multiple columns forms a hierarchical index; again apply the sum function
print(df8.groupby(['A','B']).sum())
# In[69]:
### Stacking
tuples = list(zip(*[['bar','bar','baz','baz',
'foo','foo','qux','qux'],
['one','two','one','two',
'one','two','one','two']]))
index = pd.MultiIndex.from_tuples(tuples,names=['first','second'])
df = pd.DataFrame(np.random.randn(8,2),index=index,columns=['A','B'])
df9 = df[:4]
print(df9)
# In[70]:
### The stack() method compresses a level in the DataFrame's columns
stacked = df9.stack()
print(stacked)
# In[72]:
# The inverse of stack() is unstack(), which by default unstacks the last level that was stacked.
print(stacked.unstack())
print('=='*30)
print(stacked.unstack(1))
print('-='*30)
print(stacked.unstack(0))
# In[75]:
### Pivot tables
df10 = pd.DataFrame({'A': ['one','one','two','three'] * 3,
'B': ['A','B','C'] * 4,
'C': ['foo','foo','foo','bar','bar','bar'] * 2,
'D': np.random.randn(12),
'E': np.random.randn(12)})
print(df10)
print('-='*30)
# Produce a pivot table from this data
pd.pivot_table(df10,values='D',index=['A','B'],columns=['C'])
# In[76]:
### Time series (TimeSeries)
# Used to perform resampling operations during frequency conversion.
rng = pd.date_range('22/08/2019',periods=100,freq='S')
ts = pd.Series(np.random.randint(0,500,len(rng)),index=rng)
print(ts.resample('5Min').sum())
# In[79]:
# Time zone representation
rng = pd.date_range('21/08/2019 21:29:30',periods=5,freq='D')
ts = pd.Series(np.random.randn(len(rng)),rng)
print(ts)
print('-='*30)
ts_utc = ts.tz_localize('UTC')
print(ts_utc)
print('-='*30)
# Convert to another time zone
print(ts_utc.tz_convert('US/Eastern'))
# In[82]:
# Converting between time span representations
rng = pd.date_range('22/08/2019',periods=5,freq='M')
ts = pd.Series(np.random.randn(len(rng)),index=rng)
print(ts)
print('-='*30)
ps = ts.to_period()
print(ps)
print('-='*30)
print(ps.to_timestamp())
# In[83]:
# Converting between period and timestamp allows some convenient arithmetic functions to be used.
# Example: convert a quarterly frequency with the year ending in November to 9am at the end of the month following the quarter end.
prng = pd.period_range('2010Q1','2019Q4',freq='Q-NOV')
ts = pd.Series(np.random.randn(len(prng)),prng)
ts.index = (prng.asfreq('M','e') + 1).asfreq('H','s') + 9
print(ts.head())
# In[86]:
### Categoricals
# pandas can include categorical data in a DataFrame.
df11 = pd.DataFrame({"id": [1,2,3,4,5,6],
"raw_grade": ['a','b','b','a','a','e']})
# Convert the raw grades to a category dtype
df11["grade"] = df11["raw_grade"].astype("category")
print(df11["grade"])
print('-='*30)
# In[87]:
# Rename the categories to more meaningful names (by assigning to Series.cat.categories)
df11["grade"].cat.categories = ["very good","good","very bad"]
print(df11["grade"].cat.categories)
# In[88]:
# Reorder the categories and simultaneously add the missing categories (methods under Series.cat return a new Series by default)
df11["grade"] = df11["grade"].cat.set_categories(["very bad","bad","medium",
"good","very good"])
print(df11["grade"])
# In[90]:
# Sorting is per the order in the categories, not lexical order.
print(df11.sort_values(by="grade"))
# In[91]:
# Grouping by a categorical column (groupby) also shows empty categories.
print(df11.groupby("grade").size())
# In[92]:
### Plotting
ts = pd.Series(np.random.randn(1000),
index=pd.date_range('22/08/2019',periods=1000))
ts = ts.cumsum()
ts.plot()
# In[94]:
import matplotlib.pyplot as plt
# On a DataFrame, the plot method plots all of the columns with labels.
df12 = pd.DataFrame(np.random.randn(1000,4),index=ts.index,
columns=['A','B','C','D'])
df13 = df12.cumsum()
plt.figure()
df13.plot()
plt.legend(loc='best')
# In[96]:
### Getting data in/out
# Write to a csv file
df13.to_csv('./best.csv')
# Read from a csv file
pd.read_csv('./best.csv')
# In[102]:
### HDF5
# pip install tables
# Write to HDF5
df13.to_hdf('./best.h5','df')
# Read from HDF5
pd.read_hdf('./best.h5','df')
# In[103]:
### Excel
# Write to an excel file
df13.to_excel('./best.xlsx',sheet_name='best')
# Read from an excel file
pd.read_excel('./best.xlsx','best',index_col=None,na_values=['NA'])
# In[104]:
# Gotchas
# This raises an exception: the truth value of a Series is ambiguous
if pd.Series([False,True,False]):
    print("I was true")
|
|
from fitstools import manage_dtype, mask_fits, assign_header
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import cosmics
def calibrate(image, bias, fiber_mask=None, lacosmic=True):
image = bias_correct(image, bias, fiber_mask)
image = dark_correct(image)
image = mask_badpixels(image)
if lacosmic:
image = remove_cosmics(image)
return image
def bias_correct(image, bias, fiber_mask=None):
@manage_dtype(preserve=True)
def bc_helper(image, bias, fiber_mask=None):
#reduced = (image-median(masked image)) - (bias-median(bias))
if type(fiber_mask) != type(None):
masked_image = mask_fits(image, fiber_mask, maskval=0, fillval=np.nan)
image = (image - np.nanmedian(masked_image)) - (bias - np.median(bias))
else:
image = image - bias
return image
bias_subtracted_image = bc_helper(image, bias, fiber_mask)
if type(bias_subtracted_image) == fits.hdu.hdulist.HDUList:
bias_subtracted_image = assign_header(bias_subtracted_image, image[0].header)
bias_subtracted_image[0].header['COMMENT'] = 'Bias corrected.'
return bias_subtracted_image
def dark_correct(image, exptime=None):
dark_map = fits.open('calib/master_calib/dark_fit.fits')[0].data
header = image[0].header
gain = header['GAIN']
dark_map /= gain
    if exptime is None:
        if type(image) == fits.hdu.hdulist.HDUList:
            try:
                exptime = image[0].header['EXPTIME']
            except KeyError:
                exptime = 0.0
        else:
            raise ValueError('Cannot determine exposure time for dark subtraction.')
@manage_dtype(preserve=True)
def dc_helper(image, dark_map, exptime):
image = image - exptime*dark_map
return image
dark_subtracted_image = dc_helper(image, dark_map, exptime)
if type(dark_subtracted_image) == fits.hdu.hdulist.HDUList:
dark_subtracted_image = assign_header(dark_subtracted_image, image[0].header)
        dark_subtracted_image[0].header['COMMENT'] = 'Dark corrected.'
return dark_subtracted_image
def mask_badpixels(image):
bad_mask = fits.open('calib/master_calib/badmask.fits')
@manage_dtype(preserve=True)
def mbp_helper(image, bad_mask):
image = mask_fits(image, bad_mask, maskval=1.0, fillval=np.nan)
return image
bad_masked_image = mbp_helper(image, bad_mask)
if type(bad_masked_image) == fits.hdu.hdulist.HDUList:
bad_masked_image = assign_header(bad_masked_image, image[0].header)
bad_masked_image[0].header['COMMENT'] = 'Bad pixels masked.'
return bad_masked_image
@manage_dtype(preserve=True, use_args=[0], with_header=True)
def remove_cosmics(image, gain=None, readnoise=None, sigclip=5.0, sigfrac=0.5, objlim=20.0):
image, header = image[:2]
fname_noext = header['FILENAME'][:-5] if 'FILENAME' in header else 'image'
if gain==None and 'GAIN' in header:
gain = header['GAIN']
if readnoise==None and 'RDNOISE' in header:
readnoise = header['RDNOISE']
if gain==None:
raise KeyError('Cannot determine image gain from information given.')
if readnoise==None:
raise KeyError('Cannot determine image readnoise from information given.')
c = cosmics.cosmicsimage(image, gain=gain, readnoise=readnoise, sigclip=sigclip, sigfrac=sigfrac, objlim=objlim, verbose=False)
c.run(maxiter=5)
cosmics_mask = c.mask
#cosmics.tofits('plots/lacosmic/'+fname_noext+'_cmask.fits', np.transpose(cosmics_mask), header)
#cosmics.tofits('plots/lacosmic/'+fname_noext+'_before.fits', np.transpose(image), header)
cosmics_masked_image = mask_fits(image, cosmics_mask, maskval=0.0, fillval=np.nan)
#cosmics.tofits('plots/lacosmic/'+fname_noext+'_after.fits', np.transpose(cosmics_masked_image), header)
if type(cosmics_masked_image) == fits.hdu.hdulist.HDUList:
cosmics_masked_image = assign_header(cosmics_masked_image, image[0].header)
cosmics_masked_image[0].header['COMMENT'] = 'Cosmic rays masked.'
return cosmics_masked_image,header
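# Hedged usage sketch: calibrate() expects astropy HDULists plus the project-specific
# master calibration files under calib/master_calib/. The file names below are
# placeholders; lacosmic=False skips the cosmic-ray step so the cosmics module is not
# exercised in this example.
if __name__ == '__main__':
    science = fits.open('raw/science_frame.fits')        # hypothetical raw exposure
    master_bias = fits.open('calib/master_bias.fits')    # hypothetical master bias
    reduced = calibrate(science, master_bias, fiber_mask=None, lacosmic=False)
    reduced.writeto('science_frame_reduced.fits', overwrite=True)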
|
|
import cv2
import os
import numpy as np
imgpath="./data_dir_crop_y/"
outputpath="./data_dir_crop_x/"
if not os.path.exists(outputpath):
os.makedirs(outputpath)
count = 1
for imgx in os.listdir(imgpath):
a, b = os.path.splitext(imgx)
img = cv2.imread(imgpath + a +b)
img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_CUBIC)
#img = cv2.resize(img, (100, 100), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(outputpath + a + b, img)
    count = count + 1
    print(str(count) + "ok")
|
|
from typing import Dict, List
import numpy as np
from stl_rules.stl_rule import STLRule
class ComfortLongitudinalJerk(STLRule):
"""
    This rule implements a Comfort requirement on Longitudinal Jerk.
    It is based on the formalization reported in 5.2.22 of [3: Westhofen et al., 2021].
"""
@property
def variables(self):
return ["time", "j_lon"]
@property
def types(self):
return ["int", "float"]
def __init__(self, rss_params):
"""
:param rss_params: static parameters for rss monitoring
`j_lon_max`: max comfortable longitudinal jerk
`sim_dt`: integration time in simulation, used to compute acceleration derivative
"""
required_parameters = ["j_lon_max", "sim_dt"]
assert all([p in rss_params for p in required_parameters])
self._p = {p: rss_params[p] for p in required_parameters}
@property
def spec(self):
# specification
J_bounded = f"(abs(j_lon) <= {self._p['j_lon_max']})"
return J_bounded
@property
def demo_spec(self):
return self.spec
def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin: int = 5, end: int = 1000) -> Dict[str, list]:
# check input
obs_signals = ["elapsed_time", "j_lon"]
assert all([s in data for s in obs_signals]), f"missing in signals ({obs_signals} not in {data.keys()})"
# generate output signals from input signals
out_signals = {
"elapsed_time": data["elapsed_time"],
"time": np.floor((data["elapsed_time"] - data["elapsed_time"][0]) / self._p["sim_dt"]).astype(int),
"j_lon": data["j_lon"]
}
out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()}
# check output
assert all([s in out_signals for s in
self.variables]), f"missing out signals ({self.variables} not in {out_signals.keys()})"
return out_signals
def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]:
# check input
obs_signals = ["a_lon"]
assert all([s in data for s in obs_signals]), f"missing in signals ({obs_signals} not in {data.keys()})"
# generate output signals from input signals
out_signals = {
"time": data["time"],
"j_lon": np.gradient(data['a_lon'], self._p['sim_dt'])
}
out_signals = {k: list(v) for k, v in out_signals.items()}
# check output
assert all([s in out_signals for s in
self.variables]), f"missing out signals ({self.variables} not in {out_signals.keys()})"
return out_signals
class ComfortLateralJerk(STLRule):
"""
    This rule implements a Comfort requirement on Lateral Jerk.
    It is based on the formalization reported in 5.2.22 of [3: Westhofen et al., 2021].
"""
@property
def variables(self):
return ["time", "j_lat"]
@property
def types(self):
return ["int", "float"]
def __init__(self, rss_params):
"""
:param rss_params: static parameters for rss monitoring
`j_lat_max`: max comfortable lateral jerk
`sim_dt`: integration time in simulation, used to compute acceleration derivative
"""
required_parameters = ["j_lat_max", "sim_dt"]
assert all([p in rss_params for p in required_parameters])
self._p = {p: rss_params[p] for p in required_parameters}
@property
def spec(self):
# specification
J_bounded = f"(abs(j_lat) <= {self._p['j_lat_max']})"
return J_bounded
@property
def demo_spec(self):
return self.spec
def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin: int = 5, end: int = 1000) -> Dict[
str, List]:
# check input
obs_signals = ["elapsed_time", "j_lat"]
assert all([s in data for s in obs_signals]), f"missing in signals ({obs_signals} not in {data.keys()})"
# generate output signals from input signals
out_signals = {
"elapsed_time": data["elapsed_time"] - data["elapsed_time"][0],
"time": np.floor((data["elapsed_time"] - data["elapsed_time"][0]) / self._p["sim_dt"]).astype(int),
"j_lat": data["j_lat"]
}
out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()}
# check output
assert all([s in out_signals for s in
self.variables]), f"missing out signals ({self.variables} not in {out_signals.keys()})"
return out_signals
def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]:
# check input
obs_signals = ["a_lat"]
assert all([s in data for s in obs_signals]), f"missing in signals ({obs_signals} not in {data.keys()})"
# generate output signals from input signals
out_signals = {
"time": data["time"],
"j_lat": np.gradient(data['a_lat'], self._p['sim_dt'])
}
out_signals = {k: list(v) for k, v in out_signals.items()}
# check output
assert all([s in out_signals for s in
self.variables]), f"missing out signals ({self.variables} not in {out_signals.keys()})"
return out_signals
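# Hedged usage sketch: monitoring a recorded longitudinal acceleration trace. The
# parameter values are placeholders; `sim_dt` must match the sampling step of the trace
# and `j_lon_max` the comfort bound used in the formalization.
if __name__ == '__main__':
    rule = ComfortLongitudinalJerk(rss_params={"j_lon_max": 2.0, "sim_dt": 0.05})
    trace = {
        "time": np.arange(100),
        "a_lon": np.sin(np.linspace(0.0, 3.0, 100)),  # synthetic acceleration signal
    }
    signals = rule.generate_signals(trace)
    print(rule.spec, len(signals["j_lon"]))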
|
|
import os
import sys
import json
import subprocess
import numpy as np
from PIL import Image, ImageDraw, ImageFont
if __name__ == '__main__':
result_json_path = sys.argv[1]
video_root_path = sys.argv[2]
dst_directory_path = sys.argv[3]
if not os.path.exists(dst_directory_path):
subprocess.call('mkdir -p {}'.format(dst_directory_path), shell=True)
class_name_path = sys.argv[4]
temporal_unit = int(sys.argv[5])
with open(result_json_path, 'r') as f:
results = json.load(f)
with open(class_name_path, 'r') as f:
class_names = []
for row in f:
class_names.append(row[:-1])
for index in range(len(results)):
video_path = os.path.join(video_root_path, results[index]['video'])
print(video_path)
clips = results[index]['clips']
unit_classes = []
unit_scores = []
unit_segments = []
if temporal_unit == 0:
unit = len(clips)
else:
unit = temporal_unit
for i in range(0, len(clips), unit):
n_elements = min(unit, len(clips) - i)
scores = np.array(clips[i]['scores'])
            for j in range(i + 1, min(i + unit, len(clips))):
                scores += np.array(clips[j]['scores'])
scores /= n_elements
unit_classes.append(class_names[np.argmax(scores)])
unit_scores.append(np.max(scores))
unit_segments.append([clips[i]['segment'][0],
clips[i + n_elements - 1]['segment'][1]])
for i in range(len(unit_classes)):
print('Frames {} - {}:\t {} {:.2f}'.format(i*unit*16, (i+1)*unit*16, unit_classes[i], unit_scores[i]))
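# Hedged usage note: the positional arguments are, in order, the result json produced by
# the classifier, the directory holding the videos, a destination directory, the
# class-name list (one label per line) and the temporal unit (0 = average over all
# clips). The script name below is a placeholder; the printed frame ranges assume
# 16-frame clips as in the loop above.
#   python generate_result_video.py result.json videos/ output/ class_names_list 5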
|
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import gto
from pyscf.lib import logger
from pyscf.lib import param
from pyscf.data import elements
from pyscf.scf import hf
def get_atm_nrhf(mol):
if mol.has_ecp():
raise NotImplementedError('Atomic calculation with ECP is not implemented')
atm_scf_result = {}
for a, b in mol._basis.items():
atm = gto.Mole()
atm.stdout = mol.stdout
atm.atom = atm._atom = [[a, (0, 0, 0)]]
atm._basis = {a: b}
atm.nelectron = gto.charge(a)
atm.spin = atm.nelectron % 2
atm._atm, atm._bas, atm._env = \
atm.make_env(atm._atom, atm._basis, atm._env)
atm._built = True
if atm.nelectron == 0: # GHOST
nao = atm.nao_nr()
mo_occ = mo_energy = numpy.zeros(nao)
mo_coeff = numpy.zeros((nao,nao))
atm_scf_result[a] = (0, mo_energy, mo_coeff, mo_occ)
else:
atm_hf = AtomSphericAverageRHF(atm)
atm_hf.verbose = 0
atm_scf_result[a] = atm_hf.scf()[1:]
atm_hf._eri = None
mol.stdout.flush()
return atm_scf_result
class AtomSphericAverageRHF(hf.RHF):
def __init__(self, mol):
self._eri = None
self._occ = None
hf.SCF.__init__(self, mol)
def dump_flags(self, verbose=None):
hf.RHF.dump_flags(self, verbose)
logger.debug(self.mol, 'occupation averaged SCF for atom %s',
self.mol.atom_symbol(0))
def eig(self, f, s):
atm = self.mol
symb = atm.atom_symbol(0)
idx_by_l = [[] for i in range(param.L_MAX)]
i0 = 0
for ib in range(atm.nbas):
l = atm.bas_angular(ib)
nc = atm.bas_nctr(ib)
i1 = i0 + nc * (l*2+1)
idx_by_l[l].extend(range(i0, i1, l*2+1))
i0 = i1
nbf = atm.nao_nr()
self._occ = numpy.zeros(nbf)
mo_c = numpy.zeros((nbf, nbf))
mo_e = numpy.zeros(nbf)
# fraction occupation
for l in range(param.L_MAX):
if idx_by_l[l]:
n2occ, frac = frac_occ(symb, l)
logger.debug1(self, 'l = %d occ = %d + %.4g', l, n2occ, frac)
idx = numpy.array(idx_by_l[l])
f1 = 0
s1 = 0
for m in range(l*2+1):
f1 = f1 + f[idx+m,:][:,idx+m]
s1 = s1 + s[idx+m,:][:,idx+m]
f1 *= 1./(l*2+1)
s1 *= 1./(l*2+1)
e, c = self._eigh(f1, s1)
for i, ei in enumerate(e):
logger.debug1(self, 'l = %d e_%d = %.9g', l, i, ei)
for m in range(l*2+1):
mo_e[idx] = e
self._occ[idx[:n2occ]] = 2
if frac > 1e-15:
self._occ[idx[n2occ]] = frac
for i,i1 in enumerate(idx):
mo_c[idx,i1] = c[:,i]
idx += 1
return mo_e, mo_c
def get_occ(self, mo_energy=None, mo_coeff=None):
return self._occ
def get_grad(self, mo_coeff, mo_occ, fock=None):
return 0
def scf(self, *args, **kwargs):
self.build()
return hf.kernel(self, *args, dump_chk=False, **kwargs)
def frac_occ(symb, l):
nuc = gto.charge(symb)
if l < 4 and elements.CONFIGURATION[nuc][l] > 0:
ne = elements.CONFIGURATION[nuc][l]
nd = (l * 2 + 1) * 2
ndocc = ne.__floordiv__(nd)
frac = (float(ne) / nd - ndocc) * 2
else:
ndocc = frac = 0
return ndocc, frac
if __name__ == '__main__':
mol = gto.Mole()
mol.verbose = 5
mol.output = None
mol.atom = [["N", (0. , 0., .5)],
["N", (0. , 0.,-.5)] ]
mol.basis = {"N": '6-31g'}
mol.build()
print(get_atm_nrhf(mol))
|
|
import networkx as nx
from py2neo import Graph, Node, Relationship
import pandas as pd
import random
from neo4j import GraphDatabase, basic_auth
import matplotlib
graph = Graph("bolt://localhost:7687", auth=("neo4j", "Password"))
driver = GraphDatabase.driver('bolt://localhost',auth=basic_auth("neo4j", "Password"))
db = driver.session()
# from this list only RS nodes are created (from hyphe file)
RSlist_gexfSiteName = ["facebook.com","fr-fr.facebook.com","fr-ca.facebook.com",
"th-th.facebook.com", "es-la.facebook.com", "de-de.facebook.com",
"developers.facebook.com", "business.facebook.com","m.facebook.com", "web.facebook.com",
"twitter.com", "mobile.twitter.com","fr.twitter.com",
"plus.google.com",
"pinterest.com", "fr.pinterest.com", "uk.pinterest.com", "pl.pinterest.com",
"linkedin.com", "fr.linkedin.com","ca.linkedin.com","tg.linkedin.com", "be.linkedin.com",
"nl.linkedin.com", "ch.linkedin.com", "uk.linkedin.com","it.linkedin.com",
"youtube.com"]
def importGexfNodesAndRel(gexffilepath, depth = 0):
    '''
    Reads a gexf network file from hyphe and updates or creates all nodes in the neo4j database:
    Website, Facebook, Linkedin, Pinterest and Twitter nodes
    Prints a . for every 100 nodes/links imported and the running count every 1000
    "depth" is used to prefix new properties on nodes and relationships. Value can be 0, 1 or 2
    properties are imported as lists; each member of a list is a non-unique value coming from duplicates
    '''
G= nx.read_gexf(gexffilepath, node_type=None, relabel=False, version='1.1draft')
data = nx.json_graph.node_link_data(G)
totnbnodes=len(data['nodes'])
print(totnbnodes," nodes found in gexf")
i=1
for node in data['nodes']:
i=i+1
site_name = node['label'].lower().split(" /")[0]
if site_name in RSlist_gexfSiteName:
# twitter node
if "twitter" in node['label'].lower():
if len(node['label'].lower().split(" /")) < 2:
print("I don't create", node['label'].lower())
nodematch = None
else:
user_name = node['label'].lower().split(" /")[1]
user_name = user_name.replace("%20%e2%80%8f","").replace("%40%20%40","").replace("%40suivre%20sur%20twitter:%20%40","")
user_name = user_name.replace("%20","").replace("%40","").replace("%e2%81%a9","")
user_name = user_name.replace("%0apour","").replace("%0apou","").replace("%0apo","")
user_name = user_name.replace("%e2%80%a6","").replace("%e2%80%8e","").replace("%29","")
user_name = user_name.replace("%21","").replace("%c3%a9","e").replace("%c3%a1","a")
if "%" in user_name:
print("cannot create twitter node because of name:",node['label'])
nodematch = None
else:
nodematch = graph.nodes.match('Twitter', user_name = user_name).first()
if nodematch == None:
try:
nodematch = Node('Twitter', user_name = user_name)
nodematch.__primarylabel__ = 'Twitter'
nodematch.__primarykey__ = 'user_name'
graph.merge(nodematch)
except:
print("cannot create twitter node, unknown error", node['label'])
nodematch = None
# facebook node
elif "facebook" in node['label'].lower():
if len(node['label'].lower().split(" /")) < 2 or ".php" in node['label'].lower():
nodematch = None
print("I don't create", node['label'].lower())
else:
user_name = node['label'].lower().split(" /")[1]
if ".php" in user_name or user_name=="name":
print("cannot create facebook node because of name:",node['label'])
nodematch = None
else:
nodematch = graph.nodes.match('Facebook',user_name = user_name).first()
if nodematch == None:
try:
nodematch = Node('Facebook', user_name = user_name)
nodematch.__primarylabel__ = 'Facebook'
nodematch.__primarykey__ = 'user_name'
graph.merge(nodematch)
except:
print("cannot create facebook node, unknown error", node['label'])
nodematch = None
# pinterest node
elif "pinterest" in node['label'].lower():
if len(node['label'].lower().split(" /")) < 2:
nodematch = None
print("I don't create", node['label'].lower())
else:
user_name = node['label'].lower().split(" /")[1]
if user_name=="pin":
print("cannot create pinterest node because of name:",node['label'])
nodematch = None
else:
nodematch = graph.nodes.match('Pinterest',user_name = user_name).first()
if nodematch == None:
try:
nodematch = Node('Pinterest', user_name = user_name)
nodematch.__primarylabel__ = 'Pinterest'
nodematch.__primarykey__ = 'user_name'
graph.merge(nodematch)
except:
print("cannot create pinterest node, unknown error", node['label'])
nodematch = None
# linkedin node
elif "linkedin" in node['label'].lower():
if len(node['label'].lower().split(" /")) < 2:
nodematch = None
print("I don't create", node['label'].lower())
else:
user_name = node['label'].lower().split(" /")[1]
user_name = user_name.replace("%c3%a9", "e").replace("%26", "et").replace("%27","").replace("%c3%a7", "c").replace("%c3%bb","u")
nodematch = graph.nodes.match('Linkedin', user_name = user_name).first()
if nodematch == None:
try:
nodematch = Node('Linkedin', user_name = user_name)
nodematch.__primarylabel__ = 'Linkedin'
nodematch.__primarykey__ = 'user_name'
graph.merge(nodematch)
except:
print("cannot create linkedin node, unknown error", node['label'])
nodematch = None
# youtube not imported
else:
nodematch = None
else:
if "twitter" in site_name or "facebook" in site_name or "linkedin" in site_name or "pinterest" in site_name:
nodematch = None
print(node['label'], "node not created (and it's OK)")
else:
site_name = node['label'].lower().split(" /")[0]
nodematch = graph.nodes.match(site_name =site_name).first()
if nodematch == None:
try:
nodematch = Node('Website', site_name = site_name)
nodematch.__primarylabel__ = 'Website'
nodematch.__primarykey__ = 'site_name'
graph.merge(nodematch)
except:
print("could not import ", node['label'])
nodematch = None
# Create properties on nodes
if nodematch != None:
for key in node.keys():
if nodematch["D" + str(depth) + "_" + key] == None:
nodematch["D" + str(depth) + "_" + key] = [node[key]]
else:
if node[key] in nodematch["D" + str(depth) + "_" + key]:
pass
else:
nodematch["D" + str(depth) + "_" + key].append(node[key])
graph.push(nodematch)
else:
pass
if i%100 == 0:
print(".", end=" ")
if i%1000 ==0:
print(i,"/",totnbnodes)
print(i," nodes read")
print(len(graph.nodes.match("Website")), "Websites in db after import")
print(len(graph.nodes.match("Twitter")), "Twitter in db after import")
print(len(graph.nodes.match("Facebook")), "Facebook in db after import")
print(len(graph.nodes.match("Pinterest")), "Pinterest in db after import")
print(len(graph.nodes.match("Linkedin")), "Linkedin in db after import")
# Links
totnblinks=len(data['links'])
print(totnblinks," links found in gexf")
j=0
for link in data['links']:
results = db.run(
"MATCH (n1) WHERE $source_id IN n1.D"+str(depth)+"_id "
"MATCH (n2) WHERE $target_id IN n2.D"+str(depth)+"_id "
"CREATE (n1)-[:LINKS_TO_D"+str(depth)+" {count: $count}]->(n2) "
,{"source_id":link['source'], "target_id":link['target'], "count": link['count']}
)
if j%100 == 0:
print(".", end=" ")
if j%1000 ==0:
print(j ,"/",totnblinks)
j=j+1
print(j," links imported")
print(len(graph.relationships.match()), "links in db after import")
def importMondeDiploFiles(path_to_medias_francais, path_to_relations_medias_francais):
    ''' Imports the mondediplo files into the Neo4j database as "Entity" nodes and "OWNED_BY"
    relationships. Creates all properties as Diplo_propname.
    No relationship created to other nodes.
    '''
    medias_df = pd.read_csv(path_to_medias_francais, sep=' ')
# nodes
i=0
totnbnodes = len(medias_df)-1
print(totnbnodes, "nodes found in file")
for index, row in medias_df.iterrows():
        i=i+1
nodematch = graph.nodes.match(entity_name = row["nom"]).first()
if nodematch == None:
try:
nodematch = Node('Entity', entity_name = row["nom"])
nodematch.__primarylabel__ = 'Entity'
nodematch.__primarykey__ = 'entity_name'
graph.merge(nodematch)
except:
print("could not import ", row["nom"])
nodematch = None
if nodematch != None:
for key in row.keys():
nodematch["Diplo_" + key] = row[key]
graph.push(nodematch)
if i%100 == 0:
print(".", end=" ")
if i%1000 ==0:
print(i,"/",totnbnodes)
print(len(graph.nodes.match("Entity")), "Entities in db after import")
# links
relations_df = pd.read_csv(path_to_relations_medias_francais, sep=' ')
totnblinks = len(relations_df)-1
print(totnblinks, "links found in file")
j = 0
for index, row in relations_df.iterrows():
j=j+1
results = db.run(
"MATCH (n1:Entity) WHERE n1.Diplo_nom = $origine "
"MATCH (n2:Entity) WHERE n2.Diplo_nom = $cible "
"MERGE (n2)-[:OWNED_BY {valeur:$v, source:$s, datePubli:$dp, dateConsult:$dc}] ->(n1) "
,{"origine":row['origine'], "cible":row['cible'],"v":row['valeur'], "s":row['source'], "dp":row['datePublication'], "dc":row['dateConsultation'] }
)
if j%100 == 0:
print(".", end=" ")
if j%1000 ==0:
print(j ,"/",totnblinks)
relfin = len(graph.relationships.match((),"OWNED_BY"))
print(relfin, " OWNED_BY relationships in db after import (or not : there's a bug)")
def importEntityWebsiteRelForDiplo(medias_francais_relDB_path):
    '''Creates an OWNED_BY relationship for each Diplo nom / website site_name pair
    in the file. Looks only at rows with a valid site name.
    '''
rel_diplo_db_df = pd.read_csv(medias_francais_relDB_path, sep=',')
for index, row in rel_diplo_db_df[ rel_diplo_db_df['db_sitename'].notnull()].iterrows():
results = db.run(
"MATCH (e:Entity) WHERE e.Diplo_nom = $nom "
"MATCH (w:Website) WHERE w.site_name = $site "
"MERGE (w)-[:OWNED_BY ] ->(e) "
,{"nom":row['nom'], "site":row['db_sitename']}
)
print(row['nom'], row['db_sitename'])
def importACPM_SiteGP(ACPM_siteGrandsPublics_path):
'''Import file from export https://www.acpm.fr/site/Les-chiffres/Frequentation-internet/Sites-Grand-Public/Classement-unifie
as ACPM_SiteGP_Prop in Websites nodes
'''
df = pd.read_csv(ACPM_siteGrandsPublics_path, sep=';', encoding='cp1252')
df['db_site_name']=""
ACPM_key = {"L'Aisnenouvelle.fr" : "aisnenouvelle.fr", "L'Equipe.fr":"lequipe.fr",
"L'Obs.com":"nouvelobs.com", "Lathierache.fr":"la-thierache.fr", "LHumanité.fr":"humanite.fr",
"Diabétologie-pratique.com":"diabetologie-pratique.com", "Gynécologie-pratique.com":"gynecologie-pratique.com" }
for index, row in df.iterrows():
if row['Sites'] in ACPM_key.keys():
match = graph.nodes.match(site_name = ACPM_key[row['Sites']].lower()).first()
df.loc[df['Sites'] == row['Sites'], ['db_site_name']] = ACPM_key[row['Sites']]
else:
match = graph.nodes.match(site_name = row['Sites'].lower()).first()
if match != None:
df.loc[df['Sites'] == row['Sites'], ['db_site_name']] = match['site_name']
for key in row.keys():
if match["ACPM_SiteGP_" + key] == None:
match["ACPM_SiteGP_" + key] = row[key]
graph.push(match)
print("no node match for these Site names, did not import:")
print(df[df.db_site_name == ""]['Sites'])
def importACPM_SitePro(ACPM_sitePro_path):
'''Import file from export https://www.acpm.fr/site/Les-chiffres/Frequentation-internet/Sites-Grand-Public/Classement-unifie
as ACPM_SitePro__Prop in Websites nodes
'''
df = pd.read_csv(ACPM_sitePro_path, sep=';', encoding='cp1252')
df['db_site_name']=""
ACPM_key = {"L'Aisnenouvelle.fr" : "aisnenouvelle.fr", "L'Equipe.fr":"lequipe.fr",
"L'Obs.com":"nouvelobs.com", "Lathierache.fr":"la-thierache.fr", "LHumanité.fr":"humanite.fr",
"Diabétologie-pratique.com":"diabetologie-pratique.com", "Gynécologie-pratique.com":"gynecologie-pratique.com" }
for index, row in df.iterrows():
if row['Sites'] in ACPM_key.keys():
match = graph.nodes.match(site_name = ACPM_key[row['Sites']].lower()).first()
df.loc[df['Sites'] == row['Sites'], ['db_site_name']] = ACPM_key[row['Sites']]
else:
match = graph.nodes.match(site_name = row['Sites'].lower()).first()
if match != None:
df.loc[df['Sites'] == row['Sites'], ['db_site_name']] = match['site_name']
for key in row.keys():
if match["ACPM_SitePro_" + key] == None:
match["ACPM_SitePro_" + key] = row[key]
graph.push(match)
print("no node match for these Site names, did not import:")
print(df[df.db_site_name == ""]['Sites'])
# ********************************************************************************
# This is what to run to get a brand new full DB
# Import Hyphe D0 DISCO
gexfD0DISCO="C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005Websites01_D0_DISCO.gexf"
importGexfNodesAndRel(gexfD0DISCO, 0)
# Import Hyphe D1 DISCO
gexfD1DISCO="C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005Websites01_D1_DISCO.gexf"
importGexfNodesAndRel(gexfD1DISCO, 1)
# Import Mondediplo
medias_path = 'C:/Users/Jo/Documents/Tech/Atom_prj/MyMedia-FillDB/data/mondediplo/medias_francais.tsv'
relations_medias_path = 'C:/Users/Jo/Documents/Tech/Atom_prj/MyMedia-FillDB/data/mondediplo/relations_medias_francais.tsv'
importMondeDiploFiles(medias_path, relations_medias_path)
medias_francais_relDB_path = 'C:/Users/Jo/Documents/Tech/Atom_prj/MyMedia-FillDB/data/mondediplo/medias_francais_relDB.tsv'
importEntityWebsiteRelForDiplo(medias_francais_relDB_path)
#ACPM:
ACPM_siteGrandsPublics_path = 'C:/Users/Jo/Documents/Tech/Atom_prj/MyMedia-FillDB/data/ACPM/ACPM_list_classement-unifie_20200708_GrandPub.csv'
importACPM_SiteGP(ACPM_siteGrandsPublics_path)
ACPM_sitePro_path = 'C:/Users/Jo/Documents/Tech/Atom_prj/MyMedia-FillDB/data/ACPM/ACPM_list_classement-unifie_20200708_site_pro.csv'
importACPM_SitePro(ACPM_sitePro_path)
# Till here
# ********************************************************************************
# This is for test:
results = db.run("MATCH (w:Twitter) RETURN w.user_name")
df_twit = pd.DataFrame([r["w.user_name"] for r in results])
df_twit
# ********************************************************************************
'''
TODO :
- check nodes without links (MATCH (n:Twitter) WHERE NOT (n)--() RETURN n.user_name)
- check multiple relationships between 2 nodes so the counts can be summed (if needed)
  with
  match (n1)-[r1]->(n2)<-[r2]-(n1)
  where type(r1)=type(r2)
  return r1, r2
  (a sketch of these two checks follows right after this note)
-
Deduplication rules are too complicated. Skipped for quickness.
- check the imports
- deduplication rules:
    - for nodes:
        - at the end of an import, should the property lists be merged in post-processing? (1 import = 1 list; decide what gets summed and what gets kept)
        - when importing onto a node that already exists, import D0x_properties as a list
        - compare D0x to D0 if it exists. Keep everything (1 list per import?), or keep the one with the largest figures (no, if it only imports one extra thing), or the most recent? abs
          -> look into it, do this per property, and recompute the in/out degrees instead of importing them?
    - for relationships:
        - for D0:
            - check the D0 counts. If > 1 it is still odd
            - want to be restrictive (on DISCO only): if 3 successive imports, keep the one present in 2 or more, drop those present in only 1
TO DO :
for the non-D0? Import DiX with the count in a list. If duplicated within the same import, sum the counts. At the end keep the largest count?
'''
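# A minimal sketch of the first two checks listed in the note above, assuming the
# same `db` connection used in the test cell further up (the Cypher queries are
# taken from the note itself):
orphan_twitter = db.run("MATCH (n:Twitter) WHERE NOT (n)--() RETURN n.user_name")
print("Twitter nodes without links:", [r["n.user_name"] for r in orphan_twitter])
duplicated_rels = db.run("MATCH (n1)-[r1]->(n2)<-[r2]-(n1) "
                         "WHERE type(r1) = type(r2) RETURN n1, n2, r1, r2")
print("pairs of duplicated relationships:", len(list(duplicated_rels)))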
********************************************************************************
def importGexfLinks(gexffilepath, depth = 0):
    '''
    Reads a gexf network file from hyphe and updates or creates relationships in the neo4j database.
    Prints "." for each 100 links imported and "1000" for each 1000.
    "depth" is used to prefix new properties on the relationships. Value can be 0, 1, 2 or 3.
    '''
G= nx.read_gexf(gexffilepath, node_type=None, relabel=False, version='1.1draft')
data = nx.json_graph.node_link_data(G)
totnblinks=len(data['links'])
print(totnblinks," links found in gexf")
j=1
for link in data['links']:
if depth ==0:
source_n = graph.nodes.match("Website", D0_id = link['source']).first()
target_n = graph.nodes.match("Website", D0_id = link['target']).first()
if depth == 1:
source_n = graph.nodes.match("Website", D1_id = link['source']).first()
target_n = graph.nodes.match("Website", D1_id = link['target']).first()
if depth == 2:
source_n = graph.nodes.match("Website", D2_id = link['source']).first()
target_n = graph.nodes.match("Website", D2_id = link['target']).first()
if depth == 3:
source_n = graph.nodes.match("Website", D3_id = link['source']).first()
target_n = graph.nodes.match("Website", D3_id = link['target']).first()
relmatch = graph.relationships.match((source_n,target_n),r_type="LINKS_TO").first()
*********************************************
# check that the names and properties are consistent:
results = graph.nodes.match("Website")
for r in results:
if r["site_name"].lower() not in [x.lower() for x in r["D1_name"]]:
print(r["site_name"].lower(), [x.lower() for x in r["D1_name"]])
results = graph.nodes.match("Facebook")
for r in results:
if r["user_name"].lower() not in [x.lower().split(" /")[1] for x in r["D0_name"]]:
print(r["user_name"].lower(), [x.lower().split(" /")[1] for x in r["D0_name"]])
results = graph.nodes.match("Twitter")
for r in results:
if r["user_name"].lower() != r["D0_name"].lower().split(" /")[1]:
print(r)
results = graph.nodes.match("Pinterest")
for r in results:
if r["user_name"].lower() not in [x.lower().split(" /")[1] for x in r["D0_name"]]:
print(r["user_name"].lower(), [x.lower().split(" /")[1] for x in r["D0_name"]])
results = graph.nodes.match("Linkedin")
for r in results:
if r["user_name"].lower() not in [x.lower().split(" /")[1] for x in r["D0_name"]]:
print(r["user_name"].lower(), [x.lower().split(" /")[1] for x in r["D0_name"]])
|
|
import pandas as pd
import seaborn as sb
import numpy as np
import matplotlib.pyplot as plt
from IPython import embed
##################### ATTENTION WEIGHTS PLOTTING #######################
class AttentionPlotter(object):
@classmethod
def plot(cls, weights, srcseq=None, dstseq=None, cmap="Greys", scale=1.):
assert(weights.ndim == 2)
if srcseq is not None:
assert(weights.shape[0] == len(srcseq))
else:
srcseq = range(weights.shape[0])
if dstseq is not None:
assert(weights.shape[1] == len(dstseq))
else:
dstseq = range(weights.shape[1])
yticks = srcseq
xticks = dstseq
sb.set_style(style="white")
sb.set(font_scale=1.5 * scale)
height = weights.shape[0] * scale * 0.5
width = weights.shape[1] * scale * 0.5
margin = 0.2
#f, ax = plt.subplots(figsize=(width, height),
# gridspec_kw={"top": 1 - margin, "bottom": margin})
ax = sb.heatmap(weights, cmap=cmap, square=True, linewidths=1.*scale,
yticklabels=yticks, xticklabels=xticks, vmax=1., vmin=0.,
cbar=False)
plt.yticks(rotation=0)
plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
if __name__ == "__main__":
s = "the quick brown fox jumped over the lazy dog".split()
d = "de snelle bruine vos sprong over de luie hond".split()
w = np.random.random((len(s), len(d)))
AttentionPlotter.plot(w, s, d, scale=1.)
|
|
import numpy
from aydin.analysis.camera_simulation import simulate_camera_image
from aydin.io.datasets import characters
from aydin.it.transforms.variance_stabilisation import VarianceStabilisationTransform
def demo_vst():
image = characters()
image = image.astype(numpy.float32) * 0.1
noisy = simulate_camera_image(image)
import napari
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(image, name='image')
        viewer.add_image(noisy, name='noisy')
for mode in [
'yeo-johnson',
'box-cox',
'quantile',
'anscomb',
'log',
'sqrt',
'identity',
]:
print(f"testing mode: {mode}")
vst = VarianceStabilisationTransform(mode=mode)
preprocessed = vst.preprocess(noisy)
postprocessed = vst.postprocess(preprocessed)
viewer.add_image(preprocessed, name='preprocessed_' + mode)
viewer.add_image(postprocessed, name='postprocessed_' + mode)
demo_vst()
|
|
import numpy as np
import quaternion
from tbase.shader import Shader
from tbase import utils
from tbase.utils import Quaternion
try:
from pyglet.gl import *
except:
print("WARNING: pyglet cannot be imported but might be required for visualization.")
VERTEX_SHADER = ['''
varying vec3 normal, lightDir0, lightDir1, eyeVec;
void main()
{
normal = gl_NormalMatrix * gl_Normal;
vec3 vVertex = vec3(gl_ModelViewMatrix * gl_Vertex);
lightDir0 = vec3(gl_LightSource[0].position.xyz - vVertex);
lightDir1 = vec3(gl_LightSource[1].position.xyz - vVertex);
eyeVec = -vVertex;
gl_Position = ftransform();
}
''']
FRAGMENT_SHADER = ['''
varying vec3 normal, lightDir0, lightDir1, eyeVec;
void main (void)
{
vec4 final_color =
(gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) +
(gl_LightSource[0].ambient * gl_FrontMaterial.ambient) +
(gl_LightSource[1].ambient * gl_FrontMaterial.ambient);
vec3 N = normalize(normal);
vec3 L0 = normalize(lightDir0);
vec3 L1 = normalize(lightDir1);
float lambertTerm0 = dot(N,L0);
float lambertTerm1 = dot(N,L1);
if(lambertTerm0 > 0.0)
{
final_color += gl_LightSource[0].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm0;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L0, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[0].specular *
gl_FrontMaterial.specular *
specular;
}
if(lambertTerm1 > 0.0)
{
final_color += gl_LightSource[1].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm1;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L1, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[1].specular *
gl_FrontMaterial.specular *
specular;
}
gl_FragColor = final_color;
}
''']
def _correct_rotational_difference(points, target_dir):
"""
Rotates all points in `points` around the z-axis according to an angle computed such that the first forward
vector of the root is rotated onto the `target_dir` vector.
:param points: an np array of size (nr_points, 3, nr_frames), the first point in the first frame is considered root
:param target_dir: a 2-D vector specifying the target dir on the x/y plane
:return: the corrected points in the same format as the input
"""
# compute the rotation to rotate points by angle specified by target_dir
actual_dir = points[0, :, 1] - points[0, :, 0]
rot = utils.rotation_between(target_dir, actual_dir)
# project all points to x-y-plane
projected = np.copy(points)
ori_z = np.copy(projected[:, 2:3, :])
projected[:, 2:3, :] = np.zeros(shape=[points.shape[0], 1, points.shape[2]])
# apply the rotation to every vector
projected = np.reshape(np.transpose(projected, [0, 2, 1]), [-1, 3])
rot_mult = np.expand_dims(rot, axis=0)
proj_mult = np.expand_dims(projected, axis=-1)
proj_corrected = np.matmul(rot_mult, proj_mult)
# now transform back to original shape
proj_corrected = np.reshape(np.squeeze(proj_corrected, axis=-1), [points.shape[0], points.shape[2], 3])
proj_corrected = np.transpose(proj_corrected, [0, 2, 1])
# restore the old z-values
proj_corrected[:, 2:3, :] = ori_z
return proj_corrected
def to_global_batched(points, override_trajectory=None, override_root=None):
"""
    :param points: A np array of shape (batch_size, dof, seq_length) where dof is assumed to contain the trajectory
    in the last three dimensions.
    :param override_trajectory: A np array of shape (batch_size, 3, seq_length) if the trajectory of `points` is
    to be overridden with this value.
"""
n = points.shape[0]
seq_len = points.shape[-1]
body_dim = len(Skeleton.ALL_JOINTS)*3
body_joints = points[:, :body_dim].reshape([n, -1, 3, seq_len]) # (N, n_joints, 3, seq_len)
trajectory = points[:, body_dim:body_dim + 3] # sometimes there's foot contacts (N, 3, seq_len)
if override_trajectory is not None:
trajectory = override_trajectory
if override_root is not None:
body_joints[:, 0] = override_root
body_global = []
for i in range(n):
bg = utils.to_global(body_joints[i], trajectory[i].T)
body_global.append(bg)
return np.array(body_global) * Skeleton.TO_CM # (N, n_joints, 3, seq_len)
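# Hypothetical usage sketch (shapes follow the docstring above: 22 joints * 3
# coordinates = 66 values per frame, followed by a 3-dim trajectory, so dof >= 69):
#   batch = np.random.randn(4, 69, 120)        # (batch_size, dof, seq_len)
#   globs = to_global_batched(batch)           # -> (4, 22, 3, 120), scaled to cm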
class Skeleton(object):
"""
Defines a skeleton.
"""
# Making joint indices explicit
ALL_JOINTS = list(range(0, 22))
ROOT, HIP, RIGHT_GROIN, RIGHT_KNEE, RIGHT_HEEL, RIGHT_TOE, \
LEFT_GROIN, LEFT_KNEE, LEFT_HEEL, LEFT_TOE, \
LOWER_BODY, UPPER_BODY, NECK, HEAD, \
RIGHT_SHOULDER, RIGHT_ELBOW, RIGHT_WRIST, RIGHT_HAND, \
LEFT_SHOULDER, LEFT_ELBOW, LEFT_WRIST, LEFT_HAND = ALL_JOINTS
# Indices pointing to the parent for every joint in the skeleton, -1 if it has no parent
PARENTS = np.array([-1, ROOT, HIP, RIGHT_GROIN, RIGHT_KNEE, RIGHT_HEEL, HIP, LEFT_GROIN, LEFT_KNEE, LEFT_HEEL, HIP,
LOWER_BODY, UPPER_BODY, NECK, NECK, RIGHT_SHOULDER, RIGHT_ELBOW, RIGHT_WRIST,
NECK, LEFT_SHOULDER, LEFT_ELBOW, LEFT_WRIST])
# Indices defining the actual bones (i.e. start to end joint) in the skeleton. Note that it starts from 2 because
# the link between joint at index 1 and 0 is a straight line from the hip to the root located on the floor, which
# is not a bone.
BONES = list(zip(list(range(2, len(PARENTS))), PARENTS[2:]))
# Number of bones in the skeleton
N_BONES = len(BONES)
# Convert the data to centimeters (this is a constant taken from Holden's code).
TO_CM = 1 / 0.16
# Length of each bone
BONE_LENGTHS = np.array([2.40, 7.15, 7.49, 2.36, 2.37, 7.43, 7.50, 2.41, 2.04, 2.05, 1.75, 1.76, 2.90, 4.98,
3.48, 0.71, 2.73, 5.24, 3.44, 0.62])
# Indices pointing to the feet. Note that those indices are only valid if the data is NOT flattened, i.e. it is
# e.g. in the form (nr_joints, 3). Also note that the order in the `FEET` vector is chosen such that it is the same
# as the foot contact states encoded in the training data.
FEET = np.array([RIGHT_HEEL, RIGHT_TOE, LEFT_HEEL, LEFT_TOE])
# For convenience: Indices of the feet for when the data is flattened. E.g. the data used as input to the motion
    # manifold training procedure is flattened (vector of length 73 where the first 66 elements are the joint positions).
# Rows correspond to the indices for retrieving the position as a 3D vector.
FEET_FLATTENED = np.array([list(range(i, i+3)) for i in FEET*3])
@classmethod
def idx_to_foot(cls, idx):
return cls.FEET[idx]
@classmethod
def foot_to_idx(cls, foot):
return np.where(cls.FEET == foot)[0][0]
class SkeletonSequence(object):
"""
An animated skeleton, i.e. a sequence of skeletons over time.
"""
_counter = 0
def __init__(self, sequence, x_offset=None, offsets=None, target_dir=None, name=None, color=None, interp=None,
static_frames=None):
"""
Constructor
:param sequence: an np array of size (dim, nr_frames) where the first axis contains the information about the
skeleton in the following order: [...3D joints..., x-velocity, y-velocity, rotational velocity,
foot contacts]. Usually there are 22 joints, and 4 entries for the foot contacts state making this a vector
of size 73.
:param x_offset: an offset on the x-axis to be applied when drawing the sequence. This is useful when several
sequences are shown in parallel.
        :param offsets: a 3 dimensional np array specifying (x, y, z)-offsets to be applied when drawing the sequence
:param target_dir: a 2 dimensional vector on the floor which is used to compute a correction angle around the
z-axis.
:param name: string, a name for this sequence
:param color: 3-tuple, a color for this sequence used for visualizations
:param interp: list of indices specifying for which frames the motion was filled in (interpolated or inpainted),
only used for visualization
:param static_frames: list of frame indices that should be drawn statically
"""
assert isinstance(sequence, np.ndarray), 'Input sequence must be an np array.'
assert len(sequence.shape) == 2, 'Input expected in format (dim, nr_frames)'
# make a copy because we might be changing stuff internally
data = sequence.copy()
# foot contacts might not be available
self.feet_available = sequence.shape[0] > 69
pose_dim = len(Skeleton.ALL_JOINTS)*3
self._nr_frames = len(data[0])
self._joints = data[:pose_dim, :]
self._joints = self._joints.reshape(-1, 3, self._nr_frames) # now in format (nr_joints, 3, nr_frames)
self._root_x = data[pose_dim + 0, :]
self._root_z = data[pose_dim + 1, :]
self._root_r = data[pose_dim + 2, :]
self._feet = data[-4:, :] if self.feet_available else None
self._frame_pointer = 0
self._linkage = None
self._drawing_offset = [0.0, 0.0, 0.0] if offsets is None else offsets
self._x_offset = x_offset or 0.0
self._interp = set(interp) if isinstance(interp, np.ndarray) or interp else None
self._joints = utils.to_global(self._joints, np.stack([self._root_x, self._root_z, self._root_r], axis=1))
# swap y-z axis
temp = np.copy(self._joints[:, 1, :])
self._joints[:, 1, :] = self._joints[:, 2, :]
self._joints[:, 2, :] = temp
# correct for rotational discrepancy around z-axis
if target_dir is not None:
self._joints = _correct_rotational_difference(self._joints, target_dir)
        # define boolean if this sequence should be highlighted
self.highlight = False
# list of frame indices that should be drawn statically
self._static_frames = static_frames
# list of points that are drawn statically (for end effector visualization)
self._static_points = None
# boolean that indicates if linkage must be recomputed e.g. when underlying joints changed
self._update_linkage = False
self._name = name or 'SkeletonSequence %d' % SkeletonSequence._counter
self._color = color or utils.COLORS[SkeletonSequence._counter % len(utils.COLORS)] + (1,)
self._shader = Shader(VERTEX_SHADER, FRAGMENT_SHADER)
SkeletonSequence._counter += 1
def _construct_linkage(self):
linkage = np.zeros([0, 6, self._nr_frames])
parents = Skeleton.PARENTS
for j in range(len(parents)):
if parents[j] == -1:
continue
x = np.concatenate([self._joints[j, :, :], self._joints[parents[j], :, :]])
linkage = np.concatenate([linkage, np.expand_dims(x, 0)])
return linkage
def _get_frame_at(self, frame_idx):
joints = self.joints[:, :, frame_idx]
links = self.linkage[:, :, frame_idx]
return joints, links
def _get_current_frame(self):
return self._get_frame_at(self.frame_pointer)
def _to_drawable_vertex(self, v):
return [v[0] + self._x_offset + self._drawing_offset[0],
v[1] + self._drawing_offset[1],
v[2] + self._drawing_offset[2]]
def _draw_cylinder(self, orientation, t, height=1.0, radius=.5, slices=32, loops=32):
"""Draws a cylinder whose primary axis is directed according to the 3D vector given by `orientation` and which
is translated according to `t`."""
# gluCylinder draws a cylinder at the origin pointing upward the z axis. Thus find the necessary rotation
# to rotate the default cylinder into the orientation specified.
v1 = np.array([[0.0, 0.0, height]])
q = Quaternion.rotate_from_to(v1, orientation[np.newaxis, ...])
rot = quaternion.as_rotation_matrix(q)
# save current modelview matrix
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
# first apply translation
_t = [t[0], t[1], t[2]]
glTranslated(*_t)
# then apply rotation
gl_list = utils.build_gl_rot_matrix(rot[0].T)
glMultMatrixd(gl_list)
# draw cylinder and base disk
quadratic = glu.gluNewQuadric()
glu.gluCylinder(quadratic, radius, radius, 1.0 * height, slices, loops)
glu.gluDisk(quadratic, 0.0, radius, slices, loops)
# draw top disk
        _t = [v1[0, 0], v1[0, 1], v1[0, 2]]
glTranslated(*_t)
glu.gluDisk(quadratic, 0.0, radius, slices, loops)
        # restore previous modelview matrix
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
@property
def joints(self):
return self._joints
@property
def nr_frames(self):
return self._nr_frames
@property
def linkage(self):
if self._linkage is None or self._update_linkage:
self._linkage = self._construct_linkage()
self._update_linkage = False
return self._linkage
@property
def frame_pointer(self):
return self._frame_pointer
@frame_pointer.setter
def frame_pointer(self, value):
self._frame_pointer = value
def frame_pointer_clipped(self):
ptr = max(0, self.frame_pointer)
ptr = min(ptr, self.nr_frames - 1)
return ptr
@property
def name(self):
return self._name
@property
def color(self):
return self._color
def set_static_frames(self, idxs):
self._static_frames = idxs
def set_static_points(self, points):
# points has shape [n_points, 3]
self._static_points = points
def get_root_trajectory(self):
return self.joints[0, :, :]
def export_to_csv(self, filename=None):
"""
Export 3D joint positions and skeleton definition to a CSV file.
:param filename: CSV target file or the name of this sequence if None
"""
if filename is None:
filename = self._name + '.txt'
with open(filename, 'wb') as f:
# first row is the definition of the skeleton
np.savetxt(f, np.expand_dims(Skeleton.PARENTS, 0), delimiter=',', fmt='%d')
# second row are the bones (stored for convenience)
np.savetxt(f, np.reshape(np.array(Skeleton.BONES), [1, -1]), delimiter=',', fmt='%d')
# third row is frame indices that are infilled
infilled = np.expand_dims(np.array(list(self._interp)), 0) if self._interp is not None else np.array([-1], dtype=np.int64)
np.savetxt(f, infilled, delimiter=',', fmt='%d')
# next `n_frames` rows is the motion
n_frames = self._joints.shape[-1]
joints = np.reshape(self._joints, [-1, n_frames])
np.savetxt(f, joints, delimiter=',')
def rotate_around_z_axis(self, angle):
"""
Rotate the whole sequence around the z axis of the first pose in the sequence for the specified angle.
"""
rotation = Quaternion.from_angle_axis(np.array([[0, 0, 1]]), angle)
# temporarily put whole sequence such that root of first pose is in the origin
translation = np.repeat(np.copy(self._joints[0:1, :, 0:1]), self._joints.shape[0], axis=0)
translation[:, 2, :] = 0.0
joints_ori = self._joints - translation
self._joints = joints_ori
# now rotate the sequence in the origin
for f in range(joints_ori.shape[2]):
joints_ori[:, :, f] = Quaternion.apply_rotation_to(rotation, joints_ori[:, :, f])
# restore original translation
self._joints = joints_ori + translation
self._update_linkage = True
def translate_along_axis(self, axis, t):
axis_idx = 'xy'.index(axis)
for f in range(self._joints.shape[2]):
self._joints[:, axis_idx, f] = self._joints[:, axis_idx, f] + t
self._update_linkage = True
def set_color(self):
if self.highlight or self.is_infilling():
color = utils.lighten_color(self.color, 0.5)
else:
color = self.color
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, utils.vec(*color))
glColor4f(*color)
def is_infilling(self):
return self._interp and self.frame_pointer in self._interp
def draw_timestep(self, timestep, draw_cylinders=True):
self.set_color()
joints, links = self._get_frame_at(timestep)
self._shader.bind()
if draw_cylinders:
for bone in links[1:, :]:
end_vertex = self._to_drawable_vertex(bone[3:6])
bone_dir = bone[0:3] - bone[3:6]
bone_len = np.sqrt(np.sum((np.multiply(bone_dir, bone_dir))))
if bone_len < 1e-8:
continue
self._draw_cylinder(orientation=bone_dir, t=end_vertex, height=bone_len)
else:
for joint in joints[1:, :]:
glBegin(GL_POINTS)
vertex = self._to_drawable_vertex(joint)
glVertex3f(*vertex)
glEnd()
for bone in links[1:, :]:
glBegin(GL_LINES)
vertex1 = self._to_drawable_vertex(bone[0:3])
vertex2 = self._to_drawable_vertex(bone[3:6])
glVertex3f(*vertex1)
glVertex3f(*vertex2)
glEnd()
self._shader.unbind()
def draw_current_timestep(self, draw_cylinders=True):
self.draw_timestep(self.frame_pointer, draw_cylinders=draw_cylinders)
def draw_static_frames(self, draw_cylinders=True):
if self._static_frames is not None:
[self.draw_timestep(idx, draw_cylinders) for idx in self._static_frames]
def draw_static_points(self):
if self._static_points is None:
return
static_color = utils.WHITE
glColor3f(*static_color)
for v in self._static_points:
v_d = self._to_drawable_vertex(v)
glBegin(GL_POINTS)
glVertex3f(*v_d)
glEnd()
def draw_root_trajectory(self, full=False):
glColor4f(*self.color)
if full:
root_trajectory = self.joints[0, :, :]
else:
start = max(0, self.frame_pointer_clipped() - 200)
end = min(self._nr_frames, self.frame_pointer_clipped() + 200)
root_trajectory = self.joints[0, :, start:end]
for i in range(len(root_trajectory[0])):
vertex = self._to_drawable_vertex(root_trajectory[:, i])
glBegin(GL_POINTS)
glVertex3f(*vertex)
glEnd()
def draw_foot_contacts(self, part=None):
"""
Draws a line for each part of the feet (heel and toe) when it is on the ground according to the foot
contact information supplied with the original data.
:param part: an index from Skeleton.FEET indicating which part should be displayed or None if all parts are
to be displayed
"""
if not self.feet_available:
return
ori_color = self.color
bright_color = utils.lighten_color(self.color, 0.5)
if part is None:
show_parts = Skeleton.FEET
else:
show_parts = [part]
contact_info = self._feet
joints = self.joints
def is_on_ground(frame_id, foot_part):
return contact_info[Skeleton.foot_to_idx(foot_part), frame_id] > 0.5
for idx in show_parts:
positions = joints[idx, :, :]
for i in range(0, positions.shape[-1]-1):
v1 = self._to_drawable_vertex(positions[:, i])
v2 = self._to_drawable_vertex(positions[:, i+1])
if is_on_ground(i, idx):
if is_on_ground(i+1, idx):
glColor4f(*bright_color)
glBegin(GL_LINES)
glVertex3f(*v1)
glVertex3f(*v2)
glEnd()
else:
glColor4f(*bright_color)
glBegin(GL_POINTS)
glVertex3f(*v1)
glEnd()
glColor4f(*ori_color)
glBegin(GL_POINTS)
glVertex3f(*v2)
glEnd()
else:
if is_on_ground(i+1, idx):
glColor4f(*ori_color)
glBegin(GL_POINTS)
glVertex3f(*v1)
glEnd()
glColor4f(*bright_color)
glBegin(GL_POINTS)
glVertex3f(*v2)
glEnd()
else:
glColor4f(*ori_color)
glBegin(GL_LINES)
glVertex3f(*v1)
glVertex3f(*v2)
glEnd()
def fast_forward(self):
self.frame_pointer += 1
if self.frame_pointer >= self.nr_frames:
self.frame_pointer = 0
def rewind(self):
self.frame_pointer -= 1
if self.frame_pointer < 0:
self.frame_pointer = self.nr_frames-1
def reset_time(self):
self.frame_pointer = -1
|
|
"""
Line Chart with Points
----------------------
This chart shows a simple line chart with points marking each value.
"""
# category: line charts
import altair as alt
import numpy as np
import pandas as pd
x = np.arange(100)
source = pd.DataFrame({
'x': x,
'f(x)': np.sin(x / 5)
})
alt.Chart(source).mark_line(point=True).encode(
x='x',
y='f(x)'
)
|
|
"""Test file to visualize detected trail lines from videos"""
# Usage --> python Trackviz.py 3
# 0 --> Amtala
# 1 --> Bamoner
# 2 --> Diamond
# 3 --> Fotepore
# 4 --> Gangasagar
import cv2
import json
import math
import time
import sys
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn.cluster import KMeans
from os import listdir
from os.path import isfile, join
from numba import jit, float32, cuda
print("This script plots your data points on its respective image")
print("and additionally outputs a histogram with plot of frequency")
print("of vehicle object.")
style.use("ggplot")
plt.title("Tracking distances")
plt.xlabel("Plot Number")
plt.ylabel("Plot points")
plt.xlim(0, 1)
plt.ylim(1, 0)
# plt.gca().invert_yaxis()
inputdir = "./out_traildetection_alt"
outputdir = "./out_trackviz"
# matplotlib axis/bg settings
images = np.asarray(["../images/Sample_Amtala.jpg",
"../images/Sample_Bamoner.jpg",
"../images/Sample_Diamond.jpg",
"../images/Sample_Fotepore.jpg",
"../images/Sample_Gangasagar.jpg"])
json_files_track = np.asarray([inputdir + "/veh_A_c.json", inputdir + "/veh_B_c.json",
inputdir + "/veh_D_c.json", inputdir + "/veh_F_c.json", inputdir + "/veh_G_c.json"])
json_files_frames = np.asarray([inputdir + "/veh_A.json", inputdir + "/veh_B.json",
inputdir + "/veh_D.json", inputdir + "/veh_F.json", inputdir + "/veh_G.json"])
# Modify
targetindex = 2
# Primary variables
image_to_open = images[int(sys.argv[1])]
file_to_open = json_files_track[int(sys.argv[1])]
# ----------------------------------------------
img = cv2.imread(image_to_open)
bins = np.fromiter((i*10 for i in range(100)), dtype="float32")
# Setup sub-plot
fig, ax = plt.subplots()
plt.imshow(img, extent=[0, 1, 1, 0])
FRAME_COUNTERS = np.zeros((0, 1), dtype=float)
with open(file_to_open, "r") as f:
data = json.load(f)
for tracked_vehicle in data:
# Stores "list of co-ordinates" from json file
        COORD_LIST = np.zeros((0, 2), dtype=float)
FRAME_COUNTERS = np.append(
FRAME_COUNTERS, [[tracked_vehicle["frame_count"]]], axis=0)
for coordinates in tracked_vehicle["objects"]:
COORD_LIST = np.append(COORD_LIST,
[[coordinates["center_x"],
coordinates["center_y"]]], axis=0)
# print(FRAME_COUNTERS)
ax.scatter(COORD_LIST[:, 0:1], COORD_LIST[:, 1:2])
# plt.scatter(COORD_LIST[:,0:1], COORD_LIST[:,1:2])
# plt.savefig(join("./output", "output.png"))
plt.savefig(join(outputdir, "output.png"))
plt.show()
plt.clf()
plt.hist(FRAME_COUNTERS, bins, histtype="bar", rwidth=0.75)
plt.savefig(join(outputdir, "track_length.png"))
# print(COORD_LIST[:,0:1])
# secarr = np.asarray(arr[0]["objects"][1])
# lookup = json.JSONDecoder().decode(secarr)
# print (lookup)
# for vehicle_object in data:
# print(vehicle_object)
|
|
import numpy as np
import matplotlib.pyplot as plt
import datetime
import glob2
import xarray as xr
import pandas as pd
#plt.close("all")
pd.options.display.max_columns = None
pd.options.display.max_rows = None
dircInput1 = 'C:/Users/Chenxi/OneDrive/phd/age_and_fire/data/02_semi_raw/07_ACE_FTS_with_AGEparams/'
dircInput2 = 'C:/Users/Chenxi/OneDrive/phd/age_and_fire/data/03_cleaned/07_ACE_FTS_with_AGEparams_cleaned/'
def open_data(year):
fn = glob2.glob(dircInput1 + f'fts_v3.6_REN_HN2_ecmwf_3d_24_1.5_{str(year)[2:]}*.nc')
frame = []
for filename in fn:
df = xr.open_dataset(filename).to_dataframe()
df.reset_index(inplace=True)
df.set_index('time', inplace = True)
frame.append(df)
return pd.concat(frame)
# def flag(df):
# df.dropna(subset=[species], inplace=True)
# return df
def name(df):
df.rename(columns={
'alt': 'ALT',
},
inplace = True)
return df
def clean():
df = open_data(year)
# df = flag(df, )
df = name(df)
return df
###############################################################################
if __name__ == "__main__":
    years = np.arange(2004, 2017+1, 1)
    for year in years:
try:
df = clean()
df.to_xarray().to_netcdf(dircInput2+f'fts_v3.6_REN_HN2_ecmwf_3d_24_1.5__{year}_cleaned.nc')
print (year, 'finished')
except:
print(f'no data for year {year}')
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from . import thops
from . import modules
from . import utils
from models.transformer import BasicTransformerModelCausal
def nan_throw(tensor, name="tensor"):
stop = False
if ((tensor!=tensor).any()):
print(name + " has nans")
stop = True
if (torch.isinf(tensor).any()):
print(name + " has infs")
stop = True
if stop:
print(name + ": " + str(tensor))
        #raise ValueError(name + ' contains nans or infs')
def f(in_channels, out_channels, hidden_channels, cond_channels, network_model, num_layers):
if network_model=="transformer":
#return BasicTransformerModel(out_channels, in_channels + cond_channels, 10, hidden_channels, num_layers, use_pos_emb=True)
return BasicTransformerModelCausal(out_channels, in_channels + cond_channels, 10, hidden_channels, num_layers, use_pos_emb=True, input_length=70)
if network_model=="LSTM":
return modules.LSTM(in_channels + cond_channels, hidden_channels, out_channels, num_layers)
if network_model=="GRU":
return modules.GRU(in_channels + cond_channels, hidden_channels, out_channels, num_layers)
if network_model=="FF":
return nn.Sequential(
nn.Linear(in_channels+cond_channels, hidden_channels), nn.ReLU(inplace=False),
nn.Linear(hidden_channels, hidden_channels), nn.ReLU(inplace=False),
modules.LinearZeroInit(hidden_channels, out_channels))
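# Hypothetical usage sketch for the "FF" branch: the returned nn.Sequential maps the
# last dimension from in_channels + cond_channels to out_channels, which is why
# FlowStep permutes its inputs to (batch, time, channels) first. Sizes are made up.
#   ff_net = f(in_channels=16, out_channels=16, hidden_channels=64,
#              cond_channels=8, network_model="FF", num_layers=2)
#   y = ff_net(torch.randn(4, 70, 16 + 8))     # -> (4, 70, 16)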
class FlowStep(nn.Module):
FlowCoupling = ["additive", "affine"]
NetworkModel = ["transformer","LSTM", "GRU", "FF"]
FlowPermutation = {
"reverse": lambda obj, z, logdet, rev: (obj.reverse(z, rev), logdet),
"shuffle": lambda obj, z, logdet, rev: (obj.shuffle(z, rev), logdet),
"invconv": lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)
}
def __init__(self, in_channels, hidden_channels, cond_channels,
actnorm_scale=1.0,
flow_permutation="invconv",
flow_coupling="additive",
network_model="LSTM",
num_layers=2,
LU_decomposed=False):
# check configures
assert flow_coupling in FlowStep.FlowCoupling,\
"flow_coupling should be in `{}`".format(FlowStep.FlowCoupling)
assert network_model in FlowStep.NetworkModel,\
"network_model should be in `{}`".format(FlowStep.NetworkModel)
assert flow_permutation in FlowStep.FlowPermutation,\
"float_permutation should be in `{}`".format(
FlowStep.FlowPermutation.keys())
super().__init__()
self.flow_permutation = flow_permutation
self.flow_coupling = flow_coupling
self.network_model = network_model
# 1. actnorm
self.actnorm = modules.ActNorm2d(in_channels, actnorm_scale)
# 2. permute
if flow_permutation == "invconv":
self.invconv = modules.InvertibleConv1x1(
in_channels, LU_decomposed=LU_decomposed)
elif flow_permutation == "shuffle":
self.shuffle = modules.Permute2d(in_channels, shuffle=True)
else:
self.reverse = modules.Permute2d(in_channels, shuffle=False)
# 3. coupling
if flow_coupling == "additive":
self.f = f(in_channels // 2, in_channels-in_channels // 2, hidden_channels, cond_channels, network_model, num_layers)
elif flow_coupling == "affine":
print("affine: in_channels = " + str(in_channels))
self.f = f(in_channels // 2, 2*(in_channels-in_channels // 2), hidden_channels, cond_channels, network_model, num_layers)
print("Flowstep affine layer: " + str(in_channels))
def init_lstm_hidden(self):
if self.network_model == "LSTM" or self.network_model == "GRU":
self.f.init_hidden()
def forward(self, input, cond, logdet=None, reverse=False):
if not reverse:
return self.normal_flow(input, cond, logdet)
else:
return self.reverse_flow(input, cond, logdet)
def normal_flow(self, input, cond, logdet):
#assert input.size(1) % 2 == 0
# 1. actnorm
#z=input
z, logdet = self.actnorm(input, logdet=logdet, reverse=False)
# 2. permute
z, logdet = FlowStep.FlowPermutation[self.flow_permutation](
self, z, logdet, False)
# 3. coupling
z1, z2 = thops.split_feature(z, "split")
z1_cond = torch.cat((z1, cond), dim=1)
if self.flow_coupling == "additive":
z2 = z2 + self.f(z1_cond)
elif self.flow_coupling == "affine":
# import pdb;pdb.set_trace()
if self.network_model=="transformer":
h = self.f(z1_cond.permute(2,0,1)).permute(1,2,0)
else:
h = self.f(z1_cond.permute(0, 2, 1)).permute(0, 2, 1)
shift, scale = thops.split_feature(h, "cross")
scale = torch.sigmoid(scale + 2.)+1e-6
z2 = z2 + shift
z2 = z2 * scale
logdet = thops.sum(torch.log(scale), dim=[1, 2]) + logdet
z = thops.cat_feature(z1, z2)
return z, cond, logdet
def reverse_flow(self, input, cond, logdet):
# 1.coupling
z1, z2 = thops.split_feature(input, "split")
# import pdb;pdb.set_trace()
z1_cond = torch.cat((z1, cond), dim=1)
if self.flow_coupling == "additive":
z2 = z2 - self.f(z1_cond)
elif self.flow_coupling == "affine":
h = self.f(z1_cond.permute(0, 2, 1)).permute(0, 2, 1)
shift, scale = thops.split_feature(h, "cross")
nan_throw(shift, "shift")
nan_throw(scale, "scale")
nan_throw(z2, "z2 unscaled")
scale = torch.sigmoid(scale + 2.)+1e-6
z2 = z2 / scale
z2 = z2 - shift
logdet = -thops.sum(torch.log(scale), dim=[1, 2]) + logdet
z = thops.cat_feature(z1, z2)
# 2. permute
z, logdet = FlowStep.FlowPermutation[self.flow_permutation](
self, z, logdet, True)
nan_throw(z, "z permute_" + str(self.flow_permutation))
# 3. actnorm
z, logdet = self.actnorm(z, logdet=logdet, reverse=True)
return z, cond, logdet
class FlowNet(nn.Module):
def __init__(self, x_channels, hidden_channels, cond_channels, K,
actnorm_scale=1.0,
flow_permutation="invconv",
flow_coupling="additive",
network_model="LSTM",
num_layers=2,
LU_decomposed=False):
super().__init__()
self.layers = nn.ModuleList()
self.output_shapes = []
self.K = K
N = cond_channels
for _ in range(K):
self.layers.append(
FlowStep(in_channels=x_channels,
hidden_channels=hidden_channels,
cond_channels=N,
actnorm_scale=actnorm_scale,
flow_permutation=flow_permutation,
flow_coupling=flow_coupling,
network_model=network_model,
num_layers=num_layers,
LU_decomposed=LU_decomposed))
self.output_shapes.append(
[-1, x_channels, 1])
# import pdb;pdb.set_trace()
def init_lstm_hidden(self):
for layer in self.layers:
if isinstance(layer, FlowStep):
layer.init_lstm_hidden()
def forward(self, z, cond, logdet=0., reverse=False, eps_std=None):
if not reverse:
for layer in self.layers:
z, cond, logdet = layer(z, cond, logdet, reverse=False)
return z, logdet
else:
for i,layer in enumerate(reversed(self.layers)):
z, cond, logdet = layer(z, cond, logdet=0, reverse=True)
return z
class Glow(nn.Module):
def __init__(self, x_channels, cond_channels, opt):
super().__init__()
self.flow = FlowNet(x_channels=x_channels,
hidden_channels=opt.dhid,
cond_channels=cond_channels,
K=opt.glow_K,
actnorm_scale=opt.actnorm_scale,
flow_permutation=opt.flow_permutation,
flow_coupling=opt.flow_coupling,
network_model=opt.network_model,
num_layers=opt.num_layers,
LU_decomposed=opt.LU_decomposed)
self.opt = opt
# register prior hidden
# num_device = len(utils.get_proper_device(hparams.Device.glow, False))
# assert hparams.Train.batch_size % num_device == 0
# self.z_shape = [opt.batch_size // num_device, x_channels, 1]
self.z_shape = [opt.batch_size, x_channels, 1]
if opt.flow_dist == "normal":
self.distribution = modules.GaussianDiag()
elif opt.flow_dist == "studentT":
self.distribution = modules.StudentT(opt.flow_dist_param, x_channels)
def init_lstm_hidden(self):
self.flow.init_lstm_hidden()
def forward(self, x=None, cond=None, z=None,
eps_std=None, reverse=False, output_length=1):
if not reverse:
return self.normal_flow(x, cond)
else:
return self.reverse_flow(z, cond, eps_std, output_length=output_length)
def normal_flow(self, x, cond):
n_timesteps = thops.timesteps(x) #just returns the size of dimension 2?
logdet = torch.zeros_like(x[:, 0, 0])
# encode
z, objective = self.flow(x, cond, logdet=logdet, reverse=False)
# prior
objective += self.distribution.logp(z)
# return
nll = (-objective) / float(np.log(2.) * n_timesteps)
return z, nll
def reverse_flow(self, z, cond, eps_std, output_length=1):
with torch.no_grad():
z_shape = self.z_shape
z_shape[-1] = output_length
if z is None:
z = self.distribution.sample(z_shape, eps_std, device=cond.device)
x = self.flow(z, cond, eps_std=eps_std, reverse=True)
return x
def set_actnorm_init(self, inited=True):
for name, m in self.named_modules():
if (m.__class__.__name__.find("ActNorm") >= 0):
m.inited = inited
@staticmethod
def loss_generative(nll):
# Generative loss
return torch.mean(nll)
|
|
import numpy as np
import os, errno, json, random
import torch
from rdkit import Chem, DataStructs
from rdkit.DataStructs import *
from katanaHLS.models import SimGNNConfig
try:
from descriptastorus.descriptors import rdDescriptors, rdNormalizedDescriptors
except:
    raise ImportError("Please install descriptastorus (pip install git+https://github.com/bp-kelley/descriptastorus) and pandas-flavor (pip install pandas-flavor)")
def fix_randomness(seed=0, use_cuda=False):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if not use_cuda:
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
np.random.RandomState(seed)
    random.seed(seed)
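# Typical usage (seed value is arbitrary/hypothetical), called once before any data
# loading or model construction so that torch, numpy and random are all seeded:
#   fix_randomness(seed=42, use_cuda=torch.cuda.is_available())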
def print_graph_stats(dataset):
data = dataset.sim_graph # Get the first graph object.
print()
print(data)
print('===============================================')
print("Similarity Graph Stats.")
# Print some statistics about the similarity graph.
print(f'Number of nodes: {data.num_nodes}')
print(f'Number of edges: {data.num_edges}')
print(f'Average node degree: {data.num_edges / data.num_nodes:.2f}')
print(f'Number of training nodes: {data.train_mask.sum()}')
print(f'Training node label rate: {int(data.train_mask.sum()) / data.num_nodes:.2f}')
print(f'Has isolated nodes: {data.has_isolated_nodes()}')
print(f'Has self-loops: {data.has_self_loops()}')
print(f'Is undirected: {data.is_undirected()}')
def print_exp_settings(args):
print()
print('Settings for the experiments:')
print('==============================')
print('Dataset: {}'.format(args['dataset']))
print('Metric: {}'.format(args['metric']))
print('Similarity Criteria: {}'.format(args['similarity']))
# print('GNN Model: {}'.format(args['model']))
# print('Model config filepath: {}'.format(args['model_config_path']))
# print('Max Number of Epochs: {}'.format(args['num_epochs']))
print('Using CUDA: {}'.format(args['use_cuda']))
def add_val_mask(dataset, train, val, id_maps):
smiles_list = val['Drug'].values
for smiles in smiles_list:
for id in id_maps[smiles]:
dataset.sim_graph.val_mask[id] = True
dataset.sim_graph.train_mask[id] = False
def split_dataset(train, val, test, id_maps):
train_smiles_list = train['Drug'].values
val_smiles_list = val['Drug'].values
test_smiles_list = test['Drug'].values
train_idx = [index for smiles in train_smiles_list for index in id_maps[smiles]]
val_idx = [index for smiles in val_smiles_list for index in id_maps[smiles]]
test_idx = [index for smiles in test_smiles_list for index in id_maps[smiles]]
return np.unique(train_idx), np.unique(val_idx), np.unique(test_idx)
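# Hypothetical usage sketch with TDC-style DataFrames that carry a 'Drug' SMILES
# column; id_maps maps each SMILES string to its node id(s) in the similarity graph:
#   train_idx, val_idx, test_idx = split_dataset(train_df, val_df, test_df, id_maps)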
def make_dir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def write_results(args, results, final_score):
out_path = args['trial_path']
make_dir(out_path)
with open( out_path + '/sim_metric.{}.thres.{}.results.json'.format(
args['similarity'], args['thres']), 'w') as f:
json.dump(results, f, indent=2)
with open( out_path + '/sim_metric.{}.thres.{}.final_score.json'.format(
args['similarity'], args['thres']), 'w') as f:
json.dump(final_score, f, indent=2)
def load_model_config(args):
""" Query for the manually specified configuration"""
config= dict()
model_path = args['model_config_path']
print('Loading model configurations from {}'.format(model_path))
sim_gnn_path = os.path.join(model_path, args['sim_gnn']+ '_sim.json')
with open(sim_gnn_path, 'r') as f:
sim_config = json.load(f)
config.update(sim_config)
return config
def load_sim_gnn_hp(args):
config= dict()
# if hyperparams:
# config.update(hyperparams)
# else:
# """ Query for the manually specified configuration"""
# model_path = args['model_config_path']
# with open('{}/{}_sim.json'.format(model_path,args['model']), 'r') as f:
# config = json.load(f)
sim_gcn_config = SimGNNConfig(in_channels = args['in_sim_node_feats'], gnn = args['sim_gnn'])
sim_gcn_config.hidden_channels = args['sim_gnn_hidden_channels']
sim_gcn_config.num_layers = args['sim_gnn_num_layers']
sim_gcn_config.batchnorm = args['sim_gnn_batchnorm']
sim_gcn_config.dropout = args['sim_gnn_dropout']
sim_gcn_config.predictor_hidden_feats = args['sim_gnn_predictor_hidden_feats']
args["batch_size"] = args["sim_gnn_batch_size"]
args["lr"] = args["sim_gnn_lr"]
args["weight_decay"] = args["sim_gnn_weight_decay"]
args["patience"] = args["sim_gnn_patience"]
return sim_gcn_config
def smile_to_fps(smile):
r""" Given a smile string, convert it into a RDKit Fingerprint
"""
molecule = Chem.MolFromSmiles(smile)
return Chem.RDKFingerprint(molecule)
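# Hypothetical usage sketch (aspirin SMILES); the result is an RDKit topological
# fingerprint bit vector suitable for DataStructs.FingerprintSimilarity below:
#   fp = smile_to_fps("CC(=O)OC1=CC=CC=C1C(=O)O")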
def construct_sim_matrices (smiles, sim_metric):
r""" Given a list of smile strings corresponding to a tdc dataset,
returns the pairwise similarity of all the molecules in the list
as a numpy matrix. The similarity measure is computed with respect
to the similarity function.
"""
print('Constructing similarity matrix. This might take a while...')
n = len(smiles)
sim_score_mat = np.zeros((n,n))
fps = [smile_to_fps(smile) for smile in smiles]
for i in range(n):
for j in range(n):
sim_score_mat[i,j] = DataStructs.FingerprintSimilarity(fps[i], fps[j], sim_metric)
print('Similarity matrix constructed.')
return sim_score_mat
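# Hypothetical usage sketch: pairwise Tanimoto similarity for a toy SMILES list
# (the metric is any callable accepted by DataStructs.FingerprintSimilarity):
#   sim_mat = construct_sim_matrices(["CCO", "CCN", "CCC"],
#                                    DataStructs.TanimotoSimilarity)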
def smiles2rdkit2d(smiles):
try:
generator = rdNormalizedDescriptors.RDKit2DNormalized()
features = np.array(generator.process(smiles)[1:])
NaNs = np.isnan(features)
features[NaNs] = 0
except:
        print('descriptastorus could not process this smiles: ' + smiles + ', converting to all-0 features')
features = np.zeros((200, ))
return np.array(features)
# Atom Features. The basic settings borrowed from DGL Life Science
def one_hot_encoding(x, allowable_set, encode_unknown=False):
"""One hot encoding of the element x with respect to the allowable set.
If encode unknown is true, and x is not in the allowable set,
then x is added to the allowable set.
Args:
:param x (elem) : [the element to encode]
:param allowable_set (list): [ The list of elements]
        :param encode_unknown (bool, optional): [Whether to add x to the allowable list
            if x is not already present]. Defaults to False.
:return one hot encoding of x
"""
if encode_unknown and (x not in allowable_set):
allowable_set.append(x)
return list(map(lambda s: x == s, allowable_set))
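# Example: one_hot_encoding('N', ['C', 'N', 'O']) -> [False, True, False].
# With encode_unknown=True, an element missing from the set is appended and
# therefore encoded as the last position.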
def construct_features(smiles):
r""" Given a list of smile strings,
return their corresponding rdkit2d features. Useful for adding molecular features
in node classification tasks.
"""
print('Generating molecular features....')
feat = [smiles2rdkit2d(smile) for smile in smiles]
print('Feature constructions complete.')
return np.array(feat)
def construct_edge_list(sim_score_mat, thres):
""" Constructs edge lists for a PyG graph (COO representation)
based on the pairwise similarity matrix of the
molecule and the molecular features
"""
print('Constructing COO edge list based on similarity matrix.')
srcs, dsts = [], []
n = len(sim_score_mat)
for i in range(n):
for j in range(n):
if sim_score_mat[i][j] > thres:
srcs.append(i)
dsts.append(j)
edge_index = torch.tensor([srcs,dsts], dtype=torch.long)
print('Done.')
return edge_index
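# Hypothetical usage sketch: keep an edge wherever pairwise similarity exceeds 0.7
# (threshold value made up); the result is a (2, n_edges) LongTensor in COO format.
#   edge_index = construct_edge_list(sim_score_mat, thres=0.7)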
import matplotlib.pyplot as plt
def plot_train_history(history):
acc = history['train_score']
val_acc = history['val_score']
loss = history['loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'b', label='Training score')
plt.plot(epochs, val_acc, 'r', label='Validation score')
plt.title('Training and validation score')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.title('Training loss')
plt.legend()
plt.show()
def plot_tdc_results():
import numpy as np
import matplotlib.pyplot as plt
Katana = [0.878, 0.921, 0.737, 0.932]
MLP = [0.841, 0.875, 0.672, 0.889]
n=4
r = np.arange(n)
width = 0.25
plt.bar(r, Katana, color = 'b',
width = width, edgecolor = 'black',
)
plt.bar(r + width, MLP, color = 'orange',
width = width, edgecolor = 'black',
)
plt.legend(['Katana', 'TDC'], loc=2, prop={'size': 14})
plt.xlabel("Datasets")
plt.ylabel("Accuracy")
plt.title("TDC Benchmark Datasets")
# plt.grid(linestyle='--')
plt.ylim([0.6,1])
plt.xticks(r + width/2,['hERG','DILI','Bioavailability_Ma','BBB_Martins'])
# plt.legend()
plt.show()
|
|
from sklearn.model_selection import KFold
from code.classification.classifier import Classifier
from code.classification.file import get_training_data
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from statistics import mean
import random
import numpy
import csv
from lib.statics.classification_lists import classification_models
import os
dir_path = 'validation_result/'
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def ten_fold_cross_validation(dataset, ALGO):
kf = KFold(n_splits=10, shuffle=True)
run_precision = []
run_recall = []
run_f1score = []
run_accuracy = []
count=1
    # Randomly divide the dataset into 10 partitions.
    # During each iteration one partition is used for testing and the remaining 9 are used for training.
for train, test in kf.split(dataset):
print("Using split-"+str(count)+" as test data..")
classifier_model = Classifier(algo=ALGO, training_data=dataset[train], vector_method='tfidf')
test_comments=[comments.text for comments in dataset[test]]
test_ratings=[comments.rating for comments in dataset[test]]
pred = classifier_model.get_sentiment_polarity_collection(test_comments)
label_list = ['Negative', 'Positive', 'Neutral']
precision = precision_score(test_ratings, pred, labels=label_list, average=None)
recall = recall_score(test_ratings, pred, labels=label_list, average=None)
f1score = f1_score(test_ratings, pred, labels=label_list, average=None)
accuracy = accuracy_score(test_ratings, pred)
run_accuracy.append(accuracy)
run_f1score.append(f1score)
run_precision.append(precision)
run_recall.append(recall)
count+=1
return (get_mean_list(run_precision),get_mean_list(run_recall),get_mean_list(run_f1score),mean(run_accuracy))
def get_mean_list(run_result):
label_list = {}
label_list['Negative'] = mean([p[0] for p in run_result])
label_list['Positive'] = mean([p[1] for p in run_result])
    label_list['Neutral'] = mean([p[2] for p in run_result])
return label_list
def get_mean_dict(run_result):
label_list = {}
label_list['Negative'] = mean([p['Negative'] for p in run_result])
label_list['Positive'] = mean([p['Positive'] for p in run_result])
label_list['Neutral'] = mean([p['Neutral'] for p in run_result])
return label_list
def validation_list(algo):
ALGO = algo
REPEAT = 1
print("Cross validation")
print("Algrithm: " + ALGO)
print("Repeat: " + str(REPEAT))
training_data = get_training_data()
random.shuffle(training_data)
training_data = numpy.array(training_data)
Precision = []
Recall = []
Fmean = []
Accuracy = []
for k in range(0, REPEAT):
print(".............................")
print("Run# {}".format(k))
(precision, recall, f1score, accuracy) = ten_fold_cross_validation(training_data, ALGO)
Precision.append(precision)
Recall.append(recall)
Fmean.append(f1score)
Accuracy.append(accuracy)
print("Precision: %s" % precision)
print("Recall: %s" % recall)
print("F-measure: %s" % f1score)
print("Accuracy: %s" % accuracy)
with open("%scross-validation-%s_100.csv" % (dir_path, ALGO), 'w') as file:
header = ['Run', 'Precision', 'Recall', 'Fscore', 'Accuracy']
writer = csv.DictWriter(file, header)
writer.writeheader()
for k in range(0, REPEAT):
row = {'Run': k, 'Precision': Precision[k], 'Recall': Recall[k], 'Fscore': Fmean[k],
'Accuracy': Accuracy[k]}
writer.writerow(row)
row = {'Run': 'Average', 'Precision': get_mean_dict(Precision), 'Recall': get_mean_dict(Recall),
'Fscore': get_mean_dict(Fmean), 'Accuracy': mean(Accuracy)}
writer.writerow(row)
print("-------------------------")
print("Average Precision: %s" % (get_mean_dict(Precision)))
print("Average Recall: %s" % get_mean_dict(Recall))
print("Average Fmean: %s" % get_mean_dict(Fmean))
print("Average Accuracy: %s" % (mean(Accuracy)))
print("-------------------------")
if __name__ == '__main__':
for algo in classification_models:
validation_list(algo)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import numpy
from nidaqmx._lib import (
lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32)
from nidaqmx.system.physical_channel import PhysicalChannel
from nidaqmx.errors import (
check_for_error, is_string_buffer_too_small, is_array_buffer_too_small)
from nidaqmx.constants import (
Coupling, DigitalPatternCondition, DigitalWidthUnits, Edge, Slope,
TriggerType, WindowTriggerCondition1)
class StartTrigger(object):
"""
Represents the start trigger configurations for a DAQmx task.
"""
def __init__(self, task_handle):
self._handle = task_handle
@property
def anlg_edge_coupling(self):
"""
:class:`nidaqmx.constants.Coupling`: Specifies the coupling for
the source signal of the trigger if the source is a terminal
rather than a virtual channel.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetAnlgEdgeStartTrigCoupling
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return Coupling(val.value)
@anlg_edge_coupling.setter
def anlg_edge_coupling(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetAnlgEdgeStartTrigCoupling
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_coupling.deleter
def anlg_edge_coupling(self):
cfunc = lib_importer.windll.DAQmxResetAnlgEdgeStartTrigCoupling
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_dig_fltr_enable(self):
"""
bool: Specifies whether to apply a digital filter to the digital
output of the analog triggering circuitry (the Analog
Comparison Event). When enabled, the analog signal must stay
above or below the trigger level for the minimum pulse width
before being recognized. Use filtering for noisy trigger
signals that transition in and out of the hysteresis window
rapidly.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetAnlgEdgeStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_edge_dig_fltr_enable.setter
def anlg_edge_dig_fltr_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgEdgeStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_dig_fltr_enable.deleter
def anlg_edge_dig_fltr_enable(self):
cfunc = lib_importer.windll.DAQmxResetAnlgEdgeStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_dig_fltr_min_pulse_width(self):
"""
float: Specifies in seconds the minimum pulse width the filter
recognizes.
"""
val = ctypes.c_double()
cfunc = (lib_importer.windll.
DAQmxGetAnlgEdgeStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_edge_dig_fltr_min_pulse_width.setter
def anlg_edge_dig_fltr_min_pulse_width(self, val):
cfunc = (lib_importer.windll.
DAQmxSetAnlgEdgeStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_dig_fltr_min_pulse_width.deleter
def anlg_edge_dig_fltr_min_pulse_width(self):
cfunc = (lib_importer.windll.
DAQmxResetAnlgEdgeStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_dig_fltr_timebase_rate(self):
"""
float: Specifies in hertz the rate of the digital filter
timebase. NI-DAQmx uses this value to compute settings for
the filter.
"""
val = ctypes.c_double()
cfunc = (lib_importer.windll.
DAQmxGetAnlgEdgeStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_edge_dig_fltr_timebase_rate.setter
def anlg_edge_dig_fltr_timebase_rate(self, val):
cfunc = (lib_importer.windll.
DAQmxSetAnlgEdgeStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_dig_fltr_timebase_rate.deleter
def anlg_edge_dig_fltr_timebase_rate(self):
cfunc = (lib_importer.windll.
DAQmxResetAnlgEdgeStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_dig_fltr_timebase_src(self):
"""
str: Specifies the terminal of the signal to use as the timebase
of the digital filter.
"""
cfunc = (lib_importer.windll.
DAQmxGetAnlgEdgeStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@anlg_edge_dig_fltr_timebase_src.setter
def anlg_edge_dig_fltr_timebase_src(self, val):
cfunc = (lib_importer.windll.
DAQmxSetAnlgEdgeStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_dig_fltr_timebase_src.deleter
def anlg_edge_dig_fltr_timebase_src(self):
cfunc = (lib_importer.windll.
DAQmxResetAnlgEdgeStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_dig_sync_enable(self):
"""
bool: Specifies whether to synchronize recognition of
transitions in the signal to the internal timebase of the
device.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetAnlgEdgeStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_edge_dig_sync_enable.setter
def anlg_edge_dig_sync_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgEdgeStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_dig_sync_enable.deleter
def anlg_edge_dig_sync_enable(self):
cfunc = lib_importer.windll.DAQmxResetAnlgEdgeStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_hyst(self):
"""
float: Specifies a hysteresis level in the units of the
measurement or generation. If **anlg_edge_slope** is
**Slope1.RISING**, the trigger does not deassert until the
source signal passes below **anlg_edge_lvl** minus the
hysteresis. If **anlg_edge_slope** is **Slope1.FALLING**,
the trigger does not deassert until the source signal passes
above **anlg_edge_lvl** plus the hysteresis. Hysteresis is
always enabled. Set this property to a non-zero value to use
hysteresis.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetAnlgEdgeStartTrigHyst
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_edge_hyst.setter
def anlg_edge_hyst(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgEdgeStartTrigHyst
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_hyst.deleter
def anlg_edge_hyst(self):
cfunc = lib_importer.windll.DAQmxResetAnlgEdgeStartTrigHyst
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_lvl(self):
"""
float: Specifies at what threshold in the units of the
measurement or generation to start acquiring or generating
samples. Use **anlg_edge_slope** to specify on which slope
to trigger on this threshold.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetAnlgEdgeStartTrigLvl
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_edge_lvl.setter
def anlg_edge_lvl(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgEdgeStartTrigLvl
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_lvl.deleter
def anlg_edge_lvl(self):
cfunc = lib_importer.windll.DAQmxResetAnlgEdgeStartTrigLvl
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_slope(self):
"""
:class:`nidaqmx.constants.Slope`: Specifies on which slope of
the trigger signal to start acquiring or generating samples.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetAnlgEdgeStartTrigSlope
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return Slope(val.value)
@anlg_edge_slope.setter
def anlg_edge_slope(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetAnlgEdgeStartTrigSlope
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_slope.deleter
def anlg_edge_slope(self):
cfunc = lib_importer.windll.DAQmxResetAnlgEdgeStartTrigSlope
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_edge_src(self):
"""
str: Specifies the name of a virtual channel or terminal where
there is an analog signal to use as the source of the Start
Trigger.
"""
cfunc = lib_importer.windll.DAQmxGetAnlgEdgeStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@anlg_edge_src.setter
def anlg_edge_src(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgEdgeStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_edge_src.deleter
def anlg_edge_src(self):
cfunc = lib_importer.windll.DAQmxResetAnlgEdgeStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_btm(self):
"""
float: Specifies the lower limit of the window. Specify this
value in the units of the measurement or generation.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetAnlgWinStartTrigBtm
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_win_btm.setter
def anlg_win_btm(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgWinStartTrigBtm
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_btm.deleter
def anlg_win_btm(self):
cfunc = lib_importer.windll.DAQmxResetAnlgWinStartTrigBtm
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_coupling(self):
"""
:class:`nidaqmx.constants.Coupling`: Specifies the coupling for
the source signal of the trigger if the source is a terminal
rather than a virtual channel.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetAnlgWinStartTrigCoupling
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return Coupling(val.value)
@anlg_win_coupling.setter
def anlg_win_coupling(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetAnlgWinStartTrigCoupling
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_coupling.deleter
def anlg_win_coupling(self):
cfunc = lib_importer.windll.DAQmxResetAnlgWinStartTrigCoupling
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_dig_fltr_enable(self):
"""
bool: Specifies whether to apply a digital filter to the digital
output of the analog triggering circuitry (the Analog
Comparison Event). When enabled, the analog signal must stay
within the trigger window for the minimum pulse width before
being recognized. Use filtering for noisy trigger signals
that transition in and out of the window rapidly.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetAnlgWinStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_win_dig_fltr_enable.setter
def anlg_win_dig_fltr_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgWinStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_dig_fltr_enable.deleter
def anlg_win_dig_fltr_enable(self):
cfunc = lib_importer.windll.DAQmxResetAnlgWinStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_dig_fltr_min_pulse_width(self):
"""
float: Specifies in seconds the minimum pulse width the filter
recognizes.
"""
val = ctypes.c_double()
cfunc = (lib_importer.windll.
DAQmxGetAnlgWinStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_win_dig_fltr_min_pulse_width.setter
def anlg_win_dig_fltr_min_pulse_width(self, val):
cfunc = (lib_importer.windll.
DAQmxSetAnlgWinStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_dig_fltr_min_pulse_width.deleter
def anlg_win_dig_fltr_min_pulse_width(self):
cfunc = (lib_importer.windll.
DAQmxResetAnlgWinStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_dig_fltr_timebase_rate(self):
"""
float: Specifies in hertz the rate of the digital filter
timebase. NI-DAQmx uses this value to compute settings for
the filter.
"""
val = ctypes.c_double()
cfunc = (lib_importer.windll.
DAQmxGetAnlgWinStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_win_dig_fltr_timebase_rate.setter
def anlg_win_dig_fltr_timebase_rate(self, val):
cfunc = (lib_importer.windll.
DAQmxSetAnlgWinStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_dig_fltr_timebase_rate.deleter
def anlg_win_dig_fltr_timebase_rate(self):
cfunc = (lib_importer.windll.
DAQmxResetAnlgWinStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_dig_fltr_timebase_src(self):
"""
str: Specifies the terminal of the signal to use as the timebase
of the digital filter.
"""
cfunc = (lib_importer.windll.
DAQmxGetAnlgWinStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@anlg_win_dig_fltr_timebase_src.setter
def anlg_win_dig_fltr_timebase_src(self, val):
cfunc = (lib_importer.windll.
DAQmxSetAnlgWinStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_dig_fltr_timebase_src.deleter
def anlg_win_dig_fltr_timebase_src(self):
cfunc = (lib_importer.windll.
DAQmxResetAnlgWinStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_dig_sync_enable(self):
"""
bool: Specifies whether to synchronize recognition of
transitions in the signal to the internal timebase of the
device.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetAnlgWinStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_win_dig_sync_enable.setter
def anlg_win_dig_sync_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgWinStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_dig_sync_enable.deleter
def anlg_win_dig_sync_enable(self):
cfunc = lib_importer.windll.DAQmxResetAnlgWinStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_src(self):
"""
str: Specifies the name of a virtual channel or terminal where
there is an analog signal to use as the source of the Start
Trigger.
"""
cfunc = lib_importer.windll.DAQmxGetAnlgWinStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@anlg_win_src.setter
def anlg_win_src(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgWinStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_src.deleter
def anlg_win_src(self):
cfunc = lib_importer.windll.DAQmxResetAnlgWinStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_top(self):
"""
float: Specifies the upper limit of the window. Specify this
value in the units of the measurement or generation.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetAnlgWinStartTrigTop
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@anlg_win_top.setter
def anlg_win_top(self, val):
cfunc = lib_importer.windll.DAQmxSetAnlgWinStartTrigTop
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_top.deleter
def anlg_win_top(self):
cfunc = lib_importer.windll.DAQmxResetAnlgWinStartTrigTop
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def anlg_win_trig_when(self):
"""
:class:`nidaqmx.constants.WindowTriggerCondition1`: Specifies
whether the task starts acquiring or generating samples when
the signal enters or leaves the window you specify with
**anlg_win_btm** and **anlg_win_top**.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetAnlgWinStartTrigWhen
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return WindowTriggerCondition1(val.value)
@anlg_win_trig_when.setter
def anlg_win_trig_when(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetAnlgWinStartTrigWhen
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@anlg_win_trig_when.deleter
def anlg_win_trig_when(self):
cfunc = lib_importer.windll.DAQmxResetAnlgWinStartTrigWhen
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def delay(self):
"""
float: Specifies an amount of time to wait after the Start
Trigger is received before acquiring or generating the first
sample. This value is in the units you specify with
**delay_units**.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetStartTrigDelay
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@delay.setter
def delay(self, val):
cfunc = lib_importer.windll.DAQmxSetStartTrigDelay
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@delay.deleter
def delay(self):
cfunc = lib_importer.windll.DAQmxResetStartTrigDelay
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def delay_units(self):
"""
:class:`nidaqmx.constants.DigitalWidthUnits`: Specifies the
units of **delay**.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetStartTrigDelayUnits
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return DigitalWidthUnits(val.value)
@delay_units.setter
def delay_units(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetStartTrigDelayUnits
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@delay_units.deleter
def delay_units(self):
cfunc = lib_importer.windll.DAQmxResetStartTrigDelayUnits
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_edge_dig_fltr_enable(self):
"""
bool: Specifies whether to apply a digital filter to the trigger
signal.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDigEdgeStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@dig_edge_dig_fltr_enable.setter
def dig_edge_dig_fltr_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetDigEdgeStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_edge_dig_fltr_enable.deleter
def dig_edge_dig_fltr_enable(self):
cfunc = lib_importer.windll.DAQmxResetDigEdgeStartTrigDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_edge_dig_fltr_min_pulse_width(self):
"""
float: Specifies in seconds the minimum pulse width the filter
recognizes.
"""
val = ctypes.c_double()
cfunc = (lib_importer.windll.
DAQmxGetDigEdgeStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@dig_edge_dig_fltr_min_pulse_width.setter
def dig_edge_dig_fltr_min_pulse_width(self, val):
cfunc = (lib_importer.windll.
DAQmxSetDigEdgeStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_edge_dig_fltr_min_pulse_width.deleter
def dig_edge_dig_fltr_min_pulse_width(self):
cfunc = (lib_importer.windll.
DAQmxResetDigEdgeStartTrigDigFltrMinPulseWidth)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_edge_dig_fltr_timebase_rate(self):
"""
float: Specifies in hertz the rate of the pulse width filter
timebase. NI-DAQmx uses this value to compute settings for
the filter.
"""
val = ctypes.c_double()
cfunc = (lib_importer.windll.
DAQmxGetDigEdgeStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@dig_edge_dig_fltr_timebase_rate.setter
def dig_edge_dig_fltr_timebase_rate(self, val):
cfunc = (lib_importer.windll.
DAQmxSetDigEdgeStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_edge_dig_fltr_timebase_rate.deleter
def dig_edge_dig_fltr_timebase_rate(self):
cfunc = (lib_importer.windll.
DAQmxResetDigEdgeStartTrigDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_edge_dig_fltr_timebase_src(self):
"""
str: Specifies the input terminal of the signal to use as the
timebase of the pulse width filter.
"""
cfunc = (lib_importer.windll.
DAQmxGetDigEdgeStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@dig_edge_dig_fltr_timebase_src.setter
def dig_edge_dig_fltr_timebase_src(self, val):
cfunc = (lib_importer.windll.
DAQmxSetDigEdgeStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_edge_dig_fltr_timebase_src.deleter
def dig_edge_dig_fltr_timebase_src(self):
cfunc = (lib_importer.windll.
DAQmxResetDigEdgeStartTrigDigFltrTimebaseSrc)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_edge_dig_sync_enable(self):
"""
bool: Specifies whether to synchronize recognition of
transitions in the signal to the internal timebase of the
device. If you set this property to True, the device does
not recognize and act upon the trigger until the next pulse
of the internal timebase.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDigEdgeStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@dig_edge_dig_sync_enable.setter
def dig_edge_dig_sync_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetDigEdgeStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_edge_dig_sync_enable.deleter
def dig_edge_dig_sync_enable(self):
cfunc = lib_importer.windll.DAQmxResetDigEdgeStartTrigDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_edge_edge(self):
"""
:class:`nidaqmx.constants.Edge`: Specifies on which edge of a
digital pulse to start acquiring or generating samples.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDigEdgeStartTrigEdge
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return Edge(val.value)
@dig_edge_edge.setter
def dig_edge_edge(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDigEdgeStartTrigEdge
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_edge_edge.deleter
def dig_edge_edge(self):
cfunc = lib_importer.windll.DAQmxResetDigEdgeStartTrigEdge
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_edge_src(self):
"""
str: Specifies the name of a terminal where there is a digital
signal to use as the source of the Start Trigger.
"""
cfunc = lib_importer.windll.DAQmxGetDigEdgeStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@dig_edge_src.setter
def dig_edge_src(self, val):
cfunc = lib_importer.windll.DAQmxSetDigEdgeStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_edge_src.deleter
def dig_edge_src(self):
cfunc = lib_importer.windll.DAQmxResetDigEdgeStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_pattern_pattern(self):
"""
str: Specifies the digital pattern that must be met for the
Start Trigger to occur.
"""
cfunc = lib_importer.windll.DAQmxGetDigPatternStartTrigPattern
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@dig_pattern_pattern.setter
def dig_pattern_pattern(self, val):
cfunc = lib_importer.windll.DAQmxSetDigPatternStartTrigPattern
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_pattern_pattern.deleter
def dig_pattern_pattern(self):
cfunc = lib_importer.windll.DAQmxResetDigPatternStartTrigPattern
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_pattern_src(self):
"""
:class:`nidaqmx.system.physical_channel.PhysicalChannel`:
Specifies the physical channels to use for pattern matching.
The order of the physical channels determines the order of
the pattern. If a port is included, the order of the
physical channels within the port is in ascending order.
"""
cfunc = lib_importer.windll.DAQmxGetDigPatternStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return PhysicalChannel(val.value.decode('ascii'))
@dig_pattern_src.setter
def dig_pattern_src(self, val):
val = val.name
cfunc = lib_importer.windll.DAQmxSetDigPatternStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_pattern_src.deleter
def dig_pattern_src(self):
cfunc = lib_importer.windll.DAQmxResetDigPatternStartTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def dig_pattern_trig_when(self):
"""
:class:`nidaqmx.constants.DigitalPatternCondition`: Specifies
whether the Start Trigger occurs when the physical channels
specified with **dig_pattern_src** match or differ from the
digital pattern specified with **dig_pattern_pattern**.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDigPatternStartTrigWhen
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return DigitalPatternCondition(val.value)
@dig_pattern_trig_when.setter
def dig_pattern_trig_when(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDigPatternStartTrigWhen
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@dig_pattern_trig_when.deleter
def dig_pattern_trig_when(self):
cfunc = lib_importer.windll.DAQmxResetDigPatternStartTrigWhen
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def retriggerable(self):
"""
bool: Specifies whether a finite task resets and waits for
another Start Trigger after the task completes. When you set
this property to True, the device performs a finite
acquisition or generation each time the Start Trigger occurs
until the task stops. The device ignores a trigger if it is
in the process of acquiring or generating signals.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetStartTrigRetriggerable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@retriggerable.setter
def retriggerable(self, val):
cfunc = lib_importer.windll.DAQmxSetStartTrigRetriggerable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@retriggerable.deleter
def retriggerable(self):
cfunc = lib_importer.windll.DAQmxResetStartTrigRetriggerable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def term(self):
"""
str: Indicates the name of the internal Start Trigger terminal
for the task. This property does not return the name of the
trigger source terminal.
"""
cfunc = lib_importer.windll.DAQmxGetStartTrigTerm
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@property
def trig_type(self):
"""
:class:`nidaqmx.constants.TriggerType`: Specifies the type of
trigger to use to start a task.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetStartTrigType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return TriggerType(val.value)
@trig_type.setter
def trig_type(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetStartTrigType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@trig_type.deleter
def trig_type(self):
cfunc = lib_importer.windll.DAQmxResetStartTrigType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
def cfg_anlg_edge_start_trig(
self, trigger_source="", trigger_slope=Slope.RISING,
trigger_level=0.0):
"""
Configures the task to start acquiring or generating samples
when an analog signal crosses the level you specify.
Args:
trigger_source (Optional[str]): Is the name of a virtual
channel or terminal where there is an analog signal to
use as the source of the trigger.
trigger_slope (Optional[nidaqmx.constants.Slope]): Specifies
on which slope of the signal to start acquiring or
generating samples when the signal crosses
**trigger_level**.
trigger_level (Optional[float]): Specifies at what threshold
to start acquiring or generating samples. Specify this
value in the units of the measurement or generation. Use
**trigger_slope** to specify on which slope to trigger
at this threshold.
"""
cfunc = lib_importer.windll.DAQmxCfgAnlgEdgeStartTrig
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int, ctypes.c_double]
error_code = cfunc(
self._handle, trigger_source, trigger_slope.value, trigger_level)
check_for_error(error_code)
def cfg_anlg_window_start_trig(
self, window_top, window_bottom, trigger_source="",
trigger_when=WindowTriggerCondition1.ENTERING_WINDOW):
"""
Configures the task to start acquiring or generating samples
when an analog signal enters or leaves a range you specify.
Args:
window_top (float): Is the upper limit of the window.
Specify this value in the units of the measurement or
generation.
window_bottom (float): Is the lower limit of the window.
Specify this value in the units of the measurement or
generation.
trigger_source (Optional[str]): Is the name of a virtual
channel or terminal where there is an analog signal to
use as the source of the trigger.
trigger_when (Optional[nidaqmx.constants.WindowTriggerCondition1]):
Specifies whether the task starts measuring or
generating samples when the signal enters the window or
when it leaves the window. Use **window_bottom** and
**window_top** to specify the limits of the window.
"""
cfunc = lib_importer.windll.DAQmxCfgAnlgWindowStartTrig
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int, ctypes.c_double, ctypes.c_double]
error_code = cfunc(
self._handle, trigger_source, trigger_when.value, window_top,
window_bottom)
check_for_error(error_code)
def cfg_dig_edge_start_trig(
self, trigger_source, trigger_edge=Edge.RISING):
"""
Configures the task to start acquiring or generating samples on
a rising or falling edge of a digital signal.
Args:
trigger_source (str): Specifies the name of a terminal where
there is a digital signal to use as the source of the
trigger.
trigger_edge (Optional[nidaqmx.constants.Edge]): Specifies
on which edge of the digital signal to start acquiring
or generating samples.
"""
cfunc = lib_importer.windll.DAQmxCfgDigEdgeStartTrig
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, trigger_source, trigger_edge.value)
check_for_error(error_code)
def cfg_dig_pattern_start_trig(
self, trigger_source, trigger_pattern,
trigger_when=DigitalPatternCondition.PATTERN_MATCHES):
"""
Configures a task to start acquiring or generating samples when
a digital pattern is matched.
Args:
trigger_source (str): Specifies the physical channels to use
for pattern matching. The order of the physical channels
determines the order of the pattern. If a port is
included, the order of the physical channels within the
port is in ascending order.
trigger_pattern (str): Specifies the digital pattern that
must be met for the trigger to occur.
trigger_when (Optional[nidaqmx.constants.DigitalPatternCondition]):
Specifies the condition under which the trigger occurs.
"""
cfunc = lib_importer.windll.DAQmxCfgDigPatternStartTrig
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes_byte_str, ctypes.c_int]
error_code = cfunc(
self._handle, trigger_source, trigger_pattern, trigger_when.value)
check_for_error(error_code)
def disable_start_trig(self):
"""
Configures the task to start acquiring or generating samples
immediately upon starting the task.
"""
cfunc = lib_importer.windll.DAQmxDisableStartTrig
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
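# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated accessors above): the Start Trigger
# attributes and cfg_* methods are normally reached through the public
# nidaqmx API as task.triggers.start_trigger. The device and terminal names
# ("Dev1/ai0", "/Dev1/PFI0") are assumptions; adjust them for your hardware.
#
#     import nidaqmx
#     from nidaqmx.constants import Edge
#
#     with nidaqmx.Task() as task:
#         task.ai_channels.add_ai_voltage_chan("Dev1/ai0")
#         task.timing.cfg_samp_clk_timing(1000, samps_per_chan=100)
#         task.triggers.start_trigger.cfg_dig_edge_start_trig(
#             "/Dev1/PFI0", trigger_edge=Edge.RISING)
#         data = task.read(number_of_samples_per_channel=100)
# ---------------------------------------------------------------------------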
|
|
# -*- coding: utf-8 -*-
# tomolab
# Michele Scipioni
# Harvard University, Martinos Center for Biomedical Imaging
# University of Pisa
# Import an interfile volume as an Image3D and export.
from ...Transformation.Transformations import Transform_Scale
from ...DataSources.FileSources.interfile import load_interfile
import os
import numpy as np
__all__ = ["import_interfile_volume", "export_interfile_volume"]
def import_interfile_volume_data(
headerfile="", datafile=""
): # FIXME: this should be in the Interfile package
F = load_interfile(headerfile)
if "matrix size[1]" in F:
Nx = F["matrix size[1]"]["value"]
Ny = F["matrix size[2]"]["value"]
Nz = F["matrix size[3]"]["value"]
else:
Nx = F["matrix size [1]"]["value"]
Ny = F["matrix size [2]"]["value"]
Nz = F["matrix size [3]"]["value"]
if datafile == "":
datafile1 = headerfile.replace(
headerfile.split(os.sep)[-1], F["name of data file"]["value"]
)
datafile2 = headerfile.replace(".v.hdr", ".v")
datafile2 = datafile2.replace(".h33", ".v")
datafile3 = headerfile.replace(".h33", ".v")
        try:
            data = np.fromfile(datafile1, dtype=np.float32)
        except OSError:
            try:
                data = np.fromfile(datafile2, dtype=np.float32)
            except OSError:
                try:
                    data = np.fromfile(datafile3, dtype=np.float32)
                except OSError:
                    raise FileNotFoundError("Data file not found.")
else:
data = np.fromfile(datafile, dtype=np.float32)
data = data.reshape([Nz, Ny, Nx])
data = np.asfortranarray(data.swapaxes(0, 2))
data = np.transpose(data, [1, 0, 2])
data = data[::-1, :, :]
return data
def import_interfile_volume(headerfile="", datafile=""):
from ...Core import Image3D
# Load ndarray data
data = import_interfile_volume_data(headerfile, datafile)
# Load other information - e.g. pixels size
F = load_interfile(headerfile)
if "scale factor (mm/pixel) [1]" in F:
pixsize_x = F["scale factor (mm/pixel) [1]"]["value"]
pixsize_y = F["scale factor (mm/pixel) [2]"]["value"]
pixsize_z = F["scale factor (mm/pixel) [3]"]["value"]
# Create Image3D
T_pix_to_world = Transform_Scale(
        np.float32([pixsize_x, pixsize_y, pixsize_z]), map_from="pixels_PET", map_to="world"
)
volume = Image3D(data=data, affine=T_pix_to_world, space="world")
return volume
def export_interfile_volume(data_file_name, data):
data = data[::-1, :, :]
data = np.transpose(data, [1, 0, 2])
data = data.swapaxes(0, 2)
data = np.asarray(data, dtype=np.float32, order="C")
data.tofile(data_file_name)
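# ---------------------------------------------------------------------------
# Usage sketch (assumptions: 'volume.v.hdr' is an existing Interfile header,
# 'volume_copy.v' is a writable output path, and Image3D exposes the raw
# ndarray as .data):
#
#     vol = import_interfile_volume("volume.v.hdr")
#     export_interfile_volume("volume_copy.v", vol.data)
#
# import_interfile_volume builds the voxel-to-world transform from the
# "scale factor (mm/pixel)" fields of the header, while
# export_interfile_volume writes only the raw float32 data (no header).
# ---------------------------------------------------------------------------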
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 20:39:07 2020
@author: JianyuanZhai
"""
import pyomo.environ as pe
import numpy as np
import time
DOUBLE = np.float64
class DDCU_Nonuniform():
def __init__(self, intercept = True):
self.intercept = intercept
self.ddcu = DDCU_model._make_pyomo_ddcu_nonuniform(intercept)
self.solver = pe.SolverFactory('glpk')
self.time_underestimate = 0.
@staticmethod
def _minimize_1d(a, b, c):
if a > 0.:
check = -b/(2*a)
if check >= 0. and check <= 1.:
return check
elif check < 0.:
return 0.
elif check > 1.:
return 1.
        elif 0. >= a >= -1e-5:
if b > 0.:
return 0.
if b < 0.:
return 1.
else:
return 0.5
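    # Example: _minimize_1d(1.0, -1.0, 0.0) returns 0.5, the minimizer of
    # x**2 - x on [0, 1]; the c argument does not shift the minimizer.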
def update_solver(self, solver, option = {}):
self.solver = pe.SolverFactory(solver)
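    # _underestimate fits a separable quadratic sum_j(a_j*x_j**2 + b_j*x_j) + c
    # that lies on or below every sample in (all_X, all_Y), then returns that
    # quadratic's minimum over the unit box [0, 1]**dim together with its
    # location.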
def _underestimate(self, all_X, all_Y):
time_start = time.time()
dim = all_X.shape[1]
sample_ind = list(range(len(all_Y)))
x_ind = list(range(dim))
x_dict = {}
for i in sample_ind:
for j in x_ind:
x_dict[(i,j)] = all_X[i,j]
if self.intercept:
data = {None:{'x_ind' : {None : x_ind} , 'sample_ind' : { None : sample_ind }, 'xs' : x_dict , 'ys' : dict(zip(sample_ind, all_Y))}}
else:
corner_point_ind = np.where((all_X == 0.).all(axis = 1))[0]
if len(corner_point_ind) > 1:
                candidate = all_Y[corner_point_ind]
                intercept = float(min(candidate))
else:
intercept = float(all_Y[corner_point_ind])
data = {None:{'x_ind' : {None : x_ind} , 'sample_ind' : { None : sample_ind } ,'xs' : x_dict , 'ys' : dict(zip(sample_ind, all_Y)) , 'c' : {None : intercept}}}
        model = self.ddcu.create_instance(data)  # create a concrete instance of the abstract Pyomo model
self.solver.solve(model)
a = np.array([round(pe.value(model.a[i]), 6) for i in model.x_ind])
if (a < 0.).any():
model.pprint()
b = np.array([pe.value(model.b[i]) for i in model.x_ind])
c = pe.value( model.c )
xopt = np.array([self._minimize_1d(a[i], b[i], c, ) for i in range(dim)])
flb_s = sum(a*xopt**2+b*xopt) + c
if abs(flb_s - min(all_Y)) <= 0.00001:
flb_s = min(all_Y)
self.time_underestimate += time.time() - time_start
return float(flb_s), np.array([xopt])
class DDCU_model:
"""
    This class contains recipes for building Pyomo models for the different underestimators.
"""
@staticmethod
def _linear_obj_rule(model):
return sum((model.ys[i] - model.f[i]) for i in model.sample_ind)
@staticmethod
def _underestimating_con_rule(model, i):
return model.ys[i] - model.f[i] >= 0.0
@staticmethod
def _quadratic_nonuniform(model, i):
return model.f[i] == sum(model.a[j]*model.xs[i,j]**2 + model.b[j]*model.xs[i,j] for j in model.x_ind) + model.c
@staticmethod
def _exponential(model, i):
a = sum(model.a[j]*(model.xs[i,j]-model.b[j])**2 for j in model.x_ind)
return model.f[i] == pe.exp(a) + model.c
@staticmethod
def _make_pyomo_ddcu_nonuniform(intercept):
ddcu = pe.AbstractModel()
ddcu.sample_ind = pe.Set()
ddcu.x_ind = pe.Set()
ddcu.ys = pe.Param(ddcu.sample_ind)
ddcu.xs = pe.Param(ddcu.sample_ind,ddcu.x_ind)
ddcu.a = pe.Var(ddcu.x_ind,within = pe.NonNegativeReals, initialize=0.)
ddcu.b = pe.Var(ddcu.x_ind,within = pe.Reals)
ddcu.f = pe.Var(ddcu.sample_ind)
if intercept :
ddcu.c = pe.Var(within = pe.Reals)
else :
ddcu.c = pe.Param()
ddcu.obj = pe.Objective(rule = DDCU_model._linear_obj_rule)
ddcu.con1 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._underestimating_con_rule)
ddcu.con2 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._quadratic_nonuniform)
return ddcu
@staticmethod
def _make_pyomo_ddcu_exponential():
ddcu = pe.AbstractModel()
ddcu.sample_ind = pe.Set()
ddcu.x_ind = pe.Set()
ddcu.ys = pe.Param(ddcu.sample_ind)
ddcu.xs = pe.Param(ddcu.sample_ind,ddcu.x_ind)
ddcu.a = pe.Var(ddcu.x_ind,within = pe.NonNegativeReals)
#ddcu.b = pe.Param(ddcu.x_ind)
ddcu.b = pe.Var(ddcu.x_ind, within = pe.Reals)
ddcu.f = pe.Var(ddcu.sample_ind)
ddcu.c = pe.Var(within = pe.Reals)
ddcu.obj = pe.Objective(rule = DDCU_model._linear_obj_rule)
ddcu.con1 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._underestimating_con_rule)
ddcu.con2 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._exponential)
return ddcu
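# ---------------------------------------------------------------------------
# Usage sketch (a minimal example, assuming pyomo and the glpk solver are
# installed; the sample data below are made up for illustration and
# _underestimate is an internal method):
#
#     import numpy as np
#     X = np.array([[0.0], [0.25], [0.5], [0.75], [1.0]])
#     Y = np.array([0.0, 0.2, 0.3, 0.7, 1.2])
#     ddcu = DDCU_Nonuniform(intercept=True)
#     flb, xopt = ddcu._underestimate(X, Y)   # lower bound and its argmin
# ---------------------------------------------------------------------------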
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
try:
import numpy as np
except ImportError as e:
print("Failed to do 'from scipy.interpolate import interp1d', "
"scipy may not been installed properly: %s" % e)
try:
from scipy.interpolate import interp1d
except ImportError as e:
print("Failed to do 'from scipy.interpolate import interp1d', "
"scipy may not been installed properly: %s" % e)
from numpymate.packages.convert2 import any2datetime
from numpymate.packages.rolex import rolex
def datetime_to_utctimestamp(datetime_array):
return [rolex.to_utctimestamp(any2datetime(dt)) for dt in datetime_array]
def locate(x1, y1, x2, y2, x3):
"""An equation solver to solve: given two points on a line and x, solve the
y coordinate on the same line.
Suppose p1 = (x1, y1), p2 = (x2, y2), p3 = (x3, y3) on the same line.
given x1, y1, x2, y2, x3, find y3::
y3 = y1 - (y1 - y2) * (x1 - x3) / (x1 - x3)
**中文文档**
给定两点, 求得由这两点确定的直线上的另外一点的y坐标。
"""
return y1 - 1.0 * (y1 - y2) * (x1 - x3) / (x1 - x2)
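# Example: the line through (0, 0) and (2, 4) gives locate(0, 0, 2, 4, 1) == 2.0.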
def datetime_mode_decorator(func):
"""
**中文文档**
"""
def wrapper(*args, **kwargs):
x_axis = kwargs.get("x_axis", args[0])
y_axis = kwargs.get("y_axis", args[1])
x_new_axis = kwargs.get("x_new_axis", args[2])
x_axis = datetime_to_utctimestamp(x_axis)
x_new_axis = datetime_to_utctimestamp(x_new_axis)
new_args = (x_axis, y_axis, x_new_axis)
return func(*new_args)
return wrapper
def linear_interp(x_axis, y_axis, x_new_axis):
"""Interpolate y_axis = f(x_axis) -> y_new_axis = f(x_new_axis), use
linear interpolation. x_new_axis's range has to be included in x_axis.
**中文文档**
对 y = f(x) 进行线性插值, 要求被差值的点在 x[0] ~ x[-1] 之间。
"""
f = interp1d(x_axis, y_axis)
return f(x_new_axis)
linear_interp_by_datetime = datetime_mode_decorator(linear_interp)
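# Example: linear_interp([0, 1, 2], [0.0, 10.0, 20.0], [0.5, 1.5]) -> array([ 5., 15.])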
def easy_linear_interp(x_axis, y_axis, x_new_axis):
"""Interpolate y_axis = f(x_axis) -> y_new_axis = f(x_new_axis), use
linear interpolation. x_new_axis's range DOESN'T has to be included in x_axis.
A smart way to interpolate arbitrary-range x_new_axis. The trick is
to add one more point to the original x_axis at x_new_axis[0] and
x_new_axis[-1], if x_new_axis is out of range.
**中文文档**
对 y = f(x) 进行线性插值, 不要求被差值的点在 x[0] ~ x[-1] 之间。
"""
    # Convert the inputs to lists because lists are concatenated below.
if not isinstance(x_axis, list):
x_axis = list(x_axis)
if not isinstance(y_axis, list):
y_axis = list(y_axis)
if not isinstance(x_new_axis, list):
x_new_axis = list(x_new_axis)
left_pad_x, left_pad_y = list(), list()
right_pad_x, right_pad_y = list(), list()
if x_new_axis[0] < x_axis[0]:
left_pad_x.append(x_new_axis[0])
left_pad_y.append(locate(x_axis[0], y_axis[0],
x_axis[1], y_axis[1], x_new_axis[0]))
if x_new_axis[-1] > x_axis[-1]:
right_pad_x.append(x_new_axis[-1])
right_pad_y.append(locate(x_axis[-1], y_axis[-1],
x_axis[-2], y_axis[-2], x_new_axis[-1]))
if not ((len(left_pad_x) == 0) and (len(right_pad_x) == 0)):
x_axis = left_pad_x + x_axis + right_pad_x
y_axis = left_pad_y + y_axis + right_pad_y
return linear_interp(x_axis, y_axis, x_new_axis)
easy_linear_interp_by_datetime = datetime_mode_decorator(easy_linear_interp)
def spline_interp(x_axis, y_axis, x_new_axis):
"""Interpolate y_axis = f(x_axis) -> y_new_axis = f(x_new_axis), use
linear interpolation. x_new_axis's range has to be included in x_axis.
`Spline interpolation <https://en.wikipedia.org/wiki/Spline_interpolation>`_
is a popular interpolation method. Way more accurate than linear interpolate
in average.
**中文文档**
对 y = f(x) 进行曲线插值, 精度较高, 计算量较大,
要求被差值的点在 x[0] ~ x[-1] 之间。
"""
f = interp1d(x_axis, y_axis, kind="cubic")
return f(x_new_axis)
spline_interp_by_datetime = datetime_mode_decorator(spline_interp)
def exam_reliability(x_axis, x_axis_new, reliable_distance, precision=0.000001):
"""When we do linear interpolation on x_axis and derive value for
x_axis_new, we also evaluate how can we trust those interpolated
data points. This is how it works:
For each new x_axis point in x_axis new, let's say xi. Find the closest
point in x_axis, suppose the distance is #dist. Compare this to
#reliable_distance. If #dist < #reliable_distance, then we can trust it,
otherwise, we can't.
The precision is to handle decimal value's precision problem. Because
1.0 may actually is 1.00000000001 or 0.999999999999 in computer system.
So we define that: if ``dist`` + ``precision`` <= ``reliable_distance``, then we
can trust it, else, we can't.
Here is an O(n) algorithm implementation. A lots of improvement than
classic binary search one, which is O(n^2).
:params reliable_distance: reliab distance in seconds.
**中文文档**
reliability检查是指: 在我们用 x, y 的原始数据, 对x_new进行差值时, 有时我们
需要做如下判断, 如果新的差值点, 距离原始数据中最近的点的距离, 不超过某个
设定值 ``reliable_distance`` 时, 即可视为该差值是可信赖的。
"""
x_axis = x_axis[::-1]
x_axis.append(-2**32)
distance_to_closest_point = list()
for t in x_axis_new:
while 1:
try:
x = x_axis.pop()
if x <= t:
left = x
else:
right = x
x_axis.append(right)
x_axis.append(left)
left_dist, right_dist = (t - left), (right - t)
if left_dist <= right_dist:
distance_to_closest_point.append(left_dist)
else:
distance_to_closest_point.append(right_dist)
break
            except IndexError:
distance_to_closest_point.append(t - left)
break
reliable_flag = list()
for dist in distance_to_closest_point:
if dist - precision - reliable_distance <= 0:
reliable_flag.append(True)
else:
reliable_flag.append(False)
return reliable_flag
def exam_reliability_by_datetime(
datetime_axis, datetime_new_axis, reliable_distance):
"""A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds.
:params reliable_distance: reliab distance in seconds.
**中文文档**
根据两个时间轴进行 reliability 检查。
"""
numeric_datetime_axis = [
rolex.to_utctimestamp(any2datetime(dt)) for dt in datetime_axis
]
numeric_datetime_new_axis = [
rolex.to_utctimestamp(any2datetime(dt)) for dt in datetime_new_axis
]
return exam_reliability(
numeric_datetime_axis, numeric_datetime_new_axis,
reliable_distance, precision=0.0,
)
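# ---------------------------------------------------------------------------
# Usage sketch (plain numeric axes; the values are made up for illustration):
#
#     x = [0.0, 1.0, 2.0, 3.0]
#     y = [0.0, 2.0, 4.0, 6.0]
#     x_new = [-0.5, 1.5, 3.5]                       # partly outside x's range
#     y_new = easy_linear_interp(x, y, x_new)        # -> array([-1., 3., 7.])
#     flags = exam_reliability(x, x_new, reliable_distance=0.5)
#     # flags -> [True, True, True]: each new point lies within 0.5 of an
#     # original sample.
# ---------------------------------------------------------------------------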
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 9 17:41:59 2020
@author: ullaheede
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import numpy as np
import xarray as xr
import xesmf as xe
import pandas as pd
import glob as glob
import os
from pylab import *
import matplotlib.gridspec as gridspec
e1=180
e2=280
w1=80
w2=150
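# Longitude bounds (degrees east) of the two equatorial Pacific boxes:
# e1-e2 is the eastern box and w1-w2 the western box; every dataset below is
# reduced to the west-minus-east SST gradient averaged over 5S-5N.
#
# The repeated east/west/gradient computation could be captured by a helper
# like the following sketch (not used by the script as written):
#
#     def pacific_gradient(ts):
#         east = ts.sel(lat=slice(-5, 5), lon=slice(e1, e2)).mean('lat').mean('lon')
#         west = ts.sel(lat=slice(-5, 5), lon=slice(w1, w2)).mean('lat').mean('lon')
#         return west - east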
model_names=['ACCESS-CM2','ACCESS-ESM1-5','BCC-CSM2-MR','BCC-ESM1','CAMS-CSM1-0','CanESM5','CAS-ESM2-0','CESM2','CESM2-FV2','CESM2-WACCM','CESM2-WACCM-FV2',\
'CIESM','CMCC-CM2-SR5','CNRM-CM6','CNRM-CM6-HR','CNRM-ESM2-1','E3SM','FGOALS-f3-L','FGOALS-g3','GFDL-CM4','GFDL-ESM4','GISS-E2-1-G','GISS-E2-1-H',\
'HadGEM3-GC31-LL','HadGEM3-GC3-MM','INM-CM4-8','INM-CM5-0','IPSL-CM6A','KACE-1-0-G','MCM-UA-1-0','MIROC-ES2L','MIROC6','MPI-ESM-1-2-HAM','MPI-ESM1-2-LR',\
'MRI-ESM2','NESM3','NorCPM1','SAM0-UNICON','TaiESM1','UKESM1-0-LL']
model_names1=['ersstv4','ACCESS-CM2','ACCESS-ESM1-5','BCC-CSM2-MR','BCC-ESM1','CAMS-CSM1-0','CanESM5','CAS-ESM2-0','CESM2','CESM2-FV2','CESM2-WACCM','CESM2-WACCM-FV2',\
'CIESM','CMCC-CM2-SR5','CNRM-CM6','CNRM-CM6-HR','CNRM-ESM2-1','E3SM','FGOALS-f3-L','FGOALS-g3','GFDL-CM4','GFDL-ESM4','GISS-E2-1-G','GISS-E2-1-H',\
'HadGEM3-GC31-LL','HadGEM3-GC3-MM','INM-CM4-8','INM-CM5-0','IPSL-CM6A','KACE-1-0-G','MCM-UA-1-0','MIROC-ES2L','MIROC6','MPI-ESM-1-2-HAM','MPI-ESM1-2-LR',\
'MRI-ESM2','NESM3','NorCPM1','SAM0-UNICON','TaiESM1','UKESM1-0-LL']
obs=xr.open_dataset('/Users/ullaklintheede/Downloads/ersst.v4.1854-2020.nc')
ts_obs=obs['sst']
ts_obs_a=ts_obs.groupby('time.year').mean('time',skipna=True)
east_obs=ts_obs_a.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon').mean('lev')
west_obs=ts_obs_a.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon').mean('lev')
grad_obs=west_obs-east_obs
grad_obs=grad_obs-grad_obs.sel(year=slice(1950,1970)).mean('year')
obs1=grad_obs.sel(year=slice(2000,2014)).mean('year')
std_obs=obs1*0
mylist_control=xr.open_dataset('/Volumes/Armor_CMIP6/control_timemean_ts_1deg.nc')
ts_control=mylist_control['ts']
mylist_4xCO2=xr.open_dataset('/Volumes/Armor_CMIP6/4xCO2_ts.nc')
#mylist_4xCO2=xr.open_dataset('/Volumes/Armor_CMIP6/1ptCO2_ts_anomaly.nc')
ts_4xCO2=mylist_4xCO2['ts']
ts_anom=ts_4xCO2-ts_control
#ts_anom=ts_4xCO2
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad=west-east
grad_pct=grad.assign_coords(new_dim=range(1,len(model_names)+1))
abrupt1 = grad.sel(year=slice(0,25)).mean('year')
abrupt2 = grad.sel(year=slice(100,149)).mean('year')
###################################################################
mylist_control=xr.open_dataset('/Volumes/Armor_CMIP6/control_timemean_ts_1deg.nc')
ts_control=mylist_control['ts']
#mylist_4xCO2=xr.open_dataset('/Volumes/Armor_CMIP6/4xCO2_ts.nc')
mylist_4xCO2=xr.open_dataset('/Volumes/Armor_CMIP6/1pct_ts.nc')
ts_4xCO2=mylist_4xCO2['ts']
ts_anom=ts_4xCO2-ts_control
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad=west-east
grad=grad.assign_coords(new_dim=range(1,len(model_names)+1))
abrupt1_pct = grad.sel(year=slice(20,80)).mean('year')
abrupt2_pct = grad.sel(year=slice(100,149)).mean('year')
#################################################################
filelist = glob.glob(os.path.join('/Volumes/Armor_CMIP6/', 'ts_historical*.nc'))
filelist=sorted(filelist,key=str.lower)
mylist=xr.open_dataset(filelist[0],decode_cf=False)
ts_anom=mylist['ts']
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2000,2014)).mean('year')-grad0.sel(year=slice(1950,1970)).mean('year')
abrupt1_hist=grad1.mean('ens_member')
std1_hist=grad1.std('ens_member')
grad2=grad0.sel(year=slice(1990,2014)).mean('year')-grad0.sel(year=slice(1850,1880)).mean('year')
abrupt2_hist=grad2.mean('ens_member')
std2_hist=grad2.std('ens_member')
for x in range(1,len(filelist)):
#for x in range(11):
mylist=xr.open_dataset(filelist[x],decode_cf=False)
ts_anom=mylist['ts']
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2000,2014)).mean('year')-grad0.sel(year=slice(1950,1970)).mean('year')
abrupt1_hist=xr.concat([abrupt1_hist,grad1.mean('ens_member')],'new_dim')
std1_hist=xr.concat([std1_hist,grad1.std('ens_member')],'new_dim')
grad2=grad0.sel(year=slice(1990,2014)).mean('year')-grad0.sel(year=slice(1850,1880)).mean('year')
abrupt2_hist=xr.concat([abrupt2_hist,grad2.mean('ens_member')],'new_dim')
    std2_hist=xr.concat([std2_hist,grad2.std('ens_member')],'new_dim')
abrupt1_hist=abrupt1_hist.assign_coords(new_dim=range(1,len(model_names)+1))
abrupt2_hist=abrupt2_hist.assign_coords(new_dim=range(1,len(model_names)+1))
std1_hist=std1_hist.assign_coords(new_dim=range(1,len(model_names)+1))
std2_hist=std2_hist.assign_coords(new_dim=range(1,len(model_names)+1))
#################################################################
filelist = glob.glob(os.path.join('/Volumes/Armor_CMIP6/', 'ts_ssp585*.nc'))
filelist=sorted(filelist,key=str.lower)
control_ssp585subset=ts_control.isel(new_dim=[0,1,2,4,5,7,9,11,12,13,14,15,17,18,19,20,21,23,24,25,26,27,28,29,30,31,33,34,35,38,39])
control_ssp585subset=control_ssp585subset.assign_coords(new_dim=range(0,len(filelist)))
mylist=xr.open_dataset(filelist[0],decode_cf=False)
mylist_control=control_ssp585subset.sel(new_dim=0)
ts_anom=mylist['ts']
east=mylist_control.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=mylist_control.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
gradC=west-east
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2015,2040)).mean('year')-gradC
abrupt1_ssp=grad1.mean('ens_member')
std1_ssp=grad1.std('ens_member')
grad2=grad0.sel(year=slice(2080,2100)).mean('year')-gradC
abrupt2_ssp=grad2.mean('ens_member')
std2_ssp=grad2.std('ens_member')
for x in range(1,len(filelist)):
#for x in range(11):
mylist=xr.open_dataset(filelist[x],decode_cf=False)
mylist_control=control_ssp585subset.sel(new_dim=x)
ts_anom=mylist['ts']
east=mylist_control.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=mylist_control.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
gradC=west-east
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2015,2040)).mean('year')-gradC
abrupt1_ssp=xr.concat([abrupt1_ssp,grad1.mean('ens_member')],'new_dim')
std1_ssp=xr.concat([std1_ssp,grad1.std('ens_member')],'new_dim')
grad2=grad0.sel(year=slice(2080,2100)).mean('year')-gradC
abrupt2_ssp=xr.concat([abrupt2_ssp,grad2.mean('ens_member')],'new_dim')
std2_ssp=xr.concat([std2_ssp,grad2.std('ens_member')],'new_dim')
ssp_models=[0,1,2,4,5,7,9,11,12,13,14,15,17,18,19,20,21,23,24,25,26,27,28,29,30,31,33,34,35,38,39]
ssp_models1=[x+1 for x in ssp_models]
abrupt1_ssp=abrupt1_ssp.assign_coords(new_dim=ssp_models1)
abrupt2_ssp=abrupt2_ssp.assign_coords(new_dim=ssp_models1)
std1_ssp=std1_ssp.assign_coords(new_dim=ssp_models1)
std2_ssp=std2_ssp.assign_coords(new_dim=ssp_models1)
#################################################################
filelist = glob.glob(os.path.join('/Volumes/Armor_CMIP6/', 'ts_ssp370*.nc'))
filelist=sorted(filelist,key=str.lower)
control_ssp370subset=ts_control.isel(new_dim=[0,1,2,3,4,5,7,9,12,13,14,15,18,20,21,25,26,27,28,29,30,31,32,34,39])
control_ssp370subset=control_ssp370subset.assign_coords(new_dim=range(0,len(filelist)))
mylist=xr.open_dataset(filelist[0],decode_cf=False)
ts_anom=mylist['ts']
mylist_control=control_ssp370subset.sel(new_dim=0)
east=mylist_control.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=mylist_control.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
gradC=west-east
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2015,2040)).mean('year')-gradC
abrupt1_ssp3=grad1.mean('ens_member')
std1_ssp3=grad1.std('ens_member')
grad2=grad0.sel(year=slice(2080,2100)).mean('year')-gradC
abrupt2_ssp3=grad2.mean('ens_member')
std2_ssp3=grad2.std('ens_member')
for x in range(1,len(filelist)):
#for x in range(11):
mylist=xr.open_dataset(filelist[x],decode_cf=False)
ts_anom=mylist['ts']
mylist_control=control_ssp370subset.sel(new_dim=x)
east=mylist_control.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=mylist_control.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
gradC=west-east
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2015,2040)).mean('year')-gradC
abrupt1_ssp3=xr.concat([abrupt1_ssp3,grad1.mean('ens_member')],'new_dim')
std1_ssp3=xr.concat([std1_ssp3,grad1.std('ens_member')],'new_dim')
grad2=grad0.sel(year=slice(2080,2100)).mean('year')-gradC
abrupt2_ssp3=xr.concat([abrupt2_ssp3,grad2.mean('ens_member')],'new_dim')
std2_ssp3=xr.concat([std2_ssp3,grad2.std('ens_member')],'new_dim')
ssp3_models=[0,1,2,3,4,5,7,9,12,13,14,15,18,20,21,25,26,27,28,29,30,31,32,34,39]
ssp3_models1=[x+1 for x in ssp3_models]
abrupt1_ssp3=abrupt1_ssp3.assign_coords(new_dim=ssp3_models1)
abrupt2_ssp3=abrupt2_ssp3.assign_coords(new_dim=ssp3_models1)
std1_ssp3=std1_ssp3.assign_coords(new_dim=ssp3_models1)
std2_ssp3=std2_ssp3.assign_coords(new_dim=ssp3_models1)
#################################################################
filelist = glob.glob(os.path.join('/Volumes/Armor_CMIP6/', 'ts_GHGonly*.nc'))
filelist=sorted(filelist,key=str.lower)
mylist=xr.open_dataset(filelist[0],decode_cf=False)
ts_anom=mylist['ts']
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2000,2014)).mean('year')-grad0.sel(year=slice(1950,1970)).mean('year')
abrupt1_ghg=grad1.mean('ens_member')
std1_ghg=grad1.std('ens_member')
grad2=grad0.sel(year=slice(1990,2014)).mean('year')-grad0.sel(year=slice(1850,1880)).mean('year')
abrupt2_ghg=grad2.mean('ens_member')
std2_ghg=grad2.std('ens_member')
for x in range(1,len(filelist)):
#for x in range(11):
mylist=xr.open_dataset(filelist[x],decode_cf=False)
ts_anom=mylist['ts']
east=ts_anom.sel(lat=slice(-5,5),lon=slice(e1,e2)).mean('lat').mean('lon')
west=ts_anom.sel(lat=slice(-5,5),lon=slice(w1,w2)).mean('lat').mean('lon')
grad0=west-east
grad1=grad0.sel(year=slice(2000,2014)).mean('year')-grad0.sel(year=slice(1950,1970)).mean('year')
abrupt1_ghg=xr.concat([abrupt1_ghg,grad1.mean('ens_member')],'new_dim')
std1_ghg=xr.concat([std1_ghg,grad1.std('ens_member')],'new_dim')
grad2=grad0.sel(year=slice(1990,2014)).mean('year')-grad0.sel(year=slice(1850,1880)).mean('year')
abrupt2_ghg=xr.concat([abrupt2_ghg,grad2.mean('ens_member')],'new_dim')
std2_ghg=xr.concat([std2_ghg,grad2.std('ens_member')],'new_dim')
GHGonly_models=[1,2,5,7,13,18,20,21,23,27,31,32,34,39]
GHGonly_models1=[x+1 for x in GHGonly_models]
abrupt1_ghg=abrupt1_ghg.assign_coords(new_dim=GHGonly_models1)
abrupt2_ghg=abrupt2_ghg.assign_coords(new_dim=GHGonly_models1)
std1_ghg=std1_ghg.assign_coords(new_dim=GHGonly_models1)
std2_ghg=std2_ghg.assign_coords(new_dim=GHGonly_models1)
#%%
plt.rcParams.update({'hatch.color': '0.1'})
plt.rcParams.update({'font.size': 40})
x = np.arange(len(model_names))+1 # the label locations
x3=0
width = 0.25 # the width of the bars
fig = figure(figsize=(47,25))
gs = gridspec.GridSpec(2, 1)
ax1 = plt.subplot(gs[0, 0:1])
ax2 = plt.subplot(gs[1,0:1])
#ax3 = plt.subplot(gs[2,0:1])
fig = gcf()
gs.tight_layout(fig,h_pad=12,w_pad=1)
ax = [ax1,ax2]
plt.figtext(0.04, 0.98, 'a)')
plt.figtext(0.04, 0.375, 'b)')
#plt.figtext(0.04, 0.215, 'c)')
ax[0].bar(x - width, abrupt1, width, label='year 0-25, 4xCO$_2$')
ax[0].bar(x, abrupt1_pct, width, label='year 20-80, 1pct CO$_2$', hatch="\\")
ax[0].bar(x + width, abrupt2, width, label='year 100-150, 4xCO$_2$', hatch="/")
ax[0].bar(x3 + width, obs1, width, label='year 2000-2014 minus 1950-1970, observed', hatch="x")
# Add some text for labels, title and custom x-axis tick labels, etc.
ax[0].set_ylabel(r'$\Delta$ T ($^o$C)')
ax[0].set_title('Pacific zonal SST gradient change, hypothetical CO$_2$ scenarios',fontsize=53)
ax[0].set_xticks(range(len(model_names1)))
ax[0].set_xticklabels(model_names1,rotation='vertical')
ax[0].legend(ncol=2,fontsize=38)
x = abrupt1_hist.new_dim # the label locations
x1 = abrupt1_ssp.new_dim
x15 = abrupt1_ssp3.new_dim
x2 =abrupt1_ghg.new_dim
x3=0
width = 0.25 # the width of the bars
# ax[1].bar(x - width, abrupt1_hist, width, yerr=std1_hist, label='year 2000-2014 minus 1950-1970')
# ax[1].bar(x15, abrupt1_ssp3, width, yerr=std1_ssp3, label='year 2015-2035, ssp370')
# ax[1].bar(x15 + width, abrupt2_ssp3, width, yerr=std1_ssp3, label='year 2080-2100, ssp370')
# ax[1].bar(x3, obs1, width, label='year 2000-2014 minus 1950-1970, obs')
# ax[1].set_ylabel('$\Delta$ K')
# ax[1].set_title('SST gradient, historical and future projection')
# ax[1].set_xticks(range(len(model_names1)))
# ax[1].legend()
ax[1].bar(x - width, abrupt1_hist, width, yerr=std1_hist, error_kw=dict(lw=5),label='year 2000-2014 minus 1950-1970')
#ax[1].bar(x2 - width, abrupt1_ghg, width, yerr=std1_ghg, label='year 2000-2014 minus 1950-1970, GHGonly')
ax[1].bar(x1, abrupt1_ssp, width, yerr=std1_ssp, error_kw=dict(lw=5), label='year 2015-2035, ssp585', hatch="\\")
ax[1].bar(x1 + width, abrupt2_ssp, width, yerr=std1_ssp, error_kw=dict(lw=5), label='year 2080-2100, ssp585', hatch="/")
ax[1].bar(x3, obs1, width, label='year 2000-2014 minus 1950-1970, obs', hatch="x")
# Add some text for labels, title and custom x-axis tick labels, etc.
ax[1].set_ylabel(r'$\Delta$ T ($^o$C)')
ax[1].set_title('Pacific zonal SST gradient change, historical and future projections',fontsize=53)
ax[1].set_xticks(range(len(model_names1)))
ax[1].set_xticklabels(model_names1,rotation='vertical')
ax[1].legend(ncol=2,fontsize=38)
# ax[2].bar(x - width, abrupt1_hist, width, yerr=std1_hist, error_kw=dict(lw=5),label='year 2000-2014 minus 1950-1970')
# #ax[1].bar(x2 - width, abrupt1_ghg, width, yerr=std1_ghg, label='year 2000-2014 minus 1950-1970, GHGonly')
# ax[2].bar(x15, abrupt1_ssp3, width, yerr=std1_ssp3, error_kw=dict(lw=5), label='year 2015-2035, ssp370')
# ax[2].bar(x15 + width, abrupt2_ssp3, width, yerr=std1_ssp3, error_kw=dict(lw=5), label='year 2080-2100, ssp370')
# ax[2].bar(x3, obs1, width, label='year 2000-2014 minus 1950-1970, obs')
# # Add some text for labels, title and custom x-axis tick labels, etc.
# ax[2].set_ylabel('$\Delta$ T ($^o$C)')
# ax[2].set_title('SST gradient, historical and future projection',fontsize=53)
# ax[2].set_xticks(range(len(model_names1)))
# ax[2].set_xticklabels(model_names1,rotation='vertical')
# ax[2].legend(ncol=2,fontsize=38)
|
|
'''
ViZDoom-style wrapper around OpenAI Gym environments
'''
from __future__ import print_function
import sys
import os
vizdoom_path = 'C://Users//Rzhang//Anaconda3//envs//recognition//Lib//site-packages//vizdoom'
sys.path = [os.path.join(vizdoom_path,'bin/python3')] + sys.path
import vizdoom
print(vizdoom.__file__)
import random
import time
import numpy as np
import re
import cv2
import gym
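# NOTE: the module-level get_screen() below appears to be adapted from the
# PyTorch DQN tutorial; it assumes that `torch`, a torchvision `resize`
# transform, a `device`, and a `get_cart_location` helper are defined elsewhere
# in the project (none of them are imported in this file).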
def get_screen(env):
# Returned screen requested by gym is 400x600x3, but is sometimes larger
# such as 800x1200x3. Transpose it into torch order (CHW).
screen = env.render(mode='rgb_array').transpose((2, 0, 1))
# Cart is in the lower half, so strip off the top and bottom of the screen
_, screen_height, screen_width = screen.shape
screen = screen[:, int(screen_height*0.4):int(screen_height * 0.8)]
view_width = int(screen_width * 0.6)
cart_location = get_cart_location(screen_width)
if cart_location < view_width // 2:
slice_range = slice(view_width)
elif cart_location > (screen_width - view_width // 2):
slice_range = slice(-view_width, None)
else:
slice_range = slice(cart_location - view_width // 2,
cart_location + view_width // 2)
# Strip off the edges, so that we have a square image centered on a cart
screen = screen[:, :, slice_range]
# Convert to float, rescale, convert to torch tensor
# (this doesn't require a copy)
screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
screen = torch.from_numpy(screen)
# Resize, and add a batch dimension (BCHW)
return resize(screen).unsqueeze(0).to(device)
class Gym_simulator:
def __init__(self, args):
self.config = args['config']
self.resolution = args['resolution']
self.frame_skip = args['frame_skip']
self.color_mode = args['color_mode']
self.switch_maps = args['switch_maps']
self.maps = args['maps']
self.game_args = args['game_args']
self.env_name = args['env_name']
self.num_meas = args['num_meas']
self._env = gym.make(self.env_name).unwrapped
self.counter = 0
# self._game = vizdoom.DoomGame()
# self._game.set_vizdoom_path(os.path.join(vizdoom_path,'vizdoom'))
# self._game.set_doom_game_path(os.path.join(vizdoom_path,'freedoom2.wad'))
# self._game.load_config(self.config)
# self._game.add_game_args(self.game_args)
# self.curr_map = 0
# self._game.set_doom_map(self.maps[self.curr_map])
# # set resolution
# try:
# self._game.set_screen_resolution(getattr(vizdoom.ScreenResolution, 'RES_%dX%d' % self.resolution))
# self.resize = False
# except:
# print("Requested resolution not supported:", sys.exc_info()[0], ". Setting to 160x120 and resizing")
# self._game.set_screen_resolution(getattr(vizdoom.ScreenResolution, 'RES_160X120'))
self.resize = True
# set color mode
if self.color_mode == 'RGB':
self.num_channels = 3
elif self.color_mode == 'GRAY':
self.num_channels = 1
else:
print("Unknown color mode")
raise
self.available_controls, self.continuous_controls, self.discrete_controls = self.analyze_controls(self.config)
self.num_buttons = self._env.action_space.n
assert(self.num_buttons == len(self.discrete_controls) + len(self.continuous_controls))
assert(len(self.continuous_controls) == 0) # only discrete for now
#self.num_meas = self._game.get_available_game_variables_size()
self.meas_tags = []
for nm in range(self.num_meas):
self.meas_tags.append('meas' + str(nm))
self.episode_count = 0
self.game_initialized = False
def get_screen(self):
screen = self._env.render(mode='rgb_array')
return screen
def analyze_controls(self, config_file):
with open(config_file, 'r') as myfile:
config = myfile.read()
m = re.search('available_buttons[\s]*\=[\s]*\{([^\}]*)\}', config)
avail_controls = m.group(1).split()
cont_controls = np.array([bool(re.match('.*_DELTA', c)) for c in avail_controls])
discr_controls = np.invert(cont_controls)
return avail_controls, np.squeeze(np.nonzero(cont_controls)), np.squeeze(np.nonzero(discr_controls))
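    # Illustrative example (hypothetical config contents): a line such as
    #   available_buttons = { MOVE_LEFT MOVE_RIGHT TURN_LEFT_RIGHT_DELTA }
    # would yield three available controls, with the *_DELTA entry reported as
    # continuous and the other two as discrete.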
def init_game(self):
if not self.game_initialized:
self._env.reset()
self.game_initialized = True
self.counter = 0
def close_game(self):
if self.game_initialized:
self.game_initialized = False
def step(self, action=0):
"""
Action can be either the number of action or the actual list defining the action
Args:
action - action encoded either as an int (index of the action) or as a bool vector
Returns:
img - image after the step
meas - numpy array of returned additional measurements (e.g. health, ammo) after the step
rwrd - reward after the step
term - if the state after the step is terminal
"""
self.init_game()
        # Accept either an action index or a one-hot/bool action vector.
        action_idx = action if np.isscalar(action) else int(np.argmax(action))
        state, rwrd, done, _ = self._env.step(action_idx)
self.counter += 1
if state is None:
img = None
meas = None
else:
if self.color_mode == 'RGB':
raw_img = self.get_screen()[None,:,:,:]
            elif self.color_mode == 'GRAY':
                # NOTE: screen_buffer is a ViZDoom-specific attribute; gym
                # observations do not provide it, so only RGB mode is expected
                # to work with this wrapper.
                raw_img = np.expand_dims(state.screen_buffer, 0)
if self.resize:
if raw_img is None or (isinstance(raw_img, list) and raw_img[0] is None):
img = None
else:
img = cv2.resize(raw_img[0], (self.resolution[0], self.resolution[1])).transpose((2, 0, 1))
else:
img = raw_img
meas = state #[[0,2]] # will decide later what is a good measurement for each env
term = done
if term:
if self.env_name == 'CartPole-v1':
if self.counter > 200:
print("EPISODE DONE IN")
print(self.counter)
else :
print("EPISODE DONE IN")
print(self.counter)
self.new_episode() # in multiplayer multi_simulator takes care of this
img = np.zeros((self.num_channels, self.resolution[1], self.resolution[0]), dtype=np.uint8) # should ideally put nan here, but since it's an int...
meas = np.zeros(self.num_meas, dtype=np.uint32) # should ideally put nan here, but since it's an int...
return img, meas, rwrd, term
def get_random_action(self):
return [(random.random() >= .5) for i in range(self.num_buttons)]
    def is_new_episode(self):
        # The ViZDoom game object is no longer used here; infer the episode
        # state from the wrapper's own step counter instead.
        return self.counter == 0
def new_episode(self):
self.episode_count += 1
self.counter = 0
self._env.reset()
|
|
import numpy as np
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import pdist
X=np.array([[1,2],[2,1],[3,4],[4,3]])
Z=linkage(X,'ward')
dendrogram(Z)
plt.show()
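# With these four points, Ward linkage first merges the nearby pairs
# ([1, 2], [2, 1]) and ([3, 4], [4, 3]), then joins the two resulting clusters
# at a larger height, which is what the dendrogram displays.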
|
|
"""
helper functions for Helmsman
"""
# system packages
from __future__ import print_function
import os
import sys
import warnings
import itertools
import collections
import csv
from joblib import Parallel, delayed
from logging import StreamHandler, getLogger as realGetLogger, Formatter
from colorama import Fore, Back, Style
# matrix+stats processing
import pandas as pd
import numpy as np
# vcf/fasta parsing
from cyvcf2 import VCF
from pyfaidx import Fasta
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
# PCA algorithms
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# ignore nuisance warnings when loading nimfa package
warnings.filterwarnings("ignore", category=UserWarning)
# decomposition algorithms
import nimfa
sys.path.append(os.getcwd())
###############################################################################
# Configure color stream handler
# https://gist.github.com/jonaprieto/a61d9cade3ba19487f98
###############################################################################
class ColourStreamHandler(StreamHandler):
""" A colorized output StreamHandler """
# Some basic colour scheme defaults
colours = {
'DEBUG': Fore.CYAN,
'INFO': Fore.GREEN,
'WARN': Fore.YELLOW,
'WARNING': Fore.YELLOW,
'ERROR': Fore.RED,
'CRIT': Back.RED + Fore.WHITE,
'CRITICAL': Back.RED + Fore.WHITE
}
def emit(self, record):
try:
message = self.format(record)
self.stream.write(self.colours[record.levelname] + message +
Style.RESET_ALL)
self.stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
###############################################################################
# configure logger
###############################################################################
# class initLogger:
# """ initialize logger """
# def __init__(level):
# self.level = level
def get_logger(name=None,
fmt='[%(name)s::%(funcName)s] %(levelname)s %(message)s',
level='INFO'):
""" Get and initialize a colourised logging instance if the system supports
it as defined by the log.has_colour
:param name: Name of the logger
:type name: str
:param fmt: Message format to use
:type fmt: str
:return: Logger instance
:rtype: Logger
"""
log = realGetLogger(name)
# Only enable colour if support was loaded properly
handler = ColourStreamHandler()
handler.setLevel(level)
handler.setFormatter(Formatter(fmt))
log.addHandler(handler)
log.setLevel(level)
log.propagate = 0 # Don't bubble up to the root logger
return log
util_log = get_logger(__name__, level="DEBUG")
###############################################################################
# Manipulate sequence motifs etc.
###############################################################################
def getCategory(mu_type):
"""
collapse mutation types per strand symmetry
"""
# if re.match("^[ACGT]*$", mu_type):
if mu_type in ('AC', 'TG'):
category = "T_G"
elif mu_type in ('AG', 'TC'):
category = "T_C"
elif mu_type in ('AT', 'TA'):
category = "T_A"
elif mu_type in ('CA', 'GT'):
category = "C_A"
elif mu_type in ('CG', 'GC'):
category = "C_G"
elif mu_type in ('CT', 'GA'):
category = "C_T"
else:
category = "unknown"
return category
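# Example: getCategory("GA") and getCategory("CT") both collapse to "C_T",
# since a G>A change on one strand is a C>T change on the other strand.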
def getMotif(sequence):
"""
query reference genome for local sequence motif
"""
motif = Seq(sequence, IUPAC.unambiguous_dna)
altmotif = motif.reverse_complement()
central_base_pos = (len(motif) - 1) // 2
central_base = motif[central_base_pos]
if central_base in ('C', 'T'):
motif_a = motif
else:
motif_a = altmotif
return motif_a
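# Example: getMotif("AGA") returns the reverse complement "TCT", because the
# central base "G" is not a pyrimidine; getMotif("ACA") is returned unchanged.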
def indexSubtypes(motiflength):
"""
define k-mer mutation subtypes
"""
categories = ["T_G", "T_C", "T_A", "C_G", "C_T", "C_A"]
bases = ["A", "C", "G", "T"]
flank = (motiflength - 1) // 2
if motiflength > 1:
kmers = itertools.product(bases, repeat=motiflength - 1)
subtypes_list = []
for kmer in kmers:
kmerstr = ''.join(kmer)
for category in categories:
ref = category[0]
subtype = category + "." \
+ kmerstr[0:flank] + ref + kmerstr[flank:(motiflength-1)]
subtypes_list.append(subtype)
else:
ext = [".T", ".C"]
extr = list(np.repeat(ext, 3))
subtypes_list = [m + n for m, n in zip(categories, extr)]
i = 0
subtypes_dict = {}
for subtype in sorted(subtypes_list):
subtypes_dict[subtype] = i
i += 1
util_log.debug("%s %s-mer subtypes indexed", len(subtypes_dict.keys()),
motiflength)
return subtypes_dict
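# Example: for motiflength=3 this indexes 6 * 4^2 = 96 subtypes (6 collapsed
# mutation categories times 16 flanking-base combinations), keyed e.g. as
# "C_T.ACA"; for motiflength=1 only the 6 one-base subtypes (e.g. "C_T.C")
# are indexed.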
def indexGroups(samplefile, groupvar):
"""
Build dictionary with sample ID as key, group ID as value
"""
sg_dict = {}
    with open(samplefile, 'r', encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter='\t')
        for row in reader:
            sg_dict[row['ID']] = row[groupvar]
    return sg_dict
def get_samples(sample_file):
"""
get samples from input M matrix when using aggregation mode
"""
samples = np.loadtxt(
sample_file, dtype='S120', skiprows=1, delimiter='\t', usecols=(0, ))
util_log.debug("%s contains %s samples", sample_file, len(samples))
return samples
def parseSampleFile(samplefile):
"""
get list of samples to keep if samplefile supplied
"""
# f = open(args.input, 'r', encoding = "ISO-8859-1")
    with open(samplefile, 'r', encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter='\t')
        keep_samples = [row['ID'] for row in reader]
    return keep_samples
def get_samples_vcf(args, inputvcf):
"""
get samples from VCF file
"""
if args.samplefile:
keep_samples = parseSampleFile(args.samplefile)
vcf_reader = VCF(
inputvcf, mode='rb', gts012=True, lazy=True, samples=keep_samples)
else:
vcf_reader = VCF(inputvcf, mode='rb', gts012=True, lazy=True)
if (args.samplefile and args.groupvar):
samples = indexGroups(args.samplefile, args.groupvar)
else:
samples = vcf_reader.samples
return samples
class processInput:
"""
Methods for parsing input data into sample x subtype count matrices:
- MAF format
- plain text format
- Aggregation of existing subtype count matrices
"""
def __init__(self, mode, args, subtypes_dict, par=False):
self.mode = mode
self.args = args
self.subtypes_dict = subtypes_dict
self.par = par
if self.mode == "agg":
self.data = self.process_agg()
elif self.mode == "txt":
self.data = self.process_txt()
elif self.mode == "maf":
self.data = self.process_maf()
elif self.mode == "vcf":
if (args.input.lower().endswith(('.vcf', '.vcf.gz', '.bcf'))
or args.input == "-"):
                self.par = False
self.data = self.process_vcf(args.input)
elif args.input.lower().endswith(('.txt')):
self.par = True
with open(args.input) as vcf_list_file:
vcf_list = vcf_list_file.read().splitlines()
results = Parallel(n_jobs=args.cpus) \
(delayed(self.process_vcf)(vcf) \
for vcf in vcf_list)
if args.rowwise:
count_matrix = np.vstack(results)
samples = np.array([])
for vcf in vcf_list:
samples = np.append(samples, get_samples_vcf(args, vcf))
else:
                    # Use the first result/VCF as the reference; all per-file
                    # count matrices share the same shape and sample set.
                    nrow, ncol = results[0].shape
                    count_matrix = np.zeros((nrow, ncol))
                    for count_matrix_i in results:
                        count_matrix = np.add(count_matrix, count_matrix_i)
                    samples = np.array([get_samples_vcf(args, vcf_list[0])])
self.data = collections.namedtuple('Out', ['M', 'samples'])(
count_matrix, samples)
def process_vcf(self, inputfile):
"""
Main function for parsing VCF
"""
# initialize reference genome
fasta_reader = Fasta(self.args.fastafile, read_ahead=1000000)
# initialize vcf reader
if self.args.samplefile:
keep_samples = parseSampleFile(self.args.samplefile)
vcf_reader = VCF(
inputfile,
mode='rb',
gts012=True,
lazy=True,
samples=keep_samples)
else:
vcf_reader = VCF(inputfile, mode='rb', gts012=True, lazy=True)
nbp = (self.args.length - 1) // 2
# index samples
if (self.args.samplefile and self.args.groupvar):
all_samples = vcf_reader.samples
sg_dict = indexGroups(self.args.samplefile, self.args.groupvar)
samples = sorted(list(set(sg_dict.values())))
# get boolean vector of samples that are in sample file
samples_keep_match = np.isin(all_samples, list(sg_dict.keys()))
# get indices of matching samples
samples_keep_idx = np.where(samples_keep_match)
# get list of individual sample ids to keep
samples_keep = sorted(list(set(sg_dict.keys())))
util_log.debug("%s samples will be pooled into %s groups: %s",
len(all_samples), len(samples), ",".join(samples))
else:
samples = vcf_reader.samples
samples_dict = {}
for i, sample in enumerate(samples):
samples_dict[sample] = i
# Query records in VCF and build matrix
M = np.zeros((len(samples), len(self.subtypes_dict)))
numsites_keep = 0
numsites_skip = 0
chrseq = '0'
chr_check = "none"
for record in vcf_reader:
# Filter by SNP status, # alt alleles, and FILTER column
if (not record.is_snp or len(record.ALT) != 1
or record.FILTER is not None):
numsites_skip += 1
continue
# Filter by allele count
if record.INFO['AC'] > self.args.maxac > 0:
numsites_skip += 1
continue
row_chr = record.CHROM
# check chromosome formatting matches between MAF and fasta files
if numsites_keep == 0:
if "chr1" in fasta_reader and "chr" not in row_chr:
chr_check = "add"
util_log.debug(
"formatting mismatch: 'chr' only in fasta file")
elif "chr1" not in fasta_reader and "chr" in row_chr:
chr_check = "delete"
util_log.debug(
"formatting mismatch: 'chr' only in MAF file")
else:
util_log.debug("chromosome formatting matches")
if chr_check == "add":
row_chr = "chr" + row_chr
elif chr_check == "delete":
row_chr = row_chr.replace('chr', '')
if row_chr != chrseq:
sequence = fasta_reader[row_chr]
chrseq = row_chr
# check and update chromosome sequence
# if record.CHROM != chrseq:
# sequence = fasta_reader[record.CHROM]
# chrseq = record.CHROM
lseq = sequence[record.POS - (nbp + 1):record.POS + nbp].seq
mu_type = record.REF + str(record.ALT[0])
category = getCategory(mu_type)
motif_a = getMotif(lseq)
subtype = str(category + "." + motif_a)
if subtype not in self.subtypes_dict:
numsites_skip += 1
continue
st = self.subtypes_dict[subtype]
# currently only works with singletons--
if (self.args.samplefile and self.args.groupvar):
gt_new = record.gt_types
if (self.args.impute and 3 in gt_new):
gt_complete = gt_new[gt_new != 3]
freq = sum(gt_complete) / len(gt_complete)
gt_new[gt_new == 3] = freq
else:
gt_new[gt_new == 3] = 0
# if not any("/" in b for b in record.gt_bases):
if self.args.haploid:
gt_new = np.divide(gt_new, 2.)
# get array of genotypes only for samples in samplefile
gt_sub = gt_new[samples_keep_idx]
if gt_sub.sum() == 0:
numsites_skip += 1
continue
# initialize dict of group allele counts = 0
sg_counts = {k: 0 for k in sorted(list(set(sg_dict.values())))}
# initialize dict of allele counts per sample
d2 = dict(zip(samples_keep, gt_sub))
# iterate per-sample counts and update per-group counts
for key, value in d2.items():
sg_counts[sg_dict[key]] += value
# add to matrix
M[:, st] = M[:, st] + list(sg_counts.values())
numsites_keep += 1
else:
gt_new = record.gt_types
if (self.args.impute and 3 in gt_new):
gt_complete = gt_new[gt_new != 3]
freq = sum(gt_complete) / len(gt_complete)
gt_new[gt_new == 3] = freq
else:
gt_new[gt_new == 3] = 0
# if not any("/" in b for b in record.gt_bases):
if self.args.haploid:
gt_new = np.divide(gt_new, 2.)
M[:, st] = M[:, st] + gt_new
numsites_keep += 1
# util_log.debug(gt_new)
if numsites_keep % 100000 != 0:
continue
util_log.debug("%s : %s sites counted", inputfile, numsites_keep)
util_log.debug("%s : %s sites counted", inputfile, numsites_keep)
util_log.debug("%s : %s sites skipped", inputfile, numsites_skip)
out = collections.namedtuple('Out', ['M', 'samples'])(M, samples)
if self.par:
out = M
return out
def process_maf(self):
"""
process MAF files
"""
fasta_reader = Fasta(self.args.fastafile, read_ahead=1000000)
nbp = (self.args.length - 1) // 2
samples_dict = {}
# M = np.zeros((len(samples), len(subtypes_dict)))
numsites_keep = 0
numsites_skip = 0
chrseq = '0'
maf_file = open(self.args.input, 'r', encoding="ISO-8859-1")
reader = csv.DictReader(
filter(lambda row: row[0] != '#', maf_file), delimiter='\t')
counter = 0
chr_check = "none"
for row in reader:
if (row['Variant_Type'] not in ["SNP", "SNV"]):
continue
if 'Start_Position' in row:
pos = int(row['Start_Position'])
else:
pos = int(row['Start_position'])
ref = row['Reference_Allele']
alt = row['Tumor_Seq_Allele2']
row_chr = row['Chromosome']
sample = row[self.args.groupvar]
# check chromosome formatting matches between MAF and fasta files
if counter == 0:
if "chr1" in fasta_reader and "chr" not in row_chr:
chr_check = "add"
util_log.debug(
"formatting mismatch: 'chr' only in fasta file")
elif "chr1" not in fasta_reader and "chr" in row_chr:
chr_check = "delete"
util_log.debug(
"formatting mismatch: 'chr' only in MAF file")
else:
util_log.debug("chromosome formatting matches")
if chr_check == "add":
row_chr = "chr" + row_chr
elif chr_check == "delete":
row_chr = row_chr.replace('chr', '')
if row_chr != chrseq:
sequence = fasta_reader[row_chr]
chrseq = row_chr
# if row['Chromosome'] != chrseq:
# sequence = fasta_reader[row['Chromosome']]
# chrseq = row['Chromosome']
counter += 1
mu_type = ref + alt
category = getCategory(mu_type)
lseq = sequence[pos - (nbp + 1):pos + nbp].seq
motif_a = getMotif(lseq)
subtype = str(category + "." + motif_a)
# st = subtypes_dict[subtype]
if sample not in samples_dict:
samples_dict[sample] = {}
if subtype not in samples_dict[sample]:
samples_dict[sample][subtype] = 1
else:
samples_dict[sample][subtype] += 1
if counter % 1000 != 0:
continue
util_log.debug("%s : %s sites counted", self.args.input, counter)
M = pd.DataFrame(samples_dict).T.fillna(0).values
samples = sorted(samples_dict)
out = collections.namedtuple('Out', ['M', 'samples'])(M, samples)
return out
def process_agg(self):
"""
aggregate M matrices from list of input files
"""
inputM = self.args.input
colnames = ["ID"]
M_colnames = colnames + list(sorted(self.subtypes_dict.keys()))
colrange = range(1, len(M_colnames))
if (inputM.lower().endswith('m_samples.txt')
or inputM.lower().endswith('m_regions.txt')):
with open(inputM) as f:
file_list = f.read().splitlines()
# M output by sample
if inputM.lower().endswith('m_samples.txt'):
M_out = np.array([M_colnames])
samples = np.empty((0, 100))
for mfile in file_list:
samples_it = get_samples(mfile)
samples = np.concatenate((samples, samples_it), axis=None)
M_it = np.loadtxt(mfile, skiprows=1, usecols=colrange)
M_it = np.concatenate((np.array([samples_it]).T, M_it),
axis=1)
M_out = np.concatenate((M_out, M_it), axis=0)
M = np.delete(M_out, 0, 0)
M = np.delete(M, 0, 1)
                M = M.astype(float)
# M output by region
elif inputM.lower().endswith('m_regions.txt'):
samples = get_samples(file_list[0])
M_out = np.zeros((len(samples), len(M_colnames) - 1))
for mfile in file_list:
M_it = np.loadtxt(mfile, skiprows=1, usecols=colrange)
M_out = np.add(M_out, M_it)
                M = M_out.astype(float)
else:
samples = get_samples(inputM)
M = np.loadtxt(inputM, skiprows=1, usecols=colrange)
            M = M.astype(float)
out = collections.namedtuple('Out', ['M', 'samples'])(M, samples)
return out
def process_txt(self):
"""
process tab-delimited text file, containing the following columns:
CHR POS REF ALT SAMPLE_ID
"""
fasta_reader = Fasta(self.args.fastafile, read_ahead=1000000)
nbp = (self.args.length - 1) // 2
samples_dict = {}
numsites_keep = 0
numsites_skip = 0
chrseq = '0'
with open(self.args.input, 'r') as txt_file:
reader = csv.reader(txt_file, delimiter='\t')
for row in reader:
chrom = row[0]
pos = int(row[1])
ref = row[2]
alt = row[3]
sample = row[4]
if chrom != chrseq:
sequence = fasta_reader[chrom]
chrseq = chrom
if (len(alt) == 1 and len(ref) == 1):
mu_type = ref + alt
category = getCategory(mu_type)
if nbp > 0:
lseq = sequence[pos - (nbp + 1):pos + nbp].seq
else:
lseq = sequence[pos - 1].seq
# eprint("lseq:", lseq)
motif_a = getMotif(lseq)
subtype = str(category + "." + motif_a)
if subtype not in self.subtypes_dict:
continue
if sample not in samples_dict:
samples_dict[sample] = {}
if subtype not in samples_dict[sample]:
samples_dict[sample][subtype] = 1
else:
samples_dict[sample][subtype] += 1
mdf = pd.DataFrame(samples_dict).T.fillna(0)
samples = mdf.index.tolist() #instead of using samples_dict with sorted(), which leads to mismatching, simply retain the explicit ordering of the matrix dataframe.
M = mdf.values
out = collections.namedtuple('Out', ['M', 'samples'])(M, samples)
return out
class DecompModel:
"""
Class for fitting PCA and NMF models
"""
def __init__(self, M_run, rank, seed, decomp):
self.M_run = M_run / (M_run.sum(axis=1) + 1e-8)[:, None]
self.rank = rank
self.seed = seed
self.decomp = decomp
self.evar_dict = {}
if self.decomp == "pca":
# standarize input matrix
X_std = StandardScaler().fit_transform(self.M_run)
# run PCA
pca = PCA(n_components=self.M_run.shape[1])
W = pca.fit_transform(X_std)
H = pca.components_.T * np.sqrt(pca.explained_variance_)
if self.rank > 0:
self.modrank = self.rank
evar = np.cumsum(pca.explained_variance_ratio_)[self.rank - 1]
self.evar_dict[self.modrank] = evar
elif self.rank == 0:
util_log.debug("Finding optimal rank for %s decomposition",
decomp)
evar_prev = 0
i = 1
for evar in np.cumsum(pca.explained_variance_ratio_):
self.modrank = i
# self.evar_list.append(evar)
self.evar_dict[self.modrank] = evar
if evar - evar_prev < 0.01:
self.modrank = i - 1
evar = evar_prev
break
evar_prev = evar
util_log.debug(
"Explained variance for first %s %s components: %s", i,
decomp.upper(), evar)
i += 1
self.W = W[:, :self.modrank]
self.H = H[:self.modrank, :]
elif self.decomp == "nmf":
if self.rank > 0:
model = self.run_nmf_model(self.rank)
self.modrank = self.rank
elif self.rank == 0:
util_log.debug("Finding optimal rank for %s decomposition",
decomp)
                evarprev = 0
for i in range(1, self.M_run.shape[0]):
model = self.run_nmf_model(rank=i)
model_fit = model()
evar = model_fit.fit.evar()
self.modrank = i
if (i > 2 and evar - evarprev < 0.001):
model = self.run_nmf_model(rank=i - 1)
self.modrank = i - 1
break
self.evar_dict[self.modrank] = evar
evarprev = evar
util_log.debug(
"Explained variance for first %s %s components: %s", i,
decomp.upper(), evar)
model_fit = model()
self.evar_dict[self.modrank] = model_fit.fit.evar()
self.W = model_fit.basis()
self.H = model_fit.coef()
# Specify NMF model
# options can be added/modified per
# http://nimfa.biolab.si/nimfa.methods.factorization.nmf.html
def run_nmf_model(self, rank):
"""
Run NMF model
"""
prng = np.random.RandomState(self.seed)
W_init = prng.rand(self.M_run.shape[0], rank)
H_init = prng.rand(rank, self.M_run.shape[1])
model = nimfa.Nmf(
self.M_run,
rank=rank,
# seed=None,
H=H_init,
W=W_init,
update="divergence",
objective='div',
n_run=1,
max_iter=200)
return model
class writeOutput:
"""
Class of functions for writing the output of Helmsman.
"""
def __init__(self, dat_paths, samples, subtypes_dict):
self.dat_paths = dat_paths
self.samples = samples
self.subtypes_dict = subtypes_dict
def writeW(self, decomp_data):
""" write W matrix """
num_sigs = decomp_data.W.shape[1]
W_out = pd.DataFrame(
data=decomp_data.W,
index=self.samples[0],
columns=["S" + str(i) for i in range(1, num_sigs + 1)])
W_out.to_csv(self.dat_paths["W_path"], index_label="ID", sep="\t")
def writeH(self, decomp_data):
""" write H matrix """
num_sigs = decomp_data.H.shape[0]
H_out = pd.DataFrame(
data=decomp_data.H,
index=["S" + str(i) for i in range(1, num_sigs + 1)],
columns=list(sorted(self.subtypes_dict.keys())))
H_out.to_csv(self.dat_paths["H_path"], index_label="Sig", sep="\t")
def writeM(self, count_matrix):
""" write M matrix """
count_matrix_df = pd.DataFrame(
data=count_matrix,
index=self.samples[0],
columns=list(sorted(self.subtypes_dict.keys())))
count_matrix_df.to_csv(
self.dat_paths["M_path"], index_label="ID", sep="\t")
freq_matrix = count_matrix / (count_matrix.sum(axis=1) + 1e-8)[:, None]
freq_matrix_df = pd.DataFrame(
data=freq_matrix,
index=self.samples[0],
columns=list(sorted(self.subtypes_dict.keys())))
freq_matrix_df.to_csv(
self.dat_paths["M_path_rates"], index_label="ID", sep="\t")
def writeR(package, projectdir, matrixname):
"""
auto-generate R script
"""
rscript_path = projectdir + "/" + "Helmsman_to_" + package + ".R"
rscript = open(rscript_path, "w+")
print("library(\"" + package + "\")", file=rscript)
print("library(\"devtools\")", file=rscript)
print("install_github(\"carjed/musigtools\")", file=rscript)
print("library(\"musigtools\")", file=rscript)
print(
"mu_counts <- read.table(\"" + projectdir + "/" + matrixname +
".txt\", header=T, stringsAsFactors=F)",
file=rscript)
print("msm <- format_counts(mu_counts, \"" + package + "\")", file=rscript)
print(
"message(\"The mutation spectra matrix generated by Helmsman is " +
"now formatted for use with the " + package + " package, and loaded " +
"in a data frame named 'msm'. Please refer to the " + package +
" documentation for help with analyzing this matrix\")",
file=rscript)
|
|
#!/usr/bin/env python3
# Copyright 2018 Lael D. Barlow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mask a nex alignment by adding a new taxon, "MASK" sequence.
This is only a rough mask that is applied based on very simple criteria.
It would be good to eventually add more sophisticated criteria, especially
criteria based on similarity (according to a scoring matrix) rather than just
identity, or number of sequences without gaps.
"""
import sys
import os
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'amoebaelib'))
from Bio import AlignIO
from Bio.Alphabet import IUPAC, Gapped
from afa_to_nex import delete_extra_mesquite_lines
import numpy as np
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import collections
def apply_mask_criteria(column):
"""Apply simple masking criteria to a single column, and return '-' if the
column does not meet the criteria, and 'I' if it does.
"""
# Return '-' by default.
mask_char = '-'
# Get column features.
num_seqs = len(column)
half_num_seqs = num_seqs / 2
num_gaps_in_col = column.count('-')
column_no_gaps = column.replace('-', '')
# Check that the column is not entirely composed of gaps.
#assert not column_no_gaps == '', "Error: Empty positions in input alignment."
if column_no_gaps == '':
return mask_char
elif not column_no_gaps == '':
most_common_residue = collections.Counter(column_no_gaps).most_common(1)[0]
most_common_residue_count = most_common_residue[1]
percent_identity = most_common_residue_count * 100 / num_seqs
# If less than half the sequences have a gap at this position of the
# alignment, then include the position.
#if num_gaps_in_col < half_num_seqs:
# mask_char = 'I'
if num_gaps_in_col < (num_seqs * 0.30):
mask_char = 'I'
# If percent identity is at least 50, then include position.
if percent_identity >= 50:
mask_char = 'I'
return mask_char
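# Illustrative example: for the column "AAAA-A" (6 sequences), one gap is below
# 30% of the sequences and the most common residue "A" gives well over 50%
# identity, so the position is included ('I'); a column like "A-----" fails
# both criteria and is masked ('-').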
def mask_alignment(alignment):
"""Takes an alignment object and adds a 'MASK' sequence using certain
criteria for inclusion of positions.
"""
# Get length of sequences and number of sequences in alignment.
seq_len = alignment.get_alignment_length()
num_seqs = len(alignment)
# get a list of columns as strings in the original alignment.
columns = [alignment[:, col] for col in range(seq_len)]
# Iterate over columns and make a mask sequence to append.
mask_seq = ''
for col in columns:
mask_char = apply_mask_criteria(col)
mask_seq = mask_seq + mask_char
# Generate a SeqRecord object with the mask_seq.
empty_mask_seq = Seq(mask_seq, IUPAC.protein)
empty_mask_rec = SeqRecord(empty_mask_seq, id='MASK', name='MASK')
# Add the mask_seq sequence to the alignment.
masked_alignment = alignment
masked_alignment.append(empty_mask_rec)
return masked_alignment
def mask_nex(infilepath, outfilepath=None):
"""Takes a filepath and adds a MASK sequence.
"""
# Delete extra lines in input nexus file, if present, because biopython cannot
# read nexus alignments with these extra lines.
delete_extra_mesquite_lines(infilepath)
# Define the name of the output file.
if outfilepath is None:
outfilepath = infilepath.replace('.nex', '.mask.nex')
with open(infilepath) as infh, open(outfilepath, 'w') as o:
# Check that the input file has the filename extension ".nex".
assert infilepath.endswith('.nex'), "Error: Input file name must have the\
extension '.nex'."
# Read the alignment file.
alignment = AlignIO.read(infh, 'nexus')
masked_alignment = mask_alignment(alignment)
AlignIO.write(masked_alignment, o, 'nexus')
|
|
import pytest
import numpy as np
from deduplipy.string_metrics.string_metrics import (length_adjustment, adjusted_ratio, adjusted_token_sort_ratio,
adjusted_token_set_ratio, adjusted_partial_ratio)
def test_length_adjustment():
assert length_adjustment('', '') == 0
assert length_adjustment('', 'aaaaaaaaaaaaaaaa') == 0
assert length_adjustment('aaaaaaaaaaaaaaaa', '') == 0
np.testing.assert_approx_equal(length_adjustment('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), 1, significant=2)
@pytest.mark.parametrize('string_metric',
[adjusted_ratio, adjusted_token_sort_ratio, adjusted_token_set_ratio, adjusted_partial_ratio])
def test_adjusted_ratio(string_metric):
assert string_metric('', '') == 0
assert string_metric('', 'aaaaaaaaaaaaaaaa') == 0
assert string_metric('aaaaaaaaaaaaaaaa', '') == 0
np.testing.assert_approx_equal(string_metric('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), 100, significant=2)
|
|
# coding: utf-8
# In[15]:
from matplotlib import pyplot as plt
import numpy as np
#get_ipython().run_line_magic('matplotlib', 'inline')
x_old, x_new, gamma, prec = 0, 6, 0.01, 0.00001
f = lambda x: x**4 - 3 * x**3 + 2
df = lambda x: 4*(x**3) - 9*(x**2)
to_plot =[]
i = 0
to_plot.append(x_new)
while abs(x_new - x_old) > prec:
x_old = x_new
x_new += -gamma * df(x_old)
#to_plot.append(x_new)
i += 1
data = np.linspace(-10, 10, 10)
plt.plot(data, f(data), 'b')
to_plot.append(x_new)
plt.plot(to_plot, [f(x) for x in to_plot], 'r*')
plt.plot(to_plot, [f(x) for x in to_plot], 'g')
plt.show()
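# Sanity check: df(x) = 4x^3 - 9x^2 vanishes at x = 0 and x = 9/4 = 2.25, and
# f''(2.25) > 0, so the descent started from x = 6 should converge near x = 2.25.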
|
|
"""
* Program to practice with OpenCV drawing methods.
"""
import skimage.io
import numpy as np
import random
# create the black canvas
image = np.zeros(shape=(600, 800, 3), dtype="uint8")
# WRITE YOUR CODE TO DRAW ON THE IMAGE HERE
# display the results
skimage.io.imshow(image)
skimage.io.show()
|
|
"""
RegionFile class.
Reads and writes chunks to *.mcr* (Minecraft Region)
and *.mca* (Minecraft Anvil Region) files
"""
from __future__ import absolute_import, division
import logging
import os
import struct
import zlib
import time
import numpy
from mceditlib import nbt
from mceditlib.exceptions import ChunkNotPresent, RegionFormatError
REGION_DEBUG = 5
logging.addLevelName(REGION_DEBUG, "REGION_DEBUG")
log = logging.getLogger(__name__)
# Disable region debugging today.
log.setLevel(logging.DEBUG)
def region_debug(msg, *args, **kwargs):
return log.log(REGION_DEBUG, msg, *args, **kwargs)
__author__ = 'Rio'
def deflate(data):
return zlib.compress(data, 2)
def inflate(data):
return zlib.decompress(data)
class RegionFile(object):
SECTOR_BYTES = 4096
CHUNK_HEADER_SIZE = 5
VERSION_GZIP = 1
VERSION_DEFLATE = 2
def __init__(self, path, readonly=False):
self.path = path
newFile = False
if not os.path.exists(path):
if readonly:
raise IOError("Region file not found: %r" % path)
open(path, "w").close()
newFile = True
filesize = os.path.getsize(path)
mode = "rb" if readonly else "rb+"
with open(self.path, mode) as f:
if newFile:
filesize = self.SECTOR_BYTES * 2
f.truncate(filesize)
self.offsets = numpy.zeros(self.SECTOR_BYTES//4, dtype='>u4')
self.modTimes = numpy.zeros(self.SECTOR_BYTES//4, dtype='>u4')
else:
if not readonly:
# Increase file size if not a multiple of sector size
if filesize & 0xfff:
filesize = (filesize | 0xfff) + 1
f.truncate(filesize)
# Increase file size if empty (new regionfile)
if filesize == 0:
filesize = self.SECTOR_BYTES * 2
f.truncate(filesize)
f.seek(0)
offsetsData = f.read(self.SECTOR_BYTES)
modTimesData = f.read(self.SECTOR_BYTES)
self.offsets = numpy.fromstring(offsetsData, dtype='>u4')
self.modTimes = numpy.fromstring(modTimesData, dtype='>u4')
self.freeSectors = [True] * (filesize // self.SECTOR_BYTES)
self.freeSectors[0:2] = False, False
if not newFile:
needsRepair = False
# Populate freeSectors table
for offset in self.offsets:
sector = offset >> 8
count = offset & 0xff
for i in xrange(sector, sector + count):
if i >= len(self.freeSectors):
log.warn("Region file offset table points to sector %d (past the end of the file)", i)
needsRepair = True
break
if self.freeSectors[i] is False:
needsRepair = True
self.freeSectors[i] = False
if needsRepair:
self.repair()
region_debug("Found region file %s with %d/%d sectors used and %d chunks present",
os.path.basename(path), self.usedSectors, self.sectorCount, self.chunkCount)
else:
region_debug("Created new region file %s", os.path.basename(path))
def __repr__(self):
return "%s(\"%s\")" % (self.__class__.__name__, self.path)
@property
def usedSectors(self):
return len(self.freeSectors) - sum(self.freeSectors)
@property
def sectorCount(self):
return len(self.freeSectors)
@property
def chunkCount(self):
return numpy.sum(self.offsets > 0)
def chunkPositions(self):
for index, offset in enumerate(self.offsets):
if offset:
cx = index & 0x1f
cz = index >> 5
yield (cx, cz)
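    # Each 4-byte offset table entry packs the chunk location as
    # (sector_start << 8) | sector_count: the top three bytes give the first
    # sector of the chunk and the low byte gives how many sectors it spans,
    # which is why `offset >> 8` and `offset & 0xff` appear throughout.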
def repair(self):
"""
Fix the following problems with the region file:
- remove offset table entries pointing past the end of the file
- remove entries that overlap other entries
- relocate offsets for chunks whose xPos,yPos don't match
"""
lostAndFound = {}
_freeSectors = [True] * len(self.freeSectors)
_freeSectors[0] = _freeSectors[1] = False
deleted = 0
recovered = 0
log.info("Beginning repairs on {file} ({chunks} chunks)".format(file=os.path.basename(self.path), chunks=sum(self.offsets > 0)))
for index, offset in enumerate(self.offsets):
if offset:
cx = index & 0x1f
cz = index >> 5
sectorStart = offset >> 8
sectorCount = offset & 0xff
try:
if sectorStart + sectorCount > len(self.freeSectors):
raise RegionFormatError("Offset {start}:{end} ({offset}) at index {index} pointed outside of "
"the file".format(start=sectorStart, end=sectorStart + sectorCount, index=index, offset=offset))
data = self.readChunkBytes(cx, cz)
chunkTag = nbt.load(buf=data)
lev = chunkTag["Level"]
xPos = lev["xPos"].value & 0x1f
zPos = lev["zPos"].value & 0x1f
overlaps = False
for i in xrange(sectorStart, sectorStart + sectorCount):
if _freeSectors[i] is False:
overlaps = True
_freeSectors[i] = False
if xPos != cx or zPos != cz:
lostAndFound[xPos, zPos] = data
raise RegionFormatError("Chunk {found} was found in the slot reserved for {expected}".format(found=(xPos, zPos), expected=(cx, cz)))
if overlaps:
raise RegionFormatError("Chunk {found} (in slot {expected}) has overlapping sectors with another chunk!".format(found=(xPos, zPos), expected=(cx, cz)))
except Exception as e:
log.info("Unexpected chunk data at sector {sector} ({exc})".format(sector=sectorStart, exc=e))
self._setOffset(cx, cz, 0)
deleted += 1
for cPos, foundData in lostAndFound.iteritems():
cx, cz = cPos
if self._getOffset(cx, cz) == 0:
log.info("Found chunk {found} and its slot is empty, recovering it".format(found=cPos))
self.writeChunkBytes(cx, cz, foundData)
recovered += 1
log.info("Repair complete. Removed {0} chunks, recovered {1} chunks, net {2}".format(deleted, recovered, recovered - deleted))
def readChunkCompressed(self, cx, cz):
"""
Read a chunk and return its compression type and the compressed data as a (data, fmt) tuple
"""
cx &= 0x1f
cz &= 0x1f
offset = self._getOffset(cx, cz)
if offset == 0:
raise ChunkNotPresent((cx, cz))
sectorStart = offset >> 8
numSectors = offset & 0xff
if numSectors == 0:
raise ChunkNotPresent((cx, cz))
if sectorStart + numSectors > len(self.freeSectors):
raise ChunkNotPresent((cx, cz))
with open(self.path, "rb") as f:
f.seek(sectorStart * self.SECTOR_BYTES)
data = f.read(numSectors * self.SECTOR_BYTES)
if len(data) < 5:
raise RegionFormatError("Chunk %s data is only %d bytes long (expected 5)" % ((cx, cz), len(data)))
# region_debug("REGION LOAD {0},{1} sector {2}".format(cx, cz, sectorStart))
length = struct.unpack_from(">I", data)[0]
fmt = struct.unpack_from("B", data, 4)[0]
data = data[5:length + 5]
return data, fmt
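    # The on-disk chunk payload starts with a 5-byte header: a big-endian
    # uint32 length (covering the compression byte plus the data) followed by a
    # single compression-type byte (1 = gzip, 2 = zlib/deflate), which is what
    # the struct.unpack_from calls above decode.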
def readChunkBytes(self, cx, cz):
"""
:param cx:
:type cx:
:param cz:
:type cz:
:return:
:rtype: bytes
"""
data, fmt = self.readChunkCompressed(cx, cz)
if data is None:
return None
if fmt == self.VERSION_GZIP:
return nbt.gunzip(data)
if fmt == self.VERSION_DEFLATE:
return inflate(data)
raise RegionFormatError("Unknown compress format: {0}".format(fmt))
def writeChunkBytes(self, cx, cz, uncompressedData):
data = deflate(uncompressedData)
self.writeChunkCompressed(cx, cz, data, self.VERSION_DEFLATE)
def writeChunkCompressed(self, cx, cz, data, format):
cx &= 0x1f
cz &= 0x1f
offset = self._getOffset(cx, cz)
sectorNumber = offset >> 8
sectorsAllocated = offset & 0xff
sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) // self.SECTOR_BYTES + 1
        if sectorsNeeded >= 256:
            err = RegionFormatError("Cannot save chunk %s with compressed length %s (exceeds 1 megabyte)" %
                                    ((cx, cz), len(data)))
            err.chunkPosition = cx, cz
            raise err
if sectorNumber != 0 and sectorsAllocated >= sectorsNeeded:
region_debug("REGION SAVE {0},{1} rewriting {2}b".format(cx, cz, len(data)))
self.writeSector(sectorNumber, data, format)
else:
# we need to allocate new sectors
# mark the sectors previously used for this chunk as free
for i in xrange(sectorNumber, sectorNumber + sectorsAllocated):
self.freeSectors[i] = True
runLength = 0
runStart = 0
try:
runStart = self.freeSectors.index(True)
for i in range(runStart, len(self.freeSectors)):
if runLength:
if self.freeSectors[i]:
runLength += 1
else:
runLength = 0
elif self.freeSectors[i]:
runStart = i
runLength = 1
if runLength >= sectorsNeeded:
break
except ValueError:
pass
# we found a free space large enough
if runLength >= sectorsNeeded:
region_debug("REGION SAVE {0},{1}, reusing {2}b".format(cx, cz, len(data)))
sectorNumber = runStart
self._setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
self.writeSector(sectorNumber, data, format)
self.freeSectors[sectorNumber:sectorNumber + sectorsNeeded] = [False] * sectorsNeeded
else:
# no free space large enough found -- we need to grow the
# file
region_debug("REGION SAVE {0},{1}, growing by {2}b".format(cx, cz, len(data)))
with open(self.path, "rb+") as f:
f.seek(0, 2)
filesize = f.tell()
sectorNumber = len(self.freeSectors)
assert sectorNumber * self.SECTOR_BYTES == filesize
filesize += sectorsNeeded * self.SECTOR_BYTES
f.truncate(filesize)
self.freeSectors += [False] * sectorsNeeded
self._setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
self.writeSector(sectorNumber, data, format)
self.setTimestamp(cx, cz)
def writeSector(self, sectorNumber, data, format):
with open(self.path, "rb+") as f:
region_debug("REGION: Writing sector {0}".format(sectorNumber))
f.seek(sectorNumber * self.SECTOR_BYTES)
f.write(struct.pack(">I", len(data) + 1)) # // chunk length
f.write(struct.pack("B", format)) # // chunk version number
f.write(data) # // chunk data
# f.flush()
def containsChunk(self, cx, cz):
return self._getOffset(cx, cz) != 0
def _getOffset(self, cx, cz):
cx &= 0x1f
cz &= 0x1f
return self.offsets[cx + cz * 32]
def _setOffset(self, cx, cz, offset):
cx &= 0x1f
cz &= 0x1f
self.offsets[cx + cz * 32] = offset
with open(self.path, "rb+") as f:
f.seek(0)
f.write(self.offsets.tostring())
def deleteChunk(self, cx, cz):
offset = self._getOffset(cx, cz)
sectorNumber = offset >> 8
sectorsAllocated = offset & 0xff
for i in range(sectorNumber, sectorNumber + sectorsAllocated):
self.freeSectors[i] = True
self._setOffset(cx, cz, 0)
def getTimestamp(self, cx, cz):
cx &= 0x1f
cz &= 0x1f
return self.modTimes[cx + cz * 32]
def setTimestamp(self, cx, cz, timestamp=None):
if timestamp is None:
timestamp = time.time()
cx &= 0x1f
cz &= 0x1f
self.modTimes[cx + cz * 32] = timestamp
with open(self.path, "rb+") as f:
f.seek(self.SECTOR_BYTES)
f.write(self.modTimes.tostring())
|
|
# Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
# License: BSD 3 clause
"""
Comparisons of decision functions
=================================
This example allows to compare the decision functions of several random forest types
of estimators. The following classifiers are used:
- **AMF** stands for `AMFClassifier` from `onelearn`
- **MF** stands for `MondrianForestClassifier` from `scikit-garden`
- **RF** stands for `RandomForestClassifier` from `scikit-learn`
- **ET** stands for `ExtraTreesClassifier` from `scikit-learn`
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import logging
# from sklearn.preprocessing import MinMaxScaler
# from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.datasets import make_moons, make_classification, make_circles
from sklearn.model_selection import train_test_split
# from skgarden import MondrianForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
sys.path.extend([".", ".."])
from linlearn import BinaryClassifier
from plot import (
get_mesh,
plot_contour_binary_classif,
plot_scatter_binary_classif,
)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
np.set_printoptions(precision=2)
# n_samples = 1000
n_samples = 200
random_state = 42
h = 0.01
levels = 20
use_aggregation = True
split_pure = True
n_estimators = 100
step = 1.0
dirichlet = 0.5
norm = plt.Normalize(vmin=0.0, vmax=1.0)
def simulate_data(dataset="moons"):
if dataset == "moons":
X, y = make_moons(n_samples=n_samples, noise=0.2, random_state=random_state)
n_samples_outliers = 20
X_outlier = 0.2 * np.random.randn(n_samples_outliers, 2)
X_outlier[:, 0] -= 3
X_outlier[:, 1] += 1
y_outlier = np.ones(n_samples_outliers)
X = np.concatenate((X, X_outlier), axis=0)
y = np.concatenate((y, y_outlier), axis=0)
elif dataset == "circles":
X, y = make_circles(
n_samples=n_samples, noise=0.1, factor=0.5, random_state=random_state
)
elif dataset == "linear":
X, y = make_classification(
n_samples=n_samples,
n_features=2,
n_redundant=0,
n_informative=2,
random_state=random_state,
n_clusters_per_class=1,
flip_y=0.001,
class_sep=2.0,
)
rng = np.random.RandomState(random_state)
X += 1.5 * rng.uniform(size=X.shape)
n_samples_outliers = 30
X_outlier = 0.5 * np.random.randn(n_samples_outliers, 2)
X_outlier[:, 0] += 20
X_outlier[:, 1] += 2
y_outlier = np.zeros(n_samples_outliers)
X = np.concatenate((X, X_outlier), axis=0)
y = np.concatenate((y, y_outlier), axis=0)
else:
X, y = make_moons(n_samples=n_samples, noise=0.2, random_state=random_state)
X = StandardScaler().fit_transform(X)
return X, y
# datasets = [simulate_data("moons"), simulate_data("circles"), simulate_data("linear")]
datasets = [simulate_data("moons"), simulate_data("linear")]
# TODO: polynomial logistic regression
# TODO: different values for the block_size
n_classifiers = 3
n_datasets = 2
_ = plt.figure(figsize=(2 * (n_classifiers + 1), 2 * n_datasets))
def get_classifiers():
# kwargs = {"tol": 1e-15, "max_iter": 1000, "fit_intercept": False}
kwargs = {"fit_intercept": True}
return [
("Logistic Regression", LogisticRegression(**kwargs)),
("Binary Classifier ERM", BinaryClassifier(**kwargs),),
("Binary Classifier MOM", BinaryClassifier(strategy="mom", **kwargs),),
]
i = 1
for ds_cnt, ds in enumerate(datasets):
print("-" * 80)
X, y = ds
# y[y == 0] = -1
# print(y)
xx, yy, X_mesh = get_mesh(X, h=h, padding=0.2)
ax = plt.subplot(n_datasets, n_classifiers + 1, i)
if ds_cnt == 0:
title = "Input data"
else:
title = None
plot_scatter_binary_classif(ax, xx, yy, X, y, s=20, alpha=0.7, title=title)
i += 1
classifiers = get_classifiers()
for name, clf in classifiers:
ax = plt.subplot(n_datasets, n_classifiers + 1, i)
if hasattr(clf, "clear"):
clf.clear()
if hasattr(clf, "partial_fit"):
clf.partial_fit(X, y)
else:
clf.fit(X, y)
print(name)
print("intercept_: ", clf.intercept_, "coef_: ", clf.coef_)
Z = clf.predict_proba(X_mesh)[:, 1].reshape(xx.shape)
if ds_cnt == 0:
plot_contour_binary_classif(
ax, xx, yy, Z, levels=levels, title=name, norm=norm
)
else:
plot_contour_binary_classif(ax, xx, yy, Z, levels=levels, norm=norm)
# plot_contour_binary_classif(ax, xx, yy, Z, levels=levels, norm=None)
i += 1
plt.tight_layout()
plt.show()
# plt.savefig("decisions.pdf")
# logging.info("Saved the decision functions in 'decisions.pdf'")
|
|
import numpy as np
from numpy.random import default_rng, Generator
from .metric import accuracy
from ..classification import KNNClassifier
from ml_utils import classification
def k_fold_split(n_splits: int, n_instances: int, rng: Generator = default_rng()) -> list:
""" Split n_instances into n mutually exclusive splits at random.
Args:
n_splits (int): Number of splits
n_instances (int): Number of instances to split
rng (np.random.Generator, optional): A random generator. Defaults to np.random.default_rng().
Returns:
list: a list (length n_splits). Each element in the list should contain a
numpy array giving the indices of the instances in that split.
"""
return np.array_split(rng.permutation(n_instances), n_splits)
def train_test_k_fold(n_folds: int, n_instances: int, rng: Generator = default_rng()) -> list:
""" Generate train and test indices at each fold.
Args:
n_folds (int): Number of folds
n_instances (int): Total number of instances
        rng (np.random.Generator, optional): A random generator. Defaults to np.random.default_rng().
Returns:
list: a list of length n_folds. Each element in the list is a list (or tuple)
with two elements: a numpy array containing the train indices, and another
numpy array containing the test indices.
"""
# split the dataset into k splits
split_indices = k_fold_split(n_folds, n_instances, rng)
folds = []
for k in range(n_folds):
test_indices = split_indices[k]
train_indices = np.concatenate(
split_indices[:k] + split_indices[k + 1:]
)
folds.append([train_indices, test_indices])
return folds
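# Example fold layout (illustrative, not actual output): with n_folds=3 and
# n_instances=6, each element of the returned list is [train_indices, test_indices],
# e.g. [array([2, 5, 0, 3]), array([4, 1])].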
def cross_validation_fixed_hyperparameter(x, y, k, n_folds, rng=default_rng()) -> np.ndarray:
accuracies = np.empty(n_folds)
for i, (train, test) in enumerate(train_test_k_fold(n_folds, len(x), rng)):
x_train, x_test = x[train], x[test]
y_train, y_test = y[train], y[test]
classifier = KNNClassifier(k)
classifier.fit(x_train, y_train)
y_prediction = classifier.predict(x_test)
accuracies[i] = accuracy(y_test, y_prediction)
return accuracies
def train_val_test_k_fold(n_folds, n_instances, random_generator=default_rng()):
""" Generate train and test indices at each fold.
Args:
n_folds (int): Number of folds
n_instances (int): Total number of instances
random_generator (np.random.Generator): A random generator
Returns:
list: a list of length n_folds. Each element in the list is a list (or tuple)
with three elements:
- a numpy array containing the train indices
- a numpy array containing the val indices
- a numpy array containing the test indices
"""
# split the dataset into k splits
split_indices = k_fold_split(n_folds, n_instances, random_generator)
folds = []
for k in range(n_folds):
# take the splits from split_indices and keep the k-th split as testing
# and another split as validation
# and concatenate the remaining k-2 splits for training
test_indices = split_indices[k]
val_indices = split_indices[(k + 1) % n_folds]
train_indices = np.array([], dtype=int)
for i in range(n_folds):
# concatenate to training set if not validation or test
if i not in [k, (k + 1) % n_folds]:
train_indices = np.append(train_indices, split_indices[i])
folds.append([train_indices, val_indices, test_indices])
return folds
def cross_validation_general_performance(x, y, n_folds, rng=default_rng()):
accuracies = np.zeros(n_folds)
for i, (train, val, test) in enumerate(train_val_test_k_fold(n_folds, len(x), rng)):
# set up the dataset for this fold
x_train = x[train]
y_train = y[train]
x_val = x[val]
y_val = y[val]
x_test = x[test]
y_test = y[test]
# Perform grid search, i.e.
# for K (number of neighbours) from 1 to 10 (inclusive)
# evaluate the K-NN classifiers on x_val
# store the accuracy and classifier for each K
max_accuracy, best_k = 0, 0
best_model = None
for k in range(1, 11):
classifier = classification.KNNClassifier(k)
classifier.fit(x_train, y_train)
predicted = classifier.predict(x_val)
# Select the classifier with the highest accuracy
if accuracy(y_val, predicted) > max_accuracy:
max_accuracy = accuracy(y_val, predicted)
best_k = k
best_model = classifier
# Evaluate this classifier on x_test (accuracy)
y_predicted = best_model.predict(x_test)
accuracies[i] = accuracy(y_test, y_predicted)
print(
            f'Model with hyperparameter {best_k} has the highest accuracy {accuracies[i]} at iteration {i + 1}')
return accuracies
def cross_validation_nested_hypertuning(x, y, n_outer_folds, n_inner_folds, rng=default_rng()):
accuracies = np.zeros(n_outer_folds)
for i, (train_val, test) in enumerate(train_test_k_fold(n_outer_folds, len(x), rng)):
# set up the dataset for this fold
x_train_val = x[train_val]
y_train_val = y[train_val]
x_test = x[test]
y_test = y[test]
# Perform grid search, i.e.
# for K (number of neighbours) from 1 to 10 (inclusive)
# evaluate the K-NN classifiers on x_val
# store the accuracy and classifier for each K
max_accuracy, best_k = 0, 0
for k in range(1, 11):
classifier = classification.KNNClassifier(k)
accuracy_sum = 0
for train, val in train_test_k_fold(n_inner_folds, len(x_train_val), rng):
x_train, y_train = x_train_val[train], y_train_val[train]
x_val, y_val = x_train_val[val], y_train_val[val]
classifier.fit(x_train, y_train)
predicted = classifier.predict(x_val)
                accuracy_sum += accuracy(y_val, predicted)
# The accuracy is the averaged performance on rotated train-val dataset.
# Select the classifier with the highest accuracy
if accuracy_sum / n_inner_folds > max_accuracy:
max_accuracy = accuracy_sum / n_inner_folds
best_k = k
# Evaluate this classifier on x_test (accuracy)
best_model = KNNClassifier(best_k)
best_model.fit(x_train_val, y_train_val)
y_predicted = best_model.predict(x_test)
accuracies[i] = accuracy(y_test, y_predicted)
print(
            f'Model with hyperparameter {best_k} has the highest accuracy {accuracies[i]} at iteration {i + 1}')
return accuracies
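# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example of fixed-hyperparameter cross-validation on
# synthetic data; it assumes KNNClassifier and accuracy behave as used above.
if __name__ == "__main__":
    _rng = default_rng(42)
    _x = _rng.normal(size=(100, 2))
    _y = (_x[:, 0] + _x[:, 1] > 0).astype(int)
    _scores = cross_validation_fixed_hyperparameter(_x, _y, k=3, n_folds=5, rng=_rng)
    print("per-fold accuracies:", _scores, "mean:", _scores.mean())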
|
|
"""
DCG and NDCG ranking metrics.

DCG sums gain(target) / log2(rank + 2) over the top-k ranked items (rank
starting at 0); NDCG divides DCG by its value under the ideal ordering.
"""
import numpy as np
from . import gains, Metric
from six import moves
_EPS = np.finfo(np.float64).eps
range = moves.range
class DCG(Metric):
def __init__(self, k=10, gain_type='exp2'):
super(DCG, self).__init__()
self.k = k
self.gain_type = gain_type
self._gain_fn = gains.get_gain_fn(gain_type)
self._discounts = self._make_discounts(256)
def evaluate(self, qid, targets):
return sum(self._gain_fn(t) * self._get_discount(i)
for i, t in enumerate(targets) if i < self.k)
def calc_swap_deltas(self, qid, targets, coeff=1.0):
n_targets = len(targets)
deltas = np.zeros((n_targets, n_targets))
for i in range(min(n_targets, self.k)):
for j in range(i + 1, n_targets):
deltas[i, j] = coeff * \
(self._gain_fn(targets[i]) - self._gain_fn(targets[j])) * \
(self._get_discount(j) - self._get_discount(i))
return deltas
def max_k(self):
return self.k
def calc_random_ev(self, qid, targets):
total_gains = sum(self._gain_fn(t) for t in targets)
total_discounts = sum(self._get_discount(i)
for i in range(min(self.k, len(targets))))
return total_gains * total_discounts / len(targets)
@classmethod
    def _make_discounts(cls, n):
return np.array([1.0 / np.log2(i + 2.0) for i in range(n)])
def _get_discount(self, i):
if i >= self.k:
return 0.0
while i >= len(self._discounts):
self._grow_discounts()
return self._discounts[i]
def _grow_discounts(self):
self._discounts = self._make_discounts(len(self._discounts) * 2)
class NDCG(Metric):
def __init__(self, k=10, gain_type='exp2'):
super(NDCG, self).__init__()
self.k = k
self.gain_type = gain_type
self._dcg = DCG(k=k, gain_type=gain_type)
self._ideals = {}
def evaluate(self, qid, targets):
return (self._dcg.evaluate(qid, targets) /
max(_EPS, self._get_ideal(qid, targets)))
def calc_swap_deltas(self, qid, targets):
ideal = self._get_ideal(qid, targets)
if ideal < _EPS:
return np.zeros((len(targets), len(targets)))
return self._dcg.calc_swap_deltas(
qid, targets, coeff=1.0 / ideal)
def max_k(self):
return self.k
def calc_random_ev(self, qid, targets):
return (self._dcg.calc_random_ev(qid, targets) /
max(_EPS, self._get_ideal(qid, targets)))
def _get_ideal(self, qid, targets):
ideal = self._ideals.get(qid)
if ideal is not None:
return ideal
sorted_targets = np.sort(targets)[::-1]
ideal = self._dcg.evaluate(qid, sorted_targets)
self._ideals[qid] = ideal
return ideal
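# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example: score one query's relevance labels with
# NDCG@5. It assumes the relative imports above (gains, Metric) resolve when
# the package is installed.
if __name__ == "__main__":
    _metric = NDCG(k=5, gain_type='exp2')
    _targets = np.array([3, 2, 3, 0, 1, 2])
    print("NDCG@5:", _metric.evaluate(qid=0, targets=_targets))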
|
|
"""The classes in this file are domain specific, and therefore include
specifics about the design space and the model parameters.
The main jobs of the model classes are:
a) define priors over parameters - as scipy distribution objects
b) implement the `predictive_y` method. You can add
whatever useful helper functions you want in order to help with
that job.
NOTE: There is some faff and checking required when we are doing
the numerical stuff. This might be my inexperience with Python, but
I think it comes down to annoyances in grabbing parameters and designs
out of a Pandas dataframe and getting that into useful Numpy arrays.
TODO: Can this be made easier/better?
"""
from scipy.stats import norm, halfnorm, uniform
import numpy as np
from badapted.model import Model
from badapted.choice_functions import (
CumulativeNormalChoiceFunc,
StandardCumulativeNormalChoiceFunc,
)
class DelaySlice(Model):
"""This is an insane delay discounting model. It basically fits ONE indifference
point. It amounts to fitting a psychometric function with the indifference point
shifting the function and alpha determining the slope of the function.
Note: the α parameter in this model is on a different scale to the same parameter
in other models. Here we are doing inference over indifference points, so the whole
range typically spans 0-1. So it makes sense for this model that our prior over
α is more restricted to low values near zero
"""
def __init__(
self,
n_particles,
prior={"indiff": uniform(0, 1), "α": halfnorm(loc=0, scale=0.1)},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
""" The decision variable is difference between the indifference point and
the 'stimulus intensity' which is RA/RB """
return θ["indiff"].values - (data["RA"].values / data["RB"].values)
class Hyperbolic(Model):
"""Hyperbolic time discounting model
Mazur, J. E. (1987). An adjusting procedure for studying delayed
re-inforcement. In Commons, M. L., Mazur, J. E., Nevin, J. A., and
Rachlin, H., editors, Quantitative Analyses of Behavior, pages 55–
73. Erlbaum, Hillsdale, NJ.
"""
def __init__(
self,
n_particles,
prior={"logk": norm(loc=-4.5, scale=1), "α": halfnorm(loc=0, scale=2)},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
VA = data["RA"].values * self._time_discount_func(
data["DA"].values, np.exp(θ["logk"].values)
)
VB = data["RB"].values * self._time_discount_func(
data["DB"].values, np.exp(θ["logk"].values)
)
return VB - VA
@staticmethod
def _time_discount_func(delay, k):
return 1 / (1 + k * delay)
class Exponential(Model):
"""Exponential time discounting model"""
def __init__(
self,
n_particles,
prior={"k": norm(loc=0.01, scale=0.1), "α": halfnorm(loc=0, scale=3)},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
VA = data["RA"].values * self._time_discount_func(
data["DA"].values, θ["k"].values
)
VB = data["RB"].values * self._time_discount_func(
data["DB"].values, θ["k"].values
)
return VB - VA
@staticmethod
@np.vectorize
def _time_discount_func(delay, k):
return np.exp(-k * delay)
class HyperbolicMagnitudeEffect(Model):
"""Hyperbolic time discounting model + magnitude effect
Vincent, B. T. (2016). Hierarchical Bayesian estimation and hypothesis
testing for delay discounting tasks. Behavior Research Methods, 48(4),
1608–1620. http://doi.org/10.3758/s13428-015-0672-2
"""
def __init__(
self,
n_particles,
prior={
"m": norm(loc=-2.43, scale=2),
"c": norm(loc=0, scale=100),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
VA = self._present_subjective_value(
data["RA"].values, data["DA"].values, θ["m"].values, θ["c"].values
)
VB = self._present_subjective_value(
data["RB"].values, data["DB"].values, θ["m"].values, θ["c"].values
)
return VB - VA
@staticmethod
def _present_subjective_value(reward, delay, m, c):
k = np.exp(m * np.log(reward) + c)
discount_fraction = 1 / (1 + k * delay)
V = reward * discount_fraction
return V
class ExponentialMagnitudeEffect(Model):
"""Exponential time discounting model + magnitude effect
Similar to...
Vincent, B. T. (2016). Hierarchical Bayesian estimation and hypothesis
testing for delay discounting tasks. Behavior Research Methods, 48(4),
1608–1620. http://doi.org/10.3758/s13428-015-0672-2
"""
def __init__(
self,
n_particles,
prior={
"m": norm(loc=-2.43, scale=2),
"c": norm(loc=0, scale=100),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
VA = self._present_subjective_value(
data["RA"].values, data["DA"].values, θ["m"].values, θ["c"].values
)
VB = self._present_subjective_value(
data["RB"].values, data["DB"].values, θ["m"].values, θ["c"].values
)
return VB - VA
@staticmethod
@np.vectorize
def _present_subjective_value(reward, delay, m, c):
k = np.exp(m * np.log(reward) + c)
discount_fraction = np.exp(-k * delay)
V = reward * discount_fraction
return V
class ConstantSensitivity(Model):
"""The constant sensitivity time discounting model
Ebert & Prelec (2007) The Fragility of Time: Time-Insensitivity and Valuation
of the Near and Far Future. Management Science, 53(9):1423–1438.
"""
def __init__(
self,
n_particles,
prior={
"a": norm(loc=0.01, scale=0.1),
"b": halfnorm(loc=0.001, scale=3),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
VA = data["RA"].values * self._time_discount_func(
data["DA"].values, θ["a"].values, θ["b"].values
)
VB = data["RB"].values * self._time_discount_func(
data["DB"].values, θ["a"].values, θ["b"].values
)
return VB - VA
@staticmethod
def _time_discount_func(delay, a, b):
# NOTE: we want params as a row matrix, and delays as a column matrix
# to do the appropriate array broadcasting.
return np.exp(-np.power(a * delay, b))
class MyersonHyperboloid(Model):
"""Myerson style hyperboloid
"""
def __init__(
self,
n_particles,
prior={
"logk": norm(loc=np.log(1 / 365), scale=2),
"s": halfnorm(loc=0, scale=2),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
VA = data["RA"].values * self._time_discount_func(
data["DA"].values, θ["logk"].values, θ["s"].values
)
VB = data["RB"].values * self._time_discount_func(
data["DB"].values, θ["logk"].values, θ["s"].values
)
return VB - VA
@staticmethod
def _time_discount_func(delay, logk, s):
# NOTE: we want logk as a row matrix, and delays as a column matrix to
# do the appropriate array broadcasting.
k = np.exp(logk)
return 1 / np.power(1 + k * delay, s)
class ModifiedRachlin(Model):
"""The Rachlin (2006) discount function, modified by Vincent &
Stewart (2018). This has a better parameterisation.
Rachlin, H. (2006, May). Notes on Discounting. Journal of the
Experimental Analysis of Behavior, 85(3), 425–435.
Vincent, B. T., & Stewart, N. (2018, October 16). The case of muddled
units in temporal discounting.
https://doi.org/10.31234/osf.io/29sgd
"""
def __init__(
self,
n_particles,
prior={
"logk": norm(loc=np.log(1 / 365), scale=2),
"s": halfnorm(loc=1, scale=2),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
VA = data["RA"].values * self._time_discount_func(
data["DA"].values, θ["logk"].values, θ["s"].values
)
VB = data["RB"].values * self._time_discount_func(
data["DB"].values, θ["logk"].values, θ["s"].values
)
return VB - VA
@staticmethod
@np.vectorize
def _time_discount_func(delay, logk, s):
# NOTE: we want logk as a row matrix, and delays as a column matrix to do the
# appropriate array broadcasting.
if delay == 0:
return 1
else:
k = np.exp(logk)
return 1 / (1 + np.power(k * delay, s))
class HyperbolicNonLinearUtility(Model):
"""Hyperbolic time discounting + non-linear utility model.
The a-model from ...
Cheng, J., & González-Vallejo, C. (2014). Hyperbolic Discounting: Value and
Time Processes of Substance Abusers and Non-Clinical Individuals in
Intertemporal Choice. PLoS ONE, 9(11), e111378–18.
http://doi.org/10.1371/journal.pone.0111378
"""
def __init__(
self,
n_particles,
prior={
"a": norm(loc=1, scale=0.1),
"logk": norm(loc=np.log(1 / 365), scale=2),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
a = np.exp(θ["a"].values)
VA = np.power(data["RA"].values, a) * self._time_discount_func(
data["DA"].values, θ["logk"].values
)
VB = np.power(data["RB"].values, a) * self._time_discount_func(
data["DB"].values, θ["logk"].values
)
return VB - VA
@staticmethod
def _time_discount_func(delay, logk):
k = np.exp(logk)
return 1 / (1 + k * delay)
class ITCH(Model):
"""ITCH model, as presented in:
Ericson, K. M. M., White, J. M., Laibson, D., & Cohen, J. D. (2015). Money
earlier or later? Simple heuristics explain intertemporal choices better
than delay discounting does. Psychological Science, 26(6), 826–833.
http://doi.org/10.1177/0956797615572232
Note that we use a choice function _without_ a slope parameter.
"""
def __init__(
self,
n_particles,
prior={
"β_I": norm(loc=0, scale=50),
"β_abs_reward": norm(loc=0, scale=50),
"β_rel_reward": norm(loc=0, scale=50),
"β_abs_delay": norm(loc=0, scale=50),
"β_rel_relay": norm(loc=0, scale=50),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
# organised so that higher values of the decision variable will
# mean higher probabability for the delayed option (prospect B)
reward_abs_diff = data["RB"].values - data["RA"].values
reward_rel_diff = self._rel_diff(data["RB"].values, data["RA"].values)
delay_abs_diff = data["DB"].values - data["DA"].values
delay_rel_diff = self._rel_diff(data["DB"].values, data["DA"].values)
decision_variable = (
θ["β_I"].values
+ θ["β_abs_reward"].values * reward_abs_diff
+ θ["β_rel_reward"].values * reward_rel_diff
+ θ["β_abs_delay"].values * delay_abs_diff
+ θ["β_rel_relay"].values * delay_rel_diff
)
return decision_variable
@staticmethod
def _rel_diff(B, A):
"""Calculate the difference between B and A, normalised by the mean
of B and A"""
return (B - A) / ((B + A) / 2)
class DRIFT(Model):
"""DRIFT model, as presented in:
Note that we use a choice function _without_ a slope parameter.
Read, D., Frederick, S., & Scholten, M. (2013). DRIFT: an analysis of
outcome framing in intertemporal choice. Journal of Experimental
Psychology: Learning, Memory, and Cognition, 39(2), 573–588.
http://doi.org/10.1037/a0029177
"""
def __init__(
self,
n_particles,
prior={
"β0": norm(loc=0, scale=50),
"β1": norm(loc=0, scale=50),
"β2": norm(loc=0, scale=50),
"β3": norm(loc=0, scale=50),
"β4": norm(loc=0, scale=50),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
reward_abs_diff = data["RB"].values - data["RA"].values
reward_diff = (data["RB"].values - data["RA"].values) / data["RA"].values
delay_abs_diff = data["DB"].values - data["DA"].values
delay_component = (data["RB"].values / data["RA"].values) ** (
1 / (delay_abs_diff)
) - 1
decision_variable = (
θ["β0"].values
+ θ["β1"].values * reward_abs_diff
+ θ["β2"].values * reward_diff
+ θ["β3"].values * delay_component
+ θ["β4"].values * delay_abs_diff
)
return decision_variable
class TradeOff(Model):
"""Tradeoff model by Scholten & Read (2010). Model forumulation as defined
in Ericson et al (2015).
Scholten, M., & Read, D. (2010). The psychology of intertemporal tradeoffs.
Psychological Review, 117(3), 925–944. http://doi.org/10.1037/a0019619
"""
def __init__(
self,
n_particles,
prior={
"gamma_reward": halfnorm(loc=0, scale=10),
"gamma_delay": halfnorm(loc=0, scale=10),
"k": norm(loc=0, scale=2),
"α": halfnorm(loc=0, scale=3),
},
):
self.n_particles = int(n_particles)
self.prior = prior
self.θ_fixed = {"ϵ": 0.01}
self.choiceFunction = CumulativeNormalChoiceFunc
def predictive_y(self, θ, data):
decision_variable = self._calc_decision_variable(θ, data)
p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
return p_chose_B
def _calc_decision_variable(self, θ, data):
return (
self._f(data["RB"].values, θ["gamma_reward"].values)
- self._f(data["RA"].values, θ["gamma_reward"].values)
) - θ["k"].values * (
self._f(data["DB"].values, θ["gamma_delay"].values)
- self._f(data["DA"].values, θ["gamma_delay"].values)
)
@staticmethod
def _f(x, gamma):
return np.log(1.0 + gamma * x) / gamma
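# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example of querying one model's predictive
# probability of choosing the delayed option. The design and particle values
# below are made up; they assume badapted's conventions (RA/DA/RB/DB columns
# and parameter DataFrames) as used throughout this file.
if __name__ == "__main__":
    import pandas as pd
    _designs = pd.DataFrame({"RA": [50.0], "DA": [0.0], "RB": [100.0], "DB": [30.0]})
    _particles = pd.DataFrame({"logk": [-4.5], "α": [2.0]})
    _model = Hyperbolic(n_particles=1)
    print("P(choose delayed):", _model.predictive_y(_particles, _designs))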
|
|
import os
import io
import gzip
import pickle
import tarfile
import logging
import torch
import numpy as np
import utils
from model import embedding
def add_embed_arguments(parser, name=None):
if name is None:
prefix = ""
else:
prefix = f"{name}-"
parser.add_argument(f"--{prefix}embed-type", type=str, default=None,
choices=list(e.name for e in EMBEDDINGS))
parser.add_argument(f"--{prefix}embed-path", type=str, default=None)
class Embeddings(object):
name = None
@property
def dim(self):
raise NotImplementedError()
def preload(self):
raise NotImplementedError()
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.name == other.name
def __getitem__(self, item):
"""
returns word embedding as numpy array
:param item: str
:return: [dim] np.ndarray
"""
raise NotImplementedError()
def __contains__(self, item):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
class GloveFormatReader(utils.UniversalFileReader):
def open_txt(self, path):
return open(path, "r")
def open_gz(self, path):
return io.TextIOWrapper(gzip.open(path, "r"))
class TarFormatEmbeddings(Embeddings):
name = "tar-format"
def __init__(self, path):
self.path = os.path.abspath(path)
self.vocab = None
self.array = None
self._dim = None
def preload(self):
assert os.path.exists(self.path)
with tarfile.TarFile(self.path, mode="r") as tf:
self.vocab = pickle.load(tf.extractfile("vocab.pkl"))
self.array = np.load(io.BytesIO(tf.extractfile("array.npy").read()))
@property
def dim(self):
return self.array.shape[1]
def __hash__(self):
return hash(self.name) * 541 + hash(self.path)
def __eq__(self, other):
if not isinstance(other, TarFormatEmbeddings):
return False
return self.name == other.name and self.path == other.path
def __contains__(self, item):
return item in self.vocab
def __getitem__(self, item):
return self.array[self.vocab[item]]
def __iter__(self):
return iter((w, self.array[i]) for w, i in self.vocab.items())
class GloveFormatEmbeddings(Embeddings):
name = "glove-format"
def __init__(self, path, dim=300, words=None):
self.path = os.path.abspath(path)
self.data = None
self._dim = dim
self.vocab = words
@staticmethod
def _tqdm(iterable=None):
return utils.tqdm(
iterable=iterable,
desc="loading glove",
unit="w",
)
def preload(self):
self.data = {}
dim = self.dim
reader = GloveFormatReader(default_ext="txt")
with reader(self.path) as f:
            for line in self._tqdm(f):
tokens = line.split()
word = " ".join(tokens[:-dim])
if self.vocab is not None and word not in self.vocab:
continue
vec = np.array([float(v) for v in tokens[-dim:]])
self.data[word] = vec
loaded_words = set(self.data.keys())
stats = {"num-words": len(self.data)}
if self.vocab is not None:
stats["coverage"] = len(loaded_words & self.vocab) / len(self.vocab)
stats = {k: f"{v:.4f}" for k, v in stats.items()}
logging.info(f"{self.name} embeddings from {self.path} loaded,"
f" {utils.join_dict(stats, ', ', '=')}")
def __hash__(self):
return hash(self.name) * 541 + hash(self.path)
def __eq__(self, other):
if not isinstance(other, GloveFormatEmbeddings):
return False
return self.name == other.name and self.path == other.path
@property
def dim(self):
return self._dim
def __contains__(self, item):
return item in self.data
def __getitem__(self, item):
return self.data[item]
def __iter__(self):
return iter(self.data.items())
class WordEmbeddingManager(object):
def __init__(self):
self.embeds = dict()
def __getitem__(self, item: Embeddings):
key = hash(item)
if key not in self.embeds:
item.preload()
self.embeds[key] = item
return self.embeds[key]
EMBEDDINGS = [
GloveFormatEmbeddings,
TarFormatEmbeddings
]
def _load_embeddings(module: embedding.AbstractEmbedding, vocab, we):
for w, v in we:
if w not in vocab.f2i:
continue
idx = vocab.f2i[w]
module.load(idx, torch.FloatTensor(v))
def get_embeddings(args, vocab=None):
return utils.map_val(args.embed_type, {
"glove-format": lambda: GloveFormatEmbeddings(
path=args.embed_path,
#words=set(vocab.f2i) if vocab is not None else None
),
"tar-format": lambda: TarFormatEmbeddings(
path=args.embed_path
)
}, "embedding type")()
def load_embeddings(args, vocab, modules):
if args.embed_type is None:
return
embeddings = get_embeddings(args, vocab)
embeddings.preload()
for module in modules:
assert isinstance(module, embedding.AbstractEmbedding)
_load_embeddings(module, vocab, embeddings)
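# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example: load GloVe-format vectors from a text file
# and look up one word. The path is made up; it assumes a whitespace-separated
# file with 300-dimensional vectors.
if __name__ == "__main__":
    _embeds = GloveFormatEmbeddings(path="glove.6B.300d.txt", dim=300)
    _embeds.preload()
    if "the" in _embeds:
        print("dim:", _embeds.dim, "vector shape:", _embeds["the"].shape)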
|
|
import copy
from typing import Union, List, Callable
import numpy as np
from interpreter import imageFunctions as imageWrapper
from interpreter import lexer as lexer
from interpreter import tokens as tokens
from interpreter import movementFunctions as movement
from interpreter import colors as colors
from interpreter import tokenFunctions as runner
from interpreter import errors as errors
from interpreter.dataStructures import programState, position, direction
def interpret(image: np.ndarray) -> Union[programState, List[BaseException]]:
"""
Interprets and executes a Piet image
:param image: Input image
:return: Either the final state of the program, or a list of exceptions
"""
graph = lexer.graphImage(image)
if len(graph[1]) > 0:
print("The following exceptions occured while making the graph:\n{}".format("".join(list(map(lambda x: "\t{}\n".format(x), graph[1])))))
return graph[1]
# This is the default programState.
startPosition = position((0, 0))
pointers = direction((0, 0))
PS = programState(graph[0], startPosition, pointers)
result = runProgram(image, PS)
# Check if executed step had an error
if isinstance(result, BaseException):
print("The following exception occured while executing the next step:\n{}".format(result))
return [result]
return result
def runProgram(image: np.ndarray, PS: programState) -> Union[programState, BaseException]:
"""
Executes all steps from the image
:param image: input image
:param PS: current program state with which to make the next step
:return: Either the last program state, or a runtime exception
"""
newState = copy.deepcopy(PS)
if colors.isBlack(imageWrapper.getPixel(image, newState.position)):
return errors.inBlackPixelError("Programstate starts in black pixel at {}".format(newState.position))
currentCodel = imageWrapper.getCodel(image, newState.position)
newGraph = newState.graph.graph
graphNode = newGraph[currentCodel]
newToken = graphNode.graphNode[newState.direction][0]
if isinstance(newToken, tokens.terminateToken):
return newState
newState = takeStep(image, newState)
if isinstance(newState, BaseException):
return newState
return runProgram(image, newState)
def countSteps(f: Callable[[np.ndarray, programState], programState]) -> Callable[[np.ndarray, programState], programState]:
"""
A decorator function to count the steps taken in the program
:param f: original function to call
:return: A decorated function
"""
def inner(image: np.ndarray, PS: programState) -> programState:
inner.counter += 1
return f(image, PS)
inner.counter = 0
return inner
@countSteps
def takeStep(image: np.ndarray, PS: programState) -> Union[programState, BaseException]:
"""
Takes a single step from the programstate
:param image: input image
:param PS: input programstate
:return: Returns either the resulting programstate, or an exception that occurred
"""
newState = copy.deepcopy(PS)
currentCodel = imageWrapper.getCodel(image, newState.position)
newGraph = newState.graph.graph
graphNode = newGraph[currentCodel]
newToken = graphNode.graphNode[newState.direction][0]
edgePosition = graphNode.graphNode[newState.direction][1]
result = runner.executeToken(newToken, newState.direction, newState.dataStack)
# Add additional information to the error message (Position and direction)
if isinstance(result, BaseException):
return type(result)("{}, at position {}, direction {}".format(result.args[0], edgePosition,newState.direction))
# return result
# If the next token is either white or color, just move along. If the token was black (or terminate), the direction
# is already changed, but the position shouldn't move
if isinstance(newToken, (tokens.toWhiteToken, tokens.toColorToken)):
newState.position = movement.getNextPosition(edgePosition, newState.direction.pointers[0])
# Use the new direction and stack for the next step
newState.direction = result[0]
newState.dataStack = result[1]
return newState
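# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example: load a Piet program image as a numpy array
# and run the interpreter. The file name is made up and Pillow is an
# assumption; any loader that yields an RGB ndarray would do.
if __name__ == "__main__":
    from PIL import Image
    _image = np.asarray(Image.open("hello_world.png").convert("RGB"))
    _result = interpret(_image)
    print(_result)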
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""""WRITEME"""
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
def drawFirstPicks(ax, data, tt=None, plotva=False, marker='x-'):
"""Naming convention. drawFOO(ax, ... )"""
return plotFirstPicks(ax=ax, data=data, tt=tt,
plotva=plotva, marker=marker)
def drawVA(ax, data, usePos=True):
"""Naming convention. drawFOO(ax, ... )"""
return showVA(ax=ax, data=data, usepos=usePos)
def drawTravelTimeData(ax, data, t=None):
"""
    Draw first-arrival traveltime data into the matplotlib axes ax.
    data of type \ref DataContainer must contain sensor indices 's' and 'g',
    which are numbered internally [0..n).
"""
x = pg.x(data.sensorPositions())
# z = pg.z(data.sensorPositions())
shots = pg.unique(pg.sort(data('s')))
geoph = pg.unique(pg.sort(data('g')))
startOffsetIDX = 0
if min(min(shots), min(geoph)) == 1:
startOffsetIDX = 1
tShow = data('t')
if t is not None:
tShow = t
ax.set_xlim([min(x), max(x)])
ax.set_ylim([max(tShow), -0.002])
ax.figure.show()
for shot in shots:
gIdx = pg.find(data('s') == shot)
sensorIdx = [int(i__ - startOffsetIDX) for i__ in data('g')[gIdx]]
ax.plot(x[sensorIdx], tShow[gIdx], 'x-')
yPixel = ax.transData.inverted().transform_point((1, 1))[1] - \
ax.transData.inverted().transform_point((0, 0))[1]
xPixel = ax.transData.inverted().transform_point((1, 1))[0] - \
ax.transData.inverted().transform_point((0, 0))[0]
# draw shot points
ax.plot(x[[int(i__ - startOffsetIDX) for i__ in shots]],
np.zeros(len(shots)) + 8. * yPixel, 'gv', markersize=8)
# draw geophone points
ax.plot(x[[int(i__ - startOffsetIDX) for i__ in geoph]],
np.zeros(len(geoph)) + 3. * yPixel, 'r^', markersize=8)
ax.grid()
ax.set_ylim([max(tShow), +16. * yPixel])
ax.set_xlim([min(x) - 5. * xPixel, max(x) + 5. * xPixel])
ax.set_xlabel('x-Coordinate [m]')
ax.set_ylabel('Traveltime [ms]')
# def drawTravelTimeData(...)
def plotFirstPicks(ax, data, tt=None, plotva=False, marker='x-'):
"""plot first arrivals as lines"""
px = pg.x(data.sensorPositions())
gx = np.array([px[int(g)] for g in data("g")])
sx = np.array([px[int(s)] for s in data("s")])
if tt is None:
tt = np.array(data("t"))
if plotva:
tt = np.absolute(gx - sx) / tt
uns = np.unique(sx)
cols = 'brgcmyk'
for i, si in enumerate(uns):
ti = tt[sx == si]
gi = gx[sx == si]
ii = gi.argsort()
ax.plot(gi[ii], ti[ii], marker, color=cols[i % 7])
ax.plot(si, 0., 's', color=cols[i % 7], markersize=8)
ax.grid(True)
def showVA(ax, data, usepos=True):
"""show apparent velocity as image plot"""
px = pg.x(data.sensorPositions())
gx = np.asarray([px[int(g)] for g in data("g")])
sx = np.asarray([px[int(s)] for s in data("s")])
va = np.absolute(gx - sx) / data('t')
A = np.ones((data.sensorCount(), data.sensorCount())) * np.nan
for i in range(data.size()):
A[int(data('s')[i]), int(data('g')[i])] = va[i]
gci = ax.imshow(A, interpolation='nearest')
ax.grid(True)
xt = np.arange(0, data.sensorCount(), 50)
if usepos:
ax.set_xticks(xt)
ax.set_xticklabels([str(int(px[xti])) for xti in xt])
ax.set_yticks(xt)
ax.set_yticklabels([str(int(px[xti])) for xti in xt])
plt.colorbar(gci, ax=ax)
return va
def plotLines(ax, line_filename, step=1):
xz = np.loadtxt(line_filename)
n_points = xz.shape[0]
if step == 2:
for i in range(0, n_points, step):
x = xz[i:i + step, 0]
z = xz[i:i + step, 1]
ax.plot(x, z, 'k-')
if step == 1:
ax.plot(xz[:, 0], xz[:, 1], 'k-')
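# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example: plot first picks from a traveltime data set.
# The file name is made up, and it assumes a pygimli DataContainer holding
# 's', 'g' and 't' entries (as expected by the functions above) can be
# constructed from that file.
if __name__ == "__main__":
    data = pg.DataContainer("picks.sgt")
    fig, ax = plt.subplots()
    plotFirstPicks(ax, data)
    plt.show()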
|
|
"""Utility functions for real-space grid properties
"""
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
import struct
from .conversions import *
from scipy.interpolate import griddata
rho = np.zeros(2)
rho_val = np.zeros(2)
unitcell = np.zeros(2)
grid = np.zeros(2)
# ================= Data import ====================== #
def get_data_bin(file_path):
global rho
global unitcell
global grid
#Warning: Only works for cubic cells!!!
#TODO: Implement for arb. cells
bin_file = open(file_path, mode = 'rb')
unitcell = '<I9dI'
grid = '<I4iI'
unitcell = np.array(struct.unpack(unitcell,
bin_file.read(struct.calcsize(unitcell))))[1:-1].reshape(3,3)
grid = np.array(struct.unpack(grid,bin_file.read(struct.calcsize(grid))))[1:-1]
if (grid[0] == grid[1] == grid[2]) and grid[3] == 1:
a = grid[0]
else:
raise Exception('get_data_bin cannot handle non-cubic unitcells or spin')
block = '<' + 'I{}fI'.format(a)*a*a
content = np.array(struct.unpack(block,bin_file.read(struct.calcsize(block))))
rho = content.reshape(a+2, a, a, order = 'F')[1:-1,:,:]
return rho, unitcell, grid
def get_data(file_path):
"""Import data from RHO file (or similar real-space grid files)
Data is saved in global variables.
Structure of RHO file:
first three lines give the unit cell vectors
fourth line the grid dimensions
subsequent lines give density on grid
Parameters:
-----------
file_path: string; path to RHO file from which density is read
Returns:
--------
None
Other:
------
unitcell: (3,3) np.array; saves the unitcell dimension in euclidean coordinates
grid: (,3) np.array; number of grid points in each euclidean direction
rho: (grid[1],grid[2],grid[3]) np.array; density on grid
"""
global rho
global unitcell
global grid
global rhopath
rhopath = file_path
unitcell = np.zeros([3, 3])
grid = np.zeros([4])
with open(file_path, 'r') as rhofile:
# unit cell (in Bohr)
for i in range(0, 3):
unitcell[i, :] = rhofile.readline().split()
grid[:] = rhofile.readline().split()
grid = grid.astype(int)
n_el = grid[0] * grid[1] * grid[2] * grid[3]
        # initialize density with the right shape
rho = np.zeros(grid)
for z in range(grid[2]):
for y in range(grid[1]):
for x in range(grid[0]):
rho[x, y, z, 0] = rhofile.readline()
# closed shell -> we don't care about spin.
rho = rho[:, :, :, 0]
return rho, unitcell, grid
def check_norm():
""" Check normalization of charge density
Returns
-------
float; integrated charge density
"""
Xm, Ym, Zm = mesh_3d()
box_vol = unitcell[0, 0] / grid[0] * unitcell[1, 1] / grid[1] * unitcell[
2, 2] / grid[2]
return np.sum(rho[Xm, Ym, Zm]) * box_vol
# ==================== Mesh Functions ==================== #
def smallest_box(atom_pos, box_buffer=0.5):
"""Determine smallest box that includes all molecules.
Called by fit_poly if rmax = -1
Parameters
----------
atom_pos: (,3) np.array; atomic coordinates
box_buffer: float; buffer around smallest box
Returns
--------
rmax: (3) list; the maximum box dimensions in 3 euclid. directions
"""
rmax = [0, 0, 0]
for a in atom_pos:
for i in range(3):
if abs(a[i]) > rmax[i]:
rmax[i] = abs(a[i])
for i in range(3):
rmax[i] = (int)((rmax[i] + box_buffer) * grid[i] / unitcell[i, i])
if rmax[i] > grid[i]:
rmax[i] = grid[i]
return rmax
def plane_cut(data,
plane,
height,
unitcell,
grid,
rmin=[0, 0, 0],
rmax=0,
return_mesh=False):
"""return_mesh = False : returns a two dimensional cut through 3d data
True : instead of data, 2d mesh is returned
Parameters:
----------
data
plane = {0: yz-plane, 1: xz-plane, 2:xy-plane}
unitcell = 3x3 array size of the unitcell
grid = 3x1 array size of grid
rmin,rmax = lets you choose the min and max grid cutoff
rmax = 0 means the entire grid is used
    return_mesh = boolean; decides whether mesh or cut through data is returned
"""
if rmax == 0:
mid_grid = (grid / 2).astype(int)
rmax = mid_grid
# resolve the periodic boundary conditions
x_pbc = list(range(-rmax[0], -rmin[0])) + list(range(rmin[0], rmax[0]))
y_pbc = list(range(-rmax[1], -rmin[1])) + list(range(rmin[1], rmax[1]))
z_pbc = list(range(-rmax[2], -rmin[2])) + list(range(rmin[2], rmax[2]))
height = (int)(np.round(height * grid[plane] / unitcell[plane, plane]))
pbc_grids = [x_pbc, y_pbc, z_pbc]
pbc_grids.pop(plane)
A, B = np.meshgrid(*pbc_grids)
indeces = [A, B]
indeces.insert(plane, height)
if not return_mesh:
return data[indeces[0], indeces[1], indeces[2]]
else:
return A, B
def mesh_3d(rmin=[0, 0, 0], rmax=0, scaled = False, pbc = True, indexing = 'xy'):
"""Returns a 3d mesh taking into account periodic boundary conditions
Parameters
----------
rmin, rmax: (3) list; lower and upper cutoff
scaled: boolean; scale the meshes with unitcell size?
Returns
-------
X, Y, Z: np.arrays; meshgrid
"""
if rmax == 0:
mid_grid = np.floor(grid / 2).astype(int)
rmax = mid_grid
# resolve the periodic boundary conditions
if pbc:
x_pbc = list(range(-rmax[0], -rmin[0])) + list(range(rmin[0], rmax[0]+1))
y_pbc = list(range(-rmax[1], -rmin[1])) + list(range(rmin[1], rmax[1]+1))
z_pbc = list(range(-rmax[2], -rmin[2])) + list(range(rmin[2], rmax[2]+1))
else:
x_pbc = list(range(rmin[0], rmax[0] +1 )) + list(range(-rmax[0], -rmin[0]))
y_pbc = list(range(rmin[1], rmax[1] +1 )) + list(range(-rmax[1], -rmin[1]))
z_pbc = list(range(rmin[2], rmax[2] +1 )) + list(range(-rmax[2], -rmin[2]))
Xm, Ym, Zm = np.meshgrid(x_pbc, y_pbc, z_pbc, indexing = indexing)
U = np.array(unitcell) # Matrix to go from real space to mesh coordinates
for i in range(3):
U[i,:] = U[i,:] / grid[i]
a = np.linalg.norm(unitcell, axis = 1)/grid[:3]
Rm = np.concatenate([Xm.reshape(*Xm.shape,1),
Ym.reshape(*Xm.shape,1),
Zm.reshape(*Xm.shape,1)], axis = 3)
if scaled:
R = np.einsum('ij,klmj -> iklm', U.T , Rm)
X = R[0,:,:,:]
Y = R[1,:,:,:]
Z = R[2,:,:,:]
# Z = Zm * unitcell[2, 2] / grid[2]
# Y = Ym * unitcell[1, 1] / grid[1]
# X = Xm * unitcell[0, 0] / grid[0]
return X,Y,Z
else:
return Xm,Ym,Zm
# ================= Plotting ===================== #
def glimpse(rmin=[0, 0, 0], rmax=0, plane=2, height = 0):
"""Take a quick look at the loaded data in a particular plane
Parameters
----------
rmin,rmax: (3) list; upper and lower cutoffs
plane = {0: yz-plane, 1: xz-plane, 2: xy-plane}
"""
RHO = plane_cut(rho, plane, height, unitcell, grid, rmin=rmin, rmax=rmax)
plt.figure()
CS = plt.imshow(
RHO, cmap=plt.cm.jet, origin='lower')
plt.colorbar()
plt.show()
def quadrupole_moment(X, Y, Z, V, coord, rho, diagonal = False, verbose = False):
"""Calculates the quadrupole moment in atomic units of a given charge distribution
Parameters
----------
X, Y, Z: np.array; Mesh arrays
V: float; Volume of a grid cell
coord: np.array; atomic coordinates, ordered like [O,H,H,O,H,...]
    rho: np.array; charge density on the grid
diagonal: boolean; Only compute diagonal elements
verbose: boolean; print Ionic and Electronic contribution
"""
elec_quadrupole = np.zeros([3,3])
meshes = [X,Y,Z]
ionic_quadrupole = np.zeros([3,3])
charge = [6,1,1] * int(len(coord)/3)
for i in range(3):
for j in range(3):
for a,c in zip(coord,charge):
if i == j:
ionic_quadrupole[i,j] -= c * np.linalg.norm(a)**2
ionic_quadrupole[i,j] += 3 * c * a[i]*a[j]
for i in range(3):
for j in range(i, 3):
if i == j:
if i == 2: continue # Determine last diagonal entry by trace cond.
rsq = np.zeros_like(meshes[0])
for k in range(3):
rsq += (meshes[k])**2
elec_quadrupole[i,j] -= np.sum(rsq * rho * V)
elif diagonal: # Only calculate diagonal elements
continue
elec_quadrupole[i,j] += np.sum(3 * meshes[i] * meshes[j] * rho * V)
#Fill lower triangle
if not diagonal:
for i in range(3):
for j in range(i):
elec_quadrupole[i,j] = elec_quadrupole[j,i]
elec_quadrupole[2,2] = - elec_quadrupole[0,0] - elec_quadrupole[1,1]
if diagonal: return (ionic_quadrupole - elec_quadrupole).diagonal()
else: return (ionic_quadrupole - elec_quadrupole)
def dipole_moment(X, Y, Z, V, coord, rho, verbose = False):
"""Calculates the dipole moment in Debye of a given charge distribution
Parameters
----------
X, Y, Z: np.array; Mesh arrays
V: float; Volume of a grid cell
coord: np.array; atomic coordinates, ordered like [O,H,H,O,H,...]
    rho: np.array; charge density on the grid
verbose: boolean; print Ionic and Electronic contribution
Returns
--------
float; Dipole moment in Debye
"""
charge_com = np.array([ np.sum(mesh * rho * V) for mesh in [X,Y,Z]])
coord = coord.reshape(-1,3,3)
ionic_contrib = np.zeros(3)
for a in coord:
ionic_contrib += a[1] + a[2] + 6 * a[0]
if verbose:
print('Ionic {} [a.u.]'.format(ionic_contrib))
print('Electronic {} [a.u.]'.format(charge_com))
return (ionic_contrib - charge_com)/Dtoau
def rho_at_cartesian(xi, siesta, unit = 'A', method = 'linear'):
if unit == 'A':
xi *= AtoBohr
elif unit != 'Bohr':
raise Exception('Unit has to be either "A" or "Bohr"')
if np.any(np.abs(xi) > siesta.unitcell[0,0]) or \
np.any(np.abs(xi) > siesta.unitcell[1,1]) or \
np.any(np.abs(xi) > siesta.unitcell[2,2]):
raise Exception('xi out of bounds')
# Grid size
a = np.array([siesta.unitcell[i,i]/siesta.grid[i] for i in range(3)]).reshape(1,3)
# Real space inquiry points to mesh
Xi = np.round(xi/a).astype(int)
# Find surrounding mesh points
Xs = np.array(Xi)
Xzeros = np.zeros_like(Xi)
for x in [-1,0,1]:
for y in [-1,0,1]:
for z in [-1,0,1]:
for i, entry in enumerate(np.array([x,y,z])):
Xzeros[:,i] = entry
Xs = np.concatenate([Xs, Xi + Xzeros])
Xzeros = np.zeros_like(Xi)
Xs = np.unique(Xs, axis=0)
# Surrounding mesh points in real space
xs = Xs * a
return griddata(xs, siesta.rho[Xs[:,0], Xs[:,1], Xs[:,2]], xi, method = method)
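# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example: read a real-space density file, check its
# normalization, and look at the xy-plane through the origin. The file name is
# made up; it assumes the RHO text format described in get_data.
if __name__ == "__main__":
    get_data("RHO")
    print("integrated charge:", check_norm())
    glimpse(plane=2, height=0.0)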
|
|
#Automatic_keyboard_recognition
import numpy as np
import math
import statistics as stats
import cv2 as cv2
class Keyboard_auto_find_and_transform:
def __init__(self,initial_frame,target_dimensions):
print("Finding Keyboard")
self.target_dimensions = target_dimensions
self.p_mat = Automatic_keyboard_recognition(initial_frame,target_dimensions)
print("Keyboard found")
def transform_frame(self,frame):
transformed_frame = cv2.warpPerspective(frame, self.p_mat, self.target_dimensions)
return transformed_frame
#====================================================================================
def Automatic_keyboard_recognition(frame,target_dimensions):
lines_within_frame = get_lines(frame.copy())
horizontal_ish_lines = filter_horizontal_lines(lines_within_frame)
#list of line pairings: each pairing in form ((r1,theta1),(r2,theta2)) where (r1,theta1) is possible top of keyboard line
line_pairings = group_lines(horizontal_ish_lines)
filtered_pairings = filter_pairings(line_pairings,frame)
valid_pairings = test_pairings(filtered_pairings,frame)
#take smallest valid keyboard region:
bounding_lines = None
if len(valid_pairings) == 0:
raise Exception('No Valid Keyboard found within region')
else:
bounding_lines = valid_pairings[0]
corners = get_keyboard_corners(bounding_lines,frame.copy())
save_corners_to_file(corners)
perspective_matrix = get_perspective_mat(corners,target_dimensions)
return perspective_matrix
def get_lines(frame):
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#Canny parameters: (image, weak_thresh, strong_thresh)
edges = cv2.Canny(grey,70,150)
#Hough parameters: (edge_im, rho_resolution, theta_resolution, hough space threshold)
lines = cv2.HoughLines(edges, 1, np.pi/180, 180)
#convert to a list of (rho,theta) elements (no additional information required):
formatted_lines = []
for line in lines:
rho, theta = line[0]
formatted_lines.append((rho,theta))
return formatted_lines
def filter_horizontal_lines(lines):
filtered_lines = []
for (rho,theta) in lines:
if theta > np.pi/3 and theta < np.pi*2/3:
filtered_lines.append((rho,theta))
return filtered_lines
def group_lines(lines):
line_pairings = []
grouped_lines = []
num_lines = len(lines)
while len(grouped_lines) < num_lines:
ungrouped_lines = [x for x in lines if x not in grouped_lines]
base_line = ungrouped_lines[0]
rho_b, theta_b = base_line
grouped_lines.append(base_line)
for rho_c, theta_c in ungrouped_lines[1:]:
if abs(theta_c - theta_b) < np.pi/30:
line_pairings.append(((rho_b,theta_b),(rho_c,theta_c)))
return line_pairings
def filter_pairings(pairings,frame):
#remove pairings that are too close together at x=xmax/2 AND order within pairings such that higher line at xmax/2 is first):
filtered_pairings = []
height = frame.shape[0]
xmid = int(frame.shape[1] / 2)
# y = (r - xcos(theta)/sin(theta))
for (line1,line2) in pairings:
y1 = (line1[0] - xmid * np.cos(line1[1])) / np.sin(line1[1])
y2 = (line2[0] - xmid * np.cos(line2[1])) / np.sin(line2[1])
if abs(y1 - y2) > height / 70:
            #order the pairing from highest to lowest (N.B. OpenCV origin is at top left)
if y1 > y2:
filtered_pairings.append((line2,line1))
else:
filtered_pairings.append((line1,line2))
return filtered_pairings
def test_pairings(pairings,frame):
valid_pairings = []
height,width = frame.shape[:2]
for (line1,line2) in pairings:
#find intercepts at x=0 and x=xmax
yA = line1[0]/np.sin(line1[1])
yB = (line1[0] - width * np.cos(line1[1]))/np.sin(line1[1])
yC = line2[0]/np.sin(line2[1])
yD = (line2[0] - width * np.cos(line2[1]))/np.sin(line2[1])
#crop in to the lines and warp to make them parallel:
target_frame = np.array([ [0, 0], [999, 0], [999, 98], [0, 98]], dtype = "float32")
rect = np.zeros((4, 2), dtype = "float32")
rect[0], rect[1], rect[2], rect[3] = (0,yA), (width,yB), (width,yD), (0,yC)
p_mat = cv2.getPerspectiveTransform(rect,target_frame)
warped_frame = cv2.warpPerspective(frame.copy(), p_mat, (1000,99))
if brightness_test(warped_frame):
test_result, black_keys = black_key_test(warped_frame)
if test_result:
                #draw the black keys in pure white on the warped_frame (recovered later by thresholding at 254):
cv2.drawContours(warped_frame, black_keys, -1, (255,255,255), cv2.FILLED)
inverse_p_mat = cv2.getPerspectiveTransform(target_frame,rect)
valid_pairings.append((line1,line2,inverse_p_mat,warped_frame))
break
return valid_pairings
def brightness_test(transformed_frame):
#return bool, in this cropped and transformed_frame, is the bottom third lighter than both the middle and top thirds?
height, width = transformed_frame.shape[:2]
q1 = int(height/3)
q2 = int(height * 2/3)
top_third = transformed_frame[0:q1,0:width]
avg_color_per_row = np.average(top_third, axis=0)
avg_color_top_third = np.average(avg_color_per_row, axis=0)
middle_third = transformed_frame[q1:q2,0:width]
avg_color_per_row = np.average(middle_third, axis=0)
avg_color_middle_third = np.average(avg_color_per_row, axis=0)
bottom_third = transformed_frame[q2:height,0:width]
avg_color_per_row = np.average(bottom_third, axis=0)
avg_color_bottom_third = np.average(avg_color_per_row, axis=0)
#RGB -> Luma conversion:https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
Ltop = 0.375 * avg_color_top_third[0] + 0.5 * avg_color_top_third[1] + 0.16 * avg_color_top_third[2]
Lmid = 0.375 * avg_color_middle_third[0] + 0.5 * avg_color_middle_third[1] + 0.16 * avg_color_middle_third[2]
Lbot = 0.375 * avg_color_bottom_third[0] + 0.5 * avg_color_bottom_third[1] + 0.16 * avg_color_bottom_third[2]
if Lbot > Lmid and Lbot > Ltop:
return True
else:
return False
def black_key_test(transformed_frame):
grey = cv2.cvtColor(transformed_frame,cv2.COLOR_BGR2GRAY)
#exhaustive search of thresholds
lowThres, highThres = 0,254
maxBlackno, optimalThres = 0, 0
black_keys = None
while highThres > lowThres:
ret, dst = cv2.threshold(grey,lowThres,255,cv2.THRESH_BINARY_INV)
black_keys = custom_black_key_contours(dst)
if len(black_keys) >= maxBlackno and len(black_keys) <= 36:
maxBlackno = len(black_keys)
optimalThres = lowThres
lowThres += 1
ret, dst = cv2.threshold(grey,optimalThres,255,cv2.THRESH_BINARY_INV)
black_keys = custom_black_key_contours(dst)
if len(black_keys) == 36:
return True, black_keys
else:
return False, black_keys
def custom_black_key_contours(thresh_im):
#find the black key contours:
contours, hierarchy = cv2.findContours(thresh_im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#get median size of contours:
contour_sizes = []
for contour in contours:
area = cv2.contourArea(contour)
contour_sizes.append(area)
median_size = stats.median(contour_sizes)
#filter so that they are all roughly the same size and with valid aspect ratio and their bottom points aren't too low:
height, width = thresh_im.shape[:2]
filtered_contours = []
for contour in contours:
area = cv2.contourArea(contour)
x,y,w,h = cv2.boundingRect(contour)
aspect_ratio = float(w)/h
if (median_size / 2 < area < median_size * 2) and aspect_ratio < 0.3 and (y + h) < height * 3/4:
filtered_contours.append(contour)
return filtered_contours
def get_keyboard_corners(pairing,frame):
(line1,line2,inverse_p_mat,warped_frame) = pairing
height,width = frame.shape[:2]
#undo warping to find black_key contours with respect to original frame:
unwarped = cv2.warpPerspective(warped_frame.copy(), inverse_p_mat, (width,height))
ret, dst = cv2.threshold(cv2.cvtColor(unwarped,cv2.COLOR_BGR2GRAY),254,255,cv2.THRESH_BINARY)
black_keys, hierarchy = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#filter out any small artefacts/noise:
filtered_black_keys = []
total_bk_area = 0
for black_key in black_keys:
key_area = cv2.contourArea(black_key)
if key_area > 20:
total_bk_area += key_area
filtered_black_keys.append(black_key)
black_keys = filtered_black_keys
if len(black_keys) != 36:
raise Exception("Black Key lost for corner detection")
#order black keys from LHS to RHS:
black_keys = sorted(black_keys, key = lambda cnt: cv2.boundingRect(cnt)[0])
#draw a white line to connect the lowest and highest white keys:
left_most_point = np.array(list(black_keys[0][black_keys[0][:,:,0].argmin()][0]))
Bb6_centre = np.array(list(get_contour_centre(black_keys[-1])))
top_bk_separation = distance_between_points(get_contour_centre(black_keys[-2]),get_contour_centre(black_keys[-1]))
Top_line_point = Bb6_centre + top_bk_separation * (4/3) * (Bb6_centre - left_most_point)/(np.linalg.norm(Bb6_centre - left_most_point))
tmp = (int(Top_line_point[0]),int(Top_line_point[1]))
lmp = (left_most_point[0] - 1,left_most_point[1] + 1)
cv2.line(frame,lmp,tmp,(255,255,255),2)
#threshold the image to find a contour containing all the white keys:
#centre must be in valid range of middle of the keyboard:
#area must be greater than total bk area:
#get valid centre range:
x_range = (get_contour_centre(black_keys[15])[0], get_contour_centre(black_keys[25])[0])
y_max = max(tuple(black_keys[0][black_keys[0][:,:,1].argmax()][0])[1], tuple(black_keys[-1][black_keys[-1][:,:,1].argmax()][0])[1])
y_min = min(tuple(black_keys[0][black_keys[0][:,:,1].argmin()][0])[1], tuple(black_keys[-1][black_keys[-1][:,:,1].argmin()][0])[1])
y_range = (y_min, y_max)
grey = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
highThresh = 254
optimalThres = 0
while highThresh > 0:
ret, dst = cv2.threshold(grey,highThresh,255,cv2.THRESH_BINARY_INV)
white_key_contour = custom_white_key_contour(dst,x_range,y_range)
if white_key_contour is not None and total_bk_area * 3 < cv2.contourArea(white_key_contour) < total_bk_area * 3.15:
optimalThres = highThresh
highThresh -= 1
ret, dst = cv2.threshold(grey,optimalThres,255,cv2.THRESH_BINARY_INV)
white_key_contour = custom_white_key_contour(dst,x_range,y_range)
#draw this contour that covers all white keys in pure white on the frame:
cv2.drawContours(frame,[white_key_contour],-1,(255,255,255),cv2.FILLED)
#bounding rect for this contour:
x,y,w,h = cv2.boundingRect(white_key_contour)
corners = []
for origin_point in [(x,y), (x+w,y), (x+w,y+h), (x,y+h)]:
corner = closest_white_pixel(frame,origin_point)
corners.append((corner[0],corner[1]))
return corners
def custom_white_key_contour(thresh_im,x_range,y_range):
#xrange, yrange are range of contour centre that would be valid
(xmin, xmax) = x_range
(ymin, ymax) = y_range
contours, hierarchy = cv2.findContours(thresh_im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) == 0:
return None
else:
#take largest contour (in range) only:
largest = None
size_of_largest = 0
for contour in contours:
(Cx,Cy) = get_contour_centre(contour)
if (xmin < Cx < xmax) and (ymin < Cy < ymax) and cv2.contourArea(contour) > size_of_largest:
largest = contour
size_of_largest = cv2.contourArea(contour)
return largest
def closest_white_pixel(image,origin_point):
"""
Inputs: image (here taking in image with all keys masked in pure white and line connecting all keys), point in image from which to search
output: (x,y) of closest pixel of colour (255,255,255) to the point in the image
METHOD: starting with radius 1, search from the bounding box point for a pure white pixel,
if found, return this pixel position,
else, increment the search circle radius and try again
(N.B. if multiple white pixels found, take the one closest to the bounding box corner)
"""
(px,py) = origin_point
im_h, im_w, im_d = image.shape
#keyboard mask: keyboard in white, all else black
mask1 = cv2.inRange(image, (255,255,255), (255,255,255))
for radius in range(0,im_h):
#create a mask for the radius in which we are searching
circle_mask = np.zeros((im_h,im_w),dtype=image.dtype)
cv2.circle(circle_mask,(px,py),radius,(255,255,255),thickness=-1)
#combine this with the keyboard mask: combined_mask = all black except for any bit of keyboard that is within our search circle
combined_mask = cv2.bitwise_and(mask1,circle_mask)
formatted_mask = cv2.bitwise_and(image,image,mask=combined_mask)
grey = cv2.cvtColor(formatted_mask,cv2.COLOR_BGR2GRAY)
ret, dst = cv2.threshold(grey,254,255,cv2.THRESH_BINARY)
white_pixels = cv2.findNonZero(dst)
if white_pixels is not None:
closest_pixel = None
closest_pixel_distance = 10000
for white_point in white_pixels:
distance = distance_between_points(origin_point,white_point[0])
if distance < closest_pixel_distance:
closest_pixel_distance = distance
closest_pixel = white_point[0]
return closest_pixel
return (-1,-1)
def get_perspective_mat(corners,target_dimensions):
(target_width, target_height) = target_dimensions
target_frame = np.array([ [0, 0], [target_width - 1, 0], [target_width - 1, target_height - 1], [0, target_height - 1]], dtype = "float32")
rect = np.zeros((4, 2), dtype = "float32")
rect[0], rect[1], rect[2], rect[3] = corners[0], corners[1], corners[2], corners[3]
p_mat = cv2.getPerspectiveTransform(rect,target_frame)
#To transform using this matrix: warped_frame = cv2.warpPerspective(frame, p_mat, target_dimensions)
return p_mat
def save_corners_to_file(corners):
#save these corners to the transformation file:
transform_file = open('resources/saved_transformation.txt','w')
for (x,y) in corners:
transform_file.write("(" + str(x) + "," + str(y) + ") ")
transform_file.close()
def draw_lines(image,lines,colour):
image = image.copy()
for (rho,theta) in lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 2000*(-b))
y1 = int(y0 + 2000*(a))
x2 = int(x0 - 2000*(-b))
y2 = int(y0 - 2000*(a))
cv2.line(image, (x1,y1),(x2,y2), colour, 2)
return image
def get_contour_centre(cnt):
x,y,w,h = cv2.boundingRect(cnt)
Cx = int(x + w/2)
Cy = int(y + h/2)
return (Cx,Cy)
def distance_between_points(point1,point2):
(x1,y1) = point1
(x2,y2) = point2
dist = math.sqrt((x2-x1)**2 + (y2-y1)**2)
return dist
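# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module):
# applying the matrix from get_perspective_mat() with cv2.warpPerspective.
# The frame, corner coordinates and target size below are made-up placeholders.
def _demo_perspective_warp():
    demo_frame = np.zeros((480, 640, 3), dtype=np.uint8)
    demo_corners = [(100, 50), (500, 60), (510, 200), (95, 190)]  # TL, TR, BR, BL (hypothetical)
    target_dims = (400, 120)  # (width, height) of the rectified keyboard image
    p_mat = get_perspective_mat(demo_corners, target_dims)
    warped = cv2.warpPerspective(demo_frame, p_mat, target_dims)
    return warped.shape  # (120, 400, 3)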
|
|
"""
NLTK Word Frequency Summarization
Modified : Shashank
Original author: Akash P
"""
import nltk
import hashlib
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
from hashlib import sha224
from SummarizationInterface import SummarizationInterface
from pprint import pprint
class WordFrequencySummarization(SummarizationInterface):
def __init__(self, language="english", high_threshold_constant=1.35, low_threshold_constant=1.0, min_sentence_length=6):
self.language = language
self.high_threshold_constant = high_threshold_constant
self.low_threshold_constant = low_threshold_constant
self.stop_words = set(stopwords.words(self.language))
self.porter_stemmer = PorterStemmer()
self.min_sentence_length = min_sentence_length
def summarize(self, text, top_n=10):
word_freq_dict = self.get_frequency_table(text)
sentences = sent_tokenize(text)
sentence_scores = self.score_sentences(sentences, word_freq_dict)
threshold = self.get_average_sentence_score(sentence_scores)
summary = self.generate_summary(sentences, sentence_scores, self.high_threshold_constant * threshold)
if not self.is_summary_long(summary):
summary = self.generate_summary(sentences, sentence_scores, self.low_threshold_constant * threshold)
summary_sentences, _ = list(zip(*summary))
filtered_summary_sentences = self.filter_sentences(summary_sentences)
return filtered_summary_sentences
def is_summary_long(self, summary):
return len(summary) >= 3
def get_frequency_table(self, text):
words = word_tokenize(text)
words = self.filter_words(words)
word_freq_dict = dict(nltk.FreqDist(words))
return word_freq_dict
def filter_words(self, words_list):
words = [self.porter_stemmer.stem(word) for word in words_list]
words = [word for word in words if word not in self.stop_words]
return words
def filter_sentences(self, sentences_list):
return [sentence for sentence in sentences_list if self.get_sentence_length(sentence) >= self.min_sentence_length]
def get_sentence_hash(self, sentence):
return hashlib.sha224(sentence.encode('utf-8')).hexdigest()
def get_sentence_length(self, sentence):
return len(sentence.split(" "))
def score_sentences(self, sentences, word_freq_dict):
sentence_scores = dict()
for sentence in sentences:
sentence_hash = self.get_sentence_hash(sentence)
num_words_excluding_stopwords, sum_score = 0, 0
word_list = []
for word in word_freq_dict:
if word in sentence.lower():
num_words_excluding_stopwords += 1
sum_score += word_freq_dict[word]
word_list.append(word)
            # Guard against sentences that match none of the scored words (avoids ZeroDivisionError).
            sentence_scores[sentence_hash] = sum_score / num_words_excluding_stopwords if num_words_excluding_stopwords else 0.0
return sentence_scores
def get_average_sentence_score(self, sentence_scores):
return np.mean(list(sentence_scores.values()))
def generate_summary(self, sentences, sentence_scores, threshold):
summary = []
for sentence in sentences:
sentence_hash = self.get_sentence_hash(sentence)
if sentence_scores[sentence_hash] >= threshold:
summary.append((sentence, sentence_scores[sentence_hash]))
return summary
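# Hedged usage sketch (added for illustration, not part of the original file).
# Assumes the NLTK 'punkt' and 'stopwords' corpora are already downloaded,
# e.g. via nltk.download('punkt') and nltk.download('stopwords').
if __name__ == "__main__":
    sample_text = (
        "Natural language processing lets computers work with human language. "
        "Frequency based summarization scores each sentence by the frequencies "
        "of the stemmed non-stopword tokens it contains. "
        "Sentences whose score exceeds a multiple of the average score are kept in the summary. "
        "Sentences shorter than the minimum length are filtered out of the final result."
    )
    summarizer = WordFrequencySummarization(language="english")
    pprint(summarizer.summarize(sample_text))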
|
|
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
from numpy.core.fromnumeric import prod
from .autoencoder import utils
from .autoencoder.moco import builder
from .autoencoder.moco import loader
from .autoencoder.model_ae_moco import AutoEncoder
from .diffusion.functions.denoising import compute_alpha, generalized_steps
from .diffusion.models.diffusion import Model as Diffusion
def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
def sigmoid(x):
return 1 / (np.exp(-x) + 1)
if beta_schedule == "quad":
betas = (
np.linspace(
beta_start ** 0.5,
beta_end ** 0.5,
num_diffusion_timesteps,
dtype=np.float64,
)
** 2
)
elif beta_schedule == "linear":
betas = np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif beta_schedule == "const":
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1
betas = 1.0 / np.linspace(
num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
)
elif beta_schedule == "sigmoid":
betas = np.linspace(-6, 6, num_diffusion_timesteps)
betas = sigmoid(betas) * (beta_end - beta_start) + beta_start
else:
raise NotImplementedError(beta_schedule)
assert betas.shape == (num_diffusion_timesteps,)
return betas
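# Hedged illustration (added): a quick check of the "linear" schedule and the
# cumulative alpha product that DDPM/DDIM-style samplers are built on.
# The hyperparameters below are common defaults, not values taken from this repo.
def _demo_beta_schedule():
    betas = get_beta_schedule(
        "linear", beta_start=1e-4, beta_end=2e-2, num_diffusion_timesteps=1000
    )
    alphas_cumprod = np.cumprod(1.0 - betas)
    # alpha_bar decays monotonically from ~1 toward 0 as the timestep grows
    return betas.shape, float(alphas_cumprod[0]), float(alphas_cumprod[-1])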
class ModulationLayer(nn.Module):
def __init__(self, *shape):
super(ModulationLayer, self).__init__()
self.register_buffer("mean", torch.zeros(*shape))
self.register_buffer("std", torch.ones(*shape))
def forward(self, z):
return (z - self.mean) / self.std
def backward(self, z_):
return z_ * self.std + self.mean
class BoundaryInterpolationLayer(nn.Module):
def __init__(self, *shape):
super(BoundaryInterpolationLayer, self).__init__()
self.register_buffer("boundary", torch.randn(*shape))
def forward(self, z, steps):
return z - steps * self.boundary
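# Hedged sanity-check sketch (added): ModulationLayer.forward whitens a latent
# with the stored statistics and ModulationLayer.backward undoes it, so a round
# trip should reproduce the input. The shapes and values below are arbitrary examples.
def _demo_modulation_roundtrip():
    layer = ModulationLayer(1, 8, 1, 1)
    layer.mean.fill_(0.5)
    layer.std.fill_(2.0)
    z = torch.randn(4, 8, 16, 16)
    assert torch.allclose(layer.backward(layer(z)), z, atol=1e-6)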
class D2C(object):
def __init__(self, args, config):
super().__init__()
self.args = args
self.config = config
self.autoencoder_config = cae = config.autoencoder
self.diffusion_config = cdif = config.diffusion
arch_instance = utils.get_arch_cells(self.autoencoder_config.arch_instance)
def autoencoder_fn():
return AutoEncoder(self.autoencoder_config, None, arch_instance)
latent_dim = cae.latent_dim
dim_mlp_next = None
latent_channels = cae.num_latent_per_group
self.latent_size = (latent_channels, latent_dim, latent_dim)
self.autoencoder = builder.MoCo(
autoencoder_fn,
cae.moco_dim,
cae.moco_k,
cae.moco_m,
cae.moco_t,
False,
dim_mlp=prod(self.latent_size),
dim_mlp_next=dim_mlp_next,
).cuda()
self.diffusion = Diffusion(cdif).cuda()
self.latent_mod = ModulationLayer((1, latent_channels, 1, 1)).cuda()
self.betas = (
torch.from_numpy(
get_beta_schedule(
beta_schedule=cdif.diffusion.beta_schedule,
beta_start=cdif.diffusion.beta_start,
beta_end=cdif.diffusion.beta_end,
num_diffusion_timesteps=cdif.diffusion.num_diffusion_timesteps,
)
)
.cuda()
.float()
)
def load_state_dict(self, state_dict, inference=True):
if inference:
# MLP is not used in inference, so we do not load it.
for k in list(state_dict["autoencoder"].keys()):
if k.startswith("encoder_q.fc") or k.startswith("encoder_k.fc"):
del state_dict["autoencoder"][k]
else:
raise NotImplementedError("The class only supports inference.")
self.autoencoder.load_state_dict(state_dict["autoencoder"])
self.diffusion.load_state_dict(state_dict["diffusion"])
self.latent_mod.load_state_dict(state_dict["latent_mod"])
def image_to_latent(self, x):
x = x.cuda(non_blocking=True)
x = utils.pre_process(x.cuda(), self.autoencoder_config.num_x_bits)
z = self.autoencoder.encoder_q(x, get_latent=True, reshape=False)
return z
def latent_to_image(self, z):
logits = self.autoencoder.encoder_q(z, from_latent=True)
xdist = self.autoencoder.encoder_q.decoder_output(logits)
if isinstance(xdist, torch.distributions.Bernoulli):
x = xdist.mean()
else:
x = xdist.sample()
return x
def transform_latent(self, z_, seq):
z = generalized_steps(z_, seq, self.diffusion, self.betas, eta=0.0, last=True)
return z
def sample_latent(self, batch_size, skip=1):
num_timesteps = len(self.betas)
z_ = torch.randn(batch_size, *self.latent_size).cuda()
seq = list(range(0, num_timesteps - 1, skip)) + [num_timesteps - 1]
z = self.transform_latent(z_, seq)
z = self.latent_mod.backward(z)
return z
def postprocess_latent(self, z, seq):
z = self.latent_mod(z)
a = compute_alpha(
self.betas, seq[-1] * torch.ones(z.size(0)).long().to(z.device)
).to("cuda")
z_ = a.sqrt() * z + (1 - a).sqrt() * torch.randn_like(z)
z_post = self.transform_latent(z_, seq)
return self.latent_mod.backward(z_post)
def manipulate_latent(self, z, r_model, step):
new_z = r_model(z, step)
return new_z
def eval(self):
self.autoencoder.eval()
self.diffusion.eval()
self.latent_mod.eval()
|
|
# -*- coding: utf-8 -*-
"""test_resultsreconstruction
Tests that a single depletion step is carried out properly.
The entire sequence from cross section generation to depletion execution is
tested. Results are compared against pre-generated data using a different code.
Created on Thu Oct 28 08:59:44 2021 @author: Matt Krecicki
Last updated on Thu Oct 28 08:59:44 2021 @author: Matt Krecicki
"""
import pytest
import numpy as np
from pyIsoDep.functions.maindepletionsolver import MainDepletion
from pyIsoDep.functions.generatedata import TransmutationData
from pyIsoDep.functions.postprocessresults import Results
from pyIsoDep.tests.pregenerated_xs import flux, ID, N0, sig_c,\
sig_c2m, sig_n2n, sig_n3n, sig_f, compareNt
# -----------------------------------------------------------------------------
# DATA GENERATION
# -----------------------------------------------------------------------------
# Reset the data container
data = TransmutationData(libraryFlag=True, wgtFY=1.0)
# Feed cross sections into the container
data.ReadData(ID, sig_f=sig_f, sig_c=sig_c, sig_c2m=sig_c2m,
sig_n2n=sig_n2n, sig_n3n=sig_n3n, flagBarns=False)
def test_reconstruction():
"""Test that ensure results reconstruction is correct"""
# -------------------------------------------------------------------------
# DEPLETION
# -------------------------------------------------------------------------
dep = MainDepletion(0.0, data)
# define metadata (steps, flux, and so on)
dep.SetDepScenario(power=None, flux=[flux], timeUnits="seconds",
timesteps=[6.630851880276299780234694480896E+05],
timepoints=None)
# set initial composition
dep.SetInitialComposition(ID, N0, vol=1.0)
# solve the Bateman equations
dep.SolveDepletion(method="cram")
# Post depletion analysis
dep.DecayHeat()
dep.Radiotoxicity()
dep.Activity()
dep.Mass()
dep.Reactivity()
#export results to hdf5 file
res = Results(dep)
res.export("test.h5")
#reconstruct results from hdf5 file
res2 = Results("test.h5")
#compare exported and reconstructed results
assert res.N0 == pytest.approx(res2.N0, rel=0.001)
assert res.flagPower == res2.flagPower
assert res.flux == pytest.approx(res2.flux, rel=0.001)
assert res.totalQt == pytest.approx(res2.totalQt, rel=0.001)
assert res.decaymtx[25,:] == pytest.approx(res2.decaymtx[25,:], rel=0.001)
assert res.Nt[30,:] == pytest.approx(res2.Nt[30,:], rel=0.001)
assert res._xsDataSets[0.0][50,:] ==\
pytest.approx(res2._xsDataSets[0.0][50,:], rel=0.001)
def test_partial_reconstruction():
"""Test that ensure results reconstruction is correct"""
# -------------------------------------------------------------------------
# DEPLETION
# -------------------------------------------------------------------------
dep = MainDepletion(0.0, data)
# define metadata (steps, flux, and so on)
dep.SetDepScenario(power=None, flux=[flux], timeUnits="seconds",
timesteps=[6.630851880276299780234694480896E+05],
timepoints=None)
# set initial composition
dep.SetInitialComposition(ID, N0, vol=1.0)
# solve the Bateman equations
dep.SolveDepletion(method="cram")
# Post depletion analysis
dep.DecayHeat()
#dep.Radiotoxicity()
#dep.Activity()
#dep.Mass()
dep.Reactivity()
#export results to hdf5 file
res = Results(dep)
res.export("test2.h5")
#reconstruct results from hdf5 file
res2 = Results("test2.h5")
#compare exported and reconstructed results
assert res.N0 == pytest.approx(res2.N0, rel=0.001)
assert res.flagPower == res2.flagPower
assert res.flux == pytest.approx(res2.flux, rel=0.001)
assert res.totalQt == pytest.approx(res2.totalQt, rel=0.001)
assert res.decaymtx[25,:] == pytest.approx(res2.decaymtx[25,:], rel=0.001)
assert res.Nt[30,:] == pytest.approx(res2.Nt[30,:], rel=0.001)
assert res._xsDataSets[0.0][50,:] ==\
pytest.approx(res2._xsDataSets[0.0][50,:], rel=0.001)
test_partial_reconstruction()
|
|
#!/usr/bin/python
import glob
import math
import os
import shutil
import struct
import sys
import csv
import Queue
import thread
import subprocess
from optparse import OptionParser
from osgeo import gdalconst
from osgeo import gdal
from osgeo import osr
from numpy import *
import numpy as np
import utilities
def main(args_in):
"""
Process commandline Arguments,
Create an instance of PARC with the Variables,
    Kick off the parcFiles function of our PARC instance
"""
# Process command-line args.
usageStmt = "usage: %prog [options] <template image> <input dir or list of input files>"
desc = "This application projects, aggregates, resamples, and clips imagery."
parser = OptionParser(usage=usageStmt, description=desc)
parser.add_option("-l", dest="listMethodFlag", default=False, action="store_true", help="print the names of all known aggregation methods")
parser.add_option("-o", dest="out_dir", default="./", help="directory in which to put processed images, defaults to current directory")
parser.add_option("-v", dest="verbose", default=False, action="store_true", help="the verbose flag causes diagnostic output to print")
parser.add_option("-t", dest="templateRaster", help="The template raster used for projection, origin, cell size and extent")
parser.add_option("-i", dest="inputs_CSV", help="The CSV containing the list of files to process. Format is 'FilePath, Categorical, Resampling, Aggreagtion")
parser.add_option("-m", dest="multicore", default=True, help="'True', 'False' indicating whether to use multiple cores or not")
(options, args) = parser.parse_args(args_in)
ourPARC = PARC()
ourPARC.verbose = options.verbose
ourPARC.template = options.templateRaster
ourPARC.out_dir = options.out_dir
ourPARC.inputs_CSV = options.inputs_CSV
ourPARC.multicores = options.multicore
ourPARC.parcFiles()
class PARC:
'''
PARC: Project, Aggregate, Resample, Clip
The workflow on this beast is as follows:
For each dataset
    Step 1: Reproject the source raster into a tmp raster using
the projection info from the template and the method if
supplied or the default of nearest if not.
At this stage the tmp output will have a cell size about
the same as the input. We just use the default for this
setting.
Step 2: Aggregate the tmpRaster to have the same origin,
cell size and extent as our template.
'''
def __init__(self):
#instance level variables
self.verbose = False
self.template = ""
self.template_params = {}
self.out_dir = ""
self.inputs_CSV = ''
self.inputs = []
self.agg_methods = ['Min', 'Mean', 'Max', 'Majority']
self.resample_methods = ['NearestNeighbor', 'Bilinear', 'Cubic', 'CubicSpline', 'Lanczos']
self.logger = None
self.multicores = 'False'
self.module = None
def parcFiles(self):
'''
1: Parse the inputs_CSV into our inputs list
2: Make sure all of our instance variables are good and proper
3: Loop through the list of sourceImages and PARC each one.
4: The outputs will be stored in the output directory
5: Additionally an output CSV will be produced that lists the
inputs, parameters used, and outputs
'''
self.logger.writetolog("Starting PARC", True, True)
self.validateArgs()
self.logger.writetolog(" Arguments validated successfully", True, True)
if self.multicores.lower() in ['true', 'yes', 't', 'y', '1']:
self.processFilesMC()
else:
self.processFiles()
self.logger.writetolog("Finished PARC", True, True)
def processFiles(self):
# Clip and reproject each source image.
for image in self.inputs:
# Ensure source is different from template.
#if not os.path.samefile(template, image):
inPath, inFileName = os.path.split(image[0])
outFile, ext = os.path.splitext(inFileName)
outFile = os.path.join(self.out_dir, outFile + ".tif")
# os.path.samefile(image, outFile):
if os.path.exists(outFile) and \
os.path.abspath(image[0]) == os.path.abspath(outFile):
baseName, extension = os.path.splitext(outFile)
outFile = baseName + "-PARC.tif"
if os.path.abspath(self.template) != os.path.abspath(image[0]):
self.parcFile(image, outFile)
elif os.path.abspath(self.template) == os.path.abspath(image[0]):
shutil.copyfile(self.template, outFile)
def processFilesMC(self):
'''This function has the same functionality as parcFiles
with the addition of utilizing multiple cores to do the processing.
'''
results= Queue.Queue()
process_count= 0
for image in self.inputs:
# Ensure source is different from template.
#if not os.path.samefile(template, image):
inPath, inFileName = os.path.split(image[0])
outFile, ext = os.path.splitext(inFileName)
outFile = os.path.join(self.out_dir, outFile + ".tif")
# os.path.samefile(image, outFile):
if os.path.exists(outFile) and \
os.path.abspath(image[0]) == os.path.abspath(outFile):
baseName, extension = os.path.splitext(outFile)
outFile = baseName + "-PARC.tif"
if os.path.abspath(self.template) != os.path.abspath(image[0]):
image_short_name = os.path.split(image[0])[1]
args = '-s ' + '"' + os.path.abspath(image[0]) + '"'
args += ' -c ' + '"' + image[1] + '"'
args += ' -d ' + os.path.abspath(outFile)
args += ' -t ' + os.path.abspath(self.template)
args += ' -r ' + image[2]
args += ' -a ' + image[3]
execDir = os.path.split(__file__)[0]
executable = os.path.join(execDir, 'singlePARC.py')
pyEx = sys.executable
command = ' '.join([pyEx, executable, args])
self.logger.writetolog(command, False, False)
proc = subprocess.Popen( command )
thread.start_new_thread(utilities.process_waiter,
(proc, image_short_name, results))
process_count+= 1
while process_count > 0:
description, rc= results.get()
if rc == 0:
if self.verbose:
msg = " " + description + " finished successfully: " + \
str(len(self.inputs) - process_count + 1) + " done out of " \
+ str(len(self.inputs))
self.logger.writetolog(msg, True, True)
else:
self.logger.writetolog("There was a problem with: " + description , True, True)
process_count-= 1
self.logger.writetolog("Finished PARC", True, True)
def parcFile(self, source, dest):
"""
Processes a single file
"""
gdal.UseExceptions()
shortName = os.path.split(os.path.splitext(source[0])[0])[1]
self.logger.writetolog(" Starting processing of " + source[0])
sourceParams = self.getRasterParams(source[0])
gdalType = None
if source[2].lower() == "nearestneighbor":
gdalType = gdalconst.GRA_NearestNeighbour
if source[2].lower() == "bilinear":
gdalType = gdalconst.GRA_Bilinear
if source[2].lower() == "cubic":
gdalType = gdalconst.GRA_Cubic
if source[2].lower() == "cubicspline":
gdalType = gdalconst.GRA_CubicSpline
if source[2].lower() == "lanczos":
gdalType = gdalconst.GRA_Lanczos
if gdalType == None:
self.logger.writetolog(" Specified resampling method (" + source[2] + ") not one of 'NearestNeighbor', 'Bilinear', 'Cubic', 'CubicSpline', or 'Lanczos'. Defaulting to 'NearestNeighbor'")
gdalType = gdalconst.GRA_NearestNeighbour
        #Open gdal dataset of the source to pull some values from
srcDs = gdal.Open(source[0])
cellRatio = self.getTemplateSRSCellSize(sourceParams)/self.template_params["xScale"]
msg = " ratio of source cell size to template cell size = " + str(cellRatio)
msg += " template cell size = " + str(self.template_params["xScale"])
msg += " " + shortName + " cell size = " + str(self.getTemplateSRSCellSize(sourceParams))
self.writetolog(msg)
if cellRatio > 0.5:
#The source cell size is close enough to our template cell size,
#or smaller so
#that all we need to do is reproject and resample.
self.logger.writetolog(" cell ratio > .5: reprojecting and resampling to template parameters only")
self.reprojectRaster(srcDs, sourceParams, self.template_params, dest,
gdalType, shortName, self.template_params["xScale"])
else:
#Our Target cell size is much bigger than our source we need to do
#some aggregation to make things work.
msg = ' cell ratio <= .5: reprojecting and resampling to template parameters'
msg += ' then aggregating the reprojected raster to match template parameters'
self.writetolog(msg)
targetCellSize, numSourcePerTarget = self.getAggregateTargetCellSize(sourceParams)
tmpOutput = os.path.join(os.path.dirname(dest), "tmp_" + os.path.basename(dest))
self.reprojectRaster(srcDs, sourceParams, self.template_params,
tmpOutput, gdalType, shortName, targetCellSize)
self.writetolog(" Starting on Aggregating: " + shortName)
tmpOutput2 = os.path.splitext(tmpOutput)[0] + ".tif"
self.Aggregate(tmpOutput2, dest,
sourceParams, self.template_params,
source[3], numSourcePerTarget)
try:
os.remove(tmpOutput2)
except WindowsError:
pass
def getTemplateSRSCellSize(self, sourceParams):
"""
Calculate what size our source image pixels would be in the template SRS
"""
#first convert our template origin into the source srs
tOriginX, tOriginY = self.transformPoint(self.template_params["west"], self.template_params["north"],
self.template_params["srs"], sourceParams["srs"])
#next add the source xScale to the converted origin x and convert that back to template srs
tOriginX1 = self.transformPoint (tOriginX + sourceParams["xScale"], tOriginY,
sourceParams["srs"], self.template_params["srs"])[0]
# templateCellXCorner1 = (self.template_params["west"], self.template_params["north"],
# self.template_params["srs"], sourceParams["srs"])[0]
#
# targetCellXCorner1 = (sourceParams["west"], sourceParams["north"],
# sourceParams["srs"], self.template_params["srs"])[0]
# targetCellXCorner2 = self.transformPoint(sourceParams["west"] + sourceParams["xScale"],
# sourceParams["north"], sourceParams["srs"], self.template_params["srs"])[0]
templateSRSCellSize = abs(abs(tOriginX1) - abs(self.template_params["west"]))
return templateSRSCellSize
def getAggregateTargetCellSize(self, sourceParams):
"""
This function determines the appropriate cell size to
reproject/resample our source raster into before
aggregating.
This size is the cell size that results in a template
cell containing a whole number of cells which are as
close as possible to the cell dimension that would
result if you reprojected the source cells into the
target srs without changing cell size.
"""
#first determine what cell size we are going to use for the initial reproject/resample
#step 1: Determine the native cell size in the template coordinate system.
templateSRSCellSize = self.getTemplateSRSCellSize(sourceParams)
#step 2: round this up or down to an even fraction of the template cell size
# for example source = 30, target = 250 resampledSource = 250/round(250/30)
sourcePixelsPerTarget = round(self.template_params["xScale"]/templateSRSCellSize)
nearestWholeCellSize = (self.template_params["xScale"] /
sourcePixelsPerTarget)
return nearestWholeCellSize, sourcePixelsPerTarget
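    # Hedged worked example of the calculation above (illustration only):
    # with a 30 m source cell and a 250 m template cell,
    #   sourcePixelsPerTarget = round(250 / 30) = round(8.33) = 8
    #   nearestWholeCellSize  = 250 / 8 = 31.25 m
    # so the source is first resampled to 31.25 m and each template cell is then
    # aggregated from an 8 x 8 block of the resampled raster.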
def Aggregate(self, inFile, outFile, sourceParams, templateParams, method=None, numSourcePerTarget=10):
sourceDs = gdal.Open(inFile, gdalconst.GA_ReadOnly)
sourceBand = sourceDs.GetRasterBand(1)
tmpOutput = os.path.splitext(outFile)[0] + ".tif"
tmpOutDataset = self.generateOutputDS(sourceParams, templateParams, tmpOutput)
outBand = tmpOutDataset.GetRasterBand(1)
rows = int(sourceParams["height"])
cols = int(sourceParams["width"])
row = 0
col = 0
pcntDone = 0.0
if self.verbose:
print " % Done: 0.0",
while row < templateParams["width"]:
while col < templateParams["height"]:
sourceRow = row * numSourcePerTarget
sourceCol = col * numSourcePerTarget
#kernel = self.getKernel(sourceRow, sourceCol, numSourcePerTarget, sourceDs)
kernel = sourceDs.GetRasterBand(1).ReadAsArray(int(sourceRow),
int(sourceCol),
int(numSourcePerTarget),
int(numSourcePerTarget))
#convert kernel values of our nodata to nan
ndMask = ma.masked_array(kernel, mask=(kernel==sourceParams["NoData"]))
#print kernel
if method == "Min":
ans = ndMask.min()
elif method == "Max":
ans = ndMask.max()
elif method == "Majority":
# ndMask = ndMask.flatten()
uniques = np.unique(ndMask)
curMajority = -3.40282346639e+038
for val in uniques:
numOccurances = (array(ndMask)==val).sum()
if numOccurances > curMajority:
ans = val
curMajority = numOccurances
# histogram = np.histogram(ndMask, uniques)
# ans = histogram[1][histogram[0].argmax()]
else:
ans = ndMask.mean()
# print ndMask
# print ans
#special case real ugly
if ans < 0 and sourceParams["signedByte"]:
ans = ans + 255
ansArray = empty([1, 1])
if type(ans) == ma.core.MaskedArray:
ansArray[0, 0] = sourceParams["NoData"]
else:
ansArray[0, 0] = ans
outBand.WriteArray(ansArray, row, col)
col += 1
row += 1
col = 0
if self.verbose:
if float(row)/templateParams["width"] > float(pcntDone)/100:
pcntDone += 2.5
if int(pcntDone) % 10 == 0:
print str(pcntDone),
else:
print ".",
if self.verbose:
print "Done"
# if self.verbose:
# print "Done\nSaving to ASCII format"
#
# driver = gdal.GetDriverByName("AAIGrid")
# driver.Register()
#
# dst_ds = driver.CreateCopy(outFile, tmpOutDataset, 0)
# if self.verbose:
# print " Finished Saving ", self.shortName
dst_ds = None
tmpOutDataset=None
def getRasterParams(self, rasterFile):
"""
Extracts properties from a passed raster
All values are stored in a dictionary which is returned.
If errors are encountered along the way the error messages will
be returned as a list in the 'Error' element.
"""
try:
            #initialize our params dictionary to have None for all params
params = {}
allRasterParams = ["Error", "xScale", "yScale", "width", "height",
"east", "north", "west", "south",
"tEast", "tNorth", "tWest", "tSouth",
"gEast", "gNorth", "gWest", "gSouth",
"Wkt", "srs", "gt", "prj", "NoData", "PixelType", "file_name"]
for param in allRasterParams:
params[param] = None
params["Error"] = []
params["file_name"] = rasterFile
# Get the PARC parameters from the rasterFile.
dataset = gdal.Open(rasterFile, gdalconst.GA_ReadOnly)
if dataset is None:
params["Error"].append("Unable to open file")
return params
#print "Unable to open " + rasterFile
#raise Exception, "Unable to open specifed file " + rasterFile
xform = dataset.GetGeoTransform()
params["xScale"] = xform[1]
params["yScale"] = xform[5]
params["width"] = dataset.RasterXSize
params["height"] = dataset.RasterYSize
params["west"] = xform[0]
params["north"] = xform[3]
params["east"] = params["west"] + params["width"] * params["xScale"]
params["south"] = params["north"] + params["height"] * params["yScale"]
try:
wkt = dataset.GetProjection()
params["gt"] = dataset.GetGeoTransform()
params["prj"] = dataset.GetProjectionRef()
params["srs"] = osr.SpatialReference(wkt)
if wkt == '':
params["Error"].append("Undefined projection")
else:
if rasterFile == self.template:
params["tWest"], params["tNorth"] = params["west"], params["north"]
params["tEast"], params["tSouth"] = params["east"], params["south"]
elif params["srs"].ExportToWkt() == self.template_params["srs"].ExportToWkt():
params["tWest"], params["tNorth"] = params["west"], params["north"]
params["tEast"], params["tSouth"] = params["east"], params["south"]
else:
try:
params["tWest"], params["tNorth"] = self.transformPoint(params["west"], params["north"], params["srs"], self.template_params["srs"])
params["tEast"], params["tSouth"] = self.transformPoint(params["east"], params["south"], params["srs"], self.template_params["srs"])
except:
params["Error"].append("Could not transform extent coordinates to template spatial reference")
#params["Error"] = "We ran into problems converting projected coordinates to template for " + rasterFile
try:
geographic = osr.SpatialReference()
geographic.ImportFromEPSG(4326)
params["gWest"], params["gNorth"] = self.transformPoint(params["west"], params["north"], params["srs"], geographic)
params["gEast"], params["gSouth"] = self.transformPoint(params["east"], params["south"], params["srs"], geographic)
except:
pass
except:
#print "We ran into problems getting the projection information for " + rasterFile
params["Error"].append("Undefined problems extracting the projection information")
try:
params["signedByte"] = dataset.GetRasterBand(1).GetMetadata('IMAGE_STRUCTURE')['PIXELTYPE'] == 'SIGNEDBYTE'
except KeyError:
params["signedByte"] = False
params["NoData"] = dataset.GetRasterBand(1).GetNoDataValue()
if params["NoData"] == None:
if dataset.GetRasterBand(1).DataType == 1:
print "Warning: Could not extract NoData value. Using assumed nodata value of 255"
params["NoData"] = 255
elif dataset.GetRasterBand(1).DataType == 2:
print "Warning: Could not extract NoData value. Using assumed nodata value of 65536"
params["NoData"] = 65536
elif dataset.GetRasterBand(1).DataType == 3:
print "Warning: Could not extract NoData value. Using assumed nodata value of 32767"
params["NoData"] = 32767
elif dataset.GetRasterBand(1).DataType == 4:
print "Warning: Could not extract NoData value. Using assumed nodata value of 2147483647"
params["NoData"] = 2147483647
elif dataset.GetRasterBand(1).DataType == 5:
print "Warning: Could not extract NoData value. Using assumed nodata value of 2147483647"
params["NoData"] = 2147483647
elif dataset.GetRasterBand(1).DataType == 6:
print "Warning: Could not extract NoData value. Using assumed nodata value of -3.40282346639e+038"
params["NoData"] = -3.40282346639e+038
else:
params["Error"].append("Could not identify nodata value")
params["PixelType"] = dataset.GetRasterBand(1).DataType
if params["PixelType"] == None:
params["Error"].append("Could not identify pixel type (bit depth)")
except:
#print "We ran into problems extracting raster parameters from " + rasterFile
params["Error"].append("Some untrapped error was encountered")
finally:
del dataset
return params
def transformPoint(self, x, y, from_srs, to_srs):
"""
Transforms a point from one srs to another
"""
coordXform = osr.CoordinateTransformation(from_srs, to_srs)
yRound = round(y, 4)
xRound = round(x, 4)
result = coordXform.TransformPoint(xRound, yRound)
gx = result[0]
gy = result[1]
return gx, gy
def ImageCoversTemplate(self, sourceParams):
"""
Checks to see if the template images
falls completely inside the source raster
it does this by generating a list of 16 coordinate
pairs equally distributed across the template,
including the four absolute corners.
These points are in the CRS of the image.
If all of these points have a valid data or nodata
value in the image, then the image covers the template.
(in nearly every case anyway)
"""
n = 5
xOffset = (self.template_params["east"] - self.template_params["west"]) / (n) - \
((self.template_params["east"] - self.template_params["west"]) / self.template_params["width"] / 1000)
yOffset = (self.template_params["north"] - self.template_params["south"]) / (n) - \
((self.template_params["north"] - self.template_params["south"]) / self.template_params["height"] / 1000)
curX = self.template_params["west"]
curY = self.template_params["north"]
testPoints =[]
for x in range(n + 1):
for y in range(n + 1):
testPoints.append(self.transformPoint(curX, curY, self.template_params["srs"],
sourceParams["srs"]))
curY -= yOffset
curX += xOffset
curY = self.template_params["north"]
rasterDS = gdal.Open(sourceParams["file_name"], gdalconst.GA_ReadOnly)
band = rasterDS.GetRasterBand(1)
badPoint = False
for point in testPoints:
try:
xOffset = int((point[0] - sourceParams["west"]) / sourceParams["xScale"])
yOffset = int((point[1] - sourceParams["north"]) / sourceParams["yScale"])
data = band.ReadAsArray(xOffset, yOffset, 1, 1)
value = data[0,0]
except:
badPoint = True
#if valid values were returned from each of our points then
#the template falls entirely within the Source image.
if badPoint:
return False
else:
return True
def validateArgs(self):
"""
Make sure the user sent us some stuff we can work with
"""
if not os.path.exists(self.out_dir):
raise utilities.TrappedError("Specified Output directory " + self.out_dir + " not found on file system")
if not os.path.isdir(self.out_dir):
raise utilities.TrappedError("Specified Output directory " + self.out_dir + " is not a directory")
if self.logger is None:
self.logger = utilities.logger(self.out_dir, self.verbose)
self.writetolog = self.logger.writetolog
# Validate template image.
if self.template is None:
raise utilities.TrappedError("template raster not provided.")
if not os.path.exists(self.template):
raise utilities.TrappedError("Template file, " + self.template + ", does not exist on file system")
self.template_params = self.getRasterParams(self.template)
if len(self.template_params["Error"]) <> 0:
raise utilities.TrappedError("There was a problem with the provided template: \n " +
" " + "\n ".join(self.template_params["Error"]))
# Ensure the template has square pixels.
if abs(abs(self.template_params["xScale"]) - abs(self.template_params["yScale"])) > 1e-6:
raise utilities.TrappedError("template image must have square pixels." +
"/n x pixel scale = " + str(xScale) +
"/n y pixel scale = " + str(yScale))
#Validate input rasters
if not os.path.exists(self.inputs_CSV):
raise utilities.TrappedError("Inputs CSV, " + self.inputs_CSV + ", does not exist on file system.")
inputsCSV = csv.reader(open(self.inputs_CSV, 'r'))
header = inputsCSV.next()
strInputFileErrors = ""
outputCSV = os.path.join(self.out_dir, "PARC_Files.csv")
output = csv.writer(open(outputCSV, "wb"))
output.writerow(["PARCOutputFile", "Categorical", "Resampling", "Aggregation", "OriginalFile", os.path.abspath(self.template), os.path.abspath(self.out_dir)])
for row in inputsCSV:
inputFile = row[0]
sourceParams = self.getRasterParams(inputFile)
if len(sourceParams["Error"]) > 0:
strInputFileErrors += (" " + os.path.split(inputFile)[1] + " had the following errors:\n" +
" " + "\n ".join(sourceParams["Error"])) + "\n"
else:
pass
if not self.ImageCoversTemplate(sourceParams):
strInputFileErrors += ("\n Some part of the template image falls outside of " + os.path.split(inputFile)[1])
strInputFileErrors += "\n template upper left = (" + str(self.template_params["gWest"]) + ", " + str(self.template_params["gNorth"]) + ")"
strInputFileErrors += "\n template lower right = (" + str(self.template_params["gEast"]) + ", " + str(self.template_params["gSouth"]) + ")"
strInputFileErrors += "\n image upper left = (" + str(sourceParams["gWest"]) + ", " + str(sourceParams["gNorth"]) + ")"
strInputFileErrors += "\n image lower right = (" + str(sourceParams["gEast"]) + ", " + str(sourceParams["gSouth"]) + ")"
# strInputFileErrors += "\n points are given in projected coordinates."
# strInputFileErrors += "\n template upper left = (" + str(self.template_params["tWest"]) + ", " + str(self.template_params["tNorth"]) + ")"
# strInputFileErrors += "\n template lower right = (" + str(self.template_params["tEast"]) + ", " + str(self.template_params["tSouth"]) + ")"
# strInputFileErrors += "\n image upper left = (" + str(sourceParams["tWest"]) + ", " + str(sourceParams["tNorth"]) + ")"
# strInputFileErrors += "\n image lower right = (" + str(sourceParams["tEast"]) + ", " + str(sourceParams["tSouth"]) + ")"
# strInputFileErrors += "\n Note: points are given in the template coordinates." + "\n"
#
if len(row) < 2 or not row[1] in ['0', '1']:
self.writetolog(" " + os.path.split(inputFile)[1] + " categorical either missing or not 0 or 1:\n Defaulting to 0 (continuous)")
if len(row) < 2:
row.append('0')
else:
row[1] = '0'
if len(row) < 3 or not row[2].lower() in [item.lower() for item in self.resample_methods]:
self.writetolog(" " + os.path.split(inputFile)[1] + " resample method either missing or not one of " +
", ".join(self.resample_methods) + "\n Defaulting to 'Bilinear'")
if row[1] == '0':
default = 'Bilinear'
else:
default = 'NearestNeighbor'
if len(row) < 3:
row.append(default)
else:
row[2] = default
if len(row) < 4 or not row[3].lower() in [item.lower() for item in self.agg_methods]:
self.writetolog(" " + os.path.split(inputFile)[1] + " aggregation method either missing or not one of " +
", ".join(self.agg_methods) + "\n Defaulting to 'Mean'")
if row[1] == '0':
default = 'Mean'
else:
default = 'Majority'
if len(row) < 4:
row.append(default)
else:
row[3] = default
self.inputs.append(row)
#also write the output row, reconfigured to our output file
fileName = self.getShortName(row[0])
fileName = os.path.abspath(os.path.join(self.out_dir, fileName + ".tif"))
outputrow = [fileName] + row[1:4] + [os.path.abspath(row[0]), os.path.abspath(self.out_dir)]
output.writerow(outputrow)
del output
if strInputFileErrors <> "":
self.writetolog(strInputFileErrors)
raise utilities.TrappedError("There was one or more problems with your input rasters: \n" + strInputFileErrors)
def reprojectRaster(self, srcDs, sourceParams, templateParams,
destFile, resamplingType, shortName='', outputCellSize = None):
"""
Reprojects a raster to match the template_params
if outputCellSize is not provided defaults to the template cellSize
"""
# driver = gdal.GetDriverByName("AAIGrid")
# driver.Register()
tmpOutput = os.path.splitext(destFile)[0] + ".tif"
tmpOutDataset = self.generateOutputDS(sourceParams, templateParams, tmpOutput, outputCellSize)
self.writetolog(" Starting intermediate reprojection of: " + shortName)
err = gdal.ReprojectImage(srcDs, tmpOutDataset, sourceParams["srs"].ExportToWkt(),
templateParams["srs"].ExportToWkt(), resamplingType)
# dst_ds = driver.CreateCopy(destFile, tmpOutDataset, 0)
self.writetolog(" Finished reprojection " + shortName)
dst_ds = None
tmpOutDataset = None
def generateOutputDS(self, sourceParams, templateParams,
tmpOutput, outputCellSize = None):
"""
Creates an output dataset (tiff format) that
has the nodata value of the sourceParams but
all other attributes from the template_params
This output is saved to tmpOutput.
The optional cell size will override the cell size
specified in the template_params
"""
tifDriver = gdal.GetDriverByName("GTiff")
if outputCellSize == None:
width = templateParams["width"]
height = templateParams["height"]
else:
width = templateParams["width"] * int(templateParams["xScale"]/outputCellSize)
height = templateParams["height"] * int(templateParams["xScale"]/outputCellSize)
if sourceParams["signedByte"]:
tmpOutDataset = tifDriver.Create(tmpOutput,
width,
height,
1, sourceParams["PixelType"], ["PIXELTYPE=SIGNEDBYTE"])
else:
tmpOutDataset = tifDriver.Create(tmpOutput,
width,
height,
1, sourceParams["PixelType"])
if outputCellSize == None:
outputCellSize = templateParams["xScale"]
gtList = list(templateParams["gt"])
if templateParams["xScale"] < 0:
gtList[1] = -1 * outputCellSize
else:
gtList[1] = outputCellSize
if templateParams["yScale"] < 0:
gtList[5] = -1 * outputCellSize
else:
gtList[5] = outputCellSize
gt = tuple(gtList)
tmpOutDataset.SetGeoTransform(gt)
tmpOutDataset.SetProjection(templateParams["prj"])
tmpOutDataset.GetRasterBand(1).SetNoDataValue(sourceParams["NoData"])
if sourceParams["signedByte"]:
#tmpOutDataset.GetRasterBand(1).SetMetadataItem('PIXELTYPE', "SIGNEDBYTE")
tmpOutDataset.GetRasterBand(1).PixelType = "SIGNEDBYTE"
tmpOutDataset.GetRasterBand(1).SetMetadata({'PIXELTYPE': 'SIGNEDBYTE'}, 'IMAGE_STRUCTURE')
# if self.verbose:
# print tmpOutput
# print "noDataValue = ", tmpOutDataset.GetRasterBand(1).GetNoDataValue()
# print "Pixel type = ", gdal.GetDataTypeName(tmpOutDataset.GetRasterBand(1).DataType)
return tmpOutDataset
def getShortName(self, fullPathName):
if fullPathName.endswith('hdr.adf'):
shortname = os.path.split(fullPathName)[0]
shortname = os.path.split(shortname)[1]
else:
shortname = os.path.split(fullPathName)[1]
shortname = os.path.splitext(shortname)[0]
return shortname
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
# try:
## PARC().testing()
# sys.exit(PARC().main(sys.argv[1:]))
# except Exception as e:
# print e
# sys.exit(1)
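# Hedged usage note (added): with the options defined in main() above, a typical
# invocation of this script would look something like the line below. The file
# names are placeholders, not files shipped with this project:
#   python <this_script>.py -t template.tif -i inputs.csv -o ./parc_out -v
# where inputs.csv lists one raster per row as
#   FilePath, Categorical (0 or 1), Resampling, Aggregation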
|
|
import os, sys
import cv2
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from qgis.gui import QgsMapCanvas, QgsMapToolPan, QgsMapToolZoom, QgsMapToolIdentify
from qgis.core import QgsProject, QgsApplication, QgsVectorLayer, QgsRasterLayer
from Dlg_unsupervised import Ui_Dlg_unsupervised
import gdal
import qdarkstyle
class Func_unsupervised_class(QDialog,Ui_Dlg_unsupervised):
mySignal=pyqtSignal(int)
# result=""
def __init__(self,layers):
print("init")
super(Func_unsupervised_class,self).__init__()
self.setupUi(self)
        # Convert the layers into OpenCV-compatible numpy arrays
self.bandName = []
self.bands = []
self.projections = []
self.geoTransform = []
for i in range(0, len(layers)):
tempCount = layers[i].bandCount()
tempName = layers[i].name()
tempDs = gdal.Open(layers[i].source())
for j in range(1, tempCount + 1):
self.bandName.append(tempName + '@' + str(j))
tempband = np.array(tempDs.GetRasterBand(j).ReadAsArray())
self.bands.append(tempband)
self.projections.append(tempDs.GetProjection())
self.geoTransform.append(tempDs.GetGeoTransform())
        # Populate the band selection combo boxes
self.comboBox_r.clear()
self.comboBox_g.clear()
self.comboBox_b.clear()
for i in range(0, len(self.bandName)):
self.comboBox_r.addItem(self.bandName[i], i)
self.comboBox_g.addItem(self.bandName[i], i)
self.comboBox_b.addItem(self.bandName[i], i)
# print(layer)
self.pushButton.clicked.connect(self.action)
def action(self):
self.Func_unsupervised_def()
self.mySignal.emit(1)
def Func_unsupervised_def(self):
        # Unsupervised classification: k-means clustering on the selected bands
index_b = self.comboBox_b.currentIndex()
index_g = self.comboBox_g.currentIndex()
index_r = self.comboBox_r.currentIndex()
img = cv2.merge([self.bands[index_b], self.bands[index_g], self.bands[index_r]])
        max_iter = int(self.lineEdit.text())  # number of iterations
        epsilon = float(self.lineEdit_2.text())  # required accuracy (epsilon)
        K = int(self.lineEdit_3.text())  # final number of clusters
Z = img.reshape((-1, 3))
Z = np.float32(Z)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, max_iter, epsilon)
ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
output_1 = res2
#cv2.imshow('res2', res2)
cv2.imwrite('processImage/result_unsupervised.tif', output_1)
self.result = QgsRasterLayer("processImage/result_unsupervised.tif", "result_unsupervised")
self.close()
if __name__ == '__main__':
qgs = QgsApplication([], True)
qgs.setPrefixPath('qgis', True)
    # Start QGIS
qgs.initQgis()
layers = []
app = QApplication(sys.argv)
rlayer = QgsRasterLayer("C:/Users/22814/Desktop/1.tif", "city")
layers.append(rlayer)
a=Func_unsupervised_class(layers)
a.exec_()
exit_code = qgs.exec_()
qgs.exitQgis()
sys.exit(exit_code)
# sys.exit(app.exec_())
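# Hedged standalone sketch (added): the core of Func_unsupervised_def above is
# OpenCV k-means clustering on flattened pixel values. The snippet below
# reproduces just that step on a synthetic image, independent of QGIS/GDAL;
# the image, K and termination criteria are arbitrary illustration values.
def _demo_kmeans_on_image():
    demo = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    Z = np.float32(demo.reshape((-1, 3)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(Z, 4, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    clustered = np.uint8(center)[label.flatten()].reshape(demo.shape)
    return clustered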
|
|
import numpy as np
from collections import defaultdict
def pairwise_view(target_station, next_station, mismatch='error'):
if target_station is None or next_station is None:
        raise ValueError("The data is empty.")
if target_station.shape != next_station.shape:
return None # ValueError("Paired station mismatched")
return ViewDefinition(y=target_station, x=next_station)
def multipair_view(target_station, stations):
"""
Args:
target_station:
stations:
Returns:
"""
assert all(target_station.shape == n_station.shape for n_station in stations)
dt = np.hstack(stations)
return ViewDefinition(y=target_station, x=dt)
class View(object):
def __init__(self):
self.X = None
self.y = None
self.label = None
self.view_metadata = defaultdict()
def make_view(self, target_station, k_stations):
        raise NotImplementedError
def to_json(self):
        raise NotImplementedError
@classmethod
def from_json(cls, json_file):
        raise NotImplementedError
class ViewDefinition:
"""
View definition format.
"""
def __init__(self, name=None, label=None, x=None, y=None):
self.name = name
self.label = label
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
self.x = x
self.y = y
class ViewFactory:
@staticmethod
def get_view(view_type):
if view_type == 'PairwiseView':
return PairwiseView()
class PairwiseView(View):
def __init__(self, variable=None):
self.__name__ = "PairwiseView"
super(PairwiseView, self).__init__()
self.variable = variable
def make_view(self, target_station, k_stations):
len_series = target_station.shape[0]
# Check dimension mismatch.
if not all([len_series == value.shape[0] for value in k_stations.values()]):
raise ValueError("Dimension mismatch b/n target station and one of the k stations")
tuples_list = [target_station] + list(k_stations.values())
dt = np.hstack(tuples_list)
vw = ViewDefinition(name=self.__name__, label=k_stations.keys(),
x=dt[:, 1:], y=dt[:, 0:1])
return vw
def to_json(self):
view_config = {"variable": self.variable}
return view_config
    @classmethod
    def from_json(cls, json_file):
variable = json_file["variable"]
pwv = PairwiseView(variable=variable)
return pwv
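# Hedged usage sketch (added, not part of the original module): building a
# PairwiseView from a target series and two neighbouring stations. The arrays
# are random placeholders of shape (n_samples, 1), matching what make_view expects.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    target = rng.rand(100, 1)
    neighbours = {"station_a": rng.rand(100, 1), "station_b": rng.rand(100, 1)}
    view = ViewFactory.get_view("PairwiseView").make_view(target, neighbours)
    print(view.x.shape, view.y.shape)  # (100, 2) (100, 1)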
|
|
# import the necessary packages
from imutils.video import VideoStream
from imutils import face_utils
import imutils
import time
import dlib
import cv2
import numpy as np
import math
import transformation
import utils
# custom imports
import rotation_matrix_util as rmu
import client
def handle_drone_directions(img, rect):
    ###### Face from center of frame ######
    final_directions = []  # default when the face already covers the frame centre
    if dlib.rectangle.contains(rect, int(img.shape[1] / 2), int(img.shape[0] / 2)) is False:
directions = ["0", "0", "0", "0"] # up, right, down, left
# right/left cases
if dlib.rectangle.left(rect) < int(img.shape[1] / 2) - dlib.rectangle.width(rect):
directions[1] = "right"
elif dlib.rectangle.right(rect) > int(img.shape[1] / 2) + dlib.rectangle.width(rect):
directions[3] = "left"
# up/down cases
if dlib.rectangle.bottom(rect) < int(img.shape[0] / 2):
directions[0] = "up"
elif dlib.rectangle.top(rect) > int(img.shape[0] / 2):
directions[2] = "down"
final_directions = list(filter(lambda x: x != "0", directions))
if len(final_directions) > 1:
cv2.putText(img, 'Need to move drone {0} {1}'.format(final_directions[0], final_directions[1]),
(5, 200), font, 1, (255, 255, 255), 2)
elif len(final_directions) == 1:
cv2.putText(img, 'Need to move drone {0}'.format(final_directions[0]),
(5, 200), font, 1, (255, 255, 255), 2)
###### Face from center of frame ######
return final_directions
def draw_lines(head_pose_data, img):
# cv2.line(frame, head_pose[0], head_pose[1], (255, 0, 0), 2)
cv2.line(img, tuple(head_pose_data[0][0][0].astype(int)), tuple(head_pose_data[0][1][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][1][0].astype(int)), tuple(head_pose_data[0][2][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][2][0].astype(int)), tuple(head_pose_data[0][3][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][3][0].astype(int)), tuple(head_pose_data[0][0][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][4][0].astype(int)), tuple(head_pose_data[0][5][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][5][0].astype(int)), tuple(head_pose_data[0][6][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][6][0].astype(int)), tuple(head_pose_data[0][7][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][7][0].astype(int)), tuple(head_pose_data[0][4][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][0][0].astype(int)), tuple(head_pose_data[0][4][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][1][0].astype(int)), tuple(head_pose_data[0][5][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][2][0].astype(int)), tuple(head_pose_data[0][6][0].astype(int)), (0, 0, 255))
cv2.line(img, tuple(head_pose_data[0][3][0].astype(int)), tuple(head_pose_data[0][7][0].astype(int)), (0, 0, 255))
def head_pose_estimate(frame, shape):
size = frame.shape
#2D image points. If you change the image, you need to change vector
image_points = utils.get_image_points(shape)
# 3D model points. https://github.com/lincolnhard/head-pose-estimation
model_points = utils.get_model_points()
# reprojection matrix
reproject_matrix = utils.get_reproject_matrix()
camera_matrix = np.array(
[[1065.998192050825, 0.0, 650.5364868504282],
[0.0, 1068.49376227235, 333.59792728394547],
[0.0, 0.0, 1.0]], dtype = "double"
)
params = {}
# print("Camera Matrix: \n {0}".format(camera_matrix))
#dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
dist_coeffs = np.array([0.15166761845099383, 0.018422099273101917, -0.037319705442136746, 0.02048992115919593, -0.2574454398381191])
#(success, rotation_vector, translation_vector, inliers) = cv2.solvePnPRansac(model_points, image_points,
# camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
print("Rotation Vector: \n {0}".format(rotation_vector))
params["rotation_vector"] = np.concatenate(rotation_vector, axis=0).tolist()
params["euler_angles"] = {}
# print("Rotation Euler angles (Radians): \n {0}".format(rmu.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0])))
# params["euler_angles"]["radians"] = rmu.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0]).tolist()
# or use this
print("Rotation Euler angles (Radians): \n {0}".format(
np.asarray(transformation.euler_from_matrix(cv2.Rodrigues(rotation_vector)[0], 'sxyz'))))
params["euler_angles"]["radians"] = np.asarray(
transformation.euler_from_matrix(cv2.Rodrigues(rotation_vector)[0], 'sxyz'))
print("Rotation Euler angles (Degrees): \n {0}".format(rmu.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0]) * (180/PI)))
params["euler_angles"]["degrees"] = (rmu.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0]) * (180/PI)).tolist()
print("Translation Vector: \n {0}".format(translation_vector))
params["translation_vector"] = np.concatenate(rotation_vector, axis=0).tolist()
params["camera_position"] = -np.matrix(cv2.Rodrigues(rotation_vector)[0]).T * np.matrix(translation_vector)
print("Camera Position: \n {0}".format(params["camera_position"]))
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(reproject_matrix, rotation_vector, translation_vector, camera_matrix,
dist_coeffs, None, None, cv2.CALIB_FIX_ASPECT_RATIO)
return [nose_end_point2D, params]
def execute(count, skip_frames):
# loop over the frames from the video stream
while True:
count += 1
if count > 1000000:
count = 1
if count % skip_frames == 0:
continue
# grab the frame from the threaded video stream, resize it to
# have a maximum width of 800 pixels, and convert it to
# grayscale
frame = vs.read()
#frame = imutils.resize(frame, width=800)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
#final directions
final_directions = []
distancei = 0
# loop over the face detections
for rect in rects:
# calculate distance from camera
w = dlib.rectangle.right(rect) - dlib.rectangle.left(rect)
h = dlib.rectangle.bottom(rect) - dlib.rectangle.top(rect)
distancei = (2 * math.pi * 180) / (w + h * 360) * 1000 + 3
cv2.putText(frame, 'Distance = ' + str(distancei * 3) + ' cm', (5, 100), font, 1, (255, 255, 255), 2)
# draw rectangle around face
cv2.rectangle(frame, (dlib.rectangle.left(rect), dlib.rectangle.top(rect)),
(dlib.rectangle.right(rect), dlib.rectangle.bottom(rect)), (0, 255, 0), 2)
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# handle frame directions
final_directions = handle_drone_directions(frame, rect)
# loop over the (x, y)-coordinates for the facial landmarks
# and draw them on the image
for (x, y) in shape:
cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
# calculate head pose estimation
head_pose = head_pose_estimate(frame, shape)
# draw lines
draw_lines(head_pose, frame)
head_pose[1]["direction"] = final_directions
            # head_pose_estimate returns [nose_end_point2D, params]; index 1 holds the params dict
            # print(head_pose[1])
            if client_socket is not None:
                client_socket.socket_send(
                    str.encode(str(head_pose[1])))
# degree from centre of image
frame_center = (int(frame.shape[1]/2), int(frame.shape[0]/2))
cv2.circle(frame, frame_center, 1, (0, 0, 255), -1)
# show the frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
if client_socket is not None:
client_socket.socket_close() # close socket and terminate C++ socket server
break
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] camera sensor warming up...")
vs = VideoStream(0).start()
time.sleep(2.0)
FRAMES_SKIP = 50
COUNT = 0
PI = math.pi
font = cv2.FONT_HERSHEY_SIMPLEX
# initiate socket
client_socket = None
try:
client_socket = client.create_client_socket()
except ConnectionRefusedError:
print("[INFO] Connection with C++ server refused.. proceeding without server")
time.sleep(2.0)
try:
execute(COUNT, FRAMES_SKIP)
except KeyboardInterrupt:
if client_socket is not None:
client_socket.socket_close() # close socket and terminate C++ socket server
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
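# Hedged illustration (added): cv2.Rodrigues, used in head_pose_estimate above,
# converts between a rotation vector and a 3x3 rotation matrix; the round trip
# below recovers the original vector. The values are arbitrary.
def _demo_rodrigues_roundtrip():
    rvec = np.array([[0.1], [0.2], [0.3]], dtype=np.float64)
    rot_mat, _ = cv2.Rodrigues(rvec)
    rvec_back, _ = cv2.Rodrigues(rot_mat)
    assert np.allclose(rvec, rvec_back, atol=1e-6)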
|
|
import random
import cv2
import matplotlib.pyplot as plt
import numpy as np
from word_segmentation import extract_words_from_image
def word_image_preprocess(img, imgSize=(128, 32), dataAugmentation=False):
"""put img into target img of size imgSize, transpose for TF and normalize gray-values"""
# there are damaged files in IAM dataset - just use black image instead
if img is None:
img = np.zeros(imgSize[::-1])
# data augmentation
    img = img.astype(float)  # np.float was removed in recent NumPy; the builtin float (float64) is equivalent here
if dataAugmentation:
# photometric data augmentation
if random.random() < 0.25:
rand_odd = lambda: random.randint(1, 3) * 2 + 1
img = cv2.GaussianBlur(img, (rand_odd(), rand_odd()), 0)
if random.random() < 0.25:
img = cv2.dilate(img, np.ones((3, 3)))
if random.random() < 0.25:
img = cv2.erode(img, np.ones((3, 3)))
if random.random() < 0.5:
img = img * (0.25 + random.random() * 0.75)
if random.random() < 0.25:
img = np.clip(img + (np.random.random(img.shape) - 0.5) * random.randint(1, 50), 0, 255)
if random.random() < 0.1:
img = 255 - img
# geometric data augmentation
wt, ht = imgSize
h, w = img.shape
f = min(wt / w, ht / h)
fx = f * np.random.uniform(0.75, 1.25)
fy = f * np.random.uniform(0.75, 1.25)
# random position around center
txc = (wt - w * fx) / 2
tyc = (ht - h * fy) / 2
freedom_x = max((wt - fx * w) / 2, 0) + wt / 10
freedom_y = max((ht - fy * h) / 2, 0) + ht / 10
tx = txc + np.random.uniform(-freedom_x, freedom_x)
ty = tyc + np.random.uniform(-freedom_y, freedom_y)
# map image into target image
M = np.float32([[fx, 0, tx], [0, fy, ty]])
target = np.ones(imgSize[::-1]) * 255 / 2
img = cv2.warpAffine(img, M, dsize=imgSize, dst=target, borderMode=cv2.BORDER_TRANSPARENT)
# no data augmentation
else:
# center image
wt, ht = imgSize
h, w = img.shape
f = min(wt / w, ht / h)
tx = (wt - w * f) / 2
ty = (ht - h * f) / 2
# map image into target image
M = np.float32([[f, 0, tx], [0, f, ty]])
target = np.ones(imgSize[::-1]) * 255 / 2
img = cv2.warpAffine(img, M, dsize=imgSize, dst=target, borderMode=cv2.BORDER_TRANSPARENT)
# transpose for TF
img = cv2.transpose(img)
    # convert to range [-0.5, 0.5]
img = img / 255 - 0.5
return img
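# Hedged usage sketch (added): running the preprocessing above on a synthetic
# blank "word" image without augmentation; the output is transposed to
# (width, height) for TensorFlow and scaled to roughly [-0.5, 0.5].
# The input size below is arbitrary.
def _demo_word_image_preprocess():
    blank = np.full((60, 200), 255, dtype=np.uint8)
    out = word_image_preprocess(blank, imgSize=(128, 32), dataAugmentation=False)
    return out.shape  # (128, 32)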
def image_preprocess(filename, is_lines=False):
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) / 255
images = [img]
if is_lines:
images = extract_words_from_image(img)
def _word_im(_img):
# center image
imgSize = (128, 48) if is_lines else (128, 32)
wt, ht = imgSize
h, w = _img.shape
f = min(wt / w, ht / h)
tx = (wt - w * f) / 2
ty = (ht - h * f) / 2
# map image into target image
M = np.float32([[f, 0, tx], [0, f, ty]])
target = np.ones(imgSize[::-1]) * 255 / 2
_img = cv2.warpAffine(_img, M, dsize=imgSize, dst=target, borderMode=cv2.BORDER_TRANSPARENT)
return _img
images = [_word_im(img * 255) for img in images]
return images
if __name__ == '__main__':
res = image_preprocess('../data/lines/3.png', is_lines=True)
for (j, w) in enumerate(res):
plt.imshow(w, cmap='gray')
plt.show()
|
|
#!/usr/bin/env python3
import itertools
import os.path
import pickle
from typing import Any, Generator, Hashable, Iterable, NamedTuple, Sequence, Tuple
import numpy as np
from rosplane_msgs.msg import State, Current_Path
from rosbag_to_traces import process_bag_file, dist_trace_to_mode_seg_tuples, aggregate_by_mode
MODE_TOPIC = "current_path"
STATE_TOPIC = "state"
ROSplaneModeT = NamedTuple("ROSplaneModeT", [
('path_type', float),
('Va_d', float),
('r_x', float), ('r_y', float), ('r_z', float),
('q_x', float), ('q_y', float), ('q_z', float),
('c_x', float), ('c_y', float), ('c_z', float),
('rho', float),
('rotation', float)
])
ROSplaneStateT = NamedTuple("ROSplaneStateT", [
('x', float), ('y', float), ('z', float),
('Va', float),
('alpha', float), ('beta', float),
('theta', float), ('psi', float), ('chi', float),
('p', float), ('q', float), ('r', float),
('Vg', float)
])
def current_path_to_mode(msg: Current_Path) -> ROSplaneModeT:
    # Set unused fields to None so that identical modes are not considered different modes.
    # This is because the unused fields may still hold values from previous messages.
if msg.path_type == 0: # Orbit Path
c = [None if np.isnan(v) else v for v in msg.c] # type: Sequence[float]
return ROSplaneModeT(
msg.path_type,
msg.Va_d,
None, None, None,
None, None, None,
c[0], c[1], c[2],
msg.rho,
msg.lambda_
)
else:
assert msg.path_type == 1 # Straight Line path
return ROSplaneModeT(
msg.path_type,
msg.Va_d,
msg.r[0], msg.r[1], msg.r[2],
msg.q[0], msg.q[1], msg.q[2],
None, None, None,
None,
None
)
def state_to_state(msg: State) -> ROSplaneStateT:
pos = msg.position
return ROSplaneStateT(
pos[0], pos[1], pos[2],
msg.Va,
msg.alpha, msg.beta,
msg.theta, msg.psi, msg.chi,
msg.p, msg.q, msg.r,
msg.Vg
)
TOPIC_TO_CB = {
MODE_TOPIC: current_path_to_mode,
STATE_TOPIC: state_to_state
}
def concatenate_primitive_traces(mode_seg_iseq: Iterable[Tuple[Hashable, Sequence[Tuple[float, ...]]]]) \
-> Generator[Tuple[Hashable, Sequence[Tuple[float, ...]]], None, None]:
# FIXME this is a hacky way to merge the primitive traces into a composite trace
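    # Segments are accumulated into new_trace; a composite trace is yielded only
    # when the running segment index modulo 50 hits one of the offsets listed in
    # the loop below, after which accumulation restarts from the current segment.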
mode_seg_iter = iter(mode_seg_iseq)
new_trace = []
stamped_mode, trace = next(mode_seg_iter)
new_trace.extend(trace)
for i, (stamped_mode, trace) in enumerate(mode_seg_iter):
if i % 50 not in [1, 9, 37, 41, 49]:
pass # This mode is skipped
else:
yield stamped_mode, new_trace
new_trace = []
new_trace.extend(trace)
def main(argv: Any) -> None:
common_prefix = os.path.commonprefix([f.name for f in argv.bag_file])
out_file_name = os.path.basename(common_prefix)
dist_trace_iter = (process_bag_file(bag_file_name, TOPIC_TO_CB)
for bag_file_name in argv.bag_file)
# Chain the mode segments from all files
mode_seg_tuple_iter = itertools.chain(*(dist_trace_to_mode_seg_tuples(
dist_trace=dist_trace,
mode_topic=MODE_TOPIC,
state_topic=STATE_TOPIC)
for dist_trace in dist_trace_iter))
mode_seg_tuple_iter = concatenate_primitive_traces(mode_seg_tuple_iter)
mode_seglist_map = aggregate_by_mode(mode_seg_tuple_iter)
print(len(mode_seglist_map))
print(*(len(v) for v in mode_seglist_map.values()))
print(*(min(len(tr) for tr in v) for v in mode_seglist_map.values()))
print(*(np.mean(a=[len(tr) for tr in v]) for v in mode_seglist_map.values()))
print(*(max(len(tr) for tr in v) for v in mode_seglist_map.values()))
dryvr_input_obj = {}
for i, seg_list in enumerate(mode_seglist_map.values()):
if len(seg_list) < 2:
continue # Skip this mode because too few traces for reachability analysis
seg_len = min(*(len(seg) for seg in seg_list))
time_step = 0.01 # Arbitrarily chosen time step
seg_arr_list = []
for seg in seg_list:
seg_arr = np.asarray(seg[-seg_len:]) # Trim the segments to same length by choosing the suffix
seg_arr[:, 0] = time_step * np.arange(0, len(seg_arr)) # Reassign timestamp assuming perfect periodic
seg_arr_list.append(seg_arr)
dryvr_mode = ["takeoff", "interchange", "loiter", "descend", "rtb"][i]
if dryvr_mode == "takeoff" or dryvr_mode == "interchange":
seg_arr_list = seg_arr_list[1:] # Ignore first trace because of the disturbance when starting Gazebo
dryvr_traces = np.stack(seg_arr_list)
dryvr_input_obj[dryvr_mode] = dryvr_traces
with open(out_file_name + ".pickle", 'wb') as f:
pickle.dump(dryvr_input_obj, f)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('bag_file', nargs='+', type=argparse.FileType('rb'))
main(parser.parse_args())
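    # Example invocation (hedged: the script file name below is illustrative):
    #   python3 rosbag_to_dryvr_input.py flight_1.bag flight_2.bag
    # This writes a pickle named after the common prefix of the bag file names,
    # mapping each DryVR mode name to a (num_traces, seg_len, num_fields) array.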
|
|
import numpy as np
np.random.seed(2591)
class DAGANDataset(object):
def __init__(self, batch_size, last_training_class_index, reverse_channels, num_of_gpus, gen_batches):
"""
:param batch_size: The batch size to use for the data loader
:param last_training_class_index: The final index for the training set, used to restrict the training set
if needed. E.g. if training set is 1200 classes and last_training_class_index=900 then only the first 900
classes will be used
:param reverse_channels: A boolean indicating whether we need to reverse the colour channels e.g. RGB to BGR
:param num_of_gpus: Number of gpus to use for training
:param gen_batches: How many batches to use from the validation set for the end of epoch generations
"""
self.x_train, self.x_test, self.x_val = self.load_dataset(last_training_class_index)
self.num_of_gpus = num_of_gpus
self.batch_size = batch_size
self.reverse_channels = reverse_channels
self.test_samples_per_label = gen_batches
self.choose_gen_labels = np.random.choice(self.x_val.shape[0], self.batch_size, replace=True)
self.choose_gen_samples = np.random.choice(len(self.x_val[0]), self.test_samples_per_label, replace=True)
self.x_gen = self.x_val[self.choose_gen_labels]
self.x_gen = self.x_gen[:, self.choose_gen_samples]
self.x_gen = np.reshape(self.x_gen, newshape=(self.x_gen.shape[0] * self.x_gen.shape[1],
self.x_gen.shape[2], self.x_gen.shape[3], self.x_gen.shape[4]))
self.gen_batches = gen_batches
self.train_index = 0
self.val_index = 0
self.test_index = 0
self.indexes = {"train": 0, "val": 0, "test": 0, "gen": 0}
self.datasets = {"train": self.x_train, "gen": self.x_gen,
"val": self.x_val,
"test": self.x_test}
self.image_height = self.x_train.shape[2]
self.image_width = self.x_train.shape[3]
self.image_channel = self.x_train.shape[4]
self.training_data_size = self.x_train.shape[0] * self.x_train.shape[1]
self.validation_data_size = gen_batches * self.batch_size
self.testing_data_size = self.x_test.shape[0] * self.x_test.shape[1]
self.generation_data_size = self.gen_batches * self.batch_size
def load_dataset(self, last_training_class_index):
"""
        Loads the dataset into the data loader class. To be implemented in all classes that inherit
        DAGANDataset.
        :param last_training_class_index: The final index for the training set,
used to restrict the training set if needed. E.g. if training set is 1200 classes and
last_training_class_index=900 then only the first 900 classes will be used
"""
raise NotImplementedError
def preprocess_data(self, x):
"""
        Preprocesses data such that their values lie in the -1.0 to 1.0 range, so that the generator's
        tanh output activation can work properly
:param x: A data batch to preprocess
:return: A preprocessed data batch
"""
x = 2 * x - 1
if self.reverse_channels:
reverse_photos = np.ones(shape=x.shape)
for channel in range(x.shape[-1]):
reverse_photos[:, :, :, x.shape[-1] - 1 - channel] = x[:, :, :, channel]
x = reverse_photos
return x
def reconstruct_original(self, x):
"""
Applies the reverse operations that preprocess_data() applies such that the data returns to their original form
:param x: A batch of data to reconstruct
:return: A reconstructed batch of data
"""
x = (x + 1) / 2
return x
def shuffle(self, x):
"""
        Shuffles the data batch along its first axis
:param x: A data batch
:return: A shuffled data batch
"""
indices = np.arange(len(x))
np.random.shuffle(indices)
x = x[indices]
return x
def get_batch(self, dataset_name):
"""
Generates a data batch to be used for training or evaluation
        :param dataset_name: The name of the set to use, e.g. "train", "val" etc
:return: A data batch
"""
choose_classes = np.random.choice(len(self.datasets[dataset_name]), size=self.batch_size)
choose_samples = np.random.choice(self.datasets[dataset_name].shape[1], size=2 * self.batch_size,
replace=True)
choose_samples_a = choose_samples[:self.batch_size]
choose_samples_b = choose_samples[self.batch_size:]
x_input_batch_a = []
x_input_batch_b = []
for i in range(self.batch_size):
x_input_batch_a.append(self.datasets[dataset_name][choose_classes[i], choose_samples_a[i]])
x_input_batch_b.append(self.datasets[dataset_name][choose_classes[i], choose_samples_b[i]])
x_input_batch_a = np.array(x_input_batch_a)
x_input_batch_b = np.array(x_input_batch_b)
return self.preprocess_data(x_input_batch_a), self.preprocess_data(x_input_batch_b)
def get_next_gen_batch(self):
"""
Provides a batch that contains data to be used for generation
:return: A data batch to use for generation
"""
if self.indexes["gen"] >= self.batch_size * self.gen_batches:
self.indexes["gen"] = 0
x_input_batch_a = self.datasets["gen"][self.indexes["gen"]:self.indexes["gen"]+self.batch_size]
self.indexes["gen"] += self.batch_size
return self.preprocess_data(x_input_batch_a)
def get_multi_batch(self, dataset_name):
"""
Returns a batch to be used for training or evaluation for multi gpu training
        :param dataset_name: The name of the data-set to use e.g. "train", "test" etc
        :return: Two batches (i.e. x_i and x_j) of size (num_gpus, batch_size, im_height, im_width, im_channels). If
the set is "gen" then we only return a single batch (i.e. x_i)
"""
x_input_a_batch = []
x_input_b_batch = []
if dataset_name == "gen":
x_input_a = self.get_next_gen_batch()
            for n_batch in range(self.num_of_gpus):
x_input_a_batch.append(x_input_a)
x_input_a_batch = np.array(x_input_a_batch)
return x_input_a_batch
else:
            for n_batch in range(self.num_of_gpus):
x_input_a, x_input_b = self.get_batch(dataset_name)
x_input_a_batch.append(x_input_a)
x_input_b_batch.append(x_input_b)
x_input_a_batch = np.array(x_input_a_batch)
x_input_b_batch = np.array(x_input_b_batch)
return x_input_a_batch, x_input_b_batch
def get_train_batch(self):
"""
Provides a training batch
:return: Returns a tuple of two data batches (i.e. x_i and x_j) to be used for training
"""
x_input_a, x_input_b = self.get_multi_batch("train")
return x_input_a, x_input_b
def get_test_batch(self):
"""
Provides a test batch
:return: Returns a tuple of two data batches (i.e. x_i and x_j) to be used for evaluation
"""
x_input_a, x_input_b = self.get_multi_batch("test")
return x_input_a, x_input_b
def get_val_batch(self):
"""
Provides a val batch
:return: Returns a tuple of two data batches (i.e. x_i and x_j) to be used for evaluation
"""
x_input_a, x_input_b = self.get_multi_batch("val")
return x_input_a, x_input_b
def get_gen_batch(self):
"""
Provides a gen batch
:return: Returns a single data batch (i.e. x_i) to be used for generation on unseen data
"""
x_input_a = self.get_multi_batch("gen")
return x_input_a
class DAGANImbalancedDataset(DAGANDataset):
def __init__(self, batch_size, last_training_class_index, reverse_channels, num_of_gpus, gen_batches):
"""
:param batch_size: The batch size to use for the data loader
:param last_training_class_index: The final index for the training set, used to restrict the training set
if needed. E.g. if training set is 1200 classes and last_training_class_index=900 then only the first 900
classes will be used
:param reverse_channels: A boolean indicating whether we need to reverse the colour channels e.g. RGB to BGR
:param num_of_gpus: Number of gpus to use for training
:param gen_batches: How many batches to use from the validation set for the end of epoch generations
"""
self.x_train, self.x_test, self.x_val = self.load_dataset(last_training_class_index)
self.training_data_size = np.sum([len(self.x_train[i]) for i in range(self.x_train.shape[0])])
self.validation_data_size = np.sum([len(self.x_val[i]) for i in range(self.x_val.shape[0])])
self.testing_data_size = np.sum([len(self.x_test[i]) for i in range(self.x_test.shape[0])])
self.generation_data_size = gen_batches * batch_size
self.num_of_gpus = num_of_gpus
self.batch_size = batch_size
self.reverse_channels = reverse_channels
val_dict = dict()
idx = 0
for i in range(self.x_val.shape[0]):
temp = self.x_val[i]
for j in range(len(temp)):
val_dict[idx] = {"sample_idx": j, "label_idx": i}
idx += 1
choose_gen_samples = np.random.choice([i for i in range(self.validation_data_size)],
size=self.generation_data_size)
self.x_gen = np.array([self.x_val[val_dict[idx]["label_idx"]][val_dict[idx]["sample_idx"]]
for idx in choose_gen_samples])
self.train_index = 0
self.val_index = 0
self.test_index = 0
self.indexes = {"train": 0, "val": 0, "test": 0, "gen": 0}
self.datasets = {"train": self.x_train, "gen": self.x_gen,
"val": self.x_val,
"test": self.x_test}
self.gen_data_size = gen_batches * self.batch_size
self.image_height = self.x_train[0][0].shape[0]
self.image_width = self.x_train[0][0].shape[1]
self.image_channel = self.x_train[0][0].shape[2]
def get_batch(self, set_name):
"""
Generates a data batch to be used for training or evaluation
:param set_name: The name of the set to use, e.g. "train", "val" etc
:return: A data batch
"""
choose_classes = np.random.choice(len(self.datasets[set_name]), size=self.batch_size)
x_input_batch_a = []
x_input_batch_b = []
for i in range(self.batch_size):
choose_samples = np.random.choice(len(self.datasets[set_name][choose_classes[i]]),
size=2 * self.batch_size,
replace=True)
choose_samples_a = choose_samples[:self.batch_size]
choose_samples_b = choose_samples[self.batch_size:]
current_class_samples = self.datasets[set_name][choose_classes[i]]
x_input_batch_a.append(current_class_samples[choose_samples_a[i]])
x_input_batch_b.append(current_class_samples[choose_samples_b[i]])
x_input_batch_a = np.array(x_input_batch_a)
x_input_batch_b = np.array(x_input_batch_b)
return self.preprocess_data(x_input_batch_a), self.preprocess_data(x_input_batch_b)
def get_next_gen_batch(self):
"""
Provides a batch that contains data to be used for generation
:return: A data batch to use for generation
"""
if self.indexes["gen"] >= self.gen_data_size:
self.indexes["gen"] = 0
x_input_batch_a = self.datasets["gen"][self.indexes["gen"]:self.indexes["gen"]+self.batch_size]
self.indexes["gen"] += self.batch_size
return self.preprocess_data(x_input_batch_a)
def get_multi_batch(self, set_name):
"""
Returns a batch to be used for training or evaluation for multi gpu training
:param set_name: The name of the data-set to use e.g. "train", "test" etc
        :return: Two batches (i.e. x_i and x_j) of size (num_gpus, batch_size, im_height, im_width, im_channels). If
the set is "gen" then we only return a single batch (i.e. x_i)
"""
x_input_a_batch = []
x_input_b_batch = []
if set_name == "gen":
x_input_a = self.get_next_gen_batch()
for n_batch in range(self.num_of_gpus):
x_input_a_batch.append(x_input_a)
x_input_a_batch = np.array(x_input_a_batch)
return x_input_a_batch
else:
for n_batch in range(self.num_of_gpus):
x_input_a, x_input_b = self.get_batch(set_name)
x_input_a_batch.append(x_input_a)
x_input_b_batch.append(x_input_b)
x_input_a_batch = np.array(x_input_a_batch)
x_input_b_batch = np.array(x_input_b_batch)
return x_input_a_batch, x_input_b_batch
class OmniglotDAGANDataset(DAGANDataset):
def __init__(self, batch_size, last_training_class_index, reverse_channels, num_of_gpus, gen_batches):
super(OmniglotDAGANDataset, self).__init__(batch_size, last_training_class_index, reverse_channels, num_of_gpus,
gen_batches)
def load_dataset(self, gan_training_index):
self.x = np.load("datasets/omniglot_data.npy")
self.x = self.x / np.max(self.x)
x_train, x_test, x_val = self.x[:1200], self.x[1200:1600], self.x[1600:]
x_train = x_train[:gan_training_index]
return x_train, x_test, x_val
class OmniglotImbalancedDAGANDataset(DAGANImbalancedDataset):
def __init__(self, batch_size, last_training_class_index, reverse_channels, num_of_gpus, gen_batches):
super(OmniglotImbalancedDAGANDataset, self).__init__(batch_size, last_training_class_index, reverse_channels,
num_of_gpus, gen_batches)
def load_dataset(self, last_training_class_index):
x = np.load("datasets/omniglot_data.npy")
x_temp = []
for i in range(x.shape[0]):
choose_samples = np.random.choice([i for i in range(1, 15)])
x_temp.append(x[i, :choose_samples])
self.x = np.array(x_temp)
self.x = self.x / np.max(self.x)
x_train, x_test, x_val = self.x[:1200], self.x[1200:1600], self.x[1600:]
x_train = x_train[:last_training_class_index]
return x_train, x_test, x_val
class VGGFaceDAGANDataset(DAGANDataset):
def __init__(self, batch_size, last_training_class_index, reverse_channels, num_of_gpus, gen_batches):
super(VGGFaceDAGANDataset, self).__init__(batch_size, last_training_class_index, reverse_channels, num_of_gpus,
gen_batches)
def load_dataset(self, gan_training_index):
self.x = np.load("datasets/vgg_face_data.npy")
self.x = self.x / np.max(self.x)
self.x = np.reshape(self.x, newshape=(2354, 100, 64, 64, 3))
x_train, x_test, x_val = self.x[:1803], self.x[1803:2300], self.x[2300:]
x_train = x_train[:gan_training_index]
return x_train, x_test, x_val
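# A minimal usage sketch (hedged: the constructor arguments below are purely
# illustrative, and "datasets/omniglot_data.npy" must exist for load_dataset
# to succeed before any batches can be drawn).
def _demo_omniglot_batches():
    data = OmniglotDAGANDataset(batch_size=32, last_training_class_index=900,
                                reverse_channels=False, num_of_gpus=1, gen_batches=10)
    x_i, x_j = data.get_train_batch()  # each of shape (num_gpus, batch, H, W, C)
    x_gen = data.get_gen_batch()       # replicated gen batch of shape (num_gpus, batch, H, W, C)
    return x_i.shape, x_j.shape, x_gen.shape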
|
|
import unittest
import neuralnetsim
import networkx as nx
class TestNetworkAnalysis(unittest.TestCase):
def test_calc_mu(self):
graph = nx.DiGraph()
graph.add_node(1, com=1)
graph.add_node(2, com=1)
graph.add_node(4, com=2)
graph.add_node(5, com=3)
graph.add_edge(1, 2, weight=1.0)
graph.add_edge(2, 1, weight=1.0)
graph.add_edge(2, 4, weight=1.0)
graph.add_edge(4, 5, weight=1.5)
graph.add_edge(5, 1, weight=2.0)
self.assertAlmostEqual(neuralnetsim.calc_mu(graph, "com"),
(1.0 + 1.5 + 2.0) / (1.0 + 1.0 + 1.0 + 1.5 + 2.0))
def test_calc_strength_distribution_in(self):
graph = nx.DiGraph()
graph.add_node(1, com=1)
graph.add_node(2, com=1)
graph.add_node(3, com=2)
graph.add_node(4, com=3)
graph.add_edge(1, 2, weight=1.0)
graph.add_edge(2, 1, weight=1.0)
graph.add_edge(2, 3, weight=1.0)
graph.add_edge(3, 4, weight=1.5)
graph.add_edge(4, 1, weight=2.0)
in_data = neuralnetsim.calc_strength_distribution(graph, "in")
self.assertAlmostEqual(in_data[0], 3.0)
self.assertAlmostEqual(in_data[1], 1.0)
self.assertAlmostEqual(in_data[2], 1.0)
self.assertAlmostEqual(in_data[3], 1.5)
def test_calc_strength_distribution_out(self):
graph = nx.DiGraph()
graph.add_node(1, com=1)
graph.add_node(2, com=1)
graph.add_node(3, com=2)
graph.add_node(4, com=3)
graph.add_edge(1, 2, weight=1.0)
graph.add_edge(2, 1, weight=1.0)
graph.add_edge(2, 3, weight=1.0)
graph.add_edge(3, 4, weight=1.5)
graph.add_edge(4, 1, weight=2.0)
out_data = neuralnetsim.calc_strength_distribution(graph, "out")
self.assertAlmostEqual(out_data[0], 1.0)
self.assertAlmostEqual(out_data[1], 2.0)
self.assertAlmostEqual(out_data[2], 1.5)
self.assertAlmostEqual(out_data[3], 2.0)
def test_calc_nodal_strength_difference_distribution(self):
graph = nx.DiGraph()
graph.add_node(1, com=1)
graph.add_node(2, com=1)
graph.add_node(3, com=2)
graph.add_node(4, com=3)
graph.add_edge(1, 2, weight=1.0)
graph.add_edge(2, 1, weight=1.0)
graph.add_edge(2, 3, weight=1.0)
graph.add_edge(3, 4, weight=1.5)
graph.add_edge(4, 1, weight=2.0)
out_data = neuralnetsim.calc_nodal_strength_difference_distribution(graph)
# out - in
self.assertAlmostEqual(out_data[0], -2.0)
self.assertAlmostEqual(out_data[1], 1.0)
self.assertAlmostEqual(out_data[2], 0.5)
self.assertAlmostEqual(out_data[3], 0.5)
|
|
from dipsim import multiframe, util, detector, illuminator, microscope
import numpy as np
import matplotlib.pyplot as plt
import os; import time; start = time.time(); print('Running...')
# Main input parameters
n_pts = 1000
ill_types = ['unpolarized', 'unpolarized', 'wide']
ill_pols = [0, np.pi/4, np.pi/2]
det_pols = [0, np.pi/4, np.pi/2]
ill_axes = [0, -np.pi/4, np.pi/4]
det_axes = [0, np.pi/4, np.pi]
ill_nas = [0.8, 0.8, 0.8]
det_nas = [1.0, 0.8, 0.8]
n_rows = len(det_axes)
n_cols = 4
inch_fig = 5
dpi = 300
col_labels = ['Geometry', r'Absorption Efficiency $\eta_{\textrm{abs}}$', r'Detection Efficiency $\eta_{\textrm{det}}$', r'Total Efficiency $\eta_{\textrm{tot}}$']
row_labels = 3*['']
# Generate axes
size = (inch_fig*n_cols, inch_fig*n_rows)
fig, axs = plt.subplots(n_rows, n_cols, figsize=size)
plt.subplots_adjust(wspace=0.1, hspace=-0.1)
if len(ill_pols) == 1:
axs = np.expand_dims(axs, 1)
caxs = util.generate_caxs(axs)
# Compute and plot on axes
for i, (ill_type, det_pol, ill_pol, det_axis, ill_axis, det_na, ill_na) in enumerate(zip(ill_types, det_pols, ill_pols, det_axes, ill_axes, det_nas, ill_nas)):
print('Computing microscope: ' + str(i))
# Create microscope
ill = illuminator.Illuminator(illum_type=ill_type,
theta_optical_axis=ill_axis,
na=ill_na,
phi_pol=ill_pol)
det = detector.Detector(det_type='polarized',
theta_optical_axis=np.array(det_axis),
na=det_na,
phi_pol=det_pol)
m = microscope.Microscope(illuminator=ill, detector=det, max_photons=1)
# Plot scene and efficiencies
m.plot_excitation_efficiency(n=n_pts, my_ax=axs[i,1], my_cax=caxs[i,1],
color_min=0, color_max=1.0)
m.plot_collection_efficiency(n=n_pts, my_ax=axs[i,2], my_cax=caxs[i,2],
color_min=0, color_max=0.25)
m.plot_sensitivity(n=n_pts, my_ax=axs[i,3], my_cax=caxs[i,3],
color_min=0, color_max=0.25)
scene_string = m.scene_string()
util.draw_scene(scene_string, my_ax=axs[i,0], dpi=dpi)
caxs[i,0].axis('off')
# Label axes and save
util.label_rows_and_cols(axs, row_labels, col_labels)
print('Saving final figure.')
fig.savefig('single-frame.pdf', dpi=dpi)
print('Total time: '+str(np.round(time.time() - start, 2)))
os.system('say "done"')
|
|
from base import BaseDataSet, BaseDataLoader
from utils import pallete
import numpy as np
import os
import scipy
import torch
from PIL import Image
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
import json
class CUS_Dataset(BaseDataSet):
def __init__(self, **kwargs):
        self.num_classes = 7  # body parts and background (VOC default was 21)
self.palette = pallete.get_voc_pallete(self.num_classes)
super(CUS_Dataset, self).__init__(**kwargs)
def _set_files(self):
self.root = os.path.join(self.root)
print(self.split)
if self.split == "val":
file_list = os.path.join("{}/val.txt".format(self.root))
elif self.split == 'train_supervised':
file_list = os.path.join("{}/sup_train.txt".format(self.root))
elif self.split == "train_unsupervised":
file_list = os.path.join("{}/unsup_train.txt".format(self.root))
elif self.split == "train_unsupervised_sequence":
file_list = os.path.join("{}/unsup_train_seq.txt".format(self.root))
else:
raise ValueError(f"Invalid split name {self.split}")
if self.split == "train_unsupervised_sequence":
self.files = tuple([tuple(line.rstrip().split(' ')) for line in tuple(open(file_list, "r"))])
self.labels = tuple(["/data/sara/CCT/body_part_data/data/sara_blank_img.png" for i in range(len(self.files))])
else:
file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, "r"))]
self.files, self.labels = list(zip(*file_list))
def _load_data(self, index):
image_paths = self.files[index] #os.path.join(self.root, self.files[index][1:])
if type(image_paths) is not tuple:
return self._load_data_single(index, image_paths)
else:
return ([self._load_data_single(index, path) for path in image_paths],)
def _load_data_single(self, index, image_path):
img_obj = Image.open(image_path)
width, height = img_obj.size
new_h = 400
#h_percent = new_h / float(height)
#new_w = int(float(width) * float(h_percent))
new_w = 598
image = np.asarray(img_obj.resize((new_w, new_h)), dtype=np.float32)
# WASimage = np.asarray(Image.open(image_path), dtype=np.float32)
# WAS image_id = self.files[index].split("/")[-1].split(".")[0]
last_part = image_path.split("/")[-1].split(".")[-1]
image_id = image_path.split("/")[-1].replace("." + last_part, '')
if self.use_weak_lables:
label_path = os.path.join(self.weak_labels_output, image_id + ".png")
else:
label_path = self.labels[index] #os.path.join(self.root, self.labels[index][1:])
label = np.asarray(Image.open(label_path).resize((new_w, new_h), Image.NEAREST), dtype=np.int32)
#label = np.asarray(Image.open(label_path), dtype=np.int32)
#print(image_id)
#print(image_path)
#print(label_path)
#print(image_path, ":", image.shape, label_path, ":", label.shape, image_id)
return image, label, image_id
class CUS_loader(BaseDataLoader):
def __init__(self, kwargs):
        # Mean and std computed from this dataset:
        #   mean: 115.23671735292434, 101.76216371197306, 91.09968687628187
        #   std: 54.82798581119987, 51.122941378859736, 50.94759578729262
self.MEAN = [0.45, 0.4, 0.35] # voc[0.485, 0.456, 0.406]
self.STD = [0.21, 0.2, 0.2] # voc[0.229, 0.224, 0.225]
self.batch_size = kwargs.pop('batch_size')
kwargs['mean'] = self.MEAN
kwargs['std'] = self.STD
kwargs['ignore_index'] = 255
        shuffle = kwargs.pop('shuffle', False)
num_workers = kwargs.pop('num_workers')
self.dataset = CUS_Dataset(**kwargs)
super(CUS_loader, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None)
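# The split files read in _set_files (val.txt, sup_train.txt, unsup_train.txt,
# unsup_train_seq.txt) are plain text with one sample per line: an image path
# and a label path separated by a single space, e.g. (illustrative paths):
#   /data/body_part_data/images/img_0001.jpg /data/body_part_data/labels/img_0001.png
# For the "train_unsupervised_sequence" split each line may list several image
# paths, and a fixed blank label image is substituted for every entry.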
|
|
from sklearn.linear_model import Lasso
import numpy as np
def norm_entropy(p):
n = p.shape[0]
return -p.dot(np.log(p + 1e-12) / np.log(n + 1e-12))
def entropic_scores(r):
r = np.abs(r)
ps = r / np.sum(r, axis=0)
hs = [1 - norm_entropy(p) for p in ps.T]
return hs
def nrmse(predicted, target):
predicted = (
predicted[:, None] if len(predicted.shape) == 1 else predicted
) # (n,)->(n,1)
target = target[:, None] if len(target.shape) == 1 else target # (n,)->(n,1)
err = predicted - target
err = err.T.dot(err) / len(err)
rmse = np.sqrt(err[0, 0])
    return rmse / np.std(target)
def dissentanglement_score(z, inputs, h):
R = []
err = []
for j in range(inputs.shape[1]):
model = Lasso(alpha=0.02, max_iter=10000)
model.fit(z[h][:, :], inputs[:, j])
z_pred = model.predict(z[h][:, :])
r = getattr(model, "coef_")[:, None]
R.append(np.abs(r))
err.append(nrmse(z_pred, inputs[:, j]))
R = np.hstack(R)
# disentanglement
disent_scores = entropic_scores(R.T)
c_rel_importance = np.nansum(R, 1) / np.nansum(R)
disent_w_avg = np.nansum(np.array(disent_scores) * c_rel_importance)
# completeness
complete_scores = entropic_scores(R)
complete_avg = np.nanmean(complete_scores)
return disent_w_avg, complete_avg
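# A minimal sketch of calling dissentanglement_score on synthetic data (hedged:
# the shapes are inferred from the indexing inside the function, not from a
# documented API). z maps a key h to an (n_samples, n_latents) code matrix and
# inputs is an (n_samples, n_factors) matrix of ground-truth factors.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    codes = {"layer0": rng.randn(500, 8)}
    factors = codes["layer0"][:, :3] + 0.1 * rng.randn(500, 3)
    disent, complete = dissentanglement_score(codes, factors, "layer0")
    print("disentanglement:", disent, "completeness:", complete)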
|
|
# --------------------------------------------------------
# Written by Yufei Ye (https://github.com/JudyYe), modified by Zhiqiu Lin (zl279@cornell.edu)
# --------------------------------------------------------
from __future__ import print_function
import argparse
import os
import os.path as osp
import numpy as np
from LBFGS import FullBatchLBFGS
import torch
import torch.nn as nn
import torch.nn.functional as F
import imageio
import torchvision.utils as vutils
from torchvision.models import vgg19
from dataloader import get_data_loader
import time
def build_model(name):
if name.startswith('vanilla'):
z_dim = 100
model_path = 'pretrained/%s.ckpt' % name
pretrain = torch.load(model_path)
from vanilla.models import DCGenerator
model = DCGenerator(z_dim, 32, 'instance')
model.load_state_dict(pretrain)
elif name == 'stylegan':
model_path = 'pretrained/%s.ckpt' % name
import sys
sys.path.insert(0, 'stylegan')
from stylegan import dnnlib, legacy
with dnnlib.util.open_url(model_path) as f:
model = legacy.load_network_pkl(f)['G_ema']
z_dim = model.z_dim
else:
        raise NotImplementedError('model [%s] is not implemented' % name)
if torch.cuda.is_available():
model = model.cuda()
model.eval()
return model, z_dim
class Wrapper(nn.Module):
"""The wrapper helps to abstract stylegan / vanilla GAN, z / w latent"""
def __init__(self, args, model, z_dim):
super().__init__()
self.model, self.z_dim = model, z_dim
self.latent = args.latent
self.is_style = args.model == 'stylegan'
def forward(self, param):
if self.latent == 'z':
if self.is_style:
image = self.model(param, None)
else:
image = self.model(param)
# w / wp
else:
assert self.is_style
if self.latent == 'w':
param = param.repeat(1, self.model.mapping.num_ws, 1)
image = self.model.synthesis(param)
return image
# create a module to normalize input image so we can easily put it in a
# nn.Sequential
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
# .view the mean and std to make them [C x 1 x 1] so that they can
# directly work with image Tensor of shape [B x C x H x W].
# B is batch size. C is number of channels. H is height and W is width.
self.mean = mean.clone().detach().view(-1, 1, 1)
self.std = std.clone().detach().view(-1, 1, 1)
def forward(self, img):
# normalize img
return (img - self.mean) / self.std
class PerceptualLoss(nn.Module):
def __init__(self, add_layer=['conv_3','conv_4']):
super().__init__()
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
norm = Normalization(cnn_normalization_mean, cnn_normalization_std)
cnn = vgg19(pretrained=True).features.to(device).eval()
# TODO (Part 1): implement the Perceptual/Content loss
# hint: hw4
# You may split the model into different parts and store each part in 'self.model'
self.model = nn.ModuleList()
self.model.append(norm)
self.add_layer = add_layer
i = 0
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, nn.ReLU):
layer = nn.ReLU(inplace=False)
self.model.append(layer)
if name == add_layer[-1]:
break
# self.model = nn.Sequential(norm, *self.model)
def forward(self, pred, target):
is_mask = False
if isinstance(target, tuple):
target, mask = target
is_mask = True
loss = 0.
i = 0
for net in self.model:
pred = net(pred)
target = net(target)
# TODO (Part 1): implement the forward call for perceptual loss
            # feel free to rewrite the entire forward call based on your
# implementation in hw4
# TODO (Part 3): if mask is not None, then you should mask out the gradient
# based on 'mask==0'. You may use F.adaptive_avg_pool2d() to
# resize the mask such that it has the same shape as the feature map.
if isinstance(net, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
if name in self.add_layer:
# add content loss:
if is_mask:
mask = F.adaptive_avg_pool2d(mask, pred.shape[-2:])
loss += F.mse_loss(torch.mul(pred, mask), torch.mul(target, mask).detach())
# pred = torch.mul(pred, mask)
# target = torch.mul(target, mask)
# loss += F.mse_loss(pred, target.detach())
else:
loss += F.mse_loss(pred, target.detach())
return loss
class Criterion(nn.Module):
def __init__(self, args, mask=False, layer=['conv_5']):
super().__init__()
self.perc_wgt = args.perc_wgt
self.l1_wgt = args.l1_wgt # weight for l1 loss/mask loss
self.l2_wgt = args.l2_wgt
self.bce_wgt = args.bce_wgt
self.loss_type = args.loss_type
self.mask = mask
self.perc = PerceptualLoss(layer)
def forward(self, pred, target):
"""Calculate loss of prediction and target. in p-norm / perceptual space"""
if self.mask:
target, mask = target
# TODO (Part 3): loss with mask
# mask = F.adaptive_avg_pool2d(mask, pred.shape[-2:])
pred = torch.mul(pred, mask)
target = torch.mul(target, mask)
# loss = torch.mean(torch.linalg.matrix_norm(target - pred, ord=1))
else:
# TODO (Part 1): loss w/o mask
pass
if self.loss_type == "l1":
lp_loss = self.l1_wgt * torch.mean(torch.linalg.matrix_norm(target.detach() - pred, ord=1))
elif self.loss_type == "l2":
lp_loss = self.l2_wgt * torch.mean(torch.linalg.matrix_norm(target.detach() - pred))
elif self.loss_type == "bce":
bce = nn.BCELoss(reduction='none')
lp_loss = self.bce_wgt * bce(pred, target.detach())
else:
raise NotImplementedError('%s is not supported' % self.loss_type)
loss = self.perc_wgt * self.perc(pred, target) + lp_loss
return loss
def save_images(image, fname, col=8):
image = image.cpu().detach()
image = image / 2 + 0.5
image = vutils.make_grid(image, nrow=col) # (C, H, W)
image = image.numpy().transpose([1, 2, 0])
image = np.clip(255 * image, 0, 255).astype(np.uint8)
if fname is not None:
os.makedirs(os.path.dirname(fname), exist_ok=True)
imageio.imwrite(fname + '.png', image)
return image
def save_gifs(image_list, fname, col=1):
"""
:param image_list: [(N, C, H, W), ] in scale [-1, 1]
"""
image_list = [save_images(each, None, col) for each in image_list]
os.makedirs(os.path.dirname(fname), exist_ok=True)
imageio.mimsave(fname + '.gif', image_list)
def sample_noise(dim, device, latent, model, N=1, from_mean=False):
"""
To generate a noise vector, just sample from a normal distribution.
To generate a style latent, you need to map the noise (z) to the style (W) space given the `model`.
You will be using model.mapping for this function.
Specifically,
if from_mean=False,
sample N noise vector (z) or N style latent(w/w+) depending on latent value.
if from_mean=True
if latent == 'z': Return zero vectors since zero is the mean for standard gaussian
if latent == 'w'/'w+': You should sample N=10000 z to generate w/w+ and then take the mean.
Some hint on the z-mapping can be found at stylegan/generate_gif.py L70:81.
Additionally, you can look at stylegan/training/networks.py class Generator L477:500
:return: Tensor on device in shape of (N, dim) if latent == z
Tensor on device in shape of (N, 1, dim) if latent == w
Tensor on device in shape of (N, nw, dim) if latent == w+
"""
# TODO (Part 1): Finish the function below according to the comment above
Nw = 10000
if latent == 'z':
vector = torch.randn(N, dim, device=device) if not from_mean else torch.zeros(N, dim, device=device)
elif latent == 'w':
if from_mean:
vector = torch.randn(Nw, dim, device=device)
vector = model.mapping(vector, None)
vector = torch.mean(vector, dim=0, keepdim=True)
            vector = vector[:, :1, :]  # keep the (N, 1, dim) shape expected for 'w'
else:
vector = model.mapping(torch.randn(N, dim, device=device), None)
            vector = vector[:, :1, :]  # keep the (N, 1, dim) shape expected for 'w'
elif latent == 'w+':
if from_mean:
vectors = torch.randn(Nw, dim, device=device)
vectors = model.mapping(vectors, None)
vector = torch.mean(vectors, dim=0, keepdim=True)
else:
vector = model.mapping(torch.randn(N, dim, device=device), None)
else:
raise NotImplementedError('%s is not supported' % latent)
return vector
def optimize_para(wrapper, param, target, criterion, num_step, save_prefix=None, res=False):
"""
wrapper: image = wrapper(z / w/ w+): an interface for a generator forward pass.
param: z / w / w+
target: (1, C, H, W)
criterion: loss(pred, target)
"""
delta = torch.zeros_like(param)
delta = delta.requires_grad_().to(device)
optimizer = FullBatchLBFGS([delta], lr=.1, line_search='Wolfe')
iter_count = [0]
def closure():
iter_count[0] += 1
            # TODO (Part 1): Your optimization code. Feel free to try out SGD/Adam.
optimizer.zero_grad()
image = wrapper(param + delta)
loss = criterion(image, target)
if iter_count[0] % 250 == 0:
# visualization code
                print('iter count {} loss {:.4f}'.format(iter_count[0], loss.item()))
if save_prefix is not None:
iter_result = image.data.clamp_(-1, 1)
save_images(iter_result, save_prefix + '_%d' % iter_count[0])
return loss
loss = closure()
loss.backward()
while iter_count[0] <= num_step:
options = {'closure': closure, 'max_ls': 10}
loss, _, lr, _, F_eval, G_eval, _, _ = optimizer.step(options)
    image = wrapper(param + delta)
return param + delta, image
def sample(args):
model, z_dim = build_model(args.model)
wrapper = Wrapper(args, model, z_dim)
batch_size = 16
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
noise = sample_noise(z_dim, device, args.latent, model, batch_size, args.use_mean)
image = wrapper(noise)
fname = os.path.join('output/forward/%s_%s' % (args.model, args.mode))
os.makedirs(os.path.dirname(fname), exist_ok=True)
save_images(image, fname)
def project(args):
# load images
loader = get_data_loader(args.input, args.resolution, is_train=False)
# define and load the pre-trained model
model, z_dim = build_model(args.model)
wrapper = Wrapper(args, model, z_dim)
print('model {} loaded'.format(args.model))
criterion = Criterion(args)
# project each image
start = time.time()
for idx, (data, _) in enumerate(loader):
target = data.to(device)
save_images(data, 'output/project/%d_data' % idx, 1)
param = sample_noise(z_dim, device, args.latent, model, from_mean=args.use_mean)
optimize_para(wrapper, param, target, criterion, args.n_iters,
'output/project/%d_%s_%s_%g_%s_%s' % (
idx, args.model, args.latent, args.perc_wgt, suffix_mean(args.use_mean), args.loss_type))
if idx >= 10:
break
print(f"Runtime of the program is {time.time() - start}")
def draw(args):
# define and load the pre-trained model
model, z_dim = build_model(args.model)
wrapper = Wrapper(args, model, z_dim)
# load the target and mask
loader = get_data_loader(args.input, args.resolution, alpha=True)
criterion = Criterion(args, True)
for idx, (rgb, mask) in enumerate(loader):
if idx == 3:
rgb, mask = rgb.to(device), mask.to(device)
save_images(rgb, 'output/draw/%d_data' % idx, 1)
save_images(mask, 'output/draw/%d_mask' % idx, 1)
# TODO (Part 3): optimize sketch 2 image
# hint: Set from_mean=True when sampling noise vector
param = sample_noise(z_dim, device, args.latent, model, from_mean=True)
optimize_para(wrapper, param, (rgb, mask), criterion, args.n_iters,
'output/draw/%d_%s_%s_%g_%s' % (
idx, args.model, args.latent, args.perc_wgt, suffix_mean(args.use_mean)))
if idx >= 8:
break
def interpolate(args):
model, z_dim = build_model(args.model)
wrapper = Wrapper(args, model, z_dim)
# load the target and mask
loader = get_data_loader(args.input, args.resolution)
criterion = Criterion(args)
for idx, (image, _) in enumerate(loader):
save_images(image, 'output/interpolate/%d' % (idx))
target = image.to(device)
param = sample_noise(z_dim, device, args.latent, model, from_mean=args.use_mean)
param, recon = optimize_para(wrapper, param, target, criterion, args.n_iters)
save_images(recon,
'output/interpolate/%d_%s_%s_%s' % (idx, args.model, args.latent, suffix_mean(args.use_mean)))
if idx % 2 == 0:
src = param
continue
dst = param
alpha_list = np.linspace(0, 1, 50)
image_list = []
with torch.no_grad():
# TODO (Part 2): interpolation code
# hint: Write a for loop to append the convex combinations to image_list
for theta in alpha_list:
inter_frame = wrapper(theta * src + (1 - theta) * dst)
image_list.append(inter_frame) # change dst
save_gifs(image_list,
'output/interpolate/%d_%s_%s_%s' % (idx, args.model, args.latent, suffix_mean(args.use_mean)))
if idx >= 7:
break
return
def suffix_mean(use_mean):
if use_mean is True:
return "mean"
else:
return "no_mean"
def parse_arg():
"""Creates a parser for command-line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='stylegan', choices=['vanilla', 'stylegan'])
parser.add_argument('--mode', type=str, default='draw', choices=['sample', 'project', 'draw', 'interpolate'])
parser.add_argument('--use_mean', type=bool, default=True)
parser.add_argument('--latent', type=str, default='w+', choices=['z', 'w', 'w+'])
parser.add_argument('--n_iters', type=int, default=1000,
help="number of optimization steps in the image projection")
parser.add_argument('--loss_type', type=str, default='l1', choices=['l1', 'l2', 'bce'])
parser.add_argument('--perc_wgt', type=float, default=0.1, help="perc loss weight")
parser.add_argument('--l1_wgt', type=float, default=10, help="L1 pixel loss weight")
parser.add_argument('--l2_wgt', type=float, default=5, help="L2 pixel loss weight")
parser.add_argument('--bce_wgt', type=float, default=10., help="BCE pixel loss weight")
parser.add_argument('--resolution', type=int, default=64, help='Resolution of images')
parser.add_argument('--input', type=str, default='data/sketch/*.png', help="path to the input image")
return parser.parse_args()
if __name__ == '__main__':
args = parse_arg()
if torch.cuda.is_available():
device = 'cuda'
print('Models moved to GPU.')
else:
device = 'cpu'
if args.mode == 'sample':
sample(args)
elif args.mode == 'project':
project(args)
elif args.mode == 'draw':
draw(args)
elif args.mode == 'interpolate':
interpolate(args)
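    # Example invocations (hedged: the script name "main.py" and the cat-image
    # glob are illustrative, not taken from the code above):
    #   python main.py --mode project --model stylegan --latent w+ --input 'data/cat/*.png'
    #   python main.py --mode draw --model stylegan --latent w+ --input 'data/sketch/*.png'
    #   python main.py --mode interpolate --model stylegan --latent w
    # Note that argparse's type=bool treats any non-empty string as True, so pass
    # an empty string (--use_mean '') to disable --use_mean.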
|