arxiv_id
stringlengths 0
16
| text
stringlengths 10
1.65M
|
|---|---|
# -*- coding: utf-8 -*-
"""
Name: Anomaly Detection for Anonymous Dataset
Author: Pablo Reynoso
Date: 2022-03-22
Version: 1.0
"""
"""## 0) Libraries/Frameworks"""
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.models import Model
"""## 1) Understanding the Dummy Dataset"""
ae_data = pd.read_csv('./dummy_data.csv', header=None)
ae_data = ae_data.add_prefix('var_')
print(ae_data.shape)
ae_data.describe()
"""## 2) Labeling Anomalies in Dummy Dataset"""
# Outliers Presence in Dummy Dataset
ax1 = sns.boxplot(data=ae_data, orient="h", palette="Set2")
plt.title('Dummy Dataset Outliers')
data_ = ae_data.copy(deep=True)
# Finding Threshold for Variables Outliers %
print('var_0 - outliers %: '+str(data_[(data_['var_0'] > 166)].shape[0])+'/10,000')
print('var_1 - outliers %: '+str(data_[(data_['var_1'] > 217)].shape[0])+'/10,000')
print('var_2 - outliers %: '+str(data_[(data_['var_2'] > 149)].shape[0])+'/10,000')
print('var_3 - outliers %: '+str(data_[(data_['var_3'] > 166)].shape[0])+'/10,000')
print('var_4 - outliers %: '+str(data_[(data_['var_4'] > 197)].shape[0])+'/10,000')
print('var_5 - outliers %: '+str(data_[(data_['var_5'] > 81)].shape[0])+'/10,000')
print('var_6 - outliers %: '+str(data_[(data_['var_6'] > 202)].shape[0])+'/10,000')
print('var_7 - outliers %: '+str(data_[(data_['var_7'] > 184)].shape[0])+'/10,000')
# Finding the Anomalous Samples in Dummy Dataset (*considering 37.5% of anomaly in every sample)
var0_a = data_[(data_['var_0'] > 166)].index.tolist()
var1_a = data_[(data_['var_1'] > 217)].index.tolist()
var2_a = data_[(data_['var_2'] > 149)].index.tolist()
var3_a = data_[(data_['var_3'] > 166)].index.tolist()
var4_a = data_[(data_['var_4'] > 197)].index.tolist()
var5_a = data_[(data_['var_5'] > 81)].index.tolist()
var6_a = data_[(data_['var_6'] > 202)].index.tolist()
var7_a = data_[(data_['var_7'] > 184)].index.tolist()
outliers = var0_a + var1_a + var2_a + var3_a + var4_a + var5_a + var6_a + var7_a
anomalies = {x:outliers.count(x) for x in outliers}
anomalies = {k: v for k, v in sorted(anomalies.items(), key=lambda item: item[1], reverse=True)}
anomalies = {k: v for k, v in anomalies.items() if v > 2}
print(anomalies)
# Labeling Anomalous Samples
ae_data['normal'] = 1
ae_data.loc[list(anomalies.keys()), 'normal'] = 0
ae_data['normal']
ae_data
"""## 3) Splitting, Normalizing, Subsetting by Label *Dummy Dataset* for Supervised Anomaly Detection"""
# Dataframe to Numpy
raw_data = ae_data.values
# The last element contains the anomaly-labels
labels = raw_data[:, -1]
# The other data points are the dummy data
data = raw_data[:, 0:-1]
train_data, test_data, train_labels, test_labels = train_test_split(
data, labels, test_size=0.20, random_state=9
)
# Normalize Data
min_val = tf.reduce_min(train_data)
max_val = tf.reduce_max(train_data)
train_data = (train_data - min_val) / (max_val - min_val)
test_data = (test_data - min_val) / (max_val - min_val)
train_data = tf.cast(train_data, tf.float32)
test_data = tf.cast(test_data, tf.float32)
# Filtering Train/Test into Normal & Anomalous Subsets
train_labels = train_labels.astype(bool)
test_labels = test_labels.astype(bool)
normal_train_data = train_data[train_labels]
normal_test_data = test_data[test_labels]
anomalous_train_data = train_data[~train_labels]
anomalous_test_data = test_data[~test_labels]
plt.grid()
plt.plot(np.arange(8), normal_train_data[0])
plt.title("A Normal Sample")
plt.show()
plt.grid()
plt.plot(np.arange(8), anomalous_train_data[0])
plt.title("An Anomalous Sample")
plt.show()
"""## 4) Autoencoder Architechture"""
class AnomalyDetector(Model):
    """Symmetric dense autoencoder for 8-feature samples.

    Encoder compresses 8 -> 4 -> 2; decoder mirrors it back 2 -> 4 -> 8.
    The final sigmoid matches inputs normalized to [0, 1].
    """

    def __init__(self):
        super(AnomalyDetector, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Dense(units, activation="relu") for units in (8, 4, 2)
        ])
        decoder_stack = [
            layers.Dense(2, activation="relu"),
            layers.Dense(4, activation="relu"),
            layers.Dense(8, activation="sigmoid"),
        ]
        self.decoder = tf.keras.Sequential(decoder_stack)

    def call(self, x):
        """Return the reconstruction of `x` (encode then decode)."""
        latent = self.encoder(x)
        return self.decoder(latent)
# Train the autoencoder on NORMAL samples only; validation uses the full test set.
autoencoder = AnomalyDetector()
autoencoder.compile(optimizer='adam', loss='mae')
history = autoencoder.fit(normal_train_data, normal_train_data,
                          epochs=35,
                          batch_size=64,
                          validation_data=(test_data, test_data),
                          shuffle=True)
plt.plot(history.history["loss"], label="Training Loss")
plt.plot(history.history["val_loss"], label="Validation Loss")
plt.legend()
"""## 5.1) Reconstruction Error - Normal Samples"""
encoded_data = autoencoder.encoder(normal_test_data).numpy()
decoded_data = autoencoder.decoder(encoded_data).numpy()
plt.plot(normal_test_data[0], 'b')
plt.plot(decoded_data[0], 'r')
plt.fill_between(np.arange(8), decoded_data[0], normal_test_data[0], color='lightcoral')
plt.legend(labels=["Input", "Reconstruction", "Error"])
plt.show()
"""## 5.2) Reconstruction Error - Anomalous Samples"""
encoded_data = autoencoder.encoder(anomalous_test_data).numpy()
decoded_data = autoencoder.decoder(encoded_data).numpy()
plt.plot(anomalous_test_data[0], 'b')
plt.plot(decoded_data[0], 'r')
plt.fill_between(np.arange(8), decoded_data[0], anomalous_test_data[0], color='lightcoral')
plt.legend(labels=["Input", "Reconstruction", "Error"])
plt.show()
"""## 6) Finding Threshold for Anomaly Detection"""
reconstructions = autoencoder.predict(normal_train_data)
train_loss = tf.keras.losses.mae(reconstructions, normal_train_data)
plt.hist(train_loss[None,:], bins=20)
plt.xlabel("Train loss")
plt.ylabel("No of examples")
plt.show()
# Threshold = mean + 1 std of the training reconstruction error on normal data.
threshold = np.mean(train_loss) + np.std(train_loss)
print("Threshold: ", threshold)
reconstructions = autoencoder.predict(anomalous_test_data)
test_loss = tf.keras.losses.mae(reconstructions, anomalous_test_data)
plt.hist(test_loss[None, :], bins=20)
plt.xlabel("Test loss")
plt.ylabel("No of examples")
plt.show()
"""## 7) Autoencoder Anomaly Classifier """
def predict(model, data, threshold):
    """Classify samples: True (normal) when reconstruction MAE < threshold."""
    reconstructed = model(data)
    reconstruction_error = tf.keras.losses.mae(reconstructed, data)
    return tf.math.less(reconstruction_error, threshold)
def print_stats(predictions, labels):
    """Print accuracy, precision and recall of `predictions` vs. `labels`."""
    print(f"Accuracy = {accuracy_score(labels, predictions)}")
    print(f"Precision = {precision_score(labels, predictions)}")
    print(f"Recall = {recall_score(labels, predictions)}")
# Evaluate the threshold-based classifier on the full test split.
preds = predict(autoencoder, test_data, threshold)
print_stats(preds, test_labels)
|
|
import pylab as pl
import numpy as np
def tickline():
    """Prepare a bare horizontal number-line axis for the locator demo.

    Hides the y axis and all spines except the bottom one (pinned at y=0),
    plots an invisible line so the x range is realized, and returns the axis.
    """
    pl.xlim(0, 10)
    pl.ylim(-1, 1)
    pl.yticks([])
    ax = pl.gca()
    for side in ('right', 'left', 'top'):
        ax.spines[side].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.spines['bottom'].set_position(('data', 0))
    ax.yaxis.set_ticks_position('none')
    ax.xaxis.set_minor_locator(pl.MultipleLocator(0.1))
    ax.plot(np.arange(11), np.zeros(11), color='none')
    return ax
# One subplot per matplotlib tick Locator; the source string doubles as the
# on-plot label (with the 'pl.' prefix stripped).
locators = [
    'pl.NullLocator()',
    'pl.MultipleLocator(1.0)',
    'pl.FixedLocator([0, 2, 8, 9, 10])',
    'pl.IndexLocator(3, 1)',
    'pl.LinearLocator(5)',
    'pl.LogLocator(2, [1.0])',
    'pl.AutoLocator()',
]
n_locators = len(locators)
# Figure sized in pixels (512 wide, 40 px per row) converted to inches via dpi.
size = 512, 40 * n_locators
dpi = 72.0
figsize = size[0] / float(dpi), size[1] / float(dpi)
fig = pl.figure(figsize=figsize, dpi=dpi)
fig.patch.set_alpha(0)
for i, locator in enumerate(locators):
    pl.subplot(n_locators, 1, i + 1)
    ax = tickline()
    # eval() is safe here: it only ever sees the fixed literals listed above.
    ax.xaxis.set_major_locator(eval(locator))
    pl.text(5, 0.3, locator[3:], ha='center')
pl.subplots_adjust(bottom=.01, top=.99, left=.01, right=.99)
pl.show()
|
|
import torch
import torch.nn as nn
import pandas as pd
import yaml
import logging
import sys
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from discriminator import Discriminator_Agnostic, Discriminator_Awareness, Generator
from dfencoder.autoencoder import AutoEncoder
from sklearn import preprocessing
from dfencoder.dataframe import EncoderDataFrame
from geomloss import SamplesLoss # See also ImagesLoss, VolumesLoss
def train(**parameters):
    """Jointly train the generator autoencoder and both discriminators.

    Expected keys in ``parameters``:
        learning_rate (float): Adam learning rate shared by all optimizers.
        epochs (int): number of training iterations.
        input_length (int): feature dimensionality fed to every network.
        dataframe (dict-like): provides 'normal_features', 'sensitive_features'
            and 'target' tensors (TODO confirm exact container type).

    Side effects: updates the three networks in place; returns None.
    """
    learning_rate = parameters['learning_rate']
    epochs = parameters['epochs']
    input_length = parameters['input_length']
    dataframe = parameters['dataframe']

    generator = AutoEncoder(input_length)
    discriminator_agnostic = Discriminator_Agnostic(input_length)
    discriminator_awareness = Discriminator_Awareness(input_length)

    """Optimizer"""
    generator_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate)
    discriminator_agnostic_optimizer = torch.optim.Adam(
        discriminator_agnostic.parameters(), lr=learning_rate
    )
    discriminator_awareness_optimizer = torch.optim.Adam(
        discriminator_awareness.parameters(), lr=learning_rate
    )

    """Loss function"""
    loss = nn.BCELoss()

    """Training steps"""
    # Bug fix: tqdm(epochs) over a plain int raises TypeError; iterate a range.
    for _ in tqdm(range(epochs)):
        X = dataframe['normal_features']
        S = dataframe['sensitive_features']
        Y = dataframe['target']

        Z = generator.generator_fit(X)
        # (removed debug leftover: print(torch.cat((Z, S), 1)); sys.exit(1)
        #  aborted the whole run on the first iteration)

        predictor_agnostic = discriminator_agnostic.forward(Z)
        predictor_awareness = discriminator_awareness.forward(Z, S)

        loss_agnostic = loss(predictor_agnostic, Y)
        loss_awareness = loss(predictor_awareness, Y)
        final_loss = (loss_agnostic + loss_awareness) / 2

        # Bug fix: gradients must be cleared each step, otherwise they
        # accumulate across iterations.
        generator_optimizer.zero_grad()
        discriminator_agnostic_optimizer.zero_grad()
        discriminator_awareness_optimizer.zero_grad()
        final_loss.backward()
        generator_optimizer.step()
        discriminator_agnostic_optimizer.step()
        discriminator_awareness_optimizer.step()
if __name__ == "__main__":
    # End-to-end experiment driver: load law-school data, fit a dfencoder
    # representation of the sensitive features, then adversarially train a
    # generator against agnostic/awareness discriminators and write predictions.
    """Device"""
    if torch.cuda.is_available():
        dev = "cuda:0"
    else:
        dev = "cpu"
    device = torch.device(dev)
    """Load configuration"""
    # NOTE(review): hard-coded absolute path — only works on the author's machine.
    with open("/home/trduong/Data/counterfactual_fairness_game_theoric/configuration.yml", 'r') as stream:
        try:
            conf = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    """Set up logging"""
    # Log to both a file (path from config) and stdout with the same format.
    logger = logging.getLogger('genetic')
    file_handler = logging.FileHandler(filename=conf['log_train_law'])
    stdout_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    file_handler.setFormatter(formatter)
    stdout_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stdout_handler)
    logger.setLevel(logging.DEBUG)
    """Load data"""
    data_path = conf['data_law']
    df = pd.read_csv(data_path)
    """Setup features"""
    sensitive_feature = ['race', 'sex']
    normal_feature = ['LSAT', 'UGPA']
    categorical_feature = ['race', 'sex']
    full_feature = sensitive_feature + normal_feature
    target = 'ZFYA'
    # Restrict to a binary race setting.
    selected_race = ['White', 'Black']
    df = df[df['race'].isin(selected_race)]
    df = df.reset_index(drop = True)
    df_generator = df[normal_feature]
    """Preprocess data"""
    # z-score the numeric columns; label-encode the categoricals.
    df['LSAT'] = (df['LSAT']-df['LSAT'].mean())/df['LSAT'].std()
    df['UGPA'] = (df['UGPA']-df['UGPA'].mean())/df['UGPA'].std()
    df['ZFYA'] = (df['ZFYA']-df['ZFYA'].mean())/df['ZFYA'].std()
    le = preprocessing.LabelEncoder()
    df['race'] = le.fit_transform(df['race'])
    df['sex'] = le.fit_transform(df['sex'])
    df = df[['LSAT', 'UGPA', 'sex', 'race', 'ZFYA']]
    df_autoencoder = df.copy()
    """Setup auto encoder"""
    # dfencoder learns a representation used later as the "sensitive" input.
    dfencoder_model = AutoEncoder(
        encoder_layers=[512, 512, 32],  # model architecture
        decoder_layers=[],  # decoder optional - you can create bottlenecks if you like
        activation='relu',
        swap_p=0.2,  # noise parameter
        lr=0.01,
        lr_decay=.99,
        batch_size=512,  # 512
        verbose=False,
        optimizer='sgd',
        scaler='gauss_rank',  # gauss rank scaling forces your numeric features into standard normal distributions
    )
    dfencoder_model.to(device)
    # NOTE(review): only 1 epoch — presumably a smoke-test setting; confirm.
    dfencoder_model.fit(df_autoencoder[full_feature], epochs=1)
    # df = pd.get_dummies(df, columns = ['sex'])
    # df = pd.get_dummies(df, columns = ['race'])
    # print(df)
    # df = pd.get_dummies(df.sex, prefix='Sex')
    # sensitive_feature = ['sex_0','sex_1', 'race_0', 'race_1']
    # sys.exit(1)
    # X, y = df[['LSAT', 'UGPA', 'sex', 'race']].values, df['ZFYA'].values
    """Setup hyperparameter"""
    logger.debug('Setup hyperparameter')
    parameters = {}
    parameters['epochs'] = 2
    parameters['learning_rate'] = 0.001
    parameters['dataframe'] = df
    parameters['batch_size'] = 256
    parameters['problem'] = 'regression'
    """Hyperparameter"""
    learning_rate = parameters['learning_rate']
    epochs = parameters['epochs']
    dataframe = parameters['dataframe']
    batch_size = parameters['batch_size']
    problem = parameters['problem']
    """Setup generator and discriminator"""
    # dfencoder_model = torch.load(conf['ae_model_law'])
    emb_size = 32
    # generator = Generator(df_generator.shape[1])
    # The generator is itself a dfencoder AutoEncoder over the normal features.
    generator= AutoEncoder(
        encoder_layers=[64, 64, emb_size],  # model architecture
        decoder_layers=[],  # decoder optional - you can create bottlenecks if you like
        encoder_dropout = 0.85,
        decoder_dropout = 0.85,
        activation='relu',
        swap_p=0.2,  # noise parameter
        lr=0.001,
        lr_decay=.99,
        batch_size=512,  # 512
        verbose=False,
        optimizer='adamW',
        scaler='gauss_rank',  # gauss rank scaling forces your numeric features into standard normal distributions
    )
    discriminator_agnostic = Discriminator_Agnostic(emb_size, problem)
    # discriminator_awareness = Discriminator_Awareness(emb_size+len(sensitive_feature), problem)
    # Awareness input = generator embedding (32) + dfencoder representation (32).
    discriminator_awareness = Discriminator_Awareness(emb_size+32, problem)
    # generator.to(device)
    discriminator_agnostic.to(device)
    discriminator_awareness.to(device)
    """Setup generator"""
    df_generator = df[normal_feature]
    generator.build_model(df_generator)
    """Optimizer"""
    generator_optimizer = torch.optim.Adam(
        generator.parameters(), lr=learning_rate
    )
    discriminator_agnostic_optimizer = torch.optim.Adam(
        discriminator_agnostic.parameters(), lr=learning_rate
    )
    discriminator_awareness_optimizer = torch.optim.Adam(
        discriminator_awareness.parameters(), lr=learning_rate
    )
    # lr_decay = torch.optim.lr_scheduler.ExponentialLR(generator_optimizer, lr_decay)
    # Schedulers are created but .step() calls below are commented out.
    scheduler = torch.optim.lr_scheduler.StepLR(generator_optimizer, step_size=10, gamma=0.1)
    scheduler_discriminator_env = torch.optim.lr_scheduler.StepLR(discriminator_awareness_optimizer, step_size=10, gamma=0.1)
    scheduler_discriminator = torch.optim.lr_scheduler.StepLR(discriminator_agnostic_optimizer, step_size=10, gamma=0.1)
    """Training"""
    n_updates = len(df)// batch_size
    logger.debug('Training')
    logger.debug('Number of updates {}'.format(n_updates))
    logger.debug('Dataframe length {}'.format(len(df)))
    logger.debug('Batchsize {}'.format((batch_size)))
    # loss = torch.nn.SmoothL1Loss()
    loss = torch.nn.MSELoss()
    # loss = torch.nn.L1Loss
    dist_loss = SamplesLoss(loss="sinkhorn", p=2, blur=.05)
    # NOTE(review): overrides parameters['epochs'] set above (same value here).
    epochs = 2
    # epochs = 1
    for i in (range(epochs)):
        # logger.debug('Epoch {}'.format((i)))
        # scheduler.step()
        # scheduler_discriminator_env.step()
        # scheduler_discriminator.step()
        for j in tqdm(range(n_updates)):
            # Mini-batch slices by row label; .loc is end-inclusive, so
            # consecutive batches share one boundary row.
            df_term_generator = df_generator.loc[batch_size*j:batch_size*(j+1)]
            df_term_generator = EncoderDataFrame(df_term_generator)
            df_term_discriminator = df.loc[batch_size*j:batch_size*(j+1)].reset_index(drop = True)
            df_term_autoencoder = df_autoencoder.loc[batch_size*j:batch_size*(j+1)].reset_index(drop = True)
            X = torch.tensor(df_term_discriminator[normal_feature].values.astype(np.float32)).to(device)
            # S = torch.Tensor(df_term_discriminator[sensitive_feature].values).to(device)
            Y = torch.Tensor(df_term_discriminator[target].values).to(device).reshape(-1,1)
            # Forward the generator; its embedding Z feeds both discriminators.
            num, bin, cat = generator.forward(df_term_generator)
            encode_output = torch.cat((num , bin), 1)
            Z = generator.encode(encode_output)
            sensitive_representation = dfencoder_model.get_representation(
                df_term_autoencoder[['UGPA','LSAT','sex', 'race']]
            )
            # logger.debug(Z)
            # logger.debug("=================================")
            # ZS = torch.cat((Z,S),1)
            # print(ZS)
            # sys.exit(1)
            # Z = generator(X)
            # print(Z)
            distLoss = torch.tensor(0).to(device).float()
            predictor_agnostic = discriminator_agnostic(Z)
            predictor_awareness = discriminator_awareness(Z,sensitive_representation)
            # for s in sensitive_feature:
            #     index_positive = df_term_discriminator.index[df_term_discriminator[s] == 0].tolist()
            #     index_negative = df_term_discriminator.index[df_term_discriminator[s] == 1].tolist()
            #     print(predictor_agnostic)
            #     print(index_positive)
            #     # sys.exit(1)
            #     print(predictor_agnostic[[1,2,3]])
            #     print(len(predictor_agnostic))
            #     print(len(index_positive))
            #     print(predictor_agnostic[index_positive])
            #     sys.exit(1)
            # sys.exit(1)
            # index_positive = torch.tensor(df_autoencoder.index[df_autoencoder[s] == 0].tolist()).to(device)
            # index_negative = torch.tensor(df_autoencoder.index[df_autoencoder[s] == 1].tolist()).to(device)
            # print(predictor_agnostic[index_positive])
            # if len(index_positive) != 0:
            #     # print((index_positive))
            #     # print((predictor_agnostic))
            #     # sys.exit(1)
            #     ys_positive = predictor_agnostic[index_positive]
            #     # print(ys_positive)
            #     # print(predictor_awareness)
            #     ys_hat_positive = predictor_awareness[index_positive]
            #     distLoss += dist_loss(ys_positive, ys_hat_positive)
            # if len(index_negative) != 0:
            #     ys_negative = predictor_agnostic[index_negative]
            #     ys_hat_negative = predictor_awareness[index_negative]
            #     distLoss += dist_loss(ys_negative, ys_hat_negative)
            # print(distLoss)
            # sys.exit(1)
            # print("Distribution loss ", distLoss)
            loss_agnostic = loss(predictor_agnostic, Y)
            loss_awareness = loss(predictor_awareness, Y)
            # Penalize the agnostic loss when it exceeds (a squashed) awareness loss.
            final_loss = loss_agnostic + F.leaky_relu(loss_agnostic - F.sigmoid(loss_awareness))
            # final_loss = 100*loss_agnostic + 0.0001*F.leaky_relu(loss_agnostic - loss_awareness)
            print(final_loss)
            # final_loss = loss_agnostic + F.relu(loss_agnostic - loss_awareness)
            # final_loss = loss_agnostic + F.gelu(loss_agnostic - loss_awareness)
            # final_loss = loss_agnostic + F.prelu(loss_agnostic - loss_awareness, torch.tensor(0.5).to(device))
            # final_loss = loss_agnostic + F.rrelu(loss_agnostic - loss_awareness)
            generator_optimizer.zero_grad()
            discriminator_agnostic_optimizer.zero_grad()
            discriminator_awareness_optimizer.zero_grad()
            final_loss.backward()
            generator_optimizer.step()
            discriminator_agnostic_optimizer.step()
            discriminator_awareness_optimizer.step()
            # sys.exit(1)
        # scheduler.step()
        # scheduler_discriminator_env.step()
        # scheduler_discriminator.step()
        # if i % 10 == 0:
        logger.debug('Epoch {}'.format(i))
        logger.debug('Loss Agnostic {}'.format(loss_agnostic))
        logger.debug('Loss Awareness {}'.format(loss_awareness))
        logger.debug('Final loss {}'.format(final_loss))
        logger.debug('Gap {}'.format(loss_agnostic - loss_awareness))
        logger.debug('LeakyRelu Gap {}'.format(F.leaky_relu(loss_agnostic - loss_awareness)))
        logger.debug('-------------------')
    # df_result = pd.read_csv(conf['result_law'])
    # X = torch.tensor(df_generator[normal_feature].values.astype(np.float32)).to(device)
    # Z = generator(X)
    # predictor_agnostic = discriminator_agnostic(Z)
    # df_result['inv_prediction'] = predictor_agnostic.cpu().detach().numpy().reshape(-1)
    # Final pass over the whole dataset; write predictions back to the result CSV.
    num, bin, cat = generator.forward(df_generator)
    encode_output = torch.cat((num ,bin), 1)
    Z = generator.encode(encode_output)
    predictor_agnostic = discriminator_agnostic(Z)
    df_result = pd.read_csv(conf['result_law'])
    df_result['inv_prediction'] = predictor_agnostic.cpu().detach().numpy().reshape(-1)
    df_result.to_csv(conf['result_law'], index = False)
    # Aggressively tears down the module namespace (frees references on exit).
    sys.modules[__name__].__dict__.clear()
    # sys.exit(1)
|
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
# In[2]:
from train_valid_split import train_valid_split
from train_valid_split import train_valid_dict_generator
# In[3]:
from PIL import Image
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
# In[4]:
# Load the label CSV and split it into train/validation via the project helper.
train_labels = pd.read_csv("df_train.csv")
train, valid = train_valid_split(train_labels)
# In[6]:
train_df, valid_df = train_valid_dict_generator(train,valid,train_labels)
# transform the above imported dictionaries into dataframes
train_df_final = pd.DataFrame(train_df.items(), columns=['Image', 'Id'])
valid_df_final = pd.DataFrame(valid_df.items(), columns=['Image', 'Id'])
# ### Apply Transformations
# In[7]:
# In[7]:
def datagens():
    """Build the training and validation ImageDataGenerators.

    Both use the same light augmentation: small rotations, zoom and shifts,
    plus horizontal/vertical flips; no normalization or whitening.
    """
    augmentation = dict(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=10,
        zoom_range=0.1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
    )
    train_datagen = ImageDataGenerator(**augmentation)
    valid_datagen = ImageDataGenerator(**augmentation)
    return train_datagen, valid_datagen
# In[9]:
def prepareImages(df, shape, path):
    """Load every image named in df['Image'] into a (shape, 40, 40, 3) array.

    Images are read from `path`, resized to 40x40x3, run through the VGG-style
    preprocess_input, then the whole array is divided by 255.
    """
    z_train = np.zeros((shape, 40, 40, 3))
    for count, fig in enumerate(df['Image']):
        # load images into images of size 40x40x3
        img = image.load_img(path + fig, target_size=(40, 40, 3))
        arr = image.img_to_array(img)
        z_train[count] = preprocess_input(arr)
        if count % 500 == 0:
            print("Processing image: ", count + 1, ", ", fig)
    z_train = z_train / 255.0
    return z_train
def prepareLabels(df, number_of_classes):
    """Turn the 'Id' column into one-hot vectors over `number_of_classes`."""
    encoder = LabelEncoder()
    class_indices = encoder.fit_transform(df["Id"])
    return to_categorical(class_indices, num_classes=number_of_classes)
|
|
import json
import gc
from keras.models import Model
from keras.layers import Input, Concatenate, Average
from keras import backend as K
from keras.optimizers import Adam
from keras.utils import generic_utils
import numpy as np
from layers import GradientPenalty, RandomWeightedAverage
import utils
class WGANGP(object):
    """Wasserstein GAN with gradient penalty (WGAN-GP) built on Keras.

    Wraps a generator and a discriminator (critic) into two training models:
    one that updates only the generator and one that updates only the
    discriminator (with the gradient-penalty term). Supports saving/resuming
    weights, optimizer state and hyperparameters via a filename root.
    """

    def __init__(self, gen, disc, load_fn_root=None,
                 gradient_penalty_weight=10, lr_disc=0.0001, lr_gen=0.0001,
                 avg_seed=None, num_channels=1):
        # When resuming, hyperparameters stored next to the weights override
        # the constructor arguments. NOTE(review): `avg_seed` is accepted but
        # never used in this class — confirm whether it belongs elsewhere.
        if load_fn_root is not None:
            load_files = self.filenames_from_root(load_fn_root)
            with open(load_files["gan_params"]) as f:
                params = json.load(f)
            gradient_penalty_weight = params["gradient_penalty_weight"]
            lr_disc = params["lr_disc"]
            lr_gen = params["lr_gen"]
        self.gen = gen
        self.disc = disc
        self.gradient_penalty_weight = gradient_penalty_weight
        self.lr_disc = lr_disc
        self.lr_gen = lr_gen
        self.num_channels = num_channels
        # Trainers must exist before optimizer weights can be loaded into them.
        self.build_wgan_gp()
        if load_fn_root is not None:
            self.load(load_files)

    def filenames_from_root(self, root):
        """Map a filename root to the five files a checkpoint consists of."""
        fn = {
            "gen_weights": root+"-gen_weights.h5",
            "disc_weights": root+"-disc_weights.h5",
            "gen_opt_weights": root+"-gen_opt_weights.h5",
            "disc_opt_weights": root+"-disc_opt_weights.h5",
            "gan_params": root+"-gan_params.json"
        }
        return fn

    def load(self, load_files):
        """Restore network and optimizer weights from a checkpoint dict.

        The trainable flags are toggled exactly as in build_wgan_gp so the
        optimizer weight shapes match; _make_train_function forces Keras to
        build the train function before its optimizer state is filled in.
        """
        self.gen.load_weights(load_files["gen_weights"])
        self.disc.load_weights(load_files["disc_weights"])
        self.disc.trainable = False
        self.gen_trainer._make_train_function()
        utils.load_opt_weights(self.gen_trainer,
            load_files["gen_opt_weights"])
        self.disc.trainable = True
        self.gen.trainable = False
        self.disc_trainer._make_train_function()
        utils.load_opt_weights(self.disc_trainer,
            load_files["disc_opt_weights"])
        self.gen.trainable = True

    def save(self, save_fn_root):
        """Write network weights, optimizer state and hyperparameters to disk."""
        paths = self.filenames_from_root(save_fn_root)
        self.gen.save_weights(paths["gen_weights"], overwrite=True)
        self.disc.save_weights(paths["disc_weights"], overwrite=True)
        utils.save_opt_weights(self.disc_trainer, paths["disc_opt_weights"])
        utils.save_opt_weights(self.gen_trainer, paths["gen_opt_weights"])
        params = {
            "gradient_penalty_weight": self.gradient_penalty_weight,
            "lr_disc": self.lr_disc,
            "lr_gen": self.lr_gen
        }
        with open(paths["gan_params"], 'w') as f:
            json.dump(params, f)

    def build_wgan_gp(self):
        """Assemble the two training graphs (generator and discriminator)."""
        # find shapes for inputs
        noise_shapes = utils.input_shapes(self.gen, "noise")
        # Create optimizers
        self.opt_disc = Adam(self.lr_disc, beta_1=0.5, beta_2=0.9)
        self.opt_gen = Adam(self.lr_gen, beta_1=0.5, beta_2=0.9)
        # Create generator training network (critic frozen)
        self.disc.trainable = False
        noise_in = [Input(shape=s) for s in noise_shapes]
        gen_in = noise_in
        gen_out = self.gen(gen_in)
        gen_out = utils.ensure_list(gen_out)
        disc_in_gen = gen_out
        disc_out_gen = self.disc(disc_in_gen)
        self.gen_trainer = Model(inputs=gen_in, outputs=disc_out_gen)
        self.gen_trainer.compile(loss=wasserstein_loss,
            optimizer=self.opt_gen)
        self.disc.trainable = True
        # Create discriminator training network (generator frozen);
        # outputs: critic(real), critic(fake), gradient penalty on the
        # random interpolation between real and fake.
        self.gen.trainable = False
        disc_in_real = Input(shape=(None,None,self.num_channels))
        noise_in = [Input(shape=s) for s in noise_shapes]
        disc_in_fake = self.gen(noise_in)
        disc_in_avg = RandomWeightedAverage()([disc_in_real,disc_in_fake])
        disc_out_real = self.disc(disc_in_real)
        disc_out_fake = self.disc(disc_in_fake)
        disc_out_avg = self.disc(disc_in_avg)
        disc_gp = GradientPenalty()([disc_out_avg, disc_in_avg])
        self.disc_trainer = Model(inputs=[disc_in_real]+noise_in,
            outputs=[disc_out_real,disc_out_fake,disc_gp])
        self.disc_trainer.compile(
            loss=[wasserstein_loss, wasserstein_loss, 'mse'],
            loss_weights=[1.0, 1.0, self.gradient_penalty_weight],
            optimizer=self.opt_disc
        )
        self.gen.trainable = True

    def train(self, batch_gen, noise_gen, num_gen_batches=1,
              training_ratio=1, show_progress=True):
        """Run one training pass: `training_ratio` critic updates per
        generator update, for `num_gen_batches` generator batches.

        batch_gen yields real samples; noise_gen() returns a list of noise
        arrays matching the generator's noise inputs.
        """
        disc_target_real = None
        if show_progress:
            # Initialize progbar and batch counter
            progbar = generic_utils.Progbar(
                num_gen_batches*batch_gen.batch_size)
            batch_counter = 1
        for k in range(num_gen_batches):
            # train discriminator
            disc_loss = None
            disc_loss_n = 0
            for rep in range(training_ratio):
                # generate some real samples
                Y_real = next(batch_gen)
                noise = noise_gen()
                if disc_target_real is None: # on the first iteration
                    # run discriminator once just to find the shapes,
                    # then build the +1/-1/0 target arrays once and reuse them.
                    disc_outputs = self.disc_trainer.predict(
                        [Y_real]+noise)
                    disc_target_real = np.ones(disc_outputs[0].shape,
                        dtype=np.float32)
                    disc_target_fake = -disc_target_real
                    gen_target = disc_target_real
                    gp_target = np.zeros(disc_outputs[2].shape,
                        dtype=np.float32)
                    disc_target = [disc_target_real, disc_target_fake,
                        gp_target]
                    del disc_outputs
                # try/finally guarantees the trainable flag is restored
                # even if train_on_batch raises.
                try:
                    self.gen.trainable = False
                    dl = self.disc_trainer.train_on_batch(
                        [Y_real]+noise, disc_target)
                finally:
                    self.gen.trainable = True
                if disc_loss is None:
                    disc_loss = np.array(dl)
                else:
                    disc_loss += np.array(dl)
                disc_loss_n += 1
                del Y_real
            # average the critic losses over the training_ratio reps
            disc_loss /= disc_loss_n
            try:
                self.disc.trainable = False
                gen_loss = self.gen_trainer.train_on_batch(
                    noise_gen(), gen_target)
            finally:
                self.disc.trainable = True
            if show_progress:
                losses = []
                for (i,dl) in enumerate(disc_loss):
                    losses.append(("D{}".format(i), dl))
                for (i,gl) in enumerate([gen_loss]):
                    losses.append(("G{}".format(i), gl))
                progbar.add(batch_gen.batch_size,
                    values=losses)
            # keep memory bounded across many batches
            gc.collect()
def wasserstein_loss(y_true, y_pred):
    """Wasserstein critic loss: mean of the elementwise product over the last axis."""
    product = y_pred * y_true
    return K.mean(product, axis=-1)
|
|
import paddle.v2 as paddle
import numpy as np
# init paddle
# init paddle (legacy paddle.v2 API, CPU only)
paddle.init(use_gpu=False)
# network config: single linear layer, y_hat = w.x + b, squared-error cost
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(2))
y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
cost = paddle.layer.square_error_cost(input=y_predict, label=y)
# create parameters
parameters = paddle.parameters.create(cost)
# create optimizer (momentum=0 reduces to plain SGD)
optimizer = paddle.optimizer.Momentum(momentum=0)
# create trainer
trainer = paddle.trainer.SGD(cost=cost,
                             parameters=parameters,
                             update_equation=optimizer)
# event_handler to print training info
def event_handler(event):
    """Print the cost after every training batch.

    Fix: use the parenthesized print form so the file also parses on
    Python 3 (the original bare `print` statement is Python-2-only);
    with a single argument the output is identical on both.
    """
    if isinstance(event, paddle.event.EndIteration):
        # `% 1 == 0` is always true, i.e. log every single batch.
        if event.batch_id % 1 == 0:
            print("Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id,
                                                  event.cost))
# define training dataset reader
def train_reader():
    """Return a reader() generator over the fixed 4-sample toy dataset.

    Each yielded item is a (features, label) pair of numpy rows:
    features shape (2,), label shape (1,).

    Fix: use range() instead of the Python-2-only xrange() so the reader
    also runs on Python 3 (identical behavior for this tiny fixed loop).
    """
    train_x = np.array([[1, 1], [1, 2], [3, 4], [5, 2]])
    train_y = np.array([[-2], [-3], [-7], [-7]])

    def reader():
        for i in range(train_y.shape[0]):
            yield train_x[i], train_y[i]

    return reader
# define feeding map
# define feeding map: positions in each reader tuple -> layer names
feeding = {'x': 0, 'y': 1}
# training: batch size 1, 100 passes over the 4-sample dataset
trainer.train(
    reader=paddle.batch(
        train_reader(), batch_size=1),
    feeding=feeding,
    event_handler=event_handler,
    num_passes=100)
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Szymon Biliński
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import logging
import matplotlib as mlt
import matplotlib.pyplot as plt
import networkx as nx
import os
log = logging.getLogger('analyzer')
class Analyzer(object):
    """Abstract base class for model analyzers."""
    __metaclass__ = abc.ABCMeta  # Python-2 style ABC declaration

    def __init__(self, model):
        """Initializes a new instance of the Analyzer class."""
        self.model = model
        self._graph = None  # built lazily on first access to `graph`

    @property
    def graph(self):
        """Returns a NetworkX graph model (building it on first access)."""
        if self._graph is None:
            self._graph = self._build_graph(self.model)
        return self._graph

    def _build_graph(self, model):
        # Translate the project model (nodes with id/size/connections)
        # into a directed NetworkX graph.
        log.debug('Building NetworkX graph...')
        graph = nx.DiGraph()
        for node in model.nodes:
            graph.add_node(node.id, size=node.size)
        # Second pass so edge targets may reference any node added above.
        for node in model.nodes:
            for conn in node.connections:
                graph.add_edge(node.id, conn)
        log.debug('NetworkX graph size: nodes=%d edges=%d', graph.number_of_nodes(), graph.number_of_edges())
        return graph
class Writer(Analyzer):
    """Serializes the underlying graph model to a file."""

    def __init__(self, model):
        """Initializes a new instance of the Writer class."""
        super(Writer, self).__init__(model)

    def write(self, path, data_format='dot'):
        """Writes the underlying graph model to a specific file.

        Supported formats: 'dot', 'gml', 'graphml'.
        """
        if data_format not in ('dot', 'gml', 'graphml'):
            raise AssertionError('Invalid format: %s' % data_format)
        # Resolve the matching nx.write_<format> function lazily.
        writer = getattr(nx, 'write_' + data_format)
        writer(self.graph, path)
class Plotter(Analyzer):
    """A graph plotter."""
    # Palette cycled over nodes when drawing.
    node_colors = ['#ffd070', '#e6ff6f', '#ff886f', '#6f9eff', '#cf6fff']

    def __init__(self, model):
        """Initializes a new instance of the Plotter class."""
        super(Plotter, self).__init__(model)

    def plot(self, **kwargs):
        """Plots the underlying graph.

        Keyword args:
            calc_node_size: when truthy, scale node sizes from node 'size' attrs.
            filename: when given, save the figure there instead of showing it.
        """
        plt.figure(facecolor='#fefefe', dpi=80, frameon=True)
        plt.axis('off')
        # Prefer the graphviz layout; fall back to spring layout when
        # graphviz bindings are missing or fail.
        try:
            positions = nx.graphviz_layout(self.graph)
        except ImportError as err:
            log.info('Graphviz not available: error=%s', err)
            log.info('Falling back to spring layout...')
            positions = nx.spring_layout(self.graph)
        #FIXME: Caused by bin/coffea in some cases
        except TypeError as err:
            log.warn('Graphviz layout failed: error=%s', err)
            log.warn('Falling back to spring layout...')
            positions = nx.spring_layout(self.graph)
        if 'calc_node_size' in kwargs and kwargs['calc_node_size']:
            node_size = self._node_size_vector
            if node_size is None:
                node_size = 300  # default when all sizes were zero
        else:
            node_size = 300
        log.debug('Drawing nodes...')
        nx.draw_networkx_nodes(self.graph, positions,
                               node_color=self.node_colors,
                               node_size=node_size,
                               alpha=0.8)
        log.debug('Drawing edges...')
        nx.draw_networkx_edges(self.graph, positions,
                               edge_color='#666666',
                               alpha=0.75)
        log.debug('Drawing labels...')
        nx.draw_networkx_labels(self.graph,positions,
                                font_color='#222222',
                                font_family='courier new',
                                font_weight='bold')
        log.debug('Plotting graph...')
        # Save when a filename was supplied, otherwise display interactively.
        try:
            filename = kwargs['filename']
            plt.savefig(filename, bbox_inches='tight')
        except KeyError:
            plt.show()

    @property
    def _node_size_vector(self):
        # Per-node draw sizes normalized into [200, 1000] from the 'size'
        # attribute; returns None when every size is 0 (nothing to scale).
        # NOTE(review): nodes_iter is the NetworkX 1.x API.
        log.debug('Calculating size vector...')
        size_vect = []
        for _, attrs in self.graph.nodes_iter(data = True):
            if 'size' in attrs:
                size_vect.append(attrs['size'])
            else:
                # External node may not have a size attribute
                size_vect.append(0)
        max_val = max(size_vect)
        if max_val != 0:
            log.debug('Normalizing size vector...')
            size_vect = [200 + s / (max_val*1.0)*800 for s in size_vect]
        else:
            size_vect = None
        return size_vect
|
|
from growcut import growcut_python
from numba import autojit
# Benchmark registry: (name, callable) pairs; here the pure-Python growcut
# implementation JIT-compiled with numba.
# NOTE(review): `autojit` is a legacy numba API (removed in modern releases
# in favor of `numba.jit`) — confirm the pinned numba version supports it.
benchmarks = (
    ("growcut_numba",
     autojit(growcut_python.growcut_python)),
)
|
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
from tvm import te
import logging
import sys, time, subprocess
import json
import os
def schedule(attrs):
    """Build a TVM schedule for a dense matmul C = A x B with TensorCore use.

    Tiles the output into thread blocks, stages A/B tiles through shared
    memory ("shared") and registers ("local"), binds the tiles to CUDA
    thread axes, and marks the reduction with the 'tensor_core' pragma.

    Args:
        attrs: scheduling context carrying the auto-config knobs
            (``auto_config``), the ``te`` schedule (``scheduler``), the
            output tensors, and string options (e.g. ``layout/NT``).
    """
    cfg, s, output = attrs.auto_config, attrs.scheduler, attrs.outputs[0]
    # Extents of the spatial axes (th_vals) and reduction axes (rd_vals).
    th_vals, rd_vals = [attrs.get_extent(x) for x in output.op.axis], [attrs.get_extent(x) for x in output.op.reduce_axis]

    C = output
    A, B = C.op.input_tensors
    y, x = s[C].op.axis
    k = s[C].op.reduce_axis[0]

    # storage_align params
    factor = 16
    offset = 8
    # Layout option encodes transposition of A/B: N = normal, T = transposed.
    layout = 'NN'
    for opt in attrs.options:
        if opt.startswith('layout/'):
            layout = opt[len('layout/'):]
            break
    '''
    if dtype == 'int8':
      factor = 32
      offset = 16
    '''
    # create cache stages
    AA = s.cache_read(A, "shared", [C])
    if (layout == "NN" or layout == "TN"):
        # Pad shared-memory rows to reduce bank conflicts.
        s[AA].storage_align(AA.op.axis[0], factor, offset)
    AL = s.cache_read(AA, "local", [C])
    BB = s.cache_read(B, "shared", [C])
    if (layout == "TT" or layout == "NT"):
        s[BB].storage_align(BB.op.axis[0], factor, offset)
    BL = s.cache_read(BB, "local", [C])
    CL = s.cache_write(C, "local")

    # autotvm search space definition
    cfg.define_knob("bx", [2, 4, 8])
    cfg.define_knob("by", [16, 32, 64])
    cfg.define_knob("step_k", [8, 16, 32])
    cfg.define_knob("v", [4, 8])
    by = cfg['by'].val
    bx = cfg['bx'].val
    step_k = cfg['step_k'].val
    v = cfg['v'].val

    # thread tile
    TX, TY = 8, 1
    # warp tile
    cfg.define_knob("warp_m", [16, 8, 32])
    warp_tile_m = cfg['warp_m'].val  # it could be 8, 16, 32 on CUDA version >= 10.0
    warp_tile_k = 16  # it must be 16
    # block tile
    tile_x = bx * TX
    tile_y = by * TY

    yo, ty = s[C].split(y, tile_y)
    ty, yi = s[C].split(ty, TY)

    # schedule for C stage
    xo, xi = s[C].split(x, tile_x)
    WX = min(warp_tile_m, tile_x)
    tz, xi = s[C].split(xi, WX)
    tx, xi = s[C].split(xi, TX)
    s[C].reorder(yo, xo, tz, ty, tx, yi, xi)
    s[C].bind(yo, te.thread_axis("blockIdx.y"))
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))

    # schedule for CL stage (register accumulation over the reduction axis)
    ko, ki = s[CL].split(k, step_k * warp_tile_k)
    kl, ki = s[CL].split(ki, warp_tile_k)
    s[CL].compute_at(s[C], tx)
    yo, xo = CL.op.axis
    s[CL].reorder(ko, kl, ki, yo, xo)

    # schedule for AA stage (cooperative load of an A tile into shared mem)
    s[AA].compute_at(s[CL], ko)
    xo, xi = s[AA].split(s[AA].op.axis[1], factor=bx*v)
    tz, tx = s[AA].split(xi, factor=(WX//TX)*v)
    tx, vec = s[AA].split(tx, factor=v)
    fused = s[AA].fuse(s[AA].op.axis[0], xo)
    _, ty = s[AA].split(fused, factor=by)
    s[AA].bind(ty, te.thread_axis("threadIdx.y"))
    s[AA].bind(tz, te.thread_axis("threadIdx.z"))
    s[AA].bind(tx, te.thread_axis("threadIdx.x"))
    # vectorization is very important for float16/int8 inputs
    s[AA].vectorize(vec)

    # schedule for BB stage (cooperative load of a B tile into shared mem)
    s[BB].compute_at(s[CL], ko)
    xo, xi = s[BB].split(s[BB].op.axis[1], factor=bx*v)
    tz, tx = s[BB].split(xi, factor=(WX//TX)*v)
    tx, vec = s[BB].split(tx, factor=v)
    fused = s[BB].fuse(s[BB].op.axis[0], xo)
    _, ty = s[BB].split(fused, factor=by)
    s[BB].bind(ty, te.thread_axis("threadIdx.y"))
    s[BB].bind(tz, te.thread_axis("threadIdx.z"))
    s[BB].bind(tx, te.thread_axis("threadIdx.x"))
    s[BB].vectorize(vec)

    # Register-level caches refresh once per warp_tile_k step.
    s[AL].compute_at(s[CL], kl)
    s[BL].compute_at(s[CL], kl)

    s[CL].pragma(ko, 'tensor_core')
|
|
"""
Render the models from 24 elevation angles, as in thesis NMR
Save as an image.
9. 17. 2020
created by Zheng Wen
9. 19. 2020
ALL RENDER ARE FINISHED WITHOUT TEXTURE
Run from anaconda console
NOTE:
RENDER FROM ORIGINAL SHOULD BE RANGE(360, 0, -15)
HERE RANGE(0, 360, 15)
SOLUTION: RENAME FILES OR GENERATE DATASETS IN FOLLOWING SEQUENCES:
0, 23, 22, ..., 1
"""
import matplotlib.pyplot as plt
import os
import tqdm
import numpy as np
import imageio
import soft_renderer as sr
# Select the GPU used by the soft renderer.
os.environ["CUDA_VISIBLE_DEVICES"] = "7"

# ShapeNet synset ids to render in this run.
CLASS_IDS_ALL = (
    "02958343",  # car
    # "02691156",  # airplane
    # "03001627",  # chair
)
# Synsets already rendered by a previous run (skipped below).
RENDERED_CLASS = ()
# BUG FIX: the original tuple was missing the comma between the two paths, so
# Python concatenated them into one bogus string and neither broken model was
# actually skipped.
BROKEN_ONJ = (
    '/mnt/zhengwen/model_synthesis/shapeNetCore/ShapeNetCore.v1/03624134/000000__broken__67ada28ebc79cc75a056f196c127ed77',
    '/mnt/zhengwen/model_synthesis/shapeNetCore/ShapeNetCore.v1/04074963/000000__broken__b65b590a565fa2547e1c85c5c15da7fb',
)
category_to_num = {}
# Per-view output image name prefixes.
RENDER_IMAGE_NAME_RGB = 'RGB'
RENDER_IMAGE_NAME_D = 'depth'
RENDER_IMAGE_NAME_NORMAL = 'normal'
# Camera parameters (angles in degrees).
camera_distance = 3.5
elevation = 30
azimuth = 0
root_dir = r'/mnt/zhengwen/model_synthesis/shapeNetCore/ShapeNet_OCT_29/ShapeNetCore.v1'
sub_root_count = 0
for sub_root_dir in sorted(os.listdir(root_dir)):
    if os.path.isdir(os.path.join(root_dir, sub_root_dir)) and (sub_root_dir in CLASS_IDS_ALL) and (sub_root_dir not in RENDERED_CLASS):
        category_to_num[sub_root_dir] = 0
        obj_count = 0
        respo_rgb = []
        respo_d = []
        respo_normal = []
        for obj_dir in sorted(os.listdir(os.path.join(root_dir, sub_root_dir))):
            print(sub_root_count, obj_count)
            # os.path.join is variadic; no need to nest calls.
            obj_root = os.path.join(root_dir, sub_root_dir, obj_dir)
            obj_file_i = os.path.join(obj_root, 'model.obj')
            if obj_root in BROKEN_ONJ:
                # Skip models known to crash the loader.
                continue
            img_file_rgb = os.path.join(obj_root, RENDER_IMAGE_NAME_RGB)
            img_file_depth = os.path.join(obj_root, RENDER_IMAGE_NAME_D)
            img_file_normal = os.path.join(obj_root, RENDER_IMAGE_NAME_NORMAL)
            mesh = sr.Mesh.from_obj(obj_file_i)
            renderer = sr.SoftRenderer(camera_mode='look_at')
            respo_sub_rgb = []
            respo_sub_d = []
            respo_sub_normal = []
            # 24 views, 15 degrees apart (see module docstring on ordering).
            for azimuth in range(0, 360, 15):
                count = azimuth // 15
                # rest mesh to initial state
                mesh.reset_()
                renderer.transform.set_eyes_from_angles(camera_distance, elevation, azimuth)
                images = renderer.render_mesh(mesh)
                # images holds (RGBA, depth, normal); keep the central 64x64 crop.
                image_rgb = images[0].detach().cpu().numpy()[0]
                respo_sub_rgb.append(image_rgb[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32])
                imageio.imwrite(img_file_rgb + '_' + str(count) + '.png', (255 * image_rgb[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32]).transpose((1, 2, 0)).astype(np.uint8))
                image_d = images[1].detach().cpu().numpy()[0]
                # Map non-zero depth to scaled inverse depth; 0 stays background.
                image_d[image_d != 0] = 2 * 1 / image_d[image_d != 0]
                imageio.imwrite(img_file_depth + '_' + str(count) + '.png', (255 * image_d[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32]).transpose((1, 2, 0)).astype(np.uint8))
                respo_sub_d.append(image_d[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32])
                image_normal = images[2].detach().cpu().numpy()[0]
                imageio.imwrite(img_file_normal + '_' + str(count) + '.png', (255 * image_normal[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32]).transpose((1, 2, 0)).astype(np.uint8))
                respo_sub_normal.append(image_normal[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32])
            obj_count += 1
            respo_sub_rgb = np.array(respo_sub_rgb)
            respo_sub_d = np.array(respo_sub_d)
            respo_sub_normal = np.array(respo_sub_normal)
            respo_rgb.append(respo_sub_rgb)
            respo_d.append(respo_sub_d)
            respo_normal.append(respo_sub_normal)
        sub_root_count += 1
        respo_rgb = np.array(respo_rgb)
        respo_d = np.array(respo_d)
        respo_normal = np.array(respo_normal)
        # NOTE(review): np.save appends '.npy' when the name lacks it, so these
        # land as '*.npz.npy'; '_norma' also looks like a typo for '_normal'.
        # Kept as-is to avoid changing filenames downstream code may expect.
        np.save(sub_root_dir + "_rgb.npz", respo_rgb)
        np.save(sub_root_dir + "_depth.npz", respo_d)
        np.save(sub_root_dir + "_norma.npz", respo_normal)
|
|
"""
helpers
=======
Collection of internal helper functions and classes, used by different
modules.
"""
import numpy as np
__all__ = ['check_vecsize', 'maxreldiff', 'Struct']
def check_vecsize(v, n=None):
    """
    Return True iff ``v`` is a 1D numpy array, and — when ``n`` is given —
    its length equals ``n``.
    """
    if not isinstance(v, np.ndarray) or v.ndim != 1:
        return False
    return n is None or v.shape[0] == n
def maxreldiff(a, b):
    """
    Largest component-wise relative difference between numpy arrays ``a``
    and ``b`` (same shapes). Each entry is |a-b| divided by
    max(|a|, |b|, 1e-8), bounding the denominator away from zero.
    """
    denom = np.maximum(np.maximum(np.abs(a), np.abs(b)), 1e-8)
    return (np.abs(a - b) / denom).max()
# Provides type for structure without class attributes, can have any number
# and type of object attributes.
class Struct:
    """Bare attribute container (comparable to types.SimpleNamespace)."""
    pass
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmdet.core import delta2bbox
from mmdet.ops import nms
from ..registry import HEADS
from .anchor_head import AnchorHead
from mmdet.core.bbox.geometry import bbox_overlaps
import numpy as np
@HEADS.register_module
class RPNHead(AnchorHead):
    """RPN head with class-frequency-aware NMS resampling.

    Standard two-class RPN (shared 3x3 conv followed by 1x1 cls/reg convs)
    extended with 'discrete'/'linear' NMS resampling that applies different
    NMS thresholds to proposals near rare/common/frequent ground-truth
    classes (frequency groups taken from the LVIS v0.5 dataset).
    """

    def __init__(self, in_channels, **kwargs):
        # 2 classes: object vs. background.
        super(RPNHead, self).__init__(2, in_channels, **kwargs)

    def _init_layers(self):
        """Build the shared 3x3 conv and the 1x1 cls/reg prediction convs."""
        self.rpn_conv = nn.Conv2d(
            self.in_channels, self.feat_channels, 3, padding=1)
        self.rpn_cls = nn.Conv2d(self.feat_channels,
                                 self.num_anchors * self.cls_out_channels, 1)
        self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)

    def init_weights(self):
        """Initialize all conv weights from N(0, 0.01)."""
        normal_init(self.rpn_conv, std=0.01)
        normal_init(self.rpn_cls, std=0.01)
        normal_init(self.rpn_reg, std=0.01)

    def forward_single(self, x):
        """Predict (cls_score, bbox_pred) for a single feature level."""
        x = self.rpn_conv(x)
        x = F.relu(x, inplace=True)
        rpn_cls_score = self.rpn_cls(x)
        rpn_bbox_pred = self.rpn_reg(x)
        return rpn_cls_score, rpn_bbox_pred

    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        """Compute RPN losses; gt labels are None since RPN is class-agnostic."""
        losses = super(RPNHead, self).loss(
            cls_scores,
            bbox_preds,
            gt_bboxes,
            None,
            img_metas,
            cfg,
            gt_bboxes_ignore=gt_bboxes_ignore)
        return dict(
            loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])

    def get_bboxes_single(self,
                          cls_scores,
                          bbox_preds,
                          mlvl_anchors,
                          img_shape,
                          scale_factor,
                          cfg,
                          gt_bboxes,
                          gt_labels,
                          rescale=False,
                          parent_scores=None):
        """Generate proposals for one image from per-level predictions.

        For each pyramid level: decode anchor deltas to boxes, pre-select
        the top-``cfg.nms_pre`` scores, drop tiny boxes, then run either
        plain NMS or the NMS-resampling variants (training only, when
        ``cfg.nms_resampling`` is set). Levels are merged and capped to
        ``cfg.max_num`` proposals.
        """
        mlvl_proposals = []
        for idx in range(len(cls_scores)):
            rpn_cls_score = cls_scores[idx]
            rpn_bbox_pred = bbox_preds[idx]
            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
            # (C, H, W) -> (H, W, C) so flattening matches the anchor order.
            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
            if self.use_sigmoid_cls:
                rpn_cls_score = rpn_cls_score.reshape(-1)
                scores = rpn_cls_score.sigmoid()
            else:
                rpn_cls_score = rpn_cls_score.reshape(-1, 2)
                scores = rpn_cls_score.softmax(dim=1)[:, 1]
            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            anchors = mlvl_anchors[idx]
            # Keep only the top-scoring anchors before NMS.
            if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
                _, topk_inds = scores.topk(cfg.nms_pre)
                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
                anchors = anchors[topk_inds, :]
                scores = scores[topk_inds]
            proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means,
                                   self.target_stds, img_shape)
            if cfg.min_bbox_size > 0:
                # Drop degenerate boxes below the minimum size.
                w = proposals[:, 2] - proposals[:, 0] + 1
                h = proposals[:, 3] - proposals[:, 1] + 1
                valid_inds = torch.nonzero((w >= cfg.min_bbox_size) &
                                           (h >= cfg.min_bbox_size)).squeeze()
                proposals = proposals[valid_inds, :]
                scores = scores[valid_inds]
            # Append the score as a 5th column: [x1, y1, x2, y2, score].
            proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1)
            if cfg.nms_resampling is not None:  # only used in training
                if cfg.nms_resampling[0] == 'discrete':
                    a_r = cfg.nms_resampling[1]
                    a_c = cfg.nms_resampling[2]
                    a_f = cfg.nms_resampling[3]
                    proposals = self.nms_resampling_discrete(proposals, gt_bboxes, gt_labels, a_r, a_c, a_f)
                elif cfg.nms_resampling[0] == 'linear':
                    thresh = cfg.nms_resampling[1]
                    proposals = self.nms_resampling_linear(proposals, gt_bboxes, gt_labels, thresh)
            else:
                proposals, _ = nms(proposals, cfg.nms_thr)
                proposals = proposals[:cfg.nms_post, :]
            mlvl_proposals.append(proposals)
        proposals = torch.cat(mlvl_proposals, 0)
        if cfg.nms_across_levels:
            # Re-run NMS jointly over all pyramid levels.
            proposals, _ = nms(proposals, cfg.nms_thr)
            proposals = proposals[:cfg.max_num, :]
        else:
            scores = proposals[:, 4]
            num = min(cfg.max_num, proposals.shape[0])
            _, topk_inds = scores.topk(num)
            proposals = proposals[topk_inds, :]
        return proposals

    def nms_resampling_linear(self, proposals, gt_bboxes, gt_labels, thresh):
        """NMS with a per-proposal IoU threshold picked from ``thresh`` by the
        class of the best-matching ground-truth box (0 = background)."""
        assert any(gt_labels > 0)
        iou = bbox_overlaps(proposals[:, :4], gt_bboxes)
        max_iou, gt_assignment = iou.max(dim=1)
        proposals_labels = gt_labels[gt_assignment]
        # proposal is considered as background when its iou with gt < 0.3
        proposals_labels[max_iou < 0.3] = 0
        proposals_labels = proposals_labels.cpu().numpy()
        # Per-proposal threshold, indexed by assigned class label.
        t = thresh[proposals_labels]
        keep = self.nms_py(proposals.cpu().numpy(), t)
        keep = np.array(keep)
        return proposals[keep, :]

    def nms_py(self, dets, thresh):
        """
        greedily select boxes with high confidence and overlap with current maximum <= thresh
        rule out overlap >= thresh

        :param dets: [[x1, y1, x2, y2 score]]
        :param thresh: per-box thresholds; box i retains others whose overlap
            with it is < thresh[i]
        :return: indexes to keep
        """
        if dets.shape[0] == 0:
            return []
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        # Process boxes in descending score order.
        order = scores.argsort()[::-1]
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # Intersection of the current box with all remaining boxes.
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            # Note: the kept box's own threshold decides which others survive.
            inds = np.where(ovr <= thresh[i])[0]
            order = order[inds + 1]
        return keep

    def nms_resampling_discrete(self, proposals, gt_bboxes, gt_labels, a_r, a_c, a_f):
        """Run NMS three times with thresholds a_r/a_c/a_f (rare/common/
        frequent) and keep proposals near ground truth of each group from the
        corresponding pass; frequent-pass proposals overlapping rare/common
        ground truth are removed to avoid duplicates."""
        assert any(gt_labels > 0)
        # proposal is considered as background when its iou with gt < 0.3
        select_thresh = 0.3
        out = []
        rare, common, frequent = self.get_category_frequency(gt_labels.device)
        # Pre-allocated scratch buffers for the per-group gt boxes.
        rare_gtbox = torch.zeros((2000, 4), device=gt_labels.device)
        rare_gtbox_idx = 0
        common_gtbox = torch.zeros((2000, 4), device=gt_labels.device)
        common_gtbox_idx = 0
        frequent_gtbox = torch.zeros((2000, 4), device=gt_labels.device)
        frequent_gtbox_idx = 0
        for gt_bbox, gt_label in zip(gt_bboxes, gt_labels):
            if gt_label in rare:
                rare_gtbox[rare_gtbox_idx, ...] = gt_bbox
                rare_gtbox_idx += 1
            elif gt_label in common:
                common_gtbox[common_gtbox_idx, ...] = gt_bbox
                common_gtbox_idx += 1
            else:
                frequent_gtbox[frequent_gtbox_idx, ...] = gt_bbox
                frequent_gtbox_idx += 1
        rare_gtbox = rare_gtbox[:rare_gtbox_idx, ...]
        common_gtbox = common_gtbox[:common_gtbox_idx, ...]

        frequent_proposals, _ = nms(proposals, a_f)
        if len(rare_gtbox) > 0:
            # Looser NMS pass; keep only proposals close to rare gt.
            rare_proposals, _ = nms(proposals, a_r)
            rare_overlaps = bbox_overlaps(rare_gtbox, rare_proposals[:, :4])
            rare_max_overlaps, rare_argmax_overlaps = rare_overlaps.max(dim=0)
            rare_pos_inds = rare_max_overlaps >= select_thresh
            rare_proposals = rare_proposals[rare_pos_inds, :]
            out.append(rare_proposals)
            # Remove frequent-pass proposals that overlap rare gt.
            frequent_rare_overlaps = bbox_overlaps(rare_gtbox, frequent_proposals[:, :4])
            frequent_rare_max_overlaps, frequent_rare_argmax_overlaps = frequent_rare_overlaps.max(dim=0)
            valid_inds = frequent_rare_max_overlaps < select_thresh
            frequent_proposals = frequent_proposals[valid_inds, :]
        if len(common_gtbox) > 0:
            common_proposals, _ = nms(proposals, a_c)
            common_overlaps = bbox_overlaps(common_gtbox, common_proposals[:, :4])
            common_max_overlaps, common_argmax_overlaps = common_overlaps.max(dim=0)
            common_pos_inds = common_max_overlaps >= select_thresh
            common_proposals = common_proposals[common_pos_inds, :]
            out.append(common_proposals)
            frequent_common_overlaps = bbox_overlaps(common_gtbox, frequent_proposals[:, :4])
            frequent_common_max_overlaps, frequent_common_argmax_overlaps = frequent_common_overlaps.max(dim=0)
            valid_inds = frequent_common_max_overlaps < select_thresh
            frequent_proposals = frequent_proposals[valid_inds, :]
        out.append(frequent_proposals)
        if len(out) > 1:
            out_proposals = torch.cat(out, 0)
        else:
            out_proposals = frequent_proposals
        return out_proposals

    def get_category_frequency(self, device):
        """Return (rare, common, frequent) category-id tensors on ``device``."""
        # rare, common, frequent are defined by the LVIS v0.5 dataset
        rare = torch.tensor([1, 7, 10, 14, 15, 16, 21, 22, 31, 38, 39, 40, 42, 46, 49, 51, 52,
                             64, 65, 70, 72, 74, 83, 86, 94, 100, 101, 105, 106, 107, 113, 116, 117,
                             120, 122, 125, 127, 130, 131, 136, 140, 142, 143, 144, 147, 150, 155, 159,
                             161, 163, 164, 167, 169, 173, 181, 182, 184, 196, 199, 203, 205, 206, 209,
                             213, 214, 217, 218, 219, 226, 227, 231, 236, 238, 239, 241, 242, 243, 245,
                             246, 249, 250, 251, 252, 253, 255, 258, 259, 265, 266, 270, 271, 273, 280,
                             284, 287, 291, 293, 295, 296, 298, 300, 303, 304, 306, 307, 310, 311, 313,
                             316, 317, 318, 320, 321, 322, 324, 326, 328, 329, 330, 335, 336, 342, 344,
                             350, 351, 354, 356, 357, 358, 359, 360, 361, 366, 368, 369, 370, 372, 378,
                             379, 385, 386, 388, 389, 393, 394, 402, 403, 404, 406, 408, 411, 413, 414,
                             417, 420, 421, 423, 427, 430, 433, 434, 435, 438, 439, 441, 442, 446, 454,
                             455, 456, 462, 464, 469, 473, 476, 477, 478, 483, 485, 486, 488, 489, 493,
                             495, 496, 498, 509, 510, 512, 514, 515, 516, 518, 521, 524, 525, 526, 527,
                             530, 534, 541, 542, 543, 545, 548, 551, 552, 553, 555, 556, 562, 564, 569,
                             572, 573, 581, 582, 584, 585, 586, 587, 590, 592, 593, 594, 596, 597, 600,
                             602, 605, 609, 610, 612, 613, 616, 617, 626, 627, 629, 630, 631, 634, 636,
                             643, 645, 646, 650, 656, 658, 659, 663, 664, 665, 671, 674, 676, 677, 683,
                             684, 686, 690, 696, 698, 700, 703, 712, 713, 716, 722, 723, 724, 725, 727,
                             730, 732, 734, 735, 739, 741, 742, 745, 749, 755, 759, 765, 767, 768, 769,
                             772, 773, 775, 777, 778, 782, 783, 785, 790, 791, 795, 796, 797, 799, 800,
                             804, 806, 807, 808, 809, 816, 818, 821, 822, 823, 825, 826, 828, 833, 834,
                             836, 837, 841, 843, 845, 847, 857, 863, 864, 865, 866, 867, 869, 870, 871,
                             872, 873, 876, 878, 883, 887, 893, 894, 898, 899, 901, 902, 905, 906, 908,
                             916, 919, 920, 921, 922, 923, 927, 928, 931, 932, 934, 940, 941, 945, 946,
                             947, 949, 951, 952, 954, 955, 956, 957, 959, 960, 962, 963, 964, 970, 975,
                             976, 989, 991, 992, 999, 1000, 1002, 1004, 1006, 1009, 1010, 1011, 1013, 1016,
                             1021, 1023, 1026, 1027, 1029, 1030, 1033, 1034, 1047, 1048, 1049, 1050, 1051,
                             1056, 1067, 1068, 1069, 1073, 1074, 1077, 1078, 1087, 1095, 1100, 1104, 1112,
                             1133, 1136, 1138, 1139, 1140, 1141, 1145, 1147, 1149, 1151, 1153, 1154, 1157,
                             1159, 1166, 1167, 1168, 1169, 1170, 1172, 1179, 1180, 1181, 1187, 1188, 1189,
                             1190, 1204, 1205, 1206, 1214, 1216, 1219, 1225, 1226, 1228], device=device)
        common = torch.tensor([2, 5, 6, 8, 9, 11, 18, 19, 23, 25, 26, 27, 28, 29, 33, 37, 44,
                               47, 48, 54, 55, 62, 63, 68, 71, 73, 75, 76, 80, 82, 85, 92, 93,
                               97, 98, 102, 103, 108, 109, 111, 114, 115, 119, 121, 123, 128,
                               129, 134, 135, 141, 145, 148, 149, 151, 152, 153, 156, 157, 158,
                               162, 165, 166, 168, 171, 175, 176, 177, 178, 186, 188, 189, 190,
                               192, 193, 195, 200, 201, 202, 204, 207, 210, 215, 216, 220, 222,
                               223, 224, 225, 228, 230, 232, 233, 234, 244, 247, 248, 254, 256,
                               257, 262, 264, 267, 268, 272, 274, 275, 278, 279, 283, 285, 286,
                               290, 292, 294, 297, 305, 309, 312, 314, 315, 319, 325, 331, 332,
                               333, 337, 338, 339, 340, 341, 343, 346, 348, 349, 355, 363, 364,
                               367, 373, 374, 375, 376, 380, 381, 384, 387, 391, 396, 398, 399,
                               400, 401, 405, 409, 412, 415, 419, 424, 425, 426, 431, 432, 440,
                               443, 445, 448, 449, 450, 453, 457, 460, 461, 463, 466, 468, 470,
                               471, 472, 474, 479, 481, 482, 484, 487, 490, 491, 492, 494, 497,
                               499, 500, 501, 503, 505, 507, 511, 513, 519, 520, 522, 523, 528,
                               529, 532, 533, 535, 536, 537, 539, 540, 544, 547, 549, 557, 561,
                               563, 565, 566, 567, 570, 574, 576, 583, 588, 589, 591, 595, 598,
                               599, 604, 607, 608, 611, 614, 618, 620, 622, 623, 633, 635, 640,
                               644, 647, 648, 657, 660, 661, 662, 667, 668, 670, 675, 678, 679,
                               681, 682, 685, 689, 692, 693, 694, 695, 701, 705, 706, 707, 709,
                               719, 731, 733, 737, 738, 740, 743, 744, 748, 750, 751, 752, 753,
                               754, 756, 757, 758, 760, 762, 763, 766, 774, 776, 780, 781, 786,
                               787, 788, 789, 792, 794, 798, 801, 802, 803, 810, 811, 814, 815,
                               820, 832, 835, 838, 839, 844, 848, 849, 854, 855, 856, 858, 860,
                               861, 868, 875, 877, 879, 880, 881, 882, 884, 885, 886, 888, 889,
                               891, 892, 897, 903, 904, 907, 909, 911, 915, 917, 918, 936, 938,
                               939, 942, 943, 944, 948, 950, 953, 958, 967, 968, 971, 972, 978,
                               981, 987, 988, 990, 994, 995, 1003, 1005, 1007, 1008, 1015, 1017,
                               1019, 1020, 1022, 1025, 1031, 1032, 1036, 1041, 1052, 1053, 1055,
                               1058, 1059, 1060, 1061, 1064, 1066, 1071, 1079, 1081, 1082, 1083,
                               1085, 1086, 1088, 1089, 1090, 1093, 1096, 1101, 1102, 1103, 1105,
                               1106, 1107, 1108, 1109, 1110, 1114, 1116, 1120, 1121, 1124, 1125,
                               1126, 1127, 1131, 1134, 1142, 1146, 1148, 1150, 1152, 1156, 1158,
                               1160, 1161, 1163, 1165, 1171, 1173, 1174, 1175, 1176, 1178, 1182,
                               1185, 1186, 1191, 1192, 1193, 1194, 1195, 1197, 1198, 1199, 1202,
                               1203, 1207, 1208, 1209, 1210, 1212, 1217, 1218, 1220, 1221, 1222,
                               1223, 1227, 1230], device=device)
        frequent = torch.tensor([3, 4, 12, 13, 17, 20, 24, 30, 32, 34, 35, 36, 41, 43, 45, 50, 53,
                                 56, 57, 58, 59, 60, 61, 66, 67, 69, 77, 78, 79, 81, 84, 87, 88,
                                 89, 90, 91, 95, 96, 99, 104, 110, 112, 118, 124, 126, 132, 133,
                                 137, 138, 139, 146, 154, 160, 170, 172, 174, 179, 180, 183, 185,
                                 187, 191, 194, 197, 198, 208, 211, 212, 221, 229, 235, 237, 240,
                                 260, 261, 263, 269, 276, 277, 281, 282, 288, 289, 299, 301, 302,
                                 308, 323, 327, 334, 345, 347, 352, 353, 362, 365, 371, 377, 382,
                                 383, 390, 392, 395, 397, 407, 410, 416, 418, 422, 428, 429, 436,
                                 437, 444, 447, 451, 452, 458, 459, 465, 467, 475, 480, 502, 504,
                                 506, 508, 517, 531, 538, 546, 550, 554, 558, 559, 560, 568, 571,
                                 575, 577, 578, 579, 580, 601, 603, 606, 615, 619, 621, 624, 625,
                                 628, 632, 637, 638, 639, 641, 642, 649, 651, 652, 653, 654, 655,
                                 666, 669, 672, 673, 680, 687, 688, 691, 697, 699, 702, 704, 708,
                                 710, 711, 714, 715, 717, 718, 720, 721, 726, 728, 729, 736, 746,
                                 747, 761, 764, 770, 771, 779, 784, 793, 805, 812, 813, 817, 819,
                                 824, 827, 829, 830, 831, 840, 842, 846, 850, 851, 852, 853, 859,
                                 862, 874, 890, 895, 896, 900, 910, 912, 913, 914, 924, 925, 926,
                                 929, 930, 933, 935, 937, 961, 965, 966, 969, 973, 974, 977, 979,
                                 980, 982, 983, 984, 985, 986, 993, 996, 997, 998, 1001, 1012, 1014,
                                 1018, 1024, 1028, 1035, 1037, 1038, 1039, 1040, 1042, 1043, 1044,
                                 1045, 1046, 1054, 1057, 1062, 1063, 1065, 1070, 1072, 1075, 1076,
                                 1080, 1084, 1091, 1092, 1094, 1097, 1098, 1099, 1111, 1113, 1115,
                                 1117, 1118, 1119, 1122, 1123, 1128, 1129, 1130, 1132, 1135, 1137,
                                 1143, 1144, 1155, 1162, 1164, 1177, 1183, 1184, 1196, 1200, 1201,
                                 1211, 1213, 1215, 1224, 1229], device=device)
        return rare, common, frequent
|
|
#!/usr/bin/env python
"""
Extract custom features
-----------------------
This example shows how to extract features from the tissue image using a custom function.
The custom feature calculation function can be any python function that takes an image as input, and
returns a list of features.
Here, we show a simple example by defining a function to calculate the mean of the images.
Custom features are calculated by using ``features = 'custom'``, which calls
:func:`squidpy.im.ImageContainer.features_custom`.
In addition to ``feature_name`` and ``channels`` we can specify the following ``features_kwargs``:
- ``func`` - custom feature extraction function.
- ``additional_layers`` - names of image layers that should be passed to ``func`` together with ``layer``.
- additional keyword arguments for ``func``.
.. seealso::
See :ref:`sphx_glr_auto_examples_image_compute_features.py` for general usage of
:func:`squidpy.im.calculate_image_features`.
"""
import scanpy as sc
import squidpy as sq
###############################################################################
# Let's load the H&E Visium dataset.
# get spatial dataset including high-resolution tissue image
# Image container with the high-resolution H&E tissue image.
img = sq.datasets.visium_hne_image_crop()
# Matching AnnData object with the spot annotations.
adata = sq.datasets.visium_hne_adata_crop()
###############################################################################
# Define a custom feature extraction function.
def mean_fn(arr):
    """Return the mean over all elements of ``arr``."""
    import numpy as np

    return np.asarray(arr).mean()
###############################################################################
# Now we can extract features using `mean_fn` by providing it within ``features_kwargs``.
# Run the custom extractor over every spot; results go to adata.obsm.
sq.im.calculate_image_features(
    adata,
    img,
    features="custom",
    features_kwargs={"custom": {"func": mean_fn}},
    key_added="custom_features",
    show_progress_bar=False,
)

###############################################################################
# The result is stored in ``adata.obsm['custom_features']``.
adata.obsm["custom_features"].head()

###############################################################################
# Use :func:`squidpy.pl.extract` to plot the histogram features on the tissue image or have a look at
# `our interactive visualization tutorial <../../external_tutorials/tutorial_napari.ipynb>`_ to learn
# how to use our interactive :mod:`napari` plugin.
# NOTE(review): the column name "mean_fn_0" presumably follows a
# "<func_name>_<output_index>" convention — confirm against squidpy docs.
sc.pl.spatial(
    sq.pl.extract(adata, "custom_features"),
    color=[None, "mean_fn_0"],
    bw=True,
)
###############################################################################
# You can also pass more than one image layer to the custom feature extraction function.
# For this, specify the necessary additional layer names using ``additional_layers`` in ``features_kwargs``.
# The specified image layers will be passed to the custom feature extraction function.
#
# Here, we show this behavior by defining a feature extraction function that sums two image layers:
def sum_fn(arr, extra_layer):
    """Return the total of the element-wise sum of two image layers."""
    import numpy as np

    combined = arr + extra_layer
    return np.sum(combined)
# Register a second layer so it can be forwarded to the custom function.
img.add_img(img["image"].values, layer="extra_layer")

# "extra_layer" is passed to sum_fn as its second positional argument.
sq.im.calculate_image_features(
    adata,
    img,
    layer="image",
    features="custom",
    features_kwargs={"custom": {"func": sum_fn, "additional_layers": ["extra_layer"]}},
    key_added="custom_features",
    show_progress_bar=False,
)
|
|
from numpy import log10
from conversions import *
#====================================================================
# FUSELAGE GROUP
#
# airframe, pressurization, crashworthiness
#
# ALL UNITS IN IMPERIAL
#====================================================================
# AFDD model configuration flags and empirical constants.
f_lgloc = 1.0   #1.16     # 1.1627 landing gear on fus, 1.0 otherwise
f_lgret = 1.0   #1.1437   # retractable landing gear (goes into the fuselage)
f_ramp  = 1.0             # no cargo ramp, 1.27 otherwise
f_tfold = 0.0             # tail of fold weight 0.05 assumed
f_wfold = 0.0             # wing and rotor fold weight
f_mar   = 0.0             # marinization weight
f_press = 0.0             # pressurization (fraction basic body weight)
f_cw    = 0.06            # crashworthiness weight (fraction fuselage weight)
nz      = 4.0             # design ultimate load factor
a       = -2.3979         #
b       = 1.0             # empirical factors for
c       = -0.0866         # wetted area (source?)
d       = 0.8099          #
imodel  = 'afdd84'        # because why not
#imodel = 'afdd82'
# NOTE(review): nz is assigned twice; this 3.5 silently overrides the 4.0
# "design ultimate load factor" above — confirm which value is intended.
nz      = 3.5             # load factor
def fuselage_weight(gtow, tech_factor, l_fus):
    """
    function to calculate airframe mass from AFDD model

    Inputs:
       1. gtow        : take-off weight in lbs
       2. tech_factor : technology factor that scales resulting weight predictions up/down
       3. l_fus       : fuselage length in feet

    Output:
       1. weight dictionary with breakdown and total for fuselage weight
    """

    # Wetted body area regression from GTOW.
    S_body = 10 ** (c + d * log10(gtow))

    # AFDD84 universal-model basic body weight (lb).
    basic = 25.41 * f_lgloc * f_lgret * f_ramp * (gtow * 0.001) ** 0.4879 * \
            (gtow * nz * 0.001) ** 0.2075 * (S_body ** 0.1676) * \
            (l_fus ** 0.1512)

    # Increments for folding mechanisms, marinization and pressurization.
    tfold = f_tfold * basic                   # tail folding weight
    wfold = f_wfold * (basic + tfold)         # wing folding weight
    mar   = f_mar * basic                     # marinization weight
    press = f_press * basic                   # pressurization weight
    crash = f_cw * (basic + tfold + wfold + mar + press)

    # Apply the technology factor uniformly to every component.
    basic = basic * tech_factor
    tfold = tfold * tech_factor
    mar   = mar * tech_factor
    wfold = wfold * tech_factor
    press = press * tech_factor
    crash = crash * tech_factor

    total = basic + tfold + wfold + mar + press + crash

    # Convert lb -> kg and return the breakdown in a dictionary.
    return {'basic': basic * lb2kg, 'tail_folding': tfold * lb2kg,
            'marinization': mar * lb2kg, 'wing_folding': wfold * lb2kg,
            'pressurization': press * lb2kg, 'crashworth': crash * lb2kg,
            'total': total * lb2kg}
|
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluate PINNs for Navier-Stokes equation scenario"""
import numpy as np
from mindspore import context, load_checkpoint, load_param_into_net
from src.NavierStokes.dataset import generate_training_set_navier_stokes
from src.NavierStokes.net import PINNs_navier
def eval_PINNs_navier(ck_path, path, num_neuron=20):
    """
    Evaluation of PINNs for Navier-Stokes equation scenario.

    Loads the checkpoint, reads the two learned PDE coefficients and prints
    their percentage error against the references 1.0 and 0.01.

    Args:
        ck_path (str): path of the checkpoint to evaluate
        path (str): path of the dataset for Navier-Stokes equation
        num_neuron (int): number of neurons for fully connected layer in the network

    Returns:
        tuple: percentage errors of lambda1 and lambda2 (numpy arrays)
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    # Topology: 3 inputs (x, y, t), eight hidden layers, 2 outputs.
    net_layers = [3] + [num_neuron] * 8 + [2]

    # Only the domain bounds are needed here to rebuild the network.
    _, lb, ub = generate_training_set_navier_stokes(10, 10, path, 0)
    model = PINNs_navier(net_layers, lb, ub)
    load_param_into_net(model, load_checkpoint(ck_path))

    lambda1_pred = model.lambda1.asnumpy()
    lambda2_pred = model.lambda2.asnumpy()
    error_lambda_1 = np.abs(lambda1_pred - 1.0) * 100
    error_lambda_2 = np.abs(lambda2_pred - 0.01) / 0.01 * 100
    print(f'Error of lambda 1 is {error_lambda_1[0]:.6f}%')
    print(f'Error of lambda 2 is {error_lambda_2[0]:.6f}%')
    return error_lambda_1, error_lambda_2
|
|
from __future__ import absolute_import
import os, re, collections
import requests, nltk
import numpy as np
import pandas as pd
import tensorflow as tf
import xml.etree.ElementTree as ET
from TF2.extract_features_Builtin import *
# Which pretrained encoder to use; only 'bert' is wired up below.
# (Renamed from 'type', which shadowed the builtin of the same name.)
model_type = 'bert'
if model_type == 'bert':
    bert_folder = 'Pretrained/uncased_L-12_H-768_A-12/'
    bert_config = bert_folder + 'bert_config.json'
    vocab_file = bert_folder + 'vocab.txt'
    bert_ckpt = bert_folder + 'bert_model.ckpt'

# Fetch the article XML for this PubMed Central id via the OAI endpoint.
pmc_id = '4304705'
url = 'https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:' + pmc_id + '&metadataPrefix=pmc'
d = requests.get(url).content.decode('utf-8')
# Strip the default XML namespace so plain tag names work in findall().
xmldata = re.sub('xmlns="[^"]+"', '', d)
xml_handle = ET.fromstring(xmldata)

# get abstract sentences from xml
abstract = xml_handle.findall('.//abstract')
abs_text = ET.tostring(abstract[0], method='text').decode('utf-8')
abs_text = re.sub('\n', ' ', abs_text)
abs_text = re.sub(r'\s+', ' ', abs_text)
abs_sents = nltk.sent_tokenize(abs_text)

tf.compat.v1.logging.set_verbosity('ERROR')

# Return vectors in pandas frame ('-1' selects the last encoder layer).
Emb_Vectors = Ext_Features(input=abs_sents, bert_config_file=bert_config, vocab_file=vocab_file, init_checkpoint=bert_ckpt,
                           input_type='string', layers='-1', max_seq_length=128, do_lower_case=True, batch_size=32,
                           use_tpu=False, master=None, num_tpu_cores=8, use_one_hot_embeddings=False)
Emb_Vectors.head(5)
|
|
import numpy as np
import torch
import torch.nn as nn
import layers
class GCN(nn.Module):
    """Multi-layer graph convolutional network.

    Stacks GraphConvolution layers with sizes
    input_dim -> hidden_dims[0] -> ... -> hidden_dims[-1] -> output_dim,
    applying ReLU and dropout after every layer except the last.
    """

    def __init__(self, input_dim, hidden_dims, output_dim,
                 dropout=0.5):
        """
        Parameters
        ----------
        input_dim : int
            Dimension of input node features.
        hidden_dims : list of ints
            Dimensions of hidden layers. Must be non empty.
        output_dim : int
            Dimension of output node features.
        dropout : float
            Dropout rate. Default: 0.5.
        """
        super(GCN, self).__init__()
        # Pair consecutive layer sizes to build the conv stack in order.
        dims = [input_dim] + list(hidden_dims) + [output_dim]
        self.convs = nn.ModuleList(
            layers.GraphConvolution(n_in, n_out)
            for n_in, n_out in zip(dims[:-1], dims[1:]))
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, features, adj):
        """
        Parameters
        ----------
        features : torch.Tensor
            An (n x input_dim) tensor of input node features.
        adj : torch.sparse.LongTensor
            An (n x n) sparse tensor representing the normalized adjacency
            matrix of the graph.

        Returns
        -------
        out : torch.Tensor
            An (n x output_dim) tensor of output node features.
        """
        hidden = features
        # ReLU + dropout between layers, but not after the final projection.
        for conv_layer in self.convs[:-1]:
            hidden = self.dropout(self.relu(conv_layer(hidden, adj)))
        return self.convs[-1](hidden, adj)
|
|
'''Action decision module'''
from pdb import set_trace as T
import numpy as np
from collections import defaultdict
import torch
from torch import nn
from forge.blade.io.stimulus.static import Stimulus
from forge.ethyr.torch.policy import attention
from forge.ethyr.torch.policy import functional
from pcgrl.game.io.action import static
class Output(nn.Module):
    def __init__(self, config):
        '''Network responsible for selecting actions

        Args:
            config: A Config object
        '''
        super().__init__()
        self.config = config
        self.h = config.HIDDEN
        self.net = DiscreteAction(self.config, self.h, self.h)
        # One learned embedding per fixed action argument.
        self.arg = nn.Embedding(static.Action.n, self.h)
        #self.net = FlatAction(self.config, self.h, self.h)

    def names(self, nameMap, args):
        '''Lookup argument indices from name mapping'''
        indices = [nameMap.get(entry) for entry in args]
        return np.array(indices)

    def forward(self, obs, lookup):
        '''Populates an IO object with actions in-place

        Args:
            obs    : An IO object specifying observations
            lookup : A fixed size representation of each entity
        '''
        rets = defaultdict(dict)
        for atn in static.Action.edges:
            for arg in atn.edges:
                if arg.argType == static.Fixed:
                    # Fixed arg: score the learned embeddings of the
                    # candidate argument values, tiled across the batch.
                    nBatch = obs.shape[0]
                    argIdxs = [e.idx for e in arg.edges]
                    #cands = lookup[static.Fixed.__name__][argIdxs]
                    cands = self.arg.weight[argIdxs]
                    cands = cands.repeat(nBatch, 1, 1)
                else:
                    # Entity-valued arg: candidates come from the entity lookup.
                    #Temp hack, rename
                    cands = lookup[Stimulus.Entity]
                # No length mask is applied yet (lens is always None here).
                logits = self.net(obs, cands, None)
                #String names for RLlib for now
                #rets[atn.__name__][arg.__name__] = logits
                rets[atn][arg] = logits
        return rets
class Action(nn.Module):
    '''Head for selecting an action'''

    def forward(self, x, mask=None):
        # classify picks an index over the (optionally masked) logits.
        choice = functional.classify(x, mask)
        return x, choice
class FlatAction(Action):
    '''Head projecting the stimulus onto four fixed action logits.'''

    def __init__(self, config, xdim, h):
        super().__init__()
        self.net = nn.Linear(xdim, 4)

    def forward(self, stim, args, lens):
        logits = self.net(stim).squeeze(1)
        return super().forward(logits)
class DiscreteAction(Action):
    '''Head for making a discrete selection from
    a variable number of candidate actions'''

    def __init__(self, config, xdim, h):
        super().__init__()
        self.net = attention.DotReluBlock(h)

    def forward(self, stim, args, lens):
        '''Score each candidate (args) against the stimulus.

        NOTE: ``lens`` is currently unused. The original body contained
        unreachable masking scaffolding after this return (a stale length-mask
        sketch and a second ``return x, xIdx``); it has been removed. If
        variable-length candidate sets need masking, build a boolean mask from
        ``lens`` and route it through ``Action.forward(x, mask=...)``.
        '''
        x = self.net(stim, args)
        return x
|
|
import os
import unittest
import numpy as np
from gnes.encoder.audio.vggish import VggishEncoder
class TestVggishEncoder(unittest.TestCase):
    '''Tests for VggishEncoder (both cases currently skipped).'''

    @unittest.skip
    def setUp(self):
        '''Prepare fixture paths, raw video bytes and random log-mel batches.'''
        self.dirname = os.path.dirname(__file__)
        self.video_path = os.path.join(self.dirname, 'videos')
        # FIX: open the fixture files with a context manager so the handles
        # are closed (the original leaked one open file per video).
        self.video_bytes = []
        for fname in os.listdir(self.video_path):
            with open(os.path.join(self.video_path, fname), 'rb') as fp:
                self.video_bytes.append(fp.read())
        # Three random batches shaped (frames, 96 mel bins, 64) like VGGish input.
        self.audios = [np.random.rand(10, 96, 64),
                       np.random.rand(15, 96, 64),
                       np.random.rand(5, 96, 64)]
        self.vggish_yaml = os.path.join(self.dirname, 'yaml', 'vggish-encoder.yml')

    @unittest.skip
    def test_vggish_encoding(self):
        '''Encoded output should be a (num_audios, 128) embedding matrix.'''
        self.encoder = VggishEncoder.load_yaml(self.vggish_yaml)
        vec = self.encoder.encode(self.audios)
        self.assertEqual(len(vec.shape), 2)
        self.assertEqual(vec.shape[0], len(self.audios))
        self.assertEqual(vec.shape[1], 128)
|
|
from flask import Flask,jsonify,request
import pandas as pd
import numpy as np
import time
from sklearn.model_selection import train_test_split
import sys
import turicreate as tc
sys.path.append("..")
import json
from flask_cors import CORS
from flask import request
import datetime
import json as json
from pymongo import MongoClient
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.ensemble import RandomForestRegressor
from bson import ObjectId
import math
from flask_ngrok import run_with_ngrok
# Flask application setup: CORS allows cross-origin browser clients, and
# run_with_ngrok wraps app.run() so the dev server gets a public ngrok tunnel.
app=Flask(__name__)
CORS(app)
run_with_ngrok(app)
# MongoDB Atlas connection string and database name used by all handlers below.
# NOTE(review): credentials are hard-coded in source — move to env/config.
url='mongodb+srv://test:test@cluster0-12rwi.azure.mongodb.net/test?retryWrites=true&w=majority'
db_name='shop_list'
def read_json(url, db_name, table_name):
    '''Return the pymongo collection *table_name* from database *db_name*.

    Only a fixed whitelist of collection names is exposed; any other name
    falls through and returns None (same as the original elif chain).
    '''
    client = MongoClient(url)
    db = client.get_database(db_name)
    known_tables = ("customers", "transactions", "itemlist",
                    "category", "rta", "Recent_purchases")
    if table_name in known_tables:
        # db.<name> attribute access — identical to the explicit returns.
        return getattr(db, table_name)
#functions for recommendation -->>
#To get the overall users list
def get_user():
    '''Return every customer id from the customers collection, as strings.'''
    users_table = read_json(url, db_name, "customers")
    records = users_table.find({}, {"_id": 0})
    return [str(record["cust_id"]) for record in records]
#To get the the data for recommendation
def get_data(users):
    '''Flatten all transactions into three parallel columns.

    Returns (user_ids, item_ids, frequencies) where frequency is the number
    of recorded purchases of that item by that user.
    '''
    user_data = []   # output 1
    item_data = []   # output 2
    target_data = [] # output 3
    transactions_table = read_json(url, db_name, "transactions")
    for user in users:
        # Match on this customer, dropping _id and cust_id from the result.
        cursor = transactions_table.find({"cust_id": int(user)},
                                         {"_id": 0, "cust_id": 0})
        for record in cursor:
            for entry in record["Transaction"]:
                user_data.append(str(user))
                item_data.append(str(entry["item_id"]))
                target_data.append(len(entry["item_transactions"]))
    return user_data, item_data, target_data
#Functions for prediction algorithms -->>
def calc_error(predicted, actual):
    '''Root-mean-square error between two equal-length numeric sequences.'''
    squared_sum = sum((a - p) ** 2 for p, a in zip(predicted, actual))
    return math.sqrt(squared_sum / len(actual))
#Prefetches the dates and quantity with corresponding to item_id in recent purchases
def prefetch(item_id_dict, item_info):
    '''Fill each entry of item_id_dict with the purchase dates/quantities
    found in item_info (list of documents with a "Transaction" array).
    Items not already present in item_id_dict are ignored.'''
    for record in item_info:
        for trans in record["Transaction"]:
            key = trans['item_id']
            if item_id_dict.get(key) is None:
                continue
            entries = trans['item_transactions']
            item_id_dict[key]["dates"] = [e['date'] for e in entries]
            item_id_dict[key]["quantity"] = [e['quantity'] for e in entries]
    return item_id_dict
def removeOutliers(frequency, threshold):
    '''Drop quantities whose z-score magnitude is >= threshold.

    frequency maps a purchase-gap (days) to the list of quantities bought at
    that gap. Singleton lists and NaN z-scores (zero variance) are always kept.
    Returns (kept_gaps, kept_quantities) as parallel lists.'''
    kept_freq = []
    kept_quantity = []
    for gap, values in frequency.items():
        if len(values) == 1:
            kept_freq.append(gap)
            kept_quantity.append(values[0])
            continue
        scores = stats.zscore(values)
        for i, score in enumerate(scores):
            # NaN (constant data) or small deviation -> keep the sample.
            if np.isnan(score) or abs(score) < threshold:
                kept_freq.append(gap)
                kept_quantity.append(values[i])
    return kept_freq, kept_quantity
def get_dates_quantity(dates, quantity, remove_outliers=0, outliers_threshold=0):
    '''Convert purchase dates to inter-purchase gaps (days).

    Returns (gaps, quantities) where gaps is an (n-1, 1) int64 column vector
    and quantities drops the first purchase (no gap exists for it). With
    remove_outliers=1 the pairs are filtered through removeOutliers first.'''
    gaps = []
    gap_to_quantities = {}
    # Consecutive-date differences, in whole days.
    for prev, curr in zip(dates[:-1], dates[1:]):
        gap = (curr - prev).astype('int64')
        gaps.append(gap)
        gap_to_quantities[gap] = []
    quantity = quantity[1:]
    if remove_outliers == 1:
        for gap, qty in zip(gaps, quantity):
            gap_to_quantities[gap].append(qty)
        kept_gaps, kept_qty = removeOutliers(gap_to_quantities, outliers_threshold)
        kept_gaps = np.array(kept_gaps).astype('int64')
        kept_gaps = np.reshape(kept_gaps, (len(kept_gaps), 1))
        return kept_gaps, kept_qty
    gaps = np.array(gaps).astype('int64')
    gaps = np.reshape(gaps, (len(gaps), 1))
    return (gaps, quantity)
def algo(dates,quantity,gap):
    '''Predict the quantity to buy after `gap` days since the last purchase.

    Fits both an RBF SVR and a small random forest on (gap-days -> quantity)
    pairs, then predicts with whichever model has the lower RMSE on the
    training data. If `gap` exceeds every historical gap, returns the largest
    quantity seen at the longest historical gap instead of extrapolating.
    '''
    dates = np.array(dates).astype('datetime64[D]')
    #preparing frequency array (dates_arr): inter-purchase gaps in days
    (dates_arr , quantity) = get_dates_quantity(dates,quantity,0,1.5)
    #INITIALISING THE MODEL
    svr_rbf=SVR(kernel='rbf',C=1e3,gamma=0.1)
    random_forest = RandomForestRegressor(n_estimators=5,random_state=10)
    #FITTING THE MODEL
    #svr_poly.fit(dates_arr,quantity)-- CURRENTLY NOT USING POLY
    svr_rbf.fit(dates_arr,quantity)
    random_forest.fit(dates_arr,quantity);
    #READING THE CURRENT TIMESTAMP TO FIND THE GAP
    predict_dates = gap
    predict_dates = np.reshape(predict_dates,(1,1))
    #PREDICTING FROM THE FITTED MODEL
    # Out-of-range gap: fall back to the max quantity bought at the longest
    # observed gap rather than trusting the regressors to extrapolate.
    if predict_dates > max(dates_arr):
        maximum = max(dates_arr)[0]
        k = 0
        max_quant = 0
        for i in dates_arr:
            if (i[0] == maximum):
                if (quantity[k] > max_quant):
                    max_quant = quantity[k]
            k += 1
        return(round(max_quant))
    # Training-set predictions used only for model selection below.
    rbf= svr_rbf.predict(dates_arr)
    rf=random_forest.predict(dates_arr)#rf=Random Forest
    rounded_rbf=[]
    rounded_rf=[]
    for i in range(0,len(rbf)):
        rounded_rbf.append(round(rbf[i]))
        rounded_rf.append(round(rf[i]))
    error_rbf=calc_error(rounded_rbf,quantity)
    error_rf=calc_error(rounded_rf,quantity)
    #print(error_rbf,error_rf) -->> ERROR PRINTING
    # Pick the model with the smaller training RMSE (ties favour the SVR).
    if(error_rbf<=error_rf):
        return svr_rbf.predict(predict_dates)[0]
    else:
        return random_forest.predict(predict_dates)[0]
@app.route('/ml/recommend',methods=['GET'])
#Main function for recommendation
def recommend():
    '''GET /ml/recommend?userid=N — top-10 item recommendations for a user.

    Trains a cosine item-similarity recommender over all users' purchase
    frequencies, then returns the requested user's items as JSON.'''
    target_user = request.args.get('userid')
    all_users = get_user()
    #users=[25]
    user_data, item_data, target_data = get_data(all_users)
    frame = tc.SFrame({'user_id': user_data,
                       'item_id': item_data,
                       'frequency': target_data})
    model = tc.item_similarity_recommender.create(
        frame, target="frequency", similarity_type='cosine')
    #recom=m.recommend(users,k=10) UNCOMMENT IF want to test for all users
    recommendations = model.recommend([str(target_user)], k=10)
    output = {"item_id": [item for item in recommendations["item_id"]]}
    return json.dumps(output)
@app.route('/ml/predict',methods=['GET'])
def predict():
    '''GET /ml/predict?userid=N — predict quantities for items due for re-purchase.

    For each of the user's recently purchased items, if today falls within a
    window around its average re-purchase interval (avg-2 .. avg+3 days since
    the last purchase) and it has more than two historical purchases, run
    `algo` to predict the quantity. Returns a JSON list of predictions.
    '''
    userid = request.args.get('userid')
    transaction =read_json(url,db_name,"transactions")
    recent_purchases = read_json(url,db_name,"Recent_purchases")#Getting the rta table
    # itemlist = db.itemlist
    user_dict={}
    user_dict["cust_id"]=int(userid)
    # Projection keeps only dates, quantities and item ids per transaction.
    item_info = transaction.find(user_dict,{"Transaction.item_transactions.date":1, "Transaction.item_transactions.quantity":1,"Transaction.item_id":1,"_id":0})
    itemDetails = recent_purchases.find(user_dict,{'_id':0})#Mongo query
    output = []
    item_id_dict={}#Stores the item and dates and quantity array
    item_info_dict=[] #stores the avg , last_date and item_id
    for item in itemDetails:
        for one_item in item['recents']:
            item_obj_dict={}
            item_id_dict[one_item["item_id"]]={}
            item_obj_dict["item_id"]=one_item["item_id"]
            item_obj_dict["avg"]=one_item["avg"]
            item_obj_dict["last_date"]=one_item["last_date"]
            item_info_dict.append(item_obj_dict)
    # Attach historical dates/quantities for every recent item.
    item_id_dict=prefetch(item_id_dict,item_info)
    for one_item in item_info_dict:
        avg = one_item['avg'] #Fetch the avg of an item for a particular user
        datetimeobj = datetime.datetime.now()
        date = datetimeobj.strftime("%Y") + "-" +datetimeobj.strftime("%m") + "-" + datetimeobj.strftime("%d")
        last_date_of_purchase=one_item['last_date']
        # t = days elapsed since the last purchase of this item.
        t = (datetime.datetime.strptime(date,"%Y-%m-%d") - datetime.datetime.strptime(last_date_of_purchase,"%Y-%m-%d"))
        t = t.days
        avg=math.ceil(avg)
        # Predict only when within the re-purchase window around the average gap.
        if(avg !=0 and ((avg)-2)<=t and t<=(avg+3)):
            item_pred = {}
            itemid = one_item['item_id']
            item_dict=item_id_dict.get(itemid)
            # Need at least 3 historical points for the regression in algo().
            if(len(item_dict["dates"])>2 and len(item_dict["quantity"])>2):
                ans = algo(dates=item_dict["dates"],quantity=item_dict["quantity"],gap=t)
                dictionary = dict({'item_id' : itemid})
                # itemName = itemlist.find( dictionary, {'item_name':1 ,'item_id':1, '_id':0})
                item_pred['itemID'] = itemid
                # for name in itemName['item_name']:
                item_pred['itemName'] = "Test_items"
                item_pred['Quantity'] = round(ans)
                output.append(item_pred)
            # else:
            #     print("Hello")
            #     customer_dict={}
            #     customer_dict["cust_id"]=user
            #     info_dict={}
            #     info_dict["recent.item_id"]=one_item["item_id"]
            #     recent_transactions.update(customer_dict,{'$pull':info_dict})
    json_output=json.dumps(output)
    return json_output
# Launch the Flask dev server (publicly exposed via the ngrok wrapper above).
if __name__=='__main__':
    app.run()
|
|
#! /usr/bin/env python
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Count
from django.db.models import Q
from face_manager.models import Person, Face
from filepopulator.models import ImageFile
from itertools import chain
from PIL import Image
from random import randint
import cv2
import numpy as np
import os
import pickle
import shutil
import time
def get_and_resize(in_path, out_path):
    """Resize the image at *in_path* to 224x224 (LANCZOS) and save to *out_path*.

    FIX: open the image with a context manager so the underlying file handle
    is released (PIL keeps the file open until the Image is closed/GC'd).
    """
    newsize = (224, 224)
    with Image.open(in_path) as im:
        resized = im.resize(newsize, resample=Image.LANCZOS)
        resized.save(out_path)
class Command(BaseCommand):
    '''Management command that serializes face chips plus metadata to pickle
    files (one per Face row) for offline processing/training.'''

    def __init__(self):
        # BUG FIX: was ``super(Command).__init__()``, which creates an
        # *unbound* super object and never initializes BaseCommand.
        super().__init__()
        self.min_faces = 50                             # min faces for a person to be included
        self.out_dir = '/code/MISC_DATA/image_pkls'     # primary output dir
        self.alt_out_dir = '/photos/pkls'               # secondary location checked for existing pickles

    def image_resize(self, image, width = None, height = None, inter = cv2.INTER_AREA):
        '''Resize ``image`` to the given width OR height, preserving aspect
        ratio. Returns the image unchanged if both are None.'''
        # initialize the dimensions of the image to be resized and
        # grab the image size
        dim = None
        (h, w) = image.shape[:2]

        # if both the width and height are None, then return the
        # original image
        if width is None and height is None:
            return image

        # check to see if the width is None
        if width is None:
            # calculate the ratio of the height and construct the
            # dimensions
            r = height / float(h)
            dim = (int(w * r), height)
        # otherwise, the height is None
        else:
            # calculate the ratio of the width and construct the
            # dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        # resize the image
        resized = cv2.resize(image, dim, interpolation = inter)

        # return the resized image
        return resized

    def img_to_pkl(self, face, set_ign_idx = False):
        '''Pickle one face: bounding box, identity index, source-image metadata
        and a square image chip (padded up to sqrt(2)*max_extent so the chip
        can later be rotated without clipping).

        Args:
            face: Face model instance to serialize.
            set_ign_idx: when True, mark the face as "ignored" (index -999)
                and use the ignore-specific file names.
        '''
        if not set_ign_idx:
            out_file = os.path.join(self.out_dir, f'imchip_{face.id}.pkl')
            out_alt_file = os.path.join(self.alt_out_dir, f'imchip_{face.id}.pkl')
        else:
            out_file = os.path.join(self.out_dir, f'imchip_ign_{face.id}.pkl')
            out_alt_file = os.path.join(self.alt_out_dir, 'ignore', f'imchip_ign_{face.id}.pkl')

        # Skip work if either output location already has this face.
        if os.path.exists(out_file) or os.path.exists(out_alt_file):
            # print(f"File {out_file} already good")
            return

        data = {}
        data['person_name'] = face.declared_name.person_name
        if not set_ign_idx:
            # Class index = position of the person in the filtered names list.
            data['index'] = self.names_list.index(face.declared_name)
        else:
            data['index'] = -999
        data['left'] = face.box_left
        data['right'] = face.box_right
        data['bottom'] = face.box_bottom
        data['top'] = face.box_top
        data['width'] = face.box_right - face.box_left
        data['height'] = face.box_bottom - face.box_top
        data['face_id'] = face.id
        data['img_file'] = face.source_image_file.filename
        data['date_taken'] = face.source_image_file.dateTaken
        data['date_modified'] = face.source_image_file.dateModified

        # Read in the image
        image = cv2.imread(data['img_file'])
        if image is None:
            # Robustness: source file missing/unreadable — skip this face
            # rather than crashing the whole batch on cvtColor(None).
            return
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        im_height, im_width = image.shape[:2]

        # Calculate how much bigger it needs to be for rotating - sqrt(2) is ideal.
        max_extent = max(data['width'], data['height'])
        scale_up_size = int(np.ceil(np.sqrt(2) * max_extent))

        # Pad symmetrically, clamped by the distance to the image borders.
        width_add_nominal = (scale_up_size - data['width']) // 2
        margin_left = data['left']
        margin_right = im_width - data['right']
        width_add_final = np.min((margin_left, margin_right, width_add_nominal))
        chip_left = data['left'] - width_add_final
        chip_right = data['right'] + width_add_final

        height_add_nominal = (scale_up_size - data['height']) // 2
        margin_top = data['top']
        margin_bottom = im_height - data['bottom']
        height_add_final = np.min((height_add_nominal, margin_top, margin_bottom))
        chip_top = data['top'] - height_add_final
        chip_bottom = data['bottom'] + height_add_final

        img_chipped = image[chip_top:chip_bottom, chip_left:chip_right]
        h, w = img_chipped.shape[:2]
        if h == 0 or w == 0:
            return

        # You can get back the image chip by just getting the center point and
        # taking the width//2 and height//2 from that.
        if scale_up_size > 800:
            img_chipped = self.image_resize(img_chipped, height=800)
            print(img_chipped.shape)
        data['chipped_image'] = img_chipped

        with open(out_file, 'wb') as fh:
            pickle.dump(data, fh)

    def handle(self, *args, **options):
        '''Entry point: pickle every face declared as .ignore/.realignore.

        Also computes the list of persons with more than ``min_faces`` faces
        (excluding IGNORED_NAMES) used for class indices in img_to_pkl.
        '''
        names = Person.objects.annotate(c=Count('face_declared')) \
                    .filter(c__gt=self.min_faces) \
                    .filter(~Q(person_name__in=settings.IGNORED_NAMES) )
        self.names_list = list(names)

        # images = Face.objects.filter(declared_name__in=names).order_by('?')
        # cnt = 0
        # n_imgs = images.count()
        # for p_img in images.iterator():
        #     if cnt % 500 == 0 and cnt > 0:
        #         print(f"{cnt}/{n_imgs} | {cnt/n_imgs * 100:.2f}%")
        #     cnt += 1
        #     self.img_to_pkl(p_img)
        # exit()

        criterion_rejected = Q( declared_name__person_name__in=['.ignore', '.realignore'])
        ign_faces = Face.objects.filter(criterion_rejected).order_by('?')
        cnt = 0
        n_imgs = ign_faces.count()
        for ign in ign_faces.iterator():
            if cnt % 500 == 0 and cnt > 0:
                print(f"{cnt}/{n_imgs} | {cnt/n_imgs * 100:.2f}%")
            cnt += 1
            self.img_to_pkl(ign, True)

        # ignore_dir = '/code/MISC_DATA/ignore_chips'
        # try:
        #     os.makedirs(ignore_dir)
        # except:
        #     pass
|
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
# ---- Global matplotlib styling -------------------------------------------
SMALL_SIZE = 14
MEDIUM_SIZE = 18
LARGE_SIZE = 22
# Arrow-head sizing constants (not referenced in this chunk).
HEAD_WIDTH = 1
HEAD_LEN = 1
FAMILY = "Times New Roman"
plt.rc("font", size=SMALL_SIZE, family=FAMILY)
plt.rc("axes", titlesize=MEDIUM_SIZE, labelsize=MEDIUM_SIZE, linewidth=2.0)
plt.rc("xtick", labelsize=SMALL_SIZE)
plt.rc("ytick", labelsize=SMALL_SIZE)
plt.rc("legend", fontsize=SMALL_SIZE)
plt.rc("figure", titlesize=LARGE_SIZE)

# Load the metrics table and drop the identifier column before normalizing.
data = pd.read_csv("metrics.csv", header=0)
only_numbers = data.loc[:, data.columns != "App Key"]
# Min-max normalize every metric column to [0, 1].
normalized_numbers = (only_numbers - only_numbers.min()) / (only_numbers.max() - only_numbers.min())

# this is for annotation of points on the scatter plot
# (row index -> label text and annotation offset in axis units)
point_labels = {
    0: {"txt": 'min', "x_shift": 0.03, "y_shift": 0.03},
    1: {"txt": 'data', "x_shift": 0.03, "y_shift": -0.03},
    2: {"txt": 'ml', "x_shift": -0.03, "y_shift": 0.03}
}
def plot_and_save(columns, plot_filename, legend_location):
    """Plot selected normalized metric columns as FBP-vs-SOA trajectories.

    Args:
        columns: positional indices of the columns of ``normalized_numbers``
            to plot (one line per column).
        plot_filename: output file name, written under ``figs/``.
        legend_location: matplotlib legend ``loc`` value.
    """
    fig, ax = plt.subplots()
    markers = ['o', 's', '*']
    m_sizes = [8, 8, 12]
    m = 0
    # FIX: Series.iteritems() was removed in pandas 2.0 — use items().
    for colname, col in normalized_numbers.iloc[:, columns].items():
        # NOTE(review): assumes rows 0-2 are FBP-based and rows 3+ SOA-based —
        # confirm against the fixed ordering of metrics.csv.
        fbp_values = col.loc[:2].tolist()
        soa_values = col.loc[3:].tolist()
        sns.lineplot(x=fbp_values, y=soa_values, ax=ax, label=colname, marker=markers[m], markersize=m_sizes[m], linestyle=':')
        # Direction arrows drawn at the midpoint of each segment.
        u = np.diff(fbp_values)
        v = np.diff(soa_values)
        pos_x = fbp_values[:-1] + u / 2
        pos_y = soa_values[:-1] + v / 2
        # NOTE(review): a zero-length segment would divide by zero here.
        norm = np.sqrt(u ** 2 + v ** 2)
        ax.quiver(pos_x, pos_y, u / norm, v / norm, angles="xy", zorder=5, pivot="mid")
        for i, (fv, sv) in enumerate(zip(fbp_values, soa_values)):
            ax.annotate(point_labels[i]['txt'], (fv+point_labels[i]['x_shift'], sv+point_labels[i]['y_shift']))
        m = m + 1
    plt.xlabel("FBP-based")
    plt.xlim(-0.1, 1.1)
    plt.ylabel("SOA-based")
    plt.ylim(-0.1, 1.1)
    plt.legend()
    ax.grid(linestyle="-", linewidth="0.25", color="grey")
    ax.legend(fancybox=True, loc=legend_location)
    plt.savefig(f"figs/{plot_filename}")
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close(fig)
# sizes
plot_and_save([0, 1], "sizes.pdf", "lower right")
# complexity-related metric columns
plot_and_save([2, 5, 6], "complexities.pdf", "best")
|
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Oliver Backhouse <olbackhouse@gmail.com>
# George Booth <george.booth@kcl.ac.uk>
#
'''
Auxiliary space class and helper functions.
'''
import time
import numpy as np
import scipy.linalg.blas
from pyscf import lib
from pyscf.lib import logger
from pyscf import __config__
from pyscf.lib.parameters import LARGE_DENOM
class AuxiliarySpace(object):
    ''' Simple container to hold the energies, couplings and chemical
        potential associated with an auxiliary space.

    Attributes:
        energy : 1D array
            Energies of the poles
        coupling : 2D array
            Coupling vector of the poles to each physical state
        chempot : float
            Chemical potential associated with the energies
    '''

    def __init__(self, energy, coupling, chempot=0.0):
        self.energy = np.asarray(energy)
        # C-ordered (nphys, naux) coupling matrix
        self.coupling = np.asarray(coupling, order='C')
        self.chempot = chempot
        # keep poles energy-sorted; occupied/virtual slicing relies on it
        self.sort()

    def sort(self):
        ''' Sort in-place via the energies to make slicing easier.
        '''
        arg = np.argsort(self.energy)
        self.energy = self.energy[arg]
        self.coupling = self.coupling[:,arg]

    def real_freq_spectrum(self, *args, **kwargs):
        ''' See subclasses.
        '''
        raise NotImplementedError

    def compress(self, *args, **kwargs):
        ''' See subclasses.
        '''
        raise NotImplementedError

    def get_occupied(self):
        ''' Returns a copy of the current AuxiliarySpace object
            containing only the poles with energy less than the
            chemical potential. The object should already be sorted.

        Returns:
            :class:`AuxiliarySpace` with only the occupied auxiliaries
        '''
        # energies are sorted, so a binary search gives the occupied count
        nocc = np.searchsorted(self.energy, self.chempot)
        energy = np.copy(self.energy[:nocc])
        coupling = np.copy(self.coupling[:,:nocc])
        return self.__class__(energy, coupling, chempot=self.chempot)

    def get_virtual(self):
        ''' Returns a copy of the current AuxiliarySpace object
            containing only the poles with energy greater than the
            chemical potential. The object should already be sorted.

        Returns:
            :class:`AuxiliarySpace` with only the virtual auxiliaries
        '''
        nocc = np.searchsorted(self.energy, self.chempot)
        energy = np.copy(self.energy[nocc:])
        coupling = np.copy(self.coupling[:,nocc:])
        return self.__class__(energy, coupling, chempot=self.chempot)

    def get_array(self, phys, out=None, chempot=0.0):
        ''' Expresses the auxiliaries as an array, i.e. the extended
            Fock matrix in AGF2 or Hamiltonian of ADC(2).

        Args:
            phys : 2D array
                Physical (1p + 1h) part of the matrix

        Kwargs:
            out : 2D array
                If provided, use to store output
            chempot : float
                Scale energies (by default, :attr:`chempot` is not used
                and energies retain their values). Default 0.0

        Returns:
            Array representing the coupling of the auxiliary space to
            the physical space
        '''
        _check_phys_shape(self, phys)

        dtype = np.result_type(phys.dtype, self.energy.dtype, self.coupling.dtype)

        if out is None:
            out = np.zeros((self.nphys+self.naux,)*2, dtype=dtype)

        # block structure: [[phys, V], [V^H, diag(e - chempot)]]
        sp = slice(None, self.nphys)
        sa = slice(self.nphys, None)

        out[sp,sp] = phys
        out[sp,sa] = self.coupling
        out[sa,sp] = self.coupling.conj().T
        out[sa,sa][np.diag_indices(self.naux)] = self.energy - chempot

        return out

    def dot(self, phys, vec, out=None, chempot=0.0):
        ''' Returns the dot product of :func:`get_array` with a vector,
            without building the full array.

        Args:
            phys : 2D array
                Physical (1p + 1h) part of the matrix
            vec : ndarray
                Vector to compute dot product with

        Kwargs:
            out : 2D array
                If provided, use to store output
            chempot : float
                Scale energies (by default, :attr:`chempot` is not used
                and energies retain their values). Default 0.0

        Returns:
            ndarray with shape of :attr:`vec`
        '''
        _check_phys_shape(self, phys)

        vec = np.asarray(vec)
        input_shape = vec.shape
        # treat vec as a (nphys+naux, nvec) matrix of column vectors
        vec = vec.reshape((self.nphys+self.naux, -1))
        dtype = np.result_type(self.coupling.dtype, vec.dtype)

        sp = slice(None, self.nphys)
        sa = slice(self.nphys, None)

        if out is None:
            out = np.zeros(vec.shape, dtype=dtype)
        out = out.reshape(vec.shape)

        # physical block: phys @ v_p + V @ v_a
        out[sp] = np.dot(phys, vec[sp])
        out[sp] += np.dot(self.coupling, vec[sa])

        # auxiliary block: V^H @ v_p + (e - chempot) * v_a
        out[sa] = np.dot(vec[sp].T, self.coupling).conj().T
        out[sa] += (self.energy[:,None] - chempot) * vec[sa]

        out = out.reshape(input_shape)

        return out

    def eig(self, phys, out=None, chempot=0.0):
        ''' Computes the eigenvalues and eigenvectors of the array
            returned by :func:`get_array`.

        Args:
            phys : 2D array
                Physical (1p + 1h) part of the matrix

        Kwargs:
            out : 2D array
                If provided, use to store output
            chempot : float
                Scale energies (by default, :attr:`chempot` is not used
                and energies retain their values). Default 0.0

        Returns:
            tuple of ndarrays (eigenvalues, eigenvectors)
        '''
        _check_phys_shape(self, phys)

        h = self.get_array(phys, chempot=chempot, out=out)
        w, v = np.linalg.eigh(h)

        return w, v

    def moment(self, n, squeeze=True):
        ''' Builds the nth moment of the spectral distribution.

        Args:
            n : int or list of int
                Moment(s) to compute

        Kwargs:
            squeeze : bool
                If True, use :func:`np.squeeze` on output so that in
                the case of :attr:`n` being an int, a 2D array is
                returned. If False, output is always 3D. Default True.

        Returns:
            ndarray of moments
        '''
        n = np.asarray(n)
        n = n.reshape(n.size)

        # M_n[x,y] = sum_k V[x,k] V*[y,k] e_k^n
        energy_factored = self.energy[None] ** n[:,None]
        v = self.coupling
        moms = lib.einsum('xk,yk,nk->nxy', v, v.conj(), energy_factored)

        if squeeze:
            moms = np.squeeze(moms)

        return moms

    def remove_uncoupled(self, tol):
        ''' Removes poles with very low spectral weight (uncoupled
            to the physical space) in-place.

        Args:
            tol : float
                Threshold for the spectral weight (squared norm)
        '''
        v = self.coupling
        # spectral weight of pole k = ||V[:,k]||^2
        w = np.linalg.norm(v, axis=0) ** 2

        arg = w >= tol

        self.energy = self.energy[arg]
        self.coupling = self.coupling[:,arg]

    def save(self, chkfile, key=None):
        ''' Saves the auxiliaries in chkfile

        Args:
            chkfile : str
                Name of chkfile
            key : str
                Key to be used in h5py object. It can contain "/" to
                represent the path in the HDF5 storage structure.
        '''
        if key is None:
            key = 'aux'

        lib.chkfile.dump(chkfile, key, self.__dict__)

    @classmethod
    def load(cls, chkfile, key=None):
        ''' Loads the auxiliaries from a chkfile

        Args:
            chkfile : str
                Name of chkfile
            key : str
                Key to be used in h5py object. It can contain "/" to
                represent the path in the HDF5 storage structure.
        '''
        if key is None:
            key = 'aux'

        dct = lib.chkfile.load(chkfile, key)

        return cls(dct['energy'], dct['coupling'], chempot=dct['chempot'])

    def copy(self):
        ''' Returns a copy of the current object.

        Returns:
            AuxiliarySpace
        '''
        energy = np.copy(self.energy)
        coupling = np.copy(self.coupling)
        return self.__class__(energy, coupling, chempot=self.chempot)

    @property
    def nphys(self):
        # number of physical (1p + 1h) states
        return self.coupling.shape[0]

    @property
    def naux(self):
        # number of auxiliary poles
        return self.coupling.shape[1]
class SelfEnergy(AuxiliarySpace):
    ''' Defines a self-energy represented as a :class:`AuxiliarySpace`
        object.
    '''

    def real_freq_spectrum(self, grid, eta=0.02):
        # A self-energy has no spectrum here: solve the Dyson equation
        # first via :meth:`get_greens_function`.
        raise ValueError('Convert SelfEnergy to GreensFunction before '
                         'building a spectrum.')

    def get_greens_function(self, phys):
        ''' Returns a :class:`GreensFunction` by solving the Dyson
            equation.

        Args:
            phys : 2D array
                Physical space (1p + 1h), typically the Fock matrix

        Returns:
            :class:`GreensFunction`
        '''
        w, v = self.eig(phys)
        # keep only the projection of each eigenvector onto the physical space
        v = v[:self.nphys]

        return GreensFunction(w, v, chempot=self.chempot)

    def make_rdm1(self, phys, chempot=None, occupancy=2):
        ''' Returns the first-order reduced density matrix associated
            with the self-energy via the :class:`GreensFunction`.

        Args:
            phys : 2D array
                Physical space (1p + 1h), typically the Fock matrix

        Kwargs:
            chempot : float
                If provided, use instead of :attr:`self.chempot`
            occupancy : int
                Occupancy of the states, i.e. 2 for RHF and 1 for UHF
        '''
        gf = self.get_greens_function(phys)
        return gf.make_rdm1(phys, chempot=chempot, occupancy=occupancy)

    def compress(self, phys=None, n=(None, 0), tol=1e-12):
        ''' Compress the auxiliaries via moments of the particle and
            hole Green's function and self-energy. Resulting :attr:`naux`
            depends on the chosen :attr:`n`.

        Kwargs:
            phys : 2D array or None
                Physical space (1p + 1h), typically the Fock matrix.
                Only required if :attr:`n[0]` is not None.
            n : tuple of int
                Compression level of the Green's function and
                self-energy, respectively.
            tol : float
                Linear dependecy tolerance. Default value is 1e-12

        Returns:
            :class:`SelfEnergy` with reduced auxiliary dimension.

        Raises:
            MemoryError if the compression according to Green's
            function moments will exceed the maximum allowed memory.
        '''
        ngf, nse = n
        se = self

        # (None, None) means no compression at all
        if nse is None and ngf is None:
            return self.copy()

        # self-energy-moment compression first, then GF-moment compression
        if nse is not None:
            se = compress_via_se(se, n=nse)

        if ngf is not None:
            se = compress_via_gf(se, phys, n=ngf, tol=tol)

        return se
class GreensFunction(AuxiliarySpace):
    ''' Defines a Green's function represented as a
        :class:`AuxiliarySpace` object.
    '''

    def real_freq_spectrum(self, grid, eta=0.02):
        ''' Express the auxiliaries as a spectral function on the real
            frequency axis.

        Args:
            grid : 1D array
                Real frequency grid

        Kwargs:
            eta : float
                Peak broadening factor in Hartrees. Default is 0.02.

        Returns:
            ndarray of the spectrum, with the first index being the
            frequency
        '''
        e_shifted = self.energy - self.chempot
        v = self.coupling
        spectrum = np.zeros((grid.size, self.nphys, self.nphys), dtype=complex)
        # process the grid in chunks to bound the size of the denom array
        blksize = 240

        p1 = 0
        for block in range(0, grid.size, blksize):
            p0, p1 = p1, min(p1 + blksize, grid.size)
            denom = grid[p0:p1,None] - (e_shifted + eta*1.0j)[None]
            spectrum[p0:p1] = lib.einsum('xk,yk,wk->wxy', v, v.conj(), 1./denom)

        # A(w) = -1/pi * Tr[Im G(w)]
        return -1/np.pi * np.trace(spectrum.imag, axis1=1, axis2=2)

    def make_rdm1(self, chempot=None, occupancy=2):
        ''' Returns the first-order reduced density matrix associated
            with the Green's function.

        Kwargs:
            chempot : float
                If provided, use instead of :attr:`self.chempot`
            occupancy : int
                Occupancy of the states, i.e. 2 for RHF and 1 for UHF
        '''
        if chempot is None:
            chempot = self.chempot

        # density matrix built from the occupied (e < chempot) poles only
        arg = self.energy < chempot
        v_occ = self.coupling[:,arg]
        rdm1 = np.dot(v_occ, v_occ.T.conj()) * occupancy

        return rdm1

    def compress(self, *args, **kwargs):
        # Compression is defined on the self-energy representation only.
        raise ValueError('Compression must be performed on SelfEnergy '
                         'rather than GreensFunction.')
def combine(*auxspcs):
    ''' Combine a set of :class:`AuxiliarySpace` objects into one by
        concatenating their poles. :attr:`chempot` and the returned class
        are inherited from the first element.
    '''
    sizes = [spc.nphys for spc in auxspcs]
    if any(size != sizes[0] for size in sizes):
        raise ValueError('Size of physical space must be the same to '
                         'combine AuxiliarySpace objects.')
    nphys = sizes[0]

    total_naux = sum(spc.naux for spc in auxspcs)
    dtype = np.result_type(*[spc.coupling for spc in auxspcs])

    energy = np.zeros((total_naux,))
    coupling = np.zeros((nphys, total_naux), dtype=dtype)

    # copy each space's poles into consecutive column blocks
    offset = 0
    for spc in auxspcs:
        nxt = offset + spc.naux
        energy[offset:nxt] = spc.energy
        coupling[:,offset:nxt] = spc.coupling
        offset = nxt

    return auxspcs[0].__class__(energy, coupling, chempot=auxspcs[0].chempot)
def davidson(auxspc, phys, chempot=None, nroots=1, which='SM', tol=1e-14, maxiter=None, ntrial=None):
    ''' Diagonalise the result of :func:`AuxiliarySpace.get_array` using
        the sparse :func:`AuxiliarySpace.dot` method, with the Davidson
        algorithm.

        This algorithm may perform poorly for IPs or EAs if they are
        not extremal eigenvalues, which they are not in standard AGF2.

    Args:
        auxspc : AuxiliarySpace or subclass
            Auxiliary space object to solve for
        phys : 2D array
            Physical space (1p + 1h), typically the Fock matrix

    Kwargs:
        chempot : float
            If provided, use instead of :attr:`self.chempot`
        nroots : int
            Number of roots to solve for. Default 1.
        which : str
            Which eigenvalues to solve for. Options are:
             `LM` : Largest (in magnitude) eigenvalues.
             `SM` : Smallest (in magnitude) eigenvalues.
             `LA` : Largest (algebraic) eigenvalues.
             `SA` : Smallest (algebraic) eigenvalues.
            Default 'SM'.
        tol : float
            Convergence threshold
        maxiter : int
            Maximum number of iterations. Default 10*dim
        ntrial : int
            Maximum number of trial vectors. Default
            min(dim, max(2*nroots+1, 20))

    Returns:
        tuple of ndarrays (eigenvalues, eigenvectors)
    '''
    # NOTE(review): the `chempot` kwarg is never used in this body — the
    # diagonal is built from auxspc.energy unshifted. Confirm intent.
    _check_phys_shape(auxspc, phys)
    dim = auxspc.nphys + auxspc.naux

    if maxiter is None:
        maxiter = 10 * dim

    if ntrial is None:
        ntrial = min(dim, max(2*nroots+1, 20))

    if which not in ['SM', 'LM', 'SA', 'LA']:
        raise ValueError(which)

    # magnitude-based selection for *M, algebraic for *A
    if which in ['SM', 'LM']:
        abs_op = np.absolute
    else:
        abs_op = lambda x: x

    # ascending order for S*, descending for L*
    if which in ['SM', 'SA']:
        order = 1
    else:
        order = -1

    matvec = lambda x: auxspc.dot(phys, np.asarray(x))
    diag = np.concatenate([np.diag(phys), auxspc.energy])
    guess = [np.zeros((dim)) for n in range(nroots)]

    # unit-vector guesses on the diagonal entries closest to the target
    mask = np.argsort(abs_op(diag))[::order]
    for i in range(nroots):
        guess[i][mask[i]] = 1

    def pick(w, v, nroots, callback):
        # re-sort Ritz values according to the requested selection criterion
        mask = np.argsort(abs_op(w))
        mask = mask[::order]
        w = w[mask]
        v = v[:,mask]
        return w, v, 0

    conv, w, v = lib.davidson1(matvec, guess, diag, tol=tol, nroots=nroots,
                               max_space=ntrial, max_cycle=maxiter, pick=pick)

    return conv, w, v
def _band_lanczos(se_occ, n=0, max_memory=None):
    ''' Perform the banded Lanczos algorithm for compression of a
        self-energy according to consistency in its separate
        particle and hole moments.

    Args:
        se_occ : occupied (or virtual) part of a SelfEnergy
        n : truncation parameter; the band consists of (n+1) blocks of
            nphys rows each
        max_memory : unused here

    Returns:
        (v_tri, t) : upper-triangular Cholesky factor of V V^T and the
        banded tridiagonal-block matrix whose eigendecomposition yields
        the compressed poles.
    '''
    nblk = n+1
    nphys, naux = se_occ.coupling.shape
    bandwidth = nblk * nphys

    q = np.zeros((bandwidth, naux))   # Lanczos vectors (rows)
    t = np.zeros((bandwidth, bandwidth))
    r = np.zeros((naux))

    # cholesky qr factorisation of v.T
    coupling = se_occ.coupling
    x = np.dot(coupling, coupling.T)

    try:
        v_tri = np.linalg.cholesky(x).T
    except np.linalg.LinAlgError:
        # numerically non-positive-definite overlap: clamp eigenvalues
        # to a tiny positive floor and retry on the repaired matrix
        w, v = np.linalg.eigh(x)
        w[w < 1e-20] = 1e-20
        x_posdef = np.dot(np.dot(v, np.diag(w)), v.T)
        v_tri = np.linalg.cholesky(x_posdef).T

    # initial block: orthonormalised couplings
    q[:nphys] = np.dot(np.linalg.inv(v_tri).T, coupling)

    for i in range(bandwidth):
        r[:] = se_occ.energy * q[i]

        # orthogonalise against the previous (up to nphys) vectors in the band
        start = max(i-nphys, 0)
        if start != i:
            r -= np.dot(t[i,start:i], q[start:i])

        for j in range(i, min(i+nphys, bandwidth)):
            t[i,j] = t[j,i] = np.dot(r, q[j])
            # r := -t[i,j] * q[j] + r
            scipy.linalg.blas.daxpy(q[j], r, a=-t[i,j])

        if (i+nphys) < bandwidth:
            len_r = np.linalg.norm(r)
            t[i,i+nphys] = t[i+nphys,i] = len_r
            # regularised normalisation guards against a zero residual
            q[i+nphys] = r / (len_r + 1./LARGE_DENOM)

    return v_tri, t
def _compress_part_via_se(se_occ, n=0):
''' Compress the auxiliaries of the occupied or virtual part of
the self-energy according to consistency in its moments.
'''
if se_occ.nphys > se_occ.naux:
# breaks this version of the algorithm and is also pointless
e = se_occ.energy.copy()
v = se_occ.coupling.copy()
else:
v_tri, t = _band_lanczos(se_occ, n=n)
e, v = np.linalg.eigh(t)
v = np.dot(v_tri.T, v[:se_occ.nphys])
return e, v
def _compress_via_se(se, n=0):
''' Compress the auxiliaries of the seperate occupied and
virtual parts of the self-energy according to consistency
in its moments.
'''
if se.naux == 0:
return se.energy, se.coupling
se_occ = se.get_occupied()
se_vir = se.get_virtual()
e = []
v = []
if se_occ.naux > 0:
e_occ, v_occ = _compress_part_via_se(se_occ, n=n)
e.append(e_occ)
v.append(v_occ)
if se_vir.naux > 0:
e_vir, v_vir = _compress_part_via_se(se_vir, n=n)
e.append(e_vir)
v.append(v_vir)
e = np.concatenate(e, axis=0)
v = np.concatenate(v, axis=-1)
return e, v
def compress_via_se(se, n=0):
    ''' Compress the auxiliaries of the seperate occupied and
        virtual parts of the self-energy according to consistency
        in its moments.

    Args:
        se : SelfEnergy
            Auxiliaries of the self-energy

    Kwargs:
        n : int
            Truncation parameter, conserves the seperate particle
            and hole moments to order 2*n+1.

    Returns:
        :class:`SelfEnergy` with reduced auxiliary dimension

    Ref:
        [1] H. Muther, T. Taigel and T.T.S. Kuo, Nucl. Phys., 482,
            1988, pp. 601-616.
        [2] D. Van Neck, K. Piers and M. Waroquier, J. Chem. Phys.,
            115, 2001, pp. 15-25.
        [3] H. Muther and L.D. Skouras, Nucl. Phys., 55, 1993,
            pp. 541-562.
        [4] Y. Dewulf, D. Van Neck, L. Van Daele and M. Waroquier,
            Phys. Lett. B, 396, 1997, pp. 7-14.
    '''
    e, v = _compress_via_se(se, n=n)
    return SelfEnergy(e, v, chempot=se.chempot)
def _build_projector(se, phys, n=0, tol=1e-12):
    ''' Builds the vectors which project the auxiliary space into a
        compress one with consistency in the seperate particle and
        hole moments up to order 2n+1.
    '''
    _check_phys_shape(se, phys)
    nphys, naux = se.coupling.shape
    w, v = se.eig(phys)
    def _part(w, v, s):
        # Moment weights e^0..e^n for the eigenstates selected by mask s
        en = w[s][None] ** np.arange(n+1)[:,None]
        v = v[:,s]
        # Auxiliary components weighted by physical components and moments
        p = np.einsum('xi,pi,ni->xpn', v[nphys:], v[:nphys], en)
        return p.reshape(naux, nphys*(n+1))
    # Hole (below chempot) and particle (above chempot) blocks side by side
    p = np.hstack((_part(w, v, w < se.chempot),
                   _part(w, v, w >= se.chempot)))
    # Normalise columns; replace zero norms so all-zero columns don't
    # trigger a 0/0 (they remain zero after the division)
    norm = np.linalg.norm(p, axis=0, keepdims=True)
    norm[np.absolute(norm) == 0] = 1./LARGE_DENOM
    p /= norm
    # Orthogonalise and drop linearly dependent directions (eigval <= tol)
    w, p = np.linalg.eigh(np.dot(p, p.T))
    p = p[:, w > tol]
    nvec = p.shape[1]
    # Embed: physical space passes through unchanged, auxiliaries projected
    p = np.block([[np.eye(nphys), np.zeros((nphys, nvec))],
                  [np.zeros((naux, nphys)), p]])
    return p
def _compress_via_gf(se, phys, n=0, tol=1e-12):
    ''' Compress the auxiliaries of the seperate occupied and
        virtual parts of the self-energy according to consistency
        in the moments of the Green's function
    '''
    nphys = se.nphys
    proj = _build_projector(se, phys, n=n, tol=tol)
    # Project the extended Hamiltonian into the compressed space
    h_tilde = np.dot(proj.T, se.dot(phys, proj))
    proj = None  # release the projector before diagonalisation
    e, v = np.linalg.eigh(h_tilde[nphys:, nphys:])
    v = np.dot(h_tilde[:nphys, nphys:], v)
    return e, v
def compress_via_gf(se, phys, n=0, tol=1e-12):
    ''' Compress the auxiliaries of the seperate occupied and
        virtual parts of the self-energy according to consistency
        in the moments of the Green's function

    Args:
        se : SelfEnergy
            Auxiliaries of the self-energy
        phys : 2D array
            Physical space (1p + 1h), typically the Fock matrix

    Kwargs:
        n : int
            Truncation parameter, conserves the seperate particle
            and hole moments to order 2*n+1.
        tol : float
            Linear dependecy tolerance. Default value is 1e-12

    Returns:
        :class:`SelfEnergy` with reduced auxiliary dimension
    '''
    e, v = _compress_via_gf(se, phys, n=n, tol=tol)
    return SelfEnergy(e, v, chempot=se.chempot)
def _check_phys_shape(auxspc, phys):
if np.shape(phys) != (auxspc.nphys, auxspc.nphys):
raise ValueError('Size of physical space must be the same as '
'leading dimension of couplings.')
|
|
from __future__ import annotations
__all__ = ['Mosaic', 'Tile', 'get_fusion']
import dataclasses
from collections import defaultdict
from collections.abc import Callable, Iterable, Iterator
from dataclasses import dataclass, field
from functools import partial
from itertools import chain
from typing import NamedTuple, Protocol, TypeVar, cast
import cv2
import numpy as np
from .. import chunked, map_n
from ._util import get_trapz
# 2D integer vector: (y, x) grid index or pixel offset.
Vec = tuple[int, int]
# TODO: allow result of .map/.map_batched to have different tile and step
# ------------------------------- basic types --------------------------------
class NumpyLike(Protocol):
    """Structural type for array-like tile sources: anything exposing a
    ``shape`` tuple and slicing that yields ``np.ndarray``."""

    @property
    def shape(self) -> tuple[int, ...]:
        ...

    def __getitem__(self, key: slice | tuple[slice, ...]) -> np.ndarray:
        ...
class Tile(NamedTuple):
    """One mosaic tile."""
    # (iy, ix) grid cell index
    idx: Vec
    # (y, x) top-left offset of the tile's data within the source image
    vec: Vec
    # tile pixels, HWC-ordered
    data: np.ndarray
# ---------------------------- utility functions -----------------------------
def _apply(fn: Callable[[np.ndarray], np.ndarray], obj: Tile) -> Tile:
r = fn(obj.data)
assert r.shape[:2] == obj.data.shape[:2], \
'Tile shape alteration (besides channel count) is forbidden'
return obj._replace(data=r)
def _apply_batched(fn: Callable[[list[np.ndarray]], Iterable[np.ndarray]],
ts: tuple[Tile, ...]) -> list[Tile]:
*rs, = fn([t.data for t in ts])
assert len(rs) == len(ts)
assert all(r.shape[:2] == t.data.shape[:2]
for r, t in zip(rs, ts)), \
'Tile shape alteration (besides channel count) is forbidden'
return [t._replace(data=r) for t, r in zip(ts, rs)]
def _reweight(weight: np.ndarray, tile: np.ndarray) -> np.ndarray:
assert tile.dtype.kind == 'f'
return np.einsum('hwc,h,w -> hwc', tile, weight, weight, optimize=True)
def _crop(tiles: Iterable[Tile], shape: tuple[int, ...]) -> Iterator[Tile]:
    """Clip each tile so that none extends past the (h, w) bounds."""
    max_y, max_x = shape
    for idx, (y, x), data in tiles:
        yield Tile(idx, (y, x), data[:max_y - y, :max_x - x])
def get_fusion(tiles: Iterable[Tile],
               shape: tuple[int, ...] | None = None) -> np.ndarray | None:
    """Paste tiles into a single (H, W, C) image.

    Args:
        tiles: tiles to fuse; each writes its data at its `vec` offset.
        shape: target (H, W). When None it is inferred from the tiles
            as the maximum bottom-right corner (vec + data height/width).

    Returns:
        The fused array, or None when no non-empty tile is supplied.
    """
    r: np.ndarray | None = None
    if shape is None:  # Collect all the tiles to compute destination size
        tiles = [*tiles]
        if not tiles:  # nothing to infer a size from
            return None
        # Per-tile bottom-right corner: vec + (h, w).
        # BUG FIX: an extra leading axis made .sum(1) reduce over the
        # tiles instead of over (vec, hw), yielding a bogus nested-list
        # shape; build an (N, 2, 2) array and reduce per tile.
        yx_hw = np.array([[t.vec, t.data.shape[:2]] for t in tiles]).sum(1)
        # bottom right most edge
        shape = *yx_hw.max(0).tolist(),
    else:
        assert len(shape) == 2
    for _, (y, x), tile in tiles:
        if not tile.size:
            continue
        h, w, c = tile.shape
        if r is None:  # First iteration, initialize the canvas
            r = np.zeros((*shape, c), tile.dtype)
        assert c == r.shape[2]
        v = r[y:, x:][:h, :w]  # View to destination
        v[:] = tile[:v.shape[0], :v.shape[1]]  # Crop if needed
    return r
# ------------------------------ mosaic setting ------------------------------
@dataclass
class Mosaic:
    """
    Tiling configuration: how an image is split into overlapping tiles.

    Parameters:
    - step - Distance between origins of consecutive tiles
    - overlap - Pixels shared between neighbouring tiles

    A tile therefore spans ``step + overlap`` pixels and its
    non-overlapped interior spans ``step - overlap``.
    """
    step: int
    overlap: int

    def __post_init__(self):
        assert 0 <= self.overlap <= self.step
        assert self.overlap % 2 == 0  # That may be optional

    def get_kernel(self) -> np.ndarray:
        """Trapezoidal weighting window used to blend tile edges."""
        return get_trapz(self.step, self.overlap)

    def iterate(self,
                image: NumpyLike,
                max_workers: int = 1) -> _TiledArrayView:
        """Create a view that reads tiles from `image`."""
        height_width = image.shape[:2]
        grid = *(len(range(0, size + self.overlap, self.step))
                 for size in height_width),
        active = np.ones(grid, dtype=np.bool_)
        return _TiledArrayView(self, height_width, active, image,
                               max_workers)
# --------------------------------- actions ----------------------------------
# TypeVar so .map()/.pool() return the caller's own view subtype.
_Self = TypeVar('_Self', bound='_BaseView')
@dataclass
class _BaseView:
m: Mosaic
shape: tuple[int, ...]
cells: np.ndarray
@property
def ishape(self) -> tuple[int, ...]:
return self.cells.shape
def __len__(self) -> int:
return int(self.cells.sum())
def report(self) -> dict[str, str]:
"""Cells and area usage"""
used = int(self.cells.sum())
total = self.cells.size
coverage = (used / total) * (1 + self.m.overlap / self.m.step) ** 2
return {'cells': f'{used}/{total}', 'coverage': f'{coverage:.0%}'}
def __iter__(self) -> Iterator[Tile]:
raise NotImplementedError
def map(self: _Self,
fn: Callable[[np.ndarray], np.ndarray],
/,
max_workers: int = 0) -> _Self:
"""
Applies function to each tile.
Note: change of tile shape besides channel count is forbidden.
Each tile is HWC-ordered ndarray.
Supports threading.
"""
tile_fn = partial(_apply, fn)
tiles = map_n(tile_fn, self, max_workers=max_workers)
return cast(
_Self, # don't narrow type
_IterView(self.m, self.shape, self.cells, tiles))
def pool(self: _Self, stride: int = 1) -> _Self:
"""Resizes each tile to desired stride"""
if stride == 1:
return self
shape = *(len(range(0, s, stride)) for s in self.shape),
return cast( # don't narrow type
_Self,
_DecimatedView(
Mosaic(self.m.step // stride, self.m.overlap // stride), shape,
self.cells, stride, self))
def crop(self) -> _BaseView:
return _IterView(self.m, self.shape, self.cells, _crop(
self, self.shape))
@dataclass
class _View(_BaseView):
    """Tile view supporting batched mapping, reweighting and merging."""

    def map_batched(self,
                    fn: Callable[[list[np.ndarray]], Iterable[np.ndarray]],
                    batch_size: int = 1,
                    max_workers: int = 0) -> _View:
        """
        Applies `fn` to batches of tiles (HWC ndarrays); tile spatial
        sizes must not change (channel count may). Supports threading.
        """
        batches = map_n(partial(_apply_batched, fn),
                        chunked(self, batch_size),
                        max_workers=max_workers)
        return _IterView(self.m, self.shape, self.cells,
                         chain.from_iterable(batches))

    def transform(self, fn: Callable[[Iterable[Tile]],
                                     Iterable[Tile]]) -> _View:
        """Pipe the whole tile stream through `fn`."""
        return _IterView(self.m, self.shape, self.cells, fn(self))

    def reweight(self) -> _View:
        """
        Weights tile edges so overlapping tiles can be summed directly.
        No-op without overlap; don't apply it twice.
        """
        if not self.m.overlap:  # No need
            return self
        return self.map(partial(_reweight, self.m.get_kernel()))

    def merge(self) -> _BaseView:
        """
        Removes overlapping regions from the tiles if any overlap is
        configured. Tiles should be reweighted beforehand.
        """
        if not self.m.overlap:
            return self
        return _UniqueTileView(self.m, self.shape, self.cells, self)

    def zip_with(
            self, view: np.ndarray,
            v_scale: int) -> Iterator[tuple[Vec, Vec, np.ndarray, np.ndarray]]:
        """Extracts tiles from `view` simultaneously with tiles from self"""
        assert v_scale >= 1
        for tile in self:
            h, w = tile.data.shape[:2]
            y, x = tile.vec
            sub = view[y // v_scale:, x // v_scale:][:h // v_scale,
                                                     :w // v_scale]
            yield *tile, sub
@dataclass
class _IterView(_View):
    """View backed by an externally supplied tile iterable."""
    source: Iterable[Tile]

    def __iter__(self) -> Iterator[Tile]:
        yield from self.source
@dataclass
class _DecimatedView(_View):
    """
    Decimates tiles by an integer stride.
    Size uniformity is unchanged; yields strided views of the original
    tiles' data.
    """
    stride: int
    source: Iterable[Tile]

    def __iter__(self) -> Iterator[Tile]:
        s = self.stride
        for idx, vec, data in self.source:
            scaled = tuple(coord // s for coord in vec)  # type: ignore
            yield Tile(idx, scaled, data[::s, ::s])
@dataclass
class _TiledArrayView(_View):
    """
    Extracts tiles from array.
    Yields same-sized tiles with overlaps.
    """
    # Source array (anything ndarray-like supporting 2D slicing)
    data: NumpyLike
    # Worker count for parallel tile reads in __iter__
    max_workers: int

    def select(self, mask: np.ndarray, scale: int) -> _TiledArrayView:
        """Drop tiles where `mask` is 0"""
        # NOTE(review): mask appears to be given at 1/scale resolution of
        # the source image - confirm against callers.
        assert mask.ndim == 2
        mask = mask.astype('u1')
        ih, iw = self.ishape
        step = self.m.step // scale
        pad = self.m.overlap // (scale * 2)
        # Mask size covering the full tile grid at this scale
        mh, mw = (ih * step), (iw * step)
        if mask.shape[:2] != (mh, mw):
            # Zero-pad then crop the mask to exactly (mh, mw)
            mask_pad = [(0, s1 - s0) for s0, s1 in zip(mask.shape, (mh, mw))]
            mask = np.pad(mask, mask_pad)[:mh, :mw]
        if self.m.overlap:
            # Grow the selection so overlapping neighbours are kept, then
            # shift by `pad` to realign with the tile origins
            kernel = np.ones((3, 3), dtype='u1')
            mask = cv2.dilate(mask, kernel, iterations=pad)
            if pad:
                mask = np.pad(mask[:-pad, :-pad], [[pad, 0], [pad, 0]])
        # A cell stays active if any pixel of its step x step area is set
        cells = mask.reshape(ih, step, iw, step).any((1, 3))
        return dataclasses.replace(self, cells=cells)

    def _get_tile(self, iy: int, ix: int) -> Tile:
        """Read non-overlapping tile of source image"""
        # Nominal bounds include the leading overlap strip...
        (y0, y1), (x0, x1) = ((self.m.step * i - self.m.overlap,
                               self.m.step * (i + 1)) for i in (iy, ix))
        # ...but skip it when the neighbour above/left is active: that
        # neighbour's part already covers the shared strip
        if iy and self.cells[iy - 1, ix]:
            y0 += self.m.overlap
        if ix and self.cells[iy, ix - 1]:
            x0 += self.m.overlap
        return Tile((iy, ix), (y0, x0), self.data[y0:y1, x0:x1])

    def _rejoin_tiles(self, image_parts: Iterable[Tile]) -> Iterator[Tile]:
        """Joins non-overlapping parts to tiles"""
        assert self.m.overlap
        overlap = self.m.overlap
        # Pad so lookups one cell below/right never go out of bounds
        cells = np.pad(self.cells, [(0, 1), (0, 1)])
        step = self.m.step
        # Pending tile buffers for the current grid row, keyed by column
        row = defaultdict[int, np.ndarray]()
        for (iy, ix), _, part in image_parts:
            # Lazy init, first part is always whole
            if row.default_factory is None:
                row.default_factory = partial(np.zeros, part.shape, part.dtype)
            if (tile := row.pop(ix, None)) is not None:
                # Fill the fresh part into the buffered tile's lower right
                tile[-part.shape[0]:, -part.shape[1]:] = part
            else:
                tile = part
            yield Tile((iy, ix), (iy * step - overlap, ix * step - overlap),
                       tile)
            # Seed the overlap strips of the right and bottom neighbours
            if cells[iy, ix + 1]:
                row[ix + 1][:, :overlap] = tile[:, -overlap:]
            if cells[iy + 1, ix]:
                row[ix][:overlap, :] = tile[-overlap:, :]

    def __iter__(self) -> Iterator[Tile]:
        """
        Yield complete tiles built from source image.
        Each tile will have size `(step + overlap)`
        """
        ys, xs = np.where(self.cells)
        parts = map_n(self._get_tile, ys, xs, max_workers=self.max_workers)
        return self._rejoin_tiles(parts) if self.m.overlap else iter(parts)
@dataclass
class _UniqueTileView(_BaseView):
    """
    Applies weighted average over overlapping regions.
    Yields tiles without overlaps, so their size can differ.
    """
    source: Iterable[Tile]
    # cells padded by one row/column so neighbour lookups never overflow
    _cells: np.ndarray = field(init=False, repr=False)
    # Pending bottom strips from the previous grid row, keyed by column
    _row: dict[int, np.ndarray] = field(init=False, repr=False)
    # Pending right strip of the previously processed tile in this row
    _carry: list[np.ndarray] = field(init=False, repr=False)

    def __post_init__(self):
        self._cells = np.pad(self.cells, [(0, 1), (0, 1)])
        self._row = {}
        self._carry = []

    def _update(self, obj: Tile) -> Tile:
        """
        Blends edges of overlapping tiles and returns non-overlapping parts
        """
        (iy, ix), (y, x), tile = obj
        overlap = self.m.overlap
        step = self.m.step
        if iy and self._cells[iy - 1, ix]:  # TOP exists
            # Sum in the strip carried over from the tile above
            top = self._row.pop(ix)
            tile[:overlap, step - top.shape[1]:step] += top
        else:
            tile = tile[overlap:]  # cut TOP
            y += overlap
        if ix and self._cells[iy, ix - 1]:  # LEFT exists
            # Sum in the strip carried over from the tile to the left
            left = self._carry.pop()
            if self._cells[iy + 1, [ix - 1, ix]].all():
                tile[-left.shape[0]:, :overlap] += left
            else:  # cut BOTTOM-LEFT
                tile[-left.shape[0] - overlap:-overlap, :overlap] += left
        else:
            tile = tile[:, overlap:]  # cut LEFT
            x += overlap
        # Split off the right strip for the next tile in this row
        tile, right = np.split(tile, [-overlap], axis=1)
        if self._cells[iy, ix + 1]:  # RIGHT exists
            if not (iy and self._cells[iy - 1, [ix, ix + 1]].all()):
                right = right[-step:]  # cut TOP-RIGHT
            if not self._cells[iy + 1, [ix, ix + 1]].all():
                right = right[:-overlap]  # cut BOTTOM-RIGHT
            self._carry.append(right)
        # Split off the bottom strip for the next grid row
        tile, bottom = np.split(tile, [-overlap])
        if self._cells[iy + 1, ix]:  # BOTTOM exists
            if not (ix and self._cells[[iy, iy + 1], ix - 1].all()):
                # cut BOTTOM-LEFT
                bottom = bottom[:, -(step - overlap):]
            self._row[ix] = bottom
        return Tile((iy, ix), (y, x), tile)

    def __iter__(self) -> Iterator[Tile]:
        assert self.m.overlap
        return map(self._update, self.source)
|
|
"""Common functions to marshal data to/from PyTorch
"""
import collections
from typing import Optional, Sequence, Union, Dict
import numpy as np
import torch
from torch import nn
# Explicit public API of this module.
__all__ = [
    "rgb_image_from_tensor",
    "tensor_from_mask_image",
    "tensor_from_rgb_image",
    "count_parameters",
    "transfer_weights",
    "maybe_cuda",
    "mask_from_tensor",
    "logit",
    "to_numpy",
    "to_tensor",
]
def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:
    """
    Compute the inverse sigmoid (log-odds) of the input.

    Values are clamped to [eps, 1 - eps] before the transform.
    Note: This function has not been tested for numerical stability.
    :param x: Tensor of probabilities
    :param eps: Clamping epsilon
    :return: Tensor of log-odds
    """
    clamped = torch.clamp(x, eps, 1.0 - eps)
    return torch.log(clamped / (1.0 - clamped))
def count_parameters(model: nn.Module, keys: Optional[Sequence[str]] = None) -> Dict[str, int]:
    """
    Count number of total and trainable parameters of a model
    :param model: A model
    :param keys: Optional list of top-level blocks
    :return: Dict with "total" and "trainable" counts, plus one entry per
        requested key that is present (and not None) on the model
    """
    if keys is None:
        keys = ["encoder", "decoder", "logits", "head", "final"]
    total = int(sum(p.numel() for p in model.parameters()))
    trainable = int(sum(p.numel() for p in model.parameters() if p.requires_grad))
    parameters = {"total": total, "trainable": trainable}
    for key in keys:
        # FIX: use getattr instead of model.__getattr__ - the dunder
        # bypasses normal attribute lookup (instance __dict__), so it can
        # raise for plain attributes even when hasattr() returned True.
        block = getattr(model, key, None)
        if block is not None:
            parameters[key] = int(sum(p.numel() for p in block.parameters()))
    return parameters
def to_numpy(x) -> np.ndarray:
    """
    Convert whatever to numpy array
    :param x: List, tuple, scalar, PyTorch tensor or numpy array
    :return: Numpy array
    :raises ValueError: If the input type is not supported
    """
    if isinstance(x, np.ndarray):
        return x
    elif isinstance(x, torch.Tensor):
        # detach() first so gradient-tracking tensors convert cleanly
        return x.detach().cpu().numpy()
    elif isinstance(x, (list, tuple, int, float)):
        return np.array(x)
    else:
        # FIX: name the offending type in the message, consistent with
        # the error raised by the sibling to_tensor()
        raise ValueError("Unsupported type" + str(type(x)))
def to_tensor(x, dtype=None) -> torch.Tensor:
    """
    Convert the input to a torch Tensor, optionally casting its dtype.
    :param x: Tensor, numpy array, list or tuple
    :param dtype: Optional target torch dtype
    :return: Tensor
    :raises ValueError: If the input type is not supported
    """
    if isinstance(x, torch.Tensor):
        if dtype is not None:
            x = x.type(dtype)
        return x
    if isinstance(x, np.ndarray):
        x = torch.from_numpy(x)
        if dtype is not None:
            x = x.type(dtype)
        return x
    if isinstance(x, (list, tuple)):
        # BUG FIX: np.ndarray(x) interprets the sequence as a *shape* and
        # returns uninitialized memory; np.asarray(x) converts the values.
        x = torch.from_numpy(np.asarray(x))
        if dtype is not None:
            x = x.type(dtype)
        return x
    raise ValueError("Unsupported input type" + str(type(x)))
def tensor_from_rgb_image(image: np.ndarray) -> torch.Tensor:
    """Convert an HWC image array into a contiguous CHW tensor."""
    chw = np.ascontiguousarray(np.moveaxis(image, -1, 0))
    return torch.from_numpy(chw)
def tensor_from_mask_image(mask: np.ndarray) -> torch.Tensor:
    """Convert an HW or HWC mask into a CHW tensor (adds a channel if 2D)."""
    if mask.ndim == 2:
        mask = np.expand_dims(mask, -1)
    return tensor_from_rgb_image(mask)
def rgb_image_from_tensor(image: torch.Tensor, mean, std, max_pixel_value=255.0, dtype=np.uint8) -> np.ndarray:
    """Undo mean/std normalization and convert a CHW tensor to an HWC image."""
    hwc = np.moveaxis(to_numpy(image), 0, -1)
    denormalized = hwc * to_numpy(std) + to_numpy(mean)
    return (max_pixel_value * denormalized).astype(dtype)
def mask_from_tensor(mask: torch.Tensor, squeeze_single_channel=False, dtype=None) -> np.ndarray:
    """Convert a CHW mask tensor to HWC, optionally squeezing and casting."""
    result = np.moveaxis(to_numpy(mask), 0, -1)
    if squeeze_single_channel and result.shape[-1] == 1:
        result = np.squeeze(result, -1)
    return result if dtype is None else result.astype(dtype)
def maybe_cuda(x: Union[torch.Tensor, nn.Module]) -> Union[torch.Tensor, nn.Module]:
    """
    Move input Tensor or Module to CUDA device if CUDA is available.
    :param x: Tensor or Module to move
    :return: The input, on the CUDA device when one is available
    """
    return x.cuda() if torch.cuda.is_available() else x
def transfer_weights(model: nn.Module, model_state_dict: collections.OrderedDict):
    """
    Copy weights from state dict to model, skipping layers that are incompatible.
    Useful for model surgery: load a subset of weights from one model
    into a differently shaped one.
    :param model: Model to load weights into
    :param model_state_dict: Model state dict to load weights from
    :return: None
    """
    for name, value in model_state_dict.items():
        # Load one entry at a time so a single incompatible layer does
        # not abort the whole transfer; strict=False tolerates the keys
        # missing from this single-entry dict.
        single = collections.OrderedDict([(name, value)])
        try:
            model.load_state_dict(single, strict=False)
        except Exception as e:
            print(e)
|
|
from functools import lru_cache
import numpy as np
from scipy.linalg import eigh_tridiagonal, eigvalsh_tridiagonal
from scipy.optimize import minimize
from waveforms.math.signal import complexPeaks
class Transmon():
    """Model of a (possibly flux-tunable) transmon qubit.

    Parameterised by charging energy Ec, Josephson energy EJ and
    junction asymmetry d. Can alternatively be constructed from
    spectroscopy quantities (f01/alpha, or f01_max/f01_min with
    anharmonicities), in which case EJ/Ec(/d) are fitted numerically.
    """

    def __init__(self, **kw):
        # Defaults (same frequency units as the caller's inputs)
        self.Ec = 0.2
        self.EJ = 20
        self.d = 0
        if kw:
            self._set_params(**kw)

    def _set_params(self, **kw):
        """Dispatch to the parameterisation matching the supplied kwargs."""
        if {"EJ", "Ec", "d"} <= set(kw):
            # BUG FIX: previously called the non-existent method
            # `_set_params_EJ_Ec_d`, raising AttributeError whenever
            # EJ, Ec and d were all supplied (as FakeQPU does).
            return self._set_params_EJS_Ec_d(kw['EJ'], kw['Ec'], kw['d'])
        elif {"EJ", "Ec"} <= set(kw):
            return self._set_params_EJ_Ec(kw['EJ'], kw['Ec'])
        elif {"f01", "alpha"} <= set(kw):
            if 'ng' not in kw:
                return self._set_params_f01_alpha(kw['f01'], kw['alpha'])
            else:
                return self._set_params_f01_alpha(kw['f01'], kw['alpha'],
                                                  kw['ng'])
        elif {"f01_max", "f01_min"} <= set(kw):
            if {"alpha1", "alpha2"} <= set(kw):
                return self._set_params_f01_max_min_alpha(
                    kw['f01_max'], kw['f01_min'], kw['alpha1'], kw['alpha2'],
                    kw.get('ng', 0))
            elif {"alpha"} <= set(kw):
                return self._set_params_f01_max_min_alpha(
                    kw['f01_max'], kw['f01_min'], kw['alpha'], kw['alpha'],
                    kw.get('ng', 0))
            elif {"alpha1"} <= set(kw):
                return self._set_params_f01_max_min_alpha(
                    kw['f01_max'], kw['f01_min'], kw['alpha1'], kw['alpha1'],
                    kw.get('ng', 0))
        raise TypeError('_set_params() got an unexpected keyword arguments')

    def _set_params_EJ_Ec(self, EJ, Ec):
        """Set a fixed-frequency transmon (asymmetry left unchanged)."""
        self.Ec = Ec
        self.EJ = EJ

    def _set_params_EJS_Ec_d(self, EJS, Ec, d):
        """Set a tunable transmon: total Josephson energy EJS, asymmetry d."""
        self.Ec = Ec
        self.EJ = EJS
        self.d = d

    def _set_params_f01_alpha(self, f01, alpha, ng=0):
        """Fit EJ and Ec so the spectrum reproduces f01 and alpha."""
        # Perturbative starting guess: alpha ~ -Ec, f01 ~ sqrt(8*EJ*Ec) - Ec
        Ec = -alpha
        EJ = (f01 - alpha)**2 / 8 / Ec

        def err(x, target=(f01, alpha)):
            # Squared error of (f01, alpha) from exact diagonalisation
            EJ, Ec = x
            levels = self._levels(Ec, EJ, ng=ng)
            f01 = levels[1] - levels[0]
            f12 = levels[2] - levels[1]
            alpha = f12 - f01
            return (target[0] - f01)**2 + (target[1] - alpha)**2

        ret = minimize(err, x0=[EJ, Ec])
        self._set_params_EJ_Ec(*ret.x)

    def _set_params_f01_max_min_alpha(self,
                                      f01_max,
                                      f01_min,
                                      alpha1,
                                      alpha2=None,
                                      ng=0):
        """Fit EJS, Ec and d to the spectra at the two flux sweet spots."""
        if alpha2 is None:
            alpha2 = alpha1
        # Perturbative starting guesses at flux = 0 and flux = 0.5
        Ec = -alpha1
        EJS = (f01_max - alpha1)**2 / 8 / Ec
        d = (f01_min + Ec)**2 / (8 * EJS * Ec)

        def err(x, target=(f01_max, alpha1, f01_min, alpha2)):
            EJS, Ec, d = x
            levels = self._levels(Ec, self._flux_to_EJ(0, EJS), ng=ng)
            f01_max = levels[1] - levels[0]
            f12 = levels[2] - levels[1]
            alpha1 = f12 - f01_max
            # BUG FIX: `d` was not passed here, so at flux = 0.5 the
            # effective EJ collapsed to ~0 and the asymmetry parameter
            # had no influence on the fit.
            levels = self._levels(Ec, self._flux_to_EJ(0.5, EJS, d), ng=ng)
            f01_min = levels[1] - levels[0]
            f12 = levels[2] - levels[1]
            alpha2 = f12 - f01_min
            return (target[0] - f01_max)**2 + (target[1] - alpha1)**2 + (
                target[2] - f01_min)**2 + (target[3] - alpha2)**2

        ret = minimize(err, x0=[EJS, Ec, d])
        self._set_params_EJS_Ec_d(*ret.x)

    @staticmethod
    def _flux_to_EJ(flux, EJS, d=0):
        """Effective Josephson energy of an asymmetric SQUID at `flux`."""
        F = np.pi * flux
        EJ = EJS * np.sqrt(np.cos(F)**2 + d**2 * np.sin(F)**2)
        return EJ

    @staticmethod
    def _levels(Ec, EJ, ng=0.0, gridSize=51, select_range=(0, 10)):
        """Eigenvalues of the charge-basis transmon Hamiltonian.

        Diagonalises the tridiagonal matrix with 4*Ec*(n - ng)^2 on the
        diagonal and -EJ/2 on the off-diagonals, returning the
        eigenvalues whose indices fall in `select_range`.
        """
        n = np.arange(gridSize) - gridSize // 2
        w = eigvalsh_tridiagonal(4 * Ec * (n - ng)**2,
                                 -EJ / 2 * np.ones(gridSize - 1),
                                 select='i',
                                 select_range=select_range)
        return w

    # NOTE(review): lru_cache on an instance method keeps each Transmon
    # alive for the cache's lifetime (flake8-bugbear B019); tolerated
    # here to keep the public API unchanged.
    @lru_cache(maxsize=128)
    def levels(self, flux=0, ng=0):
        """Energy levels at the given flux and offset charge (cached)."""
        return self._levels(self.Ec, self._flux_to_EJ(flux, self.EJ, self.d),
                            ng)

    @property
    def EJ1_EJ2(self):
        """Junction energy ratio implied by the asymmetry d."""
        return (1 + self.d) / (1 - self.d)

    def chargeParityDiff(self, flux=0, ng=0, k=0):
        """Charge-parity splitting of the k -> k+1 transition.

        Difference of the transition frequency between offset charges
        ng and ng + 0.5.
        """
        a = self.levels(flux, ng=0 + ng)
        b = self.levels(flux, ng=0.5 + ng)
        return (a[1 + k] - a[k]) - (b[1 + k] - b[k])
class FakeQPU():
    """Simulated multi-qubit device for exercising tune-up code.

    Builds N flux-tunable Transmon qubits with randomised (seeded)
    parameters, a Z-crosstalk matrix and readout-resonator parameters,
    and synthesises dispersive-readout S21 signals.
    """

    def __init__(self,
                 N,
                 EJ=15e9,
                 Ec=220e6,
                 d=0.1,
                 EJ_error=0.01,
                 Ec_error=0.01,
                 zCrosstalkSigma=0.1,
                 seed=1234):
        # Seed the global RNG so the generated device is reproducible
        np.random.seed(seed)
        self.N = N
        # Flux (Z) crosstalk matrix: identity plus random off-diagonal terms
        self.M = np.eye(N) + zCrosstalkSigma * np.random.randn(N, N)
        # Static flux offsets per qubit
        self.bias0 = np.random.randn(N)
        # Qubits scattered around the nominal EJ/Ec by the given errors
        self.qubits = [
            Transmon(EJ=EJ * (1 + EJ_error * np.random.randn()),
                     Ec=Ec * (1 + Ec_error * np.random.randn()),
                     d=d) for i in range(N)
        ]
        # Readout resonator frequencies, couplings and quality factors
        self.fr = 6.5e9 + np.arange(N) * 20e6 + 3e6 * np.random.randn(N)
        self.g = 60e6 + 5e6 * np.random.randn(N)
        self.QL = 5000 + 100 * np.random.randn(N)
        self.Qc = 6000 + 100 * np.random.randn(N)
        # Decay rates used by the driven-population model
        self.Gamma = 0.03e6 + 1e3 * np.random.randn(N)
        # Current control settings (set externally by the test harness)
        self.readoutBias = np.zeros(N)
        self.driveBias = np.zeros(N)
        self.driveFrequency = np.zeros(N)
        self.driveOmega = np.zeros(N)
        self.driveDuration = np.zeros(N)
        self.readoutFrequency = np.zeros(N)
        # Noise amplitudes for flux and readout signal
        self.fluxNoise = 0.001
        self.signalNoise = 0.05
        # Resonator asymmetry phase per qubit
        self.phi = 0.6 * np.random.randn(N)
        # Excited-state populations, updated by calcP1()
        self.P1 = np.zeros(N)

    def fluxList(self, bias):
        """Effective flux per qubit: crosstalk + static offsets + noise."""
        return self.M @ bias + self.bias0 + self.fluxNoise * np.random.randn(
            self.N)

    def state(self):
        """Sample a 0/1 state for each qubit from its P1 population."""
        return [np.random.choice([0, 1], p=[1 - p1, p1]) for p1 in self.P1]

    def S21(self, x):
        """Synthesise readout transmission at frequencies `x`.

        Each qubit contributes one resonator peak whose frequency is
        pulled by the state-dependent dispersive shift chi.
        """
        fluxList = self.fluxList(self.readoutBias)
        state = self.state()
        levels = [q.levels(flux) for q, flux in zip(self.qubits, fluxList)]
        peaks = []
        for l, s, fr, g, QL, Qc, phi in zip(levels, state, self.fr, self.g,
                                            self.QL, self.Qc, self.phi):
            f01 = l[1] - l[0]
            f12 = l[2] - l[1]
            # Dispersive shift depends on the sampled qubit state
            if s == 0:
                chi = g**2 / (f01 - fr)
            else:
                chi = 2 * g**2 / (f12 - fr) - g**2 / (f01 - fr)
            fc = fr - chi
            width = fc / (2 * QL)
            amp = -QL / np.abs(Qc) * np.exp(1j * phi)
            peaks.append((fc, width, amp))
        return complexPeaks(x, peaks, 1)

    @staticmethod
    def population(Omega, Delta, Gamma, t):
        """Excited-state population after driving for duration t.

        Damped Rabi-style oscillation at detuning Delta with drive
        amplitude Omega and decay rate Gamma.
        """
        return 0.5 * Omega / np.sqrt(Omega**2 + Delta**2) * (1 - np.exp(
            -4 / 3 * Gamma * t) * np.cos(np.sqrt(Omega**2 + Delta**2) * t))

    def calcP1(self):
        """Update P1 for every qubit from its current drive settings."""
        for i, (bias, freq, Omega, t) in enumerate(
                zip(self.driveBias, self.driveFrequency, self.driveOmega,
                    self.driveDuration)):
            q = self.qubits[i]
            l = q.levels(bias)
            # Drive detuning from the qubit's 0 -> 1 transition
            Delta = freq - l[1] + l[0]
            self.P1[i] = self.population(Omega, Delta, self.Gamma[i], t)

    def signal(self):
        """S21 at the configured readout frequencies plus complex noise."""
        s = self.S21(self.readoutFrequency)
        return s + self.signalNoise * (np.random.randn(*s.shape) +
                                       1j * np.random.randn(*s.shape))
if __name__ == "__main__":
    # Demo: build a transmon from spectroscopy values, then report its
    # fitted parameters and charge-parity splittings per transition.
    q = Transmon(f01=4.2, alpha=4.010 - 4.2)
    levels = q.levels()
    f01 = levels[1] - levels[0]
    f12 = levels[2] - levels[1]
    print("chargeParityDiff:")
    for k in range(4):
        diff = q.chargeParityDiff(k=k)
        print(f" ({k},{k+1}) diff = {diff * 1e3:8.4f} MHz",
              f"(T = {1/np.abs(diff) / 2e3:.1f} us)")
    print(
        f"EJ = {q.EJ:.4f} GHz, Ec = {q.Ec*1e3:.4f} MHz, EJ/Ec={q.EJ/q.Ec:.2f}")
    print(f"f01 = {f01:.4f} GHz, alpha = {(f12-f01)*1e3:.1f} MHz")
|
|
import time
import json
import logging
import random
import os
import pyautogui
import pyscreenshot as ImageGrab
import sys
import tkinter as tk
from tkinter import *
import numpy
from pynput.mouse import Listener as MouseListener
from pynput import mouse
from model.character import Character
# This class contains all logics and actions.
# Current working directory (the bot is started from inside the project).
path = os.getcwd()
#a = os.path.dirname(os.path.abspath(__file__))
# Parent of the working directory; image/config paths are built from it.
parent = os.path.dirname(path)
class Actuator():
screen = None
bot = None
    def __init__(self, bot, screen, keyListener):
        # Main bot controller, its tkinter screen, and the key listener.
        self.screen = screen
        self.bot = bot
        self.character = Character(bot)
        self.keyListener = keyListener
        # Bounding box of the party-member life bar, set by check_sio_bar.
        self.x1 = 0
        self.x2 = 0
        self.y1 = 0
        self.y2 = 0
        # True once the sio life bar has been located on screen.
        self.already_checked = False
        # Latest screenshot crop used for auto-sio, and its numpy form.
        self.autoSio = None
        self.np_im = None
def get_list_of_points_bar(self):
file = open(parent + "\src\conf\config_screen.txt", "r")
contents = file.read()
list = []
indexPrevious = 0
indexNext = 0
for i in range (20):
value = ""
indexPrevious = contents.index('"', indexNext + 1)
indexNext = contents.index('"', indexPrevious + 1)
for x in range(indexPrevious + 1, indexNext):
value += contents[x]
list.append(value)
return list
def change_generator_to_list(self, vector_life, vector_mana):
for i in range(0, 10):
vector_life[i] = list(vector_life[i])
vector_mana[i] = list(vector_mana[i])
    def self_equip_rings_and_amulets(self, screen, character, mustEquipSSA, must_equip_energy, must_equip_might, currentLife, currentMana):
        """Press the configured hotkeys to (re-)equip the SSA amulet/rings.

        NOTE(review): the mustEquip*/must_equip_* lists look like
        template-match results for the equipment slots; an empty list
        appears to mean the item was not detected - confirm with callers.
        """
        # Totals must be numeric before percentages can be computed.
        if (character.value_total_life.isdigit() == False or character.value_total_mana.isdigit() == False):
            return
        currentLifePercent = (float(currentLife/int(character.value_total_life)) * 100)
        currentManaPercent = (float(currentMana/int(character.value_total_mana)) * 100)
        # Re-equip the SSA amulet once life falls below its threshold.
        if (character.life_to_pull_ssa.isdigit() and currentLifePercent < int(character.life_to_pull_ssa) and len(mustEquipSSA) == 0):
            pyautogui.press(character.key_to_press_pulling_ssa)
        if (character.value_to_pull_ring.isdigit() and character.key_to_pull_ring != ' ' and character.ring_type != ' '):
            # Ring threshold measured against the mana bar.
            if (character.bar_to_pull_ring == 'MANA' and int(character.value_to_pull_ring) >= currentManaPercent):
                if (character.ring_type == 'Might' and len(must_equip_might) == 0):
                    pyautogui.press(character.key_to_pull_ring)
                elif (character.ring_type == 'Energy' and len(must_equip_energy) == 0):
                    pyautogui.press(character.key_to_pull_ring)
            # Ring threshold measured against the life bar.
            if (character.bar_to_pull_ring == 'LIFE' and int(character.value_to_pull_ring) >= currentLifePercent):
                if (character.ring_type == 'Might' and len(must_equip_might) == 0):
                    pyautogui.press(character.key_to_pull_ring)
                elif (character.ring_type == 'Energy' and len(must_equip_energy) == 0):
                    pyautogui.press(character.key_to_pull_ring)
            elif (character.bar_to_pull_ring == 'LIFE'):
                # Life above threshold: press while the energy ring is
                # still detected.
                if (character.ring_type == 'Energy' and len(must_equip_energy) != 0):
                    pyautogui.press(character.key_to_pull_ring)
def self_heal(self, screen, character, mustEquipSSA, must_equip_energy, must_equip_might, currentLife, currentMana):
bot = self.bot
if (character.value_total_life.isdigit() == False or character.value_total_mana.isdigit() == False):
return
currentLifePercent = (float(currentLife/int(character.value_total_life)) * 100)
currentManaPercent = (float(currentMana/int(character.value_total_mana)) * 100)
if (character.key_to_press_when_life_90 != " " and currentLifePercent <= 90 and currentLifePercent > 70):
pyautogui.press(character.key_to_press_when_life_90)
elif (character.key_to_press_when_life_70 != " " and currentLifePercent <= 70 and currentLifePercent > 50):
pyautogui.press(character.key_to_press_when_life_70)
elif (character.key_to_press_when_life_50 != " " and currentLifePercent <= 50):
pyautogui.press(character.key_to_press_when_life_50)
if (character.mana_percent_to_cure.isdigit() and currentManaPercent <= int(character.mana_percent_to_cure) and character.key_to_press_healing_mana != " "):
pyautogui.press(character.key_to_press_healing_mana)
if (character.mana_percent_to_train.isdigit() and currentManaPercent > int(character.mana_percent_to_train) and character.key_to_press_training_mana != " "):
pyautogui.press(character.key_to_press_training_mana)
# Auto update max Life/Mana
if (currentLife > int(character.value_total_life)):
value_total_life = currentLife
bot.screen["totalLife"].delete(0, END)
bot.screen["totalLife"].insert(0, str(currentLife))
if (currentMana > int(character.value_total_mana)):
value_total_mana = currentMana
bot.screen["totalMana"].delete(0, END)
bot.screen["totalMana"].insert(0, str(currentMana))
def identify_numbers_on_image(self, imgLife, imgMana, vector_life, vector_mana):
for x in range(0, 10):
vector_life[x] = pyautogui.locateAll(parent + '\src\images\\' + str(x) + '.png', imgLife, grayscale=True, confidence=.95)
vector_mana[x] = pyautogui.locateAll(parent + '\src\images\\' + str(x) + '.png', imgMana, grayscale=True, confidence=.95)
def convert_numbers_to_string(self, validIndex, vector, currentValue):
while(validIndex):
max = 2000
indexRemoved = 0
insideIndexRemove = 0
for value in vector:
if (vector[value] != None):
for valueIntoItem in vector[value]:
if (max > valueIntoItem[0]):
indexRemoved = value
insideIndexRemove = valueIntoItem
max = valueIntoItem[0]
if (insideIndexRemove != 0):
vector[indexRemoved].remove(insideIndexRemove)
currentValue += str(indexRemoved)
validIndex -= 1
return currentValue
def active_anti_idle(self):
direction = random.randint(0, 4)
if (direction == 1):
pyautogui.hotkey('ctrl', 'up')
elif (direction == 2):
pyautogui.hotkey('ctrl', 'right')
elif(direction == 3):
pyautogui.hotkey('ctrl', 'down')
else:
pyautogui.hotkey('ctrl', 'left')
    def check_sio_bar(self):
        """Locate the party-member life bar ('sio' template) on screen.

        Screenshots the configured party-list region, searches for the
        sio template and, on the first hit, stores the life bar's
        bounding box and colors the first tkinter button green.
        """
        listPoints = self.get_list_of_points_bar()
        self.autoSio = pyautogui.screenshot()
        # Crop to the party-list region from the screen configuration.
        self.autoSio = self.autoSio.crop((int(listPoints[16]), int(listPoints[17]), int(listPoints[18]), int(listPoints[19])))
        hasLifeBarSio = pyautogui.locateAll(parent + '\src\images\sio.png', self.autoSio, grayscale=True, confidence=.95)
        listLifeBarSio = list(hasLifeBarSio)
        if (len(listLifeBarSio) != 0 and self.already_checked == False):
            self.already_checked = True
            # Life-bar box relative to the crop: match box is
            # (left, top, width, height); shift 8px right of the icon.
            x1 = listLifeBarSio[0][0] + 8
            y1 = listLifeBarSio[0][1]
            x2 = listLifeBarSio[0][2] + x1 + 1
            y2 = listLifeBarSio[0][3] + y1
            self.x1 = x1
            self.x2 = x2
            self.y1 = y1
            self.y2 = y2
            # Mark the first tkinter button green as visual feedback.
            children_widgets = self.screen.winfo_children()
            for child_widget in children_widgets:
                if child_widget.winfo_class() == 'Button':
                    if (str(child_widget) == ".!button"):
                        child_widget.configure(bg="green")
    def auto_sio_partner(self, life_to_use_sio, key_sio):
        """Estimate the party member's missing life and press the sio key.

        Counts dark pixels in the life-bar crop per RGB channel as a
        rough measure of bar depletion, then presses `key_sio` once the
        count crosses the threshold for the selected percentage.
        """
        self.check_sio_bar()
        # Cut image on the party list
        self.autoSio = self.autoSio.crop((int(self.x1), int(self.y1), int(self.x2), int(self.y2)))
        self.np_im = numpy.array(self.autoSio)
        # Get RGB value of the cutted image
        blue, green, red = self.np_im[..., 0], self.np_im[..., 1], self.np_im[..., 2]
        total = int(self.x2- self.x1 + self.y2 - self.y1)
        cont_life = 0
        cont_blue = 0
        cont_green = 0
        cont_red = 0
        # Count dark (depleted) pixels per channel, ignoring pure black.
        for pixel in blue:
            for i in range(len(pixel)):
                if (pixel[i] <= 70 and pixel[i] != 0):
                    cont_blue += 1
        for pixel in red:
            for i in range(len(pixel)):
                if (pixel[i] <= 70 and pixel[i] != 0):
                    cont_red += 1
        for pixel in green:
            for i in range(len(pixel)):
                if (pixel[i] <= 70 and pixel[i] != 0):
                    cont_green += 1
        # Average the three channel counts into one depletion estimate.
        cont_life = int((cont_blue + cont_green + cont_red)/3)
        if (key_sio != ' ' and life_to_use_sio != ' '):
            # Higher pixel counts correspond to lower remaining life.
            if (life_to_use_sio == '90%' and cont_life >= 10):
                pyautogui.press(key_sio)
                cont_life = 0
            elif (life_to_use_sio == '70%' and cont_life >= 60):
                pyautogui.press(key_sio)
                cont_life = 0
            elif (life_to_use_sio == '50%' and cont_life >= 80):
                pyautogui.press(key_sio)
                cont_life = 0
def core(self):
    """Main bot loop: screenshot the game, read life/mana, heal/eat/buff.

    Runs until `bot.paused` becomes True.  Each iteration crops the
    configured screen regions, OCRs the life/mana digits, and presses the
    configured hotkeys for self-healing, partner healing (sio), food,
    haste, utamo/utito and anti-idle.
    """
    bot = self.bot
    # Iteration counters used as coarse timers for periodic actions.
    FLAG_TIME_ANTI_IDLE = 0
    FLAG_TIME_AUTO_SPELL = 0
    FLAG_TIME_AUTO_UTAMO = 0
    time.sleep(1)
    while (True):
        FLAG_TIME_AUTO_SPELL += 1
        FLAG_TIME_ANTI_IDLE += 1
        FLAG_TIME_AUTO_UTAMO += 1
        if (bot.paused == True):
            break
        # Take screenshot
        im = pyautogui.screenshot()
        # Create copy of the screenshot (aliases; crop() returns new images)
        life = im
        mana = im
        equipment = im
        # Cut screenshot according coordinates on the screen
        listPoints = self.get_list_of_points_bar()
        life = life.crop((int(listPoints[0]), int(listPoints[1]), int(listPoints[2]), int(listPoints[3])))
        mana = mana.crop((int(listPoints[4]), int(listPoints[5]), int(listPoints[6]), int(listPoints[7])))
        equipment = equipment.crop((int(listPoints[12]), int(listPoints[13]), int(listPoints[14]), int(listPoints[15])))
        # Check if screen of the bot is active. This means user is configuring.
        screenBot = pyautogui.locateAll(parent + '\src\images\\bot.png', im, grayscale=True, confidence=.70)
        lstScreen = list(screenBot)
        # Check if has SSA on the equipment.
        hasSSA = pyautogui.locateAll(parent + '\src\images\ssa.png', equipment, grayscale=True, confidence=.90)
        listHasSSA = list(hasSSA)
        # Check if has energy or might ring on the equipment.
        has_energy_ring = pyautogui.locateAll(parent + '\src\images\energy_ring.png', equipment, grayscale=True, confidence=.90)
        list_has_energy_ring = list(has_energy_ring)
        has_might_ring = pyautogui.locateAll(parent + '\src\images\might_ring.png', equipment, grayscale=True, confidence=.90)
        list_has_might_ring = list(has_might_ring)
        # Suspend the key listener while the configuration window is open.
        if (len(lstScreen) != 0):
            self.keyListener.stop()
            continue
        if (self.keyListener.running == False):
            self.keyListener.resume()
        vector_life = {}
        vector_mana = {}
        # Passing cutted images to identify which numbers its being showed on the life/mana bar.
        self.identify_numbers_on_image(life, mana, vector_life, vector_mana)
        validIndexLife = 0
        validIndexMana = 0
        lifeValue = ""
        manaValue = ""
        # Change generator returned from vector_life and vector_mana to list
        self.change_generator_to_list(vector_life, vector_mana)
        # Count how many digit positions (keys 0-9) were recognized at all.
        for i in range(0, 10):
            validIndexLife += (sum(x is not None for x in vector_life[i]))
            validIndexMana += (sum(x is not None for x in vector_mana[i]))
        # Nothing recognized on either bar: skip this frame.
        if (validIndexLife == validIndexMana and validIndexMana == 0):
            continue;
        # Convert numbers from the screen to strings
        lifeValue = self.convert_numbers_to_string(validIndexLife, vector_life, lifeValue)
        manaValue = self.convert_numbers_to_string(validIndexMana, vector_mana, manaValue)
        self.screen.title('Tibia Bot - Running - Life: ' + str(lifeValue) + ' // Mana: ' + str(manaValue))
        self.character.set_all_attributes_about_character()
        # Only act on fully-numeric OCR results (partial reads are skipped).
        if (lifeValue.isnumeric() and manaValue.isnumeric()):
            self.self_heal(bot.screen, self.character, listHasSSA, list_has_energy_ring, list_has_might_ring, int(lifeValue), int(manaValue))
            self.self_equip_rings_and_amulets(bot.screen, self.character, listHasSSA, list_has_energy_ring, list_has_might_ring, int(lifeValue), int(manaValue))
        food = im
        food = food.crop((int(listPoints[8]), int(listPoints[9]), int(listPoints[10]), int(listPoints[11])))
        # Status-icon area: hunger, haste, utamo and utito indicators.
        hasHungry = pyautogui.locateAll(parent + '\src\images\\food.png', food, grayscale=True, confidence=.75)
        lstHasHungry = list(hasHungry)
        hasSpeed = pyautogui.locateAll(parent + '\src\images\speed.png', food, grayscale=True, confidence=.75)
        lstHasSpeed = list(hasSpeed)
        hasUtamo = pyautogui.locateAll(parent + '\src\images\\utamo.png', food, grayscale=True, confidence=.75)
        listHasUtamo = list(hasUtamo)
        hasUtito = pyautogui.locateAll(parent + '\src\images\\utito.jpeg', food, grayscale=True, confidence=.75)
        listHasUtito = list(hasUtito)
        # User-configured toggles read from the Tk screen widgets.
        mustEatFood = bot.screen["eatFood"].get()
        mustUseAutoSpell = bot.screen["autoSpell"].get()
        mustUseHur = bot.screen["autoRun"].get()
        mustUseUtamo = bot.screen["autoUtamo"].get()
        mustUseUtito= bot.screen["autoUtito"].get()
        isAntiIdleOn= bot.screen["antiIdle"].get()
        life_to_use_sio = bot.screen["lifeToUseSio"].get()
        # Partner healing only runs once check_sio_bar has located the bar.
        if (self.already_checked):
            self.auto_sio_partner(life_to_use_sio, self.character.key_sio)
        if (len(lstHasHungry) != 0 and mustEatFood and self.character.key_eat_food != " "):
            pyautogui.press(self.character.key_eat_food)
        if (len(lstHasSpeed) == 0 and mustUseHur and self.character.key_spell_hur != " "):
            pyautogui.press(self.character.key_spell_hur)
        # NOTE(review): `and` binds tighter than `or`, so the timer clause
        # (190*5 <= FLAG_TIME_AUTO_UTAMO) triggers a re-cast even when
        # mustUseUtamo is off — confirm this is intentional.
        if (len(listHasUtamo) == 0 and self.character.key_auto_utamo != "" and mustUseUtamo or (190 * 5) <= (FLAG_TIME_AUTO_UTAMO)):
            pyautogui.press(self.character.key_auto_utamo)
            FLAG_TIME_AUTO_UTAMO = 0
        elif (len(listHasUtito) == 0 and mustUseUtito and self.character.key_auto_utito != " "):
            pyautogui.press(self.character.key_auto_utito)
        elif (isAntiIdleOn and (60 * 5) < FLAG_TIME_ANTI_IDLE):
            self.active_anti_idle()
            FLAG_TIME_ANTI_IDLE = 0
|
|
# Author: Samuel Marchal samuel.marchal@aalto.fi Sebastian Szyller sebastian.szyller@aalto.fi Mika Juuti mika.juuti@aalto.fi
# Copyright 2019 Secure Systems Group, Aalto University, https://ssg.aalto.fi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import autograd
from torch import nn
import numpy as np
import torch
def load_server(load_location='', model_class=None):
    """Instantiate `model_class` and load trained weights from disk.

    Parameters
    ----------
    load_location : str
        Path to a state-dict file previously produced by ``torch.save``.
    model_class : callable
        Zero-argument callable (typically an ``nn.Module`` subclass) that
        builds an uninitialized model of the matching architecture.

    Returns
    -------
    nn.Module
        The constructed model with the saved parameters loaded.

    Raises
    ------
    ValueError
        If `model_class` is not provided.  The previous code crashed with
        an opaque ``TypeError: 'NoneType' object is not callable``.
    """
    if model_class is None:
        raise ValueError("model_class must be a callable that builds the model")
    net = model_class()
    net.load_state_dict(torch.load(load_location))
    return net
# function responsible for making the prediction with your model
# adjust to your needs; by default, works with usual MNIST setup
def model_handle(oracle: nn.Module) -> callable:
    """Wrap `oracle` in a prediction closure for raw 28x28 MNIST queries.

    The returned callable reshapes a numpy query to (1, 1, 28, 28),
    rescales it into [-1, 1] via `to_range`, and returns the oracle's
    output as a numpy array.
    """
    def predict(img_query: np.ndarray) -> np.ndarray:
        img_torch = torch.from_numpy(img_query).view(1, 1, 28, 28).float()
        img_torch = to_range(img_torch, img_torch.min(), img_torch.max(), -1., 1.)
        # `autograd.Variable` has been a deprecated no-op since PyTorch 0.4;
        # run inference under no_grad instead of detaching via `.data`.
        with torch.no_grad():
            return oracle(img_torch).cpu().numpy()
    return predict
def to_range(x, old_min, old_max, new_min, new_max):
    """Linearly rescale `x` from [old_min, old_max] into [new_min, new_max].

    Adjust the target bounds to match the model's normalization range.
    """
    scale = (new_max - new_min) / (old_max - old_min)
    return new_min + (x - old_min) * scale
|
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
class Bandit:
    """One slot-machine arm with unit-variance Gaussian rewards.

    The running estimate starts at `INIT_VAL` (optimistic initialization)
    and the pull count starts at a tiny epsilon so UCB exploration terms
    never divide by zero.
    """

    def __init__(self, m, INIT_VAL):
        self.m = m             # true reward mean
        self.mean = INIT_VAL   # current sample-mean estimate
        self.N = 0.0000000001  # pull count (epsilon avoids div-by-zero)

    def pull(self):
        """Draw one reward: a unit-variance Gaussian centred on the true mean."""
        return self.m + np.random.randn()

    def push(self, x):
        """Fold the observed reward `x` into the running mean estimate."""
        self.N += 1
        self.mean = self.mean + (x - self.mean) / self.N
def experiment(m1, m2, m3, eps, N):
    """Run an N-step UCB1 bandit experiment over three Gaussian arms.

    NOTE(review): despite its name, `eps` is accepted only for call
    compatibility and is NOT used — arm selection is UCB1 (optimistic
    initialization at 10 plus a sqrt(2 ln t / n) exploration bonus), not
    epsilon-greedy.  The plot labels referring to `eps` are misleading.

    Prints each arm's final mean estimate and returns the cumulative
    average reward after every step as a numpy array of length N.
    """
    bandits = [Bandit(m1, 10), Bandit(m2, 10), Bandit(m3, 10)]
    rewards = []
    for t in range(N):
        # UCB1 score: estimated mean + sqrt(2 * ln(t+1) / pull_count).
        pos = np.argmax([b.mean + np.sqrt(2 * np.log(t + 1) / b.N) for b in bandits])
        reward = bandits[pos].pull()
        bandits[pos].push(reward)
        rewards.append(reward)
    rewards = np.array(rewards)
    cumulative_average = np.cumsum(rewards) / (np.arange(N) + 1)
    # Report the final per-arm estimates (kept from the original script).
    for b in bandits:
        print(b.mean)
    return cumulative_average
# NOTE(review): the first run uses N=10 while the other two use 1,000,000,
# so c_1 is not comparable with c_05/c_2; also `experiment` implements UCB1
# and ignores its eps argument, making these legend labels misleading.
c_1 = experiment(1.0, 2.0, 3.0, 0.1, 10)
c_05 = experiment(1.0, 2.0, 3.0, 0.05, 1000000)
c_2 = experiment(1.0, 2.0, 3.0, 0.2, 1000000)
# Overlay the cumulative-average reward curves on a log-scaled x axis.
plt.plot(c_1, label='eps = 0.1')
plt.plot(c_05, label='eps = 0.05')
plt.plot(c_2, label='eps = 0.2')
plt.legend()
plt.xscale('log')
plt.show()
|
|
# -*- coding: utf-8 -*-
import os
import configparser
import argparse
import numpy as np
import signal
import shutil
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import progressbar
import tensorflow as tf
from . import ae_factory as factory
from . import utils as u
def main():
    """Train an augmented-autoencoder experiment from an AE workspace.

    Reads the experiment config from $AE_WORKSPACE_PATH, builds the
    dataset/queue/encoder/decoder graph (TF1 style), and runs the training
    loop with periodic checkpointing and reconstruction snapshots.

    CLI: ``experiment_name`` ("group/name" or "name"); ``-d`` debug mode
    (visualize batches instead of training); ``-gen`` only generate the
    synthetic training data and exit.
    """
    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    if workspace_path is None:
        print('Please define a workspace path:\n')
        print('export AE_WORKSPACE_PATH=/path/to/workspace\n')
        exit(-1)
    # One-element array used as a mutable flag the SIGINT handler can set.
    # Fix: `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # identical dtype.
    gentle_stop = np.array((1,), dtype=bool)
    gentle_stop[0] = False
    def on_ctrl_c(signal, frame):
        # Request a graceful stop at the end of the current iteration.
        gentle_stop[0] = True
    signal.signal(signal.SIGINT, on_ctrl_c)
    parser = argparse.ArgumentParser()
    parser.add_argument("experiment_name")
    parser.add_argument("-d", action='store_true', default=False)
    parser.add_argument("-gen", action='store_true', default=False)
    arguments = parser.parse_args()
    # "group/name" -> (experiment_group, experiment_name).
    full_name = arguments.experiment_name.split('/')
    experiment_name = full_name.pop()
    experiment_group = full_name.pop() if len(full_name) > 0 else ''
    debug_mode = arguments.d
    generate_data = arguments.gen
    cfg_file_path = u.get_config_file_path(workspace_path, experiment_name, experiment_group)
    log_dir = u.get_log_dir(workspace_path, experiment_name, experiment_group)
    checkpoint_file = u.get_checkpoint_basefilename(log_dir)
    ckpt_dir = u.get_checkpoint_dir(log_dir)
    train_fig_dir = u.get_train_fig_dir(log_dir)
    dataset_path = u.get_dataset_path(workspace_path)
    if not os.path.exists(cfg_file_path):
        print('Could not find config file:\n')
        print('{}\n'.format(cfg_file_path))
        exit(-1)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    if not os.path.exists(train_fig_dir):
        os.makedirs(train_fig_dir)
    if not os.path.exists(dataset_path):
        os.makedirs(dataset_path)
    args = configparser.ConfigParser()
    args.read(cfg_file_path)
    # Keep a copy of the config next to the logs for reproducibility.
    shutil.copy2(cfg_file_path, log_dir)
    with tf.variable_scope(experiment_name):
        dataset = factory.build_dataset(dataset_path, args)
        queue = factory.build_queue(dataset, args)
        encoder = factory.build_encoder(queue.x, args, is_training=True)
        decoder = factory.build_decoder(queue.y, encoder, args, is_training=True)
        ae = factory.build_ae(encoder, decoder, args)
        # NOTE(review): `codebook` is built for its graph side effects and
        # not referenced below — presumably used by the later embed step.
        codebook = factory.build_codebook(encoder, dataset, args)
        train_op = factory.build_train_op(ae, args)
        saver = tf.train.Saver(save_relative_paths=True)
    num_iter = args.getint('Training', 'NUM_ITER') if not debug_mode else 100000
    save_interval = args.getint('Training', 'SAVE_INTERVAL')
    model_type = args.get('Dataset', 'MODEL')
    if model_type == 'dsprites':
        dataset.get_sprite_training_images(args)
    else:
        dataset.get_training_images(dataset_path, args)
        dataset.load_bg_images(dataset_path)
    if generate_data:
        print('finished generating synthetic training data for ' + experiment_name)
        print('exiting...')
        exit()
    widgets = ['Training: ', progressbar.Percentage(),
               ' ', progressbar.Bar(),
               ' ', progressbar.Counter(), ' / %s' % num_iter,
               ' ', progressbar.ETA(), ' ']
    bar = progressbar.ProgressBar(maxval=num_iter, widgets=widgets)
    gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.9)
    config = tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session(config=config) as sess:
        # Resume from the latest checkpoint when one exists.
        chkpt = tf.train.get_checkpoint_state(ckpt_dir)
        if chkpt and chkpt.model_checkpoint_path:
            saver.restore(sess, chkpt.model_checkpoint_path)
        else:
            sess.run(tf.global_variables_initializer())
        merged_loss_summary = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(ckpt_dir, sess.graph)
        if not debug_mode:
            print('Training with %s model' % args.get('Dataset', 'MODEL'), os.path.basename(args.get('Paths', 'MODEL_PATH')))
            bar.start()
        queue.start(sess)
        for i in range(ae.global_step.eval(), num_iter):
            if not debug_mode:
                sess.run(train_op)
                if i % 10 == 0:
                    loss = sess.run(merged_loss_summary)
                    summary_writer.add_summary(loss, i)
                    bar.update(i)
                if (i + 1) % save_interval == 0:
                    saver.save(sess, checkpoint_file, global_step=ae.global_step)
                    # Dump a tiled input / reconstruction / target snapshot.
                    this_x, this_y = sess.run([queue.x, queue.y])
                    reconstr_train = sess.run(decoder.x, feed_dict={queue.x: this_x})
                    train_imgs = np.hstack((u.tiles(this_x, 4, 4), u.tiles(reconstr_train, 4, 4), u.tiles(this_y, 4, 4)))
                    cv2.imwrite(os.path.join(train_fig_dir, 'training_images_%s.png' % i), train_imgs * 255)
            else:
                # Debug mode: show a sample batch and wait for a key (ESC quits).
                this_x, this_y = sess.run([queue.x, queue.y])
                reconstr_train = sess.run(decoder.x, feed_dict={queue.x: this_x})
                cv2.imshow('sample batch', np.hstack((u.tiles(this_x, 3, 3), u.tiles(reconstr_train, 3, 3), u.tiles(this_y, 3, 3))))
                k = cv2.waitKey(0)
                if k == 27:
                    break
            if gentle_stop[0]:
                break
        queue.stop(sess)
        if not debug_mode:
            bar.finish()
    if not gentle_stop[0] and not debug_mode:
        print('To create the embedding run:\n')
        print('ae_embed {}\n'.format(full_name))
if __name__ == '__main__':
    main()
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2015, PyStan developers
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
#-----------------------------------------------------------------------------
from pystan._compat import PY2, string_types, implements_to_string, izip
from collections import OrderedDict
if PY2:
from collections import Callable, Iterable
else:
from collections.abc import Callable, Iterable
import datetime
import io
import itertools
import logging
import numbers
import os
import platform
import shutil
import string
import sys
import tempfile
import time
import distutils
from distutils.core import Extension
import Cython
from Cython.Build.Inline import _get_build_extension
from Cython.Build.Dependencies import cythonize
import numpy as np
import pystan.api
import pystan.misc
import pystan.diagnostics
logger = logging.getLogger('pystan')
def load_module(module_name, module_path):
    """Load the module named `module_name` from `module_path`
    independently of the Python version."""
    if sys.version_info >= (3,0):
        # Python 3: enable on-the-fly Cython compilation, then import by
        # name.  NOTE: appends to sys.path permanently.
        import pyximport
        pyximport.install()
        sys.path.append(module_path)
        return __import__(module_name)
    else:
        # Python 2: use the legacy `imp` loading machinery.
        import imp
        module_info = imp.find_module(module_name, [module_path])
        return imp.load_module(module_name, *module_info)
def _map_parallel(function, args, n_jobs):
    """multiprocessing.Pool(processors=n_jobs).map with some error checking.

    Falls back to serial ``map`` when multiprocessing is disabled via the
    JOBLIB_MULTIPROCESSING environment variable, unavailable on the
    platform, or when ``n_jobs`` is 0/1.  ``n_jobs == -1`` uses all cores.
    """
    # Following the error checking found in joblib
    multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
    if multiprocessing:
        try:
            import multiprocessing
            import multiprocessing.pool
        except ImportError:
            multiprocessing = None
    if sys.platform.startswith("win") and PY2:
        msg = "Multiprocessing is not supported on Windows with Python 2.X. Setting n_jobs=1"
        logger.warning(msg)
        n_jobs = 1
    # 2nd stage: validate that locking is available on the system and
    # issue a warning if not
    if multiprocessing:
        try:
            _sem = multiprocessing.Semaphore()
            del _sem  # cleanup
        except (ImportError, OSError) as e:
            multiprocessing = None
            logger.warning('{}. _map_parallel will operate in serial mode'.format(e))
    if multiprocessing and int(n_jobs) not in (0, 1):
        if n_jobs == -1:
            n_jobs = None
        # Fix: create the pool OUTSIDE the try block.  Previously, if
        # Pool() itself raised, the `finally` clause referenced an unbound
        # `pool` and masked the real error with a NameError.
        pool = multiprocessing.Pool(processes=n_jobs)
        try:
            map_result = pool.map(function, args)
        finally:
            pool.close()
            pool.join()
    else:
        map_result = list(map(function, args))
    return map_result
# NOTE: StanModel instance stores references to a compiled, uninstantiated
# C++ model.
@implements_to_string
class StanModel:
"""
Model described in Stan's modeling language compiled from C++ code.
Instances of StanModel are typically created indirectly by the functions
`stan` and `stanc`.
Parameters
----------
file : string {'filename', 'file'}
If filename, the string passed as an argument is expected to
be a filename containing the Stan model specification.
If file, the object passed must have a 'read' method (file-like
object) that is called to fetch the Stan model specification.
charset : string, 'utf-8' by default
If bytes or files are provided, this charset is used to decode.
model_name: string, 'anon_model' by default
A string naming the model. If none is provided 'anon_model' is
the default. However, if `file` is a filename, then the filename
will be used to provide a name.
model_code : string
A string containing the Stan model specification. Alternatively,
the model may be provided with the parameter `file`.
stanc_ret : dict
A dict returned from a previous call to `stanc` which can be
used to specify the model instead of using the parameter `file` or
`model_code`.
include_paths : list of strings
Paths for #include files defined in Stan program code.
boost_lib : string
The path to a version of the Boost C++ library to use instead of
the one supplied with PyStan.
eigen_lib : string
The path to a version of the Eigen C++ library to use instead of
the one in the supplied with PyStan.
verbose : boolean, False by default
Indicates whether intermediate output should be piped to the console.
This output may be useful for debugging.
allow_undefined : boolean, False by default
If True, the C++ code can be written even if there are undefined
functions.
includes : list, None by default
If not None, the elements of this list will be assumed to be the
names of custom C++ header files that should be included.
include_dirs : list, None by default
If not None, the directories in this list are added to the search
path of the compiler.
kwargs : keyword arguments
Additional arguments passed to `stanc`.
Attributes
----------
model_name : string
model_code : string
Stan code for the model.
model_cpp : string
C++ code for the model.
module : builtins.module
Python module created by compiling the C++ code for the model.
Methods
-------
show
Print the Stan model specification.
sampling
Draw samples from the model.
optimizing
Obtain a point estimate by maximizing the log-posterior.
get_cppcode
Return the C++ code for the module.
get_cxxflags
Return the 'CXXFLAGS' used for compiling the model.
get_include_paths
Return include_paths used for compiled model.
See also
--------
stanc: Compile a Stan model specification
stan: Fit a model using Stan
Notes
-----
More details of Stan, including the full user's guide and
reference manual can be found at <URL: http://mc-stan.org/>.
There are three ways to specify the model's code for `stan_model`.
1. parameter `model_code`, containing a string to whose value is
the Stan model specification,
2. parameter `file`, indicating a file (or a connection) from
which to read the Stan model specification, or
3. parameter `stanc_ret`, indicating the re-use of a model
generated in a previous call to `stanc`.
References
----------
The Stan Development Team (2013) *Stan Modeling Language User's
Guide and Reference Manual*. <URL: http://mc-stan.org/>.
Examples
--------
>>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
>>> model_code; m = StanModel(model_code=model_code)
... # doctest: +ELLIPSIS
'parameters ...
>>> m.model_name
'anon_model'
"""
def __init__(self, file=None, charset='utf-8', model_name="anon_model",
             model_code=None, stanc_ret=None, include_paths=None,
             boost_lib=None, eigen_lib=None, verbose=False,
             obfuscate_model_name=True, extra_compile_args=None,
             allow_undefined=False, include_dirs=None, includes=None):
    """Compile the Stan program into an importable extension module.

    Translates the model to C++ via `stanc` (unless a `stanc_ret` dict is
    supplied), renders the Cython wrapper template, compiles everything in
    a temporary directory, imports the resulting module, and keeps its raw
    bytes for pickling.  See the class docstring for parameter details.
    """
    # Translate Stan -> C++ unless the caller already ran stanc.
    if stanc_ret is None:
        stanc_ret = pystan.api.stanc(file=file,
                                     charset=charset,
                                     model_code=model_code,
                                     model_name=model_name,
                                     verbose=verbose,
                                     include_paths=include_paths,
                                     obfuscate_model_name=obfuscate_model_name,
                                     allow_undefined=allow_undefined)
    # Validate the stanc result's shape and status before trusting it.
    if not isinstance(stanc_ret, dict):
        raise ValueError("stanc_ret must be an object returned by stanc.")
    stanc_ret_keys = {'status', 'model_code', 'model_cppname',
                      'cppcode', 'model_name', 'include_paths'}
    if not all(n in stanc_ret_keys for n in stanc_ret):
        raise ValueError("stanc_ret lacks one or more of the keys: "
                         "{}".format(str(stanc_ret_keys)))
    elif stanc_ret['status'] != 0:  # success == 0
        raise ValueError("stanc_ret is not a successfully returned "
                         "dictionary from stanc.")
    self.model_cppname = stanc_ret['model_cppname']
    self.model_name = stanc_ret['model_name']
    self.model_code = stanc_ret['model_code']
    self.model_cppcode = stanc_ret['cppcode']
    self.model_include_paths = stanc_ret['include_paths']
    if allow_undefined or include_dirs or includes:
        logger.warning("External C++ interface is an experimental feature. Be careful.")
    msg = "COMPILING THE C++ CODE FOR MODEL {} NOW."
    logger.info(msg.format(self.model_name))
    if verbose:
        msg = "OS: {}, Python: {}, Cython {}".format(sys.platform,
                                                     sys.version,
                                                     Cython.__version__)
        logger.info(msg)
    if boost_lib is not None:
        # FIXME: allow boost_lib, eigen_lib to be specified
        raise NotImplementedError
    if eigen_lib is not None:
        raise NotImplementedError
    # module_name needs to be unique so that each model instance has its own module
    nonce = abs(hash((self.model_name, time.time())))
    self.module_name = 'stanfit4{}_{}'.format(self.model_name, nonce)
    lib_dir = tempfile.mkdtemp(prefix='pystan_')
    pystan_dir = os.path.dirname(__file__)
    if include_dirs is None:
        include_dirs = []
    elif not isinstance(include_dirs, list):
        raise TypeError("'include_dirs' needs to be a list: type={}".format(type(include_dirs)))
    # Headers for Stan plus its bundled math/Eigen/Boost/SUNDIALS libraries.
    include_dirs += [
        lib_dir,
        pystan_dir,
        os.path.join(pystan_dir, "stan", "src"),
        os.path.join(pystan_dir, "stan", "lib", "stan_math"),
        os.path.join(pystan_dir, "stan", "lib", "stan_math", "lib", "eigen_3.3.3"),
        os.path.join(pystan_dir, "stan", "lib", "stan_math", "lib", "boost_1.69.0"),
        os.path.join(pystan_dir, "stan", "lib", "stan_math", "lib", "sundials_4.1.0", "include"),
        np.get_include(),
    ]
    model_cpp_file = os.path.join(lib_dir, self.model_cppname + '.hpp')
    # Splice user-supplied #include lines just before the first statement
    # marker so custom C++ functions are visible to the model code.
    if includes is not None:
        code = ""
        for fn in includes:
            code += '#include "{0}"\n'.format(fn)
        ind = self.model_cppcode.index("static int current_statement_begin__;")
        self.model_cppcode = "\n".join([
            self.model_cppcode[:ind], code, self.model_cppcode[ind:]
        ])
    with io.open(model_cpp_file, 'w', encoding='utf-8') as outfile:
        outfile.write(self.model_cppcode)
    # Render the Cython wrapper from the stanfit4model.pyx template.
    pyx_file = os.path.join(lib_dir, self.module_name + '.pyx')
    pyx_template_file = os.path.join(pystan_dir, 'stanfit4model.pyx')
    with io.open(pyx_template_file, 'r', encoding='utf-8') as infile:
        s = infile.read()
        template = string.Template(s)
    with io.open(pyx_file, 'w', encoding='utf-8') as outfile:
        s = template.safe_substitute(model_cppname=self.model_cppname)
        outfile.write(s)
    stan_macros = [
        ('BOOST_RESULT_OF_USE_TR1', None),
        ('BOOST_NO_DECLTYPE', None),
        ('BOOST_DISABLE_ASSERTS', None),
    ]
    build_extension = _get_build_extension()
    # compile stan models with optimization (-O2)
    # (stanc is compiled without optimization (-O0) currently, see #33)
    if extra_compile_args is None:
        extra_compile_args = []
    if platform.platform().startswith('Win'):
        if build_extension.compiler in (None, 'msvc'):
            logger.warning("MSVC compiler is not supported")
            extra_compile_args = [
                '/EHsc',
                '-DBOOST_DATE_TIME_NO_LIB',
                '/std:c++14',
            ] + extra_compile_args
        else:
            # Windows, but not msvc, likely mingw
            # fix bug in MingW-W64
            # use posix threads
            extra_compile_args = [
                '-O2',
                '-ftemplate-depth-256',
                '-Wno-unused-function',
                '-Wno-uninitialized',
                '-std=c++1y',
                "-D_hypot=hypot",
                "-pthread",
                "-fexceptions",
            ] + extra_compile_args
    else:
        # linux or macOS
        extra_compile_args = [
            '-O2',
            '-ftemplate-depth-256',
            '-Wno-unused-function',
            '-Wno-uninitialized',
            '-std=c++1y',
        ] + extra_compile_args
    distutils.log.set_verbosity(verbose)
    extension = Extension(name=self.module_name,
                          language="c++",
                          sources=[pyx_file],
                          define_macros=stan_macros,
                          include_dirs=include_dirs,
                          extra_compile_args=extra_compile_args)
    cython_include_dirs = ['.', pystan_dir]
    build_extension.extensions = cythonize([extension],
                                           include_path=cython_include_dirs,
                                           quiet=not verbose)
    build_extension.build_temp = os.path.dirname(pyx_file)
    build_extension.build_lib = lib_dir
    redirect_stderr = not verbose and pystan.misc._has_fileno(sys.stderr)
    if redirect_stderr:
        # silence stderr for compilation
        orig_stderr = pystan.misc._redirect_stderr()
    try:
        build_extension.run()
    finally:
        if redirect_stderr:
            # restore stderr
            os.dup2(orig_stderr, sys.stderr.fileno())
    self.module = load_module(self.module_name, lib_dir)
    self.module_filename = os.path.basename(self.module.__file__)
    # once the module is in memory, we no longer need the file on disk
    # but we do need a copy of the file for pickling and the module name
    with io.open(os.path.join(lib_dir, self.module_filename), 'rb') as f:
        self.module_bytes = f.read()
    shutil.rmtree(lib_dir, ignore_errors=True)
    self.fit_class = getattr(self.module, "StanFit4Model")
def __str__(self):
    """Render the model name together with its full Stan source."""
    # NOTE: returns unicode even for Python 2.7, implements_to_string
    # decorator creates __unicode__ and __str__
    template = u"StanModel object '{}' coded as follows:\n{}"
    return template.format(self.model_name, self.model_code)
def show(self):
    """Print the Stan model specification to stdout."""
    print(self)
@property
def dso(self):
    """Deprecated alias for `module`, kept for pre-2.8 callers."""
    # warning added in PyStan 2.8.0
    logger.warning('DeprecationWarning: Accessing the module with `dso` is deprecated and will be removed in a future version. '\
                   'Use `module` instead.')
    return self.module
def get_cppcode(self):
    """Return the C++ code generated for the model."""
    return self.model_cppcode
def get_cxxflags(self):
    """Return the 'CXXFLAGS' used for compiling the model (not implemented)."""
    # FIXME: implement this?
    raise NotImplementedError
def get_include_paths(self):
    """Return the include_paths used for the compiled model."""
    return self.model_include_paths
def __getstate__(self):
    """Specify how instances are to be pickled
    self.module is unpicklable, for example.
    """
    state = self.__dict__.copy()
    # The imported extension module and the class pulled out of it cannot
    # be pickled; __setstate__ rebuilds both from `module_bytes`.
    del state['module']
    del state['fit_class']
    return state
def __setstate__(self, state):
    """Restore a pickled instance: rewrite the compiled extension module's
    bytes to a temporary directory and re-import it."""
    self.__dict__.update(state)
    lib_dir = tempfile.mkdtemp()
    with io.open(os.path.join(lib_dir, self.module_filename), 'wb') as f:
        f.write(self.module_bytes)
    try:
        self.module = load_module(self.module_name, lib_dir)
        self.fit_class = getattr(self.module, "StanFit4Model")
    except Exception as e:
        # Best effort: the instance survives, but fitting will not work
        # until the model is recompiled.
        logger.warning(e)
        logger.warning("Something went wrong while unpickling "
                       "the StanModel. Consider recompiling.")
    # once the module is in memory, we no longer need the file on disk
    shutil.rmtree(lib_dir, ignore_errors=True)
def optimizing(self, data=None, seed=None,
               init='random', sample_file=None, algorithm=None,
               verbose=False, as_vector=True, **kwargs):
    """Obtain a point estimate by maximizing the joint posterior.
    Parameters
    ----------
    data : dict
        A Python dictionary providing the data for the model. Variables
        for Stan are stored in the dictionary as expected. Variable
        names are the keys and the values are their associated values.
        Stan only accepts certain kinds of values; see Notes.
    seed : int or np.random.RandomState, optional
        The seed, a positive integer for random number generation. Only
        one seed is needed when multiple chains are used, as the other
        chain's seeds are generated from the first chain's to prevent
        dependency among random number streams. By default, seed is
        ``random.randint(0, MAX_UINT)``.
    init : {0, '0', 'random', function returning dict, list of dict}, optional
        Specifies how initial parameter values are chosen:
        - 0 or '0' initializes all to be zero on the unconstrained support.
        - 'random' generates random initial values. An optional parameter
          `init_r` controls the range of randomly generated initial values
          for parameters in terms of their unconstrained support;
        - list of size equal to the number of chains (`chains`), where the
          list contains a dict with initial parameter values;
        - function returning a dict with initial parameter values. The
          function may take an optional argument `chain_id`.
    sample_file : string, optional
        File name specifying where samples for *all* parameters and other
        saved quantities will be written. If not provided, no samples
        will be written. If the folder given is not writable, a temporary
        directory will be used. When there are multiple chains, an
        underscore and chain number are appended to the file name.
        By default do not write samples to file.
    algorithm : {"LBFGS", "BFGS", "Newton"}, optional
        Name of optimization algorithm to be used. Default is LBFGS.
    verbose : boolean, optional
        Indicates whether intermediate output should be piped to the console.
        This output may be useful for debugging. False by default.
    as_vector : boolean, optional
        Indicates an OrderedDict will be returned rather than a nested
        dictionary with keys 'par' and 'value'.
    Returns
    -------
    optim : OrderedDict
        Depending on `as_vector`, returns either an OrderedDict having
        parameters as keys and point estimates as values or an OrderedDict
        with components 'par' and 'value'. ``optim['par']`` is a dictionary
        of point estimates, indexed by the parameter name.
        ``optim['value']`` stores the value of the log-posterior (up to an
        additive constant, the ``lp__`` in Stan) corresponding to the point
        identified by `optim`['par'].
    Other parameters
    ----------------
    iter : int, optional
        The maximum number of iterations.
    save_iterations : bool, optional
    refresh : int, optional
    init_alpha : float, optional
        For BFGS and LBFGS, default is 0.001.
    tol_obj : float, optional
        For BFGS and LBFGS, default is 1e-12.
    tol_rel_obj : int, optional
        For BFGS and LBFGS, default is 1e4.
    tol_grad : float, optional
        For BFGS and LBFGS, default is 1e-8.
    tol_rel_grad : float, optional
        For BFGS and LBFGS, default is 1e7.
    tol_param : float, optional
        For BFGS and LBFGS, default is 1e-8.
    history_size : int, optional
        For LBFGS, default is 5.
    Refer to the manuals for both CmdStan and Stan for more details.
    Examples
    --------
    >>> from pystan import StanModel
    >>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}')
    >>> f = m.optimizing()
    """
    algorithms = {"BFGS", "LBFGS", "Newton"}
    if algorithm is None:
        algorithm = "LBFGS"
    if algorithm not in algorithms:
        raise ValueError("Algorithm must be one of {}".format(algorithms))
    if data is None:
        data = {}
    seed = pystan.misc._check_seed(seed)
    fit = self.fit_class(data, seed)
    m_pars = fit._get_param_names()
    p_dims = fit._get_param_dims()
    # lp__ is reported as the optimum's value, not as a model parameter.
    if 'lp__' in m_pars:
        idx_of_lp = m_pars.index('lp__')
        del m_pars[idx_of_lp]
        del p_dims[idx_of_lp]
    # Normalize `init`: numbers become strings, callables are invoked now.
    if isinstance(init, numbers.Number):
        init = str(init)
    elif isinstance(init, Callable):
        init = init()
    elif not isinstance(init, Iterable) and \
            not isinstance(init, string_types):
        raise ValueError("Wrong specification of initial values.")
    stan_args = dict(init=init,
                     seed=seed,
                     method="optim",
                     algorithm=algorithm)
    if sample_file is not None:
        stan_args['sample_file'] = pystan.misc._writable_sample_file(sample_file)
    # check that arguments in kwargs are valid
    valid_args = {"iter", "save_iterations", "refresh",
                  "init_alpha", "tol_obj", "tol_grad", "tol_param",
                  "tol_rel_obj", "tol_rel_grad", "history_size"}
    for arg in kwargs:
        if arg not in valid_args:
            raise ValueError("Parameter `{}` is not recognized.".format(arg))
    # This check is is to warn users of older versions of PyStan
    if kwargs.get('method'):
        raise ValueError('`method` is no longer used. Specify `algorithm` instead.')
    stan_args.update(kwargs)
    stan_args = pystan.misc._get_valid_stan_args(stan_args)
    ret, sample = fit._call_sampler(stan_args)
    pars = pystan.misc._par_vector2dict(sample['par'], m_pars, p_dims)
    if not as_vector:
        return OrderedDict([('par', pars), ('value', sample['value'])])
    else:
        return pars
    def sampling(self, data=None, pars=None, chains=4, iter=2000,
                 warmup=None, thin=1, seed=None, init='random',
                 sample_file=None, diagnostic_file=None, verbose=False,
                 algorithm=None, control=None, n_jobs=-1, **kwargs):
        """Draw samples from the model.

        Parameters
        ----------
        data : dict
            A Python dictionary providing the data for the model. Variables
            for Stan are stored in the dictionary as expected. Variable
            names are the keys and the values are their associated values.
            Stan only accepts certain kinds of values; see Notes.
        pars : list of string, optional
            A list of strings indicating parameters of interest. By default
            all parameters specified in the model will be stored.
        chains : int, optional
            Positive integer specifying number of chains. 4 by default.
        iter : int, 2000 by default
            Positive integer specifying how many iterations for each chain
            including warmup.
        warmup : int, iter//2 by default
            Positive integer specifying number of warmup (aka burn-in) iterations.
            As `warmup` also specifies the number of iterations used for step-size
            adaptation, warmup samples should not be used for inference.
            `warmup=0` forced if `algorithm="Fixed_param"`.
        thin : int, 1 by default
            Positive integer specifying the period for saving samples.
        seed : int or np.random.RandomState, optional
            The seed, a positive integer for random number generation. Only
            one seed is needed when multiple chains are used, as the other
            chain's seeds are generated from the first chain's to prevent
            dependency among random number streams. By default, seed is
            ``random.randint(0, MAX_UINT)``.
        algorithm : {"NUTS", "HMC", "Fixed_param"}, optional
            One of algorithms that are implemented in Stan such as the No-U-Turn
            sampler (NUTS, Hoffman and Gelman 2011), static HMC, or ``Fixed_param``.
            Default is NUTS.
        init : {0, '0', 'random', function returning dict, list of dict}, optional
            Specifies how initial parameter values are chosen: 0 or '0'
            initializes all to be zero on the unconstrained support; 'random'
            generates random initial values; list of size equal to the number
            of chains (`chains`), where the list contains a dict with initial
            parameter values; function returning a dict with initial parameter
            values. The function may take an optional argument `chain_id`.
        sample_file : string, optional
            File name specifying where samples for *all* parameters and other
            saved quantities will be written. If not provided, no samples
            will be written. If the folder given is not writable, a temporary
            directory will be used. When there are multiple chains, an underscore
            and chain number are appended to the file name. By default do not
            write samples to file.
        verbose : boolean, False by default
            Indicates whether intermediate output should be piped to the
            console. This output may be useful for debugging.
        control : dict, optional
            A dictionary of parameters to control the sampler's behavior. Default
            values are used if control is not specified. The following are
            adaptation parameters for sampling algorithms.
            These are parameters used in Stan with similar names:
            - `adapt_engaged` : bool, default True
            - `adapt_gamma` : float, positive, default 0.05
            - `adapt_delta` : float, between 0 and 1, default 0.8
            - `adapt_kappa` : float, between default 0.75
            - `adapt_t0` : float, positive, default 10
            In addition, the algorithm HMC (called 'static HMC' in Stan) and NUTS
            share the following parameters:
            - `stepsize`: float or list of floats, positive
            - `stepsize_jitter`: float, between 0 and 1
            - `metric` : str, {"unit_e", "diag_e", "dense_e"}
            - `inv_metric` : np.ndarray or str
            In addition, depending on which algorithm is used, different parameters
            can be set as in Stan for sampling. For the algorithm HMC we can set
            - `int_time`: float, positive
            For algorithm NUTS, we can set
            - `max_treedepth` : int, positive
        n_jobs : int, optional
            Sample in parallel. If -1 all CPUs are used. If 1, no parallel
            computing code is used at all, which is useful for debugging.

        Returns
        -------
        fit : StanFit4Model
            Instance containing the fitted results.

        Other parameters
        ----------------
        chain_id : int or iterable of int, optional
            `chain_id` can be a vector to specify the chain_id for all chains or
            an integer. For the former case, they should be unique. For the latter,
            the sequence of integers starting from the given `chain_id` are used
            for all chains.
        init_r : float, optional
            `init_r` is only valid if `init` == "random". In this case, the initial
            values are simulated from [-`init_r`, `init_r`] rather than using the
            default interval (see the manual of Stan).
        test_grad : bool, optional
            If `test_grad` is ``True``, Stan will not do any sampling. Instead,
            the gradient calculation is tested and printed out and the fitted
            StanFit4Model object is in test gradient mode. By default, it is
            ``False``.
        append_samples : bool, optional
        refresh : int, optional
            Argument `refresh` can be used to control how to indicate the progress
            during sampling (i.e. show the progress every `refresh` iterations).
            By default, `refresh` is `max(iter/10, 1)`.
        check_hmc_diagnostics : bool, optional
            After sampling run `pystan.diagnostics.check_hmc_diagnostics` function.
            Default is `True`. Checks for n_eff and rhat skipped if the flat
            parameter count is higher than 1000, unless user explicitly defines
            ``check_hmc_diagnostics=True``.

        Examples
        --------
        >>> from pystan import StanModel
        >>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}')
        >>> m.sampling(iter=100)
        """
        # NOTE: in this function, iter masks iter() the python function.
        # If this ever turns out to be a problem just add:
        # iter_ = iter
        # del iter  # now builtins.iter is available
        if diagnostic_file is not None:
            raise NotImplementedError("diagnostic_file not supported yet")
        if data is None:
            data = {}
        if warmup is None:
            warmup = int(iter // 2)
        # iter/thin/warmup must be integral before the arithmetic below.
        if not all(isinstance(arg, numbers.Integral) for arg in (iter, thin, warmup)):
            raise ValueError('only integer values allowed as `iter`, `thin`, and `warmup`.')
        algorithms = ("NUTS", "HMC", "Fixed_param")  # , "Metropolis")
        algorithm = "NUTS" if algorithm is None else algorithm
        if algorithm not in algorithms:
            raise ValueError("Algorithm must be one of {}".format(algorithms))
        # Fixed_param does no adaptation, so warmup iterations are meaningless.
        if algorithm=="Fixed_param":
            if warmup > 0:
                logger.warning("`warmup=0` forced with `algorithm=\"Fixed_param\"`.")
            warmup = 0
        # NUTS with adaptation enabled needs at least one warmup iteration.
        elif algorithm == "NUTS" and warmup == 0:
            if (isinstance(control, dict) and control.get("adapt_engaged", True)) or control is None:
                raise ValueError("Warmup samples must be greater than 0 when adaptation is enabled (`adapt_engaged=True`)")
        seed = pystan.misc._check_seed(seed)
        fit = self.fit_class(data, seed)
        m_pars = fit._get_param_names()
        p_dims = fit._get_param_dims()
        if isinstance(pars, string_types):
            pars = [pars]
        if pars is not None and len(pars) > 0:
            # Implementation note: this does not set the params_oi for the
            # instances of stan_fit which actually make the calls to
            # call_sampler. This is because we need separate instances of
            # stan_fit in each thread/process. So update_param_oi needs to
            # be called in every stan_fit instance.
            fit._update_param_oi(pars)
            if not all(p in m_pars for p in pars):
                pars = np.asarray(pars)
                unmatched = pars[np.invert(np.in1d(pars, m_pars))]
                msg = "No parameter(s): {}; sampling not done."
                raise ValueError(msg.format(', '.join(unmatched)))
        else:
            pars = m_pars
        if chains < 1:
            raise ValueError("The number of chains is less than one; sampling"
                             "not done.")
        # Popped (not read from kwargs later) so the valid_args check below
        # does not reject it.
        check_hmc_diagnostics = kwargs.pop('check_hmc_diagnostics', None)
        # check that arguments in kwargs are valid
        valid_args = {"chain_id", "init_r", "test_grad", "append_samples", "refresh", "control"}
        for arg in kwargs:
            if arg not in valid_args:
                raise ValueError("Parameter `{}` is not recognized.".format(arg))
        args_list = pystan.misc._config_argss(chains=chains, iter=iter,
                                              warmup=warmup, thin=thin,
                                              init=init, seed=seed, sample_file=sample_file,
                                              diagnostic_file=diagnostic_file,
                                              algorithm=algorithm,
                                              control=control, **kwargs)
        # number of samples saved after thinning
        warmup2 = 1 + (warmup - 1) // thin
        n_kept = 1 + (iter - warmup - 1) // thin
        n_save = n_kept + warmup2
        if n_jobs is None:
            n_jobs = -1
        # disable multiprocessing if we only have a single chain
        if chains == 1:
            n_jobs = 1
        assert len(args_list) == chains
        # One (data, per-chain args, pars) tuple per chain, fanned out to
        # worker processes/threads by _map_parallel.
        call_sampler_args = izip(itertools.repeat(data), args_list, itertools.repeat(pars))
        call_sampler_star = self.module._call_sampler_star
        ret_and_samples = _map_parallel(call_sampler_star, call_sampler_args, n_jobs)
        samples = [smpl for _, smpl in ret_and_samples]
        # _organize_inits strips out lp__ (RStan does it in this method)
        inits_used = pystan.misc._organize_inits([s['inits'] for s in samples], m_pars, p_dims)
        # Per-chain permutations of the kept draws, seeded from chain 0's
        # seed so extraction order is reproducible.
        random_state = np.random.RandomState(args_list[0]['seed'])
        perm_lst = [random_state.permutation(int(n_kept)) for _ in range(chains)]
        fnames_oi = fit._get_param_fnames_oi()
        n_flatnames = len(fnames_oi)
        fit.sim = {'samples': samples,
                   # rstan has this; name clashes with 'chains' in samples[0]['chains']
                   'chains': len(samples),
                   'iter': iter,
                   'warmup': warmup,
                   'thin': thin,
                   'n_save': [n_save] * chains,
                   'warmup2': [warmup2] * chains,
                   'permutation': perm_lst,
                   'pars_oi': fit._get_param_names_oi(),
                   'dims_oi': fit._get_param_dims_oi(),
                   'fnames_oi': fnames_oi,
                   'n_flatnames': n_flatnames}
        fit.model_name = self.model_name
        fit.model_pars = m_pars
        fit.par_dims = p_dims
        # mode 1 == test-gradient mode (no draws); see `test_grad` above.
        fit.mode = 0 if not kwargs.get('test_grad') else 1
        fit.inits = inits_used
        fit.stan_args = args_list
        fit.stanmodel = self
        fit.date = datetime.datetime.now()
        # Clean up the temporary directory holding the inverse-metric file,
        # if one was written for this run.
        if args_list[0]["metric_file_flag"]:
            inv_metric_dir, _ = os.path.split(args_list[0]["metric_file"])
            shutil.rmtree(inv_metric_dir, ignore_errors=True)
        # If problems are found in the fit, this will print diagnostic
        # messages.
        if (check_hmc_diagnostics is None and algorithm in ("NUTS", "HMC")) and fit.mode != 1:
            if n_flatnames > 1000:
                msg = "Maximum (flat) parameter count (1000) exceeded: " +\
                      "skipping diagnostic tests for n_eff and Rhat.\n" +\
                      "To run all diagnostics call pystan.check_hmc_diagnostics(fit)"
                logger.warning(msg)
                checks = ["divergence", "treedepth", "energy"]
                pystan.diagnostics.check_hmc_diagnostics(fit, checks=checks)  # noqa
            else:
                pystan.diagnostics.check_hmc_diagnostics(fit)  # noqa
        elif (check_hmc_diagnostics and algorithm in ("NUTS", "HMC")) and fit.mode != 1:
            pystan.diagnostics.check_hmc_diagnostics(fit)  # noqa
        return fit
    def vb(self, data=None, pars=None, iter=10000,
           seed=None, init='random', sample_file=None, diagnostic_file=None, verbose=False,
           algorithm=None, **kwargs):
        """Call Stan's variational Bayes methods.

        Parameters
        ----------
        data : dict
            A Python dictionary providing the data for the model. Variables
            for Stan are stored in the dictionary as expected. Variable
            names are the keys and the values are their associated values.
            Stan only accepts certain kinds of values; see Notes.
        pars : list of string, optional
            A list of strings indicating parameters of interest. By default
            all parameters specified in the model will be stored.
        seed : int or np.random.RandomState, optional
            The seed, a positive integer for random number generation. Only
            one seed is needed when multiple chains are used, as the other
            chain's seeds are generated from the first chain's to prevent
            dependency among random number streams. By default, seed is
            ``random.randint(0, MAX_UINT)``.
        sample_file : string, optional
            File name specifying where samples for *all* parameters and other
            saved quantities will be written. If not provided, samples will be
            written to a temporary file and read back in. If the folder given is
            not writable, a temporary directory will be used. When there are
            multiple chains, an underscore and chain number are appended to the
            file name. By default do not write samples to file.
        diagnostic_file : string, optional
            File name specifying where diagnostics for the variational fit
            will be written.
        iter : int, 10000 by default
            Positive integer specifying how many iterations for each chain
            including warmup.
        algorithm : {'meanfield', 'fullrank'}
            One of "meanfield" and "fullrank" indicating which
            variational inference algorithm is used. meanfield: mean-field
            approximation; fullrank: full-rank covariance. The default is
            'meanfield'.
        verbose : boolean, False by default
            Indicates whether intermediate output should be piped to the
            console. This output may be useful for debugging.

        Other optional parameters, refer to the manuals for both CmdStan
        and Stan.
        - `iter`: the maximum number of iterations, defaults to 10000
        - `grad_samples` the number of samples for Monte Carlo estimate of
          gradients, defaults to 1.
        - `elbo_samples` the number of samples for Monte Carlo estimate of ELBO
          (objective function), defaults to 100. (ELBO stands for "the evidence
          lower bound".)
        - `eta` positive stepsize weighting parameters for variational
          inference but is ignored if adaptation is engaged, which is the case
          by default.
        - `adapt_engaged` flag indicating whether to automatically adapt the
          stepsize and defaults to True.
        - `tol_rel_obj` convergence tolerance on the relative norm of the
          objective, defaults to 0.01.
        - `eval_elbo`, evaluate ELBO every Nth iteration, defaults to 100
        - `output_samples` number of posterior samples to draw and save,
          defaults to 1000.
        - `adapt_iter` number of iterations to adapt the stepsize if
          `adapt_engaged` is True and ignored otherwise.

        Returns
        -------
        results : dict
            Dictionary containing information related to results.

        Examples
        --------
        >>> from pystan import StanModel
        >>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}')
        >>> results = m.vb()
        >>> # results saved on disk in format inspired by CSV
        >>> print(results['args']['sample_file'])
        """
        if data is None:
            data = {}
        algorithms = ("meanfield", "fullrank")
        algorithm = "meanfield" if algorithm is None else algorithm
        if algorithm not in algorithms:
            raise ValueError("Algorithm must be one of {}".format(algorithms))
        seed = pystan.misc._check_seed(seed)
        fit = self.fit_class(data, seed)
        m_pars = fit._get_param_names()
        if isinstance(pars, string_types):
            pars = [pars]
        if pars is not None and len(pars) > 0:
            fit._update_param_oi(pars)
            # Reject any requested parameter that the model does not define.
            if not all(p in m_pars for p in pars):
                pars = np.asarray(pars)
                unmatched = pars[np.invert(np.in1d(pars, m_pars))]
                msg = "No parameter(s): {}; sampling not done."
                raise ValueError(msg.format(', '.join(unmatched)))
        else:
            pars = m_pars
        # Normalize `init`: numbers become strings, callables are invoked,
        # everything else must be iterable or a string.
        if isinstance(init, numbers.Number):
            init = str(init)
        elif isinstance(init, Callable):
            init = init()
        elif not isinstance(init, Iterable) and \
                not isinstance(init, string_types):
            raise ValueError("Wrong specification of initial values.")
        stan_args = dict(iter=iter,
                         init=init,
                         chain_id=1,
                         seed=seed,
                         method="variational",
                         algorithm=algorithm)
        # ADVI always writes its draws to disk; fall back to a temp file
        # when the caller did not name one.
        if sample_file is not None:
            stan_args['sample_file'] = pystan.misc._writable_sample_file(sample_file)
        else:
            stan_args['sample_file'] = os.path.join(tempfile.mkdtemp(), 'output.csv')
        if diagnostic_file is not None:
            stan_args['diagnostic_file'] = diagnostic_file
        # check that arguments in kwargs are valid
        valid_args = {'elbo_samples', 'eta', 'adapt_engaged', 'eval_elbo',
                      'grad_samples', 'output_samples', 'adapt_iter',
                      'tol_rel_obj'}
        for arg in kwargs:
            if arg not in valid_args:
                raise ValueError("Parameter `{}` is not recognized.".format(arg))
        stan_args.update(kwargs)
        stan_args = pystan.misc._get_valid_stan_args(stan_args)
        ret, sample = fit._call_sampler(stan_args, pars_oi=pars)
        logger.warning('Automatic Differentiation Variational Inference (ADVI) is an EXPERIMENTAL ALGORITHM.')
        logger.warning('ADVI samples may be found on the filesystem in the file `{}`'.format(sample.args['sample_file'].decode('utf8')))
        return OrderedDict([('args', sample.args), ('inits', sample.inits), ('sampler_params', sample.sampler_params), ('sampler_param_names', sample.sampler_param_names), ('mean_pars', sample.mean_pars), ('mean_par_names', sample.mean_par_names)])
|
|
#
# File:
# skewt2.py
#
# Synopsis:
# Draws skew-T visualizations using dummy data.
#
# Category:
# Skew-T
#
# Author:
# Author: Fred Clare (based on an NCL example of Dennis Shea)
#
# Date of original publication:
# March, 2005
#
# Description:
#    This example draws two skew-T plots using dummy data.  The
# winds from a (bogus) pibal are drawn using different colors.
#
# Effects illustrated:
# o Reading from an ASCII file.
# o Flagging missing values.
# o Plotting soundings and winds.
# o Plotting wind barbs at height levels.
#
# Output:
#    This example produces two visualizations:
#      1.) A Raob sounding with no winds.
#      2.) A Raob sounding with wind barbs at height levels and a
# height scale in feet.
#
# Notes:
# This example was updated in January 2006 to include the new
# Skew-T resource names decided on.
#
from __future__ import print_function
import Ngl
import numpy
import os
# Sounding table dimensions: 30 levels x 16 columns.
nlvl = 30
ncol = 16
# Read the raob sounding shipped with PyNGL's example data.
TestData = Ngl.asciiread(os.path.join(Ngl.pynglpath("data"),
                                      "asc",
                                      "sounding_testdata.asc"),
                         [nlvl, ncol],
                         "float")
# Column extraction — presumably pressure, height, temperature and
# dew point; confirm against the layout of sounding_testdata.asc.
p = TestData[:,1]
z = TestData[:,2]
tc = TestData[:,5] + 2. # for demo purposes
tdc = TestData[:,9]
#
# Set winds to missing values so that they will not be plotted.
#
wspd = -999.*numpy.ones(nlvl,'f')
wdir = -999.*numpy.ones(nlvl,'f')
#
# Plot 1 - Create background skew-T and plot sounding.
#
wks_type = "png"
wks = Ngl.open_wks(wks_type, "skewt2")
skewtOpts = Ngl.Resources()
skewtOpts.sktWindSpeedMissingV = -999. # Missing value for
                                       # wind speed.
skewtOpts.sktWindDirectionMissingV = -999. # Missing value for
                                           # wind direction.
skewtOpts.sktColoredBandsOn = True # Default is False
skewtOpts.tiMainString = "Raob Data; No Winds"
# Background must be created (and drawn) before the data overlay.
skewt_bkgd = Ngl.skewt_bkg(wks, skewtOpts)
skewt_data = Ngl.skewt_plt(wks, skewt_bkgd, p, tc, tdc, z, \
                           wspd, wdir, skewtOpts)
Ngl.draw(skewt_bkgd)
Ngl.draw(skewt_data)
Ngl.frame(wks)
#
# Plot 2 - Create background skew-T and plot sounding and winds.
#
wspd = Ngl.fspan(0., 150., nlvl) # wind speed at each level.
wdir = Ngl.fspan(0., 360., nlvl) # wind direction.
#
# Create a few artificial "pibal" reports.
#
hght = numpy.array([1500., 6000., 10000., 15000.], 'f') # Meters
hspd = numpy.array([ 50., 27., 123., 13.], 'f')
hdir = numpy.array([ 315., 225., 45., 135.], 'f')
dataOpts = Ngl.Resources() # Options describing
                           # data and plotting.
dataOpts.sktHeightWindBarbsOn = True # Plot wind barbs at
                                     # height levels.
dataOpts.sktPressureWindBarbComponents = "SpeedDirection" # Wind speed and
                                                          # dir [else: u,v].
dataOpts.sktHeightWindBarbPositions = hght # height of wind reports
dataOpts.sktHeightWindBarbSpeeds = hspd # speed
                                        # [or u components]
dataOpts.sktHeightWindBarbDirections = hdir # direction
                                            # [or v components]
skewtOpts = Ngl.Resources()
skewtOpts.sktHeightScaleOn = True # default is False
skewtOpts.sktHeightScaleUnits = "feet" # default is "feet"
skewtOpts.sktColoredBandsOn = True # default is False
skewtOpts.sktGeopotentialWindBarbColor = "Red"
skewtOpts.tiMainString = "Raob; [Wind Reports]"
skewt_bkgd = Ngl.skewt_bkg(wks, skewtOpts)
skewt_data = Ngl.skewt_plt(wks, skewt_bkgd, p, tc, tdc, z, \
                           wspd, wdir, dataOpts)
Ngl.draw(skewt_bkgd)
Ngl.draw(skewt_data)
Ngl.frame(wks)
# Flush/close the PyNGL workstation.
Ngl.end()
|
|
#!/usr/bin/python3
import gzip
import os
import sys
import re
import numpy as np
import prediction_v4_module as pr
import pandas as pd
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
from sklearn import tree
def read_features(f_handle, label):
    """Load a tab-separated feature table and attach a constant class label.

    Input
        f_handle: path or open handle accepted by pandas.read_csv; first
                  row holds column names, first column the row index
        label: class label assigned to every row (e.g. 1 positive, 0 negative)
    Output
        (X, Y, names): feature matrix, per-row label vector, column names
    """
    table = pd.read_csv(f_handle, sep="\t", header=0, index_col=0)
    feature_names = list(table.columns)
    X = np.array(table)
    Y = np.repeat(label, X.shape[0])
    return X, Y, feature_names
if __name__=="__main__":
    # Usage: script.py path/to/species/folder
    # Trains LR/DT/RF classifiers on positive vs negative feature tables
    # and writes performance and feature-importance reports next to them.
    f_path = sys.argv[1] # path/to/species/folder
    Xpos, Ypos, names1 = read_features(f_path + "positive.features.txt", 1) # sys.argv[1]
    Xneg, Yneg, names2 = read_features(f_path + "negative.features.txt", 0) # sys.argv[2] for alternative negative set
    # BUGFIX: previously a header mismatch silently skipped the `names`
    # assignment and the script later crashed with a confusing NameError.
    # Fail fast with a clear message instead.
    if names1 != names2:
        raise ValueError("Feature columns of positive and negative sets "
                         "differ: {} vs {}".format(names1, names2))
    names = names1
    # Stack positives on top of negatives; labels follow the same order.
    X_all = np.r_[Xpos, Xneg]
    Y = np.r_[Ypos, Yneg]
    RANDOM_SEED = 123
    PRNG = np.random.RandomState(RANDOM_SEED)
    outfile_root = f_path + "D_" # sys.argv[3]
    pr.plot_ind_histograms(X_all, names, outfile_root + 'ind_histograms.pdf')
    # Filter features by t-test; adjusted p-values are reused in the report.
    X_all, names, adjpvals = pr.apply_ttest(X_all, names, outfile_root + "adj_pvals_features.txt")
    X_scaled = preprocessing.scale(X_all)
    pr.make_scatter_plots(X_scaled, names, outfile_root + 'scatter_plots.pdf')
    data = np.zeros((3,4))
    dfper = pd.DataFrame(data, columns=['accuracy', 'precision', 'recall', 'roc_auc'], index=['DT', 'LR', 'RF']) # performance table
    clf = linear_model.LogisticRegressionCV(refit=True, random_state=PRNG)
    LR_imp, dfper = pr.build_model(clf, X_scaled, Y, names, "LR", dfper, outfile_root + "LR_features.txt")
    clf = tree.DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
                                      max_features=None, max_leaf_nodes=None,
                                      min_impurity_split=1e-07, min_samples_leaf=5,
                                      min_samples_split=2, min_weight_fraction_leaf=0.0,
                                      presort=False, random_state=PRNG, splitter='best')
    DT_imp, dfper = pr.build_model(clf, X_scaled, Y, names, "DT", dfper, outfile_root + "DT_features.txt")
    clf = RandomForestClassifier(n_estimators=1000, random_state=PRNG)
    ## maximum features to look for a split is sqrt(n_features).
    RF_imp, dfper = pr.build_model(clf, X_scaled, Y, names, "RF", dfper, outfile_root + "RF_features.txt")
    pr.plot_ROC(X_scaled, Y, outfile_root + 'ROC.png', PRNG)
    dfper.to_csv(path_or_buf= outfile_root + "performance.txt", sep=',')
    # Sign/direction of each feature's effect, for the report below.
    direction = pr.find_direction(X_all)
    ## feature importance table for different methods
    dffeat = pd.DataFrame({'-log(p)' : -np.log(adjpvals),
                           'LR' : LR_imp,
                           'DT' : DT_imp,
                           'RF' : RF_imp,
                           'effect' : direction}, index= names)
    dffeat.to_csv(path_or_buf= outfile_root + "features_report.txt" , sep=',', header=True, index=True)
|
|
import os
import numpy as np
import pandas as pd
from surili_core.workspace import Workspace
class Dataframes:
    @staticmethod
    def from_directory_structure(x_key: str = 'x', y_key: str = 'y'):
        """Build a loader that turns a class-per-folder tree into a DataFrame.

        Returns a function mapping a root path to a DataFrame with one row
        per file found one level below the root: column `x_key` holds the
        file path and column `y_key` the name of its parent folder.
        """
        def apply(path: str):
            workspace = Workspace.from_path(path)
            # (file path, parent-folder name) for every file in every subfolder.
            pairs = workspace.folders \
                .flatmap(lambda folder: folder.files) \
                .map(lambda file_path: (file_path, os.path.basename(os.path.dirname(file_path)))) \
                .to_list()
            return pd.DataFrame(np.array(pairs), columns=[x_key, y_key])
        return apply
|
|
import numpy as np

from scipy.signal import get_window
def fourier_smooth(yi, d, fmax, shape='boxcar'):
    """y = fourier_smooth(yi, d, fmax, shape='boxcar').

    Low-pass filter the signal `yi` (sampled with time step `d`) in the
    Fourier domain. Spectral components above the cut-off frequency
    `fmax` are blocked; components below it are multiplied by the right
    half of a `scipy.signal.get_window` window of the given `shape`
    ('boxcar' leaves the passband untouched).

    yi    = {y1, y2, ..., yn}
    d     = sampling time
    fmax  = low-pass cut-off frequency
    shape = transfer-function shape (default 'boxcar')
    """
    spectrum = np.fft.rfft(yi, norm='forward')
    # Number of rfft bins inside the passband (frequencies <= fmax).
    n_pass = int(fmax * yi.size * d) + 1
    # Symmetric window of odd length; its right half tapers the passband.
    taper = get_window(shape, 2 * n_pass - 1, False)
    transfer = np.zeros(spectrum.size)
    transfer[:n_pass] = taper[n_pass - 1:]
    # Apply the transfer function and invert back to the time domain.
    return np.fft.irfft(spectrum * transfer, yi.size, norm='forward')
|
|
import json
import plotly
import random as rn
import numpy as np
import pandas as pd
import string
import pickle
import collections
from collections import Counter
import nltk
# Fetch tokenizer, lemmatizer and stop-word corpora at import time.
nltk.download(['punkt', 'wordnet', 'stopwords'])
import re
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
# Custom stop-word list: keep negations ('no', 'not'), which carry
# meaning in disaster messages, and drop common polite filler words.
stop_words = set(stopwords.words('english'))
stop_words.remove('no')
stop_words.remove('not')
stop_words.add('please')
stop_words.add('would')
stop_words.add('should')
stop_words.add('could')
from langdetect import detect, DetectorFactory
# Fixed seed makes langdetect deterministic (see note above lang_detect).
DetectorFactory.seed = 14
from bs4 import BeautifulSoup
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from plotly.graph_objs import Table
# deprecated in scikit-learn 0.21, removed in 0.23
#from sklearn.externals import joblib
import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
# English contraction -> expansion lookup used by expand_contractions().
CONTRACTION_MAP = {
    "ain't": "is not",
    "aren't": "are not",
    "can't": "cannot",
    "can't've": "cannot have",
    "'cause": "because",
    "could've": "could have",
    "couldn't": "could not",
    "couldn't've": "could not have",
    "didn't": "did not",
    "doesn't": "does not",
    "don't": "do not",
    "hadn't": "had not",
    "hadn't've": "had not have",
    "hasn't": "has not",
    "haven't": "have not",
    "he'd": "he would",
    "he'd've": "he would have",
    "he'll": "he will",
    # BUGFIX: was "he he will have" (duplicated "he").
    "he'll've": "he will have",
    "he's": "he is",
    "how'd": "how did",
    "how'd'y": "how do you",
    "how'll": "how will",
    "how's": "how is",
    "I'd": "I would",
    "I'd've": "I would have",
    "I'll": "I will",
    "I'll've": "I will have",
    "I'm": "I am",
    "I've": "I have",
    "i'd": "i would",
    "i'd've": "i would have",
    "i'll": "i will",
    "i'll've": "i will have",
    "i'm": "i am",
    "i've": "i have",
    "isn't": "is not",
    "it'd": "it would",
    "it'd've": "it would have",
    "it'll": "it will",
    "it'll've": "it will have",
    "it's": "it is",
    "let's": "let us",
    "ma'am": "madam",
    "mayn't": "may not",
    "might've": "might have",
    "mightn't": "might not",
    "mightn't've": "might not have",
    "must've": "must have",
    "mustn't": "must not",
    "mustn't've": "must not have",
    "needn't": "need not",
    "needn't've": "need not have",
    "o'clock": "of the clock",
    "oughtn't": "ought not",
    "oughtn't've": "ought not have",
    "shan't": "shall not",
    "sha'n't": "shall not",
    "shan't've": "shall not have",
    "she'd": "she would",
    "she'd've": "she would have",
    "she'll": "she will",
    "she'll've": "she will have",
    "she's": "she is",
    "should've": "should have",
    "shouldn't": "should not",
    "shouldn't've": "should not have",
    "so've": "so have",
    "so's": "so as",
    "that'd": "that would",
    "that'd've": "that would have",
    "that's": "that is",
    "there'd": "there would",
    "there'd've": "there would have",
    "there's": "there is",
    "they'd": "they would",
    "they'd've": "they would have",
    "they'll": "they will",
    "they'll've": "they will have",
    "they're": "they are",
    "they've": "they have",
    "to've": "to have",
    "wasn't": "was not",
    "we'd": "we would",
    "we'd've": "we would have",
    "we'll": "we will",
    "we'll've": "we will have",
    "we're": "we are",
    "we've": "we have",
    "weren't": "were not",
    "what'll": "what will",
    "what'll've": "what will have",
    "what're": "what are",
    "what's": "what is",
    "what've": "what have",
    "when's": "when is",
    "when've": "when have",
    "where'd": "where did",
    "where's": "where is",
    "where've": "where have",
    "who'll": "who will",
    "who'll've": "who will have",
    "who's": "who is",
    "who've": "who have",
    "why's": "why is",
    "why've": "why have",
    "will've": "will have",
    "won't": "will not",
    "won't've": "will not have",
    "would've": "would have",
    "wouldn't": "would not",
    "wouldn't've": "would not have",
    "y'all": "you all",
    "y'all'd": "you all would",
    "y'all'd've": "you all would have",
    "y'all're": "you all are",
    "y'all've": "you all have",
    "you'd": "you would",
    "you'd've": "you would have",
    "you'll": "you will",
    "you'll've": "you will have",
    "you're": "you are",
    "you've": "you have"
}
# see: https://pypi.org/project/langdetect/
# Language detection algorithm is non-deterministic,
# which means that if you try to run it on a text which is either
# too short or too ambiguous, you might get different results everytime you run it.
# Therefore the DetectorFactory.seed is necessary.
def lang_detect(text):
    '''
    Detects the language of the input text reflections
    Input
        a string text (iterable of text reflections)
    Output
        a list of detected languages, one code per reflection
    '''
    # Re-seed so langdetect's non-deterministic algorithm gives
    # reproducible results on short/ambiguous inputs.
    DetectorFactory.seed = 14
    return [detect(reflection) for reflection in text]
def check_word_en(text):
    '''
    Checks if the word is an English one by usage of the WordNet vocabulary
    Input
        text string to check if being part of the English vocabulary
    Output
        returns the remaining English word list if available and informs
        the user if non English words or strings are available
    '''
    english_words = []
    for candidate in text.split():
        print(candidate)
        # removed check of having a single EN coded word, detect() is not reliable on single words;
        # A word counts as English when WordNet knows at least one synset for it.
        if wn.synsets(candidate):
            english_words.append(candidate)
        else:
            print("The en coding text part '" + candidate +
                  "' is not in the wordnet dictionary. Change your input message.")
    return english_words
# function from Dipanjan's repository:
# https://github.com/dipanjanS/practical-machine-learning-with-python/blob/master/bonus%\
# 20content/nlp%20proven%20approach/NLP%20Strategy%20I%20-%20Processing%20and%20Understanding%20Text.ipynb
def expand_contractions(text, contraction_mapping):
    '''
    Expands shortened text parts included in the disaster text message
    Input
        text: text message that shall be controlled of having shortened text
        contraction_mapping: dictionary with shortened key text and their long text version value
    Output
        expanded_text
    '''
    pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())),
                         flags=re.IGNORECASE | re.DOTALL)

    def _expand(match_obj):
        matched = match_obj.group(0)
        # Look the contraction up as written, falling back to lower case;
        # keeping the original first character preserves capitalisation.
        replacement = contraction_mapping.get(matched) \
            or contraction_mapping.get(matched.lower())
        return matched[0] + replacement[1:]

    expanded_text = pattern.sub(_expand, text)
    # Strip any apostrophes that survived (e.g. possessives).
    return re.sub("'", "", expanded_text)
def tokenize(text):
    '''
    Tokenises the given text data
    Input
        text: the new disaster text message
    Output
        clean_tokens: list of cleaned tokens, means English words which are normalised, contracted,
        tokenised, lemmatised and removed from English stop words
    '''
    # Strip HTML markup first.
    soup = BeautifulSoup(text, 'lxml')
    souped = soup.get_text()
    try:
        bom_removed = souped.decode("utf-8-sig").replace(u"\ufffd", "?")
    except AttributeError:
        # Python 3 str has no decode(); keep the soup text as-is.
        bom_removed = souped
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    detected_urls = re.findall(url_regex, bom_removed)
    # BUGFIX: replace URLs cumulatively, starting from the cleaned text.
    # Previously each iteration restarted from `bom_removed` (only the
    # last URL was replaced), and when no URL was found the raw HTML
    # input was used downstream, discarding the soup/BOM cleanup.
    text = bom_removed
    for url in detected_urls:
        text = text.replace(url, "urlplaceholder")
    # change the negation wordings like don't to do not, won't to will not
    # or other contractions like I'd to I would, I'll to I will etc. via dictionary
    text = expand_contractions(text, CONTRACTION_MAP)
    # remove punctuation [!”#$%&’()*+,-./:;<=>?@[\]^_`{|}~]
    text = text.translate(str.maketrans('','', string.punctuation))
    # remove numbers
    letters_only = re.sub("[^a-zA-Z]", " ", text)
    # during ETL pipeline we have reduced the dataset on English messages ('en' language coding,
    # but there can be some wrong codings
    tokens = word_tokenize(letters_only, language='english')
    lemmatizer = WordNetLemmatizer()
    clean_tokens = []
    for tok in tokens:
        clean_tok = lemmatizer.lemmatize(tok).lower().strip()
        # remove stop words and take care of words having at least 2 characters
        if (len(clean_tok) > 2) & (clean_tok not in stop_words):
            clean_tokens.append(clean_tok)
    return clean_tokens
def compute_word_counts(messages, load=True, filepath='../data/counts.npz'):
    '''
    Function computes the top 20 words in the dataset with counts of each term
    Input:
        messages: list or numpy array
        load: Boolean value if load or run model
        filepath: filepath to save or load data
    Output:
        top_words: list
        top_counts: list
    '''
    if load:
        # Read back the arrays cached by a previous load=False run.
        cached = np.load(filepath)
        return list(cached['top_words']), list(cached['top_counts'])
    # Tokenise every message and tally term frequencies.
    term_counter = Counter()
    for message in messages:
        term_counter.update(tokenize(message))
    # top 20 words
    top = term_counter.most_common(20)
    top_words = [pair[0] for pair in top]
    top_counts = [pair[1] for pair in top]
    # Cache for later load=True calls.
    np.savez(filepath, top_words=top_words, top_counts=top_counts)
    return list(top_words), list(top_counts)
# load data
# Cleaned disaster-messages table produced by the ETL pipeline.
engine = create_engine('sqlite:///../data/Disaster_Messages_engine.db')
df = pd.read_sql_table('Messages_Categories_table', engine)
# load model
# Pre-trained classification pipeline serialized by the ML pipeline script.
model = joblib.load("../models/classifier.pkl")
# index webpage displays visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with three Plotly figures:
    genre distribution, category distribution and a top-20-words table."""
    #for text in df['message'].values:
    #    tokenized_ = tokenize(text)
    # top 20 word counts with list objects
    # load=False recomputes the counts on every page load (slow but fresh);
    # set to True to reuse the cached ../data/counts.npz instead.
    load=False
    top_words_20, top_counter_20 = compute_word_counts(messages=df['message'].values,
                                                       load=load, filepath='../data/counts.npz')
    # extract data needed for visuals
    # Genre message distribution
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)
    # Category message distribution
    # Only messages flagged related==1; category columns start at index 5.
    df_related_1 = df.query("related == 1")
    dict_cat = df_related_1.iloc[:, 5:].sum().to_dict()
    # Sort categories by ascending message count for the bar chart.
    sorted_feature = sorted(dict_cat.items(), key=lambda kv: kv[1])
    dict_sorted = collections.OrderedDict(sorted_feature)
    labels = list(dict_sorted.keys())
    values = list(dict_sorted.values())
    # create visuals
    # shows genre and category distribution graphs
    figures = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],
            'layout': {
                'title': 'Message Distribution by Genre',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        },
        {
            'data': [
                Bar(
                    x=labels,
                    y=values
                )
            ],
            'layout': {
                'title': 'Messages Distribution by Category',
                'yaxis': {
                    'title': "Count",
                    'automargin':True
                },
                'xaxis': {
                    'title': "Category",
                    'tickangle': -45,
                    'automargin':True
                }
            }
        },
        {
            'data': [
                Table(
                    header=dict(
                        values=['<b>Counter</b>', '<b>Words</b>'],
                        line_color='darkslategray',
                        fill_color='grey',
                        align=['center'],
                        font=dict(color='white', size=12)
                    ),
                    cells=dict(
                        values=[top_counter_20, top_words_20],
                        line_color='darkslategray',
                        fill_color = 'white',
                        align = ['center'],
                        font = dict(color = 'darkslategray', size = 11)
                    )
                )
            ],
            'layout': {
                'title': '<b>20 Top Words of Messages</b>'
            }
        }
    ]
    # encode plotly graphs in JSON
    ids = ["graph-{}".format(i) for i, _ in enumerate(figures)]
    graphJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)
    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
    """
    Handle the user's text query and render the model's classification.

    Reads the 'query' GET parameter, verifies it is English, tokenizes it and
    strips non-dictionary words, then predicts one label per output category.
    Falls back to all-zero labels when the query is empty, non-English, or
    fully filtered out.
    """
    # future toDo:
    # - if it is identified being a proper disaster message, and not a nonsense one,
    #   related category shall be 1
    # - as genre category this should be classified as 'direct' message
    query = request.args.get('query', '')
    print("Query")
    print(query)
    modified_query = ""
    # one label per output category; derive the size from the dataframe
    # instead of hard-coding 35 so the view stays in sync with the data
    num_categories = len(df.columns[4:])
    classification_labels = np.zeros((num_categories,), dtype=int)
    classification_results = dict(zip(df.columns[4:], classification_labels))
    if not query:
        print("NO query string available.")
    else:
        # a query string exists
        print("Language coding of query string:")
        print(detect(query))
        if detect(query) != 'en':
            # NOTE(review): this message is built but never rendered; the page
            # silently shows all-zero labels for non-English input
            message = "The query string '" + query + "' is not detected being English. Change your input message."
        else:
            # creates word tokens out of the query string
            query_tokens = tokenize(query)
            print("Tokenized query text:")
            print(query_tokens)
            text = ' '.join(query_tokens)
            # checks if the words are English words of the wordnet dictionary,
            # if not remove it from the query tokens and inform the user (on command line tool by now)
            modified_query_list = check_word_en(text)
            modified_query = ' '.join(modified_query_list)
            print("Modified query text:")
            print(modified_query)
            print(type(modified_query))
            if not modified_query:
                classification_labels = np.zeros((num_categories,), dtype=int)
            else:
                # use model to predict classification for query
                classification_labels = model.predict([modified_query])[0]
                print("model classification labels are:")
                print(classification_labels)
                print(type(classification_labels))
            classification_results = dict(zip(df.columns[4:], classification_labels))
    # This will render the go.html Please see that file.
    return render_template(
        'go.html',
        query = "The original message text is: '" + query +
                "' and the modified English query words are: '" + modified_query + "'",
        classification_result=classification_results
    )
def main():
    """Start the Flask development server on port 3001, reachable from any host."""
    app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
    main()
|
|
import os
import numpy as np
import argparse
import pickle
from nms import nms
def class_agnostic_nms(boxes, scores, iou=0.7):
    """Merge overlapping detections across all classes via non-maximum suppression.

    With at most one box there is nothing to suppress, so the inputs are
    returned unchanged; otherwise the surviving boxes and scores are
    returned as plain lists.
    """
    if len(boxes) <= 1:
        return boxes, scores
    kept_boxes, kept_scores = nms(np.array(boxes), np.array(scores), iou)
    return list(kept_boxes), list(kept_scores)
def parse_det_pkl(path):
    """Load and return the {filename: detections} dictionary pickled at `path`."""
    with open(path, "rb") as handle:
        return pickle.load(handle)
def parse_arguments():
    """
    Parse the command line arguments.

    Returns:
        dict with 'input_dir_path' (required annotations directory) and
        'nms_iou_theshold' (float IoU merge threshold, default 0.5).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_dir_path", required=True,
                        help="The path to the directory containing the annotations for separate queries.")
    parser.add_argument("-iou", "--nms_iou_theshold", required=False, type=float, default=0.5,
                        help="The iou threshold used to merge the detections.")
    return vars(parser.parse_args())
def main():
    """Merge per-query detection pickles from one directory into a single
    'combined.pkl', applying class-agnostic NMS per image across queries."""
    args = parse_arguments()
    input_dir_path = args["input_dir_path"]
    nms_iou_theshold = args["nms_iou_theshold"]
    # every regular file in the directory is treated as a detection pickle
    # NOTE(review): a 'combined.pkl' left over from a previous run would be
    # picked up here too — confirm the directory only holds query pickles
    pkl_files = [name for name in os.listdir(input_dir_path) if os.path.isfile(os.path.join(input_dir_path, name))]
    tq_to_dets = []
    for file in pkl_files:
        tq_to_dets.append(parse_det_pkl(f"{input_dir_path}/{file}"))
    combined_img_to_boxes = {}
    # assumes every pickle covers the same set of images as the first one
    image_names = tq_to_dets[0].keys()
    for img in image_names:
        all_boxes = []
        all_scores = []
        # pool detections for this image across all query pickles
        for tq_to_det in tq_to_dets:
            boxes, scores = tq_to_det[img]
            all_boxes += boxes
            all_scores += scores
        combined_img_to_boxes[img] = class_agnostic_nms(all_boxes, all_scores, nms_iou_theshold)
    # Save the combined detections
    output_path = f"{input_dir_path}/combined.pkl"
    with open(output_path, "wb") as f:
        pickle.dump(combined_img_to_boxes, f)
if __name__ == "__main__":
    main()
|
|
# coding=utf-8
import os, sys
import shutil
import sys
import time
import shutil
import re
import cv2
import numpy as np
import tensorflow as tf
import codecs
from collections import Counter
import matplotlib.pyplot as plt
import glob
from PIL import Image
from cnocr import CnOcr
from fuzzywuzzy import fuzz
sys.path.append(os.getcwd())
from nets import model_train as model
from utils.rpn_msr.proposal_layer import proposal_layer
from utils.text_connector.detectors import TextDetector
# Command line flags: working directories for each pipeline stage (extracted
# frames, detected text positions, cropped line images for OCR, srt output),
# plus GPU selection and the CTPN checkpoint directory. Paths use Windows
# separators.
tf.app.flags.DEFINE_string('data_path', 'data\\frames\\', '')
tf.app.flags.DEFINE_string('output_path', 'data\\text_position\\', '')
tf.app.flags.DEFINE_string('ocr_path', 'data\\to_ocr\\', '')
tf.app.flags.DEFINE_string('srt_path', 'data\\to_srt\\', '')
tf.app.flags.DEFINE_string('gpu', '0', '')
tf.app.flags.DEFINE_string('checkpoint_path', 'checkpoints_mlt\\', '')
FLAGS = tf.app.flags.FLAGS
def video_to_frames(path):
    """
    Extract roughly one frame per second from the video at `path` into
    FLAGS.data_path as '<frame_no>.png' (single channel), and return the
    video's fps. Returns None when the video cannot be read.
    """
    videoCap = cv2.VideoCapture(path)
    # frame rate
    fps = videoCap.get(cv2.CAP_PROP_FPS)
    # total number of frames in the video
    total_framesX = int(videoCap.get(cv2.CAP_PROP_FRAME_COUNT))
    # image size (height, width)
    image_size = (int(videoCap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(videoCap.get(cv2.CAP_PROP_FRAME_WIDTH)))
    print('fps: ', fps)
    print('Total Frames: ', total_framesX)
    print('Video Resolution: ', image_size)
    if total_framesX < 1:
        print('fail to read video')
        return
    else:
        print('Extracting frames, please wait...')
    # output folder for the extracted frames
    ex_folder = FLAGS.data_path
    if not os.path.exists(ex_folder):
        os.mkdir(ex_folder)
    # start from the first frame
    current_frame = 1
    # number of subtitle scans (one scan per second)
    loop_times = int(total_framesX/fps)
    # scan frame by frame instead:
    # loop_times = int(total_framesX)
    for i in range(loop_times):
        sucess, frame = videoCap.read()
        if frame is None:
            # print('video: %s finish at %d frame.' % (video_filename, current_frame))
            break
        # keep only the first channel; text detection does not need color
        im = frame[:, :, 0]
        img = Image.fromarray(im)
        timeline = str(current_frame) + ".png"
        imgname = os.path.join(ex_folder, timeline)
        img.save(imgname)
        for j in range(int(fps)): # skip the rest of this second, since subtitles usually last 1s or more
            sucess, frame = videoCap.read()
            current_frame += 1
    return fps
def get_images():
    """Collect all image file paths (jpg/png/jpeg/JPG) under FLAGS.data_path."""
    files = []
    allowed_exts = ('jpg', 'png', 'jpeg', 'JPG')
    for parent, dirnames, filenames in os.walk(FLAGS.data_path):
        for filename in filenames:
            # str.endswith accepts a tuple of suffixes
            if filename.endswith(allowed_exts):
                files.append(os.path.join(parent, filename))
    print('Find {} images'.format(len(files)))
    return files
def detect_waterprint(raw_srt_path):
    """
    Estimate the watermark bounding box from a raw subtitle file.

    Each line of the file has the form '<frame>\\t[ymin, ymax, xmin, xmax]\\t<text>'.
    The watermark is assumed to sit at the most frequent position, so the mode
    of each coordinate is returned as a tuple (ymin, ymax, xmin, xmax).
    Returns None when the file does not exist.
    """
    if not os.path.exists(raw_srt_path):
        print('Raw Srt File Do Not Exist')
        return
    # one frequency counter per coordinate, in file order: ymin, ymax, xmin, xmax
    coord_counters = [Counter(), Counter(), Counter(), Counter()]
    with codecs.open(raw_srt_path, mode='r', encoding='utf-8') as srt_file:
        for line in srt_file:
            position = line.split('\t')[1].split('[')[1].split(']')[0].split(', ')
            for counter, value in zip(coord_counters, position):
                counter[int(value)] += 1
    return tuple(counter.most_common()[0][0] for counter in coord_counters)
def delete_waterprint(raw_srt_path, waterprint_area):
    """
    Interactively remove watermark lines from the raw subtitle file.

    Shows the detected watermark box and asks for confirmation on stdin.
    If confirmed, rewrites `raw_srt_path` in place, dropping every line whose
    four coordinates all fall within the tolerance of the watermark box.
    """
    print('We detected that the watermark area is about: '+ str(waterprint_area))
    choice = input('If the watermark area is located correctly,input "y" else Press ENTER: ')
    if choice == 'y':
        # the allowed deviation can be tuned to the video resolution
        y_deviation = 100
        x_deviation = 100
        y_min_bottom = waterprint_area[0] - y_deviation
        y_min_upper = waterprint_area[0] + y_deviation
        y_max_bottom = waterprint_area[1] - y_deviation
        y_max_upper = waterprint_area[1] + y_deviation
        x_min_bottom = waterprint_area[2] - x_deviation
        x_min_upper = waterprint_area[2] + x_deviation
        x_max_bottom = waterprint_area[3] - x_deviation
        x_max_upper = waterprint_area[3] + x_deviation
        with open(raw_srt_path,'r',encoding='utf-8') as r:
            lines=r.readlines()
        # rewrite the file, keeping only lines outside the watermark box
        with open(raw_srt_path,'w',encoding='utf-8') as w:
            for l in lines:
                pos = l.split('\t')[1].split('[')[1].split(']')[0].split(', ')
                y_min = int(pos[0])
                y_max = int(pos[1])
                x_min = int(pos[2])
                x_max = int(pos[3])
                # count how many of the four coordinates match the watermark box
                count = 0
                if (y_min >= y_min_bottom) and (y_min <= y_min_upper):
                    count = count + 1
                if (y_max >= y_max_bottom) and (y_max <= y_max_upper):
                    count = count + 1
                if (x_min >= x_min_bottom) and (x_min <= x_min_upper):
                    count = count + 1
                if (x_max >= x_max_bottom) and (x_max <= x_max_upper):
                    count = count + 1
                # keep the line unless all four coordinates match
                if count < 4:
                    w.write(l)
        print('ALL water print text are removed')
    else:
        return
def detect_subtitle_area(raw_srt_path):
    """
    Estimate the vertical band occupied by subtitles in a raw subtitle file.

    Works like detect_waterprint but only looks at the y coordinates: returns
    (ymin, ymax) as the most frequent values across all lines, or None when
    the file does not exist.
    """
    if not os.path.exists(raw_srt_path):
        print('Raw Srt File Do Not Exist')
        return
    ymin_counter = Counter()
    ymax_counter = Counter()
    with codecs.open(raw_srt_path, mode='r', encoding='utf-8') as srt_file:
        for line in srt_file:
            position = line.split('\t')[1].split('[')[1].split(']')[0].split(', ')
            ymin_counter[int(position[0])] += 1
            ymax_counter[int(position[1])] += 1
    subtitle_area = (ymin_counter.most_common()[0][0], ymax_counter.most_common()[0][0])
    print(subtitle_area)
    return subtitle_area
def nonsubtitle_filter(raw_srt_path, subtitle_area):
    """
    Interactively drop every line that lies outside the subtitle band.

    Shows the detected (ymin, ymax) band and asks for confirmation on stdin.
    If confirmed, rewrites `raw_srt_path` in place, keeping only lines whose
    y coordinates both fall within the tolerance of the band.
    """
    print('We detected that the subtitle area is about: '+ str(subtitle_area))
    choice = input('If the subtitle area is located correctly,input "y" else Press ENTER: ')
    if choice == 'y':
        # the allowed deviation can be tuned to the video resolution
        y_deviation = 50
        y_min_bottom = subtitle_area[0] - y_deviation
        y_min_upper = subtitle_area[0] + y_deviation
        y_max_bottom = subtitle_area[1] - y_deviation
        y_max_upper = subtitle_area[1] + y_deviation
        with open(raw_srt_path,'r',encoding='utf-8') as r:
            lines=r.readlines()
        # rewrite the file, keeping only lines inside the subtitle band
        with open(raw_srt_path,'w',encoding='utf-8') as w:
            for l in lines:
                pos = l.split('\t')[1].split('[')[1].split(']')[0].split(', ')
                y_min = int(pos[0])
                y_max = int(pos[1])
                count = 0
                if (y_min >= y_min_bottom) and (y_min <= y_min_upper):
                    count = count + 1
                if (y_max >= y_max_bottom) and (y_max <= y_max_upper):
                    count = count + 1
                # keep the line only when both y coordinates match the band
                if count >= 2:
                    w.write(l)
        print('ALL non subtitle area text are removed')
    else:
        return
def clear_buff():
    """Remove all intermediate working directories left over from previous runs."""
    for folder in (FLAGS.data_path, FLAGS.output_path, FLAGS.ocr_path, FLAGS.srt_path):
        if os.path.exists(folder):
            shutil.rmtree(folder)
# bounding ranges of the detected text areas
def text_range(txt):
    """
    Read detected text-box coordinates from a CTPN output file.

    Each line of `txt` holds at least eight comma-separated integers
    (x1,y1,...,x4,y4, corners of the box, possibly followed by a score).
    Only the two diagonal corners (x1,y1) and (x3,y3) are needed to build
    an axis-aligned box.

    Returns:
        list of [ymin, ymax, xmin, xmax] boxes, one per line.
    """
    ranges = []
    # use a context manager so the handle is closed (the original leaked it);
    # [:-1] drops the empty element after the trailing newline
    with open('{}'.format(txt)) as f:
        lines = f.read().split('\n')[:-1]
    for line in lines:
        fields = line.split(',')
        x1, y1 = int(fields[0]), int(fields[1])
        x3, y3 = int(fields[4]), int(fields[5])
        ranges.append([min(y1, y3), max(y1, y3), min(x1, x3), max(x1, x3)])
    return ranges
def to_textImg():
    """
    Crop every detected text box out of each extracted frame.

    Reads frames from FLAGS.data_path and box files from FLAGS.output_path,
    and writes the crops to FLAGS.ocr_path/<frame_no>/<box>.png for OCR.
    """
    # build the input lists
    images = glob.glob(FLAGS.data_path + "*.png")
    print(images)
    txts = glob.glob(FLAGS.output_path + "*.txt")
    print(txts)
    # sort both lists by frame number so images and box files line up
    images.sort(key = lambda x:x.split('\\')[-1].split('.png')[0] )
    txts.sort(key = lambda x:x.split('\\')[-1].split('.txt')[0] )
    # output directory
    output_folder = FLAGS.ocr_path
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    for i in range(len(images)):
        # resize to the same geometry the detector used so box coords match
        img, (rh, rw) = resize_image(cv2.imread(images[i]))
        tr = text_range(txts[i])
        if tr == []:
            continue
        for j in range(len(tr)):
            # tr[j] is [ymin, ymax, xmin, xmax]
            text_image = img[tr[j][0]:tr[j][1], tr[j][2]:tr[j][3]]
            output = output_folder + images[i].split('\\')[-1].split('.png')[0]
            if not os.path.exists(output):
                os.mkdir(output)
            imgname = os.path.join(output, str(tr[j]) + '.png')
            cv2.imwrite(imgname, text_image)
def resize_image(img):
    """
    Scale an image so its short side becomes 600px (long side capped at
    1200px), then round each dimension up to a multiple of 16 as required
    by the detector's feature stride.

    Returns:
        (resized_image, (height_ratio, width_ratio)) where the ratios map
        original coordinates to the resized image.
    """
    img_size = img.shape
    im_size_min = np.min(img_size[0:2])
    im_size_max = np.max(img_size[0:2])
    im_scale = float(600) / float(im_size_min)
    if np.round(im_scale * im_size_max) > 1200:
        im_scale = float(1200) / float(im_size_max)
    new_h = int(img_size[0] * im_scale)
    new_w = int(img_size[1] * im_scale)
    # Round up to the next multiple of 16. The original tested
    # `new_h // 16 == 0` (i.e. "smaller than 16"), which also re-rounded
    # sizes that were already multiples of 16; `% 16` is the intended check.
    new_h = new_h if new_h % 16 == 0 else (new_h // 16 + 1) * 16
    new_w = new_w if new_w % 16 == 0 else (new_w // 16 + 1) * 16
    re_im = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return re_im, (new_h / img_size[0], new_w / img_size[1])
# strip characters that are neither Chinese nor English
def cleantxt(raw):
    """Remove every character that is not alphanumeric, a CJK ideograph,
    or one of the allowed punctuation marks."""
    drop_pattern = u'[^0-9a-zA-Z\u4e00-\u9fa5.,,?\-“”]+'
    return re.sub(drop_pattern, '', raw, flags=re.UNICODE)
def to_raw_srt(path, srt_dir):
    """
    OCR every cropped text image and write the raw subtitle file.

    Walks the numbered per-frame folders under `path` (as written by
    to_textImg) in frame order and appends one
    '<frame_no>\\t<text_position>\\t<text>' line per crop to
    `srt_dir`\\to_srt.txt.
    """
    ocr = CnOcr()
    if not os.path.exists(srt_dir):
        os.mkdir(srt_dir)
    # per-frame folders are named by frame number; sort numerically
    dir_list = [int(r) for r in os.listdir(path) if r.isdigit()]
    dir_list.sort()
    file = srt_dir + '\\to_srt' + '.txt'
    # start from a clean output file
    if os.path.exists(file):
        os.remove(file)
    for i in dir_list:
        child_dir = path + '\\' + str(i)
        file_list = os.listdir(child_dir)
        for j in file_list:
            frame_no = str(child_dir.split('\\')[-1])
            # the crop's filename (without extension) encodes its position
            text_position = j.split('.')[0]
            # run OCR on the cropped line image
            content = cleantxt("".join(ocr.ocr_for_single_line(child_dir + '\\' + j)))
            with open(file, 'a+',encoding='utf-8') as f:
                f.write(frame_no + '\t' + text_position + '\t' + content + '\n')
            f.close()
def text_detect():
    """
    Run the CTPN text detector over every extracted frame.

    For each image found in FLAGS.data_path, writes to FLAGS.output_path:
      * the frame with detected text boxes drawn on it, and
      * a .txt file with one 'x1,y1,...,x4,y4,score' line per detected box.
    """
    # start from a clean output directory
    if os.path.exists(FLAGS.output_path):
        shutil.rmtree(FLAGS.output_path)
    os.makedirs(FLAGS.output_path)
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    with tf.get_default_graph().as_default():
        input_image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
        input_im_info = tf.placeholder(tf.float32, shape=[None, 3], name='input_im_info')
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        bbox_pred, cls_pred, cls_prob = model.model(input_image)
        # restore the exponential-moving-average shadow weights from the checkpoint
        variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
        saver = tf.train.Saver(variable_averages.variables_to_restore())
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
            model_path = os.path.join(FLAGS.checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
            print('Restore from {}'.format(model_path))
            saver.restore(sess, model_path)
            im_fn_list = get_images()
            for im_fn in im_fn_list:
                print('===============')
                print(im_fn)
                start = time.time()
                try:
                    # BGR -> RGB
                    im = cv2.imread(im_fn)[:, :, ::-1]
                except:
                    print("Error reading image {}!".format(im_fn))
                    continue
                img, (rh, rw) = resize_image(im)
                h, w, c = img.shape
                im_info = np.array([h, w, c]).reshape([1, 3])
                bbox_pred_val, cls_prob_val = sess.run([bbox_pred, cls_prob],
                                                       feed_dict={input_image: [img],
                                                                  input_im_info: im_info})
                # turn raw proposals into connected text-line boxes
                textsegs, _ = proposal_layer(cls_prob_val, bbox_pred_val, im_info)
                scores = textsegs[:, 0]
                textsegs = textsegs[:, 1:5]
                textdetector = TextDetector(DETECT_MODE='H')
                boxes = textdetector.detect(textsegs, scores[:, np.newaxis], img.shape[:2])
                boxes = np.array(boxes, dtype=np.int)
                cost_time = (time.time() - start)
                print("cost time: {:.2f}s".format(cost_time))
                # draw the detected boxes on the (resized) frame
                for i, box in enumerate(boxes):
                    cv2.polylines(img, [box[:8].astype(np.int32).reshape((-1, 1, 2))], True, color=(0, 255, 0),
                                  thickness=2)
                img = cv2.resize(img, None, None, fx=1.0 / rh, fy=1.0 / rw, interpolation=cv2.INTER_LINEAR)
                cv2.imwrite(os.path.join(FLAGS.output_path, os.path.basename(im_fn)), img[:, :, ::-1])
                with open(os.path.join(FLAGS.output_path, os.path.splitext(os.path.basename(im_fn))[0]) + ".txt",
                          "w") as f:
                    for i, box in enumerate(boxes):
                        line = ",".join(str(box[k]) for k in range(8))
                        line += "," + str(scores[i]) + "\n"
                        f.writelines(line)
def frames_to_timecode(framerate, frames):
    """
    Convert a frame number into an SRT timecode string.

    :param framerate: video frame rate (frames per second)
    :param frames: current frame number
    :return: timecode string 'HH:MM:SS,mmm'

    The original formatted the last field as the raw remaining frame count
    ('%02d'), but the SRT format requires three-digit milliseconds, so the
    frame remainder is converted to milliseconds here.
    """
    hours = int(frames / (3600 * framerate))
    minutes = int(frames / (60 * framerate) % 60)
    seconds = int(frames / framerate % 60)
    milliseconds = int(frames % framerate * 1000 / framerate)
    return '{0:02d}:{1:02d}:{2:02d},{3:03d}'.format(hours, minutes, seconds, milliseconds)
def generate_srtfile(raw_srt_path, fps):
    """
    Merge consecutive duplicate subtitle lines and write the final .srt file.

    Lines in the raw file look like '<frame>\\t<position>\\t<text>'. A line
    whose text is at least `similarity_threshold`% similar (fuzzy match) to
    the previous kept line is treated as the same subtitle and only extends
    its frame range. The result is written next to the raw file with a .srt
    extension.
    """
    if not os.path.exists(raw_srt_path):
        print('Raw Srt File Do Not Exist')
        return
    # precision used to decide whether two lines are duplicate subtitles
    similarity_threshold = 92
    with open(raw_srt_path,'r',encoding='utf-8') as r:
        lines = r.readlines()
    if not lines:
        # nothing to convert; avoid the IndexError the original raised here
        print('Raw Srt File Is Empty')
        return
    # `subtitles` replaces the original variable named `list`,
    # which shadowed the builtin
    subtitles = [lines[0]]
    for line in lines[1:]:
        current_frame = line.split('\t')[0]
        current_position = line.split('\t')[1]
        current_content = line.split('\t')[2]
        similarity = fuzz.ratio(current_content, subtitles[-1].split('\t')[2])
        if similarity < similarity_threshold:
            # new subtitle: start its frame range where the previous one ended
            # (a correction network could be plugged in here)
            subtitles.append(subtitles[-1].split('\t')[0].split('-')[-1] + '-' + current_frame + '\t' + current_position + '\t' + current_content)
        else:
            # duplicate subtitle: extend the frame range of the previous entry
            subtitles[-1] = subtitles[-1].split('\t')[0] + '-' + current_frame + '\t' + current_position + '\t' + current_content
    print('Duplicates are all removed')
    with open(raw_srt_path.split('.txt')[0] + '.srt', 'w',encoding='utf-8') as f:
        for line_num, entry in enumerate(subtitles, start=1):
            time_start = frames_to_timecode(fps, int(entry.split('\t')[0].split('-')[0]))
            time_end = frames_to_timecode(fps, int(entry.split('\t')[0].split('-')[-1]))
            content = entry.split('\t')[2]
            f.write(str(line_num) + '\n' + time_start + ' --> ' + time_end + '\n' + content + '\n')
    print('Srt File Was Generated at: ',raw_srt_path.split('.txt')[0])
def main(argv=None):
    """Run the full video -> subtitle pipeline: extract frames, detect text,
    crop and OCR it, filter watermarks/non-subtitle text, and emit a .srt."""
    clear_buff()
    videopath = input("please input your video file path name:").strip()
    fps = video_to_frames(videopath)
    text_detect()
    to_textImg()
    to_raw_srt(FLAGS.ocr_path, FLAGS.srt_path)
    to_srt_path = FLAGS.srt_path + '\\to_srt.txt'
    # interactive cleanup steps (each asks for confirmation on stdin)
    delete_waterprint(to_srt_path, detect_waterprint(to_srt_path))
    nonsubtitle_filter(to_srt_path, detect_subtitle_area(to_srt_path))
    generate_srtfile(to_srt_path, fps)
if __name__ == '__main__':
    tf.app.run()
|
|
'''
Video game description language -- plotting functions.
@author: Tom Schaul
'''
import pylab
from scipy import ones
from pylab import cm
from random import random
def featurePlot(size, states, fMap, plotdirections=False):
    """ Visualize a feature that maps each state in a maze to a continuous value.
    If the states depend on the agent's current orientation, they are split into 4.
    Optionally indicate this orientation on the plot too.
    Black corresponds to non-state positions. """
    from ontology import LEFT, RIGHT, UP, DOWN
    # states with more than 3 components carry an orientation ("polar");
    # each maze cell is then drawn as a 2x2 block, one quadrant per direction
    if len(states[0]) > 3:
        polar = True
        M = ones((size[0] * 2, size[1] * 2))
        offsets = {LEFT: (1, 0),
                   UP: (0, 0),
                   RIGHT: (0, 1),
                   DOWN: (1, 1)}
    else:
        polar = False
        M = ones(size)
    cmap = cm.RdGy # @UndefinedVariable
    # value range chosen so non-state cells (initialised to vmin) sit at one
    # extreme of the colormap
    vmax = -min(fMap) + (max(fMap) - min(fMap)) * 1
    vmin = -max(fMap)
    M *= vmin
    # fill in the feature value of every state
    for si, s in enumerate(states):
        obs = fMap[si]
        if polar:
            x, y, d = s[:3]
            o1, o2 = offsets[d]
            M[2 * x + o1, 2 * y + o2] = obs
        else:
            x, y = s[:2]
            M[x, y] = obs
    pylab.imshow(-M.T, cmap=cmap, interpolation='nearest', vmin=vmin, vmax=vmax)
    if polar and plotdirections:
        # draw grid lines between cells, then a small stroke per orientation
        for i in range(1, size[0]):
            pylab.plot([i * 2 - 0.5] * 2, [2 - 0.5, (size[1] - 1) * 2 - 0.5], 'k')
        for i in range(1, size[1]):
            pylab.plot([2 - 0.49, (size[0] - 1) * 2 - 0.49], [i * 2 - 0.49] * 2, 'k')
        for s in states:
            x, y, d = s[:3]
            o1, o2 = offsets[d]
            # NOTE(review): d is used both as an offsets key and indexed as a
            # 2-vector here, so directions appear to be (dx, dy) tuples — confirm
            pylab.plot([o1 + 2 * x, o1 + 2 * x + d[0] * 0.4], [o2 + 2 * y, o2 + 2 * y + d[1] * 0.4], 'k-')
            pylab.plot([o1 + 2 * x], [o2 + 2 * y], 'k.')
    if polar:
        pylab.xlim(-0.5, size[0] * 2 - 0.5)
        pylab.ylim(-0.5, size[1] * 2 - 0.5)
    else:
        pylab.xlim(-0.5, size[0] - 0.5)
        pylab.ylim(-0.5, size[1] - 0.5)
    pylab.xticks([])
    pylab.yticks([])
def addTrajectory(state_seq, color='r'):
    """ Draw the trajectory corresponding to a sequence of states on top of a featureplot. """
    def jitter_coords(state):
        # small random offset so overlapping visits stay distinguishable
        px, py = state[:2]
        px += random() * 0.6 - 0.3
        py += random() * 0.6 - 0.3
        if len(state) > 3:
            # oriented states live on the doubled (2x2 block) grid
            px = px * 2 + 0.5
            py = py * 2 + 0.5
        return px, py
    prev_x, prev_y = jitter_coords(state_seq[0])
    for state in state_seq[1:]:
        cur_x, cur_y = jitter_coords(state)
        pylab.plot([prev_x, cur_x], [prev_y, cur_y], '.-' + color)
        prev_x, prev_y = cur_x, cur_y
|
|
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from copy import deepcopy
import logging
import traceback
import sys
from ma_policy.variable_schema import VariableSchema, BATCH, TIMESTEPS
from ma_policy.util import shape_list
from ma_policy.layers import (entity_avg_pooling_masked, entity_max_pooling_masked,
entity_concat, concat_entity_masks, residual_sa_block,
circ_conv1d)
logger = logging.getLogger(__name__)
def construct_tf_graph(all_inputs, spec, act, scope='', reuse=False,):
    '''
    Construct tensorflow graph from spec. See mas/ppo/base-architectures.jsonnet for examples.
    Args:
        main_inp (tf) -- input activations
        other_inp (dict of tf) -- other input activations such as state
        spec (list of dicts) -- network specification. see Usage below
        scope (string) -- tf variable scope
        reuse (bool) -- tensorflow reuse flag
    Returns:
        (node dict, OrderedDict of LSTM state output tensors, list of reset ops
        -- currently always empty)
    Usage:
        Each layer spec has optional arguments: nodes_in and nodes_in. If these arguments
        are omitted, then the default in and out nodes will be 'main'. For layers such as
        concatentation, these arguments must be specified.
        Dense layer (MLP) --
            {
                'layer_type': 'dense'
                'units': int (number of neurons)
                'activation': 'relu', 'tanh', or '' for no activation
            }
        LSTM layer --
            {
                'layer_type': 'lstm'
                'units': int (hidden state size)
            }
        Concat layer --
            Two use cases.
            First: the first input has one less dimension than the second input. In this case,
                broadcast the first input along the second to last dimension and concatenated
                along last dimension
            Second: Both inputs have the same dimension, and will be concatenated along last
                dimension
            {
                'layer_type': 'concat'
                'nodes_in': ['node_one', 'node_two']
                'nodes_out': ['node_out']
            }
        Entity Concat Layer --
            Concatenate along entity dimension (second to last)
            {
                'layer_type': 'entity_concat'
                'nodes_in': ['node_one', 'node_two']
                'nodes_out': ['node_out']
            }
        Entity Self Attention --
            Self attention over entity dimension (second to last)
            See policy.utils:residual_sa_block for args
            {
                'layer_type': 'residual_sa_block'
                'nodes_in': ['node_one']
                'nodes_out': ['node_out']
                ...
            }
        Entity Pooling --
            Pooling along entity dimension (second to last)
            {
                'layer_type': 'entity_pooling'
                'nodes_in': ['node_one', 'node_two']
                'nodes_out': ['node_out']
                'type': (optional string, default 'avg_pooling') type of pooling
                    Current options are 'avg_pooling' and 'max_pooling'
            }
        Circular 1d convolution layer (second to last dimension) --
            {
                'layer_type': 'circ_conv1d',
                'filters': number of filters
                'kernel_size': kernel size
                'activation': 'relu', 'tanh', or '' for no activation
            }
        Flatten outer dimension --
            Flatten all dimensions higher or equal to 3 (necessary after conv layer)
            {
                'layer_type': 'flatten_outer',
            }
        Layernorm --
            Layer normalization over the last dimension
            {
                'layer_type': 'layernorm'
            }
    '''
    # Make a new dict to not overwrite input
    inp = {k: v for k, v in all_inputs.items()}
    inp['main'] = inp['observation_self']
    valid_activations = {'relu': tf.nn.relu, 'tanh': tf.tanh, '': None}
    state_variables = OrderedDict()
    logger.info(f"Spec:\n{spec}")
    # maps each entity_concat output node to {input node: slice of entity dim}
    entity_locations = {}
    reset_ops = []
    with tf.variable_scope(scope, reuse=reuse):
        for i, layer in enumerate(spec):
            try:
                # copy so the pops below don't mutate the caller's spec
                layer = deepcopy(layer)
                layer_type = layer.pop('layer_type')
                extra_layer_scope = layer.pop('scope', '')
                nodes_in = layer.pop('nodes_in', ['main'])
                nodes_out = layer.pop('nodes_out', ['main'])
                with tf.variable_scope(extra_layer_scope, reuse=reuse):
                    if layer_type == 'dense':
                        assert len(nodes_in) == len(nodes_out), f"Dense layer must have same number of nodes in as nodes out. \
                            Nodes in: {nodes_in}, Nodes out {nodes_out}"
                        layer['activation'] = valid_activations[layer['activation']]
                        layer_name = layer.pop('layer_name', f'dense{i}')
                        for j in range(len(nodes_in)):
                            inp[nodes_out[j]] = tf.layers.dense(inp[nodes_in[j]],
                                                                name=f'{layer_name}-{j}',
                                                                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                                                reuse=reuse,
                                                                **layer)
                    elif layer_type == 'lstm':
                        layer_name = layer.pop('layer_name', f'lstm{i}')
                        with tf.variable_scope(layer_name, reuse=reuse):
                            assert len(nodes_in) == len(nodes_out) == 1
                            cell = tf.contrib.rnn.BasicLSTMCell(layer['units'])
                            # initial state comes in through all_inputs (see
                            # construct_schemas_zero_state for the matching keys)
                            initial_state = tf.contrib.rnn.LSTMStateTuple(inp[scope + f'_lstm{i}_state_c'],
                                                                          inp[scope + f'_lstm{i}_state_h'])
                            inp[nodes_out[0]], state_out = tf.nn.dynamic_rnn(cell,
                                                                             inp[nodes_in[0]],
                                                                             initial_state=initial_state)
                            state_variables[scope + f'_lstm{i}_state_c'] = state_out.c
                            state_variables[scope + f'_lstm{i}_state_h'] = state_out.h
                    elif layer_type == 'concat':
                        layer_name = layer.pop('layer_name', f'concat{i}')
                        with tf.variable_scope(layer_name):
                            assert len(nodes_out) == 1, f"Concat op must only have one node out. Nodes Out: {nodes_out}"
                            assert len(nodes_in) == 2, f"Concat op must have two nodes in. Nodes In: {nodes_in}"
                            assert (len(shape_list(inp[nodes_in[0]])) == len(shape_list(inp[nodes_in[1]])) or
                                    len(shape_list(inp[nodes_in[0]])) == len(shape_list(inp[nodes_in[1]])) - 1),\
                                f"shapes were {nodes_in[0]}:{shape_list(inp[nodes_in[0]])}, {nodes_in[1]}:{shape_list(inp[nodes_in[1]])}"
                            inp0, inp1 = inp[nodes_in[0]], inp[nodes_in[1]]
                            # tile inp0 along second to last dimension to match inp1
                            if len(shape_list(inp[nodes_in[0]])) == len(shape_list(inp1)) - 1:
                                inp0 = tf.expand_dims(inp[nodes_in[0]], -2)
                                tile_dims = [1 for i in range(len(shape_list(inp0)))]
                                tile_dims[-2] = shape_list(inp1)[-2]
                                inp0 = tf.tile(inp0, tile_dims)
                            inp[nodes_out[0]] = tf.concat([inp0, inp1], -1)
                    elif layer_type == 'entity_concat':
                        layer_name = layer.pop('layer_name', f'entity-concat{i}')
                        with tf.variable_scope(layer_name):
                            ec_inps = [inp[node_in] for node_in in nodes_in]
                            inp[nodes_out[0]] = entity_concat(ec_inps)
                            if "masks_in" in layer:
                                masks_in = [inp[_m] if _m is not None else None for _m in layer["masks_in"]]
                                inp[layer["mask_out"]] = concat_entity_masks(ec_inps, masks_in)
                            # Store where the entities are. We'll store with key nodes_out[0]
                            _ent_locs = {}
                            loc = 0
                            for node_in in nodes_in:
                                shape_in = shape_list(inp[node_in])
                                # rank-4 tensors carry an entity axis; others count as one entity
                                n_ent = shape_in[2] if len(shape_in) == 4 else 1
                                _ent_locs[node_in] = slice(loc, loc + n_ent)
                                loc += n_ent
                            entity_locations[nodes_out[0]] = _ent_locs
                    elif layer_type == 'residual_sa_block':
                        layer_name = layer.pop('layer_name', f'self-attention{i}')
                        with tf.variable_scope(layer_name):
                            assert len(nodes_in) == 1, "self attention should only have one input"
                            sa_inp = inp[nodes_in[0]]
                            mask = inp[layer.pop('mask')] if 'mask' in layer else None
                            internal_layer_name = layer.pop('internal_layer_name', f'residual_sa_block{i}')
                            inp[nodes_out[0]] = residual_sa_block(sa_inp, mask, **layer,
                                                                  scope=internal_layer_name,
                                                                  reuse=reuse)
                    elif layer_type == 'entity_pooling':
                        pool_type = layer.get('type', 'avg_pooling')
                        assert pool_type in ['avg_pooling', 'max_pooling'], f"Pooling type {pool_type} \
                            not available. Pooling type must be either 'avg_pooling' or 'max_pooling'."
                        layer_name = layer.pop('layer_name', f'entity-{pool_type}-pooling{i}')
                        with tf.variable_scope(layer_name):
                            if 'mask' in layer:
                                mask = inp[layer.pop('mask')]
                                assert mask.get_shape()[-1] == inp[nodes_in[0]].get_shape()[-2], \
                                    f"Outer dim of mask must match second to last dim of input. \
                                    Mask shape: {mask.get_shape()}. Input shape: {inp[nodes_in[0]].get_shape()}"
                                if pool_type == 'avg_pooling':
                                    inp[nodes_out[0]] = entity_avg_pooling_masked(inp[nodes_in[0]], mask)
                                elif pool_type == 'max_pooling':
                                    inp[nodes_out[0]] = entity_max_pooling_masked(inp[nodes_in[0]], mask)
                            else:
                                # no mask: plain reduction over the entity axis
                                if pool_type == 'avg_pooling':
                                    inp[nodes_out[0]] = tf.reduce_mean(inp[nodes_in[0]], -2)
                                elif pool_type == 'max_pooling':
                                    inp[nodes_out[0]] = tf.reduce_max(inp[nodes_in[0]], -2)
                    elif layer_type == 'circ_conv1d':
                        assert len(nodes_in) == len(nodes_out) == 1, f"Circular convolution layer must have one nodes and one nodes out. \
                            Nodes in: {nodes_in}, Nodes out {nodes_out}"
                        layer_name = layer.pop('layer_name', f'circ_conv1d{i}')
                        with tf.variable_scope(layer_name, reuse=reuse):
                            inp[nodes_out[0]] = circ_conv1d(inp[nodes_in[0]], **layer)
                    elif layer_type == 'flatten_outer':
                        layer_name = layer.pop('layer_name', f'flatten_outer{i}')
                        with tf.variable_scope(layer_name, reuse=reuse):
                            # flatten all dimensions higher or equal to 3
                            inp0 = inp[nodes_in[0]]
                            inp0_shape = shape_list(inp0)
                            inp[nodes_out[0]] = tf.reshape(inp0, shape=inp0_shape[0:2] + [np.prod(inp0_shape[2:])])
                    elif layer_type == "layernorm":
                        layer_name = layer.pop('layer_name', f'layernorm{i}')
                        with tf.variable_scope(layer_name, reuse=reuse):
                            inp[nodes_out[0]] = tf.contrib.layers.layer_norm(inp[nodes_in[0]], begin_norm_axis=2)
                    else:
                        raise NotImplementedError(f"Layer type -- {layer_type} -- not yet implemented")
            except Exception:
                # dump the failing layer spec before bailing out, since the TF
                # stack trace alone rarely identifies the offending spec entry
                traceback.print_exc(file=sys.stdout)
                print(f"Error in {layer_type} layer: \n{layer}\nNodes in: {nodes_in}, Nodes out: {nodes_out}")
                sys.exit()
    return inp, state_variables, reset_ops
def construct_schemas_zero_state(spec, ob_space, scope=''):
    '''
    Takes a network spec (as specified in construct_tf_graph docstring) and returns
    input schemas and zero states for every LSTM layer in the spec.
    '''
    schemas = OrderedDict()
    zero_states = OrderedDict()
    for i, layer in enumerate(spec):
        layer = deepcopy(layer)
        if layer.pop('layer_type') != 'lstm':
            continue
        # instantiate a throwaway cell just to read its state sizes
        size = tf.contrib.rnn.BasicLSTMCell(layer['units']).state_size
        for part in ('c', 'h'):
            key = scope + f'_lstm{i}_state_{part}'
            dim = getattr(size, part)
            schemas[key] = VariableSchema(shape=[BATCH, dim], dtype=tf.float32)
            zero_states[key] = np.expand_dims(np.zeros(dim, dtype=np.float32), 0)
    return schemas, zero_states
|
|
import nnet
from MVNormal import MVNormal
import theano_helpers
import svn
import random
from DropoutMask import *
|
|
import numpy as np
import seaborn as sns
# Shared plotting configuration for experiment figures.
# Dictionary keys are the (Polish) metric labels used in the raw result files;
# they are runtime identifiers and must not be translated.
palette = sns.color_palette('colorblind')
# Polish metric label -> English display name for figure titles/axes.
metric_en_name = {
    'Błąd aproksymacji (AE) prawdopodobieństwa a posteriori': 'Approximation error for posterior',
    r'Błąd estymacji częstości etykietowania': 'Label frequency estimation error',
    r'Błąd estymacji prawdopodobieństwa a priori': 'Prior probability estimation error',
    'AUC': 'AUC',
    'Czas wykonania': 'Training time',
    'Iteracje metody': 'Number of iterations',
    'Ewaluacje funkcji w trakcie optymalizacji': 'Number of evaluations',
}
# NOTE(review): despite the name, error/time metrics map to True while AUC maps
# to False; together with best_function (min for errors, max for AUC) this
# suggests the flag means "lower is better" — confirm against call sites.
is_metric_increasing = {
    'Błąd aproksymacji (AE) prawdopodobieństwa a posteriori': True,
    r'Błąd estymacji częstości etykietowania': True,
    r'Błąd estymacji prawdopodobieństwa a priori': True,
    'AUC': False,
    'Czas wykonania': True,
    'Iteracje metody': True,
    'Ewaluacje funkcji w trakcie optymalizacji': True,
}
# Per-metric y-axis limits; (None, None) lets matplotlib autoscale.
metric_ylim = {
    'Błąd aproksymacji (AE) prawdopodobieństwa a posteriori': (0, 0.5),
    r'Błąd estymacji częstości etykietowania': (0, 0.5),
    r'Błąd estymacji prawdopodobieństwa a priori': (0, 0.5),
    'AUC': (None, None),
    'Czas wykonania': (None, None),
    'Iteracje metody': (None, None),
    'Ewaluacje funkcji w trakcie optymalizacji': (None, None),
}
# Short metric codes (used e.g. in file names / table headers).
metric_short_name = {
    'Błąd aproksymacji (AE) prawdopodobieństwa a posteriori': 'AE',
    r'Błąd estymacji częstości etykietowania': 'LFE',
    r'Błąd estymacji prawdopodobieństwa a priori': 'CPE',
    'AUC': 'AUC',
    'Czas wykonania': 'time',
    'Iteracje metody': 'it',
    'Ewaluacje funkcji w trakcie optymalizacji': 'ev',
}
# Aggregation used to select the best value of each metric
# (min for errors/time/iterations, max for AUC).
best_function = {
    'Błąd aproksymacji (AE) prawdopodobieństwa a posteriori': np.min,
    r'Błąd estymacji częstości etykietowania': np.min,
    r'Błąd estymacji prawdopodobieństwa a priori': np.min,
    'AUC': np.max,
    'Czas wykonania': np.min,
    'Iteracje metody': np.min,
    'Ewaluacje funkcji w trakcie optymalizacji': np.min,
}
# Matplotlib marker/colour style per method name.
marker_styles = {
    'Naive - TIcE': {
        'color': 'brown',
        'marker': 'o',
        'fillstyle': 'full'
    },
    'Naive - EN': {
        'color': 'brown',
        'marker': 'o',
        'fillstyle': 'none'
    },
    'Weighted - TIcE': {
        'color': 'gray',
        'marker': '^',
        'fillstyle': 'full'
    },
    'Weighted - EN': {
        'color': 'gray',
        'marker': '^',
        'fillstyle': 'none'
    },
    'Ward - TIcE': {
        'color': 'gray',
        'marker': '^',
        'fillstyle': 'full'
    },
    'Ward - EN': {
        'color': 'gray',
        'marker': '^',
        'fillstyle': 'none'
    },
    'Joint': {
        'color': 'black',
        'marker': 's',
        'fillstyle': 'none'
    },
    'MM': {
        'color': palette[0],
        'marker': 'h',
        'fillstyle': 'none'
    },
    'DCCP': {
        'color': palette[2],
        'marker': 'X',
        'fillstyle': 'none'
    },
    'CCCP': {
        'color': palette[3],
        'marker': 'D',
        'fillstyle': 'none'
    },
    'Oracle': {
        'color': palette[4],
        'marker': None,
        'fillstyle': 'none'
    },
}
# Aliases: short method names share the style of their long variants.
marker_styles['Ward'] = marker_styles['Ward - EN']
marker_styles['Weighted'] = marker_styles['Weighted - EN']
marker_styles['EN'] = marker_styles['Weighted - EN']
marker_styles['TIcE'] = marker_styles['Weighted - TIcE']
marker_styles['Naive'] = marker_styles['Naive - EN']
# Fixed ordering of methods when several series appear on one plot.
draw_order = [
    'Naive',
    'Naive - EN',
    'Naive - TIcE',
    'Weighted',
    'Weighted - EN',
    'Weighted - TIcE',
    'Weighted (EN)',
    'Weighted (TIcE)',
    'EN',
    'TIcE',
    'Ward',
    'Ward - EN',
    'Ward - TIcE',
    'Joint',
    'MM',
    'CCCP',
    'DCCP',
    'Oracle'
]
|
|
"""Generative Adversarial Networks."""
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph import layers
from collections import Sequence
import numpy as np
import tensorflow as tf
import time
class GAN(TensorGraph):
    """Implements Generative Adversarial Networks.

    A Generative Adversarial Network (GAN) is a type of generative model. It
    consists of two parts called the "generator" and the "discriminator". The
    generator takes random noise as input and transforms it into an output that
    (hopefully) resembles the training data. The discriminator takes a set of
    samples as input and tries to distinguish the real training samples from the
    ones created by the generator. Both of them are trained together: the
    discriminator tries to get better and better at telling real from false
    data, while the generator tries to get better and better at fooling the
    discriminator.

    In many cases there also are additional inputs to the generator and
    discriminator. In that case it is known as a Conditional GAN (CGAN), since
    it learns a distribution that is conditional on the values of those inputs.
    They are referred to as "conditional inputs".

    To define a GAN, you must create a subclass that provides implementations of
    the following methods:

    get_noise_input_shape()
    get_data_input_shapes()
    create_generator()
    create_discriminator()

    If you want your GAN to have any conditional inputs you must also implement:

    get_conditional_input_shapes()

    The following methods have default implementations that are suitable for
    most conventional GANs. You can override them if you want to customize their
    behavior:

    create_generator_loss()
    create_discriminator_loss()
    get_noise_batch()
    """

    def __init__(self, **kwargs):
        """Construct a GAN.

        This class accepts all the keyword arguments from TensorGraph.
        """
        # `collections.Sequence` (the module-level alias imported at the top of
        # this file) was deprecated and removed in Python 3.10; import the ABC
        # from its canonical home so the isinstance check below keeps working.
        from collections.abc import Sequence
        super(GAN, self).__init__(use_queue=False, **kwargs)

        # Create the inputs.
        self.noise_input = layers.Feature(shape=self.get_noise_input_shape())
        self.data_inputs = []
        for shape in self.get_data_input_shapes():
            self.data_inputs.append(layers.Feature(shape=shape))
        self.conditional_inputs = []
        for shape in self.get_conditional_input_shapes():
            self.conditional_inputs.append(layers.Feature(shape=shape))

        # Create the generator and sanity-check its outputs against the data
        # inputs: same count, same shapes.
        self.generator = self.create_generator(self.noise_input,
                                               self.conditional_inputs)
        if not isinstance(self.generator, Sequence):
            raise ValueError('create_generator() must return a list of Layers')
        if len(self.generator) != len(self.data_inputs):
            raise ValueError(
                'The number of generator outputs must match the number of data inputs'
            )
        for g, d in zip(self.generator, self.data_inputs):
            if g.shape != d.shape:
                raise ValueError(
                    'The shapes of the generator outputs must match the shapes of the data inputs'
                )
        for g in self.generator:
            self.add_output(g)

        # Create the discriminator, plus a weight-sharing copy of it that takes
        # the generator's output as its input instead of the real data.
        self.discrim_train = self.create_discriminator(self.data_inputs,
                                                       self.conditional_inputs)
        replacements = {}
        for g, d in zip(self.generator, self.data_inputs):
            replacements[d] = g
        for c in self.conditional_inputs:
            replacements[c] = c
        self.discrim_gen = self.discrim_train.copy(replacements, shared=True)

        # Collect the layers belonging to each network so that each submodel
        # only trains its own variables; layers reachable from the generator
        # are excluded from the discriminator's set.
        def add_layers_to_set(layer, layers):
            if layer not in layers:
                layers.add(layer)
                for i in layer.in_layers:
                    add_layers_to_set(i, layers)

        gen_layers = set()
        for layer in self.generator:
            add_layers_to_set(layer, gen_layers)
        discrim_layers = set()
        add_layers_to_set(self.discrim_train, discrim_layers)
        discrim_layers -= gen_layers

        # Create submodels for training the generator and discriminator.
        gen_loss = self.create_generator_loss(self.discrim_gen)
        discrim_loss = self.create_discriminator_loss(self.discrim_train,
                                                      self.discrim_gen)
        self.generator_submodel = self.create_submodel(
            layers=gen_layers, loss=gen_loss)
        self.discriminator_submodel = self.create_submodel(
            layers=discrim_layers, loss=discrim_loss)

    def get_noise_input_shape(self):
        """Get the shape of the generator's noise input layer.

        Subclasses must override this to return a tuple giving the shape of the
        noise input. The actual Input layer will be created automatically. The
        first dimension must be None, since it will correspond to the batch
        size.
        """
        raise NotImplementedError("Subclasses must implement this.")

    def get_data_input_shapes(self):
        """Get the shapes of the inputs for training data.

        Subclasses must override this to return a list of tuples, each giving
        the shape of one of the inputs. The actual Input layers will be created
        automatically. This list of shapes must also match the shapes of the
        generator's outputs. The first dimension of each shape must be None,
        since it will correspond to the batch size.
        """
        raise NotImplementedError("Subclasses must implement this.")

    def get_conditional_input_shapes(self):
        """Get the shapes of any conditional inputs.

        Subclasses may override this to return a list of tuples, each giving the
        shape of one of the conditional inputs. The actual Input layers will be
        created automatically. The first dimension of each shape must be None,
        since it will correspond to the batch size.

        The default implementation returns an empty list, meaning there are no
        conditional inputs.
        """
        return []

    def get_noise_batch(self, batch_size):
        """Get a batch of random noise to pass to the generator.

        This should return a NumPy array whose shape matches the one returned
        by get_noise_input_shape(). The default implementation returns normally
        distributed values. Subclasses can override this to implement a
        different distribution.
        """
        size = list(self.get_noise_input_shape())
        size[0] = batch_size
        return np.random.normal(size=size)

    def create_generator(self, noise_input, conditional_inputs):
        """Create the generator.

        Subclasses must override this to construct the generator and return its
        output layers.

        Parameters
        ----------
        noise_input: Input
            the Input layer from which the generator can read random noise. The
            shape will match the return value from get_noise_input_shape().
        conditional_inputs: list
            the Input layers for any conditional inputs to the network. The
            number and shapes of these inputs will match the return value from
            get_conditional_input_shapes().

        Returns
        -------
        A list of Layer objects that produce the generator's outputs. The number
        and shapes of these layers must match the return value from
        get_data_input_shapes(), since generated data must have the same form as
        training data.
        """
        raise NotImplementedError("Subclasses must implement this.")

    def create_discriminator(self, data_inputs, conditional_inputs):
        """Create the discriminator.

        Subclasses must override this to construct the discriminator and return
        its output layer.

        Parameters
        ----------
        data_inputs: list
            the Input layers from which the discriminator can read the input
            data. The number and shapes of these inputs will match the return
            value from get_data_input_shapes(). The samples read from these
            layers may be either training data or generated data.
        conditional_inputs: list
            the Input layers for any conditional inputs to the network. The
            number and shapes of these inputs will match the return value from
            get_conditional_input_shapes().

        Returns
        -------
        A Layer object that outputs the probability of each sample being a
        training sample. The shape of this layer must be [None]. That is, it
        must output a one dimensional tensor whose length equals the batch size.
        """
        raise NotImplementedError("Subclasses must implement this.")

    def create_generator_loss(self, discrim_output):
        """Create the loss function for the generator.

        The default implementation is appropriate for most cases. Subclasses
        can override this if they need to customize it.

        Parameters
        ----------
        discrim_output: Layer
            the output from the discriminator on a batch of generated data.
            This is its estimate of the probability that each sample is
            training data.

        Returns
        -------
        A Layer object that outputs the loss function to use for optimizing the
        generator.
        """
        # 1e-10 guards against log(0).
        return -layers.ReduceMean(layers.Log(discrim_output + 1e-10))

    def create_discriminator_loss(self, discrim_output_train, discrim_output_gen):
        """Create the loss function for the discriminator.

        The default implementation is appropriate for most cases. Subclasses
        can override this if they need to customize it.

        Parameters
        ----------
        discrim_output_train: Layer
            the output from the discriminator on a batch of training data. This
            is its estimate of the probability that each sample is training
            data.
        discrim_output_gen: Layer
            the output from the discriminator on a batch of generated data.
            This is its estimate of the probability that each sample is
            training data.

        Returns
        -------
        A Layer object that outputs the loss function to use for optimizing the
        discriminator.
        """
        # Standard GAN cross-entropy loss; 1e-10 guards against log(0).
        training_data_loss = layers.Log(discrim_output_train + 1e-10)
        gen_data_loss = layers.Log(1 - discrim_output_gen + 1e-10)
        return -layers.ReduceMean(training_data_loss + gen_data_loss)

    def fit_gan(self,
                batches,
                generator_steps=1.0,
                max_checkpoints_to_keep=5,
                checkpoint_interval=1000,
                restore=False):
        """Train this model on data.

        Parameters
        ----------
        batches: iterable
            batches of data to train the discriminator on, each represented as
            a dict that maps Layers to values. It should specify values for all
            members of data_inputs and conditional_inputs.
        generator_steps: float
            the number of training steps to perform for the generator for each
            batch. This can be used to adjust the ratio of training steps for
            the generator and discriminator. For example, 2.0 will perform two
            training steps for every batch, while 0.5 will only perform one
            training step for every two batches.
        max_checkpoints_to_keep: int
            the maximum number of checkpoints to keep. Older checkpoints are
            discarded.
        checkpoint_interval: int
            the frequency at which to write checkpoints, measured in batches.
            Set this to 0 to disable automatic checkpointing.
        restore: bool
            if True, restore the model from the most recent checkpoint before
            training it.
        """
        if not self.built:
            self.build()
        if restore:
            self.restore()
        gen_train_fraction = 0.0
        discrim_error = 0.0
        gen_error = 0.0
        discrim_average_steps = 0
        gen_average_steps = 0
        time1 = time.time()
        with self._get_tf("Graph").as_default():
            if checkpoint_interval > 0:
                saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
            for feed_dict in batches:
                # Every call to fit_generator() will increment global_step, but
                # we only want it to get incremented once for the entire batch,
                # so record the value and keep resetting it.
                global_step = self.global_step
                # Train the discriminator.
                feed_dict = dict(feed_dict)
                feed_dict[self.noise_input] = self.get_noise_batch(self.batch_size)
                discrim_error += self.fit_generator(
                    [feed_dict],
                    submodel=self.discriminator_submodel,
                    checkpoint_interval=0)
                self.global_step = global_step
                discrim_average_steps += 1
                # Train the generator; the fractional accumulator implements
                # generator_steps ratios below and above 1.0.
                if generator_steps > 0.0:
                    gen_train_fraction += generator_steps
                    while gen_train_fraction >= 1.0:
                        feed_dict[self.noise_input] = self.get_noise_batch(
                            self.batch_size)
                        gen_error += self.fit_generator(
                            [feed_dict],
                            submodel=self.generator_submodel,
                            checkpoint_interval=0)
                        self.global_step = global_step
                        gen_average_steps += 1
                        gen_train_fraction -= 1.0
                self.global_step = global_step + 1
                # Write checkpoints and report progress.
                if discrim_average_steps == checkpoint_interval:
                    saver.save(self.session, self.save_file, global_step=self.global_step)
                    discrim_loss = discrim_error / max(1, discrim_average_steps)
                    gen_loss = gen_error / max(1, gen_average_steps)
                    print(
                        'Ending global_step %d: generator average loss %g, discriminator average loss %g'
                        % (self.global_step, gen_loss, discrim_loss))
                    discrim_error = 0.0
                    gen_error = 0.0
                    discrim_average_steps = 0
                    gen_average_steps = 0
            # Write out final results.
            if checkpoint_interval > 0:
                if discrim_average_steps > 0 and gen_average_steps > 0:
                    discrim_loss = discrim_error / discrim_average_steps
                    gen_loss = gen_error / gen_average_steps
                    print(
                        'Ending global_step %d: generator average loss %g, discriminator average loss %g'
                        % (self.global_step, gen_loss, discrim_loss))
                saver.save(self.session, self.save_file, global_step=self.global_step)
        time2 = time.time()
        print("TIMING: model fitting took %0.3f s" % (time2 - time1))

    def predict_gan_generator(self,
                              batch_size=1,
                              noise_input=None,
                              conditional_inputs=None):
        """Use the GAN to generate a batch of samples.

        Parameters
        ----------
        batch_size: int
            the number of samples to generate. If either noise_input or
            conditional_inputs is specified, this argument is ignored since the
            batch size is then determined by the size of that argument.
        noise_input: array
            the value to use for the generator's noise input. If None (the
            default), get_noise_batch() is called to generate a random input,
            so each call will produce a new set of samples.
        conditional_inputs: list of arrays
            the values to use for all conditional inputs. This must be
            specified if the GAN has any conditional inputs.

        Returns
        -------
        An array (if the generator has only one output) or list of arrays (if
        it has multiple outputs) containing the generated samples.
        """
        # None-sentinel instead of a shared mutable [] default; behavior is
        # unchanged for all callers.
        if conditional_inputs is None:
            conditional_inputs = []
        if noise_input is not None:
            batch_size = len(noise_input)
        elif len(conditional_inputs) > 0:
            batch_size = len(conditional_inputs[0])
        if noise_input is None:
            noise_input = self.get_noise_batch(batch_size)
        batch = {}
        batch[self.noise_input] = noise_input
        for layer, value in zip(self.conditional_inputs, conditional_inputs):
            batch[layer] = value
        return self.predict_on_generator([batch])

    def _set_empty_inputs(self, feed_dict, layers):
        """Set entries in a feed dict corresponding to a batch size of 0."""
        for layer in layers:
            shape = list(layer.shape)
            shape[0] = 0
            feed_dict[layer] = np.zeros(shape)
class WGAN(GAN):
    """Wasserstein Generative Adversarial Network.

    Implements the WGAN of Arjovsky et al., "Wasserstein GAN"
    (https://arxiv.org/abs/1701.07875). Conceptually the discriminator (the
    "critic") is reinterpreted as learning an approximation to the Earth Mover
    distance between the training and generated distributions, and the
    generator is trained to minimize that distance. In practice this just means
    slightly different loss functions for the two networks.

    WGANs have theoretical advantages over conventional GANs and often work
    better in practice; the critic's loss can also be read directly as a
    measure of model quality, which a conventional GAN's loss cannot.

    The underlying theory requires the critic's gradient to be bounded. The
    original paper clipped weights; this class instead adds a gradient-penalty
    term to the critic's loss as described in
    https://arxiv.org/abs/1704.00028, which is often found to work better.

    Practical notes: unlike a conventional GAN, the critic's output should be
    unbounded (a distance, not a probability). Also prefer a smaller
    generator_steps when training: the better the critic is, the easier it is
    for the generator to improve, so several critic steps per generator step
    usually work best.
    """

    def __init__(self, gradient_penalty=10.0, **kwargs):
        """Construct a WGAN.

        In addition to the following, this class accepts all the keyword
        arguments from TensorGraph.

        Parameters
        ----------
        gradient_penalty: float
            the magnitude of the gradient penalty loss
        """
        super(WGAN, self).__init__(**kwargs)
        self.gradient_penalty = gradient_penalty

    def create_generator_loss(self, discrim_output):
        # The generator minimizes the critic's mean score on generated samples.
        return layers.ReduceMean(discrim_output)

    def create_discriminator_loss(self, discrim_output_train, discrim_output_gen):
        # Earth-mover distance estimate plus the gradient penalty term.
        penalty = GradientPenaltyLayer(discrim_output_train, self)
        distance = layers.ReduceMean(discrim_output_train - discrim_output_gen)
        return penalty + distance
class GradientPenaltyLayer(layers.Layer):
    """Implements the gradient penalty loss term for WGANs."""

    def __init__(self, discrim_output_train, gan):
        super(GradientPenaltyLayer, self).__init__([discrim_output_train])
        self.gan = gan

    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        # One gradient tensor per data input: d(critic output)/d(input).
        grads = tf.gradients(self.in_layers[0], self.gan.data_inputs)
        squared_norm = 0.0
        for grad in grads:
            sq = tf.square(grad)
            ndims = len(grad.shape)
            if ndims > 1:
                # Collapse every non-batch axis into a per-sample scalar.
                sq = tf.reduce_sum(sq, axis=list(range(1, ndims)))
            squared_norm += sq
        # Penalize deviation of the gradient norm from 1.
        deviation = tf.sqrt(squared_norm) - 1.0
        self.out_tensor = self.gan.gradient_penalty * tf.reduce_mean(tf.square(deviation))
        return self.out_tensor
|
|
from operator import index
import os
import subprocess
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
import numpy as np
import pysam
from scipy.io import mmwrite
from scipy.sparse import coo_matrix
import celescope.tools.utils as utils
from celescope.__init__ import HELP_DICT
from celescope.tools.step import Step, s_common
from celescope.rna.mkref import parse_genomeDir_rna
# Minimum number of alt-supporting cells a variant needs before the Otsu
# threshold is used; below this, 20% of the median alt count is used instead
# (see Variant_calling.get_UMI).
OTSU_READ_MIN = 30
def parse_vcf(vcf_file, cols=('chrom', 'pos', 'alleles',), infos=('VID',)):
    '''
    Read a VCF file into a pandas DataFrame, one row per record.

    `cols` are record attributes copied verbatim; `infos` are INFO-field keys.
    When 'alleles' is among the requested columns, derived 'ref' and 'alt'
    columns are added right after it ('.' when the record carries no alternate
    allele; multiple alternates are comma-joined).
    '''
    vcf = pysam.VariantFile(vcf_file)
    rows = []
    for record in vcf.fetch():
        row = {}
        for col in cols:
            row[col] = getattr(record, col)
            if col == 'alleles':
                # alleles == (ref,) when ref == alt, otherwise (ref, alt, ...)
                alleles = row['alleles']
                row['ref'] = alleles[0]
                row['alt'] = ','.join(alleles[1:]) if len(alleles) >= 2 else '.'
        for key in infos:
            row[key] = record.info[key]
        rows.append(row)
    vcf.close()
    return pd.DataFrame.from_dict(rows)
def read_CID(CID_file):
    """Load the CID table; return (all rows, rows whose 'valid' column is 'True').

    The file is read with dtype=object, so 'valid' holds the strings
    'True'/'False' rather than booleans.
    """
    full = pd.read_csv(CID_file, sep='\t', index_col=0, dtype=object)
    valid_mask = full['valid'] == 'True'
    return full, full[valid_mask]
@utils.add_log
def call_snp(CID, outdir, fasta):
    """Run the per-cell variant-calling pipeline with samtools/bcftools.

    CID: numeric cell ID; outdir: step output directory; fasta: reference
    genome fasta. Produces, under {outdir}/cells/cell{CID}/: a sorted bam, a
    bcf pileup, a variants-only normalized vcf and an all-positions normalized
    vcf.

    NOTE(review): commands are interpolated into shell strings and run with
    shell=True; this is safe only while CID/outdir/fasta come from trusted
    internal code, never from user input.
    """
    call_snp.logger.info('Processing Cell %s' % CID)
    bam = f'{outdir}/cells/cell{CID}/cell{CID}.bam'
    sorted_bam = f'{outdir}/cells/cell{CID}/cell{CID}_sorted.bam'
    cmd_sort = (
        f'samtools sort {bam} -o {sorted_bam}'
    )
    subprocess.check_call(cmd_sort, shell=True)
    # mpileup: per-position pileup of the sorted bam against the reference
    bcf = f'{outdir}/cells/cell{CID}/cell{CID}.bcf'
    cmd_mpileup = (
        f'bcftools mpileup -Ou '
        f'-f {fasta} '
        f'{sorted_bam} -o {bcf} '
    )
    subprocess.check_call(cmd_mpileup, shell=True)
    # call variant sites only (-mv)
    out_vcf = f'{outdir}/cells/cell{CID}/cell{CID}.vcf'
    cmd_call = (
        f'bcftools call -mv -Ov '
        f'-o {out_vcf} '
        f'{bcf}'
        f'>/dev/null 2>&1 '
    )
    subprocess.check_call(cmd_call, shell=True)
    # norm: normalize records against the reference, keep duplicates (-d none)
    norm_vcf = f'{outdir}/cells/cell{CID}/cell{CID}_norm.vcf'
    cmd_norm = (
        f'bcftools norm -d none '
        f'-f {fasta} '
        f'{out_vcf} '
        f'-o {norm_vcf} '
    )
    subprocess.check_call(cmd_norm, shell=True)
    # call all positions (-m without -v); needed later for ref-supporting UMIs
    out_all_vcf = f'{outdir}/cells/cell{CID}/cell{CID}_all.vcf'
    cmd_all_call = (
        f'bcftools call -m -Ov '
        f'-o {out_all_vcf} '
        f'{bcf}'
        f'>/dev/null 2>&1 '
    )
    subprocess.check_call(cmd_all_call, shell=True)
    # norm the all-positions vcf the same way
    norm_all_vcf = f'{outdir}/cells/cell{CID}/cell{CID}_all_norm.vcf'
    cmd_all_norm = (
        f'bcftools norm -d none '
        f'-f {fasta} '
        f'{out_all_vcf} '
        f'-o {norm_all_vcf} '
    )
    subprocess.check_call(cmd_all_norm, shell=True)
def map_vcf_row(row, df_cell_vcf):
    """
    Return (ref_UMI, alt_UMI) for one merged-vcf variant within a single cell.

    row: one row of the merged vcf DataFrame (needs 'chrom', 'pos', 'alt').
    df_cell_vcf: the cell's all-position vcf parsed with the DP4 info field.
    A count of 0 is returned for an allele with no matching record.
    """
    same_site = (df_cell_vcf['pos'] == row['pos']) & (df_cell_vcf['chrom'] == row['chrom'])
    df_pos = df_cell_vcf[same_site]
    # '.' marks a reference-only record; the variant record carries the alt allele
    df_ref = df_pos[df_pos['alt'] == '.']
    df_alt = df_pos[df_pos['alt'] == row['alt']]
    ref_UMI = get_DP4(df_ref, 'ref') if df_ref.shape[0] != 0 else 0
    alt_UMI = get_DP4(df_alt, 'alt') if df_alt.shape[0] != 0 else 0
    return ref_UMI, alt_UMI
def get_DP4(row, alt):
    """Sum the DP4 counts supporting one allele from a single-record DataFrame.

    row: a one-row DataFrame slice carrying a 'DP4' column; DP4 is bcftools'
    INFO tuple (ref-forward, ref-reverse, alt-forward, alt-reverse).
    alt: 'ref' to sum the first two entries, 'alt' to sum the last two.

    Raises ValueError for any other `alt` value (previously this fell through
    to an unbound-variable NameError).
    """
    DP4 = row['DP4'].iloc[0]
    if alt == 'ref':
        indexs = [0, 1]
    elif alt == 'alt':
        indexs = [2, 3]
    else:
        raise ValueError(f"alt must be 'ref' or 'alt', got {alt!r}")
    umi = sum(DP4[index] for index in indexs)
    return umi
@utils.add_log
def cell_UMI(CID, outdir, df_vcf):
    """
    Count ref/alt UMIs for every merged variant within one cell.

    Reads the cell's all-position normalized vcf and, for each row of the
    merged-vcf DataFrame `df_vcf`, sums its DP4 counts via map_vcf_row.
    Returns a DataFrame with columns VID, CID, ref_count, alt_count; variants
    with no coverage at all in this cell are skipped.
    """
    cell_UMI.logger.info(str(CID) + ' cell')
    norm_all_vcf = f'{outdir}/cells/cell{CID}/cell{CID}_all_norm.vcf'
    df_cell_vcf = parse_vcf(norm_all_vcf, infos=['DP4'])
    dict_list = []
    for _index, row in df_vcf.iterrows():
        ref_UMI, alt_UMI = map_vcf_row(row, df_cell_vcf)
        # keep only variants covered by at least one UMI in this cell
        if (ref_UMI + alt_UMI) != 0:
            VID = row['VID']
            dic = {
                'VID': VID,
                'CID': CID,
                'ref_count': ref_UMI,
                'alt_count': alt_UMI,
            }
            dict_list.append(dic)
    df_UMI = pd.DataFrame(dict_list)
    return df_UMI
class Variant_calling(Step):
"""
Features
- Perform variant calling.
Output
`{sample}_VID.tsv` A unique numeric ID is assigned for each variant.
`{sample}_CID.tsv` A unique numeric ID is assigned for each cell.
`{sample}_variant_count.tsv` Reference and variant supporting reads/UMIs count.
`{sample}_support.mtx` Support matrix, only high quality bases are considered.
0 : no reads/UMIs cover the position.
1 : all reads/UMIs at the position support the ref allele.
2 : all reads/UMIs at the position support the alt allele.
3 : one or more reads/UMIs support both the alt and the ref allele.
"""
    def __init__(self, args, step_name):
        # Set up inputs (matched barcodes, reference fasta) and all output paths.
        Step.__init__(self, args, step_name)
        self.barcodes, _num = utils.read_barcode_file(args.match_dir)  # matched cell barcodes
        self.fasta = parse_genomeDir_rna(args.genomeDir)['fasta']  # reference genome fasta
        self.df_vcf = None  # NOTE(review): assigned but never updated in the visible methods; confirm still needed
        self.splitN_bam = f'{self.out_prefix}_splitN.bam'
        self.CID_file = f'{self.out_prefix}_CID.tsv'
        self.VID_file = f'{self.out_prefix}_VID.tsv'
        self.merged_vcf_file = f'{self.out_prefix}_merged.vcf'
        self.filter_vcf_file = f'{self.out_prefix}_filter.vcf'
        self.variant_count_file = f'{self.out_prefix}_variant_count.tsv'
        self.otsu_dir = f'{self.out_prefix}_otsu/'
        self.otsu_threshold_file = f'{self.otsu_dir}/{self.sample}_otsu_threshold.tsv'
        self.filter_variant_count_file = f'{self.out_prefix}_filter_variant_count.tsv'
        self.summarize_capture_vid = f'{self.out_prefix}_variant_ncell.tsv'
        # the Otsu plot/threshold directory is only needed in 'auto' mode
        if args.min_support_read == 'auto':
            utils.check_mkdir(self.otsu_dir)
        self.support_matrix_file = f'{self.out_prefix}_support.mtx'
    @utils.add_log
    def SplitNCigarReads(self):
        """Run GATK SplitNCigarReads on the input bam (splits reads with N in
        their CIGAR, as required before RNA-seq variant calling)."""
        cmd = (
            f'gatk '
            f'SplitNCigarReads '
            f'-R {self.fasta} '
            f'-I {self.args.bam} '
            f'-O {self.splitN_bam} '
        )
        Variant_calling.SplitNCigarReads.logger.info(cmd)
        subprocess.check_call(cmd, shell=True)
    @utils.add_log
    def split_bam(self):
        '''
        Split the splitN bam into one bam per matched cell barcode and write
        the CID table (CID -> barcode, valid flag) to self.CID_file.

        input:
            bam: bam from splitN
            barcodes: cell barcodes, list
        ouput:
            bam_dict: assign reads to cell barcodes and UMI
            count_dict: UMI counts per cell
            CID: assign ID(1-based) to cells
        '''
        # init
        bam_dict = defaultdict(list)
        CID_dict = defaultdict(dict)
        cells_dir = f'{self.outdir}/cells/'
        # read bam and bucket reads by their cell barcode (CB tag)
        samfile = pysam.AlignmentFile(self.splitN_bam, "rb")
        header = samfile.header
        for read in samfile:
            try:
                barcode = read.get_tag('CB')
            except KeyError:
                # read carries no cell barcode tag; skip it
                continue
            if barcode in self.barcodes:
                # CID is 1-based and follows the order of self.barcodes
                CID = self.barcodes.index(barcode) + 1
                read.set_tag(tag='CL', value=f'CELL{CID}', value_type='Z')
                # assign read to barcode
                bam_dict[barcode].append(read)
        samfile.close()
        self.split_bam.logger.info('writing cell bam...')
        # write one bam per barcode; barcodes with no reads are marked invalid
        CID = 0
        for barcode in self.barcodes:
            # init
            CID += 1
            CID_dict[CID]['barcode'] = barcode
            CID_dict[CID]['valid'] = False
            # out bam
            if barcode in bam_dict:
                cell_dir = f'{cells_dir}/cell{CID}'
                cell_bam_file = f'{cell_dir}/cell{CID}.bam'
                if not os.path.exists(cell_dir):
                    os.makedirs(cell_dir)
                CID_dict[CID]['valid'] = True
                cell_bam = pysam.AlignmentFile(
                    f'{cell_bam_file}', "wb", header=header)
                for read in bam_dict[barcode]:
                    cell_bam.write(read)
                cell_bam.close()
        # out CID
        df_CID = pd.DataFrame(CID_dict).T
        df_CID.index.name = 'CID'
        df_CID.to_csv(self.CID_file, sep='\t')
@utils.add_log
def call_all_snp(self):
all_res = []
_df_index, df_valid = self.read_CID()
CID_arg = df_valid.index
outdir_arg = [self.outdir] * len(CID_arg)
fasta_arg = [self.fasta] * len(CID_arg)
with ProcessPoolExecutor(self.thread) as pool:
for res in pool.map(call_snp, CID_arg, outdir_arg, fasta_arg):
all_res.append(res)
    def read_CID(self):
        # Delegate to the module-level read_CID on this sample's CID file;
        # returns (all rows, valid rows).
        return read_CID(self.CID_file)
    @utils.add_log
    def merge_vcf(self):
        '''
        merge cell vcf into one non-duplicated vcf
        add VID(variant ID) and CID(cell ID)
        '''
        _df_index, df_valid = self.read_CID()
        CIDs = df_valid.index
        # collect unique variants across all cells, keyed by "chrom,pos,alleles"
        v_cols = ['chrom', 'pos', 'alleles']
        v_dict = {}
        for CID in CIDs:
            CID = str(CID)
            vcf_file = f'{self.outdir}/cells/cell{CID}/cell{CID}_norm.vcf'
            vcf = pysam.VariantFile(vcf_file, 'r')
            for rec in vcf.fetch():
                v = ','.join([str(getattr(rec, col)) for col in v_cols])
                if not v in v_dict:
                    # first cell carrying this variant; keep its record as template
                    v_dict[v] = dict()
                    v_dict[v]['CID'] = [CID]
                    v_dict[v]['record'] = rec
                else:
                    v_dict[v]['CID'].append(CID)
            vcf.close()
        # output
        def get_vcf_header(CIDs):
            # borrow the header from the first valid cell's vcf
            CID = CIDs[0]
            vcf_file = f'{self.outdir}/cells/cell{CID}/cell{CID}_norm.vcf'
            vcf = pysam.VariantFile(vcf_file, 'r')
            header = vcf.header
            vcf.close()
            return header
        vcf_header = get_vcf_header(CIDs)
        vcf_header.info.add('VID', number=1, type='String', description='Variant ID')
        vcf_header.info.add('CID', number=1, type='String', description='Cell ID')
        merged_vcf = pysam.VariantFile(self.merged_vcf_file, 'w', header=vcf_header)
        # assign VIDs in sorted (string) key order so numbering is deterministic
        VID = 0
        for v in sorted(v_dict.keys()):
            VID += 1
            rec = v_dict[v]['record']
            CID = ','.join(v_dict[v]['CID'])
            record = merged_vcf.new_record()
            cols = ['chrom', 'pos', 'alleles']
            for col in cols:
                setattr(record, col, getattr(rec, col))
            record.info['VID'] = str(VID)
            record.info['CID'] = CID
            merged_vcf.write(record)
        merged_vcf.close()
@utils.add_log
def write_VID_file(self):
df_vcf = parse_vcf(self.merged_vcf_file)
df_VID = df_vcf.loc[:, ['VID', 'chrom', 'pos', 'ref', 'alt']]
df_VID.to_csv(self.VID_file, sep='\t', index=False)
    def otsu_threshold(self, array):
        # NOTE(review): self.otsu_plot is not set anywhere in the visible
        # __init__ (get_UMI builds its plot path inline instead) — this helper
        # looks stale/dead; confirm it still has callers before relying on it.
        threshold = utils.otsu_min_support_read(array, self.otsu_plot)
        return threshold
    @utils.add_log
    def get_UMI(self):
        '''
        get variant and ref UMI supporting an allele

        Counts ref/alt UMIs per (cell, variant) in parallel, writes the raw
        counts, applies the min_support_read filter (per-variant Otsu or
        median-based threshold in 'auto' mode, a fixed cutoff otherwise), and
        writes both the filtered count table and a per-variant cell summary.
        '''
        _df_index, df_valid = self.read_CID()
        df_UMI_list = []
        CID_arg = list(df_valid.index)
        outdir_arg = [self.outdir] * len(CID_arg)
        df_vcf = parse_vcf(self.merged_vcf_file)
        vcf_arg = [df_vcf] * len(CID_arg)
        # per-cell UMI counting in parallel
        with ProcessPoolExecutor(self.thread) as pool:
            for res in pool.map(cell_UMI, CID_arg, outdir_arg, vcf_arg):
                df_UMI_list.append(res)
        df_UMI = pd.concat(df_UMI_list)
        df_UMI['VID'] = df_UMI['VID'].astype('int')
        df_UMI.sort_values(by=['VID', 'CID'], inplace=True)
        df_UMI.to_csv(self.variant_count_file, sep='\t', index=False)
        # round-trip through the tsv so dtypes match later re-reads of the file
        df_UMI = pd.read_csv(self.variant_count_file, sep='\t', header=0)
        df_filter = df_UMI.copy()
        if self.args.min_support_read == 'auto':
            # per-variant threshold: Otsu when enough alt-supporting cells,
            # otherwise 20% of the median alt count
            df_otsu_threshold = pd.DataFrame(columns=['VID', 'threshold'])
            VIDs = np.unique(df_UMI['VID'].values)
            for VID in VIDs:
                df = df_UMI[df_UMI['VID'] == VID]
                df_alt = df[df['alt_count'] > 0]
                array = list(df_alt['alt_count'])
                if array:
                    if len(array) >= OTSU_READ_MIN:
                        min_support_read = utils.otsu_min_support_read(array, f'{self.otsu_dir}/{VID}.png')
                    else:
                        min_support_read = np.median(array) * 0.2
                    # NOTE(review): DataFrame.append is deprecated since pandas
                    # 1.4 (use pd.concat); to_csv here rewrites the whole file
                    # on every iteration — only the final write matters.
                    df_otsu_threshold = df_otsu_threshold.append(
                        {'VID': VID, 'threshold': min_support_read}, ignore_index=True)
                    df_otsu_threshold.to_csv(self.otsu_threshold_file, sep='\t', index=False)
                    # zero out alt counts below this variant's threshold
                    df_filter.loc[((df_filter['VID'] == VID) & (df_filter['alt_count'] < min_support_read)), 'alt_count'] = 0
        else:
            min_support_read = int(self.args.min_support_read)
            df_filter.loc[df_filter['alt_count'] < min_support_read, 'alt_count'] = 0
        # a cell "covers" a variant if it has any ref or alt UMI after filtering
        df_filter.loc[:,"vid_judge"] = df_filter.loc[:,"ref_count"] + df_filter.loc[:,"alt_count"]
        df_filter_tmp = df_filter[df_filter.loc[:,"vid_judge"] > 0]
        #summarize
        vid_summarize = {}
        #add vid col
        vid = list(df_filter_tmp.loc[:,"VID"])
        vid_summarize["VID"] = list(set(vid))
        #add cell colum: number of covering cells per variant
        vid_summarize["ncell_cover"] = list(df_filter_tmp.groupby("VID")["vid_judge"].count())
        #count table: 1 when the cell has any alt (resp. ref) support, else 0
        variant_count = (df_filter_tmp.loc[:,"alt_count"] != 0).astype(int)
        ref_count = (df_filter_tmp.loc[:,"ref_count"] != 0).astype(int)
        #add VID colums
        variant_count["VID"] = df_filter_tmp.loc[:,"VID"]
        ref_count["VID"] = df_filter_tmp.loc[:,"VID"]
        vid_summarize["ncell_ref"] = list(ref_count.groupby("VID").sum())
        vid_summarize["ncell_alt"] = list(variant_count.groupby("VID").sum())
        vid_summarize = pd.DataFrame(vid_summarize)
        #keep number of cells with variant read count only and number of cells with reference read count only
        vid_summarize.loc[:,"both_ref_and_variant"] = (vid_summarize.loc[:,"ncell_ref"] + vid_summarize.loc[:,"ncell_alt"]) - vid_summarize.loc[:,"ncell_cover"]
        vid_summarize.loc[:,"ncell_ref"] = vid_summarize.loc[:,"ncell_ref"] - vid_summarize.loc[:,"both_ref_and_variant"]
        vid_summarize.loc[:,"ncell_alt"] = vid_summarize.loc[:,"ncell_alt"] - vid_summarize.loc[:,"both_ref_and_variant"]
        df_filter = df_filter.drop("vid_judge",axis=1)
        df_filter.to_csv(self.filter_variant_count_file, sep='\t', index=False)
        vid_summarize = vid_summarize.drop("both_ref_and_variant",axis=1)
        vid_summarize.to_csv(self.summarize_capture_vid,sep = '\t',index = False)
@utils.add_log
def filter_vcf(self):
"""
filter cells with zero variant UMI
"""
df_filter = pd.read_csv(self.filter_variant_count_file, sep='\t')
df_filter = df_filter[df_filter['alt_count']>0]
vcf = pysam.VariantFile(self.merged_vcf_file, 'r')
vcf_header = vcf.header
filter_vcf = pysam.VariantFile(self.filter_vcf_file, 'w', header=vcf_header)
for rec in vcf.fetch():
VID = int(rec.info['VID'])
CIDs = df_filter[df_filter['VID']==VID]['CID'].values
rec.info['CID'] = ','.join([str(CID) for CID in CIDs])
if rec.info['CID']:
filter_vcf.write(rec)
filter_vcf.close()
vcf.close()
@utils.add_log
def write_support_matrix(self):
def set_support_bit(row):
ref_bit = 1 if row['ref_count'] > 0 else 0
alt_bit = 2 if row['alt_count'] > 0 else 0
support_bit = ref_bit + alt_bit
return support_bit
df_variant_count = pd.read_csv(self.filter_variant_count_file, sep='\t')
df_variant_count['support'] = df_variant_count.apply(set_support_bit, axis=1)
support_mtx = coo_matrix(
(df_variant_count.support, (df_variant_count.VID - 1, df_variant_count.CID - 1))
)
mmwrite(self.support_matrix_file, support_mtx)
    def run(self):
        # Execute the variant-calling pipeline end to end; the steps are
        # order-dependent (each consumes files written by the previous one):
        # read splitting -> per-cell BAMs -> SNP calling -> VCF merge ->
        # variant IDs -> UMI counting -> VCF filtering -> support matrix -> cleanup.
        self.SplitNCigarReads()
        self.split_bam()
        self.call_all_snp()
        self.merge_vcf()
        self.write_VID_file()
        self.get_UMI()
        self.filter_vcf()
        self.write_support_matrix()
        self.clean_up()
@utils.add_log
def variant_calling(args):
    """Entry point: build the Variant_calling step and run it to completion."""
    Variant_calling(args, 'variant_calling').run()
def get_opts_variant_calling(parser, sub_program):
    """Register CLI options for the variant_calling step.

    When `sub_program` is true, also adds the stand-alone-only arguments
    (input BAM, matched scRNA directory) and the shared common options.
    """
    parser.add_argument("--genomeDir", help=HELP_DICT['genomeDir'], required=True)
    parser.add_argument(
        "--min_support_read",
        help="""Minimum number of reads support a variant. If `auto`(default), otsu method will be used to determine this value.""",
        default='auto',
    )
    if sub_program:
        parser.add_argument(
            "--bam",
            help='Input BAM file from step `target_metrics`. ',
            required=True
        )
        parser.add_argument(
            "--match_dir",
            help=HELP_DICT['match_dir'],
            required=True
        )
        s_common(parser)
|
|
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable
from paddle.fluid.dygraph import Layer
from paddle.fluid.dygraph import Conv2D
from paddle.fluid.dygraph import BatchNorm
from paddle.fluid.dygraph import Dropout
from resnet_dilated import ResNet50
# pool with different bin_size
# interpolate back to input size
# concat
class PSPModule(Layer):
    """Pyramid Scene Parsing module.

    For each bin size: adaptively pool the input, reduce channels with a
    1x1 Conv + BatchNorm/ReLU, then upsample back to the input resolution.
    The branches are concatenated with the original input along the channel
    axis, doubling the channel count.
    """
    def __init__(self, num_channels, bin_size_list):
        super(PSPModule, self).__init__()
        self.bin_size_list = bin_size_list
        num_filters = num_channels // len(bin_size_list)
        self.features = []
        for i in range(len(bin_size_list)):
            branch = fluid.dygraph.Sequential(
                Conv2D(num_channels, num_filters, 1),
                BatchNorm(num_filters, act='relu')
            )
            # BUGFIX: sublayers held only in a plain Python list are not
            # registered with the parent Layer, so their parameters would be
            # missing from parameters()/state_dict. Register each branch.
            self.add_sublayer('psp_branch_{}'.format(i), branch)
            self.features.append(branch)

    def forward(self, inputs):
        out = [inputs]
        for idx, f in enumerate(self.features):
            x = fluid.layers.adaptive_pool2d(inputs, self.bin_size_list[idx])
            x = f(x)
            # upsample each pooled branch back to the input spatial size
            x = fluid.layers.interpolate(x, inputs.shape[2::], align_corners=True)
            out.append(x)
        out = fluid.layers.concat(out, axis=1)  # NCHW
        return out
class PSPNet(Layer):
    """PSPNet semantic segmentation network with a dilated-ResNet50 backbone.

    Pipeline: stem -> res layers 1-4 -> PSP pyramid pooling (2048 -> 4096
    channels) -> 3x3/1x1 classifier head -> bilinear upsample back to the
    input resolution. The auxiliary head (from layer3) is not implemented.
    """
    def __init__(self, num_classes=59, backbone='resnet50'):
        # NOTE(review): `backbone` is accepted but ignored — ResNet50 is
        # always instantiated.
        super(PSPNet, self).__init__()
        res = ResNet50(pretrained=False)
        # stem: res.conv, res.pool2d_max
        self.layer0 = fluid.dygraph.Sequential(
            res.conv,
            res.pool2d_max
        )
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4
        num_channels = 2048
        # psp: 2048 -> 2048*2
        self.pspmodule = PSPModule(num_channels, [1, 2, 3, 6])
        num_channels *= 2
        # cls: 2048*2 -> 512 -> num_classes
        self.classifier = fluid.dygraph.Sequential(
            Conv2D(num_channels, num_filters=512, filter_size=3, padding=1),
            BatchNorm(512, act='relu'),
            Dropout(0.1),
            Conv2D(512, num_classes, filter_size=1)
        )
        # aux: 1024 -> 256 -> num_classes
    def forward(self, inputs):
        # inputs: NCHW image batch; returns NC'HW score map (C' = num_classes)
        x = self.layer0(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.pspmodule(x)
        x = self.classifier(x)
        # upsample the class scores back to the input spatial size
        x = fluid.layers.interpolate(x, inputs.shape[2::], align_corners=True)
        # aux: tmp_x = layer3
        return x
def main():
    """Smoke-test a PSPNet forward pass on random input (CPU only)."""
    with fluid.dygraph.guard(fluid.CPUPlace()):
        x_data = np.random.rand(2, 3, 473, 473).astype(np.float32)
        x = to_variable(x_data)
        model = PSPNet(num_classes=59)
        model.train()
        # BUGFIX: PSPNet.forward returns a single tensor (the aux head is
        # commented out), so `pred, aux = model(x)` raised at runtime.
        pred = model(x)
        print(pred.shape)


if __name__ == "__main__":
    main()
|
|
import sys, os
import subprocess
import numpy as np
import pandas as pd
from Bio.PDB import *
from Bio import SeqIO
from Bio import AlignIO
from Bio import Align
import itertools as it
def filterandparse_sequences(fastaOUT, theta):
    """
    Filter 18S rRNA sequences for gaps and N characters.

    Keeps sequences with zero 'N' bases and a mean gap fraction below
    `theta`; the NMR scaffold sequence is always kept. Reads
    refseq_18S_final.fasta plus the precomputed gap matrix (columns keyed
    by sequence id — presumably one gap fraction per alignment position;
    TODO confirm) and writes the surviving records to `fastaOUT`.
    """
    data = pd.read_csv("../../../data/18SrRNA/pipeline_mapping/gaps_matrix_final.txt", header=0, sep='\t', index_col=0)
    seqs = SeqIO.parse("refseq_18S_final.fasta", "fasta")
    records = []
    for s in seqs:
        current_id = s.id
        # count ambiguous bases in this sequence
        current_N = np.sum( np.array(s.seq) == "N" )
        current_gaps = np.array( data[current_id] )
        if (int(current_N) == 0 and np.mean(current_gaps) < theta) or ("NMR" in current_id): # NMR seq is from scaffold, but keep it anyways
            records.append(s)
    SeqIO.write(records, fastaOUT, "fasta")
def ssu_aln(fastaIN, alnOUT):
    """
    Align the filtered sequences with ssu-align, mask the alignment, and
    export the eukarya mask as a PHYLIP file `alnOUT`.
    """
    TMPDIR = 'TMPSSU'
    # Pass argument lists instead of shell strings: no shell parsing means no
    # quoting/injection issues with unusual file names (same commands run).
    subprocess.call(['ssu-align', '--dna', '-f', fastaIN, TMPDIR])
    subprocess.call(['ssu-mask', TMPDIR])
    subprocess.call(['ssu-mask', '--stk2afa', TMPDIR])
    aln = AlignIO.parse(os.path.join(TMPDIR, TMPDIR + ".eukarya.mask.stk"), "stockholm")
    AlignIO.write(aln, alnOUT, "phylip")
if __name__ == "__main__":
    # Run the filter + alignment pipeline for each gap-fraction threshold.
    # `theta` is the maximum mean gap fraction; `tag` names the output files.
    # Identical calls to the original unrolled sequence, just data-driven.
    for theta, tag in [(0.15, "15"), (0.20, "20"), (0.25, "25"),
                       (0.30, "30"), (0.50, "50"), (1, "100")]:
        fasta = "tmp.{}.fasta".format(tag)
        filterandparse_sequences(fasta, theta)
        ssu_aln(fasta, "18Saln_final_{}.phy".format(tag))
|
|
"""
Data from https://www.isi.edu/~lerman/downloads/digg2009.html
Extract network and diffusion cascades from Digg
"""
import os
import pandas as pd
import networkx as nx
import numpy as np
from urllib.request import urlopen
from zipfile import ZipFile
def extract_network(file):
    """Build the directed, timestamped follow network from the Digg friends file.

    Input CSV (no header) columns: [mutual_flag, time, a, b]. Mutual
    friendships are expanded into two directed edges, without overwriting the
    timestamp of edges that already exist in both directions. Output:
    ../digg_network.txt with space-separated rows "a b time".
    """
    friends = pd.read_csv(file, header=None)
    #--------- Remove self friendships
    friends = friends[friends[2] != friends[3]]
    #--------- Rows flagged as mutual get a mirrored copy appended below
    reciprocal = friends[friends[0] == 1]
    # drop the mutual-flag column (positional-axis drop() was deprecated)
    friends = friends.drop(columns=[0])
    reciprocal = reciprocal.drop(columns=[0])
    #---- Create the reciprocal edge for each pair
    tmp = reciprocal[2].copy()
    reciprocal[2] = reciprocal[3]
    reciprocal[3] = tmp
    #--------- Find the edges that already exist in the dataset as reciprocal, and remove them,
    #--------- to avoid overwriting the correct time of the reciprocal edges that already exist
    to_remove = reciprocal.reset_index().merge(friends, left_on=[2, 3], right_on=[2, 3]).set_index('index').index
    reciprocal = reciprocal.drop(to_remove)
    # DataFrame.append was removed in pandas 2.0; concat is the supported form.
    friends = pd.concat([friends, reciprocal])
    # (the original computed-and-discarded duplicate check was a no-op; removed)
    #----------- Store the weighted follow network
    friends.columns = ["time", "a", "b"]
    friends = friends[["a", "b", "time"]]
    friends.to_csv("../digg_network.txt", index=False, sep=" ", header=False)
def extract_cascades(file):
    """Split the Digg votes into train/test cascade files.

    Votes CSV (no header) columns: time, user, post. Cascades (posts) whose
    first vote falls within the final 20% of cascade start times go to
    test_cascades.txt; the rest to train_cascades.txt. Each output line is
    "user time;user time;..." in chronological order.
    """
    votes = pd.read_csv(file, header=None)
    votes.columns = ["time", "user", "post"]
    votes = votes.sort_values(by=["time"])
    #---- Threshold: start time of the earliest cascade in the final 20%
    start_times = votes.groupby("post")["time"].min()  #--- only the starting time of each cascade matters
    start_times = start_times.sort_values()
    no_test_cascades = round(20 * len(start_times) / 100)
    threshold = min(start_times.tail(no_test_cascades))
    # `with` guarantees both files are flushed and closed even on error
    # (the original leaked the handles if an exception occurred mid-loop).
    with open("train_cascades.txt", "w") as f_train, open("test_cascades.txt", "w") as f_test:
        #--------- For each cascade
        for i in votes["post"].unique():
            print("Preprocessing post with id: ", i)
            sub = votes[votes["post"] == i]
            #---- "user time;user time;..." — join avoids the manual trailing-";" trim
            s = ";".join(
                str(row["user"]) + " " + str(row["time"])
                for _, row in sub.sort_values(by=['time']).iterrows()
            )
            #---- Train if the cascade started before the threshold, else test
            if min(sub["time"]) < threshold:
                f_train.write(s + "\n")
            else:
                f_test.write(s + "\n")
def download():
    """Download and unpack the Digg votes and friends archives into the cwd."""
    for url, archive in [
        ("http://www.isi.edu/~lerman/downloads/digg_votes.zip", "digg_votes.zip"),
        ("http://www.isi.edu/~lerman/downloads/digg_friends.zip", "digg_friends.zip"),
    ]:
        # Context managers close the HTTP response, the file and the zip even
        # if an error occurs mid-transfer (the original never closed the
        # urlopen response at all).
        with urlopen(url) as response, open(archive, "wb") as tempzip:
            tempzip.write(response.read())
        with ZipFile(archive) as zf:
            zf.extractall()
def digg_preprocessing(path):
    # Run the full Digg preprocessing from `path`: build the follow network,
    # then split the vote cascades into train/test files.
    # NOTE(review): expects the sliced CSV inputs one directory above `path`;
    # the download() call is deliberately left commented out.
    os.chdir(path)
    # download()
    file_friends = "../digg_friends_sliced.csv"
    file_casc = "../digg_votes1_sliced.csv"
    extract_network(file_friends)
    extract_cascades(file_casc)
|
|
import numpy as np
import brainscore
from brainio.assemblies import DataAssembly
from brainscore.benchmarks._properties_common import PropertiesBenchmark, _assert_texture_activations
from brainscore.benchmarks._properties_common import calc_texture_modulation, calc_sparseness, calc_variance_ratio
from brainscore.metrics.ceiling import NeuronalPropertyCeiling
from brainscore.metrics.distribution_similarity import BootstrapDistributionSimilarity, ks_similarity
from result_caching import store
ASSEMBLY_NAME = 'movshon.FreemanZiemba2013_V1_properties'
REGION = 'V1'
TIMEBINS = [(70, 170)]
PARENT_TEXTURE_MODULATION = 'V1-texture_modulation'
PARENT_SELECTIVITY = 'V1-response_selectivity'
PARENT_MAGNITUDE = 'V1-response_magnitude'
PROPERTY_NAMES = ['texture_modulation_index', 'absolute_texture_modulation_index', 'texture_selectivity',
'noise_selectivity', 'texture_sparseness', 'noise_sparseness', 'variance_ratio', 'sample_variance',
'family_variance', 'max_texture', 'max_noise']
BIBTEX = """@article{Freeman2013,
author = {Freeman, Jeremy and Ziemba, Corey M. and Heeger, David J. and Simoncelli, E. P. and Movshon, J. A.},
doi = {10.1038/nn.3402},
issn = {10976256},
journal = {Nature Neuroscience},
number = {7},
pages = {974--981},
pmid = {23685719},
publisher = {Nature Publishing Group},
title = {{A functional and perceptual signature of the second visual area in primates}},
url = {http://dx.doi.org/10.1038/nn.3402},
volume = {16},
year = {2013}
}
"""
RESPONSE_THRESHOLD = 5
def _MarquesFreemanZiemba2013V1Property(property_name, parent):
    """Build a FreemanZiemba2013 V1 single-property benchmark."""
    similarity_metric = BootstrapDistributionSimilarity(similarity_func=ks_similarity, property_name=property_name)
    return PropertiesBenchmark(
        identifier=f'dicarlo.Marques_freemanziemba2013-{property_name}',
        assembly=brainscore.get_assembly(ASSEMBLY_NAME),
        neuronal_property=freemanziemba2013_properties,
        similarity_metric=similarity_metric,
        timebins=TIMEBINS,
        parent=parent,
        ceiling_func=NeuronalPropertyCeiling(similarity_metric),
        bibtex=BIBTEX,
        version=1,
    )
def MarquesFreemanZiemba2013V1TextureModulationIndex():
    """Texture modulation index benchmark (texture-modulation parent)."""
    return _MarquesFreemanZiemba2013V1Property(
        property_name='texture_modulation_index', parent=PARENT_TEXTURE_MODULATION)
def MarquesFreemanZiemba2013V1AbsoluteTextureModulationIndex():
    """Absolute texture modulation index benchmark (texture-modulation parent)."""
    return _MarquesFreemanZiemba2013V1Property(
        property_name='absolute_texture_modulation_index', parent=PARENT_TEXTURE_MODULATION)
def MarquesFreemanZiemba2013V1TextureSelectivity():
    """Texture selectivity benchmark (response-selectivity parent)."""
    return _MarquesFreemanZiemba2013V1Property(
        property_name='texture_selectivity', parent=PARENT_SELECTIVITY)
def MarquesFreemanZiemba2013V1TextureSparseness():
    """Texture sparseness benchmark (response-selectivity parent)."""
    return _MarquesFreemanZiemba2013V1Property(
        property_name='texture_sparseness', parent=PARENT_SELECTIVITY)
def MarquesFreemanZiemba2013V1VarianceRatio():
    """Variance ratio benchmark (response-selectivity parent)."""
    return _MarquesFreemanZiemba2013V1Property(
        property_name='variance_ratio', parent=PARENT_SELECTIVITY)
def MarquesFreemanZiemba2013V1MaxTexture():
    """Maximum texture response benchmark (response-magnitude parent)."""
    return _MarquesFreemanZiemba2013V1Property(
        property_name='max_texture', parent=PARENT_MAGNITUDE)
def MarquesFreemanZiemba2013V1MaxNoise():
    """Maximum noise response benchmark (response-magnitude parent)."""
    return _MarquesFreemanZiemba2013V1Property(
        property_name='max_noise', parent=PARENT_MAGNITUDE)
@store(identifier_ignore=['responses', 'baseline'])
def freemanziemba2013_properties(model_identifier, responses, baseline):
    """Compute the 11 FreemanZiemba2013 V1 response properties per neuroid.

    Returns a (neuroid x neuronal_property) DataAssembly restricted to
    neuroids whose peak baseline-subtracted response exceeds
    RESPONSE_THRESHOLD. Property column order matches PROPERTY_NAMES.
    """
    _assert_texture_activations(responses)
    # Fix a canonical stimulus ordering so the reshape below is well defined.
    responses = responses.sortby(['type', 'family', 'sample'])
    # NOTE(review): `type` shadows the builtin; left unchanged here.
    type = np.array(sorted(set(responses.type.values)))
    family = np.array(sorted(set(responses.family.values)))
    sample = np.array(sorted(set(responses.sample.values)))
    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]
    # -> (neuroid, type, family, sample); type axis: index 0 = noise, 1 = texture
    responses = responses.reshape(n_neuroids, len(type), len(family), len(sample))
    # Variance-stabilizing sqrt transform on rescaled responses (the /10
    # presumably converts rate to counts over the 100 ms window — TODO confirm).
    responses_spikes = responses / 10
    responses_spikes = np.sqrt(responses_spikes) + np.sqrt(responses_spikes + 1)
    responses -= baseline.reshape((-1, 1, 1, 1))
    # Peak responses per neuroid: [:, 1, :] = texture stimuli, [:, 0, :] = noise
    max_texture = np.max((responses.reshape((n_neuroids, 2, -1)))[:, 1, :], axis=1, keepdims=True)
    max_noise = np.max((responses.reshape((n_neuroids, 2, -1)))[:, 0, :], axis=1, keepdims=True)
    max_response = np.max(responses.reshape((n_neuroids, -1)), axis=1, keepdims=True)
    # Average over samples -> per-family responses
    responses_family = responses.mean(axis=3)
    texture_modulation_index = np.zeros((n_neuroids, 1))
    texture_selectivity = np.zeros((n_neuroids, 1))
    noise_selectivity = np.zeros((n_neuroids, 1))
    texture_sparseness = np.zeros((n_neuroids, 1))
    noise_sparseness = np.zeros((n_neuroids, 1))
    variance_ratio = np.zeros((n_neuroids, 1))
    sample_variance = np.zeros((n_neuroids, 1))
    family_variance = np.zeros((n_neuroids, 1))
    for neur in range(n_neuroids):
        texture_modulation_index[neur] = calc_texture_modulation(responses_family[neur])[0]
        texture_selectivity[neur] = calc_sparseness(responses_family[neur, 1])
        noise_selectivity[neur] = calc_sparseness(responses_family[neur, 0])
        texture_sparseness[neur] = calc_sparseness(responses[neur, 1])
        noise_sparseness[neur] = calc_sparseness(responses[neur, 0])
        variance_ratio[neur], sample_variance[neur], family_variance[neur] = \
            calc_variance_ratio(responses_spikes[neur, 1])
    absolute_texture_modulation_index = np.abs(texture_modulation_index)
    # Column order must stay in sync with PROPERTY_NAMES.
    properties_data = np.concatenate((texture_modulation_index, absolute_texture_modulation_index, texture_selectivity,
                                      noise_selectivity, texture_sparseness, noise_sparseness, variance_ratio,
                                      sample_variance, family_variance, max_texture, max_noise), axis=1)
    # Keep only responsive neuroids.
    good_neuroids = max_response > RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]
    properties_data = DataAssembly(properties_data, coords={'neuroid_id': ('neuroid', range(properties_data.shape[0])),
                                                            'region': ('neuroid', ['V1'] * properties_data.shape[0]),
                                                            'neuronal_property': PROPERTY_NAMES},
                                   dims=['neuroid', 'neuronal_property'])
    return properties_data
|
|
import matplotlib.pyplot as plt
import numpy as np
from . import implantation_range, reflection_coeff
from . import estimate_inventory_with_gp_regression
DEFAULT_TIME = 1e7
database_inv_sig = {}
def fetch_inventory_and_error(time):
    """Return (inventory(T, c), sigma(T, c)) callables for a given time.

    Results are memoised in the module-level ``database_inv_sig`` dict so the
    Gaussian-process regression runs at most once per exposure time.

    Args:
        time (float): time (s)

    Returns:
        callable, callable: inventory(T, c), standard deviation(T, c)
    """
    if time not in database_inv_sig:
        GP = estimate_inventory_with_gp_regression(time=time)

        def inv_T_c_local(T, c):
            # zero surface concentration -> zero inventory by convention
            return 0 if c == 0 else 10**GP((T, np.log10(c)))[0][0]

        def sig_inv_local(T, c):
            return 0 if c == 0 else GP((T, np.log10(c)))[1][0]

        # cache the fitted callables for later calls with the same time
        database_inv_sig[time] = {
            "inv": inv_T_c_local,
            "sig": sig_inv_local
        }
    entry = database_inv_sig[time]
    return entry["inv"], entry["sig"]
def compute_inventory(T, c_max, time):
    """Monoblock inventory along the divertor for a given exposure time.

    Reuses (or triggers) the GP regression for ``time`` via
    ``fetch_inventory_and_error``.

    Args:
        T (list): Surface temperature (K)
        c_max (list): Surface concentration (H m-3)
        time (float): Exposure time (s)

    Returns:
        numpy.array, numpy.array: list of inventories (H/m), list of standard
        deviation
    """
    inv_fn, sig_fn = fetch_inventory_and_error(time)
    # evaluate inventory (H/m) and its uncertainty point-by-point along the divertor
    inventories = np.array([float(inv_fn(T_, c)) for T_, c in zip(T, c_max)])
    sigmas = np.array([float(sig_fn(T_, c)) for T_, c in zip(T, c_max)])
    return inventories, sigmas
def compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False, isotope="H"):
    """Surface hydrogen concentration from the exposure conditions.

    Args:
        T (numpy.array): Surface temperature (K)
        E_ion (numpy.array): Ion incident energy (eV)
        E_atom (numpy.array): Atom incident energy (eV)
        angles_ion (numpy.array): Angle of incidence of ions (deg)
        angles_atom (numpy.array): Angle of incidence of atoms (deg)
        ion_flux (numpy.array): Ion flux (m-2 s-1)
        atom_flux (numpy.array): Atom flux (m-2 s-1)
        full_export (bool, optional): If True, also return the separate ion
            and atom contributions. Defaults to False.
        isotope (str, optional): Hydrogen isotope, one of "H", "D", "T".
            Defaults to "H".

    Returns:
        numpy.array or (numpy.array, numpy.array, numpy.array): surface
        concentration or (surface concentration, surface conc. ions,
        surface conc. atoms)
    """
    # Diffusion coefficient Fernandez et al Acta Materialia (2015)
    # https://doi.org/10.1016/j.actamat.2015.04.052
    D_0_W = 1.9e-7
    E_D_W = 0.2
    k_B = 8.617e-5
    D = D_0_W*np.exp(-E_D_W/k_B/T)
    # heavier isotopes diffuse more slowly (classical 1/sqrt(mass) scaling);
    # unknown isotope strings fall back to the H value, as before
    mass = {"D": 2, "T": 3}.get(isotope, 1)
    D = D * (1/mass**0.5)

    def _evaluate(func, energies, angles):
        # evaluate a per-particle coefficient along the target
        return np.array([float(func(e, a)) for e, a in zip(energies, angles)])

    # implantation ranges
    implantation_range_ions = _evaluate(implantation_range, E_ion, angles_ion)
    implantation_range_atoms = _evaluate(implantation_range, E_atom, angles_atom)
    # reflection coefficients
    reflection_coeff_ions = _evaluate(reflection_coeff, E_ion, angles_ion)
    reflection_coeff_atoms = _evaluate(reflection_coeff, E_atom, angles_atom)
    # compute c_max
    c_max_ions = (1 - reflection_coeff_ions) * \
        ion_flux*implantation_range_ions/D
    c_max_atoms = (1 - reflection_coeff_atoms) * \
        atom_flux*implantation_range_atoms/D
    c_max = c_max_ions + c_max_atoms
    if full_export:
        return c_max, c_max_ions, c_max_atoms
    return c_max
def compute_surface_temperature(heat_flux):
    """Surface temperature (K) from the incident heat flux (W m-2).

    Linear fit from the thermal study in Delaporte-Mathurin et al, SREP 2020
    https://www.nature.com/articles/s41598-020-74844-w
    """
    slope = 1.1e-4  # K per (W m-2)
    offset = 323  # K
    return slope*heat_flux + offset
if __name__ == "__main__":
pass
|
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Install it using pip install hmmlearn
from hmmlearn import hmm
# Set random seed for reproducibility
np.random.seed(1000)
if __name__ == '__main__':
    # Create a Multinomial HMM with two hidden states (On-time / Delayed)
    hmm_model = hmm.MultinomialHMM(n_components=2,
                                   n_iter=100,
                                   random_state=1000)
    # Training observations: one binary symbol per row
    observations = np.array([[0], [1], [1],
                             [0], [1], [1],
                             [1], [0], [1],
                             [0], [0], [0],
                             [1], [0], [1],
                             [1], [0], [1],
                             [0], [0], [1],
                             [0], [1], [0],
                             [0], [0], [1],
                             [0], [1], [0],
                             [1], [0], [0],
                             [0], [0], [0]],
                            dtype=np.int32)
    # Fit the model using the Forward-Backward (Baum-Welch) algorithm
    hmm_model.fit(observations)
    # Check the convergence
    print('Converged: {}'.format(hmm_model.monitor_.converged))
    # Print the transition probability matrix
    print('\nTransition probability matrix:')
    print(hmm_model.transmat_)
    # Create a test sequence
    sequence = np.array([[1], [1], [1],
                         [0], [1], [1],
                         [1], [0], [1],
                         [0], [1], [0],
                         [1], [0], [1],
                         [1], [0], [1],
                         [1], [0], [1],
                         [0], [1], [0],
                         [1], [0], [1],
                         [1], [1], [0],
                         [0], [1], [1],
                         [0], [1], [1]],
                        dtype=np.int32)
    # Find the most likely hidden states using the Viterbi algorithm
    lp, hs = hmm_model.decode(sequence)
    print('\nMost likely hidden state sequence:')
    print(hs)
    # (typo fixed in the printed label: "propability" -> "probability")
    print('\nLog-probability:')
    print(lp)
    # Compute the posterior probabilities
    pp = hmm_model.predict_proba(sequence)
    print('\nPosterior probabilities:')
    print(pp)
    sns.set()
    fig, ax = plt.subplots(figsize=(22, 10))
    ax.plot(pp[:, 0], "o-", linewidth=3.0, label="On-time")
    ax.plot(pp[:, 1], "o-", linewidth=3.0, linestyle="dashed", label="Delayed")
    ax.set_xlabel("Time", fontsize=22)
    ax.set_ylabel("State", fontsize=22)
    ax.legend(fontsize=22)
    plt.show()
    # Repeat the prediction with a sequence of On-time (0) flights
    sequence0 = np.array([[0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0],
                          [0], [0], [0]],
                         dtype=np.int32)
    pp0 = hmm_model.predict_proba(sequence0)
    fig, ax = plt.subplots(figsize=(22, 10))
    ax.plot(pp0[:, 0], "o-", linewidth=3.0, label="On-time")
    ax.plot(pp0[:, 1], "o-", linewidth=3.0, linestyle="dashed", label="Delayed")
    ax.set_xlabel("Time", fontsize=22)
    ax.set_ylabel("State", fontsize=22)
    ax.legend(fontsize=22)
    plt.show()
|
|
#-*- coding:utf-8 -*-
#'''
# Created on 2020/9/10 10:32
#
# @Author: Jun Wang
#'''
import os
import time
from tqdm import tqdm
from collections import OrderedDict
import numpy as np
from numpy.random import choice
import pandas as pd
import matplotlib.pyplot as plt
import PIL
from torch.nn import functional as F
from sklearn.metrics import roc_auc_score, f1_score, recall_score, confusion_matrix
import torch
from torch import nn, Tensor
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
import torchvision
from sklearn.model_selection import train_test_split
np.random.seed(42)
DATA_DIR = '~/'
# train_dir = os.path.join(DATA_DIR, 'train')
# test_dir = os.path.join(DATA_DIR, 'test')
train_dir = '~/data/train/'
test_dir = '~/data/test/'
"""
def train_validation_split(df, val_fraction=0.1):
val_ids = np.random.choice(df.id, size=int(len(df) * val_fraction))
val_df = df.query('id in @val_ids')
train_df = df.query('id not in @val_ids')
return train_df, val_df
train_label_df, val_label_df = train_validation_split(pd.read_csv(os.path.join(DATA_DIR, 'train_labels.csv')),
val_fraction=0.1)
"""
# DATA_DIR = '/home/dl/zy_Histopathologic_CanDet/input/'
labels = pd.read_csv('/home/ubuntu/junwang/paper/Boosted_EffNet/code/data/train_labels.csv')
train_label_df, val_label_df = train_test_split(labels, stratify=labels.label, test_size=0.1, random_state=123)
# os.environ['CUDA_VISIBLE_DEVICES'] = "2, 3"
def SPC(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred).astype(np.float32)
TN = cm[1, 1]
FP = cm[0, 1]
return TN / (TN + FP)
def function_timer(function):
    """Decorator that prints the wall-clock execution time of `function`.

    Uses functools.wraps so the wrapped function keeps its __name__ and
    docstring (the original wrapper clobbered them, which breaks
    introspection and debugging).
    """
    from functools import wraps  # local import: keeps the module import block untouched

    @wraps(function)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = function(*args, **kwargs)
        duration = time.time() - start
        hours = int(duration // 60 ** 2)
        minutes = int((duration % 60 ** 2) // 60)
        seconds = int(duration % 60)
        print(f'execution-time of function "{function.__name__}": {hours}h {minutes}m {seconds}s')
        return result
    return wrapper
class HistoPatches(Dataset):
    """Dataset of histopathology image patches, optionally preloaded in RAM."""
    def __init__(self,
                 image_dir: str,
                 label_df=None,
                 transform=transforms.ToTensor(),
                 sample_n=None,
                 in_memory=False):
        """
        @ image_dir: path to directory with images
        @ label_df: df with image id (str) and label (0/1) - only for labeled test-set
        @ transforms: image transformation; by default no transformation
        @ sample_n: if not None, only use that many observations
        """
        self.image_dir = image_dir
        self.label_df = label_df
        self.transform = transform
        self.in_memory = in_memory
        if label_df is not None:
            if sample_n:  # truthiness check: sample_n=0 also disables subsampling
                self.label_df = self.label_df.sample(n=sample_n)
            # keep only the image files whose id appears in the label dataframe
            ids = set(self.label_df.id)
            self.img_files = [f for f in os.listdir(image_dir) if f.split('.')[0] in ids]
        else:
            if sample_n is not None:
                # NOTE(review): returns silently, leaving the instance without
                # img_files — later calls will raise AttributeError.
                print('subsampling is currently only implemented when a label-dataframe is provided.')
                return
            self.img_files = os.listdir(image_dir)
        if in_memory:
            # eagerly decode + transform every image once, keyed by image id
            self.id2image = self._load_images()
        print(f'Initialized datatset with {len(self.img_files)} images.\n')
    @function_timer
    def _load_images(self):
        # Trade RAM for per-epoch speed: decode and transform all images up front.
        print('loading images in memory...')
        id2image = {}
        for file_name in self.img_files:
            img = PIL.Image.open(os.path.join(self.image_dir, file_name))
            X = self.transform(img)
            id_ = file_name.split('.')[0]
            id2image[id_] = X
        return id2image
    def __getitem__(self, idx):
        # Returns (tensor, label) when labels are available, else (tensor, image id).
        file_name = self.img_files[idx]
        id_ = file_name.split('.')[0]
        if self.in_memory:
            X = self.id2image[id_]
        else:
            img = PIL.Image.open(os.path.join(self.image_dir, file_name))
            X = self.transform(img)
        if self.label_df is not None:
            y = float(self.label_df.query('id == @id_').label)
            return X, y
        else:
            return X, id_
    def __len__(self):
        return len(self.img_files)
memory = False
batchsize = 256
image_trans = transforms.Compose([ # transforms.CenterCrop(30),
transforms.ToTensor(),
transforms.Normalize(mean=[0.70017236, 0.5436771, 0.6961061],
std=[0.22246036, 0.26757348, 0.19798167])
])
# pad + RandomCrop, RCC
train_trans = transforms.Compose([
transforms.Pad(8),
transforms.RandomVerticalFlip(),
transforms.RandomCrop(size=96),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.70017236, 0.5436771, 0.6961061],
std=[0.22246036, 0.26757348, 0.19798167])
])
train = HistoPatches(train_dir,
train_label_df,
transform=train_trans,
# sample_n=1000,
in_memory=memory)
val = HistoPatches(train_dir,
val_label_df,
transform=image_trans,
# sample_n=100,
in_memory=memory)
train_loader = DataLoader(train, batch_size=batchsize, shuffle=True, num_workers=8)
val_loader = DataLoader(val, batch_size=batchsize * 4, shuffle=False, num_workers=4)
# net = models.densenet121(pretrained=False)
# model = torch.load('/home/dl/zy/zy_Histopathologic_CanDet/models/densenet121-a639ec97.pth')
#
# model = {k.replace('.1.', '1.'): v for k, v in model.items()}
# model = {k.replace('.2.', '2.'): v for k, v in model.items()}
# net.load_state_dict(model)
from torchvision.models.densenet import _densenet
model = 'efficientnet-b3'
from efficientnet_pytorch import EfficientNet
def load_network(model: nn.Module, path):
    """Load a serialized state dict from `path` into `model`; return the model."""
    model.load_state_dict(torch.load(str(path)))
    return model
class SEModule(nn.Module):
    """Squeeze-and-Excitation block: per-channel gating of the input.

    Global-average-pools the input to 1x1, squeezes channels by `reduction`
    through a 1x1 conv + ReLU, expands back with a second 1x1 conv, and
    multiplies the input by the resulting sigmoid gate.
    """
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
                             padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
                             padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # squeeze -> excite -> gate; gate broadcasts over H and W
        gate = self.sigmoid(self.fc2(self.relu(self.fc1(self.avg_pool(x)))))
        return x * gate
class mynet(nn.Module):
    """EfficientNet-b3 variant with multi-depth SE feature fusion.

    Modifications vs. stock EfficientNet:
    - stem conv stride reduced to 1 ("RDS"), keeping spatial resolution;
    - the classifier consumes the concatenation of SE-gated, globally pooled
      feature maps tapped at four depths (32 + 48 + 136 + 384 = 600 channels)
      instead of the standard head output.
    """
    def __init__(self):
        super(mynet, self).__init__()
        self.model = EfficientNet.from_name(model)
        # RDS
        self.model._conv_stem = nn.Conv2d(self.model._conv_stem.in_channels, self.model._conv_stem.out_channels,
                                          kernel_size=3, stride=1, bias=False, padding=1)  # RDS
        # 600 = 32 + 48 + 136 + 384 fused channels (see extract_features)
        self.model._fc = nn.Linear(600, 1)
        self.model._dropout = nn.Dropout(0.3)
        # SE gates for the four tapped feature maps; channel counts presumably
        # match the efficientnet-b3 blocks at indices 4, 7, 17, 25 — TODO
        # confirm if the base model is ever changed.
        self.semodule1 = SEModule(32, 8)
        self.semodule2 = SEModule(48, 8)
        self.semodule3 = SEModule(136, 8)
        self.semodule4 = SEModule(384, 16)
    def extract_features(self, inputs):
        """ Returns output of the final convolution layer """
        # Stem
        x = self.model._swish(self.model._bn0(self.model._conv_stem(inputs)))
        output = []
        # Blocks
        for idx, block in enumerate(self.model._blocks):
            drop_connect_rate = self.model._global_params.drop_connect_rate
            if drop_connect_rate:
                # scale drop-connect linearly with block depth
                drop_connect_rate *= float(idx) / len(self.model._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            # print(len(output), x.shape)
            output.append(x)
        # Head
        x = self.model._swish(self.model._bn1(self.model._conv_head(x)))
        output.append(x)
        # SE and FF: gate each tapped map, pool to 1x1, concat along channels
        x = torch.cat([F.adaptive_avg_pool2d(self.semodule1(output[4]), 1),
                       F.adaptive_avg_pool2d(self.semodule2(output[7]), 1),
                       F.adaptive_avg_pool2d(self.semodule3(output[17]), 1),
                       F.adaptive_avg_pool2d(self.semodule4(output[25]), 1)], 1)
        return x
    def forward(self, x):
        # See note [TorchScript super()]
        bs = x.size(0)
        # Convolution layers
        x = self.extract_features(x)
        # Pooling and final linear layer
        # x = self.model._avg_pooling(x)
        x = x.view(bs, -1)
        x = self.model._dropout(x)
        x = self.model._fc(x)
        return x
net = mynet()
@function_timer
def train_model(net, train, validation, optimizer, device, max_epoch=100, verbose=False):
    """Train ``net`` with BCE-with-logits loss and log per-epoch metrics.

    This function returns nothing. The parameters of ``net`` are updated
    in-place and the error statistics are written to the global ``error_df``.
    This allows stopping the training at any point and still keeping the
    results.

    Parameters
    ----------
    net : a defined model - can also be pretrained
    train, validation : DataLoaders of the training- and validation-set
    optimizer : optimizer stepping ``net``'s parameters
    device : device the model and batches are moved to
    max_epoch : stop training after this number of epochs
    verbose : unused; kept for interface compatibility
    """
    global error_df  # to track error log even when training aborted
    error_df = pd.DataFrame(
        columns=['train_bce', 'train_acc', 'train_auc', 'train_SEN', 'train_SPE', 'train_F1 score', 'val_bce',
                 'val_acc', 'val_auc', 'val_SEN', 'val_SPE', 'val_F1 score'])
    criterion = nn.BCEWithLogitsLoss()
    # LR drops at epochs 15 and 23.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [15, 23])
    net.to(device)
    print(
        'epoch\tLR\ttr-BCE\ttr-Acc\ttr-AUC\ttr-SEN\ttr-SPE\ttr-F1-score\t\tval-BCE\tval-Acc\tval-AUC\tval-SEN\tval-SPE\tval-F1-score')
    for epoch in tqdm(range(max_epoch)):
        net.train()
        training_bce = training_acc = training_auc = training_SEN = training_SPE = training_f1 = 0
        for X, y in train:
            X, y = X.to(device), y.to(device)
            optimizer.zero_grad()
            # prediction and error:
            out = net(X).squeeze()
            labels = y.detach().cpu().numpy()
            probabilities = torch.sigmoid(out).detach().cpu().numpy()
            predictions = probabilities.round()
            # BUGFIX: the old ``out.type(torch.DoubleTensor).cuda()`` always
            # moved the logits to GPU 0 and crashed on CPU-only devices;
            # ``out.double()`` casts in place on the current ``device``.
            loss = criterion(out.double(), y)
            training_bce += loss.item()
            training_acc += np.mean(labels == predictions) * 100
            training_auc += roc_auc_score(y_true=labels, y_score=probabilities)
            training_SEN += recall_score(y_true=labels, y_pred=predictions)
            training_SPE += SPC(y_true=labels, y_pred=predictions)
            training_f1 += f1_score(y_true=labels, y_pred=predictions)
            # update parameters:
            loss.backward()
            optimizer.step()
        with torch.no_grad():  # no backpropagation necessary
            net.eval()
            validation_bce = validation_acc = validation_auc = validation_SEN = validation_SPE = validation_f1 = 0
            for X, y in validation:
                X, y = X.to(device), y.to(device)
                # prediction and error:
                out = net(X).squeeze()
                labels = y.detach().cpu().numpy()
                probabilities = torch.sigmoid(out).detach().cpu().numpy()
                predictions = probabilities.round()
                validation_bce += criterion(out.double(), y).item()
                validation_acc += np.mean(labels == predictions) * 100
                validation_auc += roc_auc_score(y_true=labels, y_score=probabilities)
                validation_SEN += recall_score(y_true=labels, y_pred=predictions)
                validation_SPE += SPC(y_true=labels, y_pred=predictions)
                validation_f1 += f1_score(y_true=labels, y_pred=predictions)
        # convert running sums to per-batch averages:
        training_bce /= len(train)
        training_acc /= len(train)
        training_auc /= len(train)
        training_SEN /= len(train)
        training_SPE /= len(train)
        training_f1 /= len(train)
        validation_bce /= len(validation)
        validation_acc /= len(validation)
        validation_auc /= len(validation)
        validation_SEN /= len(validation)
        validation_SPE /= len(validation)
        validation_f1 /= len(validation)
        scheduler.step()
        # checkpoint every epoch so an aborted run keeps its latest weights
        torch.save(net.state_dict(), 'checkpoint/'+model +'_'+str(epoch)+ '_net.pt')
        error_stats = [training_bce, training_acc, training_auc, training_SEN, training_SPE, training_f1,
                       validation_bce, validation_acc, validation_auc, validation_SEN, validation_SPE, validation_f1]
        # BUGFIX: ``DataFrame.append`` was deprecated and removed in pandas 2.0;
        # build a one-row frame and concatenate instead.
        error_df = pd.concat([error_df, pd.DataFrame([error_stats], columns=error_df.columns)],
                             ignore_index=True)
        print(
            '{}\t{:.4f}\t{:.4f}\t{:.2f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t\t{:.4f}\t{:.2f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'
            .format(epoch, optimizer.param_groups[0]['lr'], *error_stats))
# Optimizer over all parameters; the commented value is an alternative LR.
optimizer = torch.optim.Adam(net.parameters(), lr=0.003)  # 5e-6)
# NOTE(review): assumes four visible GPUs -- adjust device_ids for the host.
net = nn.DataParallel(net, device_ids=[0, 1, 2, 3])  # multi-gpu
train_model(net,
            train_loader,
            val_loader,
            optimizer,
            device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
            max_epoch=30,
            verbose=False)
model = model + '-ALL'  # save model
torch.save(net.state_dict(), 'checkpoint/'+model + '_net.pth')
error_df.to_csv('./' + model + '.csv')
|
|
import numpy as np
from autograd import numpy as anp
from autograd import jacobian
from scipy.optimize import least_squares
from core.calib.kruppa.common import mul3
class KruppaSolver(object):
    """
    Hartley's formulation
    https://ieeexplore.ieee.org/document/574792

    Refines camera intrinsics by minimising Kruppa-equation residuals over a
    set of fundamental matrices.  The optimisation runs on K = A.A^T
    (parametrised by four scalars) and the result is decomposed back into the
    upper-triangular intrinsic matrix A.
    """
    def __init__(self, verbose=2):
        # cache_ holds the per-call SVD factors of the fundamental matrices.
        self.cache_ = {}
        # Options forwarded verbatim to scipy.optimize.least_squares.
        self.params_ = dict(
            ftol=1e-10,
            xtol=1e-9,
            #gtol=1e-16,
            loss='huber',
            max_nfev=1024,
            method='trf',
            #method='lm',
            verbose=verbose,
            #tr_solver='lsmr',
            tr_solver='exact',
            f_scale=0.1
            #f_scale=1.0
        )
        # Autograd jacobian of the residual; err_anp evaluates err with
        # autograd's numpy so it is differentiable.
        self.jac = jacobian(self.err_anp)#, np=anp)
    def wrap_K(self, K):
        # Pack the four free entries (K00, K02, K11, K12); the (0,1) entry is
        # implied (see unwrap_K) and K22 is fixed to 1.
        return K[(0,0,1,1),(0,2,1,2)]
        #return K[(0,0,0,1,1),(0,1,2,1,2)]
    def unwrap_K(self, K, np=np):
        #res = np.array([
        #    K[0], K[2], K[2],
        #    K[1], K[3], K[4],
        #    K[2], K[4], 1.0]).reshape(3,3)
        # Rebuild the symmetric 3x3 matrix; the (0,1) entry is constrained to
        # K02*K12, which makes the skew term vanish in K2A below.
        res = np.array([
            K[0], K[1]*K[3], K[1],
            K[1]*K[3], K[2], K[3],
            K[1], K[3], 1.0]).reshape(3,3)
        return res
    def A2K(self, A):
        # K = A.A^T
        return A.dot(A.T)
    def K2A(self, K):
        """ closed-form decomposition for {A | A.AT=K} """
        # M = A.AT, A = upper triangular
        # M.T = A.AT
        #k1,k2,k3,k4,k5 = K[(0,0,0,1,1),(0,1,2,1,2)]
        k1,k3,k4,k5 = K[(0,0,1,1),(0,2,1,2)]
        k2 = k3 * k5
        tmp = np.sqrt(k4 - k5 ** 2)
        # With k2 == k3*k5 the second term under the root (and e01) vanish.
        e00 = np.sqrt( k1 - k3**2 - (k2-k3*k5)**2 / (k4-k5**2) )
        e01 = (k2 - k3*k5) / tmp# np.sqrt(k4-k5**2)
        e02 = k3
        e11 = tmp
        e12 = k5
        res = np.float32([e00,e01,e02,0,e11,e12,0,0,1]).reshape(3,3)
        # NaNs signal a K that admits no real decomposition; report failure.
        if np.any(np.isnan(res)):
            return None
        return res
    def err_anp(self, K):
        # Same residual, evaluated with autograd's numpy for self.jac.
        e = self.err(K, np=anp)
        return e
    def err(self, K, np=np):
        # ( NOTE : K != cameraMatrix)
        # Kruppa residuals: the three ratios e1, e2, e3 must agree for each
        # fundamental matrix, so return their pairwise differences.
        K = self.unwrap_K(K, np=np)
        u1,u2,u3 = [self.cache_[k] for k in ['u1','u2','u3']]
        v1,v2,v3 = [self.cache_[k] for k in ['v1','v2','v3']]
        s1,s2 = [self.cache_[k] for k in ['s1','s2']]
        Ws = self.cache_['Ws']
        nmr1 = mul3(v2,K,v2,np=np)
        dmr1 = (s1*s1) * mul3(u1,K,u1,np=np)
        e1 = (nmr1 / dmr1)
        nmr2 = -mul3(v2,K,v1,np=np)
        dmr2 = (s1*s2) * mul3(u1,K,u2,np=np)
        e2 = (nmr2 / dmr2)
        nmr3 = mul3(v1,K,v1,np=np)
        dmr3 = (s2*s2) * mul3(u2,K,u2,np=np)
        e3 = (nmr3 / dmr3)
        #err12 = nmr1 * dmr2 - nmr2 * dmr1
        #err23 = nmr2 * dmr3 - nmr3 * dmr2
        #err31 = nmr3 * dmr1 - nmr1 * dmr3
        err12 = ((e1 - e2)).ravel()
        err23 = ((e2 - e3)).ravel()
        err31 = ((e1 - e3)).ravel()
        # TODO : utilize Ws
        return np.concatenate([err12, err23, err31])
    #def err_USV(self, USV, np=np):
    #    K = self.cache_['K']
    #    u1,u2,u3 = USV[...,:3*3].reshape(-1,3,3)
    #    s1,s2,s3 = USV[...,3*3:-3*3].reshape(-1,3)
    #    v1,v2,v3 = USV[...,-3*3:].reshape(-1,3,3)
    #    nmr1 = mul3(v2,K,v2,np=np)
    #    dmr1 = (s1*s1) * mul3(u1,K,u1,np=np)
    #    e1 = (nmr1 / dmr1)
    #    nmr2 = -mul3(v2,K,v1,np=np)
    #    dmr2 = (s1*s2) * mul3(u1,K,u2,np=np)
    #    e2 = (nmr2 / dmr2)
    #    nmr3 = mul3(v1,K,v1,np=np)
    #    dmr3 = (s2*s2) * mul3(u2,K,u2,np=np)
    #    e3 = (nmr3 / dmr3)
    #    err12 = (e1 - e2).ravel()
    #    err23 = (e2 - e3).ravel()
    #    err31 = (e3 - e1).ravel()
    #    return np.concatenate([err12, err23, err31])
    def __call__(self, A, Fs, Ws):
        # A = camera Matrix
        # Fs = Nx3x3 Fundamental Matrix
        # Ws = per-matrix weights (currently unused in err; see TODO there)
        U, S, Vt = np.linalg.svd(Fs)
        u1, u2, u3 = U[...,:,0], U[...,:,1], U[...,:,2]
        v1, v2, v3 = Vt[...,0,:], Vt[...,1,:], Vt[...,2,:]
        s1, s2 = S[...,0], S[...,1]
        # Stash the SVD factors for err(); vars() selects the locals by name.
        for k in ['u1','u2','u3','v1','v2','v3','s1','s2']:
            self.cache_[k] = vars()[k]
        self.cache_['Ws'] = Ws
        K = self.A2K(A)
        res = least_squares(
            self.err,
            self.wrap_K(K),
            #x_scale=np.abs( self.wrap_K(K) ),
            x_scale='jac',
            jac=self.jac,
            **self.params_
        )
        K = self.unwrap_K(res.x)
        #print 'K (optimized)'
        return self.K2A(K)
|
|
"""
TODO
- set up datastream
    - pass num_parallel_calls to the map call
- add cache?
- add oversampling https://github.com/tensorflow/tensorflow/issues/14451
- make wandb callback
- set up early stopping
"""
import os
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from transformers import TFBertModel, AutoTokenizer, BertConfig
import wandb
from wandb.keras import WandbCallback
"""
os.environ["WANDB_MODE"] = "dryrun"
wandb.init(project='hope_emoji')
config = wandb.config
config.epochs = 1
config.batch_size = 32
config.optimizer = 'nadam'
"""
class EmoBert():
    """Multilingual BERT with a 150-way sigmoid head for emoji prediction.

    Wraps a pretrained ``TFBertModel`` plus its tokenizer and exposes a
    tf.data input pipeline reading TFRecords of (sentence, label-indices).
    """
    def __init__(self,
                 n_labels=150,
                 model_str="bert-base-multilingual-cased"):
        self.n_labels = n_labels
        self.tokenizer = AutoTokenizer.from_pretrained(model_str)
        self.transformer = TFBertModel.from_pretrained(model_str)
        self.config = BertConfig.from_pretrained(model_str)
        # Maximum sequence length supported by the position embeddings.
        self.max_len = self.config.max_position_embeddings
    def init_input_pipeline(self,
                            tf_record="data/twitter_emoji_sent.tfrecords",
                            validation_size=100000,
                            batch=1):
        # Build self.dataset / self.val_dataset from a TFRecord file.
        # NOTE(review): take(validation_size) counts *batches*, and the
        # validation examples are not excluded from self.dataset (no skip) --
        # confirm the overlap is intended.
        ds = tf.data.TFRecordDataset(
            filenames=[tf_record])
        ds = ds.shuffle(1000, reshuffle_each_iteration=True)
        ds = ds.map(self.parse)
        ds = ds.batch(batch)
        self.val_dataset = ds.take(validation_size)
        self.dataset = ds
    @staticmethod
    def to_categorical(parsed_label):
        # Convert 1-based label indices into a 150-dim multi-hot vector.
        indices = parsed_label
        tensor = tf.zeros(150, dtype=tf.dtypes.int64)
        t = tf.Variable(tensor)  # to allow for item assignment
        for indice in indices:
            t[indice-1].assign(1)
        return t.read_value()
    def tokenize(self, sent):
        # Runs eagerly via tf.py_function, hence the .numpy()/decode round-trip.
        sent = sent.numpy().decode("utf-8")
        tokens = self.tokenizer.encode(sent,
                                       return_tensors="tf",
                                       padding="max_length",
                                       add_special_tokens=True,
                                       truncation=True)
        return tokens
    def parse(self, example_proto):
        """Parse one serialized example into (token ids, multi-hot labels)."""
        features = {
            'sent': tf.io.FixedLenFeature([], tf.string),
            'labels': tf.io.VarLenFeature(tf.int64)
        }
        parsed_features = tf.io.parse_single_example(example_proto, features)
        inputs = tf.py_function(
            self.tokenize, [parsed_features['sent']], tf.int32)
        # NOTE(review): hard-codes a 512-token sequence -- assumes
        # self.max_len == 512; confirm for other checkpoints.
        inputs = tf.reshape(inputs, (512,))
        _ = tf.sparse.to_dense(parsed_features['labels'])
        output = tf.py_function(
            self.to_categorical, [_], tf.int64)
        output = tf.reshape(output, (150,))
        return inputs, output
    def add_classification_layer(self):
        """Placeholder -- not implemented; the head is built in create_model.
        """
    def create_model(self):
        """Assemble and compile the Keras classification model."""
        input_layer = tf.keras.Input(shape=(self.max_len,), dtype='int64')
        bert = self.transformer(input_layer)
        # select the pooler output (as opposed to the last hidden state)
        bert = bert[1]
        # classification layers
        drop = layers.Dropout(0.1)(bert)
        # NOTE(review): stddev=0.00 initialises the head weights to zero --
        # presumably a placeholder value; confirm it is intended.
        out = layers.Dense(self.n_labels,
                           kernel_initializer=tf.keras.initializers.
                           TruncatedNormal(mean=0.0, stddev=0.00),
                           name="emoji_classification",
                           activation="sigmoid")(drop)
        self.model = tf.keras.Model(inputs=input_layer, outputs=out)
        self.model.compile(
            optimizer="nadam",
            loss=tf.keras.losses.BinaryCrossentropy(),
            metrics=[tf.keras.metrics.BinaryCrossentropy()],
        )
    def fit(self):
        # NOTE(review): trains on the *validation* split for one epoch --
        # looks like a smoke test; confirm before production use.
        self.model.fit(self.val_dataset, epochs=1)
if __name__ == "__main__":
    # main()
    # Smoke-test run: build the pipeline/model and fit once (see EmoBert.fit).
    eb = EmoBert()
    eb.init_input_pipeline()
    eb.create_model()
    eb.fit()
    # Pull a single batch to inspect tensor shapes interactively.
    res = eb.dataset.take(1)
    res = iter(res)
    x, y = next(res)
    x.shape
    y.shape
    # fit
    # NOTE(review): ``class_weight`` is undefined here -- this line raises
    # NameError at runtime; define the weights or drop the argument.
    eb.model.fit(eb.dataset, epochs=1, batch_size=1, class_weight=class_weight)
    # batch_size=1, callback=[WandbCallback()])
    x.shape
|
|
"""Test the vasprun.xml parser."""
# pylint: disable=unused-import,redefined-outer-name,unused-argument,unused-wildcard-import,wildcard-import
# pylint: disable=invalid-name
import pytest
import numpy as np
from aiida_vasp.utils.fixtures import *
from aiida_vasp.utils.aiida_utils import get_data_class
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun(vasprun_parser):
    """Load a reference vasprun.xml and compare the parsed occupancies to reference values."""
    quantity = vasprun_parser.get_quantity('occupancies')
    quantity = quantity['total']
    occ = quantity[0]
    occupancies = np.array([[[1., 1., 1., 1., 0.6667, 0.6667, 0.6667, -0., -0., -0.]]])
    # BUGFIX: the old ``occ.all() == occupancies.all()`` compared two scalar
    # truth values and passed regardless of content; compare element-wise.
    assert np.allclose(occ, occupancies)
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_version(vasprun_parser):
    """Load a reference vasprun.xml and fetch the VASP version."""
    # The parser reports the version as a plain string.
    assert vasprun_parser.get_quantity('version') == '5.4.1'
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_kpoints(vasprun_parser):
    """Load a reference vasprun.xml and test that the parsed k-points are correct."""
    points = vasprun_parser.get_quantity('kpoints')['points']
    # Verify the first and last point of the mesh.
    expected_first = np.array([0.0, 0.0, 0.0])
    expected_last = np.array([0.42857143, -0.42857143, 0.42857143])
    np.testing.assert_allclose(points[0], expected_first, atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(points[-1], expected_last, atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_structure(vasprun_parser):
    """Load a reference vasprun.xml and test that the parsed structure is correct."""
    structure = vasprun_parser.get_quantity('structure')
    # Check the unit cell
    np.testing.assert_allclose(structure['unitcell'][0], np.array([5.46503124, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(structure['unitcell'][1], np.array([0.0, 5.46503124, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(structure['unitcell'][2], np.array([0.0, 0.0, 5.46503124]), atol=0., rtol=1.0e-7)
    # Check first and last position
    np.testing.assert_allclose(structure['sites'][0]['position'], np.array([0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(structure['sites'][7]['position'], np.array([4.09877343, 4.09877343, 1.36625781]), atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_final_force(vasprun_parser):
    """Load a reference vasprun.xml and test that the forces are returned correctly."""
    forces = vasprun_parser.get_quantity('forces')
    forces = forces['final']
    forces_check = np.array([[-0.24286901, 0., 0.], [-0.24286901, 0., 0.], [3.41460162, 0., 0.], [0.44305748, 0., 0.],
                             [-0.73887169, 0.43727184, 0.43727184], [-0.94708885, -0.85011586, 0.85011586],
                             [-0.94708885, 0.85011586, -0.85011586], [-0.73887169, -0.43727184, -0.43727184]])
    # Check the first, third and last force vector
    np.testing.assert_allclose(forces[0], forces_check[0], atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(forces[2], forces_check[2], atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(forces[7], forces_check[7], atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_final_stress(vasprun_parser):
    """Load a reference vasprun.xml and test that the stress are returned correctly."""
    stress = vasprun_parser.get_quantity('stress')['final']
    stress_check = np.array([[-0.38703740, 0.00000000, 0.00000000], [0.00000000, 12.52362644, -25.93894358],
                             [0.00000000, -25.93894358, 12.52362644]])
    # Compare each row of the stress tensor against the reference.
    for row, expected in zip(stress, stress_check):
        np.testing.assert_allclose(row, expected, atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('dielectric',)], indirect=True)
def test_parse_vasprun_dielectrics(vasprun_parser):
    """Load a reference vasprun.xml and test that the dielectrics are returned correctly."""
    dielectrics = vasprun_parser.get_quantity('dielectrics')
    imag = dielectrics['idiel']
    real = dielectrics['rdiel']
    energy = dielectrics['ediel']
    # Test shape of arrays (1000 energy points, 6 tensor components)
    assert imag.shape == (1000, 6)
    assert real.shape == (1000, 6)
    assert energy.shape == (1000,)
    # Test a few entries at the start, middle and end of the grid
    np.testing.assert_allclose(imag[0], np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(imag[500], np.array([0.0933, 0.0924, 0.0924, 0.0, 0.0082, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(imag[999], np.array([0.0035, 0.0035, 0.0035, 0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(real[0], np.array([12.0757, 11.4969, 11.4969, 0.0, 0.6477, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(real[500], np.array([-0.5237, -0.5366, -0.5366, 0.0, 0.0134, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(real[999],
                               np.array([6.57100000e-01, 6.55100000e-01, 6.55100000e-01, 0.0, -1.00000000e-04, 0.0]),
                               atol=0.,
                               rtol=1.0e-7)
    assert energy[500] == pytest.approx(10.2933)
@pytest.mark.parametrize(['vasprun_parser'], [('disp_details',)], indirect=True)
def test_parse_vasprun_epsilon(vasprun_parser):
    """Load a reference vasprun.xml and test that epsilon is returned correctly."""
    dielectrics = vasprun_parser.get_quantity('dielectrics')
    epsilon = dielectrics['epsilon']
    epsilon_ion = dielectrics['epsilon_ion']
    # Both tensors are 3x3
    assert epsilon.shape == (3, 3)
    assert epsilon_ion.shape == (3, 3)
    # Electronic part: isotropic with 13.05544887 on the diagonal
    expected = np.diag([13.05544887, 13.05544887, 13.05544887])
    np.testing.assert_allclose(epsilon, expected, atol=0., rtol=1.0e-7)
    # Ionic part is identically zero
    np.testing.assert_allclose(epsilon_ion, np.zeros((3, 3)), atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('localfield',)], indirect=True)
def test_parse_vasprun_born(vasprun_parser):
    """Load a reference vasprun.xml and test that the Born effective charges are
    returned correctly."""
    born = vasprun_parser.get_quantity('born_charges')
    born = born['born_charges']
    # Test shape of array: one 3x3 tensor per atom (8 atoms)
    assert born.shape == (8, 3, 3)
    # Test a few entries
    np.testing.assert_allclose(born[0][0], np.array([6.37225000e-03, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(born[0][-1], np.array([-4.21760000e-04, -2.19570210e-01, 3.20709600e-02]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(born[4][0], np.array([1.68565200e-01, -2.92058000e-02, -2.92058000e-02]), atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_dos(vasprun_parser):
    """Load a reference vasprun.xml and test that the total density of states
    is returned correctly."""
    parsed = vasprun_parser.get_quantity('dos')
    tdos = parsed['tdos']
    grid = parsed['energy']
    # DOS and energy grid share one axis of 301 points
    assert tdos.shape == (301,)
    assert grid.shape == (301,)
    # Spot-check the grid midpoint
    assert tdos[150] == pytest.approx(4.1296)
    assert grid[150] == pytest.approx(2.3373)
@pytest.mark.parametrize(['vasprun_parser'], [('spin',)], indirect=True)
def test_parse_vasprun_dos_spin(vasprun_parser):
    """Load a reference vasprun.xml and test that the spin decomposed
    density of states is returned correctly."""
    tdos = vasprun_parser.get_quantity('dos')['tdos']
    # One DOS trace per spin channel
    assert tdos.shape == (2, 1000)
    # Spot-check the same grid point in each channel
    assert tdos[0, 500] == pytest.approx(0.9839)
    assert tdos[1, 500] == pytest.approx(0.9844)
@pytest.mark.parametrize(['vasprun_parser'], [('partial',)], indirect=True)
def test_parse_vasprun_pdos(vasprun_parser):
    """Load a reference vasprun.xml and test that the projected
    density of states is returned correctly."""
    result = vasprun_parser.get_quantity('dos')
    dos = result['pdos']
    energy = result['energy']
    # Test shape of array -- presumably (atoms, energy grid, orbitals); TODO confirm
    assert dos.shape == (8, 1000, 9)
    assert energy.shape == (1000,)
    # Test a few entries
    np.testing.assert_allclose(dos[3, 500], np.array([0.0770, 0.0146, 0.0109, 0.0155, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(dos[7, 500], np.array([0.0747, 0.0121, 0.0092, 0.0116, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    assert energy[500] == pytest.approx(0.01)
@pytest.mark.parametrize(['vasprun_parser'], [('partial',)], indirect=True)
def test_parse_vasprun_projectors(vasprun_parser):
    """Load a reference vasprun.xml and test that the state projectors are
    returned correctly."""
    proj = vasprun_parser.get_quantity('projectors')
    proj = proj['projectors']
    # Test shape of array -- presumably (atoms, k-points, bands, orbitals); TODO confirm
    assert proj.shape == (8, 64, 21, 9)
    # Test a few entries
    np.testing.assert_allclose(proj[0, 0, 5], np.array([0.0, 0.012, 0.0123, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(proj[7, 0, 5], np.array([0.1909, 0.0001, 0.0001, 0.0001, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(proj[4, 3, 5], np.array([0.2033, 0.0001, 0.0001, 0.0001, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_eigenvalues_occupancies(vasprun_parser):
    """Load a reference vasprun.xml and test that the eigenvalues and
    occupancies are returned correctly."""
    eigen = vasprun_parser.get_quantity('eigenvalues')
    eigen = eigen['total']
    occ = vasprun_parser.get_quantity('occupancies')
    occ = occ['total']
    # Test shape of array
    assert eigen.shape == (64, 21)
    assert occ.shape == (64, 21)
    # Test a few entries
    assert eigen[0, 0] == pytest.approx(-6.2348)
    assert eigen[0, 15] == pytest.approx(5.8956)
    assert eigen[6, 4] == pytest.approx(-1.7424)
    assert occ[0, 0] == pytest.approx(1.0)
    assert occ[0, 15] == pytest.approx(0.6949)
    assert occ[6, 4] == pytest.approx(1.0)
@pytest.mark.parametrize(['vasprun_parser'], [('spin',)], indirect=True)
def test_parse_vasprun_eigenocc_spin_result(vasprun_parser):
    """Load a reference vasprun.xml and test that the spin decomposed eigenvalues
    and occupancies are returned correctly."""
    eigen = vasprun_parser.get_quantity('eigenvalues')
    occ = vasprun_parser.get_quantity('occupancies')
    # Test shape of array
    assert eigen['up'].shape == (64, 25)
    assert occ['up'].shape == (64, 25)
    # Test a few entries in each spin channel
    assert eigen['up'][0, 0] == pytest.approx(-6.2363)
    assert eigen['up'][0, 15] == pytest.approx(5.8939)
    assert eigen['up'][6, 4] == pytest.approx(-1.7438)
    assert eigen['down'][0, 0] == pytest.approx(-6.2357)
    assert eigen['down'][0, 15] == pytest.approx(5.8946)
    assert eigen['down'][6, 4] == pytest.approx(-1.7432)
    assert occ['up'][0, 0] == pytest.approx(1.0)
    assert occ['up'][0, 15] == pytest.approx(0.6955)
    assert occ['up'][6, 4] == pytest.approx(1.0)
    assert occ['down'][0, 0] == pytest.approx(1.0)
    assert occ['down'][0, 15] == pytest.approx(0.6938)
    assert occ['down'][6, 4] == pytest.approx(1.0)
@pytest.mark.parametrize(['vasprun_parser'], [('basic',)], indirect=True)
def test_parse_vasprun_toten(vasprun_parser):
    """Load a reference vasprun.xml and test that one of the total energies
    is returned correctly."""
    result = vasprun_parser.get_quantity('energies')
    # Only the default extrapolated-energy quantities should be present
    assert set(result.keys()) == set(['energy_extrapolated', 'energy_extrapolated_electronic', 'electronic_steps'])
    energies = result['energy_extrapolated']
    test_array = np.array([-42.91113621])
    np.testing.assert_allclose(test_array, energies, atol=0., rtol=1.0e-7)
    # Test number of entries
    assert energies.shape == (1,)
    # Electronic steps should be one
    test_array = np.array([1])
    np.testing.assert_allclose(test_array, result['electronic_steps'], atol=0., rtol=1.0e-7)
    # Testing on VASP 5, where the extrapolated energy should be the following due to a bug
    test_array = np.array([-0.00236711])
    with np.testing.assert_raises(AssertionError):
        np.testing.assert_allclose(test_array, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
    # Instead we correct and it should be
    test_array = np.array([-42.911136])
    np.testing.assert_allclose(test_array, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [(['basic', 'vasprun.xml', {
    'energy_type': ['energy_free', 'energy_no_entropy']
}],)],
                         indirect=True)
def test_toten_multiple(vasprun_parser):
    """Load a reference vasprun.xml and test that multiple total energies
    are returned properly."""
    result = vasprun_parser.get_quantity('energies')
    # Both requested energy types should appear in electronic and final form
    assert set(result.keys()) == set(
        ['electronic_steps', 'energy_free_electronic', 'energy_free', 'energy_no_entropy', 'energy_no_entropy_electronic'])
    test_array = np.array([-42.91231976])
    np.testing.assert_allclose(test_array, result['energy_free_electronic'], atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(test_array, result['energy_free'], atol=0., rtol=1.0e-7)
    test_array = np.array([-42.90995265])
    np.testing.assert_allclose(test_array, result['energy_no_entropy_electronic'], atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(test_array, result['energy_no_entropy'], atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [(['basic', 'vasprun.xml', {'electronic_step_energies': True}],)], indirect=True)
def test_parse_vasprun_toten_electronic(vasprun_parser):
    """Load a reference vasprun.xml and test that the total energies
    are returned correctly for the electronic steps."""
    result = vasprun_parser.get_quantity('energies')
    # Test that the default arrays are present
    assert set(result.keys()) == set(['energy_extrapolated', 'energy_extrapolated_electronic', 'electronic_steps'])
    energies = result['energy_extrapolated_electronic']
    test_array = np.array([-42.91113666, -42.91113621])
    np.testing.assert_allclose(test_array, energies, atol=0., rtol=1.0e-7)
    # Test number of entries
    assert energies.shape == (2,)
    # Electronic steps should be two
    test_array = np.array([2])
    np.testing.assert_allclose(test_array, result['electronic_steps'], atol=0., rtol=1.0e-7)
    # Testing on VASP 5, where the extrapolated energy should be the following due to a bug
    test_array = np.array([-0.00236711])
    with np.testing.assert_raises(AssertionError):
        np.testing.assert_allclose(test_array, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
    # Instead we correct and it should be
    test_array = np.array([-42.911136])
    np.testing.assert_allclose(test_array, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('relax',)], indirect=True)
def test_parse_vasprun_toten_relax(vasprun_parser):
    """Load a reference vasprun.xml and check that the total energies are
    returned correctly for relaxation runs."""
    result = vasprun_parser.get_quantity('energies')
    assert set(result.keys()) == set(['energy_extrapolated', 'energy_extrapolated_electronic', 'electronic_steps'])
    energies = result['energy_extrapolated_electronic']
    # One converged energy per ionic step (19 steps)
    test_array = np.array([
        -42.91113348, -43.27757545, -43.36648855, -43.37734069, -43.38062479, -43.38334165, -43.38753003, -43.38708193, -43.38641449,
        -43.38701639, -43.38699488, -43.38773717, -43.38988315, -43.3898822, -43.39011239, -43.39020751, -43.39034244, -43.39044584,
        -43.39087657
    ])
    # Test energies
    np.testing.assert_allclose(test_array, energies, atol=0., rtol=1.0e-7)
    # Test number of entries
    assert energies.shape == test_array.shape
    # Electronic steps should be entries times one
    np.testing.assert_allclose(np.ones(19, dtype=int), result['electronic_steps'], atol=0., rtol=1.0e-7)
    # Testing on VASP 5, where the extrapolated energy should be the following due to a bug
    test_array = np.array([
        -0.00236637, -0.00048614, -0.00047201, -0.00043261, -0.00041668, -0.00042584, -0.00043637, -0.00042806, -0.00042762, -0.00043875,
        -0.00042731, -0.00042705, -0.00043064, -0.00043051, -0.00043161, -0.00043078, -0.00043053, -0.00043149, -0.00043417
    ])
    with np.testing.assert_raises(AssertionError):
        np.testing.assert_allclose(test_array, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
    # Instead we correct and it should be
    test_array = np.array([
        -42.911133, -43.277575, -43.366489, -43.377341, -43.380625, -43.383342, -43.38753, -43.387082, -43.386414, -43.387016, -43.386995,
        -43.387737, -43.389883, -43.389882, -43.390112, -43.390208, -43.390342, -43.390446, -43.390877
    ])
    np.testing.assert_allclose(test_array, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [(['relax', 'vasprun.xml', {'electronic_step_energies': True}],)], indirect=True)
def test_parse_vasprun_toten_relax_electronic(vasprun_parser):
    """Load a reference vasprun.xml and check that the total energies
    are returned correctly for both the electronic and ionic steps."""
    result = vasprun_parser.get_quantity('energies')
    assert set(result.keys()) == set(['energy_extrapolated', 'energy_extrapolated_electronic', 'electronic_steps'])
    energies = result['energy_extrapolated_electronic']
    # One sub-array of electronic-step energies per ionic step
    test_array_energies = [
        np.array([
            163.37398579, 14.26925896, -23.05190509, -34.91615104, -40.20080347, -42.18390876, -42.97469852, -43.31556073, -43.60169068,
            -43.61723125, -43.61871511, -43.61879751, -43.12548175, -42.90647187, -42.91031846, -42.91099027, -42.91111107, -42.91113348
        ]),
        np.array([-43.34236449, -43.31102002, -43.27768275, -43.27791002, -43.27761357, -43.27757545]),
        np.array([-43.40320524, -43.38084022, -43.36835045, -43.36666248, -43.36666583, -43.36649036, -43.36648855]),
        np.array([-43.37749056, -43.37749102, -43.37734414, -43.37734069]),
        np.array([-43.38117265, -43.38082881, -43.38063293, -43.38062479]),
        np.array([-43.38337336, -43.38334165]),
        np.array([-43.38778922, -43.38766017, -43.38752953, -43.38753003]),
        np.array([-43.38714489, -43.38708193]),
        np.array([-43.38640951, -43.38641449]),
        np.array([-43.3874799, -43.3871553, -43.38701949, -43.38701639]),
        np.array([-43.38790942, -43.38727062, -43.38700335, -43.38699488]),
        np.array([-43.38774394, -43.38773717]),
        np.array([-43.38984942, -43.3899134, -43.38988315]),
        np.array([-43.38988117, -43.3898822]),
        np.array([-43.39032165, -43.39017866, -43.39011239]),
        np.array([-43.39021044, -43.39020751]),
        np.array([-43.39034135, -43.39034244]),
        np.array([-43.39044466, -43.39044584]),
        np.array([-43.39084354, -43.39088709, -43.39087657])
    ]
    test_array_steps = np.array([18, 6, 7, 4, 4, 2, 4, 2, 2, 4, 4, 2, 3, 2, 3, 2, 2, 2, 3])
    # Build a flattened array (not using flatten from NumPy as the content is staggered) and
    # test number of electronic steps per ionic step
    test_array_energies_flattened = np.array([])
    for ionic_step in test_array_energies:
        test_array_energies_flattened = np.append(test_array_energies_flattened, ionic_step)
    assert energies.shape == test_array_energies_flattened.shape
    np.testing.assert_allclose(test_array_energies_flattened, energies, atol=0., rtol=1.0e-7)
    np.testing.assert_allclose(test_array_steps, result['electronic_steps'], atol=0., rtol=1.0e-7)
    test_array_energies = np.array([
        -0.00236637, -0.00048614, -0.00047201, -0.00043261, -0.00041668, -0.00042584, -0.00043637, -0.00042806, -0.00042762, -0.00043875,
        -0.00042731, -0.00042705, -0.00043064, -0.00043051, -0.00043161, -0.00043078, -0.00043053, -0.00043149, -0.00043417
    ])
    # Testing on VASP 5, where the extrapolated energy should be the following due to a bug
    with np.testing.assert_raises(AssertionError):
        np.testing.assert_allclose(test_array_energies, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
    # Instead we correct and it should be
    test_array_energies = np.array([
        -42.911133, -43.277575, -43.366489, -43.377341, -43.380625, -43.383342, -43.38753, -43.387082, -43.386414, -43.387016, -43.386995,
        -43.387737, -43.389883, -43.389882, -43.390112, -43.390208, -43.390342, -43.390446, -43.390877
    ])
    np.testing.assert_allclose(test_array_energies, result['energy_extrapolated'], atol=0., rtol=1.0e-7)
@pytest.mark.parametrize(['vasprun_parser'], [('disp',)], indirect=True)
def test_parse_vasprun_hessian(vasprun_parser):
    """Load a reference vasprun.xml and check that the Hessian matrix
    is returned correctly."""
    hessian = vasprun_parser.get_quantity('hessian')
    hessian = hessian['hessian']
    # Test shape: one row/column per degree of freedom
    assert hessian.shape == (24, 24)
    # Test a few entries
    assert np.allclose(
        hessian[0],
        np.array([
            -4.63550410e-01, 0.00000000e+00, 0.00000000e+00, -5.91774100e-02, 0.00000000e+00, 0.00000000e+00, 3.09711000e-02,
            0.00000000e+00, 0.00000000e+00, 3.20435400e-02, 0.00000000e+00, 0.00000000e+00, 1.15129840e-01, -8.16138200e-02, 8.17234700e-02,
            1.14879520e-01, 8.11324800e-02, 8.27409500e-02, 1.14879520e-01, -8.11324800e-02, -8.27409500e-02, 1.15129840e-01,
            8.16138200e-02, -8.17234700e-02
        ]))
    assert np.allclose(
        hessian[-2],
        np.array([
            8.16138200e-02, 1.15195590e-01, -8.38411100e-02, -8.17234700e-02, 1.14875090e-01, -8.53388100e-02, 3.46686900e-02,
            7.00672700e-02, 2.54288300e-02, -8.26222700e-02, 1.16185510e-01, 7.95575600e-02, -3.05970000e-04, 3.16827300e-02,
            2.86379000e-03, 5.42080000e-04, 3.27613500e-02, 1.12576000e-03, -1.34305000e-03, -5.86811100e-02, 2.83374000e-03,
            4.91688400e-02, -4.22101090e-01, 5.73736900e-02
        ]))
@pytest.mark.parametrize(['vasprun_parser'], [('disp',)], indirect=True)
def test_parse_vasprun_dynmat(vasprun_parser):
    """Load a reference vasprun.xml and check that the dynamical eigenvectors and eigenvalues
    are returned correctly."""
    result = vasprun_parser.get_quantity('dynmat')
    dynvec = result['dynvec']
    dyneig = result['dyneig']
    # test shape: one eigenvector/eigenvalue per degree of freedom
    assert dynvec.shape == (24, 24)
    assert dyneig.shape == (24,)
    # test a few entries
    assert np.allclose(
        dynvec[0],
        np.array([
            7.28517310e-17, 7.25431601e-02, -4.51957676e-02, 1.15412776e-16, 4.51957676e-02, -7.25431601e-02, -1.37347223e-16,
            5.16257351e-01, -5.16257351e-01, 8.16789156e-17, 8.95098005e-02, -8.95098005e-02, -4.43838008e-17, -6.38031134e-02,
            6.38031134e-02, -1.80132830e-01, -2.97969516e-01, 2.97969516e-01, 1.80132830e-01, -2.97969516e-01, 2.97969516e-01,
            -2.09989969e-16, -6.38031134e-02, 6.38031134e-02
        ]))
    assert np.allclose(
        dynvec[4],
        np.array([
            -5.29825122e-13, -2.41759046e-01, -3.28913434e-01, -5.30734671e-13, -3.28913434e-01, -2.41759046e-01, 3.26325910e-13,
            -3.80807441e-02, -3.80807441e-02, -9.22956103e-13, -2.99868012e-01, -2.99868012e-01, 1.64418993e-01, 1.81002749e-01,
            1.81002749e-01, 3.11984195e-13, 2.73349550e-01, 2.73349550e-01, 2.59853610e-13, 2.73349550e-01, 2.73349550e-01, -1.64418993e-01,
            1.81002749e-01, 1.81002749e-01
        ]))
    assert dyneig[0] == pytest.approx(-1.36621537e+00)
    assert dyneig[4] == pytest.approx(-8.48939361e-01)
@pytest.mark.parametrize(['vasprun_parser'], [('spin',)], indirect=True)
def test_band_properties(fresh_aiida_env, vasprun_parser):
    """Load a reference vasprun.xml and check that key properties of the electric structure
    are returned correctly."""
    data = vasprun_parser.get_quantity('band_properties')
    # conduction-band minimum and valence-band maximum in eV
    assert data['cbm'] == pytest.approx(6.5536)
    assert data['vbm'] == pytest.approx(6.5105)
    assert data['is_direct_gap'] is False
    # NOTE: reference gap (0.0431) differs slightly from cbm - vbm rounding; rel tolerance covers it
    assert data['band_gap'] == pytest.approx(0.04310, rel=1e-3)
|
|
from ..base import GreeksFDM, Option as _Option
from ..vanillaoptions import GBSOption as _GBSOption
import numpy as _np
from scipy.optimize import root_scalar as _root_scalar
import sys as _sys
import warnings as _warnings
import numdifftools as _nd
from ..utils import docstring_from
class RollGeskeWhaleyOption(_Option):
    """
    Roll-Geske-Whaley Calls on Dividend Paying Stocks

    Calculates the option price of an American call on a stock
    paying a single dividend with specified time to dividend
    payout. The option valuation formula derived by Roll, Geske
    and Whaley is used.

    Parameters
    ----------
    S : float
        Level or index price.
    K : float
        Strike price.
    t : float
        Time-to-maturity in fractional years. i.e. 1/12 for 1 month, 1/252 for 1 business day, 1.0 for 1 year.
    td : float
        Time to dividend payout in fractional years. i.e. 1/12 for 1 month, 1/252 for 1 business day, 1.0 for 1 year.
    r : float
        Risk-free-rate in decimal format (i.e. 0.01 for 1%).
    D : float
        A single dividend with time to dividend payout td.
    sigma : float
        Annualized volatility of the underlying asset. Optional if calculating implied volatility.
        Required otherwise. By default None.

    Notes
    -----
    put price does not exist.

    Returns
    -------
    RollGeskeWhaleyOption object.

    Example
    -------
    >>> import finoptions as fo
    >>> opt = fo.RollGeskeWhaleyOption(S=80, K=82, t=1/3, td=1/4, r=0.06, D=4, sigma=0.30)
    >>> opt.call()
    >>> opt.greeks(call=True)

    References
    ----------
    [1] Haug E.G., The Complete Guide to Option Pricing Formulas
    """

    __name__ = "RollGeskeWhaleyOption"
    __title__ = "Roll-Geske-Whaley Calls on Dividend Paying Stocks"

    def __init__(
        self,
        S: float,
        K: float,
        t: float,
        td: float,
        r: float,
        D: float,
        sigma: float = None,
    ):
        # Arrays are rejected: the bracketing/bisection loops in call() assume scalars.
        if self._check_array(S, K, t, td, r, D, sigma) == True:
            raise TypeError("Arrays not supported as arguments for this option class")

        self._S = S
        self._K = K
        self._t = t
        self._td = td
        self._r = r
        self._D = D
        self._sigma = sigma

        # Settings:
        self._big = 100000000  # upper bound for the critical-price bracketing search
        self._eps = 1.0e-5  # convergence tolerance for the bisection search

        # finite-difference greeks helper bound to this option instance
        self._greeks = GreeksFDM(self)

    def is_call_optimal(self):
        """
        Method to determine if it is currently optimal to exercise the option.

        Returns
        -------
        True if it is optimal to exercise the option.
        False if it is NOT optimal to exercise the option.
        """
        # Early exercise can only pay when the dividend D exceeds the interest
        # earned on the strike between the dividend date td and maturity t.
        if self._D <= self._K * (1 - _np.exp(-self._r * (self._t - self._td))):
            return False
        else:
            return True

    def call(self):
        """
        Return the Roll-Geske-Whaley price of the American call.

        Returns
        -------
        float

        Raises
        ------
        ValueError
            If the bracketing search for the critical stock price exceeds
            the internal upper bound ``self._big``.
        """
        # Compute:
        # Stock price net of the present value of the dividend.
        Sx = self._S - self._D * _np.exp(-self._r * self._td)

        # If early exercise is never optimal, price as a European call on the
        # dividend-adjusted stock price (b = r, i.e. no net carry adjustment).
        if self._D <= self._K * (1 - _np.exp(-self._r * (self._t - self._td))):
            result = _GBSOption(
                Sx, self._K, self._t, self._r, b=self._r, sigma=self._sigma
            ).call()
            # print("\nWarning: Not optimal to exercise\n")
            return result

        ci = _GBSOption(
            self._S, self._K, self._t - self._td, self._r, b=self._r, sigma=self._sigma
        ).call()

        # Bracket the critical stock price from above by doubling HighS until
        # the early-exercise boundary condition changes sign (or we hit _big).
        HighS = self._S
        while (ci - HighS - self._D + self._K > 0) & (HighS < self._big):
            HighS = HighS * 2
            ci = _GBSOption(
                HighS,
                self._K,
                self._t - self._td,
                self._r,
                b=self._r,
                sigma=self._sigma,
            ).call()

        if HighS > self._big:
            # NOTE(review): this assignment is discarded by the raise below —
            # looks like dead code inherited from the R fOptions port; confirm intent.
            result = _GBSOption(
                Sx, self._K, self._t, self._r, b=self._r, sigma=self._sigma
            ).call()
            raise ValueError("HighS > big setting")

        # Bisection between LowS and HighS for the critical price I at which
        # exercising just before the dividend is marginally optimal.
        LowS = 0
        I = HighS * 0.5
        ci = _GBSOption(
            I, self._K, self._t - self._td, self._r, b=self._r, sigma=self._sigma
        ).call()

        # Search algorithm to find the critical stock price I
        while (abs(ci - I - self._D + self._K) > self._eps) & (
            (HighS - LowS) > self._eps
        ):
            if ci - I - self._D + self._K < 0:
                HighS = I
            else:
                LowS = I
            I = (HighS + LowS) / 2
            ci = _GBSOption(
                I, self._K, self._t - self._td, self._r, b=self._r, sigma=self._sigma
            ).call()

        # d1/d2-style terms: a* over the full maturity t, b* over the dividend
        # horizon td (b1/b2 measured against the critical price I).
        a1 = (_np.log(Sx / self._K) + (self._r + self._sigma ** 2 / 2) * self._t) / (
            self._sigma * _np.sqrt(self._t)
        )
        a2 = a1 - self._sigma * _np.sqrt(self._t)
        b1 = (_np.log(Sx / I) + (self._r + self._sigma ** 2 / 2) * self._td) / (
            self._sigma * _np.sqrt(self._td)
        )
        b2 = b1 - self._sigma * _np.sqrt(self._td)

        # Closed-form RGW price: univariate (_CND) and bivariate (_CBND) normal
        # terms with correlation -sqrt(td/t).
        result = (
            Sx * self._CND(b1)
            + Sx * self._CBND(a1, -b1, -_np.sqrt(self._td / self._t))
            - self._K
            * _np.exp(-self._r * self._t)
            * self._CBND(a2, -b2, -_np.sqrt(self._td / self._t))
            - (self._K - self._D) * _np.exp(-self._r * self._td) * self._CND(b2)
        )

        return result

    def get_params(self):
        # Dictionary of constructor parameters; used by summary() and copy/set_param machinery.
        return {
            "S": self._S,
            "K": self._K,
            "t": self._t,
            "td": self._td,
            "r": self._r,
            "D": self._D,
            "sigma": self._sigma,
        }

    def summary(self, printer=True):
        """
        Print summary report of option

        Parameters
        ----------
        printer : bool
            True to print summary. False to return a string.
        """
        out = f"Title: {self.__title__} Valuation\n\nParameters:\n\n"

        params = self.get_params()
        for p in params:
            out += f"  {p} = {params[p]}\n"

        try:
            # if self._sigma or its variations are not None add call and put prices
            price = f"\nOption Price:\n\n  call: {round(self.call(),6)}\n"
            out += price
        except:
            # sigma is None (implied-vol use case): silently omit the price line
            pass

        out += f"  Optimal to Exercise Call Option: {self.is_call_optimal()}"

        if printer == True:
            print(out)
        else:
            return out

    def delta(self):
        """
        Method to return delta greek for a call options using Finite Difference Methods.

        Parameters
        ----------
        None

        Returns
        -------
        float
        """
        return self._greeks.delta()

    def theta(self):
        """
        Method to return theta greek for a call options using Finite Difference Methods.

        Parameters
        ----------
        None

        Returns
        -------
        float
        """
        return self._greeks.theta()

    def vega(self):
        """
        Method to return vega greek for a call options using Finite Difference Methods.

        Parameters
        ----------
        None

        Returns
        -------
        float
        """
        return self._greeks.vega()

    def rho(self):
        """
        Method to return rho greek for a call options using Finite Difference Methods.

        Parameters
        ----------
        None

        Returns
        -------
        float
        """
        return self._greeks.rho()

    def lamb(self):
        """
        Method to return lamb greek for a call options using Finite Difference Methods.

        Parameters
        ----------
        None

        Returns
        -------
        float
        """
        return self._greeks.lamb()

    def gamma(self):
        """
        Method to return gamma greek for a call options using Finite Difference Methods.

        Parameters
        ----------
        None

        Returns
        -------
        float
        """
        return self._greeks.gamma()

    def greeks(self):
        """
        Method to return greeks as a dictiontary for a call option using Finite Difference Methods.

        Parameters
        ----------
        None

        Returns
        -------
        float
        """
        return self._greeks.greeks()

    def volatility(
        self,
        price: float,
        tol=_sys.float_info.epsilon,
        maxiter=10000,
        verbose=False,
    ):
        """
        Compute the implied volatility of the RollGeskeWhaleyOption.

        Parameters
        ----------
        price : float
            Current price of the option
        tol : float
            max tolerance to fit the price to. By default system tolerance.
        maxiter : int
            number of iterations to run to fit price.
        verbose : bool
            True to return full optimization details from root finder function. False to just return the implied volatility numbers.

        Returns
        -------
        float

        Example
        -------
        """
        # Delegates to the base-class root finder; True selects the call price
        # (this option has no put).
        sol = self._volatility(price, True, tol, maxiter, verbose)

        return sol
class BAWAmericanApproxOption(_Option):
    """
    Barone-Adesi and Whaley Approximation. Calculates the option price of an
    American call or put option on an underlying asset for a given cost-of-carry
    rate. The quadratic approximation method by Barone-Adesi and Whaley is used.

    Parameters
    ----------
    S : float
        Level or index price.
    K : float
        Strike price.
    t : float
        Time-to-maturity in fractional years. i.e. 1/12 for 1 month, 1/252 for 1 business day, 1.0 for 1 year.
    r : float
        Risk-free-rate in decimal format (i.e. 0.01 for 1%).
    b : float
        Annualized cost-of-carry rate, e.g. 0.1 means 10%
    sigma : float
        Annualized volatility of the underlying asset. Optional if calculating implied volatility.
        Required otherwise. By default None.

    Returns
    -------
    Option object.

    Example
    -------
    >>> import finoptions as fo
    >>> opt = fo.BAWAmericanApproxOption(10.0, 8.0, 1.0, 0.02, 0.01, 0.1)
    >>> opt.call()
    >>> opt.put()
    >>> opt.greeks(call=True)

    References
    ----------
    [1] Haug E.G., The Complete Guide to Option Pricing Formulas
    """

    __name__ = "BAWAmericanApproxOption"
    __title__ = "Barone-Adesi and Whaley Approximation"

    def __init__(
        self, S: float, K: float, t: float, r: float, b: float, sigma: float = None
    ):
        # only being used for check_array. Remove __init__ once arrays work.
        if self._check_array(S, K, t, r, b, sigma) == True:
            raise TypeError("Arrays not supported as arguments for this option class")

        super().__init__(S, K, t, r, b, sigma)

        self._greeks = GreeksFDM(self)

    def _bawKc(self):
        # Newton Raphson algorithm to solve for the critical commodity
        # price for a Call.
        # Calculation of seed value, Si
        n = 2 * self._b / self._sigma ** 2
        m = 2 * self._r / self._sigma ** 2
        q2u = (-(n - 1) + _np.sqrt((n - 1) ** 2 + 4 * m)) / 2
        Su = self._K / (1 - 1 / q2u)  # perpetual-option critical price (upper seed)
        h2 = (
            -(self._b * self._t + 2 * self._sigma * _np.sqrt(self._t))
            * self._K
            / (Su - self._K)
        )
        Si = self._K + (Su - self._K) * (1 - _np.exp(h2))

        X = 2 * self._r / (self._sigma ** 2 * (1 - _np.exp(-self._r * self._t)))
        d1 = (_np.log(Si / self._K) + (self._b + self._sigma ** 2 / 2) * self._t) / (
            self._sigma * _np.sqrt(self._t)
        )
        Q2 = (-(n - 1) + _np.sqrt((n - 1) ** 2 + 4 * X)) / 2
        # LHS/RHS of the high-contact (smooth-pasting) condition at Si.
        LHS = Si - self._K
        RHS = (
            _GBSOption(Si, self._K, self._t, self._r, self._b, self._sigma).call()
            + (1 - _np.exp((self._b - self._r) * self._t) * self._CND(d1)) * Si / Q2
        )
        # bi: slope of RHS w.r.t. Si used by the Newton-Raphson update.
        bi = (
            _np.exp((self._b - self._r) * self._t) * self._CND(d1) * (1 - 1 / Q2)
            + (
                1
                - _np.exp((self._b - self._r) * self._t)
                * self._CND(d1)
                / (self._sigma * _np.sqrt(self._t))
            )
            / Q2
        )
        E = 0.000001  # relative convergence tolerance

        # Newton Raphson algorithm for finding critical price Si
        while abs(LHS - RHS) / self._K > E:
            Si = (self._K + RHS - bi * Si) / (1 - bi)
            d1 = (
                _np.log(Si / self._K) + (self._b + self._sigma ** 2 / 2) * self._t
            ) / (self._sigma * _np.sqrt(self._t))
            LHS = Si - self._K
            RHS = (
                _GBSOption(Si, self._K, self._t, self._r, self._b, self._sigma).call()
                + (1 - _np.exp((self._b - self._r) * self._t) * self._CND(d1)) * Si / Q2
            )
            bi = (
                _np.exp((self._b - self._r) * self._t) * self._CND(d1) * (1 - 1 / Q2)
                + (
                    1
                    - _np.exp((self._b - self._r) * self._t)
                    * self._CND(d1)
                    / (self._sigma * _np.sqrt(self._t))
                )
                / Q2
            )

        # Return Value:
        return Si

    def _bawKp(self):
        # Newton Raphson algorithm to solve for the critical commodity
        # price for a Put.
        # Calculation of seed value, Si
        n = 2 * self._b / self._sigma ** 2
        m = 2 * self._r / self._sigma ** 2
        q1u = (-(n - 1) - _np.sqrt((n - 1) ** 2 + 4 * m)) / 2
        Su = self._K / (1 - 1 / q1u)  # perpetual-option critical price (lower seed)
        h1 = (
            (self._b * self._t - 2 * self._sigma * _np.sqrt(self._t))
            * self._K
            / (self._K - Su)
        )
        Si = Su + (self._K - Su) * _np.exp(h1)

        X = 2 * self._r / (self._sigma ** 2 * (1 - _np.exp(-self._r * self._t)))
        d1 = (_np.log(Si / self._K) + (self._b + self._sigma ** 2 / 2) * self._t) / (
            self._sigma * _np.sqrt(self._t)
        )
        Q1 = (-(n - 1) - _np.sqrt((n - 1) ** 2 + 4 * X)) / 2
        # LHS/RHS of the high-contact (smooth-pasting) condition at Si.
        LHS = self._K - Si
        RHS = (
            _GBSOption(Si, self._K, self._t, self._r, self._b, self._sigma).put()
            - (1 - _np.exp((self._b - self._r) * self._t) * self._CND(-d1)) * Si / Q1
        )
        # bi: slope of RHS w.r.t. Si used by the Newton-Raphson update.
        bi = (
            -_np.exp((self._b - self._r) * self._t) * self._CND(-d1) * (1 - 1 / Q1)
            - (
                1
                + _np.exp((self._b - self._r) * self._t)
                * self._CND(-d1)
                / (self._sigma * _np.sqrt(self._t))
            )
            / Q1
        )
        E = 0.000001  # relative convergence tolerance

        # Newton Raphson algorithm for finding critical price Si
        while abs(LHS - RHS) / self._K > E:
            Si = (self._K - RHS + bi * Si) / (1 + bi)
            d1 = (
                _np.log(Si / self._K) + (self._b + self._sigma ** 2 / 2) * self._t
            ) / (self._sigma * _np.sqrt(self._t))
            LHS = self._K - Si
            RHS = (
                _GBSOption(Si, self._K, self._t, self._r, self._b, self._sigma).put()
                - (1 - _np.exp((self._b - self._r) * self._t) * self._CND(-d1))
                * Si
                / Q1
            )
            bi = (
                -_np.exp((self._b - self._r) * self._t) * self._CND(-d1) * (1 - 1 / Q1)
                - (
                    1
                    + _np.exp((self._b - self._r) * self._t)
                    * self._CND(-d1)
                    / (self._sigma * _np.sqrt(self._t))
                )
                / Q1
            )

        # Return Value:
        return Si

    def call(self):
        """
        Returns the calculated price of a call option according to the
        Barone-Adesi and Whaley Approximation option price model.

        Returns
        -------
        float

        Example
        -------
        >>> import finoptions as fo
        >>> opt = fo.BAWAmericanApproxOption(10.0, 8.0, 1.0, 0.02, 0.01, 0.1)
        >>> opt.call()

        References
        ----------
        [1] Haug E.G., The Complete Guide to Option Pricing Formulas
        """
        # When b >= r early exercise is never optimal: the American call equals
        # the European (Black-Scholes generalized) price.
        if self._b >= self._r:
            result = _GBSOption(
                self._S, self._K, self._t, self._r, self._b, self._sigma
            ).call()
        else:
            Sk = self._bawKc()  # critical price above which exercise is optimal
            n = 2 * self._b / self._sigma ** 2
            X = 2 * self._r / (self._sigma ** 2 * (1 - _np.exp(-self._r * self._t)))
            d1 = (
                _np.log(Sk / self._K) + (self._b + self._sigma ** 2 / 2) * self._t
            ) / (self._sigma * _np.sqrt(self._t))
            Q2 = (-(n - 1) + _np.sqrt((n - 1) ** 2 + 4 * X)) / 2
            a2 = (Sk / Q2) * (
                1 - _np.exp((self._b - self._r) * self._t) * self._CND(d1)
            )
            if self._S < Sk:
                # European price plus the early-exercise premium a2*(S/Sk)^Q2
                result = (
                    _GBSOption(
                        self._S, self._K, self._t, self._r, self._b, self._sigma
                    ).call()
                    + a2 * (self._S / Sk) ** Q2
                )
            else:
                # At or above the critical price: immediate exercise value
                result = self._S - self._K

        # Return Value:
        return result

    def put(self):
        """
        Returns the calculated price of a put option according to the
        Barone-Adesi and Whaley Approximation option price model.

        Returns
        -------
        float

        Example
        -------
        >>> import finoptions as fo
        >>> opt = fo.BAWAmericanApproxOption(10.0, 8.0, 1.0, 0.02, 0.01, 0.1)
        >>> opt.put()

        References
        ----------
        [1] Haug E.G., The Complete Guide to Option Pricing Formulas
        """
        Sk = self._bawKp()  # critical price below which exercise is optimal
        n = 2 * self._b / self._sigma ** 2
        X = 2 * self._r / (self._sigma ** 2 * (1 - _np.exp(-self._r * self._t)))
        d1 = (_np.log(Sk / self._K) + (self._b + self._sigma ** 2 / 2) * self._t) / (
            self._sigma * _np.sqrt(self._t)
        )
        Q1 = (-(n - 1) - _np.sqrt((n - 1) ** 2 + 4 * X)) / 2
        a1 = -(Sk / Q1) * (1 - _np.exp((self._b - self._r) * self._t) * self._CND(-d1))

        if self._S > Sk:
            # European price plus the early-exercise premium a1*(S/Sk)^Q1
            result = (
                _GBSOption(
                    self._S, self._K, self._t, self._r, self._b, self._sigma
                ).put()
                + a1 * (self._S / Sk) ** Q1
            )
        else:
            # At or below the critical price: immediate exercise value
            result = self._K - self._S

        return result

    @docstring_from(GreeksFDM.delta)
    def delta(self, call: bool = True):
        return self._greeks.delta(call=call)

    @docstring_from(GreeksFDM.theta)
    def theta(self, call: bool = True):
        return self._greeks.theta(call=call)

    @docstring_from(GreeksFDM.rho)
    def rho(self, call: bool = True):
        return self._greeks.rho(call=call)

    @docstring_from(GreeksFDM.lamb)
    def lamb(self, call: bool = True):
        return self._greeks.lamb(call=call)

    @docstring_from(GreeksFDM.gamma)
    def gamma(self):
        return self._greeks.gamma()

    @docstring_from(GreeksFDM.greeks)
    def greeks(self, call: bool = True):
        # need to override so that the overridden vega is used
        gk = {
            "delta": self.delta(call),
            "theta": self.theta(call),
            "vega": self.vega(),
            "rho": self.rho(call),
            "lambda": self.lamb(call),
            "gamma": self.gamma(),
        }

        return gk

    @docstring_from(GreeksFDM.vega)
    def vega(self):
        # same for both call and put options
        # over-rode parent class vega as it is unstable for larger step sizes of sigma.
        fd = self._greeks._make_partial_der(
            "sigma", True, self, n=1, step=self._sigma / 10
        )

        return float(fd(self._sigma))
class BSAmericanApproxOption(_Option):
    """
    BSAmericanApproxOption evaluates American calls or puts on stocks, futures, and currencies
    due to the approximation method of Bjerksund and Stensland (1993)

    Parameters
    ----------
    S : float
        Level or index price.
    K : float
        Strike price.
    t : float
        Time-to-maturity in fractional years. i.e. 1/12 for 1 month, 1/252 for 1 business day, 1.0 for 1 year.
    r : float
        Risk-free-rate in decimal format (i.e. 0.01 for 1%).
    b : float
        Annualized cost-of-carry rate, e.g. 0.1 means 10%
    sigma : float
        Annualized volatility of the underlying asset. Optional if calculating implied volatility.
        Required otherwise. By default None.

    Returns
    -------
    Option object.

    Example
    -------
    >>> import finoptions as fo
    >>> opt = fo.basic_american_options.BSAmericanApproxOption(10.0, 8.0, 1.0, 0.02, 0.01, 0.1)
    >>> opt.call()
    >>> opt.put()
    >>> opt.greeks(call=True)

    References
    ----------
    [1] Haug E.G., The Complete Guide to Option Pricing Formulas
    [2] Bjerksund P., Stensland G. (1993);Closed Form Approximation of American Options, Scandinavian Journal of Management 9, 87–99
    """

    __name__ = "BSAmericanApproxOption"
    __title__ = "The Bjerksund and Stensland (1993) American Approximation Option"

    def __init__(
        self, S: float, K: float, t: float, r: float, b: float, sigma: float = None
    ):
        # only being used for check_array. Remove __init__ once arrays work.
        if self._check_array(S, K, t, r, b, sigma) == True:
            raise TypeError("Arrays not supported as arguments for this option class")

        super().__init__(S, K, t, r, b, sigma)

        self._greeks = GreeksFDM(self)
        # override make_partial_der because call() and put() return dicts
        self._greeks._make_partial_der = self._make_partial_der

    def _make_partial_der(self, wrt, call, opt, **kwargs):
        """
        Create monad from Option methods call and put for use
        in calculating the partial derivatives or greeks with
        respect to wrt.
        """
        # need to override since call/put method return dicts.
        def _func(x):
            # Evaluate a copy so the finite-difference probe never mutates self.
            tmp = opt.copy()
            tmp.set_param(wrt, x)
            if call == True:
                return tmp.call()["OptionPrice"]
            else:
                return tmp.put()["OptionPrice"]

        fd = _nd.Derivative(_func, **kwargs)

        return fd

    @docstring_from(GreeksFDM.delta)
    def delta(self, call: bool = True):
        return self._greeks.delta(call=call)

    @docstring_from(GreeksFDM.theta)
    def theta(self, call: bool = True):
        return self._greeks.theta(call=call)

    @docstring_from(GreeksFDM.rho)
    def rho(self, call: bool = True):
        return self._greeks.rho(call=call)

    @docstring_from(GreeksFDM.gamma)
    def gamma(self):
        return self._greeks.gamma()

    @docstring_from(GreeksFDM.greeks)
    def greeks(self, call: bool = True):
        # need to override so that the overridden lamb is used
        gk = {
            "delta": self.delta(call),
            "theta": self.theta(call),
            "vega": self.vega(),
            "rho": self.rho(call),
            "lambda": self.lamb(call),
            "gamma": self.gamma(),
        }

        return gk

    @docstring_from(GreeksFDM.lamb)
    def lamb(self, call: bool = True):
        # lambda = delta * S / price; price extracted from the call/put dict
        if call == True:
            price = self.call()["OptionPrice"]
        else:
            price = self.put()["OptionPrice"]
        return self.delta(call=call) * self._S / price

    @docstring_from(GreeksFDM.vega)
    def vega(self):
        # same for both call and put options, overriden as it needs smaller step sizes
        fd = self._make_partial_der("sigma", True, self, n=1, step=self._sigma / 10)
        # NOTE(review): `* 1` appears to coerce numdifftools' 0-d array output — confirm
        return fd(self._sigma) * 1

    def call(self):
        """
        Returns the calculated price of a call option according to the
        The Bjerksund and Stensland (1993) American Approximation option price model.

        Returns
        -------
        dict(str:float) with OptionPrice and TriggerPrice

        Example
        -------
        >>> import finoptions as fo
        >>> opt = fo.BSAmericanApproxOption(10.0, 8.0, 1.0, 0.02, 0.01, 0.1)
        >>> opt.call()

        References
        ----------
        [1] Haug E.G., The Complete Guide to Option Pricing Formulas
        """
        return self._BSAmericanCallApprox(
            self._S, self._K, self._t, self._r, self._b, self._sigma
        )

    def put(self):
        """
        Returns the calculated price of a put option according to the
        The Bjerksund and Stensland (1993) American Approximation option price model.

        Returns
        -------
        dict(str:float) with OptionPrice and TriggerPrice

        Example
        -------
        >>> import finoptions as fo
        >>> opt = fo.BSAmericanApproxOption(10.0, 8.0, 1.0, 0.02, 0.01, 0.1)
        >>> opt.put()

        References
        ----------
        [1] Haug E.G., The Complete Guide to Option Pricing Formulas
        """
        # Use the Bjerksund and Stensland put-call transformation
        # P(S, K, t, r, b) = C(K, S, t, r - b, -b)
        return self._BSAmericanCallApprox(
            self._K, self._S, self._t, self._r - self._b, -self._b, self._sigma
        )

    def _BSAmericanCallApprox(self, S, X, Time, r, b, sigma):
        # Call Approximation:
        if b >= r:
            # Never optimal to exersice before maturity
            result = dict(
                OptionPrice=_GBSOption(S, X, Time, r, b, sigma).call(),
                TriggerPrice=_np.nan,
            )
        else:
            Beta = (1 / 2 - b / sigma ** 2) + _np.sqrt(
                (b / sigma ** 2 - 1 / 2) ** 2 + 2 * r / sigma ** 2
            )
            # BInfinity/B0: perpetual and immediate exercise boundaries used to
            # interpolate the flat trigger price I.
            BInfinity = Beta / (Beta - 1) * X
            B0 = max(X, r / (r - b) * X)
            ht = -(b * Time + 2 * sigma * _np.sqrt(Time)) * B0 / (BInfinity - B0)
            # Trigger Price I:
            I = B0 + (BInfinity - B0) * (1 - _np.exp(ht))
            alpha = (I - X) * I ** (-Beta)
            if S >= I:
                # Above the trigger: exercise immediately
                result = dict(OptionPrice=S - X, TriggerPrice=I)
            else:
                # Closed-form combination of the phi auxiliary functions
                result = dict(
                    OptionPrice=alpha * S ** Beta
                    - alpha * self._bsPhi(S, Time, Beta, I, I, r, b, sigma)
                    + self._bsPhi(S, Time, 1, I, I, r, b, sigma)
                    - self._bsPhi(S, Time, 1, X, I, r, b, sigma)
                    - X * self._bsPhi(S, Time, 0, I, I, r, b, sigma)
                    + X * self._bsPhi(S, Time, 0, X, I, r, b, sigma),
                    TriggerPrice=I,
                )

        return result

    def _bsPhi(self, S, Time, gamma, H, I, r, b, sigma):
        # Utility function phi:
        lamb = (-r + gamma * b + 0.5 * gamma * (gamma - 1) * sigma ** 2) * Time
        d = -(_np.log(S / H) + (b + (gamma - 0.5) * sigma ** 2) * Time) / (
            sigma * _np.sqrt(Time)
        )
        kappa = 2 * b / (sigma ** 2) + (2 * gamma - 1)
        result = (
            _np.exp(lamb)
            * S ** gamma
            * (
                self._CND(d)
                - (I / S) ** kappa
                * self._CND(d - 2 * _np.log(I / S) / (sigma * _np.sqrt(Time)))
            )
        )

        return result

    def summary(self, printer=True):
        """
        Print summary report of option

        Parameters
        ----------
        printer : bool
            True to print summary. False to return a string.
        """
        out = f"Title: {self.__title__} Valuation\n\nParameters:\n\n"

        params = self.get_params()
        for p in params:
            out += f"  {p} = {params[p]}\n"

        try:
            # if self._sigma or its variations are not None add call and put prices
            price = f"\nOption Price:\n\n  call: {round(self.call()['OptionPrice'],6)}, trigger: {round(self.call()['TriggerPrice'],6)}\n  put: {round(self.put()['OptionPrice'],6)}, trigger: {round(self.put()['TriggerPrice'],6)}"
            out += price
        except:
            # sigma is None (implied-vol use case): silently omit the price lines
            pass

        if printer == True:
            print(out)
        else:
            return out
|
|
#!/usr/bin/env python3
import argparse
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
import dns
def main():
    """CLI entry point: load a state file, compute the direction-dependent
    spectral dropoffs and plot them on log-log axes."""
    parser = argparse.ArgumentParser("Computes direction-dependent dropoffs.")
    parser.add_argument(
        "statePath",
        type=str,
        help="path to the state of interest.",
    )
    parser.add_argument(
        "--tex", action="store_true", dest="tex", help="use LaTeX to render text."
    )
    parser.add_argument(
        "--noshow", action="store_true", dest="noshow", help="do not display the plots."
    )
    cli = vars(parser.parse_args())

    state_path = Path(cli["statePath"])
    state, header = dns.readState(state_path)
    _forcing, nx, ny, nz, Lx, Lz, Re, tilt_angle, _dt, _itime, _time = header

    drop_x, drop_y, drop_z = dnsdrop(state, header)

    dns.setPlotDefaults(tex=cli["tex"])
    figures_dir = dns.createFiguresDir(state_path.parent)

    # Include the tilt angle in the title only when it is nonzero.
    if abs(tilt_angle) > 0:
        title = f"$\\mathrm{{Re}}={Re:.1f}$, $L=({Lx:.1f},{dns.Ly:.1f},{Lz:.1f})$, $\\theta={tilt_angle:.1f}$, $N=({nx},{ny},{nz})$"
    else:
        title = f"$\\mathrm{{Re}}={Re:.1f}$, $L=({Lx:.1f},{dns.Ly:.1f},{Lz:.1f})$, $N=({nx},{ny},{nz})$"

    fig, ax = plt.subplots()
    # One curve per direction; the zeroth wavenumber is skipped on each axis.
    curves = (
        (drop_x, "$\\max_{{n_y \\neq 0,\\, n_z \\neq 0}} |{{\\bf u}}(n, n_y, n_z)|$"),
        (drop_y, "$\\max_{{n_x \\neq 0,\\, n_z \\neq 0}} |{{\\bf u}}(n_x, n, n_z)|$"),
        (drop_z, "$\\max_{{n_x \\neq 0,\\, n_y \\neq 0}} |{{\\bf u}}(n_x, n_y, n)|$"),
    )
    for drops, label in curves:
        wavenums = np.arange(drops.shape[0])
        ax.plot(wavenums[1:], drops[1:], label=label)
    ax.grid(True, which="both")
    ax.set_xlabel("$n$")
    ax.xaxis.get_major_locator().set_params(integer=True)
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.legend()
    ax.set_title(title)
    fig.savefig(figures_dir / f"{state_path.name}_drops.png")

    if not cli["noshow"]:
        plt.show()
def dnsdrop(state, header):
forcing, nx, ny, nz, Lx, Lz, Re, tilt_angle, dt, itime, time = header
nxp, nyp, nzp = nx // 2 - 1, ny // 2 - 1, nz // 2 - 1
drop_x = np.zeros((nxp + 1))
drop_y = np.zeros((nyp + 1))
drop_z = np.zeros((nzp + 1))
norm2 = np.sum((np.conj(state) * state).real, axis=3)
norm2[:, 0, :] = 0.5 * norm2[:, 0, :]
norm = np.sqrt(norm2)
for i in range(1, nxp + 1):
drop_x[i] = max(np.amax(norm[i, 1:, 1:]), np.amax(norm[-i, 1:, 1:]))
for j in range(nyp + 1):
drop_y[j] = np.amax(norm[1:, j, 1:])
for k in range(nzp + 1):
drop_z[k] = max(np.amax(norm[1:, 1:, k]), np.amax(norm[1:, 1:, -k]))
return (
drop_x,
drop_y,
drop_z,
)
# Script entry point: parse CLI arguments and produce the dropoff plots.
if __name__ == "__main__":
    main()
|
|
import matplotlib
matplotlib.use('TkAgg')
import pymc3 as pm
import pandas as pd
import matplotlib
import numpy as np
import pickle as pkl
import datetime
from BaseModel import BaseModel
import isoweek
from matplotlib import rc
from shared_utils import *
from pymc3.stats import quantiles
from matplotlib import pyplot as plt
from config import * # <-- to select the right model
# from pandas import register_matplotlib_converters
# register_matplotlib_converters() # the fk python
def temporal_contribution(model_i=15, combinations=combinations, save_plot=False):
    """Plot the temporal (combined, trend and periodic) contributions of model
    `model_i` for county "09162", optionally saving the figure to ../figures/."""
    use_ia, use_report_delay, use_demographics, trend_order, periodic_order = combinations[model_i]

    plt.style.use('ggplot')

    with open('../data/counties/counties.pkl', "rb") as f:
        county_info = pkl.load(f)

    # Colors for the mean line (C1) and quantile lines (C2/C3).
    C1 = "#D55E00"
    C2 = "#E69F00"
    C3 = C2  # "#808080"

    # Extra row in the grid when the report-delay panel is used.
    if use_report_delay:
        fig = plt.figure(figsize=(25, 10))
        grid = plt.GridSpec(4, 1, top=0.93, bottom=0.12,
                            left=0.11, right=0.97, hspace=0.28, wspace=0.30)
    else:
        fig = plt.figure(figsize=(16, 10))
        grid = plt.GridSpec(3, 1, top=0.93, bottom=0.12,
                            left=0.11, right=0.97, hspace=0.28, wspace=0.30)

    disease = "covid19"
    prediction_region = "germany"

    data = load_daily_data(disease, prediction_region, county_info)
    first_day = pd.Timestamp('2020-04-01')
    last_day = data.index.max()

    # Only the training slice is needed to evaluate the temporal features.
    _, target_train, _, _ = split_data(
        data,
        train_start=first_day,
        test_start=last_day - pd.Timedelta(days=1),
        post_test=last_day + pd.Timedelta(days=1)
    )

    tspan = (target_train.index[0], target_train.index[-1])

    # NOTE(review): include_demographics/trend_poly_order/periodic_poly_order are
    # hard-coded here instead of taken from combinations[model_i] — confirm intent.
    model = BaseModel(tspan,
                      county_info,
                      ["../data/ia_effect_samples/{}_{}.pkl".format(disease,
                                                                    i) for i in range(100)],
                      include_ia=use_ia,
                      include_report_delay=use_report_delay,
                      include_demographics=True,
                      trend_poly_order=4,
                      periodic_poly_order=4)

    features = model.evaluate_features(
        target_train.index, target_train.columns)

    # Restrict to a single county ("09162", presumably Munich — verify).
    trend_features = features["temporal_trend"].swaplevel(0, 1).loc["09162"]
    periodic_features = features["temporal_seasonal"].swaplevel(0, 1).loc["09162"]

    #t_all = t_all_b if disease == "borreliosis" else t_all_cr
    trace = load_final_trace()
    trend_params = pm.trace_to_dataframe(trace, varnames=["W_t_t"])
    periodic_params = pm.trace_to_dataframe(trace, varnames=["W_t_s"])

    # Posterior-sample contributions: (samples x days) matrices.
    TT = trend_params.values.dot(trend_features.values.T)
    TP = periodic_params.values.dot(periodic_features.values.T)
    TTP = TT + TP

    # add report delay if used
    #if use_report_delay:
    #    delay_features = features["temporal_report_delay"].swaplevel(0,1).loc["09162"]
    #    delay_params = pm.trace_to_dataframe(trace,varnames=["W_t_d"])
    #    TD =delay_params.values.dot(delay_features.values.T)
    #    TTP += TD
    #    TD_quantiles = quantiles(TD, (25, 75))

    TT_quantiles = quantiles(TT, (25, 75))
    TP_quantiles = quantiles(TP, (25, 75))
    TTP_quantiles = quantiles(TTP, (2.5,25, 75,97.5))

    dates = [pd.Timestamp(day) for day in target_train.index.values]
    days = [ (day - min(dates)).days for day in dates]

    # Temporal trend+periodic effect
    # NOTE(review): both branches create the same subplot — the conditional is redundant.
    if use_report_delay:
        ax_tp = fig.add_subplot(grid[0, 0])
    else:
        ax_tp = fig.add_subplot(grid[0, 0])

    ax_tp.fill_between(days, np.exp(TTP_quantiles[25]), np.exp(
        TTP_quantiles[75]), alpha=0.5, zorder=1, facecolor=C1)
    ax_tp.plot(days, np.exp(TTP.mean(axis=0)),
               "-", color=C1, lw=2, zorder=5)
    ax_tp.plot(days, np.exp(
        TTP_quantiles[25]), "-", color=C2, lw=2, zorder=3)
    ax_tp.plot(days, np.exp(
        TTP_quantiles[75]), "-", color=C2, lw=2, zorder=3)
    ax_tp.plot(days, np.exp(
        TTP_quantiles[2.5]), "--", color=C2, lw=2, zorder=3)
    ax_tp.plot(days, np.exp(
        TTP_quantiles[97.5]), "--", color=C2, lw=2, zorder=3)
    #ax_tp.plot(days, np.exp(TTP[:25, :].T),
    #           "--", color=C3, lw=1, alpha=0.5, zorder=2)
    ax_tp.tick_params(axis="x", rotation=45)

    # Temporal trend effect
    ax_t = fig.add_subplot(grid[1, 0], sharex=ax_tp)
    #ax_t.fill_between(days, np.exp(TT_quantiles[25]), np.exp(
    #    TT_quantiles[75]), alpha=0.5, zorder=1, facecolor=C1)
    ax_t.plot(days, np.exp(TT.mean(axis=0)),
              "-", color=C1, lw=2, zorder=5)
    #ax_t.plot(days, np.exp(
    #    TT_quantiles[25]), "-", color=C2, lw=2, zorder=3)
    #ax_t.plot(days, np.exp(
    #    TT_quantiles[75]), "-", color=C2, lw=2, zorder=3)
    #ax_t.plot(days, np.exp(TT[:25, :].T),
    #          "--", color=C3, lw=1, alpha=0.5, zorder=2)
    ax_t.tick_params(axis="x", rotation=45)
    ax_t.ticklabel_format(axis="y", style="sci", scilimits=(0,0))

    # Temporal periodic effect
    ax_p = fig.add_subplot(grid[2, 0], sharex=ax_tp)
    #ax_p.fill_between(days, np.exp(TP_quantiles[25]), np.exp(
    #    TP_quantiles[75]), alpha=0.5, zorder=1, facecolor=C1)
    ax_p.plot(days, np.exp(TP.mean(axis=0)),
              "-", color=C1, lw=2, zorder=5)
    ax_p.set_ylim([-0.0001,0.001])
    #ax_p.plot(days, np.exp(
    #    TP_quantiles[25]), "-", color=C2, lw=2, zorder=3)
    #ax_p.plot(days, np.exp(
    #    TP_quantiles[75]), "-", color=C2, lw=2, zorder=3)
    #ax_p.plot(days, np.exp(TP[:25, :].T),
    #          "--", color=C3, lw=1, alpha=0.5, zorder=2)
    ax_p.tick_params(axis="x", rotation=45)
    ax_p.ticklabel_format(axis="y", style="sci", scilimits=(0,0))

    # NOTE(review): ticks/labels are currently unused (the set_xticks call below
    # is commented out); kept for the intended date axis.
    ticks = ['2020-03-02','2020-03-12','2020-03-22','2020-04-01','2020-04-11','2020-04-21','2020-05-1','2020-05-11','2020-05-21']
    labels = ['02.03.2020','12.03.2020','22.03.2020','01.04.2020','11.04.2020','21.04.2020','01.05.2020','11.05.2020','21.05.2020']

    #if use_report_delay:
    #    ax_td = fig.add_subplot(grid[2, 0], sharex=ax_p)
    #
    #    ax_td.fill_between(days, np.exp(TD_quantiles[25]), np.exp(
    #        TD_quantiles[75]), alpha=0.5, zorder=1, facecolor=C1)
    #    ax_td.plot(days, np.exp(TD.mean(axis=0)),
    #               "-", color=C1, lw=2, zorder=5)
    #    ax_td.plot(days, np.exp(
    #        TD_quantiles[25]), "-", color=C2, lw=2, zorder=3)
    #    ax_td.plot(days, np.exp(
    #        TD_quantiles[75]), "-", color=C2, lw=2, zorder=3)
    #    ax_td.plot(days, np.exp(TD[:25, :].T),
    #               "--", color=C3, lw=1, alpha=0.5, zorder=2)
    #    ax_td.tick_params(axis="x", rotation=45)

    #ax_tp.set_title("campylob." if disease ==
    #                "campylobacter" else disease, fontsize=22)

    ax_p.set_xlabel("time [days]", fontsize=22)
    ax_p.set_ylabel("periodic\ncontribution", fontsize=22)
    ax_t.set_ylabel("trend\ncontribution", fontsize=22)
    ax_tp.set_ylabel("combined\ncontribution", fontsize=22)
    #if use_report_delay:
    #    ax_td.set_ylabel("r.delay\ncontribution", fontsize=22)

    ax_t.set_xlim(days[0], days[-1])
    ax_t.tick_params(labelbottom=False, labelleft=True, labelsize=18, length=6)
    ax_p.tick_params(labelbottom=True, labelleft=True, labelsize=18, length=6)
    ax_tp.tick_params(labelbottom=False, labelleft=True, labelsize=18, length=6)
    #ax_p.set_xticks(ticks)#,labels)

    if save_plot:
        fig.savefig("../figures/temporal_contribution_{}.pdf".format(model_i))

    #return fig
# Script entry point: render (and save) the plot for model index 15.
if __name__ == "__main__":
    _ = temporal_contribution(15, combinations,save_plot=True)
|
|
import codecs
import hashlib
import json
import logging
import numbers
import os
import re
import shutil
import sys
import six
from six.moves.collections_abc import Sequence as SixSequence
import wandb
from wandb import util
from wandb._globals import _datatypes_callback
from wandb.compat import tempfile
from wandb.util import has_num
from .interface import _dtypes
# Typing-only imports and aliases: guarded so that heavyweight libraries
# (numpy, pandas, torch, ...) are never imported at runtime just for annotations.
if wandb.TYPE_CHECKING:
    from typing import (
        TYPE_CHECKING,
        ClassVar,
        Dict,
        Optional,
        Type,
        Union,
        Sequence,
        Tuple,
        Set,
        Any,
        List,
        cast,
    )

    if TYPE_CHECKING:  # pragma: no cover
        from .interface.artifacts import ArtifactEntry
        from .wandb_artifacts import Artifact as LocalArtifact
        from .wandb_run import Run as LocalRun
        from wandb.apis.public import Artifact as PublicArtifact
        import numpy as np  # type: ignore
        import pandas as pd  # type: ignore
        import matplotlib  # type: ignore
        import plotly  # type: ignore
        import PIL  # type: ignore
        import torch  # type: ignore
        from typing import TextIO

        # Aliases used throughout this module's annotations.
        TypeMappingType = Dict[str, Type["WBValue"]]
        NumpyHistogram = Tuple[np.ndarray, np.ndarray]
        ValToJsonType = Union[
            dict,
            "WBValue",
            Sequence["WBValue"],
            "plotly.Figure",
            "matplotlib.artist.Artist",
            "pd.DataFrame",
            object,
        ]
        ImageDataType = Union[
            "matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray"
        ]
        ImageDataOrPathType = Union[str, "Image", ImageDataType]
        TorchTensorType = Union["torch.Tensor", "torch.Variable"]

# Temp directory for staged media files; cleaned up when the object is collected.
_MEDIA_TMP = tempfile.TemporaryDirectory("wandb-media")
_DATA_FRAMES_SUBDIR = os.path.join("media", "data_frames")
def _safe_sdk_import():
    """Import the SDK Run and Artifact classes lazily.

    Deferred to call time because importing them at module load would create
    a circular dependency.

    Returns:
        tuple: (LocalRun, LocalArtifact) classes.
    """
    from .wandb_artifacts import Artifact as LocalArtifact
    from .wandb_run import Run as LocalRun

    return (LocalRun, LocalArtifact)
class _WBValueArtifactSource(object):
# artifact: "PublicArtifact"
# name: Optional[str]
def __init__(self, artifact, name = None):
self.artifact = artifact
self.name = name
class _WBValueArtifactTarget(object):
# artifact: "LocalArtifact"
# name: Optional[str]
def __init__(self, artifact, name = None):
self.artifact = artifact
self.name = name
class WBValue(object):
    """
    Abstract parent class for things that can be logged by `wandb.log()` and
    visualized by wandb.

    The objects will be serialized as JSON and always have a _type attribute
    that indicates how to interpret the other fields.
    """

    # Class Attributes
    # Lazily-built map from `_log_type` string to the subclass that handles it
    # (populated on first call to type_mapping()).
    _type_mapping = None
    # override _log_type to indicate the type which the subclass deserializes
    _log_type = None

    # Instance Attributes
    # _artifact_source: Optional[_WBValueArtifactSource]
    # _artifact_target: Optional[_WBValueArtifactTarget]

    def __init__(self):
        # Artifact bookkeeping: where this value came from / where it is headed.
        self._artifact_source = None
        self._artifact_target = None

    def to_json(self, run_or_artifact):
        """Serializes the object into a JSON blob, using a run or artifact to store additional data.

        Args:
            run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which this object should be generating
                JSON for - this is useful to store additional data if needed.

        Returns:
            dict: JSON representation
        """
        raise NotImplementedError

    @classmethod
    def from_json(
        cls, json_obj, source_artifact
    ):
        """Deserialize a `json_obj` into its class representation. If additional resources were stored in the
        `run_or_artifact` artifact during the `to_json` call, then those resources are expected to be in
        the `source_artifact`.

        Args:
            json_obj (dict): A JSON dictionary to deserialize
            source_artifact (wandb.Artifact): An artifact which will hold any additional resources which were stored
                during the `to_json` function.
        """
        raise NotImplementedError

    @classmethod
    def with_suffix(cls, name, filetype = "json"):
        """Helper function to return the name with suffix added if not already

        Args:
            name (str): the name of the file
            filetype (str, optional): the filetype to use. Defaults to "json".

        Returns:
            str: a filename which is suffixed with its `_log_type` followed by the filetype
        """
        # e.g. "mask" + "table.json" when _log_type == "table".
        if cls._log_type is not None:
            suffix = cls._log_type + "." + filetype
        else:
            suffix = filetype
        if not name.endswith(suffix):
            return name + "." + suffix
        return name

    @staticmethod
    def init_from_json(
        json_obj, source_artifact
    ):
        """Looks through all subclasses and tries to match the json obj with the class which created it. It will then
        call that subclass' `from_json` method. Importantly, this function will set the return object's `source_artifact`
        attribute to the passed in source artifact. This is critical for artifact bookkeeping. If you choose to create
        a wandb.Value via its `from_json` method, make sure to properly set this `artifact_source` to avoid data duplication.

        Args:
            json_obj (dict): A JSON dictionary to deserialize. It must contain a `_type` key. The value of
                this key is used to lookup the correct subclass to use.
            source_artifact (wandb.Artifact): An artifact which will hold any additional resources which were stored
                during the `to_json` function.

        Returns:
            wandb.Value: a newly created instance of a subclass of wandb.Value
        """
        class_option = WBValue.type_mapping().get(json_obj["_type"])
        if class_option is not None:
            obj = class_option.from_json(json_obj, source_artifact)
            obj._set_artifact_source(source_artifact)
            return obj
        # Unknown `_type`: nothing registered can deserialize it.
        return None

    @staticmethod
    def type_mapping():
        """Returns a map from `_log_type` to subclass. Used to lookup correct types for deserialization.

        Returns:
            dict: dictionary of str:class
        """
        if WBValue._type_mapping is None:
            WBValue._type_mapping = {}
            # Iterative (stack-based) walk over the whole subclass tree,
            # registering every subclass that declares a _log_type.
            frontier = [WBValue]
            explored = set([])
            while len(frontier) > 0:
                class_option = frontier.pop()
                explored.add(class_option)
                if class_option._log_type is not None:
                    WBValue._type_mapping[class_option._log_type] = class_option
                for subclass in class_option.__subclasses__():
                    if subclass not in explored:
                        frontier.append(subclass)
        return WBValue._type_mapping

    def __eq__(self, other):
        # Default equality is identity; subclasses override with value equality.
        return id(self) == id(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_data_array(self):
        """Converts the object to a list of primitives representing the underlying data"""
        raise NotImplementedError

    def _set_artifact_source(
        self, artifact, name = None
    ):
        # The source may be bound at most once; rebinding indicates a
        # bookkeeping bug upstream.
        assert (
            self._artifact_source is None
        ), "Cannot update artifact_source. Existing source: {}/{}".format(
            self._artifact_source.artifact, self._artifact_source.name
        )
        self._artifact_source = _WBValueArtifactSource(artifact, name)

    def _set_artifact_target(
        self, artifact, name = None
    ):
        # The target may be bound at most once; rebinding indicates a
        # bookkeeping bug upstream.
        assert (
            self._artifact_target is None
        ), "Cannot update artifact_target. Existing target: {}/{}".format(
            self._artifact_target.artifact, self._artifact_target.name
        )
        self._artifact_target = _WBValueArtifactTarget(artifact, name)

    def _get_artifact_reference_entry(self):
        """Return the artifact entry referencing this value's file, or None."""
        ref_entry = None
        # If the object is coming from another artifact
        if self._artifact_source and self._artifact_source.name:
            ref_entry = self._artifact_source.artifact.get_path(
                type(self).with_suffix(self._artifact_source.name)
            )
        # Else, if the object is destined for another artifact
        elif (
            self._artifact_target
            and self._artifact_target.name
            and self._artifact_target.artifact._logged_artifact is not None
        ):
            # Currently, we do not have a way to obtain a reference URL without waiting for the
            # upstream artifact to be logged. This implies that this only works online as well.
            self._artifact_target.artifact.wait()
            ref_entry = self._artifact_target.artifact.get_path(
                type(self).with_suffix(self._artifact_target.name)
            )
        return ref_entry
class Histogram(WBValue):
    """wandb class for histograms.

    This object works just like numpy's histogram function
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html

    Examples:
        Generate histogram from a sequence
        ```python
        wandb.Histogram([1,2,3])
        ```

        Efficiently initialize from np.histogram.
        ```python
        hist = np.histogram(data)
        wandb.Histogram(np_histogram=hist)
        ```

    Arguments:
        sequence: (array_like) input data for histogram
        np_histogram: (numpy histogram) alternative input of a precomputed histogram
        num_bins: (int) Number of bins for the histogram. The default number of bins
            is 64. The maximum number of bins is 512

    Attributes:
        bins: ([float]) edges of bins
        histogram: ([int]) number of elements falling in each bin
    """

    MAX_LENGTH = 512
    _log_type = "histogram"

    def __init__(
        self,
        sequence = None,
        np_histogram = None,
        num_bins = 64,
    ):
        if np_histogram:
            # Precomputed histogram: accept a (values, bin_edges) pair,
            # coercing array-likes to plain lists.
            if len(np_histogram) != 2:
                raise ValueError(
                    "Expected np_histogram to be a tuple of (values, bin_edges) or sequence to be specified"
                )
            values, bin_edges = np_histogram[0], np_histogram[1]
            self.histogram = values.tolist() if hasattr(values, "tolist") else values
            self.bins = (
                bin_edges.tolist() if hasattr(bin_edges, "tolist") else bin_edges
            )
        else:
            # Compute the histogram from the raw sequence via numpy.
            np = util.get_module(
                "numpy", required="Auto creation of histograms requires numpy"
            )
            counts, edges = np.histogram(sequence, bins=num_bins)
            self.histogram = counts.tolist()
            self.bins = edges.tolist()
        if len(self.histogram) > self.MAX_LENGTH:
            raise ValueError(
                "The maximum length of a histogram is %i" % self.MAX_LENGTH
            )
        if len(self.histogram) + 1 != len(self.bins):
            raise ValueError("len(bins) must be len(histogram) + 1")

    def to_json(self, run = None):
        """Serialize to the wandb histogram JSON schema."""
        json_dict = {"_type": self._log_type, "values": self.histogram, "bins": self.bins}
        return json_dict

    def __sizeof__(self):
        """Estimated size in bytes; the 1.7 factor approximates JSON encoding
        overhead. Used by tb_watcher.TBHistory.
        """
        raw_size = sys.getsizeof(self.histogram) + sys.getsizeof(self.bins)
        return int(raw_size * 1.7)
class Media(WBValue):
    """A WBValue that we store as a file outside JSON and show in a media panel
    on the front end.

    If necessary, we move or copy the file into the Run's media directory so that it gets
    uploaded.
    """

    # _path: Optional[str]
    # _run: Optional["LocalRun"]
    # _caption: Optional[str]
    # _is_tmp: Optional[bool]
    # _extension: Optional[str]
    # _sha256: Optional[str]
    # _size: Optional[int]

    def __init__(self, caption = None):
        super(Media, self).__init__()
        self._path = None
        # The run under which this object is bound, if any.
        self._run = None
        self._caption = caption

    def _set_file(
        self, path, is_tmp = False, extension = None
    ):
        """Record the backing file and fingerprint it (sha256 + size).

        NOTE(review): attributes are assigned before the extension check below,
        so a ValueError leaves the object partially initialized — confirm
        whether callers rely on this.
        """
        self._path = path
        self._is_tmp = is_tmp
        self._extension = extension
        if extension is not None and not path.endswith(extension):
            raise ValueError(
                'Media file extension "{}" must occur at the end of path "{}".'.format(
                    extension, path
                )
            )
        # Content hash used for dedup and equality checks.
        with open(self._path, "rb") as f:
            self._sha256 = hashlib.sha256(f.read()).hexdigest()
        self._size = os.path.getsize(self._path)

    @classmethod
    def get_media_subdir(cls):
        """Run-relative directory for files of this media type; abstract here."""
        raise NotImplementedError

    @staticmethod
    def captions(
        media_items,
    ):
        # Captions are treated as all-or-nothing: if the first item has no
        # caption, False is returned for the whole batch.
        if media_items[0]._caption is not None:
            return [m._caption for m in media_items]
        else:
            return False

    def is_bound(self):
        # True once bind_to_run() has been called.
        return self._run is not None

    def file_is_set(self):
        # True once _set_file() has been called.
        return self._path is not None and self._sha256 is not None

    def bind_to_run(
        self,
        run,
        key,
        step,
        id_ = None,
    ):
        """Bind this object to a particular Run.

        Calling this function is necessary so that we have somewhere specific to
        put the file associated with this object, from which other Runs can
        refer to it.
        """
        if not self.file_is_set():
            raise AssertionError("bind_to_run called before _set_file")
        # The following two assertions are guaranteed to pass
        # by definition file_is_set, but are needed for
        # mypy to understand that these are strings below.
        assert isinstance(self._path, six.string_types)
        assert isinstance(self._sha256, six.string_types)
        if run is None:
            raise TypeError('Argument "run" must not be None.')
        self._run = run
        # Following assertion required for mypy
        assert self._run is not None
        if self._extension is None:
            _, extension = os.path.splitext(os.path.basename(self._path))
        else:
            extension = self._extension
        if id_ is None:
            # Default id: short prefix of the content hash.
            id_ = self._sha256[:8]
        file_path = _wb_filename(key, step, id_, extension)
        media_path = os.path.join(self.get_media_subdir(), file_path)
        new_path = os.path.join(self._run.dir, media_path)
        util.mkdir_exists_ok(os.path.dirname(new_path))
        # Temp files are moved (consumed); permanent files are copied.
        if self._is_tmp:
            shutil.move(self._path, new_path)
            self._path = new_path
            self._is_tmp = False
            _datatypes_callback(media_path)
        else:
            shutil.copy(self._path, new_path)
            self._path = new_path
            _datatypes_callback(media_path)

    def to_json(self, run):
        """Serializes the object into a JSON blob, using a run or artifact to store additional data. If `run_or_artifact`
        is a wandb.Run then `self.bind_to_run()` must have been previously been called.

        Args:
            run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which this object should be generating
                JSON for - this is useful to store additional data if needed.

        Returns:
            dict: JSON representation
        """
        # NOTE: uses of Audio in this class are a temporary hack -- when Ref support moves up
        # into Media itself we should get rid of them
        from wandb.data_types import Audio

        json_obj = {}
        run_class, artifact_class = _safe_sdk_import()
        if isinstance(run, run_class):
            if not self.is_bound():
                raise RuntimeError(
                    "Value of type {} must be bound to a run with bind_to_run() before being serialized to JSON.".format(
                        type(self).__name__
                    )
                )
            assert (
                self._run is run
            ), "We don't support referring to media files across runs."
            # The following two assertions are guaranteed to pass
            # by definition is_bound, but are needed for
            # mypy to understand that these are strings below.
            assert isinstance(self._path, six.string_types)
            json_obj.update(
                {
                    "_type": "file",  # TODO(adrian): This isn't (yet) a real media type we support on the frontend.
                    "path": util.to_forward_slash_path(
                        os.path.relpath(self._path, self._run.dir)
                    ),
                    "sha256": self._sha256,
                    "size": self._size,
                }
            )
            artifact_entry = self._get_artifact_reference_entry()
            if artifact_entry is not None:
                json_obj["artifact_path"] = artifact_entry.ref_url()
        elif isinstance(run, artifact_class):
            if self.file_is_set():
                # The following two assertions are guaranteed to pass
                # by definition of the call above, but are needed for
                # mypy to understand that these are strings below.
                assert isinstance(self._path, six.string_types)
                assert isinstance(self._sha256, six.string_types)
                # Checks if the concrete image has already been added to this artifact
                artifact = run
                name = artifact.get_added_local_path_name(self._path)
                if name is None:
                    if self._is_tmp:
                        name = os.path.join(
                            self.get_media_subdir(), os.path.basename(self._path)
                        )
                    else:
                        # If the file is not temporary, include the first 8 characters of the file's SHA256 to
                        # avoid name collisions. This way, if there are two images `dir1/img.png` and `dir2/img.png`
                        # we end up with a unique path for each.
                        name = os.path.join(
                            self.get_media_subdir(),
                            self._sha256[:8],
                            os.path.basename(self._path),
                        )
                    # if not, check to see if there is a source artifact for this object
                    if (
                        self._artifact_source
                        is not None
                        # and self._artifact_source.artifact != artifact
                    ):
                        default_root = self._artifact_source.artifact._default_root()
                        # if there is, get the name of the entry (this might make sense to move to a helper off artifact)
                        if self._path.startswith(default_root):
                            name = self._path[len(default_root) :]
                            name = name.lstrip(os.sep)
                        # Add this image as a reference
                        path = self._artifact_source.artifact.get_path(name)
                        artifact.add_reference(path.ref_url(), name=name)
                    elif isinstance(self, Audio) and Audio.path_is_reference(
                        self._path
                    ):
                        artifact.add_reference(self._path, name=name)
                    else:
                        entry = artifact.add_file(
                            self._path, name=name, is_tmp=self._is_tmp
                        )
                        name = entry.path
                json_obj["path"] = name
        json_obj["_type"] = self._log_type
        return json_obj

    @classmethod
    def from_json(
        cls, json_obj, source_artifact
    ):
        """Likely will need to override for any more complicated media objects"""
        return cls(source_artifact.get_path(json_obj["path"]).download())

    def __eq__(self, other):
        """Likely will need to override for any more complicated media objects"""
        # Media equality is content equality: same class, same sha256.
        return (
            isinstance(other, self.__class__)
            and hasattr(self, "_sha256")
            and hasattr(other, "_sha256")
            and self._sha256 == other._sha256
        )
class BatchableMedia(Media):
    """Base class for media types that are logged in batches (e.g. images,
    thumbnails).

    Subclasses implement `seq_to_json` to serialize a sequence of items; apart
    from images the batching only serves to organize files by name in the
    media directory.
    """

    def __init__(self):
        super(BatchableMedia, self).__init__()

    @classmethod
    def seq_to_json(cls, seq, run, key, step):
        """Serialize `seq`, a list of this media type, for `run`; abstract here."""
        raise NotImplementedError
class Object3D(BatchableMedia):
    """
    Wandb class for 3D point clouds.

    Arguments:
        data_or_path: (numpy array, string, io)
            Object3D can be initialized from a file or a numpy array.
            The file types supported are obj, gltf, babylon, stl. You can pass a path to
            a file or an io object and a file_type which must be one of `'obj', 'gltf', 'babylon', 'stl'`.
            The shape of the numpy array must be one of either:
            ```python
            [[x y z], ...] nx3
            [x y z c], ...] nx4 where c is a category with supported range [1, 14]
            [x y z r g b], ...] nx6 where rgb is color
            ```
    """

    SUPPORTED_TYPES = set(
        ["obj", "gltf", "glb", "babylon", "stl", "pts.json"]
    )
    _log_type = "object3D-file"

    def __init__(
        self, data_or_path, **kwargs
    ):
        super(Object3D, self).__init__()
        if hasattr(data_or_path, "name"):
            # if the file has a path, we just detect the type and copy it from there
            data_or_path = data_or_path.name  # type: ignore
        if hasattr(data_or_path, "read"):
            # File-like input: read the payload into a temp file.
            if hasattr(data_or_path, "seek"):
                data_or_path.seek(0)  # type: ignore
            object_3d = data_or_path.read()  # type: ignore
            extension = kwargs.pop("file_type", None)
            if extension is None:
                raise ValueError(
                    "Must pass file type keyword argument when using io objects."
                )
            if extension not in Object3D.SUPPORTED_TYPES:
                raise ValueError(
                    "Object 3D only supports numpy arrays or files of the type: "
                    + ", ".join(Object3D.SUPPORTED_TYPES)
                )
            tmp_path = os.path.join(
                _MEDIA_TMP.name, util.generate_id() + "." + extension
            )
            with open(tmp_path, "w") as f:
                f.write(object_3d)
            self._set_file(tmp_path, is_tmp=True)
        elif isinstance(data_or_path, six.string_types):
            # Path input: infer the extension from the filename suffix.
            path = data_or_path
            extension = None
            for supported_type in Object3D.SUPPORTED_TYPES:
                if path.endswith(supported_type):
                    extension = supported_type
                    break
            if not extension:
                raise ValueError(
                    "File '"
                    + path
                    + "' is not compatible with Object3D: supported types are: "
                    + ", ".join(Object3D.SUPPORTED_TYPES)
                )
            self._set_file(data_or_path, is_tmp=False)
        # Supported different types and scene for 3D scenes
        elif isinstance(data_or_path, dict) and "type" in data_or_path:
            if data_or_path["type"] == "lidar/beta":
                # Coerce the scene's arrays to plain lists for JSON encoding.
                data = {
                    "type": data_or_path["type"],
                    "vectors": data_or_path["vectors"].tolist()
                    if "vectors" in data_or_path
                    else [],
                    "points": data_or_path["points"].tolist()
                    if "points" in data_or_path
                    else [],
                    "boxes": data_or_path["boxes"].tolist()
                    if "boxes" in data_or_path
                    else [],
                }
            else:
                raise ValueError(
                    "Type not supported, only 'lidar/beta' is currently supported"
                )
            # NOTE(review): the codecs.open handle passed to json.dump is never
            # explicitly closed — relies on GC; confirm acceptable.
            tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".pts.json")
            json.dump(
                data,
                codecs.open(tmp_path, "w", encoding="utf-8"),
                separators=(",", ":"),
                sort_keys=True,
                indent=4,
            )
            self._set_file(tmp_path, is_tmp=True, extension=".pts.json")
        elif _is_numpy_array(data_or_path):
            np_data = data_or_path
            # The following assertion is required for numpy to trust that
            # np_data is numpy array. The reason it is behind a False
            # guard is to ensure that this line does not run at runtime,
            # which would cause a runtime error if the user's machine did
            # not have numpy installed.
            if wandb.TYPE_CHECKING and TYPE_CHECKING:
                assert isinstance(np_data, np.ndarray)
            if len(np_data.shape) != 2 or np_data.shape[1] not in {3, 4, 6}:
                raise ValueError(
                    """The shape of the numpy array must be one of either
                                    [[x y z], ...] nx3
                                    [x y z c], ...] nx4 where c is a category with supported range [1, 14]
                                    [x y z r g b], ...] nx4 where is rgb is color"""
                )
            list_data = np_data.tolist()
            tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".pts.json")
            json.dump(
                list_data,
                codecs.open(tmp_path, "w", encoding="utf-8"),
                separators=(",", ":"),
                sort_keys=True,
                indent=4,
            )
            self._set_file(tmp_path, is_tmp=True, extension=".pts.json")
        else:
            raise ValueError("data must be a numpy array, dict or a file object")

    @classmethod
    def get_media_subdir(cls):
        """All 3D object files live under media/object3D in the run directory."""
        return os.path.join("media", "object3D")

    def to_json(self, run_or_artifact):
        json_dict = super(Object3D, self).to_json(run_or_artifact)
        json_dict["_type"] = Object3D._log_type
        _, artifact_class = _safe_sdk_import()
        if isinstance(run_or_artifact, artifact_class):
            # Only point-cloud (.pts.json) objects can go in artifacts today.
            if self._path is None or not self._path.endswith(".pts.json"):
                raise ValueError(
                    "Non-point cloud 3D objects are not yet supported with Artifacts"
                )
        return json_dict

    @classmethod
    def seq_to_json(
        cls,
        seq,
        run,
        key,
        step,
    ):
        """Serialize a sequence of Object3D values into one summary dict."""
        seq = list(seq)
        jsons = [obj.to_json(run) for obj in seq]
        for obj in jsons:
            expected = util.to_forward_slash_path(cls.get_media_subdir())
            if not obj["path"].startswith(expected):
                raise ValueError(
                    "Files in an array of Object3D's must be in the {} directory, not {}".format(
                        expected, obj["path"]
                    )
                )
        return {
            "_type": "object3D",
            "filenames": [
                os.path.relpath(j["path"], cls.get_media_subdir()) for j in jsons
            ],
            "count": len(jsons),
            "objects": jsons,
        }
class Molecule(BatchableMedia):
    """
    Wandb class for Molecular data

    Arguments:
        data_or_path: (string, io)
            Molecule can be initialized from a file name or an io object.
    """

    SUPPORTED_TYPES = set(
        ["pdb", "pqr", "mmcif", "mcif", "cif", "sdf", "sd", "gro", "mol2", "mmtf"]
    )
    _log_type = "molecule-file"

    def __init__(self, data_or_path, **kwargs):
        super(Molecule, self).__init__()
        if hasattr(data_or_path, "name"):
            # if the file has a path, we just detect the type and copy it from there
            data_or_path = data_or_path.name  # type: ignore
        if hasattr(data_or_path, "read"):
            # File-like input: read the payload into a temp file; the file type
            # must be given via the `file_type` keyword.
            if hasattr(data_or_path, "seek"):
                data_or_path.seek(0)  # type: ignore
            molecule = data_or_path.read()  # type: ignore
            extension = kwargs.pop("file_type", None)
            if extension is None:
                raise ValueError(
                    "Must pass file type keyword argument when using io objects."
                )
            if extension not in Molecule.SUPPORTED_TYPES:
                raise ValueError(
                    "Molecule 3D only supports files of the type: "
                    + ", ".join(Molecule.SUPPORTED_TYPES)
                )
            tmp_path = os.path.join(
                _MEDIA_TMP.name, util.generate_id() + "." + extension
            )
            with open(tmp_path, "w") as f:
                f.write(molecule)
            self._set_file(tmp_path, is_tmp=True)
        elif isinstance(data_or_path, six.string_types):
            # Path input: the filename extension determines the file type.
            extension = os.path.splitext(data_or_path)[1][1:]
            if extension not in Molecule.SUPPORTED_TYPES:
                raise ValueError(
                    "Molecule only supports files of the type: "
                    + ", ".join(Molecule.SUPPORTED_TYPES)
                )
            self._set_file(data_or_path, is_tmp=False)
        else:
            raise ValueError("Data must be file name or a file object")

    @classmethod
    def get_media_subdir(cls):
        """All molecule files live under media/molecule in the run directory."""
        return os.path.join("media", "molecule")

    def to_json(self, run_or_artifact):
        json_dict = super(Molecule, self).to_json(run_or_artifact)
        json_dict["_type"] = self._log_type
        if self._caption:
            json_dict["caption"] = self._caption
        return json_dict

    @classmethod
    def seq_to_json(
        cls,
        seq,
        run,
        key,
        step,
    ):
        """Serialize a sequence of Molecule values into one summary dict."""
        seq = list(seq)
        jsons = [obj.to_json(run) for obj in seq]
        for obj in jsons:
            expected = util.to_forward_slash_path(cls.get_media_subdir())
            if not obj["path"].startswith(expected):
                raise ValueError(
                    "Files in an array of Molecule's must be in the {} directory, not {}".format(
                        cls.get_media_subdir(), obj["path"]
                    )
                )
        return {
            "_type": "molecule",
            "filenames": [obj["path"] for obj in jsons],
            "count": len(jsons),
            "captions": Media.captions(seq),
        }
class Html(BatchableMedia):
    """
    Wandb class for arbitrary html

    Arguments:
        data: (string or io object) HTML to display in wandb
        inject: (boolean) Add a stylesheet to the HTML object. If set
            to False the HTML will pass through unchanged.
    """

    _log_type = "html-file"

    def __init__(self, data, inject = True):
        super(Html, self).__init__()
        data_is_path = isinstance(data, six.string_types) and os.path.exists(data)
        data_path = ""
        if data_is_path:
            # String naming an existing file: load its contents.
            assert isinstance(data, six.string_types)
            data_path = data
            with open(data_path, "r") as file:
                self.html = file.read()
        elif isinstance(data, six.string_types):
            # Raw HTML string.
            self.html = data
        elif hasattr(data, "read"):
            # File-like object.
            if hasattr(data, "seek"):
                data.seek(0)
            self.html = data.read()
        else:
            raise ValueError("data must be a string or an io object")
        if inject:
            self.inject_head()
        # Injected (or non-path) HTML differs from any source file on disk,
        # so it must be written to a fresh temp file.
        if inject or not data_is_path:
            tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".html")
            with open(tmp_path, "w") as out:
                out.write(self.html)
            self._set_file(tmp_path, is_tmp=True)
        else:
            self._set_file(data_path, is_tmp=False)

    def inject_head(self):
        """Insert the wandb base/stylesheet tags just after the document head."""
        join = ""
        if "<head>" in self.html:
            # Split on the first <head> and re-attach it to the left part so
            # the injected tags (inserted at index 1) land right after it.
            parts = self.html.split("<head>", 1)
            parts[0] = parts[0] + "<head>"
        elif "<html>" in self.html:
            # No <head>: synthesize one immediately after <html>.
            parts = self.html.split("<html>", 1)
            parts[0] = parts[0] + "<html><head>"
            parts[1] = "</head>" + parts[1]
        else:
            # Bare fragment: the injected tags are simply prepended.
            parts = ["", self.html]
        parts.insert(
            1,
            '<base target="_blank"><link rel="stylesheet" type="text/css" href="https://app.wandb.ai/normalize.css" />',
        )
        self.html = join.join(parts).strip()

    @classmethod
    def get_media_subdir(cls):
        """All HTML files live under media/html in the run directory."""
        return os.path.join("media", "html")

    def to_json(self, run_or_artifact):
        json_dict = super(Html, self).to_json(run_or_artifact)
        json_dict["_type"] = self._log_type
        return json_dict

    @classmethod
    def from_json(
        cls, json_obj, source_artifact
    ):
        # inject=False: the stored file already contains any injected tags.
        return cls(source_artifact.get_path(json_obj["path"]).download(), inject=False)

    @classmethod
    def seq_to_json(
        cls,
        seq,
        run,
        key,
        step,
    ):
        """Serialize a sequence of Html values into one summary dict."""
        base_path = os.path.join(run.dir, cls.get_media_subdir())
        util.mkdir_exists_ok(base_path)
        meta = {
            "_type": "html",
            "count": len(seq),
            "html": [h.to_json(run) for h in seq],
        }
        return meta
class Video(BatchableMedia):
    """
    Wandb representation of video.

    Arguments:
        data_or_path: (numpy array, string, io)
            Video can be initialized with a path to a file or an io object.
            The format must be "gif", "mp4", "webm" or "ogg".
            The format must be specified with the format argument.
            Video can be initialized with a numpy tensor.
            The numpy tensor must be either 4 dimensional or 5 dimensional.
            Channels should be (time, channel, height, width) or
            (batch, time, channel, height width)
        caption: (string) caption associated with the video for display
        fps: (int) frames per second for video. Default is 4.
        format: (string) format of video, necessary if initializing with path or io object.
    """

    _log_type = "video-file"
    EXTS = ("gif", "mp4", "webm", "ogg")

    # _width: Optional[int]
    # _height: Optional[int]

    def __init__(
        self,
        data_or_path,
        caption = None,
        fps = 4,
        format = None,
    ):
        super(Video, self).__init__()
        self._fps = fps
        self._format = format or "gif"
        self._width = None
        self._height = None
        self._channels = None
        self._caption = caption
        if self._format not in Video.EXTS:
            raise ValueError("wandb.Video accepts %s formats" % ", ".join(Video.EXTS))
        if isinstance(data_or_path, six.BytesIO):
            # In-memory bytes: dump to a temp file named for the format.
            filename = os.path.join(
                _MEDIA_TMP.name, util.generate_id() + "." + self._format
            )
            with open(filename, "wb") as f:
                f.write(data_or_path.read())
            self._set_file(filename, is_tmp=True)
        elif isinstance(data_or_path, six.string_types):
            # Path input: the file extension must be a supported format.
            _, ext = os.path.splitext(data_or_path)
            ext = ext[1:].lower()
            if ext not in Video.EXTS:
                raise ValueError(
                    "wandb.Video accepts %s formats" % ", ".join(Video.EXTS)
                )
            self._set_file(data_or_path, is_tmp=False)
            # ffprobe -v error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 data_or_path
        else:
            # Raw tensor input (numpy or TF eager tensor): encode it ourselves.
            if hasattr(data_or_path, "numpy"):  # TF data eager tensors
                self.data = data_or_path.numpy()  # type: ignore
            elif _is_numpy_array(data_or_path):
                self.data = data_or_path
            else:
                raise ValueError(
                    "wandb.Video accepts a file path or numpy like data as input"
                )
            self.encode()

    def encode(self):
        """Encode `self.data` (a raw video tensor) into a media file via moviepy."""
        mpy = util.get_module(
            "moviepy.editor",
            required='wandb.Video requires moviepy and imageio when passing raw data. Install with "pip install moviepy imageio"',
        )
        tensor = self._prepare_video(self.data)
        _, self._height, self._width, self._channels = tensor.shape
        # encode sequence of images into gif string
        clip = mpy.ImageSequenceClip(list(tensor), fps=self._fps)
        filename = os.path.join(
            _MEDIA_TMP.name, util.generate_id() + "." + self._format
        )
        if wandb.TYPE_CHECKING and TYPE_CHECKING:
            kwargs = {}
        # Probe for the newest supported moviepy signature, falling back for
        # progressively older releases.
        try:  # older versions of moviepy do not support logger argument
            kwargs = {"logger": None}
            if self._format == "gif":
                clip.write_gif(filename, **kwargs)
            else:
                clip.write_videofile(filename, **kwargs)
        except TypeError:
            try:  # even older versions of moviepy do not support progress_bar argument
                kwargs = {"verbose": False, "progress_bar": False}
                if self._format == "gif":
                    clip.write_gif(filename, **kwargs)
                else:
                    clip.write_videofile(filename, **kwargs)
            except TypeError:
                kwargs = {
                    "verbose": False,
                }
                if self._format == "gif":
                    clip.write_gif(filename, **kwargs)
                else:
                    clip.write_videofile(filename, **kwargs)
        self._set_file(filename, is_tmp=True)

    @classmethod
    def get_media_subdir(cls):
        """All video files live under media/videos in the run directory."""
        return os.path.join("media", "videos")

    def to_json(self, run_or_artifact):
        json_dict = super(Video, self).to_json(run_or_artifact)
        json_dict["_type"] = self._log_type
        if self._width is not None:
            json_dict["width"] = self._width
        if self._height is not None:
            json_dict["height"] = self._height
        if self._caption:
            json_dict["caption"] = self._caption
        return json_dict

    def _prepare_video(self, video):
        """This logic was mostly taken from tensorboardX"""
        np = util.get_module(
            "numpy",
            required='wandb.Video requires numpy when passing raw data. To get it, run "pip install numpy".',
        )
        if video.ndim < 4:
            raise ValueError(
                "Video must be atleast 4 dimensions: time, channels, height, width"
            )
        if video.ndim == 4:
            # Promote a single video to a batch of one.
            video = video.reshape(1, *video.shape)
        b, t, c, h, w = video.shape
        if video.dtype != np.uint8:
            logging.warning("Converting video data to uint8")
            video = video.astype(np.uint8)

        def is_power2(num):
            return num != 0 and ((num & (num - 1)) == 0)

        # pad to nearest power of 2, all at once
        if not is_power2(video.shape[0]):
            len_addition = int(2 ** video.shape[0].bit_length() - video.shape[0])
            # NOTE(review): np.zeros defaults to float64, so this concatenate
            # promotes the uint8 frames to float — verify intended.
            video = np.concatenate(
                (video, np.zeros(shape=(len_addition, t, c, h, w))), axis=0
            )
        # NOTE(review): n_rows is derived from the pre-padding batch size `b`;
        # confirm the grid math still holds after the padding above.
        n_rows = 2 ** ((b.bit_length() - 1) // 2)
        n_cols = video.shape[0] // n_rows
        # Tile the batch into an (n_rows x n_cols) grid of frames, channels last.
        video = np.reshape(video, newshape=(n_rows, n_cols, t, c, h, w))
        video = np.transpose(video, axes=(2, 0, 4, 1, 5, 3))
        video = np.reshape(video, newshape=(t, n_rows * h, n_cols * w, c))
        return video

    @classmethod
    def seq_to_json(
        cls,
        seq,
        run,
        key,
        step,
    ):
        """Serialize a sequence of Video values into one summary dict."""
        base_path = os.path.join(run.dir, cls.get_media_subdir())
        util.mkdir_exists_ok(base_path)
        meta = {
            "_type": "videos",
            "count": len(seq),
            "videos": [v.to_json(run) for v in seq],
            "captions": Video.captions(seq),
        }
        return meta
# Allows encoding of arbitrary JSON structures as a file.
#
# This class should be treated as abstract: subclass it and override the
# validation methods.
class JSONMetadata(Media):
    """Media type that persists arbitrary JSON-serializable metadata as a file.

    Intended to be subclassed: children override `type_name` and `validate`.
    """

    def __init__(self, val):
        """Validate `val`, then dump it to a temp `.<type>.json` file."""
        super(JSONMetadata, self).__init__()
        self.validate(val)
        self._val = val
        extension = "." + self.type_name() + ".json"
        dump_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + extension)
        util.json_dump_uncompressed(
            self._val, codecs.open(dump_path, "w", encoding="utf-8")
        )
        self._set_file(dump_path, is_tmp=True, extension=extension)

    @classmethod
    def get_media_subdir(cls):
        """Media subdirectory, namespaced by the concrete type name."""
        return os.path.join("media", "metadata", cls.type_name())

    def to_json(self, run_or_artifact):
        """Serialize via Media, overriding `_type` with this type's name."""
        json_dict = super(JSONMetadata, self).to_json(run_or_artifact)
        json_dict["_type"] = self.type_name()
        return json_dict

    # Subclasses override the two methods below.
    @classmethod
    def type_name(cls):
        return "metadata"

    def validate(self, val):
        return True
class ImageMask(Media):
"""
Wandb class for image masks, useful for segmentation tasks
"""
_log_type = "mask"
def __init__(self, val, key):
"""
Args:
val (dict): dictionary following 1 of two forms:
{
"mask_data": 2d array of integers corresponding to classes,
"class_labels": optional mapping from class ids to strings {id: str}
}
{
"path": path to an image file containing integers corresponding to classes,
"class_labels": optional mapping from class ids to strings {id: str}
}
key (str): id for set of masks
"""
super(ImageMask, self).__init__()
if "path" in val:
self._set_file(val["path"])
else:
np = util.get_module(
"numpy", required="Semantic Segmentation mask support requires numpy"
)
# Add default class mapping
if "class_labels" not in val:
classes = np.unique(val["mask_data"]).astype(np.int32).tolist()
class_labels = dict((c, "class_" + str(c)) for c in classes)
val["class_labels"] = class_labels
self.validate(val)
self._val = val
self._key = key
ext = "." + self.type_name() + ".png"
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ext)
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
image = pil_image.fromarray(val["mask_data"].astype(np.int8), mode="L")
image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True, extension=ext)
def bind_to_run(
self,
run,
key,
step,
id_ = None,
):
# bind_to_run key argument is the Image parent key
# the self._key value is the mask's sub key
super(ImageMask, self).bind_to_run(run, key, step, id_=id_)
class_labels = self._val["class_labels"]
run._add_singleton(
"mask/class_labels",
str(key) + "_wandb_delimeter_" + self._key,
class_labels,
)
@classmethod
def get_media_subdir(cls):
return os.path.join("media", "images", cls.type_name())
@classmethod
def from_json(
cls, json_obj, source_artifact
):
return cls(
{"path": source_artifact.get_path(json_obj["path"]).download()}, key="",
)
def to_json(self, run_or_artifact):
json_dict = super(ImageMask, self).to_json(run_or_artifact)
run_class, artifact_class = _safe_sdk_import()
if isinstance(run_or_artifact, run_class):
json_dict["_type"] = self.type_name()
return json_dict
elif isinstance(run_or_artifact, artifact_class):
# Nothing special to add (used to add "digest", but no longer used.)
return json_dict
else:
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
    @classmethod
    def type_name(cls):
        # The media type tag (class-level _log_type) used in file extensions,
        # media subdirectories and JSON payloads.
        return cls._log_type
def validate(self, val):
np = util.get_module(
"numpy", required="Semantic Segmentation mask support requires numpy"
)
# 2D Make this work with all tensor(like) types
if "mask_data" not in val:
raise TypeError(
'Missing key "mask_data": A mask requires mask data(A 2D array representing the predctions)'
)
else:
error_str = "mask_data must be a 2d array"
shape = val["mask_data"].shape
if len(shape) != 2:
raise TypeError(error_str)
if not (
(val["mask_data"] >= 0).all() and (val["mask_data"] <= 255).all()
) and issubclass(val["mask_data"].dtype.type, np.integer):
raise TypeError("Mask data must be integers between 0 and 255")
# Optional argument
if "class_labels" in val:
for k, v in list(val["class_labels"].items()):
if (not isinstance(k, numbers.Number)) or (
not isinstance(v, six.string_types)
):
raise TypeError(
"Class labels must be a dictionary of numbers to string"
)
return True
class BoundingBoxes2D(JSONMetadata):
    """
    Wandb class for 2D bounding boxes
    """
    _log_type = "bounding-boxes"
    # TODO: when the change is made to have this produce a dict with a _type, define
    # it here as _log_type, associate it in to_json
    def __init__(self, val, key):
        """
        Args:
            val (dict): dictionary following the form:
            {
                "class_labels": optional mapping from class ids to strings {id: str}
                "box_data": list of boxes: [
                    {
                        "position": {
                            "minX": float,
                            "maxX": float,
                            "minY": float,
                            "maxY": float,
                        },
                        "class_id": 1,
                        "box_caption": optional str
                        "scores": optional dict of scores
                    },
                    ...
                ],
            }
        key (str): id for set of bounding boxes
        """
        super(BoundingBoxes2D, self).__init__(val)
        self._val = val["box_data"]
        self._key = key
        # Add default class mapping
        if "class_labels" not in val:
            # NOTE(review): the numpy "required" message mentions segmentation
            # masks — looks copy-pasted from ImageMask; harmless but misleading.
            np = util.get_module(
                "numpy", required="Semantic Segmentation mask support requires numpy"
            )
            # Synthesize "class_<id>" names for every distinct class_id present.
            classes = (
                np.unique(list([box["class_id"] for box in val["box_data"]]))
                .astype(np.int32)
                .tolist()
            )
            class_labels = dict((c, "class_" + str(c)) for c in classes)
            self._class_labels = class_labels
        else:
            self._class_labels = val["class_labels"]
    def bind_to_run(
        self,
        run,
        key,
        step,
        id_ = None,
    ):
        """Attach the boxes to *run* and register their class-label mapping.

        Args:
            run: the run object the metadata is bound to.
            key: the parent Image's log key.
            step: history step at which the boxes are logged.
            id_: optional disambiguator when several box sets share a key.
        """
        # bind_to_run key argument is the Image parent key
        # the self._key value is the mask's sub key
        super(BoundingBoxes2D, self).bind_to_run(run, key, step, id_=id_)
        # "_wandb_delimeter_" (sic) is part of the wire format — do not fix.
        run._add_singleton(
            "bounding_box/class_labels",
            str(key) + "_wandb_delimeter_" + self._key,
            self._class_labels,
        )
    @classmethod
    def type_name(cls):
        # Frontend-facing type tag; intentionally differs from _log_type.
        return "boxes2D"
    def validate(self, val):
        """Validate the box payload; raises TypeError on any malformed field.

        Returns True when every box has a valid position (either
        middle/width/height or minX/maxX/minY/maxY) and all optional
        fields (scores, class_id, box_caption, class_labels) are well typed.
        """
        # Optional argument
        if "class_labels" in val:
            for k, v in list(val["class_labels"].items()):
                if (not isinstance(k, numbers.Number)) or (
                    not isinstance(v, six.string_types)
                ):
                    raise TypeError(
                        "Class labels must be a dictionary of numbers to string"
                    )
        boxes = val["box_data"]
        if not isinstance(boxes, list):
            raise TypeError("Boxes must be a list")
        for box in boxes:
            # Required arguments
            error_str = "Each box must contain a position with: middle, width, and height or \
\nminX, maxX, minY, maxY."
            if "position" not in box:
                raise TypeError(error_str)
            else:
                # Two mutually acceptable position encodings: center+size
                # or the two opposite corners.
                valid = False
                if (
                    "middle" in box["position"]
                    and len(box["position"]["middle"]) == 2
                    and has_num(box["position"], "width")
                    and has_num(box["position"], "height")
                ):
                    valid = True
                elif (
                    has_num(box["position"], "minX")
                    and has_num(box["position"], "maxX")
                    and has_num(box["position"], "minY")
                    and has_num(box["position"], "maxY")
                ):
                    valid = True
                if not valid:
                    raise TypeError(error_str)
            # Optional arguments
            if ("scores" in box) and not isinstance(box["scores"], dict):
                raise TypeError("Box scores must be a dictionary")
            elif "scores" in box:
                for k, v in list(box["scores"].items()):
                    if not isinstance(k, six.string_types):
                        raise TypeError("A score key must be a string")
                    if not isinstance(v, numbers.Number):
                        raise TypeError("A score value must be a number")
            if ("class_id" in box) and not isinstance(
                box["class_id"], six.integer_types
            ):
                raise TypeError("A box's class_id must be an integer")
            # Optional
            if ("box_caption" in box) and not isinstance(
                box["box_caption"], six.string_types
            ):
                raise TypeError("A box's caption must be a string")
        return True
    def to_json(self, run_or_artifact):
        """Serialize: full JSONMetadata dict for runs, raw box list for artifacts."""
        run_class, artifact_class = _safe_sdk_import()
        if isinstance(run_or_artifact, run_class):
            return super(BoundingBoxes2D, self).to_json(run_or_artifact)
        elif isinstance(run_or_artifact, artifact_class):
            # TODO (tim): I would like to log out a proper dictionary representing this object, but don't
            # want to mess with the visualizations that are currently available in the UI. This really should output
            # an object with a _type key. Will need to push this change to the UI first to ensure backwards compat
            return self._val
        else:
            raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
    @classmethod
    def from_json(
        cls, json_obj, source_artifact
    ):
        # Artifacts store the raw box list (see to_json), so wrap it back up.
        return cls({"box_data": json_obj}, "")
class Classes(Media):
    _log_type = "classes"
    # _class_set: Sequence[dict]
    def __init__(self, class_set):
        """Hold class metadata used alongside other media objects when
        visualizing artifacts.
        Args:
            class_set (list): list of dicts in the form of {"id":int|str, "name":str}
        """
        super(Classes, self).__init__()
        # Every class entry must carry both an id and a display name.
        for entry in class_set:
            assert "id" in entry and "name" in entry
        self._class_set = class_set
    @classmethod
    def from_json(
        cls,
        json_obj,
        source_artifact,
    ):
        # The artifact is not needed: everything lives in the JSON payload.
        return cls(json_obj.get("class_set"))  # type: ignore
    def to_json(
        self, run_or_artifact
    ):
        # This is a bit of a hack to allow _ClassesIdType to
        # be able to operate fully without an artifact in play.
        # In all other cases, artifact should be a true artifact.
        if run_or_artifact is None:
            json_obj = {}
        else:
            json_obj = super(Classes, self).to_json(run_or_artifact)
        json_obj["_type"] = Classes._log_type
        json_obj["class_set"] = self._class_set
        return json_obj
    def get_type(self):
        # The dtype wrapper constraining values to this class set's ids.
        return _ClassesIdType(self)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __eq__(self, other):
        return isinstance(other, Classes) and self._class_set == other._class_set
class Image(BatchableMedia):
    """
    Wandb class for images.
    Arguments:
        data_or_path: (numpy array, string, io) Accepts numpy array of
            image data, or a PIL image. The class attempts to infer
            the data format and converts it.
        mode: (string) The PIL mode for an image. Most common are "L", "RGB",
            "RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes.
        caption: (string) Label for display of image.
    """
    # Cap on images logged from one sequence (enforced via _prune_max_seq).
    MAX_ITEMS = 108
    # PIL limit
    MAX_DIMENSION = 65500
    _log_type = "image-file"
    # format: Optional[str]
    # _grouping: Optional[str]
    # _caption: Optional[str]
    # _width: Optional[int]
    # _height: Optional[int]
    # _image: Optional["PIL.Image"]
    # _classes: Optional["Classes"]
    # _boxes: Optional[Dict[str, "BoundingBoxes2D"]]
    # _masks: Optional[Dict[str, "ImageMask"]]
    def __init__(
        self,
        data_or_path,
        mode = None,
        caption = None,
        grouping = None,
        classes = None,
        boxes = None,
        masks = None,
    ):
        """Build an Image from raw data, a file path, or another Image."""
        super(Image, self).__init__()
        # TODO: We should remove grouping, it's a terrible name and I don't
        # think anyone uses it.
        self._grouping = None
        self._caption = None
        self._width = None
        self._height = None
        self._image = None
        self._classes = None
        self._boxes = None
        self._masks = None
        # Allows the user to pass an Image object as the first parameter and have a perfect copy,
        # only overriding additional metdata passed in. If this pattern is compelling, we can generalize.
        if isinstance(data_or_path, Image):
            self._initialize_from_wbimage(data_or_path)
        elif isinstance(data_or_path, six.string_types):
            self._initialize_from_path(data_or_path)
        else:
            self._initialize_from_data(data_or_path, mode)
        self._set_initialization_meta(grouping, caption, classes, boxes, masks)
    def _set_initialization_meta(
        self,
        grouping = None,
        caption = None,
        classes = None,
        boxes = None,
        masks = None,
    ):
        """Apply optional metadata (caption, classes, boxes, masks) after the
        pixel data has been initialized; also records width/height."""
        if grouping is not None:
            self._grouping = grouping
        if caption is not None:
            self._caption = caption
        if classes is not None:
            # Accept either a Classes object or the raw class-set list.
            if not isinstance(classes, Classes):
                self._classes = Classes(classes)
            else:
                self._classes = classes
        if boxes:
            if not isinstance(boxes, dict):
                raise ValueError('Images "boxes" argument must be a dictionary')
            boxes_final = {}
            for key in boxes:
                box_item = boxes[key]
                if isinstance(box_item, BoundingBoxes2D):
                    boxes_final[key] = box_item
                elif isinstance(box_item, dict):
                    boxes_final[key] = BoundingBoxes2D(box_item, key)
            self._boxes = boxes_final
        if masks:
            if not isinstance(masks, dict):
                raise ValueError('Images "masks" argument must be a dictionary')
            masks_final = {}
            for key in masks:
                mask_item = masks[key]
                if isinstance(mask_item, ImageMask):
                    masks_final[key] = mask_item
                elif isinstance(mask_item, dict):
                    masks_final[key] = ImageMask(mask_item, key)
            self._masks = masks_final
        # _image is set by one of the _initialize_from_* helpers before this runs.
        self._width, self._height = self._image.size # type: ignore
    def _initialize_from_wbimage(self, wbimage):
        # Copy image-related state from an existing Image (see __init__ note).
        self._grouping = wbimage._grouping
        self._caption = wbimage._caption
        self._width = wbimage._width
        self._height = wbimage._height
        self._image = wbimage._image
        self._classes = wbimage._classes
        self._path = wbimage._path
        self._is_tmp = wbimage._is_tmp
        self._extension = wbimage._extension
        self._sha256 = wbimage._sha256
        self._size = wbimage._size
        self.format = wbimage.format
        self._artifact_source = wbimage._artifact_source
        self._artifact_target = wbimage._artifact_target
        # We do not want to implicitly copy boxes or masks, just the image-related data.
        # self._boxes = wbimage._boxes
        # self._masks = wbimage._masks
    def _initialize_from_path(self, path):
        """Load the image from an existing file on disk (file is not temporary)."""
        pil_image = util.get_module(
            "PIL.Image",
            required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
        )
        self._set_file(path, is_tmp=False)
        self._image = pil_image.open(path)
        # Force a full read so the file handle isn't kept lazily open.
        self._image.load()
        ext = os.path.splitext(path)[1][1:]
        self.format = ext
    def _initialize_from_data(self, data, mode = None,):
        """Convert in-memory data (matplotlib figure, PIL image, torch tensor,
        or array-like) to a PIL image and save it to a temp PNG."""
        pil_image = util.get_module(
            "PIL.Image",
            required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
        )
        if util.is_matplotlib_typename(util.get_full_typename(data)):
            # Rasterize the figure through an in-memory buffer.
            buf = six.BytesIO()
            util.ensure_matplotlib_figure(data).savefig(buf)
            self._image = pil_image.open(buf)
        elif isinstance(data, pil_image.Image):
            self._image = data
        elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
            vis_util = util.get_module(
                "torchvision.utils", "torchvision is required to render images"
            )
            if hasattr(data, "requires_grad") and data.requires_grad:
                data = data.detach()
            data = vis_util.make_grid(data, normalize=True)
            self._image = pil_image.fromarray(
                data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
            )
        else:
            if hasattr(data, "numpy"): # TF data eager tensors
                data = data.numpy()
            if data.ndim > 2:
                data = data.squeeze() # get rid of trivial dimensions as a convenience
            self._image = pil_image.fromarray(
                self.to_uint8(data), mode=mode or self.guess_mode(data)
            )
        tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".png")
        self.format = "png"
        self._image.save(tmp_path, transparency=None)
        self._set_file(tmp_path, is_tmp=True)
    @classmethod
    def from_json(
        cls, json_obj, source_artifact
    ):
        """Rebuild an Image (with classes, masks and boxes) from artifact JSON."""
        classes = None
        if json_obj.get("classes") is not None:
            classes = source_artifact.get(json_obj["classes"]["path"])
        masks = json_obj.get("masks")
        _masks = None
        if masks:
            _masks = {}
            for key in masks:
                _masks[key] = ImageMask.from_json(masks[key], source_artifact)
                _masks[key]._set_artifact_source(source_artifact)
                _masks[key]._key = key
        boxes = json_obj.get("boxes")
        _boxes = None
        if boxes:
            _boxes = {}
            for key in boxes:
                _boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
                _boxes[key]._key = key
        return cls(
            source_artifact.get_path(json_obj["path"]).download(),
            caption=json_obj.get("caption"),
            grouping=json_obj.get("grouping"),
            classes=classes,
            boxes=_boxes,
            masks=_masks,
        )
    @classmethod
    def get_media_subdir(cls):
        # Run-directory subfolder for image files.
        return os.path.join("media", "images")
    def bind_to_run(
        self,
        run,
        key,
        step,
        id_ = None,
    ):
        """Bind the image and all of its boxes/masks to *run*."""
        super(Image, self).bind_to_run(run, key, step, id_)
        if self._boxes is not None:
            for i, k in enumerate(self._boxes):
                # NOTE(review): id_ is rebuilt from the *previous* iteration's
                # value, so indices accumulate ("0", "01", "012", ...) — looks
                # unintended; confirm before changing.
                id_ = "{}{}".format(id_, i) if id_ is not None else None
                self._boxes[k].bind_to_run(run, key, step, id_)
        if self._masks is not None:
            for i, k in enumerate(self._masks):
                id_ = "{}{}".format(id_, i) if id_ is not None else None
                self._masks[k].bind_to_run(run, key, step, id_)
    def to_json(self, run_or_artifact):
        """Serialize the image plus optional caption, classes, boxes and masks.

        Raises:
            ValueError: if boxes/masks are present without classes when adding
                to an artifact, or if the target is neither a Run nor Artifact.
        """
        json_dict = super(Image, self).to_json(run_or_artifact)
        json_dict["_type"] = Image._log_type
        json_dict["format"] = self.format
        if self._width is not None:
            json_dict["width"] = self._width
        if self._height is not None:
            json_dict["height"] = self._height
        if self._grouping:
            json_dict["grouping"] = self._grouping
        if self._caption:
            json_dict["caption"] = self._caption
        run_class, artifact_class = _safe_sdk_import()
        if isinstance(run_or_artifact, artifact_class):
            artifact = run_or_artifact
            if (
                self._masks is not None or self._boxes is not None
            ) and self._classes is None:
                raise ValueError(
                    "classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
                )
            if self._classes is not None:
                # Here, rather than give each class definition it's own name (and entry), we
                # purposely are giving a non-unique class name of /media/cls.classes.json.
                # This may create user confusion if if multiple different class definitions
                # are expected in a single artifact. However, we want to catch this user pattern
                # if it exists and dive deeper. The alternative code is provided below.
                #
                class_name = os.path.join("media", "cls")
                #
                # class_name = os.path.join(
                #     "media", "classes", os.path.basename(self._path) + "_cls"
                # )
                #
                classes_entry = artifact.add(self._classes, class_name)
                json_dict["classes"] = {
                    "type": "classes-file",
                    "path": classes_entry.path,
                    "digest": classes_entry.digest,
                }
        elif not isinstance(run_or_artifact, run_class):
            raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
        if self._boxes:
            json_dict["boxes"] = {
                k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
            }
        if self._masks:
            json_dict["masks"] = {
                k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
            }
        return json_dict
    def guess_mode(self, data):
        """
        Guess what type of image the np.array is representing
        """
        # TODO: do we want to support dimensions being at the beginning of the array?
        if data.ndim == 2:
            return "L"
        elif data.shape[-1] == 3:
            return "RGB"
        elif data.shape[-1] == 4:
            return "RGBA"
        else:
            raise ValueError(
                "Un-supported shape for image conversion %s" % list(data.shape)
            )
    @classmethod
    def to_uint8(cls, data):
        """
        Converts floating point image on the range [0,1] and integer images
        on the range [0,255] to uint8, clipping if necessary.
        """
        np = util.get_module(
            "numpy",
            required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
        )
        # I think it's better to check the image range vs the data type, since many
        # image libraries will return floats between 0 and 255
        # some images have range -1...1 or 0-1
        dmin = np.min(data)
        if dmin < 0:
            # Shift/scale negative-valued data into [0, 1] before the next check.
            data = (data - np.min(data)) / np.ptp(data)
        if np.max(data) <= 1.0:
            data = (data * 255).astype(np.int32)
        # assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
        return data.clip(0, 255).astype(np.uint8)
    @classmethod
    def seq_to_json(
        cls,
        seq,
        run,
        key,
        step,
    ):
        """
        Combines a list of images into a meta dictionary object describing the child images.
        """
        if wandb.TYPE_CHECKING and TYPE_CHECKING:
            seq = cast(Sequence["Image"], seq)
        jsons = [obj.to_json(run) for obj in seq]
        media_dir = cls.get_media_subdir()
        # Every child image must already live under the images media dir.
        for obj in jsons:
            expected = util.to_forward_slash_path(media_dir)
            if not obj["path"].startswith(expected):
                raise ValueError(
                    "Files in an array of Image's must be in the {} directory, not {}".format(
                        cls.get_media_subdir(), obj["path"]
                    )
                )
        num_images_to_log = len(seq)
        width, height = seq[0]._image.size # type: ignore
        format = jsons[0]["format"]
        def size_equals_image(image):
            img_width, img_height = image._image.size # type: ignore
            return img_width == width and img_height == height # type: ignore
        sizes_match = all(size_equals_image(img) for img in seq)
        if not sizes_match:
            logging.warning(
                "Images sizes do not match. This will causes images to be display incorrectly in the UI."
            )
        meta = {
            "_type": "images/separated",
            "width": width,
            "height": height,
            "format": format,
            "count": num_images_to_log,
        }
        captions = Image.all_captions(seq)
        if captions:
            meta["captions"] = captions
        all_masks = Image.all_masks(seq, run, key, step)
        if all_masks:
            meta["all_masks"] = all_masks
        all_boxes = Image.all_boxes(seq, run, key, step)
        if all_boxes:
            meta["all_boxes"] = all_boxes
        return meta
    @classmethod
    def all_masks(
        cls,
        images,
        run,
        run_key,
        step,
    ):
        """Collect per-image mask JSON (None placeholder for images without
        masks); returns False when no image has any masks."""
        all_mask_groups = []
        for image in images:
            if image._masks:
                mask_group = {}
                for k in image._masks:
                    mask = image._masks[k]
                    mask_group[k] = mask.to_json(run)
                all_mask_groups.append(mask_group)
            else:
                all_mask_groups.append(None)
        if all_mask_groups and not all(x is None for x in all_mask_groups):
            return all_mask_groups
        else:
            return False
    @classmethod
    def all_boxes(
        cls,
        images,
        run,
        run_key,
        step,
    ):
        """Collect per-image box JSON (None placeholder for images without
        boxes); returns False when no image has any boxes."""
        all_box_groups = []
        for image in images:
            if image._boxes:
                box_group = {}
                for k in image._boxes:
                    box = image._boxes[k]
                    box_group[k] = box.to_json(run)
                all_box_groups.append(box_group)
            else:
                all_box_groups.append(None)
        if all_box_groups and not all(x is None for x in all_box_groups):
            return all_box_groups
        else:
            return False
    @classmethod
    def all_captions(
        cls, images
    ):
        # Delegates to the parent class helper — presumably defined on
        # BatchableMedia (not visible in this file chunk).
        return cls.captions(images)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __eq__(self, other):
        # Equality compares image data/metadata only; boxes and masks are
        # deliberately excluded (mirrors _initialize_from_wbimage).
        if not isinstance(other, Image):
            return False
        else:
            return (
                self._grouping == other._grouping
                and self._caption == other._caption
                and self._width == other._width
                and self._height == other._height
                and self._image == other._image
                and self._classes == other._classes
            )
    def to_data_array(self):
        """Return the pixels as a list of rows (empty list when no image)."""
        res = []
        if self._image is not None:
            data = list(self._image.getdata())
            for i in range(self._image.height):
                res.append(data[i * self._image.width : (i + 1) * self._image.width])
        return res
class Plotly(Media):
    """
    Wandb class for plotly plots.
    Arguments:
        val: matplotlib or plotly figure
    """
    _log_type = "plotly-file"
    @classmethod
    def make_plot_media(
        cls, val
    ):
        """Wrap *val* in the right media class: matplotlib figures containing
        images become a static wandb.Image, everything else becomes Plotly."""
        if util.is_matplotlib_typename(util.get_full_typename(val)):
            if util.matplotlib_contains_images(val):
                return Image(val)
            val = util.matplotlib_to_plotly(val)
        return cls(val)
    def __init__(self, val):
        super(Plotly, self).__init__()
        # First, check to see if the incoming `val` object is a plotfly figure
        if not util.is_plotly_figure_typename(util.get_full_typename(val)):
            # If it is not, but it is a matplotlib figure, then attempt to convert it to plotly
            if util.is_matplotlib_typename(util.get_full_typename(val)):
                if util.matplotlib_contains_images(val):
                    raise ValueError(
                        "Plotly does not currently support converting matplotlib figures containing images. \
                            You can convert the plot to a static image with `wandb.Image(plt)` "
                    )
                val = util.matplotlib_to_plotly(val)
            else:
                raise ValueError(
                    "Logged plots must be plotly figures, or matplotlib plots convertible to plotly via mpl_to_plotly"
                )
        tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".plotly.json")
        # Numpy arrays inside the figure JSON must become plain lists so they
        # serialize as data rather than being treated as histograms.
        val = _numpy_arrays_to_lists(val.to_plotly_json())
        util.json_dump_safer(val, codecs.open(tmp_path, "w", encoding="utf-8"))
        self._set_file(tmp_path, is_tmp=True, extension=".plotly.json")
    @classmethod
    def get_media_subdir(cls):
        # Run-directory subfolder for plotly JSON payloads.
        return os.path.join("media", "plotly")
    def to_json(self, run_or_artifact):
        json_dict = super(Plotly, self).to_json(run_or_artifact)
        json_dict["_type"] = self._log_type
        return json_dict
def history_dict_to_json(
    run, payload, step = None
):
    """Recursively convert a History row dict's values to JSON-friendly forms."""
    if step is None:
        # We should be at the top level of the History row; assume this key is set.
        step = payload["_step"]
    # Snapshot the items before iterating: converting values can mutate the
    # dict, and "dict changed size during iteration" was observed in the wild.
    for key, value in list(payload.items()):
        if isinstance(value, dict):
            payload[key] = history_dict_to_json(run, value, step=step)
        else:
            payload[key] = val_to_json(run, key, value, namespace=step)
    return payload
# TODO: refine this
def val_to_json(
    run,
    key,
    val,
    namespace = None,
):
    """Convert a wandb datatype (or homogeneous sequence of them) to JSON.

    Args:
        run: the run to bind media values to (required for WBValue inputs).
        key: the log key under which the value is stored.
        val: the value to convert.
        namespace: a step number or the string "summary"; must not be None.

    Returns:
        A JSON-serializable representation, or *val* unchanged when no
        conversion applies.
    """
    # Converts a wandb datatype to its JSON representation.
    if namespace is None:
        raise ValueError(
            "val_to_json must be called with a namespace(a step number, or 'summary') argument"
        )
    converted = val
    typename = util.get_full_typename(val)
    if util.is_pandas_data_frame(val):
        raise ValueError(
            "We do not support DataFrames in the Summary or History. Try run.log({{'{}': wandb.Table(dataframe=df)}})".format(
                key
            )
        )
    elif util.is_matplotlib_typename(typename) or util.is_plotly_typename(typename):
        val = Plotly.make_plot_media(val)
    elif isinstance(val, SixSequence) and all(isinstance(v, WBValue) for v in val):
        assert run
        # This check will break down if Image/Audio/... have child classes.
        if (
            len(val)
            and isinstance(val[0], BatchableMedia)
            and all(isinstance(v, type(val[0])) for v in val)
        ):
            # Homogeneous batchable media: bind each item, then emit one
            # combined meta dict via seq_to_json.
            if wandb.TYPE_CHECKING and TYPE_CHECKING:
                val = cast(Sequence["BatchableMedia"], val)
            items = _prune_max_seq(val)
            for i, item in enumerate(items):
                item.bind_to_run(run, key, namespace, id_=i)
            return items[0].seq_to_json(items, run, key, namespace)
        else:
            # TODO(adrian): Good idea to pass on the same key here? Maybe include
            # the array index?
            # There is a bug here: if this array contains two arrays of the same type of
            # anonymous media objects, their eventual names will collide.
            # This used to happen. The frontend doesn't handle heterogenous arrays
            # raise ValueError(
            #     "Mixed media types in the same list aren't supported")
            return [val_to_json(run, key, v, namespace=namespace) for v in val]
    if isinstance(val, WBValue):
        assert run
        if isinstance(val, Media) and not val.is_bound():
            if hasattr(val, "_log_type") and val._log_type == "table":
                # Special conditional to log tables as artifact entries as well.
                # I suspect we will generalize this as we transition to storing all
                # files in an artifact
                _, artifact_class = _safe_sdk_import()
                # we sanitize the key to meet the constraints defined in wandb_artifacts.py
                # in this case, leaving only alpha numerics or underscores.
                sanitized_key = re.sub(r"[^a-zA-Z0-9_]+", "", key)
                art = artifact_class(
                    "run-{}-{}".format(run.id, sanitized_key), "run_table"
                )
                art.add(val, key)
                run.log_artifact(art)
            val.bind_to_run(run, key, namespace)
        return val.to_json(run)
    return converted # type: ignore
def _is_numpy_array(data):
    """Return True when *data* is a numpy ndarray (numpy is imported lazily)."""
    numpy_mod = util.get_module(
        "numpy", required="Logging raw point cloud data requires numpy"
    )
    return isinstance(data, numpy_mod.ndarray)
def _wb_filename(
key, step, id, extension
):
return "{}_{}_{}{}".format(str(key), str(step), str(id), extension)
def _numpy_arrays_to_lists(
    payload
):
    """Recursively replace numpy arrays with plain lists so they serialize as
    data (not histograms), primarily for Plotly JSON payloads. Media objects
    are replaced by their class name to stay JSON-serializable."""
    if isinstance(payload, dict):
        return {
            key: _numpy_arrays_to_lists(value)
            for key, value in six.iteritems(payload)
        }
    if isinstance(payload, SixSequence) and not isinstance(payload, six.string_types):
        return [_numpy_arrays_to_lists(item) for item in payload]
    if util.is_numpy_array(payload):
        if wandb.TYPE_CHECKING and TYPE_CHECKING:
            payload = cast("np.ndarray", payload)
        return [_numpy_arrays_to_lists(item) for item in payload.tolist()]
    # Protects against logging non serializable objects
    if isinstance(payload, Media):
        return str(payload.__class__.__name__)
    return payload
def _prune_max_seq(seq):
# If media type has a max respect it
items = seq
if hasattr(seq[0], "MAX_ITEMS") and seq[0].MAX_ITEMS < len(seq): # type: ignore
logging.warning(
"Only %i %s will be uploaded."
% (seq[0].MAX_ITEMS, seq[0].__class__.__name__) # type: ignore
)
items = seq[: seq[0].MAX_ITEMS] # type: ignore
return items
def _data_frame_to_json(
    df, run, key, step
):
    """!NODOC Encode a Pandas DataFrame into the JSON/backend format.
    Writes the data to a file and returns a dictionary that we use to represent
    it in `Summary`'s.
    Arguments:
        df (pandas.DataFrame): The DataFrame. Must not have columns named
            "wandb_run_id" or "wandb_data_frame_id". They will be added to the
            DataFrame here.
        run (wandb_run.Run): The Run the DataFrame is associated with. We need
            this because the information we store on the DataFrame is derived
            from the Run it's in.
        key (str): Name of the DataFrame, ie. the summary key path in which it's
            stored. This is for convenience, so people exploring the
            directory tree can have some idea of what is in the Parquet files.
        step: History step or "summary".
    Returns:
        A dict representing the DataFrame that we can store in summaries or
        histories. This is the format:
        {
            '_type': 'data-frame',
                # Magic field that indicates that this object is a data frame as
                # opposed to a normal dictionary or anything else.
            'id': 'asdf',
                # ID for the data frame that is unique to this Run.
            'format': 'parquet',
                # The file format in which the data frame is stored. Currently can
                # only be Parquet.
            'project': 'wfeas',
                # (Current) name of the project that this Run is in. It'd be
                # better to store the project's ID because we know it'll never
                # change but we don't have that here. We store this just in
                # case because we use the project name in identifiers on the
                # back end.
            'path': 'media/data_frames/sdlk.parquet',
                # Path to the Parquet file in the Run directory.
        }
    """
    pandas = util.get_module("pandas")
    fastparquet = util.get_module("fastparquet")
    # Collect all missing optional dependencies so the user gets a single,
    # complete install command instead of one error at a time.
    missing_reqs = []
    if not pandas:
        missing_reqs.append("pandas")
    if not fastparquet:
        missing_reqs.append("fastparquet")
    if len(missing_reqs) > 0:
        raise wandb.Error(
            "Failed to save data frame. Please run 'pip install %s'"
            % " ".join(missing_reqs)
        )
    data_frame_id = util.generate_id()
    df = df.copy() # we don't want to modify the user's DataFrame instance.
    # Replace any embedded wandb media values with their JSON string form so
    # the frame is fully Parquet-serializable.
    for _, series in df.items():
        for i, val in enumerate(series):
            if isinstance(val, WBValue):
                series.iat[i] = six.text_type(
                    json.dumps(val_to_json(run, key, val, namespace=step))
                )
    # We have to call this wandb_run_id because that name is treated specially by
    # our filtering code
    df["wandb_run_id"] = pandas.Series(
        [six.text_type(run.id)] * len(df.index), index=df.index
    )
    df["wandb_data_frame_id"] = pandas.Series(
        [six.text_type(data_frame_id)] * len(df.index), index=df.index
    )
    frames_dir = os.path.join(run.dir, _DATA_FRAMES_SUBDIR)
    util.mkdir_exists_ok(frames_dir)
    path = os.path.join(frames_dir, "{}-{}.parquet".format(key, data_frame_id))
    fastparquet.write(path, df)
    return {
        "id": data_frame_id,
        "_type": "data-frame",
        "format": "parquet",
        "project": run.project_name(), # we don't have the project ID here
        "entity": run.entity,
        "run": run.id,
        "path": path,
    }
class _ClassesIdType(_dtypes.Type):
    """Type whose valid values are the ids declared in a Classes object."""
    name = "classesId"
    legacy_names = ["wandb.Classes_id"]
    types = [Classes]
    def __init__(
        self,
        classes_obj = None,
        valid_ids = None,
    ):
        """Build from a Classes object, a list of ids, or neither.

        When classes_obj is given, valid_ids is derived from its class set;
        when only valid_ids is given, a synthetic Classes object is built
        whose names are the stringified ids.
        """
        # Normalize valid_ids into a UnionType of ConstTypes.
        if valid_ids is None:
            valid_ids = _dtypes.UnionType()
        elif isinstance(valid_ids, list):
            valid_ids = _dtypes.UnionType(
                [_dtypes.ConstType(item) for item in valid_ids]
            )
        elif isinstance(valid_ids, _dtypes.UnionType):
            valid_ids = valid_ids
        else:
            raise TypeError("valid_ids must be None, list, or UnionType")
        if classes_obj is None:
            # No Classes provided: fabricate one from the allowed ids.
            classes_obj = Classes(
                [
                    {"id": _id.params["val"], "name": str(_id.params["val"])}
                    for _id in valid_ids.params["allowed_types"]
                ]
            )
        elif not isinstance(classes_obj, Classes):
            # NOTE(review): message says "valid_ids" but this branch is about
            # classes_obj — looks like a copy-paste slip in the error text.
            raise TypeError("valid_ids must be None, or instance of Classes")
        else:
            # A real Classes object wins: derive valid_ids from its class set.
            valid_ids = _dtypes.UnionType(
                [
                    _dtypes.ConstType(class_obj["id"])
                    for class_obj in classes_obj._class_set
                ]
            )
        self.wb_classes_obj_ref = classes_obj
        self.params.update({"valid_ids": valid_ids})
    def assign(self, py_obj = None):
        # Assigning a concrete value is assigning its ConstType.
        return self.assign_type(_dtypes.ConstType(py_obj))
    def assign_type(self, wb_type):
        # Valid as long as the incoming type fits one of the allowed ids;
        # this type itself is returned unchanged on success.
        valid_ids = self.params["valid_ids"].assign_type(wb_type)
        if not isinstance(valid_ids, _dtypes.InvalidType):
            return self
        return _dtypes.InvalidType()
    @classmethod
    def from_obj(cls, py_obj = None):
        return cls(py_obj)
    def to_json(self, artifact = None):
        """Serialize; with an artifact the Classes object is stored as a file
        entry, otherwise it is inlined into the params."""
        cl_dict = super(_ClassesIdType, self).to_json(artifact)
        # TODO (tss): Refactor this block with the similar one in wandb.Image.
        # This is a bit of a smell that the classes object does not follow
        # the same file-pattern as other media types.
        if artifact is not None:
            class_name = os.path.join("media", "cls")
            classes_entry = artifact.add(self.wb_classes_obj_ref, class_name)
            cl_dict["params"]["classes_obj"] = {
                "type": "classes-file",
                "path": classes_entry.path,
                "digest": classes_entry.digest, # is this needed really?
            }
        else:
            cl_dict["params"]["classes_obj"] = self.wb_classes_obj_ref.to_json(artifact)
        return cl_dict
    @classmethod
    def from_json(
        cls, json_dict, artifact = None,
    ):
        """Deserialize; file-backed Classes require a non-null artifact."""
        classes_obj = None
        if (
            json_dict.get("params", {}).get("classes_obj", {}).get("type")
            == "classes-file"
        ):
            if artifact is not None:
                classes_obj = artifact.get(
                    json_dict.get("params", {}).get("classes_obj", {}).get("path")
                )
            else:
                raise RuntimeError("Expected artifact to be non-null.")
        else:
            classes_obj = Classes.from_json(
                json_dict["params"]["classes_obj"], artifact
            )
        return cls(classes_obj)
class _VideoFileType(_dtypes.Type):
    """Dtype registry entry mapping the Video media class to "video-file"."""
    name = "video-file"
    types = [Video]
class _HtmlFileType(_dtypes.Type):
    """Dtype registry entry mapping the Html media class to "html-file"."""
    name = "html-file"
    types = [Html]
class _Object3DFileType(_dtypes.Type):
    """Dtype registry entry mapping the Object3D media class to "object3D-file"."""
    name = "object3D-file"
    types = [Object3D]
# Register the media dtype wrappers with the shared registry so they can be
# resolved by name during (de)serialization.
_dtypes.TypeRegistry.add(_ClassesIdType)
_dtypes.TypeRegistry.add(_VideoFileType)
_dtypes.TypeRegistry.add(_HtmlFileType)
_dtypes.TypeRegistry.add(_Object3DFileType)
# Public API of this module.
__all__ = [
    "Histogram",
    "Object3D",
    "Molecule",
    "Html",
    "Video",
    "ImageMask",
    "BoundingBoxes2D",
    "Classes",
    "Image",
    "Plotly",
    "history_dict_to_json",
    "val_to_json",
]
# ---------------------------------------------------------------------------
# (file-concatenation artifact removed; an unrelated SAT/ACT analysis script
# follows below)
# ---------------------------------------------------------------------------
# Bismillah
# Part 1 - Import the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def compare_values(act_col, sat_col):
    """Print the values that appear in only one of the two columns.

    Args:
        act_col: iterable of values from the ACT dataset column.
        sat_col: iterable of values from the SAT dataset column.
    """
    # Materialize both iterables as lists so membership tests work.
    act_vals = []
    sat_vals = []
    for a_val in act_col:
        act_vals.append(a_val)
    for s_val in sat_col:
        sat_vals.append(s_val)
    print('Values in ACT only: ')
    for val_a in act_vals:
        if (val_a not in sat_vals):
            print(val_a)
    print('--------------------')
    print('Values in SAT only: ')
    for val_s in sat_vals:
        # BUGFIX: the original tested `val_s not in sat_vals`, which is always
        # False (every SAT value is trivially in its own list), so SAT-only
        # values were never printed. The check must be against act_vals.
        if (val_s not in act_vals):
            print(val_s)
def fix_participation(column):
    """Strip the '%' sign from every cell of a participation column."""
    def _strip_percent(cell):
        return cell.strip('%')
    return column.apply(_strip_percent)
def convert_to_float(exam_df):
    """Cast every column except 'State' to float (in place) and return the frame."""
    numeric_cols = exam_df.columns[exam_df.columns != 'State']
    exam_df[numeric_cols] = exam_df[numeric_cols].astype(float)
    return exam_df
# Part 2 - Load the data (absolute Windows paths; the backslash at the end
# of each line is a line continuation inside the string literal).
sat_17 = pd.read_csv('D:/Phyton Code/Contoh dari Github/\
sat_act_analysis-master/data/sat_2017.csv')
sat_18 = pd.read_csv('D:/Phyton Code/Contoh dari Github/\
sat_act_analysis-master/data/sat_2018.csv')
act_17 = pd.read_csv('D:/Phyton Code/Contoh dari Github/\
sat_act_analysis-master/data/act_2017.csv')
act_18 = pd.read_csv('D:/Phyton Code/Contoh dari Github/\
sat_act_analysis-master/data/act_2018.csv')
# Exploring the data and cleaning corrupted data
print('SAT 2017 shape = ', sat_17.shape)
print('SAT 2018 shape = ', sat_18.shape)
print('ACT 2017 shape = ', act_17.shape)
print('ACT 2018 shape = ', act_18.shape)
# NOTE(review): these two expressions have no effect outside a notebook --
# their results are discarded when run as a script.
act_18['State'].value_counts()
act_18[act_18['State'] == 'Maine']
# drop the incorrect data, drop a row (presumably a duplicate 'Maine' row
# found above -- TODO confirm against the raw CSV)
act_18.drop(act_18.index[0], inplace=True)
act_18.reset_index(drop=True, inplace=True)
act_18.shape
compare_values(act_17['State'], sat_17['State'])
compare_values(act_18['State'], sat_18['State'])
# Drop the 'National' aggregate row from the 2017 ACT table.
act_17[act_17['State'] == 'National']
act_17.drop(act_17.index[0], inplace=True)
act_17.reset_index(drop=True, inplace=True)
act_17.shape
# NOTE(review): drops row 23 -- presumably where 'National' sits after the
# earlier drop/reset; verify against the data.
act_18[act_18['State'] == 'National']
act_18.drop(act_18.index[23], inplace=True)
act_18.reset_index(drop=True, inplace=True)
act_18.shape
# Harmonize the D.C. spelling using the replace method.
act_18.replace({'State':{'Washington, D.C.': 'District of Columbia'}},
inplace=True)
# # or:
# act_18['State'].replace({'Washington, D.C.': 'District of Columbia'},
# inplace=True)
# final check of consistency
print("FINAL CHECK ACT DATA \n")
compare_values(act_17['State'], sat_17['State'])
print("FINAL CHECK SAT DATA \n")
compare_values(act_18['State'], sat_18['State'])
# Compare the column names of every table via the .columns attribute.
print('SAT 2017 column names = ', sat_17.columns, "\n")
print('SAT 2018 column names = ', sat_18.columns, "\n")
print('ACT 2017 column names = ', act_17.columns, "\n")
print('ACT 2018 column names = ', act_18.columns, "\n")
# Remove unnecessary columns using the .drop() method.
sat_17.drop(columns=['Evidence-Based Reading and Writing', 'Math'], inplace=True)
sat_18.drop(columns=['Evidence-Based Reading and Writing', 'Math'], inplace=True)
act_17.drop(columns=['English', 'Math', 'Reading', 'Science'], inplace=True)
# check again
print('SAT 2017 column names = ', sat_17.columns, "\n")
print('SAT 2018 column names = ', sat_18.columns, "\n")
print('ACT 2017 column names = ', act_17.columns, "\n")
print('ACT 2018 column names = ', act_18.columns, "\n")
# Missing-value and dtype overview for every table.
print('SAT 2017 Missing Data:', "\n", sat_17.isnull().sum(), '\n')
print('SAT 2018 Missing Data:', "\n", sat_18.isnull().sum(), '\n')
print('ACT 2017 Missing Data:', "\n", act_17.isnull().sum(), '\n')
print('ACT 2018 Missing Data:', "\n", act_18.isnull().sum(), '\n')
print('SAT 2017 Data Type:', "\n", sat_17.dtypes, '\n')
print('SAT 2018 Data Type:', "\n", sat_18.dtypes, '\n')
print('ACT 2017 Data Type:', "\n", act_17.dtypes, '\n')
print('ACT 2018 Data Type:', "\n", act_18.dtypes, '\n')
# Fix the participation type: strip the '%' so the column can become float.
sat_17['Participation'] = fix_participation(sat_17['Participation'])
sat_18['Participation'] = fix_participation(sat_18['Participation'])
act_17['Participation'] = fix_participation(act_17['Participation'])
act_18['Participation'] = fix_participation(act_18['Participation'])
# convert to float type (act_17 is skipped here because its 'Composite'
# column still contains a stray 'x' character -- handled just below)
sat_17 = convert_to_float(sat_17)
sat_18 = convert_to_float(sat_18)
act_18 = convert_to_float(act_18)
# remove corrupted character (e.g. a value like '20.2x' -- TODO confirm)
act_17['Composite'] = act_17['Composite'].apply(lambda x: x.strip('x'))
# convert again to float type, now that act_17 is clean
act_17 = convert_to_float(act_17)
# Rename the columns so each table carries its exam and year in the name,
# allowing the four tables to be merged on the shared 'state' column.
new_act_17_cols = {
    'State': 'state',
    'Participation': 'act_participation_17',
    'Composite': 'act_composite_17'}
act_17.rename(columns=new_act_17_cols, inplace=True)
new_act_18_cols = {
    'State': 'state',
    'Participation': 'act_participation_18',
    'Composite': 'act_composite_18'}
act_18.rename(columns=new_act_18_cols, inplace=True)
new_sat_17_cols = {
    'State': 'state',
    'Participation': 'sat_participation_17',
    'Total': 'sat_score_17'}
sat_17.rename(columns=new_sat_17_cols, inplace=True)
new_sat_18_cols = {
    'State': 'state',
    'Participation': 'sat_participation_18',
    'Total': 'sat_score_18'}
sat_18.rename(columns=new_sat_18_cols, inplace=True)
# Sort every table by state so the rows line up across tables.
sat_17.sort_values(by=['state'], inplace=True)
sat_18.sort_values(by=['state'], inplace=True)
act_17.sort_values(by=['state'], inplace=True)
act_18.sort_values(by=['state'], inplace=True)
# Reset the index after sorting.
sat_17 = sat_17.reset_index(drop=True)
sat_18 = sat_18.reset_index(drop=True)
act_17 = act_17.reset_index(drop=True)
act_18 = act_18.reset_index(drop=True)
# Merge the yearly tables on the shared 'state' column.
# Bug fix: the original passed both `left_index=True` and `on='state'`;
# pandas rejects that combination with a MergeError (merge keys must come
# from either the index or a column, not both).
df1 = pd.merge(sat_17, sat_18, on='state', how='outer')
df2 = pd.merge(act_17, act_18, on='state', how='outer')
df = pd.merge(df1, df2, on='state', how='outer')
# Side-by-side concatenation of all four tables (inner join on row index).
data = [sat_17, sat_18, act_17, act_18]
fd = pd.concat(data, join='inner', axis=1)
# Plotting
# Bug fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `bool` is the documented replacement for dtype arguments.
plt.figure(figsize = (15,10))
plt.title('SAT and ACT Correlation Heatmap', fontsize = 16);
# Mask to remove redundancy from the heatmap (hide the upper triangle).
mask = np.zeros_like(df.corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(df.corr(), mask=mask, vmin=-1, vmax = 1, cmap = "coolwarm", annot = True);
plt.savefig('heatmap.png')
# Correlations among the four participation-rate columns only.
plt.figure(figsize = (8,6))
features = ['sat_participation_17', 'sat_participation_18', 'act_participation_17', 'act_participation_18']
plt.title('SAT and ACT Participation Rate Correlations', fontsize = 16);
mask = np.zeros_like(df[features].corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(df[features].corr(), mask=mask, vmin=-1, vmax = 1, cmap = "coolwarm", annot = True);
plt.savefig('heatmap01.png')
# Correlations among the average score columns only.
plt.figure(figsize = (8,6))
features = ['sat_score_17', 'sat_score_18', 'act_composite_17', 'act_composite_18']
plt.title('Average SAT Score vs Average ACT Composite Score Correlations', fontsize = 16);
mask = np.zeros_like(df[features].corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(df[features].corr(), mask=mask, vmin=-1, vmax = 1, cmap = "coolwarm", annot = True);
plt.savefig('heatmap02.png')
# Boxplots comparing the average participation rates of the 2017 ACT, 2018 ACT, 2017 SAT, and 2018 SAT.
# NOTE(review): passing the data Series positionally to sns.boxplot relies on
# older seaborn; newer versions expect keyword arguments (x=/y=).
fig, ax = plt.subplots(nrows = 2, ncols = 2, figsize = (12,8))
sns.boxplot(df.sat_participation_17, ax = ax[0,0], orient="h", color = 'orange').set(
xlabel='', title='SAT Participation Rates 2017');
sns.boxplot(df.sat_participation_18, ax = ax[0,1], orient="h", color = 'orange').set(
xlabel='', title='SAT Participation Rates 2018');
sns.boxplot(df.act_participation_17, ax = ax[1,0], orient="h", color= 'pink').set(
xlabel='', title='ACT Participation Rates 2017');
sns.boxplot(df.act_participation_18, ax = ax[1,1], orient="h", color = 'pink').set(
xlabel='', title='ACT Participation Rates 2018');
plt.tight_layout()
plt.savefig('boxplot.png');
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 (removed in
# 0.14); histplot is the modern equivalent.
plt.figure(figsize = (15,8))
# SAT Participation Rates 2017 histogram
plt.subplot(1,2,1)
sns.distplot(df.sat_participation_17, kde=False,bins=8);
plt.title('SAT Participation Rates 2017 Distribution', fontsize=16)
plt.xlabel('Participation Rate', fontsize=14)
plt.ylabel('Frequency', fontsize=14)
plt.xlim(0, 101)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
# ACT Participation Rates 2017 histogram
plt.subplot(1,2,2)
sns.distplot(df.act_participation_17, kde=False, bins=8);
plt.title('ACT Participation Rates 2017 Distribution', fontsize=16)
plt.xlabel('Participation Rate', fontsize=14)
plt.ylabel('Frequency', fontsize=14)
plt.xlim(0, 101)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.savefig('histo01.png');
plt.figure(figsize = (15,8))
# SAT Participation Rates 2018 histogram
plt.subplot(1,2,1)
sns.distplot(df.sat_participation_18, kde=False, bins=8);
plt.title('SAT Participation Rates 2018 Distribution', fontsize=16);
plt.xlabel('Participation Rate', fontsize=14)
plt.ylabel('Frequency', fontsize=14)
plt.xlim(0, 101)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
# ACT Participation Rates 2018 histogram
plt.subplot(1,2,2)
sns.distplot(df.act_participation_18,kde=False,bins=8);
plt.title('ACT Participation Rates 2018 Distribution', fontsize=16);
plt.xlabel('Participation Rate', fontsize=14)
plt.ylabel('Frequency', fontsize=14)
plt.xlim(0, 101)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.savefig('histo02.png');
|
|
#!/usr/bin/python
import os, sys
import json
from typing import Optional
import numpy as np
import re
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
#Student Name: Arshad Ali
#Student ID: 20236061
#GitHub Repo: https://github.com/gibbsali87/ARC
"""
In this solution, we have input data where one side of the data set is different from the rest.
However, it was effortless to see on the Visual website for a human. It was a bit of challenge to code
that but I enjoyed it.
In the start, I took the approach of dividing the data into two sets and then take the set,
which including the solution. Although it did work for some fo the Grids it was not the right solution,
so I had to change my approach. Furthermore, I looked for the index where the data change started and ended.
This was a much better approach and solved all the Grids.
In all of the solution, I have used pure python. I have used a dictionary in two of the three solutions.
All the grids are returning True for the three solution solved.
"""
def solve_0b148d64(x):
    """Extract the sub-grid drawn in the rarer of the two non-zero colours.

    The grid contains two non-zero colours; the block painted with the less
    frequent one is located by its first/last column indexes and returned as
    a list of row slices.
    NOTE(review): rows are assumed to be numpy arrays (`.tolist()` is
    called on them) -- confirm against the callers in main().
    """
    myList = [x]
    newList = []
    a = None    # first non-zero colour encountered
    a1 = 0      # occurrence count of colour a
    b = None    # second non-zero colour encountered
    b1 = 0      # occurrence count of colour b
    c = 0       # background colour
    lIndex = 0
    f_index = []    # per-row first column index of the target colour
    l_index = []    # per-row last index, measured from the reversed row
    lastList = []
    # For loop to extract the indexes where the change is starting and ending.
    for i in myList:
        for j in i:
            for k in j:
                if a is None and b is None and k != 0:
                    a = k
                elif b is None and k != a and k != 0:
                    b = k
                elif k == a:
                    a1 = a1 + 1
                elif k == b:
                    b1 = b1 + 1
                elif k == c:
                    pass # Do nothing
                else:
                    pass # Do nothing
    # The rarer colour marks the block to extract.
    if a1 < b1:
        s = a
    else:
        s = b
    # Keep only the rows that contain the target colour.
    # NOTE(review): the duplicated `if s in i` test is redundant.
    for i in x:
        if s in i:
            if s in i:
                newList.append(i)
        else:
            pass
    for i in newList:
        i = i.tolist()
        f_index.append(i.index(s))
        rl = i[::-1] # Reversing the list to get the last index where the data change has occurred.
        if rl.index(s) > lIndex:
            l_index.append(rl.index(s))
        else:
            pass
    fIndex = min(f_index)
    lIndex = min(l_index)
    lIndex = lIndex * -1 # Multiplying by -1 as I had reversed the list to get the last index
    # Slice each kept row between the first and last target-colour columns.
    for i in newList:
        i = i.tolist()
        if lIndex == -1:
            lastList.append(i[fIndex:])
        else:
            lastList.append(i[fIndex:lIndex])
    return lastList
"""
The output required in this solution is exchanging the index with a corresponding
number that represents the corresponding colour.
The Approach I have taken is to store the colour values in a Dictionary
and then replace it as required to form the correct output.
"""
def solve_0d3d703e(x):
    """Recolour every cell of the grid via a fixed colour-swap table.

    Returns a new grid (list of lists) where each value is replaced by its
    partner colour; values outside the table (e.g. 0) map to None.
    """
    swap = {3: 4, 1: 5, 2: 6, 8: 9, 5: 1, 6: 2, 9: 8, 4: 3}
    return [[swap.get(cell) for cell in row] for row in x]
"""
This solution requires filling the surrounding squares with the colour of the
corresponding square in the middle. There are two solution types required one
for 3 x 11 Grid and one for 7 x 11 grid.
The Approach I have taken is to store the colour square number and then create
a list of the corresponding colour that I can match from a dictionary
already created.
"""
def solve_54d9e175(x):
    """Rebuild the grid, replacing each marker number with its colour code.

    Handles both the 3x11 grid (one band of three cells) and the 7x11 grid
    (two bands of three cells separated by a divider row of 5s).  The
    divider colour 5 is kept in place; markers are mapped via `dic`.
    """
    # Variables and Objects
    myList = [x]
    newList = []
    dic = {2: 7, 3: 8, 4: 9, 1: 6}  # marker value -> colour value
    ind = []
    # Below nested for loops extract the marker numbers (ignoring background
    # 0 and divider 5) in reading order, storing them in a list.  The
    # dictionary then yields the colour for each band.
    for i in myList:
        for j in i:
            for k in j:
                if k == 0 or k == 5:
                    pass
                else:
                    ind.append(k)
    first = dic.get(ind[0])
    second = dic.get(ind[1])
    third = dic.get(ind[2])
    # Checking if 3x11 or 7x11 Grid is required.
    if len(x) > 3:
        fourth = dic.get(ind[3])
        fifth = dic.get(ind[4])
        sixth = dic.get(ind[5])
    else:
        pass
    # Below nested while loops create the required output:
    # three colour cells, a 5 divider, three more, a 5, then three more.
    while len(newList) < 3:
        s_list = []
        while len(s_list) < 3:
            s_list.append(first)
        s_list.append(5)
        while len(s_list) < 7:
            s_list.append(second)
        s_list.append(5)
        while len(s_list) < 11:
            s_list.append(third)
        newList.append(s_list)
    # Checking if 3x11 or 7x11 Grid is required.
    if len(x) > 3:
        # Divider row of eleven 5s, then the second band of three colours.
        f_list = []
        while len(f_list) < 11:
            f_list.append(5)
        newList.append(f_list[:12])
        while len(newList) < 7:
            s_list = []
            while len(s_list) < 3:
                s_list.append(fourth)
            s_list.append(5)
            while len(s_list) < 7:
                s_list.append(fifth)
            s_list.append(5)
            while len(s_list) < 11:
                s_list.append(sixth)
            newList.append(s_list)
    else:
        pass
    return newList
def main():
    """Discover every solve_<taskID> function defined at module level and
    run it against the matching training task JSON."""
    # Task IDs are eight lowercase hex characters embedded in the name.
    pattern = r"solve_([a-f0-9]{8})"
    tasks_solvers = []
    # Snapshot globals() so we can pair each matching name with its function.
    for name, obj in list(globals().items()):
        match = re.match(pattern, name)
        if match:
            tasks_solvers.append((match.group(1), obj))
    for task_id, solve_fn in tasks_solvers:
        # Read the task data and run the solver over its examples.
        json_filename = os.path.join("..", "data", "training", task_id + ".json")
        data = read_ARC_JSON(json_filename)
        test(task_id, solve_fn, data)
def read_ARC_JSON(filepath):
    """Given a filepath, read in the ARC task data which is in JSON
    format. Extract the train/test input/output pairs of
    grids. Convert each grid to np.array and return train_input,
    train_output, test_input, test_output."""
    # Bug fix: `json.load(open(filepath))` leaked the file handle; a
    # context manager guarantees it is closed.
    with open(filepath) as f:
        data = json.load(f)
    # Each grid is a list of lists of ints; convert to NumPy arrays.
    train_input = [np.array(pair['input']) for pair in data['train']]
    train_output = [np.array(pair['output']) for pair in data['train']]
    test_input = [np.array(pair['input']) for pair in data['test']]
    test_output = [np.array(pair['output']) for pair in data['test']]
    return (train_input, train_output, test_input, test_output)
def test(taskID, solve, data):
    """Run solve() over every train and test example of one task,
    printing each grid and its result via show_result()."""
    print(taskID)
    train_input, train_output, test_input, test_output = data
    groups = (
        ("Training grids", train_input, train_output),
        ("Test grids", test_input, test_output),
    )
    for label, inputs, outputs in groups:
        print(label)
        for grid_in, grid_out in zip(inputs, outputs):
            show_result(grid_in, grid_out, solve(grid_in))
def show_result(x, y, yhat):
    """Print the input grid, the expected output, our output, and whether
    the two outputs agree everywhere."""
    for header, grid in (("Input", x), ("Correct output", y), ("Our output", yhat)):
        print(header)
        print(grid)
    print("Correct?")
    # np.all covers both cases: with matching shapes, y == yhat is a bool
    # array tested everywhere; with mismatched shapes it is a single bool.
    print(np.all(y == yhat))
if __name__ == "__main__": main()
|
|
import numpy as np
import joblib
from .rbm import RBM
from .utils import sigmoid
# TODO(anna): add sparsity constraint
# TODO(anna): add entropy loss term
# TODO(anna): add monitoring kl divergence (and reverse kl divergence)
# TODO(anna): run on the paper examples again
# TODO(anna): try unit test case? say in a 3x3 patch, only 1 pixel is on
class GaussianBernoulliRBM(RBM):
    """RBM with Gaussian visible units (fixed std `sigma`) and Bernoulli
    hidden units, plus optional sparsity / hidden-entropy regularizers.

    The names in `additional_losses` match the `updates_from_<name>`
    methods below -- presumably dispatched and scaled by `<name>_coef` in
    the base RBM class (TODO confirm against rbm.RBM).
    """

    additional_losses = [
        'sparsity',
        'h_given_v_entropy',
    ]

    def __init__(self,
                 nv, nh,
                 sigma,
                 sparsity_coef=0.,
                 h_given_v_entropy_coef=0.,
                 random_state=None):
        """nv/nh: visible/hidden unit counts; sigma: visible-unit std-dev;
        the two coefs weight the extra regularization losses."""
        super(GaussianBernoulliRBM, self).__init__(
            nv, nh, random_state=random_state)
        self.sparsity_coef = sparsity_coef
        self.h_given_v_entropy_coef = h_given_v_entropy_coef
        self.sigma = sigma

    def p_h_given_v(self, v):
        """Bernoulli activation probabilities of the hidden units given v.

        v: (batch_size, nv) -> output: (batch_size, nh)
        """
        return sigmoid(self.hb[np.newaxis] + np.matmul(v, self.W) / self.sigma)

    def p_h_given_v_logits(self, v):
        """Pre-sigmoid hidden activations for v; same shape as p_h_given_v."""
        return self.hb[np.newaxis] + np.matmul(v, self.W) / self.sigma

    def mean_p_v_given_h(self, h):
        """Mean of the Gaussian visible units given hidden state h.

        h: (batch_size, nh) -> output: (batch_size, nv)
        """
        return self.vb[np.newaxis] + np.matmul(h, self.W.T)

    def sample_p_v_given_h(self, h):
        """Sample visible units from a Gaussian around their conditional mean.

        h: (batch_size, nh) -> output: (batch_size, nv)
        """
        center = self.vb[np.newaxis] + np.matmul(h, self.W.T)
        return self.random_state.normal(loc=center, scale=self.sigma)

    def par_nll_par_W(self, v, h):
        """Batch-averaged energy gradient w.r.t. W."""
        batch_size = len(v)
        return np.matmul(v.T, h) / batch_size / self.sigma

    def par_nll_par_hb(self, h):
        """Batch-averaged energy gradient w.r.t. the hidden bias."""
        return np.mean(h, axis=0)

    def par_nll_par_vb(self, v):
        """Batch-averaged energy gradient w.r.t. the visible bias."""
        return np.mean(v - self.vb, axis=0) / (self.sigma ** 2)

    def par_l1_par_W(self):
        """Subgradient of the L1 norm of W."""
        return np.sign(self.W)

    def updates_from_sparsity(self,
                              v, p_h_given_v0, h0,
                              vn, p_h_given_vn, hn,
                              sample=False):
        """L1 sparsity penalty updates as (delta_W, delta_vb, delta_hb)."""
        return -self.par_l1_par_W(), 0, 0

    def updates_from_h_given_v_entropy(self,
                                       v, p_h_given_v0, h0,
                                       vn, p_h_given_vn, hn,
                                       sample=False):
        """Hidden-entropy penalty updates as (delta_W, delta_vb, delta_hb)."""
        logits = self.p_h_given_v_logits(v)  # (batch_size, nh)
        h_term = logits * sigmoid(logits) * (1. - sigmoid(logits))
        delta_W_entropy = -np.matmul(v.T, h_term) / self.sigma
        delta_hb_entropy = -np.mean(h_term, axis=0)
        return -delta_W_entropy, 0, -delta_hb_entropy

    # TODO: make these better by making the class picklable, maybe
    def save(self, path):
        """Serialize weights, biases and params to `path` (.pkl) via joblib."""
        if not path.endswith('.pkl'):
            path += '.pkl'
        model = {
            'W': self.W,
            'hb': self.hb,
            'vb': self.vb,
            'params': {
                'nv': self._nv,
                'nh': self._nh,
            },
            'random_state': self._random_state,
        }
        joblib.dump(model, path, protocol=2)

    def load(self, path):
        """Restore state previously written by `save` into this instance.

        Bug fix: the original was declared `load(cls, path)` without
        @classmethod and then referenced an undefined `self`, so any call
        raised NameError; it is now a regular instance method.
        """
        model = joblib.load(path)
        self.W = model['W']
        self.hb = model['hb']
        self.vb = model['vb']
        self._nv = model['params']['nv']
        self._nh = model['params']['nh']
        self._random_state = model['random_state']
class GaussianBernoulliRBMOld(RBM):
    """Legacy Gaussian-Bernoulli RBM with a fixed batch size and a built-in
    contrastive-divergence training step.

    Kept alongside GaussianBernoulliRBM, which factors the regularizers
    into `updates_from_*` hooks instead of hard-coding them here.
    """

    def __init__(self, nv, nh, batch_size,
                 sigma,
                 sparsity_coef=0.,
                 h_given_v_entropy_coef=0.,
                 seed=None):
        # Bug fix: the original called super(GaussianBernoulliRBM, self),
        # which raises TypeError because `self` is not an instance of that
        # sibling class; super() must be anchored at this class.
        super(GaussianBernoulliRBMOld, self).__init__(
            nv, nh, batch_size, seed=seed)
        self.sparsity_coef = sparsity_coef
        self.h_given_v_entropy_coef = h_given_v_entropy_coef
        self.sigma = sigma

    def p_h_given_v(self, v):
        """Bernoulli hidden activation probabilities.

        v: (batch_size, nv) -> output: (batch_size, nh)
        """
        return sigmoid(self.hb[np.newaxis] + np.matmul(v, self.W) / self.sigma)

    def p_h_given_v_logits(self, v):
        """Pre-sigmoid hidden activations for v."""
        return self.hb[np.newaxis] + np.matmul(v, self.W) / self.sigma

    def mean_p_v_given_h(self, h):
        """Mean of the Gaussian visible units given h.

        h: (batch_size, nh) -> output: (batch_size, nv)
        """
        return self.vb[np.newaxis] + np.matmul(h, self.W.T)

    def sample_p_v_given_h(self, h):
        """Sample visible units from a Gaussian around their conditional mean.

        NOTE(review): uses `self.random_state` while train_step uses
        `self._random_state` -- confirm both resolve via the base RBM class.
        """
        center = self.vb[np.newaxis] + np.matmul(h, self.W.T)
        return self.random_state.normal(loc=center, scale=self.sigma)

    def par_nll_par_W(self, v, h):
        """Batch-averaged energy gradient w.r.t. W."""
        return np.matmul(v.T, h) / self._batch_size / self.sigma

    def par_nll_par_hb(self, h):
        """Batch-averaged energy gradient w.r.t. the hidden bias."""
        return np.mean(h, axis=0)

    def par_nll_par_vb(self, v):
        """Batch-averaged energy gradient w.r.t. the visible bias."""
        return np.mean(v - self.vb, axis=0) / (self.sigma ** 2)

    def par_l1_par_W(self):
        """Subgradient of the L1 norm of W."""
        return np.sign(self.W)

    def train_step(self, v, learning_rate, sample=False, n_gibbs=1):
        """
        v: np.array (batch_size, nv)
        learning_rate: float
        sample: boolean
            if True, sample at every v->h or h->v step. if False, use probability instead
            of sampling as advised
        n_gibbs: int
            number of up-down passes to run sampling to get model statistics
        """
        # train with gradient descent
        # compute gradient:
        # 1st component of gradient is total energy average over data distribution
        # this is computable depending on h being binary
        p_h_given_v = self.p_h_given_v(v)
        h = (self._random_state.rand(self._batch_size, self._nh) < p_h_given_v)\
            .astype(np.float32)
        if sample:
            # v: (batch_size, nv)
            # h: (batch_size, nh)
            # par_nll_par_W_data: (nv, nh) = mean(tmp, axis=0)
            par_nll_par_W_data = self.par_nll_par_W(v, h)
            par_nll_par_hb_data = self.par_nll_par_hb(h)
        else:
            par_nll_par_W_data = self.par_nll_par_W(v, p_h_given_v)
            par_nll_par_hb_data = self.par_nll_par_hb(p_h_given_v)
        par_nll_par_vb_data = self.par_nll_par_vb(v)
        # TODO: start here
        # 2nd component of gradient is total energy average over model distribution,
        # estimated with n_gibbs up-down passes starting from the data-driven h.
        for _ in range(n_gibbs):
            if sample:
                v_ = self.sample_p_v_given_h(h)
            else:
                v_ = self.mean_p_v_given_h(h)
            p_h_given_v_ = self.p_h_given_v(v_)
            # for hidden layer, always sample (unless calculating updates below)
            h_ = (self._random_state.rand(self._batch_size, self._nh) < p_h_given_v_)\
                .astype(np.float32)
            # set to h so the loop can repeat
            h = h_
        if sample:
            par_nll_par_W_model = self.par_nll_par_W(v_, h_)
            par_nll_par_hb_model = self.par_nll_par_hb(h_)
            par_nll_par_vb_model = self.par_nll_par_vb(v_)
        else:
            par_nll_par_W_model = self.par_nll_par_W(v_, p_h_given_v_)
            par_nll_par_hb_model = self.par_nll_par_hb(p_h_given_v_)
            par_nll_par_vb_model = self.par_nll_par_vb(v_)
        # now the update is just the <...>data - <...>model
        # or <...>model - <...>data, if using -= in the update
        delta_W = par_nll_par_W_data - par_nll_par_W_model
        delta_vb = par_nll_par_vb_data - par_nll_par_vb_model
        delta_hb = par_nll_par_hb_data - par_nll_par_hb_model
        # sparsity constraint
        # this seems to make W sparse, but not h
        # on the other hand, since each feature is very small and local now,
        # we need more h to be activated (?)
        delta_W_l1 = self.par_l1_par_W()
        # entropy constraint
        logits = self.p_h_given_v_logits(v)  # (batch_size, nh)
        h_term = logits * sigmoid(logits) * (1. - sigmoid(logits))
        delta_W_entropy = -np.matmul(v.T, h_term) / self.sigma
        # update it!
        self.W += learning_rate *\
            (delta_W - self.sparsity_coef * delta_W_l1\
             - self.h_given_v_entropy_coef * delta_W_entropy)
        self.vb += learning_rate * delta_vb
        self.hb += learning_rate * delta_hb

    def reconstruction_error(self, v):
        """Mean squared error between v and its one-step reconstruction.

        v: (batch_size, nv)
        """
        h = (self._random_state.rand(self._batch_size, self._nh) < self.p_h_given_v(v))\
            .astype(np.float32)
        v_ = self.mean_p_v_given_h(h)
        return np.mean((v - v_) ** 2)

    # TODO: make these better by making the class picklable, maybe
    def save(self, path):
        """Serialize weights, biases and params to `path` (.pkl) via joblib."""
        if not path.endswith('.pkl'):
            path += '.pkl'
        model = {
            'W': self.W,
            'hb': self.hb,
            'vb': self.vb,
            'params': {
                'nv': self._nv,
                'nh': self._nh,
                'batch_size': self._batch_size
            },
            'random_state': self._random_state,
        }
        joblib.dump(model, path, protocol=2)

    def load(self, path):
        """Restore state previously written by `save` into this instance.

        Bug fix: the original was declared `load(cls, path)` without
        @classmethod and then referenced an undefined `self`, so any call
        raised NameError; it is now a regular instance method.
        """
        model = joblib.load(path)
        self.W = model['W']
        self.hb = model['hb']
        self.vb = model['vb']
        self._nv = model['params']['nv']
        self._nh = model['params']['nh']
        self._batch_size = model['params']['batch_size']
        self._random_state = model['random_state']
|
|
import numpy as np
from matplotlib import pyplot
from scipy.integrate import solve_ivp as ode45
from scipy.interpolate import CubicSpline
def seirmodel(t, y, gamma, sigma, eta, Rstar):
    """Right-hand side of an SEIR model with a relaxing transmission rate.

    State vector ``y`` is [S, E, X, R, beta]: susceptible, exposed,
    infected, recovered, and the current transmission rate beta(t).
    ``Rstar`` is a callable giving the target reproduction number at time
    ``t``; beta relaxes toward gamma*Rstar(t) at rate eta.  Returns the
    five derivatives as a NumPy array.
    """
    n = 10**7  # total population size
    s, e, x, r, beta = y
    new_exposures = beta * s * x / n
    return np.array([
        -new_exposures,                   # dS/dt
        new_exposures - sigma * e,        # dE/dt
        sigma * e - gamma * x,            # dX/dt (infected)
        gamma * x,                        # dR/dt
        eta * (gamma * Rstar(t) - beta),  # d(beta)/dt
    ])
"""
on utilise les interpolations cubiques car si n > 10, les intervalles
[0, 1] et [ n-1, n] seraient problématiques dû à la création d'un polynôme de degré n-1
"""
def q5(t, R, gamma = 0.06, eta = 0.1, sigma = 0.2, n = 10**7, y0 = [10**7-100, 0, 100, 0, 4*0.06], t_span = [0,400]):
    """Integrate the SEIR model for a reproduction-number scenario and save plots.

    t, R: sample times and reproduction numbers defining the scenario; a
    clamped cubic spline interpolates R(t) before solve_ivp integrates the
    model over t_span starting from y0.  Saves 'pic_q5.png' (compartments)
    and 'pic_r_q5.png' (R curve).  Returns None.
    NOTE(review): the mutable default `y0` is shared across calls -- fine
    as long as nothing mutates it; verify solve_ivp leaves it untouched.
    """
    try:
        # NOTE(review): the emptiness check below is unreachable for two
        # empty inputs of equal length order-wise; also `not t` is
        # ambiguous if arrays are passed.
        if( len(t) != len(R)):
            raise ValueError('Les données ne sont pas valides !')
        if not t or not R:
            raise ValueError('Aucune donnée !')
        R_func = CubicSpline(t,R, bc_type='clamped')
        solution = ode45(lambda t_ode, y : seirmodel(t_ode, y, gamma, sigma, eta, R_func), t_span, y0)
        # Draw the compartment trajectories.
        pyplot.figure()
        pyplot.plot(solution.t, solution.y[0,:], label="Personnes suceptibles")
        pyplot.plot(solution.t, solution.y[1,:], label="Personnes exposées")
        pyplot.plot(solution.t, solution.y[2,:], label="Personnes infectées")
        pyplot.plot(solution.t, solution.y[3,:], label="Personnes guéries")
        pyplot.plot(t, R, 'ro', label="Données")
        pyplot.legend(loc="best")
        pyplot.title("Scénario")
        pyplot.xlabel("Temps(t)")
        pyplot.ylabel("Nombre de personnes")
        pyplot.savefig("pic_q5.png")
        # Draw the interpolated R(t) curve with the scenario data points.
        pyplot.figure()
        x = np.linspace(0,400, 4000)
        pyplot.plot(x, R_func(x), label="Nombre de reproduction de la maladie (R)")
        pyplot.plot(t, R, 'ro', label="Données")
        pyplot.legend(loc='best')
        pyplot.title("Évolution du nombre de repoducction de la maladie selon un scénario donné")
        pyplot.xlabel('Temps')
        pyplot.savefig('pic_r_q5.png')
        return
    except ValueError as e:
        print('Erreur dans les données : ', e)
        return
    except Exception as e:
        # Deliberate best-effort: report and swallow any other failure.
        print('Quelque chose s\'est mal passé : ', e)
        return
def test_q5():
    """Smoke-test q5 with a hand-picked reproduction-number scenario."""
    t = [ 0, 30, 60, 100, 145, 190, 200, 240, 300, 400]
    R = [ 2.5, 4.5, 2.8,2, 2.5, 3, 2, 2.5, 3, 1]
    q5(t, R)

# Runs the scenario at import time (NOTE(review): consider a __main__ guard).
test_q5()
|
|
import numpy as np
from .nv_py_regular_linreg import nv_regular_linreg
from .mp_py_regular_linreg import mp_regular_linreg
from .cpp_py_regular_linreg import cpp_regular_linreg
from .sklearn_py_regular_linreg import sklearn_regular_linreg
class RegularizedLinearRegression(object):
    """Elastic-net regularized linear regression dispatcher.

    Thin front-end that validates inputs and forwards the solve to one of
    four backends selected by `method`: 'nav', 'mp', 'cpp' or 'sk'.

    :param alpha: overall regularization strength
    :param L1_ratio: mix between L1 (1.0) and L2 (0.0) penalties
    :param method: backend identifier, one of 'nav', 'cpp', 'mp', 'sk'
    :param max_iter: iteration cap passed to the iterative backends
    :param tol: convergence tolerance passed to the iterative backends
    """

    def __init__(self, alpha=1.0, L1_ratio=0.5, method='cpp',
                 max_iter=1000, tol=1e-5):
        self.alpha = alpha
        self.L1_ratio = L1_ratio
        self.beta_0 = None
        self.max_iter = max_iter
        self.tol = tol
        self.method = method
        # check if a proper method is chosen
        self.error_check()

    def error_check(self):
        """Validate `self.method`, raising ValueError on an unknown backend.

        Bug fix: the original used `raise ('...')`, which itself raises
        TypeError (strings are not exceptions) instead of a useful error.
        """
        if self.method not in ['nav', 'cpp', 'mp', 'sk']:
            raise ValueError('error: not proper method selected')

    def fit(self, X, y, beta_0=None):
        """Fit on (X, y), starting from `beta_0` (zeros by default).

        :param X: 2d array-like of shape (N, p)
        :param y: 1d array-like of length N
        :param beta_0: optional initial coefficient vector of length p
        :returns: the result object of the selected backend
        :raises ValueError: on mismatched lengths or 1d X
        """
        # error check: data dimensions
        if len(X) != len(y):
            raise ValueError('error: data dimension not compatible')
        X, y = np.array(X), np.array(y)
        X_shape = X.shape
        if len(X_shape) == 1:
            raise ValueError('error: 1d data not supported yet')
        else:
            N, p = X_shape
        # Bug fix: `beta_0 == None` compares elementwise when an ndarray is
        # passed (ambiguous truth value); identity with None is correct.
        if beta_0 is None:
            self.beta_0 = np.zeros(p)
        else:
            self.beta_0 = np.array(beta_0)
        if self.method == 'nav':
            return nv_regular_linreg(
                X, y, self.beta_0, self.alpha, self.L1_ratio,
                self.max_iter, self.tol)
        if self.method == 'mp':
            return mp_regular_linreg(
                X, y, self.beta_0, self.alpha, self.L1_ratio,
                self.max_iter, self.tol)
        if self.method == 'cpp':
            return cpp_regular_linreg(
                X, y, self.beta_0, self.alpha, self.L1_ratio,
                self.max_iter, self.tol)
        if self.method == 'sk':
            return sklearn_regular_linreg(X, y, self.alpha, self.L1_ratio)
|
|
"""
base.py: Base class for linear transforms
"""
import numpy as np
import os
class BaseLinTrans(object):
    """
    Base class for linear transforms :math:`z_1 = Az_0`.

    Subclasses implement the forward/adjoint products (:meth:`dot`,
    :meth:`dotH`) and, when :code:`svd_avail` is True, an SVD-like
    factorization :math:`A = U\\mathrm{diag}(s)V^*` via the ``*svd*``
    methods, so that :math:`q_0 = V^*z_0`,
    :math:`q_1 = \\mathrm{diag}(s)q_0`, :math:`z_1 = Uq_1`.
    The factorization is only *SVD-like*: :math:`s` may be complex, and
    the singular values are :math:`|s|`.

    :param shape0: Input shape
    :param shape1: Output shape
    :param dtype0: Data type of the input
    :param dtype1: Data type of the output
    :param var_axes0: Axes over which the input variance is averaged
    :param var_axes1: Axes over which the output variance is averaged
    :param svd_avail: True when the subclass provides the SVD methods
    :param name: Optional display name (defaults to the class repr)
    """

    def __init__(self, shape0, shape1, dtype0=np.float64, dtype1=np.float64,
                 var_axes0=(0,), var_axes1=(0,), svd_avail=False, name=None):
        self.shape0 = shape0
        self.shape1 = shape1
        self.dtype0 = dtype0
        self.dtype1 = dtype1
        self.var_axes0 = var_axes0
        self.var_axes1 = var_axes1
        self.svd_avail = svd_avail
        # Fall back to the class repr when no explicit name is supplied.
        self.name = str(type(self)) if name is None else name

    def dot(self, z0):
        """Forward multiplication :math:`A(z_0)`; must be overridden."""
        raise NotImplementedError()

    def dotH(self, z1):
        """Conjugate-transpose multiplication :math:`A^*(z_1)`; must be overridden."""
        raise NotImplementedError()

    def var_dot(self, zvar0):
        """Forward variance propagation ``zvar1 = S(zvar0)`` with ``S = abs(A).^2``.

        Used only by AMP/GAMP.  The default implementation requires the
        SVD and averages over axis 0.
        """
        if not self.svd_avail:
            raise NotImplementedError()
        diag = self.get_svd_diag()[0]
        return np.sum(np.abs(diag)**2) * zvar0 / self.shape1[0]

    def var_dotH(self, zvar1):
        """Adjoint variance propagation ``zvar0 = S.H(zvar1)`` with ``S = abs(A).^2``.

        Used only by AMP/GAMP.  The default implementation requires the
        SVD and averages over axis 0.
        """
        if not self.svd_avail:
            raise NotImplementedError()
        diag = self.get_svd_diag()[0]
        return np.sum(np.abs(diag)**2) * zvar1 / self.shape0[0]

    def Usvd(self, q1):
        """Multiplication by the SVD factor :math:`U`; must be overridden."""
        raise NotImplementedError()

    def UsvdH(self, z1):
        """Multiplication by the SVD factor :math:`U^*`; must be overridden."""
        raise NotImplementedError()

    def Vsvd(self, q0):
        """Multiplication by the SVD factor :math:`V`; must be overridden."""
        raise NotImplementedError()

    def VsvdH(self, z0):
        """Multiplication by the SVD factor :math:`V^*`; must be overridden."""
        raise NotImplementedError()

    def get_svd_diag(self):
        """Return ``s, sshape, srep_axes`` describing the diagonal term.

        With these parameters, :code:`z1 = A.dot(z0)` is equivalent to::

            Aop = ...  # LinTrans object with SVD enabled
            s = Aop.get_svd_diag()[0]
            q0 = Aop.VsvdH(z0)
            q1 = Aop.svd_dot(s, q0)
            z1 = Aop.Usvd(q1)

        Any continuous function :math:`f` with :math:`f(0)=0` may replace
        :math:`s` above to compute :math:`Uf(S)V^*z_0`.
        """
        raise NotImplementedError()

    def svd_dot(self, s1, q0):
        """Diagonal multiplication :math:`q_1 = \\mathrm{diag}(s_1)q_0`.

        :param s1: diagonal parameters
        :param q0: input to the diagonal multiplication
        :returns: the diagonal multiplication output
        """
        raise NotImplementedError()

    def svd_dotH(self, s1, q1):
        """Conjugate diagonal multiplication :math:`q_0 = \\mathrm{diag}(s_1)^*q_1`.

        :param s1: diagonal parameters
        :param q1: input to the diagonal multiplication
        :returns: the diagonal multiplication output
        """
        raise NotImplementedError()

    def __str__(self):
        parts = [
            str(self.name),
            'Input shape: ' + str(self.shape0) + ', type:' + str(self.dtype0),
            'Output shape: ' + str(self.shape1) + ', type:' + str(self.dtype1),
        ]
        return os.linesep.join(parts)
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 13 23:28:06 2019
@author: walter
"""
import arcade
import numpy as np
# Window geometry and title.
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
SCREEN_TITLE = "Pong"
# Ball speed multiplier and paddle speed (units per update).
MOVEMENT_SPEED = 2
PADDLE_MOVEMENT_SPEED = 1.25
# Key bindings: player 1 uses W/S, player 2 uses the arrow keys.
PLAYER1_UP = arcade.key.W
PLAYER1_DOWN = arcade.key.S
PLAYER2_UP = arcade.key.UP
PLAYER2_DOWN = arcade.key.DOWN
class Paddle:
    """A player's paddle: a filled rectangle moved vertically by that
    player's up/down keys and clamped to the screen bounds."""

    def __init__(self, position_x, position_y, change_x, change_y, width, height, color, player_number = 1):
        """Store geometry, velocity and colour, and bind this player's keys."""
        self.position_x = position_x
        self.position_y = position_y
        self.change_x = change_x
        self.change_y = change_y
        self.width = width
        self.height = height
        self.color = color
        self.player_number = player_number
        self.collision = False
        # Player 1 controls with W/S; any other player number uses arrows.
        if player_number == 1:
            self.up_key, self.down_key = PLAYER1_UP, PLAYER1_DOWN
        else:
            self.up_key, self.down_key = PLAYER2_UP, PLAYER2_DOWN

    def draw(self):
        """Render the paddle as a filled rectangle."""
        arcade.draw_rectangle_filled(self.position_x, self.position_y,
                                     self.width, self.height, self.color)

    def on_key_press(self, key, modifiers):
        """Start moving when one of this player's movement keys is pressed."""
        speed_for = {self.up_key: PADDLE_MOVEMENT_SPEED,
                     self.down_key: -PADDLE_MOVEMENT_SPEED}
        if key in speed_for:
            self.change_y = speed_for[key]

    def on_key_release(self, key, modifiers):
        """Stop vertical movement when a movement key is released."""
        if key in (self.up_key, self.down_key):
            self.change_y = 0

    def update(self):
        """Advance one frame: apply velocity, clamp to screen, refresh corners."""
        self.position_y += self.change_y
        self.position_x += self.change_x
        half_w = self.width / 2
        half_h = self.height / 2
        self.position_x = np.clip(self.position_x, half_w, SCREEN_WIDTH - half_w)
        self.position_y = np.clip(self.position_y, half_h, SCREEN_HEIGHT - half_h)
        # Cache the bounding-box corners for collision checks elsewhere.
        self.top_right_y = self.position_y + half_h
        self.top_right_x = self.position_x + half_w
        self.bottom_left_x = self.position_x - half_w
        self.bottom_left_y = self.position_y - half_h
class Ball:
    """The pong ball: moves, bounces off walls and paddles, reports scoring."""

    def __init__(self, position_x, position_y, change_x, change_y, width, color):
        # Store the construction parameters on the instance.
        self.position_x = position_x
        self.position_y = position_y
        self.change_x = change_x
        self.change_y = change_y
        self.width = width
        self.color = color
        self.collision = False
        # Paddle currently in contact, if any; latches the bounce (see below).
        self.other_collider = None

    def draw(self):
        """Render the ball as a filled square of side ``width``."""
        arcade.draw_rectangle_filled(self.position_x, self.position_y,
                                     self.width, self.width, self.color)

    def update(self):
        """Advance one tick: bounce off edges (scoring at left/right), then move."""
        half = self.width / 2
        # Left wall: report a point for player 1 and reflect horizontally.
        if self.position_x < half:
            self.position_x = half
            self.change_x *= -1
            MyGame.game_over(1)
        # Right wall: report a point for player 2 and reflect horizontally.
        if self.position_x > SCREEN_WIDTH - half:
            self.position_x = SCREEN_WIDTH - half
            self.change_x *= -1
            MyGame.game_over(2)
        # Top and bottom walls simply reflect vertically.
        if self.position_y < half:
            self.position_y = half
            self.change_y *= -1
        if self.position_y > SCREEN_HEIGHT - half:
            self.position_y = SCREEN_HEIGHT - half
            self.change_y *= -1
        self.position_y += MOVEMENT_SPEED * self.change_y
        self.position_x += MOVEMENT_SPEED * self.change_x
        # Refresh the axis-aligned bounding box from the new position.
        self.top_right_y = self.position_y + half
        self.top_right_x = self.position_x + half
        self.bottom_left_x = self.position_x - half
        self.bottom_left_y = self.position_y - half

    def check_collision(self, other):
        """AABB overlap test against ``other``; flip x-direction on first contact.

        ``other_collider`` latches the touched paddle so the ball reverses
        only once per continuous overlap.
        """
        separated = (self.top_right_x < other.bottom_left_x or
                     self.bottom_left_x > other.top_right_x or
                     self.top_right_y < other.bottom_left_y or
                     self.bottom_left_y > other.top_right_y)
        self.collision = not separated
        if self.collision:
            if self.other_collider is None:
                self.change_x *= -1
                self.other_collider = other
        elif self.other_collider == other:
            self.other_collider = None
        return self.collision
class MyGame(arcade.Window):
    """Main window: owns the ball and both paddles, and tracks the match score."""

    # Class-level state so Ball can report points via MyGame.game_over().
    player1_score = 0
    player2_score = 0
    start_new_round = False

    def __init__(self, width, height, title):
        super().__init__(width, height, title)
        # Hide the pointer so only the game objects are visible.
        self.set_mouse_visible(False)
        arcade.set_background_color(arcade.color.ASH_GREY)
        MyGame.player1_score = 0
        MyGame.player2_score = 0
        self.new_round()

    @staticmethod
    def game_over(winner):
        """Record a point for ``winner`` (1 or 2) and request a fresh round."""
        if winner == 1:
            MyGame.player1_score += 1
        else:
            MyGame.player2_score += 1
        MyGame.start_new_round = True

    def new_round(self):
        """Re-create the ball (random offset and direction) and both paddles."""
        MyGame.start_new_round = False
        jitter = [-10, 0, 10]
        self.ball = Ball(SCREEN_WIDTH / 2 + np.random.choice(jitter),
                         SCREEN_HEIGHT / 2 + np.random.choice(jitter),
                         np.random.choice([-1, 1]),
                         np.random.choice([-1, 1]),
                         16,
                         arcade.color.WHITE)
        self.paddle1 = Paddle(8, 120, 0, 0, 16, 48, arcade.color.BLUE, 1)
        self.paddle2 = Paddle(SCREEN_WIDTH - 8, 120, 0, 0, 16, 48, arcade.color.RED, 2)

    def on_draw(self):
        """Draw all objects, run collision checks, and paint the scores."""
        arcade.start_render()
        self.ball.draw()
        self.paddle1.draw()
        self.paddle2.draw()
        self.ball.check_collision(self.paddle1)
        self.ball.check_collision(self.paddle2)
        arcade.draw_text(str(MyGame.player1_score), 10, SCREEN_HEIGHT - 25,
                         color=arcade.color.WHITE, font_name="COURIER NEW", font_size=20)
        arcade.draw_text(str(MyGame.player2_score), SCREEN_WIDTH - 50, SCREEN_HEIGHT - 25,
                         color=arcade.color.WHITE, font_name="COURIER NEW", font_size=20)

    def update(self, delta_time):
        """Per-frame tick: start a pending round, then advance every object."""
        if MyGame.start_new_round:
            self.new_round()
        self.ball.update()
        self.paddle1.update()
        self.paddle2.update()

    def on_key_press(self, key, modifiers):
        """Forward key presses to both paddles; each reacts only to its keys."""
        self.paddle1.on_key_press(key, modifiers)
        self.paddle2.on_key_press(key, modifiers)

    def on_key_release(self, key, modifiers):
        """Forward key releases to both paddles."""
        self.paddle1.on_key_release(key, modifiers)
        self.paddle2.on_key_release(key, modifiers)
def main():
    """Create the game window and hand control to the arcade event loop."""
    window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)  # kept alive by the event loop
    arcade.run()

if __name__ == "__main__":
    main()
|
|
"""Train and test CNN classifier"""
import dga_classifier.data as data
import numpy as np
from keras.preprocessing import sequence
import sklearn
from sklearn.model_selection import train_test_split
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Conv1D, Input, Dense, concatenate
from keras.optimizers import SGD
from keras.layers.embeddings import Embedding
from keras.layers.pooling import GlobalMaxPooling1D
from keras.layers.recurrent import LSTM
def build_model(max_features, maxlen):
    '''
    Build the combined LSTM + multi-width CNN binary classifier.

    Architecture credits:
    [Deep Learning For Realtime Malware Detection (ShmooCon 2018)](https://www.youtube.com/watch?v=99hniQYB6VM)'s
    LSTM + CNN (see 13:17 for architecture) by Domenic Puzio and Kate Highnam
    AND
    Derived CNN model from Keegan Hines' Snowman https://github.com/keeganhines/snowman/
    '''
    text_input = Input(shape=(maxlen,), name='text_input')
    embedded = Embedding(input_dim=max_features, input_length=maxlen, output_dim=128)(text_input)

    # Recurrent branch: LSTM -> dropout -> single linear unit.
    lstm_branch = LSTM(128)(embedded)
    lstm_branch = Dropout(0.5)(lstm_branch)
    lstm_branch = Dense(1)(lstm_branch)

    # Convolutional branches: 15 filters at kernel widths 2..6,
    # each reduced by a global max-pool.
    pooled = [GlobalMaxPooling1D()(Conv1D(15, kernel_width, activation='relu')(embedded))
              for kernel_width in (2, 3, 4, 5, 6)]

    # Merge the five CNN branches with the LSTM branch.
    merged = concatenate(pooled + [lstm_branch])
    merged = Dropout(.2)(merged)
    logits = Dense(1)(merged)
    out = Activation("sigmoid")(logits)

    model = Model(inputs=text_input, outputs=out)
    model.compile(
        loss='binary_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy']
    )
    return model
def run(max_epoch=25, nfolds=10, batch_size=128):
    """Train/evaluate the CNN+LSTM classifier over n random train/test splits.

    :param max_epoch: maximum training epochs per fold (early-stops after 3
        epochs without holdout-AUC improvement)
    :param nfolds: number of independent random 80/20 splits
    :param batch_size: minibatch size for model.fit
    :returns: list with one dict per fold:
        {'y', 'labels', 'probs', 'epochs', 'confusion_matrix'}
    """
    indata = data.get_data()

    # Extract data and labels
    X = [x[1] for x in indata]
    labels = [x[0] for x in indata]

    # Generate a dictionary of valid characters (index 0 is reserved for padding)
    valid_chars = {x: idx + 1 for idx, x in enumerate(set(''.join(X)))}
    max_features = len(valid_chars) + 1
    maxlen = np.max([len(x) for x in X])

    # Convert characters to int and pad
    X = [[valid_chars[y] for y in x] for x in X]
    X = sequence.pad_sequences(X, maxlen=maxlen)

    # Convert labels to 0-1
    y = [0 if x == 'benign' else 1 for x in labels]

    final_data = []
    for fold in range(nfolds):
        # print() form works under both Python 2 and 3
        # (the original py2-only print statements broke py3).
        print("fold %u/%u" % (fold + 1, nfolds))
        X_train, X_test, y_train, y_test, _, label_test = train_test_split(
            X, y, labels, test_size=0.2)

        print('Build model...')
        model = build_model(max_features, maxlen)

        print("Train...")
        # Hold out 5% of the training data for AUC-based early stopping.
        X_train, X_holdout, y_train, y_holdout = train_test_split(
            X_train, y_train, test_size=0.05)
        best_iter = -1
        best_auc = 0.0
        out_data = {}
        for ep in range(max_epoch):
            # NOTE(review): nb_epoch is the legacy Keras 1.x spelling; newer
            # Keras expects epochs=1 -- confirm against the pinned version.
            model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1)

            t_probs = model.predict(X_holdout)
            # NOTE(review): sklearn.metrics is reachable here only because the
            # model_selection import above pulls it in transitively; an
            # explicit import would be more robust.
            t_auc = sklearn.metrics.roc_auc_score(y_holdout, t_probs)
            print('Epoch %d: auc = %f (best=%f)' % (ep, t_auc, best_auc))

            if t_auc > best_auc:
                best_auc = t_auc
                best_iter = ep
                probs = model.predict(X_test)
                out_data = {'y': y_test, 'labels': label_test, 'probs': probs,
                            'epochs': ep,
                            'confusion_matrix': sklearn.metrics.confusion_matrix(y_test, probs > .5)}
                print(sklearn.metrics.confusion_matrix(y_test, probs > .5))
            else:
                # No longer improving... break and calc statistics
                if (ep - best_iter) > 2:
                    break
        final_data.append(out_data)
    return final_data
|
|
# -*- coding: utf-8 -*-
"""
vb_nmf.py
Variational Bayes NMF
"""
import scipy as sp
from ..bayes import *
def vb_nmf(X, a_w, b_w, a_h, b_h, n_iter=100):
"""
Variational Bayes NMF
変分ベイズ法によるNMF
"""
# initialize
Winit = gamma(x, a_w, b_w/a_w)
Hinit = gamma(x, a_h, b_h/a_h)
Lw = Winit
Ew = Winit
Lh = Hinit
Eh = Hinit
# update Lw,Ew,Lh,Eh
for it in range(n_iter):
|
|
import numpy as np
from . import integer_manipulations as int_man
from . import quaternion as quat
from math import pi
class Col(object):
    """
    This class is defined to ouput a word or sentence in a different color
    to the standard shell.
    The color names accepted by :meth:`c_prnt` are:
    ``pink``, ``blue``, ``green``, ``dgrn``: dark green, ``yel``: yellow,
    ``amber``
    """
    def __init__(self):
        # ANSI escape codes for each supported color.
        self.pink = '\033[95m'
        self.blue = '\033[94m'
        self.green = '\033[92m'
        self.dgrn = '\033[1;32m'
        self.yellow = '\033[93m'
        self.amber = '\033[91m'
        # Reset code appended after every colored print.
        self.ENDC = '\033[0m'

    def c_prnt(self, text, color):
        """
        Print a string in color.

        Parameters
        ----------
        text: string
            Text to be shown in color.
        color: string
            One of 'pink', 'blue', 'green', 'dgrn', 'yel', 'amber'.

        Raises
        ------
        Exception
            If ``color`` is not one of the supported names.
        """
        # BUG FIX: the original compared strings with `is`, which tests object
        # identity and only worked by accident for interned literals; use a
        # value-based lookup instead.
        codes = {
            'pink': self.pink,
            'blue': self.blue,
            'green': self.green,
            'dgrn': self.dgrn,
            'yel': self.yellow,
            'amber': self.amber,
        }
        if color not in codes:
            raise Exception('The color you selected is not acceptable')
        print(codes[color] + text + self.ENDC)
# -----------------------------------------------------------------------------------------------------------
def unique_rows_tol(data, tol=1e-12, return_index=False, return_inverse=False):
    """
    Return the rows of ``data`` that are unique within the given tolerance.

    Rows are truncated to ``-log10(tol)`` decimal digits and then compared
    byte-wise, so rows whose elements differ by less than ``tol`` collapse
    to a single representative.

    Parameters
    ----------
    data: numpy array (m x n)
    tol: double
        tolerance of comparison for each rows
        Default: 1e-12
    return_index: Boolean
        flag to return the index of unique rows based on the indices of the output
    return_inverse: Boolean
        flag to return the index of unique rows based on the indices of the input

    Returns
    ----------
    unique_rows: numpy array (m' x n)
    ia: numpy array, integer (m' x 1)
        unique rows based on the indices of the output
    ic: numpy array, integer (m x 1)
        unique rows based on the indices of the input

    See Also
    --------
    unique
    """
    digits = -np.fix(np.log10(tol))
    # np.fix truncates toward zero; e.g. 0.9999999998 vs 1.0 can still land in
    # different buckets (np.rint would avoid this but needs extensive testing).
    truncated = np.fix(data * 10 ** digits) / 10 ** digits + 0.0
    # View each row as one opaque byte string so np.unique can work row-wise.
    row_view = np.ascontiguousarray(truncated).view(
        np.dtype((np.void, truncated.dtype.itemsize * truncated.shape[1])))
    _, ia = np.unique(row_view, return_index=True)
    _, ic = np.unique(row_view, return_inverse=True)
    uniq = data[ia, :]
    if return_index and return_inverse:
        return uniq, ia, ic
    if return_index:
        return uniq, ia
    if return_inverse:
        return uniq, ic
    return uniq
# -----------------------------------------------------------------------------------------------------------
def eq(m1, m2, tol):
    """
    Check if two rotation matrices (or stacks of them) agree within ``tol``.

    Behavior by input shape:
    * both 2-D (3x3): True if all entries agree within ``tol``
    * one 2-D, one (n, 3, 3) stack: True if the single matrix matches ANY
      matrix of the stack
    * both stacks of equal length: returns np.where of the matching pairs

    Raises
    ------
    Exception
        if both inputs are stacks of different lengths.
    """
    if m1.ndim == 2 and m2.ndim == 2:
        m = abs(m1 - m2)
        if np.amax(m) < tol:
            return True
        else:
            return False
    elif m1.ndim == 2:
        # m1 is a single matrix, m2 a stack: tile m1 across the stack.
        msz = np.shape(m2)[0]
        tiled = np.tile(m1.reshape((1, 9)), (msz, 1)).reshape(msz, 3, 3)
        m = abs(tiled - m2)
        max1 = np.amax(np.amax(m, axis=1), axis=1) < tol
        if np.any(max1):
            return True
        else:
            return False
    elif m2.ndim == 2:
        # BUG FIX: the original called m2.reshape(msz, (1, 9)), which raises
        # TypeError (invalid shape). The intent -- mirroring the branch above
        # -- is to tile the single matrix m2 across the stack m1.
        msz = np.shape(m1)[0]
        tiled = np.tile(m2.reshape((1, 9)), (msz, 1)).reshape(msz, 3, 3)
        m = abs(m1 - tiled)
        max1 = np.amax(np.amax(m, axis=1), axis=1) < tol
        if np.any(max1):
            return True
        else:
            return False
    else:
        if np.shape(m1)[0] == np.shape(m2)[0]:
            m = abs(m1 - m2)
            max1 = np.amax(np.amax(m, axis=1), axis=1) < tol
            return np.where(max1)
        else:
            raise Exception('Wrong Input Types')
# -----------------------------------------------------------------------------------------------------------
def message_display(CheckMatrix, Checknumber, Message, Precis):
    """
    Print ``Message`` (prefixed by ``Checknumber``) with a colored verdict.

    Checks that ``CheckMatrix`` is integral to precision ``Precis`` via
    int_man.check_int_mat; prints YES in yellow on success, otherwise prints
    an amber error marker and raises.
    """
    is_integral = int_man.check_int_mat(CheckMatrix, Precis)
    print(Checknumber, '.', Message, '-> ',)
    painter = Col()
    if is_integral.all():
        painter.c_prnt('YES', 'yel')
    else:
        painter.c_prnt('<<<Error>>>', 'amber')
        raise Exception('Something wrong!!')
# -----------------------------------------------------------------------------------------------------------
def extgcd(x, y):
    """
    Extended Euclid: return (u, v, d) where d is the greatest common divisor
    of x and y and d = x * u + y * v.

    Crandall & Pomerance "PRIME NUMBERS", Algorithm 2.1.4 page 85 of
    "http://thales.doa.fmph.uniba.sk/macaj/skola/teoriapoli/primes.pdf"
    """
    # Two rows of Bezout coefficients, advanced in lockstep.
    u_prev, v_prev, g = 1, 0, x
    u_cur, v_cur, w = 0, 1, y
    while w:
        quotient, remainder = divmod(g, w)
        u_prev, v_prev, g, u_cur, v_cur, w = (
            u_cur, v_cur, w,
            u_prev - quotient * u_cur,
            v_prev - quotient * v_cur,
            remainder)
    # Normalize so the reported gcd is non-negative.
    if g >= 0:
        return u_prev, v_prev, g
    return -u_prev, -v_prev, -g
# -----------------------------------------------------------------------------------------------------------
def ehermite(a, b):
    """
    Elementary Hermite tranformation.
    For integers a and b, E = ehermite(a,b) returns
    an integer matrix with determinant 1 such that E * [a;b] = [g;0],
    where g is the gcd of a and b.
    E = ehermite(a,b)
    This function is in some ways analogous to GIVENS.
    John Gilbert, 415-812-4487, December 1993
    gilbert@parc.xerox.com
    Xerox Palo Alto Research Center
    Parameters
    ----------
    a, b: integers
    Returns
    -------
    E: numpy array 2x2
        integer matrix with determinant 1 such that E * [a;b] = [g;0],
        where g is the gcd of a and b.
    """
    [c, d, g] = extgcd(a, b)
    if g:
        # BUG FIX: use floor division so E stays an integer matrix under
        # Python 3 ('/' on ints would produce floats); g divides a and b
        # exactly, so '//' loses nothing (and is identical under Python 2).
        E = np.array([[c, d], [-b // g, a // g]])
    else:
        # gcd(0, 0): the identity already satisfies the contract.
        E = np.array([[1, 0], [0, 1]])
    return E
#Leila: check this "http://www.ece.northwestern.edu/local-apps/matlabhelp/techdoc/ref/gcd.html"
#Leila: check this "http://www.ece.northwestern.edu/local-apps/matlabhelp/techdoc/ref/gcd.html"
# -----------------------------------------------------------------------------------------------------------
def left_matrix_division(X, Y):
    """
    Solve A @ Y = X for A (the matrix right-division X / Y).

    Uses a least-squares solve on the transposed system, then rounds the
    result to 10 decimal places to suppress floating-point residue.
    """
    # Solve Y.T @ A.T = X.T in the least-squares sense, then transpose back.
    # (Leaves unwanted decimals in some cases, hence the rounding below.)
    raw = np.linalg.lstsq(Y.T, X.T)[0].T
    # Alternative (also leaves unwanted decimals in some cases):
    #   np.dot(np.dot(X, Y.T), np.linalg.inv(np.dot(Y, Y.T)))
    return np.around(raw * 1e10) / 1e10
# -----------------------------------------------------------------------------------------------------------
def smith_nf(matrix):
    """
    Smith normal form of an integer matrix.
    [U,S,V] = smith(A) returns integer matrices U, S, and V such that
    A = U*S*V',
    S is diagonal and nonnegative, S(i,i) divides S(i+1,i+1) for all i,
    det U =+-1, and det V =+-1.
    s = smith(A) just returns diag(S).
    Uses function ehermite.
    [U,S,V] = smith(A);
    This function is in some ways analogous to SVD.
    Originally implemented by: John Gilbert, 415-812-4487, December 1993
    gilbert@parc.xerox.com
    Xerox Palo Alto Research Center
    Parameters
    -----------
    matrix: numpy array
        must be integral (integer values, any numeric dtype)
    Returns
    --------
    S: numpy array
        S is diagonal and nonnegative, S(i,i) divides S(i+1,i+1) for all i
    U: numpy array
        det(U) =+-1
    V: numpy array
        det(V) =+-1
    """
    A=np.copy(matrix)
    if (np.around(A) != A).any():
        raise Exception('This function requires integer input.')
    # This looks much like an SVD algorithm that first bidiagonalizes
    # A by Givens rotations and then chases zeros, except for
    # the construction of the 2 by 2 elementary transformation.
    m, n = A.shape
    # S aliases the local copy A and is reduced in place;
    # the caller's matrix is untouched.
    S = A
    U = np.eye(m)
    V = np.eye(n)
    # Bidiagonalize S with elementary Hermite transforms.
    for j in range(min(m, n)):
        # Zero column j below the diagonal.
        for i in range(j+1, m):
            if S[i, j]:
                # Construct an elementary Hermite transformation E
                # to zero S(i,j) by combining rows i and j.
                E = ehermite(S[j, j], S[i, j])
                # Apply the transform to S and U.
                S[[j, i], :] = np.dot(E, S[[j, i], :])
                # U[:, [j, i]] = U[:, [j, i]] / E
                U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division
        # % Zero row j after the superdiagonal.
        for i in range(j+2, n):
            if S[j, i]:
                # Construct an elementary Hermite transformation E
                # to zero S(j,i) by combining columns j+1 and i.
                E = ehermite(S[j, j+1], S[j, i])
                # Apply the transform to S and V.
                S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)
                # V[:, [j+1, i]] = V[:, [j+1, i]] / E
                V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division
    # Now S is upper bidiagonal.
    # Chase the superdiagonal nonzeros away.
    D = np.diag(S, 1)
    while any(D):
        # b is the index of the first nonzero superdiagonal element
        # (np.where returns a 1-tuple; min() unwraps it).
        b = min(np.where(D))[0]
        # Start chasing bulge at first nonzero superdiagonal element.
        # To guarantee reduction in S(b,b), first make S(b,b) positive
        # and make S(b,b+1) nonnegative and less than S(b,b).
        if S[b, b] < 0:
            S[b, :] = -S[b, :]
            U[:, b] = -U[:, b]
        q = np.floor(S[b, b+1] / S[b, b])
        E = np.array([[1, 0], [-q, 1]])
        S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)
        # V[:, [b, b+1]] = V[:, [b, b+1]] / E
        V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division
        if S[b, b+1]:
            # Zero the first nonzero superdiagonal element
            # using columns b and b+1, to start the bulge at S(b+1,b).
            E = ehermite(S[b, b], S[b, b+1])
            S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)
            # V[:, [b, b+1]] = V[:, [b, b+1]] / E
            V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)
        for j in range(min(m, n)):
            if j+1 < m:
                # Zero S(j+1,j) using rows j and j+1.
                E = ehermite(S[j, j], S[j+1, j])
                S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])
                # U[:, [j, j+1]] = U[:, [j, j+1]] / E
                U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)
            if j+2 < n:
                # Zero S(j,j+2) using columns j+1 and j+2.
                E = ehermite(S[j, j+1], S[j, j+2])
                S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)
                # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E
                V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)
        D = np.diag(S, 1)
    # Now S is diagonal. Make it nonnegative.
    for j in range(min(m, n)):
        if S[j, j] < 0:
            S[j, :] = -S[j, :]
            U[:, j] = -U[:, j]
    # Squeeze factors to lower right to enforce divisibility condition.
    for i in range(min(m, n)):
        for j in range(i+1, min(m, n)):
            # Replace S(i,i), S(j,j) by their gcd and lcm respectively.
            a = S[i, i]
            b = S[j, j]
            [c, d, g] = extgcd(a, b)
            # NOTE(review): '/' below is true division under Python 3, making
            # E and F float matrices; the algorithm appears to assume Python 2
            # integer division here -- confirm the target interpreter.
            E = np.array([[1, d], [-b/g, a*c/g]])
            F = np.array([[c, 1], [-b*d/g, a/g]])
            S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)
            # S[i, i] = tmp_arr[0, 0]
            # S[i, j] = tmp_arr[0, 1]
            # S[j, i] = tmp_arr[1, 0]
            # S[j, j] = tmp_arr[1, 1]
            U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)
            V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)
    U = np.around(U)
    V = np.around(V)
    return U, S, V
# -----------------------------------------------------------------------------------------------------------
def vrrotvec2mat(ax_ang):
    """
    Create Rotation Matrices from Axis-Angle vectors.

    Parameters
    ----------
    ``ax_ang``: numpy array, shape (5,), (4,), (5, n) or (n, 5)
        rows 0-2: rotation axis \v
        row 3: angle \v
        row 4: 1 for proper and -1 for improper \v
        (a 4-entry input is promoted to proper)

    Returns
    -------
    mtx: nx3x3 numpy array
        3x3 rotation matrices (plain (3, 3) for a single input)

    See Also
    --------
    mat2quat, axang2quat, vrrotmat2vec
    """
    # Normalize the input to a (5, n) column layout.
    if ax_ang.ndim == 1:
        if np.size(ax_ang) == 5:
            ax_ang = np.reshape(ax_ang, (5, 1))
            msz = 1
        elif np.size(ax_ang) == 4:
            ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))
            msz = 1
        else:
            raise Exception('Wrong Input Type')
    elif ax_ang.ndim == 2:
        if np.shape(ax_ang)[0] == 5:
            msz = np.shape(ax_ang)[1]
        elif np.shape(ax_ang)[1] == 5:
            ax_ang = ax_ang.transpose()
            msz = np.shape(ax_ang)[1]
        else:
            raise Exception('Wrong Input Type')
    else:
        raise Exception('Wrong Input Type')

    # Unit rotation axes and the trig terms of the angle.
    axes = np.array(ax_ang[0:3, :], dtype=np.float64)
    axes /= np.linalg.norm(axes, axis=0)
    ux = axes[0, :]
    uy = axes[1, :]
    uz = axes[2, :]
    cth = np.cos(ax_ang[3, :])
    sth = np.sin(ax_ang[3, :])
    vth = 1 - cth  # versine

    # Rodrigues' Rotation Formula:
    # http://mathworld.wolfram.com/RodriguesRotationFormula.html
    mtx = np.column_stack((
        vth * ux * ux + cth,
        vth * ux * uy - sth * uz,
        vth * ux * uz + sth * uy,
        vth * ux * uy + sth * uz,
        vth * uy * uy + cth,
        vth * uy * uz - sth * ux,
        vth * ux * uz - sth * uy,
        vth * uy * uz + sth * ux,
        vth * uz * uz + cth,
    ))

    # Improper rotations (row 4 == -1) are negated.
    improper = np.where(ax_ang[4, :] == -1)
    mtx[improper, :] = -mtx[improper, :]

    return mtx.reshape(3, 3) if msz == 1 else mtx.reshape(msz, 3, 3)
# -----------------------------------------------------------------------------------------------------------
def vrrotmat2vec(mat1, rot_type='proper'):
    """
    Create an axis-angle np.array from Rotation Matrix:
    Parameters
    ----------
    mat1: nx3x3 numpy array
        The nx3x3 rotation matrices to convert (a single 3x3 is accepted)
    rot_type: string ('proper' or 'improper')
        ``improper`` if there is a possibility of
        having improper matrices in the input,
        ``proper`` otherwise. \v
        Default: ``proper``
    Returns
    -------
    ``ax_ang``: numpy 5xn array
        The 3D rotation axis and angle (ax_ang) \v
        5 entries: \v
        First 3: axis \v
        4: angle \v
        5: 1 for proper and -1 for improper \v
    See Also
    --------
    mat2quat, axang2quat, vrrotvec2mat
    """
    mat = np.copy(mat1)
    # Normalize the input to a (n, 3, 3) stack; the caller's array is untouched.
    if mat.ndim == 2:
        if np.shape(mat) == (3, 3):
            mat = np.copy(np.reshape(mat, (1, 3, 3)))
        else:
            raise Exception('Wrong Input Type')
    elif mat.ndim == 3:
        if np.shape(mat)[1:] != (3, 3):
            raise Exception('Wrong Input Type')
    else:
        raise Exception('Wrong Input Type')
    msz = np.shape(mat)[0]
    ax_ang = np.zeros((5, msz))
    epsilon = 1e-12
    # Row 4 records the determinant sign: +1 proper, -1 improper.
    # Improper inputs are negated so the remainder works on proper rotations.
    if rot_type == 'proper':
        ax_ang[4, :] = np.ones(np.shape(ax_ang[4, :]))
    elif rot_type == 'improper':
        for i in range(msz):
            det1 = np.linalg.det(mat[i, :, :])
            if abs(det1 - 1) < epsilon:
                ax_ang[4, i] = 1
            elif abs(det1 + 1) < epsilon:
                ax_ang[4, i] = -1
                mat[i, :, :] = -mat[i, :, :]
            else:
                raise Exception('Matrix is not a rotation: |det| != 1')
    else:
        raise Exception('Wrong Input parameter for rot_type')
    mtrc = mat[:, 0, 0] + mat[:, 1, 1] + mat[:, 2, 2]
    # Case 1: trace == 3 -> identity rotation (angle 0, axis chosen as y).
    ind1 = np.where(abs(mtrc - 3) <= epsilon)[0]
    ind1_sz = np.size(ind1)
    if np.size(ind1) > 0:
        ax_ang[:4, ind1] = np.tile(np.array([0, 1, 0, 0]), (ind1_sz, 1)).transpose()
    # Case 2: trace == -1 -> angle pi; the axis sign is ambiguous and is
    # resolved by the flipping algorithm below.
    ind2 = np.where(abs(mtrc + 1) <= epsilon)[0]
    ind2_sz = np.size(ind2)
    if ind2_sz > 0:
        # phi = pi
        # This singularity requires elaborate sign ambiguity resolution
        # Compute axis of rotation, make sure all elements >= 0
        # real signs are obtained by flipping algorithm below
        diag_elems = np.concatenate((mat[ind2, 0, 0].reshape(ind2_sz, 1),
                                     mat[ind2, 1, 1].reshape(ind2_sz, 1),
                                     mat[ind2, 2, 2].reshape(ind2_sz, 1)), axis=1)
        axis = np.sqrt(np.maximum((diag_elems + 1)/2, np.zeros((ind2_sz, 3))))
        # axis elements that are <= epsilon are set to zero
        axis = axis*((axis > epsilon).astype(int))
        # Flipping
        #
        # The algorithm uses the elements above diagonal to determine the signs
        # of rotation axis coordinate in the singular case Phi = pi.
        # All valid combinations of 0, positive and negative values lead to
        # 3 different cases:
        # If (Sum(signs)) >= 0 ... leave all coordinates positive
        # If (Sum(signs)) == -1 and all values are non-zero
        #   ... flip the coordinate that is missing in the term that has + sign,
        #       e.g. if 2AyAz is positive, flip x
        # If (Sum(signs)) == -1 and 2 values are zero
        #   ... flip the coord next to the one with non-zero value
        #   ... ambiguous, we have chosen shift right
        # construct vector [M23 M13 M12] ~ [2AyAz 2AxAz 2AxAy]
        # (in the order to facilitate flipping):    ^
        #                                  [no_x  no_y  no_z ]
        m_upper = np.concatenate((mat[ind2, 1, 2].reshape(ind2_sz, 1),
                                  mat[ind2, 0, 2].reshape(ind2_sz, 1),
                                  mat[ind2, 0, 1].reshape(ind2_sz, 1)), axis=1)
        # elements with || smaller than epsilon are considered to be zero
        signs = np.sign(m_upper)*((abs(m_upper) > epsilon).astype(int))
        sum_signs = np.sum(signs, axis=1)
        # t1 tags each row with which of the three flip cases applies.
        t1 = np.zeros(ind2_sz,)
        tind1 = np.where(sum_signs >= 0)[0]
        t1[tind1] = np.ones(np.shape(tind1))
        tind2 = np.where(np.all(np.vstack(((np.any(signs == 0, axis=1) == False), t1 == 0)), axis=0))[0]
        t1[tind2] = 2*np.ones(np.shape(tind2))
        tind3 = np.where(t1 == 0)[0]
        flip = np.zeros((ind2_sz, 3))
        flip[tind1, :] = np.ones((np.shape(tind1)[0], 3))
        flip[tind2, :] = np.copy(-signs[tind2, :])
        t2 = np.copy(signs[tind3, :])
        shifted = np.column_stack((t2[:, 2], t2[:, 0], t2[:, 1]))
        flip[tind3, :] = np.copy(shifted + (shifted == 0).astype(int))
        axis = axis*flip
        ax_ang[:4, ind2] = np.vstack((axis.transpose(), np.pi*(np.ones((1, ind2_sz)))))
    # Case 3 (generic): recover angle from the trace and the axis from the
    # antisymmetric part of the matrix.
    ind3 = np.where(np.all(np.vstack((abs(mtrc + 1) > epsilon, abs(mtrc - 3) > epsilon)), axis=0))[0]
    ind3_sz = np.size(ind3)
    if ind3_sz > 0:
        phi = np.arccos((mtrc[ind3]-1)/2)
        den = 2*np.sin(phi)
        a1 = (mat[ind3, 2, 1]-mat[ind3, 1, 2])/den
        a2 = (mat[ind3, 0, 2]-mat[ind3, 2, 0])/den
        a3 = (mat[ind3, 1, 0]-mat[ind3, 0, 1])/den
        axis = np.column_stack((a1, a2, a3))
        ax_ang[:4, ind3] = np.vstack((axis.transpose(), phi.transpose()))
    return ax_ang
# -----------------------------------------------------------------------------------------------------------
def quat2mat(q):
    """
    Convert Quaternion Arrays to Rotation Matrices.

    Parameters
    ----------
    q: quaternion array (see the quat module)

    Returns
    ----------
    g: numpy array (3 x 3) for a single quaternion, else (n x 3 x 3)
        rotation matrices; negated where the quaternion type is -1

    See Also
    --------
    mat2quat, axang2quat
    """
    # Formula:
    # https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm
    n = quat.get_size(q)
    w = quat.getq0(q)
    x = quat.getq1(q)
    y = quat.getq2(q)
    z = quat.getq3(q)
    qtype = quat.get_type(q)

    w2 = np.square(w)
    x2 = np.square(x)
    y2 = np.square(y)
    z2 = np.square(z)

    g = np.zeros((n, 3, 3))
    g[:, 0, 0] = w2 + x2 - y2 - z2
    g[:, 0, 1] = 2*(x*y - w*z)
    g[:, 0, 2] = 2*(z*x + w*y)
    g[:, 1, 0] = 2*(x*y + w*z)
    g[:, 1, 1] = w2 - x2 + y2 - z2
    g[:, 1, 2] = 2*(y*z - w*x)
    g[:, 2, 0] = 2*(z*x - w*y)
    g[:, 2, 1] = 2*(y*z + w*x)
    g[:, 2, 2] = w2 - x2 - y2 + z2

    # Quaternions of type -1 (improper) yield negated matrices.
    if n == 1:
        g = g.reshape((3, 3))
        if qtype == -1:
            g = -g
    else:
        flipped = np.where(qtype == -1)
        g[flipped, :, :] = -g[flipped, :, :]
    return g
# -----------------------------------------------------------------------------------------------------------
def mat2quat(mat, rot_type='proper'):
    """
    Convert Rotation Matrices to Quaternions.

    Parameters
    ----------
    mat: numpy array or a list of (3 x 3)
        rotation matrix
    rot_type: string ('proper' or 'improper')
        ``improper`` if there is a possibility of
        having improper matrices in the input,
        ``proper`` otherwise. \v
        Default: ``proper``

    Returns
    ----------
    quaternion_rep: numpy array (5 x 1)

    See Also
    --------
    quat2mat, axang2quat
    """
    # Background:
    # https://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/index.htm
    # Go through the axis-angle representation, then apply the half-angle map.
    ax_ang = vrrotmat2vec(mat, rot_type)
    half_angle = ax_ang[3, :]/2
    sin_half = np.sin(half_angle)
    q0 = np.cos(half_angle)
    q1 = ax_ang[0, :]*sin_half
    q2 = ax_ang[1, :]*sin_half
    q3 = ax_ang[2, :]*sin_half
    qtype = ax_ang[4, :]
    return quat.Quaternion(q0, q1, q2, q3, qtype)
# -----------------------------------------------------------------------------------------------------------
# def axang2quat(ax_ang, rot_type='proper'):
# """
# Create a quaternion corresponding to the rotation specified by an axis and an angle
# Parameters
# ----------
# ax_ang: numpy array or a list of (4 x 1)
# Returns
# ----------
# quaternion_rep: numpy array (5 x 1)
# """
# if ax_ang.ndim == 1:
# if np.size(ax_ang) == 4:
# ax_ang = np.reshape(ax_ang, (4, 1))
# msz = 1
# else:
# raise Exception('Wrong Input Type')
# elif ax_ang.ndim == 2:
# if np.shape(ax_ang)[0] == 4:
# msz = np.shape(ax_ang)[1]
# elif np.shape(ax_ang)[1] == 4:
# ax_ang = ax_ang.transpose()
# msz = np.shape(ax_ang)[1]
# else:
# raise Exception('Wrong Input Type')
# else:
# raise Exception('Wrong Input Type')
# direction = ax_ang[0:3, :]
# angle = ax_ang[3, :]
# d = np.array(direction, dtype=np.float64)
# d /= np.linalg.norm(d, axis=0)
# x = d[0, :]
# y = d[1, :]
# z = d[2, :]
# q0 = np.cos(angle/2)
# s = np.sin(angle/2)
# q1 = x*s
# q2 = y*s
# q3 = z*s
# if rot_type=='proper':
# qtype = 1
# else:
# qtype = -1
# return quat.Quaternion(q0, q1, q2, q3, qtype)
# # -----------------------------------------------------------------------------------------------------------
def axang2quat(ax_ang):
    """
    Create a quaternion corresponding to the rotation specified by an axis and an angle.

    Parameters
    ----------
    ax_ang: numpy array, shape (5,), (4,), (5, n) or (n, 5)
        rows 0-2: axis, row 3: angle, row 4: +1 proper / -1 improper
        (a 4-entry input is promoted to proper)

    Returns
    ----------
    quaternion_rep: numpy array (5 x 1)
    """
    # Normalize the input to a (5, n) column layout.
    if ax_ang.ndim == 1:
        if np.size(ax_ang) == 5:
            ax_ang = np.reshape(ax_ang, (5, 1))
            msz = 1
        elif np.size(ax_ang) == 4:
            ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))
            msz = 1
        else:
            raise Exception('Wrong Input Type')
    elif ax_ang.ndim == 2:
        if np.shape(ax_ang)[0] == 5:
            msz = np.shape(ax_ang)[1]
        elif np.shape(ax_ang)[1] == 5:
            ax_ang = ax_ang.transpose()
            msz = np.shape(ax_ang)[1]
        else:
            raise Exception('Wrong Input Type')
    else:
        raise Exception('Wrong Input Type')

    # Unit axes, then the half-angle map to quaternion components.
    axes = np.array(ax_ang[0:3, :], dtype=np.float64)
    axes /= np.linalg.norm(axes, axis=0)
    half_angle = ax_ang[3, :]/2
    sin_half = np.sin(half_angle)
    q0 = np.cos(half_angle)
    q1 = axes[0, :]*sin_half
    q2 = axes[1, :]*sin_half
    q3 = axes[2, :]*sin_half

    # Quaternion type mirrors row 4: -1 improper, +1 proper, 0 otherwise.
    qtype = 0*q3
    qtype[np.where(ax_ang[4, :] == -1)] = -1
    qtype[np.where(ax_ang[4, :] == 1)] = 1

    return quat.Quaternion(q0, q1, q2, q3, qtype)
# -----------------------------------------------------------------------------------------------------------
def unique_rows_tol(data, tol=1e-12, return_index=False, return_inverse=False):
    """
    This function returns the unique rows of the input matrix within that are within the
    specified tolerance.

    NOTE(review): this is a verbatim duplicate that shadows the identical
    unique_rows_tol defined earlier in this module -- consider removing one.

    Parameters
    ----------
    data: numpy array (m x n)
    tol: double
        tolerance of comparison for each rows
        Default: 1e-12
    return_index: Boolean
        flag to return the index of unique rows based on the indices of the output
    return_inverse: Boolean
        flag to return the index of unique rows based on the indices of the input
    Returns
    ----------
    unique_rows: numpy array (m' x n)
    ia: numpy array, integer (m' x 1)
        unique rows based on the indices of the output
    ic: numpy array, integer (m x 1)
        unique rows based on the indices of the input
    See Also
    --------
    unique
    """
    prec = -np.fix(np.log10(tol))
    d_r = np.fix(data * 10 ** prec) / 10 ** prec + 0.0
    ### fix rounds off towards zero; issues with the case of 0.9999999998 and 1.0
    ### rint solves the issue, needs extensive testing
    # prec = -np.rint(np.log10(tol))
    # d_r = np.rint(data * 10 ** prec) / 10 ** prec + 0.0
    # View each row as a single void scalar so np.unique works row-wise.
    b = np.ascontiguousarray(d_r).view(np.dtype((np.void, d_r.dtype.itemsize * d_r.shape[1])))
    _, ia = np.unique(b, return_index=True)
    _, ic = np.unique(b, return_inverse=True)
    ret_arr = data[ia, :]
    if not return_index and not return_inverse:
        return ret_arr
    else:
        if return_index and return_inverse:
            return ret_arr, ia, ic
        elif return_index:
            return ret_arr, ia
        elif return_inverse:
            return ret_arr, ic
    # if not return_index and not return_inverse:
    #     return np.unique(b).view(d_r.dtype).reshape(-1, d_r.shape[1])
    # else:
    #     if return_index and return_inverse:
    #         return np.unique(b).view(d_r.dtype).reshape(-1, d_r.shape[1]), ia, ic
    #     elif return_index:
    #         return np.unique(b).view(d_r.dtype).reshape(-1, d_r.shape[1]), ia
    #     elif return_inverse:
    #         return np.unique(b).view(d_r.dtype).reshape(-1, d_r.shape[1]), ic
# -----------------------------------------------------------------------------------------------------------
|
|
from __future__ import division, absolute_import, print_function
import sys, os, re, mapp
import sphinx
# Require a sufficiently recent Sphinx before configuring anything.
# NOTE(review): lexicographic string comparison of version numbers is fragile
# for multi-digit components -- confirm it holds for the supported range.
if sphinx.__version__ < "1.0.1":
    raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'

# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Make the project's sphinxext helpers importable ahead of installed packages.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# NOTE(review): sphinx.ext.pngmath was removed in later Sphinx releases
# (superseded by imgmath) -- confirm against the pinned Sphinx version.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
              'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.autosummary',
              'matplotlib.sphinxext.plot_directive']
templates_path = ['__templates']
source_suffix = '.rst'
project = 'MAPP'
copyright = '2008-2009, The Scipy community'
import numpy
# Drop the micro version, keeping any .devNNNN suffix, for the short version.
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2','0.0.0')
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
release = '0.0.0'
print("%s %s" % (version, release))
today_fmt = '%B %d, %Y'
default_role = "autolink"
exclude_dirs = []
add_function_parentheses = False
pygments_style = 'sphinx'

# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# The scipy theme lives in a git submodule; fail early with instructions.
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
    raise RuntimeError("Get the scipy-sphinx-theme first, "
                       "via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
html_theme_options = {
    "edit_link": False,
    "sidebar": "left",
    "scipy_org_logo": False,
    "rootlinks": []
}
html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['__static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'mapp'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']

# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
_stdauthor = 'Written by the NumPy community'
latex_documents = [
    ('reference/index', 'numpy-ref.tex', 'NumPy Reference',
     _stdauthor, 'manual'),
    ('user/index', 'numpy-user.tex', 'NumPy User Guide',
     _stdauthor, 'manual'),
]
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
('mapp', 'MAPP Documentation', _stdauthor, 'MAPP',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
"""
intersphinx_mapping = {
'python': ('https://docs.python.org/dev', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org', None)
}
"""
# -----------------------------------------------------------------------------
# NumPy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
    """
    Determine the GitHub URL corresponding to a Python object.

    Parameters
    ----------
    domain : str
        Sphinx domain; only ``'py'`` is handled.
    info : dict
        Must contain ``'module'`` and ``'fullname'``.

    Returns
    -------
    str or None
        URL into the numpy GitHub tree, or ``None`` when the object
        cannot be resolved to a source location.
    """
    if domain != 'py':
        return None
    modname = info['module']
    fullname = info['fullname']
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    obj = submod
    for part in fullname.split('.'):
        # Bug fix: the original bare `except:` also swallowed
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        try:
            obj = getattr(obj, part)
        except Exception:
            return None
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        # C-implemented objects have no source file.
        fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None
    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""
    fn = relpath(fn, start=dirname(numpy.__file__))
    if 'dev' in numpy.__version__:
        return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
            fn, linespec)
    else:
        return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
            numpy.__version__, fn, linespec)
|
|
from ScopeFoundry import Measurement
from ScopeFoundry.helper_funcs import sibling_path, load_qt_ui_file
from ScopeFoundry import h5_io
import pyqtgraph as pg
import numpy as np
import time
class SineWavePlotMeasure(Measurement):
    """ScopeFoundry measurement that repeatedly reads a sine-wave value from a
    virtual function-generator hardware component into a rolling buffer,
    live-plots it with pyqtgraph and optionally streams it to an HDF5 file.
    """
    # this is the name of the measurement that ScopeFoundry uses
    # when displaying your measurement and saving data related to it
    name = "sine_wave_plot"
    def setup(self):
        """
        Runs once during App initialization.
        This is the place to load a user interface file,
        define settings, and set up data structures.
        """
        # Define ui file to be used as a graphical interface
        # This file can be edited graphically with Qt Creator
        # sibling_path function allows python to find a file in the same folder
        # as this python module
        self.ui_filename = sibling_path(__file__, "sine_plot.ui")
        #Load ui file and convert it to a live QWidget of the user interface
        self.ui = load_qt_ui_file(self.ui_filename)
        # Measurement Specific Settings
        # This setting allows the option to save data to an h5 data file during a run
        # All settings are automatically added to the Microscope user interface
        self.settings.New('save_h5', dtype=bool, initial=True)
        self.settings.New('sampling_period', dtype=float, unit='s', initial=0.1)
        # Create empty numpy array to serve as a buffer for the acquired data
        # (120 samples; overwritten circularly by run()).
        self.buffer = np.zeros(120, dtype=float)
        # Define how often to update display during a run
        self.display_update_period = 0.1
        # Convenient reference to the hardware used in the measurement
        self.func_gen = self.app.hardware['virtual_function_gen']
    def setup_figure(self):
        """
        Runs once during App initialization, after setup()
        This is the place to make all graphical interface initializations,
        build plots, etc.
        """
        # connect ui widgets to measurement/hardware settings or functions
        self.ui.start_pushButton.clicked.connect(self.start)
        self.ui.interrupt_pushButton.clicked.connect(self.interrupt)
        self.settings.save_h5.connect_to_widget(self.ui.save_h5_checkBox)
        self.func_gen.settings.amplitude.connect_to_widget(self.ui.amp_doubleSpinBox)
        # Set up pyqtgraph graph_layout in the UI
        self.graph_layout=pg.GraphicsLayoutWidget()
        self.ui.plot_groupBox.layout().addWidget(self.graph_layout)
        # Create PlotItem object (a set of axes)
        self.plot = self.graph_layout.addPlot(title="Sine Wave Readout Plot")
        # Create PlotDataItem object ( a scatter plot on the axes )
        self.optimize_plot_line = self.plot.plot([0])
    def update_display(self):
        """
        Displays (plots) the numpy array self.buffer.
        This function runs repeatedly and automatically during the measurement run.
        its update frequency is defined by self.display_update_period
        """
        self.optimize_plot_line.setData(self.buffer)
    def run(self):
        """
        Runs when measurement is started. Runs in a separate thread from GUI.
        It should not update the graphical interface directly, and should only
        focus on data acquisition.
        """
        # first, create a data file
        if self.settings['save_h5']:
            # if enabled will create an HDF5 file with the plotted data
            # first we create an H5 file (by default autosaved to app.settings['save_dir']
            # This stores all the hardware and app meta-data in the H5 file
            self.h5file = h5_io.h5_base_file(app=self.app, measurement=self)
            # create a measurement H5 group (folder) within self.h5file
            # This stores all the measurement meta-data in this group
            self.h5_group = h5_io.h5_create_measurement_group(measurement=self, h5group=self.h5file)
            # create an h5 dataset to store the data
            self.buffer_h5 = self.h5_group.create_dataset(name  = 'buffer',
                                                          shape = self.buffer.shape,
                                                          dtype = self.buffer.dtype)
        # We use a try/finally block, so that if anything goes wrong during a measurement,
        # the finally block can clean things up, e.g. close the data file object.
        # NOTE(review): both this check and the one in `finally` re-read
        # settings['save_h5']; if the user toggles it mid-run, the finally
        # block may reference an h5file that was never created (or leave an
        # open one unclosed). Consider latching the value into a local at the
        # top of run() -- left unchanged here.
        try:
            i = 0
            # Will run forever until interrupt is called.
            while not self.interrupt_measurement_called:
                i %= len(self.buffer)
                # Set progress bar percentage complete
                self.settings['progress'] = i * 100./len(self.buffer)
                # Fills the buffer with sine wave readings from func_gen Hardware
                self.buffer[i] = self.func_gen.settings.sine_data.read_from_hardware()
                if self.settings['save_h5']:
                    # if we are saving data to disk, copy data to H5 dataset
                    self.buffer_h5[i] = self.buffer[i]
                    # flush H5
                    self.h5file.flush()
                # wait between readings.
                # We will use our sampling_period settings to define time
                time.sleep(self.settings['sampling_period'])
                i += 1
                if self.interrupt_measurement_called:
                    # Listen for interrupt_measurement_called flag.
                    # This is critical to do, if you don't the measurement will
                    # never stop.
                    # The interrupt button is a polite request to the
                    # Measurement thread. We must periodically check for
                    # an interrupt request
                    break
        finally:
            if self.settings['save_h5']:
                # make sure to close the data file
                self.h5file.close()
|
|
"""Histogram weighted energy samples from 'answer.txt' into 1000 bins.

Each input line carries an energy in columns 59:72 and a statistical weight
in columns 77:86 (fixed-width records -- TODO confirm against the producer).
The weighted counts are written to 'histoplotuvw1qq.dat' as
"<bin edge>\\t<weighted count>" lines, and the total accumulated weight is
printed at the end.

Improvements over the original: the nine hand-unrolled ``elif`` branches are
collapsed into a single data-driven search, files are closed via ``with``,
the unused ``y = x/20`` variable is gone, and the I/O is guarded by
``__main__`` so the module can be imported without side effects.
"""


def route_sample(energy, weight, edges, counts, segments):
    """Add *weight* to the entry of *counts* whose edge interval holds *energy*.

    ``segments`` is a list of ``(lo, hi)`` index pairs partitioning ``edges``;
    the two-level search reproduces the original elif chain exactly.
    NOTE(review): samples exactly on a segment edge, below
    ``edges[segments[0][0]]`` or at/above ``edges[segments[-1][1]]`` are
    silently dropped -- this mirrors the original behaviour; confirm intended.
    """
    for lo, hi in segments:
        if edges[lo] < energy < edges[hi]:
            for n in range(lo, hi):
                if energy < edges[n]:
                    counts[n] += weight
                    break
            return


def main():
    """Read samples, bin them, write the histogram file, print total weight."""
    bins = 1000
    step = 1e3 / bins
    # The leading 0 duplicates the first generated edge (edges = [0, 0.0, 1.0,
    # ...]); kept from the original so bin indexing is unchanged.
    edges = [0]
    for t in range(bins):
        edges.append(step * t)
    counts = [0] * bins
    # Segment marks 100, 200, ..., 1000 -- the original b0..b9 constants.
    marks = list(range(100, 1001, 100))
    segments = list(zip(marks, marks[1:]))
    total = 0.0
    with open('answer.txt') as fin:
        for line in fin:
            # Fixed-width columns: energy and weight fields.
            energy = float(line[59:72])
            weight = float(line[77:86])
            route_sample(energy, weight, edges, counts, segments)
    with open('histoplotuvw1qq.dat', 'w') as fout:
        for y in range(bins):
            total += counts[y]
            fout.write("{0:0>5.1f}\t{1:0>5.2f}\n".format(edges[y], counts[y]))
    print(total)


if __name__ == "__main__":
    main()
|
|
## Este script no es necesario usarlo después del 01 de Abril de 2020
"""
Se realiza este script para construir las columnas "casos_nuevos" y
"fallecidos_nuevos", sólo para los informes diarios previos (e incluído) al
01 de Abril. Esto porque el minsal antes del 25 de marzo no indicaba los
"casos nuevos", sino que solo los casos totales. Lo mismo para los fallecidos,
y hasta el 01 de Abril no se estaban calculando los nuevos fallecidos al
importar los datos.
La gracia es que a partir de ahora (desde el 02 de abril en adelante), al
generar el informe diario en CSV se calculen automáticamente los
fallecidos_nuevos sin tener que arreglar después
los informes (los casos_nuevos ahora los está publicando directamtne el minsal.)
"""
import numpy as np
import pandas as pd
import datetime
from datetime import timedelta, date
# Directory holding the daily regional CSV reports we want to fix
path='../../informes_minsal/informes_diarios_Region_CSV/'
# Common suffix of every daily report file name
formato_archivo='-InformeDiarioRegion-COVID19.csv'
###
## We want to fix the reports from 2020-03-02 through 2020-04-01
## To turn a date string into a datetime:
##   datetime = pd.to_datetime('2020-03-01')
## To turn a datetime back into a date string:
##   datetime.strftime('%Y-%m-%d')
# First define a helper yielding every date in between
# (an inverse range: from the most recent date down to the oldest)
def rango_fechas_inverso(start_date, end_date):
    """Yield dates from *end_date* down to (but excluding) *start_date*.

    Bug fix: the original body ignored both parameters and closed over the
    module-level globals ``fecha_inicial``/``fecha_final``; as a result the
    first requested day (2020-03-02) was silently skipped even though the
    caller passes ``fecha_inicial - timedelta(1)`` precisely to include it.

    Parameters
    ----------
    start_date, end_date : datetime-like
        Range bounds; only subtraction and ``timedelta`` arithmetic are used.
    """
    for n in range(int((end_date - start_date).days)):
        yield end_date - timedelta(n)
fecha_inicial=pd.to_datetime('2020-03-02')
fecha_final=pd.to_datetime('2020-04-01')
# Walk backwards over the dates to fix; start is shifted one day earlier so
# that the first report day still has a "yesterday" to diff against.
for fecha in rango_fechas_inverso(fecha_inicial-timedelta(1), fecha_final):
    # Today's date as string:     fecha.strftime("%Y-%m-%d")
    # Yesterday's date as string: (fecha-timedelta(1)).strftime("%Y-%m-%d")
    # Open today's and yesterday's CSVs
    stringHoy=fecha.strftime("%Y-%m-%d")
    stringAyer=(fecha-timedelta(1)).strftime("%Y-%m-%d")
    informeHoy = pd.read_csv(path+stringHoy+formato_archivo)
    informeAyer= pd.read_csv(path+stringAyer+formato_archivo)
    # New cases / new deaths = today's cumulative totals minus yesterday's
    informeHoy['casos_nuevos']=(informeHoy.casos_totales-informeAyer.casos_totales)
    informeHoy['fallecidos_nuevos']=(informeHoy.fallecidos_totales-informeAyer.fallecidos_totales)
    # Overwrite today's CSV, keeping the columns in canonical order
    informeHoy=informeHoy[['id_reg', 'nombre_reg', 'casos_nuevos', 'casos_totales',
                'fallecidos_nuevos', 'fallecidos_totales', 'recuperados_nuevos',
                'recuperados_totales']]
    informeHoy.to_csv(path+stringHoy+formato_archivo, index=False)
|
|
import json
import os
import numpy as np
import torch
import torchvision
from torch.autograd import Variable
from fool_models.stack_attention import CnnLstmSaModel
from neural_render.blender_render_utils.constants import find_platform_slash
from utils.train_utils import ImageCLEVR_HDF5
from skimage.color import rgba2rgb
from skimage.io import imread
from skimage.transform import resize as imresize
# Project helper returning the OS path separator; UP_TO_HERE_ is the
# repository root (two levels above this file) normalised to forward slashes.
PLATFORM_SLASH = find_platform_slash()
UP_TO_HERE_ = PLATFORM_SLASH.join(os.path.abspath(__file__).split(PLATFORM_SLASH)[:-2]).replace(PLATFORM_SLASH, '/')
def invert_dict(d):
    """Return a new dict mapping each value of *d* back to its key.

    If several keys share a value, the key seen last in iteration order wins.
    """
    flipped = {}
    for key, value in d.items():
        flipped[value] = key
    return flipped
# Reserved vocabulary entries shared by question/program/answer encodings.
SPECIAL_TOKENS = {
    '<NULL>': 0,
    '<START>': 1,
    '<END>': 2,
    '<UNK>': 3,
}
# Load the CLEVR vocabulary and build token <-> index maps in both directions.
with open(f'{UP_TO_HERE_}/fool_models/resources/vocab.json', 'r') as fin:
    data = json.loads(fin.read())
    question_token_to_idx = data['question_token_to_idx']
    program_token_to_idx = data['program_token_to_idx']
    answer_token_to_idx = data['answer_token_to_idx']
idx_to_question_token = invert_dict(question_token_to_idx)
idx_to_program_token = invert_dict(program_token_to_idx)
idx_to_answer_token = invert_dict(answer_token_to_idx)
def load_cnn_sa(baseline_model=f'{UP_TO_HERE_}/fool_models/resources/cnn_lstm_sa_mlp.pt'):
    """Load the pretrained CNN+LSTM stacked-attention model and set it to eval mode."""
    model, _ = CnnLstmSaModel.load(baseline_model)
    model.eval()
    return model
def load_loader():
    """Build the CLEVR validation dataloader (batch size 1, no shuffling).

    NOTE(review): the dataset paths are hard-coded to one Windows machine
    ('C:\\Users\\Guldan\\...'); parameterise before running elsewhere.
    """
    val_set = ImageCLEVR_HDF5(config=None, split='val',
                              clvr_path='C:\\Users\\Guldan\\Desktop\\DeltaFormers\\data',
                              questions_path='C:\\Users\\Guldan\\Desktop\\DeltaFormers\\data',
                              scenes_path='C:\\Users\\Guldan\\Desktop\\DeltaFormers\\data',
                              use_cache=False,
                              return_program=False,
                              effective_range=10, output_shape=224)
    val_dataloader = torch.utils.data.DataLoader(val_set, batch_size=1,
                                                 num_workers=0, shuffle=False, drop_last=False)
    return val_dataloader
def load_resnet_backbone():
    """Build a truncated pretrained ResNet-101 feature extractor.

    Keeps the stem (conv1/bn1/relu/maxpool) plus layer1..layer3, moves it to
    CUDA float tensors and returns it in eval mode.
    """
    backbone = getattr(torchvision.models, 'resnet101')(pretrained=True)
    stages = [backbone.conv1, backbone.bn1, backbone.relu, backbone.maxpool]
    stages.extend(getattr(backbone, 'layer%d' % (stage + 1)) for stage in range(3))
    extractor = torch.nn.Sequential(*stages)
    extractor.type(torch.cuda.FloatTensor)
    extractor.eval()
    return extractor
def inference_with_cnn_sa(loader=None, model=None, resnet_extractor=None):
    """Run the CNN-SA model over *loader* on CUDA, print running accuracy and
    return the list of (offset-corrected) predictions.

    NOTE(review): predictions are appended shifted by -4 while ground truth is
    compared shifted by +4 -- presumably the 4 SPECIAL_TOKENS offset between
    model output indices and the answer vocabulary; confirm the sign of the
    comparison against the answer encoding.
    """
    dtype = torch.cuda.FloatTensor
    model.type(dtype)
    model.eval()
    num_correct, num_samples = 0, 0
    final_preds = []
    print(f"Testing for {len(loader)} samples")
    print()
    for batch in loader:
        # Each batch is ((_, image/question dict), answer, _).
        (_, iq), answer, _ = batch
        # iq, answers = batch
        image = iq['image'].to('cuda')
        questions = iq['question'].to('cuda')
        # Extract visual features with the (frozen) ResNet backbone.
        feats = resnet_extractor(image)
        questions_var = Variable(questions.type(dtype).long())
        feats_var = Variable(feats.type(dtype))
        scores = model(questions_var, feats_var)
        _, preds = scores.data.cpu().max(1)
        for item in preds.detach().cpu().numpy():
            final_preds.append(item - 4)
        num_correct += (preds == (answer.squeeze() + 4)).sum()
        num_samples += preds.size(0)
        if num_samples % 1000 == 0:
            print(f'Ran {num_samples} samples at {float(num_correct) / num_samples} accuracy')
    acc = float(num_correct) / num_samples
    print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
    return final_preds
def single_inference_with_cnn_sa(model=None, resnet=None):
    """Run the CNN-SA model on the rendered images in ../neural_render/images
    against one hard-coded encoded question, printing the predictions.

    Bug fix: the per-channel normalisation std was ``[0.229, 0.224, 0.224]``;
    the ImageNet statistics that the pretrained ResNet backbone expects are
    ``std = [0.229, 0.224, 0.225]``. The mean/std constants are also hoisted
    out of the image loop (they are loop-invariant).
    """
    dtype = torch.cuda.FloatTensor
    model.type(dtype)
    model.eval()
    img_size = (224, 224)
    path = '../neural_render/images'
    images = [f'../neural_render/images/{f}' for f in os.listdir(path) if 'Rendered' in f and '.png' in f]
    feat_list = []
    # ImageNet channel statistics used when the ResNet backbone was trained.
    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)
    ### Read the images ###
    for image in images:
        img = imread(image)
        img = rgba2rgb(img)
        img = imresize(img, img_size)
        img = img.astype('float32')
        # HWC -> 1 x C x H x W
        img = img.transpose(2, 0, 1)[None]
        img = (img - mean) / std
        img_var = torch.FloatTensor(img).to('cuda')
        ### Pass through Resnet ###
        feat_list.append(resnet(img_var))
    ### Stack them with Questions ###
    feats = torch.cat(feat_list, dim=0)
    # Hard-coded encoded question, padded with <NULL>=0 -- TODO confirm wording.
    questions = [10, 85, 14, 25, 30, 64, 66, 84, 74, 75, 21, 84, 45, 86, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    questions = torch.LongTensor(questions).unsqueeze(0)
    questions = questions.to('cuda')
    feats = feats.to('cuda')
    scores = model(questions, feats)
    _, preds = scores.data.cpu().max(1)
    # Undo the special-token offset in the answer indices.
    preds = [f - 4 for f in preds]
    print(preds)
    return
#resnet = load_resnet_backbone()
#model = load_cnn_sa()
# loader = load_loader()
# inference_with_cnn_sa(loader=loader, model=model, resnet_extractor=resnet)
#single_inference_with_cnn_sa(model=model, resnet=resnet)
|
|
##########################################################################
# Name: calEvoRateLow.py
#
# Calucurate Bomb Low
#
# Usage:
#
# Author: Ryosuke Tomita
# Date: 2021/08/13
##########################################################################
from netCDF4 import Dataset
import numpy as np

# Open the surface-analysis netCDF file and print a quick summary: its
# dimensions, its variables, the shape of the first sea-level-pressure
# (prmsl) record, and the latitude/longitude grids.
surface = Dataset("../data/surface_2020-12-25_0")
dim_keys = surface.dimensions.keys()
var_keys = surface.variables.keys()
prmsl = surface.variables['prmsl']
print(dim_keys)
print(var_keys)
print(np.array(prmsl[0]).shape)
print(np.array(surface.variables['latitude']))
print(np.array(surface.variables['longitude']))
|
|
# -*- coding: utf-8 -*-
# This is the skeleton of PISCOLA, the main file
import piscola
from .filter_utils import integrate_filter, calc_eff_wave, calc_pivot_wave, calc_zp, filter_effective_range
from .gaussian_process import gp_lc_fit, gp_2d_fit
from .extinction_correction import redden, deredden, calculate_ebv
from .mangling import mangle
from .pisco_utils import trim_filters, flux2mag, mag2flux, change_zp
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from peakutils import peak
import pickle5 as pickle
import pandas as pd
import numpy as np
import random
import math
import glob
import os
### Initialisation functions ###
# These are mainly used by the 'sn' class below
def _initialise_sn(sn_file):
    """Initialise the :func:`sn` object.
    The object is initialised with all the necessary information like filters, fluxes, etc.

    Parameters
    ----------
    sn_file : str
        Name of the SN or SN file.

    Returns
    -------
    sn_obj : obj
        New :func:`sn` object.
    """
    # First row of the file holds the metadata: name, redshift, coordinates.
    name, z, ra, dec = pd.read_csv(sn_file, delim_whitespace=True, nrows=1,
                                   converters={'name':str, 'z':float, 'ra':float, 'dec':float}).iloc[0].values
    # The photometry table starts on the third line.
    sn_df = pd.read_csv(sn_file, delim_whitespace=True, skiprows=2)
    sn_df.columns = sn_df.columns.str.lower()
    # call sn object
    sn_obj = sn(name, z=z, ra=ra, dec=dec)
    sn_obj.set_sed_template()  # Set the SED template to be used in the entire process
    sn_obj.bands = [band for band in list(sn_df['band'].unique())]
    sn_obj.call_filters()
    # order bands by effective wavelength (estimated from the SED template)
    eff_waves = [sn_obj.filters[band]['eff_wave'] for band in sn_obj.bands]
    sorted_idx = sorted(range(len(eff_waves)), key=lambda k: eff_waves[k])
    sn_obj.bands = [sn_obj.bands[x] for x in sorted_idx]
    # add data to each band
    for band in sn_obj.bands:
        band_info = sn_df[sn_df['band']==band]
        time, flux = band_info['time'].values, band_info['flux'].values
        # A single zero point per band is assumed (first unique value).
        flux_err, zp = band_info['flux_err'].values, float(band_info['zp'].unique()[0])
        mag, mag_err = flux2mag(flux, zp, flux_err)
        mag_sys = band_info['mag_sys'].unique()[0]
        sn_obj.data[band] = {'time':time,
                             'flux':flux,
                             'flux_err':flux_err,
                             'mag':mag,
                             'mag_err':mag_err,
                             'zp':zp,
                             'mag_sys':mag_sys,
                            }
    return sn_obj
def call_sn(sn_file, directory='data'):
    """Loads a supernova from a file and initialises it.

    Parameters
    ----------
    sn_file: str
        Name of the SN or SN file.
    directory : str, default ``data``
        Directory where to look for the SN file unless the full or relative path is given in ``sn_file``.
    """
    # Try, in order: the file inside `directory`, the SN name + '.dat'
    # inside `directory`, and finally `sn_file` as a full/relative path.
    candidates = [
        os.path.join(directory, sn_file),
        os.path.join(directory, sn_file) + '.dat',
        sn_file,
    ]
    for candidate in candidates:
        if os.path.isfile(candidate):
            return _initialise_sn(candidate)
    raise ValueError(f'{sn_file} was not a valid SN name or file.')
def load_sn(name, path=None):
    """Loads a :func:`sn` object that was previously saved as a pickle file.

    Parameters
    ----------
    name : str
        Name of the SN object.
    path: str, default ``None``
        Directory containing the ``<name>.pisco`` file; current directory
        when ``None``.

    Returns
    -------
    obj
        The unpickled :func:`sn` object.
    """
    if path is None:
        target = f'{name}.pisco'
    else:
        target = os.path.join(path, name) + '.pisco'
    with open(target, 'rb') as file:
        return pickle.load(file)
################################################################################
################################################################################
################################################################################
# This is the main class
class sn(object):
"""Supernova class for representing a supernova."""
def __init__(self, name, z=0, ra=None, dec=None):
self.name = name
self.z = z # redshift
self.ra = ra # coordinates in degrees
self.dec = dec
if self.ra is None or self.dec is None:
print('Warning, ra and/or dec not specified')
self.ra , self.dec = 0, 0
self.__dict__['data'] = {} # data for each band
self.__dict__['sed'] = {} # sed info
self.__dict__['filters'] = {} # filter info for each band
self.__dict__['lc_fits'] = {} # gp fitted data
self.__dict__['lc_parameters'] = {} # final SN light-curves parameters
self.__dict__['sed_results'] = {} # final SED for every phase if successful
self.__dict__['mangling_results'] = {} # mangling results for every phase if successful
self.__dict__['user_input'] = {} # save user's input
self.bands = None
self.tmax = None
def __repr__(self):
return f'name = {self.name}, z = {self.z:.5}, ra = {self.ra}, dec = {self.dec}'
    def __getattr__(self, attribute):
        """Fallback lookup: expose per-band photometry dicts as attributes.

        Only called when normal attribute lookup fails.

        NOTE(review): several hazards are left unchanged here --
        (1) on a miss this returns the *string* ``'Attribute ... is not
        defined.'`` instead of raising ``AttributeError``, which breaks
        ``hasattr``/``getattr`` defaults; (2) if ``'data'`` is absent it
        implicitly returns ``None``; (3) ``return self.name`` etc. recurse
        infinitely if those attributes are themselves missing.
        """
        if attribute=='name':
            return self.name
        if attribute=='z':
            return self.z
        if attribute=='ra':
            return self.ra
        if attribute=='dec':
            return self.dec
        if 'data' in self.__dict__:
            if attribute in self.data:
                return(self.data[attribute])
            else:
                return f'Attribute {attribute} is not defined.'
    def __getstate__(self):
        """Return the attribute dict for pickling (by reference, not a copy)."""
        return self.__dict__
    def __setstate__(self, d):
        """Restore pickled state by merging *d* into the attribute dict."""
        self.__dict__.update(d)
def save_sn(self, name=None, path=None):
"""Saves a SN object into a pickle file
Parameters
----------
name : str, default ``None``
Name of the SN object. If no name is given, ``name`` is set to ``self.name``.
path: str, default ``None``
Path where to save the SN file given the ``name``.
"""
if name is None:
name = self.name
if path is None:
with open(f'{name}.pisco', 'wb') as pfile:
pickle.dump(self, pfile, pickle.HIGHEST_PROTOCOL)
else:
with open(os.path.join(path, name) + '.pisco', 'wb') as pfile:
pickle.dump(self, pfile, pickle.HIGHEST_PROTOCOL)
############################################################################
################################ Filters ###################################
############################################################################
def call_filters(self):
"""Obtains the transmission functions for the observed filters and the Bessell filters as well.
"""
path = piscola.__path__[0]
sed_df = self.sed['data']
sed_df = sed_df[sed_df.phase==0.0]
sed_wave, sed_flux = sed_df.wave.values, sed_df.flux.values
# add filters of the observed bands
for band in self.bands:
file = f'{band}.dat'
for root, dirs, files in os.walk(os.path.join(path, 'filters')):
if file in files:
wave0, transmission0 = np.loadtxt(os.path.join(root, file)).T
# linearly interpolate filters
wave = np.linspace(wave0.min(), wave0.max(), int(wave0.max()-wave0.min()))
transmission = np.interp(wave, wave0, transmission0, left=0.0, right=0.0)
# remove long tails of zero values on both edges
imin, imax = trim_filters(transmission)
wave, transmission = wave[imin:imax], transmission[imin:imax]
# retrieve response type; if none, assumed to be photon type
try:
with open(os.path.join(root, 'response_type.txt')) as resp_file:
for line in resp_file:
response_type = line.split()[0].lower()
except:
response_type = 'photon'
assert response_type in ['photon', 'energy'], f'"{response_type}" is not a valid response type \
("photon" or "energy") for {band} filter.'
self.filters[band] = {'wave':wave,
'transmission':transmission,
'eff_wave':calc_eff_wave(sed_wave, sed_flux, wave,
transmission, response_type=response_type),
'response_type':response_type}
# add Bessell filters
file_paths = [file for file in glob.glob(os.path.join(path, 'filters/Bessell/*.dat'))]
for file_path in file_paths:
band = os.path.basename(file_path).split('.')[0]
wave0, transmission0 = np.loadtxt(file_path).T
# linearly interpolate filters
wave = np.linspace(wave0.min(), wave0.max(), int(wave0.max()-wave0.min()))
transmission = np.interp(wave, wave0, transmission0, left=0.0, right=0.0)
# remove long tails of zero values on both edges
imin, imax = trim_filters(transmission)
wave, transmission = wave[imin:imax], transmission[imin:imax]
# retrieve response type; if none, assumed to be photon type
try:
with open(os.path.join(root, 'response_type.txt')) as resp_file:
for line in resp_file:
response_type = line.split()[0].lower()
except:
response_type = 'photon'
assert response_type in ['photon', 'energy'], f'"{response_type}" is not a valid response type \
("photon" or "energy") for {band} filter.'
self.filters[band] = {'wave':wave,
'transmission':transmission,
'eff_wave':calc_eff_wave(sed_wave, sed_flux, wave, transmission,
response_type=response_type),
'response_type':response_type}
    def add_filters(self, filter_list, response_type='photon'):
        """Add chosen filters. You can add a complete directory with filters in it or add filters given in a list.

        Parameters
        ----------
        filter_list : list
            List of filters.
        response_type : str, default ``photon``
            Response type of the filter. The options are: ``photon`` and ``energy``.

        NOTE(review): the ``response_type`` argument is overwritten inside the
        loop by whatever ``response_type.txt`` contains (or reset to
        ``'photon'`` when that file is unreadable), so the passed-in value is
        effectively ignored -- confirm whether the parameter should win.
        """
        path = piscola.__path__[0]
        sed_df = self.sed['data']
        # Effective wavelengths are computed against the phase-0 SED.
        sed_df = sed_df[sed_df.phase==0.0]
        sed_wave, sed_flux = sed_df.wave.values, sed_df.flux.values
        # Accept a single band name as well as a list.
        if type(filter_list)==str:
            filter_list = [filter_list]
        for band in filter_list:
            file = f'{band}.dat'
            for root, dirs, files in os.walk(os.path.join(path, 'filters')):
                if file in files:
                    wave0, transmission0 = np.loadtxt(os.path.join(root, file)).T
                    # linearly interpolate filters
                    wave = np.linspace(wave0.min(), wave0.max(), int(wave0.max() - wave0.min()))
                    transmission = np.interp(wave, wave0, transmission0, left=0.0, right=0.0)
                    # remove long tails of zero values on both edges
                    imin, imax = trim_filters(transmission)
                    wave, transmission = wave[imin:imax], transmission[imin:imax]
                    # retrieve response type; if none, assumed to be photon type
                    try:
                        with open(os.path.join(root, 'response_type.txt')) as resp_file:
                            for line in resp_file:
                                response_type = line.split()[0].lower()
                    except:
                        response_type = 'photon'
                    assert response_type in ['photon',
                                             'energy'], f'"{response_type}" is not a valid response type \
                    ("photon" or "energy") for {band} filter.'
                    self.filters[band] = {'wave': wave,
                                          'transmission': transmission,
                                          'eff_wave': calc_eff_wave(sed_wave, sed_flux, wave,
                                                                    transmission, response_type=response_type),
                                          'response_type': response_type}
    def plot_filters(self, filter_list=None, save=False):
        """Plot the filters' transmission functions.

        Each transmission curve is normalised to its own maximum.

        Parameters
        ----------
        filter_list : list, default ``None``
            List of bands. Defaults to ``self.bands``.
        save : bool, default ``False``
            If ``True``, saves the plot into a file with the name "filters.png".
        """
        if filter_list is None:
            filter_list = self.bands
        fig, ax = plt.subplots(figsize=(8,6))
        for band in filter_list:
            norm = self.filters[band]['transmission'].max()
            ax.plot(self.filters[band]['wave'], self.filters[band]['transmission']/norm, label=band)
        ax.set_xlabel(r'wavelength ($\AA$)', fontsize=18, family='serif')
        ax.set_ylabel('normalized response', fontsize=18, family='serif')
        ax.set_title(r'Filters response functions', fontsize=18, family='serif')
        ax.xaxis.set_tick_params(labelsize=15)
        ax.yaxis.set_tick_params(labelsize=15)
        ax.minorticks_on()
        ax.tick_params(which='major', length=6, width=1, direction='in', top=True, right=True)
        ax.tick_params(which='minor', length=3, width=1, direction='in', top=True, right=True)
        plt.legend(loc='upper right', bbox_to_anchor=(1.2, 1.0))
        if save:
            #fig.tight_layout()
            plt.savefig('filters.png')
        plt.show()
def calc_pivot(self, band_list=None):
"""Calculates the observed band closest to Bessell-B filter.
Parameters
----------
filter_list : list, default ``None``
List of bands.
"""
BessellB_eff_wave = self.filters['Bessell_B']['eff_wave']
if band_list is None:
band_list = self.bands
bands_eff_wave = np.array([self.filters[band]['eff_wave']/(1+self.z) for band in band_list])
idx = (np.abs(BessellB_eff_wave - bands_eff_wave)).argmin()
self.pivot_band = band_list[idx]
def remove_bands(self, bands, verbose=False):
    """Remove chosen bands together with the data in it.

    Parameters
    ----------
    bands : str or list
        Band string (for a single band) or list of bands to be removed.
    verbose : bool, default ``False``
        If ``True``, a warning is given when a band from ``bands`` is not found within the SN bands.
    """
    if isinstance(bands, str):
        bands = [bands]
    for band in bands:
        # Fix: 'verbose' was accepted and documented but never used; warn
        # about unknown bands before popping, as the docstring promises.
        if verbose and band not in self.bands:
            print(f'Warning: band "{band}" not found within the SN bands.')
        # pop with a default so removing an absent band is a no-op
        self.data.pop(band, None)
        self.filters.pop(band, None)
        if band in self.bands:
            self.bands.remove(band)
############################################################################
############################### SED template ###############################
############################################################################
def print_sed_templates(self):
    """Prints all the available SED templates in the ``templates`` directory.
    """
    # each sub-directory of <package>/templates is one SED template
    template_path = os.path.join(piscola.__path__[0], "templates")
    available = [name for name in os.listdir(template_path)
                 if os.path.isdir(os.path.join(template_path, name))]
    print('List of available SED templates:', available)
def set_sed_template(self, template='jla'):
    """Sets the SED template to be used for the mangling function.

    **Note:** use :func:`print_sed_templates()` to see a list of available templates.

    Parameters
    ----------
    template : str, default ``jla``
        Template name. E.g., ``jla``, ``conley09f``, etc.
    """
    # This can be modified to accept other templates
    path = piscola.__path__[0]
    file = os.path.join(path, f'templates/{template}/snflux_1a.dat')
    # 'sep=r"\s+"' is the documented equivalent of 'delim_whitespace=True',
    # which is deprecated in pandas 2.x and removed in pandas 3.0
    self.sed['data'] = pd.read_csv(file, sep=r'\s+', names=['phase', 'wave', 'flux'])
    self.sed['name'] = template
def set_eff_wave(self):
    """Sets the effective wavelength of each band using the current state of the SED."""
    sed_wave = self.sed['data']['wave']
    sed_flux = self.sed['data']['flux']
    for band_filter in self.filters.values():
        # effective wavelength is SED-weighted, so it must be refreshed
        # whenever the SED changes
        band_filter['eff_wave'] = calc_eff_wave(sed_wave,
                                                sed_flux,
                                                band_filter['wave'],
                                                band_filter['transmission'],
                                                band_filter['response_type'])
############################################################################
########################### Light Curves Data ##############################
############################################################################
def mask_data(self, band_list=None, mask_snr=True, snr=5, mask_phase=False, min_phase=-20, max_phase=40):
    """Mask the data with the given signal-to-noise (S/N) in flux space and/or given range of days with respect to
    B-band peak.

    **Note:** If the light curves were not previously fitted, the phases are taken with respect to the measurement
    with the largest flux.

    Parameters
    ----------
    band_list : list, default ``None``
        List of bands to plot. If ``None``, band list is set to ``self.bands``.
    mask_snr : bool, default ``True``
        If ``True``, keeps the flux values with S/N greater or equal to ``snr``.
    snr : float, default ``5``
        S/N threshold applied to mask data in flux space.
    mask_phase : bool, default ``False``
        If ``True``, keeps the flux values within the given phase range set by ``min_phase`` and ``max_phase``.
        An initial estimation of the peak is needed first (can be set manually).
    min_phase : int, default ``-20``
        Minimum phase limit applied to mask data.
    max_phase : int, default ``40``
        Maximum phase limit applied to mask data.
    """
    if band_list is None:
        band_list = self.bands

    def _apply_mask(band, mask):
        # keep only the selected epochs in every per-band data array
        for key in ['time', 'flux', 'flux_err', 'mag', 'mag_err']:
            self.data[band][key] = self.data[band][key][mask]

    empty_bands = []
    if mask_phase:
        if self.tmax:
            tmax = self.tmax
        else:
            # no peak estimate yet: fall back to the brightest epoch of the pivot band
            self.calc_pivot()
            peak_idx = np.argmax(self.data[self.pivot_band]['flux'])
            tmax = self.data[self.pivot_band]['time'][peak_idx]
        for band in band_list:
            observed_phase = self.data[band]['time'] - tmax
            # phase limits are time-dilated by (1+z)
            mask = np.where((observed_phase >= min_phase*(1+self.z)) &
                            (observed_phase <= max_phase*(1+self.z))
                            )
            _apply_mask(band, mask)
            if len(self.data[band]['flux']) == 0:
                empty_bands.append(band)
    if mask_snr:
        for band in band_list:
            mask = np.abs(self.data[band]['flux']/self.data[band]['flux_err']) >= snr
            _apply_mask(band, mask)
            if len(self.data[band]['flux']) == 0:
                empty_bands.append(band)
    # bands left with no data are dropped entirely
    self.remove_bands(empty_bands)
def plot_data(self, band_list=None, plot_type='flux', save=False, fig_name=None):
    """Plot the SN light curves.

    Negative fluxes are masked out if magnitudes are plotted.

    Parameters
    ----------
    band_list : list, default ``None``
        List of bands to plot. If ``None``, band list is set to ``self.bands``.
    plot_type : str, default ``flux``
        Type of value used for the data: either ``mag`` or ``flux``.
    save : bool, default ``False``
        If true, saves the plot into a file.
    fig_name : str, default ``None``
        Name of the saved plot. If None is used the name of the file will be '{``self.name``}_lcs.png'.
        Only works if ``save`` is set to ``True``.
    """
    assert (plot_type=='mag' or plot_type=='flux'), f'"{plot_type}" is not a valid plot type.'
    new_palette = [plt.get_cmap('Dark2')(i) for i in np.arange(8)] + [plt.get_cmap('Set1')(i) for i in np.arange(8)]
    if band_list is None:
        band_list = self.bands
    ZP = 27.5  # common zeropoint so all bands share one flux scale
    # shift in time for visualization purposes; use the FIRST band as reference
    # (previously self.bands[1], which raised an IndexError for single-band SNe)
    t = self.data[self.bands[0]]['time'].min()
    # round the offset down to a "nice" number by keeping the two leading digits
    tmax = str(t.astype(int))
    zeros = '0'*len(tmax[2:])
    t_off = int(tmax[:2] + zeros)
    # to set plot limits (always from self.bands so limits are stable
    # regardless of the subset of bands requested)
    if plot_type=='flux':
        plot_lim_vals = np.array([change_zp(self.data[band]['flux'], self.data[band]['zp'], ZP)
                                  for band in self.bands] + [0.0]
                                 , dtype="object")
        ymin_lim = np.hstack(plot_lim_vals).min()*0.9
        if ymin_lim < 0.0:
            ymin_lim *= 1.1/0.9  # there might be some "negative" fluxes sometimes
        ymax_lim = np.hstack(plot_lim_vals).max()*1.05
    elif plot_type=='mag':
        plot_lim_vals = [[np.nanmin(self.data[band]['mag']), np.nanmax(self.data[band]['mag'])]
                         for band in self.bands]
        plot_lim_vals = np.ndarray.flatten(np.array(plot_lim_vals))
        ymin_lim = np.nanmin(plot_lim_vals)*0.98
        ymax_lim = np.nanmax(plot_lim_vals)*1.02
    fig, ax = plt.subplots(figsize=(8,6))
    for i, band in enumerate(band_list):
        if plot_type=='flux':
            y_norm = change_zp(1.0, self.data[band]['zp'], ZP)
            time = np.copy(self.data[band]['time'])
            flux, err = np.copy(self.data[band]['flux']), np.copy(self.data[band]['flux_err'])
            flux, err = flux*y_norm, err*y_norm
            ax.errorbar(time-t_off, flux, err, fmt='o', mec='k', capsize=3, capthick=2, ms=8, elinewidth=3,
                        label=band, color=new_palette[i])
            ylabel = f'Flux (ZP = {ZP})'
        elif plot_type=='mag':
            ylabel = 'Apparent Magnitude'
            # negative fluxes have no magnitude counterpart
            mask = np.where(self.data[band]['flux'] > 0)
            time = self.data[band]['time'][mask]
            mag, err = self.data[band]['mag'][mask], self.data[band]['mag_err'][mask]
            ax.errorbar(time-t_off, mag, err, fmt='o', mec='k', capsize=3, capthick=2, ms=8, elinewidth=3,
                        label=band, color=new_palette[i])
    ax.set_ylabel(ylabel, fontsize=16, family='serif')
    ax.set_xlabel(f'Time - {t_off} [days]', fontsize=16, family='serif')
    ax.set_title(f'{self.name}\nz = {self.z:.5}', fontsize=18, family='serif')
    ax.minorticks_on()
    ax.tick_params(which='major', length=8, width=1, direction='in', top=True, right=True, labelsize=16)
    ax.tick_params(which='minor', length=4, width=1, direction='in', top=True, right=True, labelsize=16)
    ax.legend(fontsize=13)
    ax.set_ylim(ymin_lim, ymax_lim)
    if plot_type=='mag':
        plt.gca().invert_yaxis()
    if save:
        if fig_name is None:
            fig_name = f'{self.name}_lcs.png'
        plt.savefig(fig_name)
    plt.show()
def normalize_data(self):
    """This function is deprecated starting from v0.1.5 as it is now included in :func:`fit_lcs()`.

    **Note**: if you call this function, it will not change the results. It is just
    maintained for compatibility purposes, but might be removed in future versions.

    See :func:`_normalize_data()` for the original documentation.
    """
    # local import so the module-level import block is untouched
    import warnings
    # actively warn (instead of only documenting the deprecation) so callers
    # can migrate before the function is removed
    warnings.warn('normalize_data() is deprecated and might be removed in future versions; '
                  'normalization is now done internally by fit_lcs().',
                  DeprecationWarning, stacklevel=2)
    self._normalize_data()
def _normalize_data(self):
    """Normalizes the fluxes and zero-points (ZPs).

    Fluxes are converted to physical units by calculating the ZPs according to the
    magnitude system, for example: **AB**, **BD17** or **Vega**.
    """
    for band in self.bands:
        band_data = self.data[band]
        band_filter = self.filters[band]
        old_zp = band_data['zp']
        # ZP in the band's magnitude system, derived from the filter response
        new_zp = calc_zp(band_filter['wave'], band_filter['transmission'],
                         band_filter['response_type'], band_data['mag_sys'], band)
        # rescale fluxes (and errors) from the catalogue ZP to the physical one
        band_data['flux'] = change_zp(band_data['flux'], old_zp, new_zp)
        band_data['flux_err'] = change_zp(band_data['flux_err'], old_zp, new_zp)
        band_data['zp'] = new_zp
############################################################################
############################ Light Curves Fits #############################
############################################################################
def fit_lcs(self, kernel='matern52', kernel2='matern52', fit_mag=True, min_time_extrap=-3, max_time_extrap=5,
            min_wave_extrap=-200, max_wave_extrap=200):
    """Fits the data for each band using gaussian process.

    The time of rest-frame B-band peak luminosity is estimated by finding where the derivative is equal to zero.

    Parameters
    ----------
    kernel : str, default ``matern52``
        Kernel to be used in the **time**-axis when fitting the light curves with gaussian process. E.g.,
        ``matern52``, ``matern32``, ``squaredexp``.
    kernel2 : str, default ``matern52``
        Kernel to be used in the **wavelength**-axis when fitting the light curves with gaussian process. E.g.,
        ``matern52``, ``matern32``, ``squaredexp``.
    fit_mag : bool, default ``True``
        If ``True``, the data is fitted in magnitude space (this is recommended for 2D fits). Otherwise, the data is
        fitted in flux space.
    min_time_extrap : int or float, default ``-3``
        Number of days the light-curve fit is extrapolated in the time axis with respect to first epoch.
    max_time_extrap : int or float, default ``5``
        Number of days the light-curve fit is extrapolated in the time axis with respect to last epoch.
    min_wave_extrap : int or float, default ``-200``
        Number of angstroms the light-curve fit is extrapolated in the wavelength axis with respect to the bluest
        wavelength. This depends on the bluest filter.
    max_wave_extrap : int or float, default ``200``
        Number of angstroms the light-curve fit is extrapolated in the wavelength axis with respect to the reddest
        wavelength. This depends on the reddest filter.
    """
    ########################
    ####### GP Fit #########
    ########################
    self._normalize_data()
    self.calc_pivot()
    # flatten all bands into 1D arrays for a single 2D (time x wavelength) GP fit
    flux_array = np.hstack([self.data[band]['flux'] for band in self.bands])
    flux_err_array = np.hstack([self.data[band]['flux_err'] for band in self.bands])
    time_array = np.hstack([self.data[band]['time'] for band in self.bands])
    # every epoch is tagged with the effective wavelength of its band
    wave_array = np.hstack([[self.filters[band]['eff_wave']]*len(self.data[band]['time']) for band in self.bands])
    # edges to extrapolate in time and wavelength
    time_edges = np.array([time_array.min()+min_time_extrap, time_array.max()+max_time_extrap])
    bands_waves = np.hstack([self.filters[band]['wave'] for band in self.bands])
    # min_wave_extrap is negative, so it extends the blue edge
    bands_edges = np.array([bands_waves.min()+min_wave_extrap, bands_waves.max()+max_wave_extrap])
    if fit_mag:
        mask = flux_array > 0.0 # prevents nan values
        # ZPs are set to 0.0 to retrieve flux values after the GP fit
        mag_array, mag_err_array = flux2mag(flux_array[mask], 0.0, flux_err_array[mask])
        time_array, wave_array = time_array[mask], wave_array[mask]
        timeXwave, lc_mean, lc_std, gp_results = gp_2d_fit(time_array, wave_array, mag_array, mag_err_array,
                                                           kernel1=kernel, kernel2=kernel2,
                                                           x1_edges=time_edges, x2_edges=bands_edges)
        # convert the GP prediction back to flux space
        lc_mean, lc_std = mag2flux(lc_mean, 0.0, lc_std)
    else:
        timeXwave, lc_mean, lc_std, gp_results = gp_2d_fit(time_array, wave_array, flux_array, flux_err_array,
                                                           kernel1=kernel, kernel2=kernel2, x2_edges=bands_edges)
    self.lc_fits['timeXwave'], self.lc_fits['lc_mean'] = timeXwave, lc_mean
    self.lc_fits['lc_std'], self.lc_fits['gp_results'] = lc_std, gp_results
    ###############################
    ##### Estimate B-band Peak ####
    ###############################
    times, waves = timeXwave.T[0], timeXwave.T[1]
    # Bessell-B effective wavelength redshifted to the observer frame
    wave_ind = np.argmin(np.abs(self.filters['Bessell_B']['eff_wave']*(1+self.z) - waves))
    eff_wave = waves[wave_ind] # closest wavelength from the gp grid to the effective_wavelength*(1+z) of Bessell_B
    mask = waves==eff_wave
    time, flux, flux_err = times[mask], lc_mean[mask], lc_std[mask]
    try:
        # first detected peak of the GP light curve at the B-band wavelength
        peak_id = peak.indexes(flux, thres=.3, min_dist=len(time)//2)[0]
        self.tmax = self.tmax0 = np.round(time[peak_id], 2)
        phaseXwave = np.copy(timeXwave)
        phaseXwave.T[0] = (times - self.tmax)/(1+self.z)  # rest-frame phases
        self.lc_fits['phaseXwave'] = phaseXwave
    except:
        # peak.indexes found no peak (raises IndexError on [0])
        raise ValueError(f'Unable to obtain an initial estimation of B-band peak for {self.name}\
(poor peak coverage)')
    ##################################
    ## Save individual light curves ##
    ##################################
    phases = phaseXwave.T[0]
    for band in self.bands:
        wave_ind = np.argmin(np.abs(self.filters[band]['eff_wave'] - waves))
        eff_wave = waves[wave_ind] # closest wavelength from the gp grid to the effective wavelength of the band
        mask = waves==eff_wave
        time, phase, flux, flux_err = times[mask], phases[mask], lc_mean[mask], lc_std[mask]
        mag, mag_err = flux2mag(flux, self.data[band]['zp'], flux_err)
        self.lc_fits[band] = {'time':time, 'phase':phase, 'flux':flux, 'flux_err':flux_err,
                              'mag':mag, 'mag_err':mag_err}
        # calculate observed time and magnitude of peak for each band
        try:
            peak_id = peak.indexes(flux, thres=.3, min_dist=len(time)//2)[0]
            self.lc_fits[band]['tmax'] = np.round(time[peak_id], 2)
            self.lc_fits[band]['mmax'] = mag[peak_id]
        except:
            # no well-defined peak in this band (e.g. poor coverage)
            self.lc_fits[band]['tmax'] = self.lc_fits[band]['mmax'] = np.nan
def plot_fits(self, plot_together=False, plot_type='flux', save=False, fig_name=None):
    """Plots the light-curves fits results.

    Plots the observed data for each band together with the gaussian process fits. The initial B-band
    peak estimation is plotted. The final B-band peak estimation after light-curves corrections is
    also plotted if corrections have been applied.

    Parameters
    ----------
    plot_together : bool, default ``False``
        If ``False``, plots the bands separately. Otherwise, all bands are plotted together.
    plot_type : str, default ``flux``
        Type of value used for the data: either ``mag`` or ``flux``.
    save : bool, default ``False``
        If ``True``, saves the plot into a file.
    fig_name : str, default ``None``
        Name of the saved plot. If ``None`` is used the name of the file will be '{``self.name``}_lc_fits.png'.
        Only works if ``save`` is set to ``True``.
    """
    new_palette = [plt.get_cmap('Dark2')(i) for i in np.arange(8)] + [plt.get_cmap('Set1')(i) for i in np.arange(8)]
    ZP = 27.5  # zeropoint for normalising the flux for visualization purposes
    # shift in time for visualization purposes
    tmax = str(self.tmax.astype(int))
    zeros = '0'*len(tmax[2:])
    t_off = int(tmax[:2] + zeros)
    if plot_together:
        # to set plot limits
        if plot_type=='flux':
            plot_lim_vals = np.array([change_zp(self.data[band]['flux'], self.data[band]['zp'], ZP)
                                      for band in self.bands] + [0.0],
                                     dtype="object")
            ymin_lim = np.hstack(plot_lim_vals).min()*0.9
            if ymin_lim < 0.0:
                ymin_lim *= 1.1/0.9  # there might be some "negative" fluxes sometimes
            ymax_lim = np.hstack(plot_lim_vals).max()*1.05
        elif plot_type=='mag':
            plot_lim_vals = [[np.nanmin(self.data[band]['mag']), np.nanmax(self.data[band]['mag'])]
                             for band in self.bands]
            plot_lim_vals = np.ndarray.flatten(np.array(plot_lim_vals))
            ymin_lim = np.nanmin(plot_lim_vals)*0.98
            ymax_lim = np.nanmax(plot_lim_vals)*1.02
        fig, ax = plt.subplots(figsize=(8, 6))
        for i, band in enumerate(self.bands):
            # GP fits
            time = np.copy(self.lc_fits[band]['time'])
            flux, flux_err = np.copy(self.lc_fits[band]['flux']), np.copy(self.lc_fits[band]['flux_err'])
            mag, mag_err = np.copy(self.lc_fits[band]['mag']), np.copy(self.lc_fits[band]['mag_err'])
            # Data
            data_time = np.copy(self.data[band]['time'])
            data_flux, data_flux_err = np.copy(self.data[band]['flux']), np.copy(self.data[band]['flux_err'])
            data_mag, data_mag_err = np.copy(self.data[band]['mag']), np.copy(self.data[band]['mag_err'])
            if plot_type=='flux':
                y_norm = change_zp(1.0, self.data[band]['zp'], ZP)
                # Fix: the scaled errors were previously assigned to an unused
                # variable ('err'), so the shaded band below used UNscaled errors.
                # Now consistent with the per-band branch further down.
                flux, flux_err = flux*y_norm, flux_err*y_norm
                data_flux, data_flux_err = data_flux*y_norm, data_flux_err*y_norm
                ax.errorbar(data_time-t_off, data_flux, data_flux_err, fmt='o', mec='k', capsize=3, capthick=2,
                            ms=8, elinewidth=3, color=new_palette[i],label=band)
                ax.plot(time-t_off, flux,'-', color=new_palette[i], lw=2, zorder=16)
                ax.fill_between(time-t_off, flux-flux_err, flux+flux_err, alpha=0.5, color=new_palette[i])
                ax.set_ylabel(f'Flux (ZP = {ZP})', fontsize=16, family='serif')
            elif plot_type=='mag':
                ax.errorbar(data_time-t_off, data_mag, data_mag_err, fmt='o', mec='k', capsize=3, capthick=2, ms=8,
                            elinewidth=3, color=new_palette[i],label=band)
                ax.plot(time-t_off, mag,'-', color=new_palette[i], lw=2, zorder=16)
                ax.fill_between(time-t_off, mag-mag_err, mag+mag_err, alpha=0.5, color=new_palette[i])
                ax.set_ylabel(r'Apparent Magnitude', fontsize=16, family='serif')
        # initial (faint dashed) and final (solid dashed) B-band peak estimates
        ax.axvline(x=self.tmax0-t_off, color='k', linestyle='--', alpha=0.4)
        ax.axvline(x=self.tmax-t_off, color='k', linestyle='--')
        ax.minorticks_on()
        ax.tick_params(which='major', length=6, width=1, direction='in', top=True, right=True, labelsize=16)
        ax.tick_params(which='minor', length=3, width=1, direction='in', top=True, right=True, labelsize=16)
        ax.set_xlabel(f'Time - {t_off} [days]', fontsize=16, family='serif')
        ax.set_title(f'{self.name}\nz = {self.z:.5}', fontsize=18, family='serif')
        ax.legend(fontsize=13, loc='upper right')
        ax.set_ylim(ymin_lim, ymax_lim)
        if plot_type=='mag':
            plt.gca().invert_yaxis()
    # plot each band separately
    else:
        h = 3  # subplots per row
        v = math.ceil(len(self.bands) / h)
        fig = plt.figure(figsize=(15, 5*v))
        gs = gridspec.GridSpec(v , h)
        for i, band in enumerate(self.bands):
            j = math.ceil(i % h)
            k =i // h
            ax = plt.subplot(gs[k,j])
            time = np.copy(self.lc_fits[band]['time'])
            flux, flux_err = np.copy(self.lc_fits[band]['flux']), np.copy(self.lc_fits[band]['flux_err'])
            mag, mag_err = np.copy(self.lc_fits[band]['mag']), np.copy(self.lc_fits[band]['mag_err'])
            # Data
            data_time = np.copy(self.data[band]['time'])
            data_flux, data_flux_err = np.copy(self.data[band]['flux']), np.copy(self.data[band]['flux_err'])
            data_mag, data_mag_err = np.copy(self.data[band]['mag']), np.copy(self.data[band]['mag_err'])
            if plot_type=='flux':
                y_norm = change_zp(1.0, self.data[band]['zp'], ZP)
                flux, flux_err = flux*y_norm, flux_err*y_norm
                data_flux, data_flux_err = data_flux*y_norm, data_flux_err*y_norm
                ax.errorbar(data_time-t_off, data_flux, data_flux_err, fmt='o', color=new_palette[i],
                            capsize=3, capthick=2, ms=8, elinewidth=3, mec='k')
                ax.plot(time-t_off, flux,'-', lw=2, zorder=16, color=new_palette[i])
                ax.fill_between(time-t_off, flux-flux_err, flux+flux_err, alpha=0.5, color=new_palette[i])
            elif plot_type=='mag':
                ax.errorbar(data_time-t_off, data_mag, data_mag_err, fmt='o', color=new_palette[i],
                            capsize=3, capthick=2, ms=8, elinewidth=3, mec='k')
                ax.plot(time-t_off, mag,'-', lw=2, zorder=16, color=new_palette[i])
                ax.fill_between(time-t_off, mag-mag_err, mag+mag_err, alpha=0.5, color=new_palette[i])
                ax.invert_yaxis()
            ax.axvline(x=self.tmax0-t_off, color='k', linestyle='--', alpha=0.4)
            ax.axvline(x=self.tmax-t_off, color='k', linestyle='--')
            ax.set_title(f'{band}', fontsize=16, family='serif')
            ax.xaxis.set_tick_params(labelsize=15)
            ax.yaxis.set_tick_params(labelsize=15)
            ax.minorticks_on()
            ax.tick_params(which='major', length=6, width=1, direction='in', top=True, right=True)
            ax.tick_params(which='minor', length=3, width=1, direction='in', top=True, right=True)
        fig.text(0.5, 0.95, f'{self.name} (z = {self.z:.5})', ha='center', fontsize=20, family='serif')
        fig.text(0.5, 0.04, f'Time - {t_off} [days]', ha='center', fontsize=18, family='serif')
        if plot_type=='flux':
            fig.text(0.04, 0.5, f'Flux (ZP = {ZP})', va='center', rotation='vertical', fontsize=18, family='serif')
        elif plot_type=='mag':
            fig.text(0.04, 0.5, r'Apparent Magnitude', va='center',
                     rotation='vertical', fontsize=18, family='serif')
    if save:
        if fig_name is None:
            fig_name = f'{self.name}_lc_fits.png'
        plt.savefig(fig_name)
    plt.show()
############################################################################
######################### Light Curves Correction ##########################
############################################################################
def mangle_sed(self, min_phase=-15, max_phase=30, method='gp', kernel='squaredexp', linear_extrap=True,
               correct_extinction=True, scaling=0.86, reddening_law='fitzpatrick99', dustmaps_dir=None,
               r_v=3.1, ebv=None):
    """Mangles the SED with the given method to match the SN magnitudes.

    Parameters
    ----------
    min_phase : int, default ``-15``
        Minimum phase to mangle.
    max_phase : int, default ``30``
        Maximum phase to mangle.
    method : str, default ``gp``
        Method to estimate the mangling function. Either ``gp`` or ``spline``.
    kernel : str, default ``squaredexp``
        Kernel to be used for the gaussian process fit of the mangling function. E.g, ``matern52``,
        ``matern32``, ``squaredexp``.
    linear_extrap: bool, default ``True``
        Type of extrapolation for the edges. Linear if ``True``, free (gaussian process extrapolation) if ``False``.
    correct_extinction: bool, default ``True``
        Whether or not to correct for Milky Way extinction.
    scaling : float, default ``0.86``
        Calibration of the Milky Way dust maps. Either ``0.86``
        for the Schlafly & Finkbeiner (2011) recalibration or ``1.0`` for the original
        dust map of Schlegel, Finkbeiner & Davis (1998).
    reddening_law: str, default ``fitzpatrick99``
        Reddening law. The options are: ``ccm89`` (Cardelli, Clayton & Mathis 1989), ``odonnell94`` (O’Donnell 1994),
        ``fitzpatrick99`` (Fitzpatrick 1999), ``calzetti00`` (Calzetti 2000) and ``fm07``
        (Fitzpatrick & Massa 2007 with :math:`R_V` = 3.1.)
    dustmaps_dir : str, default ``None``
        Directory where the dust maps of Schlegel, Finkbeiner & Davis (1998) are found.
    r_v : float, default ``3.1``
        Total-to-selective extinction ratio (:math:`R_V`)
    ebv : float, default ``None``
        Colour excess (:math:`E(B-V)`). If given, this is used instead of the dust map value.
    """
    # integer phase grid requested by the user
    phases = np.arange(min_phase, max_phase+1, 1)
    # save user inputs for later (used when checking B-band peak estimation)
    self.user_input['mangle_sed'] = {'min_phase':min_phase, 'max_phase':max_phase, 'method':method, 'kernel':kernel,
                                     'linear_extrap':linear_extrap, 'correct_extinction':correct_extinction,
                                     'scaling':scaling, 'reddening_law':reddening_law, 'dustmaps_dir':dustmaps_dir,
                                     'r_v':r_v, 'ebv':ebv}
    lc_phases = self.lc_fits[self.pivot_band]['phase']
    ####################################
    ##### Calculate SED photometry #####
    ####################################
    sed_df = self.sed['data'].copy()
    # to match the available epochs from the lcs
    sed_df = sed_df[(lc_phases.min() <= sed_df.phase) & (sed_df.phase <= lc_phases.max())]
    sed_df = sed_df[sed_df.phase.isin(phases)] # to match the requested epochs
    # first redshift the SED ("move" it in z) and then apply extinction from MW only
    # NOTE: attribute assignment on existing DataFrame columns; sed_df is a copy
    # at this point, so the original template is not modified
    sed_df.wave, sed_df.flux = sed_df.wave.values*(1+self.z), sed_df.flux.values/(1+self.z)
    if correct_extinction:
        sed_df.flux = redden(sed_df.wave.values, sed_df.flux.values, self.ra, self.dec,
                             scaling, reddening_law, dustmaps_dir, r_v, ebv)
        if ebv is None:
            self.mw_ebv = calculate_ebv(self.ra, self.dec, scaling, dustmaps_dir) # calculates MW reddening
        else:
            self.mw_ebv = ebv
    bands2mangle = []
    # check which bands are in the wavelength range of the SED template
    for band in self.bands:
        filter_wave = self.filters[band]['wave']
        if (filter_wave.min() > sed_df.wave.values.min()) & (filter_wave.max() < sed_df.wave.values.max()):
            bands2mangle.append(band)
    self.sed_lcs = {band:{'flux':[], 'time':None, 'phase':None} for band in bands2mangle}
    sed_phases = sed_df.phase.unique()
    # calculate SED light curves by integrating through each filter, epoch by epoch
    for phase in sed_phases:
        phase_df = sed_df[sed_df.phase==phase]
        for band in bands2mangle:
            band_flux = integrate_filter(phase_df.wave.values, phase_df.flux.values, self.filters[band]['wave'],
                                         self.filters[band]['transmission'], self.filters[band]['response_type'])
            self.sed_lcs[band]['flux'].append(band_flux)
    for band in bands2mangle:
        self.sed_lcs[band]['flux'] = np.array(self.sed_lcs[band]['flux'])
        self.sed_lcs[band]['phase'] = sed_phases
        # observer-frame times of the SED epochs (time dilation + peak offset)
        self.sed_lcs[band]['time'] = sed_phases*(1+self.z) + self.tmax
    ###################################
    ####### set-up for mangling #######
    ###################################
    # find the fluxes at the exact SED phases; outside the fitted range the
    # interpolation returns 0.0
    obs_flux_dict = {band:np.interp(sed_phases, self.lc_fits[band]['phase'], self.lc_fits[band]['flux'],
                                    left=0.0, right=0.0) for band in bands2mangle}
    obs_err_dict = {band:np.interp(sed_phases, self.lc_fits[band]['phase'], self.lc_fits[band]['flux_err'],
                                   left=0.0, right=0.0) for band in bands2mangle}
    # observed / template flux ratios: what the mangling function has to match
    flux_ratios_dict = {band:obs_flux_dict[band]/self.sed_lcs[band]['flux'] for band in bands2mangle}
    flux_ratios_err_dict = {band:obs_err_dict[band]/self.sed_lcs[band]['flux'] for band in bands2mangle}
    wave_array = np.array([self.filters[band]['eff_wave'] for band in bands2mangle])
    bands_waves = np.hstack([self.filters[band]['wave'] for band in bands2mangle])
    # includes the edges of the reddest and bluest bands
    x_edges = np.array([bands_waves.min(), bands_waves.max()])
    ################################
    ########## mangle SED ##########
    ################################
    self.mangled_sed = pd.DataFrame(columns=['phase', 'wave', 'flux', 'flux_err'])
    for i, phase in enumerate(sed_phases):
        obs_fluxes = np.array([obs_flux_dict[band][i] for band in bands2mangle])
        obs_errs = np.array([obs_err_dict[band][i] for band in bands2mangle])
        flux_ratios_array = np.array([flux_ratios_dict[band][i] for band in bands2mangle])
        flux_ratios_err_array = np.array([flux_ratios_err_dict[band][i] for band in bands2mangle])
        phase_df = sed_df[sed_df.phase==phase]
        sed_epoch_wave, sed_epoch_flux = phase_df.wave.values, phase_df.flux.values
        # mangling routine including optimisation
        mangling_results = mangle(wave_array, flux_ratios_array, flux_ratios_err_array, sed_epoch_wave,
                                  sed_epoch_flux, obs_fluxes, obs_errs, bands2mangle, self.filters, method,
                                  kernel, x_edges, linear_extrap)
        # precision of the mangling function
        # (the comprehension's 'i' is scoped to the comprehension and does not
        # clobber the outer loop index)
        mag_diffs = {band:-2.5*np.log10(mangling_results['flux_ratios'][i]) if mangling_results['flux_ratios'][i]>0
                     else np.nan for i, band in enumerate(bands2mangle)}
        self.mangling_results.update({phase:mangling_results})
        self.mangling_results[phase].update({'mag_diff':mag_diffs})
        # save the SED phase info into a DataFrame
        mangled_sed = mangling_results['mangled_sed']
        mangled_wave = mangled_sed['wave']
        mangled_flux, mangled_flux_err = mangled_sed['flux'], mangled_sed['flux_err']
        phase_info = np.array([[phase]*len(mangled_wave), mangled_wave, mangled_flux, mangled_flux_err]).T
        phase_df = pd.DataFrame(data=phase_info, columns=['phase', 'wave', 'flux', 'flux_err'])
        self.mangled_sed = pd.concat([self.mangled_sed, phase_df]) # updated mangled SED for a single epoch
    # correct mangled SED for MW extinction first and then de-redshift it ("move" it back in z)
    self.corrected_sed = self.mangled_sed.astype('float64').copy()
    if correct_extinction:
        self.corrected_sed.flux = deredden(self.corrected_sed.wave.values, self.corrected_sed.flux.values,
                                           self.ra, self.dec, scaling,
                                           reddening_law, dustmaps_dir,
                                           r_v, ebv)
    self.corrected_sed.wave = self.corrected_sed.wave.values/(1+self.z)
    self.corrected_sed.flux = self.corrected_sed.flux.values*(1+self.z)
def plot_mangling_function(self, phase=0, mangling_function_only=False, verbose=True, save=False, fig_name=None):
    """Plot the mangling function for a given phase.

    Parameters
    ----------
    phase : int, default ``0``
        Phase to plot the mangling function. By default it plots the mangling function at B-band peak.
    mangling_function_only : bool, default ``False``
        If ``True``, only plots the mangling function, otherwise, plots the SEDs and filters as well
        (with scaled values).
    verbose : bool, default ``True``
        If ``True``, returns the difference between the magnitudes from the fits and the magnitudes from the
        mangled SED, for each of the bands.
    save : bool, default ``False``
        If true, saves the plot into a file.
    fig_name : str, default ``None``
        Name of the saved plot. If ``None`` is used the name of the file will be
        '{``self.name``}_mangling_phase{``phase``}.png'. Only works if ``save`` is set to ``True``.
    """
    assert (phase in self.mangling_results.keys()), f'A mangling function was not calculated for phase {phase}.'
    # unpack the mangling products stored for the requested phase
    man = self.mangling_results[phase]
    eff_waves = np.copy(man['init_flux_ratios']['waves'])
    init_flux_ratios = np.copy(man['init_flux_ratios']['flux_ratios'])
    opt_flux_ratios = np.copy(man['opt_flux_ratios']['flux_ratios'])
    obs_fluxes = np.copy(man['obs_band_fluxes']['fluxes'])
    sed_fluxes = np.copy(man['sed_band_fluxes']['fluxes'])
    x = np.copy(man['mangling_function']['waves'])
    y, yerr = np.copy(man['mangling_function']['flux_ratios']), np.copy(man['mangling_function']['flux_ratios_err'])
    mang_sed_wave, mang_sed_flux = man['mangled_sed']['wave'], man['mangled_sed']['flux']
    init_sed_wave, init_sed_flux = man['init_sed']['wave'], man['init_sed']['flux']
    kernel = man['kernel']
    bands = list(man['mag_diff'].keys())
    if mangling_function_only:
        fig, ax = plt.subplots(figsize=(8,6))
        ax2 = ax.twiny()  # top axis with rest-frame wavelengths
        # rescale the y-axis by the order of magnitude of the flux ratios
        exp = np.round(np.log10(init_flux_ratios.max()), 0)
        y_norm = 10**exp
        init_flux_ratios = init_flux_ratios/y_norm
        y, yerr = y/y_norm, yerr/y_norm
        opt_flux_ratios = opt_flux_ratios/y_norm
        ax.scatter(eff_waves, init_flux_ratios, marker='o', label='Initial values')
        ax.plot(x, y)
        ax.fill_between(x, y-yerr, y+yerr, alpha=0.5, color='orange')
        ax.scatter(eff_waves, opt_flux_ratios, marker='*', color='red', label='Optimized values')
        ax.set_xlabel(r'Observer-frame Wavelength [$\AA$]', fontsize=16, family='serif')
        ax.set_ylabel(r'(Flux$_{\rm Obs}$ / Flux$_{\rm Temp}) \times$ 10$^{%.0f}$'%exp, fontsize=16, family='serif')
        ax.minorticks_on()
        ax.tick_params(which='both', length=8, width=1, direction='in', right=True, labelsize=16)
        ax.tick_params(which='minor', length=4)
        ax.set_ylim(y.min()*0.95, y.max()*1.03)
        # rest-frame labels: same tick positions, de-redshifted values
        ax2.set_xticks(ax.get_xticks())
        ax2.set_xlim(ax.get_xlim())
        ax2.set_xticklabels((ax.get_xticks()/(1+self.z)).astype(int))
        ax2.minorticks_on()
        ax2.set_xlabel(r'Rest-frame Wavelength [$\AA$]', fontsize=16, family='serif')
        ax2.tick_params(which='both', length=8, width=1, direction='in', labelsize=16)
        ax2.tick_params(which='minor', length=4)
        # alternate the band labels above/below the markers ((-1)**i)
        for i, band in enumerate(bands):
            x1, y1 = ax.transLimits.transform((eff_waves[i], init_flux_ratios[i]))
            ax.text(x1, y1+(-1)**i*0.12, band, horizontalalignment='center',
                    verticalalignment='center', transform=ax.transAxes, fontsize=14)
        ax.legend(loc='upper right', fontsize=12)
    else:
        fig, ax = plt.subplots(figsize=(8,6))
        ax2 = ax.twiny()   # top axis: rest-frame wavelengths
        ax3 = ax.twinx()   # right axis: scaled SED fluxes and filters
        norm = 2 # for bands
        norm2 = 1 # for SEDs
        index = (len(bands)-1)//2 # index of the band to do relative comparison
        # wavelength-weighted mean fluxes used as normalisations
        init_norm = np.sum(init_sed_wave*init_sed_flux)/np.sum(init_sed_wave)
        init_sed_flux2 = init_sed_flux/init_norm
        sed_fluxes2 =sed_fluxes/init_norm
        obs_norm = np.sum(mang_sed_wave*mang_sed_flux)/np.sum(mang_sed_wave)
        mang_sed_flux2 = mang_sed_flux/obs_norm
        obs_fluxes2 = obs_fluxes/obs_norm
        bands_norm = init_sed_flux2.max()
        # filters
        for i, band in enumerate(bands):
            wave, trans = self.filters[band]['wave'], self.filters[band]['transmission']
            ax3.plot(wave, trans/trans.max()*bands_norm, color='k', alpha=0.4)
        # mangling function
        ax.plot(x, y/(obs_norm/init_norm), 'green')
        ax.fill_between(x, (y-yerr)/(obs_norm/init_norm), (y+yerr)/(obs_norm/init_norm), alpha=0.2, color='green')
        # markers at the effective wavelengths of the bands
        indexes = [np.argmin(np.abs(x-wave_val)) for wave_val in eff_waves]
        ax.plot(eff_waves, y[indexes]/(obs_norm/init_norm), 'sg', ms=8, mec='k')
        # initial sed and fluxes
        ax3.plot(init_sed_wave, init_sed_flux2, '--k', lw=3) # initial sed
        ax3.plot(eff_waves, sed_fluxes2, 'ok', ms=14, label='Initial SED values',
                 alpha=0.8, fillstyle='none', markeredgewidth=2) # initial sed fluxes
        # optimized sed and fluxes
        ax3.plot(mang_sed_wave, mang_sed_flux2, 'red', lw=3) # mangled sed
        ax3.plot(eff_waves, obs_fluxes2,'*r', ms=14, mec='k', label='Mangled SED values') # optimized fluxes
        ax.set_xlabel(r'Observer-frame Wavelength [$\AA$]', fontsize=16, family='serif')
        ax.set_ylabel(r'Scaled Mangling Function', fontsize=16, family='serif', color='g')
        ax.set_xlim(x.min(), x.max())
        ax.set_ylim((y/(obs_norm/init_norm)).min()*0.8, (y/(obs_norm/init_norm)).max()*1.2)
        ax.tick_params(which='both', length=8, width=1, direction='in', labelsize=16)
        ax.tick_params(which='minor', length=4)
        # colour the left axis green to match the mangling-function curve
        ax.tick_params(axis='y', which='both', colors='g')
        ax.spines['left'].set_color('g')
        ax2.set_xticks(ax.get_xticks())
        ax2.set_xlim(ax.get_xlim())
        ax2.set_xticklabels((ax.get_xticks()/(1+self.z)).astype(int))
        ax2.set_xlabel(r'Rest-frame Wavelength [$\AA$]', fontsize=16, family='serif')
        ax2.minorticks_on()
        ax2.tick_params(which='both', length=8, width=1, direction='in', labelsize=16)
        ax2.tick_params(which='minor', length=4)
        ax3.set_ylim(0, None)
        ax3.set_ylabel(r'Scaled Flux', fontsize=16, family='serif', rotation=270, labelpad=20)
        ax3.minorticks_on()
        ax3.tick_params(which='both', length=8, width=1, direction='in', labelsize=16)
        ax3.tick_params(which='minor', length=4)
        ax3.legend(loc='upper right', fontsize=12)
    if save:
        if fig_name is None:
            fig_name = f'{self.name}_mangling_phase{phase}.png'
        #fig.tight_layout()
        plt.savefig(fig_name)
    plt.show()
    if verbose:
        print(f'Mangling results - difference between mangled SED and "observed" magnitudes at phase {phase}:')
        for band, diff in man['mag_diff'].items():
            print(f'{band}: {np.round(diff, 4):.4f} [mags]')
def _calculate_corrected_lcs(self):
    """Calculates the SN light curves applying extinction and k-corrections.

    Synthetic photometry is computed from ``self.corrected_sed`` for every
    filter in ``self.filters`` and stored in ``self.corrected_lcs``
    (per band: phase, flux, flux_err, mag, mag_err, zp). A smooth 1D fit of
    each band is then stored in ``self.corrected_lcs_fit``.

    **Note:** this function is used inside :func:`calculate_lc_params()`
    """
    corrected_lcs = {}
    phases = self.corrected_sed.phase.unique()
    for band in self.filters.keys():
        band_flux, band_flux_err, band_phase = [], [], []
        for phase in phases:
            # SED slice for this epoch
            phase_df = self.corrected_sed[self.corrected_sed.phase==phase]
            phase_wave = phase_df.wave.values
            phase_flux = phase_df.flux.values
            phase_flux_err = phase_df.flux_err.values
            filter_data = self.filters[band]
            try:
                # synthetic photometry: integrate the SED through the filter
                band_flux.append(integrate_filter(phase_wave, phase_flux, filter_data['wave'],
                                                  filter_data['transmission'], filter_data['response_type']))
                band_flux_err.append(integrate_filter(phase_wave, phase_flux_err, filter_data['wave'],
                                                      filter_data['transmission'], filter_data['response_type']))
                band_phase.append(phase)
            except:
                # presumably the filter falls outside the SED's wavelength
                # coverage at this epoch; NOTE(review): the bare except also
                # hides any other error silently
                pass
        if 'Bessell_' in band:
            # rest-frame Bessell bands: zero point from the BD+17 standard
            zp = calc_zp(self.filters[band]['wave'], self.filters[band]['transmission'],
                         self.filters[band]['response_type'], 'BD17', band)
        else:
            # observed bands: reuse the zero point from the input data
            zp = self.data[band]['zp']
        if len(band_flux)>0:
            band_flux, band_flux_err, band_phase = np.array(band_flux), np.array(band_flux_err), np.array(band_phase)
            band_mag, band_mag_err = flux2mag(band_flux, zp, band_flux_err)
            corrected_lcs[band] = {'phase':band_phase, 'flux':band_flux, 'flux_err':band_flux_err,
                                   'mag':band_mag, 'mag_err':band_mag_err, 'zp':zp}
    self.corrected_lcs = corrected_lcs
    # simple, independent 1D fit to the corrected light curves
    corrected_lcs_fit = {}
    for band in corrected_lcs.keys():
        phase, zp = corrected_lcs[band]['phase'], corrected_lcs[band]['zp']
        flux, flux_err = corrected_lcs[band]['flux'], corrected_lcs[band]['flux_err']
        # GP fit with small artificial errors (flux*1e-3) so the fit tracks
        # the points closely; real errors are interpolated separately below
        phase_fit, flux_fit, _ = gp_lc_fit(phase, flux, flux*1e-3)
        flux_err_fit = np.interp(phase_fit, phase, flux_err, left=0.0, right=0.0)  # linear extrapolation of errors
        mag_fit, mag_err_fit = flux2mag(flux_fit, zp, flux_err_fit)
        corrected_lcs_fit[band] = {'phase':phase_fit, 'flux':flux_fit, 'flux_err':flux_err_fit,
                                   'mag':mag_fit, 'mag_err':mag_err_fit, 'zp':zp}
    self.corrected_lcs_fit = corrected_lcs_fit
def calculate_lc_params(self, maxiter=5):
    r"""Calculates the light-curves parameters.

    Estimation of B-band peak apparent magnitude (m :math:`_B^{max}`), stretch (:math:`\Delta` m :math:`_{15}(B)`)
    and colour (:math:`(B-V)^{max}`) parameters. An interpolation of the corrected light curves is done as well as
    part of this process.

    The rest-frame B-band peak is refined iteratively: while the fitted peak
    is offset from phase 0 by >= 0.2 days, ``self.tmax`` is shifted, the SED
    is re-mangled and the corrected light curves recomputed (at most
    ``maxiter`` times). Results are stored in ``self.lc_parameters``.

    Parameters
    ----------
    maxiter : int, default ``5``
        Maximum number of iteration of the correction process to estimate an accurate B-band peak.
    """
    self._calculate_corrected_lcs()
    ########################################
    ########### Check B-band max ###########
    ########################################
    bmax_needs_check = True
    iter = 0
    assert 'Bessell_B' in self.corrected_lcs_fit.keys(), 'The rest-frame B-band light curve was not calculated after\
corrections. Not enough wavelength coverage.'
    while bmax_needs_check:
        # estimate offset between inital B-band peak and "final" peak
        try:
            b_data = self.corrected_lcs['Bessell_B']
            b_phase, b_flux, b_err = b_data['phase'], b_data['flux'], b_data['flux_err']
            b_phase, b_flux, b_err = gp_lc_fit(b_phase, b_flux, b_err)  # smoother estimation of the peak
            peak_id = peak.indexes(b_flux, thres=.3, min_dist=len(b_phase)//2)[0]
            phase_offset = b_phase[peak_id] - 0.0
            self._phase_offset = np.round(phase_offset, 2)
        except:
            # NOTE(review): bare except — any failure in the GP fit or peak
            # search is treated as "no peak found"
            phase_offset = None
        assert phase_offset is not None, "The time of rest-frame B-band peak luminosity can not be calculated. \
Not enough time coverage."
        # error propagation
        try:
            b_data = self.corrected_lcs_fit['Bessell_B']
            b_phase, b_flux, b_err = b_data['phase'], b_data['flux'], b_data['flux_err']
            # Monte-Carlo: 1000 noisy realisations of the fitted light curve
            simulated_lcs = np.asarray([np.random.normal(flux, err, 1000) for flux, err in zip(b_flux, b_err)])
            pmax_list = []
            # loop to estimate uncertainty in tmax
            for lc_flux in simulated_lcs.T:
                # the LC needs to be smoothed as the "simulations" are "noisy"
                lc_flux = savgol_filter(lc_flux, 91, 3)
                idx_max = peak.indexes(lc_flux, thres=.3, min_dist=len(b_phase)//2)[0]
                pmax_list.append(b_phase[idx_max])
            pmax_array = np.array(pmax_list)
            # scatter of the simulated peak phases -> uncertainty on tmax
            self.tmax_err = pmax_array.std().round(2)
        except:
            self.tmax_err = np.nan
        if iter>=maxiter:
            break
        iter += 1
        # compare tmax from the corrected restframe B-band to the initial estimation
        if np.abs(phase_offset) >= 0.2:
            if np.abs(self.tmax0-self.tmax)/(1+self.z) >= 0.5:
                self.tmax = np.copy(self.tmax0)  # back to initial estimation - going too far
                # random nudge to escape a possible oscillation around tmax0
                phase_offset = random.uniform(-0.2, 0.2)
            # update phase of the light curves
            self.tmax = np.round(self.tmax - phase_offset*(1+self.z), 2)
            self.lc_fits['phaseXwave'].T[0] -= phase_offset
            for band in self.bands:
                self.lc_fits[band]['phase'] -= phase_offset
            # re-do mangling
            self.mangle_sed(**self.user_input['mangle_sed'])
            self._calculate_corrected_lcs()
        else:
            bmax_needs_check = False
    ########################################
    ### Calculate Light Curve Parameters ###
    ########################################
    bessell_b = 'Bessell_B'
    # B-band peak apparent magnitude
    b_phase = self.corrected_lcs[bessell_b]['phase']
    b_mag, b_mag_err = self.corrected_lcs[bessell_b]['mag'], self.corrected_lcs[bessell_b]['mag_err']
    id_bmax = list(b_phase).index(0)
    mb, mb_err = b_mag[id_bmax], b_mag_err[id_bmax]
    # Stretch parameter
    if 15 in b_phase:
        id_15 = list(b_phase).index(15)
        B15, B15_err = b_mag[id_15], b_mag_err[id_15]
        dm15 = B15 - mb
        dm15_err = np.sqrt(np.abs(mb_err**2 + B15_err**2))
    else:
        # +15 d epoch not covered -> stretch undefined
        dm15 = dm15_err = np.nan
    # Colour
    colour = colour_err = np.nan
    if 'Bessell_V' in self.corrected_lcs.keys():
        bessell_v = 'Bessell_V'
        if 0 in self.corrected_lcs[bessell_v]['phase']:
            v_phase = self.corrected_lcs[bessell_v]['phase']
            v_mag, v_mag_err = self.corrected_lcs[bessell_v]['mag'], self.corrected_lcs[bessell_v]['mag_err']
            id_v0 = list(v_phase).index(0)
            V0, V0_err = v_mag[id_v0], v_mag_err[id_v0]
            colour = mb - V0
            colour_err = np.sqrt(np.abs(mb_err**2 + V0_err**2))
    self.lc_parameters = {'mb':mb, 'mb_err':mb_err, 'dm15':dm15,
                          'dm15_err':dm15_err, 'colour':colour, 'colour_err':colour_err}
def display_results(self, band='Bessell_B', plot_type='mag', display_params=False, save=False, fig_name=None):
    """Displays the rest-frame light curve for the given band.

    Plots the rest-frame band light curve together with a gaussian fit to it. The parameters estimated with
    :func:`calculate_lc_params()` are shown as well.

    Parameters
    ----------
    band : str, default ``Bessell_B``
        Name of the band to be plotted.
    plot_type : str, default ``mag``
        Type of value used for the data: either ``mag`` or ``flux``.
    display_params : bool, default ``False``
        If ``True``, the light-curves parameters are displayed in the plot.
    save : bool, default ``False``
        If ``True``, saves the plot into a file.
    fig_name : str, default ``None``
        Name of the saved plot. If ``None`` is used the name of the file will be
        '{``self.name``}_restframe_{``band``}.png'. Only works if ``save`` is set to ``True``.
    """
    assert (plot_type=='mag' or plot_type=='flux'), f'"{plot_type}" is not a valid plot type.'
    # light-curve parameters previously computed by calculate_lc_params()
    mb = self.lc_parameters['mb']
    mb_err = self.lc_parameters['mb_err']
    dm15 = self.lc_parameters['dm15']
    dm15_err = self.lc_parameters['dm15_err']
    colour = self.lc_parameters['colour']
    colour_err = self.lc_parameters['colour_err']
    if band is None:
        band = 'Bessell_B'
    # copies so the unit scaling below does not mutate the stored arrays
    x = np.copy(self.corrected_lcs[band]['phase'])
    y = np.copy(self.corrected_lcs[band][plot_type])
    yerr = np.copy(self.corrected_lcs[band][plot_type+'_err'])
    zp = self.corrected_lcs[band]['zp']
    x_fit = np.copy(self.corrected_lcs_fit[band]['phase'])
    y_fit = np.copy(self.corrected_lcs_fit[band][plot_type])
    yerr_fit = np.copy(self.corrected_lcs_fit[band][plot_type+'_err'])
    if plot_type=='flux':
        # rescale fluxes to a common zero point for display
        ZP = 27.5
        y_norm = change_zp(1.0, zp, ZP)
        y *= y_norm
        yerr *= y_norm
        y_fit *= y_norm
        yerr_fit *= y_norm
    fig, ax = plt.subplots(figsize=(8,6))
    # data points with error bars
    ax.errorbar(x, y, yerr, fmt='-.o', color='k', ecolor='k', mec='k', capsize=3, capthick=2, ms=8,
                elinewidth=3, zorder=16)
    # smooth fit and its 1-sigma band
    ax.plot(x_fit, y_fit, 'c-', alpha=0.7)
    ax.fill_between(x_fit, y_fit+yerr_fit, y_fit-yerr_fit, alpha=0.5, color='c')
    if display_params:
        # annotate mB, dm15 and colour (skipping NaN values)
        ax.text(0.75, 0.9,r'm$_B^{\rm max}$=%.3f$\pm$%.3f'%(mb, mb_err), ha='center', va='center',
                fontsize=15, transform=ax.transAxes)
        if not np.isnan(dm15):
            ax.text(0.75, 0.8,r'$\Delta$m$_{15}$($B$)=%.3f$\pm$%.3f'%(dm15, dm15_err), ha='center', va='center',
                    fontsize=15, transform=ax.transAxes)
        if not np.isnan(colour):
            # move the colour label up when the dm15 slot is empty
            position = 0.7
            if np.isnan(dm15):
                position = 0.8
            ax.text(0.75, position,r'($B-V$)$_{\rm max}$=%.3f$\pm$%.3f'%(colour, colour_err), ha='center',
                    va='center', fontsize=15, transform=ax.transAxes)
    ax.set_xlabel(f'Phase with respect to B-band peak [days]', fontsize=16, family='serif')
    tmax_str = r't$_{\rm max}$'
    ax.set_title(f'{self.name}\n{band}, z={self.z:.5}, {tmax_str}={self.tmax:.2f}', fontsize=16, family='serif')
    if plot_type=='flux':
        ax.set_ylabel(f'Flux (ZP = {ZP})', fontsize=16, family='serif')
        ax.set_ylim(y.min()*0.90, y.max()*1.05)
    elif plot_type=='mag':
        ax.set_ylabel('Apparent Magnitude', fontsize=16, family='serif')
        ax.set_ylim(np.nanmin(y)*0.98, np.nanmax(y)*1.02)
        # magnitudes: brighter is smaller, so flip the y axis
        plt.gca().invert_yaxis()
    ax.minorticks_on()
    ax.tick_params(which='major', length=8, width=1, direction='in', top=True, right=True, labelsize=16)
    ax.tick_params(which='minor', length=4, width=1, direction='in', top=True, right=True, labelsize=16)
    if save:
        if fig_name is None:
            fig_name = f'{self.name}_restframe_{band}.png'
        #fig.tight_layout()
        plt.savefig(fig_name)
    plt.show()
def do_magic(self):
    """Run the full default correction pipeline in one call.

    Convenience wrapper for "quick" fits: normalizes the data, fits the
    light curves, mangles the SED and finally computes the light-curve
    parameters, all with default settings.
    """
    pipeline = (self.normalize_data,
                self.fit_lcs,
                self.mangle_sed,
                self.calculate_lc_params)
    for step in pipeline:
        step()
def export_fits(self, output_file=None):
    """Export the light-curve fits to a tab-separated text file.

    One row per (band, epoch); columns: time, phase, flux, flux_err,
    mag, mag_err, zp, band. Non-flux columns are rounded to 3 decimals
    for readability.

    Parameters
    ----------
    output_file : str, default ``None``
        Name of the output file. Defaults to '{self.name}_fits.dat'.
    """
    if output_file is None:
        output_file = f'{self.name}_fits.dat'
    columns = ['time', 'phase', 'flux', 'flux_err',
               'mag', 'mag_err', 'zp', 'band']
    band_frames = []
    for band in self.bands:
        band_info = self.lc_fits[band]
        # attach the band's zero point before deciding rounding precision
        band_info['zp'] = self.data[band]['zp']
        # round to 3 decimals, but leave flux columns untouched (99 ~ no-op)
        decimals = {}
        for key in band_info.keys():
            decimals[key] = 99 if 'flux' in key else 3
        band_info['band'] = band
        band_frames.append(pd.DataFrame(band_info).round(decimals)[columns])
    # single table with all bands, written as TSV
    pd.concat(band_frames).to_csv(output_file, sep='\t', index=False)
def export_restframe_lcs(self, output_file=None):
    """Export the corrected, rest-frame light curves to a TSV file.

    One row per (band, epoch); columns: phase, flux, flux_err, mag,
    mag_err, zp, band. Non-flux columns are rounded to 3 decimals.

    Parameters
    ----------
    output_file : str, default ``None``
        Name of the output file. Defaults to '{self.name}_restframe_lcs.dat'.
    """
    if output_file is None:
        output_file = f'{self.name}_restframe_lcs.dat'
    columns = ['phase', 'flux', 'flux_err',
               'mag', 'mag_err', 'zp', 'band']
    band_frames = []
    for band in self.bands:
        band_info = self.corrected_lcs[band]
        # round to 3 decimals, but leave flux columns untouched (99 ~ no-op)
        decimals = {}
        for key in band_info.keys():
            decimals[key] = 99 if 'flux' in key else 3
        band_info['band'] = band
        band_frames.append(pd.DataFrame(band_info).round(decimals)[columns])
    # single table with all bands, written as TSV
    pd.concat(band_frames).to_csv(output_file, sep='\t', index=False)
|
|
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
import pytest
import numpy as np
from ..common_utils import (
create_iris_data, create_lightgbm_classifier
)
from responsibleai import ModelAnalysis
class TestCounterfactualAdvancedFeatures(object):
    """Tests for advanced options of the counterfactual manager."""

    @pytest.mark.parametrize('vary_all_features', [True, False])
    @pytest.mark.parametrize('feature_importance', [True, False])
    def test_counterfactual_vary_features(
            self, vary_all_features, feature_importance):
        """Features excluded via ``features_to_vary`` must stay fixed in the
        generated counterfactuals, and varied features must actually change."""
        X_train, X_test, y_train, y_test, feature_names, _ = \
            create_iris_data()
        model = create_lightgbm_classifier(X_train, y_train)
        X_train['target'] = y_train
        X_test['target'] = y_test
        # only the first 10 test rows are analysed to keep the test fast
        model_analysis = ModelAnalysis(
            model=model,
            train=X_train,
            test=X_test.iloc[0:10],
            target_column='target',
            task_type='classification')
        if vary_all_features:
            features_to_vary = 'all'
        else:
            # restrict variation to the first feature only
            features_to_vary = [feature_names[0]]
        model_analysis.counterfactual.add(
            total_CFs=10, desired_class=2,
            features_to_vary=features_to_vary,
            feature_importance=feature_importance)
        model_analysis.counterfactual.compute()
        cf_obj = model_analysis.counterfactual.get()[0]
        for feature_name in feature_names:
            if not vary_all_features and feature_name != feature_names[0]:
                # frozen feature: every counterfactual keeps the query value
                expected_array = np.repeat(
                    [X_test.iloc[0:1][feature_name][0]],
                    cf_obj.cf_examples_list[0].final_cfs_df.shape[0])
                assert np.all(
                    np.isclose(
                        cf_obj.cf_examples_list[0].final_cfs_df[feature_name],
                        expected_array
                    )
                )
            else:
                # varied feature: at least one counterfactual must differ
                expected_array = np.repeat(
                    [X_test.iloc[0:1][feature_name][0]],
                    cf_obj.cf_examples_list[0].final_cfs_df.shape[0])
                assert not np.all(
                    np.isclose(
                        cf_obj.cf_examples_list[0].final_cfs_df[feature_name],
                        expected_array
                    )
                )

    @pytest.mark.parametrize('feature_importance', [True, False])
    def test_counterfactual_permitted_range(self, feature_importance):
        """Counterfactuals must respect ``permitted_range`` on the single
        varied feature while keeping all other features fixed."""
        X_train, X_test, y_train, y_test, feature_names, _ = \
            create_iris_data()
        model = create_lightgbm_classifier(X_train, y_train)
        X_train['target'] = y_train
        X_test['target'] = y_test
        model_analysis = ModelAnalysis(
            model=model,
            train=X_train,
            test=X_test.iloc[0:10],
            target_column='target',
            task_type='classification')
        model_analysis.counterfactual.add(
            total_CFs=10, desired_class=2,
            features_to_vary=[feature_names[0]],
            permitted_range={feature_names[0]: [2.0, 5.0]},
            feature_importance=feature_importance)
        model_analysis.counterfactual.compute()
        # TODO: The logic below needs to be made robust for gated tests
        cf_obj = model_analysis.counterfactual.get()[0]
        for feature_name in feature_names:
            if feature_name != feature_names[0]:
                # frozen feature: every counterfactual keeps the query value
                expected_array = np.repeat(
                    [X_test.iloc[0:1][feature_name][0]],
                    cf_obj.cf_examples_list[0].final_cfs_df.shape[0])
                assert np.all(
                    np.isclose(
                        cf_obj.cf_examples_list[0].final_cfs_df[feature_name],
                        expected_array
                    )
                )
            else:
                # varied feature: at least one counterfactual must differ
                expected_array = np.repeat(
                    [X_test.iloc[0:1][feature_name][0]],
                    cf_obj.cf_examples_list[0].final_cfs_df.shape[0])
                assert not np.all(
                    np.isclose(
                        cf_obj.cf_examples_list[0].final_cfs_df[feature_name],
                        expected_array
                    )
                )
                # assert np.any(
                #     cf_obj.cf_examples_list[0].final_cfs_df[feature_name] >=
                #     2.0)
                # assert np.any(
                #     cf_obj.cf_examples_list[0].final_cfs_df[feature_name] <=
                #     5.0)
|
|
"""
:author: Damian Eads, 2009
:license: modified BSD
"""
import numpy as np
def square(width, dtype=np.uint8):
    """
    Generates a flat, square-shaped structuring element. Every pixel
    along the perimeter has a chessboard distance no greater than radius
    (radius=floor(width/2)) pixels.

    Parameters
    ----------
    width : int
        The width and height of the square

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        A structuring element consisting only of ones, i.e. every
        pixel belongs to the neighborhood.
    """
    shape = (width, width)
    return np.ones(shape, dtype=dtype)
def rectangle(width, height, dtype=np.uint8):
    """
    Generates a flat, rectangular-shaped structuring element of a
    given width and height. Every pixel in the rectangle belongs
    to the neighboorhood.

    Parameters
    ----------
    width : int
        The width of the rectangle
    height : int
        The height of the rectangle

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        A structuring element consisting only of ones, i.e. every
        pixel belongs to the neighborhood.
    """
    shape = (width, height)
    return np.ones(shape, dtype=dtype)
def diamond(radius, dtype=np.uint8):
    """
    Generates a flat, diamond-shaped structuring element of a given
    radius. A pixel is part of the neighborhood (i.e. labeled 1) if
    the city block/manhattan distance between it and the center of the
    neighborhood is no greater than radius.

    Parameters
    ----------
    radius : int
        The radius of the diamond-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    # signed offsets from the center along one axis
    offsets = np.arange(-radius, radius + 1)
    # broadcast to a full grid of city-block distances from the center
    cityblock = np.abs(offsets[:, np.newaxis]) + np.abs(offsets[np.newaxis, :])
    return np.array(cityblock <= radius, dtype=dtype)
def disk(radius, dtype=np.uint8):
    """
    Generates a flat, disk-shaped structuring element of a given radius.
    A pixel is within the neighborhood if the euclidean distance between
    it and the origin is no greater than radius.

    Parameters
    ----------
    radius : int
        The radius of the disk-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    coords = np.linspace(-radius, radius, 2 * radius + 1)
    X, Y = np.meshgrid(coords, coords)
    # squared Euclidean distance from the center, compared against radius^2
    dist_sq = X ** 2 + Y ** 2
    return np.array(dist_sq <= radius ** 2, dtype=dtype)
def cube(width, dtype=np.uint8):
    """
    Generates a cube-shaped structuring element (the 3D equivalent of
    a square). Every pixel along the perimeter has a chessboard distance
    no greater than radius (radius=floor(width/2)) pixels.

    Parameters
    ----------
    width : int
        The width, height and depth of the cube

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        A structuring element consisting only of ones, i.e. every
        pixel belongs to the neighborhood.
    """
    shape = (width, width, width)
    return np.ones(shape, dtype=dtype)
def octahedron(radius, dtype=np.uint8):
    """
    Generates a octahedron-shaped structuring element of a given radius
    (the 3D equivalent of a diamond). A pixel is part of the
    neighborhood (i.e. labeled 1) if the city block/manhattan distance
    between it and the center of the neighborhood is no greater than
    radius.

    Parameters
    ----------
    radius : int
        The radius of the octahedron-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    # note that in contrast to diamond(), this method allows non-integer radii
    n = 2 * radius + 1
    # mgrid with a complex "step" yields n evenly spaced samples (like linspace)
    axis = np.mgrid[-radius:radius:n * 1j]
    # the city-block distance is separable, so build it by broadcasting
    cityblock = (np.abs(axis)[:, None, None]
                 + np.abs(axis)[None, :, None]
                 + np.abs(axis)[None, None, :])
    return np.array(cityblock <= radius, dtype=dtype)
def ball(radius, dtype=np.uint8):
    """
    Generates a ball-shaped structuring element of a given radius (the
    3D equivalent of a disk). A pixel is within the neighborhood if the
    euclidean distance between it and the origin is no greater than
    radius.

    Parameters
    ----------
    radius : int
        The radius of the ball-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    n = 2 * radius + 1
    # mgrid with a complex "step" yields n evenly spaced samples (like linspace)
    axis = np.mgrid[-radius:radius:n * 1j]
    sq = axis ** 2
    # squared Euclidean distance is separable, so build it by broadcasting
    dist_sq = sq[:, None, None] + sq[None, :, None] + sq[None, None, :]
    return np.array(dist_sq <= radius * radius, dtype=dtype)
def octagon(m, n, dtype=np.uint8):
    """
    Generates an octagon shaped structuring element with a given size of
    horizontal and vertical sides and a given height or width of slanted
    sides. The slanted sides are 45 or 135 degrees to the horizontal axis
    and hence the widths and heights are equal.

    Parameters
    ----------
    m : int
        The size of the horizontal and vertical sides.
    n : int
        The height or width of the slanted sides.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    from . import convex_hull_image
    side = m + 2 * n
    selem = np.zeros((side, side))
    # seed the octagon's vertices, then fill via the convex hull
    vertices = ((0, n), (n, 0),
                (0, m + n - 1), (m + n - 1, 0),
                (-1, n), (n, -1),
                (-1, m + n - 1), (m + n - 1, -1))
    for row, col in vertices:
        selem[row, col] = 1
    selem = convex_hull_image(selem).astype(dtype)
    return selem
def star(a, dtype=np.uint8):
    """
    Generates a star shaped structuring element that has 8 vertices and is an
    overlap of square of size `2*a + 1` with its 45 degree rotated version.
    The slanted sides are 45 or 135 degrees to the horizontal axis.

    Parameters
    ----------
    a : int
        Parameter deciding the size of the star structural element. The side
        of the square array returned is `2*a + 1 + 2*floor(a / 2)`.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    from . import convex_hull_image
    if a == 1:
        # degenerate case: a plain 3x3 block of ones
        return np.ones((3, 3), dtype)
    square_side = 2 * a + 1
    pad = a // 2
    total = square_side + 2 * pad
    # axis-aligned square centered in the padded frame
    axis_square = np.zeros((total, total))
    axis_square[pad: square_side + pad, pad: square_side + pad] = 1
    # 45-degree rotated square: seed its four vertices, fill via convex hull
    centre = (total - 1) // 2
    rotated = np.zeros((total, total))
    rotated[0, centre] = rotated[-1, centre] = rotated[centre, 0] = rotated[centre, -1] = 1
    rotated = convex_hull_image(rotated).astype(int)
    # union of the two squares gives the 8-pointed star
    combined = axis_square + rotated
    combined[combined > 0] = 1
    return combined.astype(dtype)
|
|
#!/usr/bin/env python3
# Tensorflow
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
import re
# import nltk
# import tqdm as tqdm
# import sqlite3
import pandas as pd
import numpy as np
from pandas import DataFrame
import string
#from nltk.corpus import stopwords
#stop = stopwords.words("english")
# from nltk.stem.porter import PorterStemmer
# english_stemmer=nltk.stem.SnowballStemmer('english')
# from nltk.tokenize import word_tokenize
from sklearn.metrics import accuracy_score, confusion_matrix,roc_curve, auc,classification_report, mean_squared_error, mean_absolute_error
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.svm import LinearSVC
from sklearn.neighbors import NearestNeighbors
from sklearn.linear_model import LogisticRegression
from sklearn import neighbors
from scipy.spatial.distance import cosine
from sklearn.feature_selection import SelectKBest
# from IPython.display import SVG
import pickle
import time
import gzip
import os
os.getcwd()
#Keras
# from tensorflow.python.keras import Sequential
# from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
# from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.keras import Sequential, Model
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Embedding
from tensorflow.python.keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.layers.core import Reshape, Dropout, Dense
from tensorflow.python.keras.layers.merge import Multiply, Dot, Concatenate
from tensorflow.python.keras.layers.embeddings import Embedding
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.utils.vis_utils import model_to_dot
#Pandas
import pandas as pd
def train(review_data):
    """Build, compile and train the embedding-based rating model.

    The model embeds user, item, price and title ids, concatenates the
    embeddings and feeds them through a small dense network to predict a
    score; it is trained for 2 epochs with MSE loss and a checkpoint on the
    best validation loss.

    NOTE(review): the ``review_data`` argument is never used in this
    function; it relies on module-level globals (``users``,
    ``items_reviewed``, ``price``, ``titles``, ``ratings_train``) that must
    be defined before calling — TODO confirm and refactor.

    Returns
    -------
    The trained Keras ``Model``.
    """
    ################################################################
    # declare input embeddings to the model
    #User input
    user_id_input = Input(shape=[1], name='user')
    #Item Input
    item_id_input = Input(shape=[1], name='item')
    price_id_input = Input(shape=[1], name='price')
    title_id_input = Input(shape=[1], name='title')
    # define the size of embeddings as a parameter
    # ****H: size_of_embedding - 5, 10 , 15, 20, 50
    size_of_embedding = 15
    user_embedding_size = size_of_embedding
    item_embedding_size = size_of_embedding
    price_embedding_size = size_of_embedding
    title_embedding_size = size_of_embedding
    # apply an embedding layer to all inputs
    # NOTE(review): input_dim comes from global tables' row counts — verify
    # these globals exist at call time
    user_embedding = Embedding(output_dim=user_embedding_size, input_dim=users.shape[0],
                               input_length=1, name='user_embedding')(user_id_input)
    item_embedding = Embedding(output_dim=item_embedding_size, input_dim=items_reviewed.shape[0],
                               input_length=1, name='item_embedding')(item_id_input)
    price_embedding = Embedding(output_dim=price_embedding_size, input_dim=price.shape[0],
                                input_length=1, name='price_embedding')(price_id_input)
    title_embedding = Embedding(output_dim=title_embedding_size, input_dim=titles.shape[0],
                                input_length=1, name='title_embedding')(title_id_input)
    # reshape from shape (batch_size, input_length,embedding_size) to (batch_size, embedding_size).
    user_vecs = Reshape([user_embedding_size])(user_embedding)
    item_vecs = Reshape([item_embedding_size])(item_embedding)
    price_vecs = Reshape([price_embedding_size])(price_embedding)
    title_vecs = Reshape([title_embedding_size])(title_embedding)
    ################################################################
    # Concatenate the item embeddings :
    item_vecs_complete = Concatenate()([item_vecs, price_vecs,title_vecs])
    # Concatenate user and item embeddings and use them as features for the neural network:
    input_vecs = Concatenate()([user_vecs, item_vecs_complete]) # can be changed by Multiply
    #input_vecs = Concatenate()([user_vecs, item_vecs]) # can be changed by Multiply
    # Multiply user and item embeddings and use them as features for the neural network:
    #input_vecs = Multiply()([user_vecs, item_vecs]) # can be changed by concat
    # Dropout is a technique where randomly selected neurons are ignored during training to prevent overfitting
    input_vecs = Dropout(0.1)(input_vecs)
    # Check one dense 128 or two dense layers (128,128) or (128,64) or three denses layers (128,64,32))
    # First layer
    # Dense(128) is a fully-connected layer with 128 hidden units.
    # Use rectified linear units (ReLU) f(x)=max(0,x) as an activation function.
    x = Dense(128, activation='relu')(input_vecs)
    x = Dropout(0.1)(x) # Add droupout or not # To improve the performance
    # Next Layers
    #x = Dense(128, activation='relu')(x) # Add dense again or not
    x = Dense(64, activation='relu')(x) # Add dense again or not
    x = Dropout(0.1)(x) # Add droupout or not # To improve the performance
    x = Dense(32, activation='relu')(x) # Add dense again or not #
    x = Dropout(0.1)(x) # Add droupout or not # To improve the performance
    # The output: a single linear unit predicting the score (regression)
    y = Dense(1)(x)
    ################################################################
    model = Model(inputs=[user_id_input,
                          item_id_input,
                          price_id_input,
                          title_id_input
                          ],
                  outputs=y)
    ################################################################
    # ****H: loss
    # ****H: optimizer
    model.compile(loss='mse',
                  optimizer="adam" )
    ################################################################
    save_path = "./"
    # NOTE(review): mytime is computed but unused — the timestamped modname
    # variant below is commented out
    mytime = time.strftime("%Y_%m_%d_%H_%M")
    # modname = 'dense_2_15_embeddings_2_epochs' + mytime
    modname = 'dense_2_15_embeddings_2_epochs'
    thename = save_path + '/' + modname + '.h5'
    # keep only the weights with the best validation loss
    mcheck = ModelCheckpoint(thename , monitor='val_loss', save_best_only=True)
    ################################################################
    # ****H: batch_size
    # ****H: epochs
    # ****H:
    # ****H:
    history = model.fit([ratings_train["user_id"],
                         ratings_train["item_id"],
                         ratings_train["price_id"],
                         ratings_train["title_id"]
                         ]
                        , ratings_train["score"]
                        , batch_size=64
                        , epochs=2
                        , validation_split=0.2
                        , callbacks=[mcheck]
                        , shuffle=True)
    print("MSE: ", history.history)
    return model
|
|
from rdkit import Chem
from functools import partial
from fuseprop import extract_subgraph
from .hypergraph import mol_to_hg
from GCN.feature_extract import feature_extractor
from copy import deepcopy
import numpy as np
class MolGraph():
    """Molecule wrapped together with its hypergraph representation.

    Atom ``i`` of ``mol`` corresponds to hyperedge ``'e{i}'`` of the
    hypergraph; traversal state is kept in ``'visited'``/``'NT'``
    hypergraph attributes.
    """

    def __init__(self, mol, is_subgraph=False, mapping_to_input_mol=None):
        # a subgraph must know how its atoms map back to the input molecule
        if is_subgraph:
            assert mapping_to_input_mol is not None
        self.mol = mol
        self.is_subgraph = is_subgraph
        self.hypergraph = mol_to_hg(mol, kekulize=True, add_Hs=False)
        self.mapping_to_input_mol = mapping_to_input_mol

    def get_visit_status_edge(self, edge):
        """Return the 'visited' flag stored on hyperedge ``edge``."""
        return self.hypergraph.edge_attr(edge)['visited']

    def get_visit_status_node(self, node):
        """Return the 'visited' flag stored on ``node``."""
        return self.hypergraph.node_attr(node)['visited']

    def get_visit_status_with_idx(self, atom_idx):
        """Return the 'visited' flag for the hyperedge of atom ``atom_idx``."""
        return self.get_visit_status_edge('e{}'.format(atom_idx))

    def set_visited(self, node_list, edge_list):
        """Mark all given nodes and hyperedges as visited."""
        for edge in edge_list:
            self.hypergraph.edge_attr(edge)['visited'] = True
        for node in node_list:
            self.hypergraph.node_attr(node)['visited'] = True

    def set_visit_status_with_idx(self, idx, visited):
        """Set the 'visited' flag for the hyperedge of atom ``idx``."""
        self.hypergraph.edge_attr('e{}'.format(idx))['visited'] = visited

    def set_NT_status_with_idx(self, idx, NT):
        """Set the non-terminal ('NT') flag for the hyperedge of atom ``idx``."""
        self.hypergraph.edge_attr('e{}'.format(idx))['NT'] = NT

    def get_all_visit_status(self):
        """Return the 'visited' flags of all atoms, in atom-index order."""
        return [self.get_visit_status_with_idx(i) for i in range(self.mol.GetNumAtoms())]

    def get_org_idx_in_input(self, idx):
        """Map atom index ``idx`` of this subgraph to its index in the input mol."""
        assert self.is_subgraph
        return self.mapping_to_input_mol.GetAtomWithIdx(idx).GetIntProp('org_idx')

    def get_org_node_in_input(self, node, subgraph):
        """Map ``node`` of ``subgraph``'s hypergraph to the corresponding node
        of this (input) graph's hypergraph.

        The node is recovered as the unique node shared by the two mapped
        hyperedges adjacent to it.

        Raises
        ------
        ValueError
            If the two mapped hyperedges do not share exactly one node,
            i.e. the subgraph-to-input mapping is inconsistent.
        """
        assert not self.is_subgraph
        adj_edges = list(subgraph.hypergraph.adj_edges(node))
        assert len(adj_edges) == 2
        org_edges = []
        for adj_edge_i in adj_edges:
            idx = int(adj_edge_i[1:])  # strip the leading 'e' to get the atom index
            org_idx = subgraph.get_org_idx_in_input(idx)
            org_edges.append('e{}'.format(org_idx))
        org_node = list(self.hypergraph.nodes_in_edge(org_edges[0]).intersection(self.hypergraph.nodes_in_edge(org_edges[1])))
        # Fix: the original dropped into the debugger (``import pdb;
        # pdb.set_trace()``) when this invariant failed, which hangs any
        # non-interactive run; raise a descriptive error instead.
        if len(org_node) != 1:
            raise ValueError(
                'expected exactly one node shared by edges {}, found {}'.format(
                    org_edges, len(org_node)))
        return org_node[0]

    def as_key(self):
        """Return a hashable :class:`MolKey` wrapping this molecule."""
        return MolKey(self.mol)

    def __eq__(self, another):
        # NOTE: compares plain (non-canonical) SMILES; canonicalization via
        # Chem.CanonSmiles was deliberately disabled upstream.
        # return hasattr(another, 'mol') and Chem.CanonSmiles(Chem.MolToSmiles(self.mol)) == Chem.CanonSmiles(Chem.MolToSmiles(another.mol))
        return hasattr(another, 'mol') and (Chem.MolToSmiles(self.mol)) == (Chem.MolToSmiles(another.mol))
class MolKey():
    """Hashable, equality-comparable key for a molecule.

    Two keys compare equal when their SMILES strings match, so keys can be
    used in dicts and sets to group identical molecules.
    """

    def __init__(self, mol):
        # accept either an already-wrapped MolGraph or a raw RDKit Mol
        if isinstance(mol, MolGraph):
            self.mol_graph = mol
            # NOTE: plain MolToSmiles is used on purpose; canonicalization
            # via Chem.CanonSmiles was disabled upstream
            self.sml = Chem.MolToSmiles(mol.mol)
            return
        if type(mol) == Chem.Mol:
            self.mol_graph = MolGraph(mol)
            self.sml = Chem.MolToSmiles(mol)
            return
        raise TypeError

    def __eq__(self, another):
        return hasattr(another, 'sml') and self.sml == another.sml

    def __hash__(self):
        return hash(self.sml)
class SubGraph(MolGraph):
    """A connected fragment of an input molecule.

    Attributes
    ----------
    subfrags : list
        Atom indices of the two sub-fragments joined at the connecting bond.
    """

    def __init__(self, mol, mapping_to_input_mol, subfrags):
        # mapping_to_input_mol carries 'org_idx' atom properties pointing
        # back to the parent molecule's atom indices
        super(SubGraph, self).__init__(
            mol, is_subgraph=True, mapping_to_input_mol=mapping_to_input_mol)
        assert type(subfrags) == list
        self.subfrags = subfrags
class InputGraph(MolGraph):
    """The full input molecule, tracked together with its evolving set of
    candidate subgraphs during rule extraction.

    Parameters
    ----------
    mol : Chem.Mol
        The input molecule.
    smiles : str
        SMILES string of the input molecule.
    init_subgraphs : list of MolGraph
        The initial subgraphs.
    subgraphs_idx : list of list of int
        Atom-index lists (into the input molecule), one per subgraph.
    GNN_model_path : str
        Path of the GNN checkpoint used for node-feature extraction.
    """
    def __init__(self, mol, smiles, init_subgraphs, subgraphs_idx, GNN_model_path):
        '''
        init_subgraphs: a list of MolGraph
        subgraph_idx: a list of atom idx list for each subgraph
        '''
        super(InputGraph, self).__init__(mol)
        self.subgraphs = init_subgraphs
        self.subgraphs_idx = subgraphs_idx
        self.GNN_model_path = GNN_model_path
        self.smiles = smiles
        # MolKey -> list of atom-index lists of structurally identical subgraphs.
        self.map_to_set = self.get_map_to_set()
        # Atom indices (in the input mol) currently marked as non-terminal.
        self.NT_atoms = set()
        # Production rules applied so far, with their indices.
        self.rule_list = []
        self.rule_idx_list = []
        # Monotonically increasing "watershed" level counter.
        self.water_level = 0
        # water_level -> nodes adjacent to (but outside) the edges flooded at that level.
        self.watershed_ext_nodes = {}
    def append_rule(self, rule, rule_idx):
        # Record one applied rule together with its index.
        self.rule_list.append(rule)
        self.rule_idx_list.append(rule_idx)
    def get_map_to_set(self):
        # Group the atom-index lists of subgraphs that share the same SMILES key.
        map_to_set = dict()
        for i, subgraph in enumerate(self.subgraphs):
            key_subgraph = MolKey(subgraph)
            if key_subgraph not in map_to_set:
                map_to_set[key_subgraph] = list()
            map_to_set[key_subgraph].append(self.subgraphs_idx[i])
        return map_to_set
    def get_nodes_feature(self, id):
        # Return the GNN feature vector of atom `id` (parameter name shadows
        # the builtin; kept for interface compatibility).
        # dimension order, N * dim_f
        # NOTE(review): a fresh feature_extractor is constructed on every call
        # and features are recomputed for the whole molecule -- confirm whether
        # this should be cached.
        self.feature_extractor = feature_extractor(self.GNN_model_path)
        nodes_feature = self.feature_extractor.extract(self.mol)
        return nodes_feature[id]
    def get_subg_feature_for_agent(self, subgraph):
        # Get feature vector for agent: mean of the node features over the
        # subgraph's atoms (order-invariant by construction).
        assert isinstance(subgraph, SubGraph)
        assert subgraph in self.subgraphs
        subfrags_feature = []
        nodes_feat = [self.get_nodes_feature(node_id).detach().cpu().numpy() for node_id in subgraph.subfrags]
        subfrags_feature = np.mean(nodes_feat, axis=0)
        return subfrags_feature  # TODO could modify # should be an order-invariant function
    def find_overlap(self, p_star_idx, subgraph_idx):
        # Return (True, union of the two index sets) if they share any atom,
        # otherwise (False, []).
        union = []
        rst = len(set(subgraph_idx) & set(p_star_idx)) != 0
        if rst:
            union = list(set(subgraph_idx) | set(p_star_idx))
        return rst, union
    def set_water_level(self, node_list, edge_list, water_level):
        # Stamp `water_level` on the given hypergraph edges and nodes, and
        # remember the nodes that touch those edges without being in node_list.
        ext_node_list = []
        for edge in edge_list:
            self.hypergraph.edge_attr(edge)['water_level'] = water_level
            for _node in self.hypergraph.nodes_in_edge(edge):
                if _node not in node_list:
                    ext_node_list.append(_node)
        for node in node_list:
            self.hypergraph.node_attr(node)['water_level'] = water_level
        # Each level may only be assigned once.
        assert water_level not in self.watershed_ext_nodes.keys()
        self.watershed_ext_nodes[water_level] = ext_node_list
    def update_visit_status(self, visited_list):
        # Mark the hypergraph edges 'e<i>' (and their induced minimal node set)
        # as visited.
        edge_list = ['e{}'.format(i) for i in visited_list]
        node_list = self.hypergraph.get_minimal_graph(edge_list)
        self.set_visited(node_list, edge_list)
    def update_NT_atoms(self, p_star_idx):
        # Re-derive the non-terminal atom set from the extracted subgraph:
        # atoms with map number 1 become NT, others are cleared.
        _, p_subg_mapped, _ = extract_subgraph(self.smiles, p_star_idx)
        for idx, atom in enumerate(p_subg_mapped.GetAtoms()):
            org_idx = p_subg_mapped.GetAtomWithIdx(idx).GetIntProp('org_idx')
            if atom.GetAtomMapNum() == 1:
                self.NT_atoms.add(org_idx)
            else:
                if org_idx in self.NT_atoms:
                    self.NT_atoms.remove(org_idx)
    def update_watershed(self, p_star_idx):
        # Flood the selected edges at the current water level, then advance it.
        edge_list = ['e{}'.format(i) for i in p_star_idx]
        node_list = self.hypergraph.get_minimal_graph(edge_list)
        self.set_water_level(node_list, edge_list, self.water_level)
        self.water_level += 1
    def is_candidate_subgraph(self, subg):
        # If `subg` matches (by SMILES key) any tracked subgraph, return
        # (True, matching subgraphs, their atom-index lists); else (False, [], []).
        if subg.as_key() in self.map_to_set.keys():
            subgraphs_idx = self.map_to_set[subg.as_key()]
            subgraphs = [self.subgraphs[self.subgraphs_idx.index(idx_list)] for idx_list in subgraphs_idx]
            return True, subgraphs, subgraphs_idx
        return False, [], []
    def merge_selected_subgraphs(self, action_list):
        """Merge the subgraphs selected by the 0/1 `action_list`.

        Selected subgraphs that overlap (transitively, via a union-find-like
        labeling) are fused into a single new SubGraph; non-selected subgraphs
        are kept as-is. Updates self.subgraphs / subgraphs_idx / map_to_set and
        returns the list of newly merged subgraphs (p*).
        """
        label_mapping = {}  # label -> serial number
        label_mapping_inv = {}  # serial number -> label
        label = 0
        selected_subg = []
        non_selected_subg = []
        p_star_list = []
        # Partition subgraphs into selected (action == 1) and non-selected.
        for i, action in enumerate(action_list):
            if action == 1:
                selected_subg.append((i, self.subgraphs[i], self.subgraphs_idx[i]))
            else:
                non_selected_subg.append((i, self.subgraphs[i], self.subgraphs_idx[i]))
        if len(selected_subg) == 1:
            # Nothing to merge; return the single selection unchanged.
            return [selected_subg[0][1]]
        else:
            # Pairwise overlap check: overlapping selections share a label.
            # NOTE(review): labels propagate pairwise in index order --
            # confirm transitive chains through a later subgraph are intended
            # to be handled by this ordering.
            for i in range(len(selected_subg)):
                for j in range(i+1, len(selected_subg)):
                    selected_subg_idx_i = selected_subg[i][2]
                    selected_subg_idx_j = selected_subg[j][2]
                    selected_snum_i = selected_subg[i][0]
                    selected_snum_j = selected_subg[j][0]
                    if selected_snum_i not in label_mapping_inv.keys():
                        label_mapping_inv[selected_snum_i] = label
                        label += 1
                    rst, _ = self.find_overlap(selected_subg_idx_i, selected_subg_idx_j)
                    if rst:
                        label_i = label_mapping_inv[selected_snum_i]
                        label_mapping_inv[selected_snum_j] = label_i
                    else:
                        label_mapping_inv[selected_snum_j] = label
            # Invert: label -> list of serial numbers carrying that label.
            for _key in label_mapping_inv.keys():
                _label = label_mapping_inv[_key]
                if _label not in label_mapping.keys():
                    label_mapping[_label] = [_key]
                else:
                    label_mapping[_label].append(_key)
            new_subgraphs = []
            new_subgraphs_idx = []
            # Build one merged SubGraph per label group, copying visit/NT status
            # from the input molecule.
            for _key in label_mapping.keys():
                new_subgraph_idx = set()
                for snum in label_mapping[_key]:
                    new_subgraph_idx = new_subgraph_idx | set(self.subgraphs_idx[snum])
                new_subgraph_idx = list(new_subgraph_idx)
                subfrags = deepcopy(new_subgraph_idx)
                _, new_subgraph_mapped, _ = extract_subgraph(self.smiles, new_subgraph_idx)
                new_subgraph = SubGraph(new_subgraph_mapped, mapping_to_input_mol=new_subgraph_mapped, subfrags=subfrags)
                for idx, atom in enumerate(new_subgraph.mol.GetAtoms()):
                    org_idx = new_subgraph.get_org_idx_in_input(idx)
                    new_subgraph.set_visit_status_with_idx(idx, self.get_visit_status_with_idx(org_idx))
                    if org_idx in self.NT_atoms:
                        new_subgraph.set_NT_status_with_idx(idx, True)
                for node in new_subgraph.hypergraph.nodes:
                    org_node = self.get_org_node_in_input(node, new_subgraph)
                    new_subgraph.hypergraph.node_attr(node)['visited'] = self.hypergraph.node_attr(org_node)['visited']
                new_subgraphs.append(new_subgraph)
                new_subgraphs_idx.append(new_subgraph_idx)
                p_star_list.append(new_subgraph)
            # Carry over the untouched, non-selected subgraphs.
            for non_selected_subg_i in non_selected_subg:
                new_subgraphs.append(non_selected_subg_i[1])
                new_subgraphs_idx.append(non_selected_subg_i[2])
            self.subgraphs = new_subgraphs
            self.subgraphs_idx = new_subgraphs_idx
            self.map_to_set = self.get_map_to_set()
            return p_star_list
    def update_subgraph(self, subg_idx):
        """Absorb the selected subgraph `subg_idx` into the tracked state.

        Updates visit status, NT atoms and the watershed, then replaces every
        remaining subgraph that overlaps `subg_idx` with its union (rebuilt via
        extract_subgraph, with statuses copied from the input molecule).
        """
        # Update visit_status and NT_atoms and watershed
        self.update_visit_status(subg_idx)
        self.update_NT_atoms(subg_idx)
        self.update_watershed(subg_idx)
        new_subgraphs = []
        new_subgraphs_idx = []
        for i, subg in enumerate(self.subgraphs):
            if self.subgraphs_idx[i] == subg_idx:
                # Drop the absorbed subgraph itself.
                continue
            else:
                rst, assemble = self.find_overlap(subg_idx, self.subgraphs_idx[i])
                if rst:
                    # Overlapping subgraph: rebuild as the union with subg_idx.
                    new_subgraph_idx = assemble
                    _, new_subgraph_mapped, _ = extract_subgraph(self.smiles, assemble)
                    subfrags = deepcopy(assemble)
                    new_subgraph = SubGraph(new_subgraph_mapped, mapping_to_input_mol=new_subgraph_mapped, subfrags=subfrags)
                    for idx, atom in enumerate(new_subgraph.mol.GetAtoms()):
                        org_idx = new_subgraph.get_org_idx_in_input(idx)
                        new_subgraph.set_visit_status_with_idx(idx, self.get_visit_status_with_idx(org_idx))
                        if org_idx in self.NT_atoms:
                            new_subgraph.set_NT_status_with_idx(idx, True)
                    for node in new_subgraph.hypergraph.nodes:
                        org_node = self.get_org_node_in_input(node, new_subgraph)
                        new_subgraph.hypergraph.node_attr(node)['visited'] = self.hypergraph.node_attr(org_node)['visited']
                    new_subgraphs.append(new_subgraph)
                    new_subgraphs_idx.append(new_subgraph_idx)
                else:
                    # Disjoint subgraph: keep unchanged.
                    new_subgraphs.append(subg)
                    new_subgraphs_idx.append(self.subgraphs_idx[i])
        self.subgraphs = new_subgraphs
        self.subgraphs_idx = new_subgraphs_idx
        self.map_to_set = self.get_map_to_set()
|
|
"""
Refin Ananda Putra
github.com/refinap
"""
#create network
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
import tensorflow as tf
from keras.models import Model, Sequential
from keras.layers import Input, Activation, Dense
from tensorflow.keras.optimizers import SGD
#generate data from -20, -19.5, ..., 20
#ambil data dari -20 sampe 20 dengan beda 0.5
train_x = np.arange(-20, 20, 0.25)
#hitung target : sqrt(2x^2 +1)
train_y = np.sqrt((2*train_x**2)+1)
#Architecture taht will use
inputs = Input(shape=(1,)) #1 input Node
h_layer = Dense(8, activation='relu')(inputs) #8 node ar Hidden layer 1 with ReLU activation
h_layer = Dense(4, activation='relu')(h_layer) #4 node ar Hidden layer 2 with ReLU activation
output = Dense(1, activation='linear')(h_layer) # i output node with Linear activation
model = Model(inputs=inputs, outputs=output)
#update Rule or optimizer (loss function)
sgd = SGD(lr=0.001)
#compile the model with mean squared eror loss
model.compile(optimizer=sgd, loss='mse')
#Model is already, we can training the data use fit methode
#train the network and save the weights after training
model.fit(train_x, train_y, batch_size=20, epochs=1000, verbose=1) #batch_size 20 (mibi-batch SGD), do 1000 epoch and
model.save_weights('weights.h5') #save all parameter (weight and bias) to file
#Training data prediction
#Prediction for number outside Training Data, 26, and will compare Training Data Prediction Result with Target
predict = model.predict(np.array([26]))
print('f(26) =' , predict)
predict_y = model.predict(train_x)
#Draw taget v prediction
plt.plot(train_x, train_y, 'r')
plt.plot(train_x, predict_y, 'b')
plt.show()
|
|
import numpy as np
from scipy.signal import windows
from ..optics import OpticalElement, LinearRetarder, Apodizer, AgnosticOpticalElement, make_agnostic_forward, make_agnostic_backward, Wavefront
from ..propagation import FraunhoferPropagator
from ..field import make_focal_grid, Field, field_dot
from ..aperture import circular_aperture
from ..fourier import FastFourierTransform, MatrixFourierTransform, FourierFilter
class VortexCoronagraph(OpticalElement):
	'''An optical vortex coronagraph.

	This :class:`OpticalElement` simulations the propagation of light through
	a vortex in the focal plane. To resolve the singularity of this vortex
	phase plate, a multi-scale approach is made. Discretisation errors made at
	a certain level are corrected by the next level with finer sampling.

	Parameters
	----------
	input_grid : Grid
		The grid on which the incoming wavefront is defined.
	charge : integer
		The charge of the vortex.
	lyot_stop : Field or OpticalElement
		The Lyot stop for the coronagraph. If it's a Field, it is converted to an
		OpticalElement for convenience. If this is None (default), then no Lyot stop is used.
	q : scalar
		The minimum number of pixels per lambda/D. The number of levels in the multi-scale
		Fourier transforms will be chosen to reach at least this number of samples. The required
		q for a high-accuracy vortex coronagraph depends on the charge of the vortex. For charge 2,
		this can be as low as 32, but for charge 8 you need ~1024. Lower values give higher performance
		as a smaller number of levels is needed, but increases the sampling errors near the singularity.
		Charges not divisible by four require a much lower q. The default (q=1024) is conservative in
		most cases.
	scaling_factor : scalar
		The fractional increase in spatial frequency sampling per level. Larger scaling factors
		require a smaller number of levels, but each level requires a slower Fourier transform.
		Factors of 2 or 4 usually perform the best.
	window_size : integer
		The size of the next level in the number of pixels on the current layer. Lowering this
		increases performance in exchange for accuracy. Values smaller than 4-8 are not recommended.
	'''
	def __init__(self, input_grid, charge, lyot_stop=None, q=1024, scaling_factor=4, window_size=32):
		self.input_grid = input_grid
		pupil_diameter = input_grid.shape * input_grid.delta
		# Accept a ready-made OpticalElement (anything with .forward), or wrap a
		# plain Field transmission into an Apodizer.
		if hasattr(lyot_stop, 'forward') or lyot_stop is None:
			self.lyot_stop = lyot_stop
		else:
			self.lyot_stop = Apodizer(lyot_stop)
		# Number of multi-scale levels needed so the finest level reaches
		# at least q samples per lambda/D (sampling starts at 2 and grows by
		# scaling_factor each level).
		levels = int(np.ceil(np.log(q / 2) / np.log(scaling_factor))) + 1
		qs = [2 * scaling_factor**i for i in range(levels)]
		num_airys = [input_grid.shape / 2]
		focal_grids = []
		self.focal_masks = []
		self.props = []
		# Each finer level only covers the window carved out of the previous one.
		for i in range(1, levels):
			num_airys.append(num_airys[i - 1] * window_size / (2 * qs[i - 1] * num_airys[i - 1]))
		for i in range(levels):
			q = qs[i]
			num_airy = num_airys[i]
			focal_grid = make_focal_grid(q, num_airy, pupil_diameter=pupil_diameter, reference_wavelength=1, focal_length=1)
			# Vortex phase ramp exp(i * charge * theta); zero out the central
			# sample to avoid the undefined phase at the singularity.
			focal_mask = Field(np.exp(1j * charge * focal_grid.as_('polar').theta), focal_grid)
			focal_mask *= 1 - circular_aperture(1e-9)(focal_grid)
			if i != levels - 1:
				# Cut a smooth (Tukey) window out of this level; the next,
				# finer level fills in the removed region.
				wx = windows.tukey(window_size, 1, False)
				wy = windows.tukey(window_size, 1, False)
				w = np.outer(wy, wx)
				w = np.pad(w, (focal_grid.shape - w.shape) // 2, 'constant').ravel()
				focal_mask *= 1 - w
			# Subtract what the coarser levels already contribute on this grid,
			# so the levels sum to the full mask without double counting.
			for j in range(i):
				fft = FastFourierTransform(focal_grids[j])
				mft = MatrixFourierTransform(focal_grid, fft.output_grid)
				focal_mask -= mft.backward(fft.forward(self.focal_masks[j]))
			if i == 0:
				# Coarsest level: apply the mask as a single Fourier filter.
				prop = FourierFilter(input_grid, focal_mask, q)
			else:
				# Finer levels: explicit propagation to their focal grid.
				prop = FraunhoferPropagator(input_grid, focal_grid)
			focal_grids.append(focal_grid)
			self.focal_masks.append(focal_mask)
			self.props.append(prop)

	def forward(self, wavefront):
		'''Propagate a wavefront through the vortex coronagraph.

		Parameters
		----------
		wavefront : Wavefront
			The wavefront to propagate. This wavefront is expected to be
			in the pupil plane.

		Returns
		-------
		Wavefront
			The Lyot plane wavefront.
		'''
		# Grids were built for wavelength 1; temporarily rescale the wavefront.
		wavelength = wavefront.wavelength
		wavefront.wavelength = 1

		# Sum the contribution of every level in the Lyot plane.
		for i, (mask, prop) in enumerate(zip(self.focal_masks, self.props)):
			if i == 0:
				# Level 0 is the Fourier filter: pupil -> pupil directly.
				lyot = Wavefront(prop.forward(wavefront.electric_field), input_stokes_vector=wavefront.input_stokes_vector)
			else:
				focal = prop(wavefront)
				focal.electric_field *= mask
				lyot.electric_field += prop.backward(focal).electric_field

		lyot.wavelength = wavelength
		wavefront.wavelength = wavelength

		if self.lyot_stop is not None:
			lyot = self.lyot_stop.forward(lyot)

		return lyot

	def backward(self, wavefront):
		'''Propagate backwards through the vortex coronagraph.

		This essentially is a forward propagation through a the same vortex
		coronagraph, but with the sign of the its charge flipped.

		Parameters
		----------
		wavefront : Wavefront
			The Lyot plane wavefront.

		Returns
		-------
		Wavefront
			The pupil-plane wavefront.
		'''
		if self.lyot_stop is not None:
			wavefront = self.lyot_stop.backward(wavefront)

		# Grids were built for wavelength 1; temporarily rescale the wavefront.
		wavelength = wavefront.wavelength
		wavefront.wavelength = 1

		for i, (mask, prop) in enumerate(zip(self.focal_masks, self.props)):
			if i == 0:
				pup = Wavefront(prop.backward(wavefront.electric_field), input_stokes_vector=wavefront.input_stokes_vector)
			else:
				focal = prop(wavefront)
				# Conjugate mask == vortex with opposite charge.
				focal.electric_field *= mask.conj()
				pup.electric_field += prop.backward(focal).electric_field

		pup.wavelength = wavelength
		wavefront.wavelength = wavelength

		return pup
class VectorVortexCoronagraph(AgnosticOpticalElement):
	'''An vector vortex coronagraph.

	This :class:`OpticalElement` simulations the propagation of light through
	a vector vortex in the focal plane. To resolve the singularity of this vortex
	phase plate, a multi-scale approach is made. Discretisation errors made at
	a certain level are corrected by the next level with finer sampling.

	Parameters
	----------
	charge : integer
		The charge of the vortex.
	lyot_stop : Field or OpticalElement
		The Lyot stop for the coronagraph. If it's a Field, it is converted to an
		OpticalElement for convenience. If this is None (default), then no Lyot stop is used.
	phase_retardation : scalar or function
		The phase retardation of the vector vortex plate, potentially as a
		function of wavelength. Changes of the phase retardation as a function
		of spatial position is not yet supported.
	q : scalar
		The minimum number of pixels per lambda/D. The number of levels in the multi-scale
		Fourier transforms will be chosen to reach at least this number of samples. The required
		q for a high-accuracy vortex coronagraph depends on the charge of the vortex. For charge 2,
		this can be as low as 32, but for charge 8 you need ~1024. Lower values give higher performance
		as a smaller number of levels is needed, but increases the sampling errors near the singularity.
		Charges not divisible by four require a much lower q. The default (q=1024) is conservative in
		most cases.
	scaling_factor : scalar
		The fractional increase in spatial frequency sampling per level. Larger scaling factors
		require a smaller number of levels, but each level requires a slower Fourier transform.
		Factors of 2 or 4 usually perform the best.
	window_size : integer
		The size of the next level in the number of pixels on the current layer. Lowering this
		increases performance in exchange for accuracy. Values smaller than 4-8 are not recommended.
	'''
	def __init__(self, charge, lyot_stop=None, phase_retardation=np.pi, q=1024, scaling_factor=4, window_size=32):
		self.charge = charge
		# Accept a ready-made OpticalElement (anything with .forward), or wrap
		# a plain Field transmission into an Apodizer.
		if hasattr(lyot_stop, 'forward') or lyot_stop is None:
			self.lyot_stop = lyot_stop
		else:
			self.lyot_stop = Apodizer(lyot_stop)
		self.phase_retardation = phase_retardation
		self.q = q
		self.scaling_factor = scaling_factor
		self.window_size = window_size
		AgnosticOpticalElement.__init__(self)

	def make_instance(self, instance_data, input_grid, output_grid, wavelength):
		# Build the per-(grid, wavelength) multi-scale Jones-matrix masks and
		# propagators; same level construction as VortexCoronagraph, but with
		# a LinearRetarder Jones matrix instead of a scalar phase mask.
		pupil_diameter = input_grid.shape * input_grid.delta
		levels = int(np.ceil(np.log(self.q / 2) / np.log(self.scaling_factor))) + 1
		qs = [2 * self.scaling_factor**i for i in range(levels)]
		num_airys = [input_grid.shape / 2]
		focal_grids = []
		instance_data.props = []
		instance_data.jones_matrices = []
		# Each finer level only covers the window carved out of the previous one.
		for i in range(1, levels):
			num_airys.append(num_airys[i - 1] * self.window_size / (2 * qs[i - 1] * num_airys[i - 1]))
		for i in range(levels):
			q = qs[i]
			num_airy = num_airys[i]
			focal_grid = make_focal_grid(q, num_airy, pupil_diameter=pupil_diameter, reference_wavelength=1, focal_length=1)
			# A half-charge rotation of the fast axis yields a charge-`charge`
			# geometric-phase vortex for the retarder.
			fast_axis_orientation = Field(self.charge / 2 * focal_grid.as_('polar').theta, focal_grid)
			# Retardation may depend on wavelength; evaluate it for this instance.
			retardance = self.evaluate_parameter(self.phase_retardation, input_grid, output_grid, wavelength)
			focal_mask_raw = LinearRetarder(retardance, fast_axis_orientation)
			jones_matrix = focal_mask_raw.jones_matrix
			# Zero out the central sample to avoid the singularity.
			jones_matrix *= 1 - circular_aperture(1e-9)(focal_grid)
			if i != levels - 1:
				# Cut a smooth (Tukey) window; the next finer level fills it in.
				wx = windows.tukey(self.window_size, 1, False)
				wy = windows.tukey(self.window_size, 1, False)
				w = np.outer(wy, wx)
				w = np.pad(w, (focal_grid.shape - w.shape) // 2, 'constant').ravel()
				jones_matrix *= 1 - w
			# Subtract the coarser levels' contribution on this grid.
			for j in range(i):
				fft = FastFourierTransform(focal_grids[j])
				mft = MatrixFourierTransform(focal_grid, fft.output_grid)
				jones_matrix -= mft.backward(fft.forward(instance_data.jones_matrices[j]))
			if i == 0:
				prop = FourierFilter(input_grid, jones_matrix, q)
			else:
				prop = FraunhoferPropagator(input_grid, focal_grid)
			focal_grids.append(focal_grid)
			instance_data.jones_matrices.append(jones_matrix)
			instance_data.props.append(prop)

	def get_input_grid(self, output_grid, wavelength):
		'''Get the input grid for a specified output grid and wavelength.

		This optical element only supports propagation to the same plane as
		its input.

		Parameters
		----------
		output_grid : Grid
			The output grid of the optical element.
		wavelength : scalar or None
			The wavelength of the outgoing light.

		Returns
		-------
		Grid
			The input grid corresponding to the output grid and wavelength combination.
		'''
		return output_grid

	def get_output_grid(self, input_grid, wavelength):
		'''Get the output grid for a specified input grid and wavelength.

		This optical element only supports propagation to the same plane as
		its input.

		Parameters
		----------
		input_grid : Grid
			The input grid of the optical element.
		wavelength : scalar or None
			The wavelength of the incoming light.

		Returns
		-------
		Grid
			The output grid corresponding to the input grid and wavelength combination.
		'''
		return input_grid

	@make_agnostic_forward
	def forward(self, instance_data, wavefront):
		'''Propagate a wavefront through the vortex coronagraph.

		Parameters
		----------
		wavefront : Wavefront
			The wavefront to propagate. This wavefront is expected to be
			in the pupil plane.

		Returns
		-------
		Wavefront
			The Lyot plane wavefront.
		'''
		# Grids were built for wavelength 1; temporarily rescale the wavefront.
		wavelength = wavefront.wavelength
		wavefront.wavelength = 1

		for i, (jones_matrix, prop) in enumerate(zip(instance_data.jones_matrices, instance_data.props)):
			if i == 0:
				# Promote an unpolarized wavefront so the Jones matrix applies.
				if not wavefront.is_polarized:
					wf = Wavefront(wavefront.electric_field, input_stokes_vector=(1, 0, 0, 0))
				else:
					wf = wavefront
				lyot = Wavefront(prop.forward(wf.electric_field), input_stokes_vector=wf.input_stokes_vector)
			else:
				focal = prop(wavefront)
				if not focal.is_polarized:
					focal = Wavefront(focal.electric_field, input_stokes_vector=(1, 0, 0, 0))
				focal.electric_field = field_dot(jones_matrix, focal.electric_field)
				# NOTE(review): i != 0 inside this branch, so the `if i == 0`
				# arm below is unreachable dead code; only the `else` executes.
				if i == 0:
					lyot = prop.backward(focal)
				else:
					lyot.electric_field += prop.backward(focal).electric_field

		lyot.wavelength = wavelength
		wavefront.wavelength = wavelength

		if self.lyot_stop is not None:
			lyot = self.lyot_stop.forward(lyot)

		return lyot

	@make_agnostic_backward
	def backward(self, instance_data, wavefront):
		'''Propagate backwards through the vortex coronagraph.

		This essentially is a forward propagation through a the same vortex
		coronagraph, but with the sign of the its charge flipped.

		Parameters
		----------
		wavefront : Wavefront
			The Lyot plane wavefront.

		Returns
		-------
		Wavefront
			The pupil-plane wavefront.
		'''
		if self.lyot_stop is not None:
			wavefront = self.lyot_stop.backward(wavefront)

		# Grids were built for wavelength 1; temporarily rescale the wavefront.
		wavelength = wavefront.wavelength
		wavefront.wavelength = 1

		for i, (jones_matrix, prop) in enumerate(zip(instance_data.jones_matrices, instance_data.props)):
			if i == 0:
				pup = Wavefront(prop.backward(wavefront.electric_field))
			else:
				focal = prop(wavefront)
				# Conjugate-transposed retarder == vortex of opposite charge.
				focal.electric_field = field_dot(jones_matrix.conj(), focal.electric_field)
				pup.electric_field += prop.backward(focal).electric_field

		pup.wavelength = wavelength
		wavefront.wavelength = wavelength

		return pup
def make_ravc_masks(central_obscuration, charge=2, pupil_diameter=1, lyot_undersize=0):
	'''Make field generators for the pupil and Lyot-stop masks for a
	ring apodized vortex coronagraph.

	The formulas were implemented according to [Mawet2013]_.

	.. [Mawet2013] Dimitri Mawet et al. 2013 "Ring-apodized vortex coronagraphs for obscured telescopes. I. Transmissive
		ring apodizers" The Astrophysical Journal Supplement Series 209.1 (2013): 7

	Parameters
	----------
	central_obscuration : scalar
		The diameter of the central obscuration.
	charge : integer
		The charge of the vortex coronagraph used. Only charges 2 and 4 are supported.
	pupil_diameter : scalar
		The diameter of the pupil.
	lyot_undersize : scalar
		The fraction of the pupil diameter to which to undersize the Lyot stop.

	Returns
	-------
	pupil_mask : Field generator
		The complex transmission of the pupil mask.
	lyot_mask : Field generator
		The complex transmission of the Lyot-stop mask.

	Raises
	------
	NotImplementedError
		If charge is neither 2 nor 4.
	'''
	obscuration_ratio = central_obscuration / pupil_diameter
	obscuration = circular_aperture(central_obscuration)
	full_pupil = circular_aperture(pupil_diameter)

	if charge == 2:
		# Single apodizing ring: transmission t1 outside radius R1, 1 inside.
		t1 = 1 - 0.25 * (obscuration_ratio**2 + obscuration_ratio * np.sqrt(obscuration_ratio**2 + 8))
		ring_radius = obscuration_ratio / np.sqrt(1 - t1)
		inner_ring = circular_aperture(pupil_diameter * ring_radius)

		def pupil_mask(grid):
			return (full_pupil(grid) * t1 + inner_ring(grid) * (1 - t1)) * (1 - obscuration(grid))

		lyot_inner = circular_aperture(pupil_diameter * ring_radius + pupil_diameter * lyot_undersize)
		lyot_outer = circular_aperture(pupil_diameter * (1 - lyot_undersize))

		def lyot_stop(grid):
			return lyot_outer(grid) - lyot_inner(grid)
	elif charge == 4:
		# Two apodizing rings with transmissions t1 (inner) and t2 (outer).
		r1 = np.sqrt(np.sqrt(obscuration_ratio**2 * (obscuration_ratio**2 + 4)) - 2 * obscuration_ratio**2)
		r2 = np.sqrt(r1**2 + obscuration_ratio**2)
		t1 = 0
		t2 = (r1**2 - obscuration_ratio**2) / (r1**2 + obscuration_ratio**2)

		ring1 = circular_aperture(pupil_diameter * r1)
		ring2 = circular_aperture(pupil_diameter * r2)

		def pupil_mask(grid):
			return (full_pupil(grid) * t2 + ring2(grid) * (t1 - t2) + ring1(grid) * (1 - t1)) * (1 - obscuration(grid))

		lyot_inner = circular_aperture(pupil_diameter * r2 + pupil_diameter * lyot_undersize)
		lyot_outer = circular_aperture(pupil_diameter * (1 - lyot_undersize))

		def lyot_stop(grid):
			return lyot_outer(grid) - lyot_inner(grid)
	else:
		raise NotImplementedError()

	return pupil_mask, lyot_stop
def get_ravc_planet_transmission(central_obscuration_ratio, charge=2):
	'''Get the planet transmission for a ring-apodized vortex coronagraph.

	The formulas were implemented according to [Mawet2013]_.

	.. [Mawet2013] Dimitri Mawet et al. 2013 "Ring-apodized vortex coronagraphs for obscured telescopes. I. Transmissive
		ring apodizers" The Astrophysical Journal Supplement Series 209.1 (2013): 7

	Parameters
	----------
	central_obscuration_ratio : scalar
		The ratio of the central obscuration diameter and the pupil diameter.
	charge : integer
		The charge of the vortex coronagraph used. Only charges 2 and 4 are supported.

	Returns
	-------
	scalar
		The intensity transmission for a sufficiently off-axis point source
		for the ring-apodized vortex coronagraph. Point sources close to the vortex
		singularity will be lower in intensity.

	Raises
	------
	NotImplementedError
		If charge is neither 2 nor 4.
	'''
	r0 = central_obscuration_ratio

	if charge == 2:
		# Optimal single-ring apodizer transmission and ring radius.
		ring_transmission = 1 - 0.25 * (r0**2 + r0 * np.sqrt(r0**2 + 8))
		ring_radius = r0 / np.sqrt(1 - ring_transmission)
		return ring_transmission**2 * (1 - ring_radius**2) / (1 - r0**2)

	if charge == 4:
		# Two-ring apodizer; only the outer-ring transmission t2 matters here.
		r1 = np.sqrt(np.sqrt(r0**2 * (r0**2 + 4)) - 2 * r0**2)
		r2 = np.sqrt(r1**2 + r0**2)
		t2 = (r1**2 - r0**2) / (r1**2 + r0**2)
		return t2**2 * (1 - r2**2) / (1 - r0**2)

	raise NotImplementedError()
|
|
from copy import copy
from itertools import count
import click
import matplotlib
import matplotlib.cm
import numpy as np
import pandas as pd
import xarray as xr
from lib import click_utils
import plot
from visualization.style import set_style
gs_labels = ["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X"]
gs_label_props = dict(
weight='bold',
bbox=dict(boxstyle="circle,pad=0.15",
facecolor='#333333',
edgecolor='none'),
color='white',
fontsize=9,
)
def wf_plot(vals, highlight, ax, ylabel="", yscale="linear", ylim=None,
            xbaseline=None, reverse=False):
    """Draw a sorted 'waterfall' plot of gene-set values on *ax*.

    Parameters
    ----------
    vals : array-like with a 'gene_set' coordinate (xarray-style)
        One value per gene set; plotted sorted against percentile position.
    highlight : iterable of str
        Gene-set names to mark with green vertical lines and circled labels.
    ax : matplotlib Axes
        Target axes.
    ylabel : str
        Shown as the axes *title* (not the y-axis label).
    yscale : {'linear', 'log', 'mlog10'}
        'mlog10' means log scale with an inverted y-axis.
    ylim : tuple or None
        Optional y-axis limits.
    xbaseline : scalar or None
        Baseline of the filled area; defaults to max(vals) on log scale,
        otherwise 0.
    reverse : bool
        Sort descending instead of ascending.
    """
    highlight = list(highlight)
    # Sort values (descending when reverse=True); x is the percentile position.
    if reverse:
        vals_order = np.argsort(-vals.values)
    else:
        vals_order = np.argsort(vals.values)
    vals = vals[vals_order]
    x_grid = np.arange(len(vals)) / len(vals) * 100
    # Locate the highlighted gene sets in the sorted order.
    hl_mask = np.isin(vals['gene_set'], highlight)
    x_hl = x_grid[hl_mask]
    vals_hl = vals[hl_mask]
    genesets_hl = vals['gene_set'][hl_mask]
    # Index into gs_labels: position of each highlighted set in `highlight`.
    genesets_idx = [highlight.index(gs) for gs in genesets_hl.values]
    y_invert = False
    if yscale == 'mlog10':
        # 'mlog10' is implemented as a log scale with the axis flipped.
        yscale = 'log'
        y_invert = True
    if xbaseline is None:
        if yscale == 'log':
            xbaseline = np.max(vals)
        else:
            xbaseline = 0
    ax.set_yscale(yscale)
    if y_invert:
        ax.invert_yaxis()
    # Grey filled waterfall plus green markers for highlighted sets.
    ax.fill_between(x_grid, xbaseline, vals, facecolors='#777777',
                    step='mid', edgecolors='none')
    ax.vlines(x_hl, xbaseline, vals_hl, colors='#44ff44', linewidth=.8)
    # Circled roman-numeral labels below the axes, connected by thin arrows.
    # NOTE(review): annotate's `s=` keyword was renamed `text=` in
    # matplotlib >= 3.3 -- confirm the pinned matplotlib version.
    for x, y, idx, i in zip(x_hl, vals_hl, genesets_idx, count()):
        y = min(xbaseline, y)
        a = ax.annotate(
            xy=(x, y),
            xycoords='data',
            s=gs_labels[idx],
            xytext=(0.5 + 0.095 * i, -8),
            textcoords=('axes fraction', 'axes points'),
            arrowprops=dict(facecolor='black', width=0.01, headlength=0.01,
                            headwidth=0.01, lw=0.5, shrink=0.0),
            horizontalalignment='center',
            verticalalignment='center',
            **gs_label_props,
        )
    if ylim is not None:
        ax.set_ylim(ylim)
    # `ylabel` goes in the title; axis labels and x tick labels are suppressed.
    ax.set_xlabel("")
    ax.set_ylabel("")
    ax.set_title(ylabel)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(True)
    ax.spines['right'].set_visible(False)
    ax.tick_params(bottom='off', top='off', left='on', right='off')
    ax.set_xticklabels("")
class SFDRNormalize(matplotlib.colors.Normalize):
    """Colormap normalization for *signed* values with a significance band.

    Piecewise-linearly maps [vmin, -sig_threshold] onto [0, 0.499], pins 0 at
    0.5, and maps [sig_threshold, vmax] onto [0.501, 1], so the whole
    non-significant band collapses onto the colormap midpoint.
    """

    def __init__(self, vmin=None, vmax=None, clip=False,
                 sig_threshold=0.05):
        self.sig_threshold = sig_threshold
        matplotlib.colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        threshold = self.sig_threshold
        anchors_in = [self.vmin, -threshold, 0, threshold, self.vmax]
        anchors_out = [0, 0.499, 0.5, 0.501, 1]
        processed, _ = self.process_value(value)
        mapped = np.interp(processed, anchors_in, anchors_out)
        return np.ma.masked_array(mapped, mask=np.ma.getmask(processed))
class FDRNormalize(matplotlib.colors.Normalize):
    """Colormap normalization for one-sided FDR-style values.

    Piecewise-linearly maps [0, sig_threshold] onto [0, 0.01] and
    [sig_threshold, vmax] onto [0.01, 1], compressing the non-significant
    range into the very bottom of the colormap.
    """

    def __init__(self, vmin=None, vmax=None, clip=False,
                 sig_threshold=0.05):
        self.sig_threshold = sig_threshold
        matplotlib.colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        anchors_in = [0, self.sig_threshold, self.vmax]
        anchors_out = [0, 0.01, 1]
        processed, _ = self.process_value(value)
        mapped = np.interp(processed, anchors_in, anchors_out)
        return np.ma.masked_array(mapped, mask=np.ma.getmask(processed))
def plot_gsea_heatmap(gsea, genesets_annot, factor_idx, fig, abs):
    """Compose the GSEA summary figure into *fig*.

    Layout (fractions of figure height): three waterfall plots on top
    (wf_prop), the signed-FDR heatmap in the middle (hm_prop), an annotation
    table plus colorbar at the bottom (table_prop).

    Parameters
    ----------
    gsea : xarray.Dataset
        GSEA results with 'nes', 'fdr', 'max_es_at', 'le_prop' variables and a
        'gene_set' coordinate; attrs['absolute'] flags absolute-mode results.
    genesets_annot : xarray.Dataset
        Annotation for the selected gene sets (indexed by 'gene_set').
    factor_idx : int
        0-based index of the MRI factor used for the top waterfall plots.
    fig : matplotlib Figure
        Target figure; axes are added via explicit fractional coordinates.
    abs : bool
        Absolute (one-sided) mode; parameter name shadows the builtin but is
        kept for interface compatibility.
    """
    plusminus_sign = chr(0x00B1)
    genesets = genesets_annot['gene_set'].values
    # All annotated gene sets must be present in the GSEA results.
    assert all(np.isin(genesets, gsea['gene_set']))
    # Vertical layout fractions and margins.
    wf_prop = 0.3
    table_prop = 0.3
    hm_prop = 1-wf_prop-table_prop
    cbar_vmargin = 0.25
    wf_hmargin = 0.05
    wf_vmargin = 0.06
    # Restrict GSEA results to the annotated gene sets (in annotation order).
    sel_gsea = gsea.reindex_like(genesets_annot)
    # Heatmap
    if abs:
        cmap = copy(matplotlib.cm.Reds)
    else:
        cmap = copy(matplotlib.cm.RdBu_r)
    cmap.set_bad('0.8')
    # Signed log10 FDR: sign from NES direction, magnitude from significance.
    sel_gsea['slogfdr'] = np.sign(sel_gsea['nes']) * -np.log10(sel_gsea['fdr'])
    ax = fig.add_axes([wf_hmargin, table_prop,
                       1-wf_hmargin, hm_prop])
    if abs:
        zlim = [0, -np.log10(0.05)]
        norm = FDRNormalize(sig_threshold=-np.log10(0.25))
    else:
        zlim = [np.log10(0.05), -np.log10(0.05)]
        norm = SFDRNormalize(sig_threshold=-np.log10(0.25))
    # Rows flipped so factor 1 appears at the top; cells with FDR > 0.25 masked.
    hm = plot.heatmap(
        sel_gsea['slogfdr'][::-1, :],
        mask=sel_gsea['fdr'][::-1, :] > 0.25,
        zlim=zlim,
        norm=norm,
        cmap=cmap,
        method='pcolormesh',
        cbar=False,
        ax=ax,
    )
    # White separators between gene-set columns.
    for i in range(sel_gsea['slogfdr'].shape[1]):
        ax.axvline(i, color='white', linewidth=2)
    # Horizontal colorbar at the very bottom, ticks at FDR 0.25/0.1/0.05.
    ax_cbar = fig.add_axes(
        [wf_hmargin+cbar_vmargin, 0.02,
         1-wf_hmargin-2*cbar_vmargin, 0.03],
    )
    cbar = fig.colorbar(hm, ax_cbar, orientation='horizontal')
    fdr_ticks_at = np.array([0.25, 0.1, 0.05])
    lfdr_ticks_at = -np.log10(fdr_ticks_at)
    if abs:
        cbar_tick_lv = np.append([0.0], lfdr_ticks_at)
        cbar_tick_v = np.append([1.0], fdr_ticks_at)
    else:
        # Symmetric ticks for the signed variant.
        cbar_tick_lv = np.append(np.append(-lfdr_ticks_at[::-1], [0.0]),
                                 lfdr_ticks_at)
        cbar_tick_v = np.append(-np.append(fdr_ticks_at[::-1], [1.0]),
                                fdr_ticks_at)
    cbar.set_ticks(cbar_tick_lv)
    if abs:
        ax_cbar.set_xlabel("FDR")
    else:
        ax_cbar.set_xlabel("signed FDR")
    cbar_tick_labels = [f"{v}" for v in cbar_tick_v]
    if not abs:
        # The center tick represents FDR 1.0 of either sign.
        cbar_tick_labels[len(cbar_tick_labels) // 2] = plusminus_sign + "1.0"
    cbar.ax.set_xticklabels(cbar_tick_labels)
    ax.set_xticklabels("")
    ax.tick_params(bottom='off')
    ax.set_ylabel("MRI Factor")
    ax.set_xlabel("")
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['right'].set_visible(False)
    # Top waterfall plots
    ax_nes = fig.add_axes([(0/3)+wf_hmargin, 1-wf_prop+wf_vmargin,
                           (1/3)-wf_hmargin, wf_prop-wf_vmargin])
    wf_plot(gsea['nes'][factor_idx, :], genesets, ax_nes, 'NES')
    ax_mesa = fig.add_axes([(1/3)+wf_hmargin, 1-wf_prop+wf_vmargin,
                            (1/3)-wf_hmargin, wf_prop-wf_vmargin])
    if gsea.attrs['absolute']:
        mesa_mid = 1
    else:
        mesa_mid = int(gsea['max_es_at'].max() / 2)
    wf_plot(gsea['max_es_at'][factor_idx, :], genesets, ax_mesa, 'Max. ES at',
            xbaseline=mesa_mid, reverse=True)
    ax_mesa.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
    ax_le = fig.add_axes([(2/3)+wf_hmargin, 1-wf_prop+wf_vmargin,
                          (1/3)-wf_hmargin, wf_prop-wf_vmargin])
    wf_plot(gsea['le_prop'][factor_idx, :], genesets, ax_le, 'Leading Edge')
    # Bottom table
    ga = genesets_annot.copy()
    if 'source' in ga and 'source_year' in ga:
        # Fold the year into the source column: "Source (Year)".
        sy = zip(ga['source'].values, ga['source_year'].values)
        ga['source'] = ('gene_set', [f"{s} ({y})" for s, y in sy])
        del ga['source_year']
    gaa = ga.to_array()
    xlabels = [gs_labels[i] for i in range(gaa.shape[1])]
    table = ax.table(
        cellText=np.vstack((xlabels, gaa.values)),
        cellLoc='center',
        rowLabels=['gene set'] + list(gaa['variable'].values),
        loc='bottom',
    )
    table.auto_set_font_size(False)
    table.set_fontsize(8)
    # Style the table: white gridlines, striped columns, circled labels in the
    # first data row, bold row headers.
    for (col, row), cell in table.get_celld().items():
        cell.set_linewidth(2)
        cell.set_edgecolor('w')
        if col % 2 == 1:
            cell.set_facecolor('#eeeeee')
        if col == 3:
            cell.set_height(3*cell.get_height())
        if col == 0 and row >= 0:
            cell.set_text_props(**gs_label_props)
        if row == -1:
            cell.set_text_props(weight='bold')
@click.command()
@click.argument('gsea_results', type=click_utils.in_path)
@click.argument('sel_genesets', type=click_utils.in_path)
@click.argument('factor', type=int)
@click.argument('out', type=click_utils.out_path)
def plot_gsea_heatmap_(gsea_results, sel_genesets, factor, out):
    """CLI wrapper: load GSEA results and gene-set annotations, render the
    heatmap figure for the given (1-based) factor, and save it as SVG."""
    print(gsea_results)
    # Filename convention: a '_T.nc' suffix marks absolute-mode results.
    if gsea_results.endswith("_T.nc"):
        abs = True
    else:
        abs = False
    gsea = xr.open_dataset(gsea_results).load()
    # Gene-set names are stored as bytes in the NetCDF file; decode to str.
    gsea['gene_set'] = (xr.apply_ufunc(np.char.decode, gsea['gene_set'])
                        .astype('object'))
    # Relabel factors 1..N (small ints) instead of 0-based indices.
    gsea['mri_feature'] = np.arange(1, gsea['mri_feature'].shape[0]+1, dtype='i2')
    geneset_annot = (pd.read_table(sel_genesets, sep='\t', quotechar='"',
                                   comment='#').
                     set_index('gene_set').to_xarray())
    # CLI takes a 1-based factor number; plotting uses the 0-based index.
    factor_idx = factor - 1
    with plot.figure(figsize=(7.0, 3.5)) as fig:
        plot_gsea_heatmap(gsea, geneset_annot, factor_idx, fig, abs)
        fig.savefig(out, format="svg")
if __name__ == '__main__':
    # Apply the shared plotting style before handing control to the click CLI.
    set_style()
    plot_gsea_heatmap_()
|
|
# pre/_shiftscale.py
"""Tools for preprocessing data."""
__all__ = [
"shift",
"scale",
]
import numpy as np
# Shifting and MinMax scaling =================================================
def shift(X, shift_by=None):
    """Shift the columns of X by a vector.

    Parameters
    ----------
    X : (n,k) ndarray
        A matrix of k snapshots. Each column is a single snapshot.
    shift_by : (n,) or (n,1) ndarray or None
        A vector that is the same size as a single snapshot. If None,
        set to the mean of the columns of X.

    Returns
    -------
    Xshifted : (n,k) ndarray
        The matrix such that Xshifted[:,j] = X[:,j] - shift_by for j=0,...,k-1.
    xbar : (n,) ndarray
        The shift factor. Since this is a one-dimensional array, it must be
        reshaped to be applied to a matrix: Xshifted + xbar.reshape((-1,1)).
        Only returned if shift_by=None; when shift_by is provided, only
        Xshifted is returned.

    Raises
    ------
    ValueError
        If X is not two-dimensional, or if a provided shift_by is neither
        a (n,) vector nor a (n,1) column.

    Examples
    --------
    # Shift X by its mean, then shift Y by the same mean.
    >>> Xshifted, xbar = pre.shift(X)
    >>> Yshifted = pre.shift(Y, xbar)

    # Shift X by its mean, then undo the transformation by an inverse shift.
    >>> Xshifted, xbar = pre.shift(X)
    >>> X_again = pre.shift(Xshifted, -xbar)
    """
    # Check dimensions.
    if X.ndim != 2:
        raise ValueError("data X must be two-dimensional")

    # If no shift_by factor is provided, compute the mean column.
    learning = (shift_by is None)
    if learning:
        shift_by = np.mean(X, axis=1)
    else:
        shift_by = np.asarray(shift_by)
        # Accept the documented (n,1) column shape as well as (n,).
        if shift_by.ndim == 2 and shift_by.shape[1] == 1:
            shift_by = shift_by.ravel()
        if shift_by.ndim != 1:
            raise ValueError("shift_by must be one-dimensional")

    # Shift the columns by the (learned or provided) vector.
    Xshifted = X - shift_by.reshape((-1,1))

    return (Xshifted, shift_by) if learning else Xshifted
def scale(X, scale_to, scale_from=None):
    """Scale the entries of the snapshot matrix X from the interval
    [scale_from[0], scale_from[1]] to [scale_to[0], scale_to[1]].
    Scaling algorithm follows sklearn.preprocessing.MinMaxScaler.

    Parameters
    ----------
    X : (n,k) ndarray
        A matrix of k snapshots to be scaled. Each column is a single snapshot.
    scale_to : (2,) tuple
        The desired minimum and maximum of the scaled data.
    scale_from : (2,) tuple
        The minimum and maximum of the snapshot data. If None, learn the
        scaling from X: scale_from[0] = min(X); scale_from[1] = max(X).

    Returns
    -------
    Xscaled : (n,k) ndarray
        The scaled snapshot matrix.
    scaled_to : (2,) tuple
        The bounds that the snapshot matrix was scaled to, i.e.,
        scaled_to[0] = min(Xscaled); scaled_to[1] = max(Xscaled).
        Only returned if scale_from = None.
    scaled_from : (2,) tuple
        The minimum and maximum of the snapshot data, i.e., the bounds that
        the data was scaled from. Only returned if scale_from = None.

    For scale_from != None, only Xscaled is returned.

    Examples
    --------
    # Scale X to [-1,1] and then scale Y with the same transformation.
    >>> Xscaled, scaled_to, scaled_from = pre.scale(X, (-1,1))
    >>> Yscaled = pre.scale(Y, scaled_to, scaled_from)

    # Scale X to [0,1], then undo the transformation by an inverse scaling.
    >>> Xscaled, scaled_to, scaled_from = pre.scale(X, (0,1))
    >>> X_again = pre.scale(Xscaled, scaled_from, scaled_to)
    """
    # If no scale_from bounds are provided, learn them from the data.
    # (BUG FIX: removed a dead `means = np.mean(X)` computation that was
    # never used anywhere in this function.)
    learning = (scale_from is None)
    if learning:
        scale_from = np.min(X), np.max(X)

    # Check scales.
    if len(scale_to) != 2:
        raise ValueError("scale_to must have exactly 2 elements")
    if len(scale_from) != 2:
        raise ValueError("scale_from must have exactly 2 elements")

    # Do the scaling (affine map sending [xmin, xmax] onto [mini, maxi]).
    mini, maxi = scale_to
    xmin, xmax = scale_from
    scl = (maxi - mini)/(xmax - xmin)
    Xscaled = X*scl + (mini - xmin*scl)

    return (Xscaled, scale_to, scale_from) if learning else Xscaled
# Deprecations ================================================================
def mean_shift(X):  # pragma nocover
    """DEPRECATED alias for shift(); returns (xbar, Xshifted) in legacy order."""
    # BUG FIX: `np.warnings` was removed in NumPy 1.25; use the stdlib
    # warnings module directly instead.
    import warnings
    warnings.warn("mean_shift() has been renamed shift()",
                  DeprecationWarning, stacklevel=1)
    Xshifted, xbar = shift(X)
    # Legacy return order: (shift factor, shifted data).
    return xbar, Xshifted
mean_shift.__doc__ = "\nDEPRECATED! use shift().\n\n" + shift.__doc__
|
|
# https://cran.r-project.org/web/packages/PerformanceAnalytics/vignettes/portfolio_returns.pdf
import pandas as pd
import numpy as np
import warnings
# https://stackoverflow.com/questions/16004076/python-importing-a-module-that-imports-a-module
from . import functions as pa
class Portfolio(object):
    """Portfolio analytics on a panel of component prices.

    Builds the portfolio value and return time series from component prices
    and a (possibly time-varying) rebalancing-weight matrix; also exposes
    contributions, turnover and end-of-period weights.
    """
    def __init__(self, prices, weights=None, V0=100, max_leverage=1, method="simple", benchmark_rate=0):
        """
        :param prices: pd.DataFrame with price time series in columns
        :param weights: None or pd.DataFrame with weights. if None, then buy-and-hold equally weighted
        :param V0: initial portfolio value
        :param max_leverage: float, the maximum investment. if sum(w) > max_leverage, then rebase to max_leverage.
        if sum(w) < max_leverage, then create residual weight with zero returns.
        if None, do not adjust weights.
        :param method: simple or log returns
        :param benchmark_rate: annualized benchmark rate, defaults to 0
        """
        assert isinstance(prices, pd.DataFrame)
        self.method = method
        self.prices = prices.copy()
        self.max_leverage = max_leverage
        if weights is None:
            self.weights = self.set_bh_ew_weights()
        else:
            self.weights = self.check_weights(weights.copy())
        self.V0 = V0
        # Compute the portfolio series eagerly so they are always available.
        self.ptf_ret, self.ptf_ts = self.portfolio_returns()
        self.benchmark_rate = benchmark_rate
    def __repr__(self):
        return f"Portfolio() with {len(self.weights.columns)} assets"
    def __str__(self):
        # usage: print(object) -- renders a human-readable performance summary
        ann_ret = pa.compute_cagr(self.ptf_ts)
        ann_std = pa.compute_annualized_volatility(self.ptf_ret)
        SR = pa.compute_sharpe_ratio(self.ptf_ret, self.benchmark_rate)
        DD = pa.compute_max_drawdown(self.ptf_ts, self.method)
        VaR = pa.compute_historical_var(self.ptf_ret, conf_lvl=.95)
        return f"""
        {'-' * 45}
        * Portfolio with {len(self.weights.columns)} assets
        * Analysed period: {min(self.prices.index).strftime('%Y-%m-%d')} - {max(self.prices.index).strftime('%Y-%m-%d')}
        * Number of rebalancing events: {len(self.weights)}
        * Annualized Return: {ann_ret:.2%}
        * Annualized Standard Deviation: {ann_std:.2%}
        * Sharpe Ratio: {SR:.2%}
        * Max Drawdown: {DD:.2%}
        * Historical VaR 95%: {VaR:.2%}
        {'-' * 45}
        """
    def set_bh_ew_weights(self):
        """
        returns a pd.DataFrame with weights of buy-and-hold equally weighted ptf
        """
        # Single rebalancing event at the first price date, weight 1/N each.
        N = self.prices.shape[1]
        weights = pd.DataFrame([np.repeat(1 / N, N)], index=[self.prices.index[0]], columns=self.prices.columns)
        return weights
    def check_weights(self, weights):
        """Validate/normalize a weight matrix: cap total exposure at
        max_leverage and add a zero-return 'residual' column on dates whose
        weights do not sum to 100%."""
        if isinstance(weights, (list, np.ndarray)):
            # assume buy & hold portfolio with specified weights
            weights = pd.DataFrame([weights], index=[self.prices.index[0]], columns=self.prices.columns)
        if self.max_leverage is not None:
            tol = 1e-06
            if any(weights.sum(axis=1) > self.max_leverage + tol):
                warnings.warn("\nsum of weights exceed max_leverage value of {} in dates {}:\nrebasing to {}".format(
                    self.max_leverage, weights[weights.sum(axis=1) > self.max_leverage].index.values, self.max_leverage))
                # rescale the weights on dates exceeding max_leverage + tolerance
                weights[weights.sum(axis=1) > self.max_leverage] = \
                    weights[weights.sum(axis=1) > self.max_leverage].apply(
                        lambda x: x / sum(x) * self.max_leverage, axis=1
                    )
            if not all(np.isclose(weights.sum(axis=1), 1, rtol=1e-06)):
                warnings.warn(
                    "\none or more rebalancing dates have weights not summing up to 100%:\n" +
                    "adding a residual weight to compensate")
                weights["residual"] = 1 - weights.sum(axis=1)
        return weights
    def get_components_value_single_period(self, ret, v_init):
        """
        compute components values over time, in a single rebalancing window, given returns and initial values
        :param ret: pd.DataFrame, with .index dates and containing components returns over time
        :param v_init: initial components values
        :return: pd.DataFrame of component values over the window
        """
        if isinstance(v_init, pd.Series):
            v_init = [v_init.values.tolist()]
        elif isinstance(v_init, pd.DataFrame):
            v_init = v_init.values.tolist()
        else:
            raise ValueError("v_init should be either pd.Series or pd.DataFrame")
        # Broadcast the initial values over every date of the window, then
        # grow them with cumulated (simple or log) returns.
        components_value = pd.DataFrame(v_init * len(ret), index=ret.index, columns=ret.columns)
        if self.method == "simple":
            components_value = components_value * ret.apply(lambda x: np.cumprod(1 + x), axis=0)
        elif self.method == "log":
            components_value = components_value * ret.apply(lambda x: np.cumsum(x), axis=0)
        else:
            raise ValueError("method should be either simple or log")
        return components_value
    def portfolio_returns(self, weights=None, V0=None, max_leverage=None, verbose=False):
        """
        :param weights: if None use self.weights,
        otherwise use given input and update self.weights, self.ptf_ret, self.ptf_ts
        :param V0: if None use self.V0, else float, initial portfolio value, overrides self.V0
        :param max_leverage: if None use self.max_leverage, else use max_leverage.
        it is not possible to change max_leverage from a number to None, in that case you need to reinitiate the class
        :param verbose: if True, returns components contributions to portfolio returns
        :return: portfolio returns. if verbose=True, return tuple with ptf rets, contribs
        """
        # update inputs
        if V0 is None:
            V0 = self.V0
        else:
            self.V0 = V0
        if max_leverage is None:
            max_leverage = self.max_leverage
        else:
            self.max_leverage = max_leverage
        # compute stocks returns
        returns = pa.compute_returns(self.prices, method=self.method)
        if weights is None:
            # max_leverage may have been updated above: re-validate stored weights
            weights = self.check_weights(self.weights)
        else:
            weights = self.check_weights(weights)
            assert isinstance(weights, pd.DataFrame)
            self.weights = weights
        if "residual" in weights.columns:
            returns["residual"] = 0
        # subset returns to match weights.columns
        returns = returns[weights.columns.tolist()]
        # subset weights to be inside returns dates
        idx = [ind for ind in weights.index if ind in returns.index[:-1]]
        if idx != weights.index.to_list():
            warnings.warn("Some rebalancing dates don't match prices dates. Non matching dates will not be considered.")
        weights = weights.loc[idx]
        V_bop = list()
        V = list()
        n_iter = len(weights.index)
        for t in range(n_iter):
            if t == 0:  # first rebalancing date,
                # get the values of each component at first rebalancing date
                v_bop = V0 * weights.iloc[t]
            else:
                # not the first rebal date, set v_init equal to last available V
                v_bop = V[-1].tail(1).sum(axis=1).values * weights.iloc[t]
            V_bop.append(v_bop.to_frame().transpose())
            # subset returns
            if t != n_iter - 1:
                tmp_ret = returns.loc[weights.index[t]:weights.index[t + 1]]
            else:
                # last iteration: take returns up to the last available date
                tmp_ret = returns.loc[weights.index[t]:]
            # notice that subsetting by index includes both extremes!
            # we need to remove the first return, since rebalancing happens from the day after
            # the actual index indicated in the weights input
            tmp_ret = tmp_ret.drop(index=weights.index[t])
            # set missing returns to zero, otherwise there are "holes" in the ptf.
            # e.g. with V0 = 100 and w1 = 10%, if stock 1 has no returns in that
            # period the ptf at t0 would sum to 90 instead of 100
            tmp_ret = tmp_ret.fillna(0)
            # cumulate returns components inside this interval, i.e. in
            # (index[t] + 1, index[t+1]]
            tmp_value = self.get_components_value_single_period(tmp_ret, v_bop)
            # append values both to V_bop and to V
            # to V_bop we attach not the last value, since the last bop will
            # be replaced by the new v_bop
            V_bop.append(tmp_value.iloc[:-1])
            V.append(tmp_value)
        # concat results to get the full components values over time
        # we attach to V the first element
        # corresponding to the first V_bop,
        # notice that this is a bit fictitious, since
        # the eop of the very first rebalancing day is not known,
        # we only know the bop of the day after the rebalancing day
        V.insert(0, V_bop[0])
        V = pd.concat(V)
        # here we need to attach an even more fictitious term,
        # the bop of the first rebalancing day,
        # this is done only for index compatibility with V, it does not matter
        V_bop.insert(0, V_bop[0])
        V_bop = pd.concat(V_bop)
        # assign index to values, index starts at the first date of rebalancing
        V.index = returns.loc[weights.index[0]:].index
        V_bop.index = returns.loc[weights.index[0]:].index
        # portfolio timeseries
        ptf = V.sum(axis=1)
        # portfolio returns
        ptf_ret = pa.compute_returns(ptf, method=self.method)
        self.ptf_ret = ptf_ret
        self.ptf_ts = ptf
        if verbose:
            # compute components' contributions in each day via
            # contrib_i = V_i - Vbop_i / sum(Vbop)
            contrib = V.add(-V_bop).divide(V_bop.sum(axis=1), axis=0)
            # sanity check that contribs sum to ptf_ret:
            # np.sum(np.abs(contrib.apply(sum, axis=1).subtract(ptf_ret)))
            # compute the turnover
            turnover = V_bop.shift(-1).subtract(V)
            turnover = turnover.loc[weights.index]
            # old approach: divided by EOP ptf value
            # turnover = turnover.apply(lambda x: np.sum(np.abs(x)), axis=1).divide(ptf.loc[weights.index])
            # new approach: divided by average ptf value
            # denominator: average portfolio value btw rebalancing dates
            avg_ptf = self.get_avg_ptf_value(ptf, weights)
            turnover = turnover.apply(lambda x: np.sum(np.abs(x)), axis=1).divide(avg_ptf)
            # by this definition the maximum turnover is 2; to get turnover=1
            # when the whole ptf allocation changes, divide by 2:
            # turnover = turnover / 2
            return ptf_ret, ptf, contrib, turnover, V, V_bop
        return ptf_ret, ptf
    def get_eop_weights(self, weights=None):
        """
        End-of-period weights at each close preceding a rebalancing date.
        :param weights: optional weights forwarded to portfolio_returns
        :return: pd.DataFrame of end-of-period weights
        """
        _, ts, _, _, V_eop, _ = self.portfolio_returns(weights=weights, verbose=True)
        # compute end of period weights dividing the end of period value of each stock by the eop ptf value
        w_eop = V_eop.divide(ts, axis=0)
        # select the end-of-period weights at each close before rebalancing
        # NOTE(review): [*self.weights] iterates the DataFrame's COLUMN labels,
        # not its dates -- presumably self.weights.index was intended; confirm.
        dates = [*self.weights, w_eop.index[-1]]
        # drop first date: it is the first rebalancing date
        del dates[0]
        w_eop = w_eop.loc[dates]
        return w_eop
    def get_last_eop_weights(self, weights=None):
        """
        :param weights: optional weights forwarded to get_eop_weights
        :return: the stock weights at the last available price date
        """
        w_eop = self.get_eop_weights(weights=weights)
        # get last w_eop: the stock weights at the last date of prices
        # https://www.shanelynn.ie/select-pandas-dataframe-rows-and-columns-using-iloc-loc-and-ix/
        last_w_eop = w_eop.iloc[[-1]]
        return last_w_eop
    def get_avg_ptf_value(self, ptf, weights):
        """
        Computes average portfolio value between each rebalancing date.
        Output used for computing portfolio turnover (denominator)
        :param ptf: pd.Series containing ptf values
        :param weights: pd.DataFrame with index = rebalancing dates
        :return: pd.Series indexed by rebalancing dates
        """
        rebald = weights.index
        avg_ptf = list()
        for i in range(len(rebald)):
            if i == 0:
                # First window: no history yet, use the initial value V0.
                avg_ptf.append(pd.Series(self.V0, index=[rebald[i]]))
            else:
                # Average ptf value over (previous rebal date, current rebal date].
                tmp = ptf.loc[(ptf.index > rebald[i - 1]) & (ptf.index <= rebald[i])]
                avg_ptf.append(pd.Series(np.mean(tmp), index=[rebald[i]]))
        avg_ptf = pd.concat(avg_ptf, axis=0)
        return avg_ptf
# #
# # prices = {
# # "A": [10, 11, 13, 12, 12, 13, 15],
# # "B": [20, 21, 19, 18, 19, 17, 15],
# # "C": [20, 20, 21, 22, 23, 25, 24]
# # }
# # prices = pd.DataFrame(prices)
# # prices.index = [dt.date.today() - dt.timedelta(days=x) for x in range(prices.shape[0])][::-1]
# #
# # weights = {
# # "A": [.5, .4],
# # "B": [.3, .3],
# # "C": [.2, .3]
# # }
# # weights = pd.DataFrame(weights)
# # weights.index = prices.index[0::4]
# #
# prova = Portfolio(prices=prices, weights=weights)
# prova.ptf_ts
#
# returns = pa.compute_returns(prices)
# V0 = 100
# max_leverage = 1
#
# V_bop = list()
# V = list()
# n_iter = len(weights.index)
#
# for t in range(n_iter):
# if t == 0: # first rebalancing date,
# # get the values of each component at first rebalancing date
# v_bop = V0 * weights.iloc[t]
# else:
# # not the first rebal date, set v_init equal to last available V
# v_bop = V[-1].tail(1).sum(axis=1).values * weights.iloc[t]
#
# V_bop.append(v_bop.to_frame().transpose())
#
# # subset returns
# if t != n_iter - 1:
# tmp_ret = returns.loc[weights.index[t]:weights.index[t + 1]]
# else:
# # se è l'ultima iterazione prendi i ritorni fino all'ultima data disponibile
# tmp_ret = returns.loc[weights.index[t]:]
#
# # notice that subsetting by index includes both extremes!
# # we need to remove the first return, since rebalancing happens from the day after
# # the actual index indicated in the weights input
# tmp_ret = tmp_ret.drop(index=weights.index[t])
# # cumulate returns components inside this interval, i.e. in
# # (index[t] + 1, index[t+1]]
# tmp_value = prova.get_components_value_single_period(tmp_ret, v_bop)
# # append values both to V_bop and to V
# # to V_bop we attach not the last value, since the last bop will
# # be replaced by the new v_bop
# V_bop.append(tmp_value.iloc[:-1])
# V.append(tmp_value)
#
# # concat results to get the full components values over time
#
# # we attach to V the first element
# # corresponding to the first V_bop,
# # notice that this is a bit fictitious, since
# # the eop of the very first rebalancing day is not known,
# # we only know the bop of the day after the rebalancing day
# V.insert(0, V_bop[0])
# V = pd.concat(V)
# # here we need to attach an even more fictitious term,
# # the bop of the first rebalancing day,
# # this is done only for index compatibility with V, it does not matter
# V_bop.insert(0, V_bop[0])
# V_bop = pd.concat(V_bop)
# # assign index to values, index starts at the first date of rebalancing
# V.index = returns.loc[weights.index[0]:].index
# V_bop.index = returns.loc[weights.index[0]:].index
#
# # portfolio timeseries
# ptf = V.sum(axis=1)
# # portfolio returns
# ptf_ret = pa.compute_returns(ptf)
#
#
# # calcola il turnover
# turnover = V_bop.shift(-1).subtract(V)
# turnover = turnover.loc[weights.index]
# turnover = turnover.apply(lambda x: np.sum(np.abs(x)), axis=1).divide(ptf.loc[weights.index])
# # secondo la definizione di cui sopra, il massimo turnover è 2. se modifichiamo l'allocazione dell'intero
# # ptf vogliamo turnover=1
# turnover = turnover / 2
#
# # compute components' contributions in each day via
# # contrib_i = V_i - Vbop_i / sum(Vbop)
# contrib = V.add(-V_bop).divide(V_bop.sum(axis=1), axis=0)
# # check if sum di contrib = ptf_ret
# # np.sum(np.abs(contrib.apply(sum, axis=1).subtract(ptf_ret)))
#
#
|
|
import h5py
import numpy as np
import cv2
def read_new(archive_dir):
    """Load (x, y) from an HDF5 archive, skipping the first 27 samples of x.

    x is staged through a np.memmap ("x.dat") because it may be too large to
    hold in RAM; y is synthesized as all-ones labels (positives only).

    NOTE(review): mode "r+" requires x.dat to already exist with the right
    size -- confirm, or use mode "w+" to create it on first run.
    """
    # BUG FIX: `chunks`/`compression` are dataset-creation options, not
    # h5py.File options -- passing them here raises on modern h5py, and they
    # are meaningless when only reading. Open the file plainly.
    with h5py.File(archive_dir, "r") as hf:
        x_shape = list(hf.get("x_shape"))
        x_shape[0] = x_shape[0] - 27  # drop the first 27 samples
        x_shape = tuple(x_shape)
        # BUG FIX: converted Python-2 print statements to print() calls.
        print(x_shape)
        x = np.memmap("x.dat", dtype="float32", mode="r+", shape=x_shape)
        # Copy in fixed-size chunks so the full dataset is never resident.
        memmap_step = 1000
        hf_x = hf.get("x")
        for i in range(27, x_shape[0] + 27, memmap_step):
            x[i - 27:i - 27 + memmap_step] = hf_x[i:i + memmap_step]
            print(i)
        y = np.ones((x_shape[0]))
    return x, y
def write_new(archive_dir, x, y):
    """Write (x, y) and their shapes to a chunked, gzip-compressed HDF5 file."""
    # BUG FIX: `chunks`/`compression` belong on create_dataset, not on
    # h5py.File (where they raise on modern h5py). Shape datasets are tiny
    # and stay uncompressed.
    with h5py.File(archive_dir, "w") as hf:
        hf.create_dataset("x", data=x, chunks=True, compression="gzip")
        hf.create_dataset("x_shape", data=x.shape)
        hf.create_dataset("y", data=y, chunks=True, compression="gzip")
        hf.create_dataset("y_shape", data=y.shape)
"""
Just gets rid of the negatives by only reading the positives, then writing them to replace the existing archive
"""
archive_dir="positive_augmented_samples.h5"
x,y = read_new(archive_dir)
write_new(archive_dir, x, y)
|
|
from ray import tune
import numpy as np
import pdb
from softlearning.misc.utils import get_git_rev, deep_update
# Shared hyperparameters: network width, policy reparameterization flag,
# and the number of coupling layers (for flow-based policies).
M = 256
REPARAMETERIZE = True
NUM_COUPLING_LAYERS = 2
# Base kwargs for the Gaussian policy (two hidden layers, squashed output).
GAUSSIAN_POLICY_PARAMS_BASE = {
    'type': 'GaussianPolicy',
    'kwargs': {
        'hidden_layer_sizes': (M, M),
        'squash': True,
    }
}
GAUSSIAN_POLICY_PARAMS_FOR_DOMAIN = {}
POLICY_PARAMS_BASE = {
    'GaussianPolicy': GAUSSIAN_POLICY_PARAMS_BASE,
}
# 'gaussian' is accepted as a lowercase alias for 'GaussianPolicy'.
POLICY_PARAMS_BASE.update({
    'gaussian': POLICY_PARAMS_BASE['GaussianPolicy'],
})
POLICY_PARAMS_FOR_DOMAIN = {
    'GaussianPolicy': GAUSSIAN_POLICY_PARAMS_FOR_DOMAIN,
}
POLICY_PARAMS_FOR_DOMAIN.update({
    'gaussian': POLICY_PARAMS_FOR_DOMAIN['GaussianPolicy'],
})
# Episode-length defaults, overridden per domain below.
DEFAULT_MAX_PATH_LENGTH = 1000
MAX_PATH_LENGTH_PER_DOMAIN = {
    'Point2DEnv': 50,
    'Pendulum': 200,
}
# Algorithm-specific hyperparameters merged into the variant spec.
ALGORITHM_PARAMS_ADDITIONAL = {
    'MBPO': {
        'type': 'MBPO',
        'kwargs': {
            'reparameterize': REPARAMETERIZE,
            'lr': 3e-4,
            'target_update_interval': 1,
            'tau': 5e-3,
            'store_extra_policy_info': False,
            'action_prior': 'uniform',
            'n_initial_exploration_steps': int(5000),
        }
    },
    'SQL': {
        'type': 'SQL',
        'kwargs': {
            'policy_lr': 3e-4,
            'target_update_interval': 1,
            'n_initial_exploration_steps': int(1e3),
            # Per-domain reward scaling, resolved lazily from the spec
            # at sampling time (default 1.0 for unknown domains).
            'reward_scale': tune.sample_from(lambda spec: (
                {
                    'Swimmer': 30,
                    'Hopper': 30,
                    'HalfCheetah': 30,
                    'Walker2d': 10,
                    'Ant': 300,
                    'Humanoid': 100,
                    'Pendulum': 1,
                }.get(
                    spec.get('config', spec)
                    ['environment_params']
                    ['training']
                    ['domain'],
                    1.0
                ),
            )),
        }
    },
    'MVE': {
        'type': 'MVE',
        'kwargs': {
            'reparameterize': REPARAMETERIZE,
            'lr': 3e-4,
            'target_update_interval': 1,
            'tau': 5e-3,
            'target_entropy': 'auto',
            'store_extra_policy_info': False,
            'action_prior': 'uniform',
            'n_initial_exploration_steps': int(5000),
        }
    },
}
# Training-length defaults, overridden per domain below.
DEFAULT_NUM_EPOCHS = 200
NUM_EPOCHS_PER_DOMAIN = {
    'Hopper': int(1e3),
    'HalfCheetah': int(3e3),
    'Walker2d': int(3e3),
    'Ant': int(3e3),
    'Humanoid': int(1e4),
    'Pendulum': 10,
}
# Derived per-domain settings: epoch count and an initial exploration
# budget of 10 episodes' worth of steps.
ALGORITHM_PARAMS_PER_DOMAIN = {
    **{
        domain: {
            'kwargs': {
                'n_epochs': NUM_EPOCHS_PER_DOMAIN.get(
                    domain, DEFAULT_NUM_EPOCHS),
                'n_initial_exploration_steps': (
                    MAX_PATH_LENGTH_PER_DOMAIN.get(
                        domain, DEFAULT_MAX_PATH_LENGTH
                    ) * 10),
            }
        } for domain in NUM_EPOCHS_PER_DOMAIN
    }
}
ENVIRONMENT_PARAMS = {
}
NUM_CHECKPOINTS = 10
def get_variant_spec_base(universe, domain, task, policy, algorithm, env_params):
    """Assemble the base ray-tune variant spec for one (universe, domain, task).

    Merges per-domain defaults with per-algorithm settings and *env_params*,
    then fills in environment, policy, Q-function, replay pool, sampler and
    run parameters.
    """
    algorithm_params = deep_update(
        ALGORITHM_PARAMS_PER_DOMAIN.get(domain, {}),
        ALGORITHM_PARAMS_ADDITIONAL.get(algorithm, {})
    )
    # env_params take precedence over the merged defaults.
    algorithm_params = deep_update(
        algorithm_params,
        env_params
    )
    variant_spec = {
        'git_sha': get_git_rev(),
        'environment_params': {
            'training': {
                'domain': domain,
                'task': task,
                'universe': universe,
                'kwargs': (
                    ENVIRONMENT_PARAMS.get(domain, {}).get(task, {})),
            },
            # The evaluation environment mirrors the training environment.
            'evaluation': tune.sample_from(lambda spec: (
                spec.get('config', spec)
                ['environment_params']
                ['training']
            )),
        },
        'policy_params': deep_update(
            POLICY_PARAMS_BASE[policy],
            POLICY_PARAMS_FOR_DOMAIN[policy].get(domain, {})
        ),
        'Q_params': {
            'type': 'double_feedforward_Q_function',
            'kwargs': {
                'hidden_layer_sizes': (M, M),
            }
        },
        'algorithm_params': algorithm_params,
        'replay_pool_params': {
            'type': 'SimpleReplayPool',
            'kwargs': {
                # Pool size depends on the pool type chosen in the spec.
                'max_size': tune.sample_from(lambda spec: (
                    {
                        'SimpleReplayPool': int(1e6),
                        'TrajectoryReplayPool': int(1e4),
                    }.get(
                        spec.get('config', spec)
                        ['replay_pool_params']
                        ['type'],
                        int(1e6))
                )),
            }
        },
        'sampler_params': {
            'type': 'SimpleSampler',
            'kwargs': {
                'max_path_length': MAX_PATH_LENGTH_PER_DOMAIN.get(
                    domain, DEFAULT_MAX_PATH_LENGTH),
                'min_pool_size': MAX_PATH_LENGTH_PER_DOMAIN.get(
                    domain, DEFAULT_MAX_PATH_LENGTH),
                'batch_size': 256,
            }
        },
        'run_params': {
            # 'seed':2106,
            'seed': tune.sample_from(
                lambda spec: np.random.randint(0, 10000)),
            'checkpoint_at_end': True,
            'checkpoint_frequency': NUM_EPOCHS_PER_DOMAIN.get(
                domain, DEFAULT_NUM_EPOCHS) // NUM_CHECKPOINTS,
            'checkpoint_replay_pool': False,
        },
    }
    return variant_spec
def get_variant_spec(args, env_params):
    """Build the variant spec for *env_params*, applying CLI overrides.

    Currently the only override is --checkpoint-replay-pool, which (when
    given) replaces the default run-params setting.
    """
    spec = get_variant_spec_base(
        env_params.universe, env_params.domain, env_params.task,
        args.policy, env_params.type, env_params)
    override = args.checkpoint_replay_pool
    if override is not None:
        spec['run_params']['checkpoint_replay_pool'] = override
    return spec
|
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from function_benchmark import FunctionBenchmark, Inspec
class Case:
    """One cumprod benchmark case: input shape, scan axis, and tolerance."""

    def __init__(self, shape, axis, rtol=1e-6):
        # rtol (relative tolerance) 1e-6 is default for assert_allclose
        self.shape = shape
        self.axis = axis
        self.rtol = rtol

    # Printed by pytest when a test fails.
    def __repr__(self):
        # BUG FIX: the old repr omitted the separator after `shape` and
        # labeled the attribute "axes" although it is stored as `axis`.
        return 'Case(shape=' + str(self.shape) + \
               ', axis=' + str(self.axis) + \
               ', rtol=' + str(self.rtol) + ')'
# Benchmark cases for cumprod: Case(shape, scan_axis[, rtol]).
test_cases = [
    # --------------------------------
    # Common use case
    # --------------------------------
    # Axis 0
    Case((512, 512), 0),
    Case((512, 1024), 0),
    Case((512, 2048), 0),
    Case((1024, 512), 0),
    Case((1024, 1024), 0),
    Case((1024, 2048), 0),
    Case((2048, 512), 0),
    Case((2048, 1024), 0),
    Case((2048, 2048), 0),
    # Axis 1
    Case((512, 512), 1),
    Case((512, 1024), 1),
    Case((512, 2048), 1),
    Case((1024, 512), 1),
    Case((1024, 1024), 1),
    Case((1024, 2048), 1),
    Case((2048, 512), 1),
    Case((2048, 1024), 1),
    Case((2048, 2048), 1),
    # --------------------------------
    # Large cases
    # --------------------------------
    Case((1024*1024, 32), 1),
    Case((32, 1024*1024), 0),
    Case((2048, 2048), 1),
    Case((2048, 2048), 0),
    Case((2024*2024, 2), 0),
    Case((2, 2024*2024), 1),
    # Weak cases
    # PyTorch uses Cub library in these cases.
    Case((2024*2024, 1), 0),
    Case((1, 2024*2024), 1),
]
def create_cumprod_input(rng, shape, axis, with_mask):
    """Draw a float32 standard-normal array of *shape* from *rng*.

    When *with_mask* is set, each entry is zeroed with probability
    1/shape[axis], i.e. on average one zero per scan line along *axis*.
    """
    sample = rng.randn(*shape).astype(np.float32)
    if with_mask:
        keep = rng.rand(*shape) > (1.0 / shape[axis])
        sample = sample * keep
    return sample
@pytest.mark.parametrize("seed", [123])
@pytest.mark.parametrize("test_case", test_cases)
@pytest.mark.parametrize('exclusive', [False, True])
@pytest.mark.parametrize('reverse', [False, True])
@pytest.mark.parametrize("with_mask", [True, False])
def test_cumprod(seed, test_case, exclusive, reverse, with_mask, nnabla_opts):
x_shape = test_case.shape
axis = test_case.axis
def init(shape):
rng = np.random.RandomState(seed)
return create_cumprod_input(rng, shape, axis, with_mask)
need_grad = True
inputs = [Inspec(x_shape, init, need_grad)]
func_kwargs = dict(
axis=axis,
exclusive=exclusive,
reverse=reverse,
)
fb = FunctionBenchmark(
F.cumprod, inputs, [], func_kwargs,
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
|
|
#!/usr/bin/env python3
"""Generate graph of fictional/mythical classes from Wikidata JSON dump"""
import sys
import json
import networkx as nx
from wd_constants import lang_order
# Root classes of the fictional/mythical hierarchy (Wikidata QIDs).
roots = ('Q18706315', 'Q14897293', 'Q17442446')
# Wikidata property P279 = "subclass of".
subclass = 'P279'
def get_label(obj):
    """Pick a display label for a Wikidata entity.

    Walks the language fallback chain, preferring a sitelink title over the
    plain label for each language; returns None when nothing matches.
    """
    for lang in lang_order:
        wiki_key = lang + 'wiki'
        if 'sitelinks' in obj and wiki_key in obj['sitelinks']:
            return obj['sitelinks'][wiki_key]['title']
        if lang in obj['labels']:
            return obj['labels'][lang]['value']
    return None
def get_item_rels(subj_id, rel, claims):
    """Extract (subject, rel, object) triples for property *rel*.

    Statements whose main snak has no entity-id value are skipped.
    """
    triples = []
    for stmt in claims.get(rel, ()):
        value = stmt['mainsnak'].get('datavalue', {}).get('value', {})
        if 'id' in value:
            triples.append((subj_id, rel, value['id']))
    return triples
def process_dump(dump_path):
    """Stream a Wikidata JSON dump, collecting subclass statements and labels.

    :param dump_path: path to a one-entity-per-line JSON dump
    :return: (statements, labels) -- a list of (subject, P279, object)
        triples and a dict mapping QID -> best label
    """
    statements = []
    labels = {}
    with open(dump_path) as infile:
        infile.readline()  # skip the opening '[' line of the dump
        for line in infile:
            # BUG FIX: qid must be bound before the try body can fail;
            # previously the except clause itself raised NameError when the
            # very first parse failed (e.g. on the closing ']' line).
            qid = None
            try:
                obj = json.loads(line.rstrip(',\n'))
                qid = obj['id']
                if qid not in labels:
                    obj_label = get_label(obj)
                    if obj_label is not None:
                        labels[qid] = obj_label
                if obj['type'] == 'item':
                    statements += get_item_rels(qid, subclass, obj['claims'])
            except Exception as e:
                # Best-effort: report and keep streaming the rest of the dump.
                print('Exception on', qid, '-', e)
    return statements, labels
def graph_from_statements(statements):
    """Build a directed graph with one edge subject -> object per triple."""
    digraph = nx.DiGraph()
    for stmt in statements:
        try:
            digraph.add_edge(stmt[0], stmt[2])
        except Exception:
            # Malformed entries are reported and skipped.
            print("error on line:", stmt)
    return digraph
def get_all_ancestors(graph, node):
    """Return every node from which *node* is reachable via predecessor edges.

    BUG FIX: the old implementation recursed per parent with no visited set,
    which recursed forever on cycles (Wikidata subclass graphs contain them)
    and revisited shared ancestors exponentially. This iterative traversal
    visits each ancestor once; callers that previously de-duplicated the
    returned list with set() are unaffected.
    """
    seen = set()
    frontier = [node]
    while frontier:
        current = frontier.pop()
        for parent in graph.predecessors(current):
            if parent not in seen:
                seen.add(parent)
                frontier.append(parent)
    return list(seen)
if __name__ == "__main__":
if len(sys.argv) > 1:
dump_path = sys.argv[1]
else:
dump_path = 'latest-all.json'
statements, labels = process_dump(dump_path)
print("Dump processed! Making graph...")
graph = graph_from_statements(statements)
print("Graph created with", graph.number_of_nodes(), "nodes and",
graph.number_of_edges(), "edges.")
to_filter = list(roots)
for root in roots:
to_filter += get_all_ancestors(graph, root)
filter_set = set(to_filter)
print(len(filter_set), "items in the new filter.")
filter_dict = {item: labels.get(item, item) for item in filter_set}
with open("filter.json", 'w') as filterfile:
filterfile.write(json.dumps(filter_dict, indent=4, sort_keys=True))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ### Libraries
import warnings
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy import stats
from matplotlib import cm, pyplot as plt
from matplotlib.dates import YearLocator, MonthLocator
from hmmlearn.hmm import GaussianHMM
import scipy
import datetime
import json
import seaborn as sns
import joblib
import pathlib
from plotting import plot_in_sample_hidden_states
from plotting import plot_hidden_states
from plotting import hist_plot
# Apply seaborn's default styling to all figures.
sns.set()
# Silence library warnings (model fitting below is otherwise chatty).
warnings.filterwarnings("ignore")
def obtain_prices_df(csv_filepath, start_date, end_date):
    """Read an OHLCV CSV into a date-indexed DataFrame and keep only the
    rows between start_date and end_date (inclusive)."""
    column_names = ["date", "open", "close", "high", "low", "volume"]
    prices = pd.read_csv(csv_filepath, header=0, names=column_names,
                         index_col="date", parse_dates=True)
    first_day = start_date.strftime("%Y-%m-%d")
    last_day = end_date.strftime("%Y-%m-%d")
    prices = prices.loc[first_day:last_day]
    prices.dropna(inplace=True)
    return prices
def model_selection(X, max_states, max_iter=10000):
    """
    Fit GaussianHMMs with 2..max_states hidden states and score each fit.

    :param X: Feature matrix
    :param max_states: the max number of hidden states
    :param max_iter: the max numbers of model iterations
    :return: aic bic caic best_state (best_state is chosen by minimum BIC;
        the +2 offset maps the argmin index back to a state count)
    """
    # to store Akaike information criterion (AIC) value
    aic_vect = np.empty([0, 1])
    # to store Bayesian information criterion (BIC) value
    bic_vect = np.empty([0, 1])
    # to store the Bozdogan Consistent Akaike Information Criterion (CAIC)
    caic_vect = np.empty([0, 1])
    for state in range(2, max_states + 1):
        # NOTE(review): presumably state**2 + 2*state - 1 is the free-parameter
        # count (transition matrix, initial probs, emissions) -- confirm that
        # it matches the feature dimensionality used here.
        num_params = state**2 + 2 * state - 1
        hmm_model = GaussianHMM(n_components=state, random_state=100,
                                covariance_type="full", n_iter=max_iter).fit(X)
        # Each criterion is -2*log-likelihood plus its complexity penalty.
        aic_vect = np.vstack((aic_vect, -2 * hmm_model.score(X) + 2 * num_params))
        bic_vect = np.vstack((bic_vect, -2 * hmm_model.score(X) + num_params * np.log(X.shape[0])))
        caic_vect = np.vstack((caic_vect, -2 * hmm_model.score(X) +
                               num_params * (np.log(X.shape[0]) + 1)))
    best_state = np.argmin(bic_vect) + 2
    return aic_vect, bic_vect, caic_vect, best_state
def get_best_hmm_model(X, best_state, max_iter=10000):
    """
    Fit the final GaussianHMM with the selected number of hidden states.

    :param X: feature matrix (e.g. stock features)
    :param best_state: the number of hidden states (e.g. from model_selection)
    :param max_iter: max number of EM iterations
    :return: the fitted HMM
    """
    best_model = GaussianHMM(n_components=best_state, random_state=100,
                             covariance_type="full", n_iter=max_iter).fit(X)
    return best_model
def get_expected_return(hmm_model, train_set, train_features):
    """Predict hidden states and compute the one-step expected return.

    BUG FIX: the function previously ignored its `hmm_model` parameter and
    used a module-level global `model` instead, so it silently depended on
    outer state. It now uses the model it was given.

    :param hmm_model: fitted HMM exposing predict/n_components/transmat_
    :param train_set: DataFrame with a 'return' column aligned to the states
    :param train_features: feature matrix passed to hmm_model.predict
    :return: (hidden_states, expected_return)
    """
    hidden_states = hmm_model.predict(train_features)
    # Average realized return conditional on each hidden state.
    ave_return = np.zeros(hmm_model.n_components)
    for i in range(0, hmm_model.n_components):
        mask = hidden_states == i
        ave_return[i] = train_set['return'][mask].mean()
    # Transition probabilities out of the most recent state.
    prob = hmm_model.transmat_[hidden_states[-1]]
    # Expected return = probability-weighted average of per-state means.
    expected_return = sum(prob * ave_return)
    return hidden_states, expected_return
def compare_hidden_states(hmm_model, cols_features, conf_interval, iters=1000):
    """Histogram Monte-Carlo samples drawn from each hidden state, one
    subplot per (feature, state) pair, annotated with a confidence interval.

    NOTE(review): relies on mean_confidence_interval, which is defined
    elsewhere in this project -- confirm it is in scope at call time.
    """
    # plt.figure(figsize=(15, 15))
    fig, axs = plt.subplots(len(cols_features), hmm_model.n_components, figsize=(15, 15))
    colours = cm.prism(np.linspace(0, 1, hmm_model.n_components))
    for i in range(0, hmm_model.n_components):
        # Samples generation.
        # BUG FIX: DataFrame.append was removed in pandas 2.0 -- collect the
        # rows in a list and concatenate once (also O(n) instead of O(n^2)).
        rows = []
        for j in range(0, iters):
            row = np.transpose(hmm_model._generate_sample_from_state(i))
            rows.append(pd.DataFrame(row).T)
        mc_df = pd.concat(rows)
        mc_df.columns = cols_features
        for k in range(0, len(mc_df.columns)):
            axs[k][i].hist(mc_df[cols_features[k]], color=colours[i])
            axs[k][i].set_title(cols_features[k] + " (state " + str(i) + "): \
            " + str(np.round(mean_confidence_interval(mc_df[cols_features[k]], conf_interval), 3)))
            axs[k][i].grid(True)
    plt.tight_layout()
def compute_features(dataset, long_period, short_period, future_period=1):
    """
    Add rolling return/volume features (in place) and return the DataFrame.

    Fix: the original read the module-level global ``future_period``; it is now
    an explicit parameter whose default (1) matches the script's global value,
    so existing three-argument calls behave identically.

    :param dataset: DataFrame with at least 'close' and 'volume' columns
    :param long_period: window length for the long-horizon features
    :param short_period: window length for the short-horizon features
    :param future_period: horizon (in bars) of the look-ahead return label
    :return: the same DataFrame with the feature columns added
    """
    # Daily return.
    dataset['return'] = dataset["close"].pct_change()
    # Mean return over the long window.
    dataset['long_period_return'] = dataset['return'].rolling(long_period).mean()
    # Mean return over the short window.
    dataset['short_period_return'] = dataset['return'].rolling(short_period).mean()
    # Ratio of short-window to long-window average volume.
    dataset['volume_ratio'] = dataset["volume"].rolling(
        short_period).mean() / dataset["volume"].rolling(long_period).mean()
    # Sharpe ratio over the long window, with the risk-free rate taken as 0.
    dataset['Sharpe'] = dataset['return'].rolling(long_period).mean(
    ) / dataset['return'].rolling(long_period).std()  # *np.sqrt(252)
    # Return over the next `future_period` bars (look-ahead label).
    dataset["future_return"] = dataset["close"].pct_change(future_period).shift(-future_period)
    return dataset
# Top-level driver: for each index, fit an HMM on a fixed training window,
# then roll forward one bar at a time, refitting and emitting a +1/0/-1 signal.
pd.options.display.max_rows = 30
pd.options.display.max_columns = 30
PLOT_SHOW = True  # show the plots at the end of the run
# PLOT_SHOW = False  # suppress the plots
# load data and plot
df_data_path = pathlib.Path.cwd() / ".." / "data"
start_date = datetime.datetime(2010, 1, 1)
end_date = datetime.datetime(2021, 12, 31)
# Feature params
future_period = 1
long_period = 7  # long period
short_period = 3  # short period
indexList = ['CSI300', 'CSI905', 'CSI012', 'CSI033', 'CSI036', 'CSI037']
# indexList = ['CSI300']
for index in indexList:
    # Load the index's 2010-2021 history.
    dataset = obtain_prices_df(df_data_path / (index + '.csv'), start_date, end_date)
    dataset = compute_features(dataset, long_period, short_period)
    # Create features
    cols_features = ['long_period_return', 'short_period_return',
                     'volume_ratio', 'Sharpe']  #
    # cols_features = ['ewma','ewma_2'] #
    dataset = dataset.replace([np.inf, -np.inf], np.nan)
    dataset = dataset.dropna()
    # Select the training sample: step backwards from train_end_ind, taking
    # one row every adjustment_period.
    adjustment_period = 1
    train_end_ind = 1500
    train_index = []
    for i in range(train_end_ind, 0, -adjustment_period):
        train_index.append(i)
    train_set = dataset.iloc[train_index]
    train_set = train_set.sort_index()
    train_features = train_set[cols_features]
    # hist plot
    hist_plot(train_set['long_period_return'], str(long_period) + '_days_return')
    hist_plot(train_set['short_period_return'], str(short_period) + '_days_return')
    hist_plot(train_set['volume_ratio'], 'volume_ratio_of' + str(short_period) + str(long_period))
    hist_plot(train_set['Sharpe'], str(long_period) + '_days_Sharpe_ratio')
    # print("train_set:\n", train_set)
    # Everything after the training window becomes the out-of-sample test set.
    test_index = []
    for i in range(train_end_ind + adjustment_period, dataset.shape[0], adjustment_period):
        test_index.append(i)
    test_set = dataset.iloc[test_index]
    test_set = test_set.sort_index()
    test_features = test_set[cols_features]
    # print("test_set:\n", test_set)
    # ### Plot features
    # fig, axs = plt.subplots(len(cols_features), 1, figsize=(15, 15))
    # colours = cm.rainbow(np.linspace(0, 1, len(cols_features)))
    # for i in range(0, len(cols_features)):
    #     axs[i].plot(dataset.reset_index()[cols_features[i]], color=colours[i])
    #     axs[i].set_title(cols_features[i], fontsize=20)
    #     axs[i].grid(True)
    # # ------------------------------------------------------------------------------------------
    # ### get the best states number
    # aic_matrix = np.empty([7, 0])
    # bic_matrix = np.empty([7, 0])
    # best_states_vector = np.empty([0])
    # for i in range(0, 10):
    #     print(i)
    #     train_set_i = dataset[cols_features][i * 100:1000 + i * 100]
    #     aic_vect, bic_vect, caic_vect,best_state = model_selection(X=train_set_i, max_states=8, max_iter=10000)
    #     aic_matrix = np.hstack((aic_matrix, aic_vect))
    #     bic_matrix = np.hstack((bic_matrix, bic_vect))
    #     best_states_vector = np.hstack((best_states_vector, best_state))
    # fig, axs = plt.subplots(1, 1, figsize=(15, 15))
    # axs.plot(bic_matrix[0], label='2-states', alpha=0.9)
    # axs.plot(bic_matrix[1], label='3-states', alpha=0.9)
    # axs.plot(bic_matrix[2], label='4-states', alpha=0.9)
    # axs.plot(bic_matrix[3], label='5-states', alpha=0.9)
    # axs.plot(bic_matrix[4], label='6-states', alpha=0.9)
    # axs.plot(bic_matrix[5], label='7-states', alpha=0.9)
    # axs.plot(bic_matrix[6], label='8-states', alpha=0.9)
    # axs.legend(loc='best')
    # plt.grid(linestyle='-.')
    # print("best_states_vector", best_states_vector)
    # ----------------------------------------------------------------------------------------------------------
    model = get_best_hmm_model(train_features, best_state=4, max_iter=10000)
    plot_hidden_states(model, train_set, train_features, "close")
    # plt.savefig("../figure/hidden_states1.png", dpi=400, bbox_inches='tight')
    plot_in_sample_hidden_states(model, train_set, train_features, "close")
    # plt.savefig("../figure/hidden_states2.png", dpi=400, bbox_inches='tight')
    # print("Best model with {0} states ".format(str(model.n_components)))
    # print('Mean matrix:\n', model.means_)
    # print('Covariance matrix:\n', model.covars_)
    # print('Transition matrix:\n', model.transmat_)
    # ### Rolling prediction
    signal = []
    # NOTE: the range below is evaluated once, so reassigning train_end_ind
    # inside the loop does not change the iteration bounds; both loops yield
    # exactly len(test_set) iterations when adjustment_period == 1.
    for i in range(train_end_ind, dataset.shape[0] - adjustment_period, adjustment_period):
        # print(dataset.iloc[i:, :].index[0].date())
        train_end_ind = i
        train_index = []
        for j in range(train_end_ind, -1, -adjustment_period):
            train_index.append(j)
        train_set = dataset.iloc[train_index]
        train_set = train_set.sort_index()
        train_features = train_set[cols_features]
        # Refit on all data up to bar i, then score the expected next return.
        model = get_best_hmm_model(train_features, best_state=4, max_iter=10000)
        hidden_states, expected_return = get_expected_return(model, train_set, train_features)
        print(dataset.iloc[i:, :].index[0].date(), "current state: {}".format(hidden_states[-1]) +
              ", expected_return:{:.4f}".format(expected_return))
        threshold = train_set['return'].mean()
        # print(threshold)
        # `&` is a bitwise-and of two bools here, equivalent to `and`.
        if ((expected_return > 0.0)
            & (expected_return > 0.1 * threshold)
            ):  # buy: expected return positive and above 0.1x the historical mean
            signal.append(1)
        elif(expected_return < 0.0):  # sell: expected return negative
            signal.append(-1)
        else:
            signal.append(0)  # otherwise keep the current position unchanged
    test_set["signal"] = signal
    test_set.to_csv(df_data_path / ('test' + index + '.csv'))
if PLOT_SHOW:
    plt.show()
|
|
from typing import Callable
import numpy as np
def odesolver45(f: Callable, t: float, y: np.ndarray, h: float, *args, **kwargs):
    """Advance one fixed step of the Runge-Kutta-Fehlberg 4(5) scheme.

    The six stage slopes k1..k6 use the classic Fehlberg tableau; the same
    stages yield both an order-4 and an order-5 estimate of y(t + h).
    Adapted from: https://github.com/simentha/gym-auv/blob/master/gym_auv/objects/auv3d.py

    :param f: right-hand side, called as f(t, y, *args, **kwargs)
    :param t: current time (unused by time-invariant systems)
    :param y: current state vector
    :param h: fixed step size
    :return: tuple (w, q) of the 4th- and 5th-order approximations
    """
    k1 = f(t, y, *args, **kwargs)
    y2 = y + h * k1 / 4.0
    k2 = f(t, y2, *args, **kwargs)
    y3 = y + 3.0 * h * k1 / 32.0 + 9.0 * h * k2 / 32.0
    k3 = f(t, y3, *args, **kwargs)
    y4 = y + 1932.0 * h * k1 / 2197.0 - 7200.0 * h * k2 / 2197.0 + 7296.0 * h * k3 / 2197.0
    k4 = f(t, y4, *args, **kwargs)
    y5 = y + 439.0 * h * k1 / 216.0 - 8.0 * h * k2 + 3680.0 * h * k3 / 513.0 - 845.0 * h * k4 / 4104.0
    k5 = f(t, y5, *args, **kwargs)
    y6 = y - 8.0 * h * k1 / 27.0 + 2 * h * k2 - 3544.0 * h * k3 / 2565 + 1859.0 * h * k4 / 4104.0 - 11.0 * h * k5 / 40.0
    k6 = f(t, y6, *args, **kwargs)
    # Order-4 estimate (k6 unused) and the embedded order-5 estimate.
    w = y + h * (25.0 * k1 / 216.0 + 1408.0 * k3 / 2565.0 + 2197.0 * k4 / 4104.0 - k5 / 5.0)
    q = y + h * (16.0 * k1 / 135.0 + 6656.0 * k3 / 12825.0 + 28561.0 * k4 / 56430.0 - 9.0 * k5 / 50.0 + 2.0 * k6 / 55.0)
    return w, q
|
|
import tensorflow as tf
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Demo: compare pcolormesh/contour/contourf on two simple surfaces.
# Build a 100x100 grid over [-10, 10] x [-10, 10].
n = 100
x = np.linspace(-10, 10, n)
y = np.linspace(-10, 10, n)
X, Y = np.meshgrid(x, y)
plt.figure(figsize=(8, 6))
# Plane z = x + y: cell-based colouring vs many filled contour levels.
Z = X + Y
plt.subplot(221)
plt.pcolormesh(X, Y, Z, cmap='rainbow')
plt.subplot(222)
plt.contourf(X, Y, Z, 100, cmap='rainbow')
# Paraboloid z = x^2 + y^2: contour lines vs filled contours (20 levels).
Z = X**2 + Y**2
plt.subplot(223)
plt.contour(X, Y, Z, 20, cmap='rainbow')
plt.subplot(224)
plt.contourf(X, Y, Z, 20, cmap='rainbow')
plt.tight_layout()
plt.show()
|
|
# Copyright 2015 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SparseArray import SparseArray
from .node import Variable, Function
from .utils import tonparray
from multiprocessing import Pool
import logging
import gc
import os
import gzip
import pickle
import shutil
from time import time
try:
    from tqdm import tqdm
except ImportError:
    # Fallback: when tqdm is not installed, degrade to a no-op wrapper so
    # progress-bar call sites keep working.
    def tqdm(x, **kwargs):
        return x
# Module-wide logger shared by the fit helpers and Ensemble below.
LOGGER = logging.getLogger('EvoDAG')
def fit(X_y_evodag):
    """Pool worker: fit one EvoDAG and return its model (with on-disk caching).

    :param X_y_evodag: tuple (X, y, test_set, evodag_params_dict, tmpdir,
        init_time) packed into one argument for Pool.imap_unordered
    :return: the trained model, or None when fitting raised RuntimeError or
        the remaining time budget was under 2 seconds
    """
    X, y, test_set, evodag, tmpdir, init_time = X_y_evodag
    if tmpdir is not None:
        # Cache keyed by seed: reuse a previously pickled model if present.
        seed = evodag['seed']
        output = os.path.join(tmpdir, '%s.evodag' % seed)
        if os.path.isfile(output):
            with gzip.open(output) as fpt:
                try:
                    return pickle.load(fpt)
                except Exception:
                    # Corrupt or partial cache file -- fall through and refit.
                    pass
    try:
        # Shrink this worker's budget by the time the ensemble already spent.
        time_limit = evodag['time_limit']
        if time_limit is not None:
            evodag['time_limit'] = time_limit - (time() - init_time)
            if evodag['time_limit'] < 2:
                LOGGER.info('Not enough time (seed: %s) ' % evodag['seed'])
                return None
    except KeyError:
        pass
    try:
        evodag = EvoDAG(**evodag)
        evodag.fit(X, y, test_set=test_set)
    except RuntimeError:
        return None
    m = evodag.model
    gc.collect()
    if tmpdir is not None:
        # Persist so a rerun with the same tmpdir can skip this fit.
        with gzip.open(output, 'w') as fpt:
            pickle.dump(m, fpt)
    return m
def decision_function(model_X):
    """Pool worker: evaluate one model's decision function.

    :param model_X: tuple (k, model, X) where k is the model's position
    :return: [k, model.decision_function(X)] so results can be re-ordered
    """
    position, model, data = model_X
    return [position, model.decision_function(data)]
def predict_proba(model_X):
    """Pool worker: evaluate one model's class-probability prediction.

    :param model_X: tuple (k, model, X) where k is the model's position
    :return: [k, model.predict_proba(X)] so results can be re-ordered
    """
    position, model, data = model_X
    return [position, model.predict_proba(data)]
class Model(object):
    """Stores the necessary elements (the trace of nodes) to make predictions
    based on a single evolved individual.

    :param trace: indices (into hist) of the nodes the individual depends on
    :param hist: full node history; only the traced nodes are kept
    :param nvar: number of input variables the model was trained with
    :param classifier: whether this is a classification task
    :param labels: optional label array used to map argmax indices to classes
    :param probability_calibration: optional calibrator for predict_proba
    :param nclasses: number of classes (classification only)
    """
    def __init__(self, trace, hist, nvar=None, classifier=True, labels=None,
                 probability_calibration=None, nclasses=None):
        self._classifier = classifier
        self._trace = trace
        self._hist = hist
        # Map original node index -> position in the compacted trace.
        self._map = {}
        for k, v in enumerate(self._trace):
            self._map[v] = k
        self._hy_test = self._hist[self._trace[-1]].hy_test
        # Keep only the traced nodes, re-indexed via transform().
        self._hist = [self.transform(self._hist[x].tostore()) for x in
                      self._trace]
        self._labels = labels
        self._nvar = nvar
        self._probability_calibration = probability_calibration
        self._nclasses = nclasses

    @property
    def nclasses(self):
        "Number of classes (classification only)"
        return self._nclasses

    @property
    def nvar(self):
        "Number of input variables the model was trained with"
        return self._nvar

    @property
    def multiple_outputs(self):
        "Whether the underlying nodes produce multiple outputs"
        return self._hist[0]._multiple_outputs

    @property
    def classifier(self):
        "whether this is classification or regression task"
        return self._classifier

    @property
    def fitness_vs(self):
        "Fitness in the validation set"
        return self._hist[-1].fitness_vs

    @property
    def size(self):
        "Number of nodes in the model"
        return len(self._hist)

    @property
    def height(self):
        "Height of the final node of the model"
        return self._hist[-1].height

    def inputs(self, counter=None):
        """Count how many times each input variable is used by terminal nodes.

        :param counter: optional collections.Counter to accumulate into
        :return: the Counter mapping variable index -> usage count
        """
        from collections import Counter
        if counter is None:
            counter = Counter()
        for node in self._hist:
            if node.height == 0:
                if isinstance(node._variable, list):
                    for _ in node._variable:
                        counter[_] += 1
                else:
                    counter[node._variable] += 1
        return counter

    def transform(self, v):
        """Re-index a node's variable references into trace positions."""
        if v.height == 0:
            return v
        if v.nargs == 1:
            v.variable = self._map[v.variable]
        else:
            v.variable = [self._map[x] for x in v.variable]
        return v

    def predict_proba(self, X, **kwargs):
        """Calibrated class probabilities for X (requires a calibrator)."""
        X = self.decision_function(X, **kwargs)
        return self._probability_calibration.predict_proba(X)

    def decision_function(self, X, **kwargs):
        "Decision function i.e. the raw data of the prediction"
        if X is None:
            # Cached predictions on the test set provided at training time.
            return self._hy_test
        X = self.convert_features(X)
        if len(X) < self.nvar:
            _ = 'Number of variables differ, trained with %s given %s' % (self.nvar, len(X))
            raise RuntimeError(_)
        hist = self._hist
        # Evaluate nodes in trace order; terminals read X, the rest read hist.
        for node in hist:
            if node.height:
                node.eval(hist)
            else:
                node.eval(X)
            node.normalize()
        # `node` is the last node of the loop, i.e. the model's output node.
        r = node.hy
        # Free intermediate outputs to keep memory bounded.
        for i in hist[:-1]:
            i.hy = None
            i.hy_test = None
        gc.collect()
        return r

    def predict(self, X, **kwargs):
        """Predict labels (classification) or values (regression) for X."""
        hy = self.decision_function(X, **kwargs)
        if self._classifier:
            [x.finite(inplace=True) for x in hy]
            # Fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
            hy = np.array(SparseArray.argmax(hy).full_array(), dtype=int)
            if self._labels is not None:
                hy = self._labels[hy]
        else:
            hy = tonparray(hy)
        return hy

    def graphviz(self, fpt, terminals=True):
        """Write the model as a graphviz digraph to `fpt` (path or file).

        :param fpt: output file object, or a path (opened and closed here)
        :param terminals: whether to emit the terminal (variable) nodes
        """
        flag = False
        if isinstance(fpt, str):
            flag = True
            fpt = open(fpt, 'w')
        fpt.write("digraph EvoDAG {\n")
        last = len(self._hist) - 1
        height = self._hist[-1].height
        try:
            # Linear colour ramp from height 0 -> colour 9 to max height -> 1.
            b, m = np.linalg.solve([[0, height-1], [1, 1]], [9, 1])
        except np.linalg.LinAlgError:
            # Fix: np.linalg.linalg was removed in NumPy 2.0; the public
            # np.linalg.LinAlgError is the same exception class.
            b, m = 0, 1
        done = {}
        for k, n in enumerate(self._hist):
            if isinstance(n, Function):
                done[k] = 1
                name = n.__class__.__name__
                if n.height == 0:
                    cdn = "n{0} [label=\"{1}\" fillcolor=red style=filled];\n"
                    fpt.write(cdn.format(k, name))
                    continue
                color = int(np.round(n.height * m + b))
                extra = "colorscheme=blues9 style=filled color={0}".format(color)
                if k == last:
                    extra = "fillcolor=green style=filled"
                fpt.write("n{0} [label=\"{1}\" {2}];\n".format(k,
                                                               name,
                                                               extra))
                # Renamed from `vars` to avoid shadowing the builtin.
                variables = n._variable
                if not isinstance(variables, list):
                    variables = [variables]
                for j in variables:
                    if j in done:
                        fpt.write("n{0} -> n{1};\n".format(k, j))
            elif terminals:
                cdn = "n{0} [label=\"X{1}\" fillcolor=red style=filled];\n"
                fpt.write(cdn.format(k, n._variable))
                done[k] = 1
        fpt.write("}\n")
        if flag:
            fpt.close()

    @staticmethod
    def convert_features(v):
        """Convert a feature matrix into a list of Variable nodes.

        Accepts None, a list of Variable, an ndarray (samples x features),
        a list of SparseArray columns, or any nested sequence.
        """
        if v is None:
            return None
        if isinstance(v[0], Variable):
            return v
        if isinstance(v, np.ndarray):
            X = v.T
        elif isinstance(v[0], SparseArray):
            X = v
        else:
            X = np.array(v).T
        lst = []
        for var, d in enumerate(X):
            v = Variable(var, 1)
            if isinstance(d, SparseArray):
                v._eval_tr = d
            else:
                v._eval_tr = SparseArray.fromlist(d)
            lst.append(v)
        return lst

    @staticmethod
    def convert_features_test_set(vars, v):
        "Load the test-set columns of `v` into the given Variable nodes"
        if isinstance(v, np.ndarray):
            X = v.T
        else:
            X = v
        for var, d in zip(vars, X):
            if isinstance(d, SparseArray):
                var._eval_ts = d
            else:
                var._eval_ts = SparseArray.fromlist(d)
class Ensemble(object):
    "Ensemble that predicts using the average"

    def __init__(self, models, n_jobs=1, evodags=None, tmpdir=None):
        # models may be None when created via Ensemble.init and fitted later.
        self._models = models
        self._n_jobs = n_jobs
        self._evodags = evodags
        self._tmpdir = tmpdir
        if models is not None:
            self._init()

    def fit(self, X, y, test_set=None):
        """Fit every configured EvoDAG (optionally in parallel).

        Workers that fail or run out of time return None and are dropped;
        when the overall time budget is exhausted the pool is terminated.
        :return: self
        """
        evodags = self._evodags
        init_time = time()
        args = [(X, y, test_set, evodag, self._tmpdir, init_time) for evodag in evodags]
        try:
            time_limit = evodags[0]['time_limit']
        except KeyError:
            time_limit = None
        if time_limit is not None:
            LOGGER.info('time_limit in Ensemble: %0.2f' % time_limit)
        if self._n_jobs == 1:
            _ = [fit(x) for x in tqdm(args)]
            self._models = [x for x in _ if x is not None]
        else:
            p = Pool(self._n_jobs, maxtasksperchild=1)
            self._models = []
            for x in tqdm(p.imap_unordered(fit, args),
                          total=len(args)):
                if x is not None:
                    self._models.append(x)
                if time_limit is not None and time() - init_time > time_limit:
                    # Budget exhausted: stop the remaining workers.
                    p.terminate()
                    break
            p.close()
        if self._tmpdir is not None:
            shutil.rmtree(self._tmpdir)
        self._init()
        if time_limit is not None:
            LOGGER.info('Used time in Ensemble: %0.2f' % (time() - init_time))
        return self

    def _init(self):
        "Derive ensemble-wide attributes from the first fitted model"
        self._labels = self._models[0]._labels
        self._classifier = False
        flag = False
        if self._models[0]._classifier:
            flag = True
        self._classifier = flag

    @property
    def nclasses(self):
        "Number of classes (classification only)"
        return self.models[0].nclasses

    @property
    def probability_calibration(self):
        "Whether the member models carry a probability calibrator"
        return self.models[0]._probability_calibration is not None

    @property
    def models(self):
        "List containing the models that compose the ensemble"
        return self._models

    @property
    def multiple_outputs(self):
        "Whether the member models produce multiple outputs"
        return self.models[0].multiple_outputs

    @property
    def classifier(self):
        "whether this is classification or regression task"
        return self._classifier

    @property
    def fitness_vs(self):
        "Median Fitness in the validation set"
        values = [x.fitness_vs for x in self.models]
        return np.median(values)

    @property
    def size(self):
        "Median model size across the ensemble"
        values = [x.size for x in self.models]
        return np.median(values)

    @property
    def height(self):
        "Median model height across the ensemble"
        values = [x.height for x in self.models]
        return np.median(values)

    def inputs(self, counter=None):
        """Aggregate input-variable usage counts over all member models."""
        from collections import Counter
        if counter is None:
            counter = Counter()
        for m in self.models:
            m.inputs(counter=counter)
        return counter

    def _decision_function_raw(self, X, cpu_cores=1):
        """Per-model decision functions, preserving model order."""
        if cpu_cores == 1:
            r = [m.decision_function(X) for m in self._models]
        else:
            p = Pool(cpu_cores, maxtasksperchild=1)
            args = [(k, m, X) for k, m in enumerate(self._models)]
            r = [x for x in tqdm(p.imap_unordered(decision_function,
                                                  args),
                                 total=len(args))]
            # imap_unordered scrambles the order; restore it by position k.
            r.sort(key=lambda x: x[0])
            r = [x[1] for x in r]
            p.close()
        return r

    def raw_decision_function(self, X):
        """Decision functions of all models as a (samples x outputs) array."""
        hy = self._decision_function_raw(X, cpu_cores=self._n_jobs)
        if isinstance(hy[0], list):
            # Flatten multiple-output models into one list of SparseArrays.
            _ = []
            [[_.append(y) for y in x] for x in hy]
            hy = _
        if self.classifier:
            [x.finite(inplace=True) for x in hy]
        return np.array([tonparray(x) for x in hy]).T

    def _predict_proba_raw(self, X, cpu_cores=1):
        """Per-model calibrated probabilities, preserving model order."""
        if cpu_cores == 1:
            r = [m.predict_proba(X) for m in self._models]
        else:
            p = Pool(cpu_cores, maxtasksperchild=1)
            args = [(k, m, X) for k, m in enumerate(self._models)]
            r = [x for x in tqdm(p.imap_unordered(predict_proba,
                                                  args),
                                 total=len(args))]
            r.sort(key=lambda x: x[0])
            r = [x[1] for x in r]
            p.close()
        return r

    def predict_proba(self, X):
        """Class probabilities: calibrated mean when a calibrator exists,
        otherwise the per-class vote fractions of the member models."""
        if self.probability_calibration:
            proba = np.array(self._predict_proba_raw(X, cpu_cores=self._n_jobs))
            proba = np.mean(proba, axis=0)
            proba /= np.sum(proba, axis=1)[:, np.newaxis]
            proba[np.isnan(proba)] = 1. / self.nclasses
            return proba
        hy = self._decision_function_raw(X, cpu_cores=self._n_jobs)
        minlength = len(hy[0])
        hy = [SparseArray.argmax(x) for x in hy]
        # Fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
        hy = np.array([x.full_array() for x in hy], dtype=int).T
        hy = [np.bincount(x, minlength=minlength) for x in hy]
        return np.array([x / float(x.sum()) for x in hy])

    def decision_function(self, X, cpu_cores=1):
        """Median of the member models' decision functions."""
        cpu_cores = max(cpu_cores, self._n_jobs)
        r = self._decision_function_raw(X, cpu_cores=cpu_cores)
        if isinstance(r[0], SparseArray):
            r = np.array([tonparray(x) for x in r if x.isfinite()])
            r = np.median(r, axis=0)
        else:
            [[x.finite(inplace=True) for x in o] for o in r]
            r = np.array([[tonparray(y) for y in x] for x in r])
            r = np.median(r, axis=0)
        return r.T

    def predict(self, X, cpu_cores=1):
        """Predict labels (classification) or median values (regression)."""
        cpu_cores = max(cpu_cores, self._n_jobs)
        if self.classifier:
            return self.predict_cl(X, cpu_cores=cpu_cores)
        return self.decision_function(X, cpu_cores=cpu_cores)

    def predict_cl(self, X, cpu_cores=1):
        """Classification by majority vote across the member models."""
        cpu_cores = max(cpu_cores, self._n_jobs)
        hy = [SparseArray.argmax(x) for x in
              self._decision_function_raw(X, cpu_cores=cpu_cores)]
        # Fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
        hy = np.array([x.full_array() for x in hy], dtype=int).T
        hy = [np.bincount(x).argmax() for x in hy]
        if self._labels is not None:
            hy = self._labels[hy]
        return hy

    def graphviz(self, directory, **kwargs):
        "Directory to store the graphviz models"
        import os
        if not os.path.isdir(directory):
            os.mkdir(directory)
        output = os.path.join(directory, 'evodag-%s')
        for k, m in enumerate(self.models):
            m.graphviz(output % k, **kwargs)

    @classmethod
    def init(cls, n_estimators=30, n_jobs=1, tmpdir=None, **kwargs):
        """Create an untrained ensemble of n_estimators configurations whose
        seeds run consecutively starting at kwargs.get('seed', 0)."""
        try:
            init_seed = kwargs['seed']
            del kwargs['seed']
        except KeyError:
            init_seed = 0
        lst = []
        for x in range(init_seed, init_seed + n_estimators):
            kwargs['seed'] = x
            lst.append(kwargs.copy())
        if tmpdir is not None and not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        return cls(None, evodags=lst, n_jobs=n_jobs, tmpdir=tmpdir)
class EvoDAGE(object):
    """Scikit-learn-style front end that delegates to an Ensemble of EvoDAGs."""
    def __init__(self, time_limit=None, **kwargs):
        # time_limit is forwarded into every member EvoDAG configuration.
        self._m = Ensemble.init(time_limit=time_limit, **kwargs)
        self._time_limit = time_limit
    @property
    def time_limit(self):
        "Overall time budget in seconds (None means unlimited)"
        return self._time_limit
    @time_limit.setter
    def time_limit(self, time_limit):
        self._time_limit = time_limit
        # Propagate the new budget to every pending EvoDAG configuration.
        for x in self._m._evodags:
            x['time_limit'] = self._time_limit
    def fit(self, *args, **kwargs):
        "Train the ensemble; see Ensemble.fit"
        return self._m.fit(*args, **kwargs)
    def predict(self, *args, **kwargs):
        "Delegate to Ensemble.predict"
        return self._m.predict(*args, **kwargs)
    def decision_function(self, *args, **kwargs):
        "Delegate to Ensemble.decision_function"
        return self._m.decision_function(*args, **kwargs)
    def raw_decision_function(self, *args, **kwargs):
        "Delegate to Ensemble.raw_decision_function"
        return self._m.raw_decision_function(*args, **kwargs)
    def predict_proba(self, *args, **kwargs):
        "Delegate to Ensemble.predict_proba"
        return self._m.predict_proba(*args, **kwargs)
class EvoDAG(EvoDAGE):
    """Single-model counterpart of EvoDAGE: wraps one EvoDAG instead of an
    Ensemble, reusing the predict/decision_function delegation of the base."""
    def __init__(self, **kwargs):
        # Deliberately does not call super().__init__: _m holds a single
        # EvoDAG instance rather than an Ensemble.
        from EvoDAG import EvoDAG as evodag
        self._m = evodag.init(**kwargs)
    def fit(self, *args, **kwargs):
        self._m.fit(*args, **kwargs)
        # After training, replace the trainer with its lightweight model.
        self._m = self._m.model()
        return self
    @property
    def model(self):
        "The trained model (available after fit)"
        return self._m
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Fever top level CLI tool."""
import os
import pathlib
from absl import app
from absl import flags
from absl import logging
from apache_beam.runners.direct import direct_runner
from language.serene import boolq_tfds
from language.serene import claim_tfds
from language.serene import config
from language.serene import constants
from language.serene import fever_tfds
from language.serene import layers
from language.serene import scrape_db
from language.serene import text_matcher
from language.serene import training
from language.serene import util
from language.serene import wiki_db
from language.serene import wiki_tfds
import matplotlib
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
matplotlib.use('TKAgg')
def train_model(
    *,
    model_config,
    debug = False,
    tb_log_dir = None,
    distribution_strategy = None,
    tpu = None):
    """Train an evidence matching model.

    Args:
      model_config: Set of parameters used for ModelConfig
      debug: Whether to enable debug features (tiny 5-step/1-epoch schedule)
      tb_log_dir: Where, if any, to log to tb
      distribution_strategy: CPU/GPU/TPU
      tpu: TPU config, if using TPU
    """
    trainer = training.Trainer(
        model_config,
        debug=debug,
        tpu=tpu,
        distribution_strategy=distribution_strategy,
        tb_log_dir=tb_log_dir)
    # Debug mode shrinks the schedule; None lets the trainer use its defaults.
    steps_per_epoch = 5 if debug else None
    validation_steps = 5 if debug else None
    epochs = 1 if debug else None
    trainer.train(
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps)
def embed_wiki(*, data_dir, model_checkpoint, shard,
               num_shards):
    """Embed one shard of wikipedia with the given model checkpoint.

    Args:
      data_dir: Data directory for Wiki TFDS dataset
      model_checkpoint: Checkpoint of model to use
      shard: The shard to embed, intention is to run this in parallel
      num_shards: Number of shards to write.
    """
    trainer = training.Trainer.load(model_checkpoint)
    wiki_builder = wiki_tfds.WikipediaText(data_dir=data_dir)
    wiki_sents = wiki_builder.as_dataset(split='validation').shard(
        num_shards=num_shards, index=shard)
    wikipedia_urls, sentence_ids, encodings = trainer.embed_wiki_dataset(
        wiki_sents)
    # Write the three arrays under <checkpoint>/wikipedia, in the same order
    # as before: embeddings, then urls, then sentence ids.
    outputs = (
        (f'embeddings_{shard}_{num_shards}.npy', encodings),
        (f'urls_{shard}_{num_shards}.npy', wikipedia_urls),
        (f'sentence_ids_{shard}_{num_shards}.npy', sentence_ids),
    )
    for filename, array in outputs:
        path = os.path.join(model_checkpoint, 'wikipedia', filename)
        with util.safe_open(path, 'wb') as f:
            np.save(f, array, allow_pickle=False)
def embed_claims(*, claim_tfds_data, train_claim_ids_npy_filename,
                 train_embeddings_npy_filename,
                 val_claim_ids_npy_filename,
                 val_embeddings_npy_filename, model_checkpoint):
    """Embed the train/validation claims with the given model checkpoint.

    Args:
      claim_tfds_data: path to claim tfds data
      train_claim_ids_npy_filename: Path to write train claim ids to
      train_embeddings_npy_filename: Path to write train embeddings to
      val_claim_ids_npy_filename: Path to write validation ids to
      val_embeddings_npy_filename: Path to write validation embeddings to
      model_checkpoint: The checkpoint of the model to use for embedding
    """
    logging.info('Loading model')
    trainer = training.Trainer.load(model_checkpoint)
    logging.info('Building claim datasets')
    claim_builder = claim_tfds.ClaimDataset(data_dir=claim_tfds_data)
    train_claims = claim_builder.as_dataset(split='train')
    val_claims = claim_builder.as_dataset(split='validation')
    logging.info('Embedding claims')
    train_claim_ids, train_embeddings = trainer.embed_claim_dataset(train_claims)
    val_claim_ids, val_embeddings = trainer.embed_claim_dataset(val_claims)
    out_dir = pathlib.Path(model_checkpoint) / 'claims'
    # Save in the same order as before: train ids, train embeddings,
    # validation ids, validation embeddings.
    outputs = (
        (out_dir / train_claim_ids_npy_filename, train_claim_ids),
        (out_dir / train_embeddings_npy_filename, train_embeddings),
        (out_dir / val_claim_ids_npy_filename, val_claim_ids),
        (out_dir / val_embeddings_npy_filename, val_embeddings),
    )
    for path, array in outputs:
        with util.safe_open(path, 'wb') as f:
            np.save(f, array, allow_pickle=False)
def preprocess(
    *,
    common_config,
    scrape_type,
    data_dir,
    download_dir):
    """Preprocess the fever data to the TFDS Fever data.

    Args:
      common_config: Common configuration from config.Config
      scrape_type: Which scrape to use, drqa/lucene/ukp, in training
      data_dir: Where to write data to
      download_dir: Where to download data to, unused but required by TFDS API
    """
    logging.info('Creating fever dataset builder')
    # Pass the configuration fields straight through to the builder.
    builder = fever_tfds.FeverEvidence(
        wiki_db_path=common_config.wikipedia_db_path,
        text_matcher_params_path=common_config.text_matcher_params,
        fever_train_path=common_config.fever_train,
        fever_dev_path=common_config.fever_dev,
        fever_test_path=common_config.fever_test,
        drqa_db_path=common_config.drqa_scrape_db_path,
        lucene_db_path=common_config.lucene_scrape_db_path,
        data_dir=data_dir,
        n_similar_negatives=common_config.n_similar_negatives,
        n_background_negatives=common_config.n_background_negatives,
        ukp_docs_train=common_config.ukp_docs_train,
        ukp_docs_dev=common_config.ukp_docs_dev,
        ukp_docs_test=common_config.ukp_docs_test,
        train_scrape_type=scrape_type,
        title_in_scoring=common_config.title_in_scoring,
        n_inference_candidates=common_config.n_inference_candidates,
        include_not_enough_info=common_config.include_not_enough_info,
        n_inference_documents=common_config.n_inference_documents,
        max_inference_sentence_id=common_config.max_inference_sentence_id,
    )
    logging.info('Preparing fever evidence dataset')
    download_config = tfds.download.DownloadConfig(
        beam_runner=direct_runner.DirectRunner())
    builder.download_and_prepare(
        download_dir=download_dir, download_config=download_config)
def wiki_preprocess(*,
                    common_config,
                    data_dir,
                    download_dir,
                    max_sentence_id = 30):
    """Preprocess wikipedia dump to TFDS format.

    Args:
      common_config: Configuration
      data_dir: Where to write data to
      download_dir: Where to download data to, unused but required by TFDS API
      max_sentence_id: The max sentence_id to take on each wikipedia page
    """
    logging.info('Creating wikipedia dataset builder')
    wiki_db_path = common_config.wikipedia_db_path
    builder = wiki_tfds.WikipediaText(
        wiki_db_path=wiki_db_path,
        data_dir=data_dir,
        max_sentence_id=max_sentence_id,
    )
    logging.info('Preparing wikipedia dataset')
    # Bug fix: the original called runner.FlumeRunner(), but `runner` is not
    # imported anywhere in this module and raised NameError. Use the same
    # DirectRunner the sibling preprocess() uses.
    download_config = tfds.download.DownloadConfig(
        beam_runner=direct_runner.DirectRunner(),)
    builder.download_and_prepare(
        download_dir=download_dir, download_config=download_config)
def claim_preprocess(*, common_config, data_dir,
                     download_dir):
    """Preprocess only claims TFDS format.

    Args:
      common_config: Common global config
      data_dir: Where to write data to
      download_dir: Where to download data to, unused but required by TFDS API
    """
    logging.info('Creating claim dataset builder')
    train_path = common_config.fever_train
    dev_path = common_config.fever_dev
    builder = claim_tfds.ClaimDataset(
        fever_train_path=train_path,
        fever_dev_path=dev_path,
        data_dir=data_dir,
    )
    builder.download_and_prepare(
        download_dir=download_dir,
        download_config=tfds.download.DownloadConfig())
def boolq_preprocess(*, common_config, data_dir,
                     download_dir):
    """Preprocess boolq as fever-like claims TFDS format.

    Args:
      common_config: Common global config
      data_dir: Where to write data to
      download_dir: Where to download data to, unused but required by TFDS API
    """
    # Fix: the log message was copy-pasted from claim_preprocess and said
    # 'claim' even though this builds the boolq dataset.
    logging.info('Creating boolq dataset builder')
    builder = boolq_tfds.BoolQClaims(
        boolq_train_path=common_config.boolq_train,
        boolq_dev_path=common_config.boolq_dev,
        data_dir=data_dir,
    )
    download_config = tfds.download.DownloadConfig()
    builder.download_and_prepare(
        download_dir=download_dir, download_config=download_config)
# Command-line interface: one 'command' sub-command plus shared options.
FLAGS = flags.FLAGS
flags.DEFINE_enum('command', None, [
    'preprocess',
    'train_model',
    'wiki_preprocess',
    'claim_preprocess',
    'boolq_preprocess',
    'embed_wiki',
    'embed_claims',
    'cache_wikipedia',
    'load_wikipedia',
    'cache_scrapes',
], 'The sub-command to run')
flags.DEFINE_bool('debug', False,
                  'Enable debug mode for functions that support it')
flags.DEFINE_integer('seed', None, 'random seed to set')
# train flags
flags.DEFINE_string('tpu', None, 'TPU configuration')
flags.DEFINE_string('tb_log_root', None, 'Tensorboard logging directory')
flags.DEFINE_string('distribution_strategy', None,
                    'TF distribution, cpu/gpu/tpu')
# model configuration flags
flags.DEFINE_string('model_checkpoint_root', '', 'The root for saving models.')
flags.DEFINE_integer('buffer_size', 1_000, 'Buffer size for tf.data.Dataset')
flags.DEFINE_integer('experiment_id', None, '')
flags.DEFINE_integer('work_unit_id', None, '')
# model hyper parameters
flags.DEFINE_integer('batch_size', 128, 'batch size for training')
flags.DEFINE_integer('word_emb_size', 300, 'Word embedding size')
flags.DEFINE_integer('hidden_size', 100, 'LSTM hidden state size')
flags.DEFINE_float('learning_rate', 1e-4, 'Learning rate')
flags.DEFINE_float('positive_class_weight', None, 'Weight for positive class')
flags.DEFINE_integer('max_epochs', 50, 'Max number of epochs to train for')
flags.DEFINE_float('dropout', .5, 'Dropout rate')
flags.DEFINE_float('bert_dropout', .1,
                   'Dropout rate on final embeddings computed by BERT')
flags.DEFINE_enum('activation', 'gelu', ['gelu', 'relu', 'elu'],
                  'Activation function for non-linear layers')
flags.DEFINE_bool('use_batch_norm', True, 'Whether to use batch norm')
flags.DEFINE_enum('tokenizer', 'basic', ['basic', 'bert'],
                  'Which tokenizer to use')
flags.DEFINE_enum('text_encoder', 'basic', ['basic', 'bert'],
                  'Which text encoder (text to int) to use')
flags.DEFINE_bool('basic_lowercase', True,
                  'Whether basic encoder should lowercase input')
flags.DEFINE_enum('matcher', 'product_matcher',
                  list(layers.matcher_registry.keys()),
                  'How to compare claims and evidence for evidence matching')
flags.DEFINE_integer('matcher_hidden_size', 200,
                     'Size of hidden size in matcher, if it has one')
flags.DEFINE_enum('embedder', 'classic_embedder',
                  ['classic_embedder', 'bert_embedder'],
                  'Which embedder to use (word indices -> embeddings)')
flags.DEFINE_enum(
    'contextualizer', 'gru', ['gru', 'rnn', 'lstm', 'bert'],
    'What type of contextualizer to use in claim/evidence encoder')
flags.DEFINE_integer('context_num_layers', 1,
                     'Number of GRU/LSTM/RNN layers in contextualizer')
flags.DEFINE_bool('tied_encoders', True,
                  'Whether to tie claim/evidence encoder parameters')
flags.DEFINE_bool('bidirectional', True,
                  'Whether to make GRU/LSTM/RNN bidirectional.')
flags.DEFINE_enum(
    'model', 'two_tower', ['one_tower', 'two_tower'],
    'Which type of model to use, a no-op since one_tower is not implemented')
flags.DEFINE_enum('bert_model_name', 'base', ['base', 'large'],
                  'Type of bert to use')
flags.DEFINE_integer('bert_max_seq_length', 100, 'max seq length for bert')
# inference flags
flags.DEFINE_string('inference_model_checkpoint', None,
                    'Checkpoint of model to run inference with')
flags.DEFINE_integer('inference_shard', None, 'Inference shard for current job')
flags.DEFINE_integer('inference_num_shards', None,
                     'Total number of inference shards')
flags.DEFINE_integer(
    'projection_dim', -1,
    'Dimension to project output of embedder to. If -1, do not project')
flags.DEFINE_bool('include_title', True, 'Whether to prepend title to evidence')
flags.DEFINE_bool('include_sentence_id', False,
                  'Whether to prepend sentence_id to evidence')
flags.DEFINE_bool('bert_trainable', True, 'Whether bert params are trainable.')
flags.DEFINE_enum('scrape_type', constants.UKP_PRED, constants.DOC_TYPES, '')
flags.DEFINE_bool(
    'classify_claim', False,
    'Whether to classify claims as support/refute/not enough info')
flags.DEFINE_integer(
    'n_inference_candidates', None,
    'The maximum number of sentences to return for each claim during inference')
flags.DEFINE_integer(
    'n_inference_documents', None,
    'The maximum number of documents to generate sentences from during inference'
)
flags.DEFINE_bool('include_not_enough_info', None,
                  'Whether to include not enough information claims')
flags.DEFINE_bool('title_in_scoring', None,
                  'Whether to concat titles to evidence in tfidf scoring')
# These are intentionally set to None, they must all be defined and consistent
# Across command invocations. The default values are set in config.py
flags.DEFINE_string('fever_train', None, '')
flags.DEFINE_string('fever_dev', None, '')
flags.DEFINE_string('fever_test', None, '')
flags.DEFINE_string('boolq_train', None, '')
flags.DEFINE_string('boolq_dev', None, '')
flags.DEFINE_string('lucene_train_scrapes', None, '')
flags.DEFINE_string('lucene_dev_scrapes', None, '')
flags.DEFINE_string('lucene_test_scrapes', None, '')
flags.DEFINE_string('drqa_train_scrapes', None, '')
flags.DEFINE_string('drqa_dev_scrapes', None, '')
flags.DEFINE_string('drqa_test_scrapes', None, '')
flags.DEFINE_string('text_matcher_params', None, '')
flags.DEFINE_string('bert_checkpoint', None, '')
flags.DEFINE_string('fever_evidence_tfds_data', None, '')
flags.DEFINE_string('fever_evidence_tfds_download', None, '')
flags.DEFINE_string('wiki_tfds_data', None, '')
flags.DEFINE_string('wiki_tfds_download', None, '')
flags.DEFINE_string('claim_tfds_data', None, '')
flags.DEFINE_string('claim_tfds_download', None, '')
flags.DEFINE_string('boolq_tfds_data', None, '')
flags.DEFINE_string('boolq_tfds_download', None, '')
flags.DEFINE_string('bert_base_uncased_model', None, '')
flags.DEFINE_string('bert_large_uncased_model', None, '')
flags.DEFINE_string('bert_base_uncased_vocab', None, '')
flags.DEFINE_string('bert_large_uncased_vocab', None, '')
flags.DEFINE_string('train_claim_ids_npy', None, '')
flags.DEFINE_string('train_embeddings_npy', None, '')
flags.DEFINE_string('val_claim_ids_npy', None, '')
flags.DEFINE_string('val_embeddings_npy', None, '')
flags.DEFINE_integer('max_claim_tokens', None, '')
flags.DEFINE_integer('max_evidence_tokens', None, '')
flags.DEFINE_integer('max_evidence', None, '')
flags.DEFINE_integer('n_similar_negatives', None, '')
flags.DEFINE_integer('n_background_negatives', None, '')
flags.DEFINE_string('wikipedia_db_path', None, '')
flags.DEFINE_string('lucene_scrape_db_path', None, '')
flags.DEFINE_string('drqa_scrape_db_path', None, '')
flags.DEFINE_string('ukp_docs_train', None, '')
flags.DEFINE_string('ukp_docs_dev', None, '')
flags.DEFINE_string('ukp_docs_test', None, '')
flags.DEFINE_integer('max_inference_sentence_id', None, '')
flags.DEFINE_float('claim_loss_weight', None, '')
def main(_):
  """Dispatch the pipeline stage selected by --command.

  Collects every explicitly-set common flag into a config.Config and routes
  to one of: preprocess, embed_wiki, embed_claims, wiki_preprocess,
  claim_preprocess, boolq_preprocess, or train_model.

  Raises:
    ValueError: if FLAGS.command is not one of the known subcommands, or
      FLAGS.bert_model_name is neither 'base' nor 'large'.
  """
  flags.mark_flag_as_required('command')
  tf.enable_v2_behavior()
  # Parse the common flags from FLAGS
  common_flags = {
      'fever_train': FLAGS.fever_train,
      'fever_dev': FLAGS.fever_dev,
      'fever_test': FLAGS.fever_test,
      'lucene_train_scrapes': FLAGS.lucene_train_scrapes,
      'lucene_dev_scrapes': FLAGS.lucene_dev_scrapes,
      'lucene_test_scrapes': FLAGS.lucene_test_scrapes,
      'lucene_scrape_db_path': FLAGS.lucene_scrape_db_path,
      'drqa_train_scrapes': FLAGS.drqa_train_scrapes,
      'drqa_dev_scrapes': FLAGS.drqa_dev_scrapes,
      'drqa_test_scrapes': FLAGS.drqa_test_scrapes,
      'drqa_scrape_db_path': FLAGS.drqa_scrape_db_path,
      'text_matcher_params': FLAGS.text_matcher_params,
      'bert_checkpoint': FLAGS.bert_checkpoint,
      'fever_evidence_tfds_data': FLAGS.fever_evidence_tfds_data,
      'fever_evidence_tfds_download': FLAGS.fever_evidence_tfds_download,
      'wiki_tfds_data': FLAGS.wiki_tfds_data,
      'wiki_tfds_download': FLAGS.wiki_tfds_download,
      'claim_tfds_data': FLAGS.claim_tfds_data,
      'claim_tfds_download': FLAGS.claim_tfds_download,
      'boolq_tfds_data': FLAGS.boolq_tfds_data,
      'boolq_tfds_download': FLAGS.boolq_tfds_download,
      'bert_base_uncased_model': FLAGS.bert_base_uncased_model,
      'bert_large_uncased_model': FLAGS.bert_large_uncased_model,
      'bert_base_uncased_vocab': FLAGS.bert_base_uncased_vocab,
      'bert_large_uncased_vocab': FLAGS.bert_large_uncased_vocab,
      'train_claim_ids_npy': FLAGS.train_claim_ids_npy,
      'train_embeddings_npy': FLAGS.train_embeddings_npy,
      'val_claim_ids_npy': FLAGS.val_claim_ids_npy,
      'val_embeddings_npy': FLAGS.val_embeddings_npy,
      'max_claim_tokens': FLAGS.max_claim_tokens,
      'max_evidence_tokens': FLAGS.max_evidence_tokens,
      'max_evidence': FLAGS.max_evidence,
      'n_similar_negatives': FLAGS.n_similar_negatives,
      'n_background_negatives': FLAGS.n_background_negatives,
      'wikipedia_db_path': FLAGS.wikipedia_db_path,
      'ukp_docs_train': FLAGS.ukp_docs_train,
      'ukp_docs_dev': FLAGS.ukp_docs_dev,
      'ukp_docs_test': FLAGS.ukp_docs_test,
      'boolq_train': FLAGS.boolq_train,
      'boolq_dev': FLAGS.boolq_dev,
      'n_inference_candidates': FLAGS.n_inference_candidates,
      'n_inference_documents': FLAGS.n_inference_documents,
      'include_not_enough_info': FLAGS.include_not_enough_info,
      'title_in_scoring': FLAGS.title_in_scoring,
      'max_inference_sentence_id': FLAGS.max_inference_sentence_id,
      'claim_loss_weight': FLAGS.claim_loss_weight,
  }
  # Remove anything that is not defined and fallback to defaults
  common_flags = {k: v for k, v in common_flags.items() if v is not None}
  # Create configuration from non-None flags and passthrough everywhere.
  common_config = config.Config(**common_flags)
  if FLAGS.command == 'preprocess':
    preprocess(
        common_config=common_config,
        data_dir=common_config.fever_evidence_tfds_data,
        download_dir=common_config.fever_evidence_tfds_download,
        scrape_type=FLAGS.scrape_type,
    )
  elif FLAGS.command == 'embed_wiki':
    embed_wiki(
        data_dir=common_config.wiki_tfds_data,
        model_checkpoint=FLAGS.inference_model_checkpoint,
        shard=FLAGS.inference_shard,
        num_shards=FLAGS.inference_num_shards,
    )
  elif FLAGS.command == 'embed_claims':
    embed_claims(
        claim_tfds_data=common_config.claim_tfds_data,
        train_claim_ids_npy_filename=common_config.train_claim_ids_npy,
        train_embeddings_npy_filename=common_config.train_embeddings_npy,
        val_claim_ids_npy_filename=common_config.val_claim_ids_npy,
        val_embeddings_npy_filename=common_config.val_embeddings_npy,
        model_checkpoint=FLAGS.inference_model_checkpoint)
  elif FLAGS.command == 'wiki_preprocess':
    wiki_preprocess(
        common_config=common_config,
        data_dir=common_config.wiki_tfds_data,
        download_dir=common_config.wiki_tfds_download,
    )
  elif FLAGS.command == 'claim_preprocess':
    claim_preprocess(
        data_dir=common_config.claim_tfds_data,
        download_dir=common_config.claim_tfds_download,
        common_config=common_config,
    )
  elif FLAGS.command == 'boolq_preprocess':
    boolq_preprocess(
        data_dir=common_config.boolq_tfds_data,
        download_dir=common_config.boolq_tfds_download,
        common_config=common_config,
    )
  elif FLAGS.command == 'train_model':
    # Tensorboard and checkpoint directories are namespaced by experiment id
    # and work unit id when both are provided (experiment-manager runs).
    if FLAGS.tb_log_root is None:
      tb_log_dir = None
    else:
      tb_parts = [FLAGS.tb_log_root, FLAGS.fever_experiment_id]
      if FLAGS.experiment_id is not None and FLAGS.work_unit_id is not None:
        tb_parts.append(str(FLAGS.experiment_id))
        tb_parts.append(str(FLAGS.work_unit_id))
      tb_log_dir = os.path.join(*tb_parts)
    model_checkpoint_parts = [
        FLAGS.model_checkpoint_root,
        FLAGS.fever_experiment_id,
    ]
    if FLAGS.experiment_id is not None and FLAGS.work_unit_id is not None:
      model_checkpoint_parts.append(str(FLAGS.experiment_id))
      model_checkpoint_parts.append(str(FLAGS.work_unit_id))
    model_checkpoint = os.path.join(*model_checkpoint_parts)
    # -1 is the flag-level sentinel for "no projection layer".
    if FLAGS.projection_dim == -1:
      projection_dim = None
    else:
      projection_dim = FLAGS.projection_dim
    if FLAGS.bert_model_name == 'base':
      bert_vocab = common_config.bert_base_uncased_vocab
      bert_model_path = common_config.bert_base_uncased_model
    elif FLAGS.bert_model_name == 'large':
      bert_vocab = common_config.bert_large_uncased_vocab
      bert_model_path = common_config.bert_large_uncased_model
    else:
      raise ValueError('Invalid bert model')
    # These values must be json serializable, which is why
    # common_config: config.Config is not passed in
    model_config = training.ModelConfig(
        fever_experiment_id=FLAGS.fever_experiment_id,
        model_checkpoint=model_checkpoint,
        dataset=common_config.fever_evidence_tfds_data,
        buffer_size=FLAGS.buffer_size,
        batch_size=FLAGS.batch_size,
        word_emb_size=FLAGS.word_emb_size,
        hidden_size=FLAGS.hidden_size,
        learning_rate=FLAGS.learning_rate,
        positive_class_weight=FLAGS.positive_class_weight,
        max_epochs=FLAGS.max_epochs,
        dropout=FLAGS.dropout,
        activation=FLAGS.activation,
        use_batch_norm=FLAGS.use_batch_norm,
        tokenizer=FLAGS.tokenizer,
        text_encoder=FLAGS.text_encoder,
        basic_lowercase=FLAGS.basic_lowercase,
        embedder=FLAGS.embedder,
        contextualizer=FLAGS.contextualizer,
        tied_encoders=FLAGS.tied_encoders,
        bidirectional=FLAGS.bidirectional,
        matcher=FLAGS.matcher,
        matcher_hidden_size=FLAGS.matcher_hidden_size,
        model=FLAGS.model,
        bert_model_name=FLAGS.bert_model_name,
        bert_max_seq_length=FLAGS.bert_max_seq_length,
        bert_model_path=bert_model_path,
        bert_vocab_path=bert_vocab,
        bert_trainable=FLAGS.bert_trainable,
        bert_dropout=FLAGS.bert_dropout,
        context_num_layers=FLAGS.context_num_layers,
        projection_dim=projection_dim,
        fever_dev_path=common_config.fever_dev,
        max_evidence=common_config.max_evidence,
        max_claim_tokens=common_config.max_claim_tokens,
        max_evidence_tokens=common_config.max_evidence_tokens,
        include_title=FLAGS.include_title,
        include_sentence_id=FLAGS.include_sentence_id,
        n_similar_negatives=common_config.n_similar_negatives,
        n_background_negatives=common_config.n_background_negatives,
        include_not_enough_info=common_config.include_not_enough_info,
        scrape_type=FLAGS.scrape_type,
        classify_claim=FLAGS.classify_claim,
        title_in_scoring=common_config.title_in_scoring,
        claim_loss_weight=common_config.claim_loss_weight,
    )
    # Intentionally *not* passing in common_config, the trainer serializes for
    # save/load based on model_parameters, so everything must go in there.
    train_model(
        model_config=model_config,
        debug=FLAGS.debug,
        tpu=FLAGS.tpu,
        distribution_strategy=FLAGS.distribution_strategy,
        tb_log_dir=tb_log_dir,
    )
  else:
    raise ValueError('Incorrect command')


if __name__ == '__main__':
  app.run(main)
|
|
from __future__ import annotations
import functools
import dataclasses
from typing import Any, Optional
from dataclasses import dataclass
@dataclass(frozen=True)
class State:
    """Immutable position state for the column-sorting puzzle.

    Pieces are the letters "a"-"d"; each belongs in the matching home column
    ("a" -> one, ..., "d" -> four).  Every non-column field holds at most one
    piece (None when empty).  Frozen so states are hashable for memoization.
    """
    # Two parking slots at the left end of the corridor.
    left_one: Optional[str]
    left_two: Optional[str]
    # Two parking slots at the right end.
    right_one: Optional[str]
    right_two: Optional[str]
    # The four home columns; index 0 is the bottom cell, index 3 the top
    # (apply_move fills the lowest None and removes the highest piece).
    one: tuple[Optional[str], Optional[str], Optional[str], Optional[str]]
    two: tuple[Optional[str], Optional[str], Optional[str], Optional[str]]
    three: tuple[Optional[str], Optional[str], Optional[str], Optional[str]]
    four: tuple[Optional[str], Optional[str], Optional[str], Optional[str]]
    # Single corridor cells between adjacent columns.
    one_two: Optional[str]
    two_three: Optional[str]
    three_four: Optional[str]
# Goal configuration: each column filled with four of its own letter and
# every slot/corridor cell empty.
solved_state = State(
    left_one=None,
    left_two=None,
    right_one=None,
    right_two=None,
    one=("a", "a", "a", "a"),
    two=("b", "b", "b", "b"),
    three=("c", "c", "c", "c"),
    four=("d", "d", "d", "d"),
    one_two=None,
    two_three=None,
    three_four=None,
)


def solved(state: State) -> bool:
    """Return True when *state* is exactly the goal configuration."""
    return state == solved_state
def init_state() -> State:
    """Build the puzzle's starting configuration (all slots empty)."""
    empty_slots = dict(
        left_one=None,
        left_two=None,
        right_one=None,
        right_two=None,
        one_two=None,
        two_three=None,
        three_four=None,
    )
    return State(
        one=("d", "d", "d", "a"),
        two=("d", "b", "c", "c"),
        three=("a", "a", "b", "b"),
        four=("c", "c", "a", "b"),
        **empty_slots,
    )
def target_column_for(s: str) -> str:
    """Map a piece letter ("a"-"d") to the name of its home column."""
    homes = dict(zip("abcd", ("one", "two", "three", "four")))
    return homes[s]
def target_vale_for(s: str) -> str:
    """Inverse of target_column_for: map a column name to its home letter."""
    letters = dict(one="a", two="b", three="c", four="d")
    return letters[s]
def topmost_in_column(c: tuple[Optional[str], ...]) -> Optional[str]:
    """Return the topmost (last non-None) piece in column *c*, or None.

    Fixes two defects in the original: the `next(...)` result was never
    returned (the function always yielded None), and the annotation
    referenced an undefined name `Column`.
    """
    return next((x for x in c[::-1] if x is not None), None)
def target_column_for_opt(s: Optional[str]) -> Optional[str]:
    """None-propagating wrapper around target_column_for."""
    return None if s is None else target_column_for(s)
def none_to_list(n: Optional[str]) -> list[str]:
    """Lift an optional value into a list: None -> [], x -> [x]."""
    return [] if n is None else [n]
def verify_move(state: State, from_: str, to: str) -> bool:
    """Return True when destination *to* can legally receive a piece.

    Single slots and corridor cells accept a piece only when empty; a home
    column accepts only its own letter (empty cells are fine).
    """
    single_slots = (
        "left_one",
        "left_two",
        "right_one",
        "right_two",
        "one_two",
        "two_three",
        "three_four",
    )
    if to in single_slots:
        return getattr(state, to) is None
    home_letter = {"one": "a", "two": "b", "three": "c", "four": "d"}
    if to in home_letter:
        want = home_letter[to]
        return all(cell is None or cell == want for cell in getattr(state, to))
    raise Exception("no")
def check_crossings(state: State, from_: str, to: str) -> bool:
    """Return False when an occupied slot blocks the path from *from_* to *to*.

    Each potentially-blocking slot splits the corridor into the set of
    positions to its left and everything else; a legal move must not cross an
    occupied slot, i.e. both endpoints must lie on the same side of it.
    """
    left_of = {
        "left_two": {"left_one"},
        "one_two": {"left_one", "left_two", "one"},
        "two_three": {"left_one", "left_two", "one", "two", "one_two"},
        "three_four": {
            "left_one",
            "left_two",
            "one",
            "two",
            "three",
            "one_two",
            "two_three",
        },
        "right_two": {
            "left_one",
            "left_two",
            "one",
            "two",
            "three",
            "four",
            "one_two",
            "two_three",
            "three_four",
        },
    }
    for blocker, side in left_of.items():
        if blocker == from_:
            # The moving piece cannot block itself.
            continue
        if not getattr(state, blocker):
            continue
        # A crossing happens when exactly one endpoint is on the blocker's side.
        if (from_ in side) != (to in side):
            return False
    return True
def targets_from_position(state: State, pos: str):
    """Enumerate legal destinations for the piece currently at *pos*.

    Slots and corridor cells may only send their piece straight to its home
    column; a column's top piece may go to any slot/corridor cell or directly
    to its home column.  Candidates are filtered by verify_move (destination
    legality) and check_crossings (path not blocked).  When a direct
    home-column move survives filtering it is returned alone, since going
    home is never worse than parking.
    """
    moves = {
        "left_one": none_to_list(target_column_for_opt(state.left_one)),
        "left_two": none_to_list(target_column_for_opt(state.left_two)),
        "right_one": none_to_list(target_column_for_opt(state.right_one)),
        "right_two": none_to_list(target_column_for_opt(state.right_two)),
        "one": [
            "left_one",
            "left_two",
            "right_one",
            "right_two",
            "one_two",
            "two_three",
            "three_four",
        ]
        + none_to_list(target_column_for_opt(topmost_in_column(state.one))),
        "two": [
            "left_one",
            "left_two",
            "right_one",
            "right_two",
            "one_two",
            "two_three",
            "three_four",
        ]
        + none_to_list(target_column_for_opt(topmost_in_column(state.two))),
        "three": [
            "left_one",
            "left_two",
            "right_one",
            "right_two",
            "one_two",
            "two_three",
            "three_four",
        ]
        + none_to_list(target_column_for_opt(topmost_in_column(state.three))),
        "four": [
            "left_one",
            "left_two",
            "right_one",
            "right_two",
            "one_two",
            "two_three",
            "three_four",
        ]
        + none_to_list(target_column_for_opt(topmost_in_column(state.four))),
        "one_two": none_to_list(target_column_for_opt(state.one_two)),
        "two_three": none_to_list(target_column_for_opt(state.two_three)),
        "three_four": none_to_list(target_column_for_opt(state.three_four)),
    }[pos]
    p_moves = [
        x
        for x in moves
        if x != pos and verify_move(state, pos, x) and check_crossings(state, pos, x)
    ]
    # Prefer a direct home-column move when one is available.
    for v in ["one", "two", "three", "four"]:
        if v in p_moves:
            return [v]
    return p_moves
def depth_to(state: State, pos) -> int:
    """Steps needed to drop a piece into the lowest free cell of column *pos*.

    Non-column positions (single slots) cost 0 extra steps.
    """
    column = getattr(state, pos)
    if not isinstance(column, tuple):
        return 0
    free = next((idx for idx, cell in enumerate(column) if cell is None), None)
    return 0 if free is None else len(column) - free - 1
def depth_from(state: State, pos) -> int:
    """Steps needed to lift the topmost piece out of column *pos*.

    Non-column positions (single slots) cost 0 extra steps, as does an
    empty column.
    """
    column = getattr(state, pos)
    if not isinstance(column, tuple):
        return 0
    return next(
        (offset for offset, cell in enumerate(reversed(column)) if cell is not None),
        0,
    )
import networkx as nx

# Corridor topology as an unweighted graph: the integer nodes 0-3 are the
# hallway cells directly above columns one..four; named nodes are the parking
# slots, corridor cells, and columns.  Hop count along a shortest path gives
# the number of steps between two positions.
cost_map = nx.Graph(
    [
        ("left_one", "left_two"),
        ("left_two", 0),
        (0, "one"),
        (0, "one_two"),
        ("one_two", 1),
        (1, "two"),
        (1, "two_three"),
        ("two_three", 2),
        (2, "three"),
        (2, "three_four"),
        ("three_four", 3),
        (3, "four"),
        (3, "right_two"),
        ("right_two", "right_one"),
    ]
)
# Per-step move cost by piece letter (amphipod-style 1/10/100/1000 weights).
cost_multiplier = {"a": 1, "b": 10, "c": 100, "d": 1000}
def column_satisfied(state: State, pos: str) -> bool:
    """True when column *pos* contains only its home letter (or empty cells).

    Any non-column position returns False.
    """
    expected = {"one": "a", "two": "b", "three": "c", "four": "d"}.get(pos)
    if expected is None:
        return False
    return all(cell is None or cell == expected for cell in getattr(state, pos))
def thing_at_position(state: State, pos: str) -> Optional[str]:
    """Return the movable piece at *pos*: a slot's value or a column's top piece."""
    value = getattr(state, pos)
    if value is None or isinstance(value, str):
        return value
    for cell in reversed(value):
        if cell is not None:
            return cell
    return None
def steps_in_move(state: State, from_: str, to: str) -> int:
    """Total step count for a move: corridor distance plus the extra steps to
    dig the piece out of *from_* and to drop it into *to*."""
    travel = nx.shortest_path_length(cost_map, from_, to)
    return travel + depth_from(state, from_) + depth_to(state, to)
def apply_move(state: State, from_: str, to: str) -> State:
    """Return a new State with the piece at *from_* moved to *to*.

    Handles both field kinds: single slots (str/None) and columns (tuples).
    The topmost piece is removed from the source; the destination receives it
    in its lowest empty cell.  The input state is never mutated (frozen
    dataclass + dataclasses.replace).
    """
    thing = thing_at_position(state, from_)
    x = getattr(state, from_)
    if isinstance(x, str):
        # Source is a single slot: just clear it.
        state = dataclasses.replace(state, **{from_: None})
    else:
        # Source is a column: blank the highest occupied cell.
        x = list(x)
        for i in range(len(x) - 1, -1, -1):
            if x[i] is not None:
                x[i] = None
                break
        state = dataclasses.replace(state, **{from_: tuple(x)})
    x = getattr(state, to)
    if x is None:
        # Destination is an empty slot.
        state = dataclasses.replace(state, **{to: thing})
    else:
        # Destination is a column: drop the piece into the lowest empty cell.
        x = list(x)
        for i in range(len(x)):
            if x[i] is None:
                x[i] = thing
                break
        state = dataclasses.replace(state, **{to: tuple(x)})
    return state
# desired_depth_logs = {
# (0, 'three', 'one_two'),
# (1, 'two', 'three'),
# (2, 'two', 'two_three'),
# (3, 'one_two', 'two'),
# (4, 'one', 'two'),
# (5, 'four', 'three_four'),
# (6, 'four', 'right_two'),
# (7, 'three_four', 'four'),
# (8, 'two_three', 'four'),
# (9, 'right_two', 'one')
# }
# desired_depth_logs = {
# (i, *v)
# for i, v in enumerate(
# [
# ("one", "left_two"),
# ("three", "one_two"),
# ("two", "two_three"),
# ("three", "right_one"),
# ("two_three", "three"),
# ("two", "two_three"),
# ("one_two", "two"),
# ("one", "one_two"),
# ("left_two", "one"),
# ("four", "right_two"),
# ("four", "three_four"),
# ("three_four", "three"),
# ("two_three", "four"),
# ("one_two", "four"),
# ("right_two", "two"),
# ("right_one", "one"),
# ]
# )
# }
# Every location a piece can occupy; the search iterates these as move origins.
possible_positions = [
    "left_one",
    "left_two",
    "right_one",
    "right_two",
    "one",
    "two",
    "three",
    "four",
    "one_two",
    "two_three",
    "three_four",
]
@functools.lru_cache(maxsize=1000)
def find_min_moves(state: State):#, depth: int, c: bool):
    """Memoized exhaustive search for the cheapest solving move sequence.

    Returns (cost, moves, terminates): `moves` is a list of (from, to) pairs
    and `terminates` is False when no continuation reaches the goal state.
    NOTE(review): maxsize=1000 looks small for the reachable state space, so
    the cache may thrash; consider maxsize=None after checking memory budget.
    """
    if solved(state):
        return 0, [], True
    # if depth > 15:
    #     return 0, [], False
    minimum = None
    move = []
    terminates = False
    for start in possible_positions:
        thing = thing_at_position(state, start)
        if thing is None:
            continue
        # Never disturb a column that is already correctly filled.
        if column_satisfied(state, start):
            continue
        targets = targets_from_position(state, start)
        for target in targets:
            # Commented-out debug tracing retained from development:
            # c_n = c and (depth, start, target) in desired_depth_logs
            # if c_n:
            #     cost = steps_in_move(state, start, target) * cost_multiplier[thing]
            #     print(depth, state)
            #     print(f"{depth} {start} ({thing}) -> {target}, cost={cost}")
            # else:
            #     pass
            #     # continue
            next_costs, next_steps, this_terminates = find_min_moves(
                apply_move(state, start, target)#, depth + 1, c_n
            )
            if not this_terminates:
                continue
            cost = (
                steps_in_move(state, start, target) * cost_multiplier[thing]
                + next_costs
            )
            # print(cost)
            if minimum is None or cost < minimum:
                minimum = cost
                move = [(start, target), *next_steps]
                terminates = True
    return minimum or 0, move, terminates


if __name__ == "__main__":
    print(find_min_moves(init_state()))#, 0, True))
|
|
# -*- coding: utf-8 -*-
"""dataset_collection
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1lHElNaOJc6KguYAQuFrWGjVqDUUk3an8
"""
import os
from requests import get
import pandas as pd
import numpy as np
from tqdm import tqdm

# Download the MTGJSON "AllPrintings" CSV bundle and unpack it locally.
os.system("wget https://mtgjson.com/api/v5/AllPrintingsCSVFiles.tar.gz && tar xvzf AllPrintingsCSVFiles.tar.gz")

cards = pd.read_csv('./AllPrintingsCSVFiles/cards.csv', low_memory=False)
# Keep only "normal" layout cards; their Scryfall ids are used as API keys
# and as output filenames.
cards_ids = cards.query('layout == "normal"')['scryfallId'].to_numpy()

API_ENDPOINT = "https://api.scryfall.com/cards/"
DATASET_FOLDER = "../raw/"

for i in tqdm(cards_ids, unit="image", initial=0):
    # Get our response from the API_ENDPOINT
    response = get(API_ENDPOINT + i)
    # From the API_ENDPOINT, we retrieve the image url
    # that contains our desired illustration.
    # NOTE(review): assumes every card carries `image_uris.art_crop`; cards
    # without it would raise KeyError here — confirm against the Scryfall
    # card-object schema.
    image_url = response.json()["image_uris"]['art_crop']
    # Retrieving an image stream.
    image_response = get(image_url)
    # image_response is a binary blob; write it to <scryfallId>.jpg.  The
    # context manager guarantees the handle is closed even if the write
    # raises (the original open/close pair leaked the handle on error).
    with open(DATASET_FOLDER + i + '.jpg', "wb") as file:
        file.write(image_response.content)
|
|
import os
import argparse
import numpy as np
import pandas as pd
def arg_parse():
    """Build the CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='RPIN Parameters')
    parser.add_argument(
        '--folder',
        type=str,
        required=True,
        help='folder name to retrive results',
    )
    return parser.parse_args()
def main():
    '''
    Aggregate passing rates from a folder of log files into one CSV.

    Writes `<folder>.csv` with columns (template, average, std): one row per
    scenario/template, averaging the extracted passing rates across folds.
    Log filenames encode the template/scenario id; their "INFO: 004/..."
    lines on val levels carry the rates.
    '''
    args = arg_parse()
    log_files = os.listdir(args.folder)
    # temp_num -> {fold_name: passing_rate}
    ret_1 = {}
    for log_file in log_files:
        with open(os.path.join(args.folder,log_file), 'r') as f:
            # Strip the extension and any 'log' token; what remains encodes
            # the id: 3 parts -> template, 2 parts -> scenario.
            temp_num = list(filter(lambda x: 'log' not in x, log_file.replace('.txt', '').split('_')))
            if len(temp_num) == 3:
                mode = 'template'
            elif len(temp_num) == 2:
                mode = 'scenario'
            else:
                raise ValueError(f'incorrect length of temp {log_file}')
            temp_num = "_".join(temp_num)
            content = f.readlines()
            # Keep only the final-epoch validation lines carrying the rate.
            passing_rates = list(filter(lambda x : 'on val levels' in x and 'INFO: 004/' in x, content))
            for rate in passing_rates:
                rate = float(rate.split(':')[-1])
                if temp_num in ret_1:
                    # Subsequent rates for the same id become fold_1, fold_2, ...
                    num_fold = len(ret_1[temp_num])
                    ret_1[temp_num][f'fold_{num_fold}'] = rate if str(rate) != 'nan' else 0
                else:
                    ret_1[temp_num] = {}
                    ret_1[temp_num]['fold_0'] = rate if str(rate) != 'nan' else 0
    out = {'template':[], 'average':[], 'std':[] }
    for temp_num in ret_1:
        avg = np.mean(list(ret_1[temp_num].values()))
        std = np.std(list(ret_1[temp_num].values()))
        out['template'].append(temp_num)
        out['average'].append(avg)
        out['std'].append(std)
    out = pd.DataFrame(out)
    out.to_csv(args.folder+'.csv', index=False)
    return out


if __name__ == '__main__':
    results = main()
|
|
"""
Visibility Road Map Planner
author: Atsushi Sakai (@Atsushi_twi)
"""
import os
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from geometry import Geometry
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../VoronoiRoadMap/")
from dijkstra_search import DijkstraSearch
show_animation = True
class VisibilityRoadMap:
    """Visibility-graph path planner.

    Builds graph nodes from the start, the goal, and each obstacle vertex
    offset outward by the robot radius; connects every node pair whose
    straight segment intersects no obstacle edge; then runs Dijkstra over
    the resulting road map.
    """

    def __init__(self, robot_radius, do_plot=False):
        # Clearance used when offsetting obstacle vertices into free space.
        self.robot_radius = robot_radius
        self.do_plot = do_plot

    def planning(self, start_x, start_y, goal_x, goal_y, obstacles):
        """Plan from start to goal; returns (rx, ry) waypoint coordinate lists."""
        nodes = self.generate_graph_node(start_x, start_y, goal_x, goal_y,
                                         obstacles)
        road_map_info = self.generate_road_map_info(nodes, obstacles)
        if self.do_plot:
            self.plot_road_map(nodes, road_map_info)
            plt.pause(1.0)
        rx, ry = DijkstraSearch(show_animation).search(
            start_x, start_y,
            goal_x, goal_y,
            [node.x for node in nodes],
            [node.y for node in nodes],
            road_map_info
        )
        return rx, ry

    def generate_graph_node(self, start_x, start_y, goal_x, goal_y, obstacles):
        """Collect start, goal, and offset obstacle vertices as graph nodes."""
        # add start and goal as nodes
        nodes = [DijkstraSearch.Node(start_x, start_y),
                 DijkstraSearch.Node(goal_x, goal_y, 0, None)]
        # add vertexes in configuration space as nodes
        for obstacle in obstacles:
            cvx_list, cvy_list = self.calc_vertexes_in_configuration_space(
                obstacle.x_list, obstacle.y_list)
            for (vx, vy) in zip(cvx_list, cvy_list):
                nodes.append(DijkstraSearch.Node(vx, vy))
        # NOTE(review): nodes are plotted unconditionally, even when
        # do_plot=False — confirm whether this is intended.
        for node in nodes:
            plt.plot(node.x, node.y, "xr")
        return nodes

    def calc_vertexes_in_configuration_space(self, x_list, y_list):
        """Offset every polygon vertex outward by the robot radius."""
        # Drop the duplicated closing vertex before iterating.
        x_list = x_list[0:-1]
        y_list = y_list[0:-1]
        cvx_list, cvy_list = [], []
        n_data = len(x_list)
        for index in range(n_data):
            # Offset uses the previous, current, and next vertex (wrapping).
            offset_x, offset_y = self.calc_offset_xy(
                x_list[index - 1], y_list[index - 1],
                x_list[index], y_list[index],
                x_list[(index + 1) % n_data], y_list[(index + 1) % n_data],
            )
            cvx_list.append(offset_x)
            cvy_list.append(offset_y)
        return cvx_list, cvy_list

    def generate_road_map_info(self, nodes, obstacles):
        """For each node, list the ids of nodes reachable by a clear segment."""
        road_map_info_list = []
        for target_node in nodes:
            road_map_info = []
            for node_id, node in enumerate(nodes):
                # Skip (near-)coincident nodes.
                if np.hypot(target_node.x - node.x,
                            target_node.y - node.y) <= 0.1:
                    continue
                is_valid = True
                for obstacle in obstacles:
                    if not self.is_edge_valid(target_node, node, obstacle):
                        is_valid = False
                        break
                if is_valid:
                    road_map_info.append(node_id)
            road_map_info_list.append(road_map_info)
        return road_map_info_list

    @staticmethod
    def is_edge_valid(target_node, node, obstacle):
        """True when the node-to-node segment crosses no edge of *obstacle*."""
        for i in range(len(obstacle.x_list) - 1):
            p1 = Geometry.Point(target_node.x, target_node.y)
            p2 = Geometry.Point(node.x, node.y)
            p3 = Geometry.Point(obstacle.x_list[i], obstacle.y_list[i])
            p4 = Geometry.Point(obstacle.x_list[i + 1], obstacle.y_list[i + 1])
            if Geometry.is_seg_intersect(p1, p2, p3, p4):
                return False
        return True

    def calc_offset_xy(self, px, py, x, y, nx, ny):
        """Offset vertex (x, y) along the normal of its two incident edges.

        (px, py) and (nx, ny) are the previous and next vertices.  The mean
        edge direction rotated by +90 deg points outward because vertices
        are ordered clockwise (see ObstaclePolygon.make_clockwise).
        """
        p_vec = math.atan2(y - py, x - px)
        n_vec = math.atan2(ny - y, nx - x)
        offset_vec = math.atan2(math.sin(p_vec) + math.sin(n_vec),
                                math.cos(p_vec) + math.cos(
                                    n_vec)) + math.pi / 2.0
        offset_x = x + self.robot_radius * math.cos(offset_vec)
        offset_y = y + self.robot_radius * math.sin(offset_vec)
        return offset_x, offset_y

    @staticmethod
    def plot_road_map(nodes, road_map_info_list):
        """Draw every road-map edge in blue."""
        for i, node in enumerate(nodes):
            for index in road_map_info_list[i]:
                plt.plot([node.x, nodes[index].x],
                         [node.y, nodes[index].y], "-b")
class ObstaclePolygon:
    """Polygonal obstacle normalized to a closed, clockwise vertex ring."""

    def __init__(self, x_list, y_list):
        self.x_list = x_list
        self.y_list = y_list
        # Normalize: close the ring first, then enforce clockwise order.
        self.close_polygon()
        self.make_clockwise()

    def make_clockwise(self):
        """Reverse the vertex order unless it is already clockwise."""
        if self.is_clockwise():
            return
        self.x_list = self.x_list[::-1]
        self.y_list = self.y_list[::-1]

    def is_clockwise(self):
        """Shoelace-style signed-area test; non-negative means clockwise."""
        xs, ys = self.x_list, self.y_list
        count = len(xs)
        total = 0.0
        for i in range(count):
            j = (i + 1) % count  # wraps the last vertex back to the first
            total += (xs[j] - xs[i]) * (ys[j] + ys[i])
        return total >= 0

    def close_polygon(self):
        """Append the first vertex at the end unless the ring is already closed."""
        already_closed = (self.x_list[0] == self.x_list[-1]
                          and self.y_list[0] == self.y_list[-1])
        if already_closed:
            return  # no need to close
        self.x_list.append(self.x_list[0])
        self.y_list.append(self.y_list[0])

    def plot(self):
        """Draw the polygon outline in black."""
        plt.plot(self.x_list, self.y_list, "-k")
def main():
    """Demo: plan a path around three polygonal obstacles and animate it."""
    print(__file__ + " start!!")
    # start and goal position
    sx, sy = 10.0, 10.0  # [m]
    gx, gy = 50.0, 50.0  # [m]
    robot_radius = 5.0  # [m]
    obstacles = [
        ObstaclePolygon(
            [20.0, 30.0, 15.0],
            [20.0, 20.0, 30.0],
        ),
        ObstaclePolygon(
            [40.0, 45.0, 50.0, 40.0],
            [50.0, 40.0, 20.0, 40.0],
        ),
        ObstaclePolygon(
            [20.0, 30.0, 30.0, 20.0],
            [40.0, 45.0, 60.0, 50.0],
        )
    ]
    if show_animation:  # pragma: no cover
        plt.plot(sx, sy, "or")
        plt.plot(gx, gy, "ob")
        for ob in obstacles:
            ob.plot()
        plt.axis("equal")
        plt.pause(1.0)
    rx, ry = VisibilityRoadMap(robot_radius, do_plot=show_animation).planning(
        sx, sy, gx, gy, obstacles)
    if show_animation:  # pragma: no cover
        plt.plot(rx, ry, "-r")
        plt.pause(0.1)
        plt.show()


if __name__ == '__main__':
    main()
|
|
# author: Xiang Gao at Microsoft Research AI NLP Group
import torch, os, pdb
import numpy as np
from transformers19 import GPT2Tokenizer, GPT2Model, GPT2Config
from shared import EOS_token
class OptionInfer:
    """Minimal option holder for inference-time scoring."""

    def __init__(self, cuda=True):
        # Whether tensors should be moved to GPU before scoring.
        self.cuda = cuda
class ScorerBase(torch.nn.Module):
    """Base class for GPT-2 based response scorers.

    Handles tokenization, batching, and EOS padding of (context, hypothesis)
    pairs; subclasses implement `core` to map padded id batches to scores.
    """

    def __init__(self, opt):
        super().__init__()
        self.ix_EOS = 50256  # GPT-2 end-of-text id; also used as right padding
        self.ix_OMT = 986
        self.opt = opt
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')

    def core(self, ids, l_ids, return_logits=False):
        # to be implemented in child class
        return 0

    def predict(self, cxt, hyps, max_cxt_turn=None):
        """Score each hypothesis against the (optionally truncated) context.

        Returns an ndarray of scores, or a dict of ndarrays when the
        subclass's `core` returns a dict (see JointScorer).
        """
        # cxt = str
        # hyps = list of str
        self.eval()
        cxt_turns = cxt.split(EOS_token)
        if max_cxt_turn is not None:
            # Keep only the most recent max_cxt_turn turns.
            cxt_turns = cxt_turns[-min(max_cxt_turn, len(cxt_turns)):]
        ids_cxt = []
        for turn in cxt_turns:
            ids_cxt += self.tokenizer.encode(turn.strip()) + [self.ix_EOS]
        seqs = []
        lens = []
        for hyp in hyps:
            seq = ids_cxt + self.tokenizer.encode(hyp.strip())
            lens.append(len(seq))
            seqs.append(seq)
        max_len = max(lens)
        ids = []
        for seq in seqs:
            # Right-pad every sequence with EOS up to the batch maximum.
            ids.append(seq + [self.ix_EOS] * (max_len - len(seq)))
        with torch.no_grad():
            ids = torch.LongTensor(ids)
            if self.opt.cuda:
                ids = ids.cuda()
            scores = self.core(ids, lens)
        if not isinstance(scores, dict):
            if self.opt.cuda:
                scores = scores.cpu()
            return scores.detach().numpy()
        for k in scores:
            if self.opt.cuda:
                scores[k] = scores[k].cpu()
            scores[k] = scores[k].detach().numpy()
        return scores

    def forward(self, batch):
        """Probability that the positive sample outranks the negative one."""
        logits_pos = self.core(batch['ids_pos'], batch['len_pos'], return_logits=True)
        logits_neg = self.core(batch['ids_neg'], batch['len_neg'], return_logits=True)
        # softmax to get the `probability` to rank pos/neg correctly
        return torch.exp(logits_pos) / (torch.exp(logits_pos) + torch.exp(logits_neg))
class Scorer(ScorerBase):
    """Single GPT-2 ranker: score = sigmoid(w . h_last_token)."""

    def __init__(self, opt):
        super().__init__(opt)
        n_embd = 1024
        # 24 layers / 16 heads / 1024-dim hidden states.
        config = GPT2Config(n_embd=n_embd, n_layer=24, n_head=16)
        self.transformer = GPT2Model(config)
        # Scalar score read off one hidden state; no bias.
        self.score = torch.nn.Linear(n_embd, 1, bias=False)

    def core(self, ids, l_ids, return_logits=False):
        """Score a padded id batch from each sequence's last real token.

        ids: LongTensor batch of padded token ids; l_ids: true lengths.
        """
        n = ids.shape[0]
        attention_mask = torch.ones_like(ids)
        # attention_mask = torch.nn.Parameter(torch.ones_like(ids, requires_grad=False, dtype=torch.float32), requires_grad=False)
        # attention_mask.requires_grad=False
        for i in range(n):
            # Zero the mask beyond each sequence's true length (padding).
            attention_mask[i, l_ids[i]:] *= 0
        hidden_states, _ = self.transformer(ids, attention_mask=attention_mask)
        logits = self.score(hidden_states).squeeze(-1)
        # Pick the logit at the last non-padding position of each sequence.
        logits = torch.stack([logits[i, l_ids[i] - 1] for i in range(n)])
        if return_logits:
            return logits
        else:
            return torch.sigmoid(logits)

    def load(self, path):
        """Download (if needed) and load weights; adapts DialoGPT .pkl checkpoints."""
        from shared import download_model
        download_model(path)
        print('loading from '+path)
        if torch.cuda.is_available():
            weights = torch.load(path)
        else:
            weights = torch.load(path, map_location=torch.device('cpu'))
        if path.endswith('.pkl'):
            # DialoGPT checkpoint
            # Reuse the LM head's EOS-token row as the scoring vector.
            weights['score.weight'] = weights['lm_head.decoder.weight'][self.ix_EOS: self.ix_EOS+1, :]
            del weights['lm_head.decoder.weight']
        self.load_state_dict(weights)
class JointScorer(ScorerBase):
    """Weighted ensemble of Scorer models: final = avg(prior) * avg(cond)."""

    def core(self, ids, l_ids, return_logits=False):
        """Run every sub-scorer; return a dict of per-model scores plus 'final'."""
        assert(not return_logits)
        scores = dict()
        for k in self.kk['prior'] + self.kk['cond']:
            scorer = getattr(self, 'scorer_%s'%k)
            scores[k] = scorer.core(ids, l_ids)

        def avg_score(kk):
            # Weighted average of the named sub-scores; 1 for an empty group.
            if not kk:
                return 1
            sum_score_wt = 0
            sum_wt = 0
            for k in kk:
                sum_score_wt = sum_score_wt + scores[k] * self.wt[k]
                sum_wt += self.wt[k]
            return sum_score_wt / sum_wt
        prior = avg_score(self.kk['prior'])
        cond = avg_score(self.kk['cond'])
        scores['final'] = prior * cond
        return scores

    def load(self, path_config):
        """Build sub-scorers from a YAML config listing prior/cond model entries.

        Each entry supplies `name`, `wt` (ensemble weight), and `path`
        (checkpoint); models are attached as attributes `scorer_<name>`.
        """
        import yaml
        with open(path_config, 'r') as stream:
            config = yaml.safe_load(stream)
        print(config)
        paths = dict()
        self.wt = dict()  # model name -> ensemble weight
        self.kk = dict()  # 'prior' / 'cond' -> list of model names
        for prefix in ['prior', 'cond']:
            self.kk[prefix] = []
            for d in config[prefix]:
                k = d['name']
                self.kk[prefix].append(k)
                self.wt[k] = d['wt']
                paths[k] = d['path']
        for k in paths:
            path = paths[k]
            print('setting up model `%s`'%k)
            scorer = Scorer(OptionInfer(cuda=self.opt.cuda))
            scorer.load(path)
            if self.opt.cuda:
                scorer.cuda()
            setattr(self, 'scorer_%s'%k, scorer)
|
|
from modules.world import World, Landmark, Map, Goal
from modules.grid_map_2d import GridMap2D
from modules.robot import IdealRobot
from modules.sensor import IdealCamera, Camera
from modules.agent import Agent, EstimationAgent, GradientAgent
from modules.gradient_pfc import GradientPfc
from modules.mcl import Particle, Mcl
import math
import numpy as np
if __name__ == '__main__': ###name_indent
    # Simulation demo: MCL-localized robot driven by a gradient-based agent
    # toward a fixed goal.  (Comments translated from Japanese.)
    time_interval = 0.1
    world = World(200, time_interval, debug=False, recording_file_name='std(0.3_0.3)_回避行動0秒-1秒', playback_speed=3)
    # world = World(150, time_interval, debug=False)
    m = Map()
    ### Add the occupancy grid map ###
    grid_map = GridMap2D('CorridorGimp_200x200', origin=[-5.0, -5.0])
    # world.append(grid_map)
    ## Add the goal ##
    goal = Goal(1.75,3.0)  # keep the goal in a variable
    world.append(goal)
    ### Build the robot ###
    # Initial pose
    init_pose = np.array([-4.5, 0.5, 0])
    # Std-dev of the initial pose estimate
    init_pose_stds = np.array([0.3, 0.3, 0.01])
    # Motion-update noise
    # motion_noise_stds = {"nn":0.19, "no":0.001, "on":0.13, "oo":0.2}
    motion_noise_stds = {"nn":0.01, "no":0.01, "on":0.01, "oo":0.01}
    # Estimator (MCL with 300 particles)
    estimator = Mcl(m, init_pose, 300, motion_noise_stds=motion_noise_stds,
                    init_pose_stds=init_pose_stds)
    # Agent
    agent = GradientPfc(time_interval, 0.1, 0.5, np.deg2rad(90), estimator, grid_map, goal,
                        magnitude=2, draw_direction=False, draw_p_gradient=False)
    # Robot
    robot = IdealRobot(init_pose, sensor=Camera(m), agent=agent)
    world.append(robot)
    world.draw()
|
|
import unittest
import numpy
from cqcpy import test_utils
from cqcpy.ov_blocks import one_e_blocks
from cqcpy.ov_blocks import two_e_blocks
from kelvin import quadrature
from kelvin import ft_cc_energy
from kelvin import ft_cc_equations
def evalL(T1f, T1b, T1i, T2f, T2b, T2i, L1f, L1b, L1i, L2f, L2b, L2i,
          Ff, Fb, F, I, D1, D2, tir, tii, gr, gi, Gr, Gi, beta):
    """Evaluate the non-equilibrium CC Lagrangian for the given amplitudes.

    Computes the NEQ-CC energy plus the Lambda-weighted contraction of the
    updated T amplitudes, integrated with quadrature weights gr/gi.  The
    f/b/i suffixes appear to label forward, backward, and imaginary-time
    branches of the contour — confirm against ft_cc_equations.
    """
    ngr = gr.shape[0]  # number of real-time quadrature points
    ngi = gi.shape[0]  # number of imaginary-time quadrature points
    E = ft_cc_energy.ft_cc_energy_neq(
        T1f, T1b, T1i, T2f, T2b, T2i,
        Ff.ov, Fb.ov, F.ov, I.oovv, gr, gi, beta)
    # One pass of the NEQ-CCSD amplitude equations on the inputs.
    T1f_, T1b_, T1i_, T2f_, T2b_, T2i_ =\
        ft_cc_equations.neq_ccsd_simple(
            Ff, Fb, F, I, T1f, T1b, T1i, T2f, T2b, T2i,
            D1, D2, tir, tii, ngr, ngi, Gr, Gi)
    # Lambda * T contractions per quadrature point ('y') on each branch.
    TEf = 0.25*numpy.einsum('yijab,yabij->y', L2f, T2f_)
    TEf += numpy.einsum('yia,yai->y', L1f, T1f_)
    TEb = 0.25*numpy.einsum('yijab,yabij->y', L2b, T2b_)
    TEb += numpy.einsum('yia,yai->y', L1b, T1b_)
    TEi = 0.25*numpy.einsum('yijab,yabij->y', L2i, T2i_)
    TEi += numpy.einsum('yia,yai->y', L1i, T1i_)
    # Quadrature-weighted combination of the branch contributions.
    Te = (1.j/beta)*numpy.einsum('y,y->', TEf, gr)
    Te -= (1.j/beta)*numpy.einsum('y,y->', TEb, gr)
    Te += (1.0/beta)*numpy.einsum('y,y->', TEi, gi)
    return E + Te
class NEQDensityTest(unittest.TestCase):
    """Verify the non-equilibrium 1-RDM via a finite one-body perturbation.

    The derivative of the Lagrangian with respect to a one-body operator
    placed at a single real-time grid point should equal the contraction
    of that operator with the 1-RDM blocks returned by neq_1rdm.
    """
    def setUp(self):
        # agreement threshold between the Lagrangian value and the
        # RDM-contracted perturbation
        self.thresh = 1e-8

    def test_den(self):
        ngr = 4   # number of real-time grid points
        ngi = 4   # number of imaginary-time grid points
        n = 5     # orbital-space dimension
        beta = 1.0
        tmax = 0.1
        tf = 2    # real-time grid index where the perturbation acts
        assert(tf < ngr)
        # random amplitudes/multipliers on forward, backward, and
        # imaginary branches
        T1f, T2f = test_utils.make_random_ft_T(ngr, n)
        T1b, T2b = test_utils.make_random_ft_T(ngr, n)
        T1i, T2i = test_utils.make_random_ft_T(ngi, n)
        L1f, L2f = test_utils.make_random_ft_T(ngr, n)
        L1b, L2b = test_utils.make_random_ft_T(ngr, n)
        L1i, L2i = test_utils.make_random_ft_T(ngi, n)
        # real-time branches must be complex
        T1f = T1f.astype(complex)
        T1b = T1b.astype(complex)
        T2f = T2f.astype(complex)
        T2b = T2b.astype(complex)
        L1f = L1f.astype(complex)
        L1b = L1b.astype(complex)
        L2f = L2f.astype(complex)
        L2b = L2b.astype(complex)
        D1, D2 = test_utils.make_random_ft_D(n)
        # deliberately zero out the one-body denominators
        # NOTE(review): D1 from make_random_ft_D is discarded — presumably
        # intentional for this test; confirm against neq_1rdm's assumptions
        D1 = numpy.zeros((n, n))
        tii, gi, Gi = quadrature.midpoint(ngi, beta)
        tir, gr, Gr = quadrature.midpoint(ngr, tmax)
        # random one-body perturbation blocks (ov/vv/oo/vo), complex
        Aov = numpy.random.random((ngr, n, n))
        Avv = numpy.random.random((ngr, n, n))
        Aoo = numpy.random.random((ngr, n, n))
        Avo = numpy.random.random((ngr, n, n))
        Aov = Aov.astype(complex)
        Avv = Avv.astype(complex)
        Aoo = Aoo.astype(complex)
        Avo = Avo.astype(complex)
        zzr = numpy.zeros((ngr, n, n), dtype=complex)
        zzi = numpy.zeros((n, n), dtype=complex)
        # restrict the perturbation to the single grid point tf
        for i in range(ngr):
            if i != tf:
                Aov[i] = numpy.zeros((n, n))
                Avv[i] = numpy.zeros((n, n))
                Aoo[i] = numpy.zeros((n, n))
                Avo[i] = numpy.zeros((n, n))
        # for the Lagrangian, divide through by measure
        Ftemp = one_e_blocks(Aoo/gr[tf], Aov/gr[tf], Avo/gr[tf], Avv/gr[tf])
        Fzr = one_e_blocks(zzr, zzr, zzr, zzr)
        Fzi = one_e_blocks(zzi, zzi, zzi, zzi)
        # zero two-electron integrals so only the one-body term contributes
        Inull = numpy.zeros((n, n, n, n), dtype=complex)
        I = two_e_blocks(
            vvvv=Inull, vvvo=Inull, vovv=Inull,
            vvoo=Inull, vovo=Inull, oovv=Inull,
            vooo=Inull, ooov=Inull, oooo=Inull)
        # reference: Lagrangian evaluated with the perturbation as the
        # forward one-body operator
        ref = evalL(
            T1f, T1b, T1i, T2f, T2b, T2i, L1f, L1b, L1i, L2f, L2b, L2i,
            Ftemp, Fzr, Fzi, I, D1, D2, tir, tii, gr, gi, Gr, Gi, beta)
        # 1-RDM blocks from the code under test
        pia, pba, pji, pai = ft_cc_equations.neq_1rdm(
            T1f, T1b, T1i, T2f, T2b, T2i,
            L1f, L1b, L1i, L2f, L2b, L2i,
            D1, D2, tir, tii, ngr, ngi, gr, gi, Gr, Gi)
        # contract the perturbation with each RDM block at tf
        # (factor 1.j matches the forward-branch prefactor in the Lagrangian)
        out1 = 1.j*numpy.einsum('ai,ia->', Avo[tf], pia[tf])
        out2 = 1.j*numpy.einsum('ab,ba->', Avv[tf], pba[tf])
        out3 = 1.j*numpy.einsum('ij,ji->', Aoo[tf], pji[tf])
        out4 = 1.j*numpy.einsum('ia,ai->', Aov[tf], pai[tf])
        out = out1 + out2 + out3 + out4
        diff = numpy.linalg.norm(out - ref)
        msg = "Error: {}".format(diff)
        self.assertTrue(diff < self.thresh, msg)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.