arxiv_id
stringlengths 0
16
| text
stringlengths 10
1.65M
|
|---|---|
'''
Run trained PredNet on UCSD sequences to create data for anomaly detection
'''
import hickle as hkl
import os
import shutil
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
# from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Input, Dense, Flatten
import tensorflow as tf
from prednet import PredNet
from data_utils import TestsetGenerator
from scipy.ndimage import gaussian_filter
import argparse
# Define args
parser = argparse.ArgumentParser(description='Process input arguments')
parser.add_argument('--out_data', default='./data/video/', type=str, dest='out_data', help='path to data and annotations (annotations should be in <data_dir>/<dataset>/Test/<dataset>.m')
parser.add_argument('--preprocessed_data', default='./data/video/', type=str, dest='preprocessed_data', help='path to data and annotations (annotations should be in <data_dir>/<dataset>/Test/<dataset>.m')
parser.add_argument('--dataset', default='UCSDped1', type=str, dest='dataset', help='dataset we are using')
parser.add_argument('--nt', default=200, type=int, dest='nt', help='length of video sequences')
parser.add_argument('--n_plot', default=0, type=int, dest='n_plot', help='How many sample sequences to plot')
parser.add_argument('--batch_size', default=10, type=int, dest='batch_size', help='How many epochs per batch')
parser.add_argument('--N_seq', default=None, type=int, dest='N_seq', help='how many videos per epoch')
# BUG FIX: help text was a copy-paste of the --N_seq help ("how many videos per epoch")
parser.add_argument('--save_prediction_error_video_frames', action='store_true', dest='save_prediction_error_video_frames', help='save per-frame prediction-error overlay images for a video')
args = parser.parse_args()
# unpack the parsed arguments into module-level names used below
preprocessed_data = args.preprocessed_data
dataset = args.dataset
nt = args.nt
n_plot = args.n_plot
batch_size = args.batch_size
N_seq = args.N_seq
save_prediction_error_video_frames = args.save_prediction_error_video_frames
# BUG FIX: data_dir was used below but never assigned; it comes from --out_data
data_dir = args.out_data
if tf.test.is_gpu_available():
    print("We have a GPU")
else:
    print("Did not find GPU")
# check/create path for saving output
# extend data_dir for current dataset
data_dir = os.path.join(data_dir, dataset, 'Test')
os.makedirs(data_dir, exist_ok=True)
# BUG FIX: save_path (used for all result files and plots below) was never
# defined. NOTE(review): presumably results belong in the dataset's Test
# directory — confirm against the training pipeline.
save_path = data_dir
# load the preprocessed test sequences and their source labels
test_file = os.path.join('data', 'X_test.hkl')
test_sources = os.path.join('data', 'sources_test.hkl')
X = hkl.load(test_file)
sources = hkl.load(test_sources)
# load the trained model: architecture from JSON, weights from HDF5
weights_file = os.path.join('outputs', 'weights.hdf5')
json_file = os.path.join('outputs', 'model.json')
with open(json_file, 'r') as json_fh:
    json_string = json_fh.read()
trained_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
trained_model.load_weights(weights_file)
# Create testing model (to output predictions)
# layers[1] is the PredNet layer of the trained model; reuse its config and
# weights but switch it from error output to frame prediction.
layer_config = trained_model.layers[1].get_config()
layer_config['output_mode'] = 'prediction'
# older Keras versions stored this under 'dim_ordering' instead of 'data_format'
data_format = layer_config['data_format'] if 'data_format' in layer_config else layer_config['dim_ordering']
test_prednet = PredNet(weights=trained_model.layers[1].get_weights(), **layer_config)
# rebuild the input with the evaluation sequence length nt (may differ from training)
input_shape = list(trained_model.layers[0].batch_input_shape[1:])
input_shape[0] = nt
inputs = Input(shape=tuple(input_shape))
predictions = test_prednet(inputs)
test_model = Model(inputs=inputs, outputs=predictions)
# Define Generator for test sequences
test_generator = TestsetGenerator(test_file, test_sources, nt, data_format=data_format, N_seq=N_seq)
X_test = test_generator.create_all()
# Apply model to the test sequences
X_hat = test_model.predict(X_test, batch_size)
# normalize both arrays to channels-last for the numpy/matplotlib code below
if data_format == 'channels_first':
    X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
    X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
# Calculate MSE of PredNet predictions vs. using last frame, and aggregate across all frames in dataset
model_mse = np.mean((X_test[:, 1:] - X_hat[:, 1:]) ** 2)   # skip the first timestep
prev_mse = np.mean((X_test[:, :-1] - X_test[:, 1:]) ** 2)  # trivial previous-frame baseline
# Write results to prediction_scores.txt
with open(os.path.join(save_path, 'prediction_scores.txt'), 'w') as score_file:
    score_file.write("Model MSE: %f\n" % model_mse)
    score_file.write("Previous Frame MSE: %f" % prev_mse)
# Compare MSE of PredNet predictions vs. using last frame, without aggregating across frames
model_err = X_test - X_hat
model_err[:, 0, :, :, :] = 0 # first frame doesn't count
# hoist the squared error: it feeds all seven statistics below
model_sq_err = model_err ** 2
# per-(sequence, timestep) statistics over the pixel axes (H, W, C)
model_mse = np.mean(model_sq_err, axis=(2, 3, 4))
model_p_50 = np.percentile(model_sq_err, 50, axis=(2, 3, 4))
model_p_75 = np.percentile(model_sq_err, 75, axis=(2, 3, 4))
model_p_90 = np.percentile(model_sq_err, 90, axis=(2, 3, 4))
model_p_95 = np.percentile(model_sq_err, 95, axis=(2, 3, 4))
model_p_99 = np.percentile(model_sq_err, 99, axis=(2, 3, 4))
model_std = np.std(model_sq_err, axis=(2, 3, 4))
# now we flatten them so that they are all in one column later
model_mse = model_mse.reshape(-1)
model_p_50 = model_p_50.reshape(-1)
model_p_75 = model_p_75.reshape(-1)
model_p_90 = model_p_90.reshape(-1)
model_p_95 = model_p_95.reshape(-1)
model_p_99 = model_p_99.reshape(-1)
model_std = model_std.reshape(-1)
prev_err = X_test[:, :-1] - X_test[:, 1:] # simple comparison w/ prev frame as baseline for performance
# BUG FIX: the original inserted `X_test[0,0].shape` (a tuple of array
# dimensions) as the first-timestep values. Insert zeros instead so frame 0
# contributes no error, matching the model_err convention above.
prev_err = np.insert(prev_err, 0, 0, axis=1)
prev_sq_err = prev_err ** 2
prev_mse = np.mean(prev_sq_err, axis=(2, 3, 4))
prev_p_50 = np.percentile(prev_sq_err, 50, axis=(2, 3, 4))
prev_p_75 = np.percentile(prev_sq_err, 75, axis=(2, 3, 4))
prev_p_90 = np.percentile(prev_sq_err, 90, axis=(2, 3, 4))
prev_p_95 = np.percentile(prev_sq_err, 95, axis=(2, 3, 4))
prev_p_99 = np.percentile(prev_sq_err, 99, axis=(2, 3, 4))
prev_std = np.std(prev_sq_err, axis=(2, 3, 4))
# now we flatten them so that they are all in one column later
prev_mse = prev_mse.reshape(-1)
prev_p_50 = prev_p_50.reshape(-1)
prev_p_75 = prev_p_75.reshape(-1)
prev_p_90 = prev_p_90.reshape(-1)
prev_p_95 = prev_p_95.reshape(-1)
prev_p_99 = prev_p_99.reshape(-1)
prev_std = prev_std.reshape(-1)
# save the results to a dataframe
df = pd.DataFrame({'model_mse': model_mse, 'model_p_50': model_p_50, 'model_p_75': model_p_75, 'model_p_90': model_p_90, 'model_p_95': model_p_95, 'model_p_99': model_p_99, 'model_std': model_std, 'prev_mse': prev_mse, 'prev_p_50': prev_p_50, 'prev_p_75': prev_p_75, 'prev_p_90': prev_p_90, 'prev_p_95': prev_p_95, 'prev_p_99': prev_p_99, 'prev_std': prev_std})
df.to_pickle(os.path.join(save_path, 'test_results.pkl.gz'))
# Create plots for illustation of model performance:
# for each sampled sequence, a 4-row grid of actual / predicted / overlay / abs-error frames.
if n_plot > 0:
    skip_frames_plot = 4  # plot every 4th frame so the figure stays readable
    print("Creating %s plots" % n_plot)
    # Plot some predictions
    aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]  # currently unused (see commented factor below)
    plt.figure(figsize = (nt//skip_frames_plot, 4*1.6)) # *aspect_ratio))
    gs = gridspec.GridSpec(4, nt//skip_frames_plot)  # rows: Actual / Predicted / Overlay / Abs. Error
    gs.update(wspace=0., hspace=0.)
    # start with a clean plot directory
    plot_save_dir = os.path.join(save_path, 'prediction_plots')
    if os.path.exists(plot_save_dir):
        shutil.rmtree(plot_save_dir)
    os.makedirs(plot_save_dir)
    # pick n_plot random test sequences
    plot_idx = np.random.permutation(X_test.shape[0])[:n_plot]
    for i in plot_idx:
        for tt in range(nt):
            if tt % skip_frames_plot > 0:
                continue  # only every skip_frames_plot-th frame gets a column
            t = tt // skip_frames_plot  # column index within the grid
            # absolute per-pixel prediction error
            err = np.abs(X_hat[i,tt] - X_test[i,tt])
            # smooth and threshold the error for the overlay visualisation
            err_ov = gaussian_filter(err, 3)
            err_ov[err_ov < .1] = 0.0
            # boost the red channel where the (smoothed) error is large
            overlay = X_test[i,tt].copy()
            overlay[:,:,0] += err_ov[:,:,0]*5.0
            # row 1: ground-truth frame
            plt.subplot(gs[t])
            plt.imshow(X_test[i,tt], interpolation='none')
            plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
            if t==0: plt.ylabel('Actual', fontsize=10) # plot ylabel on left of first image
            # row 2: predicted frame
            plt.subplot(gs[t + nt//skip_frames_plot])
            plt.imshow(X_hat[i,tt], interpolation='none')
            plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
            if t==0: plt.ylabel('Predicted', fontsize=10)
            # row 3: error overlay on the actual frame
            plt.subplot(gs[t + nt//skip_frames_plot*2])
            plt.imshow(overlay, interpolation='none')
            plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
            if t==0: plt.ylabel('Overlay', fontsize=10)
            # You can use this to also plot the previous video frame for comparison
            # plt.subplot(gs[t + nt*2])
            # plt.imshow(X_test[i,t - 1], interpolation='none')
            # plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
            # if t==0: plt.ylabel('Previous', fontsize=10)
            # row 4: raw absolute error
            plt.subplot(gs[t + nt//skip_frames_plot*3])
            plt.imshow(err, interpolation='none')
            plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
            if t==0: plt.ylabel('Abs. Error', fontsize=10)
            plt.xlabel(t, fontsize=6)
        # one figure per sequence; clear it for the next one
        plt.savefig(os.path.join(plot_save_dir, 'plot_' + str(i) + '.png'))
        plt.clf()
# create frames that can be used for a video that shows anomalies as overlay
if save_prediction_error_video_frames and n_plot > 0:
    movie_save_dir = os.path.join(save_path, 'PE_videoframes')
    if not os.path.exists(movie_save_dir):
        os.makedirs(movie_save_dir)
    for seq_idx in plot_idx:
        for frame_idx in range(nt):
            # absolute prediction error, smoothed and thresholded
            frame_err = np.abs(X_hat[seq_idx, frame_idx] - X_test[seq_idx, frame_idx])
            smoothed = gaussian_filter(frame_err, 3)
            smoothed[smoothed < .1] = 0.0
            # boost the red channel of the actual frame where the error is large
            overlay = X_test[seq_idx, frame_idx].copy()
            overlay[:, :, 0] += smoothed[:, :, 0] * 5.0
            plt.imshow(overlay, interpolation='none')
            plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
            # NOTE(review): plt.close() placement reconstructed as per-frame — confirm
            plt.savefig(os.path.join(movie_save_dir, 'frame_%02d_%03d.png' % (seq_idx, frame_idx)))
            plt.close()
|
|
import numpy as np
import itertools
class BinaryLinearCode():
    """A binary [n, k] linear block code defined by a generator matrix G and a
    parity-check matrix H.

    Construction enumerates all 2**k codewords and builds both a standard-array
    decoding table and a syndrome decoding table via the module-level helper
    functions (findCodewords, createStArrDecT, createSyDecT).
    """
    def __init__(self, G, H):
        self.G = G # Generator matrix
        self.H = H # Parity Check matrix
        self.k = G.shape[0]  # message length (rows of G)
        self.n = G.shape[1]  # codeword length (columns of G)
        self.M = 2 ** self.k # Number of possible messages
        # all M codewords, one per row (side effect: sets self.possibleMsgs)
        self.codewordsList = findCodewords(self)
        # --- Create Standard Array Decoder Table
        createStArrDecT(self)
        # --- Create Syndrome Decoder Table
        createSyDecT(self)
    # ==================================== Encoder ==================================== #
    def encode(self, m):
        """Encode message row vector(s) m as c = m·G (mod 2)."""
        return np.dot(m, self.G) % 2
    # ==================================== Decoders ==================================== #
    # --- Standard Array Decoder
    def standardArrayDec(self,r):
        """Decode received word r by locating its decimal value in the standard
        array; the column it falls in identifies the codeword/message."""
        decimalValue = bin2dec(r,r.size)
        # row = coset (error pattern), column = transmitted codeword
        r, c = np.where(self.cosets == decimalValue)
        return self.codewordsList[c,:], self.possibleMsgs[c,:]
    # --- Syndrome Decoder
    def syndromeDec(self,r):
        """Decode received word r via its syndrome s = H·rᵀ (mod 2)."""
        # --- Compute syndrome
        syndromeBin = np.squeeze(np.dot(self.H,r.T) % 2)
        # --- Compute the decimal s
        syndromeDec = int(bin2dec(syndromeBin,syndromeBin.size))
        # --- Correct Codeword: add the coset leader (most likely error pattern)
        cHat = (r + self.syndromes[syndromeDec,:]) % 2
        # --- Decimal cHat
        cHatDec = bin2dec(cHat,cHat.size)
        # look the corrected codeword up in the subgroup row (row 0) of the array
        msgHat = self.possibleMsgs[np.squeeze(np.where(self.cosets[0,:] == cHatDec)),:]
        return cHat, msgHat
# ==================================== Functions to Create the Tables ==================================== #
def createStArrDecT(self):
    """Build the standard-array decoding table on the code object.

    Fills self.cosets, a 2**(n-k) x M matrix: row 0 holds the decimal values
    of the codewords themselves; each later row is the coset obtained by
    adding the next minimum-weight unused pattern (coset leader) to every
    codeword.
    """
    # --- Initialize cosets
    self.cosets = np.zeros((2 ** (self.n - self.k) , self.M))
    # --- Subgroup: row 0 is the code itself, stored as decimal values
    for i in range(self.M):
        self.cosets[0,i] = bin2dec(self.codewordsList[i,:],self.n)
    # track which of the 2**n words already appear in the array
    isIncluded = np.zeros(2 ** self.n)
    isIncluded[bin2dec(self.codewordsList,self.n)] = 1
    isIncluded[0] = 0  # keep the all-zero word available as the first coset leader
    # Candidate coset leaders ordered by increasing Hamming weight.
    # CONSISTENCY FIX: the original duplicated findPossibleErrors' weight-sorting
    # loop verbatim here; reuse the helper instead.
    possibleErrors = findPossibleErrors(self)
    i, new = 0, 0
    while True:
        # skip patterns already placed somewhere in the array
        if isIncluded[int(bin2dec(possibleErrors[i,:], possibleErrors[i,:].size))] == 1:
            i += 1
            continue
        # -- New coset Leader (lowest-weight pattern not yet in the array)
        cosetLeader = possibleErrors[i,:]
        # --- Create Cosets
        for j in range(self.M):
            # --- Find the new element of the coset
            temp = (cosetLeader + self.codewordsList[j,:]) % 2
            # --- Compute its decimal representation
            decimalVal = bin2dec(temp,temp.size)
            # --- Save this value as decimal
            self.cosets[new,j] = decimalVal
            # --- Mark that it has been processed
            isIncluded[int(decimalVal)] = 1
        if new == (2 ** (self.n - self.k)) -1:
            break
        new += 1
        i += 1
def createSyDecT(self):
    """Build the syndrome decoding table on the code object.

    self.syndromes[s, :] is the minimum-weight error pattern (coset leader)
    whose syndrome has decimal value s; error patterns are scanned in order
    of increasing Hamming weight, so the first hit per syndrome is minimal.
    """
    # BUG FIX: there are 2**(n-k) distinct syndromes, not self.M = 2**k.
    # Sizing the table with self.M crashes with an IndexError whenever
    # k < n - k, and silently over-allocates otherwise.
    numSyndromes = 2 ** (self.n - self.k)
    self.syndromes = np.zeros((numSyndromes,self.n))
    isIncluded = np.zeros(numSyndromes)
    possibleErrors = findPossibleErrors(self)
    s = 0
    for i in range(2 ** self.n):
        syndromeBin = np.dot(self.H,possibleErrors[i,:]) % 2
        syndromeDec = int(bin2dec(syndromeBin,syndromeBin.size))
        if isIncluded[syndromeDec] == 0:
            self.syndromes[syndromeDec,:] = possibleErrors[i,:]
            isIncluded[syndromeDec] = 1
            s += 1
        # stop once every syndrome has a leader
        if s == numSyndromes:
            break
def findCodewords(self):
    """Enumerate every k-bit message (binary counting order, MSB first) and
    return the M x n codeword matrix c = m·G (mod 2).

    Side effect: stores the message matrix in self.possibleMsgs.
    """
    msg_count = 2 ** self.k
    # bit-trick enumeration: row m holds the binary digits of m, MSB first
    bit_positions = np.arange(self.k - 1, -1, -1)
    self.possibleMsgs = (np.arange(msg_count)[:, None] >> bit_positions) & 1
    return np.array(np.dot(self.possibleMsgs, self.G)) % 2
def bin2dec(binaryVector, n):
    """Interpret binaryVector (a length-n vector, or a matrix of length-n rows,
    MSB first) as unsigned integer value(s)."""
    weights = (1 << np.arange(n))[::-1]
    return np.dot(binaryVector, weights)
def findPossibleErrors(self):
    """Return all 2**n binary error patterns as a float array, ordered by
    increasing Hamming weight (stable within equal weight, i.e. binary
    counting order is preserved inside each weight class)."""
    patterns = np.array(list(itertools.product([0, 1], repeat=self.n)))
    # stable argsort keeps the original enumeration order within each weight
    order = np.argsort(patterns.sum(axis=1), kind='stable')
    return patterns[order].astype(float)
|
|
# Author: Laura Kulowski
import numpy as np
import matplotlib.pyplot as plt
import torch
def plot_train_test_results(lstm_model, Xtrain, Ytrain, Xtest, Ytest, num_rows = 4):
    '''
    Plot examples of the lstm encoder-decoder evaluated on the training/test data.

    : param lstm_model: trained lstm encoder-decoder
    : param Xtrain:     np.array of windowed training input data
    : param Ytrain:     np.array of windowed training target data
    : param Xtest:      np.array of windowed test input data
    : param Ytest:      np.array of windowed test target data
    : param num_rows:   number of training/test examples to plot
    : return: num_rows x 2 grid saved to plots/predictions.png; first column is
    :         training-data predictions, second column is test-data predictions
    '''
    iw = Xtrain.shape[0]  # input window length
    ow = Ytest.shape[0]   # output (prediction) window length
    fig, ax = plt.subplots(num_rows, 2, figsize = (13, 15))
    target_color = (0.2, 0.42, 0.72)
    pred_color = (0.76, 0.01, 0.01)
    for ii in range(num_rows):
        # column 0 = train example, column 1 = test example — same plotting code
        for col, (X, Y) in enumerate(((Xtrain, Ytrain), (Xtest, Ytest))):
            window = X[:, ii, :]
            prediction = lstm_model.predict(torch.from_numpy(window).type(torch.Tensor), target_len = ow)
            axis = ax[ii, col]
            axis.plot(np.arange(0, iw), X[:, ii, 0], 'k', linewidth = 2, label = 'Input')
            # prepend the last input value so target/prediction connect to the input curve
            axis.plot(np.arange(iw - 1, iw + ow), np.concatenate([[X[-1, ii, 0]], Y[:, ii, 0]]),
                      color = target_color, linewidth = 2, label = 'Target')
            axis.plot(np.arange(iw - 1, iw + ow), np.concatenate([[X[-1, ii, 0]], prediction[:, 0]]),
                      color = pred_color, linewidth = 2, label = 'Prediction')
            axis.set_xlim([0, iw + ow - 1])
            axis.set_xlabel('$t$')
            axis.set_ylabel('$y$')
        if ii == 0:
            ax[ii, 0].set_title('Train')
            ax[ii, 1].legend(bbox_to_anchor=(1, 1))
            ax[ii, 1].set_title('Test')
    plt.suptitle('LSTM Encoder-Decoder Predictions', x = 0.445, y = 1.)
    plt.tight_layout()
    plt.subplots_adjust(top = 0.95)
    plt.savefig('plots/predictions.png')
    plt.close()
    return
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
import unittest
from datetime import timedelta
from unittest import TestCase
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.utils.decomposition import TimeSeriesDecomposition
from kats.utils.simulator import Simulator
def load_data(file_name):
    """Load a CSV bundled with the kats package into a DataFrame.

    The data path depends on the current working directory: inside a kats
    checkout the files live under 'data/', otherwise under 'kats/data/'.
    """
    root = "kats"
    prefix = "data/" if "kats" in os.getcwd().lower() else "kats/data/"
    raw_bytes = pkgutil.get_data(root, prefix + file_name)
    return pd.read_csv(io.BytesIO(raw_bytes), encoding="utf8")
class DecompositionTest(TestCase):
    """Tests for kats.utils.decomposition.TimeSeriesDecomposition."""

    def setUp(self):
        # monthly univariate series with standard column names
        data = load_data("air_passengers.csv")
        data.columns = ["time", "y"]
        self.ts_data = TimeSeriesData(data)
        # same series but with a non-standard time column name ("ds")
        data_nonstandard_name = data.copy()
        data_nonstandard_name.columns = ["ds", "y"]
        self.ts_data_nonstandard_name = TimeSeriesData(
            df=data_nonstandard_name, time_col_name="ds"
        )
        # daily univariate series
        daily_data = load_data("peyton_manning.csv")
        daily_data.columns = ["time", "y"]
        self.ts_data_daily = TimeSeriesData(daily_data)
        # multivariate series (decomposition should reject it)
        DATA_multi = load_data("multivariate_anomaly_simulated_data.csv")
        self.TSData_multi = TimeSeriesData(DATA_multi)

    def test_asserts(self) -> None:
        """Multivariate input must raise ValueError."""
        with self.assertRaises(ValueError):
            TimeSeriesDecomposition(self.TSData_multi, "additive")

    def test_defaults(self) -> None:
        """Invalid decomposition/method names should fall back to defaults."""
        m1 = TimeSeriesDecomposition(self.ts_data, "additive")
        output1 = m1.decomposer()
        # "logarithmic" is not a supported decomposition; expect the default result
        m2 = TimeSeriesDecomposition(self.ts_data, "logarithmic")
        output2 = m2.decomposer()
        # NOTE(review): `.value.all()` reduces each side to a single bool, so these
        # assertions only compare two booleans, not the arrays element-wise —
        # a very weak check that passes for almost any pair of outputs.
        self.assertEqual(output1["trend"].value.all(), output2["trend"].value.all())
        self.assertEqual(
            output1["seasonal"].value.all(), output2["seasonal"].value.all()
        )
        self.assertEqual(output1["rem"].value.all(), output2["rem"].value.all())
        # "STL2" is not a supported method; expect fallback behaviour
        m3 = TimeSeriesDecomposition(self.ts_data, "additive", "STL2")
        output3 = m3.decomposer()
        self.assertEqual(output1["trend"].value.all(), output3["trend"].value.all())
        self.assertEqual(
            output1["seasonal"].value.all(), output3["seasonal"].value.all()
        )
        self.assertEqual(output1["rem"].value.all(), output3["rem"].value.all())

    def test_nonstandard_time_col_name(self) -> None:
        """Decomposition results keep the input's non-standard time column name."""
        m = TimeSeriesDecomposition(self.ts_data_nonstandard_name, "multiplicative")
        m.decomposer()
        self.assertEqual(
            # pyre-fixme[16]: `TimeSeriesDecomposition` has no attribute `results`.
            m.results["trend"].time_col_name,
            self.ts_data_nonstandard_name.time_col_name,
        )
        self.assertEqual(
            m.results["seasonal"].time_col_name,
            self.ts_data_nonstandard_name.time_col_name,
        )
        self.assertEqual(
            m.results["rem"].time_col_name, self.ts_data_nonstandard_name.time_col_name
        )

    def test_decomposition_additive(self) -> None:
        """trend + seasonal + rem must reconstruct the original series (STL and
        seasonal_decompose; monthly and daily data)."""
        m = TimeSeriesDecomposition(self.ts_data, "additive")
        output = m.decomposer()
        # join actuals with the recomposed series on the time index
        out = pd.merge(
            pd.DataFrame.from_dict(
                {"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    + output["seasonal"].value
                    + output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
        )
        # same check with the seasonal_decompose backend
        m_seasonal = TimeSeriesDecomposition(
            self.ts_data, "additive", "seasonal_decompose"
        )
        output = m_seasonal.decomposer()
        out = pd.merge(
            pd.DataFrame.from_dict(
                {"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    + output["seasonal"].value
                    + output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
        )
        # daily data, STL backend
        m2 = TimeSeriesDecomposition(self.ts_data_daily, "additive")
        output = m2.decomposer()
        out2 = pd.merge(
            pd.DataFrame.from_dict(
                {
                    "time": pd.DatetimeIndex(self.ts_data_daily.time),
                    "y": self.ts_data_daily.value,
                }
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    + output["seasonal"].value
                    + output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
        )
        # daily data, seasonal_decompose backend
        m2_seasonal = TimeSeriesDecomposition(
            self.ts_data_daily, "additive", "seasonal_decompose"
        )
        output = m2_seasonal.decomposer()
        out2 = pd.merge(
            pd.DataFrame.from_dict(
                {
                    "time": pd.DatetimeIndex(self.ts_data_daily.time),
                    "y": self.ts_data_daily.value,
                }
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    + output["seasonal"].value
                    + output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
        )

    def test_decomposition_multiplicative(self) -> None:
        """trend * seasonal * rem must reconstruct the original series (STL and
        seasonal_decompose; monthly and daily data)."""
        m = TimeSeriesDecomposition(self.ts_data, "multiplicative")
        output = m.decomposer()
        out = pd.merge(
            pd.DataFrame.from_dict(
                {"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    * output["seasonal"].value
                    * output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
        )
        m_seas = TimeSeriesDecomposition(
            self.ts_data, "multiplicative", "seasonal_decompose"
        )
        output = m_seas.decomposer()
        out = pd.merge(
            pd.DataFrame.from_dict(
                {"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    * output["seasonal"].value
                    * output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
        )
        # daily data, STL backend
        m2 = TimeSeriesDecomposition(self.ts_data_daily, "multiplicative")
        output = m2.decomposer()
        out2 = pd.merge(
            pd.DataFrame.from_dict(
                {
                    "time": pd.DatetimeIndex(self.ts_data_daily.time),
                    "y": self.ts_data_daily.value,
                }
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    * output["seasonal"].value
                    * output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
        )
        # daily data, seasonal_decompose backend
        m2_seas = TimeSeriesDecomposition(
            self.ts_data_daily, "multiplicative", "seasonal_decompose"
        )
        output = m2_seas.decomposer()
        out2 = pd.merge(
            pd.DataFrame.from_dict(
                {
                    "time": pd.DatetimeIndex(self.ts_data_daily.time),
                    "y": self.ts_data_daily.value,
                }
            ),
            pd.DataFrame.from_dict(
                {
                    "time": output["trend"].time,
                    "y": output["trend"].value
                    * output["seasonal"].value
                    * output["rem"].value,
                }
            ),
            how="inner",
            on="time",
            suffixes=("_actuals", "_decomposed"),
        )
        self.assertAlmostEqual(
            np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
        )

    def test_plot(self) -> None:
        """Smoke test: plotting a decomposition must not raise."""
        m = TimeSeriesDecomposition(self.ts_data, "multiplicative")
        m.decomposer()
        m.plot()

    def test_multiplicative_assert(self) -> None:
        """Multiplicative decomposition of non-positive data should log an error."""
        data_new = self.ts_data.to_dataframe().copy()
        data_new["y"] = -1.0 * data_new["y"]
        ts_data_new = TimeSeriesData(data_new)
        print(ts_data_new)
        with self.assertLogs(level="ERROR"):
            m = TimeSeriesDecomposition(ts_data_new, "multiplicative")
            m.decomposer()

    def test_new_freq(self) -> None:
        """Decomposition must handle a non-standard sampling frequency."""
        DATA_multi = self.TSData_multi.to_dataframe()
        # NOTE(review): variable is named df_15_min but the generated frequency
        # below is 25 minutes ('25T') — confirm which was intended.
        df_15_min = DATA_multi[["time", "1"]]
        df_15_min["time"] = list(
            pd.date_range(end="2020-02-01", periods=df_15_min.shape[0], freq="25T")
        )
        df_15_min["time"] = df_15_min["time"].astype("str")
        df_15_min.columns = ["time", "y"]
        df_ts = TimeSeriesData(df_15_min)
        m = TimeSeriesDecomposition(df_ts, "additive", method="STL")
        m.decomposer()
        m2 = TimeSeriesDecomposition(df_ts, "additive", method="seasonal_decompose")
        m2.decomposer()
# class KDEResidualTranslatorTest(TestCase):
# def setUp(self) -> None:
# self._y = ts_data
# yhat = pd.DataFrame(
# {"value": self._y.value.rolling(7).mean().shift(1), "time": self._y.time}
# )
# self._yhat = TimeSeriesData(yhat)
# self._residual = self._y - self._yhat
# def test_setup(self) -> None:
# self.assertEquals(self._yhat.value.isnull().sum(), 7)
# def test_illegal_truncated_fracs(self) -> None:
# with self.assertRaises(ValueError):
# KDEResidualTranslator(-0.1, 0.9)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(1.1, 2.0)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(0.1, -0.9)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(0.1, 1.9)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(0.9, 0.8)
# def test_y_yhat(self) -> None:
# trn = KDEResidualTranslator()
# trn = trn.fit(y=self._y, yhat=self._yhat)
# self._test_residual_trn(trn)
# def _test_residual(self) -> None:
# trn = KDEResidualTranslator()
# for name in self._series_names:
# dataset = self._get_dataset_for_name(name)[["y", "yhat"]]
# dataset["residual"] = dataset.yhat - dataset.y
# dataset.drop(["y", "yhat"], axis=1, inplace=True)
# trn = trn.fit(dataset)
# self._test_residual_trn(trn)
# def _test_residual_trn(self, trn: KDEResidualTranslator) -> None:
# np.testing.assert_allclose(
# np.exp(trn.predict_log_proba(residual=self._residual).value),
# trn.predict_proba(residual=self._residual).value,
# )
# proba = trn.predict_proba(residual=self._residual)
# self.assertTrue(np.all((proba.value >= 0) & (proba.value <= 1)))
# ks = ks_2samp(
# trn.kde_.sample(len(self._residual)).flatten(), self._residual.value
# )
# self.assertTrue(ks.statistic < 0.1 or ks.pvalue >= 0.2)
class SimulatorTest(TestCase):
    """Tests for kats.utils.simulator.Simulator."""

    def test_arima_sim(self) -> None:
        """ARIMA simulation with a fixed seed reproduces known values."""
        sim = Simulator(n=10, freq="MS", start=pd.to_datetime("2011-01-01 00:00:00"))
        np.random.seed(100)
        ts = sim.arima_sim(ar=[0.1, 0.05], ma=[0.04, 0.1], d=1)
        expected_value = pd.Series(
            [
                0.797342,
                1.494317,
                1.608064,
                1.186103,
                2.147635,
                1.772615,
                0.750320,
                2.159774,
                3.744138,
                3.944730,
            ]
        )
        # BUG FIX: the original asserted `(ts.value - expected_value).all()`,
        # which is True exactly when every difference is NON-zero — i.e. it
        # passed when the values did NOT match. Compare approximately instead
        # (expected values are given to 6 decimal places).
        self.assertTrue(np.allclose(ts.value, expected_value, atol=1e-5))
        self.assertEqual(len(ts.time), 10)

    def test_stl_sim_additive(self) -> None:
        """Two identically-seeded additive STL simulations must coincide."""
        # Create a STL-based simulated object
        sim = Simulator(n=100, freq="1D", start=pd.to_datetime("2011-01-01"))
        np.random.seed(614)
        sim.add_trend(magnitude=10)
        sim.add_seasonality(5, period=timedelta(days=7))
        sim.add_noise(magnitude=2)
        sim_ts = sim.stl_sim()
        # Compare the obtained simulated time series to
        # the original simulated data
        generator1 = Simulator(n=100, freq="D", start="2011-01-01")
        generator1.add_trend(magnitude=10)
        np.random.seed(614)
        generator1.add_seasonality(magnitude=5, period=timedelta(days=7))
        generator1.add_noise(magnitude=2)
        gen_ts_series = generator1.stl_sim()
        # pyre-fixme[16]: `bool` has no attribute `all`.
        self.assertEqual(True, (gen_ts_series.value == sim_ts.value).all())
        self.assertEqual(True, (gen_ts_series.time == sim_ts.time).all())

    def test_stl_sim_multiplicative(self) -> None:
        """Two identically-seeded multiplicative STL simulations must coincide."""
        # Create a STL-based simulated object
        sim = Simulator(n=100, freq="1D", start=pd.to_datetime("2011-01-01"))
        np.random.seed(614)
        sim.add_trend(magnitude=5, multiply=True)
        sim.add_seasonality(10, period=timedelta(days=14))
        sim.add_noise(magnitude=1, multiply=True)
        sim_ts = sim.stl_sim()
        # Compare the obtained simulated time series to
        # the original simulated data
        generator2 = Simulator(n=100, freq="D", start="2011-01-01")
        generator2.add_trend(magnitude=5, multiply=True)
        np.random.seed(614)
        generator2.add_seasonality(magnitude=10, period=timedelta(days=14))
        generator2.add_noise(magnitude=1, multiply=True)
        gen_ts_series = generator2.stl_sim()
        # pyre-fixme[16]: `bool` has no attribute `all`.
        self.assertEqual(True, (gen_ts_series.value == sim_ts.value).all())
        self.assertEqual(True, (gen_ts_series.time == sim_ts.time).all())

    def test_level_shift(self) -> None:
        """level_shift_sim returns n points for default and custom parameters."""
        sim = Simulator(n=450, start="2018-01-01")
        ts = sim.level_shift_sim()
        self.assertEqual(len(ts), 450)
        # explicit change points, anomalies and seasonality
        sim2 = Simulator(n=450, start="2018-01-01")
        ts2 = sim2.level_shift_sim(
            cp_arr=[100, 200],
            level_arr=[3, 20, 2],
            noise=3,
            seasonal_period=7,
            seasonal_magnitude=3,
            anomaly_arr=[50, 150, 250],
            z_score_arr=[10, -10, 20],
        )
        self.assertEqual(len(ts2), 450)
        # degenerate case: no change points
        sim3 = Simulator(n=450, start="2018-01-01")
        ts3 = sim3.level_shift_sim(
            cp_arr=[],
            level_arr=[3],
            noise=3,
            seasonal_period=7,
            seasonal_magnitude=3,
            anomaly_arr=[50, 150, 250],
            z_score_arr=[10, -10, 20],
        )
        self.assertEqual(len(ts3), 450)

    def test_level_shift_mvn_indep(self) -> None:
        """Multivariate level-shift simulation has n rows and time + 3 value columns."""
        sim = Simulator(n=450, start="2018-01-01")
        ts = sim.level_shift_multivariate_indep_sim()
        self.assertEqual(len(ts), 450)
        ts_df = ts.to_dataframe()
        self.assertEqual(ts_df.shape[1], 4)  # time + n_dim
        sim2 = Simulator(n=450, start="2018-01-01")
        ts2 = sim2.level_shift_multivariate_indep_sim(
            cp_arr=[100, 200],
            level_arr=[3, 20, 2],
            noise=3,
            seasonal_period=7,
            seasonal_magnitude=3,
        )
        self.assertEqual(len(ts2), 450)
        ts2_df = ts2.to_dataframe()
        self.assertEqual(ts2_df.shape[1], 4)  # time + n_dim

    def test_trend_shift(self) -> None:
        """trend_shift_sim returns n points for default and custom parameters."""
        sim = Simulator(n=450, start="2018-01-01")
        ts = sim.trend_shift_sim()
        self.assertEqual(len(ts), 450)
        sim2 = Simulator(n=450, start="2018-01-01")
        ts2 = sim2.trend_shift_sim(
            cp_arr=[100, 200],
            trend_arr=[3, 20, 2],
            intercept=30,
            noise=30,
            seasonal_period=7,
            seasonal_magnitude=3,
            anomaly_arr=[50, 150, 250],
            z_score_arr=[10, -10, 20],
        )
        self.assertEqual(len(ts2), 450)
        # degenerate case: no change points
        sim3 = Simulator(n=450, start="2018-01-01")
        ts3 = sim3.trend_shift_sim(
            cp_arr=[],
            trend_arr=[3],
            intercept=30,
            noise=30,
            seasonal_period=7,
            seasonal_magnitude=3,
            anomaly_arr=[50, 150, 250],
            z_score_arr=[10, -10, 20],
        )
        self.assertEqual(len(ts3), 450)
# Allow running this test module directly (outside a test runner)
if __name__ == "__main__":
    unittest.main()
|
|
"""
bin modis data into regular latitude and longitude bins
"""
import numpy as np
def reproj_L1B(raw_data, raw_x, raw_y, xlim, ylim, res):
    '''
    =========================================================================================
    Reproject MODIS L1B file to a regular grid by bin-averaging
    -----------------------------------------------------------------------------------------
    d_array, x_array, y_array, bin_count = reproj_L1B(raw_data, raw_x, raw_y, xlim, ylim, res)
    -----------------------------------------------------------------------------------------
    Input:
            raw_data: L1B data, N*M 2-D array.
            raw_x: longitude info. N*M 2-D array.
            raw_y: latitude info. N*M 2-D array.
            xlim: range of longitude, a list.
            ylim: range of latitude, a list.
            res: resolution, single value.
    Output:
            d_array: L1B reprojected data (bin mean; NaN where a bin got no points).
            x_array: reprojected longitude (bin-mean longitude).
            y_array: reprojected latitude (bin-mean latitude).
            bin_count: how many raw data points fell into each reprojected grid cell.
    Note:
            function does not perform well if "res" is larger than the resolution of
            the input data. Sizes of "raw_data", "raw_x", "raw_y" must agree.
    =========================================================================================
    '''
    x_bins = np.arange(xlim[0], xlim[1], res)
    y_bins = np.arange(ylim[0], ylim[1], res)
    # side='right' so a value exactly on a bin edge lands in the bin to its left
    # once we subtract 1 below
    x_indices = np.searchsorted(x_bins, raw_x.ravel(), 'right')
    y_indices = np.searchsorted(y_bins, raw_y.ravel(), 'right')
    grid_shape = (len(y_bins), len(x_bins))
    # BUG FIX: np.float / np.int were removed in NumPy 1.20+/1.24; use the
    # builtin types (same dtypes as before: float64 / platform int)
    y_array = np.zeros(grid_shape, dtype=float)
    x_array = np.zeros(grid_shape, dtype=float)
    d_array = np.zeros(grid_shape, dtype=float)
    bin_count = np.zeros(grid_shape, dtype=int)
    rows = y_indices - 1  # '-1' is because we call 'right' in np.searchsorted
    cols = x_indices - 1
    # unbuffered in-place accumulation — replaces the original per-point Python
    # loop with one vectorized pass (identical results, incl. index wraparound
    # for points below the first bin edge)
    np.add.at(bin_count, (rows, cols), 1)
    np.add.at(x_array, (rows, cols), raw_x.ravel())
    np.add.at(y_array, (rows, cols), raw_y.ravel())
    np.add.at(d_array, (rows, cols), raw_data.ravel())
    # convert sums to means; empty bins become NaN (as in the original)
    filled = bin_count > 0
    x_array[filled] /= bin_count[filled]
    y_array[filled] /= bin_count[filled]
    d_array[filled] /= bin_count[filled]
    x_array[~filled] = np.nan
    y_array[~filled] = np.nan
    d_array[~filled] = np.nan
    return d_array, x_array, y_array, bin_count
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import numpy as np
import pandas as pd
import scanpy.api as sc
import sys
import wot.io
def main(argv):
    """Compute a nearest-neighbor graph of cells and write it in gexf format.

    Exactly one of three embeddings is used to build the graph: PCA space,
    diffusion-component space, or the input matrix directly (--neighbors).
    The --neighbors flag is mutually exclusive with the PCA/diffusion options.
    """
    parser = argparse.ArgumentParser(description='Compute neighborhood graph')
    # NOTE(review): wot.commands is referenced but only wot.io is imported at
    # the top of the file — presumably wot/__init__ pulls it in; confirm.
    parser.add_argument('--matrix', help=wot.commands.MATRIX_HELP, required=True)
    parser.add_argument('--gene_filter',
                        help='File with one gene id per line to include from the matrix')
    parser.add_argument('--transpose', help='Transpose the matrix', action='store_true')
    parser.add_argument('--comps_diff', help='Number of diffusion components. Set to 0 to disable', type=int,
                        default=100)
    parser.add_argument('--neighbors_diff', help='Number of nearest neighbors to use in diffusion component space',
                        type=int, default=20)
    parser.add_argument('--comps_pca', help='Number of PCA components. Set to 0 to disable', type=int, default=50)
    parser.add_argument('--neighbors_pca', help='Number of nearest neighbors to use in PCA space', type=int,
                        default=15)
    parser.add_argument('--neighbors',
                        help='Number of nearest neighbors to use to construct the nearest neighbor graph using the input matrix directly. ',
                        type=int)
    parser.add_argument('--out',
                        help='Output file name. The file is saved in gexf format (https://gephi.org/gexf/format/)')
    args = parser.parse_args(argv)
    if args.out is None:
        args.out = 'wot'
    comps_diff = args.comps_diff
    neighbors_diff = args.neighbors_diff
    neighbors_pca = args.neighbors_pca
    comps_pca = args.comps_pca
    do_neighbors = args.neighbors is not None and args.neighbors > 0
    do_pca = neighbors_pca > 0 and comps_pca > 0
    do_dmap = comps_diff > 0 and neighbors_diff > 0
    if (do_pca or do_dmap) and do_neighbors:
        print('neighbors flag is mutually exclusive with diffusion map and PCA')
        sys.exit(1)
    adata = wot.io.read_dataset(args.matrix)
    if args.transpose:
        adata = adata.T
    if args.gene_filter is not None:
        if os.path.isfile(args.gene_filter):
            # A file: one gene id per line.
            gene_ids = pd.read_csv(args.gene_filter, index_col=0, header=None) \
                .index.values
        else:
            # Otherwise treat the argument as a regular expression on gene ids.
            import re
            expr = re.compile(args.gene_filter)
            gene_ids = [e for e in adata.var.index.values if expr.match(e)]
        col_indices = adata.var.index.isin(gene_ids)
        # bug fix: was `np.sum(col_indices) is 0` — identity comparison on an
        # integer, which is not guaranteed to be True even when the sum is 0.
        if np.sum(col_indices) == 0:
            raise ValueError('No genes passed the gene filter')
        adata = adata[:, col_indices]
    if do_pca:
        sc.tl.pca(adata)
        sc.pp.neighbors(adata, use_rep='X_pca', n_neighbors=neighbors_pca, n_pcs=comps_pca)
    if do_dmap:
        sc.tl.diffmap(adata, n_comps=comps_diff)
        sc.pp.neighbors(adata, use_rep='X_diffmap', n_neighbors=neighbors_diff)
    if do_neighbors:
        sc.pp.neighbors(adata, use_rep='X', n_neighbors=args.neighbors)
    W = adata.uns['neighbors']['connectivities']  # sparse symmetric weight matrix
    n_obs = W.shape[0]
    obs_ids = adata.obs.index.values
    with open(args.out + '.gexf', 'w') as writer:
        writer.write('<?xml version="1.0" encoding="UTF-8"?>')
        writer.write(
            '<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd" version="1.2">')
        writer.write('<graph defaultedgetype="undirected">')
        writer.write('<nodes>')
        for j in range(n_obs):
            writer.write('<node id="{id}" label="{id}" />'.format(id=obs_ids[j]))
        writer.write('</nodes>')
        writer.write('<edges>')
        rows, cols = W.nonzero()
        edge_counter = 1
        for i, j in zip(rows, cols):
            if i < j:  # undirected: emit each edge once (upper triangle only)
                # bug fix: removed the stray leading '"' that corrupted every
                # <edge> element and made the gexf file unparsable.
                writer.write(
                    '<edge id="{id}" source="{s}" target="{t}" weight="{w}" />'.format(id=edge_counter, s=obs_ids[i],
                                                                                       t=obs_ids[j], w=W[i, j]))
                edge_counter = edge_counter + 1
        writer.write('</edges>')
        writer.write('</graph>')
        writer.write('</gexf>')
# <?xml version="1.0" encoding="UTF-8"?>
# <gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
# <meta lastmodifieddate="2009-03-20">
# <creator>Gexf.net</creator>
# <description>A hello world! file</description>
# </meta>
# <graph mode="static" defaultedgetype="directed">
# <nodes>
# <node id="0" label="Hello" />
# <node id="1" label="Word" />
# </nodes>
# <edges>
# <edge id="0" source="0" target="1" />
# </edges>
# </graph>
# </gexf>
|
|
# coding: utf-8
import numpy as np
from PIL import Image
import scipy.io as sio
import os
import cv2
import time
import math
import os
os.environ['GLOG_minloglevel'] = '2'
# Make sure that caffe is on the python path:
caffe_root = '../../'
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
from caffe import layers as L
print("import caffe success")
def out_h(x):
x_list = []
x_list.append(x)
x = math.floor((x - 3) / 2.0 + 1)
x_list.append(x)
x = math.floor((x - 1) / 2.0 + 1)
x_list.append(x)
x = math.floor((x - 1) / 2.0 + 1)
x_list.append(x)
x = math.floor((x - 1) / 2.0 + 1)
x_list.append(x)
# print(x_list)
return x
def net_deploy(deploy_prototxt, checkpoint, input_hw, re_hw):
    """Generate a deploy prototxt for dcnet at the given input size and load the net.

    Parameters
    ----------
    deploy_prototxt : path the generated prototxt is written to.
    checkpoint : path of the trained .caffemodel weights.
    input_hw : (height, width) of the single-image, 3-channel input blob.
    re_hw : (height, width) forwarded to dcnet as `re_hw1`
        (presumably an internal resize target — confirm against model/dcnet.py).

    Returns the loaded caffe.Net in TEST phase.
    """
    from model.dcnet import dcnet
    n = caffe.NetSpec()
    # Input blob: batch 1, 3 channels, H x W.
    n.data = L.Input(shape=[dict(dim=[1, 3, input_hw[0], input_hw[1]])])
    dcnet(n, is_train=False, re_hw1=re_hw)
    # Edge probabilities come from a sigmoid over the raw edge logits.
    n.sigmoid_edge = L.Sigmoid(n.unet1b_edge)
    # n.sigmoid_edge = L.Sigmoid(n.edge_p2)
    with open(deploy_prototxt, 'w') as f:
        f.write(str(n.to_proto()))  ## write network
    net = caffe.Net(deploy_prototxt,
                    checkpoint,
                    caffe.TEST)
    return net
## should change the [model path] + [save_path] + [import module]
data_root = '../../data/PIOD/Augmentation/'
# data_root = '../../data/BSDSownership/Augmentation/'
save_root = 'Output/dcnet/'
checkpoint = 'snapshot/dcnet_piod_iter_30000.caffemodel'
save_root = os.path.join(save_root, 'PIOD')
# save_root = os.path.join(save_root, 'BSDSownership')
if not os.path.exists(save_root):
    # bug fix: os.mkdir fails when the intermediate 'Output/dcnet' directory
    # does not exist yet; makedirs creates the whole path.
    os.makedirs(save_root)
deploy_prototxt = 'dcnet_eval.prototxt'
# load net
caffe.set_mode_gpu()
# caffe.set_device(1)
with open(data_root + 'test.lst') as f:
    test_lst = f.readlines()
test_lst = [x.strip() for x in test_lst]
# Preload every test image: RGB -> BGR, subtract the ImageNet BGR channel
# means (caffe convention).
im_lst = []
gt_lst = []
for i in range(0, len(test_lst)):
    im = Image.open(test_lst[i])
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:, :, ::-1]
    in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
    im_lst.append(in_)
time_total = 0
ts = time.time()
for idx in range(0, len(test_lst)):
    print(idx)
    im_ = im_lst[idx]
    im_ = im_.transpose((2, 0, 1))  # HWC -> CHW for the caffe data blob
    # print(im_.shape)
    # The deploy prototxt depends on the input size, so the net is rebuilt
    # per image with the matching downsampled output size.
    re_h, re_w = out_h(im_.shape[1]), out_h(im_.shape[2])
    re_hw = [int(re_h), int(re_w)]
    print(im_.shape, re_hw)
    net = net_deploy(deploy_prototxt, checkpoint, input_hw=[im_.shape[1], im_.shape[2]], re_hw=re_hw)
    # shape for input (data blob is N x C x H x W), set data
    net.blobs['data'].reshape(1, *im_.shape)
    net.blobs['data'].data[...] = im_
    # Forward pass, timing only the network itself.
    start_time = time.time()
    net.forward()
    diff_time = time.time() - start_time
    time_total += diff_time
    edgemap = net.blobs['sigmoid_edge'].data[0][0, :, :]
    orimap = net.blobs['unet1b_ori'].data[0][0, :, :]
    edge_ori = {'edge': edgemap, 'ori': orimap}
    # Save an 8-bit edge visualization plus the raw edge/orientation maps.
    stem = os.path.split(test_lst[idx])[1].split('.')[0]
    cv2.imwrite(save_root + '/' + stem + '.png', edgemap * 255)
    sio.savemat(save_root + '/' + stem + '.mat', {'edge_ori': edge_ori})
tf = time.time()
print('total time: {}s'.format(tf - ts))
print('Detection took {:.3f}s per image'.format(time_total / len(test_lst)))
|
|
from amuse.couple import bridge
from amuse.community.bhtree.interface import BHTree
from amuse.community.hermite0.interface import Hermite
from amuse.community.fi.interface import Fi
from amuse.community.octgrav.interface import Octgrav
from amuse.community.gadget2.interface import Gadget2
from amuse.community.phiGRAPE.interface import PhiGRAPE
from amuse.ic import plummer
from amuse.ic import gasplummer
from amuse.units import units
from amuse.units import constants
from amuse.units import quantities
from amuse.units import nbody_system
from optparse import OptionParser
import numpy
import time
try:
import pylab
except ImportError:
pylab = None
class GasPlummerModelExternalField(object):
    """
    skeleton grav code for use in bridge.
    must have get_gravity_at_point and get_potential_at_point
    """
    def __init__(self, position = [0,0,0] | units.parsec, radius=1000.| units.parsec, total_mass=1.6e10 | units.MSun):
        # Static analytic Plummer sphere: `radius` is the Plummer scale
        # length, `total_mass` the total enclosed mass.
        self.radius = radius
        self.total_mass = total_mass
        self.gravity_constant = constants.G
        self.position = position
        self.radius_squared = (self.radius**2)
    def get_gravity_at_point(self,eps,x,y,z):
        """Plummer acceleration a = -G M r_vec / (r^2 + a^2)^(3/2); `eps` is ignored."""
        dx = x - self.position.x
        dy = y - self.position.y
        dz = z - self.position.z
        radii_squared=dx**2 + dy**2 + dz**2
        #radii = radii_squared**0.5
        plummer_radii_squared = radii_squared + self.radius_squared
        plummer_radii_15 = plummer_radii_squared ** 1.5
        fr=-self.gravity_constant*self.total_mass/plummer_radii_15
        ax=fr*dx
        ay=fr*dy
        az=fr*dz
        return ax,ay,az
    def get_potential_at_point(self,eps,x,y,z):
        """Potential at (x, y, z); `eps` is accepted for the interface but ignored."""
        dx = x - self.position.x
        dy = y - self.position.y
        dz = z - self.position.z
        radii_squared=dx**2 + dy**2 + dz**2
        #radii = radii_squared**0.5
        plummer_radii = (radii_squared + self.radius_squared)**0.5
        phi=self.gravity_constant*self.total_mass/plummer_radii
        # NOTE(review): the plain Plummer potential is -G M / sqrt(r^2 + a^2);
        # the extra factor 2 here may be deliberate bridge energy bookkeeping
        # — confirm before changing.
        return -phi * 2
    def stop(self):
        # Nothing to clean up; present for gravity-code interface compatibility.
        pass
    @property
    def kinetic_energy(self):
        # The external field carries no particles, hence contributes no energy.
        return quantities.zero
    @property
    def potential_energy(self):
        return quantities.zero
    @property
    def thermal_energy(self):
        return quantities.zero
class AbstractStarAndGasPlummerCode(object):
    """
    Common base for the star + gas Plummer experiments: builds the
    SI/N-body unit converter, the star and gas Plummer initial conditions,
    and an optional live plot of energy and virial radius.
    """
    def __init__(self,
            nstars = 10,
            ngas = -1,
            endtime = 10,
            total_mass = 1000,
            gas_fraction = 0.9,
            rscale = 1.0,
            star_smoothing_fraction = 0.001,
            gas_smoothing_fraction = 0.05,
            seed = -1,
            ntimesteps = 10,
            must_do_plot = True
        ):
        # seed < 0 means "do not seed" (non-reproducible run).
        if seed >= 0:
            numpy.random.seed(seed)
        # Default: ten gas particles per star.
        if ngas < 0:
            ngas = nstars * 10
        self.must_do_plot = must_do_plot
        self.line = None
        self.line2 = None
        self.ntimesteps = ntimesteps
        self.ngas = ngas
        self.nstars = nstars
        self.total_mass = total_mass | units.MSun
        self.gas_fraction = gas_fraction
        self.star_fraction = 1.0 - self.gas_fraction
        self.rscale = rscale | units.parsec
        # Smoothing lengths are specified as fractions of the length scale.
        self.star_epsilon = star_smoothing_fraction * self.rscale
        self.gas_epsilon = gas_smoothing_fraction * self.rscale
        self.star_mass = self.star_fraction * self.total_mass
        self.gas_mass = self.gas_fraction * self.total_mass
        self.converter = nbody_system.nbody_to_si(self.total_mass, self.rscale)
        # End time is given on the command line in n-body time units.
        self.endtime = self.converter.to_si(endtime | nbody_system.time)
        self.delta_t = self.endtime / self.ntimesteps
    def update_plot(self, time, code):
        """Append the current energy and virial radius to the live plot."""
        # NOTE(review): the trailing comma makes `time` a 1-tuple; the
        # numpy.concatenate calls below rely on it being a sequence, but
        # pylab.plot([time]) then receives a nested sequence — confirm intended.
        time = self.converter.to_nbody(time).value_in(nbody_system.time),
        sum_energy = code.kinetic_energy + code.potential_energy + code.thermal_energy
        energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
        coreradius = self.star_code.particles.virial_radius().value_in(self.rscale.to_unit())
        #kicke = self.converter.to_nbody(code.kick_energy).value_in(nbody_system.energy)
        if self.line is None:
            # First call: create the two subplots (energy, virial radius).
            pylab.ion()
            pylab.subplot(1,2,1)
            self.line = pylab.plot([time], [energy])[0]
            pylab.xlim(0, self.converter.to_nbody(self.endtime).value_in(nbody_system.time))
            pylab.ylim(energy * 0.8, energy * 1.2)
            pylab.subplot(1,2,2)
            self.line2 = pylab.plot([time], [coreradius])[0]
            #self.line2 = pylab.plot([time], [kicke])[0]
            pylab.xlim(0, self.converter.to_nbody(self.endtime).value_in(nbody_system.time))
            pylab.ylim(0,3)
            #pylab.ylim(-0.1, 0.1)
        else:
            # Subsequent calls: extend the existing line data in place.
            xdata = self.line.get_xdata()
            ydata = self.line.get_ydata()
            xdata = numpy.concatenate( (xdata, time) )
            ydata = numpy.concatenate( (ydata, [energy]) )
            self.line.set_xdata(xdata)
            self.line.set_ydata(ydata)
            xdata = self.line2.get_xdata()
            ydata = self.line2.get_ydata()
            xdata = numpy.concatenate( (xdata, time) )
            #ydata = numpy.concatenate( (ydata, [kicke]) )
            ydata = numpy.concatenate( (ydata, [coreradius]) )
            self.line2.set_xdata(xdata)
            self.line2.set_ydata(ydata)
        pylab.draw()
    def new_particles_cluster(self):
        """Star particles: a Plummer sphere carrying the stellar mass fraction."""
        particles=plummer.new_plummer_model(self.nstars,convert_nbody=self.converter)
        particles.radius= self.star_epsilon
        particles.mass = (1.0/self.nstars) * self.star_mass
        return particles
    def new_gas_cluster(self):
        """SPH gas particles: a Plummer sphere carrying the gas mass fraction."""
        particles=gasplummer.new_plummer_gas_model(self.ngas,convert_nbody=self.converter)
        particles.h_smooth= self.gas_epsilon
        particles.mass = (1.0/self.ngas) * self.gas_mass
        return particles
    def new_particles_cluster_as_gas(self):
        """The gas distribution modelled as collisionless particles (for pure gravity codes)."""
        particles=plummer.new_plummer_model(self.ngas,convert_nbody=self.converter)
        particles.radius= self.gas_epsilon
        particles.mass = (1.0/self.ngas) * self.gas_mass
        return particles
    def stop(self):
        # Subclasses stop their codes; nothing to do here.
        pass
    def evolve_model(self):
        """Advance self.code to the end time, reporting ntimesteps times."""
        if self.must_do_plot:
            self.update_plot(time = 0 * self.delta_t, code = self.code)
        for time in self.delta_t * range(1,self.ntimesteps+1):
            self.code.evolve_model(time)
            print self.converter.to_nbody(self.code.time)
            if self.must_do_plot:
                self.update_plot(time = self.code.time, code = self.code)
class BridgeStarAndGasPlummerCode(AbstractStarAndGasPlummerCode):
def __init__(self,
nstars = 10,
ngas = -1,
endtime = 10,
total_mass = 1000,
gas_fraction = 0.9,
rscale = 1.0,
star_code = 'hermite',
gas_code = 'field',
star_smoothing_fraction = 0.001,
gas_smoothing_fraction = 0.05,
seed = -1,
ntimesteps = 10,
interaction_timestep = 0.01,
must_do_plot = True,
gas_to_star_interaction_code = 'none',
star_to_gas_interaction_code = 'none',
**ignored_options
):
AbstractStarAndGasPlummerCode.__init__(
self,
nstars,
ngas,
endtime,
total_mass,
gas_fraction,
rscale,
star_smoothing_fraction,
gas_smoothing_fraction,
seed,
ntimesteps,
must_do_plot
)
self.interaction_timestep = self.converter.to_si(interaction_timestep| nbody_system.time)
self.create_codes(
gas_code,
star_code,
gas_to_star_interaction_code,
star_to_gas_interaction_code,
)
self.create_bridge()
self.code = self.bridge_system
time = 0
sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
coreradius = self.star_code.particles.virial_radius().value_in(self.rscale.to_unit())
print "Time :", time
print "Energy :", energy
print "Virial radius :", coreradius
self.evolve_model()
if must_do_plot:
pylab.show()
pylab.savefig(
"{0}-{1}-{2}-{3}.png".format(
star_code,
gas_code,
nstars,
ngas
)
)
time = self.converter.to_nbody(self.code.time).value_in(nbody_system.time)
sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
coreradius = self.star_code.particles.virial_radius().value_in(self.rscale.to_unit())
print "Time :", time
print "Energy :", energy
print "Virial radius :", coreradius
self.stop()
if must_do_plot:
raw_input('Press enter...')
def create_codes(self, gas_code, star_code, gas_to_star_interaction_code, star_to_gas_interaction_code):
self.star_code = getattr(self,'new_star_code_'+star_code)()
self.gas_code = getattr(self, 'new_gas_code_'+gas_code)()
self.gas_to_star_codes = getattr(self, 'new_gas_to_star_interaction_codes_'+gas_to_star_interaction_code)(self.gas_code)
self.star_to_gas_codes = getattr(self, 'new_star_to_gas_interaction_codes_'+star_to_gas_interaction_code)(self.star_code)
def create_bridge(self):
bridge_code1 = bridge.GravityCodeInField(
self.gas_code, self.star_to_gas_codes
)
bridge_code2 = bridge.GravityCodeInField(
self.star_code, self.gas_to_star_codes
)
self.bridge_system = bridge.Bridge(
timestep = self.interaction_timestep,
use_threading = False
)
self.bridge_system.add_code(bridge_code2)
self.bridge_system.add_code(bridge_code1)
def stop(self):
self.star_code.stop()
self.gas_code.stop()
def new_gas_to_star_interaction_codes_self(self, gas_code):
return [gas_code]
def new_star_to_gas_interaction_codes_self(self, star_code):
return [star_code]
def new_gas_to_star_interaction_codes_none(self, gas_code):
return []
def new_star_to_gas_interaction_codes_none(self, gas_code):
return []
def new_gas_to_star_interaction_codes_octgrav(self, gas_code):
def new_octgrav():
result = Octgrav(self.converter)
result.parameters.epsilon_squared = self.gas_epsilon ** 2
return result
return [bridge.CalculateFieldForCodes(new_octgrav, [gas_code])]
def new_gas_to_star_interaction_codes_bhtree(self, gas_code):
def new_bhtree():
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
return result
return [bridge.CalculateFieldForCodes(new_bhtree, [gas_code])]\
def new_gas_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = True
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
#result.parameters.adaptive_smoothing_flag = True
#result.parameters.epsilon_squared = self.gas_epsilon ** 2
#result.parameters.eps_is_h_flag = False
result.parameters.integrate_entropy_flag = False
#result.parameters.self_gravity_flag = False
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_star_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = False
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_gas_code_gadget(self):
result = Gadget2(self.converter)
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_gas_code_field(self):
result = GasPlummerModelExternalField(
radius = self.rscale,
total_mass = self.gas_mass
)
return result
def new_gas_code_hermite(self):
result = Hermite(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster_as_gas())
result.commit_particles()
return result
def new_star_code_hermite(self):
result = Hermite(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_phigrape(self):
result = PhiGRAPE(self.converter, mode="gpu")
result.parameters.initialize_gpu_once = 1
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_bhtree(self):
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_octgrav(self):
result = Octgrav(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_gas_code_bhtree(self):
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.gas_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster_as_gas())
result.commit_particles()
return result
class AllInOneStarAndGasPlummerCode(AbstractStarAndGasPlummerCode):
    """Evolve stars and gas together inside a single SPH code (no bridge)."""
    def __init__(self,
            nstars = 10,
            ngas = -1,
            endtime = 10,
            total_mass = 1000,
            gas_fraction = 0.9,
            rscale = 1.0,
            sph_code = 'fi',
            star_smoothing_fraction = 0.001,
            gas_smoothing_fraction = 0.05,
            seed = -1,
            ntimesteps = 10,
            must_do_plot = True,
            interaction_timestep = 0.01,
            **ignored_options
        ):
        AbstractStarAndGasPlummerCode.__init__(
            self,
            nstars,
            ngas,
            endtime,
            total_mass,
            gas_fraction,
            rscale,
            star_smoothing_fraction,
            gas_smoothing_fraction,
            seed,
            ntimesteps,
            must_do_plot
        )
        # Internal SPH timestep base, specified in n-body time units.
        self.interaction_timestep = self.converter.to_si(interaction_timestep| nbody_system.time)
        self.create_code(sph_code)
        # Report initial diagnostics.
        sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
        energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
        coreradius = self.code.dm_particles.virial_radius().value_in(self.rscale.to_unit())
        print "Time:", 0
        print "Energy:", energy
        print "Virial radius:", coreradius
        self.evolve_model()
        if must_do_plot:
            pylab.show()
            pylab.savefig(
                "{0}-{1}-{2}.png".format(
                    sph_code,
                    nstars,
                    ngas
                )
            )
        # Report final diagnostics.
        time = self.converter.to_nbody(self.code.model_time).value_in(nbody_system.time)
        sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
        energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
        coreradius = self.code.dm_particles.virial_radius().value_in(self.rscale.to_unit())
        print "Time:", time
        print "Energy:", energy
        print "Virial radius:", coreradius
        self.stop()
        if must_do_plot:
            raw_input('Press enter...')
    def evolve_model(self):
        """Advance the SPH code to the end time, reporting ntimesteps times."""
        if self.must_do_plot:
            self.update_plot(time = 0 * self.delta_t, code = self.code)
        for time in self.delta_t * range(1,self.ntimesteps+1):
            self.code.evolve_model(time)
            print self.converter.to_nbody(self.code.model_time)
            if self.must_do_plot:
                # NOTE(review): this passes self.code.time while the prints above
                # use self.code.model_time — confirm both attributes exist on the
                # chosen SPH code and refer to the same clock.
                self.update_plot(time = self.code.time, code = self.code)
    def create_code(self, name):
        # Resolve the factory method by name, e.g. 'fi' -> new_sph_code_fi.
        self.code = getattr(self, 'new_sph_code_'+name)()
    def stop(self):
        self.code.stop()
    def new_sph_code_fi(self):
        result = Fi(self.converter)
        result.parameters.self_gravity_flag = True
        result.parameters.use_hydro_flag = True
        result.parameters.radiation_flag = False
        result.parameters.periodic_box_size = 500 | units.parsec
        result.parameters.timestep = 0.125 * self.interaction_timestep
        #result.parameters.adaptive_smoothing_flag = True
        #result.parameters.epsilon_squared = self.gas_epsilon ** 2
        #result.parameters.eps_is_h_flag = False
        result.parameters.integrate_entropy_flag = False
        result.dm_particles.add_particles(self.new_particles_cluster())
        result.gas_particles.add_particles(self.new_gas_cluster())
        result.commit_particles()
        return result
    def new_sph_code_gadget(self):
        result = Gadget2(self.converter)
        result.dm_particles.add_particles(self.new_particles_cluster())
        result.gas_particles.add_particles(self.new_gas_cluster())
        result.commit_particles()
        return result
def new_option_parser():
    """Build the OptionParser for the bridged / all-in-one Plummer runs."""
    # (option strings, default, dest, help, type) for every plain value
    # option, listed in the order they should appear in the help output.
    option_specs = [
        (("-n", "--nstar"), 10, "nstars",
         "number of star particles", "int"),
        (("-g", "--ngas"), -1, "ngas",
         "number of gas particles (if -1, 10 times the number of stars)", "int"),
        (("--gas-code",), "field", "gas_code",
         "the code modelling the gas ('fi', 'gadget', 'field')", "string"),
        (("--star-code",), "hermite", "star_code",
         "the code modelling the particles ('hermite', 'bhtree', 'octgrav', 'phigrape')", "string"),
        (("--sph-code",), "fi", "sph_code",
         "the code modelling the particles and the gas simultaniously", "string"),
        (("--gas-star-code",), "self", "gas_to_star_interaction_code",
         "the code calculating the gravity field of the gas code for the star code (default is self, gas code will calculate field for star code)", "string"),
        (("--star-gas-code",), "self", "star_to_gas_interaction_code",
         "the code calculating the gravity field of the star code for the gas code (default is self, star code will calculate field for gas code)", "string"),
        (("-m", "--total-mass"), 1000.0, "total_mass",
         "the total mass in solar masses", "float"),
        (("--gas-fraction",), 0.9, "gas_fraction",
         "the gas fraction between 0.0 and 1.0 (default 0.9)", "float"),
        (("-r", "--rscale"), 1.0, "rscale",
         "length scale of the problem in parsec (default 1) ", "float"),
        (("--star_smoothing_fraction",), 0.001, "star_smoothing_fraction",
         "smoothing length of the stars as a fraction of the length scale", "float"),
        (("--gas_smoothing_fraction",), 0.05, "gas_smoothing_fraction",
         "smoothing length of the gas particles as a fraction of the length scale", "float"),
        (("-s", "--seed"), 0, "seed",
         "random number seed (-1, no seed)", "int"),
        (("--interaction-timestep",), 0.01, "interaction_timestep",
         "time between bridge interactions (0.01 nbody time)", "float"),
        (("-t", "--end-time"), 1, "endtime",
         "end time of the simulation (in nbody time, default 1)", "float"),
        (("--ntimesteps",), 10, "ntimesteps",
         "number of times to do reporting", "int"),
    ]
    parser = OptionParser()
    for flags, default, dest, help_text, type_name in option_specs:
        parser.add_option(*flags, default=default, dest=dest,
                          help=help_text, type=type_name)
    # Boolean switches: giving the flag stores False (both default to True).
    parser.add_option("--noplot", dest="must_do_plot", default=True,
                      help="do not show a plot and end as soon as possible",
                      action="store_false")
    parser.add_option("--allinoone", dest="must_do_bridge", default=True,
                      help="simulate the stars and gas with one sph code",
                      action="store_false")
    return parser
if __name__ == "__main__":
options, arguments = new_option_parser().parse_args()
if options.must_do_bridge:
options = options.__dict__
BridgeStarAndGasPlummerCode(**options)
else:
options = options.__dict__
AllInOneStarAndGasPlummerCode(**options)
|
|
""" Dataset """
import numpy as np
from .base import Baseset
from .dsindex import DatasetIndex
from .pipeline import Pipeline
class Dataset(Baseset):
    """ Dataset: an index plus the batch class used to materialize its items.
    Attributes
    ----------
    index
    indices
    is_split
    """
    def __init__(self, index, batch_class=None, preloaded=None, *args, **kwargs):
        # batch_class: class used by `create_batch` to construct batches.
        # preloaded: optional data handed to every batch created from this dataset.
        super().__init__(index, *args, **kwargs)
        self.batch_class = batch_class
        self.preloaded = preloaded
    @classmethod
    def from_dataset(cls, dataset, index, batch_class=None):
        """ Create Dataset from another dataset with new index
        (usually a subset of the source dataset index)
        """
        # Shortcut: same batch class and an identical index -> reuse the
        # source dataset itself instead of allocating a copy.
        if (batch_class is None or (batch_class == dataset.batch_class)) and cls._is_same_index(index, dataset.index):
            return dataset
        bcl = batch_class if batch_class is not None else dataset.batch_class
        return cls(index, batch_class=bcl, preloaded=dataset.preloaded)
    @staticmethod
    def build_index(index):
        """ Create an index """
        # Pass DatasetIndex instances through unchanged; wrap anything else.
        if isinstance(index, DatasetIndex):
            return index
        return DatasetIndex(index)
    @staticmethod
    def _is_same_index(index1, index2):
        # Indices match when one is an instance of the other's type and the
        # underlying index arrays are equal element-wise (same shape first,
        # so the comparison below cannot broadcast unexpectedly).
        return (isinstance(index1, type(index2)) or isinstance(index2, type(index1))) and \
               index1.indices.shape == index2.indices.shape and \
               np.all(index1.indices == index2.indices)
    def create_subset(self, index):
        """ Create a dataset based on the given subset of indices """
        return type(self).from_dataset(self, index)
    def create_batch(self, batch_indices, pos=False, *args, **kwargs):
        """ Create a batch from given indices.
        if `pos` is `False`, then `batch_indices` should contain the indices
        that should be included in the batch
        otherwise `batch_indices` should contain their positions in the current index
        """
        if not isinstance(batch_indices, DatasetIndex):
            batch_indices = self.index.create_batch(batch_indices, pos, *args, **kwargs)
        return self.batch_class(batch_indices, preloaded=self.preloaded, **kwargs)
    def pipeline(self, config=None):
        """ Start a new data processing workflow """
        return Pipeline(self, config=config)
    @property
    def p(self):
        """:class:`dataset.Pipeline` : a short alias for `pipeline()` """
        return self.pipeline()
    def __rshift__(self, other):
        # `dataset >> pipeline` binds this dataset to a copy of the pipeline.
        if not isinstance(other, Pipeline):
            raise TypeError("Pipeline is expected, but got %s. Use as dataset >> pipeline" % type(other))
        new_p = other.from_pipeline(other)
        new_p.dataset = self
        return new_p
|
|
import numpy as np
import os
import tensorflow as tf
from tqdm import tqdm
import ujson as json
from model import Model
from util import get_batch_dataset
from util import get_dataset
from util import get_record_parser
# for debug, print numpy array fully.
# np.set_printoptions(threshold=np.inf)
# Pin this process to GPU 3 (must be set before any CUDA initialization).
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
def train(config):
    """Train the model, logging loss/accuracy and checkpointing on validation.

    `config` supplies the embedding/record/meta file paths, model
    hyper-parameters, and the logging (`period`) / validation
    (`checkpoint`) step intervals. Resumes from the latest checkpoint in
    `config.save_dir` when one exists.
    """
    with open(config.word_emb_file, 'r') as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)  # word embedding matrix
    with open(config.char_emb_file, 'r') as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)  # char embedding matrix
    # total examples number in valid file
    with open(config.dev_meta, 'r') as fh:
        dev_meta = json.load(fh)
    dev_total = dev_meta['total']
    print('Building model...')
    parser = get_record_parser(config)
    graph = tf.Graph()
    with graph.as_default() as g:
        train_dataset = get_batch_dataset(config.train_record_file, parser, config)
        dev_dataset = get_dataset(config.dev_record_file, parser, config)
        # One feedable iterator, switched between train and dev via `handle`.
        handle = tf.placeholder(tf.string, shape=[])
        iterator = tf.data.Iterator.from_string_handle(
            handle, train_dataset.output_types, train_dataset.output_shapes)
        train_iterator = train_dataset.make_one_shot_iterator()
        dev_iterator = dev_dataset.make_one_shot_iterator()
        model = Model(config, iterator, word_mat, char_mat, graph=g)
        # model = QANet4CBT(config, iterator, word_mat, char_mat, graph=g)
        sess_config = tf.ConfigProto(allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True
        loss_save = 10.0  # NOTE(review): never read below — confirm before removing
        patience = 0
        best_acc = 0.
        with tf.Session(config=sess_config) as sess:
            writer = tf.summary.FileWriter(config.log_dir)
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            train_handle = sess.run(train_iterator.string_handle())
            dev_handle = sess.run(dev_iterator.string_handle())
            # Resume from the latest checkpoint when one exists.
            if os.path.exists(os.path.join(config.save_dir, 'checkpoint')):
                saver.restore(sess, tf.train.latest_checkpoint(config.save_dir))
            global_step = max(sess.run(model.global_step), 1)
            total_corrects = 0
            total_loss = 0.0
            # Training
            for _ in tqdm(range(global_step, config.num_steps + 1)):
                global_step = sess.run(model.global_step) + 1
                loss_in_batch, corrects_in_batch, train_op = sess.run(
                    [model.loss, model.correct_prediction, model.train_op],
                    feed_dict={
                        handle: train_handle,
                        model.dropout: config.dropout
                    })
                total_corrects += corrects_in_batch
                total_loss += loss_in_batch * config.batch_size
                if global_step % config.period == 0:
                    # Running averages normalized by all steps since step 0.
                    # NOTE(review): after restoring a checkpoint, global_step
                    # includes the previous run's steps while the accumulators
                    # start at 0, so these averages are diluted — confirm.
                    acc = total_corrects / (global_step * config.batch_size)
                    loss = total_loss / (global_step * config.batch_size)
                    loss_sum = tf.Summary(value=[
                        tf.Summary.Value(tag='model/loss',
                                         simple_value=loss),
                    ])
                    writer.add_summary(loss_sum, global_step)
                    acc_sum = tf.Summary(value=[
                        tf.Summary.Value(tag='model/acc',
                                         simple_value=acc),
                    ])
                    writer.add_summary(acc_sum, global_step)
                # Validation and save model
                if global_step % config.checkpoint == 0:
                    val_acc, val_loss, v_acc_sum, v_loss_sum = validate(
                        config, model, sess, dev_total, 'dev', handle, dev_handle)
                    writer.add_summary(v_acc_sum, global_step)
                    writer.add_summary(v_loss_sum, global_step)
                    # Early Stopping: stop after `early_stop` consecutive
                    # validations without improvement.
                    if val_acc < best_acc:
                        patience += 1
                        if patience > config.early_stop:
                            break
                    else:
                        patience = 0
                    best_acc = max(best_acc, val_acc)
                    # Save Model, keep top 5 best models.
                    filename = os.path.join(
                        config.save_dir, 'model_{}_val-acc_{}.ckpt'.format(global_step, best_acc))
                    saver.save(sess, filename)
                    writer.flush()
def validate(config, model, sess, dev_total, data_type, handle, str_handle):
    """Run one pass over the dev set.

    Returns (accuracy, loss, accuracy summary, loss summary); the two
    tf.Summary protos are tagged '<data_type>/acc' and '<data_type>/loss'.
    """
    running_loss = 0.
    running_corrects = 0.
    num_batches = dev_total // config.batch_size
    for _ in tqdm(range(num_batches)):
        batch_loss, batch_corrects = sess.run(
            [model.loss, model.correct_prediction],
            feed_dict={handle: str_handle})
        running_loss += batch_loss
        running_corrects += batch_corrects
    # Normalize by the full example count, not the number of batches.
    val_acc = running_corrects / dev_total
    val_loss = running_loss / dev_total
    loss_summary = tf.Summary(value=[
        tf.Summary.Value(tag='{}/loss'.format(data_type), simple_value=val_loss),
    ])
    acc_summary = tf.Summary(value=[
        tf.Summary.Value(tag='{}/acc'.format(data_type), simple_value=val_acc),
    ])
    return val_acc, val_loss, acc_summary, loss_summary
def test(config):
    """Evaluate the latest checkpoint on the test set and write loss/acc to
    `config.answer_file` as JSON, printing them as well."""
    # Pretrained word- and char-embedding matrices (JSON lists of floats).
    with open(config.word_emb_file, 'r') as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, 'r') as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.test_meta, 'r') as fh:
        test_total = json.load(fh)['total']
    graph = tf.Graph()
    print('Loading model...')
    with graph.as_default() as g:
        test_batch = get_dataset(
            config.test_record_file,
            get_record_parser(config, is_test=True),
            config).make_one_shot_iterator()
        model = Model(config, test_batch, word_mat, char_mat,
                      trainable=False, graph=g)
        sess_config = tf.ConfigProto(allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(config.save_dir))
            total_loss = 0.0
            total_corrects = 0
            result = {}
            # The +1 covers the final partial batch of the one-shot iterator.
            for _ in tqdm(range(test_total // config.batch_size + 1)):
                batch_loss, batch_corrects = sess.run(
                    [model.loss, model.correct_prediction])
                total_loss += batch_loss
                total_corrects += batch_corrects
            loss = total_loss / test_total
            acc = total_corrects / test_total
            result['loss'] = loss
            result['acc'] = acc
            with open(config.answer_file, 'w') as fh:
                json.dump(result, fh)
            print('Loss: {}, Accuracy: {}'.format(loss, acc))
|
|
import os
from copy import deepcopy
from typing import List, Union, Dict, Any
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import argparse
import logging
import sys
import json
import numpy as np
from predictor import Predictor
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def eval_model(args) -> None:
    """Run the predictor over `args.in_file` and report classification metrics.

    Reads one JSON object per line, predicts labels for the slice
    [args.start:args.end], optionally appends each raw prediction to
    `args.log` (JSON lines), and when `args.verbose` prints accuracy, a
    classification report, and a confusion matrix.
    """
    predictor = Predictor(args.archive_file,
                          args.cuda_device,
                          args.predicted_pages,
                          args.merge_google,
                          args.score_format,
                          args.verbose)
    # Input is JSON-lines: one example per line.
    raw_data = []
    with open(args.in_file) as f:
        for line in f:
            raw_data.append(json.loads(line))
    actual = []
    predicted = []
    # BUG FIX: the log file was opened without try/finally, so the handle
    # leaked if prediction raised part-way through.
    log_file = open(args.log, "w+") if args.log is not None else None
    try:
        for output in predictor.predict(raw_data[args.start:args.end]):
            # Gold label: prefer 'actual', fall back to 'label', then NEI.
            actual.append(output['actual'] if 'actual' in output
                          else output.get('label', 'NOT ENOUGH INFO'))
            # Predicted label: prefer 'predicted_label', else 'predicted'.
            predicted.append(output['predicted_label'] if 'predicted_label' in output
                             else output['predicted'])
            if log_file is not None:
                log_file.write(json.dumps(output) + "\n")
    finally:
        if log_file is not None:
            log_file.close()
    if args.verbose:
        print(accuracy_score(actual, predicted))
        print(classification_report(actual, predicted))
        print(confusion_matrix(actual, predicted))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('archive_file', type=str, help='/path/to/saved/db.db')
parser.add_argument('in_file', type=str)
parser.add_argument('--log', required=False, default=None, type=str, help='/path/to/saved/db.db')
parser.add_argument("--cuda-device", type=int, default=-1, help='id of GPU to use (if any)')
parser.add_argument("--score-format", action="store_true", help="use the format required for score.py")
parser.add_argument("--predicted-pages", action="store_true", help="use the predicted pages format")
parser.add_argument("--merge-google", action="store_true", help="add all the pages from the predicted_google key")
parser.add_argument("--verbose", action="store_true", help="add all the pages from the predicted_google key")
parser.add_argument("--start", type=int, default=0)
parser.add_argument("--end", type=int, default=2**32)
args = parser.parse_args()
eval_model(args)
|
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 5 15:33:27 2019
@author: gptshubham595
"""
import cv2
import matplotlib.pyplot as plot
import numpy as np
import time
def main():
    """Cross-fade between two test images with cv2.addWeighted.

    Sweeps the blend weight x from 0 to 1 over 50 intervals with y = 1 - x,
    so the weights always sum to 1; pressing ESC (key code 27) aborts early.
    """
    imgp1 = "C:\\opencv learn machin\\misc\\4.2.01.tiff"
    imgp2 = "C:\\opencv learn machin\\misc\\4.2.05.tiff"
    # imread flag: 1 = colour (default BGR), 0 = grayscale.
    img1 = cv2.imread(imgp1, 1)
    img2 = cv2.imread(imgp2, 1)
    # (BUG FIX: removed unused `add = img1 + img2` — it was never read, and
    # raw uint8 addition wraps around on overflow anyway.)
    z = 0  # scalar added to the weighted sum; x + y + z = 1 keeps brightness
    for x in np.linspace(0, 1, 50):  # 50 -> number of transition intervals
        y = 1 - x
        # output = (img1*x + img2*y + z); weights chosen so x + y + z = 1.
        output = cv2.addWeighted(img1, x, img2, y, z)
        cv2.imshow('Transition', output)
        time.sleep(0.1)
        if cv2.waitKey(1) == 27:  # ESC pressed
            break
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
|
|
import streamlit as st
import yfinance as yf
from datetime import datetime, timedelta
#import pyodbc
import pandas as pd
import os
import altair as alt
#import time
import sys
sys.path.append(os.path.abspath(r"C:\Users\cyril\Documents\Stocks\TA"))
from AutoSupportAndResistance import *
#import talib
from annotated_text import annotated_text, annotation
import statistics
import numpy as np
from sklearn.linear_model import LinearRegression
#### To-Do List:
# DONE - add linear regression on CHART 1
# add ecart type 1+2 on CHART 1
# add financial statements of the last x years in Fundamental Tab - add ebit, margin etc.
# -> TO, EBITDA, Revenue, P/E (=Current Price / EPS), P/S, Market Cap, Last Price, Payout, last 5 dividends, bookValue
# add news from Reuters
# take currency into account ex: "Total Revenue & Currency"
######## Colors:
# Dashboard palette: a base blue plus five variants. writeData indexes these
# (via text_styles below) by the value's rank within a row, so the order
# presumably runs best -> worst — confirm against the intended design.
custom_blue = "#33BCF6"
custom_blue_variant1 = "#78CFF8"
custom_blue_variant2 = "#A9E1FB"
custom_blue_variant3 = "#D5F2FF"
custom_blue_variant4 = "#9CB5C1"
custom_blue_variant5 = "#677C86"
# Background / text colours for the dark theme.
background_color1 = "#262730"
background_color2 = "#36474F"
text_color = "#FFFFFF"
########
####### STYLING
# current = shades of blue
# possible alternative:
# green for best = 06982d red for worth = #ae1325 ??
# Register one CSS class per palette colour so values can be colour-coded via
# st.markdown. The original repeated six identical <style> blocks; they are
# generated in a single loop here (the resulting CSS rules are equivalent).
_palette_classes = [
    ("custom_blue", custom_blue),
    ("custom_blue_variant1", custom_blue_variant1),
    ("custom_blue_variant2", custom_blue_variant2),
    ("custom_blue_variant3", custom_blue_variant3),
    ("custom_blue_variant4", custom_blue_variant4),
    ("custom_blue_variant5", custom_blue_variant5),
]
for _class_name, _class_color in _palette_classes:
    st.markdown("""
    <style>
    .%s {
        color: %s;
        font-size: 15px;
    }
    </style>
    """ % (_class_name, _class_color), unsafe_allow_html=True)
# Class names in rank order, indexed by writeData below.
text_styles = ["custom_blue", "custom_blue_variant1", "custom_blue_variant2",
               "custom_blue_variant3", "custom_blue_variant4", "custom_blue_variant5"]
#st.markdown('<p class="custom_blue">Hello World !!</p>', unsafe_allow_html=True)
#######
def highlight_max(data, color=custom_blue):
    '''
    highlight the maximum in a Series or DataFrame
    '''
    css = 'background-color: {}'.format(color)
    # Strip any '%' signs so percentage strings compare numerically.
    numeric = data.replace('%', '', regex=True).astype(float)
    if numeric.ndim == 1:
        # Series (from .apply(axis=0) or axis=1): one style per element.
        hits = numeric == numeric.max()
        return [css if hit else '' for hit in hits]
    # DataFrame (from .apply(axis=None)): highlight the global maximum.
    hits = numeric == numeric.max().max()
    return pd.DataFrame(np.where(hits, css, ''),
                        index=numeric.index, columns=numeric.columns)
def writeData(data, name, dataType="value"):  # name="Gross Profit", data=financials
    """Render one financial metric (row `name` of `data`) as four columns.

    Shows the four most recent periods side by side; each value is coloured by
    its rank within the row (position in the descending sort indexes
    `text_styles`).

    :param data: DataFrame of financials; columns are period-end dates.
    :param name: row label to display, e.g. "Gross Profit".
    :param dataType: "percent" renders the value as a percentage; anything
        else formats it in millions ("M€") or billions ("B€") of euros.
    """
    st.subheader(name)
    col1, col2, col3, col4 = st.columns([1, 1, 1, 1])
    cols = [col1, col2, col3, col4]
    # Descending sort of the row so each cell can be coloured by its rank.
    sorted_column = data.loc[name].sort_values(ascending=False)
    for i in range(4):
        # BUG FIX: the original wrote the formatted string back via chained
        # assignment (data.loc[name][i] = ...), mutating the caller's frame —
        # or silently hitting a copy. Format locally instead.
        raw = data.loc[name].iloc[i]
        position = sorted_column.index.get_loc(data.columns[i])
        if dataType == "percent":
            text = str(round(raw * 100, 1)) + "%"
        elif round(raw / 1000000000, 2) < 1:
            text = str(round(raw / 1000000, 2)) + " M€"
        else:
            text = str(round(raw / 1000000000, 2)) + " B€"
        with cols[i]:
            st.text(data.columns[i].year)
            # use markdown for color (CSS classes registered at file top):
            st.markdown('<p class="%s">%s</p>' % (text_styles[position], text),
                        unsafe_allow_html=True)
# Ticker universe comes from the dividend-history sheet of the local workbook.
tickersDF = pd.read_excel('Stocks Dashboard.xlsm', sheet_name='Dividend History')
tickers = []
tickers = tickersDF['Symbol'].tolist()
tickers = sorted(tickers)
ticker = st.sidebar.selectbox("Selected Symbol:", tickers)
# for comparison page:
# ticker = st.sidebar.multiselect("Symbols", tickers)
# will require WHERE symbol IN list in SQL request
# Fetch company metadata and annual financial statements from Yahoo Finance.
yahooTicker = yf.Ticker(ticker)
info = yahooTicker.info
financials = yahooTicker.financials#.transpose()
# Derived row: profit margin = net income / total revenue (shown as % later).
financials.loc['Profit Margin'] = financials.loc['Net Income'] / financials.loc['Total Revenue']
# Column headers become plain dates so writeData can display the year.
financials.columns = financials.columns.date
financials = financials#.transpose()
#financials.loc["Ebit"][0], financials.columns[0]
#financials['Profit Margin'] = financials['Net Income'] / financials['Total Revenue']
# Optional fields: any of these keys may be missing from the Yahoo info dict;
# absent ones simply leave the corresponding UI section empty.
try:
    longBusinessSummary = info['longBusinessSummary']
except:
    pass
try:
    website = info['website']
except:
    pass
try:
    logo_url = info['logo_url']
except:
    pass
try:
    currency = info['currency']
except:
    currency = "EUR"  # default when Yahoo does not report a currency
# TITLE
# Layout: small logo column, spacer, wide title column.
col1, mid, col2 = st.columns([4, 1, 15])
with col2:
    st.title(info['longName'])
with col1:
    try:
        st.image(logo_url, width=100)
    except:
        pass
# GENERAL INFO
generalInfo = st.expander("General Information")
with generalInfo:
    try:
        st.write(longBusinessSummary)
    except:
        pass
    try:
        st.write(website)
    except:
        pass
fundamentals = st.expander("Fundamental Analysis", expanded=True)
with fundamentals:
    writeData(financials, "Total Revenue")
    writeData(financials, "Gross Profit")
    writeData(financials, "Net Income")
    writeData(financials, "Ebit")
    # must add optional argument to writeData to select %
    # print(financials.loc["Profit Margin"])
    writeData(financials, "Profit Margin", dataType="percent")  # = net income / total revenue
    #st.dataframe(financials.style.apply(highlight_max))
technicals = st.expander("Technical Analysis", expanded=True)
# streamlit run c:\Users\cyril\Documents\Stocks\WebApp\webApp.py
|
|
from __future__ import print_function
import sys
import os
import numpy as np
import random
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from util import *
import itertools
import math
# This file contains code required for any preprocessing of real data, as well as splitting it into partitions
# Currently this contains code relevant to the amazon-dataset (https://www.kaggle.com/c/amazon-employee-access-challenge)
# and dna dataset ftp://largescale.ml.tu-berlin.de/largescale/dna/
# Command-line driver: parse args, load the requested dataset, one-hot encode
# it, and write one sparse partition per worker plus train/test label files.
if len(sys.argv) != 5:
    print("Usage: python arrange_real_data.py n_procs input_dir real_dataset n_stragglers")
    sys.exit(0)
np.random.seed(0)  # deterministic train/test split
n_procs, input_dir, real_dataset, n_stragglers = [x for x in sys.argv[1:]]
n_procs, n_stragglers = int(n_procs), int(n_stragglers)
input_dir = input_dir + real_dataset + "/"
# load relevant data
if real_dataset == "amazon-dataset":
    print("Preparing data for " + real_dataset)
    trainData = pd.read_csv(input_dir + 'train.csv')
    # BUG FIX: DataFrame.ix was deprecated and then removed from pandas;
    # .loc performs the same label-based slice from 'RESOURCE' onwards.
    trainX = trainData.loc[:, 'RESOURCE':].values
    trainY = trainData['ACTION'].values
    # Re-label each categorical column to contiguous integer codes.
    relabeler = preprocessing.LabelEncoder()
    for col in range(len(trainX[0, :])):
        relabeler.fit(trainX[:, col])
        trainX[:, col] = relabeler.transform(trainX[:, col])
    trainY = 2 * trainY - 1  # map {0,1} labels to {-1,+1}
    d_all_s = interactionTermsAmazon(trainX, degree=2)  # second order
    #d_all_t = interactionTermsAmazon(trainX, degree=3) # third order
    #trainX = np.hstack((trainX, d_all_s, d_all_t))
    trainX = np.hstack((trainX, d_all_s))
    # Relabel again so the interaction terms also get contiguous codes.
    for col in range(len(trainX[0, :])):
        relabeler.fit(trainX[:, col])
        trainX[:, col] = relabeler.transform(trainX[:, col])
    # Append a constant-1 column as the bias feature.
    trainX = np.vstack([trainX.T, np.ones(trainX.shape[0])]).T
    X_train, X_valid, y_train, y_valid = train_test_split(trainX, trainY, test_size=0.2, random_state=0)
    # NOTE(review): 'sparse' was renamed 'sparse_output' in scikit-learn >= 1.2.
    encoder = preprocessing.OneHotEncoder(sparse=True)
    encoder.fit(np.vstack((X_train, X_valid)))
    X_train = encoder.transform(X_train)  # Returns a sparse matrix (see numpy.sparse)
    X_valid = encoder.transform(X_valid)
    n_rows, n_cols = X_train.shape
    print("No. of training samples = %d, Dimension = %d" % (n_rows, n_cols))
    print("No. of testing samples = %d, Dimension = %d" % (X_valid.shape[0], X_valid.shape[1]))
    # Create output directory
    output_dir = input_dir
    output_dir = output_dir + str(n_procs - 1) + "/"
    partitions = n_procs - 1  # one partition per worker
    n_rows_per_worker = n_rows // partitions
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for i in range(1, partitions + 1):
        data_matrix = X_train[(i - 1) * n_rows_per_worker:i * n_rows_per_worker, :]
        save_sparse_csr(output_dir + str(i), data_matrix)
        print("\t >>> Done with partition %d" % (i))
    save_vector(y_train, output_dir + "label.dat")
    save_vector(y_valid, output_dir + "label_test.dat")
    save_sparse_csr(output_dir + "test_data", X_valid)
elif real_dataset == "dna-dataset/dna":
    print("Preparing data for " + real_dataset)
    fin = open(input_dir + 'features.csv')
    # Stream at most the first 500k rows to bound memory use.
    trainData = np.genfromtxt(itertools.islice(fin, 0, 500000, 1), delimiter=',')
    #np.genfromtxt(input_dir + 'features.csv',delimiter=',', max_rows=100000)
    trainX = trainData[:, 1:]
    trainY = trainData[:, 0]
    print("No. of positive labels = " + str(np.sum(trainY == 1)))
    n, p = trainX.shape
    # Append a scaled constant column as the bias feature.
    trainX = np.vstack([trainX.T, np.ones(trainX.shape[0]) / math.sqrt(n)]).T
    X_train, X_valid, y_train, y_valid = train_test_split(trainX, trainY, test_size=0.2, random_state=0)
    encoder = preprocessing.OneHotEncoder(sparse=True)
    encoder.fit(np.vstack((X_train, X_valid)))
    X_train = encoder.transform(X_train)  # Returns a sparse matrix (see numpy.sparse)
    X_valid = encoder.transform(X_valid)
    n_rows, n_cols = X_train.shape
    print("No. of training samples = %d, Dimension = %d" % (n_rows, n_cols))
    print("No. of testing samples = %d, Dimension = %d" % (X_valid.shape[0], X_valid.shape[1]))
    # Create output directory
    output_dir = input_dir
    output_dir = output_dir + str(n_procs - 1) + "/"
    partitions = n_procs - 1
    n_rows_per_worker = n_rows // partitions
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for i in range(1, partitions + 1):
        data_matrix = X_train[(i - 1) * n_rows_per_worker:i * n_rows_per_worker, :]
        save_sparse_csr(output_dir + str(i), data_matrix)
        print("\t >>> Done with partition %d" % (i))
    save_vector(y_train, output_dir + "label.dat")
    save_vector(y_valid, output_dir + "label_test.dat")
    save_sparse_csr(output_dir + "test_data", X_valid)
    fin.close()
print("Data Setup Finished.")
|
|
from queue import PriorityQueue
import networkx as nx
import random
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import math
import time
class Event(object):
    """A single simulation event, ordered by firing time for the PriorityQueue."""
    def __init__(self, state, time, srcNode, targetNode):
        self.state = state            # 'Infection' or 'Recovery'
        self.time = time              # simulation time at which the event fires
        self.srcNode = srcNode        # node that generated the event
        self.targetNode = targetNode  # attacked node (None for recoveries)
        # print('Event: ',state,time,srcNode,targetNode)
    def __lt__(self, other):
        # Earlier events sort first, so the queue pops them in time order.
        return self.time < other.time
# NUMBEROFNODE = int(1e5)
MAXNEIGHBORCOUNT = 100  # maximum node degree in the power-law degree distribution
# Mutable module-level state shared with the simulator functions below.
tGlobal0 = 0  # simulation clock advanced by GA's event helpers
tGlobal1 = 0  # simulation clock advanced by OGA's event helpers
c1 = 0  # total event rate used by OGA
c = 0  # total event rate used by GA
n = 0  # event counter, incremented by SIS_MODEL and used to normalise timings
def SIS_MODEL(NUMBEROFNODE):
    """Event-driven SIS epidemic simulation on a power-law random graph.

    Builds a NUMBEROFNODE-node graph whose degrees follow ~k^-3 for
    k in [3, MAXNEIGHBORCOUNT], seeds ~5% of nodes as infected, then pops
    recovery/infection events from a priority queue until the simulated
    clock passes 2.0. Returns the wall-clock time per processed event,
    using the module-global counter `n`.
    """
    global n
    G = nx.Graph()
    EventQ = PriorityQueue()
    # start =time.perf_counter()
    # ~5% of nodes start infected ('I'); the rest susceptible ('S').
    biState = np.random.binomial(1, 0.95, NUMBEROFNODE)
    for i in range(NUMBEROFNODE):
        if biState[i] == 0:
            state = 'I'
        elif biState[i] == 1:
            state = 'S'
        else:
            raise ValueError("State out of bound")
        G.add_nodes_from([
            (i, {'state': state, 'degree': 0, 'recoveryTime': 0})
        ])
        # print(G.nodes.data())
        # print(i)
    # print(G[0])
    # Truncated power law: P(k) proportional to k^-3 for k in [3, MAXNEIGHBORCOUNT].
    sumOfProb = 0
    for neighborNumber in range(3, MAXNEIGHBORCOUNT + 1):
        sumOfProb += math.pow(neighborNumber, -3)
    distribution = [None] * (MAXNEIGHBORCOUNT + 1)
    distribution[0] = 0
    distribution[1] = 0
    distribution[2] = 0
    for neighborNumber in range(3, MAXNEIGHBORCOUNT + 1):
        distribution[neighborNumber] = int(math.pow(neighborNumber, -3) / sumOfProb * NUMBEROFNODE)
    # Rounding leftovers go to degree 3 so the counts sum to NUMBEROFNODE.
    RESTNUMBER = NUMBEROFNODE - sum(distribution)
    distribution[3] = distribution[3] + RESTNUMBER
    # print(distribution)
    # print(G.nodes.data())
    # Assign each node its target degree ('neighborNumber') in node-id order.
    nodeid = 0
    for i in range(len(distribution)):
        if distribution[i] == 0:
            continue
        else:
            while (distribution[i] != 0):
                G.nodes[nodeid]['neighborNumber'] = i
                distribution[i] = distribution[i] - 1
                nodeid = nodeid + 1
    # Wire edges: each node picks random higher-id partners until it reaches
    # its target degree. NOTE(review): the retry loop below can spin for a
    # long time if no higher-id node has spare capacity — confirm termination.
    for node in G.__iter__():
        if len(list(G.neighbors(node))) >= G.nodes[node]['neighborNumber']:
            continue
        else:
            neighborNumberToChoose = G.nodes[node]['neighborNumber'] - len(list(G.neighbors(node)))
            if node == NUMBEROFNODE - 1:
                break
            for i in range(neighborNumberToChoose):
                neighborNew = random.choice(range(node + 1, NUMBEROFNODE))
                while (G.nodes[neighborNew]['neighborNumber'] <= len(list(G.neighbors(neighborNew)))):
                    neighborNew = random.choice(range(node + 1, NUMBEROFNODE))
                    # continue
                G.add_edge(node, neighborNew)
    # print('Running time: %s Seconds'%(end-start))
    # print(G.nodes.data())
    # print(G.number_of_nodes())
    def InitGraph(G, mu, lam, EventQ):
        # Seed one recovery and one infection event per initially infected node.
        for node in G.__iter__():
            if G.nodes[node]['state'] == 'I':
                GenerateRecoveryEvent(node, mu, 0, EventQ)
        for node in G.__iter__():
            if G.nodes[node]['state'] == 'I':
                GenerateInfectionEvent(node, lam, 0, EventQ)
    def GenerateRecoveryEvent(node, mu, tGlobal, EventQ):
        # Schedule the node's recovery and remember when it happens.
        # NOTE(review): np.random.exponential takes a *scale* (1/rate);
        # passing mu directly treats it as the scale — confirm intent
        # (harmless here since mu is always 1.0).
        tEvent = tGlobal + np.random.exponential(mu)
        e = Event(state='Recovery', time=tEvent, srcNode=node, targetNode=None)
        G.nodes[node]['recoveryTime'] = tEvent
        EventQ.put(e)
    def GenerateInfectionEvent(node, lam, tGlobal, EventQ):
        # Schedule the node's next successful infection attempt, if any,
        # before its own recovery time.
        tEvent = tGlobal
        # NOTE(review): 'degree' is initialised to 0 and never updated, so
        # this rate is always 0 — verify against the intended model.
        rate = lam * G.nodes[node]['degree']
        while True:
            tEvent += np.random.exponential(rate)
            if G.nodes[node]['recoveryTime'] < tEvent:
                break
            attackedNode = random.choice(list(G.neighbors(node)))
            if G.nodes[attackedNode]['state'] == 'S' or G.nodes[attackedNode]['recoveryTime'] < tEvent:
                e = Event(state='Infection', time=tEvent, srcNode=node, targetNode=attackedNode)
                EventQ.put(e)
                break
    start = time.perf_counter()
    InitGraph(G, 1.0, 0.6, EventQ)
    tGlobal = 0
    # Main loop: pop events in time order until the clock passes 2.0.
    while True:
        if EventQ.empty():
            break
        e = EventQ.get()
        tGlobal = e.time
        if tGlobal > 2.0:
            break
        if e.state == 'Recovery':
            # global n
            n = n + 1
            G.nodes[e.srcNode]['state'] = 'S'
        else:
            if G.nodes[e.targetNode]['state'] == 'S':
                # global n
                n = n + 1
                # Successful infection: schedule the target's recovery and
                # fresh infection attempts from both endpoints.
                G.nodes[e.targetNode]['state'] = 'I'
                GenerateRecoveryEvent(e.targetNode, 1.0, tGlobal, EventQ)
                GenerateInfectionEvent(e.srcNode, 0.6, tGlobal, EventQ)
                GenerateInfectionEvent(e.targetNode, 0.6, tGlobal, EventQ)
            else:
                # Target already infected: only reschedule the attacker.
                GenerateInfectionEvent(e.srcNode, 0.6, tGlobal, EventQ)
    end = time.perf_counter()
    print('Event based model Running time: %s Seconds' % ((end - start) / n))
    return (end - start) / n
    # print(n)
def GA(NUMBEROFNODE):
    """Gillespie-style simulation of the same SIS process.

    Tracks infected nodes (ListI) and susceptible-infected edges (ListSI) and
    draws recovery vs. infection in proportion to their total rates. Returns
    the wall-clock time normalised by the event count `n` produced by the
    event-based run.
    """
    G = nx.Graph()
    ListI = []   # currently infected node ids
    ListSI = []  # [infected, susceptible] edge pairs
    lam = 0.6    # infection rate
    mu = 1.0     # recovery rate
    def Init_Data(G, ListI):
        # ~5% of nodes start infected.
        biState = np.random.binomial(1, 0.95, NUMBEROFNODE)
        for node in range(NUMBEROFNODE):
            G.add_node(node)
            if biState[node] == 0:
                ListI.append(node)
    def Init_ListSI(G, ListI, ListSI):
        # Build the same truncated power-law topology as SIS_MODEL, recording
        # every infected->susceptible edge in ListSI along the way.
        sumOfProb = 0
        for neighborNumber in range(3, MAXNEIGHBORCOUNT + 1):
            sumOfProb += math.pow(neighborNumber, -3)
        distribution = [None] * (MAXNEIGHBORCOUNT + 1)
        distribution[0] = 0
        distribution[1] = 0
        distribution[2] = 0
        for neighborNumber in range(3, MAXNEIGHBORCOUNT + 1):
            distribution[neighborNumber] = int(math.pow(neighborNumber, -3) / sumOfProb * NUMBEROFNODE)
        RESTNUMBER = NUMBEROFNODE - sum(distribution)
        distribution[3] = distribution[3] + RESTNUMBER
        nodeid = 0
        for i in range(len(distribution)):
            if distribution[i] == 0:
                continue
            else:
                while (distribution[i] != 0):
                    G.nodes[nodeid]['neighborNumber'] = i
                    distribution[i] = distribution[i] - 1
                    nodeid = nodeid + 1
        for node in G.__iter__():
            if len(list(G.neighbors(node))) >= G.nodes[node]['neighborNumber']:
                continue
            else:
                neighborNumberToChoose = G.nodes[node]['neighborNumber'] - len(list(G.neighbors(node)))
                if node == NUMBEROFNODE - 1:
                    break
                for i in range(neighborNumberToChoose):
                    neighborNew = random.choice(range(node + 1, NUMBEROFNODE))
                    # NOTE(review): unlike SIS_MODEL, this skips (rather than
                    # retries) saturated partners, so some nodes may end up
                    # under-connected — confirm whether that is intended.
                    if (G.nodes[neighborNew]['neighborNumber'] <= len(list(G.neighbors(neighborNew)))):
                        continue
                    # neighborNew = random.choice(range(node+1,NUMBEROFNODE))
                    G.add_edge(node, neighborNew)
                    if node in ListI and neighborNew not in ListI:
                        ListSI.append([node, neighborNew])
        global c
        # Total event rate: recoveries plus infections along S-I edges.
        c = mu * len(ListI) + lam * len(ListSI)
    def chooseEvent(lam, mu, ListI, ListSI):
        # Draw recovery with probability mu*|I|/c, otherwise infection.
        while c != 0:
            biChoice = np.random.binomial(1, mu * len(ListI) / c, 1)
            if biChoice == 1:
                GET_S_EVENT(G, ListI, ListSI)
                break
            else:
                GET_I_EVENT(G, ListI, ListSI)
                break
    def GET_S_EVENT(G, ListI, ListSI):
        # Recover a random infected node and drop its outgoing S-I edges.
        node = random.choice(ListI)
        ListI.remove(node)
        # NOTE(review): removing items from ListSI while iterating it can
        # skip adjacent entries — confirm this is acceptable here.
        for edge in ListSI:
            if edge[0] == node:
                ListSI.remove(edge)
        global c
        c = mu * len(ListI) + lam * len(ListSI)
        global tGlobal0
        # Advance the clock by the mean inter-event time 1/n.
        tGlobal0 = tGlobal0 + 1 / n
    def GET_I_EVENT(G, ListI, ListSI):
        # Infect the susceptible endpoint of a random S-I edge and add its
        # neighbouring edges to ListSI.
        edge = random.choice(ListSI)
        ListSI.remove(edge)
        ListI.append(edge[1])
        for node in list(G.neighbors(edge[1])):
            ListSI.append([edge[1], node])
        global c
        c = mu * len(ListI) + lam * len(ListSI)
        global tGlobal0
        tGlobal0 = tGlobal0 + 1 / n
    # NOTE(review): this assignment makes `c` local to GA, shadowing the
    # module-level `c` that Init_ListSI and the GET_* helpers update via
    # `global c` — the while-loop below reads the local (still 0) value and
    # may exit immediately. Confirm against intended behaviour.
    c = 0
    Init_Data(G, ListI)
    Init_ListSI(G, ListI, ListSI)
    start = time.perf_counter()
    while True:
        if c == 0:
            break
        if len(ListI) == 0:
            break
        if tGlobal0 > 2.0:
            break
        chooseEvent(lam, mu, ListI, ListSI)
    end = time.perf_counter()
    print('GA Running time: %s Seconds' % ((end - start) / n))
    return (end - start) / n
def OGA(NUMBEROFNODE):
    """Optimised Gillespie simulation of the SIS process.

    Instead of an explicit S-I edge list, each infected node is stored as a
    [node_id, susceptible_edge_count] pair and the total rate `c1` is updated
    incrementally. Returns wall-clock time normalised by the event count `n`
    from the event-based run.
    """
    def Init_Data(G, ListI):
        # ~5% of nodes start infected; entries are [node_id, s_edge_count].
        biState = np.random.binomial(1, 0.95, NUMBEROFNODE)
        for node in range(NUMBEROFNODE):
            G.add_node(node)
            if biState[node] == 0:
                ListI.append([node, 0])
    def Init_Neighbor(G, ListI):
        # Same truncated power-law topology construction as SIS_MODEL.
        sumOfProb = 0
        for neighborNumber in range(3, MAXNEIGHBORCOUNT + 1):
            sumOfProb += math.pow(neighborNumber, -3)
        distribution = [None] * (MAXNEIGHBORCOUNT + 1)
        distribution[0] = 0
        distribution[1] = 0
        distribution[2] = 0
        for neighborNumber in range(3, MAXNEIGHBORCOUNT + 1):
            distribution[neighborNumber] = int(math.pow(neighborNumber, -3) / sumOfProb * NUMBEROFNODE)
        RESTNUMBER = NUMBEROFNODE - sum(distribution)
        distribution[3] = distribution[3] + RESTNUMBER
        nodeid = 0
        for i in range(len(distribution)):
            if distribution[i] == 0:
                continue
            else:
                while (distribution[i] != 0):
                    G.nodes[nodeid]['neighborNumber'] = i
                    distribution[i] = distribution[i] - 1
                    nodeid = nodeid + 1
        for node in G.__iter__():
            if len(list(G.neighbors(node))) >= G.nodes[node]['neighborNumber']:
                continue
            else:
                neighborNumberToChoose = G.nodes[node]['neighborNumber'] - len(list(G.neighbors(node)))
                if node == NUMBEROFNODE - 1:
                    break
                for i in range(neighborNumberToChoose):
                    neighborNew = random.choice(range(node + 1, NUMBEROFNODE))
                    while (G.nodes[neighborNew]['neighborNumber'] <= len(list(G.neighbors(neighborNew)))):
                        neighborNew = random.choice(range(node + 1, NUMBEROFNODE))
                        # continue
                    G.add_edge(node, neighborNew)
    def Init_Edge(G, ListI, mu, lam):
        # Count susceptible neighbours of every infected node to obtain the
        # initial total event rate.
        SumOfEdge = 0
        for node in ListI:
            count = 0
            for neighbor in list(G.neighbors(node[0])):
                PureListI_ID = []
                # NOTE(review): this inner loop variable shadows the outer
                # `node` — the iteration still works but is fragile.
                for node in ListI:
                    PureListI_ID.append(node[0])
                if neighbor not in PureListI_ID:
                    count += 1
            SumOfEdge += count
        global c1
        c1 = mu * len(ListI) + lam * SumOfEdge
    def chooseEvent(mu, lam, ListI, G):
        # Draw recovery with probability mu*|I|/c1, otherwise infection.
        while c1 != 0:
            biChoice = np.random.binomial(1, mu * len(ListI) / c1, 1)
            if biChoice == 1:
                GET_S_EVENT(G, ListI, mu, lam)
                break
            else:
                GET_I_EVENT(G, ListI, mu, lam)
                break
    def GET_S_EVENT(G, ListI, mu, lam):
        # Recover a random infected node; subtract its contribution from c1.
        node = random.choice(ListI)
        global c1
        c1 = c1 - mu - lam * node[1]
        ListI.remove(node)
        global tGlobal1
        # Advance the clock by the mean inter-event time 1/n.
        tGlobal1 = tGlobal1 + 1 / n
    def GET_I_EVENT(G, ListI, mu, lam):
        # Pick an infection source weighted by neighbour count, then try to
        # infect a random neighbour of it.
        count = 0
        Percent = []  # cumulative neighbour counts, for weighted sampling
        for node in ListI:
            count = count + (len(list(G.neighbors(node[0]))))
            Percent.append(count)
        number = random.uniform(1, count + 1)
        for i in range(len(Percent)):
            if number < Percent[i]:
                srcNode = ListI[i][0]
                attacked_node = random.choice(list(G.neighbors(srcNode)))
                PureListI_ID = []
                for node in ListI:
                    PureListI_ID.append(node[0])
                if attacked_node not in PureListI_ID:
                    # New infection: record it and count its S-neighbours.
                    new_node = [attacked_node, 0]
                    ListI.append(new_node)
                    PureListI_ID.append(attacked_node)
                    for node in list(G.neighbors(attacked_node)):
                        if node not in PureListI_ID:
                            # G.nodes[attacked_node]['edgeNumber'] += 1
                            new_node[1] += 1
                    global c1
                    c1 = c1 + mu + lam * (new_node[1] - 1)
        global tGlobal1
        global n
        tGlobal1 = tGlobal1 + 1 / n
    G = nx.Graph()
    ListI = []
    lam = 0.6  # infection rate
    mu = 1.0   # recovery rate
    # NOTE(review): these assignments make tGlobal1 and c1 local to OGA,
    # shadowing the module globals updated by the helpers via `global` —
    # the while-loop below reads the local (still 0) c1 and may exit
    # immediately. Confirm against intended behaviour.
    tGlobal1 = 0
    c1 = 0
    Init_Data(G, ListI)
    Init_Neighbor(G, ListI)
    Init_Edge(G, ListI, mu, lam)
    start = time.perf_counter()
    while True:
        if c1 == 0:
            break
        if len(ListI) == 0:
            break
        if tGlobal1 > 2.0:
            break
        chooseEvent(mu, lam, ListI, G)
    end = time.perf_counter()
    print('OGA Running time: %s Seconds' % ((end - start) / n))
    return (end - start) / n
# Benchmark the three simulators over network sizes 10k..100k and plot the
# per-event running time of each.
ListNumber = np.linspace(10000, 100000, 10)
Event_Based_List = []
Ga_List = []
Oga_List = []
for NUMBEROFNODE in ListNumber:
    # linspace yields floats; the simulators expect an integer node count.
    Event_Based_List.append(SIS_MODEL(int(NUMBEROFNODE)))
    Ga_List.append(GA(int(NUMBEROFNODE)))
    Oga_List.append(OGA(int(NUMBEROFNODE)))
plt.scatter(ListNumber, Ga_List, c='yellow', marker='v')
plt.scatter(ListNumber, Oga_List, c='green', marker='o')
plt.scatter(ListNumber, Event_Based_List, c='red', marker=',')
plt.show()
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import gym
from gym.spaces.box import Box
from gym import spaces
import logging
import numpy as np
import time
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create_env(env_id):
    """Make a gym env wrapped with Atari preprocessing and diagnostics."""
    wrapped = gym.make(env_id)
    wrapped = AtariProcessing(wrapped)
    return Diagnostic(wrapped)
def _process_frame42(frame):
    """Crop an Atari frame and shrink it to a normalised 42x42x1 float image."""
    # Keep the 160x160 play area.
    cropped = frame[34:(34 + 160), :160]
    # Resize by half, then down to 42x42 (essentially mipmapping). Resizing
    # directly would lose pixels that, when mapped to 42x42, aren't close
    # enough to the pixel boundary.
    small = cv2.resize(cv2.resize(cropped, (80, 80)), (42, 42))
    # Average the colour channels and scale intensities into [0, 1].
    gray = small.mean(2).astype(np.float32)
    gray *= (1.0 / 255.0)
    return np.reshape(gray, [42, 42, 1])
class AtariProcessing(gym.ObservationWrapper):
    """Observation wrapper mapping raw Atari frames to 42x42x1 floats in [0, 1]."""
    def __init__(self, env=None):
        super(AtariProcessing, self).__init__(env)
        self.observation_space = Box(0.0, 1.0, [42, 42, 1])
    def _observation(self, observation):
        # Crop / resize / normalise via the module-level helper.
        return _process_frame42(observation)
class Diagnostic(gym.Wrapper):
    """Wrapper that routes reset/step results through a DiagnosticsLogger."""
    def __init__(self, env=None):
        super(Diagnostic, self).__init__(env)
        self.diagnostics = DiagnosticsLogger()
    def _reset(self):
        # Reset the logger's per-episode counters alongside the env.
        return self.diagnostics._after_reset(self.env.reset())
    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        return self.diagnostics._after_step(obs, reward, done, info)
class DiagnosticsLogger():
    """Tracks per-episode reward/length and emits summary stats at episode end."""
    def __init__(self, log_interval=503):
        self._episode_time = time.time()  # wall-clock start of current episode
        self._last_time = time.time()     # last time the fps window was sampled
        self._local_t = 0                 # total steps seen by this logger
        self._log_interval = log_interval
        self._episode_reward = 0
        self._episode_length = 0
        self._all_rewards = []
        self._last_episode_id = -1
    def _after_reset(self, observation):
        """Clear per-episode counters; returns the observation unchanged."""
        logger.info('Resetting environment')
        self._episode_reward = 0
        self._episode_length = 0
        self._all_rewards = []
        return observation
    def _after_step(self, observation, reward, done, info):
        """Accumulate step stats; returns (obs, reward, done, to_log)."""
        to_log = {}
        if self._episode_length == 0:
            # First step of a new episode: stamp its start time.
            self._episode_time = time.time()
        self._local_t += 1
        if self._local_t % self._log_interval == 0:
            now = time.time()
            fps = self._log_interval / (now - self._last_time)
            self._last_time = now
        if reward is not None:
            self._episode_reward += reward
        if observation is not None:
            self._episode_length += 1
            self._all_rewards.append(reward)
        if done:
            logger.info('Episode terminating: episode_reward=%s episode_length=%s',
                        self._episode_reward, self._episode_length)
            total_time = time.time() - self._episode_time
            to_log["global/episode_reward"] = self._episode_reward
            to_log["global/episode_length"] = self._episode_length
            to_log["global/episode_time"] = total_time
            to_log["global/reward_per_time"] = self._episode_reward / total_time
            self._episode_reward = 0
            self._episode_length = 0
            self._all_rewards = []
        return observation, reward, done, to_log
|
|
# Copyright 2017 - 2018 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provide the attack method for Iterator FGSM's implement.
"""
from __future__ import division
import logging
from collections import Iterable
import numpy as np
from .base import Attack
__all__ = [
'GradientMethodAttack', 'FastGradientSignMethodAttack', 'FGSM',
'FastGradientSignMethodTargetedAttack', 'FGSMT',
'BasicIterativeMethodAttack', 'BIM',
'IterativeLeastLikelyClassMethodAttack', 'ILCM', 'MomentumIteratorAttack',
'MIFGSM'
]
class GradientMethodAttack(Attack):
    """
    This class implements gradient attack method, and is the base of FGSM, BIM,
    ILCM, etc.
    """

    def __init__(self, model, support_targeted=True):
        """
        :param model(model): The model to be attacked.
        :param support_targeted(bool): Does this attack method support targeted.
        """
        super(GradientMethodAttack, self).__init__(model)
        self.support_targeted = support_targeted

    def _apply(self,
               adversary,
               norm_ord=np.inf,
               epsilons=0.01,
               steps=1,
               epsilon_steps=100):
        """
        Apply the gradient attack method.
        :param adversary(Adversary):
            The Adversary object.
        :param norm_ord(int):
            Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.
        :param epsilons(list|tuple|int):
            Attack step size (input variation).
            Largest step size if epsilons is not iterable.
        :param steps:
            The number of attack iteration.
        :param epsilon_steps:
            The number of Epsilons' iteration for each attack iteration.
        :return:
            adversary(Adversary): The Adversary object.
        """
        # BUG FIX: `collections.Iterable` was removed in Python 3.10; import
        # the ABC from its canonical home, with a fallback for very old
        # interpreters.
        try:
            from collections.abc import Iterable
        except ImportError:  # pragma: no cover - legacy Python fallback
            from collections import Iterable
        if norm_ord == 0:
            raise ValueError("L0 norm is not supported!")
        if not self.support_targeted:
            if adversary.is_targeted_attack:
                raise ValueError(
                    "This attack method doesn't support targeted attack!")
        if not isinstance(epsilons, Iterable):
            # A scalar is treated as the largest step size of a sweep.
            epsilons = np.linspace(0, epsilons, num=epsilon_steps)
        pre_label = adversary.original_label
        min_, max_ = self.model.bounds()
        # Sanity checks on the layout of the adversarial example (channels
        # first or channels last) relative to the model's channel axis.
        assert self.model.channel_axis() == adversary.original.ndim
        assert (self.model.channel_axis() == 1 or
                self.model.channel_axis() == adversary.original.shape[0] or
                self.model.channel_axis() == adversary.original.shape[-1])
        for epsilon in epsilons[:]:
            step = 1
            adv_img = adversary.original
            if epsilon == 0.0:
                continue
            for i in range(steps):
                # Ascend the loss w.r.t. the true label, or descend it
                # w.r.t. the target label for targeted attacks.
                if adversary.is_targeted_attack:
                    gradient = -self.model.gradient(adv_img,
                                                    adversary.target_label)
                else:
                    gradient = self.model.gradient(adv_img,
                                                   adversary.original_label)
                if norm_ord == np.inf:
                    gradient_norm = np.sign(gradient)
                else:
                    gradient_norm = gradient / self._norm(
                        gradient, ord=norm_ord)
                adv_img = adv_img + epsilon * gradient_norm * (max_ - min_)
                adv_img = np.clip(adv_img, min_, max_)
                adv_label = np.argmax(self.model.predict(adv_img))
                logging.info('step={}, epsilon = {:.5f}, pre_label = {}, '
                             'adv_label={}'.format(step, epsilon, pre_label,
                                                   adv_label))
                if adversary.try_accept_the_example(adv_img, adv_label):
                    return adversary
                step += 1
        return adversary

    @staticmethod
    def _norm(a, ord):
        """Return the `ord`-norm of `a`, flattened to 2-D around the
        (heuristically detected) channel axis."""
        if a.ndim == 1:
            return np.linalg.norm(a, ord=ord)
        # BUG FIX: the original called the Python-2 builtin `reduce` (never
        # imported, a NameError on Python 3); np.prod computes the same
        # product of the remaining dimensions.
        # NOTE(review): `a.ndim == a.shape[0]` looks like a channels-first
        # heuristic — confirm against the callers.
        if a.ndim == a.shape[0]:
            norm_shape = (a.ndim, int(np.prod(a.shape[1:])))
            norm_axis = 1
        else:
            norm_shape = (int(np.prod(a.shape[:-1])), a.ndim)
            norm_axis = 0
        return np.linalg.norm(a.reshape(norm_shape), ord=ord, axis=norm_axis)
class FastGradientSignMethodTargetedAttack(GradientMethodAttack):
    """
    "Fast Gradient Sign Method" is extended to support targeted attack.
    "Fast Gradient Sign Method" was originally implemented by Goodfellow et
    al. (2015) with the infinity norm.
    Paper link: https://arxiv.org/abs/1412.6572
    """
    def _apply(self, adversary, epsilons=0.01):
        # Single-step attack under the infinity norm.
        return GradientMethodAttack._apply(self,
                                           adversary=adversary,
                                           norm_ord=np.inf,
                                           epsilons=epsilons,
                                           steps=1)
class FastGradientSignMethodAttack(FastGradientSignMethodTargetedAttack):
    """
    This attack was originally implemented by Goodfellow et al. (2015) with the
    infinity norm, and is known as the "Fast Gradient Sign Method".
    Paper link: https://arxiv.org/abs/1412.6572
    """
    def __init__(self, model):
        # Non-targeted variant: targeted-attack support is disabled.
        super(FastGradientSignMethodAttack, self).__init__(model, False)
class IterativeLeastLikelyClassMethodAttack(GradientMethodAttack):
    """
    "Iterative Least-likely Class Method (ILCM)" extends "BIM" to support
    targeted attack.
    "The Basic Iterative Method (BIM)" is to extend "FSGM". "BIM" iteratively
    take multiple small steps while adjusting the direction after each step.
    Paper link: https://arxiv.org/abs/1607.02533
    """
    def _apply(self, adversary, epsilons=0.01, steps=1000):
        # Many small infinity-norm steps, re-evaluating the gradient each time.
        return GradientMethodAttack._apply(self,
                                           adversary=adversary,
                                           norm_ord=np.inf,
                                           epsilons=epsilons,
                                           steps=steps)
class BasicIterativeMethodAttack(IterativeLeastLikelyClassMethodAttack):
    """
    Basic Iterative Method (BIM): the multi-step, non-targeted variant of
    FGSM that adjusts the gradient direction after every small step.
    Paper link: https://arxiv.org/abs/1607.02533
    """

    def __init__(self, model):
        # Non-targeted: disable targeted support in the base class.
        super(BasicIterativeMethodAttack, self).__init__(model, False)
class MomentumIteratorAttack(GradientMethodAttack):
    """
    The Momentum Iterative Fast Gradient Sign Method (Dong et al. 2017).

    Accumulates a velocity vector in the gradient direction across
    iterations, which stabilizes update directions. This method won first
    place in the NIPS 2017 Non-targeted and Targeted Adversarial Attack
    competitions. The original paper used hard labels (no label smoothing)
    and the infinity norm.
    Paper link: https://arxiv.org/pdf/1710.06081.pdf
    """

    def __init__(self, model, support_targeted=True):
        """
        :param model(model): The model to be attacked.
        :param support_targeted(bool): Does this attack method support targeted.
        """
        super(MomentumIteratorAttack, self).__init__(model)
        self.support_targeted = support_targeted

    def _apply(self,
               adversary,
               norm_ord=np.inf,
               epsilons=0.1,
               steps=100,
               epsilon_steps=100,
               decay_factor=1):
        """
        Apply the momentum iterative gradient attack method.

        :param adversary(Adversary):
            The Adversary object.
        :param norm_ord(int):
            Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.
        :param epsilons(list|tuple|float):
            Attack step size (input variation).
            Largest step size if epsilons is not iterable.
        :param epsilon_steps:
            The number of Epsilons' iteration for each attack iteration.
        :param steps:
            The number of attack iteration.
        :param decay_factor:
            The decay factor for the momentum term.
        :return:
            adversary(Adversary): The Adversary object.
        """
        if norm_ord == 0:
            raise ValueError("L0 norm is not supported!")
        if not self.support_targeted:
            if adversary.is_targeted_attack:
                raise ValueError(
                    "This attack method doesn't support targeted attack!")

        # Sanity-check that the example layout matches the model's
        # channel-axis convention.
        assert self.model.channel_axis() == adversary.original.ndim
        assert (self.model.channel_axis() == 1 or
                self.model.channel_axis() == adversary.original.shape[0] or
                self.model.channel_axis() == adversary.original.shape[-1])

        # A scalar epsilon means "largest step size": sweep a linear grid.
        if not isinstance(epsilons, Iterable):
            epsilons = np.linspace(0, epsilons, num=epsilon_steps)

        min_, max_ = self.model.bounds()
        pre_label = adversary.original_label
        for epsilon in epsilons:
            if epsilon == 0.0:
                continue
            step = 1
            adv_img = adversary.original
            momentum = 0
            for i in range(steps):
                if adversary.is_targeted_attack:
                    gradient = -self.model.gradient(adv_img,
                                                    adversary.target_label)
                else:
                    gradient = self.model.gradient(adv_img, pre_label)

                # Accumulate momentum: each step's gradient is
                # L1-normalized before being added (Eq. 6 of the paper).
                velocity = gradient / self._norm(gradient, ord=1)
                momentum = decay_factor * momentum + velocity
                if norm_ord == np.inf:
                    normalized_grad = np.sign(momentum)
                else:
                    # BUG FIX: the update direction is the momentum divided
                    # by its norm; the previous code assigned the norm
                    # itself as the direction.
                    normalized_grad = momentum / self._norm(
                        momentum, ord=norm_ord)
                perturbation = epsilon * normalized_grad
                adv_img = adv_img + perturbation
                adv_img = np.clip(adv_img, min_, max_)
                adv_label = np.argmax(self.model.predict(adv_img))
                logging.info(
                    'step={}, epsilon = {:.5f}, pre_label = {}, adv_label={}'
                    .format(step, epsilon, pre_label, adv_label))
                if adversary.try_accept_the_example(adv_img, adv_label):
                    return adversary
                step += 1
        return adversary
# Short aliases so callers can refer to the attacks by their common
# acronyms from the adversarial-examples literature.
FGSM = FastGradientSignMethodAttack
FGSMT = FastGradientSignMethodTargetedAttack
BIM = BasicIterativeMethodAttack
ILCM = IterativeLeastLikelyClassMethodAttack
MIFGSM = MomentumIteratorAttack
|
|
#!/usr/bin/python
"""Download a GraceDB bayestar sky map and reweight it by an
antenna-pattern map, writing the result as a new FITS file.

Usage: <script> <graceid> <prior_fits_file>
"""
import numpy as np
import healpy as hp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
from detector_cache import detectors
import triangulate
from ligo.gracedb.rest import GraceDb

gdb = GraceDb()
graceid = sys.argv[1]
prior_name = sys.argv[2]
# FIX: use the print function form so the script also runs under Python 3
# (identical output for a single argument under Python 2).
print(graceid)
fitsname = 'bayestar.fits.gz'
local_file_name = 'bayestar.fits.gz'
# FIX: the FITS file is gzipped *binary* data, so it must be downloaded
# in binary mode ('wb'), not text mode ('w').
with open(local_file_name, 'wb') as lf:
    lf.write(gdb.files(graceid, fitsname).read())
post = hp.read_map(local_file_name)
prior = hp.read_map(prior_name)
# Regrid both maps to the finer of the two resolutions.
new_nside = hp.npix2nside(max(len(post), len(prior)))  # shouldn't it be just the nside of post?
post = hp.ud_grade(post, new_nside, power=-2)
prior = hp.ud_grade(prior, new_nside, power=-2)
#-----------------------------------------------------------------------------
# Antenna patterns
#-----------------------------------------------------------------------------
npix = hp.nside2npix(new_nside)
theta, phi = hp.pix2ang(new_nside, np.arange(npix))
ant = np.zeros((npix,), dtype=float)
### add the antenna response for each detector in quadrature
for name in 'H L'.split(): ### only include the 2 LIGOs for now
    fp, fx = detectors[name].antenna_patterns(theta, phi, np.zeros_like(theta))
    ant += np.abs(fp)**2 + np.abs(fx)**2
ant = ant**(3./2) ### scale the result so it corresponds to a uniform-in-volume cumulative distribution
# NOTE(review): hard-coded event GPS time -- confirm it matches graceid.
gps = 1180922494.4922
ant = triangulate.rotateMapE2C(ant, gps)
#-----------------------------------------------------------------------------
# Applying the prior
#-----------------------------------------------------------------------------
#new_post = post*prior
#new_post = post*prior / ant
# NOTE(review): the output is currently just the rotated antenna map and
# is not normalized; the post*prior variants above are commented out.
new_post = ant
#NORMALIZATION
outfile = "newprior_" + local_file_name
hp.write_map(outfile, new_post)
|
|
""" lux_limit.py
This example shows how to produce a dark matter limit plot
using one of the simple counting experiment limit methods.
The data here is meant to approximate the parameters of the
LUX 2014-2016 run, which as of 2017 has produced the
best WIMP-nucleon spin-independent limit of any direct
detection experiment.
This performs a raster scan over WIMP mass and sets a
90% upper limit at each mass point.
Some tools used here include:
* Rate calculation with detector effects
* Nucleus to Nucleon normalization
* Using the Experiment class to control calculations
* Frequentist Poisson upper limit
Example:
>>> lux_limits()
Produces a limit plot
"""
__author__ = 'Jeremy P. Lopez'
__date__ = 'June 2017'
__copyright__ = '(c) 2017, Jeremy P. Lopez'
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from ..det.Experiment import Experiment
from ..xsec.HelmFormFactor import HelmFormFactor
from ..det.Efficiency import Efficiency
from ..limits.UpperLimitBkgFree import upper_limit
from ..det.DetectorModel import DetectorModel
from ..xsec.InteractionModel import InteractionModel
from ..xsec.SINormalization import SINormalization
from ..xsec.Nucleus import Nucleus
from .. import units
class lux_efficiency(Efficiency):
    """Rough empirical parameterization of the LUX detection efficiency.

    Approximates the shape of the efficiency curve in the most recent LUX
    paper, except that the drop-off above ~50 keVr is not modeled.
    """

    def __init__(self, emin=1*units.keV):
        # Shape parameters: [logistic midpoint, logistic width,
        # turn-on energy scale], all in keV.
        self.p = [4.3, 2.6, 2.7]
        self.emin = emin

    def efficiency(self, s):
        recoil_energy = s.Er
        # Hard threshold below the configured minimum energy.
        if recoil_energy < self.emin:
            return 0
        recoil_energy = recoil_energy / units.keV
        # Product of a fast turn-on above zero and a logistic rise.
        turn_on = 1 - np.exp(-(recoil_energy / self.p[2])**4)
        logistic_rise = 1 + np.exp(-(recoil_energy - self.p[0]) / self.p[1])
        return turn_on / logistic_rise
# We'll just leave the response alone for now. That
# shouldn't matter too much for the limits
def lux_limits(emin=1*units.keV):
    """Produce the LUX-like 90% CL WIMP-nucleon SI limit plot.

    Performs a raster scan over WIMP mass, computing a background-free
    Poisson upper limit at each mass point and normalizing the per-nucleus
    cross section to a per-nucleon one. (Could be made much faster with
    the multiprocessing module.)

    Args:
        emin: Minimum energy threshold
    """
    # Xenon target: mass number / atomic number.
    A, Z = 131, 54
    pars = {'AtomicNumber': A,
            'XS': 1e-40 * units.cm**2,
            'Mt': 131*units.amu,
            'vE': 254*units.km/units.sec * np.array([0, 0, 1]),
            'v0': 230 * units.km/units.sec,
            'vesc': 544 * units.km/units.sec,
            'Mtot': 100 * units.kg,
            'rhox': 0.3 * units.GeV / (units.cm**3),
            'ExpEmin': 1.0 * units.keV,
            'ExpEmax': 50.0 * units.keV,
            'Exposure': 332 * units.day,
            }
    xenon = Nucleus({'NuclMassNumber': A,
                     'NuclAtomicNumber': Z,
                     'NuclMass': A*units.amu})
    sinorm = SINormalization()
    experiment = Experiment()
    experiment.detector_model.efficiency = lux_efficiency(emin=emin)
    experiment.interaction.form_factor = HelmFormFactor()
    experiment.set_params(pars)

    # Log-spaced mass grid from 10 GeV to 10 TeV.
    mass_grid = np.exp(np.linspace(1, 4, 31) * np.log(10)) * units.GeV
    xs_pts = np.zeros(mass_grid.size)
    # Background free: 90% CL Poisson upper limit on zero observed events.
    limit = upper_limit(0, 0.9)
    xs = experiment.interaction.total_xs
    for i, m in enumerate(mass_grid):
        experiment.set_params({'Mx': m})
        experiment.initialize()
        rate = experiment.event_rates()['Meas']
        # Scale the reference cross section to the excluded event count.
        xs_lim = xs * limit/rate
        print('Mass: ' + str(m/units.GeV) + ' GeV, XS: ' + str(xs_lim/units.cm**2)+' cm^2')
        # Convert the per-nucleus limit to a per-nucleon one.
        xs_lim_norm = xs_lim * sinorm.normalize(xenon, m)
        print('\tNormalized: ' + str(xs_lim_norm/units.cm**2) + ' cm^2')
        xs_pts[i] = xs_lim_norm

    # Draw the exclusion curve with the excluded region shaded above it.
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.plot(mass_grid/units.GeV, xs_pts / units.cm**2)
    ax.fill_between(mass_grid/units.GeV, xs_pts / units.cm**2, 10**-43, alpha=0.3)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlim([10, 10000])
    ax.set_ylim([10**-47, 10**-43])
    ax.text(20, 2e-44, '90% CL Excluded Region', fontsize=13)
    ax.text(500, 5e-47, 'Allowed Region', fontsize=13)
    ax.set_xlabel('WIMP Mass [GeV]', fontsize=15)
    ax.set_ylabel(r'WIMP-proton SI Cross Section [cm$^2$]', fontsize=15)
    ax.set_title('Estimated Sensitivity of LUX 2014-2016 Run', fontsize=15)
    ax.grid(alpha=0.3)
    plt.show()
|
|
import numpy as np
import time

# Benchmark: build an N x N complex phase matrix exp(i*k*(a_j^2 + a_i^2))
# for increasing N and report the elapsed wall time.
for N in [int(i*1000) for i in range(1, 11)]:
    a = np.linspace(0, 2*np.pi, N)
    k = 100
    # FIX: use a monotonic, high-resolution timer for benchmarking;
    # time.time() can jump when the wall clock is adjusted.
    start_time = time.perf_counter()
    M = np.exp(1j*k*(np.tile(a, (N, 1))**2 + np.tile(a.reshape(N, 1), (1, N))**2))
    elapsed = time.perf_counter() - start_time
    print('N=' + str(N) + ', time in Numpy: ', str(elapsed) + " seconds")
|
|
import random
import sys
import numpy as np
import cv2
import io
import socket
import struct
import time
import urllib.request
import json
# Minimum number of seconds between notification POSTs to the API.
NOTIFICATION_SEND_INTERVAL = 5
# Minimum number of seconds between sensor-data POSTs to the API.
DATA_SEND_INTERVAL = 30
def _post_json(url, payload):
    """POST `payload` (a dict) as JSON to `url`; return the HTTP response."""
    request = urllib.request.Request(url)
    request.add_header('Content-Type', 'application/json')
    body = json.dumps(payload).encode('utf-8')
    request.add_header('Content-Length', len(body))
    return urllib.request.urlopen(request, body)


def server_routine(frame_queue, audio_queue,
                   room_temp, room_humid, baby_temp,
                   baby_is_crying, baby_feverish):
    '''
    Routine that:
        + Parses the data from all other routines
        + Synchronizes them if necessary
        + Sends the data to the server

    :param frame_queue: queue of video frames to stream
    :param audio_queue: audio queue (currently unused here)
    :param room_temp/room_humid/baby_temp: shared values with the latest
        sensor readings (objects exposing `.value`)
    :param baby_is_crying/baby_feverish: shared boolean flags

    This routine needs continuous optimization. Do not overburden it.
    Maybe multithreading can be added?
    '''
    # Connect a client socket to my_server:2000 (change my_server to the
    # hostname of your server)
    client_socket_video = socket.socket()
    client_socket_video.connect(('167.99.215.27', 2000))
    # Make a file-like object out of the connection
    connection_video = client_socket_video.makefile('wb')
    start_stream_video = False
    start_stream_audio = False
    want_data = b'w'
    send_data = b's'
    data_current_time = time.perf_counter()
    notification_current_time = time.perf_counter()
    notification_is_sent = False
    try:
        stream_video = io.BytesIO()
        while True:
            try:
                if not start_stream_video:
                    # Wait for the server's "want data" handshake byte.
                    # Possible addition of non-blocking arguments and
                    # select will be considered.
                    answer = client_socket_video.recv(128)
                    if answer == want_data:
                        start_stream_video = True
                        client_socket_video.send(send_data)
                else:
                    frame = frame_queue.get()
                    _, encoded_frame = cv2.imencode('.jpg', frame)
                    stream_video.write(encoded_frame.tobytes())
                    # Send the payload length first so the receiver knows
                    # how many bytes to read, then the JPEG bytes.
                    connection_video.write(struct.pack('<L', stream_video.tell()))
                    connection_video.flush()
                    stream_video.seek(0)
                    connection_video.write(stream_video.read())
                    # Reset the in-memory buffer for the next frame.
                    stream_video.seek(0)
                    stream_video.truncate()
                # Snapshot the shared sensor values.
                current_room_temperature = room_temp.value
                current_room_humidity = room_humid.value
                current_baby_temperature = baby_temp.value
                crying_detected = baby_is_crying.value
                fever_detected = baby_feverish.value
                if time.perf_counter() - data_current_time > DATA_SEND_INTERVAL:
                    data_current_time = time.perf_counter()
                    _post_json("http://167.99.215.27:8000/api/data",
                               {'device_id': 26082007,
                                'room_temp': current_room_temperature,
                                'room_humd': current_room_humidity,
                                'baby_temp': current_baby_temperature})
                # Fever takes priority over crying in the notification code.
                if fever_detected:
                    notification_code = 6
                elif crying_detected:
                    notification_code = 1
                else:
                    notification_code = 0
                if time.perf_counter() - notification_current_time > NOTIFICATION_SEND_INTERVAL:
                    notification_current_time = time.perf_counter()
                    _post_json("http://167.99.215.27:8000/api/notification",
                               {'device_id': 26082007,
                                'code': notification_code})
            except KeyboardInterrupt:
                raise RuntimeError
    except Exception as exc:
        # FIX: narrowed from a bare `except:` and now reports the actual
        # error instead of silently swallowing its cause.
        print('Exception on Video Server:', exc)
        sys.stdout.flush()
    finally:
        connection_video.close()
        client_socket_video.close()
|
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sqlalchemy
import datetime as dt
from sklearn.linear_model import LogisticRegression
from dreamclinic_churn_functions import *
import pickle
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics import confusion_matrix
from sklearn_pandas import DataFrameMapper, FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from inspect import signature
def clean_df(df):
    """
    Takes in a Pandas Dataframe from Dreamclinic
    and cleans it for aggregation.

    :param df: raw transaction frame with at least HrsWorked,
        Service_Category, Therapist, Address_City, Address_State,
        Invoice_Category and clientID columns
    :return: cleaned copy of the frame (caller's frame is not mutated)
    """
    # remove rows where HrsWorked = 0 because they are just used by the
    # front desk staff somehow; .copy() so later assignments work on our
    # own frame (avoids SettingWithCopyWarning and caller mutation)
    df = df[df['HrsWorked'] != 0].copy()
    # fill NaN values in 'Service_Category' with 'Massage'
    df['Service_Category'] = df['Service_Category'].fillna(value='Massage')
    # remove white space from Therapist names
    df['Therapist'] = df['Therapist'].str.strip()
    # make all therapist names lowercase to avoid typos in data entry
    df['Therapist'] = df['Therapist'].str.lower()
    # find and replace nicknames with domain knowledge
    df = df.replace('abby thomson', 'abigail thomson')
    # Drop Address_City, Address_State and Invoice_Category columns
    df.drop(['Address_City', 'Address_State', 'Invoice_Category'],
            axis=1,
            inplace=True)
    # FIX: only drop rows that actually lack a clientID; the previous
    # blanket dropna() also discarded rows with NaNs in unrelated columns.
    df = df.dropna(subset=['clientID'])
    return df
def groupby_time(df, offset_alias='M'):
    """Group transactions by time period.

    `offset_alias` is a pandas offset-alias format code (e.g. 'M' for
    calendar month).
    """
    periods = df.TransactionDate.dt.to_period(offset_alias)
    return df.groupby(periods)
def unique_client_agg(groupby_obj):
    """Aggregate the groupby object from groupby_time() by counting
    unique values per group (e.g. unique clients per period)."""
    return groupby_obj.nunique()
def sum_client_agg(groupby_obj):
    """Aggregate the groupby object from groupby_time() by counting all
    (non-null) rows per group, i.e. total visits rather than unique
    clients."""
    return groupby_obj.count()
def clean_agg_df(client_count_df):
    """Clean the aggregated frame from unique_client_agg()/sum_client_agg().

    Drops helper columns, promotes the period index to a 'month' string
    column, and renames count columns to descriptive names.
    """
    # The per-period TransactionDate count duplicates the index; drop it.
    client_count_df = client_count_df.drop('TransactionDate', axis=1)
    # Promote the PeriodIndex to an explicit column before resetting.
    client_count_df['month'] = client_count_df.index
    client_count_df.reset_index(inplace=True)
    client_count_df["client_count"] = client_count_df['clientID']
    client_count_df.drop('clientID', axis=1, inplace=True)
    client_count_df['month'] = client_count_df['month'].astype('str')
    # NOTE(review): 'clientID' was dropped two lines above, so its rename
    # entry here is a no-op -- confirm whether 'unique_client_count' was
    # meant to be produced from the 'client_count' column instead.
    client_count_df.rename(columns={"clientID": "unique_client_count",
                                    "Therapist": "therapists_employed",
                                    "Zipcode": "zipcodes_reached"},
                           inplace=True)
    client_count_df.drop(["HrsWorked"], axis=1, inplace=True)
    return client_count_df
def line_plot(df,
              title,
              x_label,
              y_label,
              x_column='TransactionDate',
              y_column='services_performed'):
    """Draw a line plot from a cleaned, aggregated dataframe and show it."""
    fig, axis = plt.subplots(figsize=(30, 7))
    plt.title(title, fontsize=30)
    sns.lineplot(x=df[x_column],
                 y=df[y_column],
                 ax=axis)
    plt.xlabel(x_label, fontsize=25)
    plt.ylabel(y_label, fontsize=25)
    return plt.show()
def session_count_graph(session_count, min_sessions, max_sessions):
    """Histogram of how many clients had a given lifetime session count.

    :param session_count: per-client session counts (Series or array)
    :param min_sessions: lower bound of the plotted range
    :param max_sessions: upper bound of the range (also the bin count)
    """
    fig, ax = plt.subplots()
    ax.hist(session_count,
            bins=max_sessions,
            range=(min_sessions,
                   max_sessions + 1))
    plt.ylabel('# of clients')
    plt.xlabel('# of sessions they get over their lifetime as a client')
    plt.title('Amount of clients that get X number of Session')
    plt.xticks(ticks=(range(min_sessions, (max_sessions + 1))))
    # FIX: previously returned the plt.show *function object* instead of
    # calling it, so (unlike line_plot) the figure was never displayed.
    return plt.show()
def temporal_split(df,
                   start_year=2019,
                   start_month=6,
                   start_day=1,
                   end_year=2019,
                   end_month=8,
                   end_day=1):
    """
    Starts with client_df, returns DataFrame of clients labeled churn or not.

    Clients with at least one transaction inside the [start, end) window
    are labeled churn=False; everyone else churn=True. `df` must be
    sorted by 'Date' for searchsorted to be valid.
    """
    # Locate the slice of rows falling inside the labeling window
    # (the last ~2 months) so that we can label the data for modeling.
    start = df['Date'].searchsorted(dt.datetime(start_year,
                                                start_month,
                                                start_day))
    end = df['Date'].searchsorted(dt.datetime(end_year,
                                              end_month,
                                              end_day))
    # DataFrame used as labeling data.
    # FIX: removed a dead `churn` assignment on this slice that only
    # triggered SettingWithCopyWarning and had no effect on the result.
    not_churn_df = df.iloc[start:end]
    labeling_df = pd.DataFrame(not_churn_df['clientID'].unique())
    labeling_df['churn'] = False
    labeling_df = labeling_df.rename({0: 'clientID'}, axis=1)
    churn_df = df.merge(labeling_df,
                        how='left',
                        on='clientID')
    # Clients never seen in the window get NaN from the left merge -> True.
    churn_df['churn'] = churn_df['churn'].fillna(value=True)
    return churn_df
def session_count(df):
    """Take in client_df and output a session-count DataFrame plus the
    underlying per-client Series.

    :param df: transaction frame with 'clientID' and 'TransactionDate'
    :return: (DataFrame indexed by clientID with a '#_of_sessions_had'
        column, Series of distinct-TransactionDate counts per client)
    """
    session_count = df.groupby('clientID').nunique()['TransactionDate']
    # FIX: build the frame directly; the previous transpose plus a no-op
    # `.replace()` relied on assigning a whole DataFrame into a column.
    session_count_df = session_count.to_frame(name='#_of_sessions_had')
    return session_count_df, session_count
def temporal_split_test(churn_df,
                        start_year=2018,
                        start_month=12,
                        start_day=1,
                        end_year=2019,
                        end_month=5,
                        end_day=31):
    """Needs churn_df, outputs the temporal test_df for train_test_split.

    `churn_df` must be sorted by 'Date' for searchsorted to be valid.
    """
    window_start = dt.datetime(start_year, start_month, start_day)
    window_end = dt.datetime(end_year, end_month, end_day)
    first = churn_df['Date'].searchsorted(window_start)
    last = churn_df['Date'].searchsorted(window_end)
    return churn_df.iloc[first:last]
def temporal_split_train(churn_df,
                         end_year=2018,
                         end_month=11,
                         end_day=30):
    """Slice churn_df to everything before the training cutoff date.

    `churn_df` must be sorted by 'Date' for searchsorted to be valid.
    """
    cutoff = dt.datetime(end_year, end_month, end_day)
    train_rows = churn_df['Date'].searchsorted(cutoff)
    return churn_df.iloc[:train_rows]
def aggregate(df, unique_col='clientID'):
    """
    Aggregates the train and test Dataframes
    with the features for modeling.

    :param df: transaction-level frame with at least `unique_col`,
        'Date' and 'HrsWorked' columns
    :param unique_col: column identifying a client
    :return: per-client frame of summed columns plus 'total_sessions'
        and 'average_session_length'
    """
    # FIX: group by `unique_col` here too; this groupby was hard-coded to
    # 'clientID' even though the grouping column is a parameter.
    count_df = df.groupby(unique_col).nunique()
    # Sum everything else per client.
    summed_df = df.groupby(unique_col).sum()
    # Total session count = number of distinct dates per client.
    summed_df['total_sessions'] = count_df['Date']
    # Average session length calc.
    summed_df['average_session_length'] = summed_df['HrsWorked']/summed_df['total_sessions']
    return summed_df
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param y_true: true labels
    :param y_pred: predicted labels
    :param classes: display names used for the axis tick labels
    :param normalize: if True, show row-normalized rates instead of counts
    :param title: plot title; a default is chosen based on `normalize`
    :param cmap: matplotlib colormap for the heat map
    :return: the matplotlib axes the matrix was drawn on
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    # classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # Row-normalize so each true-label row sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light, for readability.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def plot_prc(y_test, y_score_log_reg, ax):
    """Plot a precision-recall curve, titled with its average precision,
    on the given axes."""
    average_precision = average_precision_score(y_test,
                                                y_score_log_reg)
    precision, recall, _ = precision_recall_curve(y_test,
                                                  y_score_log_reg)
    # Older matplotlib (< 1.5) lacks the 'step' kwarg on fill_between.
    fill_kwargs = {}
    if 'step' in signature(plt.fill_between).parameters:
        fill_kwargs['step'] = 'post'
    ax.step(recall, precision, color='b', alpha=0.2,
            where='post')
    ax.fill_between(recall, precision, alpha=0.2, color='b', **fill_kwargs)
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_ylim([0.0, 1.05])
    ax.set_xlim([0.0, 1.0])
    ax.set_title('AP={0:0.2f}'.format(average_precision))
    return None
def plot_roc(X_train, X_test, y_train, y_test, model):
    """Plot the ROC curve for `model` on the test split.

    X_train and y_train are accepted for interface compatibility but are
    not used here.
    """
    positive_scores = model.predict_proba(X_test)[:, -1]
    fpr, tpr, _ = roc_curve(y_test, positive_scores)
    plt.plot(fpr, tpr, marker='.')
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title("ROC curve for Predicting Client Churn")
    plt.show()
|
|
from graphics import *
import numpy as np
# Shared drawing surface and layout constants for all plotting helpers.
win = GraphWin('Graph', 640, 480)
win.setBackground("white")
LTMargin = 100  # left margin of the plot area, in pixels
TPMargin = 100  # top/bottom margin of the plot area, in pixels
xmax = 640 - LTMargin  # right edge of the plot area
ymax = 480 - TPMargin  # bottom edge of the plot area (y grows downward)
def rect(cpu, size):
    """Draw the CPU-time bar plus the three legend swatches.

    :param cpu: CPU time, scaled by 1/10 into pixel height
    :param size: data-set size, scaled by 1/50000 into pixel width
    """
    # FIX: np.int was deprecated and removed in NumPy >= 1.24; the builtin
    # int() is the exact equivalent here.
    rectangle = Rectangle(Point(LTMargin, 480 - TPMargin - int(cpu/10)), Point((size/50000) + LTMargin, 480 - TPMargin))
    rectangle.draw(win)
    # Legend: black swatch = CPU.
    rectangle2 = Rectangle(Point(LTMargin, TPMargin-50), Point(LTMargin+15, TPMargin - 45))
    rectangle2.setFill('black')
    rectangle2.draw(win)
    rec2_message = Text(Point(LTMargin+40, TPMargin - 48), '--> CPU')
    rec2_message.setSize(8)
    rec2_message.draw(win)
    # Legend: red swatch = GPU1 / row-wise kernel.
    rectangle3 = Rectangle(Point(LTMargin+100, TPMargin-50), Point(LTMargin+115, TPMargin - 45))
    rectangle3.setFill('red')
    rectangle3.draw(win)
    rec3_message = Text(Point(LTMargin+175, TPMargin - 48), '--> GPU1 or Row Wise')
    rec3_message.setSize(8)
    rec3_message.draw(win)
    # Legend: blue swatch = GPU2 / grid-wise kernel.
    rectangle4 = Rectangle(Point(LTMargin+250, TPMargin-50), Point(LTMargin+265, TPMargin-45))
    rectangle4.setFill('blue')
    rectangle4.draw(win)
    rec4_message = Text(Point(LTMargin+325, TPMargin - 48), '--> GPU2 or Grid Wise')
    rec4_message.setSize(8)
    rec4_message.draw(win)
def message():
    """Draw the chart title and the x/y axis annotations, then wait for a
    mouse click and close the window."""
    w = win.getWidth()
    h = win.getHeight()
    # (x, y, text, size, style) for every static label; None = default.
    labels = [
        (w/2, 20, 'CPU vs GPU execution time', None, None),
        (w/2, h - TPMargin + 35, '______________ ', 12, None),
        (w/2 + 60, h - TPMargin + 41, '> ', 12, None),
        (w/2, h - TPMargin + 55, 'No. of Data', 8, 'bold'),
        (w/2, h - TPMargin + 67, '(in 50K)', 8, 'italic'),
        (h/2 - 180, w/2 - 100, '^', 14, None),
        (h/2 - 180, w/2 - 98, '|', 12, None),
        (h/2 - 180, w/2 - 82, '|', 12, None),
        (h/2 - 180, w/2 - 66, '|', 12, None),
        (h/2 - 210, w/2 - 82, 'Time', 8, 'bold'),
        (h/2 - 210, w/2 - 72, '(in Sec.)', 8, 'italic'),
    ]
    for x, y, caption, size, style in labels:
        label = Text(Point(x, y), caption)
        if size is not None:
            label.setSize(size)
        if style is not None:
            label.setStyle(style)
        label.draw(win)
    # Block until the user clicks, then tear the window down.
    win.getMouse()
    win.close()
def outGrid(size, cpu):
    """Draw tick marks and numeric labels on the x (data size) and
    y (time) axes for one measurement.

    :param size: data-set size, scaled by 1/50000 into axis units
    :param cpu: CPU time, scaled by 1/10 into axis units
    """
    # FIX: np.int was deprecated and removed in NumPy >= 1.24; the builtin
    # int() is the exact equivalent here.
    x_grid_1 = Text(Point(LTMargin + int(size/50000), win.getHeight()-TPMargin + 14), int(size/50000))
    x_grid_1.setSize(7)
    x_grid_1.draw(win)
    x_grid_1_1 = Text(Point(LTMargin + int(size/50000), win.getHeight()-TPMargin + 3), '|')
    x_grid_1_1.setSize(7)
    x_grid_1_1.draw(win)
    y_grid_1 = Text(Point(LTMargin - 17, win.getHeight()-TPMargin - int(cpu/10)), int(cpu/10))
    y_grid_1.setSize(7)
    y_grid_1.draw(win)
    y_grid_1_1 = Text(Point(LTMargin - 3, win.getHeight()-TPMargin - int(cpu/10)), '-')
    y_grid_1_1.setSize(7)
    y_grid_1_1.draw(win)
def lineDraw(x_axis, y_axis, color):
    """Draw a poly-line through the (x_axis, y_axis) measurements, scaled
    into plot coordinates, starting from the plot origin.

    :param x_axis: data sizes (pixels = value/50000 from the left margin)
    :param y_axis: times (pixels = value/10 up from the bottom margin)
    :param color: graphics color name for the line segments
    """
    xold = LTMargin
    yold = ymax
    # FIX: xrange is Python 2 only; iterate the pairs directly instead.
    for x_val, y_val in zip(x_axis, y_axis):
        xpos = LTMargin + x_val/50000
        ypos = ymax - (y_val/10)
        segment = Line(Point(xpos, ypos), Point(xold, yold))
        segment.setFill(color)
        segment.draw(win)
        xold = xpos
        yold = ypos
|
|
#!/usr/bin/python3
import rospy
from sensor_msgs.msg import Image, CompressedImage
import picamera
import signal
import numpy as np
# Flag flipped by the SIGINT handler to request a clean shutdown of the
# capture loop in the main block.
stop_process = False
def signal_handler(signal, frame):
    # Standard signal-handler signature; just raise the shutdown flag.
    global stop_process
    stop_process = True
signal.signal(signal.SIGINT, signal_handler)
# Default capture resolution (width, height); overridden from ROS
# parameters in the main block below.
RES = (640, 480)
# Class to process camera messages
class Stream():
    """Adapter handed to picamera.start_recording(); publishes the luma
    (Y) plane of every YUV frame as a mono8 ROS Image message."""

    def __init__(self):
        self.topic_name_camera_image = rospy.get_param("topic_name_camera_image", "camera/image")
        # Publisher for the raw grayscale frames.
        self.pub_img = rospy.Publisher(self.topic_name_camera_image, Image, queue_size=1)

    def write(self, data):
        """Called by picamera with each captured frame buffer."""
        width, height = RES
        # In YUV420 layout the Y (luminance) plane is the first
        # width*height bytes of the buffer.
        luma_plane = data[:width * height]
        msg = Image()
        msg.header.stamp = rospy.Time.now()
        msg.width = width
        msg.height = height
        msg.encoding = "mono8"
        # Bytes per row of the image.
        msg.step = len(luma_plane) // height
        msg.data = luma_plane
        self.pub_img.publish(msg)
if __name__ == "__main__":
    # Set up node using NODENAME
    rospy.init_node("camera")
    # Start capturing camera images
    with picamera.PiCamera() as camera:
        # Resolution and frame rate come from private ROS parameters.
        res_width = rospy.get_param("~resolution/width", 640)
        res_height = rospy.get_param("~resolution/height", 480)
        RES = (res_width, res_height)
        fps = rospy.get_param("~fps", 50)
        rospy.loginfo("Start to streamming images of size = " + str(RES) + ", and FPS = " + str(fps))
        camera.resolution = RES
        camera.framerate = fps
        try:
            # Stream raw YUV frames into Stream.write() until the SIGINT
            # handler flips stop_process.
            camera.start_recording(Stream(), format='yuv')
            while not stop_process:
                camera.wait_recording(1)
        except Exception as e:
            # FIX: narrowed from a bare `except: pass` so failures are at
            # least logged instead of silently discarded; shutdown still
            # proceeds gracefully below.
            rospy.logwarn("Camera recording stopped: " + str(e))
        rospy.loginfo("Program closing ...")
        #camera.stop_recording()
        camera.close()
|
|
"""
visualize results for test image
"""
from numpy import asarray
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.autograd import Variable
import transforms as transforms
from skimage import io
from skimage.transform import resize
from models import *
from IPython.display import display
from IPython.display import Image as _Imgdis
import numpy as np
from time import time
from time import sleep
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale using ITU-R BT.601 luma
    weights (0.299 R + 0.587 G + 0.114 B); extra channels are ignored."""
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[..., :3], luma_weights)
def computeResult(data):
    """Classify the facial expression in `data` (an RGB image array) with
    a pre-trained VGG19 and save a 3-panel result figure to
    ./images/results/results.jpg.

    :param data: image pixel data (array-like, HxWx3, uint8-compatible)
    """
    try:
        cut_size = 44
        # Ten-crop test-time augmentation: 4 corners + center, mirrored.
        transform_test = transforms.Compose([
            transforms.TenCrop(cut_size),
            transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
        ])
        #Uses image data in array to compute the image
        raw_img = np.array(data, dtype=np.uint8)
        gray = rgb2gray(raw_img)
        # The network expects 48x48 grayscale replicated across 3 channels.
        gray = resize(gray, (48,48), mode='symmetric').astype(np.uint8)
        img = gray[:, :, np.newaxis]
        img = np.concatenate((img, img, img), axis=2)
        # img = Image.fromarray(img)
        img = Image.fromarray(img)
        inputs = transform_test(img)
        class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
        net = VGG('VGG19')
        checkpoint = torch.load(os.path.join('FER2013_VGG19', 'PrivateTest_model.t7'))
        net.load_state_dict(checkpoint['net'])
        # NOTE(review): requires a CUDA device; there is no CPU fallback.
        net.cuda()
        net.eval()
        ncrops, c, h, w = np.shape(inputs)
        inputs = inputs.view(-1, c, h, w)
        inputs = inputs.cuda()
        # NOTE(review): Variable(volatile=True) is deprecated in modern
        # PyTorch; torch.no_grad() is the current equivalent -- confirm
        # the installed torch version before changing.
        inputs = Variable(inputs, volatile=True)
        outputs = net(inputs)
        outputs_avg = outputs.view(ncrops, -1).mean(0)  # avg over crops
        score = F.softmax(outputs_avg)
        _, predicted = torch.max(outputs_avg.data, 0)
        # Panel 1: the original input image.
        plt.rcParams['figure.figsize'] = (13.5,5.5)
        axes=plt.subplot(1, 3, 1)
        plt.imshow(raw_img)
        plt.xlabel('Input Image', fontsize=16)
        axes.set_xticks([])
        axes.set_yticks([])
        plt.tight_layout()
        plt.subplots_adjust(left=0.05, bottom=0.2, right=0.95, top=0.9, hspace=0.02, wspace=0.3)
        # Panel 2: per-class softmax scores as a bar chart.
        plt.subplot(1, 3, 2)
        ind = 0.1+0.6*np.arange(len(class_names))  # the x locations for the groups
        width = 0.4  # the width of the bars: can also be len(x) sequence
        color_list = ['red','orangered','darkorange','limegreen','darkgreen','royalblue','navy']
        for i in range(len(class_names)):
            plt.bar(ind[i], score.data.cpu().numpy()[i], width, color=color_list[i])
        plt.title("Classification results ",fontsize=20)
        plt.xlabel(" Expression Category ",fontsize=16)
        plt.ylabel(" Classification Score ",fontsize=16)
        plt.xticks(ind, class_names, rotation=45, fontsize=14)
        # Panel 3: the emoji matching the predicted class.
        axes=plt.subplot(1, 3, 3)
        emojis_img = io.imread('./images/emojis/%s.png' % str(class_names[int(predicted.cpu().numpy())]))
        plt.imshow(emojis_img)
        plt.xlabel('Emoji Expression', fontsize=16)
        axes.set_xticks([])
        axes.set_yticks([])
        plt.tight_layout()
        # show emojis
        #plt.show()
        plt.savefig(os.path.join('./images/results/' + 'results.jpg'))
        plt.close()
        print("Result:" + "%s" %str(class_names[int(predicted.cpu().numpy())]))
    except:
        # NOTE(review): this bare except hides *all* failures (CUDA errors,
        # missing checkpoint, bad input) behind the same message --
        # consider narrowing it and reporting the actual exception.
        print('Cannot find image')
def predict(getImage):
    """Load `getImage` from the ./images folder, echo its metadata, save a
    re-encoded copy as new.jpg, and run emotion classification on it via
    computeResult().

    :param getImage: file name relative to the ./images folder
    """
    try:
        folder = "./images"
        im = Image.open(folder + '/' + getImage)
        print(im)
        print(im.format)
        print(im.size)
        print(im.mode)
        display(im)
        data = asarray(im)
        print('type of data:')
        print(type(data))
        # summarize shape
        print('shape of data:')
        print(data.shape)
        # create Pillow image
        print('class data:')
        image2 = Image.fromarray(data)
        print(type(image2))
        print('mode of data:')
        # summarize image details
        print(image2.mode)
        print('size of data:')
        print(image2.size)
        print('data:')
        print(data)
        #Displays imaga data converted into image
        # display(image2)
        # Round-trip the pixel data through a fresh array and save a copy.
        newArray = np.array(data, dtype=np.uint8)
        get_image = Image.fromarray(newArray)
        get_image.save(folder + '/' + 'new.jpg')
        computeResult(data)
    except Exception:
        # FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); processing failures are still
        # reported the same way.
        print('Image not found')
|
|
#!/usr/bin/env python
# coding: utf-8
# # Some example HMMs
#
# In[1]:
# NOTE(review): leftover Jupyter cell metadata exported as a bare dict
# expression; it evaluates to a dict that is immediately discarded
# (a harmless no-op). Consider deleting it.
{
"tags": [
    "hide-input",
]
}
# Install necessary libraries (notebook-style bootstrap: try the import,
# pip-install on failure, then import again).
try:
    import jax
except ImportError:
    # FIX: narrowed from a bare `except:` so only a missing package
    # triggers installation; other import-time errors now propagate.
    # For cuda version, see https://github.com/google/jax#installation
    get_ipython().run_line_magic('pip', 'install --upgrade "jax[cpu]"')
    import jax

try:
    import jsl
except ImportError:
    get_ipython().run_line_magic('pip', 'install git+https://github.com/probml/jsl')
    import jsl

try:
    import rich
except ImportError:
    get_ipython().run_line_magic('pip', 'install rich')
    import rich
# In[2]:
import abc
from dataclasses import dataclass
import functools
import itertools
from typing import Any, Callable, NamedTuple, Optional, Union, Tuple
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import inspect
import inspect as py_inspect
from rich import inspect as r_inspect
from rich import print as r_print
def print_source(fname):
    """Pretty-print the source code of the given function/class via rich."""
    source_text = py_inspect.getsource(fname)
    r_print(source_text)
|
|
# -*- coding: utf-8 -*-
"""
Name: htsPlot.py
Author: Collin Rooney
Last Updated: 7/17/2017
This script will contain functions for plotting the output of the hts.py file
These plots will be made to look like the plots Prophet creates
Credit to Rob J. Hyndman and research partners as much of the code was developed with the help of their work
https://www.otexts.org/fpp
https://robjhyndman.com/publications/
Credit to Facebook and their fbprophet package
https://facebookincubator.github.io/prophet/
It was my intention to make some of the code look similar to certain sections in the Prophet and (Hyndman's) hts packages
"""
from matplotlib import pyplot as plt
from matplotlib.dates import MonthLocator, num2date
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
import sys
#%%
def plotNode(dictframe, column, h = 1, xlabel = 'ds', ylabel = 'y', startFrom = 0, uncertainty = False, ax = None):
    '''
    Plot the forecast of a single node from the hts output.

    Parameters
    ------------------
    dictframe - (dict) dictionary of dataframes produced by the hts function
    column - (string) title of the column (node) to plot
    h - (int) number of forecast steps, same as the input to the hts function
    xlabel - (string) label for the graph's x axis
    ylabel - (string) label for the graph's y axis
    startFrom - (int) number of leading yhat values to skip so you can zoom in
    uncertainty - (Boolean) whether to draw the prediction intervals
    ax - (axes object) existing axes to draw on; a new figure is made if None

    Returns
    ------------------
    matplotlib figure holding the node's forecast plot
    '''
    frame = dictframe[column]
    # Reuse the caller's axes when given, otherwise build a fresh figure.
    if ax is None:
        fig = plt.figure(facecolor='w', figsize=(10, 6))
        ax = fig.add_subplot(111)
    else:
        fig = ax.get_figure()
    # Fitted portion as a solid line, then the h-step-ahead forecast dashed.
    ax.plot(frame['ds'].values[startFrom:-h], frame['yhat'][startFrom:-h], ls='-', c='#0072B2')
    ax.plot(frame['ds'].values[-h:], frame['yhat'][-h:], dashes=[2, 1])
    # Capacity line and uncertainty band are optional extras.
    if 'cap' in frame:
        ax.plot(frame['ds'].values[startFrom:], frame['cap'][startFrom:], ls='--', c='k')
    if uncertainty:
        ax.fill_between(frame['ds'].values[startFrom:],
                        frame['yhat_lower'][startFrom:],
                        frame['yhat_upper'][startFrom:],
                        color='#0072B2', alpha=0.2)
    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    fig.tight_layout()
    return fig
#%%
def plotWeekly(dictframe, ax, uncertainty, weeklyStart, color='#0072B2'):
    """Plot the weekly seasonality component of one node's forecast frame.

    dictframe - (DataFrame) forecast frame with 'ds' and 'weekly' columns
    ax - (axes object) existing axes to draw on; a new figure is made if None
    uncertainty - (Boolean) whether to draw the weekly_lower/upper band
    weeklyStart - (int) shift (in days) of the first day shown on the x axis
    color - (string) line/band color

    Returns the matplotlib figure.
    """
    if ax is None:
        figW = plt.figure(facecolor='w', figsize=(10, 6))
        ax = figW.add_subplot(111)
    else:
        figW = ax.get_figure()
    # Seven consecutive days form the x axis, shifted by weeklyStart.
    days = (pd.date_range(start='2017-01-01', periods=7) +
            pd.Timedelta(days=weeklyStart))
    # For every weekday keep the index of its last occurrence in ds, so we
    # plot exactly one value per weekday.
    weekdays = dictframe.ds.dt.weekday
    ind = [max(weekdays[weekdays == wd].index.tolist()) for wd in range(7)]
    ax.plot(range(len(days)), dictframe['weekly'][ind], ls='-', c=color)
    # Optional uncertainty band.
    if uncertainty:
        ax.fill_between(range(len(days)), dictframe['weekly_lower'][ind],
                        dictframe['weekly_upper'][ind], color=color, alpha=0.2)
    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
    ax.set_xticks(range(len(days)))
    ax.set_xticklabels(dictframe['ds'][ind].dt.day_name())
    ax.set_xlabel('Day of week')
    ax.set_ylabel('weekly')
    figW.tight_layout()
    return figW
def plotYearly(dictframe, ax, uncertainty, color='#0072B2'):
    """
    Plot the yearly seasonality component of one node's forecast frame.

    dictframe - (DataFrame) forecast frame with 'ds' and 'yearly' columns
    ax - (axes object) existing axes to draw on; a new figure is made if None
    uncertainty - (Boolean) whether to draw the yearly_lower/upper band
    color - (string) line/band color

    Returns the matplotlib figure.
    """
    if ax is None:
        figY = plt.figure(facecolor='w', figsize=(10, 6))
        ax = figY.add_subplot(111)
    else:
        figY = ax.get_figure()
    ##
    # Find the max index for an entry of each month
    ##
    months = dictframe.ds.dt.month
    ind = []
    for month in range(1, 13):
        ind.append(max(months[months == month].index.tolist()))
    ##
    # Plot from the minimum of those maximums on (this will almost certainly
    # result in only 1 year plotted)
    ##
    ax.plot(dictframe['ds'][min(ind):], dictframe['yearly'][min(ind):], ls='-', c=color)
    if uncertainty:
        ax.fill_between(dictframe['ds'].values[min(ind):],
                        dictframe['yearly_lower'][min(ind):],
                        dictframe['yearly_upper'][min(ind):],
                        color=color, alpha=0.2)
    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
    # FIX: renamed from 'months' — the original reassigned the month Series
    # variable to the locator, shadowing it and obscuring the code above.
    month_locator = MonthLocator(range(1, 13), bymonthday=1, interval=2)
    ax.xaxis.set_major_formatter(FuncFormatter(
        lambda x, pos=None: '{dt:%B} {dt.day}'.format(dt=num2date(x))))
    ax.xaxis.set_major_locator(month_locator)
    ax.set_xlabel('Day of year')
    ax.set_ylabel('yearly')
    figY.tight_layout()
    return figY
def plotHolidays(dictframe, holidays, ax, uncertainty, color='#0072B2'):
    """
    Plot the summed holiday component of one node's forecast frame.

    dictframe - (DataFrame) forecast frame containing one column per holiday
    holidays - (DataFrame) holiday definition frame with a 'holiday' column
    ax - (axes object) existing axes to draw on; a new figure is made if None
    uncertainty - (Boolean) whether to draw the summed lower/upper band
    color - (string) line/band color

    Returns the matplotlib figure.
    """
    ##
    # This function is largely the same as the one in Prophet
    ##
    if ax is None:
        figH = plt.figure(facecolor='w', figsize=(10, 6))
        ax = figH.add_subplot(111)
    else:
        figH = ax.get_figure()
    # Sum the individual per-holiday effect columns into one combined series.
    holidayComps = holidays.holiday.unique().tolist()
    yHoliday = dictframe[holidayComps].sum(1)
    # Only keep lower/upper columns that actually exist in the frame.
    HL_cols = list(set([h + '_lower' for h in holidayComps]) & set(dictframe.columns))
    HU_cols = list(set([h + '_upper' for h in holidayComps]) & set(dictframe.columns))
    yHolidayL = dictframe[HL_cols].sum(1)
    yHolidayU = dictframe[HU_cols].sum(1)
    # NOTE the above CI calculation is incorrect if holidays overlap
    # in time. Since it is just for the visualization we will not
    # worry about it now.
    ax.plot(dictframe['ds'].values, yHoliday, ls='-',
            c=color)
    if uncertainty:
        ax.fill_between(dictframe['ds'].values, yHolidayL, yHolidayU, color=color, alpha=0.2)
    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
    ax.set_xlabel('ds')
    ax.set_ylabel('holidays')
    figH.tight_layout()
    return figH
def plotTrend(dictframe, ax, uncertainty, plotCap, color='#0072B2'):
    """Plot the trend component of a forecast frame (mirrors Prophet's plot).

    dictframe - (DataFrame) forecast frame with 'ds' and 'trend' columns
    ax - (axes object) existing axes to draw on; a new figure is made if None
    uncertainty - (Boolean) whether to draw the trend_lower/upper band
    plotCap - (Boolean) whether to draw the cap line (if present)
    color - (string) line/band color

    Returns the matplotlib figure.
    """
    if ax is None:
        figT = plt.figure(facecolor='w', figsize=(10, 6))
        ax = figT.add_subplot(111)
    else:
        figT = ax.get_figure()
    xvals = dictframe['ds'].values
    ax.plot(xvals, dictframe['trend'], ls='-', c=color)
    # Capacity line only when requested and available.
    if 'cap' in dictframe and plotCap:
        ax.plot(xvals, dictframe['cap'], ls='--', c='k')
    if uncertainty:
        ax.fill_between(xvals, dictframe['trend_lower'], dictframe['trend_upper'],
                        color=color, alpha=0.2)
    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
    ax.set_xlabel('ds')
    ax.set_ylabel('trend')
    figT.tight_layout()
    return figT
def plotNodeComponents(dictframe, column, holidays = None, uncertainty=False, plotCap=False, weeklyStart = 0, ax=None,):
    '''
    Plot the individual components (trend, holidays, seasonalities) of a node.

    Parameters
    ------------------
    dictframe - (dict) The dictionary of dataframes that is the output of the hts function
    column - (string) column title that you want to plot
    holidays - (DataFrame) holiday frame with a 'holiday' column, or None
    uncertainty - (Boolean) include the prediction intervals or not
    plotCap - (Boolean) include the cap lines or not
    weeklyStart - (int) an integer that specifies the first day on the x axis of the weekly plot
    ax - (axes object) any axes object thats already created that you want to pass to the plot functions

    Returns
    ------------------
    None; one figure per available component is created
    '''
    nodeToPlot = dictframe[column]
    colNames = nodeToPlot.columns.tolist()
    trend = "trend" in colNames
    # BUG FIX: 'holiday' was previously only assigned inside the
    # "holidays is not None" branch, so calling this function without a
    # holidays frame raised NameError at "if holiday:" below.
    holiday = False
    if holidays is not None:
        holiday = np.any(holidays.holiday[0] in colNames)
    weekly = "weekly" in colNames
    yearly = "yearly" in colNames
    # Draw one figure per component that exists in this node's frame.
    if trend:
        plotTrend(nodeToPlot, ax=ax, uncertainty=uncertainty, plotCap=plotCap)
    if holiday:
        plotHolidays(nodeToPlot, holidays=holidays, ax=ax, uncertainty=uncertainty)
    if weekly:
        plotWeekly(nodeToPlot, ax=ax, uncertainty=uncertainty, weeklyStart=weeklyStart)
    if yearly:
        plotYearly(nodeToPlot, ax=ax, uncertainty=uncertainty)
    return
#%%
def plotChild(dictframe, column, h = 1, xlabel = 'ds', ylabel = 'y', startFrom = 0, uncertainty = False, ax = None):
    '''
    Parameters
    ------------------
    dictframe - (dict) The dictionary of dataframes that is the output of the hts function
    column - (string) column title that you want to plot
    h - (int) number of steps in the forecast same as input to hts function
    xlabel - (string) label for the graph's x axis
    ylabel - (string) label for the graph's y axis
    startFrom - (int) the number of values to skip at the beginning of yhat so that you can zoom in
    uncertainty - (Boolean) include the prediction intervals or not
    ax - (axes object) any axes object thats already created that you want to pass to the plot function

    Returns
    ------------------
    plot of that node and its children's forecast
    '''
    ##
    # Use the tab10 colormap so there are enough dark, discernably different
    # colors for the node and its children.
    ##
    cmap = plt.get_cmap('tab10')
    ##
    # Find the children nodes: keys containing this column's name.  Node
    # names encode depth by '_' count — direct children carry one more '_'
    # than their parent, grandchildren two more, and so on.
    ##
    colOptions = list(dictframe.keys())
    allChildren = [s for s in colOptions if column in s]
    countChildren = [s.count('_') for s in colOptions if column in s]
    if min(countChildren)+1 not in countChildren and column != "Total":
        sys.exit("the specified column doesn't have children")
    # Keep only the node plus its direct children: cut the matching keys
    # just before the first grandchild (two extra '_').
    if min(countChildren)+2 not in countChildren:
        columnsToPlot = allChildren
    else:
        ind = countChildren.index(min(countChildren)+2)
        columnsToPlot = allChildren[0:ind]
    # "Total" is the root, so its children are all top-level nodes.
    if column == 'Total':
        allChildren = [s for s in colOptions]
        countChildren = [s.count('_') for s in colOptions]
        if max(countChildren) > 0:
            ind = countChildren.index(min(countChildren)+1)
            columnsToPlot = allChildren[0:ind]
        else:
            columnsToPlot = allChildren
    ##
    # Plot the node and its children the same way as the plotNode function
    # does it: fitted yhat solid, h-step forecast dashed, one color each.
    # NOTE(review): the loop variable below reuses the name 'column',
    # shadowing the parameter; safe here because the parameter is no longer
    # needed, but worth renaming eventually.
    ##
    i = 0
    N = len(columnsToPlot)
    for column in columnsToPlot:
        nodeToPlot = dictframe[column]
        if ax is None:
            fig = plt.figure(facecolor='w', figsize=(10, 6))
            ax = fig.add_subplot(111)
        else:
            fig = ax.get_figure()
        ax.plot(nodeToPlot['ds'].values[startFrom:-h], nodeToPlot['yhat'][startFrom:-h], ls='-', c = cmap(float(i)/N), label = column)
        ax.plot(nodeToPlot['ds'].values[-h:], nodeToPlot['yhat'][-h:], dashes = [2,1], c = cmap(float(i)/N), label = '_nolegend_')
        if 'cap' in nodeToPlot:
            ax.plot(nodeToPlot['ds'].values[startFrom:], nodeToPlot['cap'][startFrom:], ls='--', c='k')
        if uncertainty:
            ax.fill_between(nodeToPlot['ds'].values[startFrom:], nodeToPlot['yhat_lower'][startFrom:],
                            nodeToPlot['yhat_upper'][startFrom:], color='#0072B2',
                            alpha=0.2)
        i+=1
    ax.grid(True, which='major', color='gray', ls='-', lw=1, alpha = 0.2)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.legend()
    fig.tight_layout()
    return fig
|
|
#
# Author: Piyush Agram
# Copyright 2016
#
import logging
import isceobj
import mroipac
import os
logger = logging.getLogger('isce.topsinsar.runPreprocessor')
def runComputeBaseline(self):
    """
    Compute per-swath parallel/perpendicular baselines between the master
    and slave TOPS SLC products and record common-burst bookkeeping.

    For each requested swath, the common burst range between master and
    slave is determined.  When bursts overlap, Bpar/Bperp are evaluated at
    mid-range for the first and last common burst and written, together
    with the burst indices, to the processing catalog.

    Side effects: sets self._insar.commonBurstStartMasterIndex,
    commonBurstStartSlaveIndex and numberOfCommonBursts, and appends all
    catalog items to self._insar.procDoc.
    """
    from isceobj.Planet.Planet import Planet
    import numpy as np

    swathList = self._insar.getInputSwathList(self.swaths)
    # Per-swath bookkeeping; -1 / 0 mean "no common bursts for this swath".
    commonBurstStartMasterIndex = [-1] * self._insar.numberOfSwaths
    commonBurstStartSlaveIndex = [-1] * self._insar.numberOfSwaths
    numberOfCommonBursts = [0] * self._insar.numberOfSwaths

    catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)

    for swath in swathList:
        masterxml = os.path.join( self._insar.masterSlcProduct,'IW{0}.xml'.format(swath))
        slavexml = os.path.join( self._insar.slaveSlcProduct, 'IW{0}.xml'.format(swath))

        if os.path.exists(masterxml) and os.path.exists(slavexml):
            master = self._insar.loadProduct(masterxml)
            slave = self._insar.loadProduct(slavexml)

            # burstOffset: index shift of slave bursts relative to master;
            # [minBurst, maxBurst) is the common range in master indices.
            burstOffset, minBurst, maxBurst = master.getCommonBurstLimits(slave)
            commonSlaveIndex = minBurst + burstOffset
            numberCommon = maxBurst - minBurst

            if numberCommon == 0:
                print('No common bursts found for swath {0}'.format(swath))
            else:
                ###Bookkeeping
                commonBurstStartMasterIndex[swath-1] = minBurst
                commonBurstStartSlaveIndex[swath-1] = commonSlaveIndex
                numberOfCommonBursts[swath-1] = numberCommon

                catalog.addItem('IW-{0} Number of bursts in master'.format(swath), master.numberOfBursts, 'baseline')
                catalog.addItem('IW-{0} First common burst in master'.format(swath), minBurst, 'baseline')
                catalog.addItem('IW-{0} Last common burst in master'.format(swath), maxBurst, 'baseline')
                catalog.addItem('IW-{0} Number of bursts in slave'.format(swath), slave.numberOfBursts, 'baseline')
                catalog.addItem('IW-{0} First common burst in slave'.format(swath), minBurst + burstOffset, 'baseline')
                catalog.addItem('IW-{0} Last common burst in slave'.format(swath), maxBurst + burstOffset, 'baseline')
                catalog.addItem('IW-{0} Number of common bursts'.format(swath), numberCommon, 'baseline')

                refElp = Planet(pname='Earth').ellipsoid
                Bpar = []
                Bperp = []

                # Evaluate the baseline at the first and the last common burst.
                for boff in [0, numberCommon-1]:
                    ###Baselines at top of common bursts
                    mBurst = master.bursts[minBurst + boff]
                    sBurst = slave.bursts[commonSlaveIndex + boff]

                    ###Target at mid range
                    tmid = mBurst.sensingMid
                    rng = mBurst.midRange
                    masterSV = mBurst.orbit.interpolate(tmid, method='hermite')
                    target = mBurst.orbit.rdr2geo(tmid, rng)

                    # Geometry from the slave side to the same ground target.
                    slvTime, slvrng = sBurst.orbit.geo2rdr(target)
                    slaveSV = sBurst.orbit.interpolateOrbit(slvTime, method='hermite')

                    targxyz = np.array(refElp.LLH(target[0], target[1], target[2]).ecef().tolist())
                    mxyz = np.array(masterSV.getPosition())
                    mvel = np.array(masterSV.getVelocity())
                    sxyz = np.array(slaveSV.getPosition())

                    # Project the slave position into the plane perpendicular
                    # to the master velocity (removes along-track separation).
                    mvelunit = mvel / np.linalg.norm(mvel)
                    sxyz = sxyz - np.dot ( sxyz-mxyz, mvelunit) * mvelunit

                    # Law of cosines in the master/slave/target triangle gives
                    # the component of the baseline parallel to the look vector.
                    aa = np.linalg.norm(sxyz-mxyz)
                    costheta = (rng*rng + aa*aa - slvrng*slvrng)/(2.*rng*aa)

                    Bpar.append(aa*costheta)

                    # Remaining component is perpendicular; its sign comes from
                    # the orientation of the baseline w.r.t. the look direction.
                    perp = aa * np.sqrt(1 - costheta*costheta)
                    direction = np.sign(np.dot( np.cross(targxyz-mxyz, sxyz-mxyz), mvel))
                    Bperp.append(direction*perp)

                catalog.addItem('IW-{0} Bpar at midrange for first common burst'.format(swath), Bpar[0], 'baseline')
                catalog.addItem('IW-{0} Bperp at midrange for first common burst'.format(swath), Bperp[0], 'baseline')
                catalog.addItem('IW-{0} Bpar at midrange for last common burst'.format(swath), Bpar[1], 'baseline')
                catalog.addItem('IW-{0} Bperp at midrange for last common burst'.format(swath), Bperp[1], 'baseline')
        else:
            print('Skipping processing for swath number IW-{0}'.format(swath))

    self._insar.commonBurstStartMasterIndex = commonBurstStartMasterIndex
    self._insar.commonBurstStartSlaveIndex = commonBurstStartSlaveIndex
    self._insar.numberOfCommonBursts = numberOfCommonBursts

    # At least one swath needs >= 2 overlapping bursts for interferometry.
    if not any([x>=2 for x in self._insar.numberOfCommonBursts]):
        print('No swaths contain any burst overlaps ... cannot continue for interferometry applications')

    catalog.printToLog(logger, "runComputeBaseline")
    self._insar.procDoc.addAllFromCatalog(catalog)
|
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['ImStack']
# Cell
import torch
import torch.nn as nn
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
class ImStack(nn.Module):
    """ This class represents an image as a series of stacked arrays, where each is 1/scale
    the resolution of the next. This is useful eg when trying to create an image to minimise
    some loss - parameters in the early (small) layers can have an effect on the overall
    structure and shapes while those in later layers act as residuals and fill in fine detail.
    """

    def __init__(self, n_layers=4, base_size=32, scale=2,
                 init_image=None, out_size=256, decay=0.7,
                 device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
        """Constructs the Image Stack

        Args:
          n_layers: How many layers in the stack
          base_size: The size of the smallest layer
          scale: how much larger each subsequent layer is
          init_image: Pass in a PIL image (or a path string) if you don't want to start from noise
          out_size: The output size. Works best if output size ~= base_size * (scale ** (n_layers-1))
          decay: When initializing with noise, decay controls scaling of later layers (avoiding too much high-frequency noise)
          device: torch device the layer tensors live on

        NOTE(review): the device default is evaluated once at import time;
        pass device explicitly if CUDA availability may change at runtime.
        """
        super().__init__()
        self.n_layers = n_layers
        self.base_size = base_size
        self.sig = nn.Sigmoid()
        self.layers = []
        self.device = device
        # Layer i is a (3, side, side) noise tensor; later (larger) layers
        # are damped by decay**i to avoid too much high-frequency noise.
        for i in range(n_layers):
            side = base_size * (scale ** i)
            tim = torch.randn((3, side, side)).to(device) * (decay ** i)
            self.layers.append(tim)
        # Upsamplers bringing each layer to out_size (and to 224 for previews).
        self.scalers = [nn.Upsample(scale_factor=out_size / (l.shape[1]), mode='bilinear',
                                    align_corners=False) for l in self.layers]
        self.preview_scalers = [nn.Upsample(scale_factor=224 / (l.shape[1]), mode='bilinear',
                                            align_corners=False) for l in self.layers]
        if init_image is not None:  # Given a PIL image (or path), decompose it into a stack
            if isinstance(init_image, str):
                try:
                    init_image = Image.open(init_image)
                except Exception:
                    raise Exception(f"couldn't open {init_image}")
            init_image = init_image.convert('RGB')
            downscalers = [nn.Upsample(scale_factor=(l.shape[1] / out_size), mode='bilinear',
                                       align_corners=False) for l in self.layers]
            # Clip into (0, 1) exclusive so the logit below stays finite.
            im = torch.tensor(np.array(init_image.resize((out_size, out_size))) / 255).clip(1e-03, 1 - 1e-3)
            im = im.permute(2, 0, 1).unsqueeze(0).to(device)
            for i in range(n_layers):
                self.layers[i] *= 0  # Zero out the layers
            # Greedily fill each layer with the downscaled residual between
            # the target and the current reconstruction, in logit space.
            for i in range(n_layers):
                out = self.forward()
                residual = (torch.logit(im) - torch.logit(out))
                # NOTE(review): debug artifact — writes residual{i}.png to the
                # CWD on every image init, and logit() of a residual that can
                # be negative yields NaNs in the saved picture. Kept for
                # backward compatibility; consider removing.
                Image.fromarray((torch.logit(residual).detach().cpu().squeeze().permute([1, 2, 0]) * 255).numpy().astype(np.uint8)).save(f'residual{i}.png')
                self.layers[i] = downscalers[i](residual).squeeze().float()
        # The raw tensors (not nn.Parameters) are optimized directly by callers.
        for l in self.layers:
            l.requires_grad = True

    def forward(self):
        """Sums the stacked layers (upsampling them all to out_size) and then runs the result
        through a sigmoid function. Result is a (1, 3, out_size, out_size) tensor in (0, 1)."""
        im = self.scalers[0](self.layers[0].unsqueeze(0))
        for i in range(1, self.n_layers):
            im += self.scalers[i](self.layers[i].unsqueeze(0))
        return self.sig(im)

    def preview(self, n_preview=2):
        """Creates a 224px image using only the first n_preview layers. Useful if you want to
        optimise the first few layers before starting to optimize the entire stack."""
        im = self.preview_scalers[0](self.layers[0].unsqueeze(0))
        for i in range(1, n_preview):
            im += self.preview_scalers[i](self.layers[i].unsqueeze(0))
        return self.sig(im)

    def to_pil(self):
        """Return the result as a PIL Image (useful for saving, transforming, viewing etc)"""
        return Image.fromarray((self.forward().detach().cpu().squeeze().permute([1, 2, 0]) * 255).numpy().astype(np.uint8))

    def preview_pil(self):
        """Return the preview() image as a PIL Image."""
        return Image.fromarray((self.preview().detach().cpu().squeeze().permute([1, 2, 0]) * 255).numpy().astype(np.uint8))

    def save(self, fn):
        """Save the image to a given filename (fn)"""
        self.to_pil().save(fn)

    def plot_layers(self):
        """View the layers in the stack - nice to build intuition about what's happening."""
        fig, axs = plt.subplots(1, self.n_layers, figsize=(15, 5))
        for i in range(self.n_layers):
            im = (self.sig(self.layers[i].unsqueeze(0)).detach().cpu().squeeze().permute([1, 2, 0]) * 255).numpy().astype(np.uint8)
            axs[i].imshow(im)
|
|
#!/usr/bin/env python3
from distutils.spawn import find_executable
import matplotlib.pyplot as plt
# import plotly.express as px
import seaborn as sns
import pandas as pd
import numpy as np
import subprocess
import statistics
import random
import math
import gzip
import uuid
import sys
import re
import os
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~ OPEN FOR BUSINESS ~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AuthoR: uhlm [at] informatik.uni-freiburg.de
~~~~~~~~~~~~~
Run doctests
~~~~~~~~~~~~~
python3 -m doctest hoodlib.py
"""
################################################################################
def is_tool(name):
    """Check whether tool "name" is in PATH.

    Uses shutil.which instead of the deprecated
    distutils.spawn.find_executable (distutils was removed in Python 3.12).
    """
    from shutil import which  # local import keeps the module-level API unchanged
    return which(name) is not None
################################################################################
def dir_get_files(file_dir,
                  file_ending=False,
                  check=True):
    """
    Return a sorted list of file names found in given file_dir.
    E.g. file_ending="bed" to filter for .bed files.

    file_ending:
        If set, keep only files whose name truly ends with ".<file_ending>".
    check:
        If True, assert the directory (and, after filtering, the result)
        is non-empty.

    >>> test_dir = "test_data"
    >>> dir_get_files(test_dir, file_ending="bam")
    ['empty.bam']
    """
    from os import listdir
    from os.path import isfile, join
    dir_files = [f for f in listdir(file_dir) if isfile(join(file_dir, f))]
    if check:
        assert dir_files, "given directory \"%s\" contains no files" %(file_dir)
    # If filter for file ending true.
    if file_ending:
        # BUG FIX: the previous regex (".+\.%s" without a trailing "$") also
        # matched files where the ending appears mid-name, e.g. "a.bam.bak"
        # or "b.bamx" for file_ending="bam". Anchor to the real extension;
        # require at least one character before the dot (as ".+" did).
        suffix = "." + str(file_ending)
        new_files = [df for df in dir_files
                     if df.endswith(suffix) and len(df) > len(suffix)]
        if check:
            assert new_files, "no files left after filtering by file ending \"%s\"" %(file_ending)
        return sorted(new_files)
    else:
        return sorted(dir_files)
################################################################################
def shutil_copy_file(from_file, to_file):
    """
    Copy from_file to destination to_file.
    This will overwrite to_file (!).
    """
    from shutil import copyfile
    # Fail early with a clear message if the source is missing.
    assert os.path.exists(from_file), "given file %s does not exist" %(from_file)
    copyfile(from_file, to_file)
################################################################################
def get_filter_lists(list_f1_filter, list_f2_filter,
                     valid_filters_dic=False):
    """
    Check and return the peakhood extract filter ID lists.

    valid_filters_dic maps filter ID -> filter level:
        1 : transcript level filter
        2 : exon level filter
    When not supplied, the built-in mapping is used.  User-provided lists
    are validated (no duplicates, only known IDs); otherwise the default
    filter sets are returned.
    """
    if not valid_filters_dic:
        valid_filters_dic = {"TSC": 1,
                             "EIR": 2,
                             "ISRN": 2,
                             "ISR": 1,
                             "ISRFC" : 1,
                             "SEO" : 1,
                             "FUCO": 1,
                             "TCOV": 1,
                             "TSL": 1
                             }
    # First-level filters: validate the user list or fall back to default.
    if list_f1_filter:
        assert not list_found_duplicates(list_f1_filter), "--f1-filter list contains duplicates. Please provide each filter ID only once"
        for fid in list_f1_filter:
            assert fid in valid_filters_dic, "invalid --f1-filter ID given (%s)" %(fid)
        f1_filters = list(list_f1_filter)
    else:
        f1_filters = ['TSC']
    # Second-level filters: same treatment with a larger default set.
    if list_f2_filter:
        assert not list_found_duplicates(list_f2_filter), "--f2-filter list contains duplicates. Please provide each filter ID only once"
        for fid in list_f2_filter:
            assert fid in valid_filters_dic, "invalid --f2-filter ID given (%s)" %(fid)
        f2_filters = list(list_f2_filter)
    else:
        f2_filters = ['EIR', 'ISRN', 'ISR', 'ISRFC', 'SEO', 'FUCO', 'TCOV']
    return f1_filters, f2_filters
################################################################################
def get_tsl_score(tsl, gc_basic, ccds):
    """
    Convert a transcript support level (TSL) flag into a numeric score.

    Quality tags in (Ensembl) GTF:
    CCDS:
        Member of the consensus CDS gene set, confirming coding regions
        between ENSEMBL, UCSC, NCBI and HAVANA.
    basic:
        Identifies a subset of representative transcripts for each gene,
        prioritising full-length protein coding transcripts.

    An "(assigned to previous version)" annotation costs 2 points; the
    basic tag adds 3 and the CCDS tag adds 5.

    >>> tsl = "3 (assigned to previous version 5)"
    >>> get_tsl_score(tsl, False, False)
    14
    >>> tsl = "1"
    >>> get_tsl_score(tsl, True, True)
    32
    >>> tsl = "NA"
    >>> get_tsl_score(tsl, False, False)
    4
    """
    assert tsl, "given tsl empty"
    # Base score per TSL level; NA scores lowest.
    level_scores = {
        "1": 24,
        "2": 20,
        "3": 16,
        "4": 12,
        "5": 8,
        "NA": 4,
    }
    score = 0
    if re.search('assigned', tsl):
        # Entries like "3 (assigned to previous version 5)" score 2 less.
        score -= 2
        m = re.search('(.+) \(assigned', tsl)
        score += level_scores[m.group(1)]
    else:
        score += level_scores[tsl]
    if gc_basic:
        score += 3
    if ccds:
        score += 5
    return score
################################################################################
def bed_extract_sequences_from_2bit(in_bed, out_fa, in_2bit,
                                    lc_repeats=False,
                                    convert_to_rna=False):
    """
    Extract sequences from genome (provide genome .2bit file).
    twoBitToFa executable needs to be in PATH. Store extracted
    sequences in out_fa.

    convert_to_rna:
        If true, read in extracted sequences and convert to RNA.
    lc_repeats:
        If True, do not convert repeat regions to uppercase and output.

    >>> in_bed = "test_data/test_seq_extr.sites.bed"
    >>> tmp_2bit_fa = "test_data/test_seq_extr.sites.2bit.tmp.fa"
    >>> tmp_seq_fa = "test_data/test_seq_extr.sites.seq.tmp.fa"
    >>> exp_fa = "test_data/test_seq_extr.sites.exp.fa"
    >>> in_fa = "test_data/test_seq_extr.sequences.fa"
    >>> in_2bit = "test_data/test_seq_extr.sequences.2bit"
    >>> id2row_dic = bed_read_rows_into_dic(in_bed)
    >>> seqs_dic = read_fasta_into_dic(in_fa, dna=True)
    >>> id2seq_dic = extract_transcript_sequences(id2row_dic, seqs_dic, revcom=True)
    >>> fasta_output_dic(id2seq_dic, tmp_seq_fa)
    >>> bed_extract_sequences_from_2bit(in_bed, tmp_2bit_fa, in_2bit)
    >>> diff_two_files_identical(tmp_seq_fa, exp_fa)
    True
    >>> diff_two_files_identical(tmp_2bit_fa, exp_fa)
    True
    """
    # Check for twoBitToFa.
    assert is_tool("twoBitToFa"), "twoBitToFa not in PATH"
    # Build the twoBitToFa command; -noMask uppercases repeat regions
    # unless the caller asked to keep them lowercase.
    check_cmd = "twoBitToFa"
    if not lc_repeats:
        check_cmd += " -noMask"
    check_cmd += " -bed=" + in_bed + " " + in_2bit + " " + out_fa
    output = subprocess.getoutput(check_cmd)
    # twoBitToFa is silent on success, so any output means failure.
    error = False
    if output:
        error = True
    assert error == False, "twoBitToFa is complaining:\n%s\n%s" %(check_cmd, output)
    if convert_to_rna:
        # Read in tmp_fa into dictionary (this also converts sequences to RNA).
        seqs_dic = read_fasta_into_dic(out_fa)
        # Output RNA sequences.
        fasta_output_dic(seqs_dic, out_fa,
                         split=True)
################################################################################
def get_chromosome_lengths_from_2bit(in_2bit, out_lengths,
                                     std_chr_filter=False):
    """
    Get chromosome lengths from in_2bit .2bit file. Write lengths
    to out_lengths, with format:
    chr1	248956422
    chr10	133797422
    chr11	135086622
    ...
    Also return a dictionary with key=chr_id and value=chr_length.

    std_chr_filter:
        Filter / convert chromosome IDs with function check_convert_chr_id(),
        removing non-standard chromosomes, and convert IDs like 1,2,X,MT ..
        to chr1, chr2, chrX, chrM.
    """
    # Check for twoBitInfo.
    assert is_tool("twoBitInfo"), "twoBitInfo not in PATH"
    # Run twoBitInfo; the tool is silent on success, so any output means failure.
    check_cmd = "twoBitInfo " + in_2bit + " " + out_lengths
    output = subprocess.getoutput(check_cmd)
    error = False
    if output:
        error = True
    assert error == False, "twoBitInfo is complaining:\n%s\n%s" %(check_cmd, output)
    # Read the written lengths back into a dictionary.
    chr_len_dic = {}
    with open(out_lengths) as f:
        for line in f:
            # CLEANUP: removed unused "row = line.strip()" and the no-op
            # "f.closed" expression (the with-block already closes the file).
            cols = line.strip().split("\t")
            chr_id = cols[0]
            chr_l = int(cols[1])
            # Optionally normalize / filter chromosome IDs.
            if std_chr_filter:
                new_chr_id = check_convert_chr_id(chr_id)
                # If not standard chromosome ID or conversion failed, skip.
                if not new_chr_id:
                    continue
                else:
                    chr_id = new_chr_id
            assert chr_id not in chr_len_dic, "non-unique chromosome ID \"%s\" encountered in \"%s\"" %(chr_id, out_lengths)
            chr_len_dic[chr_id] = chr_l
    assert chr_len_dic, "chr_len_dic empty (\"%s\" empty? Chromosome IDs filter activated?)" %(out_lengths)
    return chr_len_dic
################################################################################
def gtf_get_transcript_lengths(in_gtf,
                               tr2exc_dic=None):
    """
    Get transcript lengths (= summed exon lengths, not the unspliced span!)
    from a GTF file.

    tr2exc_dic:
        Optionally provide a transcript ID -> exon count dictionary that is
        filled while reading.

    >>> in_gtf = "test_data/map_test_in.gtf"
    >>> gtf_get_transcript_lengths(in_gtf)
    {'ENST001': 2000, 'ENST002': 2000}
    """
    tr2len_dic = {}
    # GTF can be plain text or gzip-compressed (.gz suffix).
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        infos = cols[8]
        # Only exon features contribute to the spliced length.
        if feature != "exon":
            continue
        # Extract transcript ID from the attributes column.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        tr_id = m.group(1)
        # GTF coordinates are 1-based and inclusive on both ends.
        ex_len = feat_e - feat_s + 1
        tr2len_dic[tr_id] = tr2len_dic.get(tr_id, 0) + ex_len
        if tr2exc_dic is not None:
            tr2exc_dic[tr_id] = tr2exc_dic.get(tr_id, 0) + 1
    f.close()
    assert tr2len_dic, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (in_gtf)
    return tr2len_dic
################################################################################
def rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic,
                           id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic,
                           max_exb_dist=10,
                           exid2exnr_dic=False,
                           exid2trid_dic=False):
    """
    Remove exon border pairs from id2ids_dic if the sites are too far
    away from the matching exon borders (supporting the pair).
    Modifies id2ids_dic in place and returns it.

    id2ids_dic      : site ID -> list of paired site IDs
    id2exids_dic    : site ID -> list of exon IDs the site overlaps
    id2gen_se_dic   : site ID -> [genomic start, genomic end]
    id2gen_cp_dic   : site ID -> genomic center position
    exid2gen_se_dic : exon ID -> [genomic start, genomic end]
    max_exb_dist    : maximum allowed site-to-exon-border distance
    exid2exnr_dic   : exon ID -> exon number within its transcript (required)
    exid2trid_dic   : exon ID -> transcript ID (required)

    >>> id2ids_dic = {'id1': ['id2'], 'id2': ['id1']}
    >>> id2exids_dic = {'id1': ['t1_e1'], 'id2': ['t1_e2']}
    >>> id2gen_se_dic = {'id1': [1980, 1990], 'id2': [3005, 3025]}
    >>> id2gen_cp_dic = {'id1': 1985, 'id2': 3015}
    >>> exid2gen_se_dic = {'t1_e1': [1000, 2000], 't1_e2': [3000, 4000], 't1_e3': [5000, 6000]}
    >>> exid2exnr_dic = {'t1_e1': 1, 't1_e2': 2, 't1_e3' : 3}
    >>> exid2trid_dic = {'t1_e1': 't1', 't1_e2': 't1', 't1_e3': 't1'}
    >>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=10, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
    {'id1': ['id2'], 'id2': ['id1']}
    >>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=9, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
    {}
    >>> id2exids_dic = {'id1': ['t1_e1'], 'id2': ['t1_e3']}
    >>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=10, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
    {}
    >>> id2ids_dic = {'id1': ['id2', 'id3'], 'id2': ['id1'], 'id3': ['id1']}
    >>> id2exids_dic = {'id1': ['t1_e1'], 'id2': ['t1_e2'], 'id3': ['t1_e1']}
    >>> id2gen_se_dic = {'id1': [1980, 1990], 'id2': [3005, 3025], 'id2': [1970, 1980]}
    >>> id2gen_cp_dic = {'id1': 1985, 'id2': 3015, 'id1': 1975}
    >>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=10, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
    {'id1': ['id2'], 'id2': ['id1']}
    """
    assert exid2exnr_dic, "exid2exnr_dic empty"
    assert exid2trid_dic, "exid2trid_dic empty"
    rem_sids_list = []
    for sid1 in id2ids_dic:
        new_con_list = []
        exl1 = id2exids_dic[sid1]
        for sid2 in id2ids_dic[sid1]:
            exl2 = id2exids_dic[sid2]
            # Compare the two exon ID lists: a partner is kept when at least
            # one exon combination lies on neighboring exons of the same
            # transcript AND both sites are close enough to the shared border.
            for exid1 in exl1:
                exnr1 = exid2exnr_dic[exid1]
                trid1 = exid2trid_dic[exid1]
                for exid2 in exl2:
                    exnr2 = exid2exnr_dic[exid2]
                    trid2 = exid2trid_dic[exid2]
                    # Check distance of site to exon borders.
                    if trid1 == trid2 and abs(exnr1-exnr2) == 1:
                        # Orientation of the two sites.
                        cp_diff = id2gen_cp_dic[sid1] - id2gen_cp_dic[sid2]
                        sid1_us = False # sid1 upstream of sid2 ?
                        if cp_diff == 0:
                            assert False, "two exon border pair site IDs %s,%s with same genomic center positions encountered (%i)" %(sid1, sid2, id2gen_cp_dic[sid1])
                        elif cp_diff < 1:
                            # NOTE(review): cp_diff is an integer and the zero
                            # case was ruled out above, so "< 1" acts as "< 0".
                            sid1_us = True
                        # SID1 distance to exon okay?
                        # Upstream site is measured against its exon's end,
                        # downstream site against its exon's start.
                        sid1_check = False
                        if sid1_us:
                            # Distance of site and exon end.
                            end_dist = exid2gen_se_dic[exid1][1] - id2gen_se_dic[sid1][1]
                            if end_dist <= max_exb_dist:
                                sid1_check = True
                        else:
                            # Distance of site and exon start.
                            end_dist = id2gen_se_dic[sid1][0] - exid2gen_se_dic[exid1][0]
                            if end_dist <= max_exb_dist:
                                sid1_check = True
                        if not sid1_check:
                            continue
                        # SID2 distance to exon okay?
                        sid2_check = False
                        if sid1_us:
                            # Distance of site and exon start.
                            end_dist = id2gen_se_dic[sid2][0] - exid2gen_se_dic[exid2][0]
                            if end_dist <= max_exb_dist:
                                sid2_check = True
                        else:
                            # Distance of site and exon end.
                            end_dist = exid2gen_se_dic[exid2][1] - id2gen_se_dic[sid2][1]
                            if end_dist <= max_exb_dist:
                                sid2_check = True
                        if sid1_check and sid2_check:
                            # NOTE(review): sid2 can be appended once per
                            # qualifying exon combination, so duplicates are
                            # possible in new_con_list — confirm downstream
                            # code tolerates that.
                            new_con_list.append(sid2)
        if new_con_list:
            id2ids_dic[sid1] = new_con_list
        else:
            rem_sids_list.append(sid1)
    # Drop sites that lost all their partners.
    for rem_sid in rem_sids_list:
        del id2ids_dic[rem_sid]
    return id2ids_dic
################################################################################
def check_neighbor_ex_lists(exl1, exl2,
                            exid2exnr_dic=False,
                            exid2trid_dic=False):
    """
    Check whether two exon ID lists share a pair of neighboring exons,
    i.e. same transcript and exon numbers differing by exactly 1.

    Transcript IDs and exon numbers are taken from the provided lookup
    dictionaries when both are given, otherwise parsed from exon IDs of
    the form "<transcript>_e<number>".

    >>> exl1 = ["t1_e5", "t2_e4", "t3_e5"]
    >>> exl2 = ["t1_e5", "t2_e5"]
    >>> check_neighbor_ex_lists(exl1, exl2)
    True
    >>> exl1 = ["t1_e5", "t2_e4", "t4_e5"]
    >>> exl2 = ["t1_e5", "t2_e2"]
    >>> check_neighbor_ex_lists(exl1, exl2)
    False
    """
    use_dics = bool(exid2exnr_dic and exid2trid_dic)

    def _decode(exid):
        # Prefer the lookup tables; fall back to parsing the ID itself.
        if use_dics:
            return exid2trid_dic[exid], exid2exnr_dic[exid]
        m = re.search("(.+)_e(\d+)$", exid)
        return m.group(1), int(m.group(2))

    for exid1 in exl1:
        trid1, exnr1 = _decode(exid1)
        for exid2 in exl2:
            trid2, exnr2 = _decode(exid2)
            if trid1 == trid2 and abs(exnr1 - exnr2) == 1:
                return True
    return False
################################################################################
def rem_exb_pairs_no_neighbor_ex(id2ids_dic, id2exids_dic,
                                 exid2exnr_dic=False,
                                 exid2trid_dic=False):
    """
    Remove exon border pairs from id2ids_dic which are not
    on neighboring exons. Mutates id2ids_dic in place and returns it:
    connections without neighboring exons are dropped, and site IDs
    left without any connection are removed entirely.

    >>> id2ids_dic = {'id1': ['id2'], 'id2': ['id1', 'id3'], 'id3': ['id2']}
    >>> id2exids_dic = {'id1' : ['t1_e5', 't2_e4'], 'id2': ['t1_e5'], 'id3': ['t1_e6']}
    >>> rem_exb_pairs_no_neighbor_ex(id2ids_dic, id2exids_dic)
    {'id2': ['id3'], 'id3': ['id2']}
    """
    empty_sids = []
    for sid, partner_sids in id2ids_dic.items():
        # Keep only partners whose exon lists neighbor this site's exons.
        kept = [psid for psid in partner_sids
                if check_neighbor_ex_lists(id2exids_dic[sid],
                                           id2exids_dic[psid],
                                           exid2exnr_dic=exid2exnr_dic,
                                           exid2trid_dic=exid2trid_dic)]
        if kept:
            id2ids_dic[sid] = kept
        else:
            empty_sids.append(sid)
    # Remove sites that lost all their connections.
    for sid in empty_sids:
        del id2ids_dic[sid]
    return id2ids_dic
################################################################################
def extract_transcript_sequences(bed_dic, seq_dic,
ext_mode=1,
ext_lr=False,
revcom=False,
ids_dic=False,
out_bed=False,
out_bed_add_sc_dic=False,
full_hits_only=False):
"""
Given a dictionary with bed regions (region ID -> BED row) and a
sequence dictionary (Sequence ID -> sequence), extract the BED region
sequences and return in new dictionary (region ID -> region sequence).
ext_mode:
1: whole site
2: center position site
3: upstream end position site
ext_lr:
Optionally, extend regions by ext_lr nt (up- and downstream).
In case full extension is not possible, use maximum extension possible.
revcom:
if revcom=True and strand of bed_dic region is "-", return the reverse
complement of the region sequence.
ids_dic:
IDs to extract sequences for from bed_dic.
full_hits_only:
Set full_hits_only=True to only recover full hits.
out_bed:
Output BED file path to store regions for which sequences were
extracted in.
out_bed_add_sc_dic:
Region ID to score mapping, with score to be added to out_bed ID column
4, so column 4 ID changes from "id1" to "id1,5"
>>> seq_dic = {"T1" : "AAAACCCCGGGGTTTT", "T2" : "ATATACACAGAGCGCGCTCTGTGT"}
>>> bed_dic = {"S1" : "T1\\t4\\t8\\tS1\\t0\\t+", "S2" : "T2\\t6\\t8\\tS2\\t0\\t+"}
>>> extract_transcript_sequences(bed_dic, seq_dic, ext_lr=2)
{'S1': 'AACCCCGG', 'S2': 'ACACAG'}
>>> extract_transcript_sequences(bed_dic, seq_dic, ext_lr=5, full_hits_only=True)
{'S2': 'TATACACAGAGC'}
>>> bed_dic = {"S1" : "T1\\t4\\t8\\tS1\\t0\\t+", "S2" : "T2\\t6\\t8\\tS2\\t0\\t+"}
>>> extract_transcript_sequences(bed_dic, seq_dic, ext_lr=2, ext_mode=2)
{'S1': 'CCCCG', 'S2': 'CACAG'}
"""
id2seq_dic = {}
if out_bed:
OUT2BED = open(out_bed,"w")
# Process .bed regions.
for reg_id in bed_dic:
cols = bed_dic[reg_id].split("\t")
seq_id = cols[0]
reg_s = int(cols[1])
reg_e = int(cols[2])
reg_sc = cols[4]
reg_pol = cols[5]
if ids_dic:
if reg_id not in ids_dic:
continue
assert seq_id in seq_dic, "sequence ID \"%s\" not found in given sequence dictionary" %(seq_id)
seq = seq_dic[seq_id]
# Update region lengths.
if ext_mode == 1:
new_s = reg_s
new_e = reg_e
elif ext_mode == 2:
new_e = get_center_position(reg_s, reg_e)
new_s = new_e - 1
elif ext_mode == 3:
new_s = reg_s
new_e = reg_s + 1
if reg_pol == "-":
new_s = reg_e - 1
new_e = reg_e
else:
assert False, "invalid ext_mode set"
# Expected length.
exp_l = new_e - new_s
# Adjust if given start or end is out of bounds.
if new_s < 0:
new_s = 0
if new_e > len(seq):
new_e = len(seq)
# If region should be extended up- and downstream by ext_lr.
if ext_lr:
new_s = new_s - ext_lr
new_e = new_e + ext_lr
exp_l = new_e - new_s
# If start or end is out of bounds after extension.
if new_s < 0:
new_s = 0
if new_e > len(seq):
new_e = len(seq)
reg_seq = seq[new_s:new_e]
reg_l = len(reg_seq)
if full_hits_only:
if not reg_l == exp_l:
continue
if revcom:
if reg_pol == "-":
id2seq_dic[reg_id] = revcom_seq(reg_seq)
else:
id2seq_dic[reg_id] = reg_seq
else:
id2seq_dic[reg_id] = reg_seq
if out_bed:
if out_bed_add_sc_dic:
reg_id = reg_id + "," + str(out_bed_add_sc_dic[reg_id])
OUT2BED.write("%s\t%i\t%i\t%s\t%s\t%s\n" %(seq_id, new_s, new_e, reg_id, reg_sc, reg_pol))
if out_bed:
OUT2BED.close()
assert id2seq_dic, "no sequences extracted"
return id2seq_dic
################################################################################
def check_string_in_file(in_file, string):
    """
    Check whether a string is found inside a text file.
    Return True if found, else False.

    >>> in_file = "test_data/test_run.log"
    >>> check_string_in_file(in_file, "AssertionError")
    True
    >>> in_file = "test_data/empty_file"
    >>> check_string_in_file(in_file, "AssertionError")
    False
    """
    with open(in_file, "r") as f:
        for line in f:
            # Substring match is enough; stop at first hit.
            if string in line:
                return True
    return False
################################################################################
def revcom_seq(seq,
               upper=False,
               convert_to_rna=False):
    """
    Return reverse complement of seq. By default the result is DNA,
    preserving the case of the input characters.

    upper:
        Convert sequence to uppercase before complementing.
    convert_to_rna:
        Return the RNA reverse complement (T/t become U/u).

    >>> seq = "AAACAGatt"
    >>> revcom_seq(seq)
    'aatCTGTTT'
    >>> revcom_seq(seq, upper=True)
    'AATCTGTTT'
    >>> revcom_seq(seq, convert_to_rna=True)
    'aauCUGUUU'
    """
    assert seq, "given sequence empty"
    # Reverse, normalize to DNA (U -> T), optionally uppercase.
    rev = seq[::-1]
    if upper:
        rev = rev.upper().replace("U", "T")
    else:
        rev = rev.replace("U", "T").replace("u", "t")
    # Complement via translation table (RNA table if requested).
    if convert_to_rna:
        rev = rev.replace("T", "U").replace("t", "u")
        trans_table = str.maketrans("ACGUacgu", "UGCAugca")
    else:
        trans_table = str.maketrans("ACGTacgt", "TGCAtgca")
    return rev.translate(trans_table)
################################################################################
def fasta_output_dic(fasta_dic, fasta_out,
                     split=False,
                     out_ids_dic=False,
                     header_add_sc_dic=False,
                     to_upper=False,
                     split_size=60):
    """
    Output FASTA sequences dictionary (sequence_id -> sequence) to fasta_out.

    split:
        Split FASTA sequence for output to file
    split_size:
        Split size (row width)
    to_upper:
        Convert sequences to uppercase.
    out_ids_dic:
        IDs to output dictionary.
    header_add_sc_dic:
        ID to scoring mapping.
        Add a score to the header, so header format changes from "id1"
        to "id1,10"

    >>> fasta_dic = {'seq1': 'ACGTACGTACGTAC', 'seq2': 'ACGT'}
    >>> split_size = 4
    >>> fasta_exp = "test_data/test5.exp.fa"
    >>> fasta_out = "test_data/test5.tmp.fa"
    >>> fasta_output_dic(fasta_dic, fasta_out, split=True, split_size=split_size)
    >>> diff_two_files_identical(fasta_exp, fasta_out)
    True
    """
    assert fasta_dic, "given dictionary fasta_dic empty"
    with open(fasta_out, "w") as out_fa:
        for seq_id, seq in fasta_dic.items():
            if out_ids_dic and seq_id not in out_ids_dic:
                continue
            if to_upper:
                seq = seq.upper()
            header = seq_id
            if header_add_sc_dic:
                header = header + "," + str(header_add_sc_dic[seq_id])
            if split:
                # One header row, then sequence wrapped at split_size.
                out_fa.write(">%s\n" %(header))
                for i in range(0, len(seq), split_size):
                    out_fa.write("%s\n" %((seq[i:i+split_size])))
            else:
                out_fa.write(">%s\n%s\n" %(header, seq))
################################################################################
def read_fasta_into_dic(fasta_file,
                        seqs_dic=False,
                        ids_dic=False,
                        dna=False,
                        report=1,
                        all_uc=False,
                        empty_check=True,
                        id_check=True,
                        skip_data_id="set",
                        skip_n_seqs=True):
    """
    Read in FASTA sequences, store in dictionary and return dictionary
    (sequence ID -> sequence). FASTA file can be plain text or gzipped
    (watch out for .gz ending).

    seqs_dic:
        Existing dictionary to add sequences to (new one created if not set).
    ids_dic:
        If set, only read in sequences with IDs found in ids_dic.
    dna:
        If True convert sequences to DNA (U -> T), else to RNA (T -> U).
    all_uc:
        Convert sequences to uppercase.
    id_check:
        Assert that FASTA headers are unique.
    skip_n_seqs:
        Discard sequences containing N nucleotides.
    report:
        1: print a warning for each discarded N sequence.
        2: print the total count of discarded N sequences.

    >>> test_fasta = "test_data/test.fa"
    >>> read_fasta_into_dic(test_fasta)
    {'seq1': 'acguACGUacgu', 'seq2': 'ugcaUGCAugcaACGUacgu'}
    """
    if not seqs_dic:
        seqs_dic = {}
    seq_id = ""
    # Open FASTA either as .gz or as text file.
    # Raw strings fix deprecated escape sequences (e.g. "\.").
    if re.search(r".+\.gz$", fasta_file):
        f = gzip.open(fasta_file, 'rt')
    else:
        f = open(fasta_file, "r")
    for line in f:
        if re.search(r">.+", line):
            m = re.search(r">(.+)", line)
            seq_id = m.group(1)
            if id_check:
                assert seq_id not in seqs_dic, "non-unique FASTA header \"%s\" in \"%s\"" % (seq_id, fasta_file)
            if ids_dic:
                if seq_id in ids_dic:
                    seqs_dic[seq_id] = ""
            else:
                seqs_dic[seq_id] = ""
        elif re.search(r"[ACGTUN]+", line, re.I):
            m = re.search(r"([ACGTUN]+)", line, re.I)
            seq = m.group(1)
            # Only concatenate if header was accepted (ids_dic filter).
            if seq_id in seqs_dic:
                if dna:
                    # Convert to DNA, concatenate sequence.
                    seq = seq.replace("U","T").replace("u","t")
                else:
                    # Convert to RNA, concatenate sequence.
                    seq = seq.replace("T","U").replace("t","u")
                if all_uc:
                    seq = seq.upper()
                seqs_dic[seq_id] += seq
    f.close()
    # Check if sequences read in.
    if empty_check:
        assert seqs_dic, "no sequences read in (input FASTA file \"%s\" empty or mal-formatted?)" %(fasta_file)
    # If sequences with N nucleotides should be skipped.
    c_skipped_n_ids = 0
    if skip_n_seqs:
        del_ids = []
        for seq_id in seqs_dic:
            seq = seqs_dic[seq_id]
            if re.search(r"N", seq, re.I):
                if report == 1:
                    print("WARNING: sequence with seq_id \"%s\" in file \"%s\" contains N nucleotides. Discarding sequence ... " % (seq_id, fasta_file))
                c_skipped_n_ids += 1
                del_ids.append(seq_id)
        for seq_id in del_ids:
            del seqs_dic[seq_id]
        assert seqs_dic, "no sequences remaining after deleting N containing sequences (input FASTA file \"%s\")" %(fasta_file)
        if c_skipped_n_ids:
            if report == 2:
                print("# of N-containing %s regions discarded: %i" %(skip_data_id, c_skipped_n_ids))
    return seqs_dic
################################################################################
def get_seqs_dic_repeat_region_ratios(seqs_dic):
    """
    Given a dictionary of sequences, calculate the repeat region content /
    ratio for each site (lowercase characters count as repeat region).
    Return dictionary of repeat region ratios.

    >>> seqs_dic = {'s1' : "ACGUacgu", 's2' : "acnacnACNacn", 's3' : "A", 's4' : "u"}
    >>> get_seqs_dic_repeat_region_ratios(seqs_dic)
    {'s1': 0.5, 's2': 0.75, 's3': 0.0, 's4': 1.0}
    """
    assert seqs_dic, "seqs_dic empty"
    ratios_dic = {}
    for seq_id, seq in seqs_dic.items():
        # Fraction of lowercase (soft-masked repeat) characters.
        lc_count = sum(1 for nt in seq if nt.islower())
        ratios_dic[seq_id] = lc_count / len(seq)
    assert ratios_dic, "ratios_dic empty"
    return ratios_dic
################################################################################
def bed_get_chromosome_ids(bed_file):
    """
    Read in .bed file, return chromosome IDs (column 1 IDs).
    Return dic with chromosome ID -> count mapping.

    >>> test_file = "test_data/test6.bed"
    >>> bed_get_chromosome_ids(test_file)
    {'chr1': 2, 'chr2': 2, 'chr3': 1}
    """
    ids_dic = {}
    with open(bed_file) as f:
        for line in f:
            # Column 1 = chromosome / reference ID.
            chr_id = line.strip().split("\t")[0]
            ids_dic[chr_id] = ids_dic.get(chr_id, 0) + 1
    assert ids_dic, "No chromosome IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (bed_file)
    return ids_dic
################################################################################
def bed_get_region_lengths(bed_file):
    """
    Read in .bed file, store and return region lengths in dictionary.
    key   : region ID (.bed col4)
    value : region length (.bed col3-col2)

    >>> test_file = "test_data/test4.bed"
    >>> bed_get_region_lengths(test_file)
    {'tr1_e1': 14, 'tr2_e1': 30}
    """
    id2len_dic = {}
    with open(bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            assert site_id not in id2len_dic, "column 4 IDs not unique in given .bed file \"%s\"" %(bed_file)
            # BED coordinates are 0-based half-open, so length = end - start.
            id2len_dic[site_id] = site_e - site_s
    # BUG FIX: empty-input message referenced undefined name "in_bed",
    # which raised NameError instead of the intended AssertionError.
    assert id2len_dic, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (bed_file)
    return id2len_dic
################################################################################
def bed_convert_transcript_to_genomic_sites(in_bed, in_gtf, out_bed,
                                            site2hitc_dic=None,
                                            out_folder=False):
    """
    Convert in_bed .bed file with transcript sites into genomic coordinates
    sites file. in_bed column 1 transcript IDs have to be present in
    in_gtf GTF file, from which genomic exon coordinates of the transcript
    will get extracted.

    Dependencies:
    bedtools (tested with 2.29.0)
    gzip

    site2hitc_dic:
        A site2hitc_dic can be given, where site ID to hit count will be stored
        for usage outside the function.
    Output:
        By default output to out_bed file, using id_p1, id_p2 IDs.
        If out_folder=True, use out_bed name as folder name.
        In this case, output these files to folder:
        exon_regions_genome.bed
        exon_regions_transcript.bed
        unique_hits.bed
        split_hits.bed
        all_hits.bed

    >>> test_gtf = "test_data/test_tr2gen.gtf"
    >>> test_in_bed = "test_data/test_tr2gen.bed"
    >>> test_out_exp_bed = "test_data/test_tr2gen.exp.bed"
    >>> test_out_tmp_bed = "test_data/test_tr2gen.tmp.bed"
    >>> bed_convert_transcript_to_genomic_sites(test_in_bed, test_gtf, test_out_tmp_bed)
    >>> diff_two_files_identical(test_out_exp_bed, test_out_tmp_bed)
    True
    >>> test_out = "test_data/tr2gen_tmp_out"
    >>> test_out_tmp_bed = "test_data/tr2gen_tmp_out/all_hits.bed"
    >>> bed_convert_transcript_to_genomic_sites(test_in_bed, test_gtf, test_out, out_folder=True)
    >>> diff_two_files_identical(test_out_exp_bed, test_out_tmp_bed)
    True
    """
    # Generate unique .tmp file names (allows concurrent runs).
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    random_id = uuid.uuid1()
    tmp_out = str(random_id) + ".tmp.out"
    # Output files if output_folder=True.
    if out_folder:
        if not os.path.exists(out_bed):
            os.makedirs(out_bed)
        out_exon_regions_genome_bed = out_bed + "/" + "exon_regions_genome.bed"
        out_exon_regions_transcript_bed = out_bed + "/" + "exon_regions_transcript.bed"
        out_unique_hits_bed = out_bed + "/" + "unique_hits.bed"
        out_split_hits_bed = out_bed + "/" + "split_hits.bed"
        out_all_hits_bed = out_bed + "/" + "all_hits.bed"
    # Transcript IDs dic (in_bed column 1 holds transcript IDs).
    tr_ids_dic = bed_get_chromosome_ids(in_bed)
    # Extract transcript exon regions (genomic coordinates) from GTF as BED.
    gtf_extract_exon_bed(in_gtf, tmp_bed, tr_ids_dic=tr_ids_dic)
    if out_folder:
        # BUG FIX: tmp_bed holds GENOMIC exon coordinates at this point,
        # so copy it to the genome output file (was swapped with the
        # transcript coordinates file before).
        make_file_copy(tmp_bed, out_exon_regions_genome_bed)
    # Get exon region lengths (also asserts exon IDs are unique).
    exid2len_dic = bed_get_region_lengths(tmp_bed)
    # Get exon numbers for each transcript.
    tr_exc_dic = bed_get_transcript_exon_numbers(tmp_bed)
    # Read in exon region stats.
    id2chr_dic = {}
    id2s_dic = {}
    id2e_dic = {}
    id2pol_dic = {}
    exid2trid_dic = {}
    with open(tmp_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            chr_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_pol = cols[5]
            id2chr_dic[site_id] = chr_id
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            id2pol_dic[site_id] = site_pol
            # Exon IDs expected in format: transcriptid_e<exon number>.
            m = re.search(r"(.+)_e\d", site_id)
            assert m, "site ID \"%s\" missing added _e exon number" %(site_id)
            exid2trid_dic[site_id] = m.group(1)
    # Overwrite tmp_bed with exon regions in transcript coordinates.
    OUTBED = open(tmp_bed, "w")
    for tr_id in tr_exc_dic:
        ex_c = tr_exc_dic[tr_id]
        new_s = 0
        for i in range(ex_c):
            i += 1
            ex_id = tr_id + "_e" + str(i)
            gen_s = id2s_dic[ex_id]
            gen_e = id2e_dic[ex_id]
            ex_len = gen_e - gen_s
            tr_s = new_s
            tr_e = new_s + ex_len
            OUTBED.write("%s\t%i\t%i\t%s\t0\t+\n" % (tr_id,tr_s,tr_e,ex_id))
            new_s = tr_e
    OUTBED.close()
    if out_folder:
        # BUG FIX: tmp_bed now holds TRANSCRIPT exon coordinates, so copy
        # it to the transcript output file (was swapped before).
        make_file_copy(tmp_bed, out_exon_regions_transcript_bed)
    # Overlap in_bed with tmp_bed (bedtools intersect).
    params = "-wb"
    intersect_bed_files(in_bed, tmp_bed, params, tmp_out,
                        sorted_out=True)
    # Read in transcript site overlaps with transcript exon regions.
    site2c_dic = {}
    # Dictionaries for later outputting unique + split hits separately.
    siteid2pol_dic = {}
    siteid2sc_dic = {}
    partid2chrse_dic = {}
    with open(tmp_out) as f:
        for line in f:
            cols = line.strip().split("\t")
            part_s = int(cols[1])
            part_e = int(cols[2])
            site_id = cols[3]
            site_sc = cols[4]
            ex_s = int(cols[7])
            ex_id = cols[9]
            ex_pol = id2pol_dic[ex_id]
            siteid2pol_dic[site_id] = ex_pol
            siteid2sc_dic[site_id] = site_sc
            # Count hit parts per site (a site split over n exons has n parts).
            site2c_dic[site_id] = site2c_dic.get(site_id, 0) + 1
            hit_c = site2c_dic[site_id]
            # Calculate genomic hit coordinates.
            # Plus strand case.
            gen_s = id2s_dic[ex_id] + part_s - ex_s
            gen_e = id2s_dic[ex_id] + part_e - ex_s
            # Minus strand case.
            if ex_pol == "-":
                gen_s = id2e_dic[ex_id] - part_e + ex_s
                gen_e = id2e_dic[ex_id] - part_s + ex_s
            # Store chr/start/end for each part ID.
            part_id = site_id + "_p" + str(hit_c)
            partid2chrse_dic[part_id] = "%s\t%i\t%i" %(id2chr_dic[ex_id],gen_s,gen_e)
    # Produce seperate output files for unique + split hits.
    all_hits_bed = out_bed
    if out_folder:
        all_hits_bed = out_all_hits_bed
    ALLBED = open(all_hits_bed, "w")
    if out_folder:
        UNIBED = open(out_unique_hits_bed, "w")
        SPLBED = open(out_split_hits_bed, "w")
        for site_id in site2c_dic:
            hit_c = site2c_dic[site_id]
            if site2hitc_dic is not None:
                site2hitc_dic[site_id] = hit_c
            site_pol = siteid2pol_dic[site_id]
            site_sc = siteid2sc_dic[site_id]
            # For unique hit use site ID, for split hits use part IDs.
            if hit_c == 1:
                # Unique hits.
                part_id = site_id + "_p1"
                UNIBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],site_id,site_sc,site_pol))
            else:
                # Split hits.
                for i in range(hit_c):
                    i += 1
                    part_id = site_id + "_p" + str(i)
                    SPLBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],part_id,site_sc,site_pol))
        # BUG FIX: close unique + split hit files (were left open before).
        UNIBED.close()
        SPLBED.close()
    # Output all hits.
    for site_id in site2c_dic:
        hit_c = site2c_dic[site_id]
        if site2hitc_dic is not None:
            site2hitc_dic[site_id] = hit_c
        site_pol = siteid2pol_dic[site_id]
        site_sc = siteid2sc_dic[site_id]
        # For unique hit use site ID, for split hits use part IDs.
        if hit_c == 1:
            # Unique hits.
            part_id = site_id + "_p1"
            ALLBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],site_id,site_sc,site_pol))
        else:
            # Split hits.
            for i in range(hit_c):
                i += 1
                part_id = site_id + "_p" + str(i)
                ALLBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],part_id,site_sc,site_pol))
    # BUG FIX: close all-hits file (was left open before).
    ALLBED.close()
    # Delete tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    if os.path.exists(tmp_out):
        os.remove(tmp_out)
################################################################################
def move_rename_file(in_file, out_file):
    """
    Move / rename in_file to out_file (via shell mv).
    Any output from mv is treated as an error.
    """
    check_cmd = "mv " + in_file + " " + out_file
    assert in_file != out_file, "mv does not like to mv file into same file (%s)" %(check_cmd)
    output = subprocess.getoutput(check_cmd)
    # mv is silent on success, so any output signals a problem.
    assert not output, "mv did not like your input (in_file: %s, out_file: %s):\n%s" %(in_file, out_file, output)
################################################################################
def touch_file(in_file):
    """
    Create an empty file (via shell touch). Asserts the file does not
    exist yet and that touch produced no output (i.e. no error).
    """
    assert not os.path.exists(in_file), "file %s to create already exists" %(in_file)
    # BUG FIX: command used undefined name "file" instead of "in_file",
    # so every call raised NameError.
    check_cmd = "touch " + in_file
    output = subprocess.getoutput(check_cmd)
    error = False
    if output:
        error = True
    assert error == False, "touch did not like your input (in_file: %s):\n%s" %(in_file, output)
################################################################################
def create_empty_file(in_file, check=True):
    """
    Create an empty file.

    check:
        If False do not complain if file exists, but overwrite the file.
    """
    if check:
        assert not os.path.exists(in_file), "file %s to create already exists" %(in_file)
    # Opening in write mode truncates / creates the file.
    with open(in_file, "w"):
        pass
    if check:
        assert os.path.exists(in_file), "created file %s not found" %(in_file)
################################################################################
def read_settings_into_dic(settings_file,
                           check=True,
                           val_col2=False):
    """
    Read settings file content into dictionary.
    Each row expected to have following format:
    setting_id<tab>setting_value
    Dictionary format: str(col1) -> str(col2)

    val_col2:
        If set, take the value from column 3 instead of column 2.
    check:
        If set, assert at least one setting was read in.

    >>> test_in = "test_data/test_settings.out"
    >>> read_settings_into_dic(test_in)
    {'peyote': '20.5', 'china_white': '43.1', 'bolivian_marching_powder': '1000.0'}
    """
    assert os.path.isfile(settings_file), "file %s does not exist" %(settings_file)
    set_dic = {}
    with open(settings_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            settings_id = cols[0]
            settings_val = cols[2] if val_col2 else cols[1]
            # Settings IDs have to be unique.
            assert settings_id not in set_dic, "settings ID %s appears > 1 in given settings file" %(settings_id)
            set_dic[settings_id] = settings_val
    if check:
        assert set_dic, "set_dic empty (nothing read in?)"
    return set_dic
################################################################################
def get_transcript_sequences_from_gtf(in_gtf, in_2bit,
                                      lc_repeats=False,
                                      correct_min_ex_order=False,
                                      tr2exc_dic=False,
                                      tr_ids_dic=False,
                                      tmp_out_folder=False):
    """
    Get spliced transcript sequences based on in_gtf annotations. For
    transcripts with > 1 exon, concatenate the exon sequences to build
    the transcript sequence. If one exon is missing / not extracted or
    if extracted lengths don't fit, the transcript sequence will be
    skipped / not output.
    Return dictionary with transcript_id -> sequence mapping.

    in_gtf:
        GTF file with exon annotations (passed to gtf_extract_exon_bed()).
    in_2bit:
        Genome .2bit file to extract exon sequences from
        (passed to bed_extract_sequences_from_2bit()).
    lc_repeats:
        Passed through to bed_extract_sequences_from_2bit();
        presumably keeps repeat regions lowercase — confirm in that helper.
    correct_min_ex_order:
        Set True if minus strand exon order should be corrected.
    tr2exc_dic:
        Transcript ID to exon count mapping.
    tr_ids_dic:
        Defines transcript IDs for which sequence should be extracted.
    tmp_out_folder:
        Folder to write the temporary .bed / .fa files into
        (current working directory if not set).
    """
    # Generate .tmp files with unique (uuid-based) names.
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    random_id = uuid.uuid1()
    tmp_fa = str(random_id) + ".tmp.fa"
    if tmp_out_folder:
        tmp_bed = tmp_out_folder + "/" + tmp_bed
        tmp_fa = tmp_out_folder + "/" + tmp_fa
    # Transcript sequences dic.
    tr_seqs_dic = {}
    # Extract transcript exon regions from GTF and store as BED
    # (exon IDs expected in transcriptid_e<nr> format).
    gtf_extract_exon_bed(in_gtf, tmp_bed,
                         correct_min_ex_order=correct_min_ex_order,
                         tr2exc_dic=tr2exc_dic,
                         tr_ids_dic=tr_ids_dic)
    # Extract exon region sequences from .2bit.
    bed_extract_sequences_from_2bit(tmp_bed, tmp_fa, in_2bit,
                                    lc_repeats=lc_repeats)
    # Get transcript lengths from tmp_bed for comparison.
    tr_len_dic = bed_get_transcript_lengths_from_exon_regions(tmp_bed)
    # Get exon numbers for each transcript.
    tr_exc_dic = bed_get_transcript_exon_numbers(tmp_bed)
    # Read in sequences (keep N-containing ones, checked by length below).
    exon_seqs_dic = read_fasta_into_dic(tmp_fa,
                                        skip_n_seqs=False)
    # Concatenate exon region sequences (exon 1, 2, ... in order).
    for tr_id in tr_exc_dic:
        ex_c = tr_exc_dic[tr_id]
        for i in range(ex_c):
            i += 1
            ex_id = tr_id + "_e" + str(i)
            if ex_id in exon_seqs_dic:
                ex_seq = exon_seqs_dic[ex_id]
                if tr_id not in tr_seqs_dic:
                    tr_seqs_dic[tr_id] = ex_seq
                else:
                    tr_seqs_dic[tr_id] += ex_seq
            else:
                # Missing exon sequence: drop the whole (partial) transcript
                # and move on to the next transcript.
                print("WARNING: no sequence extracted for exon ID \"%s\". Skipping \"%s\" .. " %(ex_id, tr_id))
                if tr_id in tr_seqs_dic:
                    del tr_seqs_dic[tr_id]
                break
    # Checks: every assembled sequence must match its expected BED length.
    assert tr_seqs_dic, "tr_seqs_dic empty (no FASTA sequences extracted?)"
    for tr_id in tr_seqs_dic:
        tr_len = len(tr_seqs_dic[tr_id])
        exp_len = tr_len_dic[tr_id]
        assert tr_len == exp_len, "BED transcript length != FASTA transcript length for \"%s\"" %(tr_id)
    # Delete tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    if os.path.exists(tmp_fa):
        os.remove(tmp_fa)
    # Return transcript sequences dic constructed from exon sequences.
    return tr_seqs_dic
################################################################################
def concatenate_files(file1, file2):
    """
    Add file 1 content to file 2 (using cat).
    Any output from cat is treated as an error.
    """
    assert os.path.isfile(file1), "file %s does not exist" %(file1)
    assert os.path.isfile(file2), "file %s does not exist" %(file2)
    check_cmd = "cat " + file1 + " >> " + file2
    output = subprocess.getoutput(check_cmd)
    # cat is silent on success, so any output signals a problem.
    assert not output, "cat did not like your input (file 1 %s cat to file 2 %s):\n%s" %(file1, file2, output)
################################################################################
def koma_sepp(n):
    """
    Take input integer n and return comma-separated string, separating
    1000s.

    >>> koma_sepp(131032047)
    '131,032,047'
    >>> koma_sepp(18781)
    '18,781'
    >>> koma_sepp(666)
    '666'
    """
    # f-string with "," format spec inserts thousands separators.
    return f"{n:,}"
################################################################################
def bed_get_transcript_exon_numbers(in_bed):
    """
    Get number of exons for each transcript from in_bed BED file with
    transcript exon regions, with ID format:
    transcriptid_e1 (exon 1), transcriptid_e2 (exon 2)
    This is the output format from gtf_extract_exon_bed(), so both can
    be used in combination.

    >>> in_bed = "test_data/test6.bed"
    >>> bed_get_transcript_exon_numbers(in_bed)
    {'ENST1': 2, 'ENST2': 2, 'ENST3': 1}
    """
    tr_exc_dic = {}
    # Raw string fixes deprecated "\d" escape; compile once outside the loop.
    exid_pat = re.compile(r"(.+)_e\d")
    with open(in_bed) as f:
        for line in f:
            site_id = line.strip().split("\t")[3]
            m = exid_pat.search(site_id)
            assert m, "site ID \"%s\" missing added _e exon number" %(site_id)
            tr_id = m.group(1)
            tr_exc_dic[tr_id] = tr_exc_dic.get(tr_id, 0) + 1
    assert tr_exc_dic, "nothing was read in (\"%s\" empty or malformatted?)" %(in_bed)
    return tr_exc_dic
################################################################################
def bed_get_transcript_lengths_from_exon_regions(in_bed):
    """
    Get spliced transcript lengths from in_bed BED file with transcript
    exon regions, with ID format:
    transcriptid_e1 (exon 1), transcriptid_e2 (exon 2)
    This is the output format from gtf_extract_exon_bed(), so both can
    be used in combination.

    >>> in_bed = "test_data/test6.bed"
    >>> bed_get_transcript_lengths_from_exon_regions(in_bed)
    {'ENST1': 4000, 'ENST2': 1500, 'ENST3': 2500}
    """
    tr_len_dic = {}
    # Raw string fixes deprecated "\d" escape; compile once outside the loop.
    exid_pat = re.compile(r"(.+)_e\d")
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            m = exid_pat.search(site_id)
            assert m, "site ID \"%s\" missing added _e exon number" %(site_id)
            tr_id = m.group(1)
            # Sum up exon lengths per transcript (BED: end - start).
            tr_len_dic[tr_id] = tr_len_dic.get(tr_id, 0) + (site_e - site_s)
    assert tr_len_dic, "nothing was read in (\"%s\" empty or malformatted?)" %(in_bed)
    return tr_len_dic
################################################################################
def make_file_copy(in_file, out_file,
                   delete_in=False):
    """
    Make a file copy by copying in_file to out_file (using cat).

    delete_in:
        If set, remove in_file after copying.
    """
    check_cmd = "cat " + in_file + " > " + out_file
    assert in_file != out_file, "cat does not like to cat file into same file (%s)" %(check_cmd)
    output = subprocess.getoutput(check_cmd)
    # cat is silent on success, so any output signals a problem.
    assert not output, "cat did not like your input (in_file: %s, out_file: %s):\n%s" %(in_file, out_file, output)
    if delete_in and os.path.exists(in_file):
        os.remove(in_file)
################################################################################
def bed_read_reg_str_into_dic(in_bed):
    """
    Read BED rows into dictionary of region strings.
    mapping: 'chr1,10,20,+' -> [reg_id].

    >>> test_bed = "test_data/test3.bed"
    >>> bed_read_reg_str_into_dic(test_bed)
    {'chr1,10,20,+': ['CLIP1'], 'chr1,30,45,-': ['CLIP2']}
    """
    reg_str_dic = {}
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            # Region key: chromosome, start, end, strand.
            reg_str = "%s,%s,%s,%s" %(cols[0], cols[1], cols[2], cols[5])
            # Collect column-4 IDs sharing the same region.
            reg_str_dic.setdefault(reg_str, []).append(cols[3])
    assert reg_str_dic, "reg_str_dic empty"
    return reg_str_dic
################################################################################
def bed_read_rows_into_dic(in_bed,
                           new_stem_id="CLIP",
                           max_len=None,
                           sc_thr=None,
                           rev_filter=False,
                           new_ids=False,
                           id2sc_dic=None,
                           id2len_dic=None,
                           to_list=False,
                           id2gen_se_dic=None,
                           check_chr_id_format=True,
                           int_whole_nr=True,
                           remove_id_count=False,
                           filt_stats_dic=None):
    """
    Read in .bed file rows into dictionary.
    Mapping is region ID -> bed row.
    id2sc_dic:
        Store column 5 scores for each site ID.
    id2len_dic:
        Store site ID -> site length.
    id2gen_se_dic:
        Store site ID -> [start, end] (genomic coordinates, as ints).
    check_chr_id_format:
        If set check and filter chromosome IDs (via check_convert_chr_id()).
    new_ids:
        If True, generate new IDs (new_stem_id + "_" + row number).
    new_stem_id:
        Stem for the newly generated IDs (only used with new_ids=True).
    max_len:
        If set, filter out sites longer than max_len.
    sc_thr:
        If set, filter out sites with score < sc_thr (or > sc_thr if
        rev_filter=True).
    int_whole_nr:
        If set, output whole-number scores as ints (e.g. 1.0 -> 1).
    to_list:
        Store BED region column values as list.
    remove_id_count:
        If site IDs have format like site_id,count, remove count from ID before
        storing.
    filt_stats_dic:
        If given, store filter statistics (counts of read, output, and
        filtered rows) for usage outside the function.
    >>> test_bed = "test_data/test3.bed"
    >>> bed_read_rows_into_dic(test_bed)
    {'CLIP1': 'chr1\\t10\\t20\\tCLIP1\\t0\\t+', 'CLIP2': 'chr1\\t30\\t45\\tCLIP2\\t0\\t-'}
    """
    id2row_dic = {}
    # Filter / bookkeeping counters.
    c_read = 0
    c_chr_filt = 0
    c_max_len = 0
    c_sc_thr = 0
    c_out = 0
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            chr_id = cols[0]
            site_s = cols[1]
            site_e = cols[2]
            site_id = cols[3]
            site_sc = float(cols[4])
            site_pol = cols[5]
            site_l = int(site_e) - int(site_s)
            # NOTE(review): id2gen_se_dic is keyed by the ORIGINAL column-4
            # ID (before remove_id_count / new_ids renaming below) and is
            # filled for ALL rows, including filtered ones — confirm callers
            # expect that.
            if id2gen_se_dic is not None:
                id2gen_se_dic[site_id] = [int(site_s), int(site_e)]
            if remove_id_count:
                m = re.search("(.+),\d", site_id)
                assert m, "remove_id_count True but side ID %s does not contain count" %(site_id)
                site_id = m.group(1)
            # ID count.
            c_read += 1
            # Apply various filters.
            cont = False
            if max_len is not None:
                if site_l > max_len:
                    cont = True
                    c_max_len += 1
            if sc_thr is not None:
                # rev_filter inverts the score filter direction
                # (keep low-scoring instead of high-scoring sites).
                if rev_filter:
                    if site_sc > sc_thr:
                        c_sc_thr += 1
                        cont = True
                else:
                    if site_sc < sc_thr:
                        c_sc_thr += 1
                        cont = True
            if check_chr_id_format:
                new_chr_id = check_convert_chr_id(chr_id)
                if not new_chr_id:
                    c_chr_filt += 1
                    cont = True
                else:
                    chr_id = new_chr_id
            if cont:
                continue
            if not new_ids:
                assert site_id not in id2row_dic, "non-unique site ID (\"%s\") found in \"%s\". Please set --new-site-id or provide unique column 4 --in site IDs" %(site_id, in_bed)
            else:
                # NOTE(review): new IDs are numbered by c_read (which also
                # counts filtered rows), so numbering can have gaps.
                if new_stem_id:
                    site_id = new_stem_id + "_" + str(c_read)
                else:
                    site_id = "CLIP_" + str(c_read)
            if id2sc_dic is not None:
                id2sc_dic[site_id] = site_sc
            if id2len_dic is not None:
                id2len_dic[site_id] = site_l
            if int_whole_nr:
                # Output whole-number scores without decimal point.
                if not site_sc % 1:
                    site_sc = int(site_sc)
            c_out += 1
            row = "%s\t%s\t%s\t%s\t%s\t%s" %(chr_id, site_s, site_e, site_id, str(site_sc), site_pol)
            if to_list:
                id2row_dic[site_id] = [chr_id, int(site_s), int(site_e), site_id, str(site_sc), site_pol]
            else:
                id2row_dic[site_id] = row
    f.closed
    # Store filter statistics for usage outside the function.
    if filt_stats_dic is not None:
        filt_stats_dic["max_len"] = c_max_len
        filt_stats_dic["sc_thr"] = c_sc_thr
        filt_stats_dic["chr_filt"] = c_chr_filt
        filt_stats_dic["c_in"] = c_read
        filt_stats_dic["c_out"] = c_out
    return id2row_dic
################################################################################
def get_col_count(in_file):
    """
    Count number of columns (separated by TAB) in first row and return
    number. Return 0 for an empty file.

    >>> in_file = "test_data/test1.bed"
    >>> get_col_count(in_file)
    6
    >>> in_file = "test_data/tr_list.txt"
    >>> get_col_count(in_file)
    1
    >>> in_file = "test_data/empty_file"
    >>> get_col_count(in_file)
    0
    """
    with open(in_file) as f:
        # Only the first row is inspected.
        for line in f:
            return len(line.strip().split("\t"))
    return 0
################################################################################
def bed_write_row_dic_into_file(id2row_dic, out_bed,
                                ext_mode=1,
                                ext_lr=0,
                                id2sc_dic=None,
                                zero_scores=False,
                                id2filt_dic=False,
                                chr_len_dic=False,
                                out_bed_add_sc_dic=False,
                                id2out_dic=False):
    """
    Write .bed row dictionary (column 4 ID as key, .bed row as string)
    into .bed file.
    Example dictionary:
    {'reg1_e1': 'chr1\t1000\t1100\treg1_e1\t0\t+', ... }
    ext_mode:
        1: whole site
        2: center position site
        3: upstream end position site
    ext_lr:
        Extend site (defined by ext_mode) on both sides.
    zero_scores:
        Set to output zero scores (BED column 5), instead of stored scores.
    id2out_dic:
        IDs dictionary for which to output regions.
    id2filt_dic:
        IDs dictionary for which to NOT output regions.
    chr_len_dic:
        Chromosome ID to chromosome length dic for length checking.
    out_bed_add_sc_dic:
        Site ID to score mapping, with score to be added to out_bed ID column
        4, so column 4 ID changes from "id1" to "id1,5"
    """
    assert id2row_dic, "given id2row_dic empty"
    OUTBED = open(out_bed, "w")
    c_out = 0
    for site_id in id2row_dic:
        if id2out_dic:
            if site_id not in id2out_dic:
                continue
        if id2filt_dic:
            if site_id in id2filt_dic:
                continue
        c_out += 1
        cols = id2row_dic[site_id].split("\t")
        chr_id = cols[0]
        reg_s = int(cols[1])
        reg_e = int(cols[2])
        reg_id = cols[3]
        reg_sc = cols[4]
        reg_pol = cols[5]
        # scores to zero needed for twoBitToFa extraction.
        if zero_scores:
            reg_sc = "0"
        # Update region lengths.
        if ext_mode == 1:
            new_s = reg_s
            new_e = reg_e
        elif ext_mode == 2:
            new_e = get_center_position(reg_s, reg_e)
            new_s = new_e - 1
        elif ext_mode == 3:
            new_s = reg_s
            new_e = reg_s + 1
            if reg_pol == "-":
                new_s = reg_e - 1
                new_e = reg_e
        else:
            assert False, "invalid ext_mode set"
        if ext_lr:
            new_s = new_s - ext_lr
            new_e = new_e + ext_lr
        # Clamp coordinates to valid range.
        if new_s < 0:
            new_s = 0
        if chr_len_dic:
            # BUG FIX: message lacked a %s placeholder, so a failing assert
            # raised TypeError instead of showing the chromosome ID.
            assert chr_id in chr_len_dic, "chromosome ID %s not in chr_len_dic (--gen file)" %(chr_id)
            if new_e > chr_len_dic[chr_id]:
                new_e = chr_len_dic[chr_id]
        if out_bed_add_sc_dic:
            reg_id += "," + str(out_bed_add_sc_dic[reg_id])
        OUTBED.write("%s\t%s\t%s\t%s\t%s\t%s\n" %(chr_id, str(new_s), str(new_e), reg_id, reg_sc, reg_pol))
    OUTBED.close()
    assert c_out, "nothing was output"
################################################################################
def calc_site_distances(sites_list, id2cp_dic):
    """
    Compute pairwise center-position distances for all unordered site pairs.

    sites_list:
        List with site IDs on reference for which to calculate distances.
    id2cp_dic:
        site_id -> center position (1-based) on reference.

    Returns dictionary: "site_id_a,site_id_b" -> absolute distance.

    >>> sites_list = ["sid1", "sid2", "sid3"]
    >>> id2cp_dic = {"sid1": 100, "sid2": 150, "sid3": 300}
    >>> calc_site_distances(sites_list, id2cp_dic)
    {'sid1,sid2': 50, 'sid1,sid3': 200, 'sid2,sid3': 150}
    """
    done_pairs = set()
    pair2dist_dic = {}
    for id_a in sites_list:
        for id_b in sites_list:
            if id_a == id_b:
                continue
            key_ab = id_a + "," + id_b
            key_ba = id_b + "," + id_a
            # Process each unordered pair only once.
            if key_ab in done_pairs or key_ba in done_pairs:
                continue
            pair2dist_dic[key_ab] = abs(id2cp_dic[id_a] - id2cp_dic[id_b])
            done_pairs.add(key_ab)
            done_pairs.add(key_ba)
    assert pair2dist_dic, "sids2dist_dic empty"
    return pair2dist_dic
################################################################################
def calc_full_site_distances(sites_list, id2coords_dic):
    """
    Calculate distances between site starts/ends (nearest end distance),
    instead of center position distances like done in calc_site_distances().

    sites_list:
        List of site IDs to calculate distances for.
    id2coords_dic:
        site_id -> [site_start, site_end] on reference.

    Returns dictionary: "site_id_a;site_id_b" -> end-to-end distance.

    >>> sites_list = ["sid1", "sid2", "sid3", "sid4"]
    >>> id2coords_dic = {"sid1": [80,120], "sid2": [100,110], "sid3": [110,130], "sid4": [150,170]}
    >>> calc_full_site_distances(sites_list, id2coords_dic)
    {'sid1;sid2': 0, 'sid1;sid3': 0, 'sid1;sid4': 30, 'sid2;sid3': 0, 'sid2;sid4': 40, 'sid3;sid4': 20}
    """
    done_pairs = set()
    pair2dist_dic = {}
    for id_a in sites_list:
        for id_b in sites_list:
            if id_a == id_b:
                continue
            key_ab = id_a + ";" + id_b
            key_ba = id_b + ";" + id_a
            # Process each unordered pair only once.
            if key_ab in done_pairs or key_ba in done_pairs:
                continue
            a_s = id2coords_dic[id_a][0]
            a_e = id2coords_dic[id_a][1]
            b_s = id2coords_dic[id_b][0]
            b_e = id2coords_dic[id_b][1]
            pair2dist_dic[key_ab] = get_site_ends_distance(a_s, a_e, b_s, b_e)
            done_pairs.add(key_ab)
            done_pairs.add(key_ba)
    assert pair2dist_dic, "sids2dist_dic empty"
    return pair2dist_dic
################################################################################
def get_site_ends_distance(s1, e1, s2, e2):
    """
    Get nearest distance between two site ends / starts.
    Overlapping or directly adjacent sites get a distance of 0.

    >>> get_site_ends_distance(100, 120, 130, 150)
    10
    >>> get_site_ends_distance(130, 150, 100, 120)
    10
    >>> get_site_ends_distance(100, 120, 120, 140)
    0
    >>> get_site_ends_distance(120, 140, 110, 160)
    0
    """
    gap_fwd = s2 - e1  # gap if site 1 lies upstream of site 2.
    gap_rev = s1 - e2  # gap if site 2 lies upstream of site 1.
    # For overlapping / touching sites both gaps are <= 0, so clamp to 0.
    return max(gap_fwd, gap_rev, 0)
################################################################################
def get_center_position(start, end):
    """
    Get center position (1-based), given a (genomic) start (0-based) and
    end coordinate (1-based).

    >>> get_center_position(10, 11)
    11
    >>> get_center_position(1000,2000)
    1501
    >>> get_center_position(11, 20)
    17
    """
    # Length-1 regions: center is the end position.
    if end - start == 1:
        return end
    # Round half of region length added to start; +1 makes it 1-based.
    return round(((end - start) / 2) + start) + 1
################################################################################
def get_ei_border_ratio_from_exon_id(exon_id, regid2nc_dic,
                                     exid2eibrs_dic=None,
                                     ratio_mode=1,
                                     last_exon_dic=None,
                                     last_exon_ratio=2.5,
                                     min_reg_cov=5,
                                     min_reg_mode=1):
    """
    Get the exon-intron border ratio (EIBR) for a given exon ID.

    Ratio is average of ratios at both exon ends (if embedded in introns),
    or if first / last exon, only one ratio.
    Assign -1, if only exon, or if both exon and intron border region read
    count below min_reg_cov.
    Returns (ratio, selection criterion string).

    min_reg_cov:
        Minimum region read coverage. If both exon and intron border region
        have < min_reg_cov, return ratio of -1.
    regid2nc_dic:
        Contains exon/intron/border region ID -> [norm_cov, coverage, reg_len]
    exid2eibrs_dic:
        Exon ID to all EIB ratios list mapping.
    ratio_mode:
        How to calculate the returned EIBR ratio.
        1: Return the exon-intro border ratio with the higher coverage.
        2: Average the two exon-intron border ratios of the exon,
        if both have more than > min_reg_cov
    last_exon_dic:
        Last transcript exon ID -> polarity
        Used for prioritizing the inner exon intron border for multi-exon
        transcript last exons. Only effective for ratio_mode 1.
    last_exon_ratio:
        If the outer last exon read count is higher last_exon_ratio, prioritize
        the outter border again, i.e. select the outter ratio
        for EIB ratio calculation.

    >>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.2, 4, 20]}
    >>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
    (2.5, 'first_exon')
    >>> get_ei_border_ratio_from_exon_id("t2_e1", regid2nc_dic)
    (-1, 'single_exon')
    >>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
    >>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
    (3.0, 'inner_exon')
    >>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [0.1, 2, 20], "t1_e2_ebi2" : [0.1, 2, 20]}
    >>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
    (2.0, 'inner_exon_ds_lc')
    >>> regid2nc_dic = {"t1_e2_ebe1" : [0.1, 2, 20], "t1_e2_ebi1" : [0.1, 2, 20], "t1_e2_ebe2" : [0.5, 10, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
    >>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
    (2.0, 'inner_exon_us_lc')
    >>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.0, 0, 20]}
    >>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
    (10, 'first_exon')
    >>> regid2nc_dic = {"t1_e1_ebe2" : [0.0, 0, 20], "t1_e1_ebi2" : [0.5, 10, 20]}
    >>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
    (0.0, 'first_exon')
    >>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
    >>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=1)
    (4.0, 'inner_exon')
    """
    # Border region IDs: e = exon side, i = intron side; 1 = upstream border,
    # 2 = downstream border of the exon.
    exb_id_e1 = exon_id + "_ebe1"
    exb_id_i1 = exon_id + "_ebi1"
    exb_id_e2 = exon_id + "_ebe2"
    exb_id_i2 = exon_id + "_ebi2"
    # For single-exon transcripts.
    if exb_id_e1 not in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
        if exid2eibrs_dic is not None:
            # FIX: apply %(exon_id) so the assert message shows the actual ID.
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
            exid2eibrs_dic[exon_id] = [-1]
        return -1, "single_exon"
    # Last exon.
    if exb_id_e1 in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
        assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
        ratio1 = -1
        sel_crit = "last_exon"
        if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
            if regid2nc_dic[exb_id_i1][0]:
                ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
            else:
                # Zero intron coverage: fall back to exon read count as ratio.
                ratio1 = regid2nc_dic[exb_id_e1][1]
        if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
            exid2eibrs_dic[exon_id] = [ratio1]
        return ratio1, sel_crit
    # First exon.
    if exb_id_e1 not in regid2nc_dic and exb_id_e2 in regid2nc_dic:
        assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
        ratio2 = -1
        sel_crit = "first_exon"
        if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
            if regid2nc_dic[exb_id_i2][0]:
                ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
            else:
                # Zero intron coverage: fall back to exon read count as ratio.
                ratio2 = regid2nc_dic[exb_id_e2][1]
        if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
            exid2eibrs_dic[exon_id] = [ratio2]
        return ratio2, sel_crit
    # In-between exons.
    if exb_id_e1 in regid2nc_dic and exb_id_e2 in regid2nc_dic:
        assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
        assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
        ratio1 = -1
        ratio2 = -1
        sel_crit = "inner_exon"
        # Upstream border ratio (if border region coverage is sufficient).
        if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
            if regid2nc_dic[exb_id_i1][0]:
                ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
            else:
                ratio1 = regid2nc_dic[exb_id_e1][1]
        else:
            sel_crit += "_us_lc"
        # Downstream border ratio (if border region coverage is sufficient).
        if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
            if regid2nc_dic[exb_id_i2][0]:
                ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
            else:
                ratio2 = regid2nc_dic[exb_id_e2][1]
        else:
            sel_crit += "_ds_lc"
        if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
            exid2eibrs_dic[exon_id] = [ratio1, ratio2]
        if ratio1 == -1 and ratio2 != -1:
            avg_ratio = ratio2
        elif ratio1 != -1 and ratio2 == -1:
            avg_ratio = ratio1
        elif ratio1 == -1 and ratio2 == -1:
            avg_ratio = -1
        else:
            if ratio_mode == 1:
                # Take the ratio of the border with higher total normalized coverage.
                cov_b1 = regid2nc_dic[exb_id_i1][0] + regid2nc_dic[exb_id_e1][0]
                cov_b2 = regid2nc_dic[exb_id_i2][0] + regid2nc_dic[exb_id_e2][0]
                if cov_b1 > cov_b2:
                    avg_ratio = ratio1
                else:
                    avg_ratio = ratio2
                if last_exon_dic is not None:
                    if exon_id in last_exon_dic:
                        sel_crit = "last_exon"
                        exon_pol = last_exon_dic[exon_id]
                        # Define inner borders (strand-dependent).
                        cov_inner = cov_b1
                        ratio_inner = ratio1
                        cov_outer = cov_b2
                        ratio_outer = ratio2
                        if exon_pol == "-":
                            cov_inner = cov_b2
                            ratio_inner = ratio2
                            cov_outer = cov_b1
                            ratio_outer = ratio1
                        # Prefer inner border unless outer coverage dominates
                        # by more than factor last_exon_ratio.
                        if cov_inner*last_exon_ratio >= cov_outer:
                            avg_ratio = ratio_inner
                            sel_crit += "_inner"
                        else:
                            avg_ratio = ratio_outer
                            sel_crit += "_outer"
            elif ratio_mode == 2:
                avg_ratio = statistics.mean([ratio1, ratio2])
            else:
                assert False, "invalid ratio_mode (%i)" %(ratio_mode)
        return avg_ratio, sel_crit
    assert False, "invalid get_ei_border_ratio_from_exon_id()"
################################################################################
def get_ei_ratio_from_exon_id(exon_id, regid2nc_dic,
                              isr_intron_reg_dic=False,
                              dummy_int_cov=0.001):
    """
    Given exon ID and coverage information of all exon / intron IDs,
    calculate the exon/intron coverage ratio for the exon.
    Also return the calculated intron coverage and total intron read count,
    as tuple (ei_ratio, int_cov, int_count).

    regid2nc_dic:
        Exon/Intron ID -> [coverage, overlap read count, region length]
        With coverage = read count / region length.
    isr_intron_reg_dic:
        Currently unused here; kept for interface compatibility.
    dummy_int_cov:
        For intronless transcripts, add a pseudo intron coverage here.
        e.g. 0.001 == 1 read over 1,000 nt

    >>> regid2nc_dic = {"id_e2" : [100.0, 10000, 100], "id_i1" : [2.0, 200, 100], "id_i2" : [6.0, 600, 100], "id2_e1" : [100.0, 10000, 100]}
    >>> get_ei_ratio_from_exon_id("id_e2", regid2nc_dic)
    (25.0, 4.0, 800)
    >>> get_ei_ratio_from_exon_id("id2_e1", regid2nc_dic)
    (100000.0, 0.001, 0)
    >>> regid2nc_dic = {"id_e2" : [100.0, 10000, 100], "id_i1" : [4.0, 400, 100], "id_i2" : [0.0, 0, 100]}
    >>> get_ei_ratio_from_exon_id("id_e2", regid2nc_dic)
    (50.0, 2.0, 400)
    >>> regid2nc_dic = {"id_e2" : [100.0, 10000, 100], "id_i1" : [0.0, 0, 100], "id_i2" : [0.0, 0, 100]}
    >>> get_ei_ratio_from_exon_id("id_e2", regid2nc_dic)
    (100.0, 0.0, 0)
    """
    # FIX: raw strings for regexes; assert message now actually formats exon_id.
    assert re.search(r".+_e\d", exon_id), "exon ID %s has invalid format" %(exon_id)
    m = re.search(r"(.+)_e(\d+)", exon_id)
    tr_id = m.group(1)
    ex_nr = int(m.group(2))
    ex_cov = regid2nc_dic[exon_id][0]
    # Exon e<n> is flanked by introns i<n-1> (upstream) and i<n> (downstream).
    us_intron_id = tr_id + "_i" + str(ex_nr-1)
    ds_intron_id = tr_id + "_i" + str(ex_nr)
    int_count = 0
    int_cov = 0.0
    div = 0
    if us_intron_id in regid2nc_dic:
        int_cov += regid2nc_dic[us_intron_id][0]
        int_count += regid2nc_dic[us_intron_id][1]
        div += 1
    if ds_intron_id in regid2nc_dic:
        int_cov += regid2nc_dic[ds_intron_id][0]
        int_count += regid2nc_dic[ds_intron_id][1]
        div += 1
    if div:
        # Average coverage over the present neighboring introns.
        int_cov = int_cov / div
    else:
        # For intronless transcripts (exon has no neighboring introns).
        int_cov = dummy_int_cov
    if int_cov == 0.0:
        # Avoid division by zero: use exon coverage as ratio.
        ei_ratio = ex_cov
    else:
        ei_ratio = ex_cov / int_cov
    return ei_ratio, int_cov, int_count
################################################################################
def check_transcript_eir_tc_state(tr_id, exid2cov_dic, trid2exc_dic,
                                  filter_ei_ratio=1.0,
                                  min_ei_cov=20,
                                  min_ei_cov_sum=False,
                                  exid2isrn_dic=False,
                                  max_isrn_c=0,
                                  min_occ_exons_ratio=0.2):
    """
    Decide the context for a transcript (tr_id): return True for transcript
    context, False for genomic context. The decision is based on the
    exon-intron ratios (EIR) of the transcript exons.

    filter_ei_ratio:
        Exons with EIR <= filter_ei_ratio are counted for tr_id.
    min_ei_cov:
        Minimum coverage (== read count) the exon or surrounding intron
        region(s) need to have to be included into exon counting.
    min_ei_cov_sum:
        Instead of OR, use the sum of exon and intron region for min_ei_cov.
    exid2isrn_dic:
        Exon ID -> ISR count to neighborhood.
        If this is given, use only exons with ISRN counts <= max_isrn_c.
    max_isrn_c:
        Define maximum ISRN count (exid2isrn_dic) (default: 0).
    min_occ_exons_ratio:
        Ratio of the total number of transcript exons (rounded up via
        math.ceil, minimum 1) that need to count as genomic context
        evidence for the transcript to get genomic context (False).
    exid2cov_dic:
        exon ID -> [ei_ratio, ex_cov, int_cov, ex_read_count, int_read_count]
    trid2exc_dic:
        Transcript ID -> exon count

    >>> exid2cov_dic = {'t1_e1': [1.0, 1, 1, 15, 25], 't1_e2': [2.0, 2, 1, 35, 25], 't1_e3': [1.0, 2, 2, 35, 45]}
    >>> trid2exc_dic = {'t1': 3}
    >>> check_transcript_eir_tc_state("t1", exid2cov_dic, trid2exc_dic)
    False
    >>> check_transcript_eir_tc_state("t1", exid2cov_dic, trid2exc_dic, min_ei_cov=50)
    True
    """
    assert min_occ_exons_ratio > 0 and min_occ_exons_ratio < 1, "min_occ_exons_ratio needs to be > 0 and < 1"
    n_exons = trid2exc_dic[tr_id]
    # Single-exon transcripts always keep transcript context.
    if n_exons == 1:
        return True
    gc_evidence_c = 0  # count exons with EIR <= filter_ei_ratio.
    for ex_idx in range(1, n_exons + 1):
        ex_id = tr_id + "_e" + str(ex_idx)
        # Optionally skip exons with too many ISRs to their neighborhood.
        if exid2isrn_dic and exid2isrn_dic[ex_id] > max_isrn_c:
            continue
        ei_ratio = exid2cov_dic[ex_id][0]
        ex_rc = exid2cov_dic[ex_id][3]
        int_rc = exid2cov_dic[ex_id][4]
        cov_check = (ex_rc + int_rc) if min_ei_cov_sum else max(ex_rc, int_rc)
        if cov_check >= min_ei_cov and ei_ratio <= filter_ei_ratio:
            gc_evidence_c += 1
    min_needed = max(math.ceil(n_exons * min_occ_exons_ratio), 1)
    # Enough low-EIR exons -> genomic context (False), otherwise True.
    return gc_evidence_c < min_needed
################################################################################
def check_transcript_eibr_tc_state(tr_id, exid2eibr_dic, trid2exc_dic,
                                   filter_eib_ratio=2.0,
                                   min_occ_exons_ratio=0.2):
    """
    Decide the context for a transcript (tr_id): return True for transcript
    context, False for genomic context. The decision is based on the
    exon-intron border ratios (EIBR) of the transcript exons.

    filter_eib_ratio:
        Exons with EIBR <= filter_eib_ratio count as genomic context
        evidence for tr_id. So the higher this is set, the more likely
        tr_id gets assigned to genomic context (returns False).
        Exons with EIBR == -1 (no ratio) are ignored.
    min_occ_exons_ratio:
        Ratio of the total number of transcript exons (round() to nearest
        integer, minimum 1) that need to count as genomic context evidence
        for the transcript to get genomic context (return False).
    exid2eibr_dic:
        exon ID -> exon-intron border ratio
    trid2exc_dic:
        Transcript ID -> exon count

    >>> exid2eibr_dic = {'t1_e1': -1, 't1_e2': 4, 't1_e3': 2, 't1_e4': 1.75}
    >>> trid2exc_dic = {'t1': 4}
    >>> check_transcript_eibr_tc_state("t1", exid2eibr_dic, trid2exc_dic)
    False
    >>> check_transcript_eibr_tc_state("t1", exid2eibr_dic, trid2exc_dic, filter_eib_ratio=1.5)
    True
    >>> check_transcript_eibr_tc_state("t1", exid2eibr_dic, trid2exc_dic, min_occ_exons_ratio=0.75)
    True
    """
    assert min_occ_exons_ratio > 0 and min_occ_exons_ratio < 1, "min_occ_exons_ratio needs to be > 0 and < 1"
    n_exons = trid2exc_dic[tr_id]
    # Single-exon transcripts always keep transcript context.
    if n_exons == 1:
        return True
    # Collect EIBR values of all exons in exon-number order.
    eibr_values = [exid2eibr_dic[tr_id + "_e" + str(i)]
                   for i in range(1, n_exons + 1)]
    # Exons with valid ratio <= filter_eib_ratio count as genomic evidence.
    gc_evidence_c = sum(1 for eibr in eibr_values
                        if eibr != -1 and eibr <= filter_eib_ratio)
    min_needed = round(n_exons * min_occ_exons_ratio)
    if min_needed < 1:
        min_needed = 1
    return gc_evidence_c < min_needed
################################################################################
def select_highest_conf_tr_id(tr_ids_list,
                              trid2isrc_dic, trid2tslsc_dic,
                              trid2nc_dic, trid2len_dic,
                              sel_mode=1):
    """
    Select transcript ID with highest confidence for an exonic --in site.

    Selection proceeds in rounds: each round keeps only the highest scoring
    IDs (via get_highest_scoring_ids()); as soon as a single ID remains it
    is returned, together with the criterion that resolved it.

    sel_mode:
        Selection mode
        1: # IS reads > transcript coverage > TSL > transcript length > transcript ID
        2: # IS reads > TSL > transcript coverage > transcript length > transcript ID
    tr_ids_list:
        List of transcript IDs, from which to determine transcript ID
        with highest confidence.
    trid2isrc_dic:
        transcript ID to IS reads count supporting the transcript.
    trid2tslsc_dic:
        transcript ID to TSL score.
    trid2nc_dic:
        transcript ID to normalized coverage (sum of exon coverages).
        Normalized coverage: # read starts / region length.
    trid2len_dic:
        transcript ID to transcript length.

    If there is still a draw after all scoring rounds, sort the remaining
    IDs lexicographically and return the first one.

    Returns:
        (selected transcript ID, criterion string), criterion one of:
        "is_read_c", "norm_cov", "tsl_sc", "tr_len", "id_sort".
    """
    # Round 1: IS read counts (highest priority in both modes).
    sel_ids = get_highest_scoring_ids(tr_ids_list, trid2isrc_dic)
    if len(sel_ids) == 1:
        return sel_ids[0], "is_read_c"
    assert sel_mode in (1, 2), "invalid sel_mode set"
    # Remaining scoring rounds depend on selection mode.
    if sel_mode == 1:
        score_rounds = [(trid2nc_dic, "norm_cov"), (trid2tslsc_dic, "tsl_sc")]
    else:
        score_rounds = [(trid2tslsc_dic, "tsl_sc"), (trid2nc_dic, "norm_cov")]
    score_rounds.append((trid2len_dic, "tr_len"))
    for sc_dic, sel_crit in score_rounds:
        sel_ids = get_highest_scoring_ids(sel_ids, sc_dic)
        if len(sel_ids) == 1:
            return sel_ids[0], sel_crit
    # Final tie-break: lexicographic ID sort, take first.
    sel_ids.sort()
    return sel_ids[0], "id_sort"
################################################################################
def get_highest_scoring_ids(ids_set, id2sc_dic,
                            scfp_list=False):
    """
    Given a set of IDs, and a ID to score mapping, return highest
    scoring ID(s) in sorted list.

    scfp_list:
        If score is stored in first position of list, with ID
        mapping to list in id2sc_dic.

    >>> ids_set = ['s1', 's2', 's3', 's4']
    >>> id2sc_dic = {'s1' : 10, 's2' : 5, 's3' : 10, 's4' : 7}
    >>> get_highest_scoring_ids(ids_set, id2sc_dic)
    ['s1', 's3']
    >>> id2sc_dic = {'s1' : 10, 's2' : 5, 's3' : 10, 's4' : 17}
    >>> get_highest_scoring_ids(ids_set, id2sc_dic)
    ['s4']
    >>> ids_set = ['s1', 's2', 's3']
    >>> id2sc_dic = {'s1' : 0, 's2' : 0, 's3' : 0}
    >>> get_highest_scoring_ids(ids_set, id2sc_dic)
    ['s1', 's2', 's3']
    """
    assert ids_set, "given ids_set empty"
    # Score accessor depending on storage format.
    if scfp_list:
        get_sc = lambda set_id: id2sc_dic[set_id][0]
    else:
        get_sc = lambda set_id: id2sc_dic[set_id]
    # FIX: use real maximum instead of magic sentinel (-6666), which broke
    # for score sets entirely below the sentinel value.
    max_sc = max(get_sc(set_id) for set_id in ids_set)
    max_ids = [set_id for set_id in ids_set if get_sc(set_id) == max_sc]
    assert max_ids, "max_ids empty"
    # De-duplicate and sort for deterministic output.
    max_ids = list(set(max_ids))
    max_ids.sort()
    return max_ids
################################################################################
def check_a_in_b_list(a_list, b_list):
    """
    Return True if any entry of list A is also present in list B,
    otherwise return False.
    """
    return any(elem in b_list for elem in a_list)
################################################################################
def two_lists_get_intersect(l1,l2):
    """
    Given two lists, return list with common elements
    (l1 order and duplicates preserved).

    >>> l1 = [1,5,10,20,30]
    >>> l2 = [5,20,40]
    >>> two_lists_get_intersect(l1, l2)
    [5, 20]
    >>> l3 = [50]
    >>> two_lists_get_intersect(l1, l3)
    []
    """
    assert l1, "given l1 empty"
    assert l2, "given l2 empty"
    return [elem for elem in l1 if elem in l2]
################################################################################
def bed_check_unique_ids(bed_file):
    """
    Check whether .bed file (6 column format with IDs in column 4)
    has unique column 4 IDs. Return True if all column 4 IDs are unique,
    False otherwise.

    Implemented in pure Python instead of shelling out to
    "cut | sort | uniq -d", which was unsafe for file paths containing
    spaces or shell metacharacters and required coreutils.
    Lines with < 4 columns are ignored.

    >>> test_bed = "test_data/test1.bed"
    >>> bed_check_unique_ids(test_bed)
    True
    >>> test_bed = "test_data/test2.bed"
    >>> bed_check_unique_ids(test_bed)
    False
    """
    seen_ids = set()
    with open(bed_file) as f:
        for line in f:
            cols = line.rstrip("\n").split("\t")
            if len(cols) < 4:
                continue
            site_id = cols[3]
            if site_id in seen_ids:
                # Duplicate column 4 ID found.
                return False
            seen_ids.add(site_id)
    return True
################################################################################
def diff_two_files_identical(file1, file2):
    """
    Check whether two files are identical. Return True if diff reports no
    differences (exit status 0), False otherwise (differences found, or
    diff trouble, e.g. missing file).

    Uses subprocess.run with an argument list instead of a shell command
    string, so file paths containing spaces or shell metacharacters are
    handled safely.

    >>> file1 = "test_data/file1"
    >>> file2 = "test_data/file2"
    >>> diff_two_files_identical(file1, file2)
    True
    >>> file1 = "test_data/test1.bed"
    >>> diff_two_files_identical(file1, file2)
    False
    """
    # diff exit status: 0 == identical, 1 == different, 2 == trouble.
    result = subprocess.run(["diff", file1, file2],
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
    return result.returncode == 0
################################################################################
def gtf_get_intron_exon_cov(in_gtf, in_bam, out_bed,
                            correct_min_ex_order=False,
                            tr2exc_dic=False,
                            read_pos_mode=1,
                            eib_width=10,
                            border_mode=1,
                            count_isr_double=True,
                            add_isr_bed=False,
                            reg2cov_dic=None,
                            isr_sub_count=True,
                            isr_intron_reg_dic=False,
                            tmp_out_folder=False,
                            tr_ids_dic=False):
    """
    Get intron-exon region coverage of exonic site transcripts.

    Returns regid2nc_dic: exon / intron / border region ID ->
    [normalized coverage, overlap read count, region length], with
    normalized coverage = read count / region length. The sorted
    intron/exon/border regions BED is written to out_bed.
    Requires bamToBed and intersectBed (bedtools) plus sort on PATH.

    correct_min_ex_order:
        If True, renumber minus-strand exons so that exon numbers increase
        in genomic order (requires tr2exc_dic).
    tr2exc_dic:
        Transcript ID -> exon count mapping (needed if
        correct_min_ex_order=True).
    read_pos_mode:
        Defines what part of read to use for coverage calculation.
        1: full-length read (thus can be counted > 1)
        2: 5' end of read
        3: center position of read
    eib_width:
        Width (nt) of the extracted exon-intron border regions.
    border_mode:
        Mode for exon-intron border extration.
        1: on each side of each exon
        2: on first and last exon only the inner sites, where
        exon is connected to intron.
    count_isr_double:
        If True (and add_isr_bed is set), concatenate the ISR BED to the
        reads BED, so intron-spanning reads are counted twice.
    add_isr_bed:
        Provide ISR end position BED from earlier computations, to
        count intron-spanning reads twice for coverage calculations.
        Does not include --rnaseq-bam reads!
    reg2cov_dic:
        ["chr,s,e,pol"] -> read overlap count / coverage.
        Filled here with raw overlap counts if given (no pseudocounts).
    isr_sub_count:
        If True, subtract ISR counts (isr_intron_reg_dic) from intronic
        region read counts (clipped at a minimum count of 1).
    isr_intron_reg_dic:
        Genomic intron region "chr,s,e,pol" -> ISR count.
    regid2reg_dic:
        Region ID (exon intron) to genomic region "chr_id,s,e,pol"
    tmp_out_folder:
        Folder to store the temporary BED file in (default: CWD).
    tr_ids_dic:
        Transcript IDs to keep dictionary.

    Output intron/exon regions of specified transcripts from GTF to BED.
    E.g.
    chr1	1000	2000	ENST001_e1	0	+
    chr1	2000	3000	ENST001_i1	0	+
    chr1	3000	4000	ENST001_e2	0	+
    chr1	6000	7000	ENST002_e2	0	-
    chr1	7000	8000	ENST002_i1	0	-
    chr1	8000	9000	ENST002_e1	0	-
    Then convert in_bam to BED regions, overlap with intron/exon regions
    and get overlap counts normalized by intron/exon lengths.
    Then overlap in_bam BAM reads with intron/exon regions,
    counting overlaps for intron / exon regions.
    bamToBed -i TAF13_gene_reads.bam -split
    """
    # For reverse ording we need to have exon numbers.
    if correct_min_ex_order:
        assert tr2exc_dic, "tr2exc_dic needed if correct_min_ex_order True"
    # Unique temporary BED file name (reused for several intermediate steps).
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".intron_exon.tmp.bed"
    if tmp_out_folder:
        tmp_bed = tmp_out_folder + "/" + tmp_bed
    OUTBED = open(tmp_bed, "w")
    # Read in exon features from GTF file.
    c_gtf_ex_feat = 0
    # Start end coordinates of exons.
    exon_e_dic = {}
    exon_s_dic = {}
    # Transcript stats.
    tr2pol_dic = {}
    tr2chr_dic = {}
    # dic for sanity checking exon number order.
    tr2exon_nr_dic = {}
    # Remember me.
    proc_tr_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        feat_pol = cols[6]
        infos = cols[8]
        # Only exon features are of interest here.
        if feature != "exon":
            continue
        # Restrict to standard chromosomes.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Make start coordinate 0-base (BED standard).
        feat_s = feat_s - 1
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        # Check if transcript ID is in transcript dic.
        if tr_ids_dic:
            if transcript_id not in tr_ids_dic:
                # Stop early once all requested transcripts were seen.
                if len(tr_ids_dic) == len(proc_tr_dic):
                    break
                else:
                    continue
            proc_tr_dic[transcript_id] = 1
        # Extract exon number.
        m = re.search('exon_number "(\d+?)"', infos)
        # Try Gencode encoding.
        if not m:
            m = re.search('exon_number (\d+?);', infos)
        assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        exon_nr = int(m.group(1))
        # Store transcript stats.
        tr2pol_dic[transcript_id] = feat_pol
        tr2chr_dic[transcript_id] = chr_id
        # Check whether exon numbers are incrementing for each transcript ID.
        if not transcript_id in tr2exon_nr_dic:
            tr2exon_nr_dic[transcript_id] = exon_nr
        else:
            assert tr2exon_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
            tr2exon_nr_dic[transcript_id] = exon_nr
        # Reverse ordering for minus strand.
        if correct_min_ex_order and feat_pol == "-":
            assert transcript_id in tr2exc_dic, "transcript ID %s not in tr2exc_dic" %(transcript_id)
            exon_nr = tr2exc_dic[transcript_id] - exon_nr + 1
            assert exon_nr >= 1, "exon number < 1 assigned (%i) for transcript ID %s (# exons: %i)" %(exon_nr, transcript_id, tr2exc_dic[transcript_id])
        # Construct exon ID.
        exon_id = transcript_id + "_e" + str(exon_nr)
        # Store infos.
        exon_s_dic[exon_id] = feat_s
        exon_e_dic[exon_id] = feat_e
        # Count exon entry.
        c_gtf_ex_feat += 1
        # Output genomic exon region.
        OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,feat_s,feat_e,exon_id,feat_pol))
    OUTBED.close()
    f.close()
    # Check for read-in features.
    assert c_gtf_ex_feat, "no exon features read in from \"%s\"" %(in_gtf)
    # Append intron regions.
    tr2intron_nr_dic = {}
    OUTBED = open(tmp_bed, "a")
    for tr_id in tr2pol_dic:
        tr_pol = tr2pol_dic[tr_id]
        chr_id = tr2chr_dic[tr_id]
        tr_c = tr2exon_nr_dic[tr_id]
        tr2intron_nr_dic[tr_id] = 0
        intron_c = 0
        # # 1-exon transcripts, no introns.
        # if tr_c == 1:
        #     continue
        # Collect exon IDs of the transcript in exon-number order.
        ex_list = []
        for i in range(tr_c):
            ex_nr = i + 1
            ex_id = tr_id + "_e" + str(ex_nr)
            ex_list.append(ex_id)
        # If one exon only.
        if tr_c == 1:
            if border_mode == 1:
                # Single exon: still output its four border regions.
                ex_id = ex_list[0]
                ex_s = exon_s_dic[ex_id]
                ex_e = exon_e_dic[ex_id]
                exb_id_i1 = ex_id + "_ebi1"
                exb_id_e1 = ex_id + "_ebe1"
                exb_id_e2 = ex_id + "_ebe2"
                exb_id_i2 = ex_id + "_ebi2"
                i1s, i1e, e1s, e1e, e2s, e2e, i2s, i2e = get_intron_exon_border_coords(
                    ex_s, ex_e, eib_width=eib_width)
                OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i1s,i1e,exb_id_i1,tr_pol))
                OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e1s,e1e,exb_id_e1,tr_pol))
                OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e2s,e2e,exb_id_e2,tr_pol))
                OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i2s,i2e,exb_id_i2,tr_pol))
            continue
        # For multi-exon transcripts, output intronic and border regions.
        intron_len_list = []
        for i in range(len(ex_list)):
            ex1i = i
            ex2i = i + 1
            # As long as not last exon, add introns.
            if ex2i < len(ex_list):
                ex1id = ex_list[ex1i]
                ex2id = ex_list[ex2i]
                ex1s = exon_s_dic[ex1id]
                ex2s = exon_s_dic[ex2id]
                ex1e = exon_e_dic[ex1id]
                ex2e = exon_e_dic[ex2id]
                intron_id = tr_id + "_i" + str(ex2i)
                intron_c += 1
                # Plus case.
                intron_s = ex1e
                intron_e = ex2s
                ex1_len = ex1e - ex1s
                ex2_len = ex2e - ex2s
                # Lengths of exons and embedded intron.
                triple_len_list = [ex1_len, 0, ex2_len]
                triple_ids_list = [ex1id, intron_id, ex2id]
                if tr_pol == "-":
                    # Minus strand: genomic intron lies between exon2 end
                    # and exon1 start.
                    intron_s = ex2e
                    intron_e = ex1s
                    triple_len_list[0] = ex2_len
                    triple_len_list[2] = ex1_len
                # Length of the intron upstream of ex1 (from previous iteration).
                prev_intron_len = False
                if intron_len_list:
                    ill_len = len(intron_len_list)
                    prev_intron_len = intron_len_list[ill_len-1]
                intron_len = intron_e - intron_s
                intron_len_list.append(intron_len)
                triple_len_list[1] = intron_len
                # Output intron region.
                OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,intron_s,intron_e,intron_id,tr_pol))
                """
                Get intron-exon border regions.
                I.e. regions at intron / exon ends, for which to calculate
                coverage too.
                """
                i1s, i1e, e1s, e1e, e2s, e2e, i2s, i2e = get_intron_exon_border_coords(
                    ex1s, ex1e, eib_width=eib_width,
                    us_intron_len=prev_intron_len,
                    ds_intron_len=intron_len)
                exb_id_i1 = ex1id + "_ebi1"
                exb_id_e1 = ex1id + "_ebe1"
                exb_id_e2 = ex1id + "_ebe2"
                exb_id_i2 = ex1id + "_ebi2"
                if border_mode == 1:
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i1s,i1e,exb_id_i1,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e1s,e1e,exb_id_e1,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e2s,e2e,exb_id_e2,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i2s,i2e,exb_id_i2,tr_pol))
                elif border_mode == 2:
                    # border_mode 2: first exon gets only its inner border
                    # (i.e. only if an upstream intron exists).
                    if prev_intron_len:
                        OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i1s,i1e,exb_id_i1,tr_pol))
                        OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e1s,e1e,exb_id_e1,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e2s,e2e,exb_id_e2,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i2s,i2e,exb_id_i2,tr_pol))
                else:
                    assert False, "invalid border_mode set (%i)" %(border_mode)
            elif ex2i == len(ex_list):
                # Last exon.
                ex_id = ex_list[ex1i]
                ex_s = exon_s_dic[ex_id]
                ex_e = exon_e_dic[ex_id]
                # Previous intron length.
                prev_intron_len = intron_len_list[intron_c-1]
                i1s, i1e, e1s, e1e, e2s, e2e, i2s, i2e = get_intron_exon_border_coords(
                    ex_s, ex_e, eib_width=eib_width,
                    us_intron_len=prev_intron_len,
                    ds_intron_len=False)
                exb_id_i1 = ex_id + "_ebi1"
                exb_id_e1 = ex_id + "_ebe1"
                exb_id_e2 = ex_id + "_ebe2"
                exb_id_i2 = ex_id + "_ebi2"
                if border_mode == 1:
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i1s,i1e,exb_id_i1,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e1s,e1e,exb_id_e1,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e2s,e2e,exb_id_e2,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i2s,i2e,exb_id_i2,tr_pol))
                elif border_mode == 2:
                    # border_mode 2: last exon gets only its inner border.
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,i1s,i1e,exb_id_i1,tr_pol))
                    OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,e1s,e1e,exb_id_e1,tr_pol))
                else:
                    assert False, "invalid border_mode set (%i)" %(border_mode)
                break
        tr2intron_nr_dic[tr_id] = intron_c
    OUTBED.close()
    # Sanity check exon + intron numbers.
    for tr_id in tr2exon_nr_dic:
        exon_nr = tr2exon_nr_dic[tr_id]
        intron_nr = tr2intron_nr_dic[tr_id]
        assert (exon_nr-1) == intron_nr, "intron number != exon number - 1 for \"%s\" (%i != %i - 1)" %(tr_id, intron_nr, exon_nr)
    # Sort intron exon BED.
    bed_sort_file(tmp_bed, out_bed)
    # First get start positions for overlap calculation.
    # Minus strand: end, plus strand: start.
    check_cmd = "bamToBed -i " + in_bam + " -split"
    output = subprocess.getoutput(check_cmd)
    # Reuse tmp_bed for the (possibly position-reduced) read regions.
    GRRSBEDOUT = open(tmp_bed, "w")
    c_read = 0
    for line in output.split('\n'):
        cols = line.strip().split("\t")
        c_read += 1
        chr_id = cols[0]
        reg_s = int(cols[1])
        reg_e = int(cols[2])
        read_pol = cols[5]
        # Assign new sequential read IDs.
        new_id = "r%i" %(c_read)
        if read_pos_mode == 1:
            # Full-length read region.
            new_s = reg_s
            new_e = reg_e
        elif read_pos_mode == 2:
            # 5' end position of the read (strand-dependent).
            new_s = reg_s
            new_e = new_s + 1
            if read_pol == "-":
                new_s = reg_e - 1
                new_e = reg_e
        elif read_pos_mode == 3:
            # Center position of the read.
            new_e = get_center_position(reg_s, reg_e)
            new_s = new_e - 1
        else:
            assert False, "invalid value set for read_pos_mode"
        assert new_e > new_s, "BED region end <= region start coordinate"
        GRRSBEDOUT.write("%s\t%i\t%i\t%s\t0\t%s\n" %(chr_id, new_s, new_e, new_id, read_pol))
    GRRSBEDOUT.close()
    assert c_read, "no output produced from \"%s\"" %(check_cmd)
    """
    Append ISR BED to reads BED, to count ISR reads twice in overlap
    calculations.
    """
    if add_isr_bed and count_isr_double:
        assert os.path.exists(add_isr_bed), "set ISR containing BED file %s does not exist" %(add_isr_bed)
        print("Concatenate ISR reads BED to transcript reads BED ... ")
        concatenate_files(add_isr_bed, tmp_bed)
    # Region (intron exon border) to coverage stats.
    regid2nc_dic = {}
    """
    Example output:
    $ cat a.bed
    chr1	1000	1030	t1	0	+
    chr1	1050	1080	t2	0	+
    chr1	1100	1130	t3	0	+
    $ cat b.bed
    chr1	1000	1001	r1	0	+
    chr1	1010	1011	r2	0	+
    chr1	1020	1021	r3	0	+
    chr1	1050	1051	r4	0	+
    chr1	1060	1061	r5	0	+
    $ intersectBed -a a.bed -b b.bed -s -sorted -c
    chr1	1000	1030	t1	0	+	3
    chr1	1050	1080	t2	0	+	2
    chr1	1100	1130	t3	0	+	0
    NOTE that regions without overlaps also appear here ...
    And with full-length regions:
    a.bed
    chr1	1000	2000	e1	0	+
    chr1	2000	3000	i1	0	+
    chr1	3000	4000	e2	0	+
    b.bed
    chr1	1990	2020	r1	0	+
    chr1	1995	2025	r2	0	+
    chr1	2000	2040	r3	0	+
    intersectBed -a a.bed -b b.bed -s -c
    chr1	1000	2000	e1	0	+	2
    chr1	2000	3000	i1	0	+	3
    chr1	3000	4000	e2	0	+	0
    """
    # Count strand-specific read overlaps per exon/intron/border region.
    check_cmd = "sort -k1,1 -k2,2n " + tmp_bed + " | intersectBed -a " + out_bed + " -b stdin -s -sorted -c"
    output = subprocess.getoutput(check_cmd)
    for line in output.split('\n'):
        cols = line.strip().split("\t")
        chr_id = cols[0]
        reg_s = int(cols[1])
        reg_e = int(cols[2])
        reg_id = cols[3]
        reg_pol = cols[5]
        ol_c = int(cols[6])
        reg_l = reg_e - reg_s
        gen_reg = "%s,%i,%i,%s" %(chr_id, reg_s, reg_e, reg_pol)
        if reg2cov_dic is not None:
            reg2cov_dic[gen_reg] = ol_c
        # Add pseudocounts only for exon / intron regions, for coverage calculations.
        if not re.search(".+_eb[ei]\d+$", reg_id):
            ol_c += 1
        # Substract ISR count from intronic read count (if isr_sub_count).
        if isr_intron_reg_dic:
            if gen_reg in isr_intron_reg_dic:
                isr_c = isr_intron_reg_dic[gen_reg]
                if isr_sub_count:
                    ol_c -= isr_c
                    # Keep a minimum count of 1 after subtraction.
                    if ol_c < 1:
                        ol_c = 1
        norm_c = ol_c / reg_l
        regid2nc_dic[reg_id] = [norm_c, ol_c, reg_l]
    assert regid2nc_dic, "regid2nc_dic empty (nothing read in)"
    # Clean up the temporary file.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    return regid2nc_dic
################################################################################
def get_intron_exon_border_coords(ex_s, ex_e,
                                  us_intron_len=False,
                                  ds_intron_len=False,
                                  eib_width=20):
    """
    Compute the four border regions flanking a single exon:
    upstream intron side, upstream exon side, downstream exon side,
    downstream intron side. Border widths are capped by the exon length
    (exon side) and by the neighboring intron length (intron side).
    ex_s:
        Genomic exon start (0-based)
    ex_e:
        Genomic exon end (1-based)
    us_intron_len:
        Upstream intron length (for single exon transcripts = False)
    ds_intron_len:
        Downstream intron length (for single exon transcripts = False)
    eib_width:
        Width of intron/exon border region.
    >>> get_intron_exon_border_coords(1000, 2000)
    (980, 1000, 1000, 1020, 1980, 2000, 2000, 2020)
    >>> get_intron_exon_border_coords(1000, 1020, eib_width=50)
    (950, 1000, 1000, 1020, 1000, 1020, 1020, 1070)
    >>> get_intron_exon_border_coords(1000, 1020, eib_width=50, us_intron_len=30, ds_intron_len=40)
    (970, 1000, 1000, 1020, 1000, 1020, 1020, 1060)
    """
    exon_len = ex_e - ex_s
    # Effective widths: never wider than the exon itself (exon side),
    # never wider than the neighboring intron (intron side, if given).
    ex_width = min(exon_len, eib_width)
    us_i_width = eib_width
    if us_intron_len and us_intron_len < eib_width:
        us_i_width = us_intron_len
    ds_i_width = eib_width
    if ds_intron_len and ds_intron_len < eib_width:
        ds_i_width = ds_intron_len
    # Upstream intron / exon border regions.
    i1s, i1e = ex_s - us_i_width, ex_s
    e1s, e1e = ex_s, ex_s + ex_width
    # Downstream exon / intron border regions.
    e2s, e2e = ex_e - ex_width, ex_e
    i2s, i2e = ex_e, ex_e + ds_i_width
    return i1s, i1e, e1s, e1e, e2s, e2e, i2s, i2e
################################################################################
def get_gene_infos_from_annot_table(tr_gene_annot_file,
                                    trid2gid_dic, trid2gn_dic, trid2gbt_dic,
                                    trid2tbt_dic):
    """
    Read in gene infos from peakhood extract folder file.
    Store infos with mapping: transcript ID -> gene info
    The four given dictionaries are filled in place:
    trid2gid_dic : transcript ID -> gene ID
    trid2gn_dic : transcript ID -> gene name
    trid2gbt_dic : transcript ID -> gene biotype
    trid2tbt_dic : transcript ID -> transcript biotype
    tr_gene_annot_file content (tab-separated, with header row):
    transcript_id	tr_biotype	tr_length	tr_exon_c	tr_gene_id	tr_gene_name	tr_gene_biotype
    ENST00000379370.7	protein_coding	7328	36	MSTRG.88	AGRN	-
    ENST00000620552.4	protein_coding	7394	39	MSTRG.88	AGRN	-
    ...
    """
    # Removed: unused row variable and redundant f.close() (the with
    # statement already closes the file).
    with open(tr_gene_annot_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            # Skip header row.
            if cols[0] == "transcript_id":
                continue
            tr_id = cols[0]
            trid2gid_dic[tr_id] = cols[4]  # gene ID.
            trid2gn_dic[tr_id] = cols[5]   # gene name.
            trid2gbt_dic[tr_id] = cols[6]  # gene biotype.
            trid2tbt_dic[tr_id] = cols[1]  # transcript biotype.
################################################################################
def get_gen_motif_hits(motif, gen_fa, gen_bed,
                       hits_out_bed=False,
                       hits_out_fa=False):
    """
    Search the genomic site sequences in gen_fa for motif matches and
    map each match to genomic coordinates via the site regions in
    gen_bed. Matches on an already recorded genomic position are
    counted but not stored again.
    hits_out_bed:
        If set, write unique hits as genomic BED regions to this file.
    hits_out_fa:
        If set, write unique hit sequences as FASTA to this file.
    Returns 4-tuple: total match count, unique (deduplicated) match
    count, number of sites, effective (unique genomic) region size.
    """
    c_hits = 0
    c_uniq_hits = 0
    c_sites = 0
    region_size = 0
    seqs_dic = read_fasta_into_dic(gen_fa,
                                   dna=False,
                                   empty_check=False,
                                   skip_n_seqs=False)
    if not seqs_dic:
        return c_hits, c_uniq_hits, c_sites, region_size
    sites_dic = bed_read_rows_into_dic(gen_bed, to_list=True,
                                       check_chr_id_format=False)
    if not sites_dic:
        return c_hits, c_uniq_hits, c_sites, region_size
    assert len(seqs_dic) == len(sites_dic), "len(gen_seqs_dic) != len(gen_bed_dic) for files %s and %s" %(gen_fa, gen_bed)
    c_sites = len(sites_dic)
    region_size = get_uniq_gen_size(gen_bed)
    hit2reg_dic = {}       # hit ID -> [chr, genomic start, genomic end, strand].
    hit2seq_dic = {}       # hit ID -> matched subsequence.
    seen_gen_pos_dic = {}  # already recorded genomic positions.
    for site_id, site_seq in seqs_dic.items():
        chr_id = sites_dic[site_id][0]
        site_s = sites_dic[site_id][1]
        site_e = sites_dic[site_id][2]
        site_pol = sites_dic[site_id][5]
        hit_nr = 0
        for match in re.finditer(motif, site_seq):
            c_hits += 1
            # Convert match offsets to genomic coordinates (minus strand
            # hits are measured back from the region end).
            if site_pol == "-":
                hit_gen_s = site_e - match.end()
                hit_gen_e = site_e - match.start()
            else:
                hit_gen_s = site_s + match.start()
                hit_gen_e = site_s + match.end()
            check_id = "%s,%i-%i,%s" %(chr_id,hit_gen_s,hit_gen_e,site_pol)
            # Skip genomic positions already recorded.
            if check_id in seen_gen_pos_dic:
                continue
            seen_gen_pos_dic[check_id] = 1
            hit_nr += 1
            c_uniq_hits += 1
            hit_id = site_id + "," + str(hit_nr)
            hit2reg_dic[hit_id] = [chr_id, hit_gen_s, hit_gen_e, site_pol]
            hit2seq_dic[hit_id] = match.group()
    if not hit2reg_dic:
        return c_hits, c_uniq_hits, c_sites, region_size
    if hits_out_bed:
        with open(hits_out_bed, "w") as bed_out:
            for hit_id, reg in hit2reg_dic.items():
                bed_out.write("%s\t%i\t%i\t%s\t0\t%s\n" %(reg[0], reg[1], reg[2], hit_id, reg[3]))
    if hits_out_fa:
        with open(hits_out_fa, "w") as fa_out:
            for hit_id, hit_seq in hit2seq_dic.items():
                fa_out.write(">%s\n%s\n" %(hit_id, hit_seq))
    return c_hits, c_uniq_hits, c_sites, region_size
################################################################################
def get_tr_motif_hits(motif, tr_fa, tr_bed, gen_exon_bed,
                      hits_tr_con_gen_reg_bed=False,
                      hits_tr_con_gen_reg_split_bed=False,
                      hits_out_bed=False,
                      hits_out_fa=False):
    """
    Get motif hits on transcript sequences / sites.
    Return number of unique hits and effective region size.

    motif:
        Regular expression searched (re.finditer) in each site sequence.
    tr_fa:
        FASTA file with site sequences (same IDs as in tr_bed).
    tr_bed:
        Sites in transcript coordinates (column 1 == transcript ID).
    gen_exon_bed:
        Genomic exon regions of the transcripts (exon IDs formatted
        transcriptid_e<nr>), used to deduplicate hits on genome level
        and to compute the effective region size.
    hits_tr_con_gen_reg_bed:
        If set, output genome-mapped deduplicated hit regions to this file.
    hits_tr_con_gen_reg_split_bed:
        If set, output only multi-part (split) hit regions to this file.
    hits_out_bed:
        If set, output hits in transcript coordinates to this BED file.
    hits_out_fa:
        If set, output hit sequences to this FASTA file.
    Returns 4-tuple:
        c_hits (hits, deduplicated per transcript position),
        c_uniq_hits (hits after genomic deduplication via
        get_uniq_tr_regions),
        c_sites (number of input sites),
        region_size (unique genomic space covered by the sites, in nt).
    """
    c_hits = 0
    c_uniq_hits = 0
    c_sites = 0
    region_size = 0
    tr_seqs_dic = read_fasta_into_dic(tr_fa,
                                      dna=False,
                                      empty_check=True,
                                      skip_n_seqs=False)
    # No sequences / sites -> return all-zero stats.
    if not tr_seqs_dic:
        return c_hits, c_uniq_hits, c_sites, region_size
    tr_bed_dic = bed_read_rows_into_dic(tr_bed, to_list=True,
                                        check_chr_id_format=False)
    if not tr_bed_dic:
        return c_hits, c_uniq_hits, c_sites, region_size
    assert len(tr_seqs_dic) == len(tr_bed_dic), "len(tr_seqs_dic) != len(tr_bed_dic) for files %s and %s" %(tr_fa, tr_bed)
    c_sites = len(tr_bed_dic)
    # Unique genomic space covered by the sites (overlaps counted once).
    region_size = get_uniq_tr_size(tr_bed, gen_exon_bed)
    hit_reg_dic = {}       # hit ID -> [tr_id, hit start, hit end] (tr coords).
    hit_seqs_dic = {}      # hit ID -> matched subsequence.
    tr_check_pos_dic = {}  # seen transcript positions (for deduplication).
    for site_id in tr_seqs_dic:
        seq = tr_seqs_dic[site_id]
        tr_id = tr_bed_dic[site_id][0]
        tr_s = tr_bed_dic[site_id][1]
        tr_e = tr_bed_dic[site_id][2]
        site_id_hit_c = 0
        for match in re.finditer(motif, seq):
            hit = match.group()
            hit_s = match.start() # 0-based.
            hit_e = match.end() # 1-based.
            # Hit position in transcript coordinates.
            hit_tr_s = tr_s + hit_s
            hit_tr_e = tr_s + hit_e
            check_id = "%s,%i-%i" %(tr_id,hit_tr_s,hit_tr_e)
            # Skip hits at already recorded transcript positions.
            if check_id in tr_check_pos_dic:
                continue
            else:
                site_id_hit_c += 1
                hit_id = site_id + "," + str(site_id_hit_c)
                hit_reg_dic[hit_id] = [tr_id, hit_tr_s, hit_tr_e]
                hit_seqs_dic[hit_id] = hit
                tr_check_pos_dic[check_id] = 1
    if not hit_reg_dic:
        return c_hits, c_uniq_hits, c_sites, region_size
    # Hit count after per-position deduplication.
    c_hits = len(hit_reg_dic)
    if hits_out_bed:
        OUTHITBED = open(hits_out_bed, "w")
        for site_id in hit_reg_dic:
            OUTHITBED.write("%s\t%i\t%i\t%s\t0\t+\n" %(hit_reg_dic[site_id][0], hit_reg_dic[site_id][1], hit_reg_dic[site_id][2], site_id))
        OUTHITBED.close()
    if hits_out_fa:
        OUTHITFA = open(hits_out_fa, "w")
        for site_id in hit_reg_dic:
            hit_seq = hit_seqs_dic[site_id]
            OUTHITFA.write(">%s\n%s\n" %(site_id, hit_seq))
        OUTHITFA.close()
    # Deduplicate transcript hits.
    dedup_hit_reg_dic = get_uniq_tr_regions(hit_reg_dic, gen_exon_bed,
                                            hits_tr_con_gen_reg_bed=hits_tr_con_gen_reg_bed,
                                            hits_tr_con_gen_reg_split_bed=hits_tr_con_gen_reg_split_bed)
    c_uniq_hits = len(dedup_hit_reg_dic)
    return c_hits, c_uniq_hits, c_sites, region_size
################################################################################
def get_uniq_tr_regions(tr_reg_dic, gen_exon_bed,
                        hits_tr_con_gen_reg_bed=False,
                        hits_tr_con_gen_reg_split_bed=False):
    """
    Get unique transcript regions, by mapping transcript sites on genome
    and remove duplicate regions (i.e., regions with same start+end+chr+pol).
    Also considers/removes identical split regions.
    tr_reg_dic:
        Transcript regions with mapping:
        region_id -> [tr_id, tr_s, tr_e] # tr_s 0-based.
    gen_exon_bed:
        genomic exon regions containing the transcript sites.
        Exon ID with format: transcriptid_e[1...n] with n == number of
        exons for transcript.
    hits_tr_con_gen_reg_bed:
        Provide file path to output the genome-mapped regions (all parts)
        of the kept (deduplicated) sites.
    hits_tr_con_gen_reg_split_bed:
        Provide file path to output transcript split hits on genome.
    tr_reg_dic:
        t1	995	1005	s1	0	+
        t2	995	1005	s2	0	+
        t2	995	1010	s3	0	+
        t3	995	1005	s4	0	+
    tr_reg_dic = {
    's1': ['t1', 995, 1005],
    's2': ['t2', 995, 1005],
    's3': ['t2', 995, 1010],
    's4': ['t3', 995, 1005]}
    test_uniq_tr_reg.gen_ex.bed:
        chr1	1000	2000	t1_e1	0	+
        chr1	3000	4000	t1_e2	0	+
        chr1	1000	2000	t2_e1	0	+
        chr1	3000	4000	t2_e2	0	+
        chr1	5000	6000	t2_e3	0	+
        chr1	1000	2000	t3_e2	0	-
        chr1	3000	4000	t3_e1	0	-
    >>> tr_reg_dic = {'s1': ['t1', 995, 1005], 's2': ['t2', 995, 1005], 's3': ['t2', 995, 1010], 's4': ['t3', 995, 1005]}
    >>> gen_exon_bed = "test_data/test_uniq_tr_reg.gen_ex.bed"
    >>> get_uniq_tr_regions(tr_reg_dic, gen_exon_bed)
    {'s1': ['t1', 995, 1005], 's3': ['t2', 995, 1010], 's4': ['t3', 995, 1005]}
    """
    assert tr_reg_dic, "given tr_reg_dic empty"
    # Generate .tmp files.
    random_id = uuid.uuid1()
    tmp1_bed = str(random_id) + ".tmp1.bed"
    random_id = uuid.uuid1()
    tmp2_bed = str(random_id) + ".tmp2.bed"
    random_id = uuid.uuid1()
    # NOTE(review): tmp3_bed is generated but never written, read, or
    # removed in this function (dead code).
    tmp3_bed = str(random_id) + ".tmp3.bed"
    # Read in exon region stats.
    # Exon ID -> chromosome / genomic start (0-based) / end / strand.
    id2chr_dic = {}
    id2s_dic = {}
    id2e_dic = {}
    id2pol_dic = {}
    exid2trid_dic = {}
    tr_exc_dic = {} # count exon numbers.
    with open(gen_exon_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            chr_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_pol = cols[5]
            id2chr_dic[site_id] = chr_id
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            id2pol_dic[site_id] = site_pol
            # Exon IDs have to be in transcriptid_e<nr> format.
            if re.search(".+_e\d", site_id):
                m = re.search("(.+)_e\d", site_id)
                tr_id = m.group(1)
                exid2trid_dic[site_id] = tr_id
                if tr_id not in tr_exc_dic:
                    tr_exc_dic[tr_id] = 1
                else:
                    tr_exc_dic[tr_id] += 1
            else:
                assert False, "site ID \"%s\" missing added _e exon number" %(site_id)
    f.close()
    assert exid2trid_dic, "exid2trid_dic empty (nothing read in from %s)" %(gen_exon_bed)
    # Output transcript sites.
    OUTTBED = open(tmp1_bed, "w")
    for reg_id in tr_reg_dic:
        OUTTBED.write("%s\t%i\t%i\t%s\t0\t+\n" %(tr_reg_dic[reg_id][0], tr_reg_dic[reg_id][1], tr_reg_dic[reg_id][2], reg_id))
    OUTTBED.close()
    # Output exon regions with transcript coordinates.
    # Exons are concatenated in exon-number order; new_s tracks where the
    # next exon starts on the transcript.
    OUTTBED = open(tmp2_bed, "w")
    for tr_id in tr_exc_dic:
        ex_c = tr_exc_dic[tr_id]
        new_s = 0
        for i in range(ex_c):
            i += 1
            ex_id = tr_id + "_e" + str(i)
            gen_s = id2s_dic[ex_id]
            gen_e = id2e_dic[ex_id]
            ex_len = gen_e - gen_s
            tr_s = new_s
            tr_e = new_s + ex_len
            OUTTBED.write("%s\t%i\t%i\t%s\t0\t+\n" % (tr_id,tr_s,tr_e,ex_id))
            new_s = tr_e
    OUTTBED.close()
    # Overlap transcript sites with transcript exon regions.
    # -wb reports, for each overlap, the A entry followed by the B entry.
    params = "-wb"
    check_cmd = "intersectBed -a " + tmp1_bed + " -b " + tmp2_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    # Read in transcript site overlaps with transcript exon regions.
    site2c_dic = {}
    # Dictionaries for later outputting unique + split hits separately.
    siteid2pol_dic = {}
    # NOTE(review): siteid2sc_dic is filled and partid2chrse_dic declared
    # but neither is read in this function.
    siteid2sc_dic = {}
    partid2chrse_dic = {}
    siteid2parts_dic = {}
    part2siteids_dic = {}
    # NOTE(review): assumes every site overlaps >= 1 exon region — an empty
    # intersectBed result would make the int() casts below fail.
    for line in output.split('\n'):
        cols = line.strip().split("\t")
        # Columns 0-5: site (A) in transcript coordinates, 6-11: exon (B).
        tr_id = cols[0]
        part_s = int(cols[1])
        part_e = int(cols[2])
        site_id = cols[3]
        site_sc = cols[4]
        ex_s = int(cols[7])
        ex_e = int(cols[8])
        ex_id = cols[9]
        ex_pol = id2pol_dic[ex_id]
        siteid2pol_dic[site_id] = ex_pol
        siteid2sc_dic[site_id] = site_sc
        # Count overlap parts per site (sites spanning exon borders are
        # reported as several overlap parts).
        if site_id in site2c_dic:
            site2c_dic[site_id] += 1
        else:
            site2c_dic[site_id] = 1
        # Hit part number.
        # NOTE(review): hit_c is computed but not used in this function.
        hit_c = site2c_dic[site_id]
        # Calculate genomic hit coordinates.
        # Plus strand case.
        gen_s = id2s_dic[ex_id] + part_s - ex_s
        gen_e = id2s_dic[ex_id] + part_e - ex_s
        # Minus strand case.
        if ex_pol == "-":
            gen_s = id2e_dic[ex_id] - part_e + ex_s
            gen_e = id2e_dic[ex_id] - part_s + ex_s
        # Store site_id parts.
        chrsepol = "%s,%i,%i,%s" %(id2chr_dic[ex_id],gen_s,gen_e, ex_pol)
        if chrsepol in part2siteids_dic:
            part2siteids_dic[chrsepol].append(site_id)
        else:
            part2siteids_dic[chrsepol] = [site_id]
        if site_id in siteid2parts_dic:
            siteid2parts_dic[site_id].append(chrsepol)
        else:
            siteid2parts_dic[site_id] = [chrsepol]
    # Sort parts and convert to string.
    # The concatenated sorted part string uniquely identifies the full
    # genomic region (including split layout) a site maps to.
    siteid2pstr_dic = {}
    for site_id in siteid2parts_dic:
        siteid2parts_dic[site_id].sort()
        siteid2pstr_dic[site_id] = ""
        for pstr in siteid2parts_dic[site_id]:
            siteid2pstr_dic[site_id] += pstr
    # Group site IDs by their part string (duplicates share one string).
    pstr2siteids_dic = {}
    for site_id in siteid2pstr_dic:
        pstr = siteid2pstr_dic[site_id]
        if pstr in pstr2siteids_dic:
            pstr2siteids_dic[pstr].append(site_id)
        else:
            pstr2siteids_dic[pstr] = [site_id]
    #print("siteid2pstr_dic:", siteid2pstr_dic)
    #print("pstr2siteids_dic:", pstr2siteids_dic)
    # Get deduplicated site IDs.
    # Keep the first site ID of every group with identical genomic regions.
    return_ids_dic = {}
    for pstr in pstr2siteids_dic:
        site_ids = pstr2siteids_dic[pstr]
        return_ids_dic[site_ids[0]] = 1
    if hits_tr_con_gen_reg_bed:
        GOUTBED = open(hits_tr_con_gen_reg_bed, "w")
    if hits_tr_con_gen_reg_split_bed:
        SPLITGOUTBED = open(hits_tr_con_gen_reg_split_bed, "w")
    return_tr_reg_dic = {}
    for site_id in tr_reg_dic:
        if site_id in return_ids_dic:
            return_tr_reg_dic[site_id] = tr_reg_dic[site_id]
            if hits_tr_con_gen_reg_bed:
                for split_reg in siteid2parts_dic[site_id]:
                    reg_info = split_reg.strip().split(",")
                    GOUTBED.write("%s\t%s\t%s\t%s\t0\t%s\n" %(reg_info[0], reg_info[1], reg_info[2], site_id, reg_info[3]))
            if hits_tr_con_gen_reg_split_bed:
                # Only sites mapping to > 1 genomic part (split hits).
                if len(siteid2parts_dic[site_id]) > 1:
                    for split_reg in siteid2parts_dic[site_id]:
                        reg_info = split_reg.strip().split(",")
                        SPLITGOUTBED.write("%s\t%s\t%s\t%s\t0\t%s\n" %(reg_info[0], reg_info[1], reg_info[2], site_id, reg_info[3]))
    if hits_tr_con_gen_reg_bed:
        GOUTBED.close()
    if hits_tr_con_gen_reg_split_bed:
        SPLITGOUTBED.close()
    assert return_tr_reg_dic, "return_tr_reg_dic empty (no regions to return)"
    assert len(return_ids_dic) == len(return_tr_reg_dic), "len(return_ids_dic) != len(return_tr_reg_dic)"
    # Remove tmp files.
    if os.path.exists(tmp1_bed):
        os.remove(tmp1_bed)
    if os.path.exists(tmp2_bed):
        os.remove(tmp2_bed)
    return return_tr_reg_dic
################################################################################
def get_uniq_tr_size(tr_sites_bed, gen_exon_bed):
    """
    Get unique transcript space size, which the transcript sites inside
    tr_sites_bed cover. For this map the sites to genome (to gen_exon_bed,
    with genomic exon regions inside), and merge overlapping regions.
    Then sum up and return the region size.
    tr_sites_bed:
        Transcript sites on transcripts.
    gen_exon_bed:
        genomic exon regions containing the transcript sites.
        Exon ID with format: transcriptid_e[1...n] with n == number of
        exons for transcript.
    test_uniq_tr_size.tr_sites.bed:
        t1	990	1020	s1	0	+
        t2	10	40	s2	0	+
        t3	990	1020	s3	0	+
    test_uniq_tr_size.gen_ex.bed:
        chr1	1000	2000	t1_e1	0	+
        chr1	3000	4000	t1_e2	0	+
        chr1	3000	4000	t2_e1	0	+
        chr1	5000	6000	t3_e2	0	-
        chr1	7000	8000	t3_e1	0	-
    >>> tr_sites_bed = "test_data/test_uniq_tr_size.tr_sites.bed"
    >>> gen_exon_bed = "test_data/test_uniq_tr_size.gen_ex.bed"
    >>> get_uniq_tr_size(tr_sites_bed, gen_exon_bed)
    80
    """
    # Generate .tmp files.
    # NOTE(review): both tmp names reuse the same random_id; the distinct
    # suffixes still keep them unique.
    random_id = uuid.uuid1()
    tmp1_bed = str(random_id) + ".tmp1.bed"
    tmp2_bed = str(random_id) + ".tmp2.bed"
    # Read in exon region stats.
    # Exon ID -> chromosome / genomic start (0-based) / end / strand.
    id2chr_dic = {}
    id2s_dic = {}
    id2e_dic = {}
    id2pol_dic = {}
    exid2trid_dic = {}
    tr_exc_dic = {} # count exon numbers.
    with open(gen_exon_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            chr_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_pol = cols[5]
            id2chr_dic[site_id] = chr_id
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            id2pol_dic[site_id] = site_pol
            # Exon IDs have to be in transcriptid_e<nr> format.
            if re.search(".+_e\d", site_id):
                m = re.search("(.+)_e\d", site_id)
                tr_id = m.group(1)
                exid2trid_dic[site_id] = tr_id
                if tr_id not in tr_exc_dic:
                    tr_exc_dic[tr_id] = 1
                else:
                    tr_exc_dic[tr_id] += 1
            else:
                assert False, "site ID \"%s\" missing added _e exon number" %(site_id)
    f.close()
    assert exid2trid_dic, "exid2trid_dic empty (nothing read in from %s)" %(gen_exon_bed)
    # Output exon regions with transcript coordinates.
    # Exons are concatenated in exon-number order; new_s tracks where the
    # next exon starts on the transcript.
    OUTTBED = open(tmp1_bed, "w")
    for tr_id in tr_exc_dic:
        ex_c = tr_exc_dic[tr_id]
        new_s = 0
        for i in range(ex_c):
            i += 1
            ex_id = tr_id + "_e" + str(i)
            gen_s = id2s_dic[ex_id]
            gen_e = id2e_dic[ex_id]
            ex_len = gen_e - gen_s
            tr_s = new_s
            tr_e = new_s + ex_len
            OUTTBED.write("%s\t%i\t%i\t%s\t0\t+\n" % (tr_id,tr_s,tr_e,ex_id))
            new_s = tr_e
    OUTTBED.close()
    # Overlap transcript sites with transcript exon regions.
    # -wb reports, for each overlap, the A entry followed by the B entry.
    params = "-wb"
    check_cmd = "intersectBed -a " + tr_sites_bed + " -b " + tmp1_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    # Read in transcript site overlaps with transcript exon regions.
    site2c_dic = {}
    # Dictionaries for later outputting unique + split hits separately.
    siteid2pol_dic = {}
    siteid2sc_dic = {}
    partid2chrse_dic = {}
    # NOTE(review): assumes every site overlaps >= 1 exon region — an empty
    # intersectBed result would make the int() casts below fail.
    for line in output.split('\n'):
        cols = line.strip().split("\t")
        # Columns 0-5: site (A) in transcript coordinates, 6-11: exon (B).
        tr_id = cols[0]
        part_s = int(cols[1])
        part_e = int(cols[2])
        site_id = cols[3]
        site_sc = cols[4]
        ex_s = int(cols[7])
        ex_e = int(cols[8])
        ex_id = cols[9]
        ex_pol = id2pol_dic[ex_id]
        siteid2pol_dic[site_id] = ex_pol
        siteid2sc_dic[site_id] = site_sc
        # Count overlap parts per site (sites spanning exon borders are
        # reported as several overlap parts).
        if site_id in site2c_dic:
            site2c_dic[site_id] += 1
        else:
            site2c_dic[site_id] = 1
        # Hit part number.
        hit_c = site2c_dic[site_id]
        # Calculate genomic hit coordinates.
        # Plus strand case.
        gen_s = id2s_dic[ex_id] + part_s - ex_s
        gen_e = id2s_dic[ex_id] + part_e - ex_s
        # Minus strand case.
        if ex_pol == "-":
            gen_s = id2e_dic[ex_id] - part_e + ex_s
            gen_e = id2e_dic[ex_id] - part_s + ex_s
        # part ID.
        part_id = site_id + "_p" + str(hit_c)
        # Store chrse for each part ID.
        # NOTE(review): chrse is assigned but unused (the same format
        # expression is repeated for partid2chrse_dic).
        chrse = "%s\t%i\t%i" %(id2chr_dic[ex_id],gen_s,gen_e)
        partid2chrse_dic[part_id] = "%s\t%i\t%i" %(id2chr_dic[ex_id],gen_s,gen_e)
    # Output all hits (full and split).
    ALLHITSBED = open(tmp2_bed, "w")
    for site_id in site2c_dic:
        hit_c = site2c_dic[site_id]
        site_pol = siteid2pol_dic[site_id]
        site_sc = siteid2sc_dic[site_id]
        # For unique hit use site ID, for split hits use part IDs.
        if hit_c == 1:
            # Unique hits.
            part_id = site_id + "_p1"
            ALLHITSBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],site_id,site_sc,site_pol))
        else:
            # Split hits.
            for i in range(hit_c):
                i += 1
                part_id = site_id + "_p" + str(i)
                ALLHITSBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],part_id,site_sc,site_pol))
    ALLHITSBED.close()
    # Merge overlapping genomic regions and sum up their lengths.
    reg_len_sum = get_uniq_gen_size(tmp2_bed)
    # Remove tmp files.
    if os.path.exists(tmp1_bed):
        os.remove(tmp1_bed)
    if os.path.exists(tmp2_bed):
        os.remove(tmp2_bed)
    return reg_len_sum
################################################################################
def get_uniq_gen_size(gen_sites_bed):
    """
    Return the total genomic space (in nt) covered by the sites inside
    gen_sites_bed, with overlapping regions counted only once.
    The sites are sorted and merged strand-specifically via mergeBed.
    >>> gen_sites_bed = "test_data/test_gen_size.bed"
    >>> get_uniq_gen_size(gen_sites_bed)
    2500
    """
    merge_params = '-s -c 4 -o distinct -delim ";"'
    merge_cmd = "sort -k1,1 -k2,2n " + gen_sites_bed + " | mergeBed -i stdin " + merge_params
    merged_out = subprocess.getoutput(merge_cmd)
    # Sum up merged region lengths (end - start).
    reg_len_sum = 0
    for merged_line in merged_out.split('\n'):
        fields = merged_line.strip().split("\t")
        reg_len_sum += int(fields[2]) - int(fields[1])
    assert reg_len_sum, "no merged regions obtained from \"%s\"" %(merge_cmd)
    return reg_len_sum
################################################################################
def bed_sort_file(in_bed, out_bed,
                  custom_params_str=False):
    """
    Sort the in_bed .bed file with command line sort and write the
    sorted rows to out_bed.
    custom_params_str:
        Sort options to use instead of the default "-k1,1 -k2,2n".
    """
    sort_params = custom_params_str if custom_params_str else '-k1,1 -k2,2n'
    sort_cmd = "sort " + sort_params + " " + in_bed + " > " + out_bed
    output = subprocess.getoutput(sort_cmd)
    # sort prints nothing on success; any output means something went wrong.
    assert not output, "sort is complaining:\n%s\n%s" %(sort_cmd, output)
################################################################################
def gtf_extract_exon_bed(in_gtf, out_bed,
                         out_intron_bed=False,
                         add_exon_id=False,
                         correct_min_ex_order=False,
                         tr2exc_dic=False,
                         tr_ids_dic=False):
    """
    Given a .gtf file with exon features, extract exon regions and store in
    .bed file. Optionally, a dictionary of transcript IDs can be provided,
    meaning that only exon regions from the given transcripts will be extracted.
    If out_intron_bed is set, an intronic regions .bed file will also be
    extracted, based on the exonic regions .bed information.
    Output .bed will look like this (note column 4 ID format with transcript
    ID followed by _e+exon_number):
    chr1	1000	2000	ENST001_e1	0	+
    chr1	3000	4000	ENST001_e2	0	+
    chr1	8000	9000	ENST002_e1	0	-
    chr1	6000	7000	ENST002_e2	0	-
    ...
    NOTE that function has been tested with .gtf files from Ensembl. .gtf files
    from different sources sometimes have a slightly different format, which
    could lead to incompatibilities / errors. See test files for format that
    works.
    Some tested Ensembl GTF files:
    Homo_sapiens.GRCh38.97.gtf.gz
    Mus_musculus.GRCm38.81.gtf.gz
    correct_min_ex_order:
        If set reverse number of exons for minus strand. This is necessary
        for some GTF files, which for minus strand transcripts assign lowest
        number to most upstream exon (same as for plus strand).
    tr2exc_dic:
        Transcript ID -> exon count mapping, required if
        correct_min_ex_order is set (used to invert the exon numbering).
    tr_ids_dic:
        If set, restrict output to exons of these transcript IDs.
    add_exon_id:
        NOTE(review): parameter is accepted but not used in this function.
    >>> in_gtf = "test_data/map_test_in.gtf"
    >>> exp_out_bed = "test_data/gtf_exon_out_exp.bed"
    >>> exp_out_intron_bed = "test_data/gtf_intron_out_exp.bed"
    >>> out_bed = "test_data/gtf_exon_out.bed"
    >>> out_intron_bed = "test_data/gtf_intron_out.bed"
    >>> gtf_extract_exon_bed(in_gtf, out_bed, out_intron_bed=out_intron_bed)
    >>> diff_two_files_identical(out_bed, exp_out_bed)
    True
    >>> diff_two_files_identical(out_intron_bed, exp_out_intron_bed)
    True
    """
    # For reverse ording we need to have exon numbers.
    if correct_min_ex_order:
        assert tr2exc_dic, "tr2exc_dic needed if correct_min_ex_order True"
    # Output genomic exon regions.
    OUTBED = open(out_bed, "w")
    # Read in exon features from GTF file.
    c_gtf_ex_feat = 0
    # Start end coordinates of exons.
    exon_e_dic = {}
    exon_s_dic = {}
    # Transcript stats.
    tr2pol_dic = {}
    tr2chr_dic = {}
    # dic for sanity checking exon number order.
    tr2exon_nr_dic = {}
    proc_tr_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        feat_pol = cols[6]
        infos = cols[8]
        # Only process exon features.
        if not feature == "exon":
            continue
        # Restrict to standard chromosomes.
        # check_convert_chr_id returns a falsy value for non-standard IDs.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Make start coordinate 0-base (BED standard).
        feat_s = feat_s - 1
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        # Extract exon number.
        m = re.search('exon_number "(\d+?)"', infos)
        # Try Gencode encoding.
        if not m:
            m = re.search('exon_number (\d+?);', infos)
        assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        exon_nr = int(m.group(1))
        # Check if transcript ID is in transcript dic.
        if tr_ids_dic:
            if transcript_id not in tr_ids_dic:
                # Stop early once all requested transcripts were seen.
                if len(tr_ids_dic) == len(proc_tr_dic):
                    break
                else:
                    continue
            proc_tr_dic[transcript_id] = 1
        # Store transcript stats.
        tr2pol_dic[transcript_id] = feat_pol
        tr2chr_dic[transcript_id] = chr_id
        # Check whether exon numbers are incrementing for each transcript ID.
        if not transcript_id in tr2exon_nr_dic:
            tr2exon_nr_dic[transcript_id] = exon_nr
        else:
            assert tr2exon_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
            tr2exon_nr_dic[transcript_id] = exon_nr
        # Reverse ordering for minus strand.
        if correct_min_ex_order and feat_pol == "-":
            assert transcript_id in tr2exc_dic, "transcript ID %s not in tr2exc_dic" %(transcript_id)
            exon_nr = tr2exc_dic[transcript_id] - exon_nr + 1
            assert exon_nr >= 1, "exon number < 1 assigned (%i) for transcript ID %s (# exons: %i)" %(exon_nr, transcript_id, tr2exc_dic[transcript_id])
        # Construct exon ID.
        exon_id = transcript_id + "_e" + str(exon_nr)
        # Count exon entry.
        c_gtf_ex_feat += 1
        # Store infos.
        exon_s_dic[exon_id] = feat_s
        exon_e_dic[exon_id] = feat_e
        # Output genomic exon region.
        OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,feat_s,feat_e,exon_id,feat_pol))
    OUTBED.close()
    f.close()
    # Check for read-in features.
    assert c_gtf_ex_feat, "no exon features read in from \"%s\"" %(in_gtf)
    # Output intron .bed.
    if out_intron_bed:
        tr2intron_nr_dic = {}
        OUTBED = open(out_intron_bed, "w")
        for tr_id in tr2pol_dic:
            tr_pol = tr2pol_dic[tr_id]
            chr_id = tr2chr_dic[tr_id]
            # Last stored exon_number == exon count (numbers increase).
            tr_c = tr2exon_nr_dic[tr_id]
            intron_c = 0
            tr2intron_nr_dic[tr_id] = 0
            # 1-exon transcripts, no introns.
            if tr_c == 1:
                continue
            ex_list = []
            for i in range(tr_c):
                ex_nr = i + 1
                ex_id = tr_id + "_e" + str(ex_nr)
                ex_list.append(ex_id)
            # Each pair of adjacent exons encloses one intron.
            for i in range(len(ex_list)):
                ex1i = i
                ex2i = i + 1
                # At last exon, no more introns to add.
                if ex2i == len(ex_list):
                    break
                ex1id = ex_list[ex1i]
                ex2id = ex_list[ex2i]
                ex1s = exon_s_dic[ex1id]
                ex2s = exon_s_dic[ex2id]
                ex1e = exon_e_dic[ex1id]
                ex2e = exon_e_dic[ex2id]
                # Plus case.
                intron_s = ex1e
                intron_e = ex2s
                # Minus strand: exon 1 is most downstream (see docstring
                # example), so the intron lies between end of exon 2 and
                # start of exon 1.
                if tr_pol == "-":
                    intron_s = ex2e
                    intron_e = ex1s
                intron_id = tr_id + "_i" + str(ex2i)
                intron_c += 1
                OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,intron_s,intron_e,intron_id,tr_pol))
            tr2intron_nr_dic[tr_id] = intron_c
        OUTBED.close()
        # Sanity check exon + intron numbers.
        for tr_id in tr2exon_nr_dic:
            exon_nr = tr2exon_nr_dic[tr_id]
            intron_nr = tr2intron_nr_dic[tr_id]
            assert (exon_nr-1) == intron_nr, "intron number != exon number - 1 for \"%s\" (%i != %i - 1)" %(tr_id, intron_nr, exon_nr)
################################################################################
def gtf_extract_exon_numbers(in_gtf,
                             tr_ids_dic=False):
    """
    Given a .gtf file with exon features, return dictionary mapping
    transcript ID -> number of exon features of the transcript.
    tr_ids_dic:
        Give tr_ids_dic dictionary with transcript IDs to keep.
    >>> in_gtf = "test_data/test_border_annot.gtf"
    >>> tr_ids_dic = {'ENST1': 1, 'ENST2': 1, 'ENST3': 1}
    >>> gtf_extract_exon_numbers(in_gtf, tr_ids_dic=tr_ids_dic)
    {'ENST1': 1, 'ENST2': 2, 'ENST3': 2}
    """
    tr2exc_dic = {}      # transcript ID -> exon count.
    last_ex_nr_dic = {}  # transcript ID -> last seen exon_number (order check).
    # Open GTF either as .gz or as plain text file.
    if re.search(r".+\.gz$", in_gtf):
        gtf_in = gzip.open(in_gtf, 'rt')
    else:
        gtf_in = open(in_gtf, "r")
    for line in gtf_in:
        # Skip header lines.
        if re.search("^#", line):
            continue
        fields = line.strip().split("\t")
        if fields[2] != "exon":
            continue
        # Keep standard chromosomes only (helper returns falsy otherwise).
        std_chr_id = check_convert_chr_id(fields[0])
        if not std_chr_id:
            continue
        infos = fields[8]
        # Extract transcript ID.
        tid_m = re.search('transcript_id "(.+?)"', infos)
        assert tid_m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = tid_m.group(1)
        # Extract exon number: Ensembl style first, then Gencode style.
        exnr_m = re.search(r'exon_number "(\d+?)"', infos)
        if not exnr_m:
            exnr_m = re.search(r'exon_number (\d+?);', infos)
        assert exnr_m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        exon_nr = int(exnr_m.group(1))
        # Optionally restrict to given transcript IDs.
        if tr_ids_dic and transcript_id not in tr_ids_dic:
            continue
        # Count exon features per transcript.
        tr2exc_dic[transcript_id] = tr2exc_dic.get(transcript_id, 0) + 1
        # Exon numbers have to increase within each transcript.
        if transcript_id in last_ex_nr_dic:
            assert last_ex_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
        last_ex_nr_dic[transcript_id] = exon_nr
    gtf_in.close()
    # Check for read-in content.
    assert tr2exc_dic, "no exon features read in from \"%s\"" %(in_gtf)
    return tr2exc_dic
################################################################################
def gtf_check_exon_order(in_gtf):
    """
    Check exon_number ordering. Return True if ordering for minus strand
    and plus strand is different (i.e. for minus exon_number 1 is most downstream).
    Return False, if upstream to downstream order numbering is used for both
    plus and minus strand transcripts.
    Raises AssertionError if no minus-strand transcript with >= 2 exons
    is found (the ordering cannot be decided then).
    >>> test_true_gtf = "test_data/test_order_true.gtf"
    >>> test_false_gtf = "test_data/test_order_false.gtf"
    >>> gtf_check_exon_order(test_true_gtf)
    True
    >>> gtf_check_exon_order(test_false_gtf)
    False
    """
    tr2exon_nr_dic = {}  # last seen exon_number per transcript (order check).
    tr2exon_s_dic = {}   # first seen exon start per transcript.
    check = None  # None == undecided (replaces magic sentinel value 6666).
    # Open GTF either as .gz or as text file.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        feature = cols[2]
        if feature != "exon":
            continue
        feat_s = int(cols[3])
        feat_pol = cols[6]
        infos = cols[8]
        # Transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        # Exon number (Ensembl style, then Gencode style).
        m = re.search(r'exon_number "(\d+?)"', infos)
        if not m:
            m = re.search(r'exon_number (\d+?);', infos)
        assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        exon_nr = int(m.group(1))
        # Check whether exon numbers are incrementing for each transcript ID.
        if transcript_id not in tr2exon_nr_dic:
            tr2exon_nr_dic[transcript_id] = exon_nr
        else:
            assert tr2exon_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
            tr2exon_nr_dic[transcript_id] = exon_nr
        # Compare start of this exon against first seen exon of transcript.
        if transcript_id not in tr2exon_s_dic:
            tr2exon_s_dic[transcript_id] = feat_s
        else:
            if feat_pol == "-":
                if tr2exon_s_dic[transcript_id] > feat_s:
                    # Decreasing starts with increasing exon numbers:
                    # numbering direction differs per strand. Keep scanning
                    # (a later exon could still contradict this).
                    check = True
                else:
                    # Same numbering direction on both strands; decided.
                    check = False
                    break
            elif feat_pol == "+":
                assert tr2exon_s_dic[transcript_id] < feat_s, "transcript ID \"%s\" on plus strand but exon region coordinates are decreasing" %(transcript_id)
    f.close()
    assert check is not None, "no minus strand exon regions found in GTF file %s" %(in_gtf)
    return check
################################################################################
def gtf_extract_unique_exon_bed(in_gtf, out_bed,
                                no_tbt_filter=False,
                                tr_ids_dic=False,
                                biotype_filter=True,
                                correct_min_ex_order=False,
                                tr2exc_dic=False,
                                reject_tr_bt_dic=False,
                                gene_feat_check=False,
                                next2exids_dic=None,
                                exid2trid_dic=None,
                                trid2exc_dic=None,
                                trid2tsl_dic=None,
                                trid2tbt_dic=None,
                                trid2gid_dic=None,
                                trid2gna_dic=None,
                                trid2gbt_dic=None,
                                trid2len_dic=None,
                                next2reg_dic=None):
    """
    Given a .gtf file with exon features, extract exon unique (!) regions.
    Since the Ensembl exon_id regions are not unique regarding their genomic
    coordinates, create own IDs each representing one unique genomic region
    (unique start+end+strand info).
    Output .bed will look like this (column 4 ID == new exon ID):
    chr1	1000	2000	NEXT1	0	+
    chr1	3000	4000	NEXT2	0	+
    chr1	8000	9000	NEXT3	0	-
    chr1	6000	7000	NEXT4	0	-
    ...
    Note there are differences in GTF format between Gencode, Ensembl,
    and NCBI ...
    GTF content looking for:
    gene_id "id";
    transcript_id "id";
    transcript_biotype "id";
    ...
    correct_min_ex_order:
        If set reverse number of exons for minus strand. This is necessary
        for some GTF files, which for minus strand transcripts assign lowest
        number to most upstream exon (same as for plus strand).
    Ensembl GTF example:
    1	havana	gene	11869	14409	.	+	.	gene_id "ENSG00000223972"; gene_version "5"; gene_name "DDX11L1"; gene_source "havana"; gene_biotype "transcribed_unprocessed_pseudogene";
    1	havana	transcript	11869	14409	.	+	.	gene_id "ENSG00000223972"; gene_version "5"; transcript_id "ENST00000456328"; transcript_version "2"; gene_name "DDX11L1"; gene_source "havana"; gene_biotype "transcribed_unprocessed_pseudogene"; transcript_name "DDX11L1-202"; transcript_source "havana"; transcript_biotype "processed_transcript"; tag "basic"; transcript_support_level "1";
    1	havana	exon	11869	12227	.	+	.	gene_id "ENSG00000223972"; gene_version "5"; transcript_id "ENST00000456328"; transcript_version "2"; exon_number "1"; gene_name "DDX11L1"; gene_source "havana"; gene_biotype "transcribed_unprocessed_pseudogene"; transcript_name "DDX11L1-202"; transcript_source "havana"; transcript_biotype "processed_transcript"; exon_id "ENSE00002234944"; exon_version "1"; tag "basic"; transcript_support_level "1";
    1	havana	exon	12613	12721	.	+	.	gene_id "ENSG00000223972"; gene_version "5"; transcript_id "ENST00000456328"; transcript_version "2"; exon_number "2"; gene_name "DDX11L1"; gene_source "havana"; gene_biotype "transcribed_unprocessed_pseudogene"; transcript_name "DDX11L1-202"; transcript_source "havana"; transcript_biotype "processed_transcript"; exon_id "ENSE00003582793"; exon_version "1"; tag "basic"; transcript_support_level "1";
    1	havana	exon	13221	14409	.	+	.	gene_id "ENSG00000223972"; gene_version "5"; transcript_id "ENST00000456328"; transcript_version "2"; exon_number "3"; gene_name "DDX11L1"; gene_source "havana"; gene_biotype "transcribed_unprocessed_pseudogene"; transcript_name "DDX11L1-202"; transcript_source "havana"; transcript_biotype "processed_transcript"; exon_id "ENSE00002312635"; exon_version "1"; tag "basic"; transcript_support_level "1";
    StringTie custom GTF example:
    chr1	StringTie	transcript	149054033	149082430	.	-.	transcript_id "ENST00000613595.4"; gene_id "MSTRG.1496"; gene_name "NBPF9"; xloc "XLOC_004117"; ref_gene_id "ENSG00000269713.7"; cmp_ref "ENST00000613595.4"; class_code "="; tss_id "TSS10003";
    chr1	StringTie	exon	149054033	149055899	.	-	.transcript_id "ENST00000613595.4"; gene_id "MSTRG.1496"; exon_number "1";
    chr1	StringTie	exon	149056512	149056620	.	-	.transcript_id "ENST00000613595.4"; gene_id "MSTRG.1496"; exon_number "2";
    chr1	StringTie	exon	149060523	149060695	.	-	.transcript_id "ENST00000613595.4"; gene_id "MSTRG.1496"; exon_number "3";
    chr1	StringTie	exon	149061332	149061383	.	-	.transcript_id "ENST00000613595.4"; gene_id "MSTRG.1496"; exon_number "4";
    chr19	StringTie	transcript	3359583	3469217	.	+	.	transcript_id "MSTRG.11612.3"; gene_id "MSTRG.11612"; gene_name "NFIC"; xloc "XLOC_025438"; cmp_ref "ENST00000641145.1"; class_code "j"; tss_id "TSS63599";
    chr19	StringTie	exon	3359583	3359685	.	+	.	transcript_id "MSTRG.11612.3"; gene_id "MSTRG.11612"; exon_number "1";
    chr19	StringTie	exon	3381712	3382243	.	+	.	transcript_id "MSTRG.11612.3"; gene_id "MSTRG.11612"; exon_number "2";
    chr19	StringTie	exon	3425106	3425177	.	+	.	transcript_id "MSTRG.11612.3"; gene_id "MSTRG.11612"; exon_number "3";
    chr19	StringTie	exon	3433518	3433592	.	+	.	transcript_id "MSTRG.11612.3"; gene_id "MSTRG.11612"; exon_number "4";
    reject_tr_bt_dic = {
        "nonsense_mediated_decay" : 1,
        "retained_intron" : 1,
        "non_stop_decay" : 1,
        "processed_transcript" : 1
        }
    The various optional *_dic parameters are filled in place (caller passes
    empty dicts) with per-transcript / per-exon metadata extracted on the way.
    """
    # For reverse ordering we need to have exon numbers.
    if correct_min_ex_order:
        assert tr2exc_dic, "tr2exc_dic needed if correct_min_ex_order True"
    # dic for sanity checking exon number order.
    tr2exon_nr_dic = {}
    # Reject transcripts with these biotypes (default set, unless custom given).
    if not reject_tr_bt_dic:
        reject_tr_bt_dic = {
            "nonsense_mediated_decay" : 1,
            "retained_intron" : 1,
            "non_stop_decay" : 1,
            "processed_transcript" : 1
        }
    # no_tbt_filter overrides any rejection list (keep all biotypes).
    if no_tbt_filter:
        reject_tr_bt_dic = {}
    # Remember rejected transcript IDs.
    reject_tr_ids_dic = {}
    # Seen transcript IDs.
    proc_tr_dic = {}
    # Only these GTF feature types are processed; everything else is skipped.
    ok_features_dic = {'transcript': 1, 'exon': 1}
    # Store exon ID region data.
    reg_str_dic = {}
    reg_str_exon_ids_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        feat_pol = cols[6]
        infos = cols[8]
        if feature not in ok_features_dic:
            continue
        # Gene ID.
        m = re.search('gene_id "(.+?)"', infos)
        assert m, "gene_id entry missing for \"exon\" feature in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_id = m.group(1)
        # Transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing for \"exon\" feature in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        # "transcript" features: collect per-transcript metadata and apply
        # the transcript biotype filter (exons of rejected IDs are skipped later).
        if feature == "transcript":
            if tr_ids_dic:
                if transcript_id not in tr_ids_dic:
                    continue
            # Only for GTF files with gene feature present (GENCODE, Ensembl .. ).
            gene_name = "-"
            gene_biotype = "-"
            tr_biotype = "-"
            m = re.search('gene_name "(.+?)"', infos)
            if m:
                gene_name = m.group(1)
            else:
                if gene_feat_check:
                    assert False, "gene_name entry missing for \"exon\" feature in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            m = re.search('gene_biotype "(.+?)"', infos)
            # Try Gencode encoding.
            if not m:
                m = re.search('gene_type "(.+?)"', infos)
            if m:
                gene_biotype = m.group(1)
            else:
                if gene_feat_check:
                    assert False, "gene_biotype or gene_type entry missing for \"exon\" feature in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            m = re.search('transcript_biotype "(.+?)"', infos)
            # Try Gencode encoding.
            if not m:
                m = re.search('transcript_type "(.+?)"', infos)
            if m:
                tr_biotype = m.group(1)
            else:
                if gene_feat_check:
                    assert False, "transcript_biotype or transcript_type entry missing for \"exon\" feature in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            # Transcript biotype filter.
            if biotype_filter:
                if tr_biotype in reject_tr_bt_dic:
                    reject_tr_ids_dic[transcript_id] = 1
                    continue
            # Get transcript support level (TSL).
            m = re.search('transcript_support_level "(.+?)"', infos)
            tr_tsl = "NA"
            if m:
                tr_tsl = m.group(1)
            # Gencode basic tag there?
            gc_basic = False
            if re.search('tag "basic"', infos):
                gc_basic = True
            ccds = False
            if re.search('tag "CCDS"', infos):
                ccds = True
            # Fill the optional metadata mappings requested by the caller.
            if trid2gid_dic is not None:
                trid2gid_dic[transcript_id] = gene_id
            if trid2gna_dic is not None:
                trid2gna_dic[transcript_id] = gene_name
            if trid2gbt_dic is not None:
                trid2gbt_dic[transcript_id] = gene_biotype
            if trid2tsl_dic is not None:
                trid2tsl_dic[transcript_id] = [tr_tsl, gc_basic, ccds]
            if trid2tbt_dic is not None:
                trid2tbt_dic[transcript_id] = tr_biotype
        # "exon" features: record unique genomic exon regions.
        elif feature == "exon":
            # If specified, only extract exons from these transcripts.
            if tr_ids_dic:
                if transcript_id not in tr_ids_dic:
                    # All wanted transcripts seen -> stop early.
                    # NOTE(review): assumes exons are grouped by transcript
                    # in the GTF (standard for Ensembl/GENCODE) — confirm
                    # for custom GTFs.
                    if len(tr_ids_dic) == len(proc_tr_dic):
                        break
                    else:
                        continue
            proc_tr_dic[transcript_id] = 1
            # Transcript biotype filter.
            if transcript_id in reject_tr_ids_dic:
                continue
            # Restrict to standard chromosomes.
            new_chr_id = check_convert_chr_id(chr_id)
            if not new_chr_id:
                continue
            else:
                chr_id = new_chr_id
            # Make start coordinate 0-base (BED standard).
            feat_s = feat_s - 1
            # Feature length.
            feat_l = feat_e - feat_s
            # Transcript length (sum of exon lengths).
            if trid2len_dic is not None:
                if transcript_id in trid2len_dic:
                    trid2len_dic[transcript_id] += feat_l
                else:
                    trid2len_dic[transcript_id] = feat_l
            # Extract exon number.
            m = re.search('exon_number "(\d+?)"', infos)
            # Try Gencode encoding.
            if not m:
                m = re.search('exon_number (\d+?);', infos)
            assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            exon_nr = int(m.group(1))
            # Check whether exon numbers are incrementing for each transcript ID.
            if not transcript_id in tr2exon_nr_dic:
                tr2exon_nr_dic[transcript_id] = exon_nr
            else:
                assert tr2exon_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
                tr2exon_nr_dic[transcript_id] = exon_nr
            # Reverse ordering for minus strand (exon 1 = most downstream).
            if correct_min_ex_order and feat_pol == "-":
                assert transcript_id in tr2exc_dic, "transcript ID %s not in tr2exc_dic" %(transcript_id)
                exon_nr = tr2exc_dic[transcript_id] - exon_nr + 1
                assert exon_nr >= 1, "exon number < 1 assigned (%i) for transcript ID %s (# exons: %i)" %(exon_nr, transcript_id, tr2exc_dic[transcript_id])
            # Construct exon ID (transcript ID + exon number).
            exon_id = transcript_id + "_e" + str(exon_nr)
            # Store exon data, keyed by the unique genomic region string.
            check_reg_str = "%s,%i,%i,%s" %(chr_id,feat_s,feat_e,feat_pol)
            reg_str_dic[check_reg_str] = 1
            # Store exon IDs for this particular region.
            if check_reg_str in reg_str_exon_ids_dic:
                reg_str_exon_ids_dic[check_reg_str].append(exon_id)
            else:
                reg_str_exon_ids_dic[check_reg_str] = [exon_id]
            # Exon ID to transcript ID mapping.
            if exid2trid_dic is not None:
                exid2trid_dic[exon_id] = transcript_id
    f.close()
    assert reg_str_dic, "no exon regions read in"
    if reject_tr_ids_dic:
        print("# rejected transcript IDs (biotype filter): %i" %(len(reject_tr_ids_dic)))
    # Store transcript exon numbers (highest exon number seen per transcript).
    if trid2exc_dic is not None:
        for tr_id in tr2exon_nr_dic:
            trid2exc_dic[tr_id] = tr2exon_nr_dic[tr_id]
    # Output genomic exon regions, assigning NEXT IDs in insertion order.
    OUTBED = open(out_bed, "w")
    c_ex = 0
    for reg_str in reg_str_dic:
        cols = reg_str.split(",")
        c_ex += 1
        ex_id = "NEXT" + str(c_ex)
        if next2exids_dic is not None:
            next2exids_dic[ex_id] = reg_str_exon_ids_dic[reg_str]
        if next2reg_dic is not None:
            next2reg_dic[ex_id] = [cols[0], int(cols[1]), int(cols[2]), cols[3]]
        OUTBED.write("%s\t%s\t%s\t%s\t0\t%s\n" % (cols[0], cols[1], cols[2], ex_id, cols[3]))
    OUTBED.close()
################################################################################
def get_exons_fully_ol_with_isr_introns(isr_intron_reg_dic, next2reg_dic,
                                        next_ids_dic=None,
                                        reg2cov_dic=None,
                                        max_read2isr_ratio=8,
                                        next2top_isrn_dic=False,
                                        tmp_out_folder=False,
                                        min_isrc=5):
    """
    Get exons fully overlapping with ISR-containing introns
    (i.e., the span of these introns). Returns dictionary of NEXT exon IDs
    (ID -> 1) to remove.
    isr_intron_reg_dic:
        Intronic region string "chrid,s,e,pol" -> ISR count
    next2reg_dic:
        NEXT exon ID -> genomic region
        next2reg_dic = [chrid, s, e, pol]
    next_ids_dic:
        Dictionary of exon (NEXT) IDs to include in overlap calculations.
    min_isrc:
        Minimum ISR count an intronic region needs to be included in
        overlap calculation.
    reg2cov_dic:
        ["chr,s,e,pol"] -> read overlap count / coverage.
        Used to get read coverage of intron regions.
    max_read2isr_ratio:
        Maximum ratio of # total reads / # ISR reads, for filtering out
        fully overlapping exons. ISR introns with smaller ratios are not
        considered for filtering.
    next2top_isrn_dic:
        NEXT ID to top ISRN count mapping. If given, a fully overlapping
        exon with # ISRN reads >= # ISR reads of the overlapping intron
        does not get returned.
    tmp_out_folder:
        Provide output folder to store tmp files in.
    """
    assert isr_intron_reg_dic, "isr_intron_reg_dic empty"
    assert next2reg_dic, "next2reg_dic empty"
    # Unique temporary BED file names (allows parallel invocations).
    next_tmp_bed = str(uuid.uuid1()) + ".next_regions.tmp.bed"
    isr_intron_tmp_bed = str(uuid.uuid1()) + ".intron_regions.tmp.bed"
    if tmp_out_folder:
        next_tmp_bed = tmp_out_folder + "/" + next_tmp_bed
        isr_intron_tmp_bed = tmp_out_folder + "/" + isr_intron_tmp_bed
    # Write NEXT exon regions (optionally restricted to next_ids_dic) to BED.
    bed_write_reg_list_to_file(next2reg_dic, next_tmp_bed,
                               id2out_dic=next_ids_dic)
    # Write ISR intron regions passing the filters to BED.
    reg_out_c = 0
    with open(isr_intron_tmp_bed, "w") as ISROUTBED:
        for intron_reg in isr_intron_reg_dic:
            cols = intron_reg.split(",")
            chr_id = cols[0]
            reg_s = cols[1]
            reg_e = cols[2]
            reg_pol = cols[3]
            # Number of ISR reads in the intron_reg.
            reg_sc = isr_intron_reg_dic[intron_reg]
            if reg_sc < min_isrc:
                continue
            if reg2cov_dic is not None and intron_reg in reg2cov_dic:
                # Skip introns whose total-read / ISR-read ratio is too high.
                c_intron_reads = reg2cov_dic[intron_reg]
                read2isr_ratio = c_intron_reads / reg_sc
                if read2isr_ratio >= max_read2isr_ratio:
                    continue
            reg_out_c += 1
            reg_id = "intron_%i" %(reg_out_c)
            ISROUTBED.write("%s\t%s\t%s\t%s\t%i\t%s\n" %(chr_id, reg_s, reg_e, reg_id, reg_sc, reg_pol))
    if not reg_out_c:
        # BUGFIX: previously returned here without closing the BED handle
        # and without removing the two temporary files (handle + file leak).
        if os.path.exists(isr_intron_tmp_bed):
            os.remove(isr_intron_tmp_bed)
        if os.path.exists(next_tmp_bed):
            os.remove(next_tmp_bed)
        return {}
    # Overlap calculation to get exons fully containing introns.
    params = " -s -F 1.0 -wb"
    check_cmd = "intersectBed -a " + next_tmp_bed + " -b " + isr_intron_tmp_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    """
    -F 1.0
    Only full overlaps (fraction of B).
    Example:
    $ intersectBed -a exons.bed -b introns.bed -s -F 1.0 -wb
    chr1	3400	3600	e2	0	+	chr1	3400	3600	i1	5	+
    """
    rem_nexts_dic = {}
    # If there are full overlaps.
    if output:
        for line in output.split('\n'):
            cols = line.strip().split("\t")
            next_id = cols[3]
            intron_id = cols[9]
            isrc = int(cols[10])
            if next2top_isrn_dic:
                assert next_id in next2top_isrn_dic, "NEXT ID %s not in next2top_isrn_dic" %(next_id)
                # Keep exon if its own ISRN support >= intron ISR support.
                next_isrn = next2top_isrn_dic[next_id]
                if next_isrn >= isrc:
                    continue
            rem_nexts_dic[next_id] = 1
    if os.path.exists(isr_intron_tmp_bed):
        os.remove(isr_intron_tmp_bed)
    if os.path.exists(next_tmp_bed):
        os.remove(next_tmp_bed)
    return rem_nexts_dic
################################################################################
def output_ref_lengths(ref_len_dic, ref_len_out):
    """
    Output reference lengths to file ref_len_out, one tab-separated
    "ref_id<TAB>length" row per reference.
    ref_len_dic:
        reference ID -> reference length (int).
    Output file format:
    chr1	210
    chr2	200
    ...
    """
    assert ref_len_dic, "ref_len_dic empty"
    # Context manager guarantees the handle is closed even on write errors.
    with open(ref_len_out, "w") as OUTREFLEN:
        for ref_id in ref_len_dic:
            ref_len = ref_len_dic[ref_id]
            OUTREFLEN.write("%s\t%i\n" %(ref_id, ref_len))
################################################################################
def read_in_id_val_cols(in_file,
                        id_col=3,
                        val_col=4,
                        val_type=1):
    """
    Read ID -> value mapping from tab-separated file into dictionary.
    id_col:
        0-based column index of the IDs (default 3, BED name column).
    val_col:
        0-based column index of the values (default 4, BED score column).
    val_type:
        1 : convert value to float
        2 : convert value to int
        (other values keep the string as is)
    Returns id_str -> value dictionary. IDs must be unique in the file.
    """
    id2val_dic = {}
    # with-statement replaces the former no-op "f.closed" cleanup.
    with open(in_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            id_str = cols[id_col]
            val = cols[val_col]
            if val_type == 1:
                val = float(val)
            elif val_type == 2:
                val = int(val)
            assert id_str not in id2val_dic, "ID %s (col: %i) appears > 1 in file %s" %(id_str, id_col, in_file)
            id2val_dic[id_str] = val
    assert id2val_dic, "id2val_dic empty (nothing read in)"
    return id2val_dic
################################################################################
def read_in_ref_lengths(ref_len_in,
                        ref_len_dic=False):
    """
    Read reference sequence lengths from two-column (tab-separated) file
    ref_len_in and return them as ref_id -> length dictionary. If an
    existing ref_len_dic is handed in, new entries are added to it
    (identical lengths for already-present IDs are enforced).
    Input file format:
    chr1	210
    chr2	200
    ...
    """
    if not ref_len_dic:
        ref_len_dic = {}
    with open(ref_len_in) as in_fh:
        for row in in_fh:
            fields = row.strip().split("\t")
            seq_id = fields[0]
            seq_len = int(fields[1])
            if seq_id in ref_len_dic:
                # Duplicate IDs are allowed only with consistent lengths.
                assert seq_len == ref_len_dic[seq_id], "reference ID %s with differing lengths read in from %s (new != existing length, %i != %i)" %(seq_id, ref_len_in, seq_len, ref_len_dic[seq_id])
            else:
                ref_len_dic[seq_id] = seq_len
    assert ref_len_dic, "ref_len_dic empty (nothing read in)"
    return ref_len_dic
################################################################################
def read_in_sel_tr_regs(exon_sites_sel_tr_bed, id2selreg_dic,
                        id2dataset_dic=None,
                        dataset_id=1):
    """
    Read in selected transcript regions for each exonic site ID.
    Store in id2selreg_dic (filled in place), format:
    site_id -> [tr_id, s, e, score]
    id2dataset_dic:
        If given, also record site_id -> dataset_id.
    Expects column 4 IDs of format "site_id,tr_id" matching column 1 tr_id.
    """
    with open(exon_sites_sel_tr_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            tr_id = cols[0]
            tr_s = int(cols[1])
            tr_e = int(cols[2])
            sitetrsc_id = cols[3]
            site_sc = cols[4]
            sitetrsc_cols = sitetrsc_id.split(",")
            site_id = sitetrsc_cols[0]
            tr_id_check = sitetrsc_cols[1]
            # BUGFIX: assert message referenced undefined name
            # exon_sites_all_tr_bed (raised NameError instead of the
            # intended AssertionError on ID mismatch).
            assert tr_id == tr_id_check, "tr_id != tr_id_check for site ID %s (%s != %s, input file: %s)" %(sitetrsc_id, tr_id, tr_id_check, exon_sites_sel_tr_bed)
            if id2dataset_dic is not None:
                id2dataset_dic[site_id] = dataset_id
            assert site_id not in id2selreg_dic, "site ID %s already read in. Site IDs need to be unique for merging (also in between --in datasets!)" %(site_id)
            id2selreg_dic[site_id] = [tr_id, tr_s, tr_e, site_sc]
    assert id2selreg_dic, "id2selreg_dic empty (nothing read in)"
################################################################################
def get_site_to_pair_id_mapping(all_sites_igv_tsv, id2pairid_dic,
                                id2regtype_dic=None):
    """
    Get site ID to pair ID mapping for site IDs that form exon border pair.
    Fills id2pairid_dic in place (site_id -> pair_id, only for rows whose
    pair ID column is not "-").
    all_sites_igv_tsv content:
    site_id	assigned_region_type	new_merged_id	exon_gc_filter	igv_region	strand
    PUM2_K562_IDR_0001	exon_tc	-	-	chr1:42,176,874-42,176,938	-
    PUM2_K562_IDR_0002	exon_tc	-	-	chr5:72,914,320-72,914,373	+
    ...
    id2regtype_dic:
        site ID -> region type mapping.
        Region types: exon_tc, exon_gc, intron, intergen
    """
    # Cleaned up: unused local "row" removed; redundant f.close() after the
    # with-block removed (the context manager already closes the handle).
    with open(all_sites_igv_tsv) as f:
        for line in f:
            cols = line.strip().split("\t")
            # Skip header row.
            if cols[0] == "site_id":
                continue
            site_id = cols[0]
            reg_type = cols[1]
            pair_id = cols[2]
            if pair_id != "-":
                id2pairid_dic[site_id] = pair_id
            if id2regtype_dic is not None:
                id2regtype_dic[site_id] = reg_type
################################################################################
def read_in_genomic_regs(exon_sites_gen_regs_bed,
                         id2genreg_dic=False):
    """
    Read in genomic regions for each exonic site ID from 6-column BED.
    Return dictionary with format:
    site_id -> [chr_id, s, e, pol]
    If an existing id2genreg_dic is handed in, it is filled and returned.
    >>> in_bed = "test_data/test3.bed"
    >>> id2genreg_dic = {}
    >>> read_in_genomic_regs(in_bed, id2genreg_dic=id2genreg_dic)
    {'CLIP1': ['chr1', 10, 20, '+'], 'CLIP2': ['chr1', 30, 45, '-']}
    """
    if not id2genreg_dic:
        id2genreg_dic = {}
    with open(exon_sites_gen_regs_bed) as bed_fh:
        for bed_row in bed_fh:
            fields = bed_row.strip().split("\t")
            reg_id = fields[3]
            assert reg_id not in id2genreg_dic, "site ID %s already read in. Site IDs need to be unique for merging (also in between --in datasets!)" %(reg_id)
            # BED columns: chrom, start, end, name, score, strand.
            id2genreg_dic[reg_id] = [fields[0], int(fields[1]), int(fields[2]), fields[5]]
    assert id2genreg_dic, "id2genreg_dic empty (nothing read in)"
    return id2genreg_dic
################################################################################
def read_in_all_tr_regs(exon_sites_all_tr_bed, id2allreg_dic,
                        sitetrid2sc_dic, id2bed_sc_dic):
    """
    Read in all transcript regions for each exonic site ID. Fills three
    dictionaries in place:
    id2allreg_dic   : site_id -> ["tr_id,s,e", "tr_id,s,e", ... ]
    sitetrid2sc_dic : "site_id,tr_id" -> combined score (int)
    id2bed_sc_dic   : site_id -> BED column 5 score (string)
    test_all_tr_regs.bed:
    T1	100	110	s1,T1,5	0.1	+
    T2	100	110	s1,T2,6	0.1	+
    T1	200	210	s2,T1,7	0.2	+
    >>> in_bed = "test_data/test_all_tr_regs.bed"
    >>> id2allreg_dic = {}
    >>> sitetrid2sc_dic = {}
    >>> id2bed_sc_dic = {}
    >>> read_in_all_tr_regs(in_bed, id2allreg_dic, sitetrid2sc_dic, id2bed_sc_dic)
    >>> id2allreg_dic
    {'s1': ['T1,100,110', 'T2,100,110'], 's2': ['T1,200,210']}
    >>> sitetrid2sc_dic
    {'s1,T1': 5, 's1,T2': 6, 's2,T1': 7}
    >>> id2bed_sc_dic
    {'s1': '0.1', 's2': '0.2'}
    """
    with open(exon_sites_all_tr_bed) as in_fh:
        for bed_row in in_fh:
            fields = bed_row.strip().split("\t")
            region_tr_id = fields[0]
            region_s = fields[1]
            region_e = fields[2]
            combined_id = fields[3]
            bed_score = fields[4]
            # Column 4 ID encodes "site_id,tr_id,combined_score".
            id_parts = combined_id.split(",")
            site_id = id_parts[0]
            tr_id_check = id_parts[1]
            comb_sc = int(id_parts[2])
            assert region_tr_id == tr_id_check, "tr_id != tr_id_check for site ID %s (%s != %s, input file: %s)" %(combined_id, region_tr_id, tr_id_check, exon_sites_all_tr_bed)
            sitetrid2sc_dic["%s,%s" %(site_id, region_tr_id)] = comb_sc
            reg_entry = "%s,%s,%s" %(region_tr_id, region_s, region_e)
            id2allreg_dic.setdefault(site_id, []).append(reg_entry)
            id2bed_sc_dic[site_id] = bed_score
    assert id2allreg_dic, "id2allreg_dic empty (nothing read in)"
################################################################################
def check_convert_chr_id(chr_id):
    """
    Check and convert chromosome IDs to format:
    chr1, chr2, chrX, ...
    If chromosome IDs like 1,2,X, .. given, convert to chr1, chr2, chrX ..
    Return False if given chr_id not standard and not convertable.
    Filter out scaffold IDs like:
    GL000009.2, KI270442.1, chr14_GL000009v2_random
    chrUn_KI270442v1 ...
    >>> chr_id = "chrX"
    >>> check_convert_chr_id(chr_id)
    'chrX'
    >>> chr_id = "4"
    >>> check_convert_chr_id(chr_id)
    'chr4'
    >>> chr_id = "MT"
    >>> check_convert_chr_id(chr_id)
    'chrM'
    >>> chr_id = "GL000009.2"
    >>> check_convert_chr_id(chr_id)
    False
    >>> chr_id = "chrUn_KI270442v1"
    >>> check_convert_chr_id(chr_id)
    False
    """
    assert chr_id, "given chr_id empty"
    # BUGFIX: regex patterns were plain strings; "\d" is an invalid escape
    # sequence (SyntaxWarning on modern Python) — use raw strings.
    if re.search(r"^chr", chr_id):
        # Only keep chr + digits/M/X/Y (filters chrUn_*, chr*_random etc.).
        if not re.search(r"^chr[\dMXY]+$", chr_id):
            chr_id = False
    else:
        # Convert plain (Ensembl-style) IDs to "chr" (UCSC-style) IDs.
        if chr_id == "MT":
            chr_id = "M"
        if re.search(r"^[\dMXY]+$", chr_id):
            chr_id = "chr" + chr_id
        else:
            chr_id = False
    return chr_id
################################################################################
def bed_check_six_col_format(bed_file):
    """
    Check whether given .bed file has 6 columns. Returns True as soon as
    any line with exactly 6 tab-separated columns is found, False
    otherwise (including for an empty file).
    >>> test_bed = "test_data/test1.bed"
    >>> bed_check_six_col_format(test_bed)
    True
    >>> test_bed = "test_data/empty_file"
    >>> bed_check_six_col_format(test_bed)
    False
    """
    with open(bed_file) as bed_fh:
        return any(len(row.strip().split("\t")) == 6 for row in bed_fh)
################################################################################
def bed_get_region_ids(bed_file,
                       check=True):
    """
    Read in .bed file, return region/site IDs (column 4 IDs) as a
    dictionary (ID -> 1). Column 4 IDs must be unique.
    check:
        If True, assert that at least one ID was read in.
    >>> test_file = "test_data/test3.bed"
    >>> bed_get_region_ids(test_file)
    {'CLIP1': 1, 'CLIP2': 1}
    """
    # Docstring fix: IDs live in column 4 (cols[3]), not column 5.
    ids_dic = {}
    with open(bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            assert site_id not in ids_dic, "column 4 IDs not unique in given .bed file \"%s\"" %(bed_file)
            ids_dic[site_id] = 1
    if check:
        assert ids_dic, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (bed_file)
    return ids_dic
################################################################################
def bed_check_unique_col4_ids(bed_file):
    """
    Check whether .bed file (6 column format with IDs in column 4)
    has unique column 4 IDs. Return True if all IDs are unique,
    False otherwise.
    >>> test_bed = "test_data/test1.bed"
    >>> bed_check_unique_col4_ids(test_bed)
    True
    >>> test_bed = "test_data/test2.bed"
    >>> bed_check_unique_col4_ids(test_bed)
    False
    """
    # BUGFIX: previously opened undefined name "ids_file" (NameError on
    # every call); the parameter is bed_file. Doctests also called the
    # function by its old name bed_check_unique_ids.
    ids_dic = {}
    check = True
    with open(bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            if site_id not in ids_dic:
                ids_dic[site_id] = 1
            else:
                check = False
    assert ids_dic, "IDs dictionary ids_dic empty"
    return check
################################################################################
def count_file_rows(in_file,
                    nr_cols=False):
    """
    Count number of file rows. If nr_cols set, demand certain (nr_cols)
    number of tab-separated columns for a row to be counted.
    >>> test_file = "test_data/test1.bed"
    >>> count_file_rows(test_file)
    7
    >>> test_file = "test_data/empty_file"
    >>> count_file_rows(test_file)
    0
    """
    with open(in_file) as in_fh:
        if nr_cols:
            return sum(1 for row in in_fh if len(row.strip().split("\t")) == nr_cols)
        return sum(1 for _ in in_fh)
################################################################################
def samtools_extract_reads_from_bed_regions(in_bed, in_bam, out_bam):
    """
    Extract BAM reads overlapping the given BED regions and store them in
    a new BAM file via samtools view -L.
    OLD: samtools view -L ignores strandness info in BED files, will output
    overlapping reads on both strands.
    """
    assert os.path.exists(in_bed), "in_bed does not exist"
    assert os.path.exists(in_bam), "in_bam does not exist"
    # Any stdout/stderr text from samtools signals a problem.
    view_cmd = "samtools view -L %s %s -o %s" % (in_bed, in_bam, out_bam)
    cmd_out = subprocess.getoutput(view_cmd)
    assert not cmd_out, "samtools has problems with your input:\n%s\n%s" %(view_cmd, cmd_out)
################################################################################
def bam_extract_reads_from_bed_regions(in_bed, in_bam, out_bam,
                                       no_name_check=False,
                                       reverse_strand=False,
                                       sort_bed=False):
    """
    Extract BAM reads overlapping the given BED regions and store them in
    a new BAM file. Uses intersectBed instead of samtools view -L, since
    intersectBed's -s/-S options allow strand-specific filtering.
    no_name_check:
        Activate intersectBed -nonamecheck
    reverse_strand:
        If gene reads are on reverse strand (use -S instead of -s).
    sort_bed:
        Sort the BED first and run intersectBed with -sorted.
    """
    assert os.path.exists(in_bed), "in_bed does not exist"
    assert os.path.exists(in_bam), "in_bam does not exist"
    # -s keeps same-strand overlaps, -S opposite-strand overlaps.
    flags = "-S" if reverse_strand else "-s"
    if no_name_check:
        flags += " -nonamecheck"
    if sort_bed:
        isec_cmd = "sort -k1,1 -k2,2n %s | intersectBed -abam %s -b stdin %s -sorted > %s" % (in_bed, in_bam, flags, out_bam)
    else:
        isec_cmd = "intersectBed -abam %s -b %s %s > %s" % (in_bam, in_bed, flags, out_bam)
    cmd_out = subprocess.getoutput(isec_cmd)
    assert not cmd_out, "intersectBed has problems with your input:\n%s\n%s" %(isec_cmd, cmd_out)
################################################################################
def bam_to_bed_get_isr_stats(in_bam, next_ol_bed,
                             isr_bed=False,
                             isr_ext_mode=1,
                             isr_max_reg_len=10,
                             reverse_strand=False,
                             nexts_cisrc_dic=False,
                             isr_intron_reg_dic=None,
                             new_is_ids=True):
    """
    Get intron-spanning reads (ISR), overlap their matched regions
    (isr_ext_mode=1) or ends (isr_ext_mode=2) with NEXT regions.
    Also extract intronic regions with ISR.
    Returns nexts_cisrc_dic: "NEXT1,NEXT2" pair string -> number of ISRs
    connecting the two NEXT exon regions.
    in_bam:
        BAM file with gene region reads containing exonic sites.
    next_ol_bed:
        Exons of transcripts with NEXT overlapping exonic sites BED.
    isr_ext_mode:
        Extraction mode for IS read regions.
        1: take whole match regions.
        2: take end / start positions of IS read matches.
    isr_max_reg_len:
        Maximum length of IS read start end regions. If match is >
        isr_max_reg_len, use isr_max_reg_len as length of the region.
        If isr_max_reg_len=False, use full length of match as region
        length (if isr_ext_mode == 1).
    reverse_strand:
        Reads mapping to reverse strand (e.g. for certain RNA-seq datasets).
    isr_intron_reg_dic:
        Intronic region -> ISR count.
    isr_bed:
        Define BED file to store ISR reads in.
    Removed:
    next2isrc_dic:
        NEXT to overlapping IS read count.
    """
    assert os.path.exists(in_bam), "in_bam does not exist"
    assert os.path.exists(next_ol_bed), "next_ol_bed does not exist"
    # Either write ISR regions to the caller-given BED, or to a temporary
    # file (removed again further below).
    if isr_bed:
        tmp_bed = isr_bed
    else:
        random_id = uuid.uuid1()
        tmp_bed = str(random_id) + ".bam_next_ol.tmp.bed"
    if not nexts_cisrc_dic:
        nexts_cisrc_dic = {} # NEXTs connecting IS read count.
    # Convert BAM to BED12; column 10 (# blocks) identifies split reads.
    check_cmd = "bamToBed -i " + in_bam + " -bed12"
    output = subprocess.getoutput(check_cmd)
    c_ir_reads = 0
    ISRBEDOUT = open(tmp_bed, "w")
    # NOTE(review): assumes bamToBed produced at least one record; fully
    # empty output would make cols[9] below raise IndexError — confirm
    # upstream guarantees a non-empty BAM.
    for line in output.split('\n'):
        cols = line.strip().split("\t")
        # Only split reads (block count > 1 means intron-spanning).
        if cols[9] == "1":
            continue
        chr_id = cols[0]
        reg_s = int(cols[1])
        reg_e = int(cols[2])
        read_id = cols[3]
        read_pol = cols[5]
        if reverse_strand:
            read_pol = get_rev_strand(read_pol)
        # assert re.search(",", cols[10]), "-bed 12 column 11 of split read is missing \",\" in line \"%s\"" %(line)
        # assert re.search(",", cols[11]), "-bed 12 column 12 of split read is missing \",\" in line \"%s\"" %(line)
        """
        Output end positions (length 1) of the intron-spanning reads (i.e.,
        the last positions overlapping with exon(s)).
        chr1	1542166	1542167	isr_1	0	-
        chr1	1543868	1543869	isr_1	0	-
        chr1	1542166	1542167	isr_2	0	-
        chr1	1543868	1543869	isr_2	0	-
        ...
        In case of split reads (2 or more splits), output each intron split
        as separate IR read. Usually we should have cols[9] == 2, but for
        RNA-seq data (depending on type) we can also have more.
        """
        # BED12 column 11: block sizes, column 12: block starts (offsets
        # relative to reg_s), both comma-separated.
        parts_list = cols[10].split(",")
        offsets_list = cols[11].split(",")
        """
        chr1	10168245	10171147	GACGG:HWI-D00611:119:C6K7PANXX:5:1316:4890:22830/2	255	+	10168245	10171147	255,0,0	2	25,10	0,2892
        chr1	10168259	10171148	AAAAC:HWI-D00611:119:C6K7PANXX:5:1103:13861:44307/2	255	+	10168259	10171148	255,0,0	2	11,11	0,2878
        chr1	10168270	10168313	ATTGA:HWI-D00611:119:C6K7PANXX:5:1315:5248:33314/2	255	+	10168270	10168313	255,0,0	1	43	0
        l: 43
        """
        # Each adjacent block pair encloses one intron: p1 = last exonic
        # position of the upstream block, p2 = first exonic position of the
        # downstream block.
        for i in range(len(parts_list)-1):
            l_p1 = int(parts_list[i])
            l_p2 = int(parts_list[i+1])
            os1 = int(offsets_list[i])
            os2 = int(offsets_list[i+1])
            p1_e = reg_s + l_p1 + os1
            # p1_s = p1_e - 1
            p1_s = p1_e - 1
            p2_s = reg_s + os2
            # p2_e = p2_s + 1
            p2_e = p2_s + 1
            """
            Case: take full match for coverage calculations.
            This also influences exon border site discovery, as sites
            do not need to end exactly at exon ends anymore.
            """
            if isr_ext_mode == 1:
                # Extend positions to the (length-capped) block match regions.
                p1_s = p1_e - l_p1
                p2_e = p2_s + l_p2
                if isr_max_reg_len:
                    if l_p1 > isr_max_reg_len:
                        p1_s = p1_e - isr_max_reg_len
                    if l_p2 > isr_max_reg_len:
                        p2_e = p2_s + isr_max_reg_len
            c_ir_reads += 1
            new_read_id = read_id
            if new_is_ids:
                new_read_id = "isr_%i" %(c_ir_reads)
            #isr_119637
            #chr11:7995358-7995367
            #chr11	7995357	7995367	isr_119637	0	+
            #chr11	7995944	7995947	isr_119637	0	+
            # Record the spanned intron region (between the two blocks).
            if isr_intron_reg_dic is not None:
                intron_s = p1_e
                intron_e = p2_s
                intron_reg = "%s,%i,%i,%s" %(chr_id, intron_s, intron_e, read_pol)
                if intron_reg in isr_intron_reg_dic:
                    isr_intron_reg_dic[intron_reg] += 1
                else:
                    isr_intron_reg_dic[intron_reg] = 1
            # Two BED rows per intron split, sharing the same read ID.
            ISRBEDOUT.write("%s\t%i\t%i\t%s\t0\t%s\n" %(chr_id, p1_s, p1_e, new_read_id, read_pol))
            ISRBEDOUT.write("%s\t%i\t%i\t%s\t0\t%s\n" %(chr_id, p2_s, p2_e, new_read_id, read_pol))
    ISRBEDOUT.close()
    #
    # l_p1 = int(parts[0])
    # l_p2 = int(parts[1])
    # # Get IS read mapped start+end positions.
    # if is_ext_mode == 1:
    #     p1e = reg_s + l_p1
    #     p1s = p1e - 1
    #     p2s = reg_e - l_p2
    #     p2e = p2s + 1
    # elif is_ext_mode == 2:
    #     p1s = reg_s
    #     p1e = reg_s + l_p1
    #     p2s = reg_e - l_p2
    #     p2e = reg_e
    # else:
    #     assert False, "invalid is_ext_mode given"
    # new_read_id = read_id
    # if new_is_ids:
    #     new_read_id = "isr_%i" %(c_ir_reads)
    # ISRBEDOUT.write("%s\t%i\t%i\t%s\t0\t%s\n" %(chr_id, p1s, p1e, new_read_id, read_pol))
    # ISRBEDOUT.write("%s\t%i\t%i\t%s\t0\t%s\n" %(chr_id, p2s, p2e, new_read_id, read_pol))
    # ISRBEDOUT.close()
    # If no IS reads, no need to look at overlaps with NEXT exon ends.
    if not c_ir_reads:
        return nexts_cisrc_dic
    # Strand-specific overlap of NEXT exon regions with ISR end regions.
    check_cmd = "intersectBed -a " + next_ol_bed + " -b " + tmp_bed + " -s -wb"
    output = subprocess.getoutput(check_cmd)
    if not isr_bed:
        if os.path.exists(tmp_bed):
            os.remove(tmp_bed)
    # Again if no overlap, return empty dics.
    if not output:
        return nexts_cisrc_dic
    # Collect, per ISR ID, the list of NEXT exons it overlaps.
    isr2next_list_dic = {}
    for line in output.split('\n'):
        cols = line.strip().split("\t")
        next_id = cols[3]
        isr_id = cols[9]
        # if next_id in next2isrc_dic:
        #     next2isrc_dic[next_id] += 1
        # else:
        #     next2isrc_dic[next_id] = 1
        if isr_id in isr2next_list_dic:
            isr2next_list_dic[isr_id].append(next_id)
        else:
            isr2next_list_dic[isr_id] = [next_id]
    # For each ISR, count every unordered NEXT exon pair it connects once.
    for isr_id in isr2next_list_dic:
        next_pairs_seen_dic = {}
        for next1 in isr2next_list_dic[isr_id]:
            for next2 in isr2next_list_dic[isr_id]:
                if next1 == next2:
                    continue
                nexts1 = "%s,%s" %(next1, next2)
                nexts2 = "%s,%s" %(next2, next1)
                if nexts1 in next_pairs_seen_dic:
                    continue
                if nexts2 in next_pairs_seen_dic:
                    continue
                con_id = "%s,%s" %(next1, next2)
                if con_id in nexts_cisrc_dic:
                    nexts_cisrc_dic[con_id] += 1
                else:
                    nexts_cisrc_dic[con_id] = 1
                # Mark both orderings as seen for this ISR.
                next_pairs_seen_dic[nexts1] = 1
                next_pairs_seen_dic[nexts2] = 1
    return nexts_cisrc_dic
################################################################################
def get_isolated_transcripts(single_ex_tr_dic, tr2reg_dic,
                             tmp_out_folder=False):
    """
    Determine isolated transcripts, i.e. transcripts without any
    strand-specific overlap with other transcripts, among the given
    single exon transcript IDs.
    Return dictionary of isolated transcript IDs (ID -> 1).

    single_ex_tr_dic:
        Single exon transcript IDs dictionary
    tr2reg_dic:
        transcript ID -> genomic region [chr_id, tr_s, tr_e, gene_pol]
    tmp_out_folder:
        Folder to write the temporary BED file into (default: cwd).

    >>> tr2reg_dic = {'t1': ['chr1', 1000, 2000, '+'], 't2': ['chr1', 1800, 2800, '+'], 't3': ['chr1', 3000, 4000, '+'], 't4': ['chr1', 5000, 6000, '+']}
    >>> single_ex_tr_dic = {'t1': 1, 't2': 1, 't3': 1}
    >>> get_isolated_transcripts(single_ex_tr_dic, tr2reg_dic)
    {'t3': 1}

    """
    assert single_ex_tr_dic, "single_ex_tr_dic empty"
    tmp_bed = str(uuid.uuid1()) + ".gene_regions.tmp.bed"
    if tmp_out_folder:
        tmp_bed = tmp_out_folder + "/" + tmp_bed
    # Dump the transcript regions into a temporary BED file.
    with open(tmp_bed, "w") as bed_out:
        for tr_id in single_ex_tr_dic:
            chr_id, tr_s, tr_e, tr_pol = tr2reg_dic[tr_id][:4]
            bed_out.write("%s\t%i\t%i\t%s\t0\t%s\n" %(chr_id, tr_s, tr_e, tr_id, tr_pol))
    # Strand-specific overlap of the region set with itself. Every region
    # at least overlaps itself, so a region ID reported exactly once has
    # no overlap with any other region, i.e. is isolated.
    params = " -s"
    check_cmd = "intersectBed -a " + tmp_bed + " -b " + tmp_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    hit_counts = {}
    for line in output.split('\n'):
        hit_id = line.strip().split("\t")[3]
        hit_counts[hit_id] = hit_counts.get(hit_id, 0) + 1
    isolated_tr_ids_dic = {tr_id: 1 for tr_id, hits in hit_counts.items() if hits == 1}
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    return isolated_tr_ids_dic
################################################################################
def remove_overlapping_genes(gid2sel_tr_dic, gid2isrc_dic, tr2reg_dic,
                             remove_single_ex_genes=False,
                             trid2exc_dic=False,
                             tmp_out_folder=False,
                             min_isrc=2):
    """
    Overlap gene regions (using longest transcript of each gene) with
    each other, and remove gene A, if it fully overlaps with gene B
    and:
    1) gene A does not have intron-spanning reads, while
    gene B has. If both do not have, keep both. Minimum ISR count
    == min_isrc.
    This will also remove single exon genes, if the longer gene
    fully covers it and has > min_isrc ISR reads.
    Alternatively, set remove_single_ex_genes to True, to remove
    fully overlapping single exon genes in any case.

    gid2sel_tr_dic:
        Gene ID -> selected transcript ID (default: longest one).
    gid2isrc_dic:
        Intron-spanning read count of all transcripts belonging to
        gene, so:
        gene ID -> ISR count (ISR sum of all gene transcripts)
    tr2reg_dic:
        transcript ID -> genomic region [chr_id, tr_s, tr_e, gene_pol]
    min_isrc:
        Minimum ISR count used for filtering.

    Returns dictionary of gene IDs to remove (gene ID -> 1).

    >>> gid2sel_tr_dic = {'g1': 't1', 'g2': 't2', 'g3': 't3', 'g4': 't4'}
    >>> gid2isrc_dic = {'g1': 10, 'g2': 0, 'g3': 2, 'g4': 0}
    >>> tr2reg_dic = {'t1': ['chr1', 1000, 2000, '+'], 't2': ['chr1', 1200, 1600, '+'], 't3': ['chr1', 1400, 1800, '+'], 't4': ['chr1', 1000, 2000, '-']}
    >>> remove_overlapping_genes(gid2sel_tr_dic, gid2isrc_dic, tr2reg_dic)
    {'g2': 1}
    >>> gid2sel_tr_dic = {'g5': 't5', 'g6': 't6', 'g7': 't7'}
    >>> gid2isrc_dic = {'g5': 0, 'g6': 15, 'g7': 0}
    >>> tr2reg_dic = {'t5': ['chr1', 5000, 6000, '+'], 't6': ['chr1', 5000, 6000, '+'], 't7': ['chr1', 7000, 8000, '+']}
    >>> remove_overlapping_genes(gid2sel_tr_dic, gid2isrc_dic, tr2reg_dic)
    {'g5': 1}
    >>> gid2sel_tr_dic = {'g1': 't1', 'g2': 't2'}
    >>> gid2isrc_dic = {'g1': 0, 'g2': 0}
    >>> tr2reg_dic = {'t1': ['chr1', 1000, 2000, '+'], 't2': ['chr1', 1600, 1800, '+']}
    >>> trid2exc_dic = {'t1': 5, 't2': 1}
    >>> remove_overlapping_genes(gid2sel_tr_dic, gid2isrc_dic, tr2reg_dic)
    {}
    >>> remove_overlapping_genes(gid2sel_tr_dic, gid2isrc_dic, tr2reg_dic, remove_single_ex_genes=True, trid2exc_dic=trid2exc_dic)
    {'g2': 1}

    """
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".gene_regions.tmp.bed"
    if tmp_out_folder:
        tmp_bed = tmp_out_folder + "/" + tmp_bed
    # Write gene regions BED for overlap calculation (one region per gene,
    # using the selected transcript).
    gid2len_dic = {}
    GREGOUT = open(tmp_bed, "w")
    for gid in gid2sel_tr_dic:
        tr_id = gid2sel_tr_dic[gid]
        chr_id = tr2reg_dic[tr_id][0]
        tr_s = tr2reg_dic[tr_id][1]
        tr_e = tr2reg_dic[tr_id][2]
        tr_pol = tr2reg_dic[tr_id][3]
        tr_sc = gid2isrc_dic[gid]
        gid2len_dic[gid] = tr_e - tr_s
        GREGOUT.write("%s\t%i\t%i\t%s\t%i\t%s\n" %(chr_id, tr_s, tr_e, gid, tr_sc, tr_pol))
    GREGOUT.close()
    # Overlap calculation with itself.
    # -F 1.0: only full overlaps (fraction of B) get reported, i.e. the
    # shorter gene always appears as the -b entry (plus self overlaps).
    params = " -s -F 1.0 -wao"
    check_cmd = "intersectBed -a " + tmp_bed + " -b " + tmp_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    gid2remove_dic = {}
    for line in output.split('\n'):
        # Robustness: skip empty lines (empty intersectBed output would
        # otherwise crash on the column access below).
        if not line.strip():
            continue
        cols = line.strip().split("\t")
        gid1 = cols[3]  # -a gene (longer or equal length).
        gid2 = cols[9]  # -b gene (fully covered by gid1).
        if gid1 == gid2:
            continue
        # Gene ID 1 should be longer gene, check.
        gid1_len = gid2len_dic[gid1]
        gid2_len = gid2len_dic[gid2]
        assert gid1_len >= gid2_len, "reported gene ID 1 (%s) length < gene ID 2 (%s) length (%i < %i)" %(gid1, gid2, gid1_len, gid2_len)
        # Remove covered gene if it has no ISR support while the covering
        # gene has >= min_isrc.
        gid1_isrc = gid2isrc_dic[gid1]
        gid2_isrc = gid2isrc_dic[gid2]
        if gid2_isrc == 0 and gid1_isrc >= min_isrc:
            gid2remove_dic[gid2] = 1
        # Remove overlapping single exon genes no matter what.
        if remove_single_ex_genes:
            tid2 = gid2sel_tr_dic[gid2]
            assert trid2exc_dic, "remove_single_ex_genes=True requires trid2exc_dic"
            # Membership check (indexing first would raise KeyError instead
            # of the intended assertion, and wrongly fire on a 0 count).
            assert tid2 in trid2exc_dic, "transcript ID %s not in trid2exc_dic" %(tid2)
            tid2_exc = trid2exc_dic[tid2]
            if tid2_exc == 1:
                gid2remove_dic[gid2] = 1
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    return gid2remove_dic
################################################################################
def get_trid_isrc_full_con(tr_id, tr_exc, exid2next_dic, nexts_cisrc_dic):
    """
    Sum the intron-spanning read (ISR) counts over all adjacent exon
    pairs of transcript tr_id, and report whether every adjacent exon
    pair is connected by at least one ISR.
    Returns (total ISR count, fully connected flag).

    tr_id:
        Transcript ID
    tr_exc:
        Transcript exon count.
    exid2next_dic:
        exon ID to NEXT ID mapping
    nexts_cisrc_dic:
        Connected NEXT IDs with format "NEXT1,NEXT2" and mapping:
        "NEXT1,NEXT2" -> connecting IS read count

    >>> exid2next_dic = {"t1_e1" : "NEXT1", "t1_e2" : "NEXT2", "t1_e3": "NEXT3", "t2_e1": "NEXT4"}
    >>> nexts_cisrc_dic = {"NEXT1,NEXT2": 4, "NEXT2,NEXT3": 2}
    >>> get_trid_isrc_full_con("t1", 3, exid2next_dic, nexts_cisrc_dic)
    (6, True)
    >>> nexts_cisrc_dic = {"NEXT2,NEXT3": 5}
    >>> get_trid_isrc_full_con("t1", 3, exid2next_dic, nexts_cisrc_dic)
    (5, False)
    >>> get_trid_isrc_full_con("t2", 1, exid2next_dic, nexts_cisrc_dic)
    (0, False)

    """
    # A single exon transcript has no exon pairs, thus no connections.
    if tr_exc == 1:
        return 0, False
    total_isrc = 0
    all_connected = True
    # Walk over adjacent exon pairs (e1,e2), (e2,e3), ...
    for ex_nr in range(1, tr_exc):
        next_a = exid2next_dic["%s_e%i" % (tr_id, ex_nr)]
        next_b = exid2next_dic["%s_e%i" % (tr_id, ex_nr + 1)]
        key_ab = next_a + "," + next_b
        key_ba = next_b + "," + next_a
        # Pair keys are stored in one direction only; check both orders.
        ab_present = key_ab in nexts_cisrc_dic
        ba_present = key_ba in nexts_cisrc_dic
        if ab_present and ba_present:
            assert False, "NEXT ID combination appears twice in nexts_cisrc_dic (%s, %s)" %(key_ab, key_ba)
        if ab_present:
            total_isrc += nexts_cisrc_dic[key_ab]
        elif ba_present:
            total_isrc += nexts_cisrc_dic[key_ba]
        else:
            all_connected = False
    return total_isrc, all_connected
################################################################################
def get_rev_strand(strand):
    """
    Return the opposite strand character ("+" <-> "-").

    >>> get_rev_strand("-")
    '+'

    """
    strand_flip = {"+": "-", "-": "+"}
    assert strand in strand_flip, "invalid strand information given (%s)" %(strand)
    return strand_flip[strand]
################################################################################
def check_tr_id_full_coverage(tr_id, trid2exc_dic, regid2nc_dic,
                              pseudo_counts=False):
    """
    Check if each exon of a given transcript ID is covered by > 0 reads.
    If so return True, else False.

    tr_id:
        Transcript ID to check.
    trid2exc_dic:
        Transcript ID -> exon count mapping.
    regid2nc_dic:
        Exon region ID ("<tr_id>_e<nr>") -> coverage value mapping.
    pseudo_counts:
        If True, stored coverages are assumed to include a +1 pseudo
        count, so the "uncovered" value is 1 instead of 0.

    >>> trid2exc_dic = {"t1" : 2, "t2" : 2, "t3" : 1}
    >>> regid2nc_dic = {"t1_e1": 0.4, "t1_e2": 1.2, "t2_e1": 0.4, "t2_e2": 0.0, "t3_e1": 1.6}
    >>> check_tr_id_full_coverage("t1", trid2exc_dic, regid2nc_dic)
    True
    >>> check_tr_id_full_coverage("t2", trid2exc_dic, regid2nc_dic)
    False
    >>> check_tr_id_full_coverage("t3", trid2exc_dic, regid2nc_dic)
    True

    """
    min_ex_cov = 1 if pseudo_counts else 0
    for i in range(trid2exc_dic[tr_id]):
        ex_id = "%s_e%i" % (tr_id, i + 1)
        # Use <= instead of == so any value at or below the minimum counts
        # as uncovered (== would wrongly accept values below the minimum).
        if regid2nc_dic[ex_id] <= min_ex_cov:
            return False
    return True
################################################################################
def get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic):
    """
    Get site ID - transcript ID combination score, i.e. for each given
    transcript ID count in how many of the 10 transcript quality filter
    settings it was selected as a top transcript for the site.
    10 transcript quality filter settings:
    EIR, EXB, TSC, ISRN, ISR, ISRFC, SEO, FUCO, TCOV, TSL

    idfilt2best_trids_dic:
        "site_id,filter_id"
        -> top transcript ID(s) after applying filter on exon IDs > min_eir

    >>> site_id = "s1"
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1"], "s1,EXB" : ["t1"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t1"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t1"]}
    >>> tr_ids_list = ["t1"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 10}
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1", "t2"], "s1,EXB" : ["t1", "t2"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t2"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1", "t2"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t2"]}
    >>> tr_ids_list = ["t1", "t2", "t3"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 8, 't2': 5, 't3': 0}

    """
    assert tr_ids_list, "tr_ids_list empty"
    filter_ids = ["EIR", "EXB", "TSC", "ISRN", "ISR", "ISRFC", "SEO", "FUCO", "TCOV", "TSL"]
    # For each transcript, count filters where it appears among the top IDs.
    return {
        tr_id: sum(
            1 for fid in filter_ids
            if tr_id in idfilt2best_trids_dic["%s,%s" % (site_id, fid)]
        )
        for tr_id in tr_ids_list
    }
################################################################################
def list_found_duplicates(in_list):
    """
    Report whether in_list contains duplicate entries. Return True if
    duplicates were found, False otherwise.

    >>> in_list = ["hallo", "hello"]
    >>> list_found_duplicates(in_list)
    False
    >>> in_list = ["hallo", "hello", "hollo", "hello"]
    >>> list_found_duplicates(in_list)
    True

    """
    # Duplicates collapse in the set, shrinking its size below the list's.
    return len(set(in_list)) != len(in_list)
################################################################################
def get_exid_isr_hood_count(exon_id, exid2trid_dic,
                            exid2next_dic, nexts_cisrc_dic):
    """
    Given an exon ID, get intron-spanning read count that connects it to
    its neighboring exons. Return count.

    exon_id:
        Exon ID with format "<tr_id>_e<exon_nr>".
    exid2trid_dic:
        exon ID -> transcript ID mapping (unused here, transcript ID is
        parsed from exon_id; kept for interface compatibility).
    exid2next_dic:
        exon ID -> NEXT ID mapping.
    nexts_cisrc_dic:
        "NEXT1,NEXT2" -> connecting IS read count (one direction stored).

    >>> exid2trid_dic = {"t1_e1" : "t1", "t1_e2" : "t1", "t2_e1" : "t2", "t2_e2" : "t2", "t2_e3" : "t2", "t3_e1" : "t3"}
    >>> exid2next_dic = {"t1_e1" : "n1", "t1_e2" : "n2", "t2_e1" : "n3", "t2_e2" : "n2", "t2_e3" : "n4", "t3_e1" : "n5"}
    >>> nexts_cisrc_dic = {"n1,n2" : 10, "n2,n3" : 10, "n2,n3" : 8, "n2,n4" : 5}
    >>> get_exid_isr_hood_count("t1_e2", exid2trid_dic, exid2next_dic, nexts_cisrc_dic)
    10
    >>> get_exid_isr_hood_count("t2_e2", exid2trid_dic, exid2next_dic, nexts_cisrc_dic)
    13
    >>> get_exid_isr_hood_count("t3_e1", exid2trid_dic, exid2next_dic, nexts_cisrc_dic)
    0

    """
    # Raw string + applied %-formatting (original assert message never got
    # the exon ID substituted in).
    m = re.search(r"(.+)_e(\d+)", exon_id)
    assert m, "exon ID %s has invalid format" % (exon_id)
    tr_id = m.group(1)
    exon_nr = int(m.group(2))
    exon_next = exid2next_dic[exon_id]  # renamed: do not shadow builtin next.
    isrn_c = 0
    # Check upstream (exon_nr-1) and downstream (exon_nr+1) neighbors.
    for nb_nr in (exon_nr - 1, exon_nr + 1):
        nb_exon_id = "%s_e%i" % (tr_id, nb_nr)
        if nb_exon_id not in exid2next_dic:
            continue
        nb_next = exid2next_dic[nb_exon_id]
        # Pair keys are stored in one direction only; try both orders.
        key1 = "%s,%s" % (exon_next, nb_next)
        key2 = "%s,%s" % (nb_next, exon_next)
        if key1 in nexts_cisrc_dic:
            isrn_c += nexts_cisrc_dic[key1]
        elif key2 in nexts_cisrc_dic:
            isrn_c += nexts_cisrc_dic[key2]
    return isrn_c
################################################################################
def read_in_12col_bed(in_bed, bed_row_dic=False):
    """
    Read in 12-column BED (in_bed), store in bed_row_dic and return it.
    Region ID in BED column 4 is used as dictionary key:
    chr14	23321288	23326185	ENST00000216727.8	0	+	23321288	...

    in_bed:
        Input 12-column BED file path.
    bed_row_dic:
        Optional existing region ID -> BED row dictionary to extend.
        If not given (False), a new dictionary is created.
    """
    if not bed_row_dic:
        bed_row_dic = {}
    # The with-statement closes the handle (the original's trailing
    # "f.closed" was a no-op attribute access, not a close call).
    with open(in_bed) as f:
        for line in f:
            row = line.strip()
            cols = row.split("\t")
            reg_id = cols[3]
            bed_row_dic[reg_id] = row
    return bed_row_dic
################################################################################
def write_12col_bed(bed_row_dic, out_bed):
    """
    Write BED row dictionary (region ID -> BED row string) to BED
    file out_bed.
    """
    assert bed_row_dic, "given bed_row_dic empty"
    rows_written = 0
    with open(out_bed, "w") as bed_out:
        for region_id in bed_row_dic:
            bed_out.write(bed_row_dic[region_id] + "\n")
            rows_written += 1
    assert rows_written, "nothing output by write_12col_bed()"
################################################################################
def get_exon_border_site_pairs(sites_bed, isr_bed,
                               max_site_dist=10,
                               id2gen_se_dic=False,
                               id2next_list_dic=False):
    """
    Get exon border site pairs, based on intron-spanning reads connecting
    the two sites. If for a given site > 1 connection is supported by
    intron-spanning reads, also save this. Return two dictionaries:
    site_id --> [connected_site_id1, connected_site_id2, ...]
    "site_id1,site_id2" --> # of connecting reads.
    id2gen_se_dic:
        site_id -> [gen_start, gen_end]
    id2next_list_dic:
        Use site ID -> NEXT list mapping, to remove border pairs on same
        NEXT exon regions (can happen if --isr-ext-mode 2)
        Also use idnext2ol_se_dic to only remove sites nearby from
        id2ids_dic.
    test_exb_sites.bed
    chr1	1090	2000	s1	0	+
    chr1	3000	3010	s2	0	+
    chr1	4000	4020	s3	0	+
    chr1	5000	5020	s4	0	+
    test_exb_isr.bed
    chr1	1999	2000	isr1	0	+
    chr1	1999	2000	isr2	0	+
    chr1	1999	2000	isr3	0	+
    chr1	1999	2000	isr4	0	+
    chr1	3000	3001	isr1	0	+
    chr1	3000	3001	isr2	0	+
    chr1	3000	3001	isr3	0	+
    chr1	4000	4001	isr4	0	+
    Use "-EB-" to separate site IDs and form new ID:
    id1-EB-id2
    Resulting isr2site_id_list_dic:
    {'isr1': ['s1', 's2'], 'isr2': ['s1', 's2'], 'isr3': ['s1', 's2'],
    'isr4': ['s1', 's3']}
    >>> sites_bed = "test_data/test_exb_sites.bed"
    >>> isr_bed = "test_data/test_exb_isr.bed"
    >>> get_exon_border_site_pairs(sites_bed, isr_bed)
    ({'s1': ['s2', 's3'], 's2': ['s1'], 's3': ['s1']}, {'s1-EB-s2': 3, 's2-EB-s1': 3, 's1-EB-s3': 1, 's3-EB-s1': 1})
    >>> sites_bed = "test_data/test_exb_sites2.bed"
    >>> get_exon_border_site_pairs(sites_bed, isr_bed)
    ({}, {})
    """
    # site ID -> list of connected site IDs.
    id2ids_dic = {}
    # "site_id1-EB-site_id2" -> connecting intron-spanning read count
    # (stored in both directions).
    ids2isrc_dic = {}
    assert os.path.exists(sites_bed), "%s transcript context genomic BED file not found" %(sites_bed)
    assert os.path.exists(isr_bed), "%s intron-spanning reads BED file not found" %(isr_bed)
    # Strand-specific overlap of sites with IS read parts; -wb appends
    # the overlapping -b (read) entry to each reported overlap.
    check_cmd = "intersectBed -a " + sites_bed + " -b " + isr_bed + " -s -wb"
    output = subprocess.getoutput(check_cmd)
    # Again if no overlap, return empty dics.
    if not output:
        return id2ids_dic, ids2isrc_dic
    # Connect intron-spanning read ID to site IDs.
    isr2site_id_list_dic = {}
    for line in output.split('\n'):
        cols = line.strip().split("\t")
        site_id = cols[3]  # -a site ID (column 4).
        isr_id = cols[9]   # -b read ID (column 10 of combined row).
        if isr_id in isr2site_id_list_dic:
            isr2site_id_list_dic[isr_id].append(site_id)
        else:
            isr2site_id_list_dic[isr_id] = [site_id]
    """
    Resulting isr2site_id_list_dic:
    {'isr1': ['s1', 's2'], 'isr2': ['s1', 's2'], 'isr3': ['s1', 's2'],
    'isr4': ['s1', 's3']}
    """
    # Site IDs to connecting intron-spanning read count.
    # Every read hitting >= 2 sites contributes one count to each ordered
    # site pair it connects.
    for isr_id in isr2site_id_list_dic:
        for id1 in isr2site_id_list_dic[isr_id]:
            for id2 in isr2site_id_list_dic[isr_id]:
                if id1 == id2:
                    continue
                if id1 in id2ids_dic:
                    id2ids_dic[id1].append(id2)
                else:
                    id2ids_dic[id1] = [id2]
                ids = "%s-EB-%s" %(id1, id2)
                if ids in ids2isrc_dic:
                    ids2isrc_dic[ids] += 1
                else:
                    ids2isrc_dic[ids] = 1
    # Site IDs that end up with no partners after filtering below.
    rem_sids_dic = {}
    for site_id in id2ids_dic:
        # Make lists non-redundant.
        id2ids_dic[site_id] = list(set(id2ids_dic[site_id]))
        id2ids_dic[site_id].sort()
        # Remove connections on same NEXT exons + in close distance.
        if id2gen_se_dic and id2next_list_dic:
            sid_next_list = id2next_list_dic[site_id]
            sid_s = id2gen_se_dic[site_id][0] # site ID genomic start.
            sid_e = id2gen_se_dic[site_id][1] # site ID genomic end.
            rem_assoc_sids_dic = {}
            for sid2 in id2ids_dic[site_id]:
                sid2_next_list = id2next_list_dic[sid2]
                for sid2_next in sid2_next_list:
                    # Partner shares a NEXT exon region with site_id:
                    # drop it only if the two sites are also close by.
                    if sid2_next in sid_next_list:
                        sid2_s = id2gen_se_dic[sid2][0]
                        sid2_e = id2gen_se_dic[sid2][1]
                        gen_dist = get_site_ends_distance(sid_s, sid_e, sid2_s, sid2_e)
                        if gen_dist <= max_site_dist:
                            rem_assoc_sids_dic[sid2] = 1
                            break
            if rem_assoc_sids_dic:
                new_list = []
                for sid in id2ids_dic[site_id]:
                    if sid not in rem_assoc_sids_dic:
                        new_list.append(sid)
                if new_list:
                    new_list.sort()
                    id2ids_dic[site_id] = new_list
                else:
                    # All partners removed -> drop site after loop (cannot
                    # delete while iterating the dictionary).
                    rem_sids_dic[site_id] = 1
    if rem_sids_dic:
        for rem_sid in rem_sids_dic:
            del id2ids_dic[rem_sid]
    return id2ids_dic, ids2isrc_dic
################################################################################
def remove_no_common_tr_exb_pairs(id2ids_dic, id2tr_list_dic):
    """
    Remove exon border pairs which have no common transcripts.
    Modifies and returns id2ids_dic.

    >>> id2ids_dic = {'s1': ['s2', 's3'], 's2': ['s1'], 's3': ['s1']}
    >>> id2tr_list_dic = {'s1': ['t1', 't2'], 's2': ['t3'], 's3': ['t2']}
    >>> remove_no_common_tr_exb_pairs(id2ids_dic, id2tr_list_dic)
    {'s1': ['s3'], 's3': ['s1']}

    """
    if not id2ids_dic:
        return id2ids_dic
    assert id2tr_list_dic, "id2tr_list_dic empty"
    sids_to_remove = []
    for site_id, partner_ids in id2ids_dic.items():
        # Site without transcript list cannot share transcripts.
        if site_id not in id2tr_list_dic:
            sids_to_remove.append(site_id)
            continue
        site_tr_list = id2tr_list_dic[site_id]
        # Keep only partners sharing at least one transcript with site_id.
        kept_ids = [pid for pid in partner_ids
                    if two_lists_get_intersect(site_tr_list, id2tr_list_dic[pid])]
        if kept_ids:
            kept_ids.sort()
            id2ids_dic[site_id] = kept_ids
        else:
            sids_to_remove.append(site_id)
    # Deferred removal (cannot delete while iterating).
    for sid in sids_to_remove:
        del id2ids_dic[sid]
    return id2ids_dic
################################################################################
def get_connected_exb_sites(site_id, id2exb_pair_dic, seen_dic):
    """
    Follow exon border pair links starting from site_id, marking every
    reachable site ID in seen_dic (site ID -> 1). Iterative instead of
    recursive, so long connection chains cannot hit Python's recursion
    limit. Return value is None (callers use seen_dic).

    >>> id2exb_pair_dic = {'id1': 'id2', 'id2': 'id3', 'id3': 'id4', 'id4': 'id3', 'id5': 'id6', 'id6': 'id5'}
    >>> seen_dic = {}
    >>> get_connected_exb_sites("id1", id2exb_pair_dic, seen_dic)
    >>> seen_dic
    {'id1': 1, 'id2': 1, 'id3': 1, 'id4': 1}
    >>> seen_dic = {}
    >>> get_connected_exb_sites("id5", id2exb_pair_dic, seen_dic)
    >>> seen_dic
    {'id5': 1, 'id6': 1}

    """
    current_id = site_id
    # Each site maps to exactly one partner, so the chain is a simple walk
    # that terminates once a previously visited site is reached.
    while current_id not in seen_dic:
        seen_dic[current_id] = 1
        assert current_id in id2exb_pair_dic, "site ID %s not in id2exb_pair_dic" %(current_id)
        current_id = id2exb_pair_dic[current_id]
################################################################################
def get_highest_isr_exb_pair(con_exb_sites_dic, ids2isrc_dic):
    """
    Given a dictionary of connected exon border site IDs, get pair with
    highest intron-spanning read count between them.
    If all have same ISR count, return the first one seen.
    Returns (sorted [site_id1, site_id2] pair, its ISR count).

    con_exb_sites_dic:
        Connected site IDs (site ID -> 1).
    ids2isrc_dic:
        "id1-EB-id2" -> ISR count (stored in both directions).

    >>> con_exb_sites_dic = {'id1': 1, 'id2': 1, 'id3': 1, 'id4': 1}
    >>> ids2isrc_dic = {"id1-EB-id2": 10, "id2-EB-id1": 10, "id2-EB-id3": 15, "id3-EB-id2": 15, "id3-EB-id4": 20, "id4-EB-id3": 20}
    >>> get_highest_isr_exb_pair(con_exb_sites_dic, ids2isrc_dic)
    (['id3', 'id4'], 20)

    """
    seen_dic = {}
    best_pair = []
    best_isrc = 0
    # Collect IDs once for the assert message (removed unused eb_ids_list
    # from the original).
    sids_list = list(con_exb_sites_dic)
    for sid1 in con_exb_sites_dic:
        for sid2 in con_exb_sites_dic:
            if sid1 == sid2:
                continue
            sids1 = sid1 + "-EB-" + sid2
            sids2 = sid2 + "-EB-" + sid1
            # Marking both directions means each unordered pair is
            # evaluated exactly once.
            if sids1 in seen_dic:
                continue
            seen_dic[sids1] = 1
            seen_dic[sids2] = 1
            if sids1 in ids2isrc_dic:
                isrc = ids2isrc_dic[sids1]
                # Strict > keeps the first pair seen on ties.
                if isrc > best_isrc:
                    best_isrc = isrc
                    best_pair = [sid1, sid2]
    assert best_pair, "no highest ISRC pair extracted for connected exon border sites %s" %(",".join(sids_list))
    best_pair.sort()
    return best_pair, best_isrc
################################################################################
def get_exb_group_best_con(exb_group_ids_list, id2exb_pair_dic):
    """
    Given a list of site IDs connected at exon borders with intron-spanning
    reads, find the single pair whose connection points in both directions
    (A -> B and B -> A) and return it as a sorted two-element list.

    >>> exb_group_ids_list = ['id1', 'id2', 'id3']
    >>> id2exb_pair_dic = {'id1': 'id2', 'id2': 'id3', 'id3': 'id2'}
    >>> get_exb_group_best_con(exb_group_ids_list, id2exb_pair_dic)
    ['id2', 'id3']

    """
    assert exb_group_ids_list, "exb_group_ids_list empty"
    mutual_pair = []
    for site_id in exb_group_ids_list:
        partner_id = id2exb_pair_dic[site_id]
        # Mutual connection: partner points straight back at site_id.
        if id2exb_pair_dic[partner_id] == site_id:
            mutual_pair = sorted([site_id, partner_id])
            break
    assert mutual_pair, "no exon border site pair with connections in both directions found for %s" %(','.join(exb_group_ids_list))
    return mutual_pair
################################################################################
def get_border_pair_exons(site_id, id2exids_dic, exid2trid_dic,
                          id2ids_dic, ids2isrc_dic,
                          min_isr_c=2):
    """
    Given a site ID and overlapping exon IDs (id2exids_dic) > min_eir,
    check if site ID is at exon border and is connected through this
    border via intron-spanning reads to another border site.
    This info is given by: id2ids_dic, ids2isrc_dic
    Return list of exon IDs which are compatible with this connection,
    i.e. exon IDs from transcripts both sites possibly share. If more
    than one connection is present, choose the connection with most
    connecting reads.

    id2exids_dic:
        site ID -> exon IDs > min_eir mapping.
    exid2trid_dic:
        exon ID -> transcript ID mapping.
    id2ids_dic:
        site ID -> site IDs connected by exon border list mapping.
    ids2isrc_dic:
        site ID pair -> connecting intron-spanning read count.

    Several (special) cases possible:
    1) > 1 connection between the respective exon border site and other sites.
    In this case choose the connection with highest ISR count.
    2) Support by ISR counts but no common transcript ID. In this case
    keep the existing exon IDs.
    3) Only one exon ID. In this case keep the ID, even if it does not support
    the connection.
    4) No connection. Here return all exon IDs / do not change the list.

    >>> id2exids_dic = {"id1": ["t1_e1", "t2_e2", "t4_e2"], "id2" : ["t1_e2", "t2_e3", "t3_e3"], "id3": ["t3_e4", "t7_e1"], "id4": ["t4_e3", "t5_e1"], "id6": ["t6_e666"]}
    >>> exid2trid_dic = {"t1_e1": "t1", "t1_e2": "t1", "t2_e2": "t2", "t2_e3": "t2", "t3_e3": "t3", "t3_e4": "t3", "t4_e2": "t4", "t4_e3": "t4", "t6_e666": "id6", "t5_e1" : "t5", "t7_e1" : "t7"}
    >>> id2ids_dic = {"id1": ["id2", "id4"], "id2": ["id1"], "id4": ["id1"], "id6": ["id3"], "id3": ["id6"]}
    >>> ids2isrc_dic = {"id1-EB-id2": 10, "id2-EB-id1": 10, "id1-EB-id4": 2, "id4-EB-id1": 2, "id3-EB-id6": 5, "id6-EB-id3": 5}
    >>> get_border_pair_exons("id1", id2exids_dic, exid2trid_dic, id2ids_dic, ids2isrc_dic)
    (['t1_e1', 't2_e2'], 'id2')
    >>> get_border_pair_exons("id2", id2exids_dic, exid2trid_dic, id2ids_dic, ids2isrc_dic)
    (['t1_e2', 't2_e3'], 'id1')
    >>> get_border_pair_exons("id3", id2exids_dic, exid2trid_dic, id2ids_dic, ids2isrc_dic)
    (['t3_e4', 't7_e1'], '')
    >>> get_border_pair_exons("id4", id2exids_dic, exid2trid_dic, id2ids_dic, ids2isrc_dic)
    (['t4_e3'], 'id1')
    >>> get_border_pair_exons("id4", id2exids_dic, exid2trid_dic, id2ids_dic, ids2isrc_dic, min_isr_c=3)
    (['t4_e3', 't5_e1'], '')
    >>> get_border_pair_exons("id6", id2exids_dic, exid2trid_dic, id2ids_dic, ids2isrc_dic)
    (['t6_e666'], '')

    """
    # Case 4: no connection at all -> keep exon IDs unchanged.
    if site_id not in id2ids_dic:
        return id2exids_dic[site_id], ""
    # Case 1: select connection with highest ISR count.
    best_con_c = 0
    best_con_id = ""
    for sid in id2ids_dic[site_id]:
        con_c = ids2isrc_dic["%s-EB-%s" % (site_id, sid)]
        if con_c > best_con_c:
            best_con_c = con_c
            best_con_id = sid
    assert best_con_c, "no best connection ID selected"
    # Connection not sufficiently supported -> keep exon IDs unchanged.
    # (Checked before any id2exids_dic lookup of best_con_id, which the
    # original performed first and could raise a KeyError.)
    if best_con_c < min_isr_c:
        return id2exids_dic[site_id], ""
    # Compile once; raw string avoids invalid-escape warnings.
    exnr_pat = re.compile(r".+_e(\d+)")

    def _exon_nr(exid):
        # Extract exon number from "<tr_id>_e<nr>" style exon ID.
        m = exnr_pat.search(exid)
        assert m, "invalid exon ID %s" % (exid)
        return int(m.group(1))

    # Keep exon IDs whose transcript also has an adjacent exon overlapping
    # the best-connected partner site (same transcript, exon numbers
    # differing by exactly 1).
    common_ex_ids = []
    for exid1 in id2exids_dic[site_id]:
        exid1_nr = _exon_nr(exid1)
        trid1 = exid2trid_dic[exid1]
        for exid2 in id2exids_dic[best_con_id]:
            if trid1 == exid2trid_dic[exid2] and abs(exid1_nr - _exon_nr(exid2)) == 1:
                common_ex_ids.append(exid1)
    # Case 2: no common transcript -> keep existing exon IDs.
    if common_ex_ids:
        return common_ex_ids, best_con_id
    else:
        return id2exids_dic[site_id], ""
################################################################################
def select_id_from_scores_dic(id1, id2, sc_dic,
                              get_worse=False,
                              rev_filter=False):
    """
    Based on ID to score mapping, return better (or worse) scoring ID.
    Returns False on equal scores.

    >>> id1 = "id1"
    >>> id2 = "id2"
    >>> id3 = "id3"
    >>> sc_dic = {'id1' : 5, 'id2': 3, 'id3': 3}
    >>> select_id_from_scores_dic(id1, id2, sc_dic)
    'id1'
    >>> select_id_from_scores_dic(id1, id2, sc_dic, get_worse=True)
    'id2'
    >>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True, get_worse=True)
    'id1'
    >>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True)
    'id2'
    >>> select_id_from_scores_dic(id2, id3, sc_dic)
    False

    """
    sc1 = sc_dic[id1]
    sc2 = sc_dic[id2]
    # Tie: no selection possible.
    if sc1 == sc2:
        return False
    higher_id = id1 if sc1 > sc2 else id2
    lower_id = id2 if sc1 > sc2 else id1
    # rev_filter flips the get_worse meaning, so the higher-scoring ID is
    # returned exactly when both flags agree.
    if get_worse == rev_filter:
        return higher_id
    return lower_id
################################################################################
def bam_check_file_empty(in_bam):
    """
    Return True if BAM file in_bam contains no reads, False otherwise
    (uses samtools view -c).

    >>> empty_bam = "test_data/empty.bam"
    >>> bam_check_file_empty(empty_bam)
    True

    """
    assert os.path.exists(in_bam), "in_bam does not exist"
    check_cmd = "samtools view -c " + in_bam
    read_count = int(subprocess.getoutput(check_cmd).strip())
    return read_count == 0
################################################################################
def bam_count_reads(in_bam):
    """
    Return the number of reads inside BAM file in_bam
    (uses samtools view -c).

    >>> empty_bam = "test_data/empty.bam"
    >>> bam_count_reads(empty_bam)
    0

    """
    assert os.path.exists(in_bam), "in_bam does not exist"
    check_cmd = "samtools view -c " + in_bam
    return int(subprocess.getoutput(check_cmd).strip())
################################################################################
def filter_bam_file(in_bam, out_bam,
                    pp_mode=2):
    """
    Filter input bam file in_bam to keep only R2 reads (pp_mode=2),
    or keep only R1 reads (pp_mode=3). Result is written to out_bam.
    """
    # Flag 130 (= paired + second in pair) keeps R2; 0x40 keeps R1.
    filter_flag = "0x40" if pp_mode == 3 else "130"
    check_cmd = "samtools view -hb -f " + filter_flag + " " + in_bam + " -o " + out_bam
    output = subprocess.getoutput(check_cmd)
    # samtools view is silent on success, so any output signals a problem.
    assert not output, "samtools view has problems with your input:\n%s\n%s" %(check_cmd, output)
################################################################################
def get_extended_gen_seqs(args, id2row_dic,
                          ref_len_dic=False,
                          id2out_dic=False,
                          tmp_out_folder=False,
                          rr_ratios_dic=None):
    """
    Extend genomic regions and return extended genomic site sequences.

    args:
        Argparse namespace; seq_ext_mode, seq_ext and in_2bit are read.
    id2row_dic:
        site ID -> BED row mapping of regions to extend and extract.
    ref_len_dic:
        Reference (chromosome) ID -> length mapping, passed on as
        chr_len_dic (presumably to keep extended regions within
        chromosome bounds -- confirm in bed_write_row_dic_into_file).
    id2out_dic:
        Forwarded to bed_write_row_dic_into_file; NOTE(review): looks
        like an output site ID filter -- confirm there.
    tmp_out_folder:
        Folder for temporary BED/FASTA files (default: cwd).
    rr_ratios_dic:
        If given (dict), gets filled with site ID -> repeat region ratio
        computed from the extracted sequences.

    Returns site ID -> genomic sequence dictionary.
    """
    # Unique temporary file names (uuid) to allow concurrent runs.
    random_id = uuid.uuid1()
    zero_sc_tmp_bed = str(random_id) + ".zero_sc.tmp.bed"
    random_id = uuid.uuid1()
    zero_sc_tmp_fa = str(random_id) + ".zero_sc.tmp.fa"
    if tmp_out_folder:
        zero_sc_tmp_bed = tmp_out_folder + "/" + zero_sc_tmp_bed
        zero_sc_tmp_fa = tmp_out_folder + "/" + zero_sc_tmp_fa
    # Output BED regions with zero scores.
    bed_write_row_dic_into_file(id2row_dic, zero_sc_tmp_bed,
                                ext_mode=args.seq_ext_mode,
                                ext_lr=args.seq_ext,
                                zero_scores=True,
                                id2out_dic=id2out_dic,
                                chr_len_dic=ref_len_dic)
    # Get genomic sequences (lc_repeats=True: extract with lowercase
    # repeat-region masking from the .2bit file).
    bed_extract_sequences_from_2bit(zero_sc_tmp_bed,
                                    zero_sc_tmp_fa,
                                    args.in_2bit,
                                    lc_repeats=True)
    site_seqs_dic = read_fasta_into_dic(zero_sc_tmp_fa,
                                        dna=False,
                                        skip_n_seqs=False)
    # Calculate repeat region ratios for each site.
    if rr_ratios_dic is not None:
        gen_rr_ratios_dic = get_seqs_dic_repeat_region_ratios(site_seqs_dic)
        for site_id in gen_rr_ratios_dic:
            rr_ratios_dic[site_id] = gen_rr_ratios_dic[site_id]
    # Clean up temporary files.
    if os.path.exists(zero_sc_tmp_bed):
        os.remove(zero_sc_tmp_bed)
    if os.path.exists(zero_sc_tmp_fa):
        os.remove(zero_sc_tmp_fa)
    return site_seqs_dic
################################################################################
def pm_ext_merge_bed_regions(id2row_dic,
                             ref_len_dic,
                             id2sc_dic=None,
                             id2len_dic=None,
                             id2gen_se_dic=None,
                             new_stem_id=False,
                             tmp_out_folder=False,
                             merge_ext=0):
    """
    peakhood extract --pre-merge
    Extend and merge BED regions, return merged regions (not best like
    in other merge operations).
    Return new ID to row dictionary.

    id2row_dic:
        site ID -> BED row mapping of input regions.
    ref_len_dic:
        Reference ID -> length mapping, passed on as chr_len_dic.
    id2sc_dic, id2len_dic, id2gen_se_dic:
        Optional dictionaries filled by bed_read_rows_into_dic with
        score / length / genomic start+end info of the merged regions.
    new_stem_id:
        Forwarded to bed_sort_merge_output_ol_regions; presumably the ID
        stem for newly merged regions -- confirm there.
    merge_ext:
        Up-/downstream extension applied before merging.
    """
    # Unique temporary file names (uuid) to allow concurrent runs.
    random_id = uuid.uuid1()
    m1_tmp_bed = str(random_id) + ".pre_merge1.tmp.bed"
    random_id = uuid.uuid1()
    m2_tmp_bed = str(random_id) + ".pre_merge2.tmp.bed"
    if tmp_out_folder:
        m1_tmp_bed = tmp_out_folder + "/" + m1_tmp_bed
        m2_tmp_bed = tmp_out_folder + "/" + m2_tmp_bed
    # Write input regions extended by merge_ext on both sides.
    bed_write_row_dic_into_file(id2row_dic, m1_tmp_bed,
                                ext_mode=1,
                                ext_lr=merge_ext,
                                zero_scores=False,
                                chr_len_dic=ref_len_dic)
    # Sort + merge overlapping regions into m2_tmp_bed.
    bed_sort_merge_output_ol_regions(m1_tmp_bed, m2_tmp_bed,
                                     tmp_out_folder=tmp_out_folder,
                                     new_stem_id=new_stem_id)
    # Read merged regions back in (optionally filling info dictionaries).
    pm_id2row_dic = bed_read_rows_into_dic(m2_tmp_bed,
                                           id2sc_dic=id2sc_dic,
                                           id2len_dic=id2len_dic,
                                           id2gen_se_dic=id2gen_se_dic)
    # Clean up temporary files.
    if os.path.exists(m1_tmp_bed):
        os.remove(m1_tmp_bed)
    if os.path.exists(m2_tmp_bed):
        os.remove(m2_tmp_bed)
    return pm_id2row_dic
################################################################################
def _run_samtools_cmd(check_cmd, tool_name):
    """Run a samtools command; assert empty output (silent on success)."""
    output = subprocess.getoutput(check_cmd)
    assert not output, "%s has problems with your input:\n%s\n%s" %(tool_name, check_cmd, output)


def merge_filter_bam_files(list_bam, out_bam,
                           tmp_out_folder=False,
                           pp_mode=1):
    """
    Preprocess and filter input --bam files: merge all files in list_bam
    into out_bam, optionally filtering by read mate.

    list_bam:
        List of input BAM file paths.
    out_bam:
        Output (merged, filtered) BAM file path.
    tmp_out_folder:
        Folder for the intermediate merged BAM (default: cwd).
    pp_mode:
        BAM preprocessing mode.
        1: no filtering after merging.
        2: filter to keep only R2 reads.
        3: filter to keep only R1 reads.
    """
    bam_files = " ".join(list_bam)
    if pp_mode != 1:
        # Merge into a temporary BAM first, then filter into out_bam.
        random_id = uuid.uuid1()
        tmp_bam = str(random_id) + ".tmp.bam"
        if tmp_out_folder:
            tmp_bam = tmp_out_folder + "/" + tmp_bam
        _run_samtools_cmd("samtools merge -f " + tmp_bam + " " + bam_files,
                          "samtools merge")
        # Flag 130 (= paired + second in pair) keeps R2; 0x40 keeps R1.
        filter_flag = "130"
        if pp_mode == 3:
            filter_flag = "0x40"
        # -hb: include header and output BAM.
        _run_samtools_cmd("samtools view -hb -f " + filter_flag + " " + tmp_bam + " -o " + out_bam,
                          "samtools view")
        # Delete tmp files.
        if os.path.exists(tmp_bam):
            os.remove(tmp_bam)
    else:
        # No filtering: merge straight into out_bam.
        _run_samtools_cmd("samtools merge -f " + out_bam + " " + bam_files,
                          "samtools merge")
################################################################################
def intersect_bed_files(a_file, b_file, params, out_file,
                        a2b_list_dic=None,
                        ab2ol_se_dic=None,
                        sorted_out=False):
    """
    Intersect two .bed files with intersectBed, writing results to
    out_file.

    a2b_list_dic:
        If -wb is chosen, this dictionary is used to store -a ID to -b IDs
        stored as list.
    ab2ol_se_dic:
        a_id,b_id -> [a_overlap_s, a_overlap_e] (zero-based, one-based)
    sorted_out:
        Sort the intersectBed output by chromosome and start coordinate.
    """
    base_cmd = "intersectBed -a " + a_file + " -b " + b_file + " " + params
    if sorted_out:
        check_cmd = base_cmd + " | " + "sort -k1,1 -k2,2n > " + out_file
    else:
        check_cmd = base_cmd + " > " + out_file
    output = subprocess.getoutput(check_cmd)
    assert not output, "intersectBed has problems with your input:\n%s\n%s" %(check_cmd, output)
    if a2b_list_dic is None:
        return
    # Collect -a ID -> -b ID mappings (and overlap coordinates).
    with open(out_file, "r") as f:
        for line in f:
            cols = line.strip().split("\t")
            a_s = int(cols[1])
            a_e = int(cols[2])
            a_id = cols[3]
            b_id = cols[9]
            if ab2ol_se_dic is not None:
                ab2ol_se_dic["%s,%s" %(a_id, b_id)] = [a_s, a_e]
            a2b_list_dic.setdefault(a_id, []).append(b_id)
    # De-duplicate the -b ID lists.
    for a_id in a2b_list_dic:
        a2b_list_dic[a_id] = list(set(a2b_list_dic[a_id]))
################################################################################
def intersect_genes_with_sites(genes_bed, sites_bed, tmp_out):
    """
    Intersect gene regions with sites (strand-specific, -wa -wb) and
    return two dictionaries: site ID -> list of overlapping gene IDs,
    and gene ID -> genomic region [chr_id, s, e, pol] (for later BED
    output).
    """
    params = "-s -wa -wb"
    check_cmd = "intersectBed -a " + genes_bed + " -b " + sites_bed + " " + params + " > " + tmp_out
    output = subprocess.getoutput(check_cmd)
    assert not output, "intersectBed has problems with your input:\n%s\n%s" %(check_cmd, output)
    id2gene_list_dic = {}
    gene2reg_dic = {}
    with open(tmp_out, "r") as f:
        for line in f:
            cols = line.strip().split("\t")
            gene_id = cols[3]
            site_id = cols[9]
            id2gene_list_dic.setdefault(site_id, []).append(gene_id)
            # chr, start, end, polarity of the gene region.
            gene2reg_dic[gene_id] = [cols[0], int(cols[1]), int(cols[2]), cols[5]]
    return id2gene_list_dic, gene2reg_dic
################################################################################
def bed_write_reg_list_to_file(id2reg_dic, out_bed,
                               id2out_dic=None):
    """
    Write dictionary of region lists to BED file (6 columns, score 0).

    Example dictionary:
    {'gene1': ["chr1", 10, 20, "+"], ...}
    id2out_dic:
        If given, output only regions whose ID is in this dictionary.
    """
    assert id2reg_dic, "given id2reg_dic empty"
    c_out = 0
    with open(out_bed, "w") as bed_out:
        for reg_id, reg in id2reg_dic.items():
            # Optional ID filter.
            if id2out_dic is not None and reg_id not in id2out_dic:
                continue
            c_out += 1
            bed_out.write("%s\t%i\t%i\t%s\t0\t%s\n" %(reg[0], reg[1], reg[2], reg_id, reg[3]))
    assert c_out, "nothing was output"
################################################################################
def get_site_len_list(len_in):
    """
    Read in site lengths (one length per row from file len_in).
    Return list of lengths (as ints).

    Note: the previous version ended the with-block with a bare
    "f.closed" expression — a no-op attribute access (the context
    manager already closes the file); it has been removed.
    """
    with open(len_in) as f:
        site_len_list = [int(line.strip()) for line in f]
    assert site_len_list, "site_len_list empty (no lengths read in from %s)" %(len_in)
    return site_len_list
################################################################################
def gtf_check_gene_feat(in_gtf,
                        n_rows_check=10000):
    """
    Check whether in_gtf GTF file contains rows with feature "gene"
    among its first n_rows_check non-header rows. Return True if a
    gene feature is found, False otherwise.

    n_rows_check:
        Number of (non-header) rows to check.
    """
    found = False
    # Open GTF either as .gz or as plain text file.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    checked = 0
    for line in f:
        # Skip header rows.
        if line.startswith("#"):
            continue
        checked += 1
        if checked > n_rows_check:
            break
        if line.strip().split("\t")[2] == "gene":
            found = True
            break
    f.close()
    return found
################################################################################
def bed_intersect_sites_genes_get_infos(sites_bed, genes_bed, id2gids_dic,
                                        tmp_out_folder=False):
    """
    Intersect gene regions with sites, and store site_id -> overlapping
    gene IDs mapping in id2gids_dic.

    tmp_out_folder:
        Unused; kept for backward compatibility. The previous version
        generated a tmp file name here but never wrote to it —
        intersectBed output is read directly from stdout.

    Fixes:
        - If there are no overlaps, intersectBed prints nothing and
          "".split('\\n') yields [''], which previously raised an
          IndexError on cols[3]. Empty lines are now skipped.

    >>> sites_bed = "test_data/test_intersect.sites.bed"
    >>> genes_bed = "test_data/test_intersect.genes.bed"
    >>> id2gids_dic = {}
    >>> bed_intersect_sites_genes_get_infos(sites_bed, genes_bed, id2gids_dic)
    >>> id2gids_dic
    {'site1': ['ENSG1', 'ENSG2'], 'site2': ['ENSG1'], 'site4': ['ENSG3']}
    """
    # -wb: report -b entry; -s: strand-specific; -f 0.8: >= 80% overlap.
    params = "-wb -s -f 0.8"
    check_cmd = "intersectBed -a " + sites_bed + " -b " + genes_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    for line in output.split('\n'):
        # Skip empty lines (no overlaps found).
        if not line.strip():
            continue
        cols = line.strip().split("\t")
        site_id = cols[3]
        gene_id = cols[9]
        if site_id in id2gids_dic:
            id2gids_dic[site_id].append(gene_id)
        else:
            id2gids_dic[site_id] = [gene_id]
################################################################################
def gtf_extract_gene_bed(in_gtf, out_bed,
                         gene_ids_dic=False,
                         gid2gn_dic=None,
                         gid2gbt_dic=None):
    """
    Extract gene regions from in_gtf GTF file, and output to out_bed BED
    file (6 columns, score 0).

    gene_ids_dic:
        Dictionary with gene IDs for filtering (keeping dic IDs).
    gid2gn_dic:
        If given, store gene ID -> gene name mapping.
    gid2gbt_dic:
        If given, store gene ID -> gene biotype mapping.
    """
    # Open GTF either as .gz or as plain text file.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    c_out = 0
    with open(out_bed, "w") as bed_out:
        for line in f:
            # Skip header rows.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            if cols[2] != "gene":
                continue
            # Keep standard chromosomes only (IDs get normalized).
            chr_id = check_convert_chr_id(cols[0])
            if not chr_id:
                continue
            feat_s = int(cols[3]) - 1  # BED starts are 0-based.
            feat_e = int(cols[4])
            feat_pol = cols[6]
            infos = cols[8]
            # Extract gene ID from attributes column.
            m = re.search('gene_id "(.+?)"', infos)
            assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_id = m.group(1)
            # Optional gene ID filter.
            if gene_ids_dic and gene_id not in gene_ids_dic:
                continue
            # Gene name / biotype (present in GENCODE, Ensembl GTFs).
            gene_name = "-"
            gene_biotype = "-"
            m = re.search('gene_name "(.+?)"', infos)
            if m:
                gene_name = m.group(1)
            m = re.search('gene_biotype "(.+?)"', infos)
            if not m:
                # Gencode uses gene_type instead of gene_biotype.
                m = re.search('gene_type "(.+?)"', infos)
            if m:
                gene_biotype = m.group(1)
            if gid2gn_dic is not None:
                gid2gn_dic[gene_id] = gene_name
            if gid2gbt_dic is not None:
                gid2gbt_dic[gene_id] = gene_biotype
            c_out += 1
            bed_out.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id, feat_s, feat_e, gene_id, feat_pol))
    f.close()
    assert c_out, "no regions output to out_bed. Invalid in_gtf (e.g. chromosome IDs inside --gtf should be either of type \"1\", or \"chr1\"), or too restrictive gene_ids_dic filtering?"
################################################################################
def read_ids_into_dic(ids_file,
                      check_dic=True,
                      ids_dic=False):
    """
    Read IDs from ids_file (one ID per line) into a dictionary
    (ID -> 1). If a non-empty ids_dic is given, add the IDs to it
    instead of creating a new dictionary.

    check_dic:
        Assert that the resulting dictionary is non-empty.
    """
    if not ids_dic:
        ids_dic = {}
    with open(ids_file) as f:
        for line in f:
            ids_dic[line.strip()] = 1
    if check_dic:
        assert ids_dic, "IDs dictionary ids_dic empty"
    return ids_dic
################################################################################
def gtf_get_transcript_ids(in_gtf):
    """
    Collect transcript IDs from in_gtf GTF file (rows with feature
    "transcript") and return them as a dictionary (transcript ID -> 1).
    """
    tr_ids_dic = {}
    # Open GTF either as .gz or as plain text file.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header rows.
        if line.startswith("#"):
            continue
        cols = line.strip().split("\t")
        if cols[2] != "transcript":
            continue
        # Extract transcript ID from attributes column.
        m = re.search('transcript_id "(.+?)"', cols[8])
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        tr_ids_dic[m.group(1)] = 1
    f.close()
    assert tr_ids_dic, "no transcript IDs read in"
    return tr_ids_dic
################################################################################
def bed_get_region_pols(in_bed):
    """
    Map BED region IDs (column 4, expected unique) to their strand
    polarities (column 6). Return ID -> polarity dictionary.
    """
    id2pol_dic = {}
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            id2pol_dic[cols[3]] = cols[5]
    assert id2pol_dic, "nothing read in for in_bed \"%s\"" %(in_bed)
    return id2pol_dic
################################################################################
def bed_merge_transcript_regions(in_bed, out_bed,
                                 new_ids=False,
                                 id2pol_dic=False):
    """
    Merge transcript regions BED (strand specific) via sort + mergeBed.

    new_ids:
        If set, assign running IDs (tr_reg_1, ...) instead of the
        ";"-joined merged region IDs.
    id2pol_dic:
        Region ID to genomic polarity mapping (read from in_bed if not
        given).
    """
    assert os.path.isfile(in_bed), "cannot open in_bed \"%s\"" % (in_bed)
    # Polarity lookup for merged regions (taken from first merged ID).
    if not id2pol_dic:
        id2pol_dic = bed_get_region_pols(in_bed)
    check_cmd = 'sort -k1,1 -k2,2n ' + in_bed + ' | mergeBed -i stdin -s -c 4 -o distinct -delim ";"'
    output = subprocess.getoutput(check_cmd)
    c_read = 0
    with open(out_bed, "w") as mrg_out:
        for line in output.split('\n'):
            cols = line.strip().split("\t")
            c_read += 1
            merged_ids = cols[3].split(";")
            reg_pol = id2pol_dic[merged_ids[0]]
            new_id = "tr_reg_%i" %(c_read) if new_ids else cols[3]
            mrg_out.write("%s\t%s\t%s\t%s\t0\t%s\n" %(cols[0], cols[1], cols[2], new_id, reg_pol))
    assert c_read, "no merged transcript regions output"
################################################################################
def bed_sort_merge_output_top_entries(in_bed, out_bed,
                                      alpha_merge=False,
                                      check_chr_id_format=False,
                                      rev_filter=False):
    """
    Sort in_bed, merge overlapping entries with mergeBed, and for each
    set of overlapping regions output the entry with the highest score
    (lowest if rev_filter) to out_bed.

    alpha_merge:
        Alphabetical selection (first ID after sorting; for inputs
        without site scores).
    rev_filter:
        Select the lowest- instead of the highest-scoring entry.
    """
    assert os.path.isfile(in_bed), "cannot open in_bed \"%s\"" % (in_bed)
    # Temporary file for the merged regions.
    tmp_bed = str(uuid.uuid1()) + ".tmp.bed"
    # Original rows and scores, keyed by region ID.
    id2row_dic = bed_read_rows_into_dic(in_bed,
                                        check_chr_id_format=check_chr_id_format)
    id2sc_dic = bed_get_region_id_scores(in_bed)
    # Sort into out_bed, merge into tmp_bed, then overwrite out_bed
    # with the selected top entries.
    bed_sort_file(in_bed, out_bed)
    bed_merge_file(out_bed, tmp_bed)
    with open(out_bed, "w") as bed_out, open(tmp_bed) as f:
        for line in f:
            ids = line.strip().split("\t")[3].split(";")
            if alpha_merge:
                best_id = sorted(ids)[0]
            else:
                best_id = "-"
                best_sc = 666666 if rev_filter else -666666
                for site_id in ids:
                    assert site_id in id2sc_dic, "site ID \"%s\" not found in id2sc_dic" % (site_id)
                    site_sc = id2sc_dic[site_id]
                    better = site_sc < best_sc if rev_filter else site_sc > best_sc
                    if better:
                        best_sc = site_sc
                        best_id = site_id
            assert best_id in id2row_dic, "site ID \"%s\" not found in id2row_dic" % (best_id)
            bed_out.write(id2row_dic[best_id] + "\n")
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
################################################################################
def bed_sort_merge_output_ol_regions(in_bed, out_bed,
                                     tmp_out_folder=False,
                                     new_stem_id=False):
    """
    Sort in_bed, merge overlapping / book-ended entries with mergeBed,
    and output one region per merged set to out_bed. The new region
    score is the mean of the merged site scores; the new region ID is
    either the merged site IDs joined by "_" or, if new_stem_id is set,
    new_stem_id plus a running number.

    Example (in -> out):
    chr1 3000 4000 CLIP2 2.57 +
    chr2 1000 2500 CLIP1 1.58 +
    chr1 3500 5000 CLIP3 3.11 +
    ->
    chr1 3000 5000 CLIP2_CLIP3 2.84 +
    chr2 1000 2500 CLIP1 1.58 +
    """
    assert os.path.isfile(in_bed), "cannot open in_bed \"%s\"" % (in_bed)
    # Temporary file for merged regions.
    tmp_bed = str(uuid.uuid1()) + ".tmp.bed"
    if tmp_out_folder:
        tmp_bed = tmp_out_folder + "/" + tmp_bed
    # Scores and polarities of the input regions.
    id2pol_dic = {}
    id2sc_dic = bed_get_region_id_scores(in_bed,
                                         id2pol_dic=id2pol_dic)
    # Sort into out_bed, merge into tmp_bed, then overwrite out_bed.
    bed_sort_file(in_bed, out_bed)
    bed_merge_file(out_bed, tmp_bed)
    c_out = 0
    with open(out_bed, "w") as bed_out, open(tmp_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            ids = cols[3].split(";")
            c_out += 1
            # New score = mean of merged site scores.
            scores = [id2sc_dic[sid] for sid in ids]
            if len(scores) > 1:
                new_sc = statistics.mean(scores)
            else:
                new_sc = scores[0]
            # New region ID.
            if new_stem_id:
                new_id = new_stem_id + "_" + str(c_out)
            else:
                new_id = '_'.join(ids)
            # Polarity taken from the first merged site.
            bed_out.write("%s\t%s\t%s\t%s\t%s\t%s\n" %(cols[0], cols[1], cols[2], new_id, new_sc, id2pol_dic[ids[0]]))
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
################################################################################
def bed_get_region_id_scores(in_bed, no_float=False,
                             id2pol_dic=None):
    """
    Read in .bed file, and store scores for each region in dictionary
    (unique column 4 ID and column 5 score have to be present).
    Return dictionary with mappings region ID -> region score

    no_float:
        Keep scores as strings (column 5 content, no float conversion).
        Fix: float() previously ran unconditionally before the no_float
        check, so non-numeric score columns (e.g. ".") raised ValueError
        even with no_float=True. Conversion is now skipped entirely.
    id2pol_dic:
        If given, also store region ID -> polarity (column 6).

    >>> test_bed = "test_data/test5.bed"
    >>> bed_get_region_id_scores(test_bed)
    {'CLIP2': 2.57, 'CLIP1': 1.58, 'CLIP3': 3.11}
    """
    id2sc_dic = {}
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            # Only convert when a float score is requested.
            site_sc = cols[4] if no_float else float(cols[4])
            id2sc_dic[site_id] = site_sc
            if id2pol_dic is not None:
                id2pol_dic[site_id] = cols[5]
    assert id2sc_dic, "nothing read in for in_bed \"%s\"" %(in_bed)
    return id2sc_dic
################################################################################
def bed_merge_file(in_bed, out_bed,
                   custom_params_str=False):
    """
    Merge overlapping .bed entries with mergeBed (bedtools), keeping
    the merged region IDs (";"-separated, column 4) so one region per
    overlap set can be selected later.

    custom_params_str:
        Custom mergeBed parameter string. Default parameters merge
        strand-specifically and collect distinct column-4 IDs.
    """
    # mergeBed comes with the bedtools suite.
    assert is_tool("bedtools"), "bedtools not in PATH"
    params_str = custom_params_str if custom_params_str else '-s -c 4 -o distinct -delim ";"'
    check_cmd = "mergeBed -i " + in_bed + " " + params_str + " > " + out_bed
    output = subprocess.getoutput(check_cmd)
    # mergeBed prints nothing on success; any output means an error.
    assert not output, "mergeBed is complaining:\n%s\n%s" %(check_cmd, output)
################################################################################
def bed_output_exon_12col_bed_file(tr_ids_dic, out_12col_bed,
                                   trid2reg_dic, trid2exc_dic, exid2reg_dic,
                                   exid2next_dic=False):
    """
    Output 12-column BED of transcript regions, for IGV viewing.
    tr_ids_dic:
        Transcript IDs to output dictionary.
    trid2reg_dic:
        Transcript ID to genomic region mapping
        transcript_id -> [chr_id, s, e, pol]
    trid2exc_dic:
        Transcript ID to exon count mapping
    exid2reg_dic:
        Exon ID to genomic region mapping
        exon_id -> [chr_id, s, e, pol]
    exid2next_dic:
        If exon ID to NEXT ID mapping is given, treat exid2reg_dic as
        NEXT -> exonic region mapping.
    """
    assert tr_ids_dic, "tr_ids_dic empty"
    OUT12BED = open(out_12col_bed, "w")
    for tr_id in tr_ids_dic:
        # Genomic span of the transcript.
        tr_chr_id = trid2reg_dic[tr_id][0]
        tr_gen_s = trid2reg_dic[tr_id][1] # 0-based.
        tr_gen_e = trid2reg_dic[tr_id][2]
        tr_gen_pol = trid2reg_dic[tr_id][3]
        tr_exc = trid2exc_dic[tr_id]
        # BED12 blockSizes / blockStarts columns, built left-to-right
        # in genomic coordinates.
        ex_len_list = []
        ex_offset_list = []
        # Exon IDs (<tr_id>_e<nr>) are numbered in transcript (5'->3')
        # order, so for minus-strand transcripts the genomic
        # left-to-right order is exon tr_exc down to exon 1 — iterate
        # the range in reverse and adjust the +1 offset accordingly.
        range_start = 0
        range_stop = tr_exc
        range_step = 1
        range_add = 1
        if tr_gen_pol == "-":
            range_start = tr_exc
            range_stop = 0
            range_step = -1
            range_add = 0
        for i in range(range_start, range_stop, range_step):
            ex_nr = i + range_add
            ex_id = tr_id + "_e" + str(ex_nr)
            # Resolve exon ID to the region key used in exid2reg_dic
            # (the exon ID itself, or its NEXT ID if a mapping is given).
            ex_next = ex_id
            if exid2next_dic:
                assert ex_id in exid2next_dic, "exon ID %s not in exid2next_dic" %(ex_id)
                ex_next = exid2next_dic[ex_id]
            chr_id = exid2reg_dic[ex_next][0]
            assert tr_chr_id == chr_id, "transcript chromosome ID != NEXT region chromosome ID (%s != %s)" %(tr_chr_id, chr_id)
            gen_s = exid2reg_dic[ex_next][1] # 0-based.
            gen_e = exid2reg_dic[ex_next][2]
            gen_pol = exid2reg_dic[ex_next][3]
            assert tr_gen_pol == gen_pol, "transcript gene polarity != NEXT region polarity (%s != %s)" %(tr_gen_pol, gen_pol)
            # Block size and start offset relative to transcript start
            # (BED12 blockStarts are relative, not absolute).
            ex_l = gen_e - gen_s
            ex_offset = gen_s - tr_gen_s
            ex_len_list.append(str(ex_l))
            ex_offset_list.append(str(ex_offset))
        # Output 12-col BED (IGV compatible).
        ex_len_str = ",".join(ex_len_list)
        ex_offset_str = ",".join(ex_offset_list)
        bed_out = "%s\t%i\t%i\t%s\t0\t%s\t%i\t%i\t100,100,100\t%i\t%s\t%s" %(tr_chr_id, tr_gen_s, tr_gen_e, tr_id, tr_gen_pol, tr_gen_s, tr_gen_e, tr_exc, ex_len_str, ex_offset_str)
        OUT12BED.write("%s\n" %(bed_out))
    OUT12BED.close()
################################################################################
def bed_get_score_to_count_dic(in_bed):
    """
    Given an .bed file in_bed, store scores and count how many times each
    score appears. Return dictionary with score -> count mapping.
    Scores are kept as strings (column 5 content, no conversion).

    Fix: removed the unused local variable "row"; counting uses
    dict.get instead of an explicit membership branch.

    >>> in_bed = "test_data/test1.bed"
    >>> bed_get_score_to_count_dic(in_bed)
    {'1': 2, '0': 2, '2': 1, '3': 2}
    """
    assert os.path.isfile(in_bed), "cannot open in_bed \"%s\"" % (in_bed)
    sc2c_dic = {}
    with open(in_bed) as f:
        for line in f:
            site_sc = line.strip().split("\t")[4]
            sc2c_dic[site_sc] = sc2c_dic.get(site_sc, 0) + 1
    return sc2c_dic
################################################################################
def gtf_extract_transcript_bed(in_gtf, out_bed,
                               trid2reg_dic=None,
                               tr_ids_dic=False):
    """
    Extract transcript regions from in_gtf GTF file, and output to
    out_bed BED file (6 columns, score 0).

    tr_ids_dic:
        Dictionary with transcript IDs for filtering (keeping dic IDs).
    trid2reg_dic:
        If given, store genomic region of each output transcript
        [chr_id, s, e, pol].
    """
    # Open GTF either as .gz or as plain text file.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    c_out = 0
    with open(out_bed, "w") as bed_out:
        for line in f:
            # Skip header rows.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            if cols[2] != "transcript":
                continue
            # Keep standard chromosomes only (IDs get normalized).
            chr_id = check_convert_chr_id(cols[0])
            if not chr_id:
                continue
            feat_s = int(cols[3]) - 1  # BED starts are 0-based.
            feat_e = int(cols[4])
            feat_pol = cols[6]
            # Extract transcript ID from attributes column.
            m = re.search('transcript_id "(.+?)"', cols[8])
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            transcript_id = m.group(1)
            # Optional transcript ID filter.
            if tr_ids_dic and transcript_id not in tr_ids_dic:
                continue
            if trid2reg_dic is not None:
                trid2reg_dic[transcript_id] = [chr_id, feat_s, feat_e, feat_pol]
            c_out += 1
            bed_out.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id, feat_s, feat_e, transcript_id, feat_pol))
    f.close()
    assert c_out, "no regions output to out_bed. Invalid in_gtf or too restrictive tr_ids_dic filtering?"
################################################################################
def ph_extract_generate_html_report(out_folder, hoodlib_path,
eir_stats_dic=False,
ei_ratios_list=False,
eib_ratios_list=False,
intergen_site_ids_dic=False,
intron_site_ids_dic=False,
all_ex_site_ids_dic=False,
tc_ex_site_ids_dic=False,
gc_ex_site_ids_dic=False,
all_tr_site_seqs_dic=False,
sel_tr_site_seqs_dic=False,
gen_rr_ratios_dic=False,
tr_rr_ratios_dic=False,
site_lengths_list=False,
copy_logo=True,
html_report_out="report.peakhood_extract.html",
plots_subfolder="html_plots"):
"""
HTML report for peakhood extract.
eir_stats_dic:
Various exon-intron ratio stats dictionary.
ei_ratios_list:
Exon-intron ratios list.
eib_ratios_list:
Exon-intron border region ratios list.
all_ex_site_ids_dic:
All exonic site IDs (ID -> sequence)
intron_site_ids_dic:
Intronic site IDs (ID -> sequence)
intergen_site_ids_dic:
Intergenic site IDs (ID -> sequence)
tc_ex_site_ids_dic:
Transcript context exonic site IDs (ID -> sequence)
gc_ex_site_ids_dic:
Genomic context exonic site IDs (ID -> sequence)
all_tr_site_seqs_dic:
All transcript context site_id,tr_id combinations (ID -> sequence)
sel_tr_site_seqs_dic:
Selected transcript context site_id,tr_id combinations (ID -> sequence)
gen_rr_ratios_dic:
Genomic site to repeat region ratio
(repeat region nucleotides / all site nucleotides)
site ID -> ratio
tr_rr_ratios_dic:
Transcript site to repeat region ratio
(repeat region nucleotides / all site nucleotides)
site_id,tr_id combination ID -> ratio
copy_logo:
Copy logo to results plots folder.
"""
assert os.path.exists(out_folder), "out_folder does not exist"
assert os.path.exists(hoodlib_path), "hoodlib_path does not exist"
assert eir_stats_dic, "eir_stats_dic empty"
assert ei_ratios_list, "ei_ratios_list empty"
assert eib_ratios_list, "eib_ratios_list empty"
assert site_lengths_list, "site_lengths_list empty"
from markdown import markdown
plots_folder = plots_subfolder
plots_out_folder = out_folder + "/" + plots_folder
if not os.path.exists(plots_out_folder):
os.makedirs(plots_out_folder)
html_out = out_folder + "/" + "report.peakhood_extract.html"
if html_report_out:
html_out = html_report_out
# Plot files.
ei_ratio_density_plot = "ei_ratio_density_plot.png"
ei_ratio_density_plot_out = plots_out_folder + "/" + ei_ratio_density_plot
eib_ratio_density_plot = "eib_ratio_density_plot.png"
eib_ratio_density_plot_out = plots_out_folder + "/" + eib_ratio_density_plot
lengths_plot = "set_lengths_plot.png"
lengths_plot_out = plots_out_folder + "/" + lengths_plot
# Paths to logo and ploty.
logo1_path = hoodlib_path + "/content/logo1.png"
logo2_path = hoodlib_path + "/content/logo2.png"
# sorttable_js_path = hoodlib_path + "/content/sorttable.js"
# plotly_js_path = hoodlib_path + "/content/plotly-latest.min.js"
# assert os.path.exists(plotly_js_path), "plotly js %s not found" %(plotly_js_path)
# Copy logo to plots folder.
if copy_logo:
logo_out = plots_out_folder + "/" + "logo.png"
shutil_copy_file(logo1_path, logo_out)
logo1_path = plots_folder + "/" + "logo.png"
mdtext = """
<head>
<title>Peakhood - Context Extraction Report</title>
</head>
<img src="%s" alt="ph_logo"
title="ph_logo" width="650" />
<p><body style="font-family:sans-serif" link="#007af4" vlink="#007af4" alink="#007af4"></p>
""" %(logo1_path)
mdtext += """
# Context Extraction Report
List of available context extraction statistics generated
by Peakhood (peakhood extract):
- [Input site length statistics](#site-length-stats)
- [Input site length distribution](#site-length-plot)
- [Site region type statistics](#site-region-stats)
- [Exon-intron coverage ratio statistics](#ei-ratio-stats)
- [Exon-intron coverage ratio distribution](#ei-ratio-plot)
- [Exon-intron border coverage ratio statistics](#eib-ratio-stats)
- [Exon-intron border coverage ratio distribution](#eib-ratio-plot)
- [Repeat region statistics](#rep-reg-stats)"""
"""
Site lengths statistics.
"""
mdtext += """
## Input site length statistics ### {#site-length-stats}
**Table:** Input site length statistics
(min, max, mean, and median length) in nucleotides (nt).
"""
mdtext += "| Attribute | Value | \n"
mdtext += "| :-: | :-: |\n"
mdtext += "| # sites | %i |\n" %(len(site_lengths_list))
mdtext += "| min site length | %i |\n" %(min(site_lengths_list))
mdtext += "| max site length | %i |\n" %(max(site_lengths_list))
mdtext += "| mean site length | %.1f |\n" %(statistics.mean(site_lengths_list))
mdtext += "| median site length | %i |\n" %(statistics.median(site_lengths_list))
mdtext += "\n \n \n"
# Make site length distribution box plot.
create_site_lengths_plot(site_lengths_list, lengths_plot_out)
lengths_plot_path = plots_folder + "/" + lengths_plot
mdtext += """
## Input site length distribution ### {#site-length-plot}
Input site length distribution, after pre-merging of book-ended and
overlapping input sites (if set)
and pre-filtering (if set, e.g. by score or length).
Note that set --pre-merge leads to increased lengths if there are adjacent
or overlapping sites. Moreover, set --max-len (default 200) limits the
maximum site length, but this can increase again if --pre-merge is set
(since --pre-merge is applied after --max-len filtering).
"""
mdtext += '<img src="' + lengths_plot_path + '" alt="Site length distribution"' + "\n"
mdtext += 'title="Site length distribution" width="500" />' + "\n"
mdtext += """
**Figure:** Input site length distribution (after pre-filtering and-pre-merging sites).
"""
"""
Site region type statistics
To add ?
- [Site region type distribution](#site-region-plot)
"""
c_intergen_sites = len(intergen_site_ids_dic)
c_intron_sites = len(intron_site_ids_dic)
c_exon_sites_merged_tc = len(sel_tr_site_seqs_dic)
c_exon_sites_tc = len(tc_ex_site_ids_dic)
c_exon_sites_gc = len(gc_ex_site_ids_dic)
mdtext += """
## Site region type statistics ### {#site-region-stats}
**Table:**
Assigned site region type statistics. Exonic sites can be either assigned to
transcript context (TC) or genomic context (GC), depending on the read information
in the input BAM file. In addition, transcript context site count with
merged exon border sites is given (MEXB).
Intronic sites are sites with insufficient
(see --min-exon-overlap, default >= 90 percent) or no overlap with any
exonic region from the input GTF file. Intergenic sites do not overlap
with any transcript regions from the input GTF file. Depending on which
pipeline was used to determine the input CLIP-seq peak regions, there might
be little or no intergenic sites due to pre-filtering for gene regions.
"""
mdtext += "| Region type | Count | \n"
mdtext += "| :-: | :-: |\n"
mdtext += "| exonic (GC) | %i |\n" %(c_exon_sites_gc)
mdtext += "| exonic (TC) | %i |\n" %(c_exon_sites_tc)
mdtext += "| exonic (TC) MEXB | %i |\n" %(c_exon_sites_merged_tc)
mdtext += "| intronic | %i |\n" %(c_intron_sites)
mdtext += "| intergenic | %i |\n" %(c_intergen_sites)
mdtext += "\n \n \n"
"""
Exon-intron coverage ratio statistics
"""
# EIR stats.
c_uniq_exons = eir_stats_dic["c_uniq_exons"]
eir_mean = eir_stats_dic["eir_mean"]
eir_median = eir_stats_dic["eir_median"]
eir_stdev = eir_stats_dic["eir_stdev"]
eir_perc5 = eir_stats_dic["eir_perc5"]
eir_perc25 = eir_stats_dic["eir_perc25"]
eir_perc50 = eir_stats_dic["eir_perc50"]
eir_perc75 = eir_stats_dic["eir_perc75"]
eir_perc95 = eir_stats_dic["eir_perc95"]
eir_min = eir_stats_dic["eir_min"]
eir_max = eir_stats_dic["eir_max"]
mdtext += """
## Exon-intron coverage ratio statistics ### {#ei-ratio-stats}
**Table:**
Exon-intron coverage ratios statistics for unique exon regions (# unique exons: %i)
containing CLIP-seq sites. A unique exon region can include several annotated
exon regions, since GTF files usually contain exons with different IDs but
identical regions.
The unique exon region ratio is the average ratio of all exon regions with
the same coordinates as the unique exon region.
The ratio of an exon region is calculated
by dividing the exon coverage (reads / region length) through the coverage
of the neighboring intron(s). In case of two introns, the average coverage
of the two introns is used as the divisor. In case of no introns, a fixed
value above the threshold is assigned.
""" %(c_uniq_exons)
mdtext += "| Attribute | Value | \n"
mdtext += "| :-: | :-: |\n"
mdtext += "| # unique exons | %i |\n" %(c_uniq_exons)
mdtext += "| min ratio | %.4f |\n" %(eir_min)
mdtext += "| max ratio | %.4f |\n" %(eir_max)
mdtext += "| mean ratio | %.4f |\n" %(eir_mean)
mdtext += "| stdev ratio | %.4f |\n" %(eir_stdev)
mdtext += "| median ratio | %.4f |\n" %(eir_median)
mdtext += "| 25th percentile ratio | %.4f |\n" %(eir_perc25)
mdtext += "| 50th percentile ratio | %.4f |\n" %(eir_perc50)
mdtext += "| 75th percentile ratio | %.4f |\n" %(eir_perc75)
mdtext += "\n \n \n"
kde_s = eir_min
kde_e = eir_perc95
kde_clip = [kde_s, kde_e]
kde_bw_adjust = 0.4
plot_ei_ratio_density(ei_ratios_list, ei_ratio_density_plot_out,
x_label="Exon-intron coverage ratio",
y_label="Density",
fig_width=7,
fig_height=3,
kde_bw_adjust=kde_bw_adjust,
kde_clip=kde_clip,
x_0_to_100=False)
plot_path = plots_folder + "/" + ei_ratio_density_plot
mdtext += """
## Exon-intron coverage ratio distribution ### {#ei-ratio-plot}
This plot shows the distribution of exon-intron coverage ratios for
unique exon regions containing CLIP-seq sites.
"""
mdtext += '<img src="' + plot_path + '" alt="ei_raio_plot"' + "\n"
mdtext += 'title="Exon-intron ratio distribution" width="650" />' + "\n"
mdtext += """
**Figure:** Distribution of exon-intron ratios (exon coverage divided by
surrounding intron coverage) for unique exon regions containing CLIP-seq sites.
To prevent suboptimal scaling due to outliers, ratios are plotted only up
to the 95th percentile ratio.
"""
"""
Exon-intron border coverage ratio statistics
"""
# EIBR stats.
c_eibr_regions = eir_stats_dic["c_eibr_regions"]
eibr_mean = eir_stats_dic["eibr_mean"]
eibr_median = eir_stats_dic["eibr_median"]
eibr_stdev = eir_stats_dic["eibr_stdev"]
eibr_perc5 = eir_stats_dic["eibr_perc5"]
eibr_perc25 = eir_stats_dic["eibr_perc25"]
eibr_perc50 = eir_stats_dic["eibr_perc50"]
eibr_perc75 = eir_stats_dic["eibr_perc75"]
eibr_perc95 = eir_stats_dic["eibr_perc95"]
eibr_min = eir_stats_dic["eibr_min"]
eibr_max = eir_stats_dic["eibr_max"]
mdtext += """
## Exon-intron border coverage ratio statistics ### {#eib-ratio-stats}
**Table:**
Exon-intron border coverage ratio statistics for unique exon regions
containing CLIP-seq sites. A unique exon region can include several annotated
exon regions, since GTF files usually contain exons with different IDs but
identical regions. Note that not all unique exon regions might be considered,
since exon-intron borders with small read coverages are not considered.
The ratio is calculated for each exon-intron border, taking a small border
region on the intron as well as on the exon, and calculating the coverage ratio
between the two. This is done for both exon ends, and the average or the
ratio with more reads is returned for each exon region. These ratios are then
merged to one ratio for each unique exon region.
"""
mdtext += "| Attribute | Value | \n"
mdtext += "| :-: | :-: |\n"
mdtext += "| # considered exons | %i |\n" %(c_eibr_regions)
mdtext += "| min ratio | %.4f |\n" %(eibr_min)
mdtext += "| max ratio | %.4f |\n" %(eibr_max)
mdtext += "| mean ratio | %.4f |\n" %(eibr_mean)
mdtext += "| stdev ratio | %.4f |\n" %(eibr_stdev)
mdtext += "| median ratio | %.4f |\n" %(eibr_median)
mdtext += "| 25th percentile ratio | %.4f |\n" %(eibr_perc25)
mdtext += "| 50th percentile ratio | %.4f |\n" %(eibr_perc50)
mdtext += "| 75th percentile ratio | %.4f |\n" %(eibr_perc75)
mdtext += "\n \n \n"
kde_s = eibr_min
kde_e = eibr_perc95
kde_clip = [kde_s, kde_e]
kde_bw_adjust = 0.4
plot_ei_ratio_density(eib_ratios_list, eib_ratio_density_plot_out,
x_label="Exon-intron border coverage ratio",
y_label="Density",
fig_width=7,
fig_height=3,
kde_bw_adjust=kde_bw_adjust,
kde_clip=kde_clip,
x_0_to_100=False)
plot_path = plots_folder + "/" + eib_ratio_density_plot
mdtext += """
## Exon-intron border coverage ratio distribution ### {#ei-ratio-plot}
This plot shows the distribution of exon-intron border coverage ratios for
unique exon regions containing CLIP-seq sites.
"""
mdtext += '<img src="' + plot_path + '" alt="ei_raio_plot"' + "\n"
mdtext += 'title="Exon-intron border ratio distribution" width="650" />' + "\n"
mdtext += """
**Figure:** Distribution of exon-intron border ratios (exon border coverages divided by
adjacent intron border coverages) for unique exon regions containing CLIP-seq sites.
To prevent suboptimal scaling due to outliers, ratios are plotted only up
to the 95th percentile ratio.
"""
"""
Repeat region coverage statistics
"""
# Exonic sites, selected transcript context, transcript.
sel_exs_tc_tr_rrc = 0.0
sel_tc_tr_c = len(sel_tr_site_seqs_dic)
for sitetrid in sel_tr_site_seqs_dic:
sel_exs_tc_tr_rrc += tr_rr_ratios_dic[sitetrid]
sel_exs_tc_tr_perc = 0.0
if sel_tc_tr_c:
sel_exs_tc_tr_perc = (sel_exs_tc_tr_rrc / sel_tc_tr_c) * 100
# Exonic sites, genomic context, genomic.
exs_gc_gen_rrc = 0.0
exs_gc_gen_c = len(gc_ex_site_ids_dic)
for site_id in gc_ex_site_ids_dic:
exs_gc_gen_rrc += gen_rr_ratios_dic[site_id]
exs_gc_gen_perc = 0.0
if exs_gc_gen_c:
exs_gc_gen_perc = (exs_gc_gen_rrc / exs_gc_gen_c) * 100
# Intronic sites.
intronic_rrc = 0.0
intronic_c = len(intron_site_ids_dic)
for site_id in intron_site_ids_dic:
intronic_rrc += gen_rr_ratios_dic[site_id]
intronic_perc = 0.0
if intronic_c:
intronic_perc = (intronic_rrc / intronic_c) * 100
# Intergenic sites.
intergen_rrc = 0.0
intergen_c = len(intergen_site_ids_dic)
for site_id in intergen_site_ids_dic:
intergen_rrc += gen_rr_ratios_dic[site_id]
intergen_perc = 0.0
if intergen_c:
intergen_perc = (intergen_rrc / intergen_c) * 100
mdtext += """
## Repeat region statistics ### {#rep-reg-stats}
**Table:**
Repeat region content statistics for different region types.
The percentage of repeat regions found in each region type set is given.
"""
mdtext += "| Region type | Count | Percentage | \n"
mdtext += "| :-: | :-: | :-: |\n"
mdtext += "| exonic (GC) | %i | %.3f |\n" %(exs_gc_gen_c, exs_gc_gen_perc)
mdtext += "| exonic (TC) | %i | %.3f |\n" %(sel_tc_tr_c, sel_exs_tc_tr_perc)
mdtext += "| intronic | %i | %.3f |\n" %(intronic_c, intronic_perc)
mdtext += "| intergenic | %i | %.3f |\n" %(intergen_c, intergen_perc)
mdtext += "\n \n \n"
# Convert mdtext to html.
md2html = markdown(mdtext, extensions=['attr_list', 'tables'])
OUTHTML = open(html_out,"w")
OUTHTML.write("%s\n" %(md2html))
OUTHTML.close()
# change <table> to sortable.
check_cmd = "sed -i 's/<table>/<table class=" + '"sortable"' + ">/g' " + html_out
output = subprocess.getoutput(check_cmd)
error = False
if output:
error = True
assert error == False, "sed command returned error:\n%s" %(output)
################################################################################
def create_site_lengths_plot(site_len_list, out_plot,
                             scale_zero_max=False):
    """
    Create a box plot of the input site length distribution and save it
    to out_plot.

    site_len_list:
        List of site lengths to plot (must not be empty).
    out_plot:
        Output plot file path.
    scale_zero_max:
        If True, scale the y-axis from 0 up to the maximum site length,
        rounded up to the next multiple of 10.

    Peakhood colors:
    #b237fd purple
    #007af4 blue
    #00b4f7 light blue
    #00d8ff lighter blue
    """
    assert site_len_list, "given list site_len_list empty"
    if scale_zero_max:
        # Round the maximum length up to the next multiple of 10 for a
        # clean axis limit.
        max_y = max(site_len_list)
        while max_y % 10:
            max_y += 1
    # Long-format dataframe: one labelled set holding all lengths.
    set_label = "Input sites"
    plot_df = pd.DataFrame(
        {'set': [set_label] * len(site_len_list),
         'length': list(site_len_list)},
        columns=['set', 'length'])
    # Draw the box plot.
    sns.set(style="darkgrid")
    fig, ax = plt.subplots()
    sns.boxplot(x="set", y="length", data=plot_df, palette=['#00b4f7'],
                width=0.7, linewidth=1.5, boxprops=dict(alpha=.7))
    ax.set_ylabel("Length (nt)", fontsize=18)
    ax.tick_params(axis='x', labelsize=18)
    ax.tick_params(axis='y', labelsize=12)
    if scale_zero_max:
        ax.set_ylim([0, max_y])
    ax.set(xlabel=None)
    fig.savefig(out_plot, dpi=125, bbox_inches='tight')
################################################################################
def plot_ei_ratio_density(set_scores, out_plot,
                          set_label="Positives",
                          x_label="k-mer score",
                          y_label="Density",
                          fig_width=5,
                          fig_height=4,
                          kde_bw_adjust=1,
                          x_0_to_100=False,
                          kde_clip=False):
    """
    Create a KDE density plot of the given score distribution
    (exon-intron ratios) and save it to out_plot.

    set_scores:
        List of scores to plot (must not be empty).
    out_plot:
        Output plot file path.
    kde_bw_adjust:
        Bandwidth adjustment factor passed to seaborn's kdeplot.
    kde_clip:
        [start, end] clipping range for the KDE; if not given, the full
        [min, max] score range is used.
    set_label, x_0_to_100:
        Currently unused; kept for interface compatibility.

    PH graffiti colors:
    #b237fd : pink
    #007af4 : blue
    #00d7fa : light blue
    #0bf80b : slime green
    """
    assert set_scores, "set_scores empty"
    # Default clipping range: full observed score range.
    if not kde_clip:
        kde_clip = [min(set_scores), max(set_scores)]
    score_df = pd.DataFrame({'score': list(set_scores)},
                            columns=['score'])
    # Draw the density plot.
    sns.set(style="darkgrid")
    fig, ax = plt.subplots()
    sns.kdeplot(x="score", data=score_df, color='#b237fd',
                clip=kde_clip, bw_adjust=kde_bw_adjust)
    fig.set_figwidth(fig_width)
    fig.set_figheight(fig_height)
    ax.set(xlabel=x_label)
    ax.set_ylabel(y_label)
    fig.savefig(out_plot, dpi=150, bbox_inches='tight')
################################################################################
def ph_merge_generate_html_report(out_folder, hoodlib_path,
                                  ex_sites_perc_dic=False,
                                  tc_sites_perc_dic=False,
                                  exb_sites_perc_dic=False,
                                  add_stats_dic=False,
                                  set_stats_dd=False,
                                  set2site_len_dic=False,
                                  copy_logo=True,
                                  html_report_out="report.peakhood_merge.html",
                                  plots_subfolder="html_plots"):
    """
    Generate the HTML report for peakhood merge.

    out_folder:
        Output folder; the report and the plots subfolder go here.
    hoodlib_path:
        Path to the peakhood library folder (logo images are read from
        its content/ subfolder).
    ex_sites_perc_dic:
        dataset ID -> percentage of exonic sites (exonic / all sites).
    tc_sites_perc_dic:
        dataset ID -> percentage of transcript context sites (TC / exonic).
    exb_sites_perc_dic:
        dataset ID -> percentage of exon border sites (EXBS / TC).
    add_stats_dic:
        Merged dataset statistics (transcript / site pair counts).
    set_stats_dd:
        dataset ID -> dictionary of site region type counts.
    set2site_len_dic:
        dataset ID -> list of input site lengths.
    copy_logo:
        If True, copy the logo into the plots folder and link it from there.
    html_report_out:
        HTML report output file; overrides the default path unless falsy.
    plots_subfolder:
        Name of the plots subfolder created inside out_folder.
    """
    # Input checks.
    assert os.path.exists(out_folder), "out_folder does not exist"
    assert os.path.exists(hoodlib_path), "hoodlib_path does not exist"
    assert ex_sites_perc_dic, "ex_sites_perc_dic empty"
    assert tc_sites_perc_dic, "tc_sites_perc_dic empty"
    assert exb_sites_perc_dic, "exb_sites_perc_dic empty"
    assert add_stats_dic, "add_stats_dic empty"
    assert set_stats_dd, "set_stats_dd empty"
    assert set2site_len_dic, "set2site_len_dic empty"
    from markdown import markdown
    # Output folders / files.
    plots_folder = plots_subfolder
    plots_out_folder = out_folder + "/" + plots_folder
    if not os.path.exists(plots_out_folder):
        os.makedirs(plots_out_folder)
    # Default report path; overridden below since html_report_out has a
    # non-empty default value.
    html_out = out_folder + "/" + "report.peakhood_merge.html"
    if html_report_out:
        html_out = html_report_out
    # Plot files.
    exon_perc_plot = "exon_perc_plot.png"
    exon_perc_plot_out = plots_out_folder + "/" + exon_perc_plot
    lengths_plot = "set_lengths_plot.png"
    lengths_plot_out = plots_out_folder + "/" + lengths_plot
    # Paths to logo and ploty.
    logo1_path = hoodlib_path + "/content/logo1.png"
    logo2_path = hoodlib_path + "/content/logo2.png"
    # sorttable_js_path = hoodlib_path + "/content/sorttable.js"
    # plotly_js_path = hoodlib_path + "/content/plotly-latest.min.js"
    # assert os.path.exists(plotly_js_path), "plotly js %s not found" %(plotly_js_path)
    # Copy logo to plots folder.
    if copy_logo:
        logo_out = plots_out_folder + "/" + "logo.png"
        shutil_copy_file(logo1_path, logo_out)
        # Link the copied logo relative to the report location.
        logo1_path = plots_folder + "/" + "logo.png"
    # Report header + table of contents (markdown; converted to HTML below).
    mdtext = """
<head>
<title>Peakhood - Merge Extracted Datasets Report</title>
</head>

<img src="%s" alt="ph_logo"
title="ph_logo" width="675" />

<p><body style="font-family:sans-serif" link="#007af4" vlink="#007af4" alink="#007af4"></p>

""" %(logo1_path)
    mdtext += """

# Merge Extracted Datasets Report

List of available statistics generated
by Peakhood (peakhood merge):

- [Merged dataset statistics](#merge-stats)
- [Site region type statistics](#exon-perc-stats)
- [Input site length distribution](#site-length-plot)
- [Exonic site percentages distribution](#exon-perc-plot)"""
    """
    Merged dataset statistics.
    """
    # Counts from add_stats_dic (all / selected transcripts, site pairs).
    c_all_tr = add_stats_dic["c_all_tr"]
    c_sel_tr = add_stats_dic["c_sel_tr"]
    c_pair_sites_all_tr = add_stats_dic["c_pair_sites_all_tr"]
    c_pair_sites_all_tr_diff_set = add_stats_dic["c_pair_sites_all_tr_diff_set"]
    c_pair_sites_sel_tr = add_stats_dic["c_pair_sites_sel_tr"]
    c_pair_sites_sel_tr_diff_set = add_stats_dic["c_pair_sites_sel_tr_diff_set"]
    mdtext += """
## Merged dataset statistics ### {#merge-stats}

**Table:**
Merged dataset statistics, listing over all datasets: the number of
transcripts containing sites, the number of selected transcripts (most likely
transcripts from peakhood extract) with sites, the number of site pairs on
all transcripts (same and different datasets / RBPs), the number of site pairs
from different datasets (different RBPs), the number of site pairs
on the selected transcripts (same and different RBPs), and the number of
site pairs on the selected transcripts (different RBPs).

"""
    mdtext += "| Description | Count | \n"
    mdtext += "| :-: | :-: |\n"
    mdtext += "| # all transcripts with sites | %i |\n" %(c_all_tr)
    mdtext += "| # selected transcripts with sites | %i |\n" %(c_sel_tr)
    mdtext += "| # site pairs on all transcripts | %i |\n" %(c_pair_sites_all_tr)
    # NOTE(review): this row label and the one two rows below are
    # identical ("from different datasets") although they report
    # different values (all vs. selected transcripts) -- consider
    # distinguishing the labels.
    mdtext += "| # site pairs (from different datasets) | %i |\n" %(c_pair_sites_all_tr_diff_set)
    mdtext += "| # site pairs on selected transcripts | %i |\n" %(c_pair_sites_sel_tr)
    mdtext += "| # site pairs (from different datasets) | %i |\n" %(c_pair_sites_sel_tr_diff_set)
    mdtext += "\n&nbsp;\n&nbsp;\n"
    """
    Site region type statistics.
    """
    mdtext += """
## Site region type statistics ### {#exon-perc-stats}

**Table:**
Site region type statistics. For each input dataset,
different region types with corresponding site numbers are given:
number of all dataset sites,
number of intronic sites, number of intergenic sites,
number of exonic sites with assigned genomic context,
number of exonic sites with assigned transcript context (TC),
number of exonic sites with assigned transcript context after merging
adjacent exon border sites (TCM),
number of exonic sites at exon borders connected by intron-spanning reads (before merging),
percentage of exonic sites (exonic sites / all sites), and
percentage of exonic transcript context sites (TC sites / all exonic sites).

"""
    mdtext += "| Dataset | # all sites | # intronic | # intergenic | # exonic (GC) | # exonic (TC) | # exonic (TCM) | # exon border | % exon / all | % TC / exonic | \n"
    mdtext += "| :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | \n"
    # One table row per dataset with counts and derived percentages.
    for plot_data_id in set_stats_dd:
        c_all_sites = set_stats_dd[plot_data_id]["c_all_sites"]
        c_intron_sites = set_stats_dd[plot_data_id]["c_intron_sites"]
        c_intergen_sites = set_stats_dd[plot_data_id]["c_intergen_sites"]
        c_exon_sites_gc = set_stats_dd[plot_data_id]["c_exon_sites_gc"]
        c_exon_sites_tc = set_stats_dd[plot_data_id]["c_exon_sites_tc"]
        c_exon_sites_merged_tc = set_stats_dd[plot_data_id]["c_exon_sites_merged_tc"]
        c_exonic_sites = set_stats_dd[plot_data_id]["c_exonic_sites"]
        c_exb_sites = set_stats_dd[plot_data_id]["c_exb_sites"]
        # Guard against division by zero for empty datasets.
        perc_exonic_sites = 0.0
        perc_exon_sites_tc = 0.0
        if c_exonic_sites and c_all_sites:
            perc_exonic_sites = (c_exonic_sites / c_all_sites) * 100
        if c_exon_sites_tc and c_exonic_sites:
            perc_exon_sites_tc = (c_exon_sites_tc / c_exonic_sites) * 100
        mdtext += "| %s | %i | %i | %i | %i | %i | %i | %i | %.2f | %.2f | \n" %(plot_data_id, c_all_sites, c_intron_sites, c_intergen_sites, c_exon_sites_gc, c_exon_sites_tc, c_exon_sites_merged_tc, c_exb_sites, perc_exonic_sites, perc_exon_sites_tc)
    mdtext += "\n&nbsp;\n&nbsp;\n"
    # Site length distribution plot (one box per input dataset).
    create_merge_site_lengths_plot(set2site_len_dic, lengths_plot_out)
    lengths_plot_path = plots_folder + "/" + lengths_plot
    mdtext += """
## Input site length distribution ### {#site-length-plot}

Input site length distribution for every --in input dataset,
after pre-merging of book-ended and overlapping input sites (if set)
and pre-filtering (if set, e.g. by score or length).
Note that set --pre-merge leads to increased lengths if there are adjacent
or overlapping sites. Moreover, set --max-len (default 200) limits the
maximum site length, but this can increase again if --pre-merge is set
(since --pre-merge is applied after --max-len filtering).

"""
    mdtext += '<img src="' + lengths_plot_path + '" alt="Site length distribution"' + "\n"
    mdtext += 'title="Site length distribution" width="700" />' + "\n"
    mdtext += """

**Figure:** Input site length distribution for every --in input dataset.

&nbsp;

"""
    mdtext += """
## Exonic site percentages distribution ### {#exon-perc-plot}

Percentages of exonic sites (exonic sites / all sites), assigned
transcript context (TC) sites (TC / exonic sites), and
paired exonic sites at exon borders (EXBS) (EXBS / TC),
for all --in input datasets. Two sites at exon borders form a pair
if they are connected via intron-spanning reads,
and thus likely form one single site instead of two separate.
Pair sites at exon borders consequently
get merged by Peakhood, so usually only 1 of 2 sites remains.
Moreover, if there is > 1 connection for a set of exon border sites,
Peakhood only keeps the one featuring the most intron-spanning reads.

"""
    # Grouped bar plot of exonic / TC / exon-border percentages.
    create_exon_perc_plot(ex_sites_perc_dic, tc_sites_perc_dic,
                          exb_sites_perc_dic, exon_perc_plot_out)
    exon_perc_plot_path = plots_folder + "/" + exon_perc_plot
    mdtext += '<img src="' + exon_perc_plot_path + '" alt="Exonic site percentages distribution"' + "\n"
    mdtext += 'title="Exonic site percentages distribution" width="800" />' + "\n"
    mdtext += """

**Figure:** Percentages of exonic sites (exonic sites / all sites),
assigned transcript context (TC) sites (TC / exonic sites),
and exonic sites at exon borders connected by intron-spanning reads
(EXBS) (EXBS / TC),
for all --in input datasets. The higher the first two percentages,
the more likely the RBP associated to the dataset binds to a
spliced context (introns removed).

&nbsp;

"""
    # Convert mdtext to html.
    md2html = markdown(mdtext, extensions=['attr_list', 'tables'])
    OUTHTML = open(html_out,"w")
    OUTHTML.write("%s\n" %(md2html))
    OUTHTML.close()
    # change <table> to sortable.
    # check_cmd = "sed -i 's/<table>/<table class=" + '"sortable"' + ">/g' " + html_out
    # output = subprocess.getoutput(check_cmd)
    # error = False
    # if output:
    #     error = True
    # assert error == False, "sed command returned error:\n%s" %(output)
################################################################################
def create_merge_site_lengths_plot(set2site_len_dic, out_plot):
    """
    Create a box plot showing the site length distribution of every
    input dataset and save it to out_plot.

    set2site_len_dic:
        dataset ID -> list of site lengths.
    out_plot:
        Output plot file path.

    Peakhood colors:
    #b237fd purple
    #007af4 blue
    #00b4f7 light blue
    #00d8ff lighter blue
    """
    assert set2site_len_dic, "given dictionary set2site_len_dic empty"
    # Long-format dataframe: one row per site length, labelled by dataset.
    set_col = []
    len_col = []
    for dataset_id, len_list in set2site_len_dic.items():
        set_col.extend([dataset_id] * len(len_list))
        len_col.extend(len_list)
    plot_df = pd.DataFrame({'set': set_col, 'length': len_col},
                           columns=['set', 'length'])
    # Figure height scales with the number of datasets.
    fheight = 1.5 * len(set2site_len_dic)
    # Draw horizontal box plots (one per dataset).
    sns.set(style="darkgrid")
    g = sns.catplot(x="length", y="set", data=plot_df,
                    kind="box", palette=["#00d8ff"],
                    legend=False)
    g.fig.set_figwidth(18)
    g.fig.set_figheight(fheight)
    # Modify axes.
    ax = g.axes
    ax[0, 0].set_xlabel("Site length distribution", fontsize=20)
    ax[0, 0].set(ylabel=None)
    ax[0, 0].tick_params(axis='x', labelsize=16)
    ax[0, 0].tick_params(axis='y', labelsize=20)
    g.savefig(out_plot, dpi=100, bbox_inches='tight')
################################################################################
def create_exon_perc_plot(ex_sites_perc_dic, tc_sites_perc_dic,
                          exb_sites_perc_dic, out_plot):
    """
    Create a grouped bar plot of per-dataset site percentages:
    exonic / all sites, transcript context (TC) / exonic sites, and
    exon border sites (EXBS) / TC sites.

    ex_sites_perc_dic:
        dataset ID -> percentage of exonic sites.
    tc_sites_perc_dic:
        dataset ID -> percentage of transcript context sites.
    exb_sites_perc_dic:
        dataset ID -> percentage of exon border sites.
    out_plot:
        Output plot file path.

    Peakhood colors:
    #b237fd purple
    #007af4 blue
    #00b4f7 light blue
    #00d8ff lighter blue
    """
    assert ex_sites_perc_dic, "given dictionary ex_sites_perc_dic empty"
    assert tc_sites_perc_dic, "given dictionary tc_sites_perc_dic empty"
    assert exb_sites_perc_dic, "given dictionary exb_sites_perc_dic empty"
    # Long-format dataframe: (bar group label, dataset ID, percentage).
    data = {'set': [], 'feat_id': [], 'perc': []}
    for group_label, perc_dic in (("exonic / all", ex_sites_perc_dic),
                                  ("TC / exonic", tc_sites_perc_dic),
                                  ("EXBS / TC", exb_sites_perc_dic)):
        for dataset_id, perc in perc_dic.items():
            data['set'].append(group_label)
            data['feat_id'].append(dataset_id)
            data['perc'].append(perc)
    plot_df = pd.DataFrame(data, columns=['set', 'feat_id', 'perc'])
    # Figure height scales with the number of datasets.
    fheight = 1.5 * len(ex_sites_perc_dic)
    # Draw grouped horizontal bars, one group per dataset.
    sns.set(style="darkgrid")
    g = sns.catplot(x="perc", y="feat_id", hue="set", data=plot_df,
                    kind="bar", palette=["#00d8ff", "#007af4", "#b237fd"],
                    legend=False)
    g.fig.set_figwidth(18)
    g.fig.set_figheight(fheight)
    # Modify axes.
    ax = g.axes
    ax[0, 0].set_xlabel("Percentage of sites (%)", fontsize=20)
    ax[0, 0].set(ylabel=None)
    ax[0, 0].tick_params(axis='x', labelsize=16)
    ax[0, 0].tick_params(axis='y', labelsize=20)
    # Add legend at specific position (outside the axes, to the right).
    plt.legend(loc=(1.01, 0.4), fontsize=16)
    g.savefig(out_plot, dpi=100, bbox_inches='tight')
################################################################################
def print_some_banner():
    """
    Return a randomly selected ASCII art banner string.

    Only one banner is currently in the pool; random.choice() is kept so
    additional banners can be appended to the list later. The unused
    alternative banners (previously locals b and c, never appended)
    were removed.
    """
    banner = []
    a = """
               $$\\
               $$ |
 $$$$$$\   $$$$$$\   $$$$$$\  $$ |  $$\\
$$  __$$\ $$  __$$\ \____$$\ $$ | $$  |
$$ /  $$ |$$$$$$$$ | $$$$$$$ |$$$$$$  /
$$ |  $$ |$$   ____|$$  __$$ |$$  _$$<
$$$$$$$  |\$$$$$$$\ \$$$$$$$ |$$ | \$$\\
$$  ____/  \_______| \_______|\__|  \__|
$$ |                   $$\\
$$ |                   $$ |
$$$$$$$\   $$$$$$\   $$$$$$\   $$$$$$$ |
$$  __$$\ $$  __$$\ $$  __$$\ $$  __$$ |
$$ |  $$ |$$ /  $$ |$$ /  $$ |$$ /  $$ |
$$ |  $$ |$$ |  $$ |$$ |  $$ |$$ |  $$ |
$$ |  $$ |\$$$$$$  |\$$$$$$  |\$$$$$$$ |
\__|  \__| \______/  \______/  \_______|
"""
    banner.append(a)
    return(random.choice(banner))
################################################################################
def extract_multicore_wrapper(extract_out_folder, extract_cmd, dataset_id):
    """
    Run a single peakhood extract command (wrapper for multicore
    processing), store its output in a run log file, check the run for
    errors, read the produced statistics, and return a per-dataset
    summary string.

    extract_out_folder:
        Output folder of the peakhood extract run; the run log is
        written here and extract_stats.out is expected here.
    extract_cmd:
        peakhood extract command line call to execute.
    dataset_id:
        ID of the processed dataset (printed in the summary).

    Returns the dataset statistics summary string.
    """
    output = subprocess.getoutput(extract_cmd)
    # Save run output to a log file (context manager guarantees close).
    run_log_file = extract_out_folder + "/run.peakhood_extract.log"
    with open(run_log_file, "w") as log_out:
        log_out.write(output)
    # Check for errors in log file.
    error_found = check_string_in_file(run_log_file, "AssertionError")
    if error_found:
        print(output)
    assert not error_found, "An assertion error was raised during this peakhood extract run, check run log file %s for details" % (run_log_file)
    # Check for results.
    stats_out_file = extract_out_folder + "/extract_stats.out"
    assert os.path.exists(stats_out_file), "missing extract_stats.out file inside folder %s. Probably this peakhood extract run produced errors, check run log file %s" % (extract_out_folder, run_log_file)
    extr_stats_dic = read_settings_into_dic(stats_out_file,
                                            check=False)
    assert extr_stats_dic, "no stats extracted from extract_stats.out file inside folder %s. Probably this peakhood extract run produced errors, check run log file %s" % (extract_out_folder, run_log_file)
    # Site counts by region type.
    c_intergen_sites = int(extr_stats_dic["c_intergen_sites"])
    c_intron_sites = int(extr_stats_dic["c_intron_sites"])
    c_exon_sites_tc = int(extr_stats_dic["c_exon_sites_tc"])
    c_exon_sites_merged_tc = int(extr_stats_dic["c_exon_sites_merged_tc"])
    c_exon_sites_gc = int(extr_stats_dic["c_exon_sites_gc"])
    c_all_sites = c_intergen_sites + c_intron_sites + c_exon_sites_gc + c_exon_sites_tc
    c_exonic_sites = int(extr_stats_dic["c_exonic_sites"])
    c_exb_sites = int(extr_stats_dic["c_exb_sites"])
    # Percentage of exonic sites (guard against division by zero).
    perc_exonic_sites = "0.0 %"
    if c_exonic_sites and c_all_sites:
        perc_exonic_sites = "%.2f " % (
            (c_exonic_sites / c_all_sites) * 100) + "%"
    # Percentage of spliced (transcript) context sites.
    perc_exonic_tc_sites = "0.0 %"
    if c_exon_sites_tc and c_exonic_sites:
        perc_exonic_tc_sites = "%.2f " % (
            (c_exon_sites_tc / c_exonic_sites) * 100) + "%"
    # Percentage of exon border sites (connected by ISR reads).
    perc_exb_sites = "0.0 %"
    if c_exb_sites and c_exon_sites_tc:
        perc_exb_sites = "%.2f " % (
            (c_exb_sites / c_exon_sites_tc) * 100) + "%"
    # Assemble the summary string.
    dataset_print = ""
    dataset_print += "dataset: %s\n" % (dataset_id)
    dataset_print += "# of all sites %i\n" % (c_all_sites)
    dataset_print += "# of intronic sites: %i\n" %(c_intron_sites)
    dataset_print += "# of intergenic sites: %i\n" %(c_intergen_sites)
    dataset_print += "# of exonic sites (assigned genome context): %i\n" %(c_exon_sites_gc)
    dataset_print += "# of exonic sites (assigned transcript context): %i\n" %(c_exon_sites_tc)
    dataset_print += "# of sites after merging exon border sites: %i\n" %(c_exon_sites_merged_tc)
    dataset_print += "Percentage (# exonic sites / # all input sites):\n%s\n" %(perc_exonic_sites)
    # BUGFIX: the transcript context percentage line was duplicated
    # (printed twice) in the original; it is now printed once.
    dataset_print += "Percentage (# transcript context sites / # exonic sites):\n%s\n" %(perc_exonic_tc_sites)
    dataset_print += "Percentage (# exon border sites / # transcript context sites):\n%s\n\n" %(perc_exb_sites)
    return dataset_print
|
|
from face_alignment.detection.models import FAN, ResNetDepth
from .utils import crop, get_preds_fromhm, draw_gaussian
import torch
import numpy as np
import cv2
class FANLandmarks:
    """
    Face landmark predictor based on FAN (Face Alignment Network).

    Predicts 68 2D landmarks per detected face; when detect_type is
    "3D", an additional ResNetDepth network predicts a depth (z)
    coordinate per landmark.
    """

    # Previously hard-coded depth model checkpoint path, kept as the
    # default for backward compatibility.
    DEFAULT_DEPTH_MODEL_PATH = "D:/model/depth-2a464da4ea.pth.tar"

    def __init__(self, device, model_path, detect_type,
                 depth_model_path=DEFAULT_DEPTH_MODEL_PATH):
        """
        device: torch device the networks run on (e.g. "cuda").
        model_path: path to the FAN landmark model weights.
        detect_type: "2D" or "3D" landmark prediction mode.
        depth_model_path: path to the depth model weights; only loaded
            when detect_type == "3D". Defaults to the original
            hard-coded path so existing callers keep working.
        """
        # Initialise the face landmark network.
        model_weights = torch.load(model_path)
        self.device = device
        self.detect_type = detect_type
        torch.backends.cudnn.benchmark = True
        self.face_landmark = FAN(4)
        self.face_landmark.load_state_dict(model_weights)
        self.face_landmark.to(device)
        self.face_landmark.eval()
        # Reference face size used to compute the crop scale.
        self.reference_scale = 195.0
        if self.detect_type == "3D":
            # NOTE: attribute name keeps the original "prediciton" typo
            # so any external accessors stay compatible.
            self.depth_prediciton_net = ResNetDepth()
            depth_weights = torch.load(depth_model_path)
            # Strip "module." prefixes left over from DataParallel training.
            depth_dict = {k.replace('module.', ''): v
                          for k, v in depth_weights['state_dict'].items()}
            self.depth_prediciton_net.load_state_dict(depth_dict)
            self.depth_prediciton_net.to(device)
            self.depth_prediciton_net.eval()

    def extract(self, rect_queue):
        """
        Predict landmarks for all detected faces in an image.

        rect_queue: (image, face_rect) tuple, where face_rect is a
            sequence of face bounding boxes (x1, y1, x2, y2).

        Returns (image, landmarks), with one (68, 2) array per face
        (or (68, 3) including depth when detect_type == "3D").
        """
        # image, face_rect = rect_queue.get(block=True, timeout=10)
        image, face_rect = rect_queue
        landmarks = []
        for d in face_rect:
            # Face center and crop scale relative to the reference size.
            center_x = d[2] - (d[2] - d[0]) / 2.0
            center_y = d[3] - (d[3] - d[1]) / 2.0
            center = torch.FloatTensor([center_x, center_y])
            scale = (d[2] - d[0] + d[3] - d[1]) / self.reference_scale
            inp = crop(image, center, scale)
            # HWC uint8 -> normalized NCHW float tensor.
            inp = torch.from_numpy(inp.transpose((2, 0, 1))).float().to(self.device)
            inp.div_(255.0).unsqueeze_(0)
            with torch.no_grad():
                out = self.face_landmark(inp)[-1]
            out = out.cpu()
            pts, pts_img = get_preds_fromhm(out, center, scale)
            pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)
            if self.detect_type == "3D":
                # Per-landmark Gaussian heatmaps as depth network input.
                heatmaps = np.zeros((68, 256, 256), dtype=np.float32)
                # BUGFIX: use a dedicated loop variable; the original
                # reused "i" and shadowed the enclosing loop's index.
                for lm_idx in range(68):
                    if pts[lm_idx, 0] > 0:
                        heatmaps[lm_idx] = draw_gaussian(heatmaps[lm_idx], pts[lm_idx], 2)
                heatmaps = torch.from_numpy(heatmaps).unsqueeze_(0)
                heatmaps = heatmaps.to(self.device)
                depth_pred = self.depth_prediciton_net(
                    torch.cat((inp, heatmaps), 1)).data.cpu().view(68, 1)
                # Rescale predicted depth into image coordinates.
                pts_img = torch.cat(
                    (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1)
            landmarks.append(pts_img.numpy())
        return image, landmarks
|
|
"""
Sam Bluestone
Test 2
Exploratory data analysis for the admissions dataset
"""
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
import numpy as np
from mlxtend.plotting import heatmap
from sklearn.preprocessing import OneHotEncoder
import sys
# Read the admissions data into a pandas dataframe and drop missing rows.
df = pd.read_csv('Admission_Predict.csv')
df.dropna(inplace=True)
# One-hot encode the Race column before dropping it.
df_ohe = pd.get_dummies(df['Race'])
# Drop the serial number and the original (categorical) Race column.
df = df[[i for i in list(df.columns) if i not in ['Serial No.', 'Race']]]
df.dropna(inplace=True)
# Append the one-hot race columns under the desired labels.
# BUGFIX: index df_ohe by the actual dummy column name (column), not by
# the new label (race) -- the original only worked when the Race values
# happened to match the new labels exactly, making the zip pointless.
for race, column in zip(['Asian', 'african american', 'latinx', 'white'], df_ohe.columns):
    df.insert(len(df.columns), race, df_ohe[column])
df.columns = ["GRE", "TOEFL", "UR", "SOP", "LOR", "CGPA", "RES", "CoA", "SES", "ASIAN","AA","LAT","WHITE"]
cols = ["GRE", "TOEFL", "UR", "SOP", "LOR", "CGPA", "RES", "CoA", "SES", "ASIAN","AA","LAT","WHITE"]
# Scatter plot matrix of every feature against every other feature.
scatterplotmatrix(df[df.columns].values, figsize=(20, 16),
                  names=cols, alpha=0.5)
plt.tight_layout()
plt.savefig("scatterplot_matrix.png")
plt.show()
# Heatmap of the pairwise correlation coefficients, to judge how
# correlated any given pair of features is.
cm = np.corrcoef(df[df.columns].values.T)
hm = heatmap(cm, row_names=cols, column_names=cols, figsize=(10, 10))
plt.savefig("corr_matrix.png")
plt.show()
|
|
from typing import Sequence
import oneflow.experimental as flow
import argparse
import numpy as np
import os
import time
import sys
import oneflow.experimental.nn as nn
import json
from tqdm import tqdm
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "model_compress/distil_new_api/src")))
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "./src")))
import config as configs
from data_util import OFRecordDataLoader
from bert_model.bert import BERT
from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score
from util import getdirsize
from knowledge_distill_util import layer_distill, pred_distill
def _parse_args():
    """
    Build and parse the command line arguments for BERT knowledge
    distillation (teacher/student training on GLUE-style OFRecord data).

    Returns the parsed argparse namespace.
    """
    def str2bool(v):
        # Accept common truthy/falsy spellings for boolean flags.
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Unsupported value encountered.')
    # Base parser with shared config options (from project config module).
    parser = configs.get_parser()
    parser.add_argument("--task_name", type=str, default='CoLA')
    parser.add_argument("--teacher_model", default=None, type=str, help="The teacher model dir.")
    parser.add_argument("--student_model", default=None, type=str, help="The student model dir.")
    # NOTE(review): help text says "student model dir" -- presumably it
    # should describe the total/combined model; verify against callers.
    parser.add_argument("--total_model", default=None, type=str, help="The student model dir.")
    parser.add_argument('--num_epochs', type=int, default=3, help='number of epochs')
    # Training data options.
    parser.add_argument("--train_data_dir", type=str, default='/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_ofrecord_test/SST-2/train/')
    parser.add_argument("--train_data_prefix", type=str, default='train.of_record-')
    parser.add_argument("--train_example_num", type=int, default=67349,
                        help="example number in dataset")
    parser.add_argument("--batch_size_per_device", type=int, default=8)
    parser.add_argument("--train_data_part_num", type=int, default=1,
                        help="data part number in dataset")
    # Evaluation data options.
    parser.add_argument("--eval_data_dir", type=str, default='/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_ofrecord_test/SST-2/eval/')
    parser.add_argument("--eval_data_prefix", type=str, default='eval.of_record-')
    parser.add_argument("--eval_example_num", type=int, default=872,
                        help="example number in dataset")
    parser.add_argument("--eval_batch_size_per_device", type=int, default=12)
    parser.add_argument("--eval_data_part_num", type=int, default=1,
                        help="data part number in dataset")
    parser.add_argument("--result_dir", type=str, default="", help="the save directory of results")
    # Student model architecture options.
    parser.add_argument("--student_num_hidden_layers", type=int, default=3)
    parser.add_argument("--student_num_attention_heads", type=int, default=12)
    parser.add_argument("--student_max_position_embeddings", type=int, default=512)
    parser.add_argument("--student_type_vocab_size", type=int, default=2)
    parser.add_argument("--student_vocab_size", type=int, default=30522)
    parser.add_argument("--student_attention_probs_dropout_prob", type=float, default=0.1)
    parser.add_argument("--student_hidden_dropout_prob", type=float, default=0.1)
    parser.add_argument("--student_hidden_size_per_head", type=int, default=64)
    parser.add_argument("--student_hidden_size", type=int, default=768)
    # Teacher model architecture options.
    parser.add_argument("--teacher_num_hidden_layers", type=int, default=12)
    parser.add_argument("--teacher_num_attention_heads", type=int, default=16)
    parser.add_argument("--teacher_max_position_embeddings", type=int, default=512)
    parser.add_argument("--teacher_type_vocab_size", type=int, default=2)
    parser.add_argument("--teacher_vocab_size", type=int, default=30522)
    parser.add_argument("--teacher_attention_probs_dropout_prob", type=float, default=0.1)
    parser.add_argument("--teacher_hidden_dropout_prob", type=float, default=0.1)
    parser.add_argument("--teacher_hidden_size_per_head", type=int, default=64)
    parser.add_argument("--teacher_hidden_size", type=int, default=768)
    # Distillation options (loss weights, temperature, init mode).
    parser.add_argument("--kd_alpha", type=float, default=0.2)
    parser.add_argument("--kd_beta", type=float, default=10, help='the proposed loss {10,100,500,1000}')
    parser.add_argument('--from_scratch', type=str2bool, nargs='?', const=False, help='train the student model from scratch or initialize from teacher layers')
    parser.add_argument('--temperature', type=float, default=1.)
    parser.add_argument('--aug_train', type=str2bool, nargs='?', const=False, help='using augmented training set?')
    parser.add_argument('--serve_for_online', type=str2bool, nargs='?', const=False,
                        help='if serve for online, then after training, will delete the teacher params and optimizer parmas from model_save_dir')
    return parser.parse_args()
class bert_pkd(nn.Module):
    """
    Wrapper module holding both the student and the teacher BERT model,
    each with its own 2-class classification head (Linear + Softmax)
    applied to the first sequence position.
    """
    def __init__(self, student_vocab_size,
                 student_hidden,
                 student_n_layers,
                 student_attn_heads,
                 student_dropout,
                 teacher_vocab_size,
                 teacher_hidden,
                 teacher_n_layers,
                 teacher_attn_heads,
                 teacher_dropout):
        super().__init__()
        # Student and teacher BERT encoders.
        self.student_model = BERT(student_vocab_size,
                                  student_hidden,
                                  student_n_layers,
                                  student_attn_heads,
                                  student_dropout)
        self.teacher_model = BERT(teacher_vocab_size,
                                  teacher_hidden,
                                  teacher_n_layers,
                                  teacher_attn_heads,
                                  teacher_dropout)
        # Binary (2-class) classification heads.
        self.student_output_layer = nn.Linear(student_hidden,2)
        self.teacher_output_layer = nn.Linear(teacher_hidden,2)
        self.student_softmax = nn.Softmax(dim=1)
        self.teacher_softmax = nn.Softmax(dim=1)

    def eval_forward(self, x, segment_info):
        """
        Evaluation forward pass: run only the student model and return
        its softmax class scores.
        """
        student_output,_,_ = self.student_model(x,segment_info)
        # Classify on the first token's representation.
        student_output2 = self.student_output_layer(student_output[:,0])
        student_logits = self.student_softmax(student_output2)
        return student_logits

    def forward(self, x, segment_info):
        """
        Training forward pass: run both student and teacher.

        Returns (student softmax scores, student sequence output,
        teacher softmax scores, teacher sequence output) -- the sequence
        outputs feed the layer distillation loss, the scores the
        prediction distillation loss.
        """
        student_output,student_sequence_out,_ = self.student_model(x,segment_info)
        student_output2 = self.student_output_layer(student_output[:,0])
        student_logits = self.student_softmax(student_output2)
        teacher_output,teacher_sequence_out,_ = self.teacher_model(x,segment_info)
        teacher_output2 = self.teacher_output_layer(teacher_output[:,0])
        teacher_logits = self.teacher_softmax(teacher_output2)
        return student_logits, student_sequence_out, teacher_logits, teacher_sequence_out
def eval(model, dataloader, desc = "train"):
    """Score `model` on `dataloader`, dump metrics to a json file, return them.

    NOTE: shadows the builtin eval(); the name is kept because callers in
    this file refer to it as `eval`.
    """
    model.eval()
    labels = []
    predictions = []
    start_time = time.time()
    with flow.no_grad():
        for step in tqdm(range(len(dataloader))):
            batch = dataloader.get_batch()
            input_ids = batch['input_ids'].to("cuda")
            segment_ids = batch['segment_ids'].to("cuda")
            label_ids = batch['label_ids'].squeeze(-1)
            probs = model.eval_forward(input_ids, segment_ids)
            predictions.extend(probs.detach().to('cpu').numpy().argmax(axis=1).tolist())
            labels.extend(label_ids.tolist())
    cost_time = time.time() - start_time
    print('cost time: {} s'.format(cost_time))
    # Report on-disk model size alongside the quality metrics.
    model_size = getdirsize(args.model_save_dir)
    print('model_size: %d Mbytes' % (model_size / 1024 / 1024))  # Mbytes
    accuracy = accuracy_score(labels, predictions)
    mcc = matthews_corrcoef(labels, predictions)
    precision = precision_score(labels, predictions)
    recall = recall_score(labels, predictions)
    f_1 = f1_score(labels, predictions)
    save_dict = {"accuracy": "%.2f" % accuracy,
                 "MCC": "%.2f" % mcc,
                 "precision": "%.2f" % precision,
                 "recall": "%.2f" % recall,
                 "f_1": "%.2f" % f_1,
                 "modelSize": "%d" % (model_size / 1024 / 1024),
                 "reasoningTime": "%.2f" % (args.eval_example_num / cost_time)}  # sample/second
    if args.result_dir == "":
        args.result_dir = args.model_save_dir
    if not os.path.exists(args.result_dir):
        os.makedirs(args.result_dir)
    with open(os.path.join(args.result_dir, 'results_{}.json'.format(desc)), "w") as f:
        json.dump(save_dict, f)
    # Numeric metrics returned to the caller (the json above holds strings).
    metric_dict = {
        "accuracy": accuracy,
        "matthews_corrcoef": mcc,
        "precision": precision,
        "recall": recall,
        "f1": f_1,
    }
    print(desc, ', '.join('{}: {:.3f}'.format(k, v) for k, v in metric_dict.items()))
    return metric_dict
def main(args):
    """Train and/or evaluate the BERT-PKD student/teacher pair.

    With ``--do_train``, runs knowledge distillation using the combined
    hard-label cross entropy + soft-label distillation + patient (layer)
    distillation loss, keeping the checkpoint with the best dev metric.
    With ``--do_eval``, scores the model on the eval split (loading it from
    disk only if it was not just trained in this process).
    """
    # Task groups decide which dev metric gates checkpointing.
    acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
    corr_tasks = ["sts-b"]
    mcc_tasks = ["cola"]
    task_name = args.task_name.lower()
    flow.enable_eager_execution()
    flow.InitEagerGlobalSession()
    train_data_loader = OFRecordDataLoader(args.train_data_dir,
                                           args.batch_size_per_device,
                                           args.train_data_part_num,
                                           args.seq_length,
                                           args.train_data_prefix,
                                           args.train_example_num)
    eval_data_loader = OFRecordDataLoader(args.eval_data_dir,
                                          args.eval_batch_size_per_device,
                                          args.eval_data_part_num,
                                          args.seq_length,
                                          args.eval_data_prefix,
                                          args.eval_example_num)
    model = bert_pkd(
        args.student_vocab_size,
        args.student_hidden_size,
        args.student_num_hidden_layers,
        args.student_num_attention_heads,
        args.student_hidden_dropout_prob,
        args.teacher_vocab_size,
        args.teacher_hidden_size,
        args.teacher_num_hidden_layers,
        args.teacher_num_attention_heads,
        args.teacher_hidden_dropout_prob
    )
    model.to('cuda')
    if not os.path.exists(args.model_save_dir):
        os.makedirs(args.model_save_dir)
    if args.do_train:
        of_cross_entropy = flow.nn.CrossEntropyLoss(reduction='mean')
        of_cross_entropy.to("cuda")
        of_sgd = flow.optim.SGD(
            model.parameters(), lr=args.learning_rate)
        of_losses = []
        all_samples = len(eval_data_loader) * args.eval_batch_size_per_device
        print_interval = 10
        best_dev_acc = 0.0
        for epoch in range(args.num_epochs):
            model.train()
            for b in range(len(train_data_loader)):
                blob_confs = train_data_loader.get_batch()
                # oneflow train
                start_t = time.time()
                input_ids = blob_confs['input_ids'].to("cuda")
                segment_ids = blob_confs['segment_ids'].to("cuda")
                label_ids = blob_confs['label_ids'].squeeze(-1).to("cuda")
                student_logits, student_sequence_out, teacher_logits, teacher_sequence_out = model(input_ids, segment_ids)
                # PKD loss: (1-alpha)*CE + alpha*soft-label distillation
                # + beta*patient (intermediate-layer) distillation.
                pt_loss = layer_distill(args, student_sequence_out, teacher_sequence_out)
                ds_loss = pred_distill(args, student_logits, teacher_logits)
                loss_ce = of_cross_entropy(student_logits, label_ids)
                loss_pkd = loss_ce * (1 - args.kd_alpha) + args.kd_alpha * ds_loss + args.kd_beta * pt_loss
                loss_pkd.backward()
                of_sgd.step()
                of_sgd.zero_grad()
                end_t = time.time()
                if b % print_interval == 0:
                    l = loss_pkd.numpy()[0]
                    of_losses.append(l)
                    print(
                        "epoch {} train iter {} oneflow loss {}, train time : {}".format(
                            epoch, b, l, end_t - start_t
                        )
                    )
            # print('EvalTrainJob...')
            # eval(model,train_data_loader,desc = 'train')
            print('EvalValJob...')
            result = eval(model, eval_data_loader, desc='eval')
            save_model = False
            if task_name in acc_tasks and result['accuracy'] > best_dev_acc:
                best_dev_acc = result['accuracy']
                save_model = True
            # if task_name in corr_tasks and result['corr'] > best_dev_acc:
            #     best_dev_acc = result['corr']
            #     save_model = True
            if task_name in mcc_tasks and result['matthews_corrcoef'] > best_dev_acc:
                best_dev_acc = result['matthews_corrcoef']
                save_model = True
            print('Best result:', result)
            if save_model:
                # Replace the previous best checkpoint wholesale.
                if os.path.exists(args.model_save_dir):
                    import shutil
                    shutil.rmtree(args.model_save_dir)
                if not os.path.exists(args.model_save_dir):
                    os.makedirs(args.model_save_dir)
                snapshot_save_path = os.path.join(args.model_save_dir)
                print("Saving best model to {}".format(snapshot_save_path))
                flow.save(model.state_dict(), snapshot_save_path)
    if args.do_eval:
        print('Loading model...')
        print(args.model_save_dir)
        if not args.do_train:
            # Bug fix: load_state_dict previously ran unconditionally and
            # raised NameError on `model_dict` whenever --do_train was also
            # set; the freshly trained in-memory model needs no reload.
            model_dict = flow.load(args.model_save_dir)
            print('successful')
            model.load_state_dict(model_dict)
        print('Evaluation...')
        result = eval(model, eval_data_loader, desc='eval')
if __name__ == "__main__":
    # `args` must stay a module-level global: eval() above reads
    # args.model_save_dir, args.result_dir and args.eval_example_num directly.
    args = _parse_args()
    main(args)
|
|
# ------------------------------------------------------------------------------
# Modified from HRNet-Human-Pose-Estimation
# (https://github.com/HRNet/HRNet-Human-Pose-Estimation)
# Copyright (c) Microsoft
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from utils.transforms import transform_preds, fliplr_joints
def get_max_preds(batch_heatmaps):
    '''
    Locate the per-joint maximum of each heatmap.

    batch_heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
    Returns (preds, maxvals): float32 (x, y) coordinates of each peak and
    the peak scores; coordinates are zeroed where the peak score is not
    strictly positive.
    '''
    assert isinstance(batch_heatmaps, np.ndarray), \
        'batch_heatmaps should be numpy.ndarray'
    assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
    batch_size, num_joints = batch_heatmaps.shape[:2]
    width = batch_heatmaps.shape[3]
    flat = batch_heatmaps.reshape((batch_size, num_joints, -1))
    idx = np.argmax(flat, 2).reshape((batch_size, num_joints, 1))
    maxvals = np.amax(flat, 2).reshape((batch_size, num_joints, 1))
    # Unravel the flat argmax indices into (x, y) pixel coordinates.
    xs = (idx % width).astype(np.float32)
    ys = np.floor(idx.astype(np.float32) / width)
    preds = np.concatenate((xs, ys), axis=2)
    # Suppress joints whose best response is not positive.
    valid = np.greater(maxvals, 0.0).astype(np.float32)
    preds *= np.tile(valid, (1, 1, 2))
    return preds, maxvals
def get_final_preds(config, batch_heatmaps, center, scale):
    """Decode heatmaps into image-space joint coordinates.

    When config.TEST.POST_PROCESS is set, each peak is nudged a quarter
    pixel towards the higher neighbouring response before the heatmap
    coordinates are mapped back to the original image via transform_preds.
    """
    coords, maxvals = get_max_preds(batch_heatmaps)
    heatmap_height = batch_heatmaps.shape[2]
    heatmap_width = batch_heatmaps.shape[3]
    if config.TEST.POST_PROCESS:
        for n in range(coords.shape[0]):
            for p in range(coords.shape[1]):
                hm = batch_heatmaps[n][p]
                px = int(math.floor(coords[n][p][0] + 0.5))
                py = int(math.floor(coords[n][p][1] + 0.5))
                inside = 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1
                if inside:
                    # Sign of the local gradient decides the shift direction.
                    diff = np.array([hm[py][px + 1] - hm[py][px - 1],
                                     hm[py + 1][px] - hm[py - 1][px]])
                    coords[n][p] += np.sign(diff) * .25
    preds = coords.copy()
    # Map heatmap-space coordinates back onto the original image.
    for i in range(coords.shape[0]):
        preds[i] = transform_preds(
            coords[i], center[i], scale[i], [heatmap_width, heatmap_height]
        )
    return preds, maxvals
def get_final_preds_match(config, outputs, center, scale, flip_pairs=None):
    """Decode matched keypoint predictions from query-based model outputs.

    Each query carries class logits over num_joints + 1 classes (the last
    channel treated as background below) plus a 2-D coordinate.  A Hungarian
    assignment picks one query per joint; coordinates are then scaled to the
    input patch and mapped back to the original image.

    :param outputs: dict with 'pred_logits' and 'pred_coords'; shapes assumed
        [B, N, num_joints+1] and [B, N, 2] from the indexing below — TODO
        confirm against the model definition.
    :param flip_pairs: optional left/right joint index pairs; when given,
        predictions are un-flipped via fliplr_joints.
    :return: (matched image-space coords, matched scores, patch-space coords)
    """
    pred_logits = outputs['pred_logits'].detach().cpu()
    pred_coords = outputs['pred_coords'].detach().cpu()
    num_joints = pred_logits.shape[-1] - 1
    # Per-query joint probabilities, with the background channel dropped
    # either after or before the softmax depending on config.
    if config.TEST.INCLUDE_BG_LOGIT:
        prob = F.softmax(pred_logits, dim=-1)[..., :-1]
    else:
        prob = F.softmax(pred_logits[..., :-1], dim=-1)
    score_holder = []
    coord_holder = []
    orig_coord = []
    for b, C in enumerate(prob):
        # Maximize total matched probability (negated for min-cost solver).
        _, query_ind = linear_sum_assignment(-C.transpose(0, 1)) # Cost Matrix: [17, N]
        # Score of the assigned query for each joint, shape [num_joints, 1].
        score = prob[b, query_ind, list(np.arange(num_joints))][..., None].numpy()
        pred_raw = pred_coords[b, query_ind].numpy()
        if flip_pairs is not None:
            pred_raw, score = fliplr_joints(pred_raw, score, 1, flip_pairs, pixel_align=False, is_vis_logit=True)
        # scale to the whole patch
        pred_raw *= np.array(config.MODEL.IMAGE_SIZE)
        # transform back w.r.t. the entire img
        pred = transform_preds(pred_raw, center[b], scale[b], config.MODEL.IMAGE_SIZE)
        orig_coord.append(pred_raw)
        score_holder.append(score)
        coord_holder.append(pred)
    matched_score = np.stack(score_holder)
    matched_coord = np.stack(coord_holder)
    return matched_coord, matched_score, np.stack(orig_coord)
|
|
import os
import sys
import copy
sys.path.append('./player_model/')
sys.path.append('./utils')
import config
import exp_config
import pandas as pd
import numpy as np
from multiprocessing import Pool
from bots import BasicBot
from rectangular_world import RectangularWorld
from environment import *
# Number of simulation repetitions and the experiment manifest / output
# directory come from the shared experiment configuration module.
reps = exp_config.simulation_reps
info, out_dir = exp_config.get_emergent_config(reps)
def write(pid, p, model, tick, out_file, goal, experiment):
    """Append one CSV row describing player `pid` at `tick` to `out_file`.

    `experiment` is expected to be '<nbots>-<composition>-<rep>'.
    """
    nbots, composition, rep = experiment.split('-')
    fields = [experiment, nbots, composition, pid, tick, 'true', model.state,
              p.pos[0], p.pos[1], p.speed, p.angle, p.curr_background,
              model.prob_explore, model.strategy, goal[0], goal[1]]
    out_file.write(','.join(str(f) for f in fields) + '\n')
def write_centers(centers, center_file):
    """Write spotlight centers as a two-column CSV; None entries become NaN rows."""
    coords = np.array([[np.nan, np.nan] if c is None else c for c in centers])
    frame = pd.DataFrame(coords, columns=['x_pos', 'y_pos'])
    frame.to_csv(center_file, index=False)
def write_final(experiment, models):
    """Write one summary row (strategy, total score) per bot to <experiment>-final.csv."""
    with open(out_dir + experiment + '-final.csv', 'w') as out_f:
        for idx, bot in enumerate(models):
            row = [experiment, str(idx), bot.strategy, str(bot.total_score)]
            out_f.write(','.join(row) + '\n')
def run_simulation(exp_ind):
    """Simulate one configured experiment end-to-end.

    Writes the spotlight trajectory, the per-tick simulation log and the
    final per-bot summary CSV files for experiment `exp_ind`.
    """
    print(exp_ind)
    experiment = info['experiments'][exp_ind]
    bots = info['bots'][exp_ind]
    environment = lambda bg: RectangularWorld(bg, config.GAME_LENGTH, False,
                                              config.DISCRETE_BG_RADIUS, False)
    nbots = len(bots)
    models = [BasicBot(environment, [True] * nbots, bot['strategy'], i,
                       prob_explore = bot['prob_explore'])
              for i, bot in enumerate(bots)]
    # Initialize world with a random walk of the spotlight.
    world = World(environment, noise_location = None, n_players = len(models),
                  stop_and_click = config.STOP_AND_CLICK)
    world.random_walk_centers()
    # Persist the spotlight trajectory before simulating.
    write_centers(world.world_model.centers, out_dir + experiment + '-bg.csv')
    world.advance()
    world.time = 0
    with open(out_dir + experiment + '-simulation.csv', 'w') as out_f:
        out_f.write('exp,nbots,composition,pid,tick,active,state,x_pos,y_pos,velocity,angle,bg_val,' +
                    'prob_explore,strategy,goal_x,goal_y\n')
        # Tick 0: log the initial state of every bot.
        for i in range(len(models)):
            write(i, world.players[i], models[i], 0, out_f, ['', ''], experiment)
        # Remaining ticks: observe -> act -> advance -> log.
        for tick in range(1, world.game_length):
            simulate_tick(tick, models, world, out_f, experiment)
    write_final(experiment, models)
def simulate_tick(tick, models, world, out_file, experiment):
    """Advance the simulation by one tick: observe, act, advance, then log."""
    # Bots reason about each other's pre-tick state, so snapshot first.
    snapshot = copy.deepcopy(models)
    goals = [['', ''] for _ in models]
    for i, model in enumerate(models):
        pos, bg_val, others, time = world.get_obs(i)
        model.observe(pos, bg_val, time)
        goals[i], slow = model.act(world.players[i], snapshot)
    world.advance()
    for i, model in enumerate(models):
        write(i, world.players[i], model, tick, out_file, goals[i], experiment)
if __name__ == '__main__':
    # Fan the experiments out over a process pool; each worker runs one
    # experiment independently and writes its own CSV files.
    p = Pool(exp_config.num_procs)
    p.map(run_simulation, range(len(info['experiments'])))
|
|
import torch.nn as nn
import torch
import numpy as np
class Combinator(nn.Module):
    """
    The vanilla combinator function g() that combines vertical and
    lateral connections as explained in Pezeshki et al. (2016).
    The weights are initialized as described in Eq. 17
    and the g() is defined in Eq. 16.
    """
    def __init__(self, n_channels, length, data_type='2d'):
        """
        :param n_channels: number of feature channels of both inputs
        :param length: spatial size (length x length for '2d', length for '1d')
        :param data_type: '2d' for map-shaped activations, '1d' for vectors
        :raises ValueError: for any other data_type
        """
        super(Combinator, self).__init__()
        if data_type == '2d':
            shape = (n_channels, length, length)
        elif data_type == '1d':
            shape = (n_channels, length)
        else:
            raise ValueError("data_type must be '1d' or '2d'")
        # Bug fix: each parameter must own its OWN tensor.  The previous
        # version built one `zeros` and one `ones` tensor and wrapped them
        # in several nn.Parameter objects; nn.Parameter does not copy its
        # input, so those parameters shared storage and an in-place
        # optimizer update to one of them silently changed the others.
        self.b0 = nn.Parameter(torch.zeros(shape))
        self.w0z = nn.Parameter(torch.ones(shape))
        self.w0u = nn.Parameter(torch.zeros(shape))
        self.w0zu = nn.Parameter(torch.ones(shape))
        self.b1 = nn.Parameter(torch.zeros(shape))
        self.w1z = nn.Parameter(torch.ones(shape))
        self.w1u = nn.Parameter(torch.zeros(shape))
        self.w1zu = nn.Parameter(torch.zeros(shape))
        self.wsig = nn.Parameter(torch.ones(shape))
    def forward(self, z_tilde, ulplus1):
        """Combine the lateral z_tilde with the top-down ulplus1 (Eq. 16)."""
        assert z_tilde.shape == ulplus1.shape
        out = self.b0 + z_tilde.mul(self.w0z) + ulplus1.mul(self.w0u) \
            + z_tilde.mul(ulplus1.mul(self.w0zu)) \
            + self.wsig.mul(torch.sigmoid(self.b1 + z_tilde.mul(self.w1z)
                                          + ulplus1.mul(self.w1u)
                                          + z_tilde.mul(ulplus1.mul(self.w1zu))))
        return out
class Combinator2d(Combinator):
    """Combinator specialised for 2d (channels x length x length) activations."""
    def __init__(self, n_channels, length):
        super().__init__(n_channels, length, data_type='2d')
class Combinator1d(Combinator):
    """Combinator specialised for 1d (channels x length) activations."""
    def __init__(self, n_channels, length):
        super().__init__(n_channels, length, data_type='1d')
def add_gaussian_noise(x, sd=0.3):
    """Add zero-mean Gaussian noise with std `sd` to a batch tensor.

    One noise sample shaped like a single batch element is drawn and shared
    by every element of the batch — cheaper than sampling independent noise
    per element, at the price of correlated noise across the batch.
    """
    noise = torch.Tensor(np.random.normal(0.0, sd, x[0].size()))
    # A fully independent alternative would be:
    # noise = torch.normal(0.0, sd, size=x.size())
    if torch.cuda.is_available():
        noise = noise.to(torch.device('cuda'))
    batch = x.size()[0]
    # Tile the single sample across the batch dimension.
    if len(x.shape) == 3:  # 1D data: (batch, channels, length) — assumed
        noise = noise.unsqueeze(0).repeat(batch, 1, 1)
    elif len(x.shape) == 4:  # 2D data: (batch, channels, h, w) — assumed
        noise = noise.unsqueeze(0).repeat(batch, 1, 1, 1)
    return x + noise
|
|
import cnn_rnn
import lasagne
import sample
import numpy as np
import argparse
# Command-line configuration: which tagging tasks to train jointly, the
# fraction of labeled data kept per task, and whether to share layers at
# the very top of the network.
parser = argparse.ArgumentParser()
parser.add_argument('--tasks', nargs='+')
parser.add_argument('--labeling_rates', nargs='+', type=float)
parser.add_argument('--very_top_joint', dest='very_top_joint', action='store_true')
args = parser.parse_args()
TASKS = args.tasks
LABELING_RATES = args.labeling_rates
VERY_TOP_JOINT = args.very_top_joint
print('TASKS', TASKS)
print('LABELING_RATES', LABELING_RATES)
print('VERY_TOP_JOINT', VERY_TOP_JOINT)
# TASKS = ['pos', 'chunking', 'ner']
# TASKS = ['ner', 'pos']
# LABELING_RATES = [1.0, 1.0]
# Per-task minimum epochs assigned to model.min_epoch below.
MIN_PERIODS = [4, 100]
# Tasks flagged here stop contributing updates once MAX_ITER is reached.
EXITS = [False, False]
MAX_ITER = 1000000 # 20
# When True, the test split is folded into training data and the dev
# split is used for evaluation (see the data loading below).
USE_DEV = True
if __name__ == '__main__':
    # Build character and word vocabularies shared across all tasks so the
    # task models can use common embedding matrices.
    char_set, word_set = set(), set()
    for task in TASKS:
        t = __import__(task)
        data_list = [t.TRAIN_DATA, t.DEV_DATA]
        if hasattr(t, 'TEST_DATA'):
            data_list.append(t.TEST_DATA)
        char_index, _ = t.create_char_index(data_list)
        for k, v in char_index.iteritems():
            char_set.add(k)
        word_index, _ = t.create_word_index(data_list)
        for k, v in word_index.iteritems():
            word_set.add(k)
    # Re-number the merged vocabularies contiguously.
    char_index, char_cnt = {}, 0
    for char in char_set:
        char_index[char] = char_cnt
        char_cnt += 1
    word_index, word_cnt = {}, 0
    for word in word_set:
        word_index[word] = word_cnt
        word_cnt += 1
    models, eval_funcs = [], []
    for i, task in enumerate(TASKS):
        t = __import__(task)
        # Word-level data; with USE_DEV the test split is folded into the
        # training set and the dev split is held out for evaluation.
        wx, y, m = t.read_data(t.TRAIN_DATA, word_index)
        if USE_DEV and hasattr(t, 'TEST_DATA'):
            dev_wx, dev_y, dev_m = t.read_data(t.TEST_DATA, word_index)
            wx, y, m = np.vstack((wx, dev_wx)), np.vstack((y, dev_y)), np.vstack((m, dev_m))
        twx, ty, tm = t.read_data(t.DEV_DATA, word_index)
        # Character-level data, same split handling.
        x, cm = t.read_char_data(t.TRAIN_DATA, char_index)
        if USE_DEV and hasattr(t, 'TEST_DATA'):
            dev_x, dev_cm = t.read_char_data(t.TEST_DATA, char_index)
            x, cm = np.vstack((x, dev_x)), np.vstack((cm, dev_cm))
        tx, tcm = t.read_char_data(t.DEV_DATA, char_index)
        # Gazetteer (name-list) features exist only for the NER task.
        if task == 'ner':
            list_prefix = t.read_list()
            gaze = t.read_list_data(t.TRAIN_DATA, list_prefix)
            tgaze = t.read_list_data(t.DEV_DATA, list_prefix)
            if USE_DEV:
                dev_gaze = t.read_list_data(t.TEST_DATA, list_prefix)
                gaze = np.vstack((gaze, dev_gaze))
        else:
            gaze, tgaze = None, None
        model = cnn_rnn.cnn_rnn(char_cnt, len(t.LABEL_INDEX), word_cnt)
        model.min_epoch = MIN_PERIODS[i]
        #### important: set top_joint to specify the joint training level ####
        # model.top_joint = True
        if VERY_TOP_JOINT:
            model.very_top_joint = True
        else:
            model.top_joint = True
        # Subsample the labeled training data at the requested rate.
        if LABELING_RATES[i] < 1.0:
            ind = sample.create_sample_index(LABELING_RATES[i], x.shape[0])
            x, y, m, wx, cm, gaze = sample.sample_arrays((x, y, m, wx, cm, gaze), ind)
        model.add_data(x, y, m, wx, cm, gaze, tx, ty, tm, twx, tcm, tgaze)
        model.build()
        word2embedding = t.read_word2embedding()
        model.set_embedding(word2embedding, word_index)
        model.step_train_init()
        models.append(model)
        eval_funcs.append(t.evaluate)
    prev_params = None
    max_f1s = [0.0, 0.0, 0.0]
    print "\t".join(['task', 'epoch', 'iter', 'max_f1', 'f1', 'prec', 'recall'])
    iter = 0
    # Round-robin training: tasks share the char-level parameters by copying
    # them into each model before its step and back out afterwards.
    while True:
        for i in range(len(models)):
            # for i in [0, 0, 1]: # resample
            model = models[i]
            if prev_params is not None and iter < MAX_ITER:
                lasagne.layers.set_all_param_values(model.char_layer, prev_params)
            if iter >= MAX_ITER and EXITS[i]:
                py = None
            else:
                py = model.step_train()
            if py is not None:
                iter += 1
                acc, f1, prec, recall = eval_funcs[i](py, model.ty, model.tm, full = True)
                max_f1s[i] = max(max_f1s[i], f1)
                print TASKS[i], model.epoch, model.iter, max_f1s[i], f1, prec, recall
            if iter < MAX_ITER:
                prev_params = lasagne.layers.get_all_param_values(model.char_layer)
|
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You(youansheng@gmail.com)
import math
import numpy as np
import torch
from utils.helpers.det_helper import DetHelper
class YOLOTargetGenerator(object):
    """Compute prior boxes coordinates in center-offset form for each source feature map."""
    def __init__(self, configer):
        # Central config object supplying anchors, class count and IoU threshold.
        self.configer = configer
    def __call__(self, feat_list, batch_gt_bboxes, batch_gt_labels, input_size):
        """Build YOLO regression/classification targets for every feature map.

        :param feat_list: per-scale feature maps; only their spatial sizes
            are read.
        :param batch_gt_bboxes: per-image tensors of corner-format boxes in
            input-image pixels — assumed from the arithmetic below; confirm.
        :param batch_gt_labels: per-image class indices matching the boxes.
        :param input_size: (width, height) of the network input.
        :return: (batch_target, batch_objmask, batch_noobjmask) concatenated
            over scales along dim 1.
        """
        batch_target_list = list()
        batch_objmask_list = list()
        batch_noobjmask_list = list()
        for i, ori_anchors in enumerate(self.configer.get('gt', 'anchors_list')):
            in_h, in_w = feat_list[i].size()[2:]
            # Stride from input pixels to this feature map's cells.
            w_fm_stride, h_fm_stride = input_size[0] / in_w, input_size[1] / in_h
            # Anchors rescaled into feature-map units.
            anchors = [(a_w / w_fm_stride, a_h / h_fm_stride) for a_w, a_h in ori_anchors]
            batch_size = len(batch_gt_bboxes)
            num_anchors = len(anchors)
            obj_mask = torch.zeros(batch_size, num_anchors, in_h, in_w)
            noobj_mask = torch.ones(batch_size, num_anchors, in_h, in_w)
            tx = torch.zeros(batch_size, num_anchors, in_h, in_w)
            ty = torch.zeros(batch_size, num_anchors, in_h, in_w)
            tw = torch.zeros(batch_size, num_anchors, in_h, in_w)
            th = torch.zeros(batch_size, num_anchors, in_h, in_w)
            tconf = torch.zeros(batch_size, num_anchors, in_h, in_w)
            tcls = torch.zeros(batch_size, num_anchors, in_h, in_w, self.configer.get('data', 'num_classes'))
            for b in range(batch_size):
                for t in range(batch_gt_bboxes[b].size(0)):
                    # Convert to position relative to box
                    # Box center / size in feature-map units.
                    gx = (batch_gt_bboxes[b][t, 0] + batch_gt_bboxes[b][t, 2]) / (2.0 * input_size[0]) * in_w
                    gy = (batch_gt_bboxes[b][t, 1] + batch_gt_bboxes[b][t, 3]) / (2.0 * input_size[1]) * in_h
                    gw = (batch_gt_bboxes[b][t, 2] - batch_gt_bboxes[b][t, 0]) / input_size[0] * in_w
                    gh = (batch_gt_bboxes[b][t, 3] - batch_gt_bboxes[b][t, 1]) /input_size[1] * in_h
                    # Skip degenerate or out-of-map boxes.
                    if gw * gh == 0 or gx >= in_w or gy >= in_h:
                        continue
                    # Get grid box indices
                    gi = int(gx)
                    gj = int(gy)
                    # Get shape of gt box
                    gt_box = torch.FloatTensor(np.array([0, 0, gw, gh])).unsqueeze(0)
                    # Get shape of anchor box
                    anchor_shapes = torch.FloatTensor(np.concatenate((np.zeros((num_anchors, 2)),
                                                                      np.array(anchors)), 1))
                    # Calculate iou between gt and anchor shapes
                    # anch_ious assumed shape (1, num_anchors) from the
                    # indexing below — TODO confirm DetHelper.bbox_iou.
                    anch_ious = DetHelper.bbox_iou(gt_box, anchor_shapes)
                    # Where the overlap is larger than threshold set mask to zero (ignore)
                    # NOTE(review): this boolean index zeroes entire anchor
                    # channels (all spatial cells), not just cell (gj, gi);
                    # reference YOLOv3 implementations use
                    # noobj_mask[b, mask, gj, gi] = 0 — confirm intended.
                    noobj_mask[b, anch_ious[0] > self.configer.get('gt', 'iou_threshold')] = 0
                    # Find the best matching anchor box
                    best_n = torch.argmax(anch_ious, dim=1)
                    if anch_ious[0, best_n] < self.configer.get('gt', 'iou_threshold'):
                        continue
                    # Masks
                    obj_mask[b, best_n, gj, gi] = 1
                    # Coordinates
                    tx[b, best_n, gj, gi] = gx - gi
                    ty[b, best_n, gj, gi] = gy - gj
                    # Width and height
                    tw[b, best_n, gj, gi] = math.log(gw / anchors[best_n][0] + 1e-16)
                    th[b, best_n, gj, gi] = math.log(gh / anchors[best_n][1] + 1e-16)
                    # object
                    tconf[b, best_n, gj, gi] = 1
                    # One-hot encoding of label
                    tcls[b, best_n, gj, gi, int(batch_gt_labels[b][t])] = 1
            # Flatten spatial/anchor dims so all scales can be concatenated.
            obj_mask = obj_mask.view(batch_size, -1)
            noobj_mask = noobj_mask.view(batch_size, -1)
            tx = tx.view(batch_size, -1).unsqueeze(2)
            ty = ty.view(batch_size, -1).unsqueeze(2)
            tw = tw.view(batch_size, -1).unsqueeze(2)
            th = th.view(batch_size, -1).unsqueeze(2)
            tconf = tconf.view(batch_size, -1).unsqueeze(2)
            tcls = tcls.view(batch_size, -1, self.configer.get('data', 'num_classes'))
            target = torch.cat((tx, ty, tw, th, tconf, tcls), -1)
            batch_target_list.append(target)
            batch_objmask_list.append(obj_mask)
            batch_noobjmask_list.append(noobj_mask)
        batch_target = torch.cat(batch_target_list, 1)
        batch_objmask = torch.cat(batch_objmask_list, 1)
        batch_noobjmask = torch.cat(batch_noobjmask_list, 1)
        return batch_target, batch_objmask, batch_noobjmask
|
|
"""Applies trained neural net in inference mode."""
import copy
import argparse
import numpy
from gewittergefahr.gg_utils import file_system_utils
from ml4tc.io import example_io
from ml4tc.io import prediction_io
from ml4tc.utils import satellite_utils
from ml4tc.machine_learning import neural_net
# Printed between cyclones to break up the log output.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
# Batch size passed to neural_net.apply_model.
NUM_EXAMPLES_PER_BATCH = 32
# Knots to metres per second (1 kt = 1.852 km/h).
KT_TO_METRES_PER_SECOND = 1.852 / 3.6
# Command-line argument names, shared with _run() via getattr at the bottom.
MODEL_FILE_ARG_NAME = 'input_model_file_name'
EXAMPLE_DIR_ARG_NAME = 'input_example_dir_name'
YEARS_ARG_NAME = 'years'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
MODEL_FILE_HELP_STRING = (
    'Path to trained model. Will be read by `neural_net.read_model`.'
)
EXAMPLE_DIR_HELP_STRING = (
    'Name of input directory, containing examples to predict. Files therein '
    'will be found by `example_io.find_file` and read by '
    '`example_io.read_file`.'
)
YEARS_HELP_STRING = 'Model will be applied to tropical cyclones in these years.'
OUTPUT_DIR_HELP_STRING = (
    'Name of output directory. Predictions and targets will be written here by'
    ' `prediction_io.write_file`, to an exact location determined by '
    '`prediction_io.find_file`.'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + MODEL_FILE_ARG_NAME, type=str, required=True,
    help=MODEL_FILE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + EXAMPLE_DIR_ARG_NAME, type=str, required=True,
    help=EXAMPLE_DIR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + YEARS_ARG_NAME, type=int, nargs='+', required=True,
    help=YEARS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING
)
def _run(model_file_name, example_dir_name, years, output_dir_name):
    """Applies trained neural net in inference mode.
    This is effectively the main method.
    :param model_file_name: See documentation at top of file.
    :param example_dir_name: Same.
    :param years: Same.
    :param output_dir_name: Same.
    """
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name
    )
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = neural_net.read_model(model_file_name)
    metafile_name = neural_net.find_metafile(
        model_file_name=model_file_name, raise_error_if_missing=True
    )
    print('Reading metadata from: "{0:s}"...'.format(metafile_name))
    metadata_dict = neural_net.read_metafile(metafile_name)
    # Inference reuses the validation-time data options (file keys are
    # swapped in per cyclone below).
    validation_option_dict = metadata_dict[neural_net.VALIDATION_OPTIONS_KEY]
    cyclone_id_string_by_file = example_io.find_cyclones(
        directory_name=example_dir_name, raise_error_if_all_missing=True
    )
    cyclone_year_by_file = numpy.array([
        satellite_utils.parse_cyclone_id(c)[0]
        for c in cyclone_id_string_by_file
    ], dtype=int)
    # Keep only cyclones whose year is in `years`; float flags are fine
    # because numpy.where treats any non-zero entry as True.
    good_flags = numpy.array(
        [c in years for c in cyclone_year_by_file], dtype=float
    )
    good_indices = numpy.where(good_flags)[0]
    cyclone_id_string_by_file = [
        cyclone_id_string_by_file[k] for k in good_indices
    ]
    cyclone_id_string_by_file.sort()
    example_file_names = [
        example_io.find_file(
            directory_name=example_dir_name, cyclone_id_string=c,
            prefer_zipped=False, allow_other_format=True,
            raise_error_if_missing=True
        )
        for c in cyclone_id_string_by_file
    ]
    # Accumulators, grown cyclone by cyclone.
    target_classes = numpy.array([], dtype=int)
    forecast_prob_matrix = None
    cyclone_id_string_by_example = []
    init_times_unix_sec = numpy.array([], dtype=int)
    storm_latitudes_deg_n = numpy.array([], dtype=float)
    storm_longitudes_deg_e = numpy.array([], dtype=float)
    for i in range(len(example_file_names)):
        this_option_dict = copy.deepcopy(validation_option_dict)
        this_option_dict[neural_net.EXAMPLE_FILE_KEY] = example_file_names[i]
        this_data_dict = neural_net.create_inputs(this_option_dict)
        # Skip cyclones that produced no usable examples.
        if this_data_dict[neural_net.TARGET_ARRAY_KEY].size == 0:
            continue
        # Targets may arrive as class indices (1-D) or one-hot (2-D).
        if len(this_data_dict[neural_net.TARGET_ARRAY_KEY].shape) == 1:
            these_target_classes = (
                this_data_dict[neural_net.TARGET_ARRAY_KEY] + 0
            )
        else:
            these_target_classes = numpy.argmax(
                this_data_dict[neural_net.TARGET_ARRAY_KEY], axis=1
            )
        these_predictor_matrices = [
            m for m in this_data_dict[neural_net.PREDICTOR_MATRICES_KEY]
            if m is not None
        ]
        this_prob_array = neural_net.apply_model(
            model_object=model_object,
            predictor_matrices=these_predictor_matrices,
            num_examples_per_batch=NUM_EXAMPLES_PER_BATCH, verbose=True
        )
        # Normalize model output to a two-column [P(neg), P(pos)] matrix
        # when the model emits a single probability per example.
        if len(this_prob_array.shape) == 1:
            this_prob_array = numpy.reshape(
                this_prob_array, (len(this_prob_array), 1)
            )
            this_prob_matrix = numpy.concatenate(
                (1. - this_prob_array, this_prob_array), axis=1
            )
        elif this_prob_array.shape[1] == 1:
            this_prob_matrix = numpy.concatenate(
                (1. - this_prob_array, this_prob_array), axis=1
            )
        else:
            this_prob_matrix = this_prob_array + 0.
        target_classes = numpy.concatenate(
            (target_classes, these_target_classes), axis=0
        )
        cyclone_id_string_by_example += (
            [cyclone_id_string_by_file[i]] *
            len(this_data_dict[neural_net.INIT_TIMES_KEY])
        )
        init_times_unix_sec = numpy.concatenate(
            (init_times_unix_sec, this_data_dict[neural_net.INIT_TIMES_KEY]),
            axis=0
        )
        storm_latitudes_deg_n = numpy.concatenate((
            storm_latitudes_deg_n,
            this_data_dict[neural_net.STORM_LATITUDES_KEY]
        ), axis=0)
        storm_longitudes_deg_e = numpy.concatenate((
            storm_longitudes_deg_e,
            this_data_dict[neural_net.STORM_LONGITUDES_KEY]
        ), axis=0)
        if forecast_prob_matrix is None:
            forecast_prob_matrix = this_prob_matrix + 0.
        else:
            forecast_prob_matrix = numpy.concatenate(
                (forecast_prob_matrix, this_prob_matrix), axis=0
            )
        print(SEPARATOR_STRING)
    output_file_name = prediction_io.find_file(
        directory_name=output_dir_name, raise_error_if_missing=False
    )
    print('Writing predictions and target values to: "{0:s}"...'.format(
        output_file_name
    ))
    prediction_io.write_file(
        netcdf_file_name=output_file_name,
        forecast_probability_matrix=forecast_prob_matrix,
        target_classes=target_classes,
        cyclone_id_strings=cyclone_id_string_by_example,
        init_times_unix_sec=init_times_unix_sec,
        storm_latitudes_deg_n=storm_latitudes_deg_n,
        storm_longitudes_deg_e=storm_longitudes_deg_e,
        model_file_name=model_file_name
    )
if __name__ == '__main__':
    # Parse CLI args and forward them to _run with the expected types
    # (years coerced to an int numpy array).
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
    _run(
        model_file_name=getattr(INPUT_ARG_OBJECT, MODEL_FILE_ARG_NAME),
        example_dir_name=getattr(INPUT_ARG_OBJECT, EXAMPLE_DIR_ARG_NAME),
        years=numpy.array(getattr(INPUT_ARG_OBJECT, YEARS_ARG_NAME), dtype=int),
        output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
    )
|
|
# encoding: utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from xmuda.models.LMSCNet import SegmentationHead
from xmuda.models.context_prior import ContextPrior3D
from xmuda.models.context_prior_v2 import ContextPrior3Dv2
from xmuda.models.CP_baseline import CPBaseline
from xmuda.models.CP_implicit import CPImplicit
from xmuda.models.CP_implicit_pairwise import CPImplicitPairwise
#from xmuda.models.CP_implicit_pairwise_v2 import CPImplicitPairwise
from xmuda.models.CP_implicit_leftnonempty import CPImplicitV2
from xmuda.models.DDR import Bottleneck3D
from functools import partial
from collections import OrderedDict
class Decoder3D(nn.Module):
def __init__(self, class_num, norm_layer,
non_empty_ratio=0.2,
max_k=256,
context_prior=None,
output_resolutions=['1_4'],
in_channels={'1_16': 256, '1_8': 128, '1_4': 128},
CP_res="1_16",
feature=128,
bn_momentum=0.1):
super(Decoder3D, self).__init__()
self.business_layer = []
self.CP_res = CP_res
self.output_resolutions = output_resolutions
self.in_channels = in_channels
self.feature = feature
self.resize_input_1_4 = nn.Conv3d(256 + 3, 128, kernel_size=1)
self.resize_input_1_8 = nn.Conv3d(512 + 3, 128, kernel_size=1)
self.resize_input_1_16 = nn.Conv3d(1024 + 3, 256, kernel_size=1)
self.pooling = nn.AvgPool3d(kernel_size=3, padding=1, stride=1)
self.down_1_4_1_8 = Bottleneck3D(feature,
feature // 4,
bn_momentum=bn_momentum,
expansion=4, stride=2,
downsample=nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature, kernel_size=1, stride=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
),
norm_layer=norm_layer)
self.main_1_8 = nn.Sequential(
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.down_1_8_1_16 = Bottleneck3D(feature,
feature // 4,
bn_momentum=bn_momentum,
expansion=8, stride=2,
downsample=nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature * 2, kernel_size=1, stride=1, bias=False),
norm_layer(feature * 2, momentum=bn_momentum),),
norm_layer=norm_layer)
self.main_1_16 = nn.Sequential(
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.up_1_16_1_8 = nn.Sequential(
nn.ConvTranspose3d(feature * 2, feature, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False)
)
self.up_1_8_1_4 = nn.Sequential(
nn.ConvTranspose3d(feature, feature, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False)
)
self.ssc_head_1_4 = nn.Sequential(
nn.Dropout3d(.1),
SegmentationHead(feature, feature, class_num, [1, 2, 3])
)
if '1_8' in self.output_resolutions:
self.ssc_head_1_8 = nn.Sequential(
nn.Dropout3d(.1),
nn.Conv3d(feature, class_num, kernel_size=1, bias=True)
)
if '1_16' in self.output_resolutions:
self.ssc_head_1_16 = nn.Sequential(
nn.Dropout3d(.1),
nn.Conv3d(feature * 2, class_num, kernel_size=1, bias=True)
)
self.enc_1_8 = nn.Sequential(
Bottleneck3D(in_channels['1_8'], in_channels['1_8'] // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(in_channels['1_8'], in_channels['1_8'] // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(in_channels['1_8'], in_channels['1_8'] // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.enc_1_16 = nn.Sequential(
Bottleneck3D(in_channels['1_16'], in_channels['1_16'] // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(in_channels['1_16'], in_channels['1_16'] // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(in_channels['1_16'], in_channels['1_16'] // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.resize_1_8 = nn.Conv3d(feature + in_channels['1_8'], feature, kernel_size=1)
self.resize_1_8_up = nn.Conv3d(feature * 2, feature, kernel_size=1)
self.resize_1_16 = nn.Conv3d(feature * 2 + in_channels['1_16'], feature * 2, kernel_size=1)
self.resize_1_4_up = nn.Conv3d(feature * 2, feature, kernel_size=1)
self.context_prior = context_prior
if context_prior == "CRCP":
# self.CRCP_layer = ContextPrior3D(feature * 2, (15, 9, 15), norm_layer, class_num, bn_momentum)
self.CP_implicit_pairwise = CPImplicitPairwise(feature * 2, (15, 9, 15),
non_empty_ratio=non_empty_ratio,
max_k=max_k,
n_classes=class_num,
bn_momentum=bn_momentum)
# self.CRCP_layer = ContextPrior3D(feature, (30, 18, 30), norm_layer, class_num, bn_momentum)
elif context_prior == "CPImplicit":
if self.CP_res == "1_16":
self.CPImplicit_layer = CPImplicit(feature * 2, (15, 9, 15),
non_empty_ratio=non_empty_ratio,
max_k=max_k)
# self.CPImplicit_layer = CPImplicitV2(feature * 2, (15, 9, 15))
elif self.CP_res == "1_8":
self.CPImplicit_layer = CPImplicit(feature, (30, 18, 30))
elif context_prior == 'CP':
self.CP_layer = CPBaseline(feature * 2, (15, 9, 15), norm_layer, bn_momentum)
# self.CP_layer = CPBaseline(feature, (30, 18, 30), norm_layer, bn_momentum)
# self.resize_1_16_transformer = nn.Conv3d(feature * 4, feature * 2, kernel_size=1)
# transformer_encoder_layer = nn.TransformerEncoderLayer(d_model=256, nhead=8)
# self.transformer_encoder = nn.TransformerEncoder(transformer_encoder_layer, num_layers=6)
# self.positional_encodings = nn.Parameter(torch.rand(15 * 9 * 15, 256), requires_grad=True)
    def forward(self, input_dict):
        """Multi-scale 3D decoder pass with optional context-prior modules.

        Consumes three voxel feature volumes from ``input_dict``
        (``'x3d_1_4'``, ``'x3d_1_8'``, ``'x3d_1_16'``), runs an
        encoder/decoder over the 1/4 -> 1/8 -> 1/16 -> 1/8 -> 1/4 scale
        pyramid with skip connections, applies the configured context-prior
        layer at 1/16 (or 1/8) resolution, and returns a dict of SSC logits
        keyed by resolution ('1_16', '1_8', '1_4') plus any prior outputs
        (e.g. 'P', 'P_logits', 'topk_indices').

        NOTE(review): depending on ``self.context_prior``, ``input_dict``
        must also contain 'masks_1_16' / 'masks_1_8' (and for "RP",
        'map_context_1_16' / 'map_P_1_16') — confirm against callers.
        """
        # Project each input scale to the decoder's channel widths.
        x3d_input_1_4 = self.resize_input_1_4(input_dict['x3d_1_4'])
        x3d_input_1_8 = self.resize_input_1_8(input_dict['x3d_1_8'])
        x3d_input_1_16 = self.resize_input_1_16(input_dict['x3d_1_16'])
        res = {}
        # 1/4 -> 1/8: downsample, fuse with the encoded 1/8 input (skip), resize channels.
        x3d_1_8 = self.down_1_4_1_8(x3d_input_1_4)
        x3d_input_1_8 = self.enc_1_8(x3d_input_1_8)
        x3d_1_8 = torch.cat([x3d_1_8, x3d_input_1_8], dim=1)
        x3d_1_8 = self.resize_1_8(x3d_1_8)
        x3d_1_8 = self.main_1_8(x3d_1_8)
        # 1/8 -> 1/16: same downsample-fuse-resize pattern at the coarsest scale.
        x3d_1_16 = self.down_1_8_1_16(x3d_1_8)
        x3d_input_1_16 = self.enc_1_16(x3d_input_1_16)
        x3d_1_16 = torch.cat([x3d_1_16, x3d_input_1_16], dim=1)
        x3d_1_16 = self.resize_1_16(x3d_1_16)
        x3d_1_16 = self.main_1_16(x3d_1_16)
        # Context-prior variants applied at 1/16 resolution.
        if self.context_prior == 'CP':
            masks_1_16 = input_dict['masks_1_16']
            x3d_1_16, P = self.CP_layer(x3d_1_16, masks_1_16)
            res['P'] = P
        if self.context_prior == 'CPImplicit':
            if self.CP_res == "1_16":
                masks_1_16 = input_dict['masks_1_16']
                ret = self.CPImplicit_layer(x3d_1_16, masks_1_16)
                x3d_1_16 = ret['x']
                # Forward every auxiliary output of the prior layer to the result dict.
                for k in ret.keys():
                    res[k] = ret[k]
                # res["P_logits"] = ret['P_logits']
                # res["topk_indices"] = ret['topk_indices']
                # res["non_empty_logits"] = ret['non_empty_logits']
                # res["topM_indices"] = ret['topM_indices']
        if self.context_prior == "CRCP":
            masks_1_16 = input_dict['masks_1_16']
            # x3d_1_16, P_logit = self.CRCP_layer(x3d_1_16, masks_1_16)
            ret= self.CP_implicit_pairwise(x3d_1_16, masks_1_16)
            x3d_1_16 = ret['x']
            for k in ret.keys():
                res[k] = ret[k]
            # res['P_logits'] = ret['P_logits']
            # res["topk_indices"] = ret['topk_indices']
        if self.context_prior == "RP":
            RP_map_context_1_16 = input_dict['map_context_1_16']
            RP_map_P_1_16 = input_dict['map_P_1_16']
            # NOTE(review): `masks_1_16` is not assigned in this branch, so
            # reaching it raises NameError unless an earlier branch also ran;
            # `self.RP_layer` is likewise not created in the visible __init__.
            x3d_1_16, P_logit = self.RP_layer(x3d_1_16, masks_1_16, RP_map_context_1_16, RP_map_P_1_16)
            res['P_logit'] = P_logit
        # embedding_1_16 = x3d_1_16.reshape(x3d_1_16.shape[0], x3d_1_16.shape[1], -1) + self.positional_encodings.T.unsqueeze(0)
        # embedding_1_16 = embedding_1_16.permute(2, 0, 1)
        # embedding_1_16 = self.transformer_encoder(embedding_1_16)
        # bs, c, h, w, d = x3d_1_16.shape
        # y = torch.matmul(x3d_1_16.reshape(bs, c, -1).permute(0, 2, 1), embedding_1_16[:256, :, :].permute(0, 2, 1))
        # x3d_1_16 = y.permute(0, 2, 1).view(bs, -1, h, w, d)
        # embedding_1_16 = embedding_1_16.permute(1, 2, 0).reshape(x3d_1_16.shape)
        # x3d_1_16 = torch.cat([x3d_1_16, embedding_1_16], dim=1)
        # x3d_1_16 = self.resize_1_16_transformer(x3d_1_16)
        # Optional deep-supervision head at 1/16.
        if '1_16' in self.output_resolutions:
            ssc_logit_1_16 = self.ssc_head_1_16(x3d_1_16)
            res["1_16"] = ssc_logit_1_16
        # 1/16 -> 1/8: upsample and fuse with the 1/8 decoder features.
        x3d_up_1_8 = self.up_1_16_1_8(x3d_1_16)
        # x3d_up_1_8 = x3d_up_1_8 + x3d_1_8
        x3d_up_1_8 = torch.cat([x3d_up_1_8, x3d_1_8], dim=1)
        x3d_up_1_8 = self.resize_1_8_up(x3d_up_1_8)
        # CPImplicit applied at 1/8 resolution instead, when configured so.
        if self.context_prior == 'CPImplicit':
            if self.CP_res == "1_8":
                masks_1_8 = input_dict['masks_1_8']
                ret = self.CPImplicit_layer(x3d_up_1_8, masks_1_8)
                x3d_up_1_8 = ret['x']
                res["P_logits"] = ret['P_logits']
                res["topk_indices"] = ret['topk_indices']
                res["non_empty_logits"] = ret['non_empty_logits']
        if '1_8' in self.output_resolutions:
            ssc_logit_1_8 = self.ssc_head_1_8(x3d_up_1_8)
            res["1_8"] = ssc_logit_1_8
        # if self.context_prior == 'CP':
        #     masks_1_8 = input_dict['masks_1_8']
        #     x3d_up_1_8, P = self.CP_layer(x3d_up_1_8, masks_1_8)
        #     res['P'] = P
        #
        # if self.context_prior == "CRCP":
        #     masks_1_8 = input_dict['masks_1_8']
        #     x3d_up_1_8, P_logit = self.CRCP_layer(x3d_up_1_8, masks_1_8)
        #     res['P_logit'] = P_logit
        # if self.context_prior == "RP":
        #     RP_map_context_1_16 = input_dict['map_context_1_16']
        #     RP_map_P_1_16 = input_dict['map_P_1_16']
        #     x3d_1_16, P_logit = self.RP_layer(x3d_1_16, masks_1_16, RP_map_context_1_16, RP_map_P_1_16)
        #     res['P_logit'] = P_logit
        # 1/8 -> 1/4: final upsample, fuse with the raw 1/4 input, predict logits.
        x3d_up_1_4 = self.up_1_8_1_4(x3d_up_1_8)
        x3d_up_1_4 = torch.cat([x3d_up_1_4, x3d_input_1_4], dim=1)
        x3d_up_1_4 = self.resize_1_4_up(x3d_up_1_4)
        ssc_logit_1_4 = self.ssc_head_1_4(x3d_up_1_4)
        res['1_4'] = ssc_logit_1_4
        return res
if __name__ == '__main__':
    # Smoke test: instantiate the network and push random inputs through it.
    model = Network(class_num=12, norm_layer=nn.BatchNorm3d, feature=128, eval=True)
    # print(model)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()
    # NOTE(review): the tensors below are created with .cuda() directly, so
    # this block crashes on CPU-only machines even though `device` above
    # falls back to 'cpu'. `right` is also never used after creation.
    left = torch.rand(1, 3, 480, 640).cuda()
    right = torch.rand(1, 3, 480, 640).cuda()
    depth_mapping_3d = torch.from_numpy(np.ones((1, 129600)).astype(np.int64)).long().cuda()
    tsdf = torch.rand(1, 1, 60, 36, 60).cuda()
    # NOTE(review): the forward() visible in this file takes a single
    # input_dict argument; this 4-argument call looks like a leftover from
    # an older signature — confirm against the actual Network.forward.
    out = model(left, depth_mapping_3d, tsdf, None)
|
|
import comet_ml
import tensorflow as tf
print(f'Using tensorflow version: {tf.version.VERSION}')
import keras
import keras.backend as K
from keras.layers import Dense, Dropout
from keras.metrics import TrueNegatives, TruePositives, FalseNegatives, FalsePositives
import pandas as pd
import numpy as np
from typing import List, Dict, Union
import warnings
from sklearn.metrics import confusion_matrix
VALID_CLASSIFICATION_LOSSES = ['BinaryCrossentropy', 'CategoricalCrossentropy', 'SparseCategoricalCrossentropy',
'Poisson', 'KLDivergence', 'Hinge', 'SquaredHinge', 'CategoricalHinge']
VALID_INITIALIZERS = ['Zeros', 'Ones', 'Identity', 'Orthogonal', 'Constant', 'VarianceScaling',
'TruncatedNormal', 'RandomNormal', 'GlorotNormal', 'RandomUniform', 'GlorotUniform']
VALID_OPTIMIZERS = ['Adam', 'Adadelta', 'Adamax', 'SGD', 'Ftrl', 'Nadam', 'RMSprop', 'Adagrad']
# ____________________________________________________________________________________________________________________________________
def check_name(input_name: str, compare_to_name: str) -> bool:
    """Case-insensitively compare two identifier names.

    Parameters
    ----------
    input_name : str
        Name supplied by the caller (e.g. a layer type or optimizer name).
    compare_to_name : str
        Canonical name to compare against.

    Returns
    -------
    bool
        True when the names match ignoring case, False otherwise.
        (Bug fix: the original fell through and implicitly returned None
        on a mismatch instead of False; truthiness is unchanged.)
    """
    return input_name.lower() == compare_to_name.lower()
# ____________________________________________________________________________________________________________________________________
# ____________________________________________________________________________________________________________________________________
class DL:
    """Configurable fully-connected Keras classifier/regressor wrapper.

    Builds a keras model from a declarative list of layer dicts, resolves
    loss/initializer/optimizer objects from their string names, and exposes
    fit/evaluate/predict helpers plus custom binary-classification metrics
    (implemented with keras.backend ops so they can run on tensors).

    NOTE(review): ``callbacks`` and ``fit_metadata`` are CLASS attributes,
    so every DL (and subclass) instance shares the same list/dict; each new
    instance appends another EarlyStopping callback to the shared list —
    confirm whether per-instance state was intended.
    """
    callbacks = list()
    fit_metadata = dict()
    def __init__(self,
                 split_sets: Dict[str, Union[np.ndarray]],  # , np.ndarray]],
                 split_sets_metadata: Dict,
                 model_type: str = 'sequential',
                 layers_list: List[Dict] = None,
                 hyper_params: Dict[str, Union[int, float, str]] = None,
                 hyper_method_names: Dict[str, str] = None,
                 early_stopping_flag: bool = True,
                 comet_experiment: comet_ml.BaseExperiment = None,
                 trained_models_dir= None,
                 model_id = None
                 ):
        """Assemble, wire and compile the model.

        Parameters
        ----------
        split_sets : dict with keys 'X_train', 'y_train', 'X_test', 'y_test'.
        split_sets_metadata : dict; must contain 'class_weight' for fit_model.
        model_type : only 'sequential' is supported.
        layers_list : list of layer dicts ({'type', 'units', 'activation'} or
            {'type': 'dropout', 'rate': ...}); defaults to two dense layers.
        hyper_params : dict read by the optimizer/callback builders
            ('learning_rate', 'decay', 'epochs', 'batch_size',
            'validation_split', 'patience').
        hyper_method_names : dict with 'loss', 'initializer', 'optimizer'
            (and 'regularizer', read directly by define_layers).
        early_stopping_flag : append an EarlyStopping callback when True.
        comet_experiment : optional comet_ml experiment for train/test scoping.
        trained_models_dir, model_id : used by fit_model to save the .h5 file.
        """
        # mutable defaults handling
        if layers_list is None:
            layers_list = [{'units': 32, 'type': 'dense', 'activation': 'relu'},
                           {'units': 64, 'type': 'dense', 'activation': 'relu'}]
        # CometML Experiment
        self.comet_experiment = comet_experiment
        # training and testing sets and their metadata
        self.split_sets = split_sets
        self.split_sets_metadata = split_sets_metadata
        # hyper parameters and methods
        self.hyper_params = hyper_params
        self.hyper_method_names = hyper_method_names
        self.hyper_methods = self.get_initialized_hyper_methods(hyper_method_names)
        # callbacks (appends to the shared class-level list, see class NOTE)
        self.set_early_stopping(early_stopping_flag)
        # make model
        self.model = self.get_initialized_model(model_type)
        self.define_layers(layers_list);
        self.layers_list = layers_list  # keep just in case
        self.compile_model()
        # test_scores
        self.test_scores = dict()
        self.trained_models_dir = trained_models_dir
        self.model_id = model_id
    # ____________________________________________________________________________________________________________________________________
    def get_initialized_hyper_methods(self, hyper_method_names: Dict[str, str]) -> Dict[str, object]:
        """Resolve the loss/initializer/optimizer names into keras objects."""
        hyper_methods = dict(
            loss = self.get_loss(hyper_method_names['loss']),
            initializer = self.get_initializer(hyper_method_names['initializer']),
            optimizer = self.get_optimizer(hyper_method_names['optimizer'])
        )
        return hyper_methods
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def get_initialized_model(cls, model_type) -> keras.models:
        """Return an empty keras model of the requested type (Sequential only)."""
        if check_name(model_type, 'sequential'): return keras.models.Sequential()
        raise ValueError(f'Unknown model type! \nGot {model_type} as model_type.')
    # ____________________________________________________________________________________________________________________________________
    def define_layers(self, layers_list: List) -> None:
        """Add the declared layers to self.model.

        The first layer must be Dense (it carries the input_shape derived
        from X_train's feature count); Dropout first raises ValueError.
        NOTE(review): kernel_regularizer is passed the regularizer NAME
        string from hyper_method_names — keras accepts string identifiers
        such as 'l1'/'l2' (or None), so confirm the configured values are
        valid keras identifiers rather than custom objects.
        """
        first_layer_flag = True
        for layer_dict in layers_list:
            # first layer
            if first_layer_flag:
                first_layer_flag = False
                if check_name(layer_dict['type'], 'Dense'):
                    self.model.add(Dense(units = layer_dict['units'],
                                         input_shape = (self.split_sets['X_train'].shape[1],),
                                         activation = layer_dict['activation'],
                                         kernel_initializer = self.hyper_methods['initializer'],
                                         kernel_regularizer = self.hyper_method_names['regularizer']))
                if check_name(layer_dict['type'], 'Dropout'):
                    raise ValueError('First layer should not be a dropout layer!')
            # after first layer
            else:
                if check_name(layer_dict['type'], 'Dense'):
                    self.model.add(Dense(units = layer_dict['units'],
                                         activation = layer_dict['activation'],
                                         kernel_initializer = self.hyper_methods['initializer'],
                                         kernel_regularizer = self.hyper_method_names['regularizer']))
                if check_name(layer_dict['type'], 'Dropout'):
                    self.model.add(Dropout(rate = layer_dict['rate']))
    # ____________________________________________________________________________________________________________________________________
    def compile_model(self) -> None:
        """Compile self.model with the resolved loss/optimizer and print its summary."""
        self.model.compile(loss = self.hyper_methods['loss'],
                           optimizer = self.hyper_methods['optimizer'],
                           # metrics = ["accuracy", self.f1, self.precision, self.recall],
                           metrics = ["accuracy",
                                      # self.true_positives, self.true_negatives, self.false_positives, self.false_negatives,
                                      # self.f1_macro, self.f1_1, self.f1_0,
                                      # self.precision, self.recall, self.specificity, self.npv,
                                      # tf.keras.metrics.AUC(curve="ROC", name = 'ROC_AUC'), tf.keras.metrics.AUC(curve="PR", name = 'PR_AUC')
                                      ])
        print(self.model.summary())
    # ____________________________________________________________________________________________________________________________________
    def fit_model(self) -> None:
        """Train the model (inside the comet train context when available),
        record the number of epochs run, and save the model to disk.

        NOTE(review): the save path is built with a Windows-style '\\\\'
        separator and assumes trained_models_dir exposes a .path attribute
        — confirm on the target platform.
        """
        if self.comet_experiment is not None:
            with self.comet_experiment.train():
                fit_metadata_tracker = self.model.fit(x = self.split_sets['X_train'],
                                                      y = self.split_sets['y_train'],
                                                      epochs = self.hyper_params['epochs'],
                                                      batch_size = self.hyper_params['batch_size'],
                                                      validation_split = self.hyper_params['validation_split'],
                                                      callbacks = self.callbacks,
                                                      verbose = 10,
                                                      class_weight = self.split_sets_metadata['class_weight'],
                                                      workers = -1
                                                      )
        else:
            fit_metadata_tracker = self.model.fit(x = self.split_sets['X_train'],
                                                  y = self.split_sets['y_train'],
                                                  epochs = self.hyper_params['epochs'],
                                                  batch_size = self.hyper_params['batch_size'],
                                                  validation_split = self.hyper_params['validation_split'],
                                                  callbacks = self.callbacks,
                                                  verbose = 10,
                                                  class_weight = self.split_sets_metadata['class_weight'],
                                                  workers = -1
                                                  )
        self.set_fit_metadata(fit_metadata_tracker)
        self.model.save(f'{self.trained_models_dir.path}\\{self.model_id}.h5')
    # ____________________________________________________________________________________________________________________________________
    def set_fit_metadata(self, fit_tracker) -> None:
        """Store the actual number of epochs run (EarlyStopping may cut training short)."""
        self.fit_metadata['n_epochs'] = len(fit_tracker.history['loss'])
    # ____________________________________________________________________________________________________________________________________
    def get_fit_num_epochs(self) -> int:
        """Return the number of epochs recorded by the last fit_model call."""
        return self.fit_metadata['n_epochs']
    # ____________________________________________________________________________________________________________________________________
    def test_model(self) -> Dict[str, float]:
        """Evaluate on the held-out test split and return {'loss', 'accuracy'}.

        NOTE(review): batch_size=1000000 effectively evaluates the whole test
        set in one batch — confirm it fits in memory for large datasets.
        """
        if self.comet_experiment is not None:
            with self.comet_experiment.test():
                # loss, tp, tn, fp, fn, accuracy, f1_macro, f1_1, f1_0, precision, recall, specificity, npv, roc_auc, pr_auc = \
                loss, accuracy = self.model.evaluate(self.split_sets['X_test'], self.split_sets['y_test'], verbose = 10,
                                                     batch_size = 1000000, workers = -1)
        else:
            # loss, tp, tn, fp, fn, accuracy, f1_macro, f1_1, f1_0, precision, recall, specificity, npv, roc_auc, pr_auc = \
            loss, accuracy = self.model.evaluate(self.split_sets['X_test'], self.split_sets['y_test'], verbose = 10,
                                                 batch_size = 1000000, workers = -1)
        self.test_scores['loss'] = loss
        self.test_scores['accuracy'] = accuracy
        # self.test_scores['f1_macro'] = f1_macro
        # self.test_scores['f1_1'] = f1_1
        # self.test_scores['f1_0'] = f1_0
        # self.test_scores['precision'] = precision
        # self.test_scores['recall'] = recall
        # self.test_scores['specificity'] = specificity
        # self.test_scores['npv'] = npv
        # self.test_scores['roc_auc'] = roc_auc
        # self.test_scores['pr_auc'] = pr_auc
        # self.test_scores['tp'] = tp
        # self.test_scores['tn'] = tn
        # self.test_scores['fp'] = fp
        # self.test_scores['fn'] = fn
        # if self.comet_experiment is not None:
        #
        #     # last logs and ending experiment
        #     self.comet_experiment.log_metrics(dict(logged_loss = loss,
        #                                            logged_accuracy = accuracy,
        #                                            logged_f1_macro = f1_macro,
        #                                            logged_f1_1 = f1_1,
        #                                            logged_f1_0 = f1_0,
        #                                            logged_precision = precision,
        #                                            logged_recall = recall,
        #                                            logged_specificity = specificity,
        #                                            logged_npv = npv,
        #                                            logged_roc_auc = roc_auc,
        #                                            logged_pr_auc = pr_auc,
        #                                            logged_tp = tp,
        #                                            logged_tn = tn,
        #                                            logged_fp = fp,
        #                                            logged_fn = fn,
        #                                            logged_balance_train = self.split_sets_metadata['class_weight'][1],
        #                                            logged_balance_test = self.split_sets_metadata['class_balance_test'][1],
        #                                            ))
        #
        #     self.comet_experiment.log_parameters({**self.hyper_params,
        #                                           **self.hyper_methods,
        #                                           'architecture': self.layers_list})
        #     self.comet_experiment.end()
        return self.test_scores
    # ____________________________________________________________________________________________________________________________________
    def predict(self, return_metrics: bool = False):
        """Predict classes for X_test; optionally return a metrics_report dict instead.

        NOTE(review): Sequential.predict_classes was removed in TF 2.6 —
        confirm the pinned TF/keras version still provides it.
        """
        if return_metrics:
            return self.metrics_report(y_pred = self.model.predict_classes(self.split_sets['X_test']),
                                       y_test = self.split_sets['y_test'])
        else:
            return self.model.predict_classes(self.split_sets['X_test'])
    # ____________________________________________________________________________________________________________________________________
    def get_optimizer(self, optimizer_name: str) -> keras.optimizers:
        """Build a keras optimizer from its name, using self.hyper_params
        ('learning_rate' always; 'decay' only for Adam)."""
        if check_name(optimizer_name, 'Adam'): return keras.optimizers.Adam(
            learning_rate = self.hyper_params['learning_rate'],
            decay = self.hyper_params['decay'])
        if check_name(optimizer_name, 'Adadelta'): return keras.optimizers.Adadelta(
            learning_rate = self.hyper_params['learning_rate'])
        if check_name(optimizer_name, 'Adamax'): return keras.optimizers.Adamax(
            learning_rate = self.hyper_params['learning_rate'])
        if check_name(optimizer_name, 'SGD'): return keras.optimizers.SGD(
            learning_rate = self.hyper_params['learning_rate'])
        if check_name(optimizer_name, 'Ftrl'): return keras.optimizers.Ftrl(
            learning_rate = self.hyper_params['learning_rate'])
        if check_name(optimizer_name, 'Nadam'): return keras.optimizers.Nadam(
            learning_rate = self.hyper_params['learning_rate'])
        if check_name(optimizer_name, 'RMSprop'): return keras.optimizers.RMSprop(
            learning_rate = self.hyper_params['learning_rate'])
        if check_name(optimizer_name, 'Adagrad'): return keras.optimizers.Adagrad(
            learning_rate = self.hyper_params['learning_rate'])
        raise ValueError('Unknown optimizer!')
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def get_initializer(cls, initializer_name: str, initializer_kwargs: dict = None) -> keras.initializers:
        """Build a keras weight initializer from its name.

        NOTE(review): when initializer_kwargs is not None the function falls
        through every branch except VarianceScaling and may raise even for
        otherwise-valid names — confirm that is intended.
        """
        if initializer_kwargs is None:
            if check_name(initializer_name, 'Zeros'): return keras.initializers.Zeros()
            if check_name(initializer_name, 'Ones'): return keras.initializers.Ones()
            if check_name(initializer_name, 'Identity'): return keras.initializers.Identity()
            if check_name(initializer_name, 'Orthogonal'): return keras.initializers.Orthogonal()
            if check_name(initializer_name, 'Constant'): return keras.initializers.Constant()
            if check_name(initializer_name, 'TruncatedNormal'): return keras.initializers.TruncatedNormal()
            if check_name(initializer_name, 'RandomNormal'): return keras.initializers.RandomNormal()
            if check_name(initializer_name, 'GlorotNormal'): return keras.initializers.GlorotNormal()
            if check_name(initializer_name, 'RandomUniform'): return keras.initializers.RandomUniform()
            if check_name(initializer_name, 'GlorotUniform'): return keras.initializers.GlorotUniform()
        # if activtion is relu, as recommended by andrew yang; justification and research paper yet to be read
        if check_name(initializer_name, 'VarianceScaling'): return keras.initializers.VarianceScaling(scale = 2.0)
        raise ValueError('Unknown initializer!')
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def get_loss(cls, loss_name: str) -> keras.losses:
        """Build a keras loss object from its name (regression, probabilistic
        and max-margin classification losses supported)."""
        # regression losses
        if check_name(loss_name, 'Huber'): return keras.losses.Huber()
        if check_name(loss_name, 'LogCosh'): return keras.losses.LogCosh()
        if check_name(loss_name, 'Reduction'): return keras.losses.Reduction()
        if check_name(loss_name, 'CosineSimilarity'): return keras.losses.CosineSimilarity()
        if check_name(loss_name, 'MeanSquaredError'): return keras.losses.MeanSquaredError()
        if check_name(loss_name, 'MeanAbsoluteError'): return keras.losses.MeanAbsoluteError()
        if check_name(loss_name, 'MeanSquaredLogarithmicError'): return keras.losses.MeanSquaredLogarithmicError()
        if check_name(loss_name, 'MeanAbsolutePercentageError'): return keras.losses.MeanAbsolutePercentageError()
        # probabilistic classification losses
        if check_name(loss_name, 'Poisson'): return keras.losses.Poisson()
        if check_name(loss_name, 'KLDivergence'): return keras.losses.KLDivergence()
        if check_name(loss_name, 'BinaryCrossentropy'): return keras.losses.BinaryCrossentropy()
        if check_name(loss_name, 'CategoricalCrossentropy'): return keras.losses.CategoricalCrossentropy()
        if check_name(loss_name, 'SparseCategoricalCrossentropy'): return keras.losses.SparseCategoricalCrossentropy()
        # max margin classification losses
        if check_name(loss_name, 'Hinge'): return keras.losses.Hinge()
        if check_name(loss_name, 'SquaredHinge'): return keras.losses.SquaredHinge()
        if check_name(loss_name, 'CategoricalHinge'): return keras.losses.CategoricalHinge()
        raise ValueError('Unknown loss!')
    # ____________________________________________________________________________________________________________________________________
    def set_early_stopping(self, early_stopping_flag: bool) -> None:
        """Append an EarlyStopping(val_loss, min) callback when the flag is set.

        NOTE(review): appends to the class-level `callbacks` list shared by
        all instances (see class docstring).
        """
        if early_stopping_flag:
            self.callbacks.append(
                keras.callbacks.EarlyStopping(monitor = 'val_loss',
                                              mode = 'min',
                                              verbose = 10,
                                              patience = self.hyper_params['patience'])
            )
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def metrics_report(cls, y_test: np.ndarray, y_pred: np.ndarray) -> Dict[str, object]:
        """Compute a binary-classification confusion-matrix report from
        0/1 label arrays (accuracy, per-class F1, precision/recall, etc.).

        NOTE(review): divisions are not guarded — a class that never occurs
        in y_test or y_pred raises ZeroDivisionError.
        """
        y_test = np.ravel(y_test)
        y_pred = np.ravel(y_pred)
        # for 0/1 labels: TP = both 1; TN = both 0 ((y-1)*(p-1) == 1 only then)
        true_positives = np.sum(y_test * y_pred)
        false_positives = np.sum(np.abs(y_test - 1) * y_pred)
        true_negatives = np.sum((y_test - 1) * (y_pred - 1))
        false_negatives = np.sum(y_test * np.abs(y_pred - 1))
        accuracy = round(
            (true_positives + true_negatives) / (true_positives + true_negatives + false_positives + false_negatives),
            4)
        precision = round(true_positives / (true_positives + false_positives), 4)
        recall = round(true_positives / (true_positives + false_negatives), 4)
        specificity = round(true_negatives / (true_negatives + false_positives), 4)
        npv = round(true_negatives / (true_negatives + false_negatives), 4)
        f1_1 = round(2 * (precision * recall) / (precision + recall), 4)
        f1_0 = round(2 * (specificity * npv) / (specificity + npv), 4)
        f1_macro = round((f1_1 + f1_0) / 2, 4)
        return dict(
            Accuracy = accuracy, f1_macro = f1_macro,
            f1_1 = f1_1, f1_0 = f1_0,
            Precision = precision, Recall = recall,
            Specificity = specificity, npv = npv,
            TP = int(true_positives), FP = int(false_positives), FN = int(false_negatives),
            TN = int(true_negatives),
            y_test_shape = y_test.shape,
            y_pred_shape = y_pred.shape,
            total_samples = true_negatives + true_positives + false_positives + false_negatives,
        )
    # ____________________________________________________________________________________________________________________________________
    # Tensor-level confusion-matrix counts (keras.backend ops, 0/1 labels).
    @classmethod
    def true_positives(cls, y_test, y_pred):
        return K.sum(K.round(K.clip(y_test * y_pred, 0, 1)))
    @classmethod
    def false_positives(cls, y_test, y_pred):
        return K.sum(K.abs(y_test - 1) * y_pred)
    @classmethod
    def false_negatives(cls, y_test, y_pred):
        return K.sum(y_test * K.abs(y_pred - 1))
    @classmethod
    def true_negatives(cls, y_test, y_pred):
        return K.sum((y_test - 1) * (y_pred - 1))
    # ____________________________________________________________________________________________________________________________________
    # Derived tensor metrics; K.epsilon() guards against division by zero.
    @classmethod
    def precision(cls, y_test, y_pred):
        true_positives = K.sum(y_test * y_pred)
        false_positives = K.sum(K.abs(y_test - 1) * y_pred)
        return true_positives / (true_positives + false_positives + K.epsilon())
    @classmethod
    def recall(cls, y_test, y_pred):
        true_positives = K.sum(y_test * y_pred)
        false_negatives = K.sum(y_test * K.abs(y_pred - 1))
        return true_positives / (true_positives + false_negatives + K.epsilon())
    def f1_1(self, y_test, y_pred):
        # F1 of the positive class.
        precision = self.precision(y_test, y_pred)
        recall = self.recall(y_test, y_pred)
        return 2 * (precision * recall) / (precision + recall + K.epsilon())
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def specificity(cls, y_test, y_pred):
        true_negatives = K.sum((y_test - 1) * (y_pred - 1))
        false_positives = K.sum(K.abs(y_test - 1) * y_pred)
        return true_negatives / (true_negatives + false_positives + K.epsilon())
    @classmethod
    def npv(cls, y_test, y_pred):
        true_negatives = K.sum((y_test - 1) * (y_pred - 1))
        false_negatives = K.sum(y_test * K.abs(y_pred - 1))
        return true_negatives / (true_negatives + false_negatives + K.epsilon())
    def f1_0(self, y_test, y_pred):
        # F1 of the negative class (specificity/npv play precision/recall).
        specificity = self.specificity(y_test, y_pred)
        npv = self.npv(y_test, y_pred)
        return 2 * (specificity * npv) / (specificity + npv + K.epsilon())
    # ____________________________________________________________________________________________________________________________________
    def f1_macro(self, y_test, y_pred):
        # Unweighted mean of the two per-class F1 scores.
        f1_1 = self.f1_1(y_test, y_pred)
        f1_0 = self.f1_0(y_test, y_pred)
        return (f1_1 + f1_0) / 2
# ____________________________________________________________________________________________________________________________________
# ____________________________________________________________________________________________________________________________________
# ____________________________________________________________________________________________________________________________________
class BinaryClassificationDL(DL):
    """Binary-classification specialization of DL.

    Packs the raw split data and hyper-parameters into the dict structures
    the DL base class expects, derives class weights from the training
    labels when none are given, and validates/repairs the network's last
    layer so it is a proper binary-classification head.
    """
    def __init__(self,
                 X_train: Union[np.ndarray, pd.DataFrame],
                 y_train: Union[np.ndarray, pd.DataFrame],
                 X_test: Union[np.ndarray, pd.DataFrame],
                 y_test: Union[np.ndarray, pd.DataFrame],
                 class_weight: Dict[int, float] = None,
                 epochs: int = 100,
                 batch_size: int = 100,
                 validation_split = 0.1,
                 learning_rate: float = 0.01,
                 decay = 1e-2,
                 loss_name: str = 'BinaryCrossentropy',
                 initializer_name: str = 'GlorotNormal',
                 optimizer_name = 'Adam',
                 regularizer: str = None,
                 early_stopping_flag: bool = True,
                 patience = 100,
                 layers_list: List[Dict] = None,
                 model_type: str = 'sequential',
                 comet_experiment: comet_ml.BaseExperiment = None,
                 trained_models_dir = None,
                 model_id: str = None
                 ):
        """Bundle the arguments into DL's expected dict structures and delegate.

        Raises
        ------
        AssertionError
            If loss_name is not a valid classification loss.
        """
        hyper_params = dict(
            decay = decay,
            epochs = epochs,
            patience = patience,
            batch_size = batch_size,
            learning_rate = learning_rate,
            validation_split = validation_split,
        )
        hyper_methods_names = dict(
            loss = loss_name,
            optimizer = optimizer_name,
            initializer = initializer_name,
            regularizer = regularizer,
        )
        split_sets = dict(
            X_train = X_train,
            y_train = y_train,
            X_test = X_test,
            y_test = y_test
        )
        split_sets = self.cast_to_numpy(split_sets)
        split_sets_metadata = dict()
        if class_weight is not None:
            split_sets_metadata['class_weight'] = class_weight
        else:
            split_sets_metadata['class_weight'] = self.set_class_weight(y_train)
        # split_sets_metadata['class_balance_test'] = self.set_class_weight(y_test)
        self.binary_classification_assertions(hyper_methods_names = hyper_methods_names)
        # parent constructor
        super(BinaryClassificationDL, self).__init__(split_sets = split_sets,
                                                     split_sets_metadata = split_sets_metadata,
                                                     model_type = model_type,
                                                     layers_list = self.validate_binary_classification_layers(
                                                         layers_list),
                                                     hyper_params = hyper_params,
                                                     hyper_method_names = hyper_methods_names,
                                                     early_stopping_flag = early_stopping_flag,
                                                     comet_experiment = comet_experiment,
                                                     trained_models_dir = trained_models_dir,
                                                     model_id=model_id)
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def cast_to_numpy(cls, split_sets):
        """Convert any pandas values in split_sets to numpy arrays, in place.

        Bug fix: the original called .to_numpy() but discarded the result,
        so DataFrames were passed through unchanged. Series are now handled
        too (backward-compatible generalization).
        """
        for key, value in split_sets.items():
            if isinstance(value, (pd.DataFrame, pd.Series)):
                split_sets[key] = value.to_numpy()
        return split_sets
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def set_class_weight(cls, y: np.ndarray) -> Dict[int, float]:
        """Derive class weights that up-weight the MINORITY class.

        Bug fix: the original assigned the count ratio to the majority
        class, which amplified the imbalance instead of compensating it.

        Raises
        ------
        ZeroDivisionError
            If either class is absent from y (unchanged from the original).
        """
        ones_count = np.count_nonzero(y)
        zero_count = y.shape[0] - ones_count
        if ones_count > zero_count:
            # class 0 is the minority: give it the larger weight
            class_weight = {0: ones_count / zero_count,
                            1: 1.0}
        else:
            class_weight = {0: 1.0,
                            1: zero_count / ones_count}
        return class_weight
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def binary_classification_assertions(cls, hyper_methods_names):
        """Sanity-check that the configured loss is a classification loss."""
        assert hyper_methods_names[
                   'loss'] in VALID_CLASSIFICATION_LOSSES, "Passed loss is not intended for classification!"
    # ____________________________________________________________________________________________________________________________________
    @classmethod
    def validate_binary_classification_layers(cls, layers_list) -> List:
        """Ensure the last layer is a valid binary-classification head.

        Appends a 1-unit sigmoid layer when the last layer is too wide or
        has a non-classification activation, and fixes mismatched
        units/activation combinations.

        Bug fixes: the 2-units/sigmoid and 1-unit/softmax branches warned
        that the activation would be changed but never changed it — they
        now do; a mojibake character in the last warning text is repaired.
        """
        if layers_list[-1]['units'] > 2:
            warnings.warn('Binary classifier ends with a layer with more than 2 units.'
                          'Appending a sigmoid classification layer as a last layer of the neural network!')
            layers_list.append({'units': 1, 'type': 'dense', 'activation': 'sigmoid'})
        if layers_list[-1]['units'] <= 2 and check_name(layers_list[-1]['activation'], 'relu'):
            warnings.warn(
                'Binary classifier ends with a layer no more than 2 units but does not have appropriate classification activation function'
                'Appending a sigmoid classification layer as a last layer of the neural network!')
            layers_list.append({'units': 1, 'type': 'dense', 'activation': 'sigmoid'})
        if layers_list[-1]['units'] == 2 and check_name(layers_list[-1]['activation'], 'sigmoid'):
            warnings.warn('Binary classifier ends with a layer with 2 units but a sigmoid activation function.'
                          'Changing the activation function to softmax!')
            layers_list[-1]['activation'] = 'softmax'
        if layers_list[-1]['units'] == 1 and check_name(layers_list[-1]['activation'], 'softmax'):
            warnings.warn('Binary classifier ends with a layer with 1 unit but a softmax activation function.'
                          'Changing the activation function to sigmoid!')
            layers_list[-1]['activation'] = 'sigmoid'
        return layers_list
|
|
"""Window pairs of sequences"""
__author__ = 'thor'
from numpy import *
import numpy as np
from itertools import product
from collections import defaultdict, Counter
DEBUG_LEVEL = 0
def wp_iter_with_sliding_discrete_step(data_range,  # length of the interval we'll retrieve the windows from
                                       x_range=1,  # length of the x window
                                       y_range=1,  # length of the y window
                                       offset=0  # the offset from the end of the x window (0 when y window starts immediately after)
                                       ):
    """
    Returns a SLIDING DISCRETE STEP "window pair iterator".
    A "window pair iterator" is an iterator which yields a 4-tuple that are indices of two sliding windows.
    Usage:
        wp_iter_with_sliding_discrete_step(data_range, # length of the interval we'll retrieve the windows from
                                           x_range=1, # length of the x window
                                           y_range=1, # length of the y window
                                           offset=0 # the offset from the end of the x window (0 when y window starts immediately after)
                                           ):
    Example:
    >>> from ut.iacc.seq.window_pairs import wp_iter_with_sliding_discrete_step
    >>> from numpy import all, array
    >>> result = list(wp_iter_with_sliding_discrete_step(data_range=10, x_range=2, y_range=3, offset=1))
    >>> expected = [
    ...     array([0, 2, 3, 6]),
    ...     array([1, 3, 4, 7]),
    ...     array([2, 4, 5, 8]),
    ...     array([3, 5, 6, 9])]
    >>> assert all(array(result) == array(expected)), "result NOT as expected"
    """
    # input validation
    assert offset >= -x_range, "offset cannot be smaller than -x_spec['range'] (y-window can't start before x-window)"
    # number of sliding steps that keep both windows inside the data range
    n_steps = data_range - np.max([x_range, x_range + offset + y_range])
    # [x_start, x_end, y_start, y_end] of the first window pair; each step
    # shifts all four indices by one
    base_window_idx = np.array([0,
                                x_range,
                                x_range + offset,
                                x_range + offset + y_range])
    # Bug fix (PEP 479): the original raised StopIteration() both for the
    # empty case and after the loop; inside a generator that is converted to
    # RuntimeError on Python 3.7+, so every complete iteration crashed.
    # A generator signals exhaustion by simply returning.  max(..., 0) also
    # handles windows larger than data_range (n_steps < 0) gracefully.
    for step in range(max(n_steps, 0)):
        yield base_window_idx + step
# Indices into wp_key (for wp_iter_slide_to_next_event function)
# PAST_HORIZON_TIME = 0
# PAST_HORIZON_IDX = 1
# PRESENT_TIME = 2
# PRESENT_IDX = 3
# FUTURE_HORIZON_TIME = 4
# FUTURE_HORIZON_IDX = 5
def slow_past_present_future_idx_and_duration_iter(timestamp_seq,
                                                   past_range,
                                                   future_range=None,
                                                   timestamps_are_sorted=True):
    """Reference (slow) iterator over sliding past/future window states.

    Slides a (past, future) window pair over the sorted timestamps, advancing
    each step by the smallest shift that moves some event across one of the
    three boundaries (past horizon, present, future horizon).  Yields
    ((first_idx_after_past_horizon, last_idx_at_or_before_present,
      last_idx_at_or_before_future_horizon), shift, present_ts) tuples.

    NOTE(review): each boundary lookup indexes [...][0] on a boolean-filtered
    array — if no timestamp lies beyond a boundary this raises IndexError;
    the `while _future_ts < max_timestamp` guard appears to prevent that for
    the future boundary but not obviously for the other two — confirm.
    """
    if not timestamps_are_sorted:
        timestamp_seq = sorted(timestamp_seq)
    timestamp_seq = array(timestamp_seq)
    # future window defaults to the same length as the past window
    if future_range is None:
        future_range = past_range
    # initial boundaries: past starts at the first timestamp
    _past_ts = timestamp_seq[0]
    _present_ts = _past_ts + past_range
    _future_ts = _present_ts + future_range
    max_timestamp = timestamp_seq[-1]
    while _future_ts < max_timestamp:
        # boolean masks of events strictly beyond each boundary
        gte_past_lidx = timestamp_seq > _past_ts
        gte_present_lidx = timestamp_seq > _present_ts
        gte_future_lidx = timestamp_seq > _future_ts
        # figure out next shift: the smallest distance from any boundary to
        # the next event beyond it
        _shift = timestamp_seq[gte_past_lidx][0] - _past_ts
        next_present_distance = timestamp_seq[gte_present_lidx][0] - _present_ts
        if next_present_distance < _shift:
            _shift = next_present_distance
        next_future_distance = timestamp_seq[gte_future_lidx][0] - _future_ts
        if next_future_distance < _shift:
            _shift = next_future_distance
        # yield the indices triple, duration, and _present_ts
        yield ((where(timestamp_seq > _past_ts)[0][0],
                where(timestamp_seq <= _present_ts)[0][-1],
                where(timestamp_seq <= _future_ts)[0][-1]),
               _shift,
               _present_ts)
        # shift past, present and future
        _past_ts += _shift
        _present_ts += _shift
        _future_ts += _shift
def _idx_and_duration(timestamp_seq, timestamps_are_sorted=False):
"""
returns a pair (idx, dur) where
* idx is the first idx (into timestamp_seq) of every subsequence of equal values, and
* dur is the duration of this subsequence (i.e. the time until the first different timestamp value
Note: It is assumed (but not verified, for speed) that the sequence timestamp_seq of timestamps are ORDERED.
"""
if not timestamps_are_sorted:
timestamp_seq = sorted(timestamp_seq)
idx = defaultdict(list)
idx[0] = [0]
idx_i = 0
dur = []
unik_timestamps = [timestamp_seq[0]]
cumulating_zero_durs = False
for i in range(1, len(timestamp_seq)):
_dur = timestamp_seq[i] - timestamp_seq[i-1]
if _dur == 0:
if not cumulating_zero_durs:
cumulating_zero_durs = True
idx[idx_i].append(i)
else:
dur.append(_dur)
idx_i += 1
idx[idx_i].append(i)
unik_timestamps.append(timestamp_seq[i])
cumulating_zero_durs = False
return dict(idx), dur, array(unik_timestamps)
# Symbolic positions within the (past, present, future) triples yielded by
# the iterators in this module.
_past = 0
_present = 1
_future = 2
# Symbolic positions within (idx, time_to_next) pairs.
_idx = 0
_time_to_next = 1
def past_present_future_idx_and_duration_iter(timestamp_seq,
                                              past_range,
                                              future_range=None,
                                              timestamps_are_sorted=False):
    """
    A window pairs iterator that slides the (past,future) windows (through a sequence of events whose timestamps
    are given by the input timestamp_seq) capturing the times when an event enters or leaves the windows
    (thus allowing to extract all pairs of possible states in cases where states are completely defined by
    the subsequence of events in the window, not their actual timestamps).
    More precisely, the (past,future) windows are indexed by the triple (past_horizon, present, future_horizon) where
    past_horizon is the beginning of past (inclusive)
    present is both the end of past (non inclusive) and the beginning of future (inclusive)
    future_horizon is the end of future (non inclusive)
    past_horizon      past      present      future      future_horizon
    [--------------------------[------------------------------[
    The first XY window is set so that past_horizon is at min_timestamp (defaulted to the lowest date in df.
    Then, at any point, the next window is chosen such that either of these conditions hold:
    (1) Some event leaves past (meaning event_date < past_horizon
    (2) Some event enters past (equivalent to leaving future) (meaning event_date < present)
    (3) Some event enters future (meaning event_date < future_horizon)
    (4) future_horizon reaches max_timestamp
    min_timestamp and max_timestamp are defaulted to the min and max date of timestamp_seq.
    The reason for being able to specify min_timestamp and max_timestamp is that the data of df might come from a
    set of event sequences that have a specific observation range, and we'd like to take into account the
    "no event" cases.
    The iterator yields a pair (ppf_idx, duration) where ppf = (past_horizon_idx, present_idx, future_horizon_idx)
    are indices of timestamp_seq and duration is the amount of time between the window pair associated to
    this pair and the next window pair.
    Note: With abuse of notation,
        past_horizon <= past < present <= future < future_horizon
    The output_present_timestamp==True option yields triples (ppf_idx, duration, present_timestamp) providing
    additionally the present_timestamp information (which is the timestamp of the present point that is used by the
    double window.
    Implementation details: The generator maintains a state, which is a 3x2 matrix where rows index past/present/future,
    and columns index _idx (an index to the last unique values of timestamps_seq) and _time_to_next (which indicates
    how far (in time units) the past/present/future point is to the next data point (a timestamp). The generator also
    maintains time_argmin which is the row index of the smallest _time_to_next.
    The algorithm iteratively updates the state and _time_to_next in order to get the tuples it generates.
    Below are two (doctest) examples:
    >>> from numpy import *
    >>> from matplotlib.cbook import flatten
    >>> timestamp_seq = [0, 5, 6, 15]
    >>> result = list(past_present_future_idx_and_duration_iter(timestamp_seq, 4))
    >>> expected = array([\
    ((1, 0, 2), 1, 4), \
    ((1, 1, 2), 1, 5), \
    ((1, 2, 2), 3, 6), \
    ((2, 2, 2), 1, 9), \
    ((3, 2, 2), 1, 10)])
    >>> all(array(list(flatten(result))) == array(list(flatten(expected))))
    True
    >>> timestamp_seq = [ 1,  3,  4,  4,  4,  5,  7,  7,  9, 13, 13, 15, 20]
    >>> result = list(past_present_future_idx_and_duration_iter(timestamp_seq, 3.5))
    >>> expected = array([[(1, 4, 7), 0.5, 4.5], [(1, 5, 7), 0.5, 5.0], [(1, 5, 8), 1.0, 5.5], [(2, 5, 8), 0.5, 6.5], \
    [(2, 7, 8), 0.5, 7.0], [(5, 7, 8), 1.0, 7.5], [(6, 7, 8), 0.5, 8.5], \
    [(6, 8, 8), 0.5, 9.0], [(6, 8, 10), 1.0, 9.5], [(8, 8, 10), 1.0, 10.5], \
    [(8, 8, 11), 1.0, 11.5], [(9, 8, 11), 0.5, 12.5], [(9, 10, 11), 2.0, 13.0], \
    [(9, 11, 11), 1.5, 15.0]])
    >>> all(array(list(flatten(result))) == array(list(flatten(expected))))
    True
    >>> window_size = 5
    >>> timestamp_seq = cumsum(random.randint(low=0, high=window_size * 2, size=100))
    >>> result = list(past_present_future_idx_and_duration_iter(timestamp_seq, window_size))
    >>> expected = list(slow_past_present_future_idx_and_duration_iter(timestamp_seq, window_size))
    >>> all(array(list(flatten(result))) == array(list(flatten(expected))))
    True
    """
    if future_range is None:
        future_range = past_range
    # NOTE(review): ppf_idx / time_to_next are deliberately kept as module
    # globals (as in the original); this makes the generator non-reentrant.
    # Confirm nothing reads the module-level names before making them locals.
    global ppf_idx
    ppf_idx = zeros(3).astype(int)
    global time_to_next
    time_to_next = zeros(3).astype(float)
    # Collapse equal timestamps into runs: idx maps unique-timestamp index ->
    # original indices; dur holds the gaps between consecutive unique values.
    idx, dur, timestamp_seq = _idx_and_duration(timestamp_seq, timestamps_are_sorted)
    dur = hstack((dur, 0))  # sentinel 0 so dur[last] is always defined
    if DEBUG_LEVEL:
        print(("idx={}\ndur={}\ntimestamp_seq={}".format(idx, dur, timestamp_seq)))
    first_timestamp = timestamp_seq[0]
    n = len(timestamp_seq) - 1  # last valid index into the unique timestamps

    def _init_state():
        """Place past at the first unique timestamp, present past_range later,
        future future_range after present; record each pointer's delay to the
        next unique timestamp. Returns (present_timestamp, argsort of delays)."""
        present_timestamp = first_timestamp + past_range
        ppf_idx[_past] = 1
        time_to_next[_past] = dur[0]
        ppf_idx[_present] = where(timestamp_seq <= first_timestamp + past_range)[0][-1]
        time_to_next[_present] = \
            timestamp_seq[ppf_idx[_present] + 1] - first_timestamp - past_range
        ppf_idx[_future] = where(timestamp_seq <= first_timestamp + past_range + future_range)[0][-1]
        time_to_next[_future] = \
            timestamp_seq[ppf_idx[_future] + 1] - first_timestamp - past_range - future_range
        if DEBUG_LEVEL:
            print(("time_to_next={}".format(time_to_next)))
        return present_timestamp, time_to_next.argsort()

    def _shift_dimension(this_idx, shift_by):
        """ Shift a single dimension, updating _idx and _time_to_next"""
        if time_to_next[this_idx] <= shift_by:  # if the next smallest item is the same (<= for float imprecision)
            ppf_idx[this_idx] += 1
            if ppf_idx[this_idx] >= n + 1:
                return None  # should be "caught" by caller: Means "no more further states"
            else:
                # The past pointer (index 0) advances to the start of the next
                # run, so its delay is the *previous* gap; present/future use
                # the gap at their new position.
                if this_idx != 0:
                    time_to_next[this_idx] = dur[ppf_idx[this_idx]]
                    if DEBUG_LEVEL:
                        print(("ppf_idx={}".format(ppf_idx)))
                        print(("time_to_next[{}] = dur[{}] = {}".format(
                            this_idx, ppf_idx[this_idx], dur[ppf_idx[this_idx]])))
                else:
                    time_to_next[this_idx] = dur[ppf_idx[this_idx] - 1]
                    if DEBUG_LEVEL:
                        print(("--ppf_idx={}".format(ppf_idx)))
                        print(("--time_to_next[{}] = dur[{}] = {}".format(
                            this_idx, ppf_idx[this_idx] - 1, dur[ppf_idx[this_idx] - 1])))
        else:
            time_to_next[this_idx] -= shift_by
        return True

    def _shift_state(time_to_next_order, present_timestamp):
        """Advance all three pointers by the smallest pending delay.
        Returns (new argsort of delays, new present_timestamp), or (None, None)
        when any pointer runs off the end of the timestamps."""
        shift_by = time_to_next[time_to_next_order[0]]
        if DEBUG_LEVEL:
            print(('---> shifting by {}'.format(shift_by)))
        present_timestamp += shift_by
        if _shift_dimension(time_to_next_order[0], shift_by) is None:  # shift smallest dimension...
            return None, None  # ... and return None if we're at the end.
        elif _shift_dimension(time_to_next_order[1], shift_by) is None:  # shift next smallest dimension...
            return None, None  # ... and return None if we're at the end.
        elif _shift_dimension(time_to_next_order[2], shift_by) is None:  # shift next smallest dimension...
            return None, None  # ... and return None if we're at the end.
        return time_to_next.argsort(), present_timestamp  # if you got this far, return the new dimension order

    _present_timestamp, _time_to_next_order = _init_state()
    while True:
        if DEBUG_LEVEL:
            print("---------------")
            print(("ppf_idx: {}".format(ppf_idx)))
            print(("time_to_next: {}".format(time_to_next)))
            print(("time_to_next_order: {}".format(_time_to_next_order)))
            print(("present_timestamp: {}".format(_present_timestamp)))
        _duration = time_to_next[_time_to_next_order[0]]
        # Zero-duration states are transient (immediately superseded): skip them.
        if _duration != 0:
            yield (idx[ppf_idx[_past]][0], idx[ppf_idx[_present]][-1], idx[ppf_idx[_future]][-1]), \
                _duration, \
                _present_timestamp
        _time_to_next_order, _present_timestamp = _shift_state(_time_to_next_order, _present_timestamp)
        if _time_to_next_order is None:
            # BUG FIX: the original used `raise StopIteration` here; under
            # PEP 479 (Python 3.7+) that surfaces as a RuntimeError instead of
            # ending the generator. `return` is the correct way to finish.
            return
class FeaturePairFactory(object):
    """Produces (past-features, future-features) pairs from timestamped data.

    Slides a (past, future) double window over a pandas structure and applies
    the configured feature functions to each side of the window.
    """

    def __init__(self,
                 past_feat_func,
                 past_range,
                 future_feat_func=None,
                 future_range=None,
                 min_timestamp=None,
                 max_timestamp=None,
                 data_is_sorted=False,
                 timestamp_field='timestamp'):
        """
        :param past_feat_func: function mapping the past-window data to features
        :param past_range: width (in time units) of the past window
        :param future_feat_func: like past_feat_func for the future window
            (defaults to past_feat_func)
        :param future_range: width of the future window (defaults to past_range)
        :param min_timestamp: lower bound (inclusive) of the observation range
        :param max_timestamp: upper bound (exclusive) of the observation range
        :param data_is_sorted: set True to skip sorting by timestamp
        :param timestamp_field: column holding timestamps, or 'index' to use
            the frame's index
        """
        self.past_feat_func = past_feat_func
        self.future_feat_func = past_feat_func if future_feat_func is None else future_feat_func
        self.past_range = past_range
        self.future_range = past_range if future_range is None else future_range
        self.min_timestamp = min_timestamp
        self.max_timestamp = max_timestamp
        self.data_is_sorted = data_is_sorted
        if timestamp_field == 'index':
            self.get_timestamps = lambda data: data.index.values
            self.sort_data_according_to_timestamps = lambda data: data.sort_index()
        else:
            self.get_timestamps = lambda data: data[timestamp_field]
            self.sort_data_according_to_timestamps = lambda data: data.sort_values(timestamp_field)

    def _data_in_date_range(self, data):
        """Restrict data to [min_timestamp, max_timestamp)."""
        timestamps = self.get_timestamps(data)
        in_range = array(timestamps >= self.min_timestamp) \
            & array(timestamps < self.max_timestamp)
        return data[in_range]

    def _get_data_iterator(self, data):
        """Yield (past_data, future_data, duration, present_timestamp) tuples."""
        if not self.data_is_sorted:
            data = self.sort_data_according_to_timestamps(data)
        data = self._data_in_date_range(data)
        # Pad with the observation bounds so "no event" stretches are covered.
        padded_timestamps = (
            [self.min_timestamp] + self.get_timestamps(data).tolist() + [self.max_timestamp])
        windows = past_present_future_idx_and_duration_iter(
            timestamp_seq=padded_timestamps,
            past_range=self.past_range,
            future_range=self.future_range
        )
        for (past_i, present_i, future_i), duration, present_ts in windows:
            yield data.iloc[past_i:present_i], data.iloc[present_i:future_i], duration, present_ts

    def feature_pair_and_duration_iter(self, data):
        """Yield dicts with past/future features, the state duration, and the
        present timestamp, one per double-window position."""
        for past_data, future_data, duration, present_ts in self._get_data_iterator(data):
            yield {'past': self.past_feat_func(past_data),
                   'future': self.future_feat_func(future_data),
                   'duration': duration,
                   'present_timestamp': present_ts}
def _event_exists(arr):
return int(any(arr))
def _columnwise_event_exists(df):
return df.apply(_event_exists)
def extract_series(df, # data to extract from
window_iterator=None, # window iterator
x_extractor=_columnwise_event_exists, # function to apply to the windowed df to get x
y_extractor=_columnwise_event_exists # function to apply to the windowed df to get y
):
"""
"""
# combine the extractors
def _extractor(window_idx):
return (x_extractor(df.iloc[window_idx[0]:window_idx[1]]),
y_extractor(df.iloc[window_idx[2]:window_idx[3]]))
# get a default window_iterator if none given
if window_iterator is None:
window_iterator = wp_iter_with_sliding_discrete_step(data_range=len(df))
# return the extractor iterator
return map(_extractor, window_iterator)
def agg_counts(pairs_of_series_iter):
    """Accumulate co-occurrence counts over (past, future) series pairs.

    For every pair of series and every cross-product of their (key, value)
    items, counts how often the value pair (past_val, future_val) occurs for
    the key pair (past_key, future_key). Returns a defaultdict mapping
    (past_key, future_key) -> Counter of (past_val, future_val).
    """
    accum = defaultdict(Counter)
    for past_series, future_series in pairs_of_series_iter:
        past_items = list(past_series.to_dict().items())
        future_items = list(future_series.to_dict().items())
        for (p_key, p_val), (f_key, f_val) in product(past_items, future_items):
            accum[(p_key, f_key)][(p_val, f_val)] += 1
    return accum
|
|
# -*- coding: utf-8 -*-
"""Untitled54.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1PX3b2zRt-Q3D2ia5NghD8bvSMqKZRTWf
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
# UCB (Upper Confidence Bound) bandit run over the credit-card dataset:
# each of the d=28 anonymised feature columns (V1..V28) is treated as an arm
# and each row as one round.
df = pd.read_csv("creditcard.csv")
df = df.drop(columns=['Time', 'Class', 'Amount'])
df = df.dropna(how='any')

N = int(len(df))  # number of transactions made (rounds)
d = 28            # number of customers (arms / feature columns)
CT = []                    # arm chosen at each round
NOT = [0] * d              # number of times each arm was selected
sums_of_rewards = [0] * d  # cumulative reward per arm
total_reward = 0

for n in range(0, N):
    # Pick the arm with the highest upper confidence bound; untried arms get
    # an infinite bound so each is explored at least once.
    chosen = 0
    max_upper_bound = 0
    for i in range(0, d):
        if (NOT[i] > 0):  # following the formulae for exploiting the best ad during exploration of the ads in a short duration
            average_reward = sums_of_rewards[i] / NOT[i]
            delta_i = math.sqrt(3/2 * math.log(n + 1) / NOT[i])
            upper_bound = average_reward + delta_i
        else:
            upper_bound = 1e400  # effectively +inf: forces initial exploration
        if upper_bound > max_upper_bound:
            max_upper_bound = upper_bound
            chosen = i
    # BUG FIX: the original recorded the selection in `ad` but then updated
    # index `t`, which was reset to 0 each round -- so only arm 0 was ever
    # played. All bookkeeping now uses the arm actually chosen above.
    CT.append(chosen)
    NOT[chosen] = NOT[chosen] + 1
    reward = df.values[n, chosen]
    sums_of_rewards[chosen] = sums_of_rewards[chosen] + reward
    total_reward = total_reward + reward
"""Based on the below plot we can identify whether a person is a credit card fraud or not"""
# BUG FIX: the original called plt.hist(ads_selected), an undefined name
# (NameError at runtime); the selections are stored in CT.
plt.hist(CT)
plt.title('Histogram of Transactions made by customer')
plt.xlabel('Customers with active credit cards')
plt.ylabel('Number of times a transaction was made')
plt.show()
|
|
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from src.swhe import SWHE
def plot():
    """Plot SWHE outlet temperature vs. inlet temperature for three constant
    mass flow rates (plus the inlet and surface-water reference lines) and
    save the figure next to this module."""
    data = {
        "pipe": {
            "outer-dia": 0.02667,
            "inner-dia": 0.0215392,
            "length": 100,
            "density": 950,
            "conductivity": 0.4
        },
        "fluid": {
            "fluid-name": "PG",
            "concentration": 20
        },
        "diameter": 1.2,
        "horizontal-spacing": 0.05,
        "vertical-spacing": 0.05,
    }
    swhe = SWHE(data)
    inlet_temps = np.arange(1, 40, 0.1)
    sw_temp = 20
    # One outlet-temperature curve per mass flow rate (kg/s).
    outlet = {mdot: [swhe.simulate(mdot, t_in, sw_temp) for t_in in inlet_temps]
              for mdot in (0.10, 0.50, 1.00)}
    fig, ax = plt.subplots()
    ax.plot(inlet_temps, inlet_temps, label=r"$T_{in}$")
    ax.plot(inlet_temps, outlet[0.10], label=r"$T_{out}$ $\dot{m}=0.10$ kg/s")
    ax.plot(inlet_temps, outlet[0.50], label=r"$T_{out}$ $\dot{m}=0.50$ kg/s", linestyle="--")
    ax.plot(inlet_temps, outlet[1.00], label=r"$T_{out}$ $\dot{m}=1.00$ kg/s", linestyle=":")
    ax.plot([1, 40], [sw_temp, sw_temp], label=r"$T_{sw}$")
    ax.set_xlabel(r"$T$ [C]")
    ax.set_ylabel(r"$T$ [C]")
    ax.legend()
    ax.grid()
    out_file = Path(__file__).parent / "_outlet_temp_const_mdot.png"
    plt.savefig(out_file, bbox_inches="tight")
# Allow running this module directly as a script.
if __name__ == "__main__":
    plot()
|
|
from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class DefaultVideoClassificationTemplate(DSBoxTemplate):
    """DSBox pipeline template for multiclass video classification.

    Pipeline: dataset -> dataframe -> profiler -> (file column | target column)
    -> video reader -> InceptionV3 per-frame features -> LSTM classifier.
    """
    def __init__(self):
        DSBoxTemplate.__init__(self)
        # Declarative template consumed by the DSBox template engine; each step
        # names its primitive(s), optional hyperparameter grids, and inputs.
        self.template = {
            "name": "DefaultVideoClassificationTemplate",
            "taskType": TaskKeyword.CLASSIFICATION.name,
            "taskSubtype": TaskKeyword.MULTICLASS.name,
            "inputType": "video",
            "output": "model_step",  # Name of the final step generating the prediction
            "target": "extract_target_step",  # Name of the step generating the ground truth
            "steps": [
                {
                    "name": "to_dataframe_step",#step 1
                    "primitives": ["d3m.primitives.data_transformation.dataset_to_dataframe.Common"],
                    "inputs": ["template_input"]
                },
                {
                    "name": "common_profiler_step",
                    "primitives": ["d3m.primitives.schema_discovery.profiler.Common"],
                    "inputs": ["to_dataframe_step"]
                },
                # read X value
                {
                    "name": "extract_file_step",#step 2
                    "primitives": [{
                        "primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
                        "hyperparameters":
                            {
                                'semantic_types': (
                                    'https://metadata.datadrivendiscovery.org/types/PrimaryKey',
                                    'https://metadata.datadrivendiscovery.org/types/FileName',),
                                'use_columns': (),
                                'exclude_columns': ()
                            }
                    }],
                    "inputs": ["common_profiler_step"]
                },
                # read the ground-truth target column
                {
                    "name": "extract_target_step",# step 3
                    "primitives": [{
                        "primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
                        "hyperparameters":
                            {
                                'semantic_types': (
                                    'https://metadata.datadrivendiscovery.org/types/TrueTarget',),
                                'use_columns': (),
                                'exclude_columns': ()
                            }
                    }],
                    "inputs": ["common_profiler_step"]
                },
                # load video frames from the referenced files
                {
                    "name": "video_reader",#step 4
                    "primitives": ["d3m.primitives.data_preprocessing.video_reader.Common"],
                    "inputs": ["extract_file_step"]
                },
                # per-frame feature extraction with InceptionV3
                {
                    "name": "video_feature_extract",#step 5
                    "primitives": [
                        {
                            "primitive": "d3m.primitives.feature_extraction.inceptionV3_image_feature.DSBOX",
                            "hyperparameters": {
                                "use_limitation":[(True)],
                            }
                        }
                    ],
                    "inputs": ["video_reader"]
                },
                # sequence classifier over the extracted frame features;
                # the epochs list is a tuning grid explored by the engine
                {
                    "name": "model_step",  # step 6
                    "primitives": [
                        {
                            "primitive": "d3m.primitives.classification.lstm.DSBOX",
                            "hyperparameters": {
                                "LSTM_units":[2048],
                                "epochs":[100, 500, 1000],
                            }
                        }
                    ],
                    "inputs": ["video_feature_extract", "extract_target_step"]
                },
            ]
        }
################################################################################################################
##################################### TimeSeriesProblemsTemplates ##########################################
################################################################################################################
|
|
import os
import numpy as np
from sympy.abc import x as symbolic_x
from sympy.abc import y as symbolic_y
from .linearfilter import SpatioTemporalFilter
from .spatialfilter import GaussianSpatialFilter
from .temporalfilter import TemporalFilterCosineBump
from .movie import Movie
from .lgnmodel1 import LGNModel, heat_plot
from .transferfunction import MultiTransferFunction, ScalarTransferFunction
from .lnunit import LNUnit, MultiLNUnit
class OnUnit(LNUnit):
    """Linear-nonlinear unit whose linear filter has positive amplitude (ON response)."""
    def __init__(self, linear_filter, transfer_function):
        # ON units are defined by a positive-amplitude linear filter.
        assert linear_filter.amplitude > 0
        super(OnUnit, self).__init__(linear_filter, transfer_function)
class OffUnit(LNUnit):
    """Linear-nonlinear unit whose linear filter has negative amplitude (OFF response)."""
    def __init__(self, linear_filter, transfer_function):
        # OFF units are defined by a negative-amplitude linear filter.
        assert linear_filter.amplitude < 0
        super(OffUnit, self).__init__(linear_filter, transfer_function)
class LGNOnOffCell(MultiLNUnit):
    """A cell model for a OnOff cell"""
    def __init__(self, on_filter, off_filter,
                 transfer_function=MultiTransferFunction((symbolic_x, symbolic_y),
                                                         'Heaviside(x)*(x)+Heaviside(y)*(y)')):
        """Combine an ON and an OFF subunit into a single two-input cell.

        :param on_filter: positive-amplitude spatiotemporal filter for the ON subunit
        :param off_filter: negative-amplitude spatiotemporal filter for the OFF subunit
        :param transfer_function: two-argument transfer function combining the
            subunit outputs (defaults to rectifying and summing both)
        """
        self.on_filter = on_filter
        self.off_filter = off_filter
        # Each subunit passes its linear response through the identity ('s');
        # the nonlinearity is applied by the combined transfer_function.
        self.on_unit = OnUnit(self.on_filter, ScalarTransferFunction('s'))
        self.off_unit = OffUnit(self.off_filter, ScalarTransferFunction('s'))
        super(LGNOnOffCell, self).__init__([self.on_unit, self.off_unit], transfer_function)
class TwoSubfieldLinearCell(MultiLNUnit):
    """Cell with a dominant and a non-dominant subfield, offset from each other
    by subfield_separation along the onoff_axis_angle direction."""
    def __init__(self, dominant_filter, nondominant_filter, subfield_separation=10, onoff_axis_angle=45,
                 dominant_subfield_location=(30, 40),
                 transfer_function=MultiTransferFunction((symbolic_x, symbolic_y),
                                                         'Heaviside(x)*(x)+Heaviside(y)*(y)')):
        self.subfield_separation = subfield_separation
        self.onoff_axis_angle = onoff_axis_angle
        self.dominant_subfield_location = dominant_subfield_location
        self.dominant_filter = dominant_filter
        self.nondominant_filter = nondominant_filter
        self.transfer_function = transfer_function
        self.dominant_unit = LNUnit(self.dominant_filter, ScalarTransferFunction('s'),
                                    amplitude=self.dominant_filter.amplitude)
        # NOTE(review): the non-dominant unit also uses the *dominant* filter's
        # amplitude (nondominant_filter.amplitude is never read) -- confirm
        # this is intentional and not a copy-paste slip.
        self.nondominant_unit = LNUnit(self.nondominant_filter, ScalarTransferFunction('s'),
                                       amplitude=self.dominant_filter.amplitude)
        super(TwoSubfieldLinearCell, self).__init__([self.dominant_unit, self.nondominant_unit],
                                                    self.transfer_function)
        # Place the dominant subfield, then offset the non-dominant one by
        # subfield_separation along the onoff axis (angle in degrees).
        self.dominant_filter.spatial_filter.translate = self.dominant_subfield_location
        hor_offset = np.cos(self.onoff_axis_angle*np.pi/180.)*self.subfield_separation + self.dominant_subfield_location[0]
        vert_offset = np.sin(self.onoff_axis_angle*np.pi/180.)*self.subfield_separation + self.dominant_subfield_location[1]
        rel_translation = (hor_offset, vert_offset)
        self.nondominant_filter.spatial_filter.translate = rel_translation
"""
class LGNOnCell(OnUnit):
def __init__(self, **kwargs):
self.position = kwargs.pop('position', None)
self.weights = kwargs.pop('weights', None)
self.kpeaks = kwargs.pop('kpeaks', None)
self.delays = kwargs.pop('delays', None)
self.amplitude = kwargs.pop('amplitude', None)
self.sigma = kwargs.pop('sigma', None)
self.transfer_function_str = kwargs.pop('transfer_function_str', 's') # 'Heaviside(s)*s')
self.metadata = kwargs.pop('metadata', {})
temporal_filter = TemporalFilterCosineBump(self.weights, self.kpeaks, self.delays)
spatial_filter = GaussianSpatialFilter(translate=self.position, sigma=self.sigma,
origin=(0, 0)) # all distances measured from BOTTOM LEFT
spatiotemporal_filter = SpatioTemporalFilter(spatial_filter, temporal_filter, amplitude=self.amplitude)
transfer_function = ScalarTransferFunction(self.transfer_function_str)
self.unit = OnUnit(spatiotemporal_filter, transfer_function)
class LGNOffCell(OffUnit):
def __init__(self, **kwargs):
lattice_unit_center = kwargs.pop('lattice_unit_center', None)
weights = kwargs.pop('weights', None)
kpeaks = kwargs.pop('kpeaks', None)
amplitude = kwargs.pop('amplitude', None)
sigma = kwargs.pop('sigma', None)
width = kwargs.pop('width', 5)
transfer_function_str = kwargs.pop('transfer_function_str', 'Heaviside(s)*s')
dxi = np.random.uniform(-width*1./2, width*1./2)
dyi = np.random.uniform(-width*1./2, width*1./2)
temporal_filter = TemporalFilterCosineBump(weights, kpeaks)
spatial_filter = GaussianSpatialFilter(translate=(dxi, dyi), sigma=sigma, origin=lattice_unit_center) # all distances measured from BOTTOM LEFT
spatiotemporal_filter = SpatioTemporalFilter(spatial_filter, temporal_filter, amplitude=amplitude)
transfer_function = ScalarTransferFunction(transfer_function_str)
super(LGNOnCell, self).__init__(spatiotemporal_filter, transfer_function)
"""
if __name__ == "__main__":
    # Demo/smoke-test: evaluate a 5x5 grid of On/Off cells over a sample movie.
    # NOTE(review): hard-coded absolute data path -- runs only on hosts that
    # mount /data/mat/iSee_temp_shared.
    movie_file = '/data/mat/iSee_temp_shared/movies/TouchOfEvil.npy'
    m_data = np.load(movie_file, 'r')  # 'r' -> memory-mapped, read-only
    m = Movie(m_data[1000:], frame_rate=30.)
    # Create second cell:
    transfer_function = ScalarTransferFunction('s')
    temporal_filter = TemporalFilterCosineBump((0.4, -0.3), (20, 60))
    cell_list = []
    # Receptive-field centres on a 5x5 grid spanning the frame (x = width axis,
    # y = height axis of the movie data).
    for xi in np.linspace(0, m.data.shape[2], 5):
        for yi in np.linspace(0, m.data.shape[1], 5):
            # ON centre (narrow, positive) paired with a broader OFF surround.
            spatial_filter_on = GaussianSpatialFilter(sigma=(2, 2), origin=(0, 0), translate=(xi, yi))
            on_linear_filter = SpatioTemporalFilter(spatial_filter_on, temporal_filter, amplitude=20)
            spatial_filter_off = GaussianSpatialFilter(sigma=(4, 4), origin=(0, 0), translate=(xi, yi))
            off_linear_filter = SpatioTemporalFilter(spatial_filter_off, temporal_filter, amplitude=-20)
            on_off_cell = LGNOnOffCell(on_linear_filter, off_linear_filter)
            cell_list.append(on_off_cell)
    lgn = LGNModel(cell_list)  # Here include a list of all cells
    y = lgn.evaluate(m, downsample=100)  # Does the filtering + non-linearity on movie object m
    heat_plot(y, interpolation='none', colorbar=True)
|
|
import numpy as np
import librosa
import math
import sys
# Convert an audio file (first CLI argument) into a C header containing one
# row of 4 banded spectral-energy values per STFT frame.
print("Loading file")
# Load at most the first 60 s, resampled to 15360 Hz.
audio, sample_rate = librosa.load(sys.argv[1], duration=60, offset=0, sr=15360)
print("Getting spectrum")
spectrum = librosa.stft(audio)
S = np.abs(spectrum)  # magnitude spectrogram, shape (freq_bins, frames)
print("Writing file")
# Use a context manager so the header file is closed even on error.
# (The unused fn/fs band-count variables from the original were removed.)
with open("spectrum.h", "w") as fout:
    fout.write("const uint16_t spectrum[][4] = {\n")
    # BUG FIX: the original iterated range(0, len(S[0]-1)) -- the "-1" was
    # inside the parentheses, subtracting 1 from every element instead of the
    # length (a no-op for len). The effective behaviour (iterate over every
    # frame) is preserved, now written as intended.
    for t in range(S.shape[1]):
        fout.write("{ ")
        f_prev = 0
        # Sum magnitudes over four bin bands: [0,8), [8,45), [45,300), [300,600).
        for f in [8, 45, 300, 600]:
            v = 0
            for i in range(f_prev, f):
                v += S[i][t]
            if v != 0:
                v = int(v / 30)  # crude scaling into uint16 range
            if v < 0:
                v = 0
            f_prev = f
            fout.write(str(int(v)) + ", ")
        fout.write("},\n")
    fout.write("};\n")
print("Finished")
|
|
# -*- coding: utf-8 -*-
from data.reader import wiki_from_pickles
from data.corpus import Words, Articles, Sentences
from stats.stat_functions import compute_vocab_size
from stats.mle import Heap
from jackknife.plotting import hexbin_plot
import numpy as np
import numpy.random as rand
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
def heap(corp, rng):
    """Compute a vocabulary-growth curve: for each token count in rng,
    subsample the corpus to that size and record the vocabulary size.

    :param corp: corpus to subsample from
    :param rng: iterable of subsample sizes (token counts)
    :return: list of vocabulary sizes, aligned with rng
    """
    sizes = []
    for step, n_tokens in enumerate(rng):
        # Progress report every 10th subsample.
        if step % 10 == 0:
            print(step, n_tokens)
        sample = Sentences.subsample(corp, n_tokens)
        sizes.append(compute_vocab_size(sample))
    return sizes
def heap_from_file(save_dir, rng_params):
    """Load previously pickled vocabulary-growth curves from save_dir.

    The expected file name is "vocab_growth_<p1>_<p2>_..._<pn>.pkl" built from
    rng_params. save_dir must end with a path separator (plain string concat).

    :raises FileNotFoundError: when no matching pickle exists in save_dir
    """
    file_name = "vocab_growth_{}.pkl".format("_".join(str(p) for p in rng_params))
    print(file_name)
    if file_name not in os.listdir(save_dir):
        raise FileNotFoundError
    with open(save_dir + file_name, "rb") as handle:
        return pickle.load(handle)
def do_mles(rng, vocab_sizes, save_dir):
    """Fit a Heap's-law MLE to each vocabulary-growth curve and append the
    point estimates to <save_dir>mle_heap_point_estimates.txt.

    :param rng: subsample sizes the curves were measured at
    :param vocab_sizes: iterable of vocabulary-size curves (one per repetition)
    :param save_dir: output directory (must end with a path separator)
    """
    with open(save_dir + "mle_heap_point_estimates.txt", "w") as handle:
        for curve in vocab_sizes:
            model = Heap(curve, rng)
            fit = model.fit(start_params=np.asarray([100000.0, 1.0]),
                            method="powell", full_output=True)
            model.register_fit(fit)
            handle.write(model.print_result(string=True))
            handle.write("\n")
def heap_main(wiki, rng_params, m, save_dir="./"):
    """Estimate, plot, and cache Heap's-law vocabulary growth for a corpus.

    :param wiki: corpus to subsample from
    :param rng_params: (start, stop, step) arguments for range() over token counts
    :param m: number of independent subsampling repetitions
    :param save_dir: output directory (must end with a path separator)
    """
    rng = list(range(*rng_params))
    # Reuse cached curves when available; otherwise recompute m repetitions
    # and fit/record the MLE point estimates.
    try:
        vocab_sizes = heap_from_file(save_dir,
                                     (rng_params[0], rng_params[1], len(rng)))
    except FileNotFoundError:
        vocab_sizes = [heap(wiki, rng) for _ in range(m)]
        do_mles(rng, vocab_sizes, save_dir)
    # Flatten all repetitions for the density (hexbin) plot.
    all_sizes = [v_n for size_ls in vocab_sizes for v_n in size_ls]
    print(len(all_sizes))
    long_rng = np.tile(rng, m)
    print(len(long_rng))
    print(len(vocab_sizes))
    # All-repetitions density first, then the per-n mean overlaid in red.
    hexbin_plot(long_rng, all_sizes, xlbl="$n$", ylbl="$V(n)$",
                log=False, ignore_zeros=False, gridsize=100)
    mean_vs = np.mean(vocab_sizes, axis=0)
    hexbin_plot(rng, mean_vs, xlbl="$n$", ylbl="$V(n)$",
                log=False, ignore_zeros=False, label="mean",
                color="red", edgecolors="red", cmap="Reds_r", cbar=False,
                gridsize=100, linewidths=0.5)
    plt.legend(loc="upper left")
    plt.savefig(save_dir + "vocab_growth_" +
                str(min(rng)) + "_" + str(max(rng)) + "_" + str(len(rng)) + ".png",
                dpi=300)
    plt.close()
    # Cache the raw curves under the naming scheme heap_from_file expects.
    with open(save_dir + "vocab_growth_" +
              str(rng_params[0]) + "_" + str(rng_params[1]) + "_" + str(len(rng)) +
              ".pkl", "wb") as handle:
        pickle.dump(vocab_sizes, handle)
if __name__ == "__main__":
    wiki = list(wiki_from_pickles("data/ALS_pkl"))
    save_dir = "results/ALS/jackknife/"
    m = 7  # number of independent subsampling repetitions
    rng_params = int(0), int(2e4)+1, int(2e2)
    # CONSISTENCY FIX: pass the m variable through instead of repeating the
    # literal 7 (previously, editing `m` above silently had no effect).
    heap_main(wiki, rng_params, m=m, save_dir=save_dir)
|
|
import tensorflow as tf
import numpy as np
import os
import sys
from MyPreprocessingWrapper import MyPreprocessingWrapper
from MyImageProcessor import MyImageProcessor
from MyUtils import MyUtils
from Visualization import Visualization
class MyTrainingModelWrapper(object):
save_my_model_tf_session = None
    def __init__(self):
        """Load the preprocessed dataset and build the full TF1 training graph
        (network, loss/optimizer, accuracy, and top-k ops)."""
        #mypreprocess_wrapper = MyPreprocessingWrapper()
        # Silence all TensorFlow C++ log output below ERROR level.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        ### Invoke preprocessing of the data
        ### If preprocessing is already complete, it just loads the pickle file which has the preprocessed data.
        ### If the preprocessing results file does not exist under "/training_output" folder, it will execute the preprocessing steps.
        self.training_data = MyPreprocessingWrapper.invoke_pre_processing()
        # Convenience aliases for the train/validation/test splits.
        self.X_TRAIN = self.training_data.x_train_final
        self.Y_TRAIN = self.training_data.y_train_final
        self.X_VALID = self.training_data.x_valid_final
        self.Y_VALID = self.training_data.y_valid_final
        self.X_TEST = self.training_data.x_test_final
        self.Y_TEST = self.training_data.y_test_final
        # Hyper-parameters and graph handles; populated by the init/create
        # helpers called below.
        self.learning_rate = None
        self.epochs = None
        self.batch_size = None
        self.features = None
        self.labels = None
        self.neural_network = None
        self.training_operation = None
        self.top_k_operations = None
        self.accuracy_operation = None
        ### Calling local functions
        self.init_hyper_parameters()
        self.init_network_params()
        self.create_a_network()
        self.create_training_pipeline()
        self.create_model_evaluation()
        self.create_top_k_operations()
def init_hyper_parameters(self):
self.learning_rate = 0.001
self.epochs = 20
self.batch_size = 128
    def init_network_params(self):
        """Create the graph inputs: a batch of 32x32x1 images and the labels
        tensor (one-hot encoded, 43 classes)."""
        self.features = tf.placeholder(tf.float32, [None, 32,32,1])
        #self.labels = tf.placeholder(tf.float32, [None, 43])
        self.labels = tf.placeholder(tf.int32, None)
        # NOTE(review): self.labels is immediately rebound to the one_hot
        # output, so the int placeholder above becomes unreachable; feed_dict
        # entries keyed on self.labels (see evaluate/train) therefore feed the
        # one_hot tensor directly. Confirm callers feed already-one-hot
        # batches of shape [batch, 43].
        self.labels = tf.one_hot(self.labels, 43)
# noinspection PyMethodMayBeStatic
    def create_a_network(self):
        """Build the classifier graph: three conv+ReLU+max-pool stages
        (32 -> 64 -> 128 channels), a flatten, and three fully connected
        layers (2048 -> 1024 -> 256 -> 43 logits). The final logits tensor is
        stored in self.neural_network."""
        mean = 0
        standard_deviation = 0.1
        # NOTE(review): dropout is applied unconditionally (no is-training
        # switch), so it is also active during evaluation -- confirm intended.
        dropout = 0.5
        ## ============================================================== ##
        # Layer 1 - Input = 32x32x1 - output = 32x32x32
        ## ============================================================== ##
        filter_size, input_channels, output_channels = 5, 1, 32
        conv1_Weights = tf.Variable(tf.truncated_normal((filter_size, filter_size, input_channels, output_channels), mean=mean, stddev=standard_deviation))
        conv1_biases = tf.Variable(tf.zeros(output_channels))
        conv1 = tf.nn.conv2d(self.features, conv1_Weights, strides = [1,1,1,1], padding = "SAME")
        conv1 = tf.nn.bias_add(conv1, conv1_biases)
        # Activation
        conv1 = tf.nn.relu(conv1)
        #Polling. Input = 32x32x1. output = 16x16x32
        conv1 = tf.nn.max_pool(conv1, [1,2,2,1], [1,2,2,1], 'VALID')
        print("Layer 1 completed")
        ## ============================================================== ##
        # Layer 2 - Input = 16x16x32 - output = 16x16x64
        ## ============================================================== ##
        filter_size, input_channels, output_channels = 5, 32, 64
        conv2_Weights = tf.Variable(tf.truncated_normal((filter_size, filter_size, input_channels, output_channels), mean=mean, stddev=standard_deviation))
        conv2_biases = tf.Variable(tf.zeros(output_channels))
        conv2 = tf.nn.conv2d(conv1, conv2_Weights, strides=[1, 1, 1, 1], padding="SAME")
        conv2 = tf.nn.bias_add(conv2, conv2_biases)
        # Activation
        conv2 = tf.nn.relu(conv2)
        # Pooling. Input = 16x16x64. Output = 8x8x64.
        conv2 = tf.nn.max_pool(conv2, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        print("Layer 2 completed")
        ## ============================================================== ##
        # Layer 3 - Input = 8x8x64 - output = 8x8x128
        ## ============================================================== ##
        filter_size, input_channels, output_channels = 5, 64, 128
        conv3_Weights = tf.Variable(tf.truncated_normal((filter_size, filter_size, input_channels, output_channels), mean=mean, stddev=standard_deviation))
        conv3_biases = tf.Variable(tf.zeros(output_channels))
        conv3 = tf.nn.conv2d(conv2, conv3_Weights, strides=[1, 1, 1, 1], padding="SAME")
        conv3 = tf.nn.bias_add(conv3, conv3_biases)
        # Activation
        conv3 = tf.nn.relu(conv3)
        # Pooling. Input = 8x8x128. Output = 4x4x128.
        conv3 = tf.nn.max_pool(conv3, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        print("Layer 3 completed")
        ## ============================================================== ##
        # Flatten. Input = 4x4x128. Output = 2048
        ## ============================================================== ##
        tensor_size = 4 * 4 * 128
        # NOTE(review): tf.contrib.layers.flatten's second positional
        # parameter is outputs_collections, not a shape -- [1, tensor_size]
        # here is likely unintended (the commented line below is the usual
        # call form). Confirm against the tf.contrib API.
        fc = tf.contrib.layers.flatten(conv3, [1, tensor_size])
        #fc = tf.contrib.layers.flatten(conv2)
        print("Layer FLATTEN completed")
        ## ============================================================== ##
        # Layer 4 - Fully connected. Input = 2048 - output = 1024
        ## ============================================================== ##
        input_size, output_size = 2048, 1024
        fc1_weights = tf.Variable(tf.truncated_normal((input_size, output_size), mean, standard_deviation))
        fc1_biases = tf.Variable(tf.zeros(output_size))
        fc1 = tf.matmul(fc, fc1_weights)
        fc1 = tf.nn.bias_add(fc1, fc1_biases)
        fc1 = tf.nn.relu(fc1)
        fc1 = tf.nn.dropout(fc1, dropout)
        print("Layer 4 completed")
        ## ============================================================== ##
        # Layer 5 - Fully connected. Input = 1024 - output = 256
        ## ============================================================== ##
        input_size, output_size = 1024, 256
        fc2_weights = tf.Variable(tf.truncated_normal((input_size, output_size), mean, standard_deviation))
        fc2_biases = tf.Variable(tf.zeros(output_size))
        fc2 = tf.matmul(fc1, fc2_weights)
        fc2 = tf.nn.bias_add(fc2, fc2_biases)
        fc2 = tf.nn.relu(fc2)
        fc2 = tf.nn.dropout(fc2, dropout)
        print("Layer 5 completed")
        ## ============================================================== ##
        # Layer 6 - Fully connected. Input = 256 - output = 43
        ## ============================================================== ##
        input_size, output_size = 256, 43
        fc3_weights = tf.Variable(tf.truncated_normal((input_size, output_size), mean, standard_deviation))
        fc3_biases = tf.Variable(tf.zeros(output_size))
        fc3 = tf.matmul(fc2, fc3_weights)
        fc3 = tf.nn.bias_add(fc3, fc3_biases)
        # Raw logits (no activation/dropout): softmax is applied by the loss
        # and top-k ops downstream.
        #fc3 = tf.nn.relu(fc3)
        #fc3 = tf.nn.dropout(fc3, dropout)
        print("Layer 6 completed")
        self.neural_network = fc3
def create_training_pipeline(self):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.neural_network, labels=self.labels)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
self.training_operation = optimizer.minimize(loss_operation)
def create_top_k_operations(self):
softmax_logits = tf.nn.softmax(logits = self.neural_network)
self.top_k_operations = tf.nn.top_k(softmax_logits, k = 5)
def create_model_evaluation(self):
correct_prediction = tf.equal(tf.argmax(self.neural_network, 1), tf.argmax(self.labels, 1))
self.accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def evaluate(self, X_data, y_data, BATCH_SIZE=128):
    """Compute mean accuracy over (X_data, y_data) in mini-batches.

    Uses tf.get_default_session(), so a session must already be active
    (e.g. inside a `with sess.as_default():` block). Returns the
    sample-weighted average accuracy as a float.
    """
    NUM_EXAMPLES = X_data.shape[0]
    total_accuracy = 0
    #sess = tf.Session()
    sess = tf.get_default_session()
    for offset in range(0, NUM_EXAMPLES, BATCH_SIZE):
        endindex = offset + BATCH_SIZE
        batch_X, batch_Y = X_data[offset:endindex], y_data[offset:endindex]
        accuracy = sess.run(self.accuracy_operation, feed_dict={self.features:batch_X, self.labels:batch_Y })
        # Weight by batch length so a short final batch counts correctly.
        total_accuracy += (accuracy * len(batch_X))
        # Progress dot every 10 batches.
        if (offset // BATCH_SIZE) % 10 == 0: print(".", end="", flush=True)
    return total_accuracy / NUM_EXAMPLES
"""
def train_my_model(self):
EPOCHS = self.epochs
BATCH_SIZE = self.batch_size
NUM_EXAMPLES = self.training_data.x_train.shape[0]
print("Input number of examples : {}".format(NUM_EXAMPLES))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(EPOCHS):
print("EPOCH {}".format(epoch))
for offset in range(0, NUM_EXAMPLES, BATCH_SIZE):
endindex = offset + BATCH_SIZE
batch_X, batch_Y = self.training_data.x_train[offset:endindex], self.training_data.y_train[offset:endindex]
sess.run(self.training_operation, feed_dict={self.features: batch_X, self.labels: batch_Y})
if (offset // BATCH_SIZE) % 10 == 0: print(".", end="", flush=True)
train_accuracy = self.evaluate(self.training_data.x_train, self.training_data.y_valid, BATCH_SIZE, sess)
validation_accuracy = self.evaluate(self.training_data.x_valid, self.training_data.y_valid, BATCH_SIZE, sess)
print("Train Accuracy = {:.3f}".format(train_accuracy))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
saver = tf.train.Saver()
saver.save(sess, "saved_models/lenet_model2")
print("Model Saved")
"""
def train_my_model(self):
    """Train the network for self.epochs epochs of mini-batch gradient
    descent, print validation accuracy after every epoch, and save a
    checkpoint to saved_models/lenet_model2.
    """
    EPOCHS = self.epochs
    BATCH_SIZE = self.batch_size
    NUM_EXAMPLES = self.X_TRAIN.shape[0]
    print("Input number of examples : {}".format(NUM_EXAMPLES))
    print("Input number of test data : {}".format(self.X_TEST.shape[0]))
    print("Input number of valid data : {}".format(self.X_VALID.shape[0]))
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for epoch in range(EPOCHS):
        print("EPOCH {}".format(epoch), end=" ")
        for offset in range(0, NUM_EXAMPLES, BATCH_SIZE):
            endindex = offset + BATCH_SIZE
            batch_X, batch_Y = self.X_TRAIN[offset:endindex], self.Y_TRAIN[offset:endindex]
            sess.run(self.training_operation, feed_dict={self.features:batch_X, self.labels:batch_Y })
            # Progress dot every 10 batches.
            if (offset // BATCH_SIZE) % 10 == 0: print(".", end="", flush=True)
        # Validation accuracy computed on the whole validation set at once.
        valid_batch_X, valid_batch_Y = self.X_VALID, self.Y_VALID
        validation_accuracy = sess.run(self.accuracy_operation, feed_dict={self.features:valid_batch_X, self.labels:valid_batch_Y})
        #test_batch_X, test_batch_Y = self.X_TEST, self.Y_TEST
        #test_accuracy = sess.run(self.accuracy_operation, feed_dict={self.features: test_batch_X, self.labels: test_batch_Y})
        print(" :: Validation Accuracy - {0:8.3%}".format(validation_accuracy))
        #print(" :: Test Accuracy - {0:8.3%}".format(test_accuracy))
    saver = tf.train.Saver()
    saver.save(sess, "saved_models/lenet_model2")
    print("Model Saved")
def test(self):
    """Restore (or train, if no checkpoint exists) the model and print
    accuracy on the held-out test split in a single session run."""
    sess = self.invoke_model()
    test_features, test_labels = self.X_TEST, self.Y_TEST
    test_accuracy = sess.run(self.accuracy_operation, feed_dict={self.features:test_features, self.labels:test_labels})
    print("Test Accuracy is :" + str(test_accuracy))
def predict(self, images, labels):
    """Preprocess raw `images`, run top-k inference, and print the top-3
    predicted sign classes (with probabilities) against the true `labels`.
    """
    # BUG FIX: invoke_model is an instance method; calling it unbound on the
    # class (MyTrainingModelWrapper.invoke_model()) omitted `self` and raised
    # TypeError. Call it on this instance instead.
    session = self.invoke_model()
    my_image_processor = MyImageProcessor()
    _imgs=[]
    for img in images:
        _imgs.append( my_image_processor.apply_grayscale_and_normalize(img) )
    imgs = np.array(_imgs)
    # The network expects 32x32 single-channel input.
    imgs = np.reshape(imgs,(-1,32,32,1) )
    values, indices = session.run(self.top_k_operations, feed_dict = {self.features:imgs})
    signnames = Visualization.read_sign_names_from_csv_return()
    # top_k returns k=5 candidates; only the top 3 are displayed.
    for idx, pset in enumerate(indices):
        print("")
        print( '=======================================================')
        print("Correct Sign :", labels[idx],"-",signnames[labels[idx]])
        print( '-------------------------------------------------------')
        print( '{0:7.2%} : {1: <2} - {2: <40}'.format(values[idx][0],pset[0],signnames[pset[0]]))
        print( '{0:7.2%} : {1: <2} - {2: <40}'.format(values[idx][1],pset[1],signnames[pset[1]]))
        print( '{0:7.2%} : {1: <2} - {2: <40}'.format(values[idx][2],pset[2],signnames[pset[2]]))
        print( '-------------------------------------------------------')
#noinspection PyMethodMayBeStatic
def invoke_model(self):
    """Return a TF session with the trained model restored from
    saved_models/lenet_model2, training a fresh model first when no
    checkpoint exists on disk.
    """
    #if cls.save_my_model_tf_session is not None:
    #    print("Model already exists - just returning it.")
    #    return cls.save_my_model_tf_session
    if not os.path.isfile("saved_models/lenet_model2.meta"): #Model will create these files.
        my_model = MyTrainingModelWrapper()
        my_model.train_my_model()
    #else:
    #    print("Model already exists - reusing it.")
    print("Loading the model from : saved_models/lenet_model2")
    model_saver = tf.train.Saver()
    self.save_my_model_tf_session = tf.Session()
    self.save_my_model_tf_session.run(tf.global_variables_initializer())
    model_saver.restore(self.save_my_model_tf_session, 'saved_models/lenet_model2')
    return self.save_my_model_tf_session
if __name__ == "__main__":
    # Entry point: restore or train the model, then report test accuracy.
    mytraining_wrapper = MyTrainingModelWrapper()
    #mytraining_wrapper.invoke_model()
    mytraining_wrapper.test()
    # NOTE(review): relies on `sys` being imported earlier in this file -- verify.
    sys.exit(0)
|
|
import os
import numpy as np
import matplotlib.pyplot as plt
# Load the training data (comma-separated; last column is the target).
path = os.getcwd() + "/data/ex1data2.txt"
data = np.loadtxt(path, delimiter=",")
# Prepend a column of ones so the first regression coefficient acts as the
# intercept (bias) term.
temp = np.ones(((data.shape)[0],1), dtype=np.float64)
data = np.append(temp, data, axis=1)
def featureScaling(data):
    """Mean-normalize the feature columns of `data` in place.

    Each feature column j (excluding column 0, the all-ones bias column, and
    the last column, the target) is replaced by (x - mean) / (max - min).
    Mutates `data`; returns None.
    """
    n_rows, n_cols = data.shape
    col_sum = np.zeros(n_cols - 1)
    lo = data[0][:-1]
    hi = data[0][:-1]
    for row in data:
        col_sum = np.add(col_sum, row[:-1])
        lo = np.minimum(lo, row[:-1])
        hi = np.maximum(hi, row[:-1])
    mean = col_sum / n_rows
    for i in range(n_rows):
        # Start at 1: column 0 is the bias column (max == min would divide by 0).
        for j in range(1, n_cols - 1):
            data[i][j] = (data[i][j] - mean[j]) / (hi[j] - lo[j])
def regression(data):
    """Solve the normal equations theta = (X^T X)^{-1} X^T y.

    `data` holds the design matrix in all but the last column and the
    target vector in the last column. Returns the coefficient vector.
    """
    features = data[:, :-1]
    target = data[:, -1]
    xt = features.T
    # Closed-form least squares; no feature scaling needed with this method.
    return np.linalg.inv(xt @ features) @ xt @ target
#no need to apply feature scaling when we are using normal equations method
#featureScaling(data)
# Print the fitted coefficients (intercept first, then one weight per feature).
print(regression(data))
|
|
#!/usr/bin/python
""" Classes and functions for fitting tensors """
# 5/17/2010
import numpy as np
from dipy.reconst.maskedview import MaskedView, _makearray, _filled
from dipy.reconst.modelarray import ModelArray
from dipy.data import get_sphere
class Tensor(ModelArray):
    """ Fits a diffusion tensor given diffusion-weighted signals and gradient info
    Tensor object that when initialized calculates single self diffusion
    tensor [1]_ in each voxel using selected fitting algorithm
    (DEFAULT: weighted least squares [2]_)
    Requires a given gradient table, b value for each diffusion-weighted
    gradient vector, and image data given all as arrays.
    Parameters
    ----------
    data : array ([X, Y, Z, ...], g)
        Diffusion-weighted signals. The dimension corresponding to the
        diffusion weighting must be the last dimension
    bval : array (g,)
        Diffusion weighting factor b for each vector in gtab.
    gtab : array (g, 3)
        Diffusion gradient table found in DICOM header as a array.
    mask : array, optional
        The tensor will only be fit where mask is True. Mask must
        broadcast to the shape of data and must have fewer dimensions than data
    thresh : float, default = None
        The tensor will not be fit where data[bval == 0] < thresh. If multiple
        b0 volumes are given, the minimum b0 signal is used.
    fit_method : function or string, default = 'WLS'
        The method to be used to fit the given data to a tensor. Any function
        that takes the B matrix and the data and returns eigen values and eigen
        vectors can be passed as the fit method. Any of the common fit methods
        can be passed as a string.
    *args, **kargs :
        Any other arguments or keywords will be passed to fit_method.
    common fit methods:
        'WLS' : weighted least squares
            dti.wls_fit_tensor
        'LS' : ordinary least squares
            dti.ols_fit_tensor
    Attributes
    ----------
    D : array (..., 3, 3)
        Self diffusion tensor calculated from cached eigenvalues and
        eigenvectors.
    mask : array
        True in voxels where a tensor was fit, false if the voxel was skipped
    B : array (g, 7)
        Design matrix or B matrix constructed from given gradient table and
        b-value vector.
    evals : array (..., 3)
        Cached eigenvalues of self diffusion tensor for given index.
        (eval1, eval2, eval3)
    evecs : array (..., 3, 3)
        Cached associated eigenvectors of self diffusion tensor for given
        index. Note: evals[..., j] is associated with evecs[..., :, j]
    Methods
    -------
    fa : array
        Calculates fractional anisotropy [2]_.
    md : array
        Calculates the mean diffusivity [2]_.
        Note: [units ADC] ~ [units b value]*10**-1
    See Also
    --------
    dipy.io.bvectxt.read_bvec_file, dipy.core.qball.ODF
    Notes
    -----
    Due to the fact that diffusion MRI entails large volumes (e.g. [256,256,
    50,64]), memory can be an issue. Therefore, only the following parameters
    of the self diffusion tensor are cached for each voxel:
    - All three eigenvalues
    - Primary and secondary eigenvectors
    From these cached parameters, one can presumably construct any desired
    parameter.
    References
    ----------
    .. [1] Basser, P.J., Mattiello, J., LeBihan, D., 1994. Estimation of
       the effective self-diffusion tensor from the NMR spin echo. J Magn
       Reson B 103, 247-254.
    .. [2] Basser, P., Pierpaoli, C., 1996. Microstructural and physiological
       features of tissues elucidated by quantitative diffusion-tensor MRI.
       Journal of Magnetic Resonance 111, 209-219.
    Examples
    ----------
    For a complete example have a look at the main dipy/examples folder
    """
    ### Eigenvalues Property ###
    @property
    def evals(self):
        """
        Returns the eigenvalues of the tensor as an array
        """
        # model_params stores [evals (3), evecs (9)] per voxel.
        return _filled(self.model_params[..., :3])
    ### Eigenvectors Property ###
    @property
    def evecs(self):
        """
        Returns the eigenvectors of the tensor as an array
        """
        # Trailing 9 parameters reshaped back into a 3x3 matrix per voxel.
        evecs = _filled(self.model_params[..., 3:])
        return evecs.reshape(self.shape + (3, 3))
    def __init__(self, data, b_values, grad_table, mask=True, thresh=None,
                 fit_method='WLS', verbose=False, *args, **kargs):
        """
        Fits a tensor to diffusion weighted data.
        """
        # Resolve string aliases ('WLS', 'LS') to the actual fit function.
        if not callable(fit_method):
            try:
                fit_method = common_fit_methods[fit_method]
            except KeyError:
                raise ValueError('"'+str(fit_method)+'" is not a known fit '+
                                 'method, the fit method should either be a '+
                                 'function or one of the common fit methods')
        #64 bit design matrix makes for faster pinv
        B = design_matrix(grad_table.T, b_values)
        self.B = B
        mask = np.atleast_1d(mask)
        if thresh is not None:
            #Define total mask from thresh and mask
            #mask = mask & (np.min(data[..., b_values == 0], -1) >
            #thresh)
            #the assumption that the lowest b_value is always 0 is
            #incorrect the lowest b_value could also be higher than 0
            #this is common with grid q-spaces
            min_b0_sig = np.min(data[..., b_values == b_values.min()], -1)
            mask = mask & (min_b0_sig > thresh)
        #if mask is all False
        if not mask.any():
            raise ValueError('between mask and thresh, there is no data to '+
                             'fit')
        #and the mask is not all True
        if not mask.all():
            #leave only data[mask is True]
            data = data[mask]
            data = MaskedView(mask, data)
        #Perform WLS fit on masked data
        dti_params = fit_method(B, data, *args, **kargs)
        self.model_params = dti_params
    ### Self Diffusion Tensor Property ###
    def _getD(self):
        # Reconstruct D = Q diag(L) Q^T voxel-by-voxel from cached
        # eigenvalues (L) and eigenvectors (Q).
        evals = self.evals
        evecs = self.evecs
        evals_flat = evals.reshape((-1, 3))
        evecs_flat = evecs.reshape((-1, 3, 3))
        D_flat = np.empty(evecs_flat.shape)
        for L, Q, D in zip(evals_flat, evecs_flat, D_flat):
            D[:] = np.dot(Q*L, Q.T)
        return D_flat.reshape(evecs.shape)
    D = property(_getD, doc = "Self diffusion tensor")
    def fa(self):
        r"""
        Fractional anisotropy (FA) calculated from cached eigenvalues.
        Returns
        ---------
        fa : array (V, 1)
            Calculated FA. Note: range is 0 <= FA <= 1.
        Notes
        --------
        FA is calculated with the following equation:
        .. math::
            FA = \sqrt{\frac{1}{2}\frac{(\lambda_1-\lambda_2)^2+(\lambda_1-
                    \lambda_3)^2+(\lambda_2-\lambda_3)^2}{\lambda_1^2+
                    \lambda_2^2+\lambda_3^2} }
        """
        evals, wrap = _makearray(self.model_params[..., :3])
        ev1 = evals[..., 0]
        ev2 = evals[..., 1]
        ev3 = evals[..., 2]
        fa = np.sqrt(0.5 * ((ev1 - ev2)**2 + (ev2 - ev3)**2 + (ev3 - ev1)**2)
                     / (ev1*ev1 + ev2*ev2 + ev3*ev3))
        fa = wrap(np.asarray(fa))
        return _filled(fa)
    def md(self):
        r"""
        Mean diffusivity (MD) calculated from cached eigenvalues.
        Returns
        ---------
        md : array (V, 1)
            Calculated MD.
        Notes
        --------
        MD is calculated with the following equation:
        .. math::
            ADC = \frac{\lambda_1+\lambda_2+\lambda_3}{3}
        """
        #adc/md = (ev1+ev2+ev3)/3
        return self.evals.mean(-1)
    def ind(self):
        ''' Quantizes eigenvectors with maximum eigenvalues on an
        evenly distributed sphere so that they can be used for tractography.
        Returns
        ---------
        IN : array, shape(x,y,z) integer indices for the points of the
            evenly distributed sphere representing tensor eigenvectors of
            maximum eigenvalue
        '''
        return quantize_evecs(self.evecs,odf_vertices=None)
def wls_fit_tensor(design_matrix, data, min_signal=1):
    r"""
    Computes weighted least squares (WLS) fit to calculate self-diffusion
    tensor using a linear regression model [1]_.
    Parameters
    ----------
    design_matrix : array (g, 7)
        Design matrix holding the covariants used to solve for the regression
        coefficients.
    data : array ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    min_signal : default = 1
        All values below min_signal are replaced with min_signal. This is done
        in order to avoid taking log(0) during the tensor fitting.
    Returns
    -------
    eigvals : array (..., 3)
        Eigenvalues from eigen decomposition of the tensor.
    eigvecs : array (..., 3, 3)
        Associated eigenvectors from eigen decomposition of the tensor.
        Eigenvectors are columnar (e.g. eigvecs[:,j] is associated with
        eigvals[j])
    See Also
    --------
    decompose_tensor
    Notes
    -----
    In Chung, et al. 2006, the regression of the WLS fit needed an unbiased
    preliminary estimate of the weights and therefore the ordinary least
    squares (OLS) estimates were used. A "two pass" method was implemented:
        1. calculate OLS estimates of the data
        2. apply the OLS estimates as weights to the WLS fit of the data
    This ensured heteroscedasticity could be properly modeled for various
    types of bootstrap resampling (namely residual bootstrap).
    .. math::
        y = \mathrm{data} \\
        X = \mathrm{design matrix} \\
        \hat{\beta}_\mathrm{WLS} = \mathrm{desired regression coefficients (e.g. tensor)}\\
        \\
        \hat{\beta}_\mathrm{WLS} = (X^T W X)^{-1} X^T W y \\
        \\
        W = \mathrm{diag}((X \hat{\beta}_\mathrm{OLS})^2),
        \mathrm{where} \hat{\beta}_\mathrm{OLS} = (X^T X)^{-1} X^T y
    References
    ----------
    .. _[1] Chung, SW., Lu, Y., Henry, R.G., 2006. Comparison of bootstrap
       approaches for estimation of uncertainties of DTI parameters.
       NeuroImage 33, 531-541.
    """
    if min_signal <= 0:
        raise ValueError('min_signal must be > 0')
    data, wrap = _makearray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    # Per voxel: row 0 holds the eigenvalues, rows 1-3 the eigenvectors.
    dti_params = np.empty((len(data_flat), 4, 3))
    #obtain OLS fitting matrix
    #U,S,V = np.linalg.svd(design_matrix, False)
    #math: beta_ols = inv(X.T*X)*X.T*y
    #math: ols_fit = X*beta_ols*inv(y)
    #ols_fit = np.dot(U, U.T)
    ols_fit = _ols_fit_matrix(design_matrix)
    for param, sig in zip(dti_params, data_flat):
        param[0], param[1:] = _wls_iter(ols_fit, design_matrix, sig,
                                        min_signal=min_signal)
    # Flatten each voxel's 4x3 parameter block to 12 values.
    dti_params.shape = data.shape[:-1]+(12,)
    dti_params = wrap(dti_params)
    return dti_params
def _wls_iter(ols_fit, design_matrix, sig, min_signal=1):
    """Single-voxel WLS tensor fit.

    Weights are the OLS-predicted signals exp(H @ log(sig)); returns the
    (eigenvalues, eigenvectors) pair from decompose_tensor.
    """
    # Clip small/zero signals so the log is finite.
    clipped = np.maximum(sig, min_signal)
    log_sig = np.log(clipped)
    weights = np.exp(ols_fit.dot(log_sig))
    # Weighted pseudo-inverse solve: beta = pinv(W X) (W log y).
    coef = np.dot(np.linalg.pinv(design_matrix * weights[:, None]),
                  weights * log_sig)
    return decompose_tensor(_full_tensor(coef))
def _ols_iter(inv_design, sig, min_signal=1):
    """Single-voxel OLS tensor fit.

    Solves beta = pinv(X) @ log(sig) and returns the decomposed tensor.
    """
    # Clip small/zero signals so the log is finite.
    clipped = np.maximum(sig, min_signal)
    coef = inv_design.dot(np.log(clipped))
    return decompose_tensor(_full_tensor(coef))
def ols_fit_tensor(design_matrix, data, min_signal=1):
    r"""
    Computes ordinary least squares (OLS) fit to calculate self-diffusion
    tensor using a linear regression model [1]_.
    Parameters
    ----------
    design_matrix : array (g, 7)
        Design matrix holding the covariants used to solve for the regression
        coefficients. Use design_matrix to build a valid design matrix from
        bvalues and a gradient table.
    data : array ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    min_signal : default = 1
        All values below min_signal are replaced with min_signal. This is done
        in order to avoid taking log(0) during the tensor fitting.
    Returns
    -------
    eigvals : array (..., 3)
        Eigenvalues from eigen decomposition of the tensor.
    eigvecs : array (..., 3, 3)
        Associated eigenvectors from eigen decomposition of the tensor.
        Eigenvectors are columnar (e.g. eigvecs[:,j] is associated with
        eigvals[j])
    See Also
    --------
    WLS_fit_tensor, decompose_tensor, design_matrix
    Notes
    -----
    This function is offered mainly as a quick comparison to WLS.
    .. math::
        y = \mathrm{data} \\
        X = \mathrm{design matrix} \\
        \hat{\beta}_\mathrm{OLS} = (X^T X)^{-1} X^T y
    References
    ----------
    .. [1] Chung, SW., Lu, Y., Henry, R.G., 2006. Comparison of bootstrap
       approaches for estimation of uncertainties of DTI parameters.
       NeuroImage 33, 531-541.
    """
    data, wrap = _makearray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    # Per voxel: row 0 holds the eigenvalues, rows 1-3 the eigenvectors.
    # FIX: removed two preallocated arrays (`evals`, `evecs`) that were never
    # read or written -- they only wasted memory.
    dti_params = np.empty((len(data_flat), 4, 3))
    # OLS solve via the Moore-Penrose pseudo-inverse of the design matrix:
    # beta_ols = pinv(X) @ log(y)
    inv_design = np.linalg.pinv(design_matrix)
    for param, sig in zip(dti_params, data_flat):
        param[0], param[1:] = _ols_iter(inv_design, sig, min_signal)
    # Flatten each voxel's 4x3 parameter block to 12 values.
    dti_params.shape = data.shape[:-1]+(12,)
    dti_params = wrap(dti_params)
    return dti_params
def _ols_fit_matrix(design_matrix):
"""
Helper function to calculate the ordinary least squares (OLS)
fit as a matrix multiplication. Mainly used to calculate WLS weights. Can
be used to calculate regression coefficients in OLS but not recommended.
See Also:
---------
wls_fit_tensor, ols_fit_tensor
Example:
--------
ols_fit = _ols_fit_matrix(design_mat)
ols_data = np.dot(ols_fit, data)
"""
U,S,V = np.linalg.svd(design_matrix, False)
return np.dot(U, U.T)
def _full_tensor(D):
"""
Returns a tensor given the six unique tensor elements
Given the six unique tensor elments (in the order: Dxx, Dyy, Dzz, Dxy, Dxz,
Dyz) returns a 3 by 3 tensor. All elements after the sixth are ignored.
"""
tensor = np.empty((3,3),dtype=D.dtype)
tensor[0, 0] = D[0] #Dxx
tensor[1, 1] = D[1] #Dyy
tensor[2, 2] = D[2] #Dzz
tensor[1, 0] = tensor[0, 1] = D[3] #Dxy
tensor[2, 0] = tensor[0, 2] = D[4] #Dxz
tensor[2, 1] = tensor[1, 2] = D[5] #Dyz
return tensor
def _compact_tensor(tensor, b0=1):
"""
Returns the six unique values of the tensor and a dummy value in the order
expected by the design matrix
"""
D = np.empty(tensor.shape[:-2] + (7,))
row = [0, 1, 2, 1, 2, 2]
colm = [0, 1, 2, 0, 0, 1]
D[..., :6] = tensor[..., row, colm]
D[..., 6] = np.log(b0)
return D
def decompose_tensor(tensor):
    """
    Eigen-decompose a 3x3 diffusion tensor. (Basser et al., 1994a)
    Parameters
    ----------
    tensor : array (3,3)
        Array holding a tensor. Assumes units on the order of
        ~ 10^-4 mm^2/s.
    Returns
    -------
    eigvals : array (3,)
        Eigenvalues, sorted from largest to smallest; negative values are
        clamped to zero.
    eigvecs : array (3,3)
        Associated eigenvectors, columnar (eigvecs[:, j] goes with
        eigvals[j]), reordered to match the sorted eigenvalues.
    See Also
    --------
    numpy.linalg.eig
    """
    vals, vecs = np.linalg.eig(tensor)
    # Descending order of eigenvalues; permute the vector columns in lockstep.
    order = vals.argsort()[::-1]
    vals = vals[order]
    vecs = vecs[:, order]
    # Physically the tensor is positive semi-definite: clamp negatives to 0.
    vals = np.maximum(vals, 0)
    return vals, vecs
def design_matrix(gtab, bval, dtype=None):
    """
    Constructs design matrix for DTI weighted least squares or least squares
    fitting. (Basser et al., 1994a)
    Parameters
    ----------
    gtab : array with shape (3,g)
        Diffusion gradient table found in DICOM header as a numpy array.
    bval : array with shape (g,)
        Diffusion weighting factor b for each vector in gtab.
    dtype : string
        Parameter to control the dtype of returned designed matrix
    Returns
    -------
    design_matrix : array (g,7)
        Design matrix or B matrix assuming Gaussian distributed tensor model.
        Note: design_matrix[j,:] = (Bxx,Byy,Bzz,Bxy,Bxz,Byz,dummy)
    """
    G = gtab
    # Validate before allocating anything.
    if gtab.shape[1] != bval.shape[0]:
        raise ValueError('The number of b values and gradient directions must'
                         +' be the same')
    # BUG FIX: the `dtype` parameter was previously accepted but ignored
    # (B always took gtab's dtype). Honor it when given.
    B = np.zeros((bval.size, 7), dtype=G.dtype if dtype is None else dtype)
    # Off-diagonal terms carry a factor of 2 from the symmetric tensor model.
    B[:, 0] = G[0, :] * G[0, :] * 1. * bval   #Bxx
    B[:, 1] = G[1, :] * G[1, :] * 1. * bval   #Byy
    B[:, 2] = G[2, :] * G[2, :] * 1. * bval   #Bzz
    B[:, 3] = G[0, :] * G[1, :] * 2. * bval   #Bxy
    B[:, 4] = G[0, :] * G[2, :] * 2. * bval   #Bxz
    B[:, 5] = G[1, :] * G[2, :] * 2. * bval   #Byz
    B[:, 6] = np.ones(bval.size)              #dummy column for log(S0)
    return -B
def quantize_evecs(evecs, odf_vertices=None):
    ''' Find the closest orientation of an evenly distributed sphere
    Parameters
    ----------
    evecs : ndarray
    odf_vertices : None or ndarray
        If None, then set vertices from symmetric362 sphere. Otherwise use
        passed ndarray as vertices
    Returns
    -------
    IN : ndarray
    '''
    # Principal eigenvector (column 0) per voxel.
    max_evecs = evecs[..., :, 0]
    # BUG FIX: was `odf_vertices==None`, which performs an elementwise
    # comparison when an ndarray is passed and raises "truth value of an
    # array is ambiguous". Identity check is the correct test.
    if odf_vertices is None:
        odf_vertices, _ = get_sphere('symmetric362')
    tup = max_evecs.shape[:-1]
    mec = max_evecs.reshape(np.prod(np.array(tup)), 3)
    # For each vector, index of the sphere vertex minimizing the dot product.
    IN = np.array([np.argmin(np.dot(odf_vertices, m)) for m in mec])
    IN = IN.reshape(tup)
    return IN
# String aliases accepted by Tensor(fit_method=...), mapped to fit functions.
common_fit_methods = {'WLS': wls_fit_tensor,
                      'LS': ols_fit_tensor}
|
|
import numpy as np
import torch
from torch.utils.data import DataLoader,TensorDataset
def mIoU_of_class(prediction, predict_label, target, target_label):
    """Intersection-over-union between one predicted class and one target class.

    Returns a scalar float tensor, or None when `target_label` does not occur
    in `target` at all.
    """
    gt_locs = torch.where(target == target_label)
    n_gt = gt_locs[0].shape[0]
    if n_gt == 0:
        return None
    # Hits: predictions carrying predict_label at ground-truth locations.
    n_hit = torch.sum(prediction[gt_locs] == predict_label)
    n_pred = torch.where(prediction == predict_label)[0].shape[0]
    denom = n_gt + n_pred - n_hit
    return n_hit.float() / denom.float()
# test_classes is search space starting from 0.
# 0 is background class
def IoU_per_class(model, test_features, test_labels, word_vectors, test_classes, test_batch, GPU, calibrate_classes, calibrate):
    """Per-class IoU of `model` over a test set, batched on GPU.

    Returns a list with one IoU (float) or None per entry of test_classes,
    or False when test_classes contains a negative label.
    NOTE(review): assumes 224x224 inputs with 3 channels and 300-dim word
    vectors (hard-coded shapes below) -- confirm against the caller.
    """
    if (test_classes < 0).any():
        return False
    test_data = TensorDataset(test_features, test_labels)
    test_loader = DataLoader(test_data,batch_size=test_batch,shuffle=False)
    test_size = test_features.shape[0]
    test_classes = np.sort(test_classes)
    # Class 0 is background: derive its score from the other classes below.
    if test_classes[0] == 0:
        includeBack = True
    else:
        includeBack = False
    class_num = len(test_classes)
    # Word vectors are 1-indexed by class id (class c -> word_vectors[c-1]).
    test_vectors = torch.tensor([word_vectors[int(c-1)] for c in test_classes if c > 0]).view(-1, word_vectors.shape[1]).float().cuda(GPU) # -1*300
    class_acc = [None] * class_num
    predict_total = None
    for batch_features, batch_labels in test_loader:
        batch_size = batch_features.shape[0]
        # Pair every image with every class vector for the relation network.
        support_features = test_vectors.repeat(batch_size,1) # -1*300*1*1 -> -1*256*28*28
        query_features = batch_features.repeat(1,test_vectors.shape[0],1,1).view(-1,3,224,224)
        query_features = query_features.cuda(GPU).float()
        relations = model(query_features,support_features).view(batch_size,test_vectors.shape[0],224,224) # -1*-1*224*224
        if includeBack:
            # Background score is the complement of the best foreground score.
            background_scores = 1-torch.max(relations,1)[0].view(-1,1,224,224)
            scores = torch.cat((background_scores,relations),1)
        else:
            scores = relations
        # Optional per-class score calibration.
        if calibrate_classes is not None and len(calibrate_classes) > 0:
            scores[:,calibrate_classes,:,:] = scores[:,calibrate_classes,:,:] * calibrate
        prediction = torch.max(scores,1)[1]
        if predict_total is None:
            predict_total = prediction.cpu().detach()
        else:
            predict_total = torch.cat((predict_total,prediction.cpu().detach()))
    # ignore unselected classes
    test_labels = test_labels.view(-1,224,224)
    select_args = np.where(np.isin(test_labels,test_classes))
    test_labels = test_labels[select_args]
    predict_total = predict_total[select_args]
    for c_i in range(class_num):
        mIoU = mIoU_of_class(predict_total, c_i, test_labels, test_classes[c_i])
        if mIoU is not None:
            class_acc[c_i] = mIoU.item()
    return class_acc
|
|
# Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013-2016 SCoT Development Team
"""Use internally implemented functions as backend."""
from __future__ import absolute_import
import scipy as sp
from . import backend
from . import datatools, pca, csp
from .var import VAR
from .external.infomax_ import infomax
def generate():
    """Assemble the dict of builtin backend routines (ICA, PCA, CSP, VAR)."""
    def run_infomax(data, random_state=None):
        """Infomax ICA (adapted from MNE); returns (mixing, unmixing)."""
        unmixing = infomax(datatools.cat_trials(data).T, extended=True,
                           random_state=random_state).T
        mixing = sp.linalg.pinv(unmixing)
        return mixing, unmixing
    def run_pca(x, reducedim):
        """Dimensionality reduction with SCoT's PCA; returns (c, d, y)."""
        c, d = pca.pca(datatools.cat_trials(x),
                       subtract_mean=False, reducedim=reducedim)
        return c, d, datatools.dot_special(c.T, x)
    def run_csp(x, cl, reducedim):
        """Common spatial patterns with SCoT's CSP; returns (c, d, y)."""
        c, d = csp.csp(x, cl, numcomp=reducedim)
        return c, d, datatools.dot_special(c.T, x)
    return {'ica': run_infomax, 'pca': run_pca, 'csp': run_csp,
            'var': VAR}
# Register this generator with the backend manager under the name 'builtin'.
backend.register('builtin', generate)
|
|
"""
tools to manipulate data files
includes
bin, trim, stitch, etc
also include interpolation stuff
"""
import numpy as np
from scipy import stats
from scipy import interpolate
def trim_data(xlist, ylist, up, down):
    """Slice (xlist, ylist) between the first x exceeding `up` and the first
    subsequent x exceeding `down`.

    Assumes xlist ascends and both thresholds are crossed; like the original,
    raises NameError when a threshold is never exceeded.
    """
    for pos, x in enumerate(xlist):
        if x > up:
            start = pos
            break
    # Continue scanning from where the first loop stopped.
    for offset, x in enumerate(xlist[pos:]):
        if x > down:
            end = offset + pos
            break
    return xlist[start:end], ylist[start:end]
def bin_data(xlist, ylist, bin_num=100, bin_range=(0, 1)):
    """Bin ylist by xlist and return (bin_centers, bin_means).

    Empty bins yield NaN means (scipy.stats.binned_statistic behavior).
    """
    means, edges, _ = stats.binned_statistic(xlist, ylist, bins=bin_num,
                                             statistic='mean', range=bin_range)
    # Midpoint of each bin: right edge minus half the (uniform) bin width.
    half_width = (edges[1] - edges[0]) / 2
    centers = edges[1:] - half_width
    return centers, means
def find_nearest(array, value):
    """Index of the element of a sorted (ascending) array nearest to value."""
    pos = np.searchsorted(array, value, side="left")
    if pos == 0:
        return pos
    # Past the end, or the left neighbour is strictly closer: step back one.
    at_end = pos == len(array)
    if at_end or np.fabs(value - array[pos - 1]) < np.fabs(value - array[pos]):
        return pos - 1
    return pos
def interpolate_data(x1,y1,x2, Type):
    """Interpolate (x1, y1) onto the grid x2, padding outside x1's range.

    Type selects the pad value outside the overlap region: "A"/"C" pad with
    zeros, "T" pads with ones.
    NOTE(review): any other Type leaves the pad arrays undefined and raises
    NameError; callers must pass one of "A", "C", "T".
    """
    x1min = min(x1)
    x1max = max(x1)
    x2min = min(x2)
    x2max = max(x2)
    f = interpolate.interp1d(x1, y1)
    # Case A: x2 extends beyond x1 on both sides -> pad both ends.
    if x1min > x2min and x1max < x2max:
        #print "A"
        left = find_nearest(x2,min(x1))+1
        right = find_nearest(x2,max(x1))
        if Type == "A" or Type == "C":
            yinterp_left = np.zeros(left)
            yinterp_right = np.zeros(len(x2)-right)
        elif Type == "T":
            yinterp_left = np.ones(left)
            yinterp_right = np.ones(len(x2)-right)
        yinterp_middle = f(x2[left:right])
        yinterp = np.concatenate([yinterp_left,yinterp_middle, yinterp_right])
    # Case B: x2 extends beyond x1 only on the right -> pad right end.
    elif x1min <= x2min and x1max < x2max:
        #print "B"
        right = find_nearest(x2,max(x1))
        if Type == "A" or Type == "C":
            yinterp_right = np.zeros(len(x2)-right)
        elif Type == "T":
            yinterp_right = np.ones(len(x2)-right)
        yinterp_middle = f(x2[:right])
        yinterp = np.concatenate([yinterp_middle, yinterp_right])
    # Case C: x2 extends beyond x1 only on the left -> pad left end.
    elif x1min > x2min and x1max >= x2max:
        #print "C"
        left = find_nearest(x2,min(x1))+1
        if Type == "A" or Type == "C":
            yinterp_left = np.zeros(left)
        elif Type == "T":
            yinterp_left = np.ones(left)
        yinterp_middle = f(x2[left:])
        yinterp = np.concatenate([yinterp_left,yinterp_middle])
    # Case D: x2 lies entirely inside x1's range -> plain interpolation.
    else:
        #print "D"
        yinterp = f(x2)
    return yinterp
def merge_list(list_d, param=5):
    """
    Merge runs of nearby values in `list_d` (consecutive gap <= param) into
    their midpoints; values not part of any run are kept as-is.
    Returns a new list; assumes list_d is sorted ascending (gaps are signed
    via abs, but run grouping presumes order) -- NOTE(review): verify caller
    always passes sorted input.
    """
    new_list = []
    # compare_list[j] is True when list_d[j+1] is within `param` of list_d[j].
    compare_list = []
    tag = []
    prev = -100
    begin = True
    if list_d == []:
        return []
    elif len(list_d) == 1:
        return list_d
    else:
        pass
    for i in list_d:
        if begin:
            begin = False
            prev = i
            continue
        compare_list.append(abs(i-prev) <= param)
        prev = i
    for j,id in enumerate(compare_list):
        if id == True:
            # Extend the current run (seed it with both endpoints first time).
            if tag == []:
                tag.append(list_d[j])
                tag.append(list_d[j+1])
            else:
                tag.append(list_d[j+1])
        else:
            try:
                # Close the run: emit its midpoint.
                value = tag[0]+(tag[-1]-tag[0])/2
                new_list.append(value)
            except:
                # NOTE(review): bare except; IndexError on empty `tag` means
                # the previous element was isolated -- keep it unchanged.
                new_list.append(list_d[j])
            tag = []
    # Flush a run still open at the end of the list.
    if tag != []:
        new_list.append(tag[0]+(tag[-1]-tag[0])/2)
    # A trailing isolated element was never emitted by the loop above.
    if compare_list[-1] == False:
        new_list.append(list_d[-1])
    return new_list
def list_merger(indexs, value, param=5, method = "max"):
    """
    Merge runs of nearby indices (consecutive gap <= param) into a single
    representative per run.
    method "max": append the position in `value` holding the run's maximum.
    method "mid": append the midpoint of the run's index values.
    NOTE(review): the two methods return different kinds of quantities (a
    position in `value` vs. an index midpoint); confirm this asymmetry is
    intended. `value.index(max(...))` also returns the FIRST occurrence of
    the maximum, which may lie outside the run when values repeat.
    """
    new_list = []
    # compare_list[j] is True when indexs[j+1] is within `param` of indexs[j].
    compare_list = []
    tag = []
    prev = -100
    begin = True
    if indexs == []:
        return []
    elif len(indexs) == 1:
        return indexs
    else:
        pass
    for i in indexs:
        if begin:
            begin = False
            prev = i
            continue
        compare_list.append(abs(i-prev) <= param)
        prev = i
    for j,id in enumerate(compare_list):
        if id == True:
            # Extend the current run (seed with both endpoints first time).
            if tag == []:
                tag.append(indexs[j])
                tag.append(indexs[j+1])
            else:
                tag.append(indexs[j+1])
        else:
            # Close the run (an isolated element forms a one-element run).
            if tag == []:
                tag.append(indexs[j])
            if method == "max":
                y = map(value.__getitem__, tag)
                max_index = list(value).index(max(y))
                new_list.append(max_index)
            elif method == "mid":
                new_list.append(tag[0]+(tag[-1]-tag[0])/2)
            tag = []
    # Flush a run still open at the end of the list.
    if tag != []:
        if method == "max":
            y = map(value.__getitem__, tag)
            max_index = list(value).index(max(y))
            new_list.append(max_index)
        elif method == "mid":
            new_list.append(tag[0]+(tag[-1]-tag[0])/2)
    # A trailing isolated element was never emitted by the loop above.
    if compare_list[-1] == False:
        new_list.append(indexs[-1])
    return new_list
|
|
# -*- coding: utf-8 -*-
'''
Script that generates and analyzes a synthetic set of PMS data. These data differ from the data used in the paper but
capture important elements of what is presented in the paper.
Inference generation requires use of the logistigate package, available at https://logistigate.readthedocs.io/en/main/. Running
the generateSyntheticData() function generates Figures 2, 3, and 4, as well as the interval widths for Tables 1 and 2,
that are analogous to the items produced using the de-identified data.
'''
from logistigate.logistigate import utilities as util # Pull from the submodule "develop" branch
from logistigate.logistigate import methods
from logistigate.logistigate import lg
def generateSyntheticData():
    '''
    Script for forming a synthetic data set of 25 test nodes and 25 supply nodes.

    Generates test results under known SFP (substandard/falsified product) rates
    for a tracked setting and two untracked settings, runs posterior inference
    via logistigate, and plots 90% posterior intervals for supply nodes and test
    nodes in each setting. Displays plots as a side effect and returns None.
    '''
    '''
    Use a generated sourcing-probability matrix to produce 500 samples under specified random seeds
    '''
    import numpy as np
    import random
    # Sourcing-probability row shared by all test nodes in the tracked setting;
    # shuffled under a fixed seed so the data set is reproducible.
    Qrow = np.array([.01, .01, .01, .01, .01, .01, .01, .01, .01, .01, .01, .01,
                     .02, .02, .02, .03, .03, .05, .05, .07, .07, .07, .10, .15, .20])
    random.seed(3)
    random.shuffle(Qrow)
    # Qrow: [0.01, 0.03, 0.1 , 0.02, 0.01, 0.01, 0.07, 0.01, 0.01, 0.02, 0.2, 0.02,
    #        0.01, 0.01, 0.07, 0.15, 0.01, 0.01, 0.03, 0.07, 0.01, 0.01, 0.05, 0.05, 0.01])
    # SN rates: 1% baseline; 20% node: 25%, 5% node: ~25/30%, 7% node: 10%, 2% node: 40%
    # TN rates: 1% baseline; 1 major node: 25%, 1 minor node: 30%; 3 minor nodes: 10%; 1 minor minor node: 50%
    # --- Network parameters: 25 supply nodes (manufacturers), 25 test nodes (districts) ---
    numTN, numSN = 25, 25
    numSamples = 500
    s, r = 1.0, 1.0  # diagnostic sensitivity and specificity (perfect diagnostics)
    SNnames = ['Manufacturer ' + str(i + 1) for i in range(numSN)]
    TNnames = ['District ' + str(i + 1) for i in range(numTN)]
    trueRates = np.zeros(numSN + numTN)  # importers first, outlets second
    SNtrueRates = [.02 for i in range(numSN)]
    SN1ind = 3 # 40% SFP rate
    SN2ind = 10 # 25% SFP rate, major node
    SN3ind = 14 # 10% SFP rate, minor node
    SN4ind = 22 # 20% SFP rate, minor node
    SNtrueRates[SN1ind], SNtrueRates[SN2ind] = 0.35, 0.25
    SNtrueRates[SN3ind], SNtrueRates[SN4ind] = 0.1, 0.25
    trueRates[:numSN] = SNtrueRates # SN SFP rates
    TN1ind = 5 # 20% sampled node, 25% SFP rate
    TN2inds = [2, 11, 14, 22] # 10% sampled
    TN3inds = [3, 6, 8, 10, 16, 17, 24] # 3% sampled
    TN4inds = [0, 1, 9, 12, 18, 23] # 2% sampled
    TNsampProbs = [.01 for i in range(numTN)] # Update sampling probs
    TNsampProbs[TN1ind] = 0.20
    for j in TN2inds:
        TNsampProbs[j] = 0.10
    for j in TN3inds:
        TNsampProbs[j] = 0.03
    for j in TN4inds:
        TNsampProbs[j] = 0.02
    #print(np.sum(TNsampProbs)) # sampling probability should add up to 1.0
    TNtrueRates = [.02 for i in range(numTN)] # Update SFP rates for TNs
    TNtrueRates[TN1ind] = 0.2
    TNtrueRates[TN2inds[1]] = 0.1
    TNtrueRates[TN2inds[2]] = 0.1
    TNtrueRates[TN3inds[1]] = 0.4
    trueRates[numSN:] = TNtrueRates # Put TN rates in main vector
    rseed = 56 # Change the seed here to get a different set of tests
    random.seed(rseed)
    np.random.seed(rseed+1)
    # Draw test results: pick a TN by sampling probability, an SN via Qrow, then
    # a Bernoulli outcome at the combined SFP rate (SFP if either node is bad).
    testingDataList = []
    for currSamp in range(numSamples):
        currTN = random.choices(TNnames, weights=TNsampProbs, k=1)[0]
        #if not currTN == 'District '
        currSN = random.choices(SNnames, weights=Qrow, k=1)[0] #[TNnames.index(currTN)] to index Q
        currTNrate = trueRates[numSN + TNnames.index(currTN)]
        currSNrate = trueRates[SNnames.index(currSN)]
        realRate = currTNrate + currSNrate - currTNrate * currSNrate
        realResult = np.random.binomial(1, p=realRate)
        if realResult == 1:
            result = np.random.binomial(1, p = s)
        if realResult == 0:
            result = np.random.binomial(1, p = 1. - r)
        testingDataList.append([currTN, currSN, result])
    # Inspect testing data; check: (1) overall SFP rate, (2) plots, (3) N, Y matrices align more or less with
    # statements from case-study section
    # --- Inference settings: Laplace prior on logit rates, NUTS MCMC via logistigate ---
    priorMean, priorScale = -2.5, 1.3
    numPostSamps = 1000
    # NOTE(review): 'Madapt'/'delta' are passed through to logistigate's NUTS
    # sampler — confirm their semantics against that package's documentation.
    MCMCdict = {'MCMCtype': 'NUTS', 'Madapt': 5000, 'delta': 0.4}
    lowerQuant, upperQuant = 0.05, 0.95
    import scipy.special as spsp
    import scipy.stats as sps
    import matplotlib.pyplot as plt
    priorLower = spsp.expit(sps.laplace.ppf(lowerQuant, loc=priorMean, scale=priorScale))
    priorUpper = spsp.expit(sps.laplace.ppf(upperQuant, loc=priorMean, scale=priorScale))
    # --- Tracked-setting inference ---
    lgDict = util.testresultsfiletotable(testingDataList, csvName=False)
    print('size: '+str(lgDict['N'].shape)+', obsvns: '+str(lgDict['N'].sum())+', propor pos: '+str(lgDict['Y'].sum() / lgDict['N'].sum()))
    lgDict.update({'diagSens': 1.0, 'diagSpec': 1.0, 'numPostSamples': numPostSamps,
                   'prior': methods.prior_laplace(mu=priorMean, scale=priorScale), 'MCMCdict': MCMCdict})
    lgDict = lg.runlogistigate(lgDict)
    numSN, numTN = lgDict['importerNum'], lgDict['outletNum']
    floorVal = 0.05 # Classification lines
    ceilVal = 0.25
    # Supply-node plot
    SNindsSubset = range(numSN)
    SNnames = [lgDict['importerNames'][i] for i in SNindsSubset]
    SNlowers = [np.quantile(lgDict['postSamples'][:, l], lowerQuant) for l in SNindsSubset]
    SNuppers = [np.quantile(lgDict['postSamples'][:, l], upperQuant) for l in SNindsSubset]
    # Nodes are split into three display groups: (1) interval entirely above the
    # floor (red), (2) interval straddling floor and ceiling (orange), (3) the
    # remainder (green); each group is sorted by interval midpoint, descending.
    # First group
    SNlowers1 = [i for i in SNlowers if i > floorVal]
    SNuppers1 = [SNuppers[ind] for ind, i in enumerate(SNlowers) if i > floorVal]
    SNnames1 = [SNnames[ind] for ind, i in enumerate(SNlowers) if i > floorVal]
    midpoints1 = [SNuppers1[i] - (SNuppers1[i] - SNlowers1[i]) / 2 for i in range(len(SNuppers1))]
    zippedList1 = zip(midpoints1, SNuppers1, SNlowers1, SNnames1)
    sorted_pairs1 = sorted(zippedList1, reverse=True)
    SNnamesSorted1 = [tup[-1] for tup in sorted_pairs1]
    # Second group
    SNuppers2 = [i for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    SNlowers2 = [SNlowers[ind] for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    SNnames2 = [SNnames[ind] for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    midpoints2 = [SNuppers2[i] - (SNuppers2[i] - SNlowers2[i]) / 2 for i in range(len(SNuppers2))]
    zippedList2 = zip(midpoints2, SNuppers2, SNlowers2, SNnames2)
    sorted_pairs2 = sorted(zippedList2, reverse=True)
    SNnamesSorted2 = [tup[-1] for tup in sorted_pairs2]
    # Third group
    SNuppers3 = [i for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    SNlowers3 = [SNlowers[ind] for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    SNnames3 = [SNnames[ind] for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    midpoints3 = [SNuppers3[i] - (SNuppers3[i] - SNlowers3[i]) / 2 for i in range(len(SNuppers3))]
    zippedList3 = zip(midpoints3, SNuppers3, SNlowers3, SNnames3)
    sorted_pairs3 = sorted(zippedList3, reverse=True)
    SNnamesSorted3 = [tup[-1] for tup in sorted_pairs3]
    # Combine groups; blank names act as visual spacers between groups
    SNnamesSorted = SNnamesSorted1.copy()
    SNnamesSorted.append(' ')
    SNnamesSorted = SNnamesSorted + SNnamesSorted2
    SNnamesSorted.append(' ')
    SNnamesSorted = SNnamesSorted + SNnamesSorted3
    SNnamesSorted.append(' ')
    SNnamesSorted.append('(Prior)')
    fig, (ax) = plt.subplots(figsize=(10, 6), ncols=1)
    for _, upper, lower, name in sorted_pairs1:
        plt.plot((name, name), (lower, upper), 'o-', color='red')
    plt.plot(('', ''), (np.nan, np.nan), 'o-', color='red')
    for _, upper, lower, name in sorted_pairs2:
        plt.plot((name, name), (lower, upper), 'o--', color='orange')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o--', color='orange')
    for _, upper, lower, name in sorted_pairs3:
        plt.plot((name, name), (lower, upper), 'o:', color='green')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o:', color='green')
    plt.plot((SNnamesSorted[-1], SNnamesSorted[-1]), (priorLower, priorUpper), 'o-', color='gray')
    plt.ylim([0, 1])
    plt.xticks(range(len(SNnamesSorted)), SNnamesSorted, rotation=90)
    plt.title('Supply Node 90% Intervals\nManufacturer-District Analysis, Tracked Setting',
              fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
    plt.xlabel('Supply Node Name', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    plt.ylabel('Interval value', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    for label in (ax.get_xticklabels() + ax.get_yticklabels()):
        label.set_fontname('Times New Roman')
        label.set_fontsize(12)
    plt.axhline(y=floorVal, color='r', linestyle='-', alpha=0.1)  # line for 'l'
    plt.axhline(y=ceilVal, color='blue', linestyle='-', alpha=0.1)  # line for 'u'
    plt.text(26.3, ceilVal + .015, 'u=0.25', color='blue', alpha=0.5, size=9)
    plt.text(26.3, floorVal + .015, 'l=0.05', color='r', alpha=0.5, size=9)
    fig.tight_layout()
    plt.show()
    plt.close()
    # Test-node plot (same grouping/plotting scheme as the supply-node plot)
    TNindsSubset = range(numTN)
    TNnames = [lgDict['outletNames'][i] for i in TNindsSubset]
    TNlowers = [np.quantile(lgDict['postSamples'][:, numSN + l], lowerQuant) for l in TNindsSubset]
    TNuppers = [np.quantile(lgDict['postSamples'][:, numSN + l], upperQuant) for l in TNindsSubset]
    # First group
    TNlowers1 = [i for i in TNlowers if i > floorVal]
    TNuppers1 = [TNuppers[ind] for ind, i in enumerate(TNlowers) if i > floorVal]
    TNnames1 = [TNnames[ind] for ind, i in enumerate(TNlowers) if i > floorVal]
    midpoints1 = [TNuppers1[i] - (TNuppers1[i] - TNlowers1[i]) / 2 for i in range(len(TNuppers1))]
    zippedList1 = zip(midpoints1, TNuppers1, TNlowers1, TNnames1)
    sorted_pairs1 = sorted(zippedList1, reverse=True)
    TNnamesSorted1 = [tup[-1] for tup in sorted_pairs1]
    # Second group
    TNuppers2 = [i for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    TNlowers2 = [TNlowers[ind] for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    TNnames2 = [TNnames[ind] for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    midpoints2 = [TNuppers2[i] - (TNuppers2[i] - TNlowers2[i]) / 2 for i in range(len(TNuppers2))]
    zippedList2 = zip(midpoints2, TNuppers2, TNlowers2, TNnames2)
    sorted_pairs2 = sorted(zippedList2, reverse=True)
    TNnamesSorted2 = [tup[-1] for tup in sorted_pairs2]
    # Third group
    TNuppers3 = [i for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    TNlowers3 = [TNlowers[ind] for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    TNnames3 = [TNnames[ind] for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    midpoints3 = [TNuppers3[i] - (TNuppers3[i] - TNlowers3[i]) / 2 for i in range(len(TNuppers3))]
    zippedList3 = zip(midpoints3, TNuppers3, TNlowers3, TNnames3)
    sorted_pairs3 = sorted(zippedList3, reverse=True)
    TNnamesSorted3 = [tup[-1] for tup in sorted_pairs3]
    # Combine groups
    TNnamesSorted = TNnamesSorted1.copy()
    TNnamesSorted.append(' ')
    TNnamesSorted = TNnamesSorted + TNnamesSorted2
    TNnamesSorted.append(' ')
    TNnamesSorted = TNnamesSorted + TNnamesSorted3
    TNnamesSorted.append(' ')
    TNnamesSorted.append('(Prior)')
    fig, (ax) = plt.subplots(figsize=(10, 6), ncols=1)
    for _, upper, lower, name in sorted_pairs1:
        plt.plot((name, name), (lower, upper), 'o-', color='red')
    plt.plot(('', ''), (np.nan, np.nan), 'o-', color='red')
    for _, upper, lower, name in sorted_pairs2:
        plt.plot((name, name), (lower, upper), 'o--', color='orange')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o--', color='orange')
    for _, upper, lower, name in sorted_pairs3:
        plt.plot((name, name), (lower, upper), 'o:', color='green')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o:', color='green')
    plt.plot((TNnamesSorted[-1], TNnamesSorted[-1]), (priorLower, priorUpper), 'o-', color='gray')
    plt.ylim([0, 1])
    plt.xticks(range(len(TNnamesSorted)), TNnamesSorted, rotation=90)
    plt.title('Test Node 90% Intervals\nManufacturer-District Analysis, Tracked Setting',
              fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
    plt.xlabel('Test Node Name', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    plt.ylabel('Interval value', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    for label in (ax.get_xticklabels() + ax.get_yticklabels()):
        label.set_fontname('Times New Roman')
        label.set_fontsize(12)
    plt.axhline(y=floorVal, color='r', linestyle='-', alpha=0.1)  # line for 'l'
    plt.axhline(y=ceilVal, color='blue', linestyle='-', alpha=0.1)  # line for 'u'
    plt.text(26.4, ceilVal + .015, 'u=0.25', color='blue', alpha=0.5, size=9)
    plt.text(26.4, floorVal + .015, 'l=0.05', color='r', alpha=0.5, size=9)
    fig.tight_layout()
    plt.show()
    plt.close()
    # How many observed arcs are there?
    #np.count_nonzero(lgDict['N'])
    '''
    # Inspect raw data totals
    # Supply nodes
    for i in range(numSN): # sum across TNs to see totals for SNs
        currTotal = np.sum(lgDict['N'],axis=0)[i]
        currPos = np.sum(lgDict['Y'],axis=0)[i]
        print(lgDict['importerNames'][i]+': ' +str(currTotal)[:-2]+' samples, '
              + str(currPos)[:-2] + ' positives, ' + str(currPos/currTotal)[:5] + ' rate')
    # Test nodes
    for i in range(numTN): # sum across SNs to see totals for TNs
        currTotal = np.sum(lgDict['N'],axis=1)[i]
        currPos = np.sum(lgDict['Y'],axis=1)[i]
        print(lgDict['outletNames'][i]+': ' +str(currTotal)[:-2]+' samples, '
              + str(currPos)[:-2] + ' positives, ' + str(currPos/currTotal)[:5] + ' rate')
    # SNs, TNs with at least ten samples and 10% SFP rate
    for i in range(numSN): # sum across TNs to see totals for SNs
        currTotal = np.sum(lgDict['N'],axis=0)[i]
        currPos = np.sum(lgDict['Y'],axis=0)[i]
        if currPos/currTotal>0.1 and currTotal>10:
            print(lgDict['importerNames'][i]+': ' +str(currTotal)[:-2]+' samples, '
              + str(currPos)[:-2] + ' positives, ' + str(currPos/currTotal)[:5] + ' rate')
    # Test nodes
    for i in range(numTN): # sum across SNs to see totals for TNs
        currTotal = np.sum(lgDict['N'],axis=1)[i]
        currPos = np.sum(lgDict['Y'],axis=1)[i]
        if currPos / currTotal > 0.1 and currTotal > 10:
            print(lgDict['outletNames'][i]+': ' +str(currTotal)[:-2]+' samples, '
              + str(currPos)[:-2] + ' positives, ' + str(currPos/currTotal)[:5] + ' rate')
    # 90% intervals for SFP rates at SNs, TNs, using proportion CI
    for i in range(numSN): # sum across TNs to see totals for SNs
        currTotal = np.sum(lgDict['N'], axis=0)[i]
        currPos = np.sum(lgDict['Y'], axis=0)[i]
        pHat = currPos/currTotal
        lowerBd = pHat-(1.645*np.sqrt(pHat*(1-pHat)/currTotal))
        upperBd = pHat+(1.645*np.sqrt(pHat*(1-pHat)/currTotal))
        print(lgDict['importerNames'][i]+': ('+str(lowerBd)[:5]+', '+str(upperBd)[:5]+')')
    # Test nodes
    for i in range(numTN): # sum across SNs to see totals for TNs
        currTotal = np.sum(lgDict['N'], axis=1)[i]
        currPos = np.sum(lgDict['Y'], axis=1)[i]
        pHat = currPos / currTotal
        lowerBd = pHat - (1.645 * np.sqrt(pHat * (1 - pHat) / currTotal))
        upperBd = pHat + (1.645 * np.sqrt(pHat * (1 - pHat) / currTotal))
        print(lgDict['outletNames'][i] + ': (' + str(lowerBd)[:5] + ', ' + str(upperBd)[:5] + ')')
    # Print quantiles for analysis tables
    SNinds = lgDict['importerNames'].index('Manufacturer 4')
    print('Manufacturer 4: (' + str(np.quantile(lgDict['postSamples'][:, SNinds], 0.05))[:5] + ',' + str(
        np.quantile(lgDict['postSamples'][:, SNinds], 0.95))[:5] + ')')
    SNinds = lgDict['importerNames'].index('Manufacturer 11')
    print('Manufacturer 11: (' + str(np.quantile(lgDict['postSamples'][:, SNinds], 0.05))[:5] + ',' + str(
        np.quantile(lgDict['postSamples'][:, SNinds], 0.95))[:5] + ')')
    SNinds = lgDict['importerNames'].index('Manufacturer 23')
    print('Manufacturer 23: (' + str(np.quantile(lgDict['postSamples'][:, SNinds], 0.05))[:5] + ',' + str(
        np.quantile(lgDict['postSamples'][:, SNinds], 0.95))[:5] + ')')
    TNinds = lgDict['outletNames'].index('District 6')
    print('District 6: (' + str(np.quantile(lgDict['postSamples'][:, len(lgDict['importerNames']) + TNinds], 0.05))[
          :5] + ',' + str(np.quantile(lgDict['postSamples'][:, len(lgDict['importerNames']) + TNinds], 0.95))[:5] + ')')
    TNinds = lgDict['outletNames'].index('District 7')
    print('District 7: (' + str(np.quantile(lgDict['postSamples'][:, len(lgDict['importerNames']) + TNinds], 0.05))[
          :5] + ',' + str(np.quantile(lgDict['postSamples'][:, len(lgDict['importerNames']) + TNinds], 0.95))[:5] + ')')
    '''
    # --- Untracked-setting analysis: estimate Q from observed N, then collapse
    # N and Y over supply nodes before re-running inference ---
    lgDict = {}
    lgDict = util.testresultsfiletotable(testingDataList, csvName=False)
    Qest = lgDict['N'].copy()  # Generate Q
    for i, Nrow in enumerate(lgDict['N']):
        Qest[i] = Nrow / np.sum(Nrow)
    # Update N and Y
    lgDict.update({'N': np.sum(lgDict['N'], axis=1), 'Y': np.sum(lgDict['Y'], axis=1)})
    print('size: ' + str(lgDict['N'].shape) + ', obsvns: ' + str(lgDict['N'].sum()) + ', propor pos: ' + str(
        lgDict['Y'].sum() / lgDict['N'].sum()))
    lgDict.update({'type': 'Untracked','diagSens': 1.0, 'diagSpec': 1.0, 'numPostSamples': numPostSamps,
                   'prior': methods.prior_laplace(mu=priorMean, scale=priorScale), 'MCMCdict': MCMCdict,
                   'transMat': Qest, 'importerNum': Qest.shape[1], 'outletNum': Qest.shape[0]})
    lgDict = methods.GeneratePostSamples(lgDict)
    numSN, numTN = lgDict['importerNum'], lgDict['outletNum']
    # Supply-node plot (same grouping scheme as the tracked setting)
    SNindsSubset = range(numSN)
    SNnames = [lgDict['importerNames'][i] for i in SNindsSubset]
    SNlowers = [np.quantile(lgDict['postSamples'][:, l], lowerQuant) for l in SNindsSubset]
    SNuppers = [np.quantile(lgDict['postSamples'][:, l], upperQuant) for l in SNindsSubset]
    # First group
    SNlowers1 = [i for i in SNlowers if i > floorVal]
    SNuppers1 = [SNuppers[ind] for ind, i in enumerate(SNlowers) if i > floorVal]
    SNnames1 = [SNnames[ind] for ind, i in enumerate(SNlowers) if i > floorVal]
    midpoints1 = [SNuppers1[i] - (SNuppers1[i] - SNlowers1[i]) / 2 for i in range(len(SNuppers1))]
    zippedList1 = zip(midpoints1, SNuppers1, SNlowers1, SNnames1)
    sorted_pairs1 = sorted(zippedList1, reverse=True)
    SNnamesSorted1 = [tup[-1] for tup in sorted_pairs1]
    # Second group
    SNuppers2 = [i for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    SNlowers2 = [SNlowers[ind] for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    SNnames2 = [SNnames[ind] for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    midpoints2 = [SNuppers2[i] - (SNuppers2[i] - SNlowers2[i]) / 2 for i in range(len(SNuppers2))]
    zippedList2 = zip(midpoints2, SNuppers2, SNlowers2, SNnames2)
    sorted_pairs2 = sorted(zippedList2, reverse=True)
    SNnamesSorted2 = [tup[-1] for tup in sorted_pairs2]
    # Third group
    SNuppers3 = [i for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    SNlowers3 = [SNlowers[ind] for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    SNnames3 = [SNnames[ind] for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    midpoints3 = [SNuppers3[i] - (SNuppers3[i] - SNlowers3[i]) / 2 for i in range(len(SNuppers3))]
    zippedList3 = zip(midpoints3, SNuppers3, SNlowers3, SNnames3)
    sorted_pairs3 = sorted(zippedList3, reverse=True)
    SNnamesSorted3 = [tup[-1] for tup in sorted_pairs3]
    # Combine groups
    SNnamesSorted = SNnamesSorted1.copy()
    SNnamesSorted.append(' ')
    SNnamesSorted = SNnamesSorted + SNnamesSorted2
    SNnamesSorted.append(' ')
    SNnamesSorted = SNnamesSorted + SNnamesSorted3
    SNnamesSorted.append(' ')
    SNnamesSorted.append('(Prior)')
    fig, (ax) = plt.subplots(figsize=(10, 6), ncols=1)
    for _, upper, lower, name in sorted_pairs1:
        plt.plot((name, name), (lower, upper), 'o-', color='red')
    plt.plot(('', ''), (np.nan, np.nan), 'o-', color='red')
    for _, upper, lower, name in sorted_pairs2:
        plt.plot((name, name), (lower, upper), 'o--', color='orange')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o--', color='orange')
    for _, upper, lower, name in sorted_pairs3:
        plt.plot((name, name), (lower, upper), 'o:', color='green')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o:', color='green')
    plt.plot((SNnamesSorted[-1], SNnamesSorted[-1]), (priorLower, priorUpper), 'o-', color='gray')
    plt.ylim([0, 1])
    plt.xticks(range(len(SNnamesSorted)), SNnamesSorted, rotation=90)
    plt.title('Supply Node 90% Intervals\nManufacturer-District Analysis, Untracked Setting',
              fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
    plt.xlabel('Supply Node Name', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    plt.ylabel('Interval value', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    for label in (ax.get_xticklabels() + ax.get_yticklabels()):
        label.set_fontname('Times New Roman')
        label.set_fontsize(12)
    plt.axhline(y=floorVal, color='r', linestyle='-', alpha=0.1)  # line for 'l'
    plt.axhline(y=ceilVal, color='blue', linestyle='-', alpha=0.1)  # line for 'u'
    plt.text(26.3, ceilVal + .015, 'u=0.25', color='blue', alpha=0.5, size=9)
    plt.text(26.3, floorVal + .015, 'l=0.05', color='r', alpha=0.5, size=9)
    fig.tight_layout()
    plt.show()
    plt.close()
    # Test-node plot
    TNindsSubset = range(numTN)
    TNnames = [lgDict['outletNames'][i] for i in TNindsSubset]
    TNlowers = [np.quantile(lgDict['postSamples'][:, numSN + l], lowerQuant) for l in TNindsSubset]
    TNuppers = [np.quantile(lgDict['postSamples'][:, numSN + l], upperQuant) for l in TNindsSubset]
    # First group
    TNlowers1 = [i for i in TNlowers if i > floorVal]
    TNuppers1 = [TNuppers[ind] for ind, i in enumerate(TNlowers) if i > floorVal]
    TNnames1 = [TNnames[ind] for ind, i in enumerate(TNlowers) if i > floorVal]
    midpoints1 = [TNuppers1[i] - (TNuppers1[i] - TNlowers1[i]) / 2 for i in range(len(TNuppers1))]
    zippedList1 = zip(midpoints1, TNuppers1, TNlowers1, TNnames1)
    sorted_pairs1 = sorted(zippedList1, reverse=True)
    TNnamesSorted1 = [tup[-1] for tup in sorted_pairs1]
    # Second group
    TNuppers2 = [i for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    TNlowers2 = [TNlowers[ind] for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    TNnames2 = [TNnames[ind] for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    midpoints2 = [TNuppers2[i] - (TNuppers2[i] - TNlowers2[i]) / 2 for i in range(len(TNuppers2))]
    zippedList2 = zip(midpoints2, TNuppers2, TNlowers2, TNnames2)
    sorted_pairs2 = sorted(zippedList2, reverse=True)
    TNnamesSorted2 = [tup[-1] for tup in sorted_pairs2]
    # Third group
    TNuppers3 = [i for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    TNlowers3 = [TNlowers[ind] for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    TNnames3 = [TNnames[ind] for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    midpoints3 = [TNuppers3[i] - (TNuppers3[i] - TNlowers3[i]) / 2 for i in range(len(TNuppers3))]
    zippedList3 = zip(midpoints3, TNuppers3, TNlowers3, TNnames3)
    sorted_pairs3 = sorted(zippedList3, reverse=True)
    TNnamesSorted3 = [tup[-1] for tup in sorted_pairs3]
    # Combine groups
    TNnamesSorted = TNnamesSorted1.copy()
    TNnamesSorted.append(' ')
    TNnamesSorted = TNnamesSorted + TNnamesSorted2
    TNnamesSorted.append(' ')
    TNnamesSorted = TNnamesSorted + TNnamesSorted3
    TNnamesSorted.append(' ')
    TNnamesSorted.append('(Prior)')
    fig, (ax) = plt.subplots(figsize=(10, 6), ncols=1)
    for _, upper, lower, name in sorted_pairs1:
        plt.plot((name, name), (lower, upper), 'o-', color='red')
    plt.plot(('', ''), (np.nan, np.nan), 'o-', color='red')
    for _, upper, lower, name in sorted_pairs2:
        plt.plot((name, name), (lower, upper), 'o--', color='orange')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o--', color='orange')
    for _, upper, lower, name in sorted_pairs3:
        plt.plot((name, name), (lower, upper), 'o:', color='green')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o:', color='green')
    plt.plot((TNnamesSorted[-1], TNnamesSorted[-1]), (priorLower, priorUpper), 'o-', color='gray')
    plt.ylim([0, 1])
    plt.xticks(range(len(TNnamesSorted)), TNnamesSorted, rotation=90)
    plt.title('Test Node 90% Intervals\nManufacturer-District Analysis, Untracked Setting',
              fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
    plt.xlabel('Test Node Name', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    plt.ylabel('Interval value', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    for label in (ax.get_xticklabels() + ax.get_yticklabels()):
        label.set_fontname('Times New Roman')
        label.set_fontsize(12)
    plt.axhline(y=floorVal, color='r', linestyle='-', alpha=0.1)  # line for 'l'
    plt.axhline(y=ceilVal, color='blue', linestyle='-', alpha=0.1)  # line for 'u'
    plt.text(26.4, ceilVal + .015, 'u=0.25', color='blue', alpha=0.5, size=9)
    plt.text(26.4, floorVal + .015, 'l=0.05', color='r', alpha=0.5, size=9)
    fig.tight_layout()
    plt.show()
    plt.close()
    # Generate data with a different Q, same underlying SFP rates, UNTRACKED
    # NOTE(review): this first Qrow assignment is dead — it is overwritten
    # inside the loop below before being read.
    Qrow = np.array([.01, .01, .01, .01, .01, .01, .01, .01, .01, .01, .01, .01,
                     .02, .02, .02, .03, .03, .05, .05, .07, .07, .07, .10, .15, .20])
    Q = np.zeros(shape=(numTN,numSN))
    #base = 4
    # Build a per-TN sourcing matrix: each row is a sparse probability vector
    # shuffled under a TN-specific seed.
    for i in range(numTN):
        Qrow = np.array([.0, .0, .0, .0, .0, .0, .0, .0, .0, .0, .0, .0,
                         .0, .0, .0, .0, .0, .0, .0, .0, .0, .1, .1, .2, .6])
        '''
        if i < 4:
            random.seed(4+base)
        elif i >= 4 and i < 8:
            random.seed(5+base)
        elif i >= 8 and i < 12:
            random.seed(6+base)
        elif i >= 12 and i < 16:
            random.seed(7+base)
        elif i >= 16:
            random.seed(8+base)
        '''
        random.seed(i+10)
        random.shuffle(Qrow)
        random.shuffle(Qrow)
        Q[i] = Qrow
    '''
    for i in range(numSN):
        if np.sum(Q[:,i]) == 0.0:
            print(i)
        print(np.sum(Q[:,i]))
    '''
    # Overall SFP rate: 10-20%
    # SN rates: 1% baseline; 20% node: 25%, 5% node: ~25/30%, 7% node: 10%, 2% node: 40%
    # TN rates: 1% baseline; 1 major node: 25%, 1 minor node: 30%; 3 minor nodes: 10%; 1 minor minor node: 50%
    numTN, numSN = 25, 25
    numSamples = 500
    s, r = 1.0, 1.0
    SNnames = ['Manufacturer ' + str(i + 1) for i in range(numSN)]
    TNnames = ['District ' + str(i + 1) for i in range(numTN)]
    trueRates = np.zeros(numSN + numTN)  # importers first, outlets second
    SNtrueRates = [.02 for i in range(numSN)]
    SN1ind = 3 # 40% SFP rate
    SN2ind = 10 # 25% SFP rate, major node
    SN3ind = 14 # 10% SFP rate, minor node
    SN4ind = 22 # 20% SFP rate, minor node
    SNtrueRates[SN1ind], SNtrueRates[SN2ind] = 0.35, 0.25
    SNtrueRates[SN3ind], SNtrueRates[SN4ind] = 0.1, 0.25
    trueRates[:numSN] = SNtrueRates # SN SFP rates
    TN1ind = 5 # 20% sampled node, 25% SFP rate
    TN2inds = [2, 11, 14, 22] # 10% sampled
    TN3inds = [3, 6, 8, 10, 16, 17, 24] # 3% sampled
    TN4inds = [0, 1, 9, 12, 18, 23] # 2% sampled
    TNsampProbs = [.01 for i in range(numTN)] # Update sampling probs
    TNsampProbs[TN1ind] = 0.20
    for j in TN2inds:
        TNsampProbs[j] = 0.10
    for j in TN3inds:
        TNsampProbs[j] = 0.03
    for j in TN4inds:
        TNsampProbs[j] = 0.02
    print(np.sum(TNsampProbs)) # sampling probability should add up to 1.0
    # NOTE(review): TN baseline is .01 here vs .02 in the first data set —
    # presumably intentional; confirm against the case-study write-up.
    TNtrueRates = [.01 for i in range(numTN)] # Update SFP rates for TNs
    TNtrueRates[TN1ind] = 0.2
    TNtrueRates[TN2inds[1]] = 0.1
    TNtrueRates[TN2inds[2]] = 0.1
    TNtrueRates[TN3inds[1]] = 0.4
    trueRates[numSN:] = TNtrueRates # Put TN rates in main vector
    rseed = 56 # Change the seed here to get a different set of tests
    random.seed(rseed)
    np.random.seed(rseed + 1)
    # Draw test results, this time sourcing each TN's SN from its own Q row.
    testingDataList = []
    for currSamp in range(numSamples):
        currTN = random.choices(TNnames, weights=TNsampProbs, k=1)[0]
        currTNind = TNnames.index(currTN)
        # if not currTN == 'District '
        currSN = random.choices(SNnames, weights=Q[currTNind], k=1)[0]  # [TNnames.index(currTN)] to index Q
        currTNrate = trueRates[numSN + TNnames.index(currTN)]
        currSNrate = trueRates[SNnames.index(currSN)]
        realRate = currTNrate + currSNrate - currTNrate * currSNrate
        realResult = np.random.binomial(1, p=realRate)
        if realResult == 1:
            result = np.random.binomial(1, p=s)
        if realResult == 0:
            result = np.random.binomial(1, p=1. - r)
        testingDataList.append([currTN, currSN, result])
    # Untracked inference on the second data set (Q estimated from observed N).
    lgDict = {}
    lgDict = util.testresultsfiletotable(testingDataList, csvName=False)
    Qest = lgDict['N'].copy()  # Generate Q
    for i, Nrow in enumerate(lgDict['N']):
        Qest[i] = Nrow / np.sum(Nrow)
    # Update N and Y
    lgDict.update({'N': np.sum(lgDict['N'], axis=1), 'Y': np.sum(lgDict['Y'], axis=1)})
    print('size: ' + str(lgDict['N'].shape) + ', obsvns: ' + str(lgDict['N'].sum()) + ', propor pos: ' + str(
        lgDict['Y'].sum() / lgDict['N'].sum()))
    lgDict.update({'type': 'Untracked', 'diagSens': 1.0, 'diagSpec': 1.0, 'numPostSamples': numPostSamps,
                   'prior': methods.prior_laplace(mu=priorMean, scale=priorScale), 'MCMCdict': MCMCdict,
                   'transMat': Qest, 'importerNum': Qest.shape[1], 'outletNum': Qest.shape[0]})
    lgDict = methods.GeneratePostSamples(lgDict)
    numSN, numTN = lgDict['importerNum'], lgDict['outletNum']
    # Supply-node plot for the second untracked data set
    SNindsSubset = range(numSN)
    SNnames = [lgDict['importerNames'][i] for i in SNindsSubset]
    SNlowers = [np.quantile(lgDict['postSamples'][:, l], lowerQuant) for l in SNindsSubset]
    SNuppers = [np.quantile(lgDict['postSamples'][:, l], upperQuant) for l in SNindsSubset]
    # First group
    SNlowers1 = [i for i in SNlowers if i > floorVal]
    SNuppers1 = [SNuppers[ind] for ind, i in enumerate(SNlowers) if i > floorVal]
    SNnames1 = [SNnames[ind] for ind, i in enumerate(SNlowers) if i > floorVal]
    midpoints1 = [SNuppers1[i] - (SNuppers1[i] - SNlowers1[i]) / 2 for i in range(len(SNuppers1))]
    zippedList1 = zip(midpoints1, SNuppers1, SNlowers1, SNnames1)
    sorted_pairs1 = sorted(zippedList1, reverse=True)
    SNnamesSorted1 = [tup[-1] for tup in sorted_pairs1]
    # Second group
    SNuppers2 = [i for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    SNlowers2 = [SNlowers[ind] for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    SNnames2 = [SNnames[ind] for ind, i in enumerate(SNuppers) if (i > ceilVal and SNlowers[ind] <= floorVal)]
    midpoints2 = [SNuppers2[i] - (SNuppers2[i] - SNlowers2[i]) / 2 for i in range(len(SNuppers2))]
    zippedList2 = zip(midpoints2, SNuppers2, SNlowers2, SNnames2)
    sorted_pairs2 = sorted(zippedList2, reverse=True)
    SNnamesSorted2 = [tup[-1] for tup in sorted_pairs2]
    # Third group
    SNuppers3 = [i for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    SNlowers3 = [SNlowers[ind] for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    SNnames3 = [SNnames[ind] for ind, i in enumerate(SNuppers) if (i <= ceilVal and SNlowers[ind] <= floorVal)]
    midpoints3 = [SNuppers3[i] - (SNuppers3[i] - SNlowers3[i]) / 2 for i in range(len(SNuppers3))]
    zippedList3 = zip(midpoints3, SNuppers3, SNlowers3, SNnames3)
    sorted_pairs3 = sorted(zippedList3, reverse=True)
    SNnamesSorted3 = [tup[-1] for tup in sorted_pairs3]
    # Combine groups
    SNnamesSorted = SNnamesSorted1.copy()
    SNnamesSorted.append(' ')
    SNnamesSorted = SNnamesSorted + SNnamesSorted2
    SNnamesSorted.append(' ')
    SNnamesSorted = SNnamesSorted + SNnamesSorted3
    SNnamesSorted.append(' ')
    SNnamesSorted.append('(Prior)')
    fig, (ax) = plt.subplots(figsize=(10, 6), ncols=1)
    for _, upper, lower, name in sorted_pairs1:
        plt.plot((name, name), (lower, upper), 'o-', color='red')
    plt.plot(('', ''), (np.nan, np.nan), 'o-', color='red')
    for _, upper, lower, name in sorted_pairs2:
        plt.plot((name, name), (lower, upper), 'o--', color='orange')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o--', color='orange')
    for _, upper, lower, name in sorted_pairs3:
        plt.plot((name, name), (lower, upper), 'o:', color='green')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o:', color='green')
    plt.plot((SNnamesSorted[-1], SNnamesSorted[-1]), (priorLower, priorUpper), 'o-', color='gray')
    plt.ylim([0, 1])
    plt.xticks(range(len(SNnamesSorted)), SNnamesSorted, rotation=90)
    plt.title('Supply Node 90% Intervals\nManufacturer-District Analysis, Untracked Setting',
              fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
    plt.xlabel('Supply Node Name', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    plt.ylabel('Interval value', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    for label in (ax.get_xticklabels() + ax.get_yticklabels()):
        label.set_fontname('Times New Roman')
        label.set_fontsize(12)
    plt.axhline(y=floorVal, color='r', linestyle='-', alpha=0.1)  # line for 'l'
    plt.axhline(y=ceilVal, color='blue', linestyle='-', alpha=0.1)  # line for 'u'
    plt.text(26.3, ceilVal + .015, 'u=0.25', color='blue', alpha=0.5, size=9)
    plt.text(26.3, floorVal + .015, 'l=0.05', color='r', alpha=0.5, size=9)
    fig.tight_layout()
    plt.show()
    plt.close()
    # Test-node plot
    TNindsSubset = range(numTN)
    TNnames = [lgDict['outletNames'][i] for i in TNindsSubset]
    TNlowers = [np.quantile(lgDict['postSamples'][:, numSN + l], lowerQuant) for l in TNindsSubset]
    TNuppers = [np.quantile(lgDict['postSamples'][:, numSN + l], upperQuant) for l in TNindsSubset]
    # First group
    TNlowers1 = [i for i in TNlowers if i > floorVal]
    TNuppers1 = [TNuppers[ind] for ind, i in enumerate(TNlowers) if i > floorVal]
    TNnames1 = [TNnames[ind] for ind, i in enumerate(TNlowers) if i > floorVal]
    midpoints1 = [TNuppers1[i] - (TNuppers1[i] - TNlowers1[i]) / 2 for i in range(len(TNuppers1))]
    zippedList1 = zip(midpoints1, TNuppers1, TNlowers1, TNnames1)
    sorted_pairs1 = sorted(zippedList1, reverse=True)
    TNnamesSorted1 = [tup[-1] for tup in sorted_pairs1]
    # Second group
    TNuppers2 = [i for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    TNlowers2 = [TNlowers[ind] for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    TNnames2 = [TNnames[ind] for ind, i in enumerate(TNuppers) if (i > ceilVal and TNlowers[ind] <= floorVal)]
    midpoints2 = [TNuppers2[i] - (TNuppers2[i] - TNlowers2[i]) / 2 for i in range(len(TNuppers2))]
    zippedList2 = zip(midpoints2, TNuppers2, TNlowers2, TNnames2)
    sorted_pairs2 = sorted(zippedList2, reverse=True)
    TNnamesSorted2 = [tup[-1] for tup in sorted_pairs2]
    # Third group
    TNuppers3 = [i for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    TNlowers3 = [TNlowers[ind] for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    TNnames3 = [TNnames[ind] for ind, i in enumerate(TNuppers) if (i <= ceilVal and TNlowers[ind] <= floorVal)]
    midpoints3 = [TNuppers3[i] - (TNuppers3[i] - TNlowers3[i]) / 2 for i in range(len(TNuppers3))]
    zippedList3 = zip(midpoints3, TNuppers3, TNlowers3, TNnames3)
    sorted_pairs3 = sorted(zippedList3, reverse=True)
    TNnamesSorted3 = [tup[-1] for tup in sorted_pairs3]
    # Combine groups
    TNnamesSorted = TNnamesSorted1.copy()
    TNnamesSorted.append(' ')
    TNnamesSorted = TNnamesSorted + TNnamesSorted2
    TNnamesSorted.append(' ')
    TNnamesSorted = TNnamesSorted + TNnamesSorted3
    TNnamesSorted.append(' ')
    TNnamesSorted.append('(Prior)')
    fig, (ax) = plt.subplots(figsize=(10, 6), ncols=1)
    for _, upper, lower, name in sorted_pairs1:
        plt.plot((name, name), (lower, upper), 'o-', color='red')
    plt.plot(('', ''), (np.nan, np.nan), 'o-', color='red')
    for _, upper, lower, name in sorted_pairs2:
        plt.plot((name, name), (lower, upper), 'o--', color='orange')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o--', color='orange')
    for _, upper, lower, name in sorted_pairs3:
        plt.plot((name, name), (lower, upper), 'o:', color='green')
    plt.plot((' ', ' '), (np.nan, np.nan), 'o:', color='green')
    plt.plot((TNnamesSorted[-1], TNnamesSorted[-1]), (priorLower, priorUpper), 'o-', color='gray')
    plt.ylim([0, 1])
    plt.xticks(range(len(TNnamesSorted)), TNnamesSorted, rotation=90)
    plt.title('Test Node 90% Intervals\nManufacturer-District Analysis, Untracked Setting',
              fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
    plt.xlabel('Test Node Name', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    plt.ylabel('Interval value', fontdict={'fontsize': 16, 'fontname': 'Trebuchet MS'})
    for label in (ax.get_xticklabels() + ax.get_yticklabels()):
        label.set_fontname('Times New Roman')
        label.set_fontsize(12)
    plt.axhline(y=floorVal, color='r', linestyle='-', alpha=0.1)  # line for 'l'
    plt.axhline(y=ceilVal, color='blue', linestyle='-', alpha=0.1)  # line for 'u'
    plt.text(26.4, ceilVal + .015, 'u=0.25', color='blue', alpha=0.5, size=9)
    plt.text(26.4, floorVal + .015, 'l=0.05', color='r', alpha=0.5, size=9)
    fig.tight_layout()
    plt.show()
    plt.close()
    return
# Run the full synthetic-data generation, inference, and plotting pipeline.
_ = generateSyntheticData()
|
|
from math import ceil
import networkx as nx
class Element():
    """A named quantity of a chemical, e.g. "3 ORE"."""
    def __init__(self, name, amount):
        self.name = name
        self.amount = amount
    def __str__(self):
        return str(self.amount)+" "+self.name
    def __repr__(self):
        return self.__str__()
    def __eq__(self, other):
        # Fix: the original defined __hash__ without __eq__, so two
        # identical-looking Elements hashed but never compared equal.
        return (isinstance(other, Element)
                and self.name == other.name
                and self.amount == other.amount)
    def __hash__(self):
        # Hash the field tuple instead of summing hashes: the old
        # hash(hash(name)+hash(amount)) collides for swapped/shifted values.
        return hash((self.name, self.amount))
class Reaction():
    """One recipe: a list of input Elements producing a single output Element."""
    def __init__(self, reactivs, product):
        self.reactivs = reactivs
        self.product = product
    def __str__(self):
        inputs = [str(item) for item in self.reactivs]
        return ' + '.join(inputs) + " => " + str(self.product)
    def __repr__(self):
        return str(self)
# Parse the puzzle input: each line "A a, B b => C c" becomes a Reaction,
# plus a directed edge product -> reactant in G for every input chemical.
# ORE gets a synthetic base reaction with no inputs.
reactions = {'ORE': Reaction([], Element('ORE', 1))}
G = nx.DiGraph()
with open("input", 'r') as f:
    for raw_line in f.readlines():
        line = raw_line.strip()
        lhs, rhs = line.split('=>')
        inputs = []
        for token in lhs.split(','):
            fields = [part for part in token.split(' ') if part != '']
            inputs.append(Element(fields[1], int(fields[0])))
        out_fields = [part for part in rhs.split(' ') if part != '']
        product = Element(out_fields[1], int(out_fields[0]))
        reactions[product.name] = Reaction(inputs, product)
        for needed in inputs:
            # edge weight is the required amount, stored as a string as before
            G.add_weighted_edges_from([(product.name, needed.name, str(needed.amount))])
# Per-chemical requirement tally, reused between queries.
needs = {}
def clear():
    """Reset the requirement tally to zero for every known chemical."""
    global needs
    needs.update((chemical, 0) for chemical in reactions)
def necesidades():
    """Propagate requirements top-down through the reaction DAG.

    Walks chemicals in topological order (so needs[name] is final when
    visited) and charges each input chemical with enough reaction runs to
    cover the amount currently needed.
    """
    global needs
    for name in nx.topological_sort(G):
        # The number of reaction runs does not depend on the reactant, so
        # compute it once per chemical (the original recomputed the ceil()
        # inside the inner loop, and bound it to `np`, shadowing the common
        # numpy alias).
        runs = ceil(needs[name] / reactions[name].product.amount)
        for reactiv in reactions[name].reactivs:
            needs[reactiv.name] += runs * reactiv.amount
def ore4fuel(f):
    """Return the amount of ORE consumed to produce `f` units of FUEL.

    Resets the global `needs` tally, seeds it with the FUEL request, then
    propagates requirements down the reaction graph.
    """
    clear()
    needs['FUEL'] = f
    necesidades()
    return needs['ORE']
# Part 1: ORE needed for a single unit of FUEL.
print(ore4fuel(1))
# Part 2: binary-search the largest FUEL amount producible from 1e12 ORE.
ORE_BUDGET = 1e12
lower, upper = 1, ORE_BUDGET
while upper - lower > 1:
    mid = (upper + lower) // 2
    if ore4fuel(mid) > ORE_BUDGET:
        upper = mid
    else:
        lower = mid
# The loop can stop with upper still feasible; take it in that case.
if ore4fuel(upper) <= ORE_BUDGET:
    lower = upper
print(int(lower))
|
|
import numpy as np
import sklearn.neighbors
import sklearn.pipeline
import sklearn.svm
import sklearn.decomposition
import sklearn.gaussian_process
import logging
import pickle
import joblib
import time
import heapq
import inspect
from . import loggin
from . import TLS_models
import functools
import collections
import scipy
# Register our custom metric with sklearn's BallTree. The module moved from
# sklearn.neighbors.ball_tree to the private sklearn.neighbors._ball_tree in
# scikit-learn 0.22; catch only AttributeError — the original bare `except:`
# would also have hidden unrelated failures.
try:
    sklearn.neighbors.ball_tree.VALID_METRICS.append("KernelDistance")
except AttributeError:
    sklearn.neighbors._ball_tree.VALID_METRICS.append("KernelDistance")
def pairwise_sample(X, n=10000):
    """Draw random row pairs from X (row indices sampled with replacement).

    2*n candidate pairs are drawn and self-pairs (i == j) discarded, so
    roughly 2*n pairs are returned, not exactly n. Returns two arrays:
    (first rows, second rows).
    """
    candidates = np.random.choice(np.arange(X.shape[0]), size=(n * 2, 2))
    distinct = candidates[candidates[:, 0] != candidates[:, 1]]
    return X[distinct[:, 0]], X[distinct[:, 1]]
def pairwise_dd(X, metric="euclidean", n=10000):
    """Estimate (mean, std) of absolute per-coordinate differences between
    randomly sampled row pairs of X.

    NOTE(review): `metric` is accepted but never used, and
    np.sqrt((x1 - x2)**2) is just an element-wise absolute difference — a
    true euclidean distance would sum over the feature axis before the sqrt.
    The original author flagged this too ("this is unfortunate"); confirm
    intent before changing, since bandwidth heuristics downstream may depend
    on the current values.
    """
    x1, x2 = pairwise_sample(X,n)
    dd = np.sqrt((x1 - x2)**2) #this is unfortunate
    return np.mean(dd), np.std(dd)
def kernel_dist(kernel, x1, x2):
    """Kernel-induced distance between x1 and x2.

    Uses the identity d(a, b)^2 = k(a, a) - 2 k(a, b) + k(b, b). `kernel`
    must be callable as kernel(A) (gram of A with itself) and kernel(A, B).
    Inputs are promoted to 2-D; returns a column vector of shape (n, 1).

    Fix: the original referenced `self.kernel` inside this free function,
    which raised NameError at call time; the `kernel` parameter is used now.
    """
    x1, x2 = tuple(map(np.atleast_2d, [x1, x2]))
    return np.sqrt(
        kernel(x1)[:1].reshape(-1)
        - 2 * kernel(x1, x2).reshape(-1)
        + kernel(x2)[:1].reshape(-1)
    ).reshape((-1, 1))
def KernelDistance(kernel):
    """Wrap `kernel` as a sklearn PyFuncDistance usable by BallTree.

    The dist_metrics module was made private (_dist_metrics) in newer
    scikit-learn; fall back on AttributeError, mirroring the ball_tree
    VALID_METRICS patch at the top of this module.
    """
    try:
        dist_metrics = sklearn.neighbors.dist_metrics
    except AttributeError:
        dist_metrics = sklearn.neighbors._dist_metrics
    return dist_metrics.PyFuncDistance(functools.partial(kernel_dist, kernel))
class UnaryKernel(object):
    '''
    Base class for unary kernels (kernels that accept a pre-computed "distance" as input)
    inputs:
        bandwidth: float or callable that accepts a set of distances and returns a float
        k (optional): int for knn bandwidth iff bandwidth == "knn"
    children should implement:
        __call__: takes in a collection of distances and returns the kernel function evaluated at those values
        support_radius: takes in nothing and returns the support radius of this kernel
        d (optional): the derivative of the kernel function w.r.t. the input "distances"
    '''
    def __init__(self, bandwidth=None, k=None):
        # The literal "knn" selects the data-driven k-nearest-neighbour bandwidth.
        self.bandwidth = self.knn_bandwidth if bandwidth == "knn" else bandwidth
        self.k = k
    def knn_bandwidth(self, x, k=None):
        ''' Computes the bandwidth based on the kth-smallest member of x '''
        k = self.k if k is None else k
        smallest = heapq.nsmallest(k, x) if x.shape[0] > k else x
        return np.max(smallest)
    def apply_bandwidth(self, x):
        ''' Divides x by the bandwidth (evaluating it first if it is adaptive) '''
        if callable(self.bandwidth):
            return x / self.bandwidth(x)
        return x / self.bandwidth
    def __str__(self):
        # Encode the kernel configuration as a stable, filename-friendly tag.
        parts = [type(self).__name__]
        if callable(self.bandwidth):
            parts.append(self.bandwidth.__name__)
        else:
            parts.append("b{:020.010f}".format(self.bandwidth))
        if self.k is not None:
            parts.append("k{:010d}".format(self.k))
        return "_".join(parts)
    def __call__(self, x):
        raise NotImplementedError("UnaryKernel subclasses need to implement __call__")
    def support_radius(self):
        raise NotImplementedError("UnaryKernel subclasses need to implement support_radius")
class UniformKernel(UnaryKernel):
    ''' A "square" kernel that is 1 inside the support_radius and 0 else '''
    def __call__(self, x):
        scaled = self.apply_bandwidth(x)
        # Indicator of |x / bandwidth| <= 1, returned as floats.
        return (np.abs(scaled) <= 1).astype(float)
    def support_radius(self):
        # An adaptive (callable) bandwidth has no fixed support radius.
        return np.inf if callable(self.bandwidth) else self.bandwidth
class TriCubeKernel(UnaryKernel):
    ''' A TriCube Kernel https://en.wikipedia.org/wiki/Kernel_%28statistics%29#Kernel_functions_in_common_use '''
    def __call__(self, x):
        scaled = self.apply_bandwidth(x)
        out = np.zeros(scaled.shape)
        inside = np.abs(scaled) < 1
        out[inside] = (70 / 81) * (1 - np.abs(scaled[inside]) ** 3) ** 3
        return out
    def support_radius(self):
        # Adaptive bandwidths have unbounded support.
        if callable(self.bandwidth):
            return np.inf
        return self.bandwidth
    def d(self, x):
        ''' Derivative of the kernel w.r.t. the input distances (zero outside support). '''
        scaled = self.apply_bandwidth(x)
        out = np.zeros(scaled.shape)
        inside = np.abs(scaled) < 1
        s = scaled[inside]
        # chain rule: divide by bandwidth; (s < 0)*2 - 1 supplies the sign
        out[inside] = (70 / 81) * (9) * (1 - np.abs(s) ** 3) ** 2 * (s ** 2) / self.bandwidth * ((s < 0) * 2 - 1)
        return out
class GaussianKernel(UnaryKernel):
    ''' A Gaussian Kernel https://en.wikipedia.org/wiki/Kernel_%28statistics%29#Kernel_functions_in_common_use '''
    def __call__(self, x):
        # standard normal density of the bandwidth-scaled input
        return scipy.stats.norm.pdf(self.apply_bandwidth(x))
    def support_radius(self):
        # A Gaussian never reaches zero, so its support is unbounded.
        return np.inf
    def d(self, x):
        scaled = self.apply_bandwidth(x)
        # d/dx pdf(x/b) = -(x/b) * pdf(x/b) / b
        return -scaled * scipy.stats.norm.pdf(scaled) / self.bandwidth
|
|
# TODO
from cmstk.filetypes import TextFile
from cmstk.structure.simulation import SimulationCell
import numpy as np
class DataFile(TextFile):
    """A LAMMPS data file: a comment line plus a simulation cell description.

    NOTE(review): TextFile and SimulationCell are project types not visible
    here; `self.lines` is assumed to be provided by TextFile.
    """
    def __init__(self, filepath, comment, simulation_cell):
        # Fill in defaults before handing the path to TextFile.
        self._comment = "This is a LAMMPS data file." if comment is None else comment
        self._simulation_cell = simulation_cell
        super().__init__("lammps.data" if filepath is None else filepath)
    @property
    def comment(self):
        """First-line comment; lazily read from the file content when unset."""
        if self._comment is None:
            self._comment = self.lines[0]
        return self._comment
    @comment.setter
    def comment(self, value):
        self._comment = value
|
|
import os
import pickle
import re
import warnings
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.platform import gfile
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
# Silence library deprecation chatter for this demo script.
warnings.filterwarnings("ignore")
# Pre-trained Inception graph location and dataset layout.
model_dir = "../models/imagenet"
dataset_dir = "../datasets/product-image-cat/"
images_dir = os.path.join(dataset_dir, "images")
# Every file whose name contains jpg/JPG under images_dir is an input image.
image_list = [os.path.join(images_dir, f) for f in os.listdir(images_dir)
              if re.search(r"jpg|JPG", f)]
# Extracted features/labels are cached here as pickles.
data_dir = os.path.join(dataset_dir, "data")
features_file = os.path.join(data_dir, "features")
labels_file = os.path.join(data_dir, "labels")
def create_graph():
    """Load the serialized Inception GraphDef into the default TF graph."""
    graph_path = os.path.join(model_dir, "classify_image_graph_def.pb")
    with gfile.FastGFile(graph_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name="")
def extract_features(list_images):
    """Run each image through Inception and collect pool_3 activations.

    Returns (features, labels): features is (n_images, 2048); each label is
    the filename prefix before the first "_<digits>" run
    (e.g. "shoe_12.jpg" -> "shoe").
    """
    nb_features = 2048
    _features = np.empty((len(list_images), nb_features))
    _labels = []
    create_graph()
    with tf.Session() as sess:
        # pool_3 is the 2048-d layer just before the classifier head.
        next_to_last_tensor = sess.graph.get_tensor_by_name("pool_3:0")
        for i, image in enumerate(list_images):
            print("Processing: {}".format(image))
            if not gfile.Exists(image):
                tf.logging.fatal("File does not exist %s", image)
            image_data = gfile.FastGFile(image, "rb").read()
            predictions = sess.run(next_to_last_tensor,
                                   {"DecodeJpeg/contents:0": image_data})
            _features[i, :] = np.squeeze(predictions)
            # Fix: image.split('/')[1] picked a fixed directory component
            # ("datasets") for every path built from images_dir, so all
            # labels were identical. Derive the label from the basename, and
            # use a raw string for the regex.
            _labels.append(re.split(r'_\d+', os.path.basename(image))[0])
    return _features, _labels
# Extract features, cache them to disk, then train a linear SVM.
features, labels = extract_features(image_list)
# Save features and labels...
if not os.path.isdir(data_dir):
    os.makedirs(data_dir)
# Fix: the original passed open(...) directly to pickle, leaking the file
# handles; use context managers so the files are closed (and flushed).
with open(features_file, "wb") as f:
    pickle.dump(features, f)
with open(labels_file, "wb") as f:
    pickle.dump(labels, f)
with open(features_file, "rb") as f:
    features = pickle.load(f)
with open(labels_file, "rb") as f:
    labels = pickle.load(f)
# 90/10 train/test split, fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(features,
                                                    labels,
                                                    test_size=0.1,
                                                    random_state=42)
print("X_train =", len(X_train), "- y_train =", len(y_train))
print("X_test =", len(X_test), "- y_test =", len(y_test))
clf = LinearSVC()
clf.fit(X_train, y_train)
# Fix: predictions must come from the test *features*; the original called
# clf.predict(y_test) (the labels), which cannot work.
y_pred = clf.predict(X_test)
def plot_confusion_matrix(y_true, _y_pred):
    """Render a confusion matrix heatmap for the classifier predictions.

    NOTE(review): the original sliced off the last row/column
    (cm_array[:-1, :-1]); preserved here, but it looks accidental — confirm
    whether the last class is really meant to be hidden.
    """
    cm_array = confusion_matrix(y_true, _y_pred)
    true_labels = np.unique(y_true)
    pred_labels = np.unique(_y_pred)
    # Fix: the colormap is plt.cm.Blues — plt.cm.Blue does not exist and
    # raised AttributeError.
    plt.imshow(cm_array[:-1, :-1], interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion matrix", fontsize=16)
    color_bar = plt.colorbar(fraction=0.046, pad=0.04)
    color_bar.set_label('Number of images', rotation=270,
                        labelpad=30, fontsize=12)
    xtick_marks = np.arange(len(pred_labels))
    ytick_marks = np.arange(len(true_labels))
    # Fix: confusion_matrix rows are true labels and columns are predictions;
    # the original placed true labels on the x axis while labelling that axis
    # "Predicted label" (and vice versa).
    plt.xticks(xtick_marks, pred_labels, rotation=90)
    plt.yticks(ytick_marks, true_labels)
    plt.ylabel('True label', fontsize=14)
    plt.xlabel('Predicted label', fontsize=14)
    plt.tight_layout()
    fig_size = plt.rcParams["figure.figsize"]
    fig_size[0] = 12
    fig_size[1] = 12
    plt.rcParams["figure.figsize"] = fig_size
# Report held-out accuracy and show the confusion matrix.
print("Accuracy: {:.2%}".format(accuracy_score(y_test, y_pred)))
plot_confusion_matrix(y_test, y_pred)
|
|
import numpy as np
import os
# Maze dimensions; the state space is the cross product of two positions in
# the mazeX x mazeY grid plus two special states (presumably terminal —
# confirm against the script that generates the matrices).
mazeX = 6
mazeY = 5
# NOTE(review): mazeBx/mazeBy are defined but unused in this script.
mazeBx = 4
mazeBy = 4
maxState = mazeX*mazeY*mazeX*mazeY+2
numA = 5  # number of actions
verbose = False
import argparse
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "--T", type=int, default=15,
    help="Time-horizon.")
args = parser.parse_args()
T = args.T
# Results for a given horizon are kept in their own directory.
data_dir = 'dataForT'+str(T)
if not os.path.isdir(data_dir):
    os.makedirs(data_dir)
def randargmax(b):
    """ a random tie-breaking argmax for axis=0"""
    arr = np.asarray(b)
    n_cols = arr.shape[1]
    result = np.zeros(shape=(n_cols,), dtype=int)
    for col in range(n_cols):
        column = arr[:, col]
        # indices of all maximal entries in this column; pick one at random
        ties = np.flatnonzero(column == column.max())
        result[col] = np.random.choice(ties)
    return result
# Load per-action reward and transition matrices produced by a companion
# script. rewardMatList[0] holds per-action running rewards and
# rewardMatList[1] the terminal reward — presumably; confirm against the
# matrix-generator script.
rewardMatList = [None, None]
rewardMatList[0] = np.loadtxt('RewardMat' + str(0) + '.out', delimiter=',')
rewardMatList[1] = np.loadtxt('RewardMat' + str(1) + '.out', delimiter=',')
transProbMatList = [None] * numA
for a in range(numA):
    transProbMatList[a] = np.loadtxt('TransProbMat'+str(a)+'.out', delimiter=',')
# Finite-horizon backward induction: uList[t] is the value function at time
# t, aList[t] the greedy action per state.
uList = [None]*T
aList = [None]*T
uList[T-1] = rewardMatList[1]
aList[T-1]=np.zeros(shape=(maxState,),dtype=int)
for t in range(T-1,0,-1): # t 14:-1:1
    # z[a, s] = expected future value of taking action a in state s
    z = np.empty(shape=(numA, maxState))
    for a in range(numA):
        x = np.sum(transProbMatList[a]*uList[t], axis=1)
        z[a] = x + rewardMatList[0][a]
    uList[t-1] = np.max(z, axis=0)
    # random tie-breaking keeps the policy unbiased among equally good actions
    aList[t - 1] = randargmax(z)
    # aList[t - 1] = np.argmax(z,axis=0)
# Persist the policy and value arrays, replacing any previous run's output.
if os.path.exists(data_dir+'/'+'aListArray'+str(T)+'.npy'):
    os.remove(data_dir+'/'+'aListArray'+str(T)+'.npy')
np.array(aList).dump(open(data_dir+'/'+'aListArray'+str(T)+'.npy', 'wb'))
if os.path.exists(data_dir+'/'+'uListArray'+str(T)+'.npy'):
    os.remove(data_dir+'/'+'uListArray'+str(T)+'.npy')
np.array(uList).dump(open(data_dir+'/'+'uListArray'+str(T)+'.npy', 'wb'))
# os.rename('aListArray'+str(T)+'.npy', data_dir+'/'+'aListArray'+str(T)+'.npy')
# print('Storing action list is done')
|
|
def mangoPlot(mango_filenames):
    """Read MANGO "mango_out" optimization-record files and plot convergence.

    Produces one log-scale figure of (objective - global minimum) versus
    function evaluation across all files, plus one figure per optimization
    parameter showing its trajectory.

    :param mango_filenames: list of candidate file paths; non-files are
        silently skipped, and names not starting with "mango_out" only
        trigger a warning.
    """
    # Copyright 2019, University of Maryland and the MANGO development team.
    #
    # This file is part of MANGO.
    #
    # MANGO is free software: you can redistribute it and/or modify it
    # under the terms of the GNU Lesser General Public License as
    # published by the Free Software Foundation, either version 3 of the
    # License, or (at your option) any later version.
    #
    # MANGO is distributed in the hope that it will be useful, but
    # WITHOUT ANY WARRANTY; without even the implied warranty of
    # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    # Lesser General Public License for more details.
    #
    # You should have received a copy of the GNU Lesser General Public
    # License along with MANGO.  If not, see
    # <https://www.gnu.org/licenses/>.
    myfigsize=(14,6.8)
    marker_size = 2
    line_width = 0.7
    import os
    import matplotlib.pyplot as plt
    import numpy as np
    import sys
    import glob
    # Per-file accumulators, indexed in parallel with `filenames`.
    files_function_evaluations = []
    files_objective_function = []
    files_times = []
    filenames = []
    files_parameters = []
    for j in range(len(mango_filenames)):
        filename = mango_filenames[j]
        if os.path.isfile(filename):
            filenames.append(filename)
            basename = os.path.basename(filename)
            if basename[:9] != "mango_out":
                print("WARNING: Including file "+filename+" even though it does not begin with mango_out")
    print("Files that will be read and plotted:")
    for file in filenames:
        print("  "+file)
    print()
    for k in range(len(filenames)):
        filename = filenames[k]
        f = open(filename,'r')
        lines = f.readlines()
        f.close()
        # File line 4 (index 3) holds N_parameters as its first CSV field.
        temp = lines[3].split(',')
        try:
            N_parameters = int(temp[0])
        except:
            print("ERROR! Unable to read N_parameters from line 3 of "+filename)
            print("This probably means this file is not a correctly formatted mango_out file.")
            raise
        function_evaluations = []
        times = []
        objective_function = []
        parameters = np.zeros((1,N_parameters))
        # Records start at line 6 (index 5): eval#, time, params..., objective.
        for j in range(5,len(lines)):
            temp = lines[j].split(',')
            try:
                function_evaluations.append(int(temp[0]))
            except:
                print("ERROR! Unable to convert "+temp[0]+" to int on line "+str(j)+" of file "+filename)
                print("This probably means this file is not a correctly formatted mango_out file.")
                raise
            try:
                times.append(float(temp[1]))
            except:
                print("ERROR! Unable to convert "+temp[1]+" to float on line "+str(j)+" of file "+filename)
                print("This probably means this file is not a correctly formatted mango_out file.")
                raise
            try:
                if (j == 5):
                    parameters[0,:] = temp[2:N_parameters+2]
                else:
                    parameters = np.vstack((parameters,temp[2:N_parameters+2]))
            except:
                print("ERROR! Unable to convert "+str(temp[2:N_parameters+2])+" to float on line "+str(j)+" of file "+filename)
                print("This probably means this file is not a correctly formatted mango_out file.")
                raise
            try:
                this_objective_function = float(temp[N_parameters+2])
            except:
                print("Warning: unable to convert "+temp[N_parameters+2]+" to float in file "+filename)
                this_objective_function = np.nan
            # Stellopt sets failed results to 1e+12, which makes it hard to see the interesting structure in the objective function for successful runs.
            # So let's just not show failed runs.
            if this_objective_function > 1.0e+11:
                this_objective_function = np.nan
            objective_function.append(this_objective_function)
        if k==0:
            min_objective_function = np.nanmin(objective_function)
            # np.nanmin is a minimum excluding any nans.
        else:
            if len(objective_function) > 0: # Failed runs have len(objective_function)=0, causing np.nanmin to fail
                min_objective_function = np.nanmin((min_objective_function, np.nanmin(objective_function)))
        # The last record of each file is dropped — presumably to guard
        # against a partially-written final line; confirm against the
        # mango_out writer.
        files_function_evaluations.append(function_evaluations[:-1])
        files_times.append(times[:-1])
        files_objective_function.append(objective_function[:-1])
        files_parameters.append(parameters[:-1])
    N_files = len(files_function_evaluations)
    print("Minimum objective function found:",min_objective_function)
    #########################################################
    # Done reading files. Now make the plot.
    #########################################################
    fig = plt.figure(figsize=myfigsize)
    fig.patch.set_facecolor('white')
    numCols = 1
    numRows = 3
    plotNum = 1
    # marker style rotates every 10 files so large comparisons stay readable
    linespecs = ['o','^','s','v']
    plt.figure()
    for j in range(N_files):
        linespec=linespecs[np.mod(int(np.floor(j/10)),len(linespecs))]+'-'
        plt.semilogy(files_function_evaluations[j], files_objective_function[j] - min_objective_function, linespec, label = filenames[j], markersize=marker_size, linewidth=line_width)
    plt.xlabel('Function evaluation')
    plt.ylabel('(Objective function) - (min objective function)')
    plt.grid(True)
    ncol=int(np.floor(N_files/20))+1
    if N_files > 1:
        plt.legend(loc='upper right',fontsize=7,ncol=ncol)
    # One figure per parameter, showing its value across evaluations.
    for k in range(len(files_parameters[0][0,:])):
        plt.figure()
        # ax = plt.gca()
        # ax.set_yticks(ax.get_yticks()[::2])
        plt.title('Parameter '+str(k+1))
        for j in range(N_files):
            # linespec=linespecs[np.mod(int(np.floor(j/10)),len(linespecs))]+'-'
            plt.plot(files_function_evaluations[j], files_parameters[j][:,k],label = filenames[j], markersize=marker_size, linewidth=line_width)
        plt.xlabel('Function evaluation')
        plt.ylabel('Parameter value')
        ncol=int(np.floor(N_files/20))+1
        if N_files > 1:
            plt.legend(loc='upper right',fontsize=7,ncol=ncol)
        ax = plt.gca()
        ax.yaxis.set_major_locator(plt.MaxNLocator(10))
    ##############################################################
    plt.show()
|
|
import cv2
import numpy as np
import os
from cvpackage import resize, to_gray, contrast_tune, gaussian_blur, canny_capture
from lineIterator import get_pixels, curve_plot, curve_fitting, curve_smooth, count_peaks
# FILE PATH HERE #
testPic = 'testsample.JPG'  # NOTE(review): defined but never used below
picPath = 'image'  # directory (relative to cwd) scanned for input pictures
# FILE PATH HERE #
# PUBLIC PARAMETERS HERE #
# Trackbar-controlled display parameters (mutated by the GUI callbacks below).
brightness_param = 50
contrast_param = 50
window_len = 17  # smoothing window length passed to curve_smooth
polynomial_order = 2  # smoothing polynomial order passed to curve_smooth
picture_size = 30  # resize factor passed to resize()
# Endpoints of the user-drawn sampling line (single-point lists, set by clicks).
top_left_corner = [(0, 0)]
bottom_right_corner = [(1, 1)]
# Dirty flags: redraw the frame / recompute the line profile on next loop pass.
is_update = True
is_line_done = False  # True while waiting for the second click
is_line_update = False
# PUBLIC PARAMETERS HERE #
def pictureSize(x):
    """Trackbar callback: update the display scale and request a redraw."""
    global picture_size, is_update
    picture_size = x
    is_update = True
def brightness(x):
    """Trackbar callback: update brightness and request a redraw."""
    global brightness_param, is_update
    brightness_param = x
    is_update = True
def contrast(x):
    """Trackbar callback: update contrast and request a redraw."""
    global contrast_param, is_update
    contrast_param = x
    is_update = True
def smooth_length(x):
    """Trackbar callback: update the smoothing window length.

    NOTE(review): declares is_line_update global but never sets it, so the
    curve is only recomputed once 'u' is pressed — confirm this is intended.
    """
    global window_len, is_line_update
    window_len = x
def smooth_order(x):
    """Trackbar callback: update the smoothing polynomial order.

    NOTE(review): same as smooth_length — is_line_update is declared but
    never set here.
    """
    global polynomial_order, is_line_update
    polynomial_order = x
def draw_rectangle(action, x, y, flags, *userdata):
    """Mouse callback: two successive left-clicks define the sampling line.

    The first click stores the start point; the second stores the end point
    and flags the intensity profile for recomputation.
    """
    global top_left_corner, bottom_right_corner, is_update, is_line_done, is_line_update
    if action == cv2.EVENT_LBUTTONDOWN:
        if not is_line_done:
            # first click: remember the start point, await the second click
            top_left_corner = [(x, y)]
            is_line_done = True
        else:
            # second click: line complete, recompute the profile
            bottom_right_corner = [(x, y)]
            is_line_done = False
            is_line_update = True
        # any click triggers a redraw on the next loop pass
        is_update = True
# main execute here
if __name__ == '__main__':
    # GUI Trackbars
    cv2.namedWindow('ControlPanel')
    cv2.createTrackbar('Pic Size', 'ControlPanel', 0, 100, pictureSize)
    cv2.setTrackbarPos('Pic Size', 'ControlPanel', picture_size)
    cv2.createTrackbar('Brightness', 'ControlPanel', -127, 127, brightness)
    cv2.setTrackbarPos('Brightness', 'ControlPanel', brightness_param)
    cv2.createTrackbar('Contrast', 'ControlPanel', -127, 127, contrast)
    cv2.setTrackbarPos('Contrast', 'ControlPanel', contrast_param)
    cv2.createTrackbar('Window Length', 'ControlPanel', 0, 51, smooth_length)
    cv2.setTrackbarPos('Window Length', 'ControlPanel', window_len)
    cv2.createTrackbar('Polynomial Order', 'ControlPanel', 0, 20, smooth_order)
    cv2.setTrackbarPos('Polynomial Order', 'ControlPanel', polynomial_order)
    # GUI Mouse Action
    cv2.namedWindow("image")
    cv2.setMouseCallback('image', draw_rectangle)
    # list pictures, load/prepare the first one (gray + fixed crop + resize).
    # NOTE(review): only the initial image is cropped; pictures loaded via
    # the x/z keys below are not — confirm whether that asymmetry is wanted.
    files = os.listdir('./' + picPath)
    index = 0
    image = cv2.imread('./' + picPath + '/' + files[index])
    image = to_gray(image)
    cropped_image = image[900:1700, 2500:4100]
    image = resize(cropped_image, picture_size)
    k = 0
    # event loop: runs until ESC (27)
    while k != 27:
        if is_update:
            # adjust contrast/brightness for display
            contrasted_img = contrast_tune(image, brightness_param, contrast_param)
            if is_line_update:
                # sample pixel values along the user-drawn line, smooth them,
                # count intensity peaks and annotate the frame
                value = get_pixels(contrasted_img, top_left_corner, bottom_right_corner)
                yhat = curve_smooth(value, window_len, polynomial_order)
                peak_num = count_peaks(yhat)
                # Fix: cv2.putText requires a string; count_peaks presumably
                # returns a number, so convert explicitly.
                contrasted_img = cv2.putText(contrasted_img, str(peak_num), (50, 50), cv2.FONT_ITALIC, 1, (255, 255, 0), 1)
                cv2.line(contrasted_img, top_left_corner[0], bottom_right_corner[0], (0, 255, 0), thickness=2)
                is_line_update = False
            contrasted_img = cv2.putText(contrasted_img, files[index], (50, 100), cv2.FONT_ITALIC, 1, (255, 255, 0), 1)
            cv2.imshow('image', contrasted_img)
            is_update = False
        k = cv2.waitKey(1)
        # 'u': force recomputation of the curve
        if k == 117:
            is_line_update = True
            is_update = True
        # 'x': next picture
        elif k == 120:
            index += 1
            # Fix: wrap only after running past the end. The old check
            # (index == len(files) - 1 -> 0) skipped the last file entirely,
            # while the 'z' branch wrapped correctly.
            if index == len(files):
                index = 0
            image = cv2.imread('./' + picPath + '/' + files[index])
            image = to_gray(image)
            image = resize(image, picture_size)
            is_update = True
        # 'z': previous picture
        elif k == 122:
            index -= 1
            if index == -1:
                index = len(files) - 1
            image = cv2.imread('./' + picPath + '/' + files[index])
            image = to_gray(image)
            image = resize(image, picture_size)
            is_update = True
|
|
import os
import pickle
import numpy as np
import scipy.stats
class Results(object):
    """Wraps a pickled results dataframe and summarizes selected metric
    columns with 95% confidence intervals."""
    mpr_column = "test-all-baskets.MPR"
    prec_ten_column = "test-all-baskets.Prec@10"
    prec_five_column = "test-all-baskets.Prec@5"
    all_baskets_AUC = "test-all-baskets.AUC"
    # short metric name -> dataframe column holding its per-run values
    processed_columns = {"mpr": mpr_column,
                         "prec@5": prec_five_column,
                         "prec@10": prec_ten_column,
                         "auc": all_baskets_AUC}
    @staticmethod
    def read_df(df_path):
        """Unpickle and return the dataframe stored at df_path."""
        with open(df_path, "rb") as f:
            return pickle.load(f)
    @classmethod
    def from_path(cls, path):
        """Build a Results from a pickled dataframe file; the file stem is the name."""
        name_with_ext = os.path.split(path)[1]
        name = os.path.splitext(name_with_ext)[0]
        df = cls.read_df(path)
        return cls(name, df)
    def __init__(self, name, df):
        """
        :param name: name of the result, that should contain the
        name of the dataset in it (cbs_{retailer}_{1d|10d})
        :param df: the dataframe
        """
        self.name = name
        self.df = df
        self.results = {}
        for name_in_res, column_name in self.processed_columns.items():
            # locals renamed to avoid shadowing the builtins min/max
            mean, lower, upper = self.mean_confidence_interval(self.df[column_name])
            self.results[name_in_res] = {"mean": mean,
                                         "min": lower,
                                         "max": upper}
        self.dataset = self.name
    @staticmethod
    def group_by_dataset(results):
        """Group Results objects into {dataset_name: [results, ...]}."""
        grouped = {}
        for r in results:
            grouped.setdefault(r.dataset, []).append(r)
        return grouped
    @staticmethod
    def sort(results):
        """Sort Results by mean MPR, highest first."""
        return sorted(results,
                      key=lambda x: -x.results["mpr"]["mean"])
    @staticmethod
    def mean_confidence_interval(data, confidence=0.95):
        """Return (mean, lower, upper) of the Student-t confidence interval.

        A single observation yields the degenerate interval (m, m, m).
        """
        a = np.array(data)
        n = len(a)
        m = np.mean(a)  # computed once (the original computed it twice)
        if n == 1:
            return m, m, m
        se = scipy.stats.sem(a)
        h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
        return m, m - h, m + h
    def __str__(self):
        # Fix: the original bound `res = self.name` and never used it.
        values = []
        for column in self.processed_columns:
            value = self.results[column]
            values.append(self._mean_min_max_to_str(column,
                                                    value["mean"],
                                                    value["min"],
                                                    value["max"]))
        return "%s: %s" % (self.name, ", ".join(values))
    @staticmethod
    def _mean_min_max_to_str(distrib_name, mean, lower, upper):
        """Format one metric as name(mean [lower, upper])."""
        return "%s(%.2f [%.2f, %.2f])" % (distrib_name, mean, lower, upper)
    def __repr__(self):
        return self.__str__()
|
|
# -*- coding: utf-8 -*-
# @Author: xuenan xu
# @Date: 2021-06-14
# @Last Modified by: xuenan xu
# @Last Modified time: 2021-07-02
import sys
import kaldiio
import librosa
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
from pathlib import Path
from tqdm import tqdm as tqdm
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
import h5py
from pypeln import process as pr
class ConvBlock(nn.Module):
    """Two 3x3 conv + BN + ReLU stages followed by configurable 2D pooling."""
    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        conv_kwargs = dict(kernel_size=(3, 3), stride=(1, 1),
                           padding=(1, 1), bias=False)
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels, **conv_kwargs)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
                               out_channels=out_channels, **conv_kwargs)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
    def forward(self, input, pool_size=(2, 2), pool_type='avg'):
        """Apply conv1->bn1->relu, conv2->bn2->relu, then the chosen pooling."""
        x = F.relu_(self.bn1(self.conv1(input)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == 'max':
            return F.max_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg':
            return F.avg_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg+max':
            # sum of both poolings, as used by the PANNs architecture
            return (F.avg_pool2d(x, kernel_size=pool_size)
                    + F.max_pool2d(x, kernel_size=pool_size))
        raise Exception('Incorrect argument!')
class Cnn10(nn.Module):
    """PANNs CNN10 backbone: log-mel frontend + four ConvBlocks.

    forward() returns clipwise sigmoid outputs plus clip-level (fc) and
    frame-level (attn) features.
    """
    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
                 fmax, classes_num):
        super(Cnn10, self).__init__()
        # STFT -> spectrogram frontend (frozen; not trained)
        self.spectrogram_extractor = Spectrogram(
            n_fft=window_size, hop_length=hop_size, win_length=window_size,
            window='hann', center=True, pad_mode='reflect',
            freeze_parameters=True)
        # mel filterbank + log compression (frozen)
        self.logmel_extractor = LogmelFilterBank(
            sr=sample_rate, n_fft=window_size, n_mels=mel_bins,
            fmin=fmin, fmax=fmax, ref=1.0, amin=1e-10, top_db=None,
            freeze_parameters=True)
        # attribute names must stay exactly as-is: they are the keys of the
        # pretrained checkpoint's state_dict
        self.bn0 = nn.BatchNorm2d(64)
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.fc1 = nn.Linear(512, 512, bias=True)
        self.fc_audioset = nn.Linear(512, classes_num, bias=True)
    def forward(self, input):
        """
        Input: (batch_size, data_length)"""
        x = self.spectrogram_extractor(input)  # (batch, 1, time, freq)
        x = self.logmel_extractor(x)           # (batch, 1, time, mel)
        # BatchNorm over the mel axis: swap it into channel position and back
        x = self.bn0(x.transpose(1, 3)).transpose(1, 3)
        for block in (self.conv_block1, self.conv_block2,
                      self.conv_block3, self.conv_block4):
            x = block(x, pool_size=(2, 2), pool_type='avg')
            x = F.dropout(x, p=0.2, training=self.training)
        x = torch.mean(x, dim=3)          # collapse the mel axis
        attn_feats = x.transpose(1, 2)    # frame-level features (batch, time, 512)
        # clip-level aggregation: max + mean over time
        x = torch.max(x, dim=2)[0] + torch.mean(x, dim=2)
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu_(self.fc1(x))
        embedding = F.dropout(x, p=0.5, training=self.training)
        clipwise_output = torch.sigmoid(self.fc_audioset(x))
        return {
            'clipwise_output': clipwise_output,
            'fc_feat': embedding,
            'attn_feat': attn_feats
        }
class Cnn14(nn.Module):
    """PANNs CNN14 backbone: log-mel frontend + six ConvBlocks.

    forward() returns clipwise sigmoid outputs plus clip-level (fc) and
    frame-level (attn) features.
    """
    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
                 fmax, classes_num):
        super(Cnn14, self).__init__()
        # STFT -> spectrogram frontend (frozen; not trained)
        self.spectrogram_extractor = Spectrogram(
            n_fft=window_size, hop_length=hop_size, win_length=window_size,
            window='hann', center=True, pad_mode='reflect',
            freeze_parameters=True)
        # mel filterbank + log compression (frozen)
        self.logmel_extractor = LogmelFilterBank(
            sr=sample_rate, n_fft=window_size, n_mels=mel_bins,
            fmin=fmin, fmax=fmax, ref=1.0, amin=1e-10, top_db=None,
            freeze_parameters=True)
        # attribute names must stay exactly as-is: they are the keys of the
        # pretrained checkpoint's state_dict
        self.bn0 = nn.BatchNorm2d(64)
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
        self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
        self.fc1 = nn.Linear(2048, 2048, bias=True)
        self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
    def forward(self, input):
        """
        Input: (batch_size, data_length)"""
        x = self.spectrogram_extractor(input)  # (batch, 1, time, freq)
        x = self.logmel_extractor(x)           # (batch, 1, time, mel)
        # BatchNorm over the mel axis: swap it into channel position and back
        x = self.bn0(x.transpose(1, 3)).transpose(1, 3)
        # last block keeps the resolution (pool 1x1); all others halve it
        stages = [(self.conv_block1, (2, 2)), (self.conv_block2, (2, 2)),
                  (self.conv_block3, (2, 2)), (self.conv_block4, (2, 2)),
                  (self.conv_block5, (2, 2)), (self.conv_block6, (1, 1))]
        for block, pool in stages:
            x = block(x, pool_size=pool, pool_type='avg')
            x = F.dropout(x, p=0.2, training=self.training)
        x = torch.mean(x, dim=3)          # collapse the mel axis
        attn_feats = x.transpose(1, 2)    # frame-level features (batch, time, 2048)
        # clip-level aggregation: max + mean over time
        x = torch.max(x, dim=2)[0] + torch.mean(x, dim=2)
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu_(self.fc1(x))
        embedding = F.dropout(x, p=0.5, training=self.training)
        clipwise_output = torch.sigmoid(self.fc_audioset(x))
        return {
            'clipwise_output': clipwise_output,
            'fc_feat': embedding,
            'attn_feat': attn_feats
        }
def load_audio(specifier: str, sr=None):
    """Load audio from a Kaldi pipe specifier (ending in '|') or a file path.

    Returns (waveform, sample_rate). Kaldi 16-bit integer PCM is rescaled
    into [-1, 1) by dividing by 2**15.
    """
    if specifier.endswith("|"):
        fd = kaldiio.utils.open_like_kaldi(specifier, "rb")
        sr, y = kaldiio.matio._load_mat(fd, None)
        fd.close()
        y = y.copy() / 2 ** 15
    else:
        assert Path(specifier).exists(), specifier + " not exists!"
        y, sr = librosa.load(specifier, sr=sr)
    return y, sr
# CLI: input csv, checkpoint, frontend parameters and output locations.
parser = argparse.ArgumentParser()
parser.add_argument('wav_csv', type=str)
parser.add_argument('pretrained_model', type=str)
parser.add_argument('-sample_rate', type=int, default=32000)
parser.add_argument('-window_size', type=int, default=1024)
parser.add_argument('-hop_size', type=int, default=320)
parser.add_argument('-mel_bins', type=int, default=64)
parser.add_argument('-fmin', type=int, default=50)
parser.add_argument('-fmax', type=int, default=14000)
parser.add_argument('--cuda', default=False, action='store_true')
parser.add_argument('--model_type', type=str, default='Cnn10',
                    choices=['Cnn10', 'Cnn14'])
parser.add_argument('--process_num', type=int, default=4)
parser.add_argument('--fc_feat_h5', type=str)
parser.add_argument('--fc_feat_csv', type=str)
parser.add_argument('--attn_feat_h5', type=str)
parser.add_argument('--attn_feat_csv', type=str)
args = parser.parse_args()
# Default output files live next to the input csv.
if not args.fc_feat_h5:
    args.fc_feat_h5 = "panns_fc.h5"
args.fc_feat_h5 = Path(args.wav_csv).with_name(args.fc_feat_h5)
if not args.fc_feat_csv:
    args.fc_feat_csv = "panns_fc.csv"
args.fc_feat_csv = Path(args.wav_csv).with_name(args.fc_feat_csv)
if not args.attn_feat_h5:
    args.attn_feat_h5 = "panns_attn.h5"
args.attn_feat_h5 = Path(args.wav_csv).with_name(args.attn_feat_h5)
if not args.attn_feat_csv:
    args.attn_feat_csv = "panns_attn.csv"
args.attn_feat_csv = Path(args.wav_csv).with_name(args.attn_feat_csv)
argsdict = vars(args)
device = "cuda" if args.cuda and torch.cuda.is_available() else "cpu"
device = torch.device(device)
# Fix: look the model class up explicitly instead of eval() on a CLI string —
# eval would execute arbitrary user input.
model_classes = {'Cnn10': Cnn10, 'Cnn14': Cnn14}
model = model_classes[args.model_type](
    sample_rate=args.sample_rate,
    window_size=args.window_size,
    hop_size=args.hop_size,
    mel_bins=args.mel_bins,
    fmin=args.fmin,
    fmax=args.fmax,
    classes_num=527)
checkpoint = torch.load(args.pretrained_model, map_location='cpu')
model.load_state_dict(checkpoint['model'])
model = model.to(device)
model.eval()
def extract_feature(row):
    """Run one wav file through the PANNs model and return its features.

    ``row`` is an item from ``DataFrame.iterrows()``: an (index, Series)
    pair whose Series must provide ``file_name`` and ``audio_id``.
    Returns ``(audio_id, fc_feat, attn_feat)`` with both features as numpy
    arrays for the single (batch-size-1) waveform.
    """
    row = row[1]
    # Resample on load so the waveform matches the model's sample rate.
    waveform, _ = load_audio(row["file_name"], sr=args.sample_rate)
    waveform = waveform[None, :]  # add the batch dimension
    waveform = torch.as_tensor(waveform).float().to(device)
    output_dict = model(waveform)
    fc_feat = output_dict["fc_feat"].cpu().numpy()[0]
    attn_feat = output_dict["attn_feat"].cpu().numpy()[0]
    return row["audio_id"], fc_feat, attn_feat
# Read the wav list (tab-separated; must provide ``audio_id`` and
# ``file_name`` columns), stream every file's features into the two HDF5
# stores, and record an audio_id -> hdf5_path row for each index CSV.
wav_df = pd.read_csv(args.wav_csv, sep="\t")
fc_feat_csv_data = []
attn_feat_csv_data = []
with h5py.File(args.fc_feat_h5, "w") as fc_store, \
        h5py.File(args.attn_feat_h5, "w") as attn_store, \
        tqdm(total=wav_df.shape[0]) as pbar, \
        torch.no_grad():
    # Sequential extraction; a parallel map variant was removed here as it
    # was disabled (commented out) anyway.
    for row in wav_df.iterrows():
        audio_id, fc_feat, attn_feat = extract_feature(row)
        fc_store[audio_id] = fc_feat
        attn_store[audio_id] = attn_feat
        fc_feat_csv_data.append({
            "audio_id": audio_id,
            "hdf5_path": str(Path(args.fc_feat_h5).absolute())
        })
        attn_feat_csv_data.append({
            "audio_id": audio_id,
            "hdf5_path": str(Path(args.attn_feat_h5).absolute())
        })
        pbar.update()
# Index CSVs map each audio_id to the HDF5 store holding its features.
pd.DataFrame(fc_feat_csv_data).to_csv(args.fc_feat_csv, sep="\t", index=False)
pd.DataFrame(attn_feat_csv_data).to_csv(args.attn_feat_csv, sep="\t", index=False)
|
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Test a Detectron network on an imdb (image database)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import cv2
import datetime
import logging
import numpy as np
import os
import yaml
import scipy
import numpy as np
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
from detectron.core.rpn_generator import generate_rpn_on_dataset
from detectron.core.rpn_generator import generate_rpn_on_range
from detectron.core.test import im_detect_all,im_detect_bbox_aug,box_results_with_nms_and_limit
from detectron.datasets import task_evaluation
from detectron.datasets.json_dataset import JsonDataset
from detectron.modeling import model_builder
from detectron.utils.io import save_object
from detectron.utils.timer import Timer
import detectron.utils.c2 as c2_utils
import detectron.utils.env as envu
import detectron.utils.net as net_utils
import detectron.utils.subprocess as subprocess_utils
import detectron.utils.vis as vis_utils
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
logger = logging.getLogger(__name__)
def detect_im(weights_file, roidb, gamma, idxs=None, gpu_id=0):
    """Run detection on the (unlabeled) roidb entries selected by ``idxs``.

    Parameters
    ----------
    weights_file : path to the trained model weights.
    roidb : full image database; only ``roidb[i] for i in idxs`` is processed.
    gamma : score threshold; detections scoring below it are dropped.
    idxs : indices into ``roidb`` to process.
    gpu_id : GPU to run inference on.

    Returns
    -------
    allBoxes, allClass, allScore : per-kept-image lists of boxes / class ids /
        scores (boxes sorted by decreasing area).
    al_idx : original roidb indices whose images had no detection above
        ``gamma`` (active-learning candidates).
    ALScore : mean detection score for each image in ``al_idx`` (0 if there
        were no detections at all).
    """
    roidb = [roidb[i] for i in idxs]
    model = infer_engine.initialize_model_from_cfg(weights_file, gpu_id=gpu_id)
    thresh = gamma
    allBoxes = []
    allScore = []
    allClass = []
    al_idx = []
    ALScore = []
    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):
        im = cv2.imread(entry['image'])
        with c2_utils.NamedCudaScope(gpu_id):
            cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                model, im, None, timers)
        # NOTE(review): if cls_boxes_i were ever not a list, ``boxes`` below
        # would be stale or undefined — presumably im_detect_all always
        # returns a list here; confirm before relying on the non-list path.
        if isinstance(cls_boxes_i, list):
            boxes, segms, keypoints, classes = convert_from_cls_format(
                cls_boxes_i, cls_segms_i, cls_keyps_i)
        if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
            # No confident detection: mark image for the active-learning pool.
            al_idx.append(idxs[i])
            if boxes is not None and boxes.shape[0] != 0:
                ALScore.append(np.mean(boxes[:, 4]))
            else:
                ALScore.append(0)
            continue
        # Process detections from largest to smallest box area.
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)
        BBox = []
        Score = []
        Class = []
        # Distinct loop variable ``k`` (the original shadowed the outer ``i``).
        for k in sorted_inds:
            score = boxes[k, -1]
            if score < thresh:
                continue
            BBox.append(list(boxes[k, :4]))
            Score.append(score)
            Class.append(classes[k])
        allBoxes.append(BBox)
        allClass.append(Class)
        allScore.append(Score)
    return allBoxes, allClass, allScore, al_idx, ALScore
def replace_roidb(roidb, BBoxes, YClass, unlabeledidx):
    """Overwrite the ground truth of unlabeled entries with pseudo (fake) labels.

    Parameters
    ----------
    roidb : list of roidb entry dicts (modified in place and returned).
    BBoxes : per-image list of pseudo boxes, one list per unlabeled image.
    YClass : per-image list of pseudo class ids, parallel to ``BBoxes``.
    unlabeledidx : indices into ``roidb`` that receive the pseudo labels.
    """
    for i, idx in enumerate(unlabeledidx):
        curr_len = len(YClass[i])
        boxes = np.array(BBoxes[i], dtype=np.float32)
        gt_classes = np.array(YClass[i], dtype=np.int32)
        gt_overlaps = np.zeros((curr_len, cfg.MODEL.NUM_CLASSES), dtype=np.float32)
        for j in range(curr_len):
            # BUG FIX: the original indexed ``YClass[j]`` (a whole per-image
            # list) instead of the j-th pseudo class of image i.
            gt_overlaps[j, YClass[i][j]] = 1.0
        gt_overlaps = scipy.sparse.csr_matrix(gt_overlaps)
        max_classes = np.array(YClass[i], dtype=np.int32)
        # Each pseudo box is treated as a certain (overlap 1.0) match to itself.
        max_overlaps = np.ones(curr_len)
        box_to_gt_ind_map = np.array(range(curr_len), dtype=np.int32)
        is_crowd = np.array([False] * curr_len)
        roidb[idx]['boxes'] = boxes
        roidb[idx]['gt_classes'] = gt_classes
        roidb[idx]['gt_overlaps'] = gt_overlaps
        roidb[idx]['max_classes'] = max_classes
        roidb[idx]['max_overlaps'] = max_overlaps
        roidb[idx]['box_to_gt_ind_map'] = box_to_gt_ind_map
        roidb[idx]['is_crowd'] = is_crowd
    print('-----replace gt with fake gt----')
    return roidb
def blur_image(roidbs,ss_candidate_idx):
    '''Blur whole images except their BBox regions (self-supervised input).

    For each roidb entry selected by ``ss_candidate_idx`` (non-flipped and
    with at least one box) a blurred copy is written to ``tmpdata/`` with
    the original pixels restored inside every box, and the entry's
    ``image`` path is redirected to that copy.  All other entries are
    shallow-copied unchanged.
    '''
    def _handle(roi, idx):
        # ``idx`` is currently unused; kept for signature stability.
        imgpath = roi['image'].split('/')[-1]
        im = cv2.imread(roi['image'])
        im_bbox = []
        # Save the original (sharp) crop of every box before blurring.
        for box in roi['boxes']:
            box = list(map(int, box))
            im_bbox.append(im[box[1]:box[3], box[0]:box[2]])
        new_im = cv2.blur(im, (25,25))
        # Paste the sharp crops back and outline each box (BGR blue).
        for i, box in enumerate(roi['boxes']):
            box = list(map(int, box))
            cv2.rectangle(new_im,(box[0],box[1]),(box[2],box[3]),(255,0,0),3)
            new_im[box[1]:box[3], box[0]:box[2]] = im_bbox[i]
        path = 'tmpdata/{}'.format(imgpath)
        cv2.imwrite(path, new_im)
        assert os.path.exists(path), "didnt save successfully"
        roi['image'] = path
        return roi
    copy_roidb = []
    for i in range(len(roidbs)):
        if len(roidbs[i]['boxes'])>0 and i in ss_candidate_idx and not roidbs[i]['flipped']:
            copy_roidb.append(roidbs[i].copy())
            copy_roidb[i] = _handle(copy_roidb[i], i)
        else:
            copy_roidb.append(roidbs[i].copy())
    return copy_roidb
def get_roidb_and_dataset(dataset_name, idxs):
    """Load the roidb for ``dataset_name`` and optionally restrict it.

    When ``idxs`` is given, only those entries are returned (``start``/``end``
    then index into the selection, and ``total_num_images`` is the size of the
    full database); otherwise the full roidb is returned.
    """
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb()
    if idxs is None:
        full_size = len(roidb)
        return roidb, dataset, 0, full_size, full_size
    total_num_images = len(roidb)
    selected = [roidb[i] for i in idxs]
    return selected, dataset, 0, len(idxs), total_num_images
def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):
    """Flatten the per-class boxes/segms/keyps produced by the testing code.

    Returns ``(boxes, segms, keyps, classes)`` where ``boxes`` is the
    concatenation of all non-empty per-class detections (or None if there
    are none) and ``classes`` lists the class index of every detection.
    """
    non_empty = [dets for dets in cls_boxes if len(dets) > 0]
    boxes = np.concatenate(non_empty) if len(non_empty) > 0 else None
    segms = None if cls_segms is None else [s for slist in cls_segms for s in slist]
    keyps = None if cls_keyps is None else [k for klist in cls_keyps for k in klist]
    classes = []
    for cls_idx in range(len(cls_boxes)):
        classes.extend([cls_idx] * len(cls_boxes[cls_idx]))
    return boxes, segms, keyps, classes
|
|
import torch
from torch.utils.data import Dataset, DataLoader
from torch.distributions.multivariate_normal import MultivariateNormal
import numpy as np
from tqdm import tqdm
class UniformSampler:
    """Draws random index batches without splitting the underlying data.

    For every wrapped dataset, each batch holds ``batch_size`` indices
    sampled uniformly with replacement; the per-dataset index columns are
    stacked side by side into one tensor of shape (batch_size, n_datasets).
    """
    def __init__(self, *datasets, batch_size=1, n_batches=1):
        self.batch_size = batch_size
        self.n_batches = n_batches
        # Uniform (all-ones) sampling weights, one vector per dataset.
        self.weights = [torch.ones(len(dataset)) for dataset in datasets]

    def __iter__(self):
        for _ in range(self.n_batches):
            columns = [
                torch.multinomial(weight, self.batch_size, replacement=True)
                for weight in self.weights
            ]
            yield torch.stack(columns, dim=1)

    def __len__(self):
        # NOTE(review): this is the total number of drawn indices, not the
        # number of batches — as a batch_sampler, DataLoader interprets
        # len() as the batch count; confirm the intended semantics.
        return self.batch_size * self.n_batches
class ZipDataset(Dataset):
    """Dataset that zips several other datasets together.

    ``idx`` must be a sequence carrying one index per wrapped dataset.
    Each per-dataset item is a list assembled as [index?, data, target?]
    depending on the two flags; with a single wrapped dataset the outer
    list is unwrapped.
    """
    def __init__(self, *datasets, return_targets=False, return_idx=True):
        super().__init__()
        self.datasets = datasets
        self.return_targets = return_targets
        self.return_idx = return_idx

    def __getitem__(self, idx):
        items = []
        for ds_idx, dataset in zip(idx, self.datasets):
            entry = [ds_idx] if self.return_idx else []
            # Index the dataset separately for data and target, exactly like
            # the original (matters for datasets that sample on access).
            entry.append(dataset[ds_idx][0])
            if self.return_targets:
                entry.append(dataset[ds_idx][1])
            items.append(entry)
        return items[0] if len(items) == 1 else items

    def __len__(self):
        # Size of the cartesian product of the wrapped datasets.
        return np.prod([len(dataset) for dataset in self.datasets])
class ZipLoader(DataLoader):
    """DataLoader over a ``ZipDataset`` driven by a ``UniformSampler``."""
    def __init__(self, *datasets, batch_size, n_batches, return_targets=False, return_idx=True, **kwargs):
        """
        ZipLoader allows to sample batches from zipped datasets with possibly different number of elements.
        """
        # The sampler draws one index per dataset for every sample in a batch.
        us = UniformSampler(*datasets, batch_size=batch_size, n_batches=n_batches)
        # NOTE: ``dl`` is the dataset handed to DataLoader, not a loader.
        dl = ZipDataset(*datasets, return_targets=return_targets, return_idx=return_idx)
        super().__init__(dl, batch_sampler=us, **kwargs)
def get_mean_covariance(mnist):
    """Compute the mean vector and covariance matrix of an MNIST-like dataset.

    Pixel values are rescaled from [0, 255] to [-1, 1] and images are
    flattened before computing the statistics.  Accepts either a dataset
    exposing ``.data`` or a concatenation wrapper exposing ``.datasets``;
    anything else raises ValueError.  Returns ``(mean, covariance)`` as
    torch tensors (covariance in float32).
    """
    def rescale(data):
        return 2 * (data / 255 - .5)

    if hasattr(mnist, 'data'):
        flat = rescale(mnist.data)
    elif hasattr(mnist, 'datasets'):
        flat = torch.cat([rescale(part.data) for part in mnist.datasets])
    else:
        raise ValueError('Argument ``mnist`` is invalid.')
    flat = flat.reshape(len(flat), -1)
    covariance = np.cov(flat.T).astype(np.float32)
    return torch.mean(flat, 0), torch.from_numpy(covariance)
def gaussian_sampler(mean, covariance, batch_size, n_batches, min_eigval=1e-3):
    """Yield batches of N(mean, covariance) samples shaped as 1-channel images.

    The covariance is eigendecomposed and eigendirections with eigenvalues
    <= ``min_eigval`` are discarded, so sampling happens in the reduced
    eigenbasis.  Yields ``(None, samples)`` with ``samples`` of shape
    (batch_size, 1, height, width) where height = width = sqrt(len(mean)).
    """
    # torch.symeig was removed from modern PyTorch; torch.linalg.eigh is the
    # drop-in replacement (also returns eigenvalues in ascending order).
    eigval, eigvec = torch.linalg.eigh(covariance)
    keep = eigval > min_eigval
    eigval, eigvec = eigval[keep], eigvec[:, keep]
    height = width = int(np.sqrt(len(mean)))
    for _ in range(n_batches):
        samples = torch.randn(batch_size, len(eigval))
        samples = mean + (torch.sqrt(eigval) * samples) @ eigvec.T
        yield None, samples.reshape(-1, 1, height, width)
class DistributionDataset():
    """Dataset facade over a distribution: every access draws a fresh sample.

    Items are ``(sample, None)`` with the optional ``transform`` applied to
    the sample; ``idx`` is ignored.  ``__len__`` is 1 since the underlying
    distribution is effectively infinite.
    """
    def __init__(self, distribution, transform=None):
        super().__init__()
        self.distribution = distribution
        self.transform = transform

    def __getitem__(self, idx):
        sample = self.distribution.sample()
        if self.transform:
            sample = self.transform(sample)
        return sample, None

    def __len__(self):
        return 1
def get_rotation(theta):
    """Return the 2x2 rotation matrix for an angle ``theta`` given in degrees."""
    rad = np.radians(theta)
    cos_t = np.cos(rad)
    sin_t = np.sin(rad)
    return np.array([[cos_t, -sin_t],
                     [sin_t, cos_t]])
class CircleDataset():
    '''Toy 2-D dataset: a Gaussian mixture with one component at the origin
    and ``n_centers - 1`` components evenly spaced on the unit circle,
    each with isotropic standard deviation ``sigma``.'''
    def __init__(self, n_samples, n_centers=9, sigma=0.02):
        super().__init__()
        # Component means: the origin plus rotated copies of (1, 0).
        self.nus = [torch.zeros(2)]
        self.sigma = sigma
        for i in range(n_centers-1):
            R = get_rotation(i*360/(n_centers-1))
            self.nus.append(torch.tensor([1, 0] @ R, dtype=torch.float))
        # Uniform mixture: assign each sample to a random component.
        classes = torch.multinomial(torch.ones(n_centers), n_samples,
                                    replacement=True)
        data = []
        for i in range(n_centers):
            n_samples_class = torch.sum(classes == i)
            if n_samples_class == 0:
                continue
            dist = MultivariateNormal(self.nus[i],
                                      torch.eye(2)*self.sigma**2)
            data.append(dist.sample([n_samples_class.item()]))
        self.data = torch.cat(data)
    def __getitem__(self, idx):
        # Unsupervised: items are (point, None) pairs.
        return self.data[idx], None
    def __len__(self):
        return self.data.shape[0]
class CentersDataset(Dataset):
    """Dataset of the mixture centers only: the origin plus ``n_centers - 1``
    points evenly spaced on the unit circle (matches CircleDataset's means)."""
    def __init__(self, n_centers=9):
        super().__init__()
        self.nus = [torch.zeros(2)]
        for k in range(n_centers - 1):
            rotation = get_rotation(k * 360 / (n_centers - 1))
            self.nus.append(torch.tensor([1, 0] @ rotation, dtype=torch.float))
        self.data = torch.stack(self.nus)

    def __getitem__(self, idx):
        # Unsupervised: items are (point, None) pairs.
        return self.data[idx], None

    def __len__(self):
        return self.data.shape[0]
class CustomGaussian:
    """Gaussian over flattened square images, sampled in a reduced eigenbasis.

    Eigendirections of ``covariance`` with eigenvalues <= ``min_eigval`` are
    discarded at construction time; ``sample`` returns a (1, len(mean))
    tensor drawn from the resulting distribution.
    """
    def __init__(self, mean, covariance, min_eigval=1e-3):
        self.mean = mean
        # torch.symeig was removed from modern PyTorch; torch.linalg.eigh is
        # the drop-in replacement (eigenvalues in ascending order).
        eigval, eigvec = torch.linalg.eigh(covariance)
        keep = eigval > min_eigval
        self.eigval, self.eigvec = eigval[keep], eigvec[:, keep]
        # Side length of the (square) image the mean vector represents.
        self.height = self.width = int(np.sqrt(len(mean)))

    def sample(self):
        x = torch.randn(1, len(self.eigval))
        x = self.mean + (torch.sqrt(self.eigval) * x) @ self.eigvec.T
        return x
|
|
"""
Code by Nicola De Cao was forked from https://github.com/nicola-decao/BNAF
MIT License
Copyright (c) 2019 Nicola De Cao, 2019 Peter Zagubisalo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Tuple, Optional as Opt, Union, Sequence as Seq, Dict, List
import math
import numpy as np
import torch as tr
from torch import Tensor
from torch import nn
from torch.nn import init # type: ignore
from kiwi_bugfix_typechecker import nn as nn_
from kiwi_bugfix_typechecker import func
from .flow import SequentialFlow
from .types import ModuleZToXY, SequentialZToXY, ModuleZOptJToXY
class Permutation(ModuleZToXY):
    # Resolved permutation (index list or long tensor).
    p: Union[Seq[int], Tensor]
    # Constant zero log-det-Jacobian buffer (a permutation preserves volume).
    zero: Tensor
    def __init__(self, in_features: int, p: Union[Seq[int], Tensor, str, None]=None):
        """
        Module that outputs a permutation of its input.
        Parameters
        ----------
        in_features :
            The number of input features.
        p :
            The list of indices that indicate the permutation. When ``p`` is not a
            list, tuple or Tensor: if ``p = 'flip'`` the tensor is reversed, if ``p=None`` a random
            permutation is applied.
        """
        super(Permutation, self).__init__()
        self.in_features = in_features
        self.register_buffer('zero', tr.zeros(1))
        if p is None:
            self.p = [int(s) for s in np.random.permutation(in_features)]
        elif p == 'flip':
            self.p = list(reversed(range(in_features)))
        elif isinstance(p, (tuple, list)):
            self.p = [int(s) for s in p]
        elif isinstance(p, Tensor) and (tuple(p.size()) == (in_features,)):
            self.p = p.long()
        else:
            raise ValueError
    def forward_(self, z: Tensor) -> Tuple[Tensor, Tensor]:
        """
        :return: The permuted tensor and the log-det-Jacobian of this permutation
            (always zero, since a permutation is volume preserving).
        """
        return z[:, self.p], self.zero
    def __repr__(self):
        return 'Permutation(in_features={}, p={})'.format(self.in_features, self.p)
class BNAF(SequentialZToXY):
    # Learnable gate used when ``res='gated'``; None otherwise.
    gate: Opt[nn.Parameter]
    _modules: Dict[str, ModuleZOptJToXY]
    def __init__(self, *args: ModuleZOptJToXY, res: Opt[str]=None):
        """
        Class that extends ``torch.nn.Sequential`` for constructing a Block Neural
        Normalizing Flow.
        ``res=None`` is no residual connection, ``res='normal'`` is ``x + f(x)``
        and ``res='gated'`` is ``a * x + (1 - a) * f(x)`` where ``a`` is a learnable parameter.
        :param args: modules to use.
        :param res: Which kind of residual connection to use.
        """
        super(BNAF, self).__init__(*args)
        self.res = res
        if res == 'gated':
            self.gate = nn_.Parameter(init.normal_(tr.empty(1)))
        else:
            self.gate = None
    def forward_(self, z: Tensor) -> Tuple[Tensor, Tensor]:
        """
        :return: The output tensor and the log-det-Jacobian of this transformation.
            Of shape (batch_size, z_dim)
        """
        z_out = z
        # log abs det jacobian:
        ladetj: Tensor
        j: Opt[Tensor] = None
        # Thread the running log-Jacobian block ``j`` through every submodule.
        for module in self._modules.values():
            z_out, j = module.__call__(z_out, j)
            # Pad the Jacobian block to 4 dims so shapes stay uniform.
            j = j if len(j.shape) == 4 else j.view(j.shape + (1, 1))
        if j is not None:
            ladetj = j
        else:
            raise ValueError('Presumably empty Sequential')
        # A residual connection requires matching input/output widths.
        if z.shape[-1] != z_out.shape[-1]:
            raise AssertionError
        if self.res == 'normal':
            ret, ladetj = z + z_out, func.softplus(ladetj.squeeze())
        elif (self.res == 'gated') and (self.gate is not None):
            # Gated residual: sigmoid(gate) * f(x) + (1 - sigmoid(gate)) * x.
            ret = self.gate.sigmoid() * z_out + (-self.gate.sigmoid() + 1) * z
            ladetj = func.softplus(ladetj.squeeze() + self.gate) - func.softplus(self.gate)
        else:
            ret, ladetj = z_out, ladetj.squeeze()
        return ret, ladetj.sum(dim=-1)
    def _get_name(self):
        return 'BNAF(res={})'.format(self.res)
class MaskedWeight(ModuleZOptJToXY):
    # mask_d selects the diagonal blocks (kept strictly positive via exp).
    mask_d: Tensor
    # mask_o selects the remaining lower-triangular (off-diagonal) blocks.
    mask_o: Tensor
    def __init__(self, in_features: int, out_features: int, dim: int, bias: bool=True):
        """
        Module that implements a linear layer with block matrices with positive diagonal blocks.
        Moreover, it uses Weight Normalization (https://arxiv.org/abs/1602.07868) for stability.
        Parameters
        ----------
        in_features :
            The number of input features per each dimension ``dim``.
        out_features :
            The number of output features per each dimension ``dim``.
        dim :
            The number of dimensions of the input of the flow.
        bias :
            Whether to add a parametrizable bias.
        """
        super(MaskedWeight, self).__init__()
        self.in_features, self.out_features, self.dim = in_features, out_features, dim
        # Block lower-triangular initialization: block row i may read only
        # input blocks 0..i (autoregressive structure).
        weight = tr.zeros(out_features, in_features)
        for i in range(dim):
            weight[
                i * out_features // dim:(i + 1) * out_features // dim,
                0:(i + 1) * in_features // dim
            ] = init.xavier_uniform_(tr.empty(out_features // dim, (i + 1) * in_features // dim))
        self._weight = nn_.Parameter(weight)
        # Per-output log scale used by the weight normalization below.
        self._diag_weight = nn_.Parameter(init.uniform_(tr.empty(out_features, 1)).log())
        # Falls back to the int 0 so the forward add is valid without a bias.
        self.bias = nn_.Parameter(init.uniform_(
            tr.empty(out_features), -1 / math.sqrt(out_features), 1 / math.sqrt(out_features)
        )) if bias else 0
        mask_d = tr.zeros_like(weight)
        for i in range(dim):
            mask_d[i * (out_features // dim):(i + 1) * (out_features // dim),
                   i * (in_features // dim):(i + 1) * (in_features // dim)] = 1
        self.register_buffer('mask_d', mask_d)
        mask_o = tr.ones_like(weight)
        for i in range(dim):
            mask_o[i * (out_features // dim):(i + 1) * (out_features // dim),
                   i * (in_features // dim):] = 0
        self.register_buffer('mask_o', mask_o)
    def get_weights(self) -> Tuple[Tensor, Tensor]:
        """
        Computes the weight matrix using masks and weight normalization.
        It also compute the log diagonal blocks of it.
        """
        # Diagonal blocks are exponentiated (positive); off-diagonal kept as-is.
        w = tr.exp(self._weight) * self.mask_d + self._weight * self.mask_o
        w_squared_norm = (w ** 2).sum(-1, keepdim=True)
        w = self._diag_weight.exp() * w / w_squared_norm.sqrt()
        # Log of the normalized diagonal blocks, used for the log-det-Jacobian.
        wpl = self._diag_weight + self._weight - 0.5 * tr.log(w_squared_norm)
        # NOTE(review): ``.byte()`` boolean indexing is deprecated in newer
        # PyTorch; ``.bool()`` is the modern equivalent.
        return (
            w.t(),
            wpl.t()[self.mask_d.byte().t()].view(
                self.dim, self.in_features // self.dim, self.out_features // self.dim)
        )
    def forward_(self, z: Tensor, j: Opt[Tensor]=None) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        z :
            ...
        j :
            The log diagonal block of the partial Jacobian of previous transformations.
        Returns
        -------
        ret :
            The output tensor and the log diagonal blocks of the partial log-Jacobian of previous
            transformations combined with this transformation.
        """
        w, wpl = self.get_weights()
        # log abs det jacobian:
        grad = wpl.transpose(-2, -1).unsqueeze(0).repeat(z.shape[0], 1, 1, 1)
        if j is not None:
            # Compose with the previous log-Jacobian blocks in log space.
            grad = tr.logsumexp(grad.unsqueeze(-2) + j.transpose(-2, -1).unsqueeze(-3), -1)
        return z.matmul(w) + self.bias, grad
    def __repr__(self):
        return 'MaskedWeight(in_features={}, out_features={}, dim={}, bias={})'.format(
            self.in_features, self.out_features, self.dim, not isinstance(self.bias, int))
class Tanh(ModuleZOptJToXY, nn.Tanh):  # type: ignore
    """
    Class that extends ``torch.nn.Tanh`` additionally computing the log diagonal
    blocks of the Jacobian.
    """
    def forward_(self, z: Tensor, j: Opt[Tensor]=None) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        z :
            ...
        j :
            The log diagonal blocks of the partial Jacobian of previous transformations.
        Returns
        -------
        ret :
            The output tensor and the log diagonal blocks of the partial log-Jacobian of previous
            transformations combined with this transformation.
        """
        # log abs det jacobian:
        # log(1 - tanh(z)^2) = 2*(log 2 - z - softplus(-2z))
        #                    = -2*(z - log 2 + softplus(-2z)).
        grad = -2 * (z - math.log(2) + func.softplus(-2 * z))
        if j is not None:
            grad = grad.view(j.shape) + j
        return tr.tanh(z), grad
class BNAFs(SequentialFlow):
    def __init__(self, dim: int, hidden_dim: int=10, flows_n: int=5, layers_n: int=1, res: str= 'gated'):
        """
        Stack of ``flows_n`` BNAF blocks with 'flip' permutations in between.
        ``res=None`` is no residual connection, ``res='normal'`` is ``x + f(x)``
        and ``res='gated'`` is ``a * x + (1 - a) * f(x)`` where ``a`` is a learnable parameter.
        :param res: Which kind of residual connection to use.
        """
        flows: List[Union[SequentialZToXY, ModuleZToXY]] = []
        for f in range(flows_n):
            # Hidden masked layers (``layers_n - 1`` of them), each followed
            # by a Tanh, then a projection back to ``dim`` features.
            layers: List[ModuleZOptJToXY] = []
            for _ in range(layers_n - 1):
                layers.append(MaskedWeight(dim * hidden_dim, dim * hidden_dim, dim=dim))
                layers.append(Tanh())
            layers.append(MaskedWeight(dim * hidden_dim, dim, dim=dim))
            # noinspection PyListCreation
            args: List[ModuleZOptJToXY] = []
            args.append(MaskedWeight(dim, dim * hidden_dim, dim=dim))
            args.append(Tanh())
            args += layers
            # The final flow gets no residual connection.
            flows.append(BNAF(*args, res=res if f < (flows_n - 1) else None))
            # Reverse the feature order between consecutive flows.
            if f < (flows_n - 1):
                flows.append(Permutation(dim, 'flip'))
        super(BNAFs, self).__init__(*flows)
    def backward(self, z: Tensor) -> Tuple[Tensor, Tensor]:
        # The inverse pass is not implemented for BNAF.
        raise NotImplementedError
|
|
#!/Library/Frameworks/Python.framework/Versions/3.8/bin/python3
from sqlite3 import connect
from matplotlib import pyplot as plt
from matplotlib import rcParams
from numpy import array, count_nonzero, logical_and
rcParams['font.size'] = 8
def neutral_mass(mz, adduct):
    """Return the neutral mass for an observed m/z given its adduct type.

    Raises KeyError for an unknown adduct label.
    """
    adduct_offsets = {
        '[M+H]+': 1.0078,
        '[M+K]+': 38.9637,
        '[M+H-H2O]+': -17.0027,
        '[M+Na]+': 22.9898,
        '[M]+': 0.,
    }
    return mz - adduct_offsets[adduct]
def mass_shift_hist(mass_shifts):
    """Plot a histogram of metabolite-parent mass shifts and save it
    to ``mass_shifts.png`` (1-Da bins from -30 to 324)."""
    figure = plt.figure(figsize=(7, 2))
    axes = figure.add_subplot(111)
    # Dashed reference line at zero mass shift.
    axes.axvline(0, ls='--', c='k', lw=1, zorder=-1)
    axes.hist(mass_shifts, bins=list(range(-30, 325)), color='b', histtype='stepfilled')
    for side in ['top', 'right']:
        axes.spines[side].set_visible(False)
    axes.set_xlabel('mass shift')
    axes.set_ylabel('N')
    plt.savefig('mass_shifts.png', bbox_inches='tight', dpi=350)
    plt.close()
def main():
    """Collect parent-to-metabolite mass shifts from ``DMIM_v1.0.db``,
    plot their histogram, and print counts for common biotransformations."""
    con = connect('DMIM_v1.0.db')
    try:
        cur1, cur2 = con.cursor(), con.cursor()
        # Table names cannot be bound parameters, hence the .format(n=...);
        # ``n`` only ever takes the trusted values 1..7 below.
        qry_parent = 'SELECT well, mz, adduct FROM plate_{n} JOIN plate_{n}_id ON plate_{n}.dmim_id = plate_{n}_id.dmim_id WHERE met_n = 0'
        qry_metab = 'SELECT mz, adduct FROM plate_{n} JOIN plate_{n}_id ON plate_{n}.dmim_id = plate_{n}_id.dmim_id WHERE well = ? AND met_n > 0'
        mass_shifts = []
        prev_well = ''
        for n in range(1, 8):
            for well, parent_mz, parent_adduct in cur1.execute(qry_parent.format(n=n)):
                parent_neutral_mass = neutral_mass(float(parent_mz), parent_adduct)
                # A well may contain several parent rows; enumerate its
                # metabolites only once (rows arrive grouped by well).
                if well != prev_well:
                    for metab_mz, metab_adduct in cur2.execute(qry_metab.format(n=n), (well,)):
                        mass_shifts.append(neutral_mass(float(metab_mz), metab_adduct) - parent_neutral_mass)
                prev_well = well
    finally:
        # Always release the database connection (the original leaked it).
        con.close()
    mass_shifts = array(mass_shifts)
    mass_shift_hist(mass_shifts)

    def report(label, lo, hi):
        # Count mass shifts falling inside the closed window [lo, hi].
        print(label, count_nonzero(logical_and(mass_shifts >= lo, mass_shifts <= hi)))

    # Counts for some specific metabolite transformations.
    report('+GSH +O', 322.8, 323.8)
    report('+GSH', 306.8, 307.8)
    report('+Glc +O', 191.8, 192.8)
    report('+Glc', 175.8, 176.8)
    report('+Glc -Me', 161.8, 162.8)
    report('+2O', 31.8, 32.8)
    report('+O', 15.8, 16.8)
    report('-2H', -2.5, -1.5)
    report('-Me', -14.5, -13.5)
    report('-2Me/-Et', -28.5, -27.5)


if __name__ == '__main__':
    main()
|
|
#!/usr/bin/python3
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import style
style.use("fast")

# Plot a simple 3D line through hard-coded sample coordinates.
figure = plt.figure()
axes = figure.add_subplot(111, projection='3d')
xs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ys = [5, 6, 2, 3, 13, 4, 1, 2, 4, 8]
zs = [2, 3, 3, 3, 5, 7, 9, 11, 9, 10]
axes.plot(xs, ys, zs)
plt.show()
|
|
import argparse
import numpy as np
from dataset import Dataset, collate_fn
import torch
import torch.nn.functional as F
import pickle
import os
import torch.nn as nn
from torch.utils.data import DataLoader
from sklearn.linear_model import LinearRegression
# ---------------------------------------------------------------------------
# Evaluate a trained ddg-prediction model on the test split and report the
# Pearson correlation and RMSE between predictions and (negated) labels.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', help= "Disables CUDA training", action='store_true', default=False)
parser.add_argument("--ngpu", help= "number of gpu", type=int, default = 0)
parser.add_argument("--ddg_fpath", help="file path of ddg",type=str,default='ddg/')
parser.add_argument("--wild_pdb", help="file path of wild_pdb",type=str,default='wild_pdb/')
parser.add_argument("--test_keys", help= "test keys", type=str, default='keys/test_keys.pkl')
parser.add_argument("--data_fpath", help= "file path of data", type=str, default='mutation_pdb')
parser.add_argument("--models",help="test models",type=str, default='models of predict ddg ')
parser.add_argument("--batch_size", help= "batch_size", type=int, default =64)
parser.add_argument("--num_workers", help= "number of workers", type=int, default = 0)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.ngpu>0:
    os.environ['CUDA_VISIBLE_DEVICES']='0'
# Test keys identify the mutation complexes to evaluate.
with open (args.test_keys, 'rb') as fp:
    test_keys = pickle.load(fp)
test_dataset = Dataset(test_keys, args.data_fpath, args.ddg_fpath,args.wild_pdb)
test_dataloader = DataLoader(test_dataset, args.batch_size, \
    shuffle=False, num_workers = args.num_workers, collate_fn=collate_fn)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
loss_fn = nn.MSELoss()
# NOTE(review): torch.load unpickles arbitrary objects; only load trusted
# model files.
model = torch.load('model1217.pkl') # move the trained model to the predict folder
model.eval()
list1_test = []  # accumulated ground-truth values (negated below)
list2_test = []  # accumulated model predictions
for i_batch, sample in enumerate(test_dataloader):
    H1,H2 , A1, A2,D1,D2, labels, key = sample
    labels = torch.Tensor(labels)
    H1,H2,A1,A2,D1,D2,labels=H1.to(device),H2.to(device),A1.to(device),A2.to(device),D1.to(device),D2.to(device),labels.to(device)
    pred = model.test_model((H1,H2, A1,A2,D1,D2))
    loss = loss_fn(pred, labels)
    # NOTE(review): labels are negated here — presumably the dataset stores
    # -ddg; confirm the sign convention against the training code.
    labels = -labels.data.cpu().numpy()
    pred = pred.data.cpu().numpy()
    list1_test = np.append(list1_test,labels)
    list2_test = np.append(list2_test,pred)
    acc = pred/labels
# Pearson correlation between predictions and (negated) labels.
rp_test = np.corrcoef(list2_test, list1_test)[0,1]
x = np.array(list1_test).reshape(-1,1)
y = np.array(list2_test).reshape(-1,1)
# Linear fit of predictions against labels (NOTE: rebinds ``model``, which
# previously held the torch network).
model = LinearRegression()
model.fit(x, y)
predict_y = model.predict(x)
predictions = {}
predictions['intercept'] = model.intercept_
predictions['coefficient'] = model.coef_
predictions['predict_value'] = predict_y
print('test_corrcoef',rp_test,'rmse',np.sqrt(((y - x) ** 2).mean()))
|
|
import matplotlib.pyplot as plt
import numpy as np
from ssm.star_cat.hipparcos import load_hipparcos_cat
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, AltAz, EarthLocation
from ssm.core import pchain
from ssm.pmodules import *
from ssm.core.util_pmodules import Aggregate, SimpleInjector
from ssm import pmodules
import copy
from CHECLabPy.plotting.camera import CameraImage
import os
from datetime import datetime
import dashi
from collections import defaultdict
import pickle
from scipy.optimize import minimize
dashi.visual()
def gaussian(x, y, x0, y0, xalpha, yalpha, A):
    """Axis-aligned 2-D Gaussian with amplitude ``A`` centered at (x0, y0),
    with per-axis widths ``xalpha`` and ``yalpha``."""
    dx = (x - x0) / xalpha
    dy = (y - y0) / yalpha
    return A * np.exp(-dx ** 2 - dy ** 2)
def fit_gauss(xy, z, guess):
    """Fit the 2-D ``gaussian`` model to points ``xy`` (N x 2) with values ``z``.

    Minimizes the summed absolute residual starting from ``guess``
    (x0, y0, xalpha, yalpha, A) and returns the fitted (x0, y0) center.
    """
    def residual(params):
        model = gaussian(xy[:, 0], xy[:, 1], *params)
        return np.sum(np.abs(z - model))

    fit = minimize(residual, guess)
    return fit.x[0], fit.x[1]
def get_fc_hotspots(data, clusters, frame_n):
    """Estimate focal-plane hotspot positions for one frame.

    For every pixel cluster of frame ``frame_n``, the hotspot position is
    the brightness-weighted mean of the cluster pixels within 0.023 m of the
    brightest pixel (farther pixels are treated as ghosts and dropped).
    Returns an (n_clusters, 2) array ordered by decreasing total cluster
    brightness.
    """
    positions = []
    totals = []
    for cluster in clusters[frame_n]:
        pix_pos = data.pix_pos[cluster]
        amplitudes = data.data[frame_n, cluster]
        # The brightest pixel is a rough center estimate; suppress ghost
        # pixels farther than 0.023 m from it.
        brightest = np.argmax(amplitudes)
        near = np.linalg.norm(pix_pos - pix_pos[brightest], axis=1) < .023
        positions.append(np.average(pix_pos[near], weights=amplitudes[near], axis=0))
        totals.append(np.sum(amplitudes))
    positions = np.array(positions)
    return positions[np.argsort(totals)[::-1]]
def main(inputfile, image_path):
    """Assess the astrometric pointing of a slow-signal run.

    Calibrates and flat-fields the run in ``inputfile``, finds star hotspots
    frame by frame, matches them against the Hipparcos catalog and compares
    the estimated pointing with the nominal telescope pointing.  A camera
    image per analyzed frame plus a separation histogram are written to
    ``image_path``; per-frame results are pickled to
    'draco_pointing_assessment_ghost_supr1.pkl' in the working directory.

    Fixes vs. original: removed the unused ``color`` local (and its
    throwaway separation comparison), a leftover debug ``print(im)``, a
    redundant ``pass``; hoisted the loop-invariant search-region anchor and
    label dictionary out of the frame loop; integer tqdm total.
    """
    sdt, stars = load_hipparcos_cat()
    # These are the stars we use to identify the patch of sky in the FOV.
    cvmag_lim = 6.6
    # Define telescope frame.
    location = EarthLocation.from_geodetic(lon=14.974609, lat=37.693267, height=1750)
    obstime = Time("2019-05-09T01:37:54.728026")
    altaz_frame = AltAz(location=location, obstime=obstime)
    # Get pixel coordinates from TargetCalib.
    from target_calib import CameraConfiguration

    camera_config = CameraConfiguration("1.1.0")
    mapping = camera_config.GetMapping()
    focal_length = u.Quantity(2.15191, u.m)
    from ssm.pointing.astrometry import StarPatternMatch

    matcher = StarPatternMatch.from_location(
        altaz_frame=altaz_frame,
        stars=stars,
        sdt=sdt,
        fov=12,
        focal_length=focal_length,
        min_alt=-90,
        vmag_lim=cvmag_lim,
        pixsize=mapping.GetSize(),
    )

    # Initialize the calibration process chain.
    data_proc = pchain.ProcessingChain()
    reader = Reader(inputfile)
    data_proc.add(reader)
    # This module removes incomplete frames and marks bad and unstable pixels.
    frame_cleaner = PFCleaner()
    data_proc.add(frame_cleaner)
    cal = Calibrate()
    data_proc.add(cal)
    # Flat-field computed from calibrated frames 16000-26000.
    ff_compute = FlatFielding(16000, 26000, star_thresh=1.3)
    ff_compute.in_data = "calibrated_data"
    ff_compute.out_ff = "ff_calibrated"
    data_proc.add(ff_compute)
    # A simple flat field computation based on the first 7000 frames.
    sff_on_raw = SimpleFF(0, 7000)
    data_proc.add(sff_on_raw)
    # The Aggregate module collects the computed objects from the frame.
    aggr = Aggregate(
        [
            "raw_resp",
            "calibrated_data",
            "ff_calibrated",
        ]
    )
    data_proc.add(aggr)
    # Simple visualization of the chain, then execute it.
    print(data_proc)
    data_proc.run()

    data = aggr.aggr["calibrated_data"][0]
    cffc = aggr.aggr["ff_calibrated"][0]

    # Second chain: flat-field, smooth and cluster the calibrated data.
    proc_chain = pchain.ProcessingChain()
    # Copy data so that we do not overwrite the original data.
    ffdata = copy.deepcopy(data)
    # Apply the flatfielding.
    ffdata.data -= cffc
    # The SimpleInjector just creates a frame with the content of the input dictionary.
    injector = SimpleInjector({"data": ffdata})
    proc_chain.add(injector)
    # Smoothing the signal (maybe not so useful right now).
    smooth = SmoothSlowSignal(n_readouts=20)
    proc_chain.add(smooth)
    # Finds hotspot clusters.
    clust = pmodules.ClusterCleaning(1.0, 0.9)
    clust.in_data = "data"  # smooth.out_data
    proc_chain.add(clust)
    # We want the clusters and the smooth data.
    aggr = Aggregate(["clusters", "smooth_data"])
    proc_chain.add(aggr)
    proc_chain.run()

    # Extract the processed data.
    clusters = aggr.aggr["clusters"][0]
    smooth_data = aggr.aggr["smooth_data"][0]

    plt.set_cmap("Greys_r")
    stop = 26000
    step = 100
    sep_list = []
    data_dict = defaultdict(list)
    # Loop-invariant: the search-region anchor (HIP 85670, beta Draconis)
    # does not change per frame, so look it up once.
    p = matcher.star_coordinates[matcher.star_table.hip_number.values == 85670]
    # HIP number -> Draconis designation, used for labeling matched stars.
    draco = {85670: "beta", 87833: "gamma", 85829: "nu", 85819: "nu", 87585: "xi"}
    for frame_n in tqdm(range(0, stop, step), total=stop // step):
        cluster_pos = get_fc_hotspots(smooth_data, clusters, frame_n)
        matched_hs = matcher.identify_stars(
            cluster_pos[:], horizon_level=25, search_region=(p, 17)
        )

        # Plot the camera image with hotspots and matches overlaid.
        fig, axs = plt.subplots(constrained_layout=True, figsize=(10 / 1.2, 6 / 1.2))
        camera = CameraImage(
            smooth_data.xpix, smooth_data.ypix, smooth_data.pix_size, ax=axs
        )
        im = copy.deepcopy(smooth_data.data[frame_n])
        # NaN pixels would break the color scale; fill with the frame mean.
        im[np.isnan(im)] = np.nanmean(im)
        camera.image = im
        camera.add_colorbar("Rate (MHz)")
        camera.highlight_pixels(
            [item for sublist in clusters[frame_n] for item in sublist], color="r"
        )
        camera.set_limits_minmax(125, 200)
        # White circles: all hotspots; red circle: the brightest one.
        axs.plot(cluster_pos[:, 0], cluster_pos[:, 1], "wo", mfc="none", ms=25, mew=2)
        axs.plot(cluster_pos[0, 0], cluster_pos[0, 1], "ro", mfc="none", ms=25, mew=4)
        bbox_props = dict(boxstyle="Round", fc="orange", ec="orange", lw=2, alpha=0.4)

        # Nominal pointing for this run.
        alt = 73.21 * u.deg
        az = 0.5 * u.deg
        obstime = Time(
            datetime.fromtimestamp(float(smooth_data.time[frame_n])), format="datetime"
        )
        axs.set_title(obstime)
        altaz_frame = AltAz(location=location, obstime=obstime)
        telescope_pointing = SkyCoord(alt=alt, az=az, frame=altaz_frame)
        telsky = telescope_pointing.transform_to("icrs")
        data_dict["ra_true"].append(telsky.ra.rad)
        data_dict["dec_true"].append(telsky.dec.rad)
        data_dict["obstime"].append(obstime)
        data_dict["unixtime"].append(smooth_data.time[frame_n])

        if matched_hs is not None:
            for h in matched_hs:
                axs.plot(h[0][0], h[0][1], "go", mfc="none", ms=25, mew=2)
                print(h[1])
                if h[1] in draco:
                    axs.annotate(
                        draco[h[1]],
                        h[0],
                        h[0] + 0.01,
                        color="black",
                        size=14,
                        bbox=bbox_props,
                    )
                else:
                    axs.annotate('{}'.format(h[1]),
                                 h[0],
                                 h[0] - 0.01,
                                 size=9,
                                 color='red')
            try:
                ra, dec = matcher.determine_pointing(matched_hs)
                estimated_pointing = SkyCoord(ra=ra, dec=dec, unit="rad", frame="icrs")
                sep = estimated_pointing.separation(telescope_pointing)
                sep_list.append(sep.arcsec)
                # status 0: stars matched and pointing fit succeeded.
                data_dict["status"].append(0)
                data_dict["ra_est"].append(ra)
                data_dict["dec_est"].append(dec)
                data_dict["sep"].append(sep.arcsec)
            except Exception:
                # status 2: stars matched but the pointing fit failed.
                data_dict["status"].append(2)
                data_dict["ra_est"].append(np.nan)
                data_dict["dec_est"].append(np.nan)
                data_dict["sep"].append(np.nan)
        else:
            # status 1: no star match for this frame.
            data_dict["ra_est"].append(np.nan)
            data_dict["dec_est"].append(np.nan)
            data_dict["status"].append(1)
            data_dict["sep"].append(np.nan)
        plt.savefig(os.path.join(image_path, "draco_im{:05d}".format(frame_n)))
        plt.close()

    # Histogram of estimated-vs-true pointing separations (arcsec).
    hist_sep = dashi.histogram.hist1d(np.linspace(0, 300, 100))
    hist_sep.fill(np.array(sep_list))
    plt.figure()
    hist_sep.line()
    hist_sep.statbox()
    plt.title("Distribution of est-true pointing separations")
    plt.savefig(os.path.join(image_path, "separation_dist.png"))

    # Persist the per-frame results as plain numpy arrays.
    for k, v in data_dict.items():
        data_dict[k] = np.array(v)
    with open("draco_pointing_assessment_ghost_supr1.pkl", "wb") as f:
        pickle.dump(data_dict, f)
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Start a simple Slow Signal readout listener.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # BUGFIX: short and long option forms must be passed as separate strings;
    # the original single "-p, --path" string registered one literal,
    # untypeable option name instead of -p/--path.
    parser.add_argument(
        "-p",
        "--path",
        dest="path",
        type=str,
        default="AstrometryAssessment",
        help="Path to store images.",
    )
    parser.add_argument(
        "-f",
        "--filename",
        dest="filename",
        type=str,
        default="/home/sflis/CTA/projects/SSM-analysis/data/astri_onsky/d2019-05-08/Run13312.hdf5",
        help="Filename",
    )
    args = parser.parse_args()
    main(inputfile=args.filename, image_path=args.path)
|
|
# -*- coding: utf-8 -*-
"""
=======================================
Generate more advanced auditory stimuli
=======================================
This shows the methods that we provide that facilitate generation
of more advanced stimuli.
"""
import numpy as np
import matplotlib.pyplot as plt
from expyfun import building_doc
from expyfun.stimuli import convolve_hrtf, play_sound, window_edges
# Sampling rate (Hz) and tone parameters for the base stimulus.
fs = 24414
dur = 0.5
freq = 500.
# let's make a square wave: threshold a 500 Hz sine into two flat levels
sig = np.sin(freq * 2 * np.pi * np.arange(dur * fs, dtype=float) / fs)
sig = ((sig > 0) - 0.5) / 5.  # make it reasonably quiet for play_sound
# Taper the edges to avoid onset/offset clicks.
sig = window_edges(sig, fs)
play_sound(sig, fs, norm=False, wait=True)
# Convolve the tone with HRTFs from -90 to +90 degrees azimuth in 15-degree
# steps and concatenate the results in time, so the sound appears to move.
move_sig = np.concatenate([convolve_hrtf(sig, fs, ang)
                           for ang in range(-90, 91, 15)], axis=1)
# Skip audible playback while the documentation is being built.
if not building_doc:
    play_sound(move_sig, fs, norm=False, wait=True)
# Plot the resulting waveform against time (axis 1 is the time axis).
t = np.arange(move_sig.shape[1]) / float(fs)
plt.plot(t, move_sig.T)
plt.xlabel('Time (sec)')
plt.show()
|
|
"""
Copyright (c) 2020-present NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import os
import pickle
import random
import torch
import torch.nn as nn
import torch.optim
import cv2
import torch.nn.functional as F
from config import get_configs
from data_loaders import get_data_loader
from inference import CAMComputer
from util import string_contains_any
import wsol
import wsol.method
from wsol.method import convert_splitbn_model
from pgd_attack import create_attack
from inference import normalize_scoremap
from util import t2n
def set_random_seed(seed):
    """Seed the random, numpy and torch RNGs; no-op when seed is None."""
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
class PerformanceMeter(object):
    """Track one metric across epochs, remembering the best value and epoch."""

    def __init__(self, split, higher_is_better=True):
        self.best_function = max if higher_is_better else min
        self.current_value = None
        self.best_value = None
        self.best_epoch = None
        # The 'val' split starts empty; other splits get a worst-possible
        # sentinel so the epoch index lines up with a placeholder epoch 0.
        if split == 'val':
            self.value_per_epoch = []
        else:
            sentinel = -np.inf if higher_is_better else np.inf
            self.value_per_epoch = [sentinel]

    def update(self, new_value):
        """Record new_value and refresh the current/best bookkeeping."""
        history = self.value_per_epoch
        history.append(new_value)
        self.current_value = new_value
        self.best_value = self.best_function(history)
        self.best_epoch = history.index(self.best_value)
class AverageMeter(object):
    """Running average of a scalar quantity (e.g. per-batch accuracy)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in ``n`` observations of value ``val`` and refresh the mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class Trainer(object):
    """Orchestrates adversarially-augmented WSOL training.

    Builds the model, optimizer and data loaders from the parsed config and
    provides the train / evaluate / checkpoint / report loop used by main().
    """

    _CHECKPOINT_NAME_TEMPLATE = '{}_checkpoint.pth.tar'
    _SPLITS = ('train', 'val', 'test')
    _EVAL_METRICS = ['loss', 'classification', 'localization']
    _BEST_CRITERION_METRIC = 'localization'
    # Number of target classes for each supported dataset.
    _NUM_CLASSES_MAPPING = {
        "CUB": 200,
        "ILSVRC": 1000,
        "OpenImages": 100,
    }
    # Parameter-name prefixes that mark the feature extractor of each
    # architecture family (everything else is treated as classifier).
    _FEATURE_PARAM_LAYER_PATTERNS = {
        'vgg': ['features.'],
        'resnet': ['layer4.', 'fc.'],
        'inception': ['Mixed', 'Conv2d_1', 'Conv2d_2',
                      'Conv2d_3', 'Conv2d_4'],
    }

    def __init__(self):
        self.args = get_configs()
        set_random_seed(self.args.seed)
        print(self.args)
        self.performance_meters = self._set_performance_meters()
        self.reporter = self.args.reporter
        self.model = self._set_model()
        self.cross_entropy_loss = nn.CrossEntropyLoss().cuda()
        self.optimizer = self._set_optimizer()
        self.loaders = get_data_loader(
            data_roots=self.args.data_paths,
            metadata_root=self.args.metadata_root,
            batch_size=self.args.batch_size,
            workers=self.args.workers,
            resize_size=self.args.resize_size,
            crop_size=self.args.crop_size,
            proxy_training_set=self.args.proxy_training_set,
            tencrop=self.args.tencrop,
            num_val_sample_per_class=self.args.num_val_sample_per_class)

    def _set_performance_meters(self):
        """Build {split: {metric: PerformanceMeter}} for all tracked metrics,
        including one localization metric per configured IOU threshold."""
        # BUGFIX: the original used `self._EVAL_METRICS += [...]`; list +=
        # extends the shared *class* attribute in place, so constructing a
        # second Trainer duplicated the IOU metrics.  Rebind an instance-level
        # copy instead.
        self._EVAL_METRICS = self._EVAL_METRICS + [
            'localization_IOU_{}'.format(threshold)
            for threshold in self.args.iou_threshold_list]
        eval_dict = {
            split: {
                metric: PerformanceMeter(split,
                                         higher_is_better=False
                                         if metric == 'loss' else True)
                for metric in self._EVAL_METRICS
            }
            for split in self._SPLITS
        }
        return eval_dict

    def _set_model(self):
        """Instantiate the configured WSOL architecture on the GPU, optionally
        converting it to split batch-norm for clean/adversarial batches."""
        num_classes = self._NUM_CLASSES_MAPPING[self.args.dataset_name]
        print("Loading model {}".format(self.args.architecture))
        model = wsol.__dict__[self.args.architecture](
            dataset_name=self.args.dataset_name,
            architecture_type=self.args.architecture_type,
            pretrained=self.args.pretrained,
            num_classes=num_classes,
            large_feature_map=self.args.large_feature_map,
            pretrained_path=self.args.pretrained_path,
            adl_drop_rate=self.args.adl_drop_rate,
            adl_drop_threshold=self.args.adl_threshold,
            acol_drop_threshold=self.args.acol_threshold)
        num_aug_splits = 0
        if self.args.aug_splits > 0:
            assert self.args.aug_splits > 1, 'A split of 1 makes no sense'
            num_aug_splits = self.args.aug_splits
        if self.args.split_bn:
            assert num_aug_splits > 1 or self.args.resplit
            model = convert_splitbn_model(model, max(num_aug_splits, 2), self.args.batch_size)
        model = model.cuda()
        print(model)
        return model

    def _set_optimizer(self):
        """SGD with distinct learning rates for feature vs classifier params.

        Note the architecture-specific inversion kept from the original: for
        resnet50 the parameters matching the feature patterns (layer4/fc) get
        the classifier LR and vice versa.
        """
        param_features = []
        param_classifiers = []

        def param_features_substring_list(architecture):
            # Prefix list marking feature-extractor parameters for this family.
            for key in self._FEATURE_PARAM_LAYER_PATTERNS:
                if architecture.startswith(key):
                    return self._FEATURE_PARAM_LAYER_PATTERNS[key]
            raise KeyError("Fail to recognize the architecture {}"
                           .format(self.args.architecture))

        for name, parameter in self.model.named_parameters():
            if string_contains_any(
                    name,
                    param_features_substring_list(self.args.architecture)):
                if self.args.architecture in ('vgg16', 'inception_v3'):
                    param_features.append(parameter)
                elif self.args.architecture == 'resnet50':
                    param_classifiers.append(parameter)
            else:
                if self.args.architecture in ('vgg16', 'inception_v3'):
                    param_classifiers.append(parameter)
                elif self.args.architecture == 'resnet50':
                    param_features.append(parameter)

        optimizer = torch.optim.SGD([
            {'params': param_features, 'lr': self.args.lr},
            {'params': param_classifiers,
             'lr': self.args.lr * self.args.lr_classifier_ratio}],
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay,
            nesterov=True)
        return optimizer

    def entropy_loss(self, v):
        """Weighted entropy of the clean and adversarial halves of a batch.

        :param v: tensor of shape (batch, h, w); the first half along dim 0
            is assumed clean, the second half adversarial.
        :return: scalar tensor lambda_c * H(clean) + lambda_a * H(adv).
        """
        n, h, w = v.size()
        f = int(n / 2)
        adv = v[f:, :, :]
        clean = v[:f, :, :]
        # 1e-30 guards log2(0); both terms are normalized by the full n*h*w.
        p = -torch.sum(torch.mul(adv, torch.log2(adv + 1e-30))) / (n * h * w)
        b = -torch.sum(torch.mul(clean, torch.log2(clean + 1e-30))) / (n * h * w)
        # BUGFIX: the original referenced a module-global `args` that does not
        # exist in this file (NameError at runtime); the weights live on
        # self.args.
        return self.args.lambda_c * b + self.args.lambda_a * p

    def prepare_ent(self, cams, image_size):
        """Normalize each CAM, stack them on the GPU and return the entropy
        loss of the stack.

        NOTE(review): the original resized each CAM to image_size with
        cv2.resize and then discarded the result, so normalization ran on the
        raw CAM resolution.  That behavior is preserved and the dead resize
        removed — confirm whether resizing was actually intended.
        """
        cams = t2n(cams)
        stacked = None
        for cam in cams:
            cam_normalized = normalize_scoremap(cam)
            chunk = torch.unsqueeze(
                torch.from_numpy(cam_normalized).float().cuda(), 0)
            stacked = chunk if stacked is None else torch.cat((stacked, chunk), 0)
        return self.entropy_loss(stacked)

    def _wsol_training(self, images, target):
        """One forward pass with the configured WSOL method.

        :return: (logits, total loss) where the loss includes the CAM-entropy
            regularizer weighted by args.lambda1 (except on the cutmix path,
            which returns early as in the original).
        """
        if (self.args.wsol_method == 'cutmix' and
                self.args.cutmix_prob > np.random.rand(1) and
                self.args.cutmix_beta > 0):
            images, target_a, target_b, lam = wsol.method.cutmix(
                images, target, self.args.cutmix_beta)
            output_dict = self.model(images)
            logits = output_dict['logits']
            loss = (self.cross_entropy_loss(logits, target_a) * lam +
                    self.cross_entropy_loss(logits, target_b) * (1. - lam))
            return logits, loss

        if self.args.wsol_method == 'has':
            images = wsol.method.has(images, self.args.has_grid_size,
                                     self.args.has_drop_rate)

        output_dict = self.model(images, target)
        logits = output_dict['logits']
        ent = self.prepare_ent(output_dict['cams'], images.shape[2:])

        if self.args.wsol_method in ('acol', 'spg'):
            loss = wsol.method.__dict__[self.args.wsol_method].get_loss(
                output_dict, target, spg_thresholds=self.args.spg_thresholds)
        else:
            loss = self.cross_entropy_loss(logits, target)
        loss = loss + (self.args.lambda1 * ent)
        return logits, loss

    def train(self, attack, split):
        """Train for one epoch on `split`, doubling every batch with
        adversarial examples produced by `attack`."""
        self.model.train()
        loader = self.loaders[split]
        total_loss = 0.0
        num_correct = 0
        num_images = 0
        # BUGFIX: int(len(loader) / 10) is 0 for loaders shorter than 10
        # batches, which made the progress modulo raise ZeroDivisionError.
        log_interval = max(1, len(loader) // 10)

        for batch_idx, (images, target, _) in enumerate(loader):
            images = images.cuda()
            target = target.cuda()
            # Append an adversarial twin of the batch; labels are duplicated.
            x_adv = attack(images, target)
            images = torch.cat((images, x_adv), 0).cuda()
            target = torch.cat((target, target), 0).cuda()

            if batch_idx % log_interval == 0:
                print(" iteration ({} / {})".format(batch_idx + 1, len(loader)))

            logits, loss = self._wsol_training(images, target)
            pred = logits.argmax(dim=1)
            total_loss += loss.item() * images.size(0)
            num_correct += (pred == target).sum().item()
            num_images += images.size(0)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

        loss_average = total_loss / float(num_images)
        classification_acc = num_correct / float(num_images) * 100
        self.performance_meters[split]['classification'].update(
            classification_acc)
        self.performance_meters[split]['loss'].update(loss_average)
        return dict(classification_acc=classification_acc,
                    loss=loss_average)

    def print_performances(self):
        """Print current (and, for train/val, best) values of every metric."""
        for split in self._SPLITS:
            for metric in self._EVAL_METRICS:
                current_performance = \
                    self.performance_meters[split][metric].current_value
                if current_performance is not None:
                    print("Split {}, metric {}, current value: {}".format(
                        split, metric, current_performance))
                    if split != 'test':
                        print("Split {}, metric {}, best value: {}".format(
                            split, metric,
                            self.performance_meters[split][metric].best_value))
                        print("Split {}, metric {}, best epoch: {}".format(
                            split, metric,
                            self.performance_meters[split][metric].best_epoch))

    def save_performances(self):
        """Pickle the full performance-meter dictionary to the log folder."""
        log_path = os.path.join(self.args.log_folder, 'performance_log.pickle')
        with open(log_path, 'wb') as f:
            pickle.dump(self.performance_meters, f)

    def accuracy(self, logits, target, topk=(1,)):
        """Compute the top-k accuracies of classification results.

        BUGFIX: originally defined without `self` although always invoked as
        `self.accuracy(...)`, which shifted every argument by one and raised.

        :param logits: (batch, classes) score tensor.
        :param target: ground-truth labels, shape (batch,).
        :param topk: tuple/list of the k values to evaluate.
        :return: list of 1-element tensors, one accuracy (in %) per k.
        """
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = logits.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape (not view): correct[:k] may be non-contiguous.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res

    def _compute_accuracy(self, loader):
        """Classify every batch of `loader`, returning (top1, top5) average
        accuracies; per-batch predictions are dumped to pred_prob.pkl."""
        top1_clsacc = AverageMeter()
        # BUGFIX: top5_clsacc was used but never defined in the original.
        top5_clsacc = AverageMeter()
        pred_prob = []
        for i, (images, targets, image_ids) in enumerate(loader):
            if self.args.tencrop:
                bs, ncrops, c, h, w = images.size()
                images = images.view(-1, c, h, w)
                label_input = targets.repeat(10, 1)
                targets = label_input.view(-1)
            images = images.cuda()
            targets = targets.cuda()
            output_dict = self.model(images, targets)
            if self.args.tencrop:
                output_dict['logits'] = F.softmax(output_dict['logits'], dim=1)
                # BUGFIX: average the crops per *sample* (the original
                # hard-coded a leading batch of 1), and shrink the repeated
                # targets back to one label per sample to match.
                output_dict['logits'] = output_dict['logits'].view(bs, ncrops, -1).mean(1)
                targets = targets.view(ncrops, bs)[0]
            pred = output_dict['logits'].argmax(dim=1)
            # BUGFIX: rank the full logits (the original passed the already
            # argmax'ed `pred`) and move results to CPU before .numpy().
            prec1_1, prec5_1 = self.accuracy(output_dict['logits'],
                                             targets.long(), topk=(1, 5))
            top1_clsacc.update(prec1_1[0].cpu().numpy(), images.size(0))
            top5_clsacc.update(prec5_1[0].cpu().numpy(), images.size(0))
            pred_prob.append(pred)
        with open('pred_prob.pkl', 'wb') as f:
            pickle.dump(pred_prob, f)
        return top1_clsacc.avg, top5_clsacc.avg

    def evaluate(self, epoch, split):
        """Evaluate classification and CAM localization on `split`, updating
        the corresponding performance meters."""
        print("Evaluate epoch {}, split {}".format(epoch, split))
        self.model.eval()
        # BUGFIX: _compute_accuracy returns a (top1, top5) tuple; the meter
        # must be fed the top-1 scalar, not the tuple.
        top1_acc, _top5_acc = self._compute_accuracy(loader=self.loaders[split])
        self.performance_meters[split]['classification'].update(top1_acc)

        # CAM evaluation runs without ten-crop, so rebuild the loaders.
        self.args.tencrop = False
        self.loaders = get_data_loader(
            data_roots=self.args.data_paths,
            metadata_root=self.args.metadata_root,
            batch_size=self.args.batch_size,
            workers=self.args.workers,
            resize_size=self.args.resize_size,
            crop_size=self.args.crop_size,
            proxy_training_set=self.args.proxy_training_set,
            tencrop=self.args.tencrop,
            num_val_sample_per_class=self.args.num_val_sample_per_class)
        cam_computer = CAMComputer(
            model=self.model,
            loader=self.loaders[split],
            metadata_root=os.path.join(self.args.metadata_root, split),
            mask_root=self.args.mask_root,
            iou_threshold_list=self.args.iou_threshold_list,
            dataset_name=self.args.dataset_name,
            split=split,
            cam_curve_interval=self.args.cam_curve_interval,
            tencrop=self.args.tencrop,
            multi_contour_eval=self.args.multi_contour_eval,
        )
        cam_performance = cam_computer.compute_and_evaluate_cams()

        if self.args.multi_iou_eval or self.args.dataset_name == 'OpenImages':
            loc_score = np.average(cam_performance)
        else:
            loc_score = cam_performance[self.args.iou_threshold_list.index(50)]
        self.performance_meters[split]['localization'].update(loc_score)

        if self.args.dataset_name in ('CUB', 'ILSVRC'):
            for idx, IOU_THRESHOLD in enumerate(self.args.iou_threshold_list):
                self.performance_meters[split][
                    'localization_IOU_{}'.format(IOU_THRESHOLD)].update(
                    cam_performance[idx])

    def _torch_save_model(self, filename, epoch):
        """Serialize architecture name, epoch, model and optimizer state."""
        torch.save({'architecture': self.args.architecture,
                    'epoch': epoch,
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict()},
                   os.path.join(self.args.log_folder, filename))

    def save_checkpoint(self, epoch, split):
        """Save 'best' checkpoint when this epoch is the best so far on the
        criterion metric, otherwise save 'last'."""
        if (self.performance_meters[split][self._BEST_CRITERION_METRIC]
                .best_epoch) == epoch:
            self._torch_save_model(
                self._CHECKPOINT_NAME_TEMPLATE.format('best'), epoch)
        else:
            self._torch_save_model(
                self._CHECKPOINT_NAME_TEMPLATE.format('last'), epoch)

    def report_train(self, train_performance, epoch, split='train'):
        """Write training accuracy/loss for this epoch via the reporter."""
        reporter_instance = self.reporter(self.args.reporter_log_root, epoch)
        reporter_instance.add(
            key='{split}/classification'.format(split=split),
            val=train_performance['classification_acc'])
        reporter_instance.add(
            key='{split}/loss'.format(split=split),
            val=train_performance['loss'])
        reporter_instance.write()

    def report(self, epoch, split):
        """Write current and best values of every metric via the reporter."""
        reporter_instance = self.reporter(self.args.reporter_log_root, epoch)
        for metric in self._EVAL_METRICS:
            reporter_instance.add(
                key='{split}/{metric}'
                    .format(split=split, metric=metric),
                val=self.performance_meters[split][metric].current_value)
            reporter_instance.add(
                key='{split}/{metric}_best'
                    .format(split=split, metric=metric),
                val=self.performance_meters[split][metric].best_value)
        reporter_instance.write()

    def adjust_learning_rate(self, epoch):
        """Decay every param group's LR by 10x every lr_decay_frequency epochs."""
        if epoch != 0 and epoch % self.args.lr_decay_frequency == 0:
            for param_group in self.optimizer.param_groups:
                param_group['lr'] *= 0.1

    def load_checkpoint(self, checkpoint_type):
        """Restore model weights from the 'best' or 'last' checkpoint.

        :raises ValueError: for an unknown checkpoint_type.
        :raises IOError: when the checkpoint file does not exist.
        """
        if checkpoint_type not in ('best', 'last'):
            raise ValueError("checkpoint_type must be either best or last.")
        checkpoint_path = os.path.join(
            self.args.log_folder,
            self._CHECKPOINT_NAME_TEMPLATE.format(checkpoint_type))
        if os.path.isfile(checkpoint_path):
            checkpoint = torch.load(checkpoint_path)
            self.model.load_state_dict(checkpoint['state_dict'], strict=True)
            print("Check {} loaded.".format(checkpoint_path))
        else:
            raise IOError("No checkpoint {}.".format(checkpoint_path))
def main():
    """Entry point: optionally train with adversarial augmentation, then
    evaluate the selected checkpoint on the test split and persist metrics."""
    trainer = Trainer()
    # NOTE(review): onlyTest/resume look like string-typed config flags
    # (resume is compared to the string 'False'); the comparisons are kept
    # as-is — confirm their types against get_configs().
    if trainer.args.onlyTest == False:
        if trainer.args.resume == 'False':
            print("===========================================================")
            print("Start epoch 0 ...")
            trainer.evaluate(epoch=0, split='val')
            trainer.print_performances()
            trainer.report(epoch=0, split='val')
            trainer.save_checkpoint(epoch=0, split='val')
            print("Epoch 0 done.")
        attack = create_attack(attack_method=trainer.args.attack_method.lower(),
                               model=trainer.model,
                               epsilon=trainer.args.epsilon, k=trainer.args.k,
                               alpha=trainer.args.alpha,
                               mu=trainer.args.mu,
                               random_start=trainer.args.random_start)
        # BUGFIX: Trainer never defines start_epoch, so the original raised
        # AttributeError here; default to 0 for a fresh run.
        start_epoch = getattr(trainer, 'start_epoch', 0)
        for epoch in range(start_epoch, trainer.args.epochs):
            print("===========================================================")
            print("Start epoch {} ...".format(epoch + 1))
            trainer.adjust_learning_rate(epoch + 1)
            train_performance = trainer.train(attack, split='train')
            trainer.report_train(train_performance, epoch + 1, split='train')
            trainer.evaluate(epoch + 1, split='val')
            trainer.print_performances()
            trainer.report(epoch + 1, split='val')
            trainer.save_checkpoint(epoch + 1, split='val')
            print("Epoch {} done.".format(epoch + 1))
    print("===========================================================")
    print("Final epoch evaluation on test set ...")
    trainer.load_checkpoint(checkpoint_type=trainer.args.eval_checkpoint_type)
    trainer.evaluate(trainer.args.epochs, split='test')
    trainer.print_performances()
    trainer.report(trainer.args.epochs, split='test')
    trainer.save_performances()
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
|
|
# coding: utf-8
import warnings
try:
import talib as ta
except ImportError:
from czsc import ta
ta_lib_hint = "没有安装 ta-lib !!! 请到 https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib " \
"下载对应版本安装,预计分析速度提升2倍"
warnings.warn(ta_lib_hint)
import pandas as pd
import numpy as np
from datetime import datetime
from czsc.utils import plot_ka
def find_zs(points):
    """Identify pivot zones ("zhongshu") from stroke or segment marker points.

    :param points: list of dict; stroke (bi) or segment (xd) markers, each
        carrying at least 'dt', 'fx_mark' ('g' top / 'd' bottom) and a price
        under 'bi' or 'xd'.
    :return: list of dict, one per detected pivot zone; [] when fewer than
        five markers are supplied.

    NOTE(review): stroke markers are mutated in place (an 'xd' key is added),
    so the caller's list is modified.
    """
    if len(points) < 5:
        return []
    # When the input consists of stroke markers, mirror the 'bi' value into
    # 'xd' so the rest of the algorithm can treat both inputs uniformly.
    for j, x in enumerate(points):
        if x.get("bi", 0):
            points[j]['xd'] = x["bi"]

    def __get_zn(zn_points_):
        """Sub-level moves that run in the pivot's direction are the "Zn"
        segments; in time order, each one's high/low are recorded as gn/dn."""
        if len(zn_points_) % 2 != 0:
            zn_points_ = zn_points_[:-1]

        if zn_points_[0]['fx_mark'] == "d":
            z_direction = "up"
        else:
            z_direction = "down"

        zn = []
        for i in range(0, len(zn_points_), 2):
            zn_ = {
                "start_dt": zn_points_[i]['dt'],
                "end_dt": zn_points_[i + 1]['dt'],
                "high": max(zn_points_[i]['xd'], zn_points_[i + 1]['xd']),
                "low": min(zn_points_[i]['xd'], zn_points_[i + 1]['xd']),
                "direction": z_direction
            }
            zn_['mid'] = zn_['low'] + (zn_['high'] - zn_['low']) / 2
            zn.append(zn_)
        return zn

    k_xd = points
    k_zs = []
    zs_xd = []

    for i in range(len(k_xd)):
        if len(zs_xd) < 5:
            zs_xd.append(k_xd[i])
            continue
        xd_p = k_xd[i]
        # Candidate pivot range from the first four buffered markers.
        zs_d = max([x['xd'] for x in zs_xd[:4] if x['fx_mark'] == 'd'])
        zs_g = min([x['xd'] for x in zs_xd[:4] if x['fx_mark'] == 'g'])

        if zs_g <= zs_d:
            # No overlap yet: slide the window forward by one marker.
            zs_xd.append(k_xd[i])
            zs_xd.pop(0)
            continue

        # Over all Zn of the pivot define GG=max(gn), G=min(gn), D=max(dn),
        # DD=min(dn); ZG=min(g1, g2) and ZD=max(d1, d2), so [ZD, ZG] is the
        # pivot's price range.
        if xd_p['fx_mark'] == "d" and xd_p['xd'] > zs_g:
            zn_points = zs_xd[3:]
            # The segment ends above the pivot: a "third buy" point forms.
            k_zs.append({
                'ZD': zs_d,
                "ZG": zs_g,
                'G': min([x['xd'] for x in zs_xd if x['fx_mark'] == 'g']),
                'GG': max([x['xd'] for x in zs_xd if x['fx_mark'] == 'g']),
                'D': max([x['xd'] for x in zs_xd if x['fx_mark'] == 'd']),
                'DD': min([x['xd'] for x in zs_xd if x['fx_mark'] == 'd']),
                'start_point': zs_xd[1],
                'end_point': zs_xd[-2],
                "zn": __get_zn(zn_points),
                "points": zs_xd,
                "third_buy": xd_p
            })
            zs_xd = []
        elif xd_p['fx_mark'] == "g" and xd_p['xd'] < zs_d:
            zn_points = zs_xd[3:]
            # The segment ends below the pivot: a "third sell" point forms.
            k_zs.append({
                'ZD': zs_d,
                "ZG": zs_g,
                'G': min([x['xd'] for x in zs_xd if x['fx_mark'] == 'g']),
                'GG': max([x['xd'] for x in zs_xd if x['fx_mark'] == 'g']),
                'D': max([x['xd'] for x in zs_xd if x['fx_mark'] == 'd']),
                'DD': min([x['xd'] for x in zs_xd if x['fx_mark'] == 'd']),
                'start_point': zs_xd[1],
                'end_point': zs_xd[-2],
                "points": zs_xd,
                "zn": __get_zn(zn_points),
                "third_sell": xd_p
            })
            zs_xd = []
        else:
            zs_xd.append(xd_p)

    if len(zs_xd) >= 5:
        zs_d = max([x['xd'] for x in zs_xd[:4] if x['fx_mark'] == 'd'])
        zs_g = min([x['xd'] for x in zs_xd[:4] if x['fx_mark'] == 'g'])
        if zs_g > zs_d:
            zn_points = zs_xd[3:]
            # Trailing, still-open pivot: no closing third buy/sell yet, so
            # end_point is None.
            k_zs.append({
                'ZD': zs_d,
                "ZG": zs_g,
                'G': min([x['xd'] for x in zs_xd if x['fx_mark'] == 'g']),
                'GG': max([x['xd'] for x in zs_xd if x['fx_mark'] == 'g']),
                'D': max([x['xd'] for x in zs_xd if x['fx_mark'] == 'd']),
                'DD': min([x['xd'] for x in zs_xd if x['fx_mark'] == 'd']),
                'start_point': zs_xd[1],
                'end_point': None,
                "zn": __get_zn(zn_points),
                "points": zs_xd,
            })
    return k_zs
class KlineAnalyze:
    def __init__(self, kline, name="本级别", min_bi_k=5, bi_mode="old",
                 max_raw_len=10000, ma_params=(5, 20, 120), verbose=False):
        """
        :param kline: list or pd.DataFrame
            Raw K-line data; rows/dicts must carry at least 'symbol', 'dt',
            'open', 'close', 'high', 'low'.
        :param name: str
            Label for this analysis level.
        :param min_bi_k: int
            Minimum number of K-lines a stroke (bi) must contain.
        :param bi_mode: str
            "new" for new-style strokes, "old" for old-style; default "old".
        :param max_raw_len: int
            Maximum length kept for the raw K-line sequence.
        :param ma_params: tuple of int
            Periods of the moving-average system.
        :param verbose: bool
        """
        self.name = name
        self.verbose = verbose
        self.min_bi_k = min_bi_k
        self.bi_mode = bi_mode
        self.max_raw_len = max_raw_len
        self.ma_params = ma_params
        self.kline_raw = []  # raw K-line sequence
        self.kline_new = []  # K-line sequence with containment removed
        # auxiliary technical indicators
        self.ma = []
        self.macd = []
        # fractals (fx), strokes (bi), segments (xd)
        self.fx_list = []
        self.bi_list = []
        self.xd_list = []

        # initialize from the input K-lines
        if isinstance(kline, pd.DataFrame):
            columns = kline.columns.to_list()
            self.kline_raw = [{k: v for k, v in zip(columns, row)} for row in kline.values]
        else:
            self.kline_raw = kline

        # keep only the most recent max_raw_len bars
        self.kline_raw = self.kline_raw[-self.max_raw_len:]
        self.symbol = self.kline_raw[0]['symbol']
        self.start_dt = self.kline_raw[0]['dt']
        self.end_dt = self.kline_raw[-1]['dt']
        self.latest_price = self.kline_raw[-1]['close']

        self._update_ta()
        self._update_kline_new()
        self._update_fx_list()
        self._update_bi_list()
        self._update_xd_list()
    def _update_ta(self):
        """Update auxiliary technical indicators (moving averages and MACD).

        On the first call the full history is computed; afterwards only the
        newest bar's value is computed and appended, or the last stored value
        is overwritten when the newest raw bar is not actually new.
        """
        if not self.ma:
            # Initial fill: SMA for every configured period over all bars.
            ma_temp = dict()
            close_ = np.array([x["close"] for x in self.kline_raw], dtype=np.double)
            for p in self.ma_params:
                ma_temp['ma%i' % p] = ta.SMA(close_, p)

            for i in range(len(self.kline_raw)):
                ma_ = {'ma%i' % p: ma_temp['ma%i' % p][i] for p in self.ma_params}
                ma_.update({"dt": self.kline_raw[i]['dt']})
                self.ma.append(ma_)
        else:
            # Incremental update: arithmetic mean over the last p closes.
            ma_ = {'ma%i' % p: sum([x['close'] for x in self.kline_raw[-p:]]) / p
                   for p in self.ma_params}
            ma_.update({"dt": self.kline_raw[-1]['dt']})
            if self.verbose:
                print("ma new: %s" % str(ma_))

            # If the previous raw bar matches the latest stored timestamp the
            # newest bar is new -> append; otherwise refresh the last entry.
            if self.kline_raw[-2]['dt'] == self.ma[-1]['dt']:
                self.ma.append(ma_)
            else:
                self.ma[-1] = ma_
            assert self.ma[-2]['dt'] == self.kline_raw[-2]['dt']

        if not self.macd:
            close_ = np.array([x["close"] for x in self.kline_raw], dtype=np.double)
            # m1 is diff; m2 is dea; m3 is macd
            m1, m2, m3 = ta.MACD(close_, fastperiod=12, slowperiod=26, signalperiod=9)
            for i in range(len(self.kline_raw)):
                self.macd.append({
                    "dt": self.kline_raw[i]['dt'],
                    "diff": m1[i],
                    "dea": m2[i],
                    "macd": m3[i]
                })
        else:
            # Incremental update recomputed on the last 200 bars only
            # (assumed enough for the 12/26/9 MACD tail value to be stable).
            close_ = np.array([x["close"] for x in self.kline_raw[-200:]], dtype=np.double)
            # m1 is diff; m2 is dea; m3 is macd
            m1, m2, m3 = ta.MACD(close_, fastperiod=12, slowperiod=26, signalperiod=9)
            macd_ = {
                "dt": self.kline_raw[-1]['dt'],
                "diff": m1[-1],
                "dea": m2[-1],
                "macd": m3[-1]
            }
            if self.verbose:
                print("macd new: %s" % str(macd_))

            # Same append-vs-overwrite logic as for the moving averages.
            if self.kline_raw[-2]['dt'] == self.macd[-1]['dt']:
                self.macd.append(macd_)
            else:
                self.macd[-1] = macd_
            assert self.macd[-2]['dt'] == self.kline_raw[-2]['dt']
    def _update_kline_new(self):
        """Update the K-line sequence with containment (inclusion) removed.

        Adjacent bars where one high/low range contains the other are merged
        into a single bar following the prevailing direction.
        """
        if len(self.kline_new) == 0:
            for x in self.kline_raw[:4]:
                self.kline_new.append(dict(x))

        # A new raw bar can only affect the tail of the processed sequence,
        # so drop the last two merged bars and re-process from there.
        self.kline_new = self.kline_new[:-2]
        if len(self.kline_new) <= 4:
            right_k = [x for x in self.kline_raw if x['dt'] > self.kline_new[-1]['dt']]
        else:
            right_k = [x for x in self.kline_raw[-100:] if x['dt'] > self.kline_new[-1]['dt']]

        if len(right_k) == 0:
            return

        for k in right_k:
            k = dict(k)
            last_kn = self.kline_new[-1]
            # Prevailing direction from the last two processed bars.
            # NOTE(review): assumes kline_new holds at least two bars here —
            # verify for very short inputs.
            if self.kline_new[-1]['high'] > self.kline_new[-2]['high']:
                direction = "up"
            else:
                direction = "down"

            # Check whether either bar's range contains the other's.
            cur_h, cur_l = k['high'], k['low']
            last_h, last_l = last_kn['high'], last_kn['low']
            if (cur_h <= last_h and cur_l >= last_l) or (cur_h >= last_h and cur_l <= last_l):
                self.kline_new.pop(-1)
                # Containment found: merge according to the direction.
                if direction == "up":
                    last_h = max(last_h, cur_h)
                    last_l = max(last_l, cur_l)
                elif direction == "down":
                    last_h = min(last_h, cur_h)
                    last_l = min(last_l, cur_l)
                else:
                    raise ValueError

                k.update({"high": last_h, "low": last_l})
                # Preserve the candle color (bullish/bearish) of the new bar.
                if k['open'] >= k['close']:
                    k.update({"open": last_h, "close": last_l})
                else:
                    k.update({"open": last_l, "close": last_h})
            self.kline_new.append(k)
    def _update_fx_list(self):
        """Update the fractal (fenxing) sequence.

        A top fractal ('g') is a bar whose high exceeds both neighbors; a
        bottom fractal ('d') is a bar whose low is below both neighbors.
        """
        if len(self.kline_new) < 3:
            return

        # The most recent fractal may still change, so recompute from there.
        self.fx_list = self.fx_list[:-1]
        if len(self.fx_list) == 0:
            kn = self.kline_new
        else:
            kn = [x for x in self.kline_new[-100:] if x['dt'] >= self.fx_list[-1]['dt']]

        i = 1
        while i <= len(kn) - 2:
            k1, k2, k3 = kn[i - 1: i + 2]

            if k1['high'] < k2['high'] > k3['high']:
                # top fractal
                if self.verbose:
                    print("顶分型:{} - {} - {}".format(k1['dt'], k2['dt'], k3['dt']))
                fx = {
                    "dt": k2['dt'],
                    "fx_mark": "g",
                    "fx": k2['high'],
                    "fx_high": k2['high'],
                    "fx_low": min(k1['low'], k3['low']),
                }
                self.fx_list.append(fx)
            elif k1['low'] > k2['low'] < k3['low']:
                # bottom fractal
                if self.verbose:
                    print("底分型:{} - {} - {}".format(k1['dt'], k2['dt'], k3['dt']))
                fx = {
                    "dt": k2['dt'],
                    "fx_mark": "d",
                    "fx": k2['low'],
                    "fx_high": max(k1['high'], k3['high']),
                    "fx_low": k2['low'],
                }
                self.fx_list.append(fx)
            else:
                # no fractal at this position
                if self.verbose:
                    print("无分型:{} - {} - {}".format(k1['dt'], k2['dt'], k3['dt']))
            i += 1
def _update_bi_list(self):
    """Update the stroke (笔) list from the fractal list.

    Stroke mark examples:
    {'dt': Timestamp('2020-05-25 15:00:00'),
    'fx_mark': 'd',
    'fx_high': 2821.5,
    'fx_low': 2802.47,
    'bi': 2802.47}
    {'dt': Timestamp('2020-07-09 15:00:00'),
    'fx_mark': 'g',
    'fx_high': 3456.97,
    'fx_low': 3366.08,
    'bi': 3456.97}
    """
    if len(self.fx_list) < 2:
        return

    # Only the last stroke mark can be affected by new data.
    self.bi_list = self.bi_list[:-1]
    if len(self.bi_list) == 0:
        # Seed with the first two fractals.
        for fx in self.fx_list[:2]:
            bi = dict(fx)
            bi['bi'] = bi.pop('fx')
            self.bi_list.append(bi)

    # Collect fractals / K-lines to the right of the last stroke mark;
    # bound the scan window once the lists are long enough.
    # bi_mode selects which bar series counts toward the minimum stroke
    # length: "old" uses the inclusion-free bars, "new" the raw bars.
    if len(self.bi_list) <= 2:
        right_fx = [x for x in self.fx_list if x['dt'] > self.bi_list[-1]['dt']]
        if self.bi_mode == "old":
            right_kn = [x for x in self.kline_new if x['dt'] >= self.bi_list[-1]['dt']]
        elif self.bi_mode == 'new':
            right_kn = [x for x in self.kline_raw if x['dt'] >= self.bi_list[-1]['dt']]
        else:
            raise ValueError
    else:
        right_fx = [x for x in self.fx_list[-50:] if x['dt'] > self.bi_list[-1]['dt']]
        if self.bi_mode == "old":
            right_kn = [x for x in self.kline_new[-300:] if x['dt'] >= self.bi_list[-1]['dt']]
        elif self.bi_mode == 'new':
            right_kn = [x for x in self.kline_raw[-300:] if x['dt'] >= self.bi_list[-1]['dt']]
        else:
            raise ValueError

    for fx in right_fx:
        last_bi = self.bi_list[-1]
        bi = dict(fx)
        bi['bi'] = bi.pop('fx')
        if last_bi['fx_mark'] == fx['fx_mark']:
            # Same fractal type: keep the more extreme mark.
            if (last_bi['fx_mark'] == 'g' and last_bi['bi'] < bi['bi']) \
                    or (last_bi['fx_mark'] == 'd' and last_bi['bi'] > bi['bi']):
                if self.verbose:
                    print("笔标记移动:from {} to {}".format(self.bi_list[-1], bi))
                self.bi_list[-1] = bi
        else:
            # Opposite fractal type: require at least min_bi_k bars between
            # the marks ...
            kn_inside = [x for x in right_kn if last_bi['dt'] <= x['dt'] <= bi['dt']]
            if len(kn_inside) >= self.min_bi_k:
                # ... and no inclusion between the two adjacent fractals.
                if (last_bi['fx_mark'] == 'g' and bi['fx_low'] < last_bi['fx_low'] and bi['fx_high'] < last_bi['fx_high']) or \
                        (last_bi['fx_mark'] == 'd' and bi['fx_high'] > last_bi['fx_high'] and bi['fx_low'] > last_bi['fx_low']):
                    if self.verbose:
                        print("新增笔标记:{}".format(bi))
                    self.bi_list.append(bi)

    # Drop the last stroke mark if price has already moved beyond it.
    if (self.bi_list[-1]['fx_mark'] == 'd' and self.kline_new[-1]['low'] < self.bi_list[-1]['bi']) \
            or (self.bi_list[-1]['fx_mark'] == 'g' and self.kline_new[-1]['high'] > self.bi_list[-1]['bi']):
        if self.verbose:
            print("最后一个笔标记无效,{}".format(self.bi_list[-1]))
        self.bi_list.pop(-1)
@staticmethod
def _make_standard_seq(bi_seq):
    """Compute the standard feature sequence (标准特征序列) used for segment
    detection.

    NOTE(review): elements of ``bi_seq`` are read via ``.dt`` / ``.price``
    attribute access below, while ``bi_seq[0]['fx_mark']`` uses dict access
    and stroke marks elsewhere in this class are plain dicts — confirm the
    caller passes objects exposing both interfaces.

    :return: list of dict
    """
    # The first fractal mark fixes the direction of the move.
    if bi_seq[0]['fx_mark'] == 'd':
        direction = "up"
    elif bi_seq[0]['fx_mark'] == 'g':
        direction = "down"
    else:
        raise ValueError

    # Pair every second stroke endpoint into high/low feature bars.
    raw_seq = [{"dt": bi_seq[i].dt,
                'high': max(bi_seq[i].price, bi_seq[i + 1].price),
                'low': min(bi_seq[i].price, bi_seq[i + 1].price)}
               for i in range(1, len(bi_seq), 2) if i <= len(bi_seq) - 2]

    seq = []
    for row in raw_seq:
        if not seq:
            seq.append(row)
            continue
        last = seq[-1]
        cur_h, cur_l = row['high'], row['low']
        last_h, last_l = last['high'], last['low']
        # Inclusion relationship: either bar fully contains the other.
        if (cur_h <= last_h and cur_l >= last_l) or (cur_h >= last_h and cur_l <= last_l):
            seq.pop(-1)
            # Merge the two bars according to the move direction.
            if direction == "up":
                last_h = max(last_h, cur_h)
                last_l = max(last_l, cur_l)
            elif direction == "down":
                last_h = min(last_h, cur_h)
                last_l = min(last_l, cur_l)
            else:
                raise ValueError
            seq.append({"dt": row['dt'], "high": last_h, "low": last_l})
        else:
            seq.append(row)
    return seq
def _update_xd_list(self):
    """Update the segment (线段) list from the stroke list."""
    if len(self.bi_list) < 4:
        return

    # The last two segment marks can still change with new data.
    self.xd_list = self.xd_list[:-2]
    if len(self.xd_list) == 0:
        # Seed with the first three stroke marks.
        for i in range(3):
            xd = dict(self.bi_list[i])
            xd['xd'] = xd.pop('bi')
            self.xd_list.append(xd)

    # Strokes to the right of the last segment mark (bounded window once the
    # list is long enough).
    if len(self.xd_list) <= 3:
        right_bi = [x for x in self.bi_list if x['dt'] >= self.xd_list[-1]['dt']]
    else:
        right_bi = [x for x in self.bi_list[-200:] if x['dt'] >= self.xd_list[-1]['dt']]

    # Candidate turning points: local extremes among bottom marks (d) and
    # top marks (g), taken separately, then merged in time order.
    xd_p = []
    bi_d = [x for x in right_bi if x['fx_mark'] == 'd']
    bi_g = [x for x in right_bi if x['fx_mark'] == 'g']
    for i in range(1, len(bi_d) - 2):
        d1, d2, d3 = bi_d[i - 1: i + 2]
        if d1['bi'] > d2['bi'] < d3['bi']:
            xd_p.append(d2)
    for j in range(1, len(bi_g) - 2):
        g1, g2, g3 = bi_g[j - 1: j + 2]
        if g1['bi'] < g2['bi'] > g3['bi']:
            xd_p.append(g2)
    xd_p = sorted(xd_p, key=lambda x: x['dt'], reverse=False)

    for xp in xd_p:
        xd = dict(xp)
        xd['xd'] = xd.pop('bi')
        last_xd = self.xd_list[-1]
        if last_xd['fx_mark'] == xd['fx_mark']:
            # Same mark type: keep the more extreme segment mark.
            if (last_xd['fx_mark'] == 'd' and last_xd['xd'] > xd['xd']) \
                    or (last_xd['fx_mark'] == 'g' and last_xd['xd'] < xd['xd']):
                if self.verbose:
                    print("更新线段标记:from {} to {}".format(last_xd, xd))
                self.xd_list[-1] = xd
        else:
            # Opposite mark: skip candidates engulfed by the last mark.
            if (last_xd['fx_mark'] == 'd' and last_xd['xd'] > xd['xd']) \
                    or (last_xd['fx_mark'] == 'g' and last_xd['xd'] < xd['xd']):
                continue
            bi_inside = [x for x in right_bi if last_xd['dt'] <= x['dt'] <= xd['dt']]
            if len(bi_inside) < 4:
                # A segment needs at least 4 stroke marks between its ends.
                if self.verbose:
                    print("{} - {} 之间笔标记数量少于4,跳过".format(last_xd['dt'], xd['dt']))
                continue
            else:
                if len(bi_inside) > 4:
                    if self.verbose:
                        print("新增线段标记(笔标记数量大于4):{}".format(xd))
                    self.xd_list.append(xd)
                else:
                    bi_r = [x for x in right_bi if x['dt'] >= xd['dt']]
                    assert bi_r[1]['fx_mark'] == bi_inside[-2]['fx_mark']
                    # Case 1: no gap between the feature sequences.
                    if (bi_r[1]['fx_mark'] == "g" and bi_r[1]['bi'] > bi_inside[-3]['bi']) \
                            or (bi_r[1]['fx_mark'] == "d" and bi_r[1]['bi'] < bi_inside[-3]['bi']):
                        if self.verbose:
                            print("新增线段标记(第一种情况):{}".format(xd))
                        self.xd_list.append(xd)
                    # Case 2: there is a gap.
                    else:
                        if (bi_r[1]['fx_mark'] == "g" and bi_r[1]['bi'] < bi_inside[-2]['bi']) \
                                or (bi_r[1]['fx_mark'] == "d" and bi_r[1]['bi'] > bi_inside[-2]['bi']):
                            if self.verbose:
                                print("新增线段标记(第二种情况):{}".format(xd))
                            self.xd_list.append(xd)

    # Drop the last segment mark if price has already moved beyond it.
    if (self.xd_list[-1]['fx_mark'] == 'd' and self.kline_new[-1]['low'] < self.xd_list[-1]['xd']) \
            or (self.xd_list[-1]['fx_mark'] == 'g' and self.kline_new[-1]['high'] > self.xd_list[-1]['xd']):
        if self.verbose:
            print("最后一个线段标记无效,{}".format(self.xd_list[-1]))
        self.xd_list.pop(-1)
def update(self, k):
    """Feed one bar into the analyzer and refresh all derived results.

    :param k: dict
        a single K-line bar, e.g.
        {'symbol': '000001.SH',
        'dt': Timestamp('2020-07-16 15:00:00'),
        'open': 3356.11,
        'close': 3210.1,
        'high': 3373.53,
        'low': 3209.76,
        'vol': 486366915.0}
    """
    if self.verbose:
        print("=" * 100)
        print("输入新K线:{}".format(k))

    # A bar with the same open as the stored last bar is treated as an update
    # of that still-forming bar; otherwise it is appended as a new bar.
    if not self.kline_raw or k['open'] != self.kline_raw[-1]['open']:
        self.kline_raw.append(k)
    else:
        if self.verbose:
            print("输入K线处于未完成状态,更新:replace {} with {}".format(self.kline_raw[-1], k))
        self.kline_raw[-1] = k

    # Refresh indicators and derived structures in dependency order:
    # TA -> inclusion-free bars -> fractals -> strokes -> segments.
    self._update_ta()
    self._update_kline_new()
    self._update_fx_list()
    self._update_bi_list()
    self._update_xd_list()
    self.end_dt = self.kline_raw[-1]['dt']
    self.latest_price = self.kline_raw[-1]['close']

    # Cap stored history by the maximum raw K-line length; derived lists are
    # capped proportionally.
    if len(self.kline_raw) > self.max_raw_len:
        self.kline_raw = self.kline_raw[-self.max_raw_len:]
        self.ma = self.ma[-self.max_raw_len:]
        self.macd = self.macd[-self.max_raw_len:]
        self.kline_new = self.kline_new[-self.max_raw_len:]
        self.fx_list = self.fx_list[-(self.max_raw_len // 2):]
        self.bi_list = self.bi_list[-(self.max_raw_len // 4):]
        self.xd_list = self.xd_list[-(self.max_raw_len // 8):]

    if self.verbose:
        print("更新结束\n\n")
def to_df(self, ma_params=(5, 20), use_macd=False, max_count=1000, mode="raw"):
    """Assemble the analysis results into a pd.DataFrame.

    Each row is one bar, annotated with any fractal ("fx"), stroke ("bi") and
    segment ("xd") mark that falls on its datetime, plus SMA columns and,
    optionally, MACD columns.

    :param ma_params: tuple of int
        moving-average system parameters
    :param use_macd: bool
        whether to append diff/dea/macd columns
    :param max_count: int
        maximum number of bars in the output
    :param mode: str
        which K-line series to use: raw = original bars,
        new = bars with inclusion relationships removed
    :return: pd.DataFrame
    """
    if mode == "raw":
        bars = self.kline_raw[-max_count:]
    elif mode == "new":
        # Fix: "new" previously also returned kline_raw, making the mode
        # argument a no-op.
        bars = self.kline_new[-max_count:]
    else:
        raise ValueError("mode must be 'raw' or 'new'")

    # Index marks by datetime for O(1) lookup per bar.
    fx_list = {x["dt"]: {"fx_mark": x["fx_mark"], "fx": x['fx']} for x in self.fx_list[-(max_count // 2):]}
    bi_list = {x["dt"]: {"fx_mark": x["fx_mark"], "bi": x['bi']} for x in self.bi_list[-(max_count // 4):]}
    xd_list = {x["dt"]: {"fx_mark": x["fx_mark"], "xd": x['xd']} for x in self.xd_list[-(max_count // 8):]}

    results = []
    for k in bars:
        # "o" marks a bar without a fractal.
        k['fx_mark'], k['fx'], k['bi'], k['xd'] = "o", None, None, None
        fx_ = fx_list.get(k['dt'], None)
        bi_ = bi_list.get(k['dt'], None)
        xd_ = xd_list.get(k['dt'], None)
        if fx_:
            k['fx_mark'] = fx_["fx_mark"]
            k['fx'] = fx_["fx"]
        if bi_:
            k['bi'] = bi_["bi"]
        if xd_:
            k['xd'] = xd_["xd"]
        results.append(k)

    df = pd.DataFrame(results)
    for p in ma_params:
        df.loc[:, "ma{}".format(p)] = ta.SMA(df.close.values, p)
    if use_macd:
        diff, dea, macd = ta.MACD(df.close.values)
        df.loc[:, "diff"] = diff
        # Fix: the dea and macd columns previously received the diff series.
        df.loc[:, "dea"] = dea
        df.loc[:, "macd"] = macd
    return df
def to_image(self, file_image, mav=(5, 20, 120, 250), max_k_count=1000, dpi=50):
    """Render the analysis results to an image file.

    :param file_image: str
        output file name; jpg/png/svg are supported — mind the suffix
    :param mav: tuple of int
        moving-average system parameters
    :param max_k_count: int
        maximum number of K-lines to draw; the larger this value, the longer
        the generated image
    :param dpi: int
        image resolution
    :return: None
    """
    plot_ka(self, file_image=file_image, mav=mav, max_k_count=max_k_count, dpi=dpi)
def is_bei_chi(self, zs1, zs2, mode="bi", adjust=0.9, last_index: int = None):
    """Check whether move zs1 shows divergence (背驰) against move zs2.

    Force is measured as the sum of absolute MACD histogram values over each
    move's time range; zs1 diverges when its force is smaller than zs2's
    force scaled by ``adjust``.  Note the two moves need not share a
    direction, and the comparison is meaningless if they overlap.

    :param zs1: dict
        the move being tested, usually the most recent one, e.g.
        zs1 = {"start_dt": "2020-02-20 11:30:00", "end_dt": "2020-02-20 14:30:00", "direction": "up"}
    :param zs2: dict
        the reference move, usually the earlier one, same shape as zs1
    :param mode: str
        'bi' compares two strokes; 'xd' compares two segments (only the MACD
        bars matching each move's direction are summed)
    :param adjust: float
        scale applied to zs2's force (suggested 0.6–1.0, default 0.9) so that
        zs1 must be clearly weaker
    :param last_index: int
        optional speed-up: restrict the MACD scan to the last ``last_index``
        bars when comparing the latest move
    :return: bool
    """
    assert zs1["start_dt"] > zs2["end_dt"], "zs1 必须是最近的走势,用于比较;zs2 必须是较前的走势,被比较。"
    assert zs1["start_dt"] < zs1["end_dt"], "走势的时间区间定义错误,必须满足 start_dt < end_dt"
    assert zs2["start_dt"] < zs2["end_dt"], "走势的时间区间定义错误,必须满足 start_dt < end_dt"

    macd_data = self.macd[-last_index:] if last_index else self.macd
    lo = min(zs1["start_dt"], zs2["start_dt"])
    hi = max(zs1["end_dt"], zs2["end_dt"])
    window = [bar for bar in macd_data if lo <= bar['dt'] <= hi]
    k1 = [bar for bar in window if zs1["start_dt"] <= bar['dt'] <= zs1["end_dt"]]
    k2 = [bar for bar in window if zs2["start_dt"] <= bar['dt'] <= zs2["end_dt"]]

    if mode == 'bi':
        force1 = sum(abs(bar['macd']) for bar in k1)
        force2 = sum(abs(bar['macd']) for bar in k2)
        return force1 < force2 * adjust

    if mode == 'xd':
        assert zs1['direction'] in ['down', 'up'], "走势的 direction 定义错误,可取值为 up 或 down"
        assert zs2['direction'] in ['down', 'up'], "走势的 direction 定义错误,可取值为 up 或 down"

        def _one_sided(bars, direction):
            # Only count histogram bars on the side matching the move.
            if direction == "down":
                return sum(abs(bar['macd']) for bar in bars if bar['macd'] < 0)
            return sum(abs(bar['macd']) for bar in bars if bar['macd'] > 0)

        force1 = _one_sided(k1, zs1['direction'])
        force2 = _one_sided(k2, zs2['direction'])
        return force1 < force2 * adjust

    raise ValueError("mode value error")
def get_sub_section(self, start_dt: datetime, end_dt: datetime, mode="bi", is_last=True):
    """Return the analysis objects whose datetimes fall in [start_dt, end_dt].

    :param start_dt: datetime
        start of the sub-interval
    :param end_dt: datetime
        end of the sub-interval
    :param mode: str
        object type to extract: 'kn' (inclusion-free bars), 'fx' (fractals),
        'bi' (strokes) or 'xd' (segments)
    :param is_last: bool
        when True, only the tail of the corresponding list is scanned
        (a recent sub-interval)
    :return: list of dict
    """
    # Map each mode to its backing list and the tail length scanned
    # when is_last is set.
    sources = {
        "kn": (self.kline_new, 200),
        "fx": (self.fx_list, 100),
        "bi": (self.bi_list, 50),
        "xd": (self.xd_list, 30),
    }
    if mode not in sources:
        raise ValueError
    sequence, tail = sources[mode]
    points = sequence[-tail:] if is_last else sequence
    return [p for p in points if start_dt <= p['dt'] <= end_dt]
|
|
from numpy import ones
from vistas.core.graphics.geometry import Geometry
class FeatureGeometry(Geometry):
    """Triangle-mode geometry for vector features, with normal and per-vertex
    color buffers."""

    def __init__(self, num_indices, num_vertices, indices=None, vertices=None):
        """Create the geometry and optionally populate its buffers.

        :param num_indices: number of indices to allocate
        :param num_vertices: number of vertices to allocate
        :param indices: optional index array to store immediately
        :param vertices: optional vertex array to store immediately
        """
        super().__init__(
            num_indices, num_vertices, has_normal_array=True, has_color_array=True, mode=Geometry.TRIANGLES
        )
        # Fix: the guard previously tested ``self.vertices`` (base-class state)
        # instead of the ``vertices`` argument, so the parameter check was
        # effectively meaningless and ``vertices=None`` could be stored.
        if indices is not None and vertices is not None:
            self.indices = indices
            self.vertices = vertices
            self.compute_bounding_box()
        self.colors = ones(num_vertices * 3, float) * 0.5  # init the color buffer with grey
|
|
import networkx as nx
import numpy as np
import torch
from torch.utils.data import Dataset
from dsloader.util import kron_graph, random_binary, make_fractional
class KroneckerDataset(Dataset):
    """Dataset of fractional adjacency matrices derived from Kronecker-product
    graphs.

    For each of ``num_graphs`` random binary seeds, a Kronecker graph is
    expanded ``kron_iter`` times and ``perms_per_graph`` fractional variants
    of it are stored as samples.
    """

    def __init__(self, kron_iter=4, seed_size=4, fixed_seed=None, num_graphs=1, perms_per_graph=256, progress_bar=False):
        """Build the sample matrices.

        :param kron_iter: number of Kronecker expansion iterations
        :param seed_size: side length of the (square) seed matrix
        :param fixed_seed: optional seed matrix used for every graph instead
            of the per-graph random seed (random seeds are still drawn and
            recorded in ``self.seeds``)
        :param num_graphs: number of base graphs to generate
        :param perms_per_graph: fractional variants stored per base graph
        :param progress_bar: wrap the graph loop in tqdm when True
        """
        self.kron_iter = kron_iter
        self.seed_size = seed_size
        self.num_nodes = seed_size ** (kron_iter + 1)
        self.seeds = []
        self.matrices = []

        num_iter = range(num_graphs)
        if progress_bar:
            from tqdm import tqdm
            num_iter = tqdm(num_iter)
        for _ in num_iter:
            seed = random_binary(seed_size, use_sparsity=False)
            self.seeds.append(seed)
            # Fix: np.float was removed in NumPy 1.24; the builtin float is
            # the documented replacement (same float64 dtype).
            base = fixed_seed if fixed_seed is not None else seed
            k_g = kron_graph(base, n=kron_iter).astype(float)
            for _ in range(perms_per_graph):
                self.matrices.append(make_fractional(k_g, inplace=False))

    def __len__(self):
        return len(self.matrices)

    def __getitem__(self, idx):
        return torch.tensor(self.matrices[idx])
|
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import urllib.request
import requests
import re
import csv
import html
|
|
from __future__ import division
import cv2
import numpy as np
from math import *
#--------------------------------------------
# AUXILIARY BLOCK FUNCTIONS
#--------------------------------------------
# return closed image
def closing(bny, dim):
    """Morphological closing of a binary image: dilate, then erode, both with
    a dim x dim structuring element."""
    dilated = dilation(bny, dim)
    return erosion(dilated, dim)
def dilation(bny, dim):
    """Binary dilation: stamp a dim x dim block of 255s at every active pixel.

    The structuring element is anchored at each active pixel's top-left
    corner (only non-negative offsets are applied), matching the original
    implementation.

    :param bny: 2-D array; pixels > 0 are considered active
    :param dim: side length of the square structuring element
    :return: float array of 0/255 values with the same shape as bny
    """
    rows, cols = bny.shape[0], bny.shape[1]
    out = np.zeros((rows, cols))
    ys, xs = np.where(bny > 0)
    for y0, x0 in zip(ys, xs):
        for dy in range(dim):
            for dx in range(dim):
                ty, tx = y0 + dy, x0 + dx
                if 0 <= ty < rows and 0 <= tx < cols:
                    out[ty][tx] = 255
    return out
def erosion(bny, dim):
    """Binary erosion: keep an active pixel only when the whole dim x dim
    window anchored at it (top-left) lies inside the image and every pixel in
    the window equals 255.

    :param bny: 2-D array; pixels > 0 are candidates, 255 counts as active
    :param dim: side length of the square structuring element
    :return: float array of 0/255 values with the same shape as bny
    """
    rows, cols = bny.shape[0], bny.shape[1]
    out = np.zeros((rows, cols))
    ys, xs = np.where(bny > 0)
    for y0, x0 in zip(ys, xs):
        keep = True
        for dy in range(dim):
            for dx in range(dim):
                ty, tx = y0 + dy, x0 + dx
                if ty < 0 or ty >= rows or tx < 0 or tx >= cols or bny[ty][tx] != 255:
                    keep = False
                    break
            if not keep:
                break
        if keep:
            out[y0][x0] = 255
    return out
def getFeatures(segm):
    """Return per-region features of a label image.

    For each region label 1..max(segm), computes a row
    [area, centroid_row, centroid_col, box_r0, box_c0, box_r1, box_c1],
    where the centroid is the integer-truncated mean pixel position.

    :param segm: 2-D array of integer region labels (0 = background)
    :return: np.ndarray with one feature row per non-empty region
    """
    features = []
    for label in range(1, int(np.max(segm) + 1)):
        index = np.where(segm == label)
        # Fix: area was previously kept as a one-element list, so the
        # centroid divisions relied on deprecated scalar/list broadcasting
        # and int() over a size-1 array; use a plain int instead.
        area = int(np.sum(segm == label))
        if area == 0:
            # Guard against gaps in the label range (previously a
            # division-by-zero failure).
            continue
        pos = [int(np.sum(index[0]) / area), int(np.sum(index[1]) / area)]
        box = [np.min(index[0]), np.min(index[1]), np.max(index[0]), np.max(index[1])]
        features.append([area] + pos + box)
    return np.asarray(features)
def getHistogram(img):
    """Return the normalized 256-bin intensity histogram of an image.

    Each bin holds the fraction of pixels exactly equal to that intensity.
    """
    total = img.shape[0] * img.shape[1]
    counts = [len(np.where(img[:, :] == level)[0]) / total for level in range(256)]
    return np.asarray(counts)
def getOtsuThreshold(img):
    """Pick a binarization threshold by maximizing an Otsu-style
    between-class variance score over all 256 candidate thresholds.

    :param img: 2-D grayscale image with integer-valued intensities
    :return: index (0-255) of the highest-scoring threshold
    """
    hist = getHistogram(img)
    # global mean pixel intensity
    gAv = np.sum(img) / (img.shape[0] * img.shape[1])

    variances = []
    for t in range(256):
        # fraction of pixels below the candidate threshold
        weight = np.sum(hist[:t])
        # iteratively damped, intensity-weighted histogram accumulator
        smoothed = hist[0]
        for level in range(1, t + 1):
            smoothed = (smoothed + level * hist[level]) / 2
        if weight in (0, 1):
            # degenerate split: all pixels on one side
            variances.append(0)
        else:
            variances.append((gAv * weight - smoothed) ** 2 / (weight * (1 - weight)))
    return np.argmax(np.asarray(variances))
# return resized image based in a scale factor
def resize(img, scale):
    """Nearest-neighbour resize of a grayscale image by ``scale`` using an
    inverse coordinate mapping.

    NOTE(review): M is cast to int, so the 1/scale entries truncate; this only
    maps pixels sensibly when 1/scale is an integer (e.g. scale = 1/4) —
    confirm intended usage before reusing with scale > 1.
    """
    # initialize output image
    out = np.zeros((int(scale*img.shape[0]), int(scale*img.shape[1])));
    # initialize transformation matrixes (inverse mapping: output -> input)
    T = np.matrix([[1,0,0], [0,1,0], [0,0,1]]);
    S = np.matrix([[1/scale,0,0], [0,1/scale,0], [0,0,1]]);
    M = np.linalg.inv(T)*S*T;
    M = M.astype(int);
    # apply transformation
    for i in np.nditer(np.arange(out.shape[0])):
        for j in np.nditer(np.arange(out.shape[1])):
            # calculate the source coordinate for this output pixel
            newP = (M*np.matrix([[i],[j],[1]])).astype(int);
            # copy the source pixel when it falls inside the input image
            if newP[0] >= 0 and newP[0] < img.shape[0] and newP[1] >= 0 and newP[1] < img.shape[1]:
                out[i,j] = img[newP[0], newP[1]];
    return out;
def subtract(img1, img2):
    """Return img2 - img1 with the result clamped to the displayable range
    [0, 255]."""
    diff = img2 - img1
    return np.clip(diff, 0, 255)
# return matrix of segmented regions of a binary image
def segment(bny):
    """Label connected regions of a binary image.

    A single raster pass inherits a label from any already-labelled
    4-neighbour (or opens a new region), followed by a merge pass for labels
    that turn out to touch, and a final relabelling to consecutive integers
    (0 = background).
    """
    # initialize output image
    out = np.zeros((bny.shape[0], bny.shape[1]));
    # find activated pixels positions
    actBny = np.where(bny > 0);
    # initialize regions counter
    regions = 0;
    # segment: inherit a neighbour's label where possible, else new region
    for aux, (i,j) in enumerate(zip(actBny[0], actBny[1])):
        if i - 1 >= 0 and bny[i-1,j] > 0 and out[i-1,j] > 0:
            out[i,j] = out[i-1, j];
        elif j - 1 >= 0 and bny[i,j-1] > 0 and out[i,j-1] > 0:
            out[i,j] = out[i, j-1];
        elif i + 1 < bny.shape[0] and bny[i+1,j] > 0 and out[i+1,j] > 0:
            out[i,j] = out[i+1, j];
        elif j + 1 < bny.shape[1] and bny[i,j+1] > 0 and out[i,j+1] > 0:
            out[i,j] = out[i, j+1];
        else:
            regions = regions + 1;
            out[i,j] = regions;
    # remove duplicated regions: merge distinct labels of touching pixels
    for aux, (i,j) in enumerate(zip(actBny[0], actBny[1])):
        if i + 1 < bny.shape[0] and bny[i+1,j] > 0 and out[i,j] != out[i+1,j] > 0:
            out[ out == out[i,j] ] = out[i+1,j];
        if j + 1 < bny.shape[1] and bny[i,j+1] > 0 and out[i,j] != out[i,j+1] > 0:
            out[ out == out[i,j] ] = out[i,j+1];
    # rescale output: relabel remaining region ids as consecutive indices
    regions = np.unique(out);
    for r in np.nditer(regions):
        np.place(out, out == r, np.where(regions == r)[0]);
    return out
def toBinary(img, threshold):
    """Threshold an image in place: pixels below ``threshold`` become 0, the
    rest become 255. Returns the (modified) input array.

    The two masks are applied sequentially, matching the original behavior.
    """
    np.place(img, img < threshold, 0)
    np.place(img, img >= threshold, 255)
    return img
def toGrayscale(img):
    """Average the three color channels of an image into one grayscale plane."""
    c0, c1, c2 = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    return c0 / 3 + c1 / 3 + c2 / 3
#--------------------------------------------
# CONTROL FUNCTIONS
#--------------------------------------------
def extractFeatures(prevImg, nextImg):
    """Segment the difference of two frames and return car-candidate features.

    Each returned row is [area, pos_row, pos_col, box_r0, box_c0, box_r1, box_c1];
    regions are filtered by area and by position on the road.
    """
    grayPrev = toGrayscale(prevImg)
    grayNext = toGrayscale(nextImg)
    # frame differencing isolates moving objects
    diff = subtract(grayPrev, grayNext)
    binary = toBinary(diff, 20)
    closed = closing(binary, 13)
    regions = segment(closed)
    features = getFeatures(regions)

    # keep only regions whose area and position plausibly match a car
    minArea, maxArea = 60, 1500
    h, w = regions.shape[0], regions.shape[1]
    selected = []
    for f in features:
        areaOk = minArea <= f[0] <= maxArea
        rowOk = h / 2 < f[1] < h * 0.9
        colOk = f[2] > w * 0.55 or f[2] < w * 0.4
        if areaOk and rowOk and colOk:
            selected.append(f)
    return np.asarray(selected)
def calculateVelocity(prevFeatures, nextFeatures, ellapsedTime):
    """Match each current feature to its nearest previous feature and derive
    a speed (pixel distance / elapsed time) for each pair.

    Each matched previous feature is consumed so it cannot pair twice.

    :return: np.ndarray of [prev_feature, next_feature, speed] triples
    """
    matches = []
    remaining = prevFeatures.tolist()
    for cur in nextFeatures.tolist():
        # nearest previous feature by centroid distance
        closest, closestDist = [], 0
        for cand in remaining:
            d = sqrt((cur[1] - cand[1]) ** 2 + (cur[2] - cand[2]) ** 2)
            if closestDist == 0 or d < closestDist:
                closestDist = d
                closest = cand
        if closest != []:
            matches.append([closest, cur, closestDist / ellapsedTime])
            remaining.remove(closest)
    return np.asarray(matches)
def drawResults(prevImg, nextImg, prevFeatures, nextFeatures, ellapsedTime):
    """Annotate the current frame with a centroid dot, bounding box and
    estimated speed label for every matched region, and return the copy."""
    annotated = nextImg.copy()
    for v in calculateVelocity(prevFeatures, nextFeatures, ellapsedTime):
        prev, cur, speed = v[0], v[1], v[2]
        # centroid marker and region bounding box
        cv2.circle(annotated, (cur[2], cur[1]), 1, 200, -1)
        cv2.rectangle(annotated, (cur[4], cur[3]), (cur[6], cur[5]), 127, 1)
        # speed label just above the box
        cv2.putText(annotated, str(round(speed, 1)) + ' px/s', (cur[4], cur[3] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 255, 0), 1, cv2.LINE_AA)
    return annotated
#--------------------------------------------
# MAIN CODE
#--------------------------------------------
# load video to be processed
video = cv2.VideoCapture('video.mp4')

# grab basic stream properties
fps = int(video.get(cv2.CAP_PROP_FPS))
w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames = video.get(cv2.CAP_PROP_FRAME_COUNT)

# set compression format of the output video
fourcc = cv2.VideoWriter_fourcc(*'H264')
# initialize output video object
out = cv2.VideoWriter('output.avi', fourcc, fps, (w, h), True)

# read until the first frame decodes successfully
prevImg = None
while video.isOpened() and prevImg is None:
    ret, frame = video.read()
    if ret:
        prevImg = frame

# previous and next segmentation features
prevFeatures = None
nextFeatures = None
# current frame counter
crtFrm = 1
# number of frames skipped between segmentations
skipFrm = 4

# process (the leading 2% of) the video
while video.isOpened() and crtFrm < 0.02 * frames:
    ret, nextImg = video.read()
    if ret:
        # print progress
        print('Processing... ({0:.2f}%)'.format(100 * crtFrm / frames))
        # work on down-scaled frames to keep segmentation affordable
        scale = 1 / 4
        resPrevImg = cv2.resize(prevImg, (int(prevImg.shape[1] * scale), int(prevImg.shape[0] * scale)), interpolation=cv2.INTER_AREA)
        resNextImg = cv2.resize(nextImg, (int(nextImg.shape[1] * scale), int(nextImg.shape[0] * scale)), interpolation=cv2.INTER_AREA)
        # segment every skipFrm-th frame and update the feature sets
        if crtFrm % skipFrm == 0:
            aux = extractFeatures(resPrevImg, resNextImg)
            if prevFeatures is None:
                prevFeatures = aux
            else:
                nextFeatures = aux
        # update previous frame and frame counter
        prevImg = nextImg
        crtFrm = crtFrm + 1
        if nextFeatures is not None:
            # draw the annotated frame and write it at original resolution
            result = drawResults(resPrevImg, resNextImg, prevFeatures, nextFeatures, skipFrm / fps)
            scale = 1 / scale
            result = cv2.resize(result, (int(result.shape[1] * scale), int(result.shape[0] * scale)), interpolation=cv2.INTER_AREA)
            out.write(result)
            # update previous features
            prevFeatures = nextFeatures

# close video files (fix: video.release() was previously called twice)
video.release()
out.release()
|
|
import os
import json
import random
import numpy as np
import tensorflow as tf
import torchvision.transforms as transforms
from .utils import load_and_preprocess_image
from PIL import Image
root = '/data/cvfs/ah2029/datasets/bdd100k/'
def load_day_and_night(split='train', subset=1.0):
    """Load BDD100k image filenames with clear or partly cloudy weather,
    split into daytime and night sets of equal size.

    Parameters
    ----------
    split: str
        dataset split name ('train', 'val', ...)
    subset: float
        fraction of each set to keep (random sample) when below 1.0

    Returns
    -------
    X_day: np.array<str>
        filenames of daytime images
    X_night: np.array<str>
        filenames of night images
    """
    labels_path = os.path.join(root, 'labels/bdd100k_labels_images_' + split + '.json')
    with open(labels_path) as fh:
        annotations = json.load(fh)

    X_day, X_night = [], []
    for entry in annotations:
        attrs = entry['attributes']
        if attrs['weather'] not in ('clear', 'partly cloudy'):
            continue
        path = os.path.join(root, 'images/100k/', split, entry['name'])
        if attrs['timeofday'] == 'daytime':
            X_day.append(path)
        elif attrs['timeofday'] == 'night':
            X_night.append(path)

    if subset < 1.0:
        X_day = random.sample(X_day, int(subset * len(X_day)))
        X_night = random.sample(X_night, int(subset * len(X_night)))

    # truncate so both classes contain the same number of images
    n = min(len(X_day), len(X_night))
    return np.array(X_day[:n]), np.array(X_night[:n])
def create_dataset(X_day, X_night, image_size=(512, 512), batch_size=1):
    """ Create a tensorflow dataset from paired day/night filename lists.

    Parameters
    ----------
    X_day: list<str>
    X_night: list<str>
    image_size: tuple(int, int)
        defined as height, width
    batch_size: int

    Returns
    -------
    dataset: tf.data.Dataset
        infinitely repeating, shuffled, batched dataset yielding
        ((img_day, img_night), (img_day, img_night)) pairs
    """
    def map_(x, y, out_size):
        # decode/resize both images of a pair
        img_day = load_and_preprocess_image(x, out_size)
        img_night = load_and_preprocess_image(y, out_size)
        return (img_day, img_night), (img_day, img_night)

    dataset = tf.data.Dataset.from_tensor_slices((X_day, X_night))
    # Fix: tf.data.experimental.shuffle_and_repeat is deprecated; the
    # documented replacement is chaining Dataset.shuffle() and repeat().
    dataset = dataset.shuffle(buffer_size=len(X_day)).repeat()
    dataset = dataset.map(lambda x, y: map_(x, y, image_size))
    dataset = dataset.batch(batch_size)
    return dataset
def load_batch(filenames, image_size):
    """Load, augment and normalize a batch of images as channel-last arrays.

    Parameters
    ----------
    filenames: list<str>
    image_size: tuple(int, int)
        must be square (height == width)
    """
    assert image_size[0] == image_size[1]
    side = image_size[0]
    pipeline = transforms.Compose([
        transforms.Resize(side),
        transforms.RandomCrop(side),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    images = []
    for path in filenames:
        tensor = pipeline(Image.open(path))
        # CHW -> HWC (channel last)
        images.append(np.transpose(tensor, (1, 2, 0)))
    return np.stack(images, axis=0)
|
|
# Using Android IP Webcam video .jpg stream (tested) in Python2 OpenCV3
from collections import deque
from cspaceSliders import FilterWindow
from selenium import webdriver
import argparse
import urllib.request
import cv2
import numpy as np
import time
import math
def move(ptX, ptY):
    """Convert pixel coordinates to viewport percentages and reposition the
    on-page pointer element via injected JavaScript."""
    # screen-resolution-dependent pixel -> percent conversion factors
    ptX = ptX * ((((1366/864))/1366)*100)
    ptY = ptY * ((((768/480))/768)*100)
    print(ptX, ptY)
    driver.execute_script("document.getElementById('pointer').style.left = '" + str(ptX) + "%'")
    driver.execute_script("document.getElementById('pointer').style.top = '" + str(ptY) + "%'")
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--buffer", type=int, default=32,
                help="max buffer size")
args = vars(ap.parse_args())
# Open Chrome and load the HTML file
driver = webdriver.Chrome()
driver.get('file:///C:/Users/Sam/Desktop/SamDavid/BE Project/index.html')
# Replace the URL with your own IPwebcam shot.jpg IP:port
url = "http://192.168.43.1:8080/shot.jpg"
# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""
while True:
    # Use urllib to get the image from the IP camera
    imgResp = urllib.request.urlopen(url)
    # Numpy to convert into a array
    imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    # Finally decode the array to OpenCV usable format ;)
    img = cv2.imdecode(imgNp, -1)
    '''
    window = FilterWindow('Filter Window', img)
    window.show(verbose=True)
    colorspace = window.colorspace
    lowerb, upperb = window.bounds
    mask = window.mask
    applied_mask = window.applied_mask
    '''
    # This where the code for processing of video starts #
    # NOTE(review): despite the variable names, these HSV bounds select
    # bright, low-saturation (near-white) pixels rather than red ones.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_red = np.array([21, 0, 255])
    upper_red = np.array([179, 21, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    #mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # NOTE(review): the 3-value return is the OpenCV 3.x API;
    # OpenCV 4 returns (contours, hierarchy).
    _, cnts, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        # draw the circle and centroid on the frame,
        # then update the list of tracked points
        cv2.circle(img, (int(x), int(y)), int(radius),
                   (0, 255, 255), 2)
        cv2.circle(img, center, 5, (0, 0, 255), -1)
        pts.appendleft(center)
        # steer the on-page pointer to the newest tracked position
        move(pts[0][0], pts[0][1])
    # loop over the set of tracked points
    for i in np.arange(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # check to see if enough points have been accumulated in
        # the buffer
        try:
            if counter >= 10 and i == 1 and pts[-10] is not None:
                # compute the difference between the x and y
                # coordinates and re-initialize the direction
                # text variables
                dX = pts[-10][0] - pts[i][0]
                dY = pts[-10][1] - pts[i][1]
                (dirX, dirY) = ("", "")
                # ensure there is significant movement in the
                # x-direction
                if np.abs(dX) > 20:
                    dirX = "East" if np.sign(dX) == 1 else "West"
                # ensure there is significant movement in the
                # y-direction
                if np.abs(dY) > 20:
                    dirY = "North" if np.sign(dY) == 1 else "South"
                # handle when both directions are non-empty
                if dirX != "" and dirY != "":
                    direction = "{}-{}".format(dirY, dirX)
                # otherwise, only one direction is non-empty
                else:
                    direction = dirX if dirX != "" else dirY
        except IndexError:
            # deque holds fewer than 10 points yet; skip the direction update
            time.sleep(0.000001)
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), thickness)
    # show the movement deltas and the direction of movement on
    # the frame
    cv2.putText(img, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                0.65, (0, 0, 255), 3)
    cv2.putText(img, "dx: {}, dy: {}".format(dX, dY),
                (10, img.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                0.35, (0, 0, 255), 1)
    # End of Processing #
    # put the image on screen
    cv2.imshow('IPWebcam', img)
    #cv2.imshow('mask', mask)
    key = cv2.waitKey(1) & 0xFF
    counter += 1
    # To give the processor some less stress
    #time.sleep(0.1)
    # Quit if q is pressed
    if key == ord('q'):
        break
'''
print('Displaying the image with applied mask filtered in', colorspace,
'\nwith lower bound', lowerb, 'and upper bound', upperb)
'''
|
|
import math
import numpy as np
CPUCT = 1.0
class NodeInfo:
    """Per-state network output cached for tree search: the prior policy over
    legal actions, the value estimate, and lazily-created child states."""

    def __init__(self, state, action, raw_policy, value):
        self.state = state
        self.action = action
        # prior probability for each (action, key) pair, looked up by key
        self.policy = [raw_policy[key] for _, key in action]
        self.value = value
        self.children_state = [None] * len(action)

    def _tostring(self):
        return '{}|{}'.format(self.policy, self.value)

    def __str__(self):
        return self._tostring()

    def __repr__(self):
        return self._tostring()
class NodeStat:
    """Mutable visit statistics for one tree node: total visit count plus
    per-child accumulated scores and visit counts."""

    def __init__(self, action_len):
        self.total_visited = 0
        self.children_score = [0.] * action_len
        self.children_visited = [0] * action_len

    def _tostring(self):
        return '{}|{}|{}'.format(self.total_visited, self.children_score, self.children_visited)

    def __str__(self):
        return self._tostring()

    def __repr__(self):
        return self._tostring()
class Result:
    """Read-only summary of one action edge: the action and its key, mean
    value Q, exploration bonus U, visit count and prior probability."""

    def __init__(self, action, key, Q, U, visited, policy):
        self.action = action
        self.key = key
        self.policy = policy
        self.visited = visited
        self.Q = Q
        self.U = U
class MCTS:
    """Monte-Carlo tree search guided by a policy/value network.

    ``nn.predict`` must return ``(raw_policy, value)`` for a state's network
    input.  States must provide ``getRepresentativeString``, ``getWinner``,
    ``getCurrentPlayer``, ``getAction``, ``getNextState`` and ``getNnInput``.
    """

    def __init__(self, nn):
        self.nn = nn
        self.info_map = {}  # uid -> NodeInfo (network-output cache)
        self.stat_map = {}  # uid -> NodeStat (search statistics)

    def resetStats(self):
        """Clear search statistics between moves; also bound the size of the
        network-output cache."""
        if len(self.info_map) > 20000:
            self.info_map = {}
        self.stat_map = {}

    def getMostVisitedAction(self, state, sim_count, verbose=False):
        """Run ``sim_count`` simulations from ``state`` and return the most
        visited action."""
        info = self.getActionInfo(state, sim_count)
        if verbose:
            for a in info:
                print('{:4} {:+.4f} {:+.4f} {:4} {:+.4f} {}'.format(a.key, a.Q, a.U, a.visited, a.policy, state.actionToString(a.action)))
        index = np.argmax([i.visited for i in info])
        return info[index].action

    def getActionInfo(self, state, sim_count):
        """Run simulations from ``state`` and return one Result per action."""
        uid = state.getRepresentativeString()
        info = self._getinfo(state, uid)
        if uid not in self.stat_map:
            stat = self.stat_map[uid] = NodeStat(len(info.action))
        else:
            stat = self.stat_map[uid]
        for _ in range(sim_count):
            self._simulation(state)
        return [self._summary(info, stat, i) for i in range(len(info.action))]

    def _summary(self, info, stat, i):
        """Build the Result (Q, U, visits, prior) for child ``i``."""
        action, key = info.action[i]
        visited = stat.children_visited[i]
        policy = info.policy[i]
        if visited > 0:
            Q = stat.children_score[i] / visited
            U = CPUCT * policy * math.sqrt(stat.total_visited) / (1 + visited)
        else:
            Q = -2  # sentinel: never visited
            U = CPUCT * policy * math.sqrt(stat.total_visited)
        return Result(action, key, Q, U, visited, policy)

    def _simulation(self, state):
        """One PUCT playout; returns the value from the current player's view."""
        # terminal state: score from the current player's perspective
        winner = state.getWinner()
        if winner is not None:  # fix: was `!= None` (identity check is correct)
            return winner * state.getCurrentPlayer()
        # unexpanded leaf: expand and return the network's value estimate
        uid = state.getRepresentativeString()
        info = self._getinfo(state, uid)
        if uid not in self.stat_map:
            self.stat_map[uid] = NodeStat(len(info.action))
            return info.value
        # selection: maximize Q + U over the children
        best_score = -float('inf')
        best_index = None
        stat = self.stat_map[uid]
        for i in range(len(info.action)):
            visited = stat.children_visited[i]
            if visited > 0:
                Q = stat.children_score[i] / visited
                u = Q + CPUCT * info.policy[i] * math.sqrt(stat.total_visited) / (1 + visited)
            else:
                u = CPUCT * info.policy[i] * math.sqrt(stat.total_visited)
            if u > best_score:
                best_score = u
                best_index = i
        # expand the chosen child lazily, then recurse with the sign flipped
        # (the child's value is from the opponent's perspective)
        child_state = info.children_state[best_index]
        if child_state is None:  # fix: was `== None`
            a, k = info.action[best_index]
            child_state = info.children_state[best_index] = state.getNextState(a)
        v = -self._simulation(child_state)
        # backpropagation
        stat.children_score[best_index] += v
        stat.children_visited[best_index] += 1
        stat.total_visited += 1
        return v

    def _getinfo(self, state, uid):
        """Return the cached NodeInfo for ``uid``, querying the network on a
        cache miss."""
        if uid not in self.info_map:
            action = state.getAction()  # list of (action, key)
            raw_policy, value = self.nn.predict(state.getNnInput())
            self.info_map[uid] = NodeInfo(state, action, raw_policy, value)
        return self.info_map[uid]
|
|
from ..proto import *
from ..graph_io import *
import copy
import paddle.fluid as fluid
import numpy as np
from paddle.fluid.core import VarDesc, AttrType
class Fluid_debugger:
    """Helper for inspecting intermediate (.tmp_) variables of a fluid block."""

    def var_names_of_fetch(self, fetch_targets):
        """Return the names of the given fetch target variables."""
        return [var.name for var in fetch_targets]

    def fetch_tmp_vars(self, block, fetch_targets, var_names_list=None):
        """Extend the fetch list with every temporary variable of *block*.

        Keeps the original fetch targets first, then appends a fetch op
        (with the next free result column) for each '.tmp_' variable not
        already fetched. Returns the combined list of fetch variables.
        """
        fetch_var = block.var('fetch')
        old_fetch_names = self.var_names_of_fetch(fetch_targets)
        new_fetch_vars = [block.var(name) for name in old_fetch_names]
        col = len(new_fetch_vars)
        if var_names_list is None:
            var_names_list = block.vars.keys()
        for name in var_names_list:
            if '.tmp_' not in name or name in old_fetch_names:
                continue
            new_fetch_vars.append(block.var(name))
            block.append_op(
                type='fetch',
                inputs={'X': [name]},
                outputs={'Out': [fetch_var]},
                attrs={'col': col})
            col += 1
        return new_fetch_vars
|
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import numpy as np
import pytest
from ...metricgenerator.manager import SimpleManager
from ...types.association import TimeRangeAssociation, AssociationSet
from ...types.detection import Detection
from ...types.groundtruth import GroundTruthPath, GroundTruthState
from ...types.hypothesis import SingleDistanceHypothesis
from ...types.prediction import GaussianStatePrediction
from ...types.time import TimeRange
from ...types.track import Track
from ...types.update import GaussianStateUpdate
from ...types.array import CovarianceMatrix, StateVector
from ...models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity
from ...models.measurement.linear import LinearGaussian
@pytest.fixture()
def trial_timestamps():
    """Four timestamps one second apart, starting from now."""
    base = datetime.now()
    stamps = []
    for offset in range(4):
        stamps.append(base + timedelta(seconds=offset))
    return stamps
@pytest.fixture()
def trial_truths(trial_timestamps):
    """Three ground-truth paths, one state per trial timestamp.

    State vectors appear to be [x, vx, y, vy] -- TODO confirm against the
    transition model. Metadata colours the paths red/green/blue for
    metadata-based metric tests.
    """
    return [
        # Path 0 ("red"): unit-speed diagonal from the origin.
        GroundTruthPath([
            GroundTruthState(np.array([[0, 1, 0, 1]]), timestamp=trial_timestamps[0],
                             metadata={"colour": "red"}),
            GroundTruthState(np.array([[1, 1, 1, 1]]), timestamp=trial_timestamps[1],
                             metadata={"colour": "red"}),
            GroundTruthState(np.array([[2, 1, 2, 1]]), timestamp=trial_timestamps[2],
                             metadata={"colour": "red"}),
            GroundTruthState(np.array([[3, 1, 3, 1]]), timestamp=trial_timestamps[3],
                             metadata={"colour": "red"})
        ]),
        # Path 1 ("green"): diagonal from (-2, -2); note the 2-unit jump at the end.
        GroundTruthPath([
            GroundTruthState(np.array([[-2, 1, -2, 1]]), timestamp=trial_timestamps[0],
                             metadata={"colour": "green"}),
            GroundTruthState(np.array([[-1, 1, -1, 1]]), timestamp=trial_timestamps[1],
                             metadata={"colour": "green"}),
            GroundTruthState(np.array([[0, 1, 0, 1]]), timestamp=trial_timestamps[2],
                             metadata={"colour": "green"}),
            GroundTruthState(np.array([[2, 1, 2, 1]]), timestamp=trial_timestamps[3],
                             metadata={"colour": "green"})
        ]),
        # Path 2 ("blue"): zero vy component in every state vector.
        GroundTruthPath([
            GroundTruthState(np.array([[-1, 1, 1, 0]]), timestamp=trial_timestamps[0],
                             metadata={"colour": "blue"}),
            GroundTruthState(np.array([[0, 1, 1, 0]]), timestamp=trial_timestamps[1],
                             metadata={"colour": "blue"}),
            GroundTruthState(np.array([[1, 1, 2, 0]]), timestamp=trial_timestamps[2],
                             metadata={"colour": "blue"}),
            GroundTruthState(np.array([[3, 1, 3, 0]]), timestamp=trial_timestamps[3],
                             metadata={"colour": "blue"})
        ])
    ]
@pytest.fixture()
def trial_tracks(trial_truths, trial_timestamps):
    """Three estimated tracks with detection metadata for metric tests.

    Noteworthy shapes: track 1 contains a prediction (no detection) at the
    second timestamp; track 2 holds two states that share trial_timestamps[1]
    with different hypothesis distances (1 vs 1.1).
    """
    return [
        # Track 0: near the "red" truth; one mislabelled ("blue") detection.
        Track([
            GaussianStateUpdate(np.array([[0.1, 1.2, 0.1, 1.2]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, 0]]),
                                              metadata={"colour": "red"}),
                                    distance=1),
                                timestamp=trial_timestamps[0]),
            GaussianStateUpdate(np.array([[1.1, 1.2, 1.1, 1.2]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, np.sqrt(2)]]),
                                              metadata={"colour": "blue"}),
                                    distance=1),
                                timestamp=trial_timestamps[1]),
            GaussianStateUpdate(np.array([[2.1, 1.2, 2.1, 1.2]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, 2 * np.sqrt(2)]]),
                                              metadata={"colour": "red"}),
                                    distance=1),
                                timestamp=trial_timestamps[2]),
            GaussianStateUpdate(np.array([[3.1, 1.2, 3.1, 1.2]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, 3 * np.sqrt(2)]]),
                                              metadata={"colour": "red"}),
                                    distance=1),
                                timestamp=trial_timestamps[3])
        ]),
        # Track 1: measurement gap (prediction only) at trial_timestamps[1].
        Track([
            GaussianStateUpdate(np.array([[-2.5, 1.6, -2.5, 1.6]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[-3 * np.pi / 4,
                                                         2 * np.sqrt(2)]]),
                                              metadata={"colour": "red"}),
                                    distance=1),
                                timestamp=trial_timestamps[0]),
            GaussianStatePrediction(np.array([[-1.5, 1.6, -1.5, 1.6]]),
                                    np.eye(4),
                                    timestamp=trial_timestamps[1]),
            GaussianStateUpdate(np.array([[0.5, 1.6, 0.5, 1.6]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, 0]]),
                                              metadata={"colour": "green"}),
                                    distance=1),
                                timestamp=trial_timestamps[2]),
            GaussianStateUpdate(np.array([[1.5, 1.6, 1.5, 1.6]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, np.sqrt(2)]]),
                                              metadata={"colour": "green"}),
                                    distance=1),
                                timestamp=trial_timestamps[3])
        ]),
        # Track 2: five states; the second and third share trial_timestamps[1].
        Track([
            GaussianStateUpdate(np.array([[-1.99, 1.99, 1.99, 1.99]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[3 * np.pi / 4, np.sqrt(2)]]),
                                              metadata={}),
                                    distance=1),
                                timestamp=trial_timestamps[0]),
            GaussianStateUpdate(np.array([[0.99, 1.99, 1.99, 1.99]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 2, 1]]),
                                              metadata={}),
                                    distance=1),
                                timestamp=trial_timestamps[1]),
            GaussianStateUpdate(np.array([[0.999, 1.99, 1.999, 1.99]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 2, 1]]),
                                              metadata={}),
                                    distance=1.1),
                                timestamp=trial_timestamps[1]),
            GaussianStateUpdate(np.array([[1.99, 1.99, 1.99, 1.99]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, np.sqrt(2)]]),
                                              metadata={"colour": "blue"}),
                                    distance=1),
                                timestamp=trial_timestamps[2]),
            GaussianStateUpdate(np.array([[2.99, 1.99, 1.99, 1.99]]),
                                np.eye(4),
                                SingleDistanceHypothesis(
                                    None,
                                    Detection(np.array([[np.pi / 4, np.sqrt(2)]]),
                                              metadata={"colour": "green"}),
                                    distance=1),
                                timestamp=trial_timestamps[3])
        ])
    ]
@pytest.fixture()
def trial_associations(trial_truths, trial_tracks, trial_timestamps):
    """Truth-to-track associations over sub-ranges of the trial timeline.

    Deliberately non-trivial: truth 1 / track 1 are associated over two
    disjoint time ranges, and track 2 is associated with two different
    truths (2 and 0) over overlapping ranges.
    """
    return AssociationSet({
        TimeRangeAssociation(objects={trial_truths[0], trial_tracks[0]},
                             time_range=TimeRange(trial_timestamps[0], trial_timestamps[2])),
        TimeRangeAssociation(objects={trial_truths[1], trial_tracks[1]},
                             time_range=TimeRange(trial_timestamps[0], trial_timestamps[1])),
        TimeRangeAssociation(objects={trial_truths[1], trial_tracks[1]},
                             time_range=TimeRange(trial_timestamps[2], trial_timestamps[3])),
        TimeRangeAssociation(objects={trial_truths[2], trial_tracks[2]},
                             time_range=TimeRange(trial_timestamps[1], trial_timestamps[2])),
        TimeRangeAssociation(objects={trial_truths[0], trial_tracks[2]},
                             time_range=TimeRange(trial_timestamps[1], trial_timestamps[3]))
    })
@pytest.fixture()
def trial_manager(trial_truths, trial_tracks, trial_associations):
    """SimpleManager pre-loaded with the trial data and associations."""
    simple_manager = SimpleManager()
    simple_manager.add_data(trial_truths, trial_tracks)
    simple_manager.association_set = trial_associations
    return simple_manager
@pytest.fixture()
def transition_model():
    """2-D constant-velocity transition model, noise 0.05 per axis."""
    per_axis_models = [ConstantVelocity(0.05) for _ in range(2)]
    return CombinedLinearGaussianTransitionModel(per_axis_models)
@pytest.fixture()
def measurement_model():
    """Position-only (x, y) linear measurement model with variance 5 per axis."""
    noise = CovarianceMatrix(np.diag([5., 5.]))
    return LinearGaussian(ndim_state=4, mapping=[0, 2], noise_covar=noise)
@pytest.fixture()
def groundtruth():
    """Straight-line ground-truth path: 21 states, one per second."""
    start = datetime.now()
    origin = StateVector([0., 1., 0., 1.])
    step = StateVector([1., 0., 1., 0])
    states = []
    for i in range(21):
        states.append(
            GroundTruthState(origin + i * step,
                             timestamp=start + timedelta(seconds=i)))
    return GroundTruthPath(states)
|
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Updated by cavalleria (cavalleria@gmail.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import shutil
import warnings
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
import dataset
import models
from tqdm import tqdm
from config import cfg
from config import update_config
from core.loss import JointsMSELoss
from core.function import train
from core.function import validate
from utils.utils import create_logger
from utils.utils import get_optimizer
from utils.utils import save_checkpoint
from utils.utils import setup_logger
from utils.utils import get_model_summary
def parse_args():
    """Parse command-line options for keypoint-network training."""
    parser = argparse.ArgumentParser(description='Train keypoints network')
    # general
    parser.add_argument('--cfg', type=str, required=True,
                        help='experiment configure file name')
    parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,
                        help="Modify config options using the command-line")
    parser.add_argument('--seed', type=int, default=1337,
                        help='random seed')
    parser.add_argument('--gpu', type=str,
                        help='gpu id for multiprocessing training')
    parser.add_argument('--world-size', type=int, default=1,
                        help='number of nodes for distributed training')
    parser.add_argument('--rank', type=int, default=0,
                        help='node rank for distributed training')
    return parser.parse_args()
def set_seed(seed):
    """Seed python, numpy and torch (CPU and all CUDA devices) RNGs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def main():
    """Entry point: parse args, seed RNGs, configure cuDNN, spawn workers.

    Launches one main_worker process per local GPU via mp.spawn.
    """
    args = parse_args()
    set_seed(int(args.seed))
    update_config(cfg, args)
    # Record this node's rank in the (otherwise frozen) global config.
    cfg.defrost()
    cfg.RANK = args.rank
    cfg.freeze()
    logger, final_output_dir, tb_log_dir = create_logger(cfg, args.cfg, 'train')
    logger.info(pprint.pformat(args))
    logger.info(cfg)
    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    # One spawned process per local GPU; world size spans all nodes' GPUs.
    ngpus_per_node = torch.cuda.device_count()
    args.world_size = ngpus_per_node * args.world_size
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args, final_output_dir, tb_log_dir))
def main_worker(gpu, ngpus_per_node, args, final_output_dir, tb_log_dir):
    """Per-GPU training worker launched by mp.spawn.

    gpu: local GPU index on this node (also this process's spawn index).
    Joins the distributed process group, builds model/data/optimizer,
    then runs the train/validate/checkpoint loop.
    """
    # Global rank = node rank * GPUs-per-node + local GPU index.
    args.gpu = gpu
    args.rank = args.rank * ngpus_per_node + gpu
    print('Init process group: dist_url: {}, world_size: {}, rank: {}'.format(cfg.DIST_URL, args.world_size, args.rank))
    dist.init_process_group(backend=cfg.DIST_BACKEND, init_method=cfg.DIST_URL, world_size=args.world_size, rank=args.rank)
    update_config(cfg, args)
    # setup logger
    logger, _ = setup_logger(final_output_dir, args.rank, 'train')
    # Resolve the model builder from its configured name.
    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(cfg, is_train=True)
    logger.info(get_model_summary(model, torch.zeros(1, 3, *cfg.MODEL.IMAGE_SIZE)))
    # copy model file (first process on each node only)
    if not cfg.MULTIPROCESSING_DISTRIBUTED or (cfg.MULTIPROCESSING_DISTRIBUTED and args.rank % ngpus_per_node == 0):
        this_dir = os.path.dirname(__file__)
        shutil.copy2(os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'), final_output_dir)
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }
    if not cfg.MULTIPROCESSING_DISTRIBUTED or (cfg.MULTIPROCESSING_DISTRIBUTED and args.rank % ngpus_per_node == 0):
        # Graph dump expects (N, C, H, W); cfg IMAGE_SIZE is (width, height).
        dump_input = torch.rand((1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
        writer_dict['writer'].add_graph(model, (dump_input, ))
        # logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))
    if cfg.MODEL.SYNC_BN:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    torch.cuda.set_device(args.gpu)
    model.cuda(args.gpu)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda(args.gpu)
    # Data loading code
    train_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    )
    valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    )
    # Each process trains on its own shard of the training data.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        shuffle=(train_sampler is None),
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY,
        sampler=train_sampler
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY
    )
    logger.info(train_loader.dataset)
    best_perf = -1
    best_model = False
    last_epoch = -1
    optimizer = get_optimizer(cfg, model)
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')
    # Resume model/optimizer/epoch state from the latest checkpoint if present.
    if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
        logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file)
        begin_epoch = checkpoint['epoch']
        best_perf = checkpoint['perf']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_file, checkpoint['epoch']))
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,
        last_epoch=last_epoch)
    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        # train for one epoch
        train(cfg, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)
        # In PyTorch 1.1.0 and later, you should call `lr_scheduler.step()` after `optimizer.step()`.
        lr_scheduler.step()
        # evaluate on validation set
        perf_indicator = validate(
            args, cfg, valid_loader, valid_dataset, model, criterion,
            final_output_dir, tb_log_dir, writer_dict
        )
        if perf_indicator >= best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False
        # Only rank 0 writes checkpoints.
        if not cfg.MULTIPROCESSING_DISTRIBUTED or (
                cfg.MULTIPROCESSING_DISTRIBUTED
                and args.rank == 0
        ):
            logger.info('=> saving checkpoint to {}'.format(final_output_dir))
            save_checkpoint({
                'epoch': epoch + 1,
                'model': cfg.MODEL.NAME,
                'state_dict': model.state_dict(),
                'best_state_dict': model.module.state_dict(),
                'perf': perf_indicator,
                'optimizer': optimizer.state_dict(),
            }, best_model, final_output_dir)
    # After the final epoch, save the bare (unwrapped) model weights.
    final_model_state_file = os.path.join(
        final_output_dir, 'final_state{}.pth.tar'.format(gpu)
    )
    logger.info('saving final model state to {}'.format(
        final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
# Script entry point: spawn one training worker per local GPU.
if __name__ == '__main__':
    main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Feature extraction
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
# 20 Newsgroups text dataset
from sklearn.datasets import fetch_20newsgroups
# Train/test data splitting
from sklearn.model_selection import train_test_split
# Model result reporting
from sklearn.metrics import classification_report
import pandas as pd
# Feature extraction and vectorization (DictVectorizer demo)
def printDictVertorizer():
    """Demonstrate DictVectorizer: one-hot encode 'city', pass 'temperature' through.

    Prints the dense feature matrix and the derived feature names.
    """
    measurements = [{'city': 'Dubai', 'temperature': 33.},
                    {'city': 'London', 'temperature': 12.},
                    {'city': 'San Fransisco', 'temperature': 18.},
                    {'city': 'xi an', 'temperature': 8.}]
    vec = DictVectorizer()
    # Converted Python 2 print statements to print() calls (Python 2/3 compatible).
    print(vec.fit_transform(measurements).toarray())
    print(vec.get_feature_names())
# Feature extraction without removing stop words
def notStopVectorizer():
    """Classify 20newsgroups with MultinomialNB using raw features (stop
    words kept), for both count and TF-IDF vectorizers; print reports."""
    from sklearn.naive_bayes import MultinomialNB  # hoisted; was imported twice
    news = fetch_20newsgroups()
    X_train, X_test, Y_train, Y_test = train_test_split(
        news.data, news.target, test_size=0.25, random_state=33)
    # Bag-of-words text features
    countvec = CountVectorizer()
    X_train_count = countvec.fit_transform(X_train)
    X_test_count = countvec.transform(X_test)
    mnb = MultinomialNB()
    mnb.fit(X_train_count, Y_train)
    y_predict = mnb.predict(X_test_count)
    # Bug fixes: py2 print statements -> print(); 'user' typo -> 'use'.
    print('use CountVectorizer The Accuracy of Linear MNB is ', mnb.score(X_test_count, Y_test))
    print(classification_report(Y_test, y_predict, target_names=news.target_names))
    # TF-IDF text features
    tfidvec = TfidfVectorizer()
    X_train_tfid = tfidvec.fit_transform(X_train)
    X_test_tfid = tfidvec.transform(X_test)
    mnb_tfid = MultinomialNB()
    mnb_tfid.fit(X_train_tfid, Y_train)
    y_predict_tfid = mnb_tfid.predict(X_test_tfid)
    print('use TfidfVectorizer The Accuracy of Linear MNB is ', mnb_tfid.score(X_test_tfid, Y_test))
    print(classification_report(Y_test, y_predict_tfid, target_names=news.target_names))
def useStopVectorizer():
    """Same experiment as notStopVectorizer but with English stop words
    removed from both the count and TF-IDF vectorizers."""
    from sklearn.naive_bayes import MultinomialNB  # hoisted; was imported twice
    news = fetch_20newsgroups()
    X_train, X_test, Y_train, Y_test = train_test_split(
        news.data, news.target, test_size=0.25, random_state=33)
    # Bag-of-words text features (stop words removed)
    countvec = CountVectorizer(analyzer='word', stop_words='english')
    X_train_count = countvec.fit_transform(X_train)
    X_test_count = countvec.transform(X_test)
    mnb = MultinomialNB()
    mnb.fit(X_train_count, Y_train)
    y_predict = mnb.predict(X_test_count)
    # Bug fixes: py2 print statements -> print(); 'user' typo -> 'use'.
    print('use CountVectorizer The Accuracy of Linear MNB is ', mnb.score(X_test_count, Y_test))
    print(classification_report(Y_test, y_predict, target_names=news.target_names))
    # TF-IDF text features (stop words removed)
    tfidvec = TfidfVectorizer(analyzer='word', stop_words='english')
    X_train_tfid = tfidvec.fit_transform(X_train)
    X_test_tfid = tfidvec.transform(X_test)
    mnb_tfid = MultinomialNB()
    mnb_tfid.fit(X_train_tfid, Y_train)
    y_predict_tfid = mnb_tfid.predict(X_test_tfid)
    print('use TfidfVectorizer The Accuracy of Linear MNB is ', mnb_tfid.score(X_test_tfid, Y_test))
    print(classification_report(Y_test, y_predict_tfid, target_names=news.target_names))
def titanicTest():
    """Decision-tree Titanic-survival prediction with chi2 feature
    selection, the percentile tuned by 5-fold cross-validation."""
    from sklearn.tree import DecisionTreeClassifier
    from sklearn import feature_selection
    from sklearn.model_selection import cross_val_score
    import numpy as np
    # NOTE(review): this URL has been retired upstream; mirror the dataset
    # locally if the download 404s.
    titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
    y = titanic['survived']
    X = titanic.drop(['row.names', 'name', 'survived'], axis=1)
    # Impute: mean age for missing ages, sentinel string for everything else.
    X['age'].fillna(X['age'].mean(), inplace=True)
    X.fillna('UNKNOWN', inplace=True)
    X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.25, random_state=33)
    vec = DictVectorizer()
    # Fixed orient: 'records' is the documented value; the old 'record'
    # only worked through pandas's (since removed) prefix matching.
    X_train = vec.fit_transform(X_train.to_dict(orient='records'))
    X_test = vec.transform(X_test.to_dict(orient='records'))
    print(len(vec.feature_names_))
    # Baseline decision tree on all features.
    dt = DecisionTreeClassifier(criterion='entropy')
    dt.fit(X_train, Y_train)
    dt.score(X_test, Y_test)
    # Keep only the 20% highest-chi2 features.
    fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)
    X_train_fs = fs.fit_transform(X_train, Y_train)
    dt.fit(X_train_fs, Y_train)
    X_test_fs = fs.transform(X_test)
    dt.score(X_test_fs, Y_test)
    # Cross-validate every odd percentile to find the best feature budget.
    percentiles = range(1, 100, 2)
    results = []
    for i in percentiles:
        fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=i)
        X_train_fs = fs.fit_transform(X_train, Y_train)
        scores = cross_val_score(dt, X_train_fs, Y_train, cv=5)
        results = np.append(results, scores.mean())
    print(results)
    opt = np.where(results == results.max())[0][0]
    print(opt)
    # Converted py2 print statements to print() throughout this function.
    print('Optimal number of features %d' % percentiles[opt])
    # Re-fit with the optimal percentile and evaluate on the held-out set.
    fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=percentiles[opt])
    X_train_fs = fs.fit_transform(X_train, Y_train)
    dt.fit(X_train_fs, Y_train)
    X_test_fs = fs.transform(X_test)
    dt.score(X_test_fs, Y_test)


titanicTest()
|
|
import numpy as np
def rle_to_mask(lre, shape=(1600, 256)):
    '''
    params: lre - run-length encoding string: space-delimited pairs of
                  (1-based start pixel, run length)
            shape - (rows, cols) of the numpy array to return; note the
                  default is the transposed image orientation and callers
                  transpose the result (see build_mask)
    returns: uint8 numpy array of the given shape, 1 on encoded runs

    Robustness fix: an empty/blank encoding now yields an all-zero mask
    instead of raising ValueError from int('').
    '''
    if not lre or not lre.strip():
        return np.zeros(shape, dtype=np.uint8)
    # the incoming string is space-delimited: start0 len0 start1 len1 ...
    runs = np.asarray([int(run) for run in lre.split(' ')])
    # turn the lengths (odd positions) into exclusive end positions
    runs[1::2] += runs[0::2]
    # pixel numbers start at 1, indexes start at 0
    runs -= 1
    # extract the starting and ending indeces at even and uneven intervals, respectively
    run_starts, run_ends = runs[0::2], runs[1::2]
    # build the mask over a flat buffer
    h, w = shape
    mask = np.zeros(h * w, dtype=np.uint8)
    for start, end in zip(run_starts, run_ends):
        mask[start:end] = 1
    # transform the numpy array from flat to the original image shape
    return mask.reshape(shape)
def build_mask(encodings, labels):
    """Combine per-class RLE encodings into one (256, 1600, 4) uint8 mask.

    encodings and labels are parallel lists; labels take values in
    [1, 2, 3, 4] and are stored in layers [0, 1, 2, 3] respectively.
    """
    combined = np.zeros((256, 1600, 4), dtype=np.uint8)
    for encoding, label in zip(encodings, labels):
        layer = label - 1
        # rle_to_mask works in the transposed (1600, 256) orientation;
        # transpose back because numpy and openCV swap width and height.
        combined[:, :, layer] = rle_to_mask(encoding).T
    return combined
|
|
"""
Some random functions for hyperparameter optimization
Alisa Alenicheva, Jetbrains research, Februari 2022
"""
import os
import torch
from hyperopt import hp
import errno
from MoleculeACE.benchmark.utils import get_config
from MoleculeACE.benchmark.utils.const import Algorithms, RANDOM_SEED, CONFIG_PATH, CONFIG_PATH_GIN, CONFIG_PATH_MPNN, \
CONFIG_PATH_GCN, CONFIG_PATH_AFP, CONFIG_PATH_GAT
import random
import numpy as np
# Seed every RNG up front so hyperparameter sampling is reproducible.
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
random.seed(RANDOM_SEED)

# Each block below turns a model's config file into a hyperopt search space:
# hp.choice over discrete option lists, hp.uniform over [low, high] ranges.
# Fixed settings (epochs, val_split, patience) are wrapped in single-option
# hp.choice so every space shares a uniform structure.

# GCN search space.
cf_gcn = get_config(CONFIG_PATH_GCN)
gcn_hyperparameters = {
    'epochs': hp.choice('epochs', [cf_gcn['epochs']]),
    'val_split': hp.choice('val_split', [cf_gcn['val_split']]),
    'lr': hp.uniform('lr', low=cf_gcn['lr_min'], high=cf_gcn['lr_max']),
    'weight_decay': hp.uniform('weight_decay', low=cf_gcn['weight_decay_min'], high=cf_gcn['weight_decay_max']),
    'patience': hp.choice('patience', [cf_gcn['early_stopping_patience']]),
    'batch_size': hp.choice('batch_size', cf_gcn['batch_size']),
    'gnn_hidden_feats': hp.choice('gnn_hidden_feats', cf_gcn['gnn_hidden_feats']),
    'predictor_hidden_feats': hp.choice('predictor_hidden_feats', cf_gcn['predictor_hidden_feats']),
    'num_gnn_layers': hp.choice('num_gnn_layers', cf_gcn['num_gnn_layers']),
    'residual': hp.choice('residual', cf_gcn['residual']),
    'batchnorm': hp.choice('batchnorm', cf_gcn['batchnorm']),
    'dropout': hp.uniform('dropout', low=cf_gcn['dropout'][0], high=cf_gcn['dropout'][1])
}

# GAT search space (adds attention heads and leaky-ReLU alpha).
cf_gat = get_config(CONFIG_PATH_GAT)
gat_hyperparameters = {
    'epochs': hp.choice('epochs', [cf_gat['epochs']]),
    'val_split': hp.choice('val_split', [cf_gat['val_split']]),
    'lr': hp.uniform('lr', low=cf_gat['lr_min'], high=cf_gat['lr_max']),
    'weight_decay': hp.uniform('weight_decay', low=cf_gat['weight_decay_min'], high=cf_gat['weight_decay_max']),
    'patience': hp.choice('patience', [cf_gat['early_stopping_patience']]),
    'batch_size': hp.choice('batch_size', cf_gat['batch_size']),
    'gnn_hidden_feats': hp.choice('gnn_hidden_feats', cf_gat['gnn_hidden_feats']),
    'num_heads': hp.choice('num_heads', cf_gat['num_heads']),
    'alpha': hp.uniform('alpha', low=cf_gat['alpha'][0], high=cf_gat['alpha'][1]),
    'predictor_hidden_feats': hp.choice('predictor_hidden_feats', cf_gat['predictor_hidden_feats']),
    'num_gnn_layers': hp.choice('num_gnn_layers', cf_gat['num_gnn_layers']),
    'residual': hp.choice('residual', cf_gat['residual']),
    'dropout': hp.uniform('dropout', low=cf_gat['dropout'][0], high=cf_gat['dropout'][1])
}

# MPNN search space (message passing + set2set readout sizes).
cf_mpnn = get_config(CONFIG_PATH_MPNN)
mpnn_hyperparameters = {
    'epochs': hp.choice('epochs', [cf_mpnn['epochs']]),
    'val_split': hp.choice('val_split', [cf_mpnn['val_split']]),
    'lr': hp.uniform('lr', low=cf_mpnn['lr_min'], high=cf_mpnn['lr_max']),
    'weight_decay': hp.uniform('weight_decay', low=cf_mpnn['weight_decay_min'], high=cf_mpnn['weight_decay_max']),
    'patience': hp.choice('patience', [cf_mpnn['early_stopping_patience']]),
    'batch_size': hp.choice('batch_size', cf_mpnn['batch_size']),
    'node_out_feats': hp.choice('node_out_feats', cf_mpnn['node_out_feats']),
    'edge_hidden_feats': hp.choice('edge_hidden_feats', cf_mpnn['edge_hidden_feats']),
    'num_step_message_passing': hp.choice('num_step_message_passing', cf_mpnn['num_step_message_passing']),
    'num_step_set2set': hp.choice('num_step_set2set', cf_mpnn['num_step_set2set']),
    'num_layer_set2set': hp.choice('num_layer_set2set', cf_mpnn['num_layer_set2set'])
}

# AttentiveFP search space.
cf_afp = get_config(CONFIG_PATH_AFP)
attentivefp_hyperparameters = {
    'epochs': hp.choice('epochs', [cf_afp['epochs']]),
    'val_split': hp.choice('val_split', [cf_afp['val_split']]),
    'lr': hp.uniform('lr', low=cf_afp['lr_min'], high=cf_afp['lr_max']),
    'weight_decay': hp.uniform('weight_decay', low=cf_afp['weight_decay_min'], high=cf_afp['weight_decay_max']),
    'patience': hp.choice('patience', [cf_afp['early_stopping_patience']]),
    'batch_size': hp.choice('batch_size', cf_afp['batch_size']),
    'num_layers': hp.choice('num_layers', cf_afp['num_layers']),
    'num_timesteps': hp.choice('num_timesteps', cf_afp['num_timesteps']),
    'graph_feat_size': hp.choice('graph_feat_size', cf_afp['graph_feat_size']),
    'dropout': hp.uniform('dropout', low=cf_afp['dropout'][0], high=cf_afp['dropout'][1])
}

# Pretrained GIN search space (only readout/jk are tuned; backbone is fixed).
cf_gin = get_config(CONFIG_PATH_GIN)
gin_pretrained_hyperparameters = {
    'epochs': hp.choice('epochs', [cf_gin['epochs']]),
    'val_split': hp.choice('val_split', [cf_gin['val_split']]),
    'lr': hp.uniform('lr', low=cf_gin['lr_min'], high=cf_gin['lr_max']),
    'weight_decay': hp.uniform('weight_decay', low=cf_gin['weight_decay_min'], high=cf_gin['weight_decay_max']),
    'patience': hp.choice('patience', [cf_gin['early_stopping_patience']]),
    'batch_size': hp.choice('batch_size', cf_gin['batch_size']),
    'jk': hp.choice('jk', cf_gin['jk']),
    'readout': hp.choice('readout', cf_gin['readout'])
}
def init_hyper_space(model: Algorithms):
    """Initialize the hyperparameter search space
    Parameters
    ----------
    model : str
        Model for searching hyperparameters
    Returns
    -------
    dict
        Mapping hyperparameter names to the associated search spaces
    Raises
    ------
    ValueError
        If *model* is not one of the supported algorithms.
    """
    candidate_hypers = dict()
    if model == Algorithms.GCN:
        candidate_hypers.update(gcn_hyperparameters)
    elif model == Algorithms.GAT:
        candidate_hypers.update(gat_hyperparameters)
    elif model == Algorithms.MPNN:
        candidate_hypers.update(mpnn_hyperparameters)
    elif model == Algorithms.AFP:
        candidate_hypers.update(attentivefp_hyperparameters)
    elif model in [Algorithms.GIN_MASKING, Algorithms.GIN_INFOMAX, Algorithms.GIN_EDGEPRED, Algorithms.GIN_CONTEXTPRED]:
        candidate_hypers.update(gin_pretrained_hyperparameters)
    else:
        # Bug fix: the ValueError was previously *returned*, so callers
        # silently received an exception instance as the "search space".
        raise ValueError('Unexpected model: {}'.format(model))
    return candidate_hypers
def mkdir_p(path):
    """Create a folder for the given path.
    Parameters
    ----------
    path: str
        Folder to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # An already-existing directory is fine; anything else propagates.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
        print('Directory {} already exists.'.format(path))
    else:
        print('Created directory {}'.format(path))
def init_trial_path(result_path):
    """ Check and get the trial output path
    Args:
        result_path: (str) Path to save results
    Returns: (str) path to save a optimization trial
    """
    # Trial directories are numbered 1, 2, ...; pick the first free one.
    trial_id = 1
    while os.path.exists(os.path.join(result_path, str(trial_id))):
        trial_id += 1
    trial_path = os.path.join(result_path, str(trial_id))
    mkdir_p(trial_path)
    return trial_path
|
|
import numpy as np
import pandas
import analysis as lan
from collections import namedtuple
PathwayConfig = namedtuple("PathwayConfig", ["measure", "hierarchy"])
def retrieve_mutations(pid, seq_data):
    """Return Biomarker / NGS_PercentMutated rows for one patient's
    detected (variant or pathogenic) NGS Q3 results."""
    is_patient = seq_data["PatientFirstName"] == pid
    is_ngs_q3 = seq_data["Technology"] == "NGS Q3"
    # We only care about variants and pathogenic mutations
    is_hit = seq_data["TestResult"].isin(["variantdetected", "Mutated, Pathogenic"])
    selected = seq_data[is_patient & is_ngs_q3 & is_hit]
    return selected[["Biomarker", "NGS_PercentMutated"]]
def util_unweight(g):
    """Assign every node of graph *g* a uniform weight of 1."""
    return dict.fromkeys(g.nodes(), 1)
def calculate_patient_mutations_with_f(pid, seq_data, pathways, f, factor_famcom=False):
    """Score each pathway for one patient as a weighted mean mutation load.

    pid: patient identifier, matched against PatientFirstName.
    seq_data: sequencing results DataFrame.
    pathways: pathway objects exposing name, get_genes() and
        calculate_measure(f, factor_famcom) -> per-gene weights
        (presumably a pandas Series indexed by gene -- TODO confirm).
    f: measure identifier forwarded to calculate_measure.
    factor_famcom: family/complex factoring flag, forwarded as-is.

    Returns {pathway name: weighted percent mutated}; {} when the patient
    has no qualifying rows at all.
    """
    patient_data = seq_data[
        (seq_data["PatientFirstName"] == pid)
        & (seq_data["Technology"] == "NGS Q3")
        # We only care about variants and pathogenic mutations
        & (seq_data["TestResult"].isin(["variantdetected", "Mutated, Pathogenic"]))
    ]
    patient_data = patient_data[["Biomarker", "NGS_PercentMutated"]]
    # Realistically, should never happen
    if patient_data.empty:
        return {}
    results = {}
    for pw in pathways:
        pathway_mutations = patient_data[patient_data["Biomarker"].isin(pw.get_genes())]
        if pathway_mutations.empty:
            results[pw.name] = np.float64(0.0)
            continue
        weights = pw.calculate_measure(f, factor_famcom)
        # Strongest observed mutation per gene in this pathway.
        patient_mutations = pathway_mutations.groupby("Biomarker").max()[
            "NGS_PercentMutated"
        ]
        total_weights = weights.sum()
        if total_weights != 0:
            # Weighted average; genes without a measurement contribute 0.
            perc_mutation = (
                weights.mul(patient_mutations, fill_value=np.float64(0.0)).sum()
                / total_weights
            )
        else:
            perc_mutation = 0
        results[pw.name] = perc_mutation
    return results
def calculate_patient_mutations_with_config(
    pid, seq_data, pathways, legacy_pathways, config
):
    """Score each pathway for one patient using per-pathway configuration.

    config maps pathway name -> PathwayConfig(measure, hierarchy).
    Pathways configured with measure == "baseline" are delegated to the
    legacy scorer (lan.calculate_patient_mutations) on legacy_pathways,
    whose entries appear to be tuples with the name at index 0 --
    TODO confirm against analysis.calculate_patient_mutations.

    Returns {pathway name: weighted percent mutated}; {} when the patient
    has no qualifying rows at all.
    """
    patient_data = seq_data[
        (seq_data["PatientFirstName"] == pid)
        & (seq_data["Technology"] == "NGS Q3")
        # We only care about variants and pathogenic mutations
        & (seq_data["TestResult"].isin(["variantdetected", "Mutated, Pathogenic"]))
    ]
    patient_data = patient_data[["Biomarker", "NGS_PercentMutated"]]
    # Realistically, should never happen
    if patient_data.empty:
        return {}
    results = {}
    legacy_to_compute = []
    for pw in pathways:
        pathway_mutations = patient_data[patient_data["Biomarker"].isin(pw.get_genes())]
        if pathway_mutations.empty:
            results[pw.name] = np.float64(0.0)
            continue
        # "baseline" pathways are deferred to the legacy implementation.
        if config[pw.name].measure == "baseline":
            legacy_to_compute.append(pw.name)
            continue
        weights = pw.calculate_measure(
            config[pw.name].measure, config[pw.name].hierarchy
        )
        # Strongest observed mutation per gene in this pathway.
        patient_mutations = pathway_mutations.groupby("Biomarker").max()[
            "NGS_PercentMutated"
        ]
        total_weights = weights.sum()
        if total_weights != 0:
            # Weighted average; genes without a measurement contribute 0.
            perc_mutation = (
                weights.mul(patient_mutations, fill_value=np.float64(0.0)).sum()
                / total_weights
            )
        else:
            perc_mutation = 0
        results[pw.name] = perc_mutation
    legacy_results = lan.calculate_patient_mutations(
        pid, seq_data, [pw for pw in legacy_pathways if pw[0] in legacy_to_compute]
    )
    # Legacy scores are merged in on top of the configured ones.
    results = {**results, **legacy_results}
    return results
def process_patients_with_f(patients, f, pathways, mutations_data, complexes=False):
    """Score every patient with measure *f*; return a tidy DataFrame with
    one row per patient and one column per pathway."""
    per_patient = {
        pid: calculate_patient_mutations_with_f(
            pid, mutations_data, pathways, f, complexes
        )
        for pid in patients
    }
    frame = pandas.DataFrame.from_dict(per_patient, orient="index")
    return frame.rename_axis("PatientFirstName").reset_index()
def process_patients_with_config(
    patients, pathways, legacy_pathways, mutations_data, config
):
    """Score every patient using the per-pathway config and tabulate results.

    Mirrors :func:`process_patients_with_f` but drives
    :func:`calculate_patient_mutations_with_config` instead.
    """
    per_patient = {}
    for pid in patients:
        per_patient[pid] = calculate_patient_mutations_with_config(
            pid, mutations_data, pathways, legacy_pathways, config
        )
    table = pandas.DataFrame.from_dict(per_patient, orient="index")
    return table.rename_axis("PatientFirstName").reset_index()
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ERNIE-1.0 pretraining scripts.
"""
import argparse
import os
import sys
import random
import time
import yaml
import shutil
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
from paddle.io import DataLoader, Dataset
from visualdl import LogWriter
from paddlenlp.transformers import ErnieModel, ErnieForPretraining, ErniePretrainingCriterion, ErnieTokenizer
from paddlenlp.transformers import CosineAnnealingWithWarmupDecay, LinearDecayWithWarmup
from paddlenlp.utils.batch_sampler import DistributedBatchSampler
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.ops import Topology
from paddlenlp.utils.log import logger
from args import parse_args
sys.path.insert(0, os.path.abspath("../"))
from data_tools.dataset_utils import build_train_valid_test_datasets
# Registry: model-type key -> (backbone model, pretraining model,
# pretraining criterion, tokenizer) classes; selected via args.model_type
# inside do_train().
MODEL_CLASSES = {
    "ernie": (ErnieModel, ErnieForPretraining, ErniePretrainingCriterion,
              ErnieTokenizer),
}
def create_pretrained_dataset(
        args,
        data_file,
        tokenizer,
        data_world_size,
        data_world_rank,
        max_seq_len,
        places=None,
        data_holders=None,
        current_step=0, ):
    """Build train/valid/test DataLoaders for ERNIE pretraining.

    Sample budgets are derived from args: the train split covers
    ``global_batch_size * max_steps`` samples, the valid split one eval
    window per ``eval_freq`` steps, the test split ``test_iters`` batches.
    ``current_step`` is used to fast-forward the samplers when resuming
    from a checkpoint. ``places`` and ``data_holders`` are accepted but
    not used in this implementation.
    """
    train_valid_test_num_samples = [
        args.global_batch_size * args.max_steps,
        args.micro_batch_size * (args.max_steps // args.eval_freq + 1) *
        args.eval_iters * data_world_size,
        args.micro_batch_size * args.test_iters * data_world_size
    ]
    train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
        data_prefix=data_file,
        args=args,
        tokenizer=tokenizer,
        splits_string=args.split,
        train_valid_test_num_samples=train_valid_test_num_samples,
        max_seq_length=args.max_seq_len,
        masked_lm_prob=args.masked_lm_prob,
        short_seq_prob=args.short_seq_prob,
        seed=args.seed,
        skip_warmup=True,
        binary_head=True,
        max_seq_length_dec=None,
        dataset_type='ernie')

    def _collate_data(data, stack_fn=Stack()):
        # Collate a list of samples into batched arrays. Each sample is:
        # 0. input_ids,
        # 1. segment_ids,
        # 2. input_mask,
        # 3. masked_lm_positions,
        # 4. masked_lm_labels,
        # 5. next_sentence_labels
        num_fields = len(data[0])
        out = [None] * num_fields
        for i in (0, 1, 2, 5):
            out[i] = stack_fn([x[i] for x in data])
        out[5] = out[5].reshape([-1, 1])
        batch_size, seq_length = out[0].shape
        size = num_mask = sum(len(x[3]) for x in data)
        # masked_lm_positions
        # Organize as a 1D tensor for gather or use gather_nd
        # Pad the mask count up to a multiple of 8 (padded labels stay -1
        # so they are ignored by the criterion).
        if size % 8 != 0:
            size += 8 - (size % 8)
        out[3] = np.full(size, 0, dtype=np.int32)
        # masked_lm_labels
        out[4] = np.full([size, 1], -1, dtype=np.int64)
        mask_token_num = 0
        for i, x in enumerate(data):
            for j, pos in enumerate(x[3]):
                # Positions are flattened across the batch: row * seq_len + pos.
                out[3][mask_token_num] = i * seq_length + pos
                out[4][mask_token_num] = x[4][j]
                mask_token_num += 1
        return out

    def loader(dataset, consumed_samples=0):
        # DistributedBatchSampler shards the dataset across data-parallel
        # ranks; consumed_samples lets a resumed run skip already-seen data.
        batch_sampler = DistributedBatchSampler(
            dataset,
            batch_size=args.micro_batch_size,
            num_replicas=data_world_size,
            rank=data_world_rank,
            shuffle=False,
            drop_last=True,
            consumed_samples=consumed_samples)
        data_loader = paddle.io.DataLoader(
            dataset=dataset,
            batch_sampler=batch_sampler,
            num_workers=args.num_workers,
            worker_init_fn=None,
            collate_fn=_collate_data,
            return_list=False)
        return data_loader

    train_dl = loader(train_ds, args.global_batch_size * current_step)
    valid_dl = loader(valid_ds, args.micro_batch_size * (
        (current_step + 1) // args.eval_freq) * args.eval_iters *
        data_world_size)
    test_dl = loader(test_ds, 0)

    return train_dl, valid_dl, test_dl
def get_train_data_file(args):
    """Collect dataset file prefixes under ``args.input_dir``.

    Finds regular files whose name contains ``_idx.npz`` and returns
    their full paths with that suffix stripped, which is the prefix form
    expected by the dataset builder.
    """
    prefixes = []
    for name in os.listdir(args.input_dir):
        candidate = os.path.join(args.input_dir, name)
        if os.path.isfile(candidate) and "_idx.npz" in str(name):
            prefixes.append(candidate.replace("_idx.npz", ""))
    return prefixes
@paddle.no_grad()
def run_evaluate(data_loader,
                 model,
                 criterion,
                 iter_steps,
                 model,
                 criterion,
                 iter_steps,
                 log_writer,
                 global_step,
                 args,
                 task_name="valid"):
def set_seed(args):
    """Seed the python, NumPy, and Paddle RNGs.

    The seed is offset by the distributed rank (0 on CPU) so each worker
    draws a different but reproducible random stream.
    """
    rank_offset = 0 if args.device == "cpu" else paddle.distributed.get_rank()
    seed = args.seed + rank_offset
    random.seed(seed)
    np.random.seed(seed)
    paddle.seed(seed)
def args_post_process(args, worker_num):
    """Fill in derived batch-size settings on *args* in place.

    - Defaults ``global_batch_size`` to ``worker_num * micro_batch_size``.
    - Derives ``accumulate_steps`` (gradient-accumulation steps per worker).
    - Scales ``eval_iters``/``test_iters`` so evaluation covers the same
      number of samples regardless of accumulation.

    Raises AssertionError when the batch sizes do not divide evenly —
    previously a global batch not divisible by ``worker_num`` (or a
    per-worker batch not divisible by ``micro_batch_size``) was silently
    truncated by integer division.
    """
    default_global_batch_size = worker_num * args.micro_batch_size
    if args.global_batch_size is None:
        args.global_batch_size = default_global_batch_size

    # A non-divisible global batch would silently drop samples per step.
    assert args.global_batch_size % worker_num == 0, \
        "global_batch_size: {} must be divisible by worker_num: {}".format(
            args.global_batch_size, worker_num)
    bsz_per_dp = args.global_batch_size // worker_num

    micro_batch_size = args.micro_batch_size
    # The per-worker batch must decompose exactly into micro batches.
    assert bsz_per_dp % micro_batch_size == 0, \
        "cannot do gradient accumulate, global_batch_size: {} micro_batch_size: {}".format(
            args.global_batch_size, micro_batch_size)
    accumulate_steps = bsz_per_dp // micro_batch_size

    # Evaluation iterates micro batches, so scale by the accumulation factor.
    args.eval_iters *= accumulate_steps
    args.test_iters *= accumulate_steps
    args.accumulate_steps = accumulate_steps
def do_train(args):
    """End-to-end ERNIE pretraining loop (data-parallel only).

    Sets up the distributed environment, builds the model, criterion,
    optimizer and dataloaders, optionally restores state from the
    ``model_last`` checkpoint, then trains until ``args.max_steps`` with
    periodic logging, evaluation and checkpointing.
    """
    paddle.set_device(args.device)

    worker_index = paddle.distributed.get_rank()
    worker_num = paddle.distributed.get_world_size()
    local_rank = int(os.getenv("PADDLE_RANK_IN_NODE", 0))

    if worker_num > 1:
        paddle.distributed.init_parallel_env()

    # Default to pure data parallelism when no degrees were configured.
    if args.dp_degree * args.sharding_degree == 1:
        args.dp_degree = worker_num
        args.sharding_degree = 1

    args_post_process(args, worker_num)

    logger.info('{:20}:{}'.format("paddle commit id", paddle.version.commit))
    for arg in vars(args):
        logger.info('{:20}:{}'.format(arg, getattr(args, arg)))

    # Only dp_degree varies; tensor/pipeline/sharding parallelism are fixed to 1.
    strategy = fleet.DistributedStrategy()
    strategy.hybrid_configs = {
        "dp_degree": args.dp_degree,
        "mp_degree": 1,
        "pp_degree": 1,
        "sharding_degree": 1
    }

    fleet.init(is_collective=True, strategy=strategy)
    hcg = fleet.get_hybrid_communicate_group()

    worker_index = paddle.distributed.get_rank()
    worker_num = paddle.distributed.get_world_size()
    local_rank = int(os.getenv("PADDLE_RANK_IN_NODE", 0))

    # Create the random seed for the worker
    set_seed(args)

    assert args.dp_degree * args.sharding_degree == worker_num, \
        "The product of degree num should be equal to worker_num."

    # VisualDL writer; the directory name encodes the run configuration.
    log_writer_path = os.path.join(
        args.output_dir, "train_log",
        "{}_globalbsz_{}_amp_{}_recompute_{}_card_{}".format(
            args.model_name_or_path, args.global_batch_size, args.use_amp,
            args.use_recompute, worker_index).lower())
    log_writer = LogWriter(log_writer_path)

    base_class, model_class, criterion_class, tokenizer_class = MODEL_CLASSES[
        args.model_type]
    pretrained_models_list = list(
        model_class.pretrained_init_configuration.keys())

    # Resume bookkeeping from the last checkpoint's config.yml, if present.
    global_step = 0
    consumed_samples = 0
    checkpoint_dir = os.path.join(args.output_dir, "model_last")
    if os.path.exists(checkpoint_dir):
        if os.path.isfile(os.path.join(checkpoint_dir, "./config.yml")):
            with open(os.path.join(checkpoint_dir, "./config.yml"), "r") as f:
                step_config = yaml.load(f, Loader=yaml.FullLoader)
                assert step_config[
                    "global_batch_size"] == args.global_batch_size, "Please ensure checkpoint global batch size is the same. Folder: {}".format(
                        checkpoint_dir)
                consumed_samples = step_config["consumed_samples"]
                global_step = step_config["global_step"]

    # Build from a known architecture config, or load released weights.
    if args.model_name_or_path in pretrained_models_list:
        model_config = model_class.pretrained_init_configuration[
            args.model_name_or_path]
        model_config["hidden_dropout_prob"] = args.hidden_dropout_prob
        model_config[
            "attention_probs_dropout_prob"] = args.attention_probs_dropout_prob
        model = model_class(base_class(**model_config))
    else:
        model = model_class.from_pretrained(
            args.model_name_or_path,
            hidden_dropout_prob=args.hidden_dropout_prob,
            attention_probs_dropout_prob=args.attention_probs_dropout_prob)

    criterion = criterion_class()

    # Create the learning_rate scheduler and optimizer.
    # NOTE(review): args.decay_steps is defaulted here but the scheduler
    # below does not consume it — confirm whether it is used elsewhere.
    if args.decay_steps is None:
        args.decay_steps = args.max_steps

    lr_scheduler = LinearDecayWithWarmup(
        args.max_lr, args.max_steps, args.warmup_rate, last_epoch=global_step)

    clip = None
    if args.grad_clip > 0:
        clip = paddle.fluid.clip.GradientClipByGlobalNorm(
            clip_norm=args.grad_clip)

    # Exclude bias and norm parameters from weight decay.
    decay_param = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]

    logger.info("Using paddle.optimizer.AdamW.")
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler if lr_scheduler is not None else args.max_lr,
        beta1=args.adam_beta1,
        beta2=args.adam_beta2,
        epsilon=args.adam_epsilon,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        grad_clip=clip,
        apply_decay_param_fun=lambda x: x in decay_param,
        multi_precision=args.use_amp)

    if args.use_amp:
        scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)
        scaler = fleet.distributed_scaler(scaler)
        model = paddle.amp.decorate(
            models=model, level='O2', save_dtype='float32')

    if paddle.distributed.get_world_size() > 1:
        model = fleet.distributed_model(model)
        optimizer = fleet.distributed_optimizer(optimizer)

    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)

    data_file = get_train_data_file(args)
    train_data_loader, valid_data_loader, test_data_loader = create_pretrained_dataset(
        args,
        data_file,
        tokenizer,
        data_world_size=worker_num,
        data_world_rank=worker_index,
        max_seq_len=args.max_seq_len,
        current_step=global_step)

    # load checkpoint vars (model parameters and optimizer state)
    if os.path.exists(checkpoint_dir):
        if os.path.isfile(os.path.join(checkpoint_dir, "./config.yml")):
            logger.info("Try to load checkpoint from %s " % checkpoint_dir)
            opt_path = os.path.join(checkpoint_dir, "model_state.pdopt")
            params_path = os.path.join(checkpoint_dir, "model_state.pdparams")
            if os.path.exists(opt_path):
                opt_dict = paddle.load(opt_path)
                optimizer.set_state_dict(opt_dict)
                model_dict = paddle.load(params_path)
                model.set_state_dict(model_dict)
            else:
                logger.warning("No optimizer checkpoint file found in %s." %
                               opt_path)
            logger.info("Checkpoint loaded from global step: {}".format(
                global_step))

    tic_train = time.time()
    while True:
        # If not call valid_data_loader, the enumerate will call valid_data_loader
        # many times. and start a new random dataloader.
        valid_data_loader = valid_data_loader()
        test_data_loader = test_data_loader()

        # time count
        train_reader_cost = 0.0
        train_run_cost = 0.0
        reader_start = time.time()
        for step, batch in enumerate(train_data_loader()):
            train_reader_cost += time.time() - reader_start
            train_start = time.time()

            # Batch layout (see _collate_data in create_pretrained_dataset):
            # 0. input_ids,
            # 1. segment_ids,
            # 2. input_mask,
            # 3. masked_lm_positions,
            # 4. masked_lm_labels,
            # 5. next_sentence_labels
            input_ids, segment_ids, input_mask, masked_lm_positions, \
                masked_lm_labels, next_sentence_labels = batch

            with paddle.amp.auto_cast(
                    args.use_amp,
                    custom_black_list=[
                        "reduce_sum", "c_softmax_with_cross_entropy",
                        "elementwise_div"
                    ],
                    level='O2'):

                # Forward pass of the ERNIE pretraining model.
                prediction_scores, seq_relationship_score = model(
                    input_ids=input_ids,
                    token_type_ids=segment_ids,
                    position_ids=None,
                    attention_mask=input_mask,
                    masked_positions=masked_lm_positions)

                lm_loss, sop_loss = criterion(
                    prediction_scores, seq_relationship_score, masked_lm_labels,
                    next_sentence_labels)
                loss = lm_loss + sop_loss

            if args.use_amp:
                scaler.scale(loss).backward()
                scaler.minimize(optimizer, loss)
            else:
                loss.backward()
                optimizer.step()

            optimizer.clear_grad()
            train_run_cost += time.time() - train_start

            # Skip for accumulate_steps in global step
            if (step + 1) % args.accumulate_steps != 0:
                continue
            global_step += 1

            if global_step % args.logging_freq == 0:
                speed = args.logging_freq / (time.time() - tic_train)
                common_loginfo = "global step %d, loss: %.9f, lm_loss: %.6f, sop_loss: %.6f, speed: %.2f steps/s, ips: %.2f seqs/s, learning rate: %.5e" % (
                    global_step, loss.item(), lm_loss.item(), sop_loss.item(),
                    speed, speed * args.global_batch_size,
                    lr_scheduler.get_lr())
                addition_info = ""
                if args.use_amp:
                    # Private GradScaler fields — inspect-only, for logging.
                    addition_info = " loss_scaling: %.1f, incr_count: %d, decr_count: %d" % (
                        scaler._scale.numpy(), scaler._incr_count,
                        scaler._decr_count)
                logger.info(common_loginfo + addition_info)
                log_writer.add_scalar("loss", loss.item(), global_step)
                log_writer.add_scalar("lm_loss", lm_loss.item(), global_step)
                log_writer.add_scalar("sop_loss", sop_loss.item(), global_step)
                tic_train = time.time()

            if lr_scheduler is not None:
                lr_scheduler.step()

            if global_step % args.eval_freq == 0:
                # TODO, check the input data of validation
                run_evaluate(
                    valid_data_loader,
                    model,
                    criterion,
                    args.eval_iters,
                    log_writer,
                    global_step,
                    args,
                    task_name="valid")
                tic_train = time.time()

            # NOTE: closure — save_ckpt reads `optimizer` from the enclosing
            # scope even though the other collaborators are parameters.
            def save_ckpt(output_dir, model, tokenizer, args, global_step):
                step_config = {
                    "model_name": args.model_name_or_path,
                    "global_step": global_step,
                    "global_batch_size": args.global_batch_size,
                    "consumed_samples": global_step * args.global_batch_size,
                }

                logger.debug("saving models to {}".format(output_dir))
                model_to_save = model._layers if isinstance(
                    model, paddle.DataParallel) else model

                model_to_save.save_pretrained(output_dir)
                tokenizer.save_pretrained(output_dir)
                paddle.save(optimizer.state_dict(),
                            os.path.join(output_dir, "model_state.pdopt"))

                with open(os.path.join(output_dir, "config.yml"), "w") as f:
                    yaml.dump(
                        step_config, f, encoding='utf-8', allow_unicode=True)

            # Periodic immutable snapshot, written by rank 0 only.
            if global_step % args.save_steps == 0 or global_step >= args.max_steps:
                output_dir = os.path.join(args.output_dir,
                                          "model_%d" % global_step)
                if worker_index == 0:
                    save_ckpt(output_dir, model, tokenizer, args, global_step)
                if worker_num > 1:
                    paddle.distributed.barrier()
                tic_train = time.time()

            # Rolling "model_last" checkpoint with a _bak safety copy.
            if global_step % args.checkpoint_steps == 0:
                output_dir = os.path.join(args.output_dir, "model_last")
                if worker_index == 0:
                    if not os.path.exists(output_dir):
                        os.mkdir(output_dir)
                    output_dir_bak = os.path.join(args.output_dir,
                                                  "model_last_bak")
                    if os.path.exists(output_dir):
                        if os.path.exists(output_dir_bak):
                            shutil.rmtree(output_dir_bak)
                        shutil.move(output_dir, output_dir_bak)
                        os.mkdir(output_dir)
                    save_ckpt(output_dir, model, tokenizer, args, global_step)
                if worker_num > 1:
                    paddle.distributed.barrier()

            if global_step >= args.max_steps:
                run_evaluate(
                    test_data_loader,
                    model,
                    criterion,
                    args.test_iters,
                    log_writer,
                    global_step,
                    args,
                    task_name="test")
                del train_data_loader
                return
if __name__ == "__main__":
    # Parse CLI/config arguments (see args.parse_args) and launch pretraining.
    config = parse_args(MODEL_CLASSES)
    do_train(config)
|
|
import argparse
import logging
import os
import pickle
import sys
import time
import numpy as np
import tensorflow as tf
from sklearn.svm import SVC
from tensorflow.python.platform import gfile
from input_loader import (filter_dataset, split_dataset, get_dataset,
get_image_paths_and_labels)
logger = logging.getLogger(__name__)
def main(input_dir, model_path, output_path, batch_size, num_threads,
         num_epochs, min_images_per_class, split_ratio):
    """
    Train and evaluate an SVM classifier on facenet embeddings.

    Loads pre-processed images from :param input_dir, embeds them with the
    frozen model at :param model_path, trains a classifier saved to
    :param output_path, and then evaluates it on the held-out split.

    :param input_dir: Path to directory containing pre-processed images
    :param model_path: Path to protobuf graph file for facenet model
    :param output_path: Path to write output pickled classifier
    :param batch_size: Batch size to create embeddings
    :param num_threads: Number of threads to utilize for queuing
    :param num_epochs: Number of epochs for each image
    :param min_images_per_class: Minimum number of images per class
    :param split_ratio: Ratio to split train/test dataset
    """
    train_started = time.time()
    train_set, test_set = _get_test_and_train_set(
        input_dir,
        min_images_per_class=min_images_per_class,
        split_ratio=split_ratio,
    )

    logger.info('Creating embeddings for training set.')
    # Run the images through the pretrained model to generate embeddings.
    emb_array, label_array, class_names = run_model(
        train_set, model_path, batch_size, num_threads, num_epochs,
        augment=True)
    classifier = _train_and_save_classifier(
        emb_array, label_array, class_names, output_path)
    logger.info(
        'Training completed in {} seconds'.format(time.time() - train_started)
    )

    # Evaluate the freshly-trained classifier on the test split.
    eval_started = time.time()
    logger.info('Creating embeddings for test set.')
    emb_array, label_array, class_names = run_model(
        test_set, model_path, batch_size, num_threads, num_epochs=1,
        augment=False)
    _evaluate_classifier(emb_array, label_array, classifier, class_names)
    logger.info(
        'Testing completed in {} seconds'.format(time.time() - eval_started)
    )
def run_model(data, model_path, batch_size, num_threads, num_epochs, augment):
    """
    Load and run the specified pretrained model to generate embeddings for the
    given data.

    :param data: dataset (image classes) to embed
    :param model_path: path to the frozen protobuf graph of the facenet model
    :param batch_size: batch size used when feeding images through the graph
    :param num_threads: parallelism for the tf.data input pipeline
    :param num_epochs: number of passes over the data
    :param augment: whether to apply random augmentations to each image
    :return: (embeddings array, label array, class names list)
    """
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        # Build the input pipeline first, then import the frozen graph into
        # the same default graph so the named tensors below exist.
        dataset, class_names = _load_images_and_labels(data,
                                                       batch_size=batch_size,
                                                       num_threads=num_threads,
                                                       num_epochs=num_epochs,
                                                       augment=augment)

        _load_model(model_filepath=model_path)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)

        # Tensors are looked up by the names used in the frozen facenet graph.
        images_placeholder = \
            tf.get_default_graph().get_tensor_by_name("input:0")
        embedding_layer = \
            tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = \
            tf.get_default_graph().get_tensor_by_name("phase_train:0")

        emb_array, label_array = _create_embeddings(
            embedding_layer, dataset, images_placeholder,
            phase_train_placeholder, sess
        )

        logger.info('Created {} embeddings'.format(len(emb_array)))
    return emb_array, label_array, class_names
def _get_test_and_train_set(input_dir, min_images_per_class, split_ratio=0.7):
    """
    Load the dataset from *input_dir* and split it into train/test subsets.

    Classes with fewer than *min_images_per_class* images are filtered out
    before the split.

    :param input_dir: directory of pre-processed images, one folder per class
    :param min_images_per_class: minimum images a class needs to be kept
    :param split_ratio: fraction of data assigned to the training set
    """
    filtered = filter_dataset(
        get_dataset(input_dir), min_images_per_class=min_images_per_class)
    return split_dataset(filtered, split_ratio=split_ratio)
def _load_images_and_labels(dataset, batch_size, num_threads, num_epochs,
                            augment=False):
    """
    Build the tf.data input pipeline used to generate the embeddings.

    When *augment* is True, small random augmentations are applied to every
    sample after preprocessing.
    """
    class_names = [cls.name for cls in dataset]
    image_paths, labels = get_image_paths_and_labels(dataset)

    pipeline = tf.data.Dataset.from_tensor_slices((image_paths, labels))
    pipeline = pipeline.shuffle(len(image_paths))
    pipeline = pipeline.repeat(num_epochs)
    pipeline = pipeline.map(_preprocess_function,
                            num_parallel_calls=num_threads)
    if augment:
        pipeline = pipeline.map(_augment_function,
                                num_parallel_calls=num_threads)
    pipeline = pipeline.batch(batch_size).prefetch(1)
    return pipeline, class_names
def _preprocess_function(image_path, label):
    """
    Parse, randomly crop to 160x160, and standardize the given image.
    """
    crop_size = 160
    raw_contents = tf.read_file(image_path)
    decoded = tf.image.decode_jpeg(raw_contents, channels=3)
    cropped = tf.random_crop(decoded, size=[crop_size, crop_size, 3])
    cropped.set_shape((crop_size, crop_size, 3))
    standardized = tf.image.per_image_standardization(cropped)
    return standardized, label
def _augment_function(image, label):
    """
    Apply random flip/brightness/contrast augmentations to the given image.
    """
    flipped = tf.image.random_flip_left_right(image)
    brightened = tf.image.random_brightness(flipped, max_delta=0.3)
    contrasted = tf.image.random_contrast(brightened, lower=0.2, upper=1.8)
    return contrasted, label
def _load_model(model_filepath):
    """
    Load frozen protobuf graph into the default TensorFlow graph.

    Exits the process with status -1 when the file does not exist.

    :param model_filepath: Path to protobuf graph
    :type model_filepath: str
    """
    model_exp = os.path.expanduser(model_filepath)
    if os.path.isfile(model_exp):
        # Use the module-level logger (was the root `logging.info`) so output
        # is consistent with the rest of this module.
        logger.info('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        logger.error('Missing model file. Exiting')
        sys.exit(-1)
def _create_embeddings(embedding_layer, dataset, images_placeholder,
                       phase_train_placeholder, sess):
    """
    Uses the loaded model to generate embeddings for all batches in *dataset*.

    Iterates the dataset until exhaustion (tf.errors.OutOfRangeError) and
    accumulates embeddings/labels across batches.

    :param embedding_layer: output tensor of the embedding network
    :param dataset: tf.data.Dataset yielding (images, labels) batches
    :param images_placeholder: input tensor of the frozen graph
    :param phase_train_placeholder: bool tensor; fed False for inference
    :param sess: active TensorFlow session
    :return: (tuple): image embeddings and labels, or (None, None) if empty
    """
    emb_array = None
    label_array = None
    try:
        i = 0
        iterator = dataset.make_one_shot_iterator()
        batch = iterator.get_next()
        while True:
            batch_images, batch_labels = sess.run(batch)
            logger.info('Processing iteration {} batch of size: {}'
                        .format(i, len(batch_labels)))
            # (removed leftover debug print of the raw image tensors)
            emb = sess.run(
                embedding_layer,
                feed_dict={images_placeholder: batch_images,
                           phase_train_placeholder: False}
            )

            emb_array = np.concatenate([emb_array, emb]) \
                if emb_array is not None else emb
            label_array = np.concatenate([label_array, batch_labels]) \
                if label_array is not None else batch_labels
            i += 1

    except tf.errors.OutOfRangeError:
        # Normal termination: the one-shot iterator is exhausted.
        pass

    return emb_array, label_array
def _train_and_save_classifier(emb_array, label_array, class_names,
                               output_path):
    """
    Train a linear SVC on the embeddings and pickle it to *output_path*.

    The pickle contains the tuple ``(model, class_names)`` so the class
    labels can be recovered at prediction time.

    :return: the fitted sklearn model
    """
    logger.info('Training Classifier')
    model = SVC(kernel='linear', probability=True, verbose=False)

    # Fit the model according to the given training data.
    model.fit(emb_array, label_array)

    with open(output_path, 'wb') as outfile:
        pickle.dump((model, class_names), outfile)

    # Use the module-level logger (was the root `logging.info`) for
    # consistency with the rest of this module.
    logger.info('Saved classifier model to file "%s"' % output_path)
    return model
def _evaluate_classifier(emb_array, label_array, model, class_names):
    """
    Print per-sample predictions and overall accuracy for the trained model
    evaluated on the given embeddings and labels.
    """
    logger.info('Evaluating classifier on {} images'.format(len(emb_array)))

    predictions = model.predict_proba(emb_array)
    best_class_indices = np.argmax(predictions, axis=1)
    row_indices = np.arange(len(best_class_indices))
    best_class_probabilities = predictions[row_indices, best_class_indices]

    for i, predicted_idx in enumerate(best_class_indices):
        print('%4d Prediction: %s, Confidence: %.3f, Actual: %s' % (
            i, class_names[predicted_idx],
            best_class_probabilities[i], class_names[label_array[i]])
        )

    accuracy = np.mean(np.equal(best_class_indices, label_array))
    print('Accuracy: %.3f' % accuracy)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # CLI entry point; see main() for the semantics of each parameter.
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument('--model-path', type=str, action='store',
                        dest='model_path',
                        help='Path to model protobuf graph')
    parser.add_argument('--input-dir', type=str, action='store',
                        dest='input_dir',
                        help='Input path of data to train on')
    parser.add_argument('--output-path', type=str, action='store',
                        dest='output_path',
                        help='Path to output trained classifier model',
                        default='./output-classifier.pkl')
    parser.add_argument('--batch-size', type=int, action='store',
                        dest='batch_size', default=128,
                        help='Batch size to create embeddings')
    parser.add_argument('--num-threads', type=int, action='store',
                        dest='num_threads', default=16,
                        help='Number of threads to utilize for preprocessing.')
    parser.add_argument('--num-epochs', type=int, action='store',
                        dest='num_epochs', default=3,
                        help='Number of epochs for each image.')
    parser.add_argument('--split-ratio', type=float, action='store',
                        dest='split_ratio', default=0.7,
                        help='Ratio to split train/test dataset')
    parser.add_argument('--min-num-images-per-class', type=int, action='store',
                        default=10, dest='min_images_per_class',
                        help='Minimum number of images per class')
    args = parser.parse_args()

    main(input_dir=args.input_dir, model_path=args.model_path,
         output_path=args.output_path, batch_size=args.batch_size,
         num_threads=args.num_threads, num_epochs=args.num_epochs,
         min_images_per_class=args.min_images_per_class,
         split_ratio=args.split_ratio)
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 11 13:08:41 2016
@author: m.reuss
"""
import numpy as np
import CoolProp.CoolProp as CP
import pandas as pd
# Point CoolProp at a local REFPROP installation (Windows path).
CP.set_config_string(
    CP.ALTERNATIVE_REFPROP_PATH,
    'C:\\Program Files (x86)\\REFPROP\\')
# The pipeline sizing below transiently divides by zero / takes roots of
# negative numbers (yielding nan, handled explicitly); silence the warnings.
np.seterr(divide='ignore', invalid='ignore')
#%%H2 Constant Values at Normal Conditions
class H2Values (object):
    """Constant properties of hydrogen at normal conditions (273.15 K, 1.01325 bar)."""

    def __init__(self):
        self.M = 2.01588  # [kg/kmol], molar mass of hydrogen
        # [J/kg K], specific gas constant of hydrogen
        self.R_i = 4124.48269490247
        # [kg/m^3], density of hydrogen at normal conditions
        self.roh_n = 0.089882
        # [-] real-gas (compressibility) factor of hydrogen at normal conditions
        self.Z_n = 1.00062387922965
        self.LHV_n = 119.833493175241  # [MJ/kg], lower heating value
        self.g = 9.81  # [m/s^2], gravitational acceleration
        self.T_n = 273.15  # [K], normal temperature
        self.p_n = 1.01325e5  # [Pa], normal pressure
#%% Supporting Functions
def parabel(para, p):
    """Power-law fit ("parabola equation"): para[0]/1e6 * (p/1e5)**para[1] + offset.

    Used to approximate the dynamic viscosity of hydrogen as a function of
    pressure *p* in Pa; the constant offset is the zero-pressure value.
    """
    zero_pressure_offset = 8.79676122460001e-06
    scaled_pressure = p / 1e5
    return (para[0] / 1e6) * scaled_pressure ** para[1] + zero_pressure_offset
def square(para, p):
    """Power-law fit ("square equation"): para[0] * p**para[1] + para[2]."""
    scale, exponent, offset = para[0], para[1], para[2]
    return scale * p ** exponent + offset
def getDiaPress(demArr, distArr, p_1, p_min):
    '''
    Calculation of pipeline diameter and outlet pressure.

    Input Parameter:
        demArr = demand array in kg/day (only row 0 is read; shape (1, n)
                 is assumed -- TODO confirm against callers)
        distArr = distance array in km
        p_1 = inlet pressure in bar, indexed per (distance, demand) cell
        p_min = minimal outlet pressure in bar

    Returns (diameter, p_2, w_1): diameter in mm, outlet pressure in bar,
    inlet velocity in m/s.

    For each cell the smallest diameter class that keeps the outlet
    pressure above p_min is searched, starting from the class found for
    the previous cell (demand and distance only grow the diameter).
    '''
    # Fit coefficients for H2 properties at 20 degC: dynamic viscosity
    # (parabel fit), density and real-gas factor (square fits) vs pressure.
    V_para_parabel_20 = np.array([0.000125571318762396, 1.50162559878953])
    D_para_square_20 = np.array(
        [3.24859458677547e-06, 0.912591206027628, -0.166716162511868])
    Z_para_square_20 = np.array(
        [3.23101813258933e-09, 1.03880932425032, 1.00048097412768])
    T_m = np.array(20 + 273.15)  # K, mean gas temperature
    k = 0.02  # mm, pipe roughness
    # Candidate diameter classes in m (0.1 m .. 1.0 m in 1 mm steps)
    DK = np.linspace(0.1, 1.0, 901)

    propH2 = H2Values()

    demHourly = demArr / 24 / 3600  # kg/day to kg/s
    distMeter = distArr * 1000  # km to m
    p_1 = p_1 * 1e5  # bar to Pa

    ### Calculation ###
    res1 = len(distArr)
    res2 = demArr.shape[1]
    p_2 = np.zeros((res1, res2))
    w_1 = np.zeros((res1, res2))
    Re_1 = np.zeros((res1, res2))
    diameter = np.ones((res1, res2)) / 1000
    # BUGFIX: the diameter-class index must be an integer array; the
    # original float array raises IndexError when indexing DK on
    # modern NumPy (non-integer fancy indices were removed).
    x = np.zeros((res1, res2), dtype=int)

    for i1 in range(demArr.shape[1]):
        for i2 in range(len(distArr)):
            while p_2[i2, i1] <= p_min * 1e5 or np.isnan(p_2[i2, i1]):
                # BUGFIX: guard before indexing. The old `x == len(DK)`
                # check sat after the DK[x] lookup, so DK[len(DK)] would
                # raise IndexError before the break could ever fire.
                if x[i2, i1] >= len(DK):
                    # No diameter class satisfies the pressure constraint;
                    # keep the largest one and move on.
                    break
                # Norm volume flow
                V_n = demHourly[0, i1] / propH2.roh_n  # m^3/s (normal conditions)
                # Inlet density from the pressure fit
                roh_1 = square(D_para_square_20, p_1[i2, i1])  # kg/m3
                # Volume flow at entrance
                V_1 = demHourly[0, i1] / roh_1  # m^3/s
                # Inner diameter of the pipeline
                diameter[i2, i1] = DK[x[i2, i1]]  # m
                # Velocity at the entrance
                w_1[i2, i1] = V_1 / (np.pi * diameter[i2, i1]**2 / 4)
                # Dynamic viscosity at the inlet
                eta_1 = parabel(V_para_parabel_20, p_1[i2, i1])  # Pa*s
                # Kinematic viscosity
                nu_1 = eta_1 / roh_1  # m^2/s
                # Reynolds number at the inlet
                Re_1[i2, i1] = w_1[i2, i1] * diameter[i2, i1] / nu_1  # -
                # Pipe friction factor after Zanke at Re_1 (starting value)
                alpha = np.e**(-1 * np.e**(6.75 - 0.0025 * Re_1[i2, i1]))
                lambda_1 = (64 / Re_1[i2, i1]) * (1 - alpha) + alpha * (-2 * np.log10((2.7 * (np.log10(
                    Re_1[i2, i1]))**1.2 / Re_1[i2, i1]) + (k / (3.71 * 1000 * diameter[i2, i1]))))**(-2)  # -
                # Simplification: Re_1 = Re_m --> lambda_m = lambda_1
                lambda_m = lambda_1
                # Pipeline characteristic C_1
                # kg/(m s^2)=Pa
                C_1 = (lambda_1 * distMeter[i2] * roh_1 *
                       w_1[i2, i1]**2) / (diameter[i2, i1] * 2)
                # Outlet pressure assuming volume-preserving transport
                p_20 = p_1[i2, i1] - C_1  # Pa
                # Assumption: the mean pressure equals the outlet pressure
                # of the volume-preserving case
                p_m0 = p_20  # [Pa]
                # Assumption: the mean real-gas factor is evaluated at p_m0
                Z_m = square(Z_para_square_20, p_m0)
                # Mean compressibility number
                K_m = Z_m / propH2.Z_n
                # Pipeline characteristic C
                C = (lambda_m * 16 * propH2.roh_n * T_m * propH2.p_n *
                     K_m) / (np.pi**2 * propH2.T_n)  # kg Pa/m^3
                # Outlet pressure (nan when the argument goes negative)
                p_2[i2, i1] = (p_1[i2, i1]**2 - (C * distMeter[i2]
                                                 * V_n**2) / diameter[i2, i1]**5)**0.5  # Pa
                if p_2[i2, i1] <= p_min * 1e5 or np.isnan(p_2[i2, i1]):
                    # Try the next larger diameter class and warm-start all
                    # remaining (larger demand/distance) cells from it.
                    x[i2, i1] += 1
                    x[i2:, i1:] = x[i2, i1]

    p_2 = p_2 * 1e-5
    diameter = diameter * 1000
    return diameter, p_2, w_1  # Diameter in mm and outlet pressure in bar
# %% Compressor Energy Demand per Stage (with isentropic coefficient)
# direct Method from Tietze
def getCompressionEnergyStage(p_1, p_2, T_1, eta_is_S):
    '''
    Specific hydrogen compression work and outlet temperature for one stage.

    Input:
        p_1 = inlet pressure in bar
        p_2 = outlet pressure in bar
        T_1 = inlet temperature in K
        eta_is_S = isentropic efficiency of the stage

    Returns [w_spec, T_2]: specific work in kWh/kg and the actual outlet
    temperature in K.
    '''
    fluid = 'HYDROGEN'
    # fluid='REFPROP::HYDROGEN'

    # Entropy at the inlet (reference for the isentropic path), J/(kg K)
    inlet_entropy = CP.PropsSI('S', 'T', T_1, 'P', p_1 *
                               100000, fluid)
    # Enthalpy at the inlet, J/kg
    h_in = CP.PropsSI('H', 'P', p_1 * 100000, 'S', inlet_entropy, fluid)
    # Enthalpy after an ideal (isentropic) compression, J/kg
    h_out_isentropic = CP.PropsSI('H', 'P', p_2 * 100000, 'S', inlet_entropy,
                                  fluid)
    # Ideal specific compression work for the real gas, kJ/kg
    w_isentropic = (h_out_isentropic - h_in) / 1000
    # Actual specific work accounting for the isentropic efficiency, kJ/kg
    w_actual = w_isentropic / eta_is_S
    # Convert to kWh/kg
    w_spec = w_actual / 3600
    # Actual outlet enthalpy, J/kg, and the resulting outlet temperature
    h_out = w_actual * 1000 + h_in
    T_2 = CP.PropsSI('T', 'P', p_2 * 100000, 'H', h_out,
                     fluid)
    return [w_spec, T_2]
# %% CompressionDemand
def getCompressionEnergy(
        p_1,
        p_2,
        demand,
        T_1=20,
        eta_isen=0.88,
        eta_mech=0.95,
        p_highlow_max=2.1,
        max_stages=2):
    '''
    Total specific hydrogen compression work and electric power demand.

    Input:
        p_1 = inlet pressure in bar
        p_2 = outlet pressure in bar
        demand = hydrogen demand in kg/day
        T_1 = inlet temperature in deg C
        eta_isen = isentropic efficiency per stage
        eta_mech = mechanical efficiency
        p_highlow_max = maximum pressure ratio per stage
        max_stages = maximum number of compressor stages

    Returns (w_total, P_el): specific work in kWh/kg and electric power in kW.
    '''
    # eta_isen=0.92-p_2/880*(0.24)
    if p_2 <= p_1:
        # No compression required.
        return 0, 0

    # Stages needed to keep each stage's pressure ratio <= p_highlow_max,
    # capped at the available stage count.
    n_stages = int(np.ceil(np.log(p_2 / p_1) / np.log(p_highlow_max)))
    n_stages = min(n_stages, max_stages)
    stage_ratio = (p_2 / p_1)**(1 / n_stages)

    # Initialize per-stage state
    p_in = np.zeros(n_stages)
    p_out = np.zeros(n_stages)
    T_in = np.zeros(n_stages)
    T_out = np.zeros(n_stages)
    w_stage = np.zeros(n_stages)

    # Stage-dependent calculation with intercooling to 40 degC
    for stage in range(n_stages):
        if stage == 0:
            p_in[stage] = p_1
            T_in[stage] = 273.15 + T_1
        else:
            p_in[stage] = p_out[stage - 1]
            T_in[stage] = 273.15 + 40.
        p_out[stage] = p_in[stage] * stage_ratio
        w_stage[stage], T_out[stage] = getCompressionEnergyStage(p_in[stage],
                                                                 p_out[stage],
                                                                 T_in[stage],
                                                                 eta_isen)

    T_out = T_out - 273.15
    # Shaft-side specific work including mechanical losses, kWh/kg
    w_mech = np.sum(w_stage) / eta_mech
    # Shaft power in kW (demand kg/day -> kg/h)
    P_shaft = demand * w_mech / 24
    # Empirical motor efficiency as a function of shaft power
    eta_motor = 8e-5 * np.log(P_shaft)**4 - 0.0015 * np.log(P_shaft)**3 + \
        0.0061 * np.log(P_shaft)**2 + 0.0311 * np.log(P_shaft) + 0.7617
    P_el = P_shaft / eta_motor
    w_total = w_mech / eta_motor
    return w_total, P_el
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import lib.maths_util as mathlib
from lib.colors import ColorsBook as color
import time
class NeuralNetwork():
    """Minimal fully-connected classifier: one ReLU hidden layer and a
    softmax output, trained by mini-batch gradient descent with L2
    regularization (lambda = 0.01).

    Note: although ``layers`` may list any number of sizes, the forward and
    backward passes only use the first three entries (input/hidden/output).
    """

    def __init__(self, layers, batch_size, epochs, learning_rate):
        # layers: list of layer sizes, e.g. [784, 128, 10]
        self.layers = layers
        self.batch_size = batch_size
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.weights = []
        self.biases = []
        self.loss = []  # per-iteration loss history, appended by fit()
        # Standard-normal weight init, zero biases, one pair per layer gap.
        for i in range(len(layers) - 1):
            self.weights.append(np.random.normal(
                0, 1, [self.layers[i], self.layers[i+1]]))
            self.biases.append(np.zeros((1, self.layers[i+1])))
        self.iteration_time = []
        self.epochs_time = []

    def feed_forward(self, inputs):
        """Return (hidden activations, softmax probabilities) for a batch."""
        layer0 = inputs
        layer1 = mathlib.relu(np.dot(layer0, self.weights[0]) + self.biases[0])
        layer2 = mathlib.softmax(
            np.dot(layer1, self.weights[1]) + self.biases[1])
        return layer1, layer2

    def loss_function(self, predicted_outputs, outputs):
        """Cross-entropy loss plus L2 penalty on both weight matrices."""
        loss = mathlib.cross_entropy(predicted_outputs, outputs)
        loss += mathlib.regularization(0.01, self.weights[0], self.weights[1])
        return loss

    def accuracy_function(self, predicted_outputs, outputs):
        """Fraction of rows whose argmax matches the integer labels."""
        acc = float(np.sum(np.argmax(predicted_outputs, 1) == outputs)) / \
            float(len(outputs))
        return acc

    def back_propagate(self, inputs, hidden_layer, predicted_outputs, outputs):
        """One SGD step: softmax/cross-entropy gradients + L2, applied in place."""
        delta_y = (predicted_outputs - outputs) / predicted_outputs.shape[0]
        delta_hidden_layer = np.dot(delta_y, self.weights[1].T)
        delta_hidden_layer[hidden_layer <= 0] = 0  # ReLU derivative
        w2_grad = np.dot(hidden_layer.T, delta_y)
        b2_grad = np.sum(delta_y, axis=0, keepdims=True)
        w1_grad = np.dot(inputs.T, delta_hidden_layer)
        b1_grad = np.sum(delta_hidden_layer, axis=0, keepdims=True)
        # L2 regularization gradient (same lambda 0.01 as loss_function).
        w2_grad += 0.01 * self.weights[1]
        w1_grad += 0.01 * self.weights[0]
        self.weights[0] -= self.learning_rate * w1_grad
        self.biases[0] -= self.learning_rate * b1_grad
        self.weights[1] -= self.learning_rate * w2_grad
        self.biases[1] -= self.learning_rate * b2_grad

    def _loss_color(self, loss):
        """ANSI color for the printed loss (thresholds unchanged)."""
        if loss > 70:
            return color.FAIL
        if 10 < loss < 70:
            return color.WARNING
        if 1 < loss <= 10:
            return color.OKGREEN
        # Also covers loss == 70 and loss <= 1, matching the original chain.
        return color.OKBLUE

    def fit(self, inputs, outputs, timing=False, clear=False):
        """Train for self.epochs epochs over mini-batches of self.batch_size.

        timing: record per-iteration, per-epoch and total wall-clock times.
        clear: clear the terminal before each progress line.
        """
        # time.clock() was removed in Python 3.8; time.perf_counter() is the
        # documented replacement for measuring elapsed intervals.
        if timing:
            stamp_total = time.perf_counter()
        for epoch in range(self.epochs):
            if timing:
                stamp_epoch = time.perf_counter()
            it = 0
            while it < len(inputs):
                if timing:
                    stamp_it = time.perf_counter()
                inputs_batch = inputs[it:it+self.batch_size]
                outputs_batch = outputs[it:it+self.batch_size]
                hidden_layer, output_layer = self.feed_forward(inputs_batch)
                loss = self.loss_function(output_layer, outputs_batch)
                self.loss.append(loss)
                self.back_propagate(inputs_batch, hidden_layer,
                                    output_layer, outputs_batch)
                loss_str = ("- - - - " + color.BOLD + "Epoch: {:d}/{:d}\t" +
                            self._loss_color(loss) + "Loss: {:.2f}\t").format(
                    epoch+1, self.epochs, loss) + color.ENDC
                epoch_prog, total_prog = self.get_progress(inputs, it)
                progress_str = color.BOLD + 'Epoch Progress : |' + color.OKBLUE + epoch_prog + color.ENDC + '|\t' + \
                    color.BOLD + 'Total Progress : |' + color.OKBLUE + \
                    total_prog + color.ENDC + color.BOLD + '|' + color.ENDC
                if clear:
                    time.sleep(0.001)
                    print(chr(27) + "[2J")  # ANSI clear-screen escape
                print(loss_str + progress_str)
                it += self.batch_size
                if timing:
                    self.iteration_time.append(
                        time.perf_counter() - stamp_it)
            if timing:
                self.epochs_time.append(time.perf_counter() - stamp_epoch)
        if timing:
            self.total_time = time.perf_counter() - stamp_total

    def get_progress(self, inputs, it):
        """Return (epoch_bar, total_bar) text progress bars for iteration it."""
        epoch_bar = ''
        total_bar = ''
        total_it = len(inputs)/self.batch_size
        epoch_bar_ticks = (it / (total_it)) * 10
        total_bar_ticks = (it / (total_it * self.epochs)) * 3
        while epoch_bar_ticks > 0:
            epoch_bar += '█'
            epoch_bar_ticks -= 1
        while len(epoch_bar) < 10:
            epoch_bar += ' '
        while total_bar_ticks > 0:
            total_bar += '█'
            total_bar_ticks -= 1
        while len(total_bar) < 3:
            total_bar += ' '
        return epoch_bar, total_bar

    def predict(self, inputs):
        """Softmax probabilities for inputs (hidden activations discarded)."""
        hidden_layer, prediction = self.feed_forward(inputs)
        return prediction

    def accuracy_test(self, inputs, result):
        """Print (color-coded) and return the accuracy percentage on a test set."""
        prediction = self.predict(inputs)
        # Reuse accuracy_function instead of duplicating the computation.
        acc = self.accuracy_function(prediction, result)
        if (acc*100) > 85:
            print(color.UNDERLINE + color.BOLD + '- - - - Test accuracy : ' +
                  color.OKGREEN + '{:.2f}% - - - -'.format(acc*100) + color.ENDC)
        elif (acc*100) < 50:
            print(color.UNDERLINE + color.BOLD + '- - - - Test accuracy : ' + color.FAIL +
                  '{:.2f}% - - - -'.format(acc*100) + color.ENDC)
        else:
            print(color.HEADER + color.BOLD + '- - - - Test accuracy : ' +
                  color.WARNING + '{:.2f}% - - - -'.format(acc*100) + color.ENDC)
        return acc*100

    def display_time(self):
        """Print timing statistics; requires a prior fit(..., timing=True)."""
        average_it_stamp = sum(
            self.iteration_time) / len(self.iteration_time)
        average_epochs_stamp = sum(
            self.epochs_time) / len(self.epochs_time)
        print(color.BOLD + "\n= = = = Iteration Average Duration:" +
              color.UNDERLINE + " {:.2f} secs.".format(average_it_stamp))
        print(color.BOLD + "= = = = Epoch Average Duration:" +
              color.UNDERLINE + " {:.2f} secs.".format(average_epochs_stamp))
        print(color.BOLD + "= = = = Total Duration:" + color.UNDERLINE +
              " {:.2f} secs.".format(self.total_time))
|
|
import h5py
import numpy as np
import sys

# Dump the components of a CSR-like matrix stored in an HDF5 group to raw
# binary files <prefix>.shape / .indices / .indptr / .data.
# Usage: script.py <infile.h5> [group-key=matrix] [out-prefix=data]
infname = sys.argv[1]
key = sys.argv[2] if sys.argv[2:] else "matrix"
prefix = "data" if not sys.argv[3:] else sys.argv[3]

with h5py.File(infname, "r") as h5file:
    print(h5file.keys())
    group = h5file[key]
    for comp in ["shape", "indices", "indptr", "data"]:
        # ndarray.tofile writes raw bytes, so the output stream must be
        # opened in binary mode ("w" text mode corrupts the data on
        # platforms with newline translation). Also use a distinct name
        # instead of shadowing the HDF5 file handle.
        with open(prefix + '.' + comp, "wb") as out:
            np.array(group[comp]).tofile(out)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
def build_curve_points(descriptors):
    '''
    Draw the points given by the descriptors array in the
    cartesian plane (real part on x, imaginary part on y).
    '''
    N = len(descriptors)  # size of the descriptors vector
    for x in range(N):
        # NOTE: the explicit color kwarg overrides the red of the 'ro-' fmt.
        plt.plot([descriptors[x].real], [descriptors[x].imag], 'ro-', label='python', color='blue')
    # Fix: the original called 'Imaginário'.decode('utf8'), which raises
    # AttributeError on Python 3 where str has no decode() method.
    plt.ylabel('Imaginário')
    plt.xlabel('Real')
    # limiting the axes to the double of the axes extrema
    boundary = np.amax(plt.axis())
    plt.grid(True, which='both')
    # NOTE(review): limits are passed as (max, min, max, min), which inverts
    # both axes -- looks intentional for this plot; confirm.
    plt.axis((boundary + 1, -boundary - 1, boundary + 1, -boundary - 1))
    plt.axhline(0, color='black')
    plt.axvline(0, color='black')
    plt.show()
# Read the descriptors from stdin and parse them into complex numbers.
# Fix: input() returns a single string; the original passed that string
# straight to np.fft.ifft and build_curve_points, both of which need
# complex values. complex() parses tokens like "1+1j" once whitespace
# is stripped.
raw = input("Enter the descriptors spaced by comma \ne.g. 1+1j, -1+1j, -1-1j, 1-1j \n")
descriptors = [complex(token.strip()) for token in raw.split(',')]
result = np.fft.ifft(descriptors)
print('The inverse Fourier Transform of ', descriptors, ' is:')
print(result)
build_curve_points(descriptors)
|
|
from math import pi
from numpy import sin,cos
from openmdao.main.api import Component
from openmdao.main.datatypes.api import Float
class SpiralComponent(Component):
    """OpenMDAO component with two bounded inputs (x, y) and two scalar
    outputs mixing sin/cos terms of those inputs."""
    # Inputs, both restricted to [0.75, 5*pi].
    x = Float(iotype="in", low=0.75, high=5.*pi)
    y = Float(iotype="in", low=0.75, high=5.*pi)
    # Outputs, recomputed on every execute().
    f1_xy = Float(0.,iotype="out")
    f2_xy = Float(0.,iotype="out")
    def execute(self):
        """Evaluate both outputs from the current x and y values."""
        # f2 swaps the sin/cos roles of f1.
        self.f1_xy = cos(self.x)/self.x + sin(self.y)/self.y
        self.f2_xy = sin(self.x)/self.x + cos(self.y)/self.y
|
|
#!/usr/bin/env python
# Render the LUH2 v2h "secma" (secondary mean age) variable as an MP4
# animation, one frame per year.
from copy import copy
import matplotlib
import netCDF4
import numpy
matplotlib.use("Agg")  # must precede pyplot import for headless rendering
import matplotlib.animation as animation
import matplotlib.colors as colors
import matplotlib.pyplot as plt

FFMpegWriter = animation.writers['ffmpeg']
metadata = dict(title='Secondary Mean Age', artist='LUH2 v2h',
                comment='LUH2 v2h (historical)')

# Colormap: red above range, green below range, black for masked cells.
palette = copy(plt.cm.viridis)
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('k', 1.0)

fname = '../../data/luh2_v2/historical/states.nc'
vname = 'secma'
nc_ds = netCDF4.Dataset(fname)
years = nc_ds.variables['time'][:]
# The historical time axis is stored as an offset from year 850.
if years[0] < 850:
    years = [y + 850 for y in years]

writer = FFMpegWriter(fps=10, metadata=metadata)
fig = plt.figure(figsize=(8, 4))
ax1 = plt.axes(frameon=False)
ax1.axes.get_yaxis().set_visible(False)
ax1.axes.get_xaxis().set_visible(False)
plt.tight_layout()
plt.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
# Fix: dict views use .values() on Python 3 (itervalues() is Python 2 only).
for spine in ax1.spines.values():
    spine.set_visible(False)
img = plt.imshow(nc_ds.variables[vname][0], cmap=palette,
                 norm=colors.Normalize(vmin=0.0, vmax=600))
text = plt.text(0.5, 0.1, '', ha='center', va='center',
                color='y', fontsize=24, transform=ax1.transAxes)
with writer.saving(fig, "writer_test.mp4", 180):
    for i in range(len(years)):
        # Fix: the original line was a syntax error: print(years[i]0
        print(years[i])
        data = nc_ds.variables[vname][i]
        img.set_array(data)
        text.set_text(str(int(years[i])))
        writer.grab_frame()
|
|
#!/usr/bin/python3
'''Copyright (c) 2018 Mozilla
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# Train a LPCNet model (note not a Wavenet model)
import lpcnet
import sys
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from ulaw import ulaw2lin, lin2ulaw
import keras.backend as K
import h5py
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# TensorFlow 1.x session setup: cap the fraction of GPU memory this process
# may reserve so several jobs can share one device.
config = tf.ConfigProto()
# use this option to reserve GPU memory, e.g. for running more than
# one thing at a time. Best to disable for GPUs with small memory
config.gpu_options.per_process_gpu_memory_fraction = 0.44
set_session(tf.Session(config=config))
nb_epochs = 120
# Try reducing batch_size if you run out of memory on your GPU
batch_size = 64
model, _, _ = lpcnet.new_lpcnet_model()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.summary()
# Command line: <feature file> <pcm file>
feature_file = sys.argv[1]
pcm_file = sys.argv[2] # 16 bit unsigned short PCM samples
frame_size = 160
nb_features = 55
nb_used_features = model.nb_used_features
feature_chunk_size = 15
# One training chunk = 15 feature frames = 2400 PCM samples.
pcm_chunk_size = frame_size*feature_chunk_size
# u for unquantised, load 16 bit PCM samples and convert to mu-law
udata = np.fromfile(pcm_file, dtype='int16')
data = lin2ulaw(udata)
nb_frames = len(data)//pcm_chunk_size
features = np.fromfile(feature_file, dtype='float32')
# limit to discrete number of frames
data = data[:nb_frames*pcm_chunk_size]
udata = udata[:nb_frames*pcm_chunk_size]
features = features[:nb_frames*feature_chunk_size*nb_features]
# Noise injection: the idea is that the real system is going to be
# predicting samples based on previously predicted samples rather than
# from the original. Since the previously predicted samples aren't
# expected to be so good, I add noise to the training data. Exactly
# how the noise is added makes a huge difference
# in_data is the signal delayed by one sample (the "previous sample" input).
in_data = np.concatenate([data[0:1], data[:-1]]);
# Noise schedule over the file: first fifth clean, then decreasing
# integer-valued noise amplitudes (+-3, +-2, +-1).
noise = np.concatenate([np.zeros((len(data)*1//5)), np.random.randint(-3, 3, len(data)*1//5), np.random.randint(-2, 2, len(data)*1//5), np.random.randint(-1, 1, len(data)*2//5)])
#noise = np.round(np.concatenate([np.zeros((len(data)*1//5)), np.random.laplace(0, 1.2, len(data)*1//5), np.random.laplace(0, .77, len(data)*1//5), np.random.laplace(0, .33, len(data)*1//5), np.random.randint(-1, 1, len(data)*1//5)]))
in_data = in_data + noise
# Keep the noisy signal inside the valid 8-bit mu-law range.
in_data = np.clip(in_data, 0, 255)
features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
# Note: the LPC predictor output is now calculated by the loop below, this code was
# for an ealier version that implemented the prediction filter in C
upred = np.zeros((nb_frames*pcm_chunk_size,), dtype='int16')
# Use 16th order LPC to generate LPC prediction output upred[] and (in
# mu-law form) pred[]
pred_in = ulaw2lin(in_data)
for i in range(2, nb_frames*feature_chunk_size):
    upred[i*frame_size:(i+1)*frame_size] = 0
    # The last 16 entries of each feature frame hold the LPC coefficients.
    for k in range(16):
        upred[i*frame_size:(i+1)*frame_size] = upred[i*frame_size:(i+1)*frame_size] - \
            pred_in[i*frame_size-k:(i+1)*frame_size-k]*features[i, nb_features-16+k]
pred = lin2ulaw(upred)
in_data = np.reshape(in_data, (nb_frames, pcm_chunk_size, 1))
in_data = in_data.astype('uint8')
# LPC residual, which is the difference between the input speech and
# the predictor output, with a slight time shift this is also the
# ideal excitation in_exc
out_data = lin2ulaw(udata-upred)
in_exc = np.concatenate([out_data[0:1], out_data[:-1]]);
out_data = np.reshape(out_data, (nb_frames, pcm_chunk_size, 1))
out_data = out_data.astype('uint8')
in_exc = np.reshape(in_exc, (nb_frames, pcm_chunk_size, 1))
in_exc = in_exc.astype('uint8')
features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
features = features[:, :, :nb_used_features]
# NOTE(review): features 18..35 are zeroed before training -- presumably an
# unused band of the feature vector; confirm against the feature extractor.
features[:,:,18:36] = 0
pred = np.reshape(pred, (nb_frames, pcm_chunk_size, 1))
pred = pred.astype('uint8')
# Map feature 36 to an integer period in samples (50*f + 100) -- assumes it
# is the normalized pitch parameter; TODO confirm.
periods = (50*features[:,:,36:37]+100).astype('int16')
in_data = np.concatenate([in_data, pred], axis=-1)
# dump models to disk as we go
checkpoint = ModelCheckpoint('lpcnet9b_384_10_G16_{epoch:02d}.h5')
#model.load_weights('wavenet4f2_30.h5')
model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.fit([in_data, in_exc, features, periods], out_data, batch_size=batch_size, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, 0.1)])
|
|
import os
import glob
import progressbar
import numpy as np
from numpy import genfromtxt
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
kLatentValuesPath = './training_results/0000-00-00_00-00-00/latents'

if __name__ == "__main__":
    # Collect the saved latent vectors in deterministic (sorted) order.
    file_path_list = sorted(glob.glob(os.path.join(kLatentValuesPath, '*.npy')))
    print("Read latent vectors from %s" % kLatentValuesPath)
    read_data = []
    # Show progress while loading each .npy file from disk.
    with progressbar.ProgressBar(max_value=len(file_path_list)) as bar:
        for index, npy_path in enumerate(file_path_list):
            read_data.append(np.load(npy_path))
            bar.update(index)
    assert(len(read_data) > 0)
    # Stack the per-file vectors into a single (n_files, ...) array.
    latent_vectors = np.stack(read_data, axis=0)
    print(latent_vectors.shape)
    # do clustering
|
|
import numpy as np
import matplotlib.pyplot as plt
#%%
# Comparison data; values are scaled by 1e-6 (the CSV's units are not visible
# here -- presumably Wh -> MWh or similar; confirm against the source file).
cases_data = np.genfromtxt("sources/cases_comparation.csv", delimiter = ',', skip_header = 1)[:, 1:] / 1000000
cases = ['GEMASOLAR', 'Base Case', 'Evaporative Cooling', 'Dry Cooling', 'Once Through Cooling',
         'MED Cooling']
# NOTE(review): 'Agu' looks like a typo for 'Aug'; it is a runtime plot label,
# so it is left untouched here -- fix deliberately if desired.
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Agu', 'Sep', 'Oct', 'Nov', 'Dec']
def autolabel(rects):
    """Write each bar's height as a text label just above the bar.

    Relies on the module-level ``ax`` of the figure currently being built.
    """
    for bar in rects:
        bar_top = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2
        ax.annotate('{}'.format(bar_top),
                    xy=(x_center, bar_top),
                    xytext=(0, 2),  # nudge the label 2 points upward
                    textcoords="offset points",
                    ha='center', va='bottom', fontsize=16)
#%%
def autolabel_2(rects, bar_label):
    """Place the matching entry of bar_label at the vertical middle of each bar.

    Relies on the module-level ``ax`` of the figure currently being built.
    """
    for idx, bar in enumerate(rects):
        x_center = bar.get_x() + bar.get_width() / 2.
        ax.text(x_center, 0.5 * bar.get_height(),
                bar_label[idx],
                ha='center', va='bottom', rotation=0, fontsize=16)
#%%
# --- Annual energy: gross/net generation bars + capacity-factor line -------
bar_width = 0.5
gross_gwh = [127.18, 127.18, 120.79, 132.56, 103.81]
net_gwh = [127.18 - 1.5, 127.18 - 5, 120.79 - 3.81, 132.56 - 4.3, 103.81 - 27.9]
capacity_factor = [72.1, 70.1, 67.1, 73.6, 51.9]
bar_pos = np.arange(len(cases[1:]))

fig, ax = plt.subplots(figsize=(12.5, 6))
cf_axis = ax.twinx()
gross_bars = ax.bar(bar_pos, gross_gwh, width=bar_width, color='tab:blue',
                    label='Gross Energy')
net_bars = ax.bar([p + 0.15 * bar_width for p in bar_pos], net_gwh,
                  width=0.7 * bar_width, color='tab:olive', hatch='//',
                  alpha=0.5, edgecolor='w', label='Net Energy')
cf_axis.plot(bar_pos, capacity_factor, '-', marker='o', ms=10,
             color='tab:orange')

ax.set_ylabel('Energy [GWh]', fontsize=16)
ax.set_xlabel('Cooling Scenarios', fontsize=16)
ax.set_xticks(bar_pos)
ax.set_yticks(np.arange(0, 160, 20))
ax.set_ylim(0, 160)
ax.set_xlim(-0.5, 4.5)
ax.set_xticklabels(cases[1:], fontsize=14)
ax.tick_params(axis='y', which='major', labelsize=14)
ax.grid(axis='y')
ax.legend(loc=1, fontsize='15')
cf_axis.set_ylim(0, 100)
cf_axis.set_ylabel('Capacity Factor [%]', fontsize=16, color='tab:orange')

autolabel(gross_bars)
autolabel_2(net_bars, net_gwh)
fig.tight_layout()
plt.savefig('graphs/annual_energy.png', format='png', bbox_inches='tight')
plt.show()
#%%
def autolabel_1(rects, bar_label):
    """Print each bar's label (cast to int) slightly above the bar top.

    Relies on the module-level ``ax`` of the figure currently being built.
    """
    for idx, bar in enumerate(rects):
        bar_top = bar.get_height()
        ax.text(bar.get_x() + bar.get_width() / 2., 1.04 * bar_top,
                bar_label[idx].astype(int),
                ha='center', va='bottom', rotation=0, fontsize=11)
#%%
def autolabel_3(rects, bar_label):
    """Print each bar's label (cast to int) at the vertical middle of the bar.

    Relies on the module-level ``ax`` of the figure currently being built.
    """
    for idx, bar in enumerate(rects):
        ax.text(bar.get_x() + bar.get_width() / 2., 0.5 * bar.get_height(),
                bar_label[idx].astype(int),
                ha='center', va='bottom', rotation=0, fontsize=11)
#%%
# --- Annual water balance per cooling scenario (thousands of m^3) ----------
sea_req = np.round(np.array([0, 591300, 0, 7782518, 7064064]) / 1000, 0)
sea_ret = np.round(np.array([0, 212115, 0, 7782518, 4221474]) / 1000, 0)
fresh_used = np.round(np.array([379185, 379185, 14005, 14004, 13811]) / 1000, 0)
fresh_made = np.round(np.array([0, 0, 0, 0, 2842590]) / 1000, 0)

x = np.arange(len(cases[1:]))  # label locations
width = 0.25  # width of each bar

fig, ax = plt.subplots(figsize=(12, 6))
req_bars = ax.bar(x - width, sea_req, width, color='tab:blue',
                  label='Sea Water Requirement')
ret_bars = ax.bar([p - 0.85 * width for p in x], sea_ret, width=0.7 * width,
                  color='tab:olive', hatch='//', alpha=0.5, edgecolor='w',
                  label='Sea Water Returned')
use_bars = ax.bar(x, fresh_used, width, color='tab:orange',
                  label='Fresh Water Consumption')
prod_bars = ax.bar(x + width, fresh_made, width, color='tab:green',
                   label='Fresh Water Production')

# Labels, ticks and legend.
ax.set_ylabel('Water Volume [k m$^3$]', fontsize=16)
ax.set_xlabel('Cooling Scenarios', fontsize=16)
ax.set_xticks(x)
ax.set_xticklabels(cases[1:], fontsize=14)
ax.grid(axis='y')
ax.legend(loc=2, fontsize='15')
ax.set_ylim(0, 8700)
ax.tick_params(axis='y', which='major', labelsize=14)

autolabel_1(req_bars, sea_req)
autolabel_3(ret_bars, sea_ret)
autolabel_1(use_bars, fresh_used)
autolabel_1(prod_bars, fresh_made)
fig.tight_layout()
plt.savefig('graphs/annual_water_utilization.png', format='png', bbox_inches='tight')
plt.show()
#%%
# --- Economics: LCOE bars with NPV and IRR on two extra y-axes -------------
npv = np.array([17.806, 22.441, 17.106, 27.656, 22.635])
lcoe_real = np.array([89.7, 86.0, 89.3, 83.3, 76.0])
IRR = np.array([8.8, 9.2, 8.7, 9.7, 9.1])
x = np.arange(len(cases[1:]))  # label locations
width = 0.3  # bar width

fig, ax = plt.subplots(figsize=(14, 7))
fig.subplots_adjust(right=0.75)
twin1 = ax.twinx()
twin2 = ax.twinx()
# Push the second twin axis outward so its ticks do not overlap twin1's.
twin2.spines['right'].set_position(("axes", 1.1))

p1 = ax.bar(x, lcoe_real, width, color='tab:blue', label='LCOE')
p2, = twin1.plot(x, npv, ls='-', lw=2, marker='s', ms=10,
                 color='tab:orange', label='NPV')
p3, = twin2.plot(x, IRR, ls='--', lw=2, marker='^', ms=10,
                 color='tab:green', label='IRR')

ax.set_xlim(-0.5, 4.5)
twin1.set_ylim(0, 31.5)
twin2.set_ylim(0, 12)
ax.set_ylabel('LCOE [USD/MWh]', fontsize=16)
ax.set_xlabel('Cooling Scenarios', fontsize=18)
twin1.set_ylabel('NPV [M USD]', fontsize=16)
twin2.set_ylabel('IRR [%]', fontsize=16)
# Color every axis label and its ticks to match the corresponding series.
ax.yaxis.label.set_color('tab:blue')
twin1.yaxis.label.set_color(p2.get_color())
twin2.yaxis.label.set_color(p3.get_color())
ax.set_xticks(x)
ax.set_yticks(np.arange(0, 120, 10))
ax.set_xticklabels(cases[1:], fontsize=14)
ax.grid(axis='y')
tkw = dict(size=4, width=1.5)
ax.tick_params(axis='y', colors='tab:blue', labelsize=14, **tkw)
twin1.tick_params(axis='y', colors=p2.get_color(), labelsize=14, **tkw)
twin2.tick_params(axis='y', colors=p3.get_color(), labelsize=14, **tkw)
ax.tick_params(axis='x', **tkw)
autolabel_2(p1, lcoe_real)
ax.legend(handles=[p1, p2, p3], fontsize=14)
fig.tight_layout()
plt.savefig('graphs/lcoe_cases.png', format='png', bbox_inches='tight')
plt.show()
#%%
# --- Synergy: NPV in M USD under different PPA sharing levels --------------
csp_100 = np.round(np.array([22440615, 17106067, 27656414, 22635306]) / 1000000, 1)
csp_75 = np.round(np.array([18112983, 12962619, 23113427, 19946564]) / 1000000, 1)
csp_50 = np.round(np.array([13785351, 8819172, 18570441, 17257821]) / 1000000, 1)
csp_25 = np.round(np.array([9457719, 4675724, 14027454, 14569079]) / 1000000, 1)
csp_0 = np.round(np.array([5130087, 532277, 9484468, 11880336]) / 1000000, 1)

indices = cases[2:]  # same slice as cases[1:][1:]
x = np.arange(len(indices))  # label locations
width = .19 * 5  # width of the wide background bar

fig, ax = plt.subplots(figsize=(14, 7))
# One wide background bar plus five narrow bars, one per PPA sharing level.
mine = ax.bar(x, csp_100, width, label='Mine Benefit')
case_a = ax.bar(x - 2 * width / 5, csp_100, width / 5, hatch='//',
                alpha=0.6, edgecolor='w', label=r'PPA = $\Delta_B$ + 89.7')
case_b = ax.bar(x - width / 5, csp_75, width / 5, hatch='//',
                alpha=0.6, edgecolor='w', label=r'PPA = $\Delta_B$ 75% + 89.7')
case_c = ax.bar(x, csp_50, width / 5, hatch='//',
                alpha=0.6, edgecolor='w', label=r'PPA = $\Delta_B$ 50% + 89.7')
case_d = ax.bar(x + width / 5, csp_25, width / 5, hatch='//',
                alpha=0.6, edgecolor='w', label=r'PPA = $\Delta_B$ 25% + 89.7')
case_e = ax.bar(x + 2 * width / 5, csp_0, width / 5, hatch='//',
                alpha=0.6, edgecolor='w', label=r'PPA = 89.7')

# Labels, ticks, legend.
ax.set_ylabel('NPV [M USD]', fontsize=16)
ax.set_xlabel('Cooling Scenarios', fontsize=18)
ax.set_xticks(x)
ax.set_xticklabels(indices, fontsize=16)
ax.legend(fontsize=15)
ax.grid(axis='y')
autolabel_2(case_a, csp_100)
autolabel_2(case_b, csp_75)
autolabel_2(case_c, csp_50)
autolabel_2(case_d, csp_25)
autolabel_2(case_e, csp_0)
fig.tight_layout()
plt.savefig('graphs/synergy.png', format='png', bbox_inches='tight')
plt.show()
|
|
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import lines
from matplotlib.font_manager import FontProperties
from statannotations.format_annotations import pval_annotation_text, simple_text
from statannotations.stats.ComparisonsCorrection import ComparisonsCorrection
from statannotations.stats.StatResult import StatResult
from statannotations.stats.tests import stat_test, IMPLEMENTED_TESTS
from statannotations.stats.utils import assert_valid_correction_name
from statannotations.utils import assert_is_in, remove_null
# Sentinel default value: lets callers pass None explicitly while the
# function can still detect that an argument was omitted.
DEFAULT = object()
# noinspection PyProtectedMember
def add_stat_annotation(ax, plot='boxplot', data=None, x=None, y=None,
hue=None, units=None, order=None, hue_order=None,
box_pairs=None, width=0.8, perform_stat_test=True,
pvalues=None, test_short_name=None, test=None,
text_format='star', pvalue_format_string=DEFAULT,
text_annot_custom=None, loc='inside',
show_test_name=True, pvalue_thresholds=DEFAULT,
stats_params: dict = None,
comparisons_correction='bonferroni',
num_comparisons='auto', use_fixed_offset=False,
line_offset_to_box=None, line_offset=None,
line_height=0.02, text_offset=1, color='0.2',
linewidth=1.5, fontsize='medium', verbose=1):
"""
Optionally computes statistical test between pairs of data series, and add statistical annotation on top
of the boxes/bars. The same exact arguments `data`, `x`, `y`, `hue`, `order`, `width`,
`hue_order` (and `units`) as in the seaborn boxplot/barplot function must be passed to this function.
This function works in one of the two following modes:
a) `perform_stat_test` is True: statistical test as given by argument `test` is performed.
The `test_short_name` argument can be used to customize what appears before the pvalues
b) `perform_stat_test` is False: no statistical test is performed, list of custom p-values `pvalues` are
used for each pair of boxes. The `test_short_name` argument is then used as the name of the
custom statistical test.
:param plot: type of the plot, one of 'boxplot' or 'barplot'.
:param data: seaborn plot's data
:param x: seaborn plot's x
:param y: seaborn plot's y
:param hue: seaborn plot's hue
:param order: seaborn plot's order
:param hue_order: seaborn plot's hue_order
:param width: seaborn plot's width
:param line_height: in axes fraction coordinates
:param text_offset: in points
:param box_pairs: can be of either form:
For non-grouped boxplot: `[(cat1, cat2), (cat3, cat4)]`.
For boxplot grouped by hue: `[((cat1, hue1), (cat2, hue2)), ((cat3, hue3), (cat4, hue4))]`
:param test_short_name:
How the test name should show on the plot, if show_test_name is True
(default). Default is the full test name
:param pvalue_format_string: defaults to `"{.3e}"`
:param pvalue_thresholds: list of lists, or tuples.
Default is:
For "star" text_format: `[[1e-4, "****"], [1e-3, "***"], [1e-2, "**"], [0.05, "*"], [1, "ns"]]`.
For "simple" text_format : `[[1e-5, "1e-5"], [1e-4, "1e-4"], [1e-3, "0.001"], [1e-2, "0.01"], [5e-2, "0.05"]]`
:param pvalues: list or array of p-values for each box pair comparison.
:param stats_params: Parameters for statistical test functions.
:param comparisons_correction: Method for multiple comparisons correction.
One of `statsmodel` `multipletests` methods (w/ default FWER), or a ComparisonCorrection instance.
:param num_comparisons: Override number of comparisons otherwise calculated with number of box_pairs
"""
def find_x_position_box(b_plotter, box_name):
"""
box_name can be either a name "cat" or a tuple ("cat", "hue")
"""
if b_plotter.plot_hues is None:
cat = box_name
hue_offset = 0
else:
cat = box_name[0]
hue_level = box_name[1]
hue_offset = b_plotter.hue_offsets[
b_plotter.hue_names.index(hue_level)]
group_pos = b_plotter.group_names.index(cat)
box_pos = group_pos + hue_offset
return box_pos
def get_box_data(b_plotter, box_name):
    """Return the (null-filtered) data values belonging to one box.

    box_name can be either a name "cat" or a tuple ("cat", "hue").
    Here we really have to duplicate seaborn code, because there is no
    direct access to the box_data in the BoxPlotter class.
    """
    # Fix: use a real conditional expression instead of the
    # `cond and a or b` trick, which picked the wrong branch whenever
    # box_name was falsy (e.g. a category named "" or 0).
    cat = box_name if b_plotter.plot_hues is None else box_name[0]
    index = b_plotter.group_names.index(cat)
    group_data = b_plotter.plot_data[index]
    if b_plotter.plot_hues is None:
        # Draw a single box or a set of boxes
        # with a single level of grouping
        box_data = remove_null(group_data)
    else:
        # Grouped by hue: keep only the rows matching this hue level.
        hue_level = box_name[1]
        hue_mask = b_plotter.plot_hues[index] == hue_level
        box_data = remove_null(group_data[hue_mask])
    return box_data
# Set default values if necessary.
# DEFAULT is a sentinel distinguishing "argument not given" from an
# explicit (possibly falsy) user value.
if pvalue_format_string is DEFAULT:
    pvalue_format_string = '{:.3e}'
    simple_format_string = '{:.2f}'
else:
    # A user-supplied format string is also reused for "simple" text output.
    simple_format_string = pvalue_format_string
if pvalue_thresholds is DEFAULT:
    # Thresholds map p-value ranges to annotation labels; the choice of
    # default depends on the requested text format.
    if text_format == "star":
        pvalue_thresholds = [[1e-4, "****"], [1e-3, "***"],
                             [1e-2, "**"], [0.05, "*"], [1, "ns"]]
    else:
        pvalue_thresholds = [[1e-5, "1e-5"], [1e-4, "1e-4"],
                             [1e-3, "0.001"], [1e-2, "0.01"],
                             [5e-2, "0.05"], [1, "ns"]]
if stats_params is None:
    stats_params = dict()
fig = plt.gcf()
# Validate arguments: the two modes (internal statistical test vs.
# caller-supplied p-values) are mutually exclusive.
if perform_stat_test:
    if test is None:
        raise ValueError("If `perform_stat_test` is True, `test` must be specified.")
    if pvalues is not None or test_short_name is not None:
        raise ValueError("If `perform_stat_test` is True, custom `pvalues` "
                         "or `test_short_name` must be `None`.")
    if test not in IMPLEMENTED_TESTS:
        raise ValueError("test value should be one of the following: {}."
                         .format(', '.join(IMPLEMENTED_TESTS)))
else:
    if pvalues is None:
        raise ValueError("If `perform_stat_test` is False, custom `pvalues` must be specified.")
    if test is not None:
        raise ValueError("If `perform_stat_test` is False, `test` must be None.")
    if len(pvalues) != len(box_pairs):
        raise ValueError("`pvalues` should be of the same length as `box_pairs`.")
if text_annot_custom is not None and len(text_annot_custom) != len(box_pairs):
    raise ValueError("`text_annot_custom` should be of same length as `box_pairs`.")
assert_is_in(
    loc, ['inside', 'outside'], label='argument `loc`'
)
assert_is_in(
    text_format,
    ['full', 'simple', 'star'],
    label='argument `text_format`'
)
# Comparisons correction: normalize a method-name string into a
# ComparisonsCorrection instance; an existing instance is accepted as-is.
if comparisons_correction is None:
    pass
elif isinstance(comparisons_correction, str):
    assert_valid_correction_name(comparisons_correction)
    comparisons_correction = ComparisonsCorrection(comparisons_correction)
elif not(isinstance(comparisons_correction, ComparisonsCorrection)):
    raise ValueError("comparisons_correction must be a statmodels "
                     "method name or a ComparisonCorrection instance")
# Print a legend mapping each star label to its p-value range.
if verbose >= 1 and text_format == 'star':
    print("p-value annotation legend:")
    # Sort thresholds in decreasing order so ranges print from the
    # loosest ("ns") to the strictest significance level.
    pvalue_thresholds = pd.DataFrame(pvalue_thresholds).sort_values(by=0, ascending=False).values
    for i in range(0, len(pvalue_thresholds)):
        if i < len(pvalue_thresholds) - 1:
            print('{}: {:.2e} < p <= {:.2e}'.format(pvalue_thresholds[i][1],
                                                    pvalue_thresholds[i + 1][0],
                                                    pvalue_thresholds[i][0]))
        else:
            print('{}: p <= {:.2e}'.format(pvalue_thresholds[i][1], pvalue_thresholds[i][0]))
    print()
ylim = ax.get_ylim()
yrange = ylim[1] - ylim[0]
# Resolve line_offset / line_offset_to_box defaults; they differ
# depending on whether annotations go inside or outside the axes.
if line_offset is None:
    if loc == 'inside':
        line_offset = 0.05
        if line_offset_to_box is None:
            line_offset_to_box = 0.06
    # 'outside', see valid_list
    else:
        line_offset = 0.03
        if line_offset_to_box is None:
            line_offset_to_box = line_offset
else:
    if loc == 'inside':
        if line_offset_to_box is None:
            line_offset_to_box = 0.06
    elif loc == 'outside':
        line_offset_to_box = line_offset
# Convert the axes-fraction offsets into data coordinates.
y_offset = line_offset * yrange
y_offset_to_box = line_offset_to_box * yrange
if plot == 'boxplot':
    # Create the same plotter object as seaborn's boxplot
    # (private seaborn API; arguments mirror sns.boxplot defaults).
    box_plotter = sns.categorical._BoxPlotter(
        x, y, hue, data, order, hue_order, orient=None, width=width, color=None,
        palette=None, saturation=.75, dodge=True, fliersize=5, linewidth=None)
elif plot == 'barplot':
    # Create the same plotter object as seaborn's barplot
    # (private seaborn API; arguments mirror sns.barplot defaults).
    box_plotter = sns.categorical._BarPlotter(
        x, y, hue, data, order, hue_order,
        estimator=np.mean, ci=95, n_boot=1000, units=units,
        orient=None, color=None, palette=None, saturation=.75, seed=None,
        errcolor=".26", errwidth=None, capsize=None, dodge=True)
else:
    raise NotImplementedError("Only boxplots and barplots are supported.")
# Build the list of box data structures with the x and ymax positions
group_names = box_plotter.group_names
hue_names = box_plotter.hue_names
if box_plotter.plot_hues is None:
    box_names = group_names
    labels = box_names
else:
    # One box per (group, hue) combination; labels are only used for
    # verbose console output.
    box_names = [(group_name, hue_name) for group_name in group_names for hue_name in hue_names]
    labels = ['{}_{}'.format(group_name, hue_name) for (group_name, hue_name) in box_names]
box_structs = [
    {
        'box': box_names[i],
        'label': labels[i],
        'x': find_x_position_box(box_plotter, box_names[i]),
        'box_data': get_box_data(box_plotter, box_names[i]),
        # ymax is NaN for an empty box so nan-aware reductions skip it.
        'ymax': (np.amax(get_box_data(box_plotter, box_names[i]))
                 if len(get_box_data(box_plotter, box_names[i])) > 0
                 else np.nan)
    } for i in range(len(box_names))]
# Sort the box data structures by position along the x axis
box_structs = sorted(box_structs, key=lambda a: a['x'])
# Add the index position in the list of boxes along the x axis
box_structs = [dict(box_struct, xi=i) for i, box_struct in enumerate(box_structs)]
# Same data structure list with access key by box name
box_structs_dic = {box_struct['box']: box_struct for box_struct in box_structs}
# Build the list of box data structure pairs
box_struct_pairs = []
for i_box_pair, (box1, box2) in enumerate(box_pairs):
    valid = box1 in box_names and box2 in box_names
    if not valid:
        raise ValueError("box_pairs contains an invalid box pair.")
    # i_box_pair will keep track of the original order of the box pairs.
    box_struct1 = dict(box_structs_dic[box1], i_box_pair=i_box_pair)
    box_struct2 = dict(box_structs_dic[box2], i_box_pair=i_box_pair)
    # Keep each pair ordered left-to-right along the x axis.
    if box_struct1['x'] <= box_struct2['x']:
        pair = (box_struct1, box_struct2)
    else:
        pair = (box_struct2, box_struct1)
    box_struct_pairs.append(pair)
# Draw first the annotations with the shortest between-boxes distance, in order to reduce
# overlapping between annotations.
box_struct_pairs = sorted(box_struct_pairs, key=lambda a: abs(a[1]['x'] - a[0]['x']))
# Build array that contains the x and y_max position of the highest annotation or box data at
# a given x position, and also keeps track of the number of stacked annotations.
# This array will be updated when a new annotation is drawn.
# Row 0: x positions, row 1: current highest y, row 2: stacked-annotation count.
y_stack_arr = np.array([[box_struct['x'] for box_struct in box_structs],
                        [box_struct['ymax'] for box_struct in box_structs],
                        [0 for _ in range(len(box_structs))]])
if loc == 'outside':
    # Outside the axes, every annotation starts from the top of the plot.
    y_stack_arr[1, :] = ylim[1]
ann_list = []
test_result_list = []
ymaxs = []
y_stack = []
# fetch results of all tests
for box_struct1, box_struct2 in box_struct_pairs:
    box1 = box_struct1['box']
    box2 = box_struct2['box']
    box_data1 = box_struct1['box_data']
    box_data2 = box_struct2['box_data']
    i_box_pair = box_struct1['i_box_pair']
    if perform_stat_test:
        result = stat_test(
            box_data1,
            box_data2,
            test,
            comparisons_correction=comparisons_correction,
            # "auto" means: number of comparisons = number of box pairs.
            num_comparisons= (num_comparisons if num_comparisons != "auto"
                              else len(box_struct_pairs)),
            verbose=verbose,
            # Second-to-last threshold is the significance level
            # (e.g. 0.05 with the default star thresholds).
            alpha=pvalue_thresholds[-2][0],
            **stats_params
        )
    else:
        # Caller-supplied p-values: wrap each in a StatResult with no
        # test statistic.
        test_short_name = test_short_name if test_short_name is not None else ''
        result = StatResult(
            'Custom statistical test',
            test_short_name,
            None,
            None,
            pval=pvalues[i_box_pair],
            alpha=pvalue_thresholds[-2][0]
        )
    result.box1 = box1
    result.box2 = box2
    test_result_list.append(result)
# Perform other types of correction methods for multiple testing
if comparisons_correction is not None:
    corr_name = comparisons_correction.name
    # If correction is applied to a set of pvalues
    if comparisons_correction.type == 1:
        original_pvalues = [result.pval for result in test_result_list]
        significant_pvalues = comparisons_correction(original_pvalues)
        for is_significant, result in zip(significant_pvalues, test_result_list):
            result.correction_method = corr_name
            result.corrected_significance = is_significant
    # If correction is applied per pvalue, just compare with alpha
    else:
        alpha = comparisons_correction.alpha
        for result in test_result_list:
            result.correction_method = corr_name
            result.corrected_significance = (result.pval < alpha
                                             or np.isclose(result.pval,
                                                           alpha))
# Then annotate
for box_structs, result in zip(box_struct_pairs, test_result_list):
    x1 = box_structs[0]['x']
    x2 = box_structs[1]['x']
    xi1 = box_structs[0]['xi']
    xi2 = box_structs[1]['xi']
    label1 = box_structs[0]['label']
    label2 = box_structs[1]['label']
    # ymax1 = box_structs[0]['ymax'] # Not used, so do not assign them
    # ymax2 = box_structs[1]['ymax']
    i_box_pair = box_structs[0]['i_box_pair']
    if verbose >= 1:
        print("{} v.s. {}: {}".format(label1, label2, result.formatted_output))
    if text_annot_custom is not None:
        text = text_annot_custom[i_box_pair]
    else:
        if text_format == 'full':
            # Two-stage formatting: the first .format() injects the
            # p-value format spec into the template, the second fills
            # in the actual values.
            text = "{} p = {}{}".format('{}', pvalue_format_string, '{}').format(
                result.test_short_name, result.pval, result.significance_suffix)
        elif text_format == 'star':
            text = pval_annotation_text(result, pvalue_thresholds)
        elif text_format == 'simple':
            if show_test_name:
                # show_test_name is True on this branch, so this reduces
                # to `test_short_name or test` (fall back to the test id).
                test_short_name = show_test_name and test_short_name or test
            else:
                test_short_name = ""
            text = simple_text(result, simple_format_string,
                               pvalue_thresholds, test_short_name)
        else:  # None:
            text = None
    # Find y maximum for all the y_stacks *in between* the box1 and the box2
    i_ymax_in_range_x1_x2 = xi1 + np.nanargmax(
        y_stack_arr[1, np.where((x1 <= y_stack_arr[0, :])
                                & (y_stack_arr[0, :] <= x2))])
    ymax_in_range_x1_x2 = y_stack_arr[1, i_ymax_in_range_x1_x2]
    yref = ymax_in_range_x1_x2
    yref2 = yref
    # Choose the best offset depending on whether there is an annotation below
    # at the x position in the range [x1, x2] where the stack is the highest
    if y_stack_arr[2, i_ymax_in_range_x1_x2] == 0:
        # there is only a box below
        offset = y_offset_to_box
    else:
        # there is an annotation below
        offset = y_offset
    y = yref2 + offset
    h = line_height * yrange
    # Bracket shape: rise from both boxes, horizontal bar across the top.
    line_x, line_y = [x1, x1, x2, x2], [y, y + h, y + h, y]
    if loc == 'inside':
        ax.plot(line_x, line_y, lw=linewidth, c=color)
    elif loc == 'outside':
        # Clipping disabled so the line can extend beyond the axes frame.
        line = lines.Line2D(line_x, line_y, lw=linewidth, c=color, transform=ax.transData)
        line.set_clip_on(False)
        ax.add_line(line)
    # why should we change here the ylim if at the very end we set it to the correct range????
    # ax.set_ylim((ylim[0], 1.1*(y + h)))
    if text is not None:
        ann = ax.annotate(
            text, xy=(np.mean([x1, x2]), y + h),
            xytext=(0, text_offset), textcoords='offset points',
            xycoords='data', ha='center', va='bottom',
            fontsize=fontsize, clip_on=False, annotation_clip=False)
        ann_list.append(ann)
        plt.draw()
        y_top_annot = None
        got_mpl_error = False
        if not use_fixed_offset:
            try:
                # Measure the rendered text to find how high this
                # annotation reaches in data coordinates.
                bbox = ann.get_window_extent()
                bbox_data = bbox.transformed(ax.transData.inverted())
                y_top_annot = bbox_data.ymax
            except RuntimeError:
                # Backend could not provide an extent yet; fall back below.
                got_mpl_error = True
        if use_fixed_offset or got_mpl_error:
            if verbose >= 1:
                print("Warning: cannot get the text bounding box. Falling back to a fixed"
                      " y offset. Layout may be not optimal.")
            # We will apply a fixed offset in points,
            # based on the font size of the annotation.
            fontsize_points = FontProperties(size='medium').get_size_in_points()
            offset_trans = mtransforms.offset_copy(
                ax.transData, fig=fig, x=0,
                y=1.0 * fontsize_points + text_offset, units='points')
            y_top_display = offset_trans.transform((0, y + h))
            y_top_annot = ax.transData.inverted().transform(y_top_display)[1]
    else:
        y_top_annot = y + h
    y_stack.append(y_top_annot)  # remark: y_stack is not really necessary if we have the stack_array
    ymaxs.append(max(y_stack))
    # Fill the highest y position of the annotation into the y_stack array
    # for all positions in the range x1 to x2
    y_stack_arr[1, (x1 <= y_stack_arr[0, :]) & (y_stack_arr[0, :] <= x2)] = y_top_annot
    # Increment the counter of annotations in the y_stack array
    y_stack_arr[2, xi1:xi2 + 1] = y_stack_arr[2, xi1:xi2 + 1] + 1
y_stack_max = max(ymaxs)
# Extend the y limit so in-axes annotations are not cut off; annotations
# drawn outside keep the original limits.
if loc == 'inside':
    ax.set_ylim((ylim[0], max(1.03 * y_stack_max, ylim[1])))
elif loc == 'outside':
    ax.set_ylim((ylim[0], ylim[1]))
return ax, test_result_list
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.